1 Index: linux-2.6.x/crypto/Kconfig
2 ===================================================================
3 RCS file: /cvs/sw/linux-2.6.x/crypto/Kconfig,v
4 retrieving revision 1.1.1.29
5 diff -u -r1.1.1.29 Kconfig
6 --- linux-2.6.x/crypto/Kconfig 10 Oct 2007 00:54:29 -0000 1.1.1.29
7 +++ linux-2.6.x/crypto/Kconfig 15 Dec 2007 11:08:08 -0000
9 source "drivers/crypto/Kconfig"
13 +source "crypto/ocf/Kconfig"
15 Index: linux-2.6.x/crypto/Makefile
16 ===================================================================
17 RCS file: /cvs/sw/linux-2.6.x/crypto/Makefile,v
18 retrieving revision 1.1.1.23
19 diff -u -r1.1.1.23 Makefile
20 --- linux-2.6.x/crypto/Makefile 10 Oct 2007 00:54:29 -0000 1.1.1.23
21 +++ linux-2.6.x/crypto/Makefile 15 Dec 2007 11:08:08 -0000
24 obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o
26 +obj-$(CONFIG_OCF_OCF) += ocf/
29 # generic algorithms and the async_tx api
31 Index: linux-2.6.x/drivers/char/random.c
32 ===================================================================
33 RCS file: /cvs/sw/linux-2.6.x/drivers/char/random.c,v
34 retrieving revision 1.1.1.41
35 retrieving revision 1.6
36 diff -u -r1.1.1.41 -r1.6
37 --- linux-2.6.x/drivers/char/random.c 22 Apr 2008 01:36:57 -0000 1.1.1.41
38 +++ linux-2.6.x/drivers/char/random.c 22 Apr 2008 04:48:56 -0000 1.6
40 * unsigned int value);
41 * void add_interrupt_randomness(int irq);
43 + * void random_input_words(__u32 *buf, size_t wordcount, int ent_count)
44 + * int random_input_wait(void);
46 * add_input_randomness() uses the input layer interrupt timing, as well as
47 * the event type information from the hardware.
50 * a better measure, since the timing of the disk interrupts are more
53 + * random_input_words() just provides a raw block of entropy to the input
54 + * pool, such as from a hardware entropy generator.
56 + * random_input_wait() suspends the caller until such time as the
57 + * entropy pool falls below the write threshold, and returns a count of how
58 + * much entropy (in bits) is needed to sustain the pool.
60 * All of these routines try to estimate how many bits of randomness a
61 * particular randomness source. They do this by keeping track of the
62 * first and second order deltas of the event timings.
68 + * random_input_words - add bulk entropy to pool
70 + * @buf: buffer to add
71 + * @wordcount: number of __u32 words to add
72 + * @ent_count: total amount of entropy (in bits) to credit
74 + * this provides bulk input of entropy to the input pool
77 +void random_input_words(__u32 *buf, size_t wordcount, int ent_count)
79 + add_entropy_words(&input_pool, buf, wordcount);
81 + credit_entropy_store(&input_pool, ent_count);
83 + DEBUG_ENT("crediting %d bits => %d\n",
84 + ent_count, input_pool.entropy_count);
86 + * Wake up waiting processes if we have enough
89 + if (input_pool.entropy_count >= random_read_wakeup_thresh)
90 + wake_up_interruptible(&random_read_wait);
92 +EXPORT_SYMBOL(random_input_words);
95 + * random_input_wait - wait until random needs entropy
97 + * this function sleeps until the /dev/random subsystem actually
98 + * needs more entropy, and then return the amount of entropy
99 + * that it would be nice to have added to the system.
101 +int random_input_wait(void)
105 + wait_event_interruptible(random_write_wait,
106 + input_pool.entropy_count < random_write_wakeup_thresh);
108 + count = random_write_wakeup_thresh - input_pool.entropy_count;
110 + /* likely we got woken up due to a signal */
111 + if (count <= 0) count = random_read_wakeup_thresh;
113 + DEBUG_ENT("requesting %d bits from input_wait()er %d<%d\n",
115 + input_pool.entropy_count, random_write_wakeup_thresh);
119 +EXPORT_SYMBOL(random_input_wait);
122 #define EXTRACT_SIZE 10
124 /*********************************************************************
125 Index: linux-2.6.x/fs/fcntl.c
126 ===================================================================
127 RCS file: /cvs/sw/linux-2.6.x/fs/fcntl.c,v
128 retrieving revision 1.1.1.39
129 retrieving revision 1.5
130 diff -u -r1.1.1.39 -r1.5
131 --- linux-2.6.x/fs/fcntl.c 22 Apr 2008 01:37:55 -0000 1.1.1.39
132 +++ linux-2.6.x/fs/fcntl.c 22 Apr 2008 04:49:02 -0000 1.5
134 ret = dupfd(file, 0, 0);
137 +EXPORT_SYMBOL(sys_dup);
139 #define SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | FASYNC | O_DIRECT | O_NOATIME)
141 Index: linux-2.6.x/include/linux/miscdevice.h
142 ===================================================================
143 RCS file: /cvs/sw/linux-2.6.x/include/linux/miscdevice.h,v
144 retrieving revision 1.1.1.16
145 retrieving revision 1.8
146 diff -u -r1.1.1.16 -r1.8
147 --- linux-2.6.x/include/linux/miscdevice.h 22 Apr 2008 01:36:52 -0000 1.1.1.16
148 +++ linux-2.6.x/include/linux/miscdevice.h 22 Apr 2008 04:49:10 -0000 1.8
150 #define APOLLO_MOUSE_MINOR 7
151 #define PC110PAD_MINOR 9
152 /*#define ADB_MOUSE_MINOR 10 FIXME OBSOLETE */
153 +#define CRYPTODEV_MINOR 70 /* /dev/crypto */
154 #define WATCHDOG_MINOR 130 /* Watchdog timer */
155 #define TEMP_MINOR 131 /* Temperature Sensor */
156 #define RTC_MINOR 135
157 Index: linux-2.6.x/include/linux/random.h
158 ===================================================================
159 RCS file: /cvs/sw/linux-2.6.x/include/linux/random.h,v
160 retrieving revision 1.1.1.12
161 retrieving revision 1.5
162 diff -u -r1.1.1.12 -r1.5
163 --- linux-2.6.x/include/linux/random.h 26 Apr 2007 11:16:52 -0000 1.1.1.12
164 +++ linux-2.6.x/include/linux/random.h 22 May 2008 03:31:38 -0000 1.5
166 #define _LINUX_RANDOM_H
168 #include <linux/ioctl.h>
169 +#include <linux/types.h> /* for __u32 in user space */
171 /* ioctl()'s for the random number generator */
174 /* Clear the entropy pool and associated counters. (Superuser only.) */
175 #define RNDCLEARPOOL _IO( 'R', 0x06 )
177 +#ifdef CONFIG_FIPS_RNG
179 +/* Size of seed value - equal to AES blocksize */
180 +#define AES_BLOCK_SIZE_BYTES 16
181 +#define SEED_SIZE_BYTES AES_BLOCK_SIZE_BYTES
182 +/* Size of AES key */
183 +#define KEY_SIZE_BYTES 16
185 +/* ioctl() structure used by FIPS 140-2 Tests */
186 +struct rand_fips_test {
187 + unsigned char key[KEY_SIZE_BYTES]; /* Input */
188 + unsigned char datetime[SEED_SIZE_BYTES]; /* Input */
189 + unsigned char seed[SEED_SIZE_BYTES]; /* Input */
190 + unsigned char result[SEED_SIZE_BYTES]; /* Output */
193 +/* FIPS 140-2 RNG Variable Seed Test. (Superuser only.) */
194 +#define RNDFIPSVST _IOWR('R', 0x10, struct rand_fips_test)
196 +/* FIPS 140-2 RNG Monte Carlo Test. (Superuser only.) */
197 +#define RNDFIPSMCT _IOWR('R', 0x11, struct rand_fips_test)
199 +#endif /* #ifdef CONFIG_FIPS_RNG */
201 struct rand_pool_info {
206 extern void add_interrupt_randomness(int irq);
208 +extern void random_input_words(__u32 *buf, size_t wordcount, int ent_count);
209 +extern int random_input_wait(void);
210 +#define HAS_RANDOM_INPUT_WAIT 1
212 extern void get_random_bytes(void *buf, int nbytes);
213 void generate_random_uuid(unsigned char uuid_out[16]);
215 --- /dev/null 2007-07-04 13:54:27.000000000 +1000
216 +++ linux/crypto/ocf/hifn/Makefile 2007-07-25 11:02:33.000000000 +1000
218 +# for SGlinux builds
219 +-include $(ROOTDIR)/modules/.config
221 +obj-$(CONFIG_OCF_HIFN) += hifn7751.o
222 +obj-$(CONFIG_OCF_HIFNHIPP) += hifnHIPP.o
225 +EXTRA_CFLAGS += -I$(obj)/.. -I$(obj)/
228 +-include $(TOPDIR)/Rules.make
231 --- /dev/null 2007-07-04 13:54:27.000000000 +1000
232 +++ linux/crypto/ocf/safe/Makefile 2007-07-25 11:02:33.000000000 +1000
234 +# for SGlinux builds
235 +-include $(ROOTDIR)/modules/.config
237 +obj-$(CONFIG_OCF_SAFE) += safe.o
240 +EXTRA_CFLAGS += -I$(obj)/.. -I$(obj)/
243 +-include $(TOPDIR)/Rules.make
246 --- /dev/null 2007-07-04 13:54:27.000000000 +1000
247 +++ linux/crypto/ocf/Makefile 2008-07-04 14:48:17.000000000 +1000
249 +# for SGlinux builds
250 +-include $(ROOTDIR)/modules/.config
252 +OCF_OBJS = crypto.o criov.o
254 +ifdef CONFIG_OCF_RANDOMHARVEST
255 + OCF_OBJS += random.o
258 +ifdef CONFIG_OCF_FIPS
259 + OCF_OBJS += rndtest.o
262 +# Add in autoconf.h to get #defines for CONFIG_xxx
263 +AUTOCONF_H=$(ROOTDIR)/modules/autoconf.h
264 +ifeq ($(AUTOCONF_H), $(wildcard $(AUTOCONF_H)))
265 + EXTRA_CFLAGS += -include $(AUTOCONF_H)
266 + export EXTRA_CFLAGS
272 + mod-subdirs := safe hifn ixp4xx talitos ocfnull
273 + export-objs += crypto.o criov.o random.o
274 + list-multi += ocf.o
281 +EXTRA_CFLAGS += -I$(obj)/.
283 +obj-$(CONFIG_OCF_OCF) += ocf.o
284 +obj-$(CONFIG_OCF_CRYPTODEV) += cryptodev.o
285 +obj-$(CONFIG_OCF_CRYPTOSOFT) += cryptosoft.o
286 +obj-$(CONFIG_OCF_BENCH) += ocf-bench.o
288 +$(_obj)-$(CONFIG_OCF_SAFE) += safe$(_slash)
289 +$(_obj)-$(CONFIG_OCF_HIFN) += hifn$(_slash)
290 +$(_obj)-$(CONFIG_OCF_IXP4XX) += ixp4xx$(_slash)
291 +$(_obj)-$(CONFIG_OCF_TALITOS) += talitos$(_slash)
292 +$(_obj)-$(CONFIG_OCF_PASEMI) += pasemi$(_slash)
293 +$(_obj)-$(CONFIG_OCF_OCFNULL) += ocfnull$(_slash)
295 +ocf-objs := $(OCF_OBJS)
297 +$(list-multi) dummy1: $(ocf-objs)
298 + $(LD) -r -o $@ $(ocf-objs)
302 + rm -f *.o *.ko .*.o.flags .*.ko.cmd .*.o.cmd .*.mod.o.cmd *.mod.c
303 + rm -f */*.o */*.ko */.*.o.cmd */.*.ko.cmd */.*.mod.o.cmd */*.mod.c */.*.o.flags
306 +-include $(TOPDIR)/Rules.make
310 +# release gen targets
315 + REL=`date +%Y%m%d`; \
316 + patch=ocf-linux-$$REL.patch; \
317 + patch24=ocf-linux-24-$$REL.patch; \
318 + patch26=ocf-linux-26-$$REL.patch; \
320 + find . -name Makefile; \
321 + find . -name Config.in; \
322 + find . -name Kconfig; \
323 + find . -name README; \
324 + find . -name '*.[ch]' | grep -v '.mod.c'; \
325 + ) | while read t; do \
326 + diff -Nau /dev/null $$t | sed 's?^+++ \./?+++ linux/crypto/ocf/?'; \
328 + cat patches/linux-2.4.35-ocf.patch $$patch > $$patch24; \
329 + cat patches/linux-2.6.25-ocf.patch $$patch > $$patch26
333 + REL=`date +%Y%m%d`; RELDIR=/tmp/ocf-linux-$$REL; \
335 + rm -rf /tmp/ocf-linux-$$REL*; \
336 + mkdir -p $$RELDIR/tools; \
337 + cp README* $$RELDIR; \
338 + cp patches/openss*.patch $$RELDIR; \
339 + cp patches/crypto-tools.patch $$RELDIR; \
340 + cp tools/[!C]* $$RELDIR/tools; \
342 + tar cvf $$RELDIR/ocf-linux.tar \
347 + --exclude=*.mod.* \
348 + --exclude=README* \
349 + --exclude=ocf-*.patch \
350 + --exclude=ocf/patches/openss*.patch \
351 + --exclude=ocf/patches/crypto-tools.patch \
352 + --exclude=ocf/tools \
354 + gzip -9 $$RELDIR/ocf-linux.tar; \
356 + tar cvf ocf-linux-$$REL.tar ocf-linux-$$REL; \
357 + gzip -9 ocf-linux-$$REL.tar; \
358 + cd $$CURDIR/../../user; \
359 + rm -rf /tmp/crypto-tools-$$REL*; \
360 + tar cvf /tmp/crypto-tools-$$REL.tar \
364 + --exclude=cryptotest \
365 + --exclude=cryptokeytest \
367 + gzip -9 /tmp/crypto-tools-$$REL.tar
369 --- /dev/null 2007-07-04 13:54:27.000000000 +1000
370 +++ linux/crypto/ocf/talitos/Makefile 2007-07-25 11:02:33.000000000 +1000
372 +# for SGlinux builds
373 +-include $(ROOTDIR)/modules/.config
375 +obj-$(CONFIG_OCF_TALITOS) += talitos.o
378 +EXTRA_CFLAGS += -I$(obj)/.. -I$(obj)/
381 +-include $(TOPDIR)/Rules.make
384 --- /dev/null 2007-07-04 13:54:27.000000000 +1000
385 +++ linux/crypto/ocf/ixp4xx/Makefile 2007-10-19 11:24:59.000000000 +1000
387 +# for SGlinux builds
388 +-include $(ROOTDIR)/modules/.config
391 +# You will need to point this at your Intel ixp425 includes, this portion
392 +# of the Makefile only really works under SGLinux with the appropriate libs
393 +# installed. They can be downloaded from http://www.snapgear.org/
395 +ifeq ($(CONFIG_CPU_IXP46X),y)
398 +ifeq ($(CONFIG_CPU_IXP43X),y)
405 +ifdef CONFIG_IXP400_LIB_2_4
406 +IX_XSCALE_SW = $(ROOTDIR)/modules/ixp425/ixp400-2.4/ixp400_xscale_sw
407 +OSAL_DIR = $(ROOTDIR)/modules/ixp425/ixp400-2.4/ixp_osal
409 +ifdef CONFIG_IXP400_LIB_2_1
410 +IX_XSCALE_SW = $(ROOTDIR)/modules/ixp425/ixp400-2.1/ixp400_xscale_sw
411 +OSAL_DIR = $(ROOTDIR)/modules/ixp425/ixp400-2.1/ixp_osal
413 +ifdef CONFIG_IXP400_LIB_2_0
414 +IX_XSCALE_SW = $(ROOTDIR)/modules/ixp425/ixp400-2.0/ixp400_xscale_sw
415 +OSAL_DIR = $(ROOTDIR)/modules/ixp425/ixp400-2.0/ixp_osal
418 +ifdef CONFIG_IXP400_LIB_2_4
421 + -I$(IX_XSCALE_SW)/src/include \
422 + -I$(OSAL_DIR)/common/include/ \
423 + -I$(OSAL_DIR)/common/include/modules/ \
424 + -I$(OSAL_DIR)/common/include/modules/ddk/ \
425 + -I$(OSAL_DIR)/common/include/modules/bufferMgt/ \
426 + -I$(OSAL_DIR)/common/include/modules/ioMem/ \
427 + -I$(OSAL_DIR)/common/os/linux/include/ \
428 + -I$(OSAL_DIR)/common/os/linux/include/core/ \
429 + -I$(OSAL_DIR)/common/os/linux/include/modules/ \
430 + -I$(OSAL_DIR)/common/os/linux/include/modules/ddk/ \
431 + -I$(OSAL_DIR)/common/os/linux/include/modules/bufferMgt/ \
432 + -I$(OSAL_DIR)/common/os/linux/include/modules/ioMem/ \
433 + -I$(OSAL_DIR)/platforms/$(IXPLATFORM)/include/ \
434 + -I$(OSAL_DIR)/platforms/$(IXPLATFORM)/os/linux/include/ \
435 + -DENABLE_IOMEM -DENABLE_BUFFERMGT -DENABLE_DDK \
436 + -DUSE_IXP4XX_CRYPTO
440 + -I$(IX_XSCALE_SW)/src/include \
442 + -I$(OSAL_DIR)/os/linux/include/ \
443 + -I$(OSAL_DIR)/os/linux/include/modules/ \
444 + -I$(OSAL_DIR)/os/linux/include/modules/ioMem/ \
445 + -I$(OSAL_DIR)/os/linux/include/modules/bufferMgt/ \
446 + -I$(OSAL_DIR)/os/linux/include/core/ \
447 + -I$(OSAL_DIR)/os/linux/include/platforms/ \
448 + -I$(OSAL_DIR)/os/linux/include/platforms/ixp400/ \
449 + -I$(OSAL_DIR)/os/linux/include/platforms/ixp400/ixp425 \
450 + -I$(OSAL_DIR)/os/linux/include/platforms/ixp400/ixp465 \
451 + -I$(OSAL_DIR)/os/linux/include/core/ \
452 + -I$(OSAL_DIR)/include/ \
453 + -I$(OSAL_DIR)/include/modules/ \
454 + -I$(OSAL_DIR)/include/modules/bufferMgt/ \
455 + -I$(OSAL_DIR)/include/modules/ioMem/ \
456 + -I$(OSAL_DIR)/include/platforms/ \
457 + -I$(OSAL_DIR)/include/platforms/ixp400/ \
458 + -DUSE_IXP4XX_CRYPTO
461 +ifdef CONFIG_IXP400_LIB_1_4
464 + -I$(ROOTDIR)/modules/ixp425/ixp400-1.4/ixp400_xscale_sw/src/include \
465 + -I$(ROOTDIR)/modules/ixp425/ixp400-1.4/ixp400_xscale_sw/src/linux \
466 + -DUSE_IXP4XX_CRYPTO
469 +IXPDIR = ixp-version-is-not-supported
472 +ifeq ($(CONFIG_CPU_IXP46X),y)
473 +IXP_CFLAGS += -D__ixp46X
475 +ifeq ($(CONFIG_CPU_IXP43X),y)
476 +IXP_CFLAGS += -D__ixp43X
478 +IXP_CFLAGS += -D__ixp42X
482 +obj-$(CONFIG_OCF_IXP4XX) += ixp4xx.o
485 +EXTRA_CFLAGS += $(IXP_CFLAGS) -I$(obj)/.. -I$(obj)/.
488 +-include $(TOPDIR)/Rules.make
491 --- /dev/null 2007-07-04 13:54:27.000000000 +1000
492 +++ linux/crypto/ocf/ocfnull/Makefile 2007-07-25 11:02:33.000000000 +1000
494 +# for SGlinux builds
495 +-include $(ROOTDIR)/modules/.config
497 +obj-$(CONFIG_OCF_OCFNULL) += ocfnull.o
500 +EXTRA_CFLAGS += -I$(obj)/..
503 +-include $(TOPDIR)/Rules.make
506 --- /dev/null 2007-07-04 13:54:27.000000000 +1000
507 +++ linux/crypto/ocf/pasemi/Makefile 2007-12-12 11:36:18.000000000 +1000
509 +# for SGlinux builds
510 +-include $(ROOTDIR)/modules/.config
512 +obj-$(CONFIG_OCF_PASEMI) += pasemi.o
515 +EXTRA_CFLAGS += -I$(obj)/.. -I$(obj)/
518 +-include $(TOPDIR)/Rules.make
521 --- /dev/null 2007-07-04 13:54:27.000000000 +1000
522 +++ linux/crypto/ocf/Config.in 2008-06-23 10:09:55.000000000 +1000
524 +#############################################################################
526 +mainmenu_option next_comment
527 +comment 'OCF Configuration'
528 +tristate 'OCF (Open Cryptographic Framework)' CONFIG_OCF_OCF
529 +dep_mbool ' enable fips RNG checks (fips check on RNG data before use)' \
530 + CONFIG_OCF_FIPS $CONFIG_OCF_OCF
531 +dep_mbool ' enable harvesting entropy for /dev/random' \
532 + CONFIG_OCF_RANDOMHARVEST $CONFIG_OCF_OCF
533 +dep_tristate ' cryptodev (user space support)' \
534 + CONFIG_OCF_CRYPTODEV $CONFIG_OCF_OCF
535 +dep_tristate ' cryptosoft (software crypto engine)' \
536 + CONFIG_OCF_CRYPTOSOFT $CONFIG_OCF_OCF
537 +dep_tristate ' safenet (HW crypto engine)' \
538 + CONFIG_OCF_SAFE $CONFIG_OCF_OCF
539 +dep_tristate ' IXP4xx (HW crypto engine)' \
540 + CONFIG_OCF_IXP4XX $CONFIG_OCF_OCF
541 +dep_mbool ' Enable IXP4xx HW to perform SHA1 and MD5 hashing (very slow)' \
542 + CONFIG_OCF_IXP4XX_SHA1_MD5 $CONFIG_OCF_IXP4XX
543 +dep_tristate ' hifn (HW crypto engine)' \
544 + CONFIG_OCF_HIFN $CONFIG_OCF_OCF
545 +dep_tristate ' talitos (HW crypto engine)' \
546 + CONFIG_OCF_TALITOS $CONFIG_OCF_OCF
547 +dep_tristate ' pasemi (HW crypto engine)' \
548 + CONFIG_OCF_PASEMI $CONFIG_OCF_OCF
549 +dep_tristate ' ocfnull (does no crypto)' \
550 + CONFIG_OCF_OCFNULL $CONFIG_OCF_OCF
551 +dep_tristate ' ocf-bench (HW crypto in-kernel benchmark)' \
552 + CONFIG_OCF_BENCH $CONFIG_OCF_OCF
555 +#############################################################################
556 --- /dev/null 2007-07-04 13:54:27.000000000 +1000
557 +++ linux/crypto/ocf/Kconfig 2008-06-23 10:10:33.000000000 +1000
559 +menu "OCF Configuration"
562 +	tristate "OCF (Open Cryptographic Framework)"
564 + A linux port of the OpenBSD/FreeBSD crypto framework.
566 +config OCF_RANDOMHARVEST
567 + bool "crypto random --- harvest entropy for /dev/random"
570 + Includes code to harvest random numbers from devices that support it.
573 + bool "enable fips RNG checks"
574 + depends on OCF_OCF && OCF_RANDOMHARVEST
576 + Run all RNG provided data through a fips check before
577 +	  adding it to /dev/random's entropy pool.
579 +config OCF_CRYPTODEV
580 + tristate "cryptodev (user space support)"
583 + The user space API to access crypto hardware.
585 +config OCF_CRYPTOSOFT
586 + tristate "cryptosoft (software crypto engine)"
589 + A software driver for the OCF framework that uses
590 + the kernel CryptoAPI.
593 + tristate "safenet (HW crypto engine)"
596 + A driver for a number of the safenet Excel crypto accelerators.
597 + Currently tested and working on the 1141 and 1741.
600 + tristate "IXP4xx (HW crypto engine)"
603 + XScale IXP4xx crypto accelerator driver. Requires the
604 + Intel Access library.
606 +config OCF_IXP4XX_SHA1_MD5
607 + bool "IXP4xx SHA1 and MD5 Hashing"
608 + depends on OCF_IXP4XX
610 + Allows the IXP4xx crypto accelerator to perform SHA1 and MD5 hashing.
611 + Note: this is MUCH slower than using cryptosoft (software crypto engine).
614 + tristate "hifn (HW crypto engine)"
617 + OCF driver for various HIFN based crypto accelerators.
618 + (7951, 7955, 7956, 7751, 7811)
621 + tristate "Hifn HIPP (HW packet crypto engine)"
624 + OCF driver for various HIFN (HIPP) based crypto accelerators
628 + tristate "talitos (HW crypto engine)"
631 + OCF driver for Freescale's security engine (SEC/talitos).
634 + tristate "pasemi (HW crypto engine)"
635 + depends on OCF_OCF && PPC_PASEMI
637 +	  OCF driver for PA Semi PWRficient DMA Engine
640 + tristate "ocfnull (fake crypto engine)"
643 + OCF driver for measuring ipsec overheads (does no crypto)
646 + tristate "ocf-bench (HW crypto in-kernel benchmark)"
649 + A very simple encryption test for the in-kernel interface
650 + of OCF. Also includes code to benchmark the IXP Access library
654 --- /dev/null 2007-07-04 13:54:27.000000000 +1000
655 +++ linux/crypto/ocf/README 2007-12-15 21:31:03.000000000 +1000
657 +README - ocf-linux-20071215
658 +---------------------------
660 +This README provides instructions for getting ocf-linux compiled and
661 +operating in a generic linux environment. For other information you
662 +might like to visit the home page for this project:
664 + http://ocf-linux.sourceforge.net/
669 + Not much in this file for now, just some notes. I usually build
670 + the ocf support as modules but it can be built into the kernel as
673 + * mknod /dev/crypto c 10 70
675 + * to add OCF to your kernel source, you have two options. Apply
676 + the kernel specific patch:
678 + cd linux-2.4*; gunzip < ocf-linux-24-XXXXXXXX.patch.gz | patch -p1
679 + cd linux-2.6*; gunzip < ocf-linux-26-XXXXXXXX.patch.gz | patch -p1
681 + if you do one of the above, then you can proceed to the next step,
682 +      or you can do the above process by hand using the patches against
683 + linux-2.4.35 and 2.6.23 to include the ocf code under crypto/ocf.
684 + Here's how to add it:
686 + for 2.4.35 (and later)
688 + cd linux-2.4.35/crypto
689 + tar xvzf ocf-linux.tar.gz
691 + patch -p1 < crypto/ocf/patches/linux-2.4.35-ocf.patch
693 + for 2.6.23 (and later)
695 + cd linux-2.6.23/crypto
696 + tar xvzf ocf-linux.tar.gz
698 + patch -p1 < crypto/ocf/patches/linux-2.6.23-ocf.patch
700 + It should be easy to take this patch and apply it to other more
701 + recent versions of the kernels. The same patches should also work
702 + relatively easily on kernels as old as 2.6.11 and 2.4.18.
704 + * under 2.4 if you are on a non-x86 platform, you may need to:
706 + cp linux-2.X.x/include/asm-i386/kmap_types.h linux-2.X.x/include/asm-YYY
708 + so that you can build the kernel crypto support needed for the cryptosoft
711 + * For simplicity you should enable all the crypto support in your kernel
712 + except for the test driver. Likewise for the OCF options. Do not
713 + enable OCF crypto drivers for HW that you do not have (for example
714 + ixp4xx will not compile on non-Xscale systems).
716 + * make sure that cryptodev.h (from ocf-linux.tar.gz) is installed as
717 + crypto/cryptodev.h in an include directory that is used for building
718 + applications for your platform. For example on a host system that
721 + /usr/include/crypto/cryptodev.h
723 + * patch your openssl-0.9.8g code with the openssl-0.9.8g.patch.
724 + (NOTE: there is no longer a need to patch ssh). The patch is against:
727 + If you need a patch for an older version of openssl, you should look
728 + to older OCF releases. This patch is unlikely to work on older
731 + openssl-0.9.8g.patch
732 + - enables --with-cryptodev for non BSD systems
733 + - adds -cpu option to openssl speed for calculating CPU load
735 + - fixes null pointer in openssl speed multi thread output.
736 + - fixes test keys to work with linux crypto's more stringent
738 + - adds MD5/SHA acceleration (Ronen Shitrit), only enabled
739 + with the --with-cryptodev-digests option
740 + - fixes bug in engine code caching.
742 + * build crypto-tools-XXXXXXXX.tar.gz if you want to try some of the BSD
743 + tools for testing OCF (ie., cryptotest).
745 +How to load the OCF drivers
746 +---------------------------
748 + First insert the base modules:
753 + You can then install the software OCF driver with:
757 + and one or more of the OCF HW drivers with:
764 + all the drivers take a debug option to enable verbose debug so that
765 + you can see what is going on. For debug you load them as:
767 + insmod ocf crypto_debug=1
768 + insmod cryptodev cryptodev_debug=1
769 + insmod cryptosoft swcr_debug=1
771 + You may load more than one OCF crypto driver but then there is no guarantee
772 + as to which will be used.
774 + You can also enable debug at run time on 2.6 systems with the following:
776 + echo 1 > /sys/module/ocf/parameters/crypto_debug
777 + echo 1 > /sys/module/cryptodev/parameters/cryptodev_debug
778 + echo 1 > /sys/module/cryptosoft/parameters/swcr_debug
779 + echo 1 > /sys/module/hifn7751/parameters/hifn_debug
780 + echo 1 > /sys/module/safe/parameters/safe_debug
781 + echo 1 > /sys/module/ixp4xx/parameters/ixp_debug
784 +Testing the OCF support
785 +-----------------------
787 + run "cryptotest", it should do a short test for a couple of
788 +    des packets.  If it does, everything is working.
790 + If this works, then ssh will use the driver when invoked as:
792 + ssh -c 3des username@host
794 + to see for sure that it is operating, enable debug as defined above.
796 + To get a better idea of performance run:
798 + cryptotest 100 4096
800 + There are more options to cryptotest, see the help.
802 + It is also possible to use openssl to test the speed of the crypto
805 + openssl speed -evp des -engine cryptodev -elapsed
806 + openssl speed -evp des3 -engine cryptodev -elapsed
807 + openssl speed -evp aes128 -engine cryptodev -elapsed
809 + and multiple threads (10) with:
811 + openssl speed -evp des -engine cryptodev -elapsed -multi 10
812 + openssl speed -evp des3 -engine cryptodev -elapsed -multi 10
813 + openssl speed -evp aes128 -engine cryptodev -elapsed -multi 10
815 + for public key testing you can try:
818 + openssl speed -engine cryptodev rsa -elapsed
819 + openssl speed -engine cryptodev dsa -elapsed
822 +david_mccullough@securecomputing.com
823 --- /dev/null 2007-07-04 13:54:27.000000000 +1000
824 +++ linux/crypto/ocf/hifn/hifn7751reg.h 2007-06-20 09:15:58.000000000 +1000
826 +/* $FreeBSD: src/sys/dev/hifn/hifn7751reg.h,v 1.7 2007/03/21 03:42:49 sam Exp $ */
827 +/* $OpenBSD: hifn7751reg.h,v 1.35 2002/04/08 17:49:42 jason Exp $ */
830 + * Invertex AEON / Hifn 7751 driver
831 + * Copyright (c) 1999 Invertex Inc. All rights reserved.
832 + * Copyright (c) 1999 Theo de Raadt
833 + * Copyright (c) 2000-2001 Network Security Technologies, Inc.
834 + * http://www.netsec.net
836 + * Please send any comments, feedback, bug-fixes, or feature requests to
837 + * software@invertex.com.
839 + * Redistribution and use in source and binary forms, with or without
840 + * modification, are permitted provided that the following conditions
843 + * 1. Redistributions of source code must retain the above copyright
844 + * notice, this list of conditions and the following disclaimer.
845 + * 2. Redistributions in binary form must reproduce the above copyright
846 + * notice, this list of conditions and the following disclaimer in the
847 + * documentation and/or other materials provided with the distribution.
848 + * 3. The name of the author may not be used to endorse or promote products
849 + * derived from this software without specific prior written permission.
852 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
853 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
854 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
855 + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
856 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
857 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
858 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
859 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
860 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
861 + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
863 + * Effort sponsored in part by the Defense Advanced Research Projects
864 + * Agency (DARPA) and Air Force Research Laboratory, Air Force
865 + * Materiel Command, USAF, under agreement number F30602-01-2-0537.
872 + * Some PCI configuration space offset defines. The names were made
873 + * identical to the names used by the Linux kernel.
875 +#define HIFN_BAR0 PCIR_BAR(0) /* PUC register map */
876 +#define HIFN_BAR1 PCIR_BAR(1) /* DMA register map */
877 +#define HIFN_TRDY_TIMEOUT 0x40
878 +#define HIFN_RETRY_TIMEOUT 0x41
881 + * PCI vendor and device identifiers
882 + * (the names are preserved from their OpenBSD source).
884 +#define PCI_VENDOR_HIFN 0x13a3 /* Hifn */
885 +#define PCI_PRODUCT_HIFN_7751 0x0005 /* 7751 */
886 +#define PCI_PRODUCT_HIFN_6500 0x0006 /* 6500 */
887 +#define PCI_PRODUCT_HIFN_7811 0x0007 /* 7811 */
888 +#define PCI_PRODUCT_HIFN_7855 0x001f /* 7855 */
889 +#define PCI_PRODUCT_HIFN_7951 0x0012 /* 7951 */
890 +#define PCI_PRODUCT_HIFN_7955 0x0020 /* 7954/7955 */
891 +#define PCI_PRODUCT_HIFN_7956 0x001d /* 7956 */
893 +#define PCI_VENDOR_INVERTEX 0x14e1 /* Invertex */
894 +#define PCI_PRODUCT_INVERTEX_AEON 0x0005 /* AEON */
896 +#define PCI_VENDOR_NETSEC 0x1660 /* NetSec */
897 +#define PCI_PRODUCT_NETSEC_7751 0x7751 /* 7751 */
900 + * The values below should be a multiple of 4 -- and be large enough to handle
901 + * any command the driver implements.
903 + * MAX_COMMAND = base command + mac command + encrypt command +
904 + * mac-key + rc4-key
905 + * MAX_RESULT = base result + mac result + mac + encrypt result
909 +#define HIFN_MAX_COMMAND (8 + 8 + 8 + 64 + 260)
910 +#define HIFN_MAX_RESULT (8 + 4 + 20 + 4)
915 + * Holds an individual descriptor for any of the rings.
917 +typedef struct hifn_desc {
918 + volatile u_int32_t l; /* length and status bits */
919 + volatile u_int32_t p;
923 + * Masks for the "length" field of struct hifn_desc.
925 +#define HIFN_D_LENGTH 0x0000ffff /* length bit mask */
926 +#define HIFN_D_MASKDONEIRQ 0x02000000 /* mask the done interrupt */
927 +#define HIFN_D_DESTOVER 0x04000000 /* destination overflow */
928 +#define HIFN_D_OVER 0x08000000 /* overflow */
929 +#define HIFN_D_LAST 0x20000000 /* last descriptor in chain */
930 +#define HIFN_D_JUMP 0x40000000 /* jump descriptor */
931 +#define HIFN_D_VALID 0x80000000 /* valid bit */
935 + * Processing Unit Registers (offset from BASEREG0)
937 +#define HIFN_0_PUDATA 0x00 /* Processing Unit Data */
938 +#define HIFN_0_PUCTRL 0x04 /* Processing Unit Control */
939 +#define HIFN_0_PUISR 0x08 /* Processing Unit Interrupt Status */
940 +#define HIFN_0_PUCNFG 0x0c /* Processing Unit Configuration */
941 +#define HIFN_0_PUIER 0x10 /* Processing Unit Interrupt Enable */
942 +#define HIFN_0_PUSTAT 0x14 /* Processing Unit Status/Chip ID */
943 +#define HIFN_0_FIFOSTAT 0x18 /* FIFO Status */
944 +#define HIFN_0_FIFOCNFG 0x1c /* FIFO Configuration */
945 +#define HIFN_0_PUCTRL2 0x28 /* Processing Unit Control (2nd map) */
946 +#define HIFN_0_MUTE1 0x80
947 +#define HIFN_0_MUTE2 0x90
948 +#define HIFN_0_SPACESIZE 0x100 /* Register space size */
950 +/* Processing Unit Control Register (HIFN_0_PUCTRL) */
951 +#define HIFN_PUCTRL_CLRSRCFIFO 0x0010 /* clear source fifo */
952 +#define HIFN_PUCTRL_STOP 0x0008 /* stop pu */
953 +#define HIFN_PUCTRL_LOCKRAM 0x0004 /* lock ram */
954 +#define HIFN_PUCTRL_DMAENA 0x0002 /* enable dma */
955 +#define HIFN_PUCTRL_RESET 0x0001 /* Reset processing unit */
957 +/* Processing Unit Interrupt Status Register (HIFN_0_PUISR) */
958 +#define HIFN_PUISR_CMDINVAL 0x8000 /* Invalid command interrupt */
959 +#define HIFN_PUISR_DATAERR 0x4000 /* Data error interrupt */
960 +#define HIFN_PUISR_SRCFIFO 0x2000 /* Source FIFO ready interrupt */
961 +#define HIFN_PUISR_DSTFIFO 0x1000 /* Destination FIFO ready interrupt */
962 +#define HIFN_PUISR_DSTOVER 0x0200 /* Destination overrun interrupt */
963 +#define HIFN_PUISR_SRCCMD 0x0080 /* Source command interrupt */
964 +#define HIFN_PUISR_SRCCTX 0x0040 /* Source context interrupt */
965 +#define HIFN_PUISR_SRCDATA 0x0020 /* Source data interrupt */
966 +#define HIFN_PUISR_DSTDATA 0x0010 /* Destination data interrupt */
967 +#define HIFN_PUISR_DSTRESULT 0x0004 /* Destination result interrupt */
969 +/* Processing Unit Configuration Register (HIFN_0_PUCNFG) */
970 +#define HIFN_PUCNFG_DRAMMASK 0xe000 /* DRAM size mask */
971 +#define HIFN_PUCNFG_DSZ_256K 0x0000 /* 256k dram */
972 +#define HIFN_PUCNFG_DSZ_512K 0x2000 /* 512k dram */
973 +#define HIFN_PUCNFG_DSZ_1M 0x4000 /* 1m dram */
974 +#define HIFN_PUCNFG_DSZ_2M 0x6000 /* 2m dram */
975 +#define HIFN_PUCNFG_DSZ_4M 0x8000 /* 4m dram */
976 +#define HIFN_PUCNFG_DSZ_8M 0xa000 /* 8m dram */
977 +#define HIFN_PUNCFG_DSZ_16M 0xc000 /* 16m dram */
978 +#define HIFN_PUCNFG_DSZ_32M 0xe000 /* 32m dram */
979 +#define HIFN_PUCNFG_DRAMREFRESH 0x1800 /* DRAM refresh rate mask */
980 +#define HIFN_PUCNFG_DRFR_512 0x0000 /* 512 divisor of ECLK */
981 +#define HIFN_PUCNFG_DRFR_256 0x0800 /* 256 divisor of ECLK */
982 +#define HIFN_PUCNFG_DRFR_128 0x1000 /* 128 divisor of ECLK */
983 +#define HIFN_PUCNFG_TCALLPHASES 0x0200 /* your guess is as good as mine... */
984 +#define HIFN_PUCNFG_TCDRVTOTEM 0x0100 /* your guess is as good as mine... */
985 +#define HIFN_PUCNFG_BIGENDIAN 0x0080 /* DMA big endian mode */
986 +#define HIFN_PUCNFG_BUS32 0x0040 /* Bus width 32bits */
987 +#define HIFN_PUCNFG_BUS16 0x0000 /* Bus width 16 bits */
988 +#define HIFN_PUCNFG_CHIPID 0x0020 /* Allow chipid from PUSTAT */
989 +#define HIFN_PUCNFG_DRAM 0x0010 /* Context RAM is DRAM */
990 +#define HIFN_PUCNFG_SRAM 0x0000 /* Context RAM is SRAM */
991 +#define HIFN_PUCNFG_COMPSING 0x0004 /* Enable single compression context */
992 +#define HIFN_PUCNFG_ENCCNFG 0x0002 /* Encryption configuration */
994 +/* Processing Unit Interrupt Enable Register (HIFN_0_PUIER) */
995 +#define HIFN_PUIER_CMDINVAL 0x8000 /* Invalid command interrupt */
996 +#define HIFN_PUIER_DATAERR 0x4000 /* Data error interrupt */
997 +#define HIFN_PUIER_SRCFIFO 0x2000 /* Source FIFO ready interrupt */
998 +#define HIFN_PUIER_DSTFIFO 0x1000 /* Destination FIFO ready interrupt */
999 +#define HIFN_PUIER_DSTOVER 0x0200 /* Destination overrun interrupt */
1000 +#define HIFN_PUIER_SRCCMD 0x0080 /* Source command interrupt */
1001 +#define HIFN_PUIER_SRCCTX 0x0040 /* Source context interrupt */
1002 +#define HIFN_PUIER_SRCDATA 0x0020 /* Source data interrupt */
1003 +#define HIFN_PUIER_DSTDATA 0x0010 /* Destination data interrupt */
1004 +#define HIFN_PUIER_DSTRESULT 0x0004 /* Destination result interrupt */
1006 +/* Processing Unit Status Register/Chip ID (HIFN_0_PUSTAT) */
1007 +#define HIFN_PUSTAT_CMDINVAL 0x8000 /* Invalid command interrupt */
1008 +#define HIFN_PUSTAT_DATAERR 0x4000 /* Data error interrupt */
1009 +#define HIFN_PUSTAT_SRCFIFO 0x2000 /* Source FIFO ready interrupt */
1010 +#define HIFN_PUSTAT_DSTFIFO 0x1000 /* Destination FIFO ready interrupt */
1011 +#define HIFN_PUSTAT_DSTOVER 0x0200 /* Destination overrun interrupt */
1012 +#define HIFN_PUSTAT_SRCCMD 0x0080 /* Source command interrupt */
1013 +#define HIFN_PUSTAT_SRCCTX 0x0040 /* Source context interrupt */
1014 +#define HIFN_PUSTAT_SRCDATA 0x0020 /* Source data interrupt */
1015 +#define HIFN_PUSTAT_DSTDATA 0x0010 /* Destination data interrupt */
1016 +#define HIFN_PUSTAT_DSTRESULT 0x0004 /* Destination result interrupt */
1017 +#define HIFN_PUSTAT_CHIPREV 0x00ff /* Chip revision mask */
1018 +#define HIFN_PUSTAT_CHIPENA 0xff00 /* Chip enabled mask */
1019 +#define HIFN_PUSTAT_ENA_2 0x1100 /* Level 2 enabled */
1020 +#define HIFN_PUSTAT_ENA_1 0x1000 /* Level 1 enabled */
1021 +#define HIFN_PUSTAT_ENA_0 0x3000 /* Level 0 enabled */
1022 +#define HIFN_PUSTAT_REV_2 0x0020 /* 7751 PT6/2 */
1023 +#define HIFN_PUSTAT_REV_3 0x0030 /* 7751 PT6/3 */
1025 +/* FIFO Status Register (HIFN_0_FIFOSTAT) */
1026 +#define HIFN_FIFOSTAT_SRC 0x7f00 /* Source FIFO available */
1027 +#define HIFN_FIFOSTAT_DST 0x007f /* Destination FIFO available */
1029 +/* FIFO Configuration Register (HIFN_0_FIFOCNFG) */
1030 +#define HIFN_FIFOCNFG_THRESHOLD 0x0400 /* must be written as this value */
1033 + * DMA Interface Registers (offset from BASEREG1)
1035 +#define HIFN_1_DMA_CRAR 0x0c /* DMA Command Ring Address */
1036 +#define HIFN_1_DMA_SRAR 0x1c /* DMA Source Ring Address */
1037 +#define HIFN_1_DMA_RRAR 0x2c /* DMA Result Ring Address */
1038 +#define HIFN_1_DMA_DRAR 0x3c /* DMA Destination Ring Address */
1039 +#define HIFN_1_DMA_CSR 0x40 /* DMA Status and Control */
1040 +#define HIFN_1_DMA_IER 0x44 /* DMA Interrupt Enable */
1041 +#define HIFN_1_DMA_CNFG 0x48 /* DMA Configuration */
1042 +#define HIFN_1_PLL 0x4c /* 7955/7956: PLL config */
1043 +#define HIFN_1_7811_RNGENA 0x60 /* 7811: rng enable */
1044 +#define HIFN_1_7811_RNGCFG 0x64 /* 7811: rng config */
1045 +#define HIFN_1_7811_RNGDAT 0x68 /* 7811: rng data */
1046 +#define HIFN_1_7811_RNGSTS 0x6c /* 7811: rng status */
1047 +#define HIFN_1_DMA_CNFG2 0x6c /* 7955/7956: dma config #2 */
1048 +#define HIFN_1_7811_MIPSRST 0x94 /* 7811: MIPS reset */
1049 +#define HIFN_1_REVID 0x98 /* Revision ID */
1051 +#define HIFN_1_PUB_RESET 0x204 /* Public/RNG Reset */
1052 +#define HIFN_1_PUB_BASE 0x300 /* Public Base Address */
1053 +#define HIFN_1_PUB_OPLEN 0x304 /* 7951-compat Public Operand Length */
1054 +#define HIFN_1_PUB_OP 0x308 /* 7951-compat Public Operand */
1055 +#define HIFN_1_PUB_STATUS 0x30c /* 7951-compat Public Status */
1056 +#define HIFN_1_PUB_IEN 0x310 /* Public Interrupt enable */
1057 +#define HIFN_1_RNG_CONFIG 0x314 /* RNG config */
1058 +#define HIFN_1_RNG_DATA 0x318 /* RNG data */
1059 +#define HIFN_1_PUB_MODE 0x320 /* PK mode */
1060 +#define HIFN_1_PUB_FIFO_OPLEN 0x380 /* first element of oplen fifo */
1061 +#define HIFN_1_PUB_FIFO_OP 0x384 /* first element of op fifo */
1062 +#define HIFN_1_PUB_MEM 0x400 /* start of Public key memory */
1063 +#define HIFN_1_PUB_MEMEND 0xbff /* end of Public key memory */
1065 +/* DMA Status and Control Register (HIFN_1_DMA_CSR) */
1066 +#define HIFN_DMACSR_D_CTRLMASK 0xc0000000 /* Destination Ring Control */
1067 +#define HIFN_DMACSR_D_CTRL_NOP 0x00000000 /* Dest. Control: no-op */
1068 +#define HIFN_DMACSR_D_CTRL_DIS 0x40000000 /* Dest. Control: disable */
1069 +#define HIFN_DMACSR_D_CTRL_ENA 0x80000000 /* Dest. Control: enable */
1070 +#define HIFN_DMACSR_D_ABORT 0x20000000 /* Destination Ring PCI Abort */
1071 +#define HIFN_DMACSR_D_DONE 0x10000000 /* Destination Ring Done */
1072 +#define HIFN_DMACSR_D_LAST 0x08000000 /* Destination Ring Last */
1073 +#define HIFN_DMACSR_D_WAIT 0x04000000 /* Destination Ring Waiting */
1074 +#define HIFN_DMACSR_D_OVER 0x02000000 /* Destination Ring Overflow */
1075 +#define HIFN_DMACSR_R_CTRL 0x00c00000 /* Result Ring Control */
1076 +#define HIFN_DMACSR_R_CTRL_NOP 0x00000000 /* Result Control: no-op */
1077 +#define HIFN_DMACSR_R_CTRL_DIS 0x00400000 /* Result Control: disable */
1078 +#define HIFN_DMACSR_R_CTRL_ENA 0x00800000 /* Result Control: enable */
1079 +#define HIFN_DMACSR_R_ABORT 0x00200000 /* Result Ring PCI Abort */
1080 +#define HIFN_DMACSR_R_DONE 0x00100000 /* Result Ring Done */
1081 +#define HIFN_DMACSR_R_LAST 0x00080000 /* Result Ring Last */
1082 +#define HIFN_DMACSR_R_WAIT 0x00040000 /* Result Ring Waiting */
1083 +#define HIFN_DMACSR_R_OVER 0x00020000 /* Result Ring Overflow */
1084 +#define HIFN_DMACSR_S_CTRL 0x0000c000 /* Source Ring Control */
1085 +#define HIFN_DMACSR_S_CTRL_NOP 0x00000000 /* Source Control: no-op */
1086 +#define HIFN_DMACSR_S_CTRL_DIS 0x00004000 /* Source Control: disable */
1087 +#define HIFN_DMACSR_S_CTRL_ENA 0x00008000 /* Source Control: enable */
1088 +#define HIFN_DMACSR_S_ABORT 0x00002000 /* Source Ring PCI Abort */
1089 +#define HIFN_DMACSR_S_DONE 0x00001000 /* Source Ring Done */
1090 +#define HIFN_DMACSR_S_LAST 0x00000800 /* Source Ring Last */
1091 +#define HIFN_DMACSR_S_WAIT 0x00000400 /* Source Ring Waiting */
1092 +#define HIFN_DMACSR_ILLW 0x00000200 /* Illegal write (7811 only) */
1093 +#define HIFN_DMACSR_ILLR 0x00000100 /* Illegal read (7811 only) */
1094 +#define HIFN_DMACSR_C_CTRL 0x000000c0 /* Command Ring Control */
1095 +#define HIFN_DMACSR_C_CTRL_NOP 0x00000000 /* Command Control: no-op */
1096 +#define HIFN_DMACSR_C_CTRL_DIS 0x00000040 /* Command Control: disable */
1097 +#define HIFN_DMACSR_C_CTRL_ENA 0x00000080 /* Command Control: enable */
1098 +#define HIFN_DMACSR_C_ABORT 0x00000020 /* Command Ring PCI Abort */
1099 +#define HIFN_DMACSR_C_DONE 0x00000010 /* Command Ring Done */
1100 +#define HIFN_DMACSR_C_LAST 0x00000008 /* Command Ring Last */
1101 +#define HIFN_DMACSR_C_WAIT 0x00000004 /* Command Ring Waiting */
1102 +#define HIFN_DMACSR_PUBDONE 0x00000002 /* Public op done (7951 only) */
1103 +#define HIFN_DMACSR_ENGINE 0x00000001 /* Command Ring Engine IRQ */
1105 +/* DMA Interrupt Enable Register (HIFN_1_DMA_IER) */
1106 +#define HIFN_DMAIER_D_ABORT 0x20000000 /* Destination Ring PCIAbort */
1107 +#define HIFN_DMAIER_D_DONE 0x10000000 /* Destination Ring Done */
1108 +#define HIFN_DMAIER_D_LAST 0x08000000 /* Destination Ring Last */
1109 +#define HIFN_DMAIER_D_WAIT 0x04000000 /* Destination Ring Waiting */
1110 +#define HIFN_DMAIER_D_OVER 0x02000000 /* Destination Ring Overflow */
1111 +#define HIFN_DMAIER_R_ABORT 0x00200000 /* Result Ring PCI Abort */
1112 +#define HIFN_DMAIER_R_DONE 0x00100000 /* Result Ring Done */
1113 +#define HIFN_DMAIER_R_LAST 0x00080000 /* Result Ring Last */
1114 +#define HIFN_DMAIER_R_WAIT 0x00040000 /* Result Ring Waiting */
1115 +#define HIFN_DMAIER_R_OVER 0x00020000 /* Result Ring Overflow */
1116 +#define HIFN_DMAIER_S_ABORT 0x00002000 /* Source Ring PCI Abort */
1117 +#define HIFN_DMAIER_S_DONE 0x00001000 /* Source Ring Done */
1118 +#define HIFN_DMAIER_S_LAST 0x00000800 /* Source Ring Last */
1119 +#define HIFN_DMAIER_S_WAIT 0x00000400 /* Source Ring Waiting */
1120 +#define HIFN_DMAIER_ILLW 0x00000200 /* Illegal write (7811 only) */
1121 +#define HIFN_DMAIER_ILLR 0x00000100 /* Illegal read (7811 only) */
1122 +#define HIFN_DMAIER_C_ABORT 0x00000020 /* Command Ring PCI Abort */
1123 +#define HIFN_DMAIER_C_DONE 0x00000010 /* Command Ring Done */
1124 +#define HIFN_DMAIER_C_LAST 0x00000008 /* Command Ring Last */
1125 +#define HIFN_DMAIER_C_WAIT 0x00000004 /* Command Ring Waiting */
1126 +#define HIFN_DMAIER_PUBDONE 0x00000002 /* public op done (7951 only) */
1127 +#define HIFN_DMAIER_ENGINE 0x00000001 /* Engine IRQ */
1129 +/* DMA Configuration Register (HIFN_1_DMA_CNFG) */
1130 +#define HIFN_DMACNFG_BIGENDIAN 0x10000000 /* big endian mode */
1131 +#define HIFN_DMACNFG_POLLFREQ 0x00ff0000 /* Poll frequency mask */
1132 +#define HIFN_DMACNFG_UNLOCK 0x00000800
1133 +#define HIFN_DMACNFG_POLLINVAL 0x00000700 /* Invalid Poll Scalar */
1134 +#define HIFN_DMACNFG_LAST 0x00000010 /* Host control LAST bit */
1135 +#define HIFN_DMACNFG_MODE 0x00000004 /* DMA mode */
1136 +#define HIFN_DMACNFG_DMARESET 0x00000002 /* DMA Reset # */
1137 +#define HIFN_DMACNFG_MSTRESET 0x00000001 /* Master Reset # */
1139 +/* DMA Configuration Register (HIFN_1_DMA_CNFG2) */
1140 +#define HIFN_DMACNFG2_PKSWAP32 (1 << 19) /* swap the OPLEN/OP reg */
1141 +#define HIFN_DMACNFG2_PKSWAP8 (1 << 18) /* swap the bits of OPLEN/OP */
1142 +#define HIFN_DMACNFG2_BAR0_SWAP32 (1<<17) /* swap the bytes of BAR0 */
1143 +#define HIFN_DMACNFG2_BAR1_SWAP8 (1<<16) /* swap the bits of BAR1 */
1144 +#define HIFN_DMACNFG2_INIT_WRITE_BURST_SHIFT 12
1145 +#define HIFN_DMACNFG2_INIT_READ_BURST_SHIFT 8
1146 +#define HIFN_DMACNFG2_TGT_WRITE_BURST_SHIFT 4
1147 +#define HIFN_DMACNFG2_TGT_READ_BURST_SHIFT 0
1149 +/* 7811 RNG Enable Register (HIFN_1_7811_RNGENA) */
1150 +#define HIFN_7811_RNGENA_ENA 0x00000001 /* enable RNG */
1152 +/* 7811 RNG Config Register (HIFN_1_7811_RNGCFG) */
1153 +#define HIFN_7811_RNGCFG_PRE1 0x00000f00 /* first prescalar */
1154 +#define HIFN_7811_RNGCFG_OPRE 0x00000080 /* output prescalar */
1155 +#define HIFN_7811_RNGCFG_DEFL 0x00000f80 /* 2 words/ 1/100 sec */
1157 +/* 7811 RNG Status Register (HIFN_1_7811_RNGSTS) */
1158 +#define HIFN_7811_RNGSTS_RDY 0x00004000 /* two numbers in FIFO */
1159 +#define HIFN_7811_RNGSTS_UFL 0x00001000 /* rng underflow */
1161 +/* 7811 MIPS Reset Register (HIFN_1_7811_MIPSRST) */
1162 +#define HIFN_MIPSRST_BAR2SIZE 0xffff0000 /* sdram size */
1163 +#define HIFN_MIPSRST_GPRAMINIT 0x00008000 /* gpram can be accessed */
1164 +#define HIFN_MIPSRST_CRAMINIT 0x00004000 /* ctxram can be accessed */
1165 +#define HIFN_MIPSRST_LED2 0x00000400 /* external LED2 */
1166 +#define HIFN_MIPSRST_LED1 0x00000200 /* external LED1 */
1167 +#define HIFN_MIPSRST_LED0 0x00000100 /* external LED0 */
1168 +#define HIFN_MIPSRST_MIPSDIS 0x00000004 /* disable MIPS */
1169 +#define HIFN_MIPSRST_MIPSRST 0x00000002 /* warm reset MIPS */
1170 +#define HIFN_MIPSRST_MIPSCOLD 0x00000001 /* cold reset MIPS */
1172 +/* Public key reset register (HIFN_1_PUB_RESET) */
1173 +#define HIFN_PUBRST_RESET 0x00000001 /* reset public/rng unit */
1175 +/* Public operation register (HIFN_1_PUB_OP) */
1176 +#define HIFN_PUBOP_AOFFSET 0x0000003e /* A offset */
1177 +#define HIFN_PUBOP_BOFFSET 0x00000fc0 /* B offset */
1178 +#define HIFN_PUBOP_MOFFSET 0x0003f000 /* M offset */
1179 +#define HIFN_PUBOP_OP_MASK 0x003c0000 /* Opcode: */
1180 +#define HIFN_PUBOP_OP_NOP 0x00000000 /* NOP */
1181 +#define HIFN_PUBOP_OP_ADD 0x00040000 /* ADD */
1182 +#define HIFN_PUBOP_OP_ADDC 0x00080000 /* ADD w/carry */
1183 +#define HIFN_PUBOP_OP_SUB 0x000c0000 /* SUB */
1184 +#define HIFN_PUBOP_OP_SUBC 0x00100000 /* SUB w/carry */
1185 +#define HIFN_PUBOP_OP_MODADD 0x00140000 /* Modular ADD */
1186 +#define HIFN_PUBOP_OP_MODSUB 0x00180000 /* Modular SUB */
1187 +#define HIFN_PUBOP_OP_INCA 0x001c0000 /* INC A */
1188 +#define HIFN_PUBOP_OP_DECA 0x00200000 /* DEC A */
1189 +#define HIFN_PUBOP_OP_MULT 0x00240000 /* MULT */
1190 +#define HIFN_PUBOP_OP_MODMULT 0x00280000 /* Modular MULT */
1191 +#define HIFN_PUBOP_OP_MODRED 0x002c0000 /* Modular Red */
1192 +#define HIFN_PUBOP_OP_MODEXP 0x00300000 /* Modular Exp */
1194 +/* Public operand length register (HIFN_1_PUB_OPLEN) */
1195 +#define HIFN_PUBOPLEN_MODLEN 0x0000007f
1196 +#define HIFN_PUBOPLEN_EXPLEN 0x0003ff80
1197 +#define HIFN_PUBOPLEN_REDLEN 0x003c0000
1199 +/* Public status register (HIFN_1_PUB_STATUS) */
1200 +#define HIFN_PUBSTS_DONE 0x00000001 /* operation done */
1201 +#define HIFN_PUBSTS_CARRY 0x00000002 /* carry */
1202 +#define HIFN_PUBSTS_FIFO_EMPTY 0x00000100 /* fifo empty */
1203 +#define HIFN_PUBSTS_FIFO_FULL 0x00000200 /* fifo full */
1204 +#define HIFN_PUBSTS_FIFO_OVFL 0x00000400 /* fifo overflow */
1205 +#define HIFN_PUBSTS_FIFO_WRITE 0x000f0000 /* fifo write */
1206 +#define HIFN_PUBSTS_FIFO_READ 0x0f000000 /* fifo read */
1208 +/* Public interrupt enable register (HIFN_1_PUB_IEN) */
1209 +#define HIFN_PUBIEN_DONE 0x00000001 /* operation done interrupt */
1211 +/* Random number generator config register (HIFN_1_RNG_CONFIG) */
1212 +#define HIFN_RNGCFG_ENA 0x00000001 /* enable rng */
1215 + * Register offsets in register set 1
1218 +#define HIFN_UNLOCK_SECRET1 0xf4
1219 +#define HIFN_UNLOCK_SECRET2 0xfc
1222 + * PLL config register
1224 + * This register is present only on 7954/7955/7956 parts. It must be
1225 + * programmed according to the bus interface method used by the h/w.
1226 + * Note that the parts require a stable clock. Since the PCI clock
1227 + * may vary, the reference clock must usually be used. To avoid
1228 + * overclocking the core logic, setup must be done carefully, refer
1229 + * to the driver for details. The exact multiplier required varies
1230 + * by part and system configuration; refer to the Hifn documentation.
1232 +#define HIFN_PLL_REF_SEL 0x00000001 /* REF/HBI clk selection */
1233 +#define HIFN_PLL_BP 0x00000002 /* bypass (used during setup) */
1234 +/* bit 2 reserved */
1235 +#define HIFN_PLL_PK_CLK_SEL 0x00000008 /* public key clk select */
1236 +#define HIFN_PLL_PE_CLK_SEL 0x00000010 /* packet engine clk select */
1237 +/* bits 5-9 reserved */
1238 +#define HIFN_PLL_MBSET 0x00000400 /* must be set to 1 */
1239 +#define HIFN_PLL_ND 0x00003800 /* Fpll_ref multiplier select */
1240 +#define HIFN_PLL_ND_SHIFT 11
1241 +#define HIFN_PLL_ND_2 0x00000000 /* 2x */
1242 +#define HIFN_PLL_ND_4 0x00000800 /* 4x */
1243 +#define HIFN_PLL_ND_6 0x00001000 /* 6x */
1244 +#define HIFN_PLL_ND_8 0x00001800 /* 8x */
1245 +#define HIFN_PLL_ND_10 0x00002000 /* 10x */
1246 +#define HIFN_PLL_ND_12 0x00002800 /* 12x */
1247 +/* bits 14-15 reserved */
1248 +#define HIFN_PLL_IS 0x00010000 /* charge pump current select */
1249 +/* bits 17-31 reserved */
1252 + * Board configuration specifies only these bits.
1254 +#define HIFN_PLL_CONFIG (HIFN_PLL_IS|HIFN_PLL_ND|HIFN_PLL_REF_SEL)
1257 + * Public Key Engine Mode Register
1259 +#define HIFN_PKMODE_HOSTINVERT (1 << 0) /* HOST INVERT */
1260 +#define HIFN_PKMODE_ENHANCED (1 << 1) /* Enable enhanced mode */
1263 +/*********************************************************************
1264 + * Structs for board commands
1266 + *********************************************************************/
1269 + * Structure to help build up the command data structure.
1271 +typedef struct hifn_base_command {
1272 + volatile u_int16_t masks;
1273 + volatile u_int16_t session_num;
1274 + volatile u_int16_t total_source_count;
1275 + volatile u_int16_t total_dest_count;
1276 +} hifn_base_command_t;
1278 +#define HIFN_BASE_CMD_MAC 0x0400
1279 +#define HIFN_BASE_CMD_CRYPT 0x0800
1280 +#define HIFN_BASE_CMD_DECODE 0x2000
1281 +#define HIFN_BASE_CMD_SRCLEN_M 0xc000
1282 +#define HIFN_BASE_CMD_SRCLEN_S 14
1283 +#define HIFN_BASE_CMD_DSTLEN_M 0x3000
1284 +#define HIFN_BASE_CMD_DSTLEN_S 12
1285 +#define HIFN_BASE_CMD_LENMASK_HI 0x30000
1286 +#define HIFN_BASE_CMD_LENMASK_LO 0x0ffff
1289 + * Structure to help build up the command data structure.
1291 +typedef struct hifn_crypt_command {
1292 + volatile u_int16_t masks;
1293 + volatile u_int16_t header_skip;
1294 + volatile u_int16_t source_count;
1295 + volatile u_int16_t reserved;
1296 +} hifn_crypt_command_t;
1298 +#define HIFN_CRYPT_CMD_ALG_MASK 0x0003 /* algorithm: */
1299 +#define HIFN_CRYPT_CMD_ALG_DES 0x0000 /* DES */
1300 +#define HIFN_CRYPT_CMD_ALG_3DES 0x0001 /* 3DES */
1301 +#define HIFN_CRYPT_CMD_ALG_RC4 0x0002 /* RC4 */
1302 +#define HIFN_CRYPT_CMD_ALG_AES 0x0003 /* AES */
1303 +#define HIFN_CRYPT_CMD_MODE_MASK 0x0018 /* Encrypt mode: */
1304 +#define HIFN_CRYPT_CMD_MODE_ECB 0x0000 /* ECB */
1305 +#define HIFN_CRYPT_CMD_MODE_CBC 0x0008 /* CBC */
1306 +#define HIFN_CRYPT_CMD_MODE_CFB 0x0010 /* CFB */
1307 +#define HIFN_CRYPT_CMD_MODE_OFB 0x0018 /* OFB */
1308 +#define HIFN_CRYPT_CMD_CLR_CTX 0x0040 /* clear context */
1309 +#define HIFN_CRYPT_CMD_NEW_KEY 0x0800 /* expect new key */
1310 +#define HIFN_CRYPT_CMD_NEW_IV 0x1000 /* expect new iv */
1312 +#define HIFN_CRYPT_CMD_SRCLEN_M 0xc000
1313 +#define HIFN_CRYPT_CMD_SRCLEN_S 14
1315 +#define HIFN_CRYPT_CMD_KSZ_MASK 0x0600 /* AES key size: */
1316 +#define HIFN_CRYPT_CMD_KSZ_128 0x0000 /* 128 bit */
1317 +#define HIFN_CRYPT_CMD_KSZ_192 0x0200 /* 192 bit */
1318 +#define HIFN_CRYPT_CMD_KSZ_256 0x0400 /* 256 bit */
1321 + * Structure to help build up the command data structure.
1323 +typedef struct hifn_mac_command {
1324 + volatile u_int16_t masks;
1325 + volatile u_int16_t header_skip;
1326 + volatile u_int16_t source_count;
1327 + volatile u_int16_t reserved;
1328 +} hifn_mac_command_t;
1330 +#define HIFN_MAC_CMD_ALG_MASK 0x0001
1331 +#define HIFN_MAC_CMD_ALG_SHA1 0x0000
1332 +#define HIFN_MAC_CMD_ALG_MD5 0x0001
1333 +#define HIFN_MAC_CMD_MODE_MASK 0x000c
1334 +#define HIFN_MAC_CMD_MODE_HMAC 0x0000
1335 +#define HIFN_MAC_CMD_MODE_SSL_MAC 0x0004
1336 +#define HIFN_MAC_CMD_MODE_HASH 0x0008
1337 +#define HIFN_MAC_CMD_MODE_FULL 0x0004
1338 +#define HIFN_MAC_CMD_TRUNC 0x0010
1339 +#define HIFN_MAC_CMD_RESULT 0x0020
1340 +#define HIFN_MAC_CMD_APPEND 0x0040
1341 +#define HIFN_MAC_CMD_SRCLEN_M 0xc000
1342 +#define HIFN_MAC_CMD_SRCLEN_S 14
1345 + * MAC POS IPsec initiates authentication after encryption on encodes
1346 + * and before decryption on decodes.
1348 +#define HIFN_MAC_CMD_POS_IPSEC 0x0200
1349 +#define HIFN_MAC_CMD_NEW_KEY 0x0800
1352 + * The poll frequency and poll scalar defines are unshifted values used
1353 + * to set fields in the DMA Configuration Register.
1355 +#ifndef HIFN_POLL_FREQUENCY
1356 +#define HIFN_POLL_FREQUENCY 0x1
1359 +#ifndef HIFN_POLL_SCALAR
1360 +#define HIFN_POLL_SCALAR 0x0
1363 +#define HIFN_MAX_SEGLEN 0xffff /* maximum dma segment len */
1364 +#define HIFN_MAX_DMALEN 0x3ffff /* maximum dma length */
1365 +#endif /* __HIFN_H__ */
1366 --- /dev/null 2007-07-04 13:54:27.000000000 +1000
1367 +++ linux/crypto/ocf/hifn/hifn7751var.h 2007-06-20 09:22:39.000000000 +1000
1369 +/* $FreeBSD: src/sys/dev/hifn/hifn7751var.h,v 1.9 2007/03/21 03:42:49 sam Exp $ */
1370 +/* $OpenBSD: hifn7751var.h,v 1.42 2002/04/08 17:49:42 jason Exp $ */
1373 + * Invertex AEON / Hifn 7751 driver
1374 + * Copyright (c) 1999 Invertex Inc. All rights reserved.
1375 + * Copyright (c) 1999 Theo de Raadt
1376 + * Copyright (c) 2000-2001 Network Security Technologies, Inc.
1377 + * http://www.netsec.net
1379 + * Please send any comments, feedback, bug-fixes, or feature requests to
1380 + * software@invertex.com.
1382 + * Redistribution and use in source and binary forms, with or without
1383 + * modification, are permitted provided that the following conditions
1386 + * 1. Redistributions of source code must retain the above copyright
1387 + * notice, this list of conditions and the following disclaimer.
1388 + * 2. Redistributions in binary form must reproduce the above copyright
1389 + * notice, this list of conditions and the following disclaimer in the
1390 + * documentation and/or other materials provided with the distribution.
1391 + * 3. The name of the author may not be used to endorse or promote products
1392 + * derived from this software without specific prior written permission.
1395 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
1396 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
1397 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
1398 + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
1399 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
1400 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
1401 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
1402 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1403 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
1404 + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1406 + * Effort sponsored in part by the Defense Advanced Research Projects
1407 + * Agency (DARPA) and Air Force Research Laboratory, Air Force
1408 + * Materiel Command, USAF, under agreement number F30602-01-2-0537.
1412 +#ifndef __HIFN7751VAR_H__
1413 +#define __HIFN7751VAR_H__
1418 + * Some configurable values for the driver. By default command+result
1419 + * descriptor rings are the same size. The src+dst descriptor rings
1420 + * are sized at 3.5x the number of potential commands. Slower parts
1421 + * (e.g. 7951) tend to run out of src descriptors; faster parts (7811)
1422 + * src+cmd/result descriptors. It's not clear that increasing the size
1423 + * of the descriptor rings helps performance significantly as other
1424 + * factors tend to come into play (e.g. copying misaligned packets).
1426 +#define HIFN_D_CMD_RSIZE 24 /* command descriptors */
1427 +#define HIFN_D_SRC_RSIZE ((HIFN_D_CMD_RSIZE * 7) / 2) /* source descriptors */
1428 +#define HIFN_D_RES_RSIZE HIFN_D_CMD_RSIZE /* result descriptors */
1429 +#define HIFN_D_DST_RSIZE HIFN_D_SRC_RSIZE /* destination descriptors */
1432 + * Length values for cryptography
1434 +#define HIFN_DES_KEY_LENGTH 8
1435 +#define HIFN_3DES_KEY_LENGTH 24
1436 +#define HIFN_MAX_CRYPT_KEY_LENGTH HIFN_3DES_KEY_LENGTH
1437 +#define HIFN_IV_LENGTH 8
1438 +#define HIFN_AES_IV_LENGTH 16
1439 +#define HIFN_MAX_IV_LENGTH HIFN_AES_IV_LENGTH
1442 + * Length values for authentication
1444 +#define HIFN_MAC_KEY_LENGTH 64
1445 +#define HIFN_MD5_LENGTH 16
1446 +#define HIFN_SHA1_LENGTH 20
1447 +#define HIFN_MAC_TRUNC_LENGTH 12
1449 +#define MAX_SCATTER 64
1452 + * Data structure to hold all 4 rings and any other ring related data.
1456 + * Descriptor rings. We add +1 to the size to accommodate the
1457 + * jump descriptor.
1459 + struct hifn_desc cmdr[HIFN_D_CMD_RSIZE+1];
1460 + struct hifn_desc srcr[HIFN_D_SRC_RSIZE+1];
1461 + struct hifn_desc dstr[HIFN_D_DST_RSIZE+1];
1462 + struct hifn_desc resr[HIFN_D_RES_RSIZE+1];
1464 + struct hifn_command *hifn_commands[HIFN_D_RES_RSIZE];
1466 + u_char command_bufs[HIFN_D_CMD_RSIZE][HIFN_MAX_COMMAND];
1467 + u_char result_bufs[HIFN_D_CMD_RSIZE][HIFN_MAX_RESULT];
1468 + u_int32_t slop[HIFN_D_CMD_RSIZE];
1470 + u_int64_t test_src, test_dst;
1473 + * Our current positions for insertion and removal from the descriptor
1476 + int cmdi, srci, dsti, resi;
1477 + volatile int cmdu, srcu, dstu, resu;
1478 + int cmdk, srck, dstk, resk;
1481 +struct hifn_session {
1484 + u_int8_t hs_iv[HIFN_MAX_IV_LENGTH];
1487 +#define HIFN_RING_SYNC(sc, r, i, f) \
1488 + /* DAVIDM bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_dmamap, (f)) */
1490 +#define HIFN_CMDR_SYNC(sc, i, f) HIFN_RING_SYNC((sc), cmdr, (i), (f))
1491 +#define HIFN_RESR_SYNC(sc, i, f) HIFN_RING_SYNC((sc), resr, (i), (f))
1492 +#define HIFN_SRCR_SYNC(sc, i, f) HIFN_RING_SYNC((sc), srcr, (i), (f))
1493 +#define HIFN_DSTR_SYNC(sc, i, f) HIFN_RING_SYNC((sc), dstr, (i), (f))
1495 +#define HIFN_CMD_SYNC(sc, i, f) \
1496 + /* DAVIDM bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_dmamap, (f)) */
1498 +#define HIFN_RES_SYNC(sc, i, f) \
1499 + /* DAVIDM bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_dmamap, (f)) */
1501 +typedef int bus_size_t;
1504 + * Holds data specific to a single HIFN board.
1506 +struct hifn_softc {
1507 + softc_device_decl sc_dev;
1509 + struct pci_dev *sc_pcidev; /* PCI device pointer */
1510 + spinlock_t sc_mtx; /* per-instance lock */
1512 + int sc_num; /* for multiple devs */
1514 + ocf_iomem_t sc_bar0;
1515 + bus_size_t sc_bar0_lastreg;/* bar0 last reg written */
1516 + ocf_iomem_t sc_bar1;
1517 + bus_size_t sc_bar1_lastreg;/* bar1 last reg written */
1521 + u_int32_t sc_dmaier;
1522 + u_int32_t sc_drammodel; /* 1=dram, 0=sram */
1523 + u_int32_t sc_pllconfig; /* 7954/7955/7956 PLL config */
1525 + struct hifn_dma *sc_dma;
1526 + dma_addr_t sc_dma_physaddr;/* physical address of sc_dma */
1532 + struct hifn_session *sc_sessions;
1535 +#define HIFN_HAS_RNG 0x1 /* includes random number generator */
1536 +#define HIFN_HAS_PUBLIC 0x2 /* includes public key support */
1537 +#define HIFN_HAS_AES 0x4 /* includes AES support */
1538 +#define HIFN_IS_7811 0x8 /* Hifn 7811 part */
1539 +#define HIFN_IS_7956 0x10 /* Hifn 7956/7955 don't have SDRAM */
1541 + struct timer_list sc_tickto; /* for managing DMA */
1544 + int sc_rnghz; /* RNG polling frequency */
1546 + int sc_c_busy; /* command ring busy */
1547 + int sc_s_busy; /* source data ring busy */
1548 + int sc_d_busy; /* destination data ring busy */
1549 + int sc_r_busy; /* result ring busy */
1550 + int sc_active; /* for initial countdown */
1551 + int sc_needwakeup; /* ops q'd waiting on resources */
1552 + int sc_curbatch; /* # ops submitted w/o int */
1554 +#ifdef HIFN_VULCANDEV
1555 + struct cdev *sc_pkdev;
1559 +#define HIFN_LOCK(_sc) spin_lock_irqsave(&(_sc)->sc_mtx, l_flags)
1560 +#define HIFN_UNLOCK(_sc) spin_unlock_irqrestore(&(_sc)->sc_mtx, l_flags)
1565 + * This is the control structure used to pass commands to hifn_encrypt().
1569 + * Flags is the bitwise "or" values for command configuration. A single
1570 + * encrypt direction needs to be set:
1572 + * HIFN_ENCODE or HIFN_DECODE
1574 + * To use cryptography, a single crypto algorithm must be included:
1576 + * HIFN_CRYPT_3DES or HIFN_CRYPT_DES
1578 + * To use authentication, a single MAC algorithm must be included:
1580 + * HIFN_MAC_MD5 or HIFN_MAC_SHA1
1582 + * By default MD5 uses a 16 byte hash and SHA-1 uses a 20 byte hash.
1583 + * If the value below is set, hash values are truncated or assumed
1584 + * truncated to 12 bytes:
1588 + * Keys for encryption and authentication can be sent as part of a command,
1589 + * or the last key value used with a particular session can be retrieved
1590 + * and used again if either of these flags are not specified.
1592 + * HIFN_CRYPT_NEW_KEY, HIFN_MAC_NEW_KEY
1596 + * A number between 0 and 2048 (for DRAM models) or a number between
1597 + * 0 and 768 (for SRAM models). Those who don't want to use session
1598 + * numbers should leave value at zero and send a new crypt key and/or
1599 + * new MAC key on every command. If you use session numbers and
1600 + * don't send a key with a command, the last key sent for that same
1601 + * session number will be used.
1603 + * Warning: Using session numbers and multiboard at the same time
1604 + * is currently broken.
1608 + * Either fill in the mbuf pointer and npa=0 or
1609 + * fill packp[] and packl[] and set npa to > 0
1613 + * The number of bytes of the source_buf that are skipped over before
1614 + * authentication begins. This must be a number between 0 and 2^16-1
1615 + * and can be used by IPsec implementers to skip over IP headers.
1616 + * *** Value ignored if authentication not used ***
1618 + * crypt_header_skip
1619 + * -----------------
1620 + * The number of bytes of the source_buf that are skipped over before
1621 + * the cryptographic operation begins. This must be a number between 0
1622 + * and 2^16-1. For IPsec, this number will always be 8 bytes larger
1623 + * than the auth_header_skip (to skip over the ESP header).
1624 + * *** Value ignored if cryptography not used ***
1627 +struct hifn_operand {
1629 + struct sk_buff *skb;
1631 + unsigned char *buf;
1634 + bus_size_t mapsize;
1637 + dma_addr_t ds_addr;
1639 + } segs[MAX_SCATTER];
1642 +struct hifn_command {
1643 + u_int16_t session_num;
1644 + u_int16_t base_masks, cry_masks, mac_masks;
1645 + u_int8_t iv[HIFN_MAX_IV_LENGTH], *ck, mac[HIFN_MAC_KEY_LENGTH];
1647 + int sloplen, slopidx;
1649 + struct hifn_operand src;
1650 + struct hifn_operand dst;
1652 + struct hifn_softc *softc;
1653 + struct cryptop *crp;
1654 + struct cryptodesc *enccrd, *maccrd;
1657 +#define src_skb src.u.skb
1658 +#define src_io src.u.io
1659 +#define src_map src.map
1660 +#define src_mapsize src.mapsize
1661 +#define src_segs src.segs
1662 +#define src_nsegs src.nsegs
1663 +#define src_buf src.u.buf
1665 +#define dst_skb dst.u.skb
1666 +#define dst_io dst.u.io
1667 +#define dst_map dst.map
1668 +#define dst_mapsize dst.mapsize
1669 +#define dst_segs dst.segs
1670 +#define dst_nsegs dst.nsegs
1671 +#define dst_buf dst.u.buf
1674 + * Return values for hifn_crypto()
1676 +#define HIFN_CRYPTO_SUCCESS 0
1677 +#define HIFN_CRYPTO_BAD_INPUT (-1)
1678 +#define HIFN_CRYPTO_RINGS_FULL (-2)
1680 +/**************************************************************************
1682 + * Function: hifn_crypto
1684 + * Purpose: Called by external drivers to begin an encryption on the
1687 + * Blocking/Non-blocking Issues
1688 + * ============================
1689 + * The driver cannot block in hifn_crypto (no calls to tsleep) currently.
1690 + * hifn_crypto() returns HIFN_CRYPTO_RINGS_FULL if there is not enough
1691 + * room in any of the rings for the request to proceed.
1695 + * 0 for success, negative values on error
1697 + * Defines for negative error codes are:
1699 + * HIFN_CRYPTO_BAD_INPUT : The passed in command had invalid settings.
1700 + * HIFN_CRYPTO_RINGS_FULL : All DMA rings were full and non-blocking
1701 + * behaviour was requested.
1703 + *************************************************************************/
1706 + * Convert back and forth from 'sid' to 'card' and 'session'
1708 +#define HIFN_CARD(sid) (((sid) & 0xf0000000) >> 28)
1709 +#define HIFN_SESSION(sid) ((sid) & 0x000007ff)
1710 +#define HIFN_SID(crd,ses) (((crd) << 28) | ((ses) & 0x7ff))
1712 +#endif /* _KERNEL */
1714 +struct hifn_stats {
1715 + u_int64_t hst_ibytes;
1716 + u_int64_t hst_obytes;
1717 + u_int32_t hst_ipackets;
1718 + u_int32_t hst_opackets;
1719 + u_int32_t hst_invalid;
1720 + u_int32_t hst_nomem; /* malloc or one of hst_nomem_* */
1721 + u_int32_t hst_abort;
1722 + u_int32_t hst_noirq; /* IRQ for no reason */
1723 + u_int32_t hst_totbatch; /* ops submitted w/o interrupt */
1724 + u_int32_t hst_maxbatch; /* max ops submitted together */
1725 + u_int32_t hst_unaligned; /* unaligned src caused copy */
1727 + * The following divides hst_nomem into more specific buckets.
1729 + u_int32_t hst_nomem_map; /* bus_dmamap_create failed */
1730 + u_int32_t hst_nomem_load; /* bus_dmamap_load_* failed */
1731 + u_int32_t hst_nomem_mbuf; /* MGET* failed */
1732 + u_int32_t hst_nomem_mcl; /* MCLGET* failed */
1733 + u_int32_t hst_nomem_cr; /* out of command/result descriptor */
1734 + u_int32_t hst_nomem_sd; /* out of src/dst descriptors */
1737 +#endif /* __HIFN7751VAR_H__ */
1738 --- /dev/null 2007-07-04 13:54:27.000000000 +1000
1739 +++ linux/crypto/ocf/hifn/hifn7751.c 2008-02-14 14:59:01.000000000 +1000
1741 +/* $OpenBSD: hifn7751.c,v 1.120 2002/05/17 00:33:34 deraadt Exp $ */
1744 + * Invertex AEON / Hifn 7751 driver
1745 + * Copyright (c) 1999 Invertex Inc. All rights reserved.
1746 + * Copyright (c) 1999 Theo de Raadt
1747 + * Copyright (c) 2000-2001 Network Security Technologies, Inc.
1748 + * http://www.netsec.net
1749 + * Copyright (c) 2003 Hifn Inc.
1751 + * This driver is based on a previous driver by Invertex, for which they
1752 + * requested: Please send any comments, feedback, bug-fixes, or feature
1753 + * requests to software@invertex.com.
1755 + * Redistribution and use in source and binary forms, with or without
1756 + * modification, are permitted provided that the following conditions
1759 + * 1. Redistributions of source code must retain the above copyright
1760 + * notice, this list of conditions and the following disclaimer.
1761 + * 2. Redistributions in binary form must reproduce the above copyright
1762 + * notice, this list of conditions and the following disclaimer in the
1763 + * documentation and/or other materials provided with the distribution.
1764 + * 3. The name of the author may not be used to endorse or promote products
1765 + * derived from this software without specific prior written permission.
1767 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
1768 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
1769 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
1770 + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
1771 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
1772 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
1773 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
1774 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1775 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
1776 + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1778 + * Effort sponsored in part by the Defense Advanced Research Projects
1779 + * Agency (DARPA) and Air Force Research Laboratory, Air Force
1780 + * Materiel Command, USAF, under agreement number F30602-01-2-0537.
1783 +__FBSDID("$FreeBSD: src/sys/dev/hifn/hifn7751.c,v 1.40 2007/03/21 03:42:49 sam Exp $");
1787 + * Driver for various Hifn encryption processors.
1789 +#ifndef AUTOCONF_INCLUDED
1790 +#include <linux/config.h>
1792 +#include <linux/module.h>
1793 +#include <linux/init.h>
1794 +#include <linux/list.h>
1795 +#include <linux/slab.h>
1796 +#include <linux/wait.h>
1797 +#include <linux/sched.h>
1798 +#include <linux/pci.h>
1799 +#include <linux/delay.h>
1800 +#include <linux/interrupt.h>
1801 +#include <linux/spinlock.h>
1802 +#include <linux/random.h>
1803 +#include <linux/version.h>
1804 +#include <linux/skbuff.h>
1805 +#include <asm/io.h>
1807 +#include <cryptodev.h>
1809 +#include <hifn/hifn7751reg.h>
1810 +#include <hifn/hifn7751var.h>
1813 +#define DPRINTF(a...) if (hifn_debug) { \
1814 + printk("%s: ", sc ? \
1815 + device_get_nameunit(sc->sc_dev) : "hifn"); \
1819 +#define DPRINTF(a...)
1823 +pci_get_revid(struct pci_dev *dev)
1826 + pci_read_config_byte(dev, PCI_REVISION_ID, &rid);
1830 +static struct hifn_stats hifnstats;
1832 +#define debug hifn_debug
1833 +int hifn_debug = 0;
1834 +module_param(hifn_debug, int, 0644);
1835 +MODULE_PARM_DESC(hifn_debug, "Enable debug");
1837 +int hifn_maxbatch = 1;
1838 +module_param(hifn_maxbatch, int, 0644);
1839 +MODULE_PARM_DESC(hifn_maxbatch, "max ops to batch w/o interrupt");
1842 +char *hifn_pllconfig = NULL;
1843 +MODULE_PARM(hifn_pllconfig, "s");
1845 +char hifn_pllconfig[32]; /* This setting is RO after loading */
1846 +module_param_string(hifn_pllconfig, hifn_pllconfig, 32, 0444);
1848 +MODULE_PARM_DESC(hifn_pllconfig, "PLL config, ie., pci66, ext33, ...");
1850 +#ifdef HIFN_VULCANDEV
1851 +#include <sys/conf.h>
1852 +#include <sys/uio.h>
1854 +static struct cdevsw vulcanpk_cdevsw; /* forward declaration */
1858 + * Prototypes and count for the pci_device structure
1860 +static int hifn_probe(struct pci_dev *dev, const struct pci_device_id *ent);
1861 +static void hifn_remove(struct pci_dev *dev);
1863 +static int hifn_newsession(device_t, u_int32_t *, struct cryptoini *);
1864 +static int hifn_freesession(device_t, u_int64_t);
1865 +static int hifn_process(device_t, struct cryptop *, int);
1867 +static device_method_t hifn_methods = {
1868 + /* crypto device methods */
1869 + DEVMETHOD(cryptodev_newsession, hifn_newsession),
1870 + DEVMETHOD(cryptodev_freesession,hifn_freesession),
1871 + DEVMETHOD(cryptodev_process, hifn_process),
1874 +static void hifn_reset_board(struct hifn_softc *, int);
1875 +static void hifn_reset_puc(struct hifn_softc *);
1876 +static void hifn_puc_wait(struct hifn_softc *);
1877 +static int hifn_enable_crypto(struct hifn_softc *);
1878 +static void hifn_set_retry(struct hifn_softc *sc);
1879 +static void hifn_init_dma(struct hifn_softc *);
1880 +static void hifn_init_pci_registers(struct hifn_softc *);
1881 +static int hifn_sramsize(struct hifn_softc *);
1882 +static int hifn_dramsize(struct hifn_softc *);
1883 +static int hifn_ramtype(struct hifn_softc *);
1884 +static void hifn_sessions(struct hifn_softc *);
1885 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
1886 +static irqreturn_t hifn_intr(int irq, void *arg);
1888 +static irqreturn_t hifn_intr(int irq, void *arg, struct pt_regs *regs);
1890 +static u_int hifn_write_command(struct hifn_command *, u_int8_t *);
1891 +static u_int32_t hifn_next_signature(u_int32_t a, u_int cnt);
1892 +static void hifn_callback(struct hifn_softc *, struct hifn_command *, u_int8_t *);
1893 +static int hifn_crypto(struct hifn_softc *, struct hifn_command *, struct cryptop *, int);
1894 +static int hifn_readramaddr(struct hifn_softc *, int, u_int8_t *);
1895 +static int hifn_writeramaddr(struct hifn_softc *, int, u_int8_t *);
1896 +static int hifn_dmamap_load_src(struct hifn_softc *, struct hifn_command *);
1897 +static int hifn_dmamap_load_dst(struct hifn_softc *, struct hifn_command *);
1898 +static int hifn_init_pubrng(struct hifn_softc *);
1899 +static void hifn_tick(unsigned long arg);
1900 +static void hifn_abort(struct hifn_softc *);
1901 +static void hifn_alloc_slot(struct hifn_softc *, int *, int *, int *, int *);
1903 +static void hifn_write_reg_0(struct hifn_softc *, bus_size_t, u_int32_t);
1904 +static void hifn_write_reg_1(struct hifn_softc *, bus_size_t, u_int32_t);
1906 +#ifdef CONFIG_OCF_RANDOMHARVEST
1907 +static int hifn_read_random(void *arg, u_int32_t *buf, int len);
1910 +#define HIFN_MAX_CHIPS 8
1911 +static struct hifn_softc *hifn_chip_idx[HIFN_MAX_CHIPS];
1913 +static __inline u_int32_t
1914 +READ_REG_0(struct hifn_softc *sc, bus_size_t reg)
1916 + u_int32_t v = readl(sc->sc_bar0 + reg);
1917 + sc->sc_bar0_lastreg = (bus_size_t) -1;
1920 +#define WRITE_REG_0(sc, reg, val) hifn_write_reg_0(sc, reg, val)
1922 +static __inline u_int32_t
1923 +READ_REG_1(struct hifn_softc *sc, bus_size_t reg)
1925 + u_int32_t v = readl(sc->sc_bar1 + reg);
1926 + sc->sc_bar1_lastreg = (bus_size_t) -1;
1929 +#define WRITE_REG_1(sc, reg, val) hifn_write_reg_1(sc, reg, val)
1932 + * map in a given buffer (great on some arches :-)
1936 +pci_map_uio(struct hifn_softc *sc, struct hifn_operand *buf, struct uio *uio)
1938 + struct iovec *iov = uio->uio_iov;
1940 + DPRINTF("%s()\n", __FUNCTION__);
1943 + for (buf->nsegs = 0; buf->nsegs < uio->uio_iovcnt; ) {
1944 + buf->segs[buf->nsegs].ds_addr = pci_map_single(sc->sc_pcidev,
1945 + iov->iov_base, iov->iov_len,
1946 + PCI_DMA_BIDIRECTIONAL);
1947 + buf->segs[buf->nsegs].ds_len = iov->iov_len;
1948 + buf->mapsize += iov->iov_len;
1952 + /* identify this buffer by the first segment */
1953 + buf->map = (void *) buf->segs[0].ds_addr;
1958 + * map in a given sk_buff
1962 +pci_map_skb(struct hifn_softc *sc,struct hifn_operand *buf,struct sk_buff *skb)
1966 + DPRINTF("%s()\n", __FUNCTION__);
1970 + buf->segs[0].ds_addr = pci_map_single(sc->sc_pcidev,
1971 + skb->data, skb_headlen(skb), PCI_DMA_BIDIRECTIONAL);
1972 + buf->segs[0].ds_len = skb_headlen(skb);
1973 + buf->mapsize += buf->segs[0].ds_len;
1977 + for (i = 0; i < skb_shinfo(skb)->nr_frags; ) {
1978 + buf->segs[buf->nsegs].ds_len = skb_shinfo(skb)->frags[i].size;
1979 + buf->segs[buf->nsegs].ds_addr = pci_map_single(sc->sc_pcidev,
1980 + page_address(skb_shinfo(skb)->frags[i].page) +
1981 + skb_shinfo(skb)->frags[i].page_offset,
1982 + buf->segs[buf->nsegs].ds_len, PCI_DMA_BIDIRECTIONAL);
1983 + buf->mapsize += buf->segs[buf->nsegs].ds_len;
1987 + /* identify this buffer by the first segment */
1988 + buf->map = (void *) buf->segs[0].ds_addr;
1993 + * map in a given contiguous buffer
1997 +pci_map_buf(struct hifn_softc *sc,struct hifn_operand *buf, void *b, int len)
1999 + DPRINTF("%s()\n", __FUNCTION__);
2002 + buf->segs[0].ds_addr = pci_map_single(sc->sc_pcidev,
2003 + b, len, PCI_DMA_BIDIRECTIONAL);
2004 + buf->segs[0].ds_len = len;
2005 + buf->mapsize += buf->segs[0].ds_len;
2008 + /* identify this buffer by the first segment */
2009 + buf->map = (void *) buf->segs[0].ds_addr;
2013 +#if 0 /* not needed at this time */
2015 +pci_sync_iov(struct hifn_softc *sc, struct hifn_operand *buf)
2019 + DPRINTF("%s()\n", __FUNCTION__);
2020 + for (i = 0; i < buf->nsegs; i++)
2021 + pci_dma_sync_single_for_cpu(sc->sc_pcidev, buf->segs[i].ds_addr,
2022 + buf->segs[i].ds_len, PCI_DMA_BIDIRECTIONAL);
2027 +pci_unmap_buf(struct hifn_softc *sc, struct hifn_operand *buf)
2030 + DPRINTF("%s()\n", __FUNCTION__);
2031 + for (i = 0; i < buf->nsegs; i++) {
2032 + pci_unmap_single(sc->sc_pcidev, buf->segs[i].ds_addr,
2033 + buf->segs[i].ds_len, PCI_DMA_BIDIRECTIONAL);
2034 + buf->segs[i].ds_addr = 0;
2035 + buf->segs[i].ds_len = 0;
2043 +hifn_partname(struct hifn_softc *sc)
2045 + /* XXX sprintf numbers when not decoded */
2046 + switch (pci_get_vendor(sc->sc_pcidev)) {
2047 + case PCI_VENDOR_HIFN:
2048 + switch (pci_get_device(sc->sc_pcidev)) {
2049 + case PCI_PRODUCT_HIFN_6500: return "Hifn 6500";
2050 + case PCI_PRODUCT_HIFN_7751: return "Hifn 7751";
2051 + case PCI_PRODUCT_HIFN_7811: return "Hifn 7811";
2052 + case PCI_PRODUCT_HIFN_7951: return "Hifn 7951";
2053 + case PCI_PRODUCT_HIFN_7955: return "Hifn 7955";
2054 + case PCI_PRODUCT_HIFN_7956: return "Hifn 7956";
2056 + return "Hifn unknown-part";
2057 + case PCI_VENDOR_INVERTEX:
2058 + switch (pci_get_device(sc->sc_pcidev)) {
2059 + case PCI_PRODUCT_INVERTEX_AEON: return "Invertex AEON";
2061 + return "Invertex unknown-part";
2062 + case PCI_VENDOR_NETSEC:
2063 + switch (pci_get_device(sc->sc_pcidev)) {
2064 + case PCI_PRODUCT_NETSEC_7751: return "NetSec 7751";
2066 + return "NetSec unknown-part";
2068 + return "Unknown-vendor unknown-part";
2072 +checkmaxmin(struct pci_dev *dev, const char *what, u_int v, u_int min, u_int max)
2074 + struct hifn_softc *sc = pci_get_drvdata(dev);
2076 + device_printf(sc->sc_dev, "Warning, %s %u out of range, "
2077 + "using max %u\n", what, v, max);
2079 + } else if (v < min) {
2080 + device_printf(sc->sc_dev, "Warning, %s %u out of range, "
2081 + "using min %u\n", what, v, min);
2088 + * Select PLL configuration for 795x parts. This is complicated in
2089 + * that we cannot determine the optimal parameters without user input.
2090 + * The reference clock is derived from an external clock through a
2091 + * multiplier. The external clock is either the host bus (i.e. PCI)
2092 + * or an external clock generator. When using the PCI bus we assume
2093 + * the clock is either 33 or 66 MHz; for an external source we cannot
2096 + * PLL configuration is done with a string: "pci" for PCI bus, or "ext"
2097 + * for an external source, followed by the frequency. We calculate
2098 + * the appropriate multiplier and PLL register contents accordingly.
2099 + * When no configuration is given we default to "pci66" since that
2100 + * always will allow the card to work. If a card is using the PCI
2101 + * bus clock and in a 33MHz slot then it will be operating at half
2102 + * speed until the correct information is provided.
2104 + * We use a default setting of "ext66" because according to Mike Ham
2105 + * of HiFn, almost every board in existence has an external crystal
2106 + * populated at 66MHz. Using PCI can be a problem on modern motherboards,
2107 + * because PCI33 can have clocks from 0 to 33MHz, and some have
2108 + * non-PCI-compliant spread-spectrum clocks, which can confuse the pll.
2111 +hifn_getpllconfig(struct pci_dev *dev, u_int *pll)
2113 + const char *pllspec = hifn_pllconfig;
2114 + u_int freq, mul, fl, fh;
2115 + u_int32_t pllconfig;
2118 + if (pllspec == NULL)
2119 + pllspec = "ext66";
2122 + if (strncmp(pllspec, "ext", 3) == 0) {
2124 + pllconfig |= HIFN_PLL_REF_SEL;
2125 + switch (pci_get_device(dev)) {
2126 + case PCI_PRODUCT_HIFN_7955:
2127 + case PCI_PRODUCT_HIFN_7956:
2128 + fl = 20, fh = 100;
2131 + case PCI_PRODUCT_HIFN_7954:
2136 + } else if (strncmp(pllspec, "pci", 3) == 0)
2138 + freq = strtoul(pllspec, &nxt, 10);
2139 + if (nxt == pllspec)
2142 + freq = checkmaxmin(dev, "frequency", freq, fl, fh);
2144 + * Calculate multiplier. We target a Fck of 266 MHz,
2145 + * allowing only even values, possibly rounded down.
2146 + * Multipliers > 8 must set the charge pump current.
2148 + mul = checkmaxmin(dev, "PLL divisor", (266 / freq) &~ 1, 2, 12);
2149 + pllconfig |= (mul / 2 - 1) << HIFN_PLL_ND_SHIFT;
2151 + pllconfig |= HIFN_PLL_IS;
2156 + * Attach an interface that successfully probed.
2159 +hifn_probe(struct pci_dev *dev, const struct pci_device_id *ent)
2161 + struct hifn_softc *sc = NULL;
2163 + u_int16_t ena, rev;
2165 + unsigned long mem_start, mem_len;
2166 + static int num_chips = 0;
2168 + DPRINTF("%s()\n", __FUNCTION__);
2170 + if (pci_enable_device(dev) < 0)
2173 + if (pci_set_mwi(dev))
2177 + printk("hifn: found device with no IRQ assigned. check BIOS settings!");
2178 + pci_disable_device(dev);
2182 + sc = (struct hifn_softc *) kmalloc(sizeof(*sc), GFP_KERNEL);
2185 + memset(sc, 0, sizeof(*sc));
2187 + softc_device_init(sc, "hifn", num_chips, hifn_methods);
2189 + sc->sc_pcidev = dev;
2192 + sc->sc_num = num_chips++;
2193 + if (sc->sc_num < HIFN_MAX_CHIPS)
2194 + hifn_chip_idx[sc->sc_num] = sc;
2196 + pci_set_drvdata(sc->sc_pcidev, sc);
2198 + spin_lock_init(&sc->sc_mtx);
2200 + /* XXX handle power management */
2203 + * The 7951 and 795x have a random number generator and
2204 + * public key support; note this.
2206 + if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
2207 + (pci_get_device(dev) == PCI_PRODUCT_HIFN_7951 ||
2208 + pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
2209 + pci_get_device(dev) == PCI_PRODUCT_HIFN_7956))
2210 + sc->sc_flags = HIFN_HAS_RNG | HIFN_HAS_PUBLIC;
2212 + * The 7811 has a random number generator and
2213 + * we also note its identity because of some quirks.
2215 + if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
2216 + pci_get_device(dev) == PCI_PRODUCT_HIFN_7811)
2217 + sc->sc_flags |= HIFN_IS_7811 | HIFN_HAS_RNG;
2220 + * The 795x parts support AES.
2222 + if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
2223 + (pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
2224 + pci_get_device(dev) == PCI_PRODUCT_HIFN_7956)) {
2225 + sc->sc_flags |= HIFN_IS_7956 | HIFN_HAS_AES;
2227 + * Select PLL configuration. This depends on the
2228 + * bus and board design and must be manually configured
2229 + * if the default setting is unacceptable.
2231 + hifn_getpllconfig(dev, &sc->sc_pllconfig);
2235 + * Setup PCI resources. Note that we record the bus
2236 + * tag and handle for each register mapping, this is
2237 + * used by the READ_REG_0, WRITE_REG_0, READ_REG_1,
2238 + * and WRITE_REG_1 macros throughout the driver.
2240 + mem_start = pci_resource_start(sc->sc_pcidev, 0);
2241 + mem_len = pci_resource_len(sc->sc_pcidev, 0);
2242 + sc->sc_bar0 = (ocf_iomem_t) ioremap(mem_start, mem_len);
2243 + if (!sc->sc_bar0) {
2244 + device_printf(sc->sc_dev, "cannot map bar%d register space\n", 0);
2247 + sc->sc_bar0_lastreg = (bus_size_t) -1;
2249 + mem_start = pci_resource_start(sc->sc_pcidev, 1);
2250 + mem_len = pci_resource_len(sc->sc_pcidev, 1);
2251 + sc->sc_bar1 = (ocf_iomem_t) ioremap(mem_start, mem_len);
2252 + if (!sc->sc_bar1) {
2253 + device_printf(sc->sc_dev, "cannot map bar%d register space\n", 1);
2256 + sc->sc_bar1_lastreg = (bus_size_t) -1;
2258 + /* fix up the bus size */
2259 + if (pci_set_dma_mask(dev, DMA_32BIT_MASK)) {
2260 + device_printf(sc->sc_dev, "No usable DMA configuration, aborting.\n");
2263 + if (pci_set_consistent_dma_mask(dev, DMA_32BIT_MASK)) {
2264 + device_printf(sc->sc_dev,
2265 + "No usable consistent DMA configuration, aborting.\n");
2269 + hifn_set_retry(sc);
2272 + * Set up the area where the Hifn DMA descriptors
2273 + * and associated data structures reside.
2275 + sc->sc_dma = (struct hifn_dma *) pci_alloc_consistent(dev,
2276 + sizeof(*sc->sc_dma),
2277 + &sc->sc_dma_physaddr);
2278 + if (!sc->sc_dma) {
2279 + device_printf(sc->sc_dev, "cannot alloc sc_dma\n");
2282 + bzero(sc->sc_dma, sizeof(*sc->sc_dma));
2285 + * Reset the board and do the ``secret handshake''
2286 + * to enable the crypto support. Then complete the
2287 + * initialization procedure by setting up the interrupt
2288 + * and hooking in to the system crypto support so we'll
2289 + * get used for system services like the crypto device,
2290 + * IPsec, RNG device, etc.
2292 + hifn_reset_board(sc, 0);
2294 + if (hifn_enable_crypto(sc) != 0) {
2295 + device_printf(sc->sc_dev, "crypto enabling failed\n");
2298 + hifn_reset_puc(sc);
2300 + hifn_init_dma(sc);
2301 + hifn_init_pci_registers(sc);
2303 + pci_set_master(sc->sc_pcidev);
2305 + /* XXX can't dynamically determine ram type for 795x; force dram */
2306 + if (sc->sc_flags & HIFN_IS_7956)
2307 + sc->sc_drammodel = 1;
2308 + else if (hifn_ramtype(sc))
2311 + if (sc->sc_drammodel == 0)
2312 + hifn_sramsize(sc);
2314 + hifn_dramsize(sc);
2317 + * Workaround for NetSec 7751 rev A: half ram size because two
2318 + * of the address lines were left floating
2320 + if (pci_get_vendor(dev) == PCI_VENDOR_NETSEC &&
2321 + pci_get_device(dev) == PCI_PRODUCT_NETSEC_7751 &&
2322 + pci_get_revid(dev) == 0x61) /*XXX???*/
2323 + sc->sc_ramsize >>= 1;
2326 + * Arrange the interrupt line.
2328 + rc = request_irq(dev->irq, hifn_intr, IRQF_SHARED, "hifn", sc);
2330 + device_printf(sc->sc_dev, "could not map interrupt: %d\n", rc);
2333 + sc->sc_irq = dev->irq;
2335 + hifn_sessions(sc);
2338 + * NB: Keep only the low 16 bits; this masks the chip id
2341 + rev = READ_REG_1(sc, HIFN_1_REVID) & 0xffff;
2343 + rseg = sc->sc_ramsize / 1024;
2345 + if (sc->sc_ramsize >= (1024 * 1024)) {
2349 + device_printf(sc->sc_dev, "%s, rev %u, %d%cB %cram",
2350 + hifn_partname(sc), rev,
2351 + rseg, rbase, sc->sc_drammodel ? 'd' : 's');
2352 + if (sc->sc_flags & HIFN_IS_7956)
2353 + printf(", pll=0x%x<%s clk, %ux mult>",
2355 + sc->sc_pllconfig & HIFN_PLL_REF_SEL ? "ext" : "pci",
2356 + 2 + 2*((sc->sc_pllconfig & HIFN_PLL_ND) >> 11));
2359 + sc->sc_cid = crypto_get_driverid(softc_get_device(sc),CRYPTOCAP_F_HARDWARE);
2360 + if (sc->sc_cid < 0) {
2361 + device_printf(sc->sc_dev, "could not get crypto driver id\n");
2365 + WRITE_REG_0(sc, HIFN_0_PUCNFG,
2366 + READ_REG_0(sc, HIFN_0_PUCNFG) | HIFN_PUCNFG_CHIPID);
2367 + ena = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;
2370 + case HIFN_PUSTAT_ENA_2:
2371 + crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
2372 + crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0);
2373 + if (sc->sc_flags & HIFN_HAS_AES)
2374 + crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
2376 + case HIFN_PUSTAT_ENA_1:
2377 + crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0);
2378 + crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0);
2379 + crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
2380 + crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
2381 + crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
2385 + if (sc->sc_flags & (HIFN_HAS_PUBLIC | HIFN_HAS_RNG))
2386 + hifn_init_pubrng(sc);
2388 + init_timer(&sc->sc_tickto);
2389 + sc->sc_tickto.function = hifn_tick;
2390 + sc->sc_tickto.data = (unsigned long) sc->sc_num;
2391 + mod_timer(&sc->sc_tickto, jiffies + HZ);
2396 + if (sc->sc_cid >= 0)
2397 + crypto_unregister_all(sc->sc_cid);
2398 + if (sc->sc_irq != -1)
2399 + free_irq(sc->sc_irq, sc);
2401 + /* Turn off DMA polling */
2402 + WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
2403 + HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
2405 + pci_free_consistent(sc->sc_pcidev,
2406 + sizeof(*sc->sc_dma),
2407 + sc->sc_dma, sc->sc_dma_physaddr);
2414 + * Detach an interface that successfully probed.
2417 +hifn_remove(struct pci_dev *dev)
2419 + struct hifn_softc *sc = pci_get_drvdata(dev);
2420 + unsigned long l_flags;
2422 + DPRINTF("%s()\n", __FUNCTION__);
2424 + KASSERT(sc != NULL, ("hifn_detach: null software carrier!"));
2426 + /* disable interrupts */
2428 + WRITE_REG_1(sc, HIFN_1_DMA_IER, 0);
2431 + /*XXX other resources */
2432 + del_timer_sync(&sc->sc_tickto);
2434 + /* Turn off DMA polling */
2435 + WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
2436 + HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
2438 + crypto_unregister_all(sc->sc_cid);
2440 + free_irq(sc->sc_irq, sc);
2442 + pci_free_consistent(sc->sc_pcidev, sizeof(*sc->sc_dma),
2443 + sc->sc_dma, sc->sc_dma_physaddr);
2448 +hifn_init_pubrng(struct hifn_softc *sc)
2452 + DPRINTF("%s()\n", __FUNCTION__);
2454 + if ((sc->sc_flags & HIFN_IS_7811) == 0) {
2455 + /* Reset 7951 public key/rng engine */
2456 + WRITE_REG_1(sc, HIFN_1_PUB_RESET,
2457 + READ_REG_1(sc, HIFN_1_PUB_RESET) | HIFN_PUBRST_RESET);
2459 + for (i = 0; i < 100; i++) {
2461 + if ((READ_REG_1(sc, HIFN_1_PUB_RESET) &
2462 + HIFN_PUBRST_RESET) == 0)
2467 + device_printf(sc->sc_dev, "public key init failed\n");
2472 + /* Enable the rng, if available */
2473 +#ifdef CONFIG_OCF_RANDOMHARVEST
2474 + if (sc->sc_flags & HIFN_HAS_RNG) {
2475 + if (sc->sc_flags & HIFN_IS_7811) {
2477 + r = READ_REG_1(sc, HIFN_1_7811_RNGENA);
2478 + if (r & HIFN_7811_RNGENA_ENA) {
2479 + r &= ~HIFN_7811_RNGENA_ENA;
2480 + WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
2482 + WRITE_REG_1(sc, HIFN_1_7811_RNGCFG,
2483 + HIFN_7811_RNGCFG_DEFL);
2484 + r |= HIFN_7811_RNGENA_ENA;
2485 + WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
2487 + WRITE_REG_1(sc, HIFN_1_RNG_CONFIG,
2488 + READ_REG_1(sc, HIFN_1_RNG_CONFIG) |
2491 + sc->sc_rngfirst = 1;
2492 + crypto_rregister(sc->sc_cid, hifn_read_random, sc);
2496 + /* Enable public key engine, if available */
2497 + if (sc->sc_flags & HIFN_HAS_PUBLIC) {
2498 + WRITE_REG_1(sc, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE);
2499 + sc->sc_dmaier |= HIFN_DMAIER_PUBDONE;
2500 + WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
2501 +#ifdef HIFN_VULCANDEV
2502 + sc->sc_pkdev = make_dev(&vulcanpk_cdevsw, 0,
2503 + UID_ROOT, GID_WHEEL, 0666,
2505 + sc->sc_pkdev->si_drv1 = sc;
2512 +#ifdef CONFIG_OCF_RANDOMHARVEST
2514 +hifn_read_random(void *arg, u_int32_t *buf, int len)
2516 + struct hifn_softc *sc = (struct hifn_softc *) arg;
2523 + if (sc->sc_flags & HIFN_IS_7811) {
2524 + /* ONLY VALID ON 7811!!!! */
2525 + for (i = 0; i < 5; i++) {
2526 + sts = READ_REG_1(sc, HIFN_1_7811_RNGSTS);
2527 + if (sts & HIFN_7811_RNGSTS_UFL) {
2528 + device_printf(sc->sc_dev,
2529 + "RNG underflow: disabling\n");
2530 + /* DAVIDM perhaps return -1 */
2533 + if ((sts & HIFN_7811_RNGSTS_RDY) == 0)
2537 + * There are at least two words in the RNG FIFO
2541 + buf[rc++] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
2543 + buf[rc++] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
2546 + buf[rc++] = READ_REG_1(sc, HIFN_1_RNG_DATA);
2548 + /* NB: discard first data read */
2549 + if (sc->sc_rngfirst) {
2550 + sc->sc_rngfirst = 0;
2556 +#endif /* CONFIG_OCF_RANDOMHARVEST */
2559 +hifn_puc_wait(struct hifn_softc *sc)
2562 + int reg = HIFN_0_PUCTRL;
2564 + if (sc->sc_flags & HIFN_IS_7956) {
2565 + reg = HIFN_0_PUCTRL2;
2568 + for (i = 5000; i > 0; i--) {
2570 + if (!(READ_REG_0(sc, reg) & HIFN_PUCTRL_RESET))
2574 + device_printf(sc->sc_dev, "proc unit did not reset(0x%x)\n",
2575 + READ_REG_0(sc, HIFN_0_PUCTRL));
2579 + * Reset the processing unit.
2582 +hifn_reset_puc(struct hifn_softc *sc)
2584 + /* Reset processing unit */
2585 + int reg = HIFN_0_PUCTRL;
2587 + if (sc->sc_flags & HIFN_IS_7956) {
2588 + reg = HIFN_0_PUCTRL2;
2590 + WRITE_REG_0(sc, reg, HIFN_PUCTRL_DMAENA);
2592 + hifn_puc_wait(sc);
2596 + * Set the Retry and TRDY registers; note that we set them to
2597 + * zero because the 7811 locks up when forced to retry (section
2598 + * 3.6 of "Specification Update SU-0014-04". Not clear if we
2599 + * should do this for all Hifn parts, but it doesn't seem to hurt.
2602 +hifn_set_retry(struct hifn_softc *sc)
2604 + DPRINTF("%s()\n", __FUNCTION__);
2605 + /* NB: RETRY only responds to 8-bit reads/writes */
2606 + pci_write_config_byte(sc->sc_pcidev, HIFN_RETRY_TIMEOUT, 0);
2607 + pci_write_config_dword(sc->sc_pcidev, HIFN_TRDY_TIMEOUT, 0);
2611 + * Resets the board. Values in the registers are left as is
2612 + * from the reset (i.e. initial values are assigned elsewhere).
2615 +hifn_reset_board(struct hifn_softc *sc, int full)
2619 + DPRINTF("%s()\n", __FUNCTION__);
2621 + * Set polling in the DMA configuration register to zero. 0x7 avoids
2622 + * resetting the board and zeros out the other fields.
2624 + WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
2625 + HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
2628 + * Now that polling has been disabled, we have to wait 1 ms
2629 + * before resetting the board.
2633 + /* Reset the DMA unit */
2635 + WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE);
2638 + WRITE_REG_1(sc, HIFN_1_DMA_CNFG,
2639 + HIFN_DMACNFG_MODE | HIFN_DMACNFG_MSTRESET);
2640 + hifn_reset_puc(sc);
2643 + KASSERT(sc->sc_dma != NULL, ("hifn_reset_board: null DMA tag!"));
2644 + bzero(sc->sc_dma, sizeof(*sc->sc_dma));
2646 + /* Bring dma unit out of reset */
2647 + WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
2648 + HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
2650 + hifn_puc_wait(sc);
2651 + hifn_set_retry(sc);
2653 + if (sc->sc_flags & HIFN_IS_7811) {
2654 + for (reg = 0; reg < 1000; reg++) {
2655 + if (READ_REG_1(sc, HIFN_1_7811_MIPSRST) &
2656 + HIFN_MIPSRST_CRAMINIT)
2661 + device_printf(sc->sc_dev, ": cram init timeout\n");
2663 + /* set up DMA configuration register #2 */
2664 + /* turn off all PK and BAR0 swaps */
2665 + WRITE_REG_1(sc, HIFN_1_DMA_CNFG2,
2666 + (3 << HIFN_DMACNFG2_INIT_WRITE_BURST_SHIFT)|
2667 + (3 << HIFN_DMACNFG2_INIT_READ_BURST_SHIFT)|
2668 + (2 << HIFN_DMACNFG2_TGT_WRITE_BURST_SHIFT)|
2669 + (2 << HIFN_DMACNFG2_TGT_READ_BURST_SHIFT));
2674 +hifn_next_signature(u_int32_t a, u_int cnt)
2679 + for (i = 0; i < cnt; i++) {
2681 + /* get the parity */
2682 + v = a & 0x80080125;
2689 + a = (v & 1) ^ (a << 1);
2697 + * Checks to see if crypto is already enabled. If crypto isn't enabled,
2698 + * "hifn_enable_crypto" is called to enable it. The check is important,
2699 + * as enabling crypto twice will lock the board.
2702 +hifn_enable_crypto(struct hifn_softc *sc)
2704 + u_int32_t dmacfg, ramcfg, encl, addr, i;
2705 + char offtbl[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2706 + 0x00, 0x00, 0x00, 0x00 };
2708 + DPRINTF("%s()\n", __FUNCTION__);
2710 + ramcfg = READ_REG_0(sc, HIFN_0_PUCNFG);
2711 + dmacfg = READ_REG_1(sc, HIFN_1_DMA_CNFG);
2714 + * The RAM config register's encrypt level bit needs to be set before
2715 + * every read performed on the encryption level register.
2717 + WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
2719 + encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;
2722 + * Make sure we don't re-unlock. Two unlocks kills chip until the
2725 + if (encl == HIFN_PUSTAT_ENA_1 || encl == HIFN_PUSTAT_ENA_2) {
2728 + device_printf(sc->sc_dev,
2729 + "Strong crypto already enabled!\n");
2734 + if (encl != 0 && encl != HIFN_PUSTAT_ENA_0) {
2737 + device_printf(sc->sc_dev,
2738 + "Unknown encryption level 0x%x\n", encl);
2743 + WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_UNLOCK |
2744 + HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
2746 + addr = READ_REG_1(sc, HIFN_UNLOCK_SECRET1);
2748 + WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, 0);
2751 + for (i = 0; i <= 12; i++) {
2752 + addr = hifn_next_signature(addr, offtbl[i] + 0x101);
2753 + WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, addr);
2758 + WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
2759 + encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;
2763 + if (encl != HIFN_PUSTAT_ENA_1 && encl != HIFN_PUSTAT_ENA_2)
2764 + device_printf(sc->sc_dev, "Engine is permanently "
2765 + "locked until next system reset!\n");
2767 + device_printf(sc->sc_dev, "Engine enabled "
2768 + "successfully!\n");
2773 + WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg);
2774 + WRITE_REG_1(sc, HIFN_1_DMA_CNFG, dmacfg);
2777 + case HIFN_PUSTAT_ENA_1:
2778 + case HIFN_PUSTAT_ENA_2:
2780 + case HIFN_PUSTAT_ENA_0:
2782 + device_printf(sc->sc_dev, "disabled\n");
2790 + * Give initial values to the registers listed in the "Register Space"
2791 + * section of the HIFN Software Development reference manual.
2794 +hifn_init_pci_registers(struct hifn_softc *sc)
2796 + DPRINTF("%s()\n", __FUNCTION__);
2798 + /* write fixed values needed by the Initialization registers */
2799 + WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
2800 + WRITE_REG_0(sc, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD);
2801 + WRITE_REG_0(sc, HIFN_0_PUIER, HIFN_PUIER_DSTOVER);
2803 + /* write all 4 ring address registers */
2804 + WRITE_REG_1(sc, HIFN_1_DMA_CRAR, sc->sc_dma_physaddr +
2805 + offsetof(struct hifn_dma, cmdr[0]));
2806 + WRITE_REG_1(sc, HIFN_1_DMA_SRAR, sc->sc_dma_physaddr +
2807 + offsetof(struct hifn_dma, srcr[0]));
2808 + WRITE_REG_1(sc, HIFN_1_DMA_DRAR, sc->sc_dma_physaddr +
2809 + offsetof(struct hifn_dma, dstr[0]));
2810 + WRITE_REG_1(sc, HIFN_1_DMA_RRAR, sc->sc_dma_physaddr +
2811 + offsetof(struct hifn_dma, resr[0]));
2815 + /* write status register */
2816 + WRITE_REG_1(sc, HIFN_1_DMA_CSR,
2817 + HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
2818 + HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS |
2819 + HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
2820 + HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
2821 + HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
2822 + HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
2823 + HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
2824 + HIFN_DMACSR_S_WAIT |
2825 + HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
2826 + HIFN_DMACSR_C_WAIT |
2827 + HIFN_DMACSR_ENGINE |
2828 + ((sc->sc_flags & HIFN_HAS_PUBLIC) ?
2829 + HIFN_DMACSR_PUBDONE : 0) |
2830 + ((sc->sc_flags & HIFN_IS_7811) ?
2831 + HIFN_DMACSR_ILLW | HIFN_DMACSR_ILLR : 0));
2833 + sc->sc_d_busy = sc->sc_r_busy = sc->sc_s_busy = sc->sc_c_busy = 0;
2834 + sc->sc_dmaier |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT |
2835 + HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER |
2836 + HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT |
2837 + ((sc->sc_flags & HIFN_IS_7811) ?
2838 + HIFN_DMAIER_ILLW | HIFN_DMAIER_ILLR : 0);
2839 + sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
2840 + WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
2843 + if (sc->sc_flags & HIFN_IS_7956) {
2846 + WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
2847 + HIFN_PUCNFG_TCALLPHASES |
2848 + HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32);
2850 + /* turn off the clocks and insure bypass is set */
2851 + pll = READ_REG_1(sc, HIFN_1_PLL);
2852 + pll = (pll &~ (HIFN_PLL_PK_CLK_SEL | HIFN_PLL_PE_CLK_SEL))
2853 + | HIFN_PLL_BP | HIFN_PLL_MBSET;
2854 + WRITE_REG_1(sc, HIFN_1_PLL, pll);
2855 + DELAY(10*1000); /* 10ms */
2857 + /* change configuration */
2858 + pll = (pll &~ HIFN_PLL_CONFIG) | sc->sc_pllconfig;
2859 + WRITE_REG_1(sc, HIFN_1_PLL, pll);
2860 + DELAY(10*1000); /* 10ms */
2862 + /* disable bypass */
2863 + pll &= ~HIFN_PLL_BP;
2864 + WRITE_REG_1(sc, HIFN_1_PLL, pll);
2865 + /* enable clocks with new configuration */
2866 + pll |= HIFN_PLL_PK_CLK_SEL | HIFN_PLL_PE_CLK_SEL;
2867 + WRITE_REG_1(sc, HIFN_1_PLL, pll);
2869 + WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
2870 + HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES |
2871 + HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 |
2872 + (sc->sc_drammodel ? HIFN_PUCNFG_DRAM : HIFN_PUCNFG_SRAM));
2875 + WRITE_REG_0(sc, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
2876 + WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
2877 + HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST |
2878 + ((HIFN_POLL_FREQUENCY << 16 ) & HIFN_DMACNFG_POLLFREQ) |
2879 + ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL));
2883 + * The maximum number of sessions supported by the card
2884 + * is dependent on the amount of context ram, which
2885 + * encryption algorithms are enabled, and how compression
2886 + * is configured. This should be configured before this
2887 + * routine is called.
2890 +hifn_sessions(struct hifn_softc *sc)
2895 + DPRINTF("%s()\n", __FUNCTION__);
2897 + pucnfg = READ_REG_0(sc, HIFN_0_PUCNFG);
2899 + if (pucnfg & HIFN_PUCNFG_COMPSING) {
2900 + if (pucnfg & HIFN_PUCNFG_ENCCNFG)
2905 + * 7955/7956 has internal context memory of 32K
2907 + if (sc->sc_flags & HIFN_IS_7956)
2908 + sc->sc_maxses = 32768 / ctxsize;
2910 + sc->sc_maxses = 1 +
2911 + ((sc->sc_ramsize - 32768) / ctxsize);
2913 + sc->sc_maxses = sc->sc_ramsize / 16384;
2915 + if (sc->sc_maxses > 2048)
2916 + sc->sc_maxses = 2048;
2920 + * Determine ram type (sram or dram). Board should be just out of a reset
2921 + * state when this is called.
2924 +hifn_ramtype(struct hifn_softc *sc)
2926 + u_int8_t data[8], dataexpect[8];
2929 + for (i = 0; i < sizeof(data); i++)
2930 + data[i] = dataexpect[i] = 0x55;
2931 + if (hifn_writeramaddr(sc, 0, data))
2933 + if (hifn_readramaddr(sc, 0, data))
2935 + if (bcmp(data, dataexpect, sizeof(data)) != 0) {
2936 + sc->sc_drammodel = 1;
2940 + for (i = 0; i < sizeof(data); i++)
2941 + data[i] = dataexpect[i] = 0xaa;
2942 + if (hifn_writeramaddr(sc, 0, data))
2944 + if (hifn_readramaddr(sc, 0, data))
2946 + if (bcmp(data, dataexpect, sizeof(data)) != 0) {
2947 + sc->sc_drammodel = 1;
2954 +#define HIFN_SRAM_MAX (32 << 20)
2955 +#define HIFN_SRAM_STEP_SIZE 16384
2956 +#define HIFN_SRAM_GRANULARITY (HIFN_SRAM_MAX / HIFN_SRAM_STEP_SIZE)
2959 +hifn_sramsize(struct hifn_softc *sc)
2963 + u_int8_t dataexpect[sizeof(data)];
2966 + for (i = 0; i < sizeof(data); i++)
2967 + data[i] = dataexpect[i] = i ^ 0x5a;
2969 + for (i = HIFN_SRAM_GRANULARITY - 1; i >= 0; i--) {
2970 + a = i * HIFN_SRAM_STEP_SIZE;
2971 + bcopy(&i, data, sizeof(i));
2972 + hifn_writeramaddr(sc, a, data);
2975 + for (i = 0; i < HIFN_SRAM_GRANULARITY; i++) {
2976 + a = i * HIFN_SRAM_STEP_SIZE;
2977 + bcopy(&i, dataexpect, sizeof(i));
2978 + if (hifn_readramaddr(sc, a, data) < 0)
2980 + if (bcmp(data, dataexpect, sizeof(data)) != 0)
2982 + sc->sc_ramsize = a + HIFN_SRAM_STEP_SIZE;
2989 + * XXX For dram boards, one should really try all of the
2990 + * HIFN_PUCNFG_DSZ_*'s. This just assumes that PUCNFG
2991 + * is already set up correctly.
2994 +hifn_dramsize(struct hifn_softc *sc)
2998 + if (sc->sc_flags & HIFN_IS_7956) {
3000 + * 7955/7956 have a fixed internal ram of only 32K.
3002 + sc->sc_ramsize = 32768;
3004 + cnfg = READ_REG_0(sc, HIFN_0_PUCNFG) &
3005 + HIFN_PUCNFG_DRAMMASK;
3006 + sc->sc_ramsize = 1 << ((cnfg >> 13) + 18);
3012 +hifn_alloc_slot(struct hifn_softc *sc, int *cmdp, int *srcp, int *dstp, int *resp)
3014 + struct hifn_dma *dma = sc->sc_dma;
3016 + DPRINTF("%s()\n", __FUNCTION__);
3018 + if (dma->cmdi == HIFN_D_CMD_RSIZE) {
3020 + dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_JUMP|HIFN_D_MASKDONEIRQ);
3022 + dma->cmdr[HIFN_D_CMD_RSIZE].l |= htole32(HIFN_D_VALID);
3023 + HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
3024 + BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3026 + *cmdp = dma->cmdi++;
3027 + dma->cmdk = dma->cmdi;
3029 + if (dma->srci == HIFN_D_SRC_RSIZE) {
3031 + dma->srcr[HIFN_D_SRC_RSIZE].l = htole32(HIFN_D_JUMP|HIFN_D_MASKDONEIRQ);
3033 + dma->srcr[HIFN_D_SRC_RSIZE].l |= htole32(HIFN_D_VALID);
3034 + HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
3035 + BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3037 + *srcp = dma->srci++;
3038 + dma->srck = dma->srci;
3040 + if (dma->dsti == HIFN_D_DST_RSIZE) {
3042 + dma->dstr[HIFN_D_DST_RSIZE].l = htole32(HIFN_D_JUMP|HIFN_D_MASKDONEIRQ);
3044 + dma->dstr[HIFN_D_DST_RSIZE].l |= htole32(HIFN_D_VALID);
3045 + HIFN_DSTR_SYNC(sc, HIFN_D_DST_RSIZE,
3046 + BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3048 + *dstp = dma->dsti++;
3049 + dma->dstk = dma->dsti;
3051 + if (dma->resi == HIFN_D_RES_RSIZE) {
3053 + dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_JUMP|HIFN_D_MASKDONEIRQ);
3055 + dma->resr[HIFN_D_RES_RSIZE].l |= htole32(HIFN_D_VALID);
3056 + HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
3057 + BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3059 + *resp = dma->resi++;
3060 + dma->resk = dma->resi;
3064 +hifn_writeramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
3066 + struct hifn_dma *dma = sc->sc_dma;
3067 + hifn_base_command_t wc;
3068 + const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
3069 + int r, cmdi, resi, srci, dsti;
3071 + DPRINTF("%s()\n", __FUNCTION__);
3073 + wc.masks = htole16(3 << 13);
3074 + wc.session_num = htole16(addr >> 14);
3075 + wc.total_source_count = htole16(8);
3076 + wc.total_dest_count = htole16(addr & 0x3fff);
3078 + hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
3080 + WRITE_REG_1(sc, HIFN_1_DMA_CSR,
3081 + HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
3082 + HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
3084 + /* build write command */
3085 + bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND);
3086 + *(hifn_base_command_t *)dma->command_bufs[cmdi] = wc;
3087 + bcopy(data, &dma->test_src, sizeof(dma->test_src));
3089 + dma->srcr[srci].p = htole32(sc->sc_dma_physaddr
3090 + + offsetof(struct hifn_dma, test_src));
3091 + dma->dstr[dsti].p = htole32(sc->sc_dma_physaddr
3092 + + offsetof(struct hifn_dma, test_dst));
3094 + dma->cmdr[cmdi].l = htole32(16 | masks);
3095 + dma->srcr[srci].l = htole32(8 | masks);
3096 + dma->dstr[dsti].l = htole32(4 | masks);
3097 + dma->resr[resi].l = htole32(4 | masks);
3099 + for (r = 10000; r >= 0; r--) {
3101 + if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
3105 + device_printf(sc->sc_dev, "writeramaddr -- "
3106 + "result[%d](addr %d) still valid\n", resi, addr);
3112 + WRITE_REG_1(sc, HIFN_1_DMA_CSR,
3113 + HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
3114 + HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
3120 +hifn_readramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
3122 + struct hifn_dma *dma = sc->sc_dma;
3123 + hifn_base_command_t rc;
3124 + const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
3125 + int r, cmdi, srci, dsti, resi;
3127 + DPRINTF("%s()\n", __FUNCTION__);
3129 + rc.masks = htole16(2 << 13);
3130 + rc.session_num = htole16(addr >> 14);
3131 + rc.total_source_count = htole16(addr & 0x3fff);
3132 + rc.total_dest_count = htole16(8);
3134 + hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
3136 + WRITE_REG_1(sc, HIFN_1_DMA_CSR,
3137 + HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
3138 + HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
3140 + bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND);
3141 + *(hifn_base_command_t *)dma->command_bufs[cmdi] = rc;
3143 + dma->srcr[srci].p = htole32(sc->sc_dma_physaddr +
3144 + offsetof(struct hifn_dma, test_src));
3145 + dma->test_src = 0;
3146 + dma->dstr[dsti].p = htole32(sc->sc_dma_physaddr +
3147 + offsetof(struct hifn_dma, test_dst));
3148 + dma->test_dst = 0;
3149 + dma->cmdr[cmdi].l = htole32(8 | masks);
3150 + dma->srcr[srci].l = htole32(8 | masks);
3151 + dma->dstr[dsti].l = htole32(8 | masks);
3152 + dma->resr[resi].l = htole32(HIFN_MAX_RESULT | masks);
3154 + for (r = 10000; r >= 0; r--) {
3156 + if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
3160 + device_printf(sc->sc_dev, "readramaddr -- "
3161 + "result[%d](addr %d) still valid\n", resi, addr);
3165 + bcopy(&dma->test_dst, data, sizeof(dma->test_dst));
3168 + WRITE_REG_1(sc, HIFN_1_DMA_CSR,
3169 + HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
3170 + HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
3176 + * Initialize the descriptor rings.
3179 +hifn_init_dma(struct hifn_softc *sc)
3181 + struct hifn_dma *dma = sc->sc_dma;
3184 + DPRINTF("%s()\n", __FUNCTION__);
3186 + hifn_set_retry(sc);
3188 + /* initialize static pointer values */
3189 + for (i = 0; i < HIFN_D_CMD_RSIZE; i++)
3190 + dma->cmdr[i].p = htole32(sc->sc_dma_physaddr +
3191 + offsetof(struct hifn_dma, command_bufs[i][0]));
3192 + for (i = 0; i < HIFN_D_RES_RSIZE; i++)
3193 + dma->resr[i].p = htole32(sc->sc_dma_physaddr +
3194 + offsetof(struct hifn_dma, result_bufs[i][0]));
3196 + dma->cmdr[HIFN_D_CMD_RSIZE].p =
3197 + htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, cmdr[0]));
3198 + dma->srcr[HIFN_D_SRC_RSIZE].p =
3199 + htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, srcr[0]));
3200 + dma->dstr[HIFN_D_DST_RSIZE].p =
3201 + htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, dstr[0]));
3202 + dma->resr[HIFN_D_RES_RSIZE].p =
3203 + htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, resr[0]));
3205 + dma->cmdu = dma->srcu = dma->dstu = dma->resu = 0;
3206 + dma->cmdi = dma->srci = dma->dsti = dma->resi = 0;
3207 + dma->cmdk = dma->srck = dma->dstk = dma->resk = 0;
3211 + * Writes out the raw command buffer space. Returns the
3212 + * command buffer size.
3215 +hifn_write_command(struct hifn_command *cmd, u_int8_t *buf)
3217 + struct hifn_softc *sc = NULL;
3218 + u_int8_t *buf_pos;
3219 + hifn_base_command_t *base_cmd;
3220 + hifn_mac_command_t *mac_cmd;
3221 + hifn_crypt_command_t *cry_cmd;
3222 + int using_mac, using_crypt, len, ivlen;
3223 + u_int32_t dlen, slen;
3225 + DPRINTF("%s()\n", __FUNCTION__);
3228 + using_mac = cmd->base_masks & HIFN_BASE_CMD_MAC;
3229 + using_crypt = cmd->base_masks & HIFN_BASE_CMD_CRYPT;
3231 + base_cmd = (hifn_base_command_t *)buf_pos;
3232 + base_cmd->masks = htole16(cmd->base_masks);
3233 + slen = cmd->src_mapsize;
3235 + dlen = cmd->dst_mapsize - cmd->sloplen + sizeof(u_int32_t);
3237 + dlen = cmd->dst_mapsize;
3238 + base_cmd->total_source_count = htole16(slen & HIFN_BASE_CMD_LENMASK_LO);
3239 + base_cmd->total_dest_count = htole16(dlen & HIFN_BASE_CMD_LENMASK_LO);
3242 + base_cmd->session_num = htole16(
3243 + ((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) |
3244 + ((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M));
3245 + buf_pos += sizeof(hifn_base_command_t);
3248 + mac_cmd = (hifn_mac_command_t *)buf_pos;
3249 + dlen = cmd->maccrd->crd_len;
3250 + mac_cmd->source_count = htole16(dlen & 0xffff);
3252 + mac_cmd->masks = htole16(cmd->mac_masks |
3253 + ((dlen << HIFN_MAC_CMD_SRCLEN_S) & HIFN_MAC_CMD_SRCLEN_M));
3254 + mac_cmd->header_skip = htole16(cmd->maccrd->crd_skip);
3255 + mac_cmd->reserved = 0;
3256 + buf_pos += sizeof(hifn_mac_command_t);
3259 + if (using_crypt) {
3260 + cry_cmd = (hifn_crypt_command_t *)buf_pos;
3261 + dlen = cmd->enccrd->crd_len;
3262 + cry_cmd->source_count = htole16(dlen & 0xffff);
3264 + cry_cmd->masks = htole16(cmd->cry_masks |
3265 + ((dlen << HIFN_CRYPT_CMD_SRCLEN_S) & HIFN_CRYPT_CMD_SRCLEN_M));
3266 + cry_cmd->header_skip = htole16(cmd->enccrd->crd_skip);
3267 + cry_cmd->reserved = 0;
3268 + buf_pos += sizeof(hifn_crypt_command_t);
3271 + if (using_mac && cmd->mac_masks & HIFN_MAC_CMD_NEW_KEY) {
3272 + bcopy(cmd->mac, buf_pos, HIFN_MAC_KEY_LENGTH);
3273 + buf_pos += HIFN_MAC_KEY_LENGTH;
3276 + if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_KEY) {
3277 + switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
3278 + case HIFN_CRYPT_CMD_ALG_3DES:
3279 + bcopy(cmd->ck, buf_pos, HIFN_3DES_KEY_LENGTH);
3280 + buf_pos += HIFN_3DES_KEY_LENGTH;
3282 + case HIFN_CRYPT_CMD_ALG_DES:
3283 + bcopy(cmd->ck, buf_pos, HIFN_DES_KEY_LENGTH);
3284 + buf_pos += HIFN_DES_KEY_LENGTH;
3286 + case HIFN_CRYPT_CMD_ALG_RC4:
3291 + clen = MIN(cmd->cklen, len);
3292 + bcopy(cmd->ck, buf_pos, clen);
3295 + } while (len > 0);
3296 + bzero(buf_pos, 4);
3299 + case HIFN_CRYPT_CMD_ALG_AES:
3301 + * AES keys are variable 128, 192 and
3302 + * 256 bits (16, 24 and 32 bytes).
3304 + bcopy(cmd->ck, buf_pos, cmd->cklen);
3305 + buf_pos += cmd->cklen;
3310 + if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_IV) {
3311 + switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
3312 + case HIFN_CRYPT_CMD_ALG_AES:
3313 + ivlen = HIFN_AES_IV_LENGTH;
3316 + ivlen = HIFN_IV_LENGTH;
3319 + bcopy(cmd->iv, buf_pos, ivlen);
3323 + if ((cmd->base_masks & (HIFN_BASE_CMD_MAC|HIFN_BASE_CMD_CRYPT)) == 0) {
3324 + bzero(buf_pos, 8);
3328 + return (buf_pos - buf);
3332 +hifn_dmamap_aligned(struct hifn_operand *op)
3334 + struct hifn_softc *sc = NULL;
3337 + DPRINTF("%s()\n", __FUNCTION__);
3339 + for (i = 0; i < op->nsegs; i++) {
3340 + if (op->segs[i].ds_addr & 3)
3342 + if ((i != (op->nsegs - 1)) && (op->segs[i].ds_len & 3))
3348 +static __inline int
3349 +hifn_dmamap_dstwrap(struct hifn_softc *sc, int idx)
3351 + struct hifn_dma *dma = sc->sc_dma;
3353 + if (++idx == HIFN_D_DST_RSIZE) {
3354 + dma->dstr[idx].l = htole32(HIFN_D_VALID | HIFN_D_JUMP |
3355 + HIFN_D_MASKDONEIRQ);
3356 + HIFN_DSTR_SYNC(sc, idx,
3357 + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3364 +hifn_dmamap_load_dst(struct hifn_softc *sc, struct hifn_command *cmd)
3366 + struct hifn_dma *dma = sc->sc_dma;
3367 + struct hifn_operand *dst = &cmd->dst;
3369 + int idx, used = 0, i;
3371 + DPRINTF("%s()\n", __FUNCTION__);
3374 + for (i = 0; i < dst->nsegs - 1; i++) {
3375 + dma->dstr[idx].p = htole32(dst->segs[i].ds_addr);
3376 + dma->dstr[idx].l = htole32(HIFN_D_MASKDONEIRQ | dst->segs[i].ds_len);
3378 + dma->dstr[idx].l |= htole32(HIFN_D_VALID);
3379 + HIFN_DSTR_SYNC(sc, idx,
3380 + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3383 + idx = hifn_dmamap_dstwrap(sc, idx);
3386 + if (cmd->sloplen == 0) {
3387 + p = dst->segs[i].ds_addr;
3388 + l = HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
3389 + dst->segs[i].ds_len;
3391 + p = sc->sc_dma_physaddr +
3392 + offsetof(struct hifn_dma, slop[cmd->slopidx]);
3393 + l = HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
3394 + sizeof(u_int32_t);
3396 + if ((dst->segs[i].ds_len - cmd->sloplen) != 0) {
3397 + dma->dstr[idx].p = htole32(dst->segs[i].ds_addr);
3398 + dma->dstr[idx].l = htole32(HIFN_D_MASKDONEIRQ |
3399 + (dst->segs[i].ds_len - cmd->sloplen));
3401 + dma->dstr[idx].l |= htole32(HIFN_D_VALID);
3402 + HIFN_DSTR_SYNC(sc, idx,
3403 + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3406 + idx = hifn_dmamap_dstwrap(sc, idx);
3409 + dma->dstr[idx].p = htole32(p);
3410 + dma->dstr[idx].l = htole32(l);
3412 + dma->dstr[idx].l |= htole32(HIFN_D_VALID);
3413 + HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3416 + idx = hifn_dmamap_dstwrap(sc, idx);
3419 + dma->dstu += used;
3423 +static __inline int
3424 +hifn_dmamap_srcwrap(struct hifn_softc *sc, int idx)
3426 + struct hifn_dma *dma = sc->sc_dma;
3428 + if (++idx == HIFN_D_SRC_RSIZE) {
3429 + dma->srcr[idx].l = htole32(HIFN_D_VALID |
3430 + HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
3431 + HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
3432 + BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3439 +hifn_dmamap_load_src(struct hifn_softc *sc, struct hifn_command *cmd)
3441 + struct hifn_dma *dma = sc->sc_dma;
3442 + struct hifn_operand *src = &cmd->src;
3444 + u_int32_t last = 0;
3446 + DPRINTF("%s()\n", __FUNCTION__);
3449 + for (i = 0; i < src->nsegs; i++) {
3450 + if (i == src->nsegs - 1)
3451 + last = HIFN_D_LAST;
3453 + dma->srcr[idx].p = htole32(src->segs[i].ds_addr);
3454 + dma->srcr[idx].l = htole32(src->segs[i].ds_len |
3455 + HIFN_D_MASKDONEIRQ | last);
3457 + dma->srcr[idx].l |= htole32(HIFN_D_VALID);
3458 + HIFN_SRCR_SYNC(sc, idx,
3459 + BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3461 + idx = hifn_dmamap_srcwrap(sc, idx);
3464 + dma->srcu += src->nsegs;
3471 + struct hifn_softc *sc,
3472 + struct hifn_command *cmd,
3473 + struct cryptop *crp,
3476 + struct hifn_dma *dma = sc->sc_dma;
3477 + u_int32_t cmdlen, csr;
3478 + int cmdi, resi, err = 0;
3479 + unsigned long l_flags;
3481 + DPRINTF("%s()\n", __FUNCTION__);
3484 + * need 1 cmd, and 1 res
3486 + * NB: check this first since it's easy.
3489 + if ((dma->cmdu + 1) > HIFN_D_CMD_RSIZE ||
3490 + (dma->resu + 1) > HIFN_D_RES_RSIZE) {
3493 + device_printf(sc->sc_dev,
3494 + "cmd/result exhaustion, cmdu %u resu %u\n",
3495 + dma->cmdu, dma->resu);
3498 + hifnstats.hst_nomem_cr++;
3499 + sc->sc_needwakeup |= CRYPTO_SYMQ;
3501 + return (ERESTART);
3504 + if (crp->crp_flags & CRYPTO_F_SKBUF) {
3505 + if (pci_map_skb(sc, &cmd->src, cmd->src_skb)) {
3506 + hifnstats.hst_nomem_load++;
3510 + } else if (crp->crp_flags & CRYPTO_F_IOV) {
3511 + if (pci_map_uio(sc, &cmd->src, cmd->src_io)) {
3512 + hifnstats.hst_nomem_load++;
3517 + if (pci_map_buf(sc, &cmd->src, cmd->src_buf, crp->crp_ilen)) {
3518 + hifnstats.hst_nomem_load++;
3524 + if (hifn_dmamap_aligned(&cmd->src)) {
3525 + cmd->sloplen = cmd->src_mapsize & 3;
3526 + cmd->dst = cmd->src;
3528 + if (crp->crp_flags & CRYPTO_F_IOV) {
3529 + DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
3532 + } else if (crp->crp_flags & CRYPTO_F_SKBUF) {
3535 + struct mbuf *m, *m0, *mlast;
3537 + KASSERT(cmd->dst_m == cmd->src_m,
3538 + ("hifn_crypto: dst_m initialized improperly"));
3539 + hifnstats.hst_unaligned++;
3541 + * Source is not aligned on a longword boundary.
3542 + * Copy the data to ensure alignment. If we fail
3543 + * to allocate mbufs or clusters while doing this
3544 + * we return ERESTART so the operation is requeued
3545 + * at the crypto later, but only if there are
3546 + * ops already posted to the hardware; otherwise we
3547 + * have no guarantee that we'll be re-entered.
3549 + totlen = cmd->src_mapsize;
3550 + if (cmd->src_m->m_flags & M_PKTHDR) {
3552 + MGETHDR(m0, M_DONTWAIT, MT_DATA);
3553 + if (m0 && !m_dup_pkthdr(m0, cmd->src_m, M_DONTWAIT)) {
3559 + MGET(m0, M_DONTWAIT, MT_DATA);
3562 + hifnstats.hst_nomem_mbuf++;
3563 + err = dma->cmdu ? ERESTART : ENOMEM;
3566 + if (totlen >= MINCLSIZE) {
3567 + MCLGET(m0, M_DONTWAIT);
3568 + if ((m0->m_flags & M_EXT) == 0) {
3569 + hifnstats.hst_nomem_mcl++;
3570 + err = dma->cmdu ? ERESTART : ENOMEM;
3577 + m0->m_pkthdr.len = m0->m_len = len;
3580 + while (totlen > 0) {
3581 + MGET(m, M_DONTWAIT, MT_DATA);
3583 + hifnstats.hst_nomem_mbuf++;
3584 + err = dma->cmdu ? ERESTART : ENOMEM;
3589 + if (totlen >= MINCLSIZE) {
3590 + MCLGET(m, M_DONTWAIT);
3591 + if ((m->m_flags & M_EXT) == 0) {
3592 + hifnstats.hst_nomem_mcl++;
3593 + err = dma->cmdu ? ERESTART : ENOMEM;
3594 + mlast->m_next = m;
3602 + m0->m_pkthdr.len += len;
3605 + mlast->m_next = m;
3610 + device_printf(sc->sc_dev,
3611 + "%s,%d: CRYPTO_F_SKBUF unaligned not implemented\n",
3612 + __FILE__, __LINE__);
3617 + device_printf(sc->sc_dev,
3618 + "%s,%d: unaligned contig buffers not implemented\n",
3619 + __FILE__, __LINE__);
3625 + if (cmd->dst_map == NULL) {
3626 + if (crp->crp_flags & CRYPTO_F_SKBUF) {
3627 + if (pci_map_skb(sc, &cmd->dst, cmd->dst_skb)) {
3628 + hifnstats.hst_nomem_map++;
3632 + } else if (crp->crp_flags & CRYPTO_F_IOV) {
3633 + if (pci_map_uio(sc, &cmd->dst, cmd->dst_io)) {
3634 + hifnstats.hst_nomem_load++;
3639 + if (pci_map_buf(sc, &cmd->dst, cmd->dst_buf, crp->crp_ilen)) {
3640 + hifnstats.hst_nomem_load++;
3649 + device_printf(sc->sc_dev,
3650 + "Entering cmd: stat %8x ien %8x u %d/%d/%d/%d n %d/%d\n",
3651 + READ_REG_1(sc, HIFN_1_DMA_CSR),
3652 + READ_REG_1(sc, HIFN_1_DMA_IER),
3653 + dma->cmdu, dma->srcu, dma->dstu, dma->resu,
3654 + cmd->src_nsegs, cmd->dst_nsegs);
3659 + if (cmd->src_map == cmd->dst_map) {
3660 + bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
3661 + BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
3663 + bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
3664 + BUS_DMASYNC_PREWRITE);
3665 + bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
3666 + BUS_DMASYNC_PREREAD);
3671 + * need N src, and N dst
3673 + if ((dma->srcu + cmd->src_nsegs) > HIFN_D_SRC_RSIZE ||
3674 + (dma->dstu + cmd->dst_nsegs + 1) > HIFN_D_DST_RSIZE) {
3677 + device_printf(sc->sc_dev,
3678 + "src/dst exhaustion, srcu %u+%u dstu %u+%u\n",
3679 + dma->srcu, cmd->src_nsegs,
3680 + dma->dstu, cmd->dst_nsegs);
3683 + hifnstats.hst_nomem_sd++;
3688 + if (dma->cmdi == HIFN_D_CMD_RSIZE) {
3690 + dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_JUMP|HIFN_D_MASKDONEIRQ);
3692 + dma->cmdr[HIFN_D_CMD_RSIZE].l |= htole32(HIFN_D_VALID);
3693 + HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
3694 + BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3696 + cmdi = dma->cmdi++;
3697 + cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]);
3698 + HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE);
3700 + /* .p for command/result already set */
3701 + dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_LAST |
3702 + HIFN_D_MASKDONEIRQ);
3704 + dma->cmdr[cmdi].l |= htole32(HIFN_D_VALID);
3705 + HIFN_CMDR_SYNC(sc, cmdi,
3706 + BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3710 + * We don't worry about missing an interrupt (which a "command wait"
3711 + * interrupt salvages us from), unless there is more than one command
3714 + if (dma->cmdu > 1) {
3715 + sc->sc_dmaier |= HIFN_DMAIER_C_WAIT;
3716 + WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
3719 + hifnstats.hst_ipackets++;
3720 + hifnstats.hst_ibytes += cmd->src_mapsize;
3722 + hifn_dmamap_load_src(sc, cmd);
3725 + * Unlike other descriptors, we don't mask done interrupt from
3726 + * result descriptor.
3730 + device_printf(sc->sc_dev, "load res\n");
3732 + if (dma->resi == HIFN_D_RES_RSIZE) {
3734 + dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_JUMP|HIFN_D_MASKDONEIRQ);
3736 + dma->resr[HIFN_D_RES_RSIZE].l |= htole32(HIFN_D_VALID);
3737 + HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
3738 + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3740 + resi = dma->resi++;
3741 + KASSERT(dma->hifn_commands[resi] == NULL,
3742 + ("hifn_crypto: command slot %u busy", resi));
3743 + dma->hifn_commands[resi] = cmd;
3744 + HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD);
3745 + if ((hint & CRYPTO_HINT_MORE) && sc->sc_curbatch < hifn_maxbatch) {
3746 + dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
3747 + HIFN_D_LAST | HIFN_D_MASKDONEIRQ);
3749 + dma->resr[resi].l |= htole32(HIFN_D_VALID);
3750 + sc->sc_curbatch++;
3751 + if (sc->sc_curbatch > hifnstats.hst_maxbatch)
3752 + hifnstats.hst_maxbatch = sc->sc_curbatch;
3753 + hifnstats.hst_totbatch++;
3755 + dma->resr[resi].l = htole32(HIFN_MAX_RESULT | HIFN_D_LAST);
3757 + dma->resr[resi].l |= htole32(HIFN_D_VALID);
3758 + sc->sc_curbatch = 0;
3760 + HIFN_RESR_SYNC(sc, resi,
3761 + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3765 + cmd->slopidx = resi;
3767 + hifn_dmamap_load_dst(sc, cmd);
3770 + if (sc->sc_c_busy == 0) {
3771 + csr |= HIFN_DMACSR_C_CTRL_ENA;
3772 + sc->sc_c_busy = 1;
3774 + if (sc->sc_s_busy == 0) {
3775 + csr |= HIFN_DMACSR_S_CTRL_ENA;
3776 + sc->sc_s_busy = 1;
3778 + if (sc->sc_r_busy == 0) {
3779 + csr |= HIFN_DMACSR_R_CTRL_ENA;
3780 + sc->sc_r_busy = 1;
3782 + if (sc->sc_d_busy == 0) {
3783 + csr |= HIFN_DMACSR_D_CTRL_ENA;
3784 + sc->sc_d_busy = 1;
3787 + WRITE_REG_1(sc, HIFN_1_DMA_CSR, csr);
3791 + device_printf(sc->sc_dev, "command: stat %8x ier %8x\n",
3792 + READ_REG_1(sc, HIFN_1_DMA_CSR),
3793 + READ_REG_1(sc, HIFN_1_DMA_IER));
3797 + sc->sc_active = 5;
3799 + KASSERT(err == 0, ("hifn_crypto: success with error %u", err));
3800 + return (err); /* success */
3803 + if (cmd->src_map != cmd->dst_map)
3804 + pci_unmap_buf(sc, &cmd->dst);
3807 + if (crp->crp_flags & CRYPTO_F_SKBUF) {
3808 + if (cmd->src_skb != cmd->dst_skb)
3810 + m_freem(cmd->dst_m);
3812 + device_printf(sc->sc_dev,
3813 + "%s,%d: CRYPTO_F_SKBUF src != dst not implemented\n",
3814 + __FILE__, __LINE__);
3817 + pci_unmap_buf(sc, &cmd->src);
3824 +hifn_tick(unsigned long arg)
3826 + struct hifn_softc *sc;
3827 + unsigned long l_flags;
3829 + if (arg >= HIFN_MAX_CHIPS)
3831 + sc = hifn_chip_idx[arg];
3836 + if (sc->sc_active == 0) {
3837 + struct hifn_dma *dma = sc->sc_dma;
3840 + if (dma->cmdu == 0 && sc->sc_c_busy) {
3841 + sc->sc_c_busy = 0;
3842 + r |= HIFN_DMACSR_C_CTRL_DIS;
3844 + if (dma->srcu == 0 && sc->sc_s_busy) {
3845 + sc->sc_s_busy = 0;
3846 + r |= HIFN_DMACSR_S_CTRL_DIS;
3848 + if (dma->dstu == 0 && sc->sc_d_busy) {
3849 + sc->sc_d_busy = 0;
3850 + r |= HIFN_DMACSR_D_CTRL_DIS;
3852 + if (dma->resu == 0 && sc->sc_r_busy) {
3853 + sc->sc_r_busy = 0;
3854 + r |= HIFN_DMACSR_R_CTRL_DIS;
3857 + WRITE_REG_1(sc, HIFN_1_DMA_CSR, r);
3861 + mod_timer(&sc->sc_tickto, jiffies + HZ);
3865 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
3866 +hifn_intr(int irq, void *arg)
3868 +hifn_intr(int irq, void *arg, struct pt_regs *regs)
3871 + struct hifn_softc *sc = arg;
3872 + struct hifn_dma *dma;
3873 + u_int32_t dmacsr, restart;
3875 + unsigned long l_flags;
3877 + dmacsr = READ_REG_1(sc, HIFN_1_DMA_CSR);
3879 + /* Nothing in the DMA unit interrupted */
3880 + if ((dmacsr & sc->sc_dmaier) == 0)
3889 + device_printf(sc->sc_dev,
3890 + "irq: stat %08x ien %08x damier %08x i %d/%d/%d/%d k %d/%d/%d/%d u %d/%d/%d/%d\n",
3891 + dmacsr, READ_REG_1(sc, HIFN_1_DMA_IER), sc->sc_dmaier,
3892 + dma->cmdi, dma->srci, dma->dsti, dma->resi,
3893 + dma->cmdk, dma->srck, dma->dstk, dma->resk,
3894 + dma->cmdu, dma->srcu, dma->dstu, dma->resu);
3898 + WRITE_REG_1(sc, HIFN_1_DMA_CSR, dmacsr & sc->sc_dmaier);
3900 + if ((sc->sc_flags & HIFN_HAS_PUBLIC) &&
3901 + (dmacsr & HIFN_DMACSR_PUBDONE))
3902 + WRITE_REG_1(sc, HIFN_1_PUB_STATUS,
3903 + READ_REG_1(sc, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE);
3905 + restart = dmacsr & (HIFN_DMACSR_D_OVER | HIFN_DMACSR_R_OVER);
3907 + device_printf(sc->sc_dev, "overrun %x\n", dmacsr);
3909 + if (sc->sc_flags & HIFN_IS_7811) {
3910 + if (dmacsr & HIFN_DMACSR_ILLR)
3911 + device_printf(sc->sc_dev, "illegal read\n");
3912 + if (dmacsr & HIFN_DMACSR_ILLW)
3913 + device_printf(sc->sc_dev, "illegal write\n");
3916 + restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT |
3917 + HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT);
3919 + device_printf(sc->sc_dev, "abort, resetting.\n");
3920 + hifnstats.hst_abort++;
3923 + return IRQ_HANDLED;
3926 + if ((dmacsr & HIFN_DMACSR_C_WAIT) && (dma->cmdu == 0)) {
3928 + * If no slots to process and we receive a "waiting on
3929 + * command" interrupt, we disable the "waiting on command"
3930 + * (by clearing it).
3932 + sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
3933 + WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
3936 + /* clear the rings */
3937 + i = dma->resk; u = dma->resu;
3939 + HIFN_RESR_SYNC(sc, i,
3940 + BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3941 + if (dma->resr[i].l & htole32(HIFN_D_VALID)) {
3942 + HIFN_RESR_SYNC(sc, i,
3943 + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3947 + if (i != HIFN_D_RES_RSIZE) {
3948 + struct hifn_command *cmd;
3949 + u_int8_t *macbuf = NULL;
3951 + HIFN_RES_SYNC(sc, i, BUS_DMASYNC_POSTREAD);
3952 + cmd = dma->hifn_commands[i];
3953 + KASSERT(cmd != NULL,
3954 + ("hifn_intr: null command slot %u", i));
3955 + dma->hifn_commands[i] = NULL;
3957 + if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
3958 + macbuf = dma->result_bufs[i];
3962 + hifn_callback(sc, cmd, macbuf);
3963 + hifnstats.hst_opackets++;
3967 + if (++i == (HIFN_D_RES_RSIZE + 1))
3970 + dma->resk = i; dma->resu = u;
3972 + i = dma->srck; u = dma->srcu;
3974 + if (i == HIFN_D_SRC_RSIZE)
3976 + HIFN_SRCR_SYNC(sc, i,
3977 + BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3978 + if (dma->srcr[i].l & htole32(HIFN_D_VALID)) {
3979 + HIFN_SRCR_SYNC(sc, i,
3980 + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3985 + dma->srck = i; dma->srcu = u;
3987 + i = dma->cmdk; u = dma->cmdu;
3989 + HIFN_CMDR_SYNC(sc, i,
3990 + BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3991 + if (dma->cmdr[i].l & htole32(HIFN_D_VALID)) {
3992 + HIFN_CMDR_SYNC(sc, i,
3993 + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3996 + if (i != HIFN_D_CMD_RSIZE) {
3998 + HIFN_CMD_SYNC(sc, i, BUS_DMASYNC_POSTWRITE);
4000 + if (++i == (HIFN_D_CMD_RSIZE + 1))
4003 + dma->cmdk = i; dma->cmdu = u;
4007 + if (sc->sc_needwakeup) { /* XXX check high watermark */
4008 + int wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ);
4011 + device_printf(sc->sc_dev,
4012 + "wakeup crypto (%x) u %d/%d/%d/%d\n",
4013 + sc->sc_needwakeup,
4014 + dma->cmdu, dma->srcu, dma->dstu, dma->resu);
4016 + sc->sc_needwakeup &= ~wakeup;
4017 + crypto_unblock(sc->sc_cid, wakeup);
4020 + return IRQ_HANDLED;
4024 + * Allocate a new 'session' and return an encoded session id. 'sidp'
4025 + * contains our registration id, and should contain an encoded session
4026 + * id on successful allocation.
4029 +hifn_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
4031 + struct hifn_softc *sc = device_get_softc(dev);
4032 + struct cryptoini *c;
4033 + int mac = 0, cry = 0, sesn;
4034 + struct hifn_session *ses = NULL;
4035 + unsigned long l_flags;
4037 + DPRINTF("%s()\n", __FUNCTION__);
4039 + KASSERT(sc != NULL, ("hifn_newsession: null softc"));
4040 + if (sidp == NULL || cri == NULL || sc == NULL) {
4041 + DPRINTF("%s,%d: %s - EINVAL\n", __FILE__, __LINE__, __FUNCTION__);
4046 + if (sc->sc_sessions == NULL) {
4047 + ses = sc->sc_sessions = (struct hifn_session *)kmalloc(sizeof(*ses),
4049 + if (ses == NULL) {
4054 + sc->sc_nsessions = 1;
4056 + for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
4057 + if (!sc->sc_sessions[sesn].hs_used) {
4058 + ses = &sc->sc_sessions[sesn];
4063 + if (ses == NULL) {
4064 + sesn = sc->sc_nsessions;
4065 + ses = (struct hifn_session *)kmalloc((sesn + 1) * sizeof(*ses),
4067 + if (ses == NULL) {
4071 + bcopy(sc->sc_sessions, ses, sesn * sizeof(*ses));
4072 + bzero(sc->sc_sessions, sesn * sizeof(*ses));
4073 + kfree(sc->sc_sessions);
4074 + sc->sc_sessions = ses;
4075 + ses = &sc->sc_sessions[sesn];
4076 + sc->sc_nsessions++;
4081 + bzero(ses, sizeof(*ses));
4084 + for (c = cri; c != NULL; c = c->cri_next) {
4085 + switch (c->cri_alg) {
4088 + case CRYPTO_MD5_HMAC:
4089 + case CRYPTO_SHA1_HMAC:
4091 + DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
4095 + ses->hs_mlen = c->cri_mlen;
4096 + if (ses->hs_mlen == 0) {
4097 + switch (c->cri_alg) {
4099 + case CRYPTO_MD5_HMAC:
4100 + ses->hs_mlen = 16;
4103 + case CRYPTO_SHA1_HMAC:
4104 + ses->hs_mlen = 20;
4109 + case CRYPTO_DES_CBC:
4110 + case CRYPTO_3DES_CBC:
4111 + case CRYPTO_AES_CBC:
4112 + /* XXX this may read fewer, does it matter? */
4113 + read_random(ses->hs_iv,
4114 + c->cri_alg == CRYPTO_AES_CBC ?
4115 + HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
4119 + DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
4125 + DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
4129 + if (mac == 0 && cry == 0) {
4130 + DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
4134 + *sidp = HIFN_SID(device_get_unit(sc->sc_dev), sesn);
4140 + * Deallocate a session.
4141 + * XXX this routine should run a zero'd mac/encrypt key into context ram.
4142 + * XXX to blow away any keys already stored there.
4145 +hifn_freesession(device_t dev, u_int64_t tid)
4147 + struct hifn_softc *sc = device_get_softc(dev);
4148 + int session, error;
4149 + u_int32_t sid = CRYPTO_SESID2LID(tid);
4150 + unsigned long l_flags;
4152 + DPRINTF("%s()\n", __FUNCTION__);
4154 + KASSERT(sc != NULL, ("hifn_freesession: null softc"));
4156 + DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
4161 + session = HIFN_SESSION(sid);
4162 + if (session < sc->sc_nsessions) {
4163 + bzero(&sc->sc_sessions[session], sizeof(struct hifn_session));
4166 + DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
4175 +hifn_process(device_t dev, struct cryptop *crp, int hint)
4177 + struct hifn_softc *sc = device_get_softc(dev);
4178 + struct hifn_command *cmd = NULL;
4179 + int session, err, ivlen;
4180 + struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
4182 + DPRINTF("%s()\n", __FUNCTION__);
4184 + if (crp == NULL || crp->crp_callback == NULL) {
4185 + hifnstats.hst_invalid++;
4186 + DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
4189 + session = HIFN_SESSION(crp->crp_sid);
4191 + if (sc == NULL || session >= sc->sc_nsessions) {
4192 + DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
4197 + cmd = kmalloc(sizeof(struct hifn_command), SLAB_ATOMIC);
4198 + if (cmd == NULL) {
4199 + hifnstats.hst_nomem++;
4203 + memset(cmd, 0, sizeof(*cmd));
4205 + if (crp->crp_flags & CRYPTO_F_SKBUF) {
4206 + cmd->src_skb = (struct sk_buff *)crp->crp_buf;
4207 + cmd->dst_skb = (struct sk_buff *)crp->crp_buf;
4208 + } else if (crp->crp_flags & CRYPTO_F_IOV) {
4209 + cmd->src_io = (struct uio *)crp->crp_buf;
4210 + cmd->dst_io = (struct uio *)crp->crp_buf;
4212 + cmd->src_buf = crp->crp_buf;
4213 + cmd->dst_buf = crp->crp_buf;
4216 + crd1 = crp->crp_desc;
4217 + if (crd1 == NULL) {
4218 + DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
4222 + crd2 = crd1->crd_next;
4224 + if (crd2 == NULL) {
4225 + if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
4226 + crd1->crd_alg == CRYPTO_SHA1_HMAC ||
4227 + crd1->crd_alg == CRYPTO_SHA1 ||
4228 + crd1->crd_alg == CRYPTO_MD5) {
4231 + } else if (crd1->crd_alg == CRYPTO_DES_CBC ||
4232 + crd1->crd_alg == CRYPTO_3DES_CBC ||
4233 + crd1->crd_alg == CRYPTO_AES_CBC ||
4234 + crd1->crd_alg == CRYPTO_ARC4) {
4235 + if ((crd1->crd_flags & CRD_F_ENCRYPT) == 0)
4236 + cmd->base_masks |= HIFN_BASE_CMD_DECODE;
4240 + DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
4245 + if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
4246 + crd1->crd_alg == CRYPTO_SHA1_HMAC ||
4247 + crd1->crd_alg == CRYPTO_MD5 ||
4248 + crd1->crd_alg == CRYPTO_SHA1) &&
4249 + (crd2->crd_alg == CRYPTO_DES_CBC ||
4250 + crd2->crd_alg == CRYPTO_3DES_CBC ||
4251 + crd2->crd_alg == CRYPTO_AES_CBC ||
4252 + crd2->crd_alg == CRYPTO_ARC4) &&
4253 + ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
4254 + cmd->base_masks = HIFN_BASE_CMD_DECODE;
4257 + } else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
4258 + crd1->crd_alg == CRYPTO_ARC4 ||
4259 + crd1->crd_alg == CRYPTO_3DES_CBC ||
4260 + crd1->crd_alg == CRYPTO_AES_CBC) &&
4261 + (crd2->crd_alg == CRYPTO_MD5_HMAC ||
4262 + crd2->crd_alg == CRYPTO_SHA1_HMAC ||
4263 + crd2->crd_alg == CRYPTO_MD5 ||
4264 + crd2->crd_alg == CRYPTO_SHA1) &&
4265 + (crd1->crd_flags & CRD_F_ENCRYPT)) {
4270 + * We cannot order the 7751 as requested
4272 + DPRINTF("%s,%d: %s %d,%d,%d - EINVAL\n",__FILE__,__LINE__,__FUNCTION__, crd1->crd_alg, crd2->crd_alg, crd1->crd_flags & CRD_F_ENCRYPT);
4279 + cmd->enccrd = enccrd;
4280 + cmd->base_masks |= HIFN_BASE_CMD_CRYPT;
4281 + switch (enccrd->crd_alg) {
4283 + cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_RC4;
4285 + case CRYPTO_DES_CBC:
4286 + cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_DES |
4287 + HIFN_CRYPT_CMD_MODE_CBC |
4288 + HIFN_CRYPT_CMD_NEW_IV;
4290 + case CRYPTO_3DES_CBC:
4291 + cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_3DES |
4292 + HIFN_CRYPT_CMD_MODE_CBC |
4293 + HIFN_CRYPT_CMD_NEW_IV;
4295 + case CRYPTO_AES_CBC:
4296 + cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_AES |
4297 + HIFN_CRYPT_CMD_MODE_CBC |
4298 + HIFN_CRYPT_CMD_NEW_IV;
4301 + DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
4305 + if (enccrd->crd_alg != CRYPTO_ARC4) {
4306 + ivlen = ((enccrd->crd_alg == CRYPTO_AES_CBC) ?
4307 + HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
4308 + if (enccrd->crd_flags & CRD_F_ENCRYPT) {
4309 + if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
4310 + bcopy(enccrd->crd_iv, cmd->iv, ivlen);
4312 + bcopy(sc->sc_sessions[session].hs_iv,
4315 + if ((enccrd->crd_flags & CRD_F_IV_PRESENT)
4317 + crypto_copyback(crp->crp_flags,
4318 + crp->crp_buf, enccrd->crd_inject,
4322 + if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
4323 + bcopy(enccrd->crd_iv, cmd->iv, ivlen);
4325 + crypto_copydata(crp->crp_flags,
4326 + crp->crp_buf, enccrd->crd_inject,
4332 + if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT)
4333 + cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;
4334 + cmd->ck = enccrd->crd_key;
4335 + cmd->cklen = enccrd->crd_klen >> 3;
4336 + cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;
4339 + * Need to specify the size for the AES key in the masks.
4341 + if ((cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) ==
4342 + HIFN_CRYPT_CMD_ALG_AES) {
4343 + switch (cmd->cklen) {
4345 + cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_128;
4348 + cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_192;
4351 + cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_256;
4354 + DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
4362 + cmd->maccrd = maccrd;
4363 + cmd->base_masks |= HIFN_BASE_CMD_MAC;
4365 + switch (maccrd->crd_alg) {
4367 + cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
4368 + HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
4369 + HIFN_MAC_CMD_POS_IPSEC;
4371 + case CRYPTO_MD5_HMAC:
4372 + cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
4373 + HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
4374 + HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
4377 + cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
4378 + HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
4379 + HIFN_MAC_CMD_POS_IPSEC;
4381 + case CRYPTO_SHA1_HMAC:
4382 + cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
4383 + HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
4384 + HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
4388 + if (maccrd->crd_alg == CRYPTO_SHA1_HMAC ||
4389 + maccrd->crd_alg == CRYPTO_MD5_HMAC) {
4390 + cmd->mac_masks |= HIFN_MAC_CMD_NEW_KEY;
4391 + bcopy(maccrd->crd_key, cmd->mac, maccrd->crd_klen >> 3);
4392 + bzero(cmd->mac + (maccrd->crd_klen >> 3),
4393 + HIFN_MAC_KEY_LENGTH - (maccrd->crd_klen >> 3));
4398 + cmd->session_num = session;
4401 + err = hifn_crypto(sc, cmd, crp, hint);
4404 + } else if (err == ERESTART) {
4406 + * There weren't enough resources to dispatch the request
4407 + * to the part. Notify the caller so they'll requeue this
4408 + * request and resubmit it again soon.
4412 + device_printf(sc->sc_dev, "requeue request\n");
4415 + sc->sc_needwakeup |= CRYPTO_SYMQ;
4422 + if (err == EINVAL)
4423 + hifnstats.hst_invalid++;
4425 + hifnstats.hst_nomem++;
4426 + crp->crp_etype = err;
4432 +hifn_abort(struct hifn_softc *sc)
4434 + struct hifn_dma *dma = sc->sc_dma;
4435 + struct hifn_command *cmd;
4436 + struct cryptop *crp;
4439 + DPRINTF("%s()\n", __FUNCTION__);
4441 + i = dma->resk; u = dma->resu;
4443 + cmd = dma->hifn_commands[i];
4444 + KASSERT(cmd != NULL, ("hifn_abort: null command slot %u", i));
4445 + dma->hifn_commands[i] = NULL;
4448 + if ((dma->resr[i].l & htole32(HIFN_D_VALID)) == 0) {
4449 + /* Salvage what we can. */
4452 + if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
4453 + macbuf = dma->result_bufs[i];
4457 + hifnstats.hst_opackets++;
4458 + hifn_callback(sc, cmd, macbuf);
4461 + if (cmd->src_map == cmd->dst_map) {
4462 + bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
4463 + BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
4465 + bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
4466 + BUS_DMASYNC_POSTWRITE);
4467 + bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
4468 + BUS_DMASYNC_POSTREAD);
4472 + if (cmd->src_skb != cmd->dst_skb) {
4474 + m_freem(cmd->src_m);
4475 + crp->crp_buf = (caddr_t)cmd->dst_m;
4477 + device_printf(sc->sc_dev,
4478 + "%s,%d: CRYPTO_F_SKBUF src != dst not implemented\n",
4479 + __FILE__, __LINE__);
4483 + /* non-shared buffers cannot be restarted */
4484 + if (cmd->src_map != cmd->dst_map) {
4486 + * XXX should be EAGAIN, delayed until
4487 + * after the reset.
4489 + crp->crp_etype = ENOMEM;
4490 + pci_unmap_buf(sc, &cmd->dst);
4492 + crp->crp_etype = ENOMEM;
4494 + pci_unmap_buf(sc, &cmd->src);
4497 + if (crp->crp_etype != EAGAIN)
4501 + if (++i == HIFN_D_RES_RSIZE)
4505 + dma->resk = i; dma->resu = u;
4507 + hifn_reset_board(sc, 1);
4508 + hifn_init_dma(sc);
4509 + hifn_init_pci_registers(sc);
4513 +hifn_callback(struct hifn_softc *sc, struct hifn_command *cmd, u_int8_t *macbuf)
4515 + struct hifn_dma *dma = sc->sc_dma;
4516 + struct cryptop *crp = cmd->crp;
4517 + struct cryptodesc *crd;
4520 + DPRINTF("%s()\n", __FUNCTION__);
4523 + if (cmd->src_map == cmd->dst_map) {
4524 + bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
4525 + BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
4527 + bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
4528 + BUS_DMASYNC_POSTWRITE);
4529 + bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
4530 + BUS_DMASYNC_POSTREAD);
4534 + if (crp->crp_flags & CRYPTO_F_SKBUF) {
4535 + if (cmd->src_skb != cmd->dst_skb) {
4537 + crp->crp_buf = (caddr_t)cmd->dst_m;
4538 + totlen = cmd->src_mapsize;
4539 + for (m = cmd->dst_m; m != NULL; m = m->m_next) {
4540 + if (totlen < m->m_len) {
4541 + m->m_len = totlen;
4544 + totlen -= m->m_len;
4546 + cmd->dst_m->m_pkthdr.len = cmd->src_m->m_pkthdr.len;
4547 + m_freem(cmd->src_m);
4549 + device_printf(sc->sc_dev,
4550 + "%s,%d: CRYPTO_F_SKBUF src != dst not implemented\n",
4551 + __FILE__, __LINE__);
4556 + if (cmd->sloplen != 0) {
4557 + crypto_copyback(crp->crp_flags, crp->crp_buf,
4558 + cmd->src_mapsize - cmd->sloplen, cmd->sloplen,
4559 + (caddr_t)&dma->slop[cmd->slopidx]);
4562 + i = dma->dstk; u = dma->dstu;
4564 + if (i == HIFN_D_DST_RSIZE)
4567 + bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
4568 + BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
4570 + if (dma->dstr[i].l & htole32(HIFN_D_VALID)) {
4572 + bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
4573 + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4579 + dma->dstk = i; dma->dstu = u;
4581 + hifnstats.hst_obytes += cmd->dst_mapsize;
4583 + if ((cmd->base_masks & (HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE)) ==
4584 + HIFN_BASE_CMD_CRYPT) {
4585 + for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
4586 + if (crd->crd_alg != CRYPTO_DES_CBC &&
4587 + crd->crd_alg != CRYPTO_3DES_CBC &&
4588 + crd->crd_alg != CRYPTO_AES_CBC)
4590 + ivlen = ((crd->crd_alg == CRYPTO_AES_CBC) ?
4591 + HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
4592 + crypto_copydata(crp->crp_flags, crp->crp_buf,
4593 + crd->crd_skip + crd->crd_len - ivlen, ivlen,
4594 + cmd->softc->sc_sessions[cmd->session_num].hs_iv);
4599 + if (macbuf != NULL) {
4600 + for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
4603 + if (crd->crd_alg != CRYPTO_MD5 &&
4604 + crd->crd_alg != CRYPTO_SHA1 &&
4605 + crd->crd_alg != CRYPTO_MD5_HMAC &&
4606 + crd->crd_alg != CRYPTO_SHA1_HMAC) {
4609 + len = cmd->softc->sc_sessions[cmd->session_num].hs_mlen;
4610 + crypto_copyback(crp->crp_flags, crp->crp_buf,
4611 + crd->crd_inject, len, macbuf);
4616 + if (cmd->src_map != cmd->dst_map)
4617 + pci_unmap_buf(sc, &cmd->dst);
4618 + pci_unmap_buf(sc, &cmd->src);
4624 + * 7811 PB3 rev/2 parts lock-up on burst writes to Group 0
4625 + * and Group 1 registers; avoid conditions that could create
4626 + * burst writes by doing a read in between the writes.
4628 + * NB: The read we interpose is always to the same register;
4629 + * we do this because reading from an arbitrary (e.g. last)
4630 + * register may not always work.
4633 +hifn_write_reg_0(struct hifn_softc *sc, bus_size_t reg, u_int32_t val)
4635 + if (sc->sc_flags & HIFN_IS_7811) {
4636 + if (sc->sc_bar0_lastreg == reg - 4)
4637 + readl(sc->sc_bar0 + HIFN_0_PUCNFG);
4638 + sc->sc_bar0_lastreg = reg;
4640 + writel(val, sc->sc_bar0 + reg);
4644 +hifn_write_reg_1(struct hifn_softc *sc, bus_size_t reg, u_int32_t val)
4646 + if (sc->sc_flags & HIFN_IS_7811) {
4647 + if (sc->sc_bar1_lastreg == reg - 4)
4648 + readl(sc->sc_bar1 + HIFN_1_REVID);
4649 + sc->sc_bar1_lastreg = reg;
4651 + writel(val, sc->sc_bar1 + reg);
4655 +static struct pci_device_id hifn_pci_tbl[] = {
4656 + { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7951,
4657 + PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
4658 + { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7955,
4659 + PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
4660 + { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7956,
4661 + PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
4662 + { PCI_VENDOR_NETSEC, PCI_PRODUCT_NETSEC_7751,
4663 + PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
4664 + { PCI_VENDOR_INVERTEX, PCI_PRODUCT_INVERTEX_AEON,
4665 + PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
4666 + { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7811,
4667 + PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
4669 + * Other vendors share this PCI ID as well, such as
4670 + * http://www.powercrypt.com, and obviously they also
4671 + * use the same key.
4673 + { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7751,
4674 + PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
4675 + { 0, 0, 0, 0, 0, 0, }
4677 +MODULE_DEVICE_TABLE(pci, hifn_pci_tbl);
4679 +static struct pci_driver hifn_driver = {
4681 + .id_table = hifn_pci_tbl,
4682 + .probe = hifn_probe,
4683 + .remove = hifn_remove,
4684 + /* add PM stuff here one day */
4687 +static int __init hifn_init (void)
4689 + struct hifn_softc *sc = NULL;
4692 + DPRINTF("%s(%p)\n", __FUNCTION__, hifn_init);
4694 + rc = pci_register_driver(&hifn_driver);
4695 + pci_register_driver_compat(&hifn_driver, rc);
4700 +static void __exit hifn_exit (void)
4702 + pci_unregister_driver(&hifn_driver);
4705 +module_init(hifn_init);
4706 +module_exit(hifn_exit);
4708 +MODULE_LICENSE("BSD");
4709 +MODULE_AUTHOR("David McCullough <david_mccullough@securecomputing.com>");
4710 +MODULE_DESCRIPTION("OCF driver for hifn PCI crypto devices");
4711 --- /dev/null 2007-07-04 13:54:27.000000000 +1000
4712 +++ linux/crypto/ocf/hifn/hifnHIPP.c 2007-07-25 14:36:45.000000000 +1000
4715 + * Driver for Hifn HIPP-I/II chipset
4716 + * Copyright (c) 2006 Michael Richardson <mcr@xelerance.com>
4718 + * Redistribution and use in source and binary forms, with or without
4719 + * modification, are permitted provided that the following conditions
4722 + * 1. Redistributions of source code must retain the above copyright
4723 + * notice, this list of conditions and the following disclaimer.
4724 + * 2. Redistributions in binary form must reproduce the above copyright
4725 + * notice, this list of conditions and the following disclaimer in the
4726 + * documentation and/or other materials provided with the distribution.
4727 + * 3. The name of the author may not be used to endorse or promote products
4728 + * derived from this software without specific prior written permission.
4730 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
4731 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
4732 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
4733 + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
4734 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
4735 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
4736 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
4737 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
4738 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
4739 + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
4741 + * Effort sponsored by Hifn Inc.
4746 + * Driver for various Hifn encryption processors.
4748 +#ifndef AUTOCONF_INCLUDED
4749 +#include <linux/config.h>
4751 +#include <linux/module.h>
4752 +#include <linux/init.h>
4753 +#include <linux/list.h>
4754 +#include <linux/slab.h>
4755 +#include <linux/wait.h>
4756 +#include <linux/sched.h>
4757 +#include <linux/pci.h>
4758 +#include <linux/delay.h>
4759 +#include <linux/interrupt.h>
4760 +#include <linux/spinlock.h>
4761 +#include <linux/random.h>
4762 +#include <linux/version.h>
4763 +#include <linux/skbuff.h>
4764 +#include <linux/uio.h>
4765 +#include <linux/sysfs.h>
4766 +#include <linux/miscdevice.h>
4767 +#include <asm/io.h>
4769 +#include <cryptodev.h>
4771 +#include "hifnHIPPreg.h"
4772 +#include "hifnHIPPvar.h"
4775 +#define DPRINTF(a...) if (hipp_debug) { \
4776 + printk("%s: ", sc ? \
4777 + device_get_nameunit(sc->sc_dev) : "hifn"); \
4781 +#define DPRINTF(a...)
4784 +typedef int bus_size_t;
4787 +pci_get_revid(struct pci_dev *dev)
4790 + pci_read_config_byte(dev, PCI_REVISION_ID, &rid);
4794 +#define debug hipp_debug
4795 +int hipp_debug = 0;
4796 +module_param(hipp_debug, int, 0644);
4797 +MODULE_PARM_DESC(hipp_debug, "Enable debug");
4799 +int hipp_maxbatch = 1;
4800 +module_param(hipp_maxbatch, int, 0644);
4801 +MODULE_PARM_DESC(hipp_maxbatch, "max ops to batch w/o interrupt");
4803 +static int hipp_probe(struct pci_dev *dev, const struct pci_device_id *ent);
4804 +static void hipp_remove(struct pci_dev *dev);
4805 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
4806 +static irqreturn_t hipp_intr(int irq, void *arg);
4808 +static irqreturn_t hipp_intr(int irq, void *arg, struct pt_regs *regs);
4811 +static int hipp_num_chips = 0;
4812 +static struct hipp_softc *hipp_chip_idx[HIPP_MAX_CHIPS];
4814 +static int hipp_newsession(device_t, u_int32_t *, struct cryptoini *);
4815 +static int hipp_freesession(device_t, u_int64_t);
4816 +static int hipp_process(device_t, struct cryptop *, int);
4818 +static device_method_t hipp_methods = {
4819 + /* crypto device methods */
4820 + DEVMETHOD(cryptodev_newsession, hipp_newsession),
4821 + DEVMETHOD(cryptodev_freesession,hipp_freesession),
4822 + DEVMETHOD(cryptodev_process, hipp_process),
4825 +static __inline u_int32_t
4826 +READ_REG(struct hipp_softc *sc, unsigned int barno, bus_size_t reg)
4828 + u_int32_t v = readl(sc->sc_bar[barno] + reg);
4829 + //sc->sc_bar0_lastreg = (bus_size_t) -1;
4832 +static __inline void
4833 +WRITE_REG(struct hipp_softc *sc, unsigned int barno, bus_size_t reg, u_int32_t val)
4835 + writel(val, sc->sc_bar[barno] + reg);
4838 +#define READ_REG_0(sc, reg) READ_REG(sc, 0, reg)
4839 +#define WRITE_REG_0(sc, reg, val) WRITE_REG(sc,0, reg, val)
4840 +#define READ_REG_1(sc, reg) READ_REG(sc, 1, reg)
4841 +#define WRITE_REG_1(sc, reg, val) WRITE_REG(sc,1, reg, val)
4844 +hipp_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
4850 +hipp_freesession(device_t dev, u_int64_t tid)
4856 +hipp_process(device_t dev, struct cryptop *crp, int hint)
4862 +hipp_partname(struct hipp_softc *sc, char buf[128], size_t blen)
4866 + switch (pci_get_vendor(sc->sc_pcidev)) {
4867 + case PCI_VENDOR_HIFN:
4868 + switch (pci_get_device(sc->sc_pcidev)) {
4869 + case PCI_PRODUCT_HIFN_7855: n = "Hifn 7855";
4870 + case PCI_PRODUCT_HIFN_8155: n = "Hifn 8155";
4871 + case PCI_PRODUCT_HIFN_6500: n = "Hifn 6500";
4876 + snprintf(buf, blen, "VID=%02x,PID=%02x",
4877 + pci_get_vendor(sc->sc_pcidev),
4878 + pci_get_device(sc->sc_pcidev));
4881 + strncat(buf, n, blen);
4886 +struct hipp_fs_entry {
4887 + struct attribute attr;
4893 +cryptoid_show(struct device *dev,
4894 + struct device_attribute *attr,
4897 + struct hipp_softc *sc;
4899 + sc = pci_get_drvdata(to_pci_dev (dev));
4900 + return sprintf (buf, "%d\n", sc->sc_cid);
4903 +struct device_attribute hipp_dev_cryptoid = __ATTR_RO(cryptoid);
4906 + * Attach an interface that successfully probed.
4909 +hipp_probe(struct pci_dev *dev, const struct pci_device_id *ent)
4911 + struct hipp_softc *sc = NULL;
4919 + DPRINTF("%s()\n", __FUNCTION__);
4921 + if (pci_enable_device(dev) < 0)
4924 + if (pci_set_mwi(dev))
4928 + printk("hifn: found device with no IRQ assigned. check BIOS settings!");
4929 + pci_disable_device(dev);
4933 + sc = (struct hipp_softc *) kmalloc(sizeof(*sc), GFP_KERNEL);
4936 + memset(sc, 0, sizeof(*sc));
4938 + softc_device_init(sc, "hifn-hipp", hipp_num_chips, hipp_methods);
4940 + sc->sc_pcidev = dev;
4943 + sc->sc_num = hipp_num_chips++;
4945 + if (sc->sc_num < HIPP_MAX_CHIPS)
4946 + hipp_chip_idx[sc->sc_num] = sc;
4948 + pci_set_drvdata(sc->sc_pcidev, sc);
4950 + spin_lock_init(&sc->sc_mtx);
4953 + * Setup PCI resources.
4954 + * The READ_REG_0, WRITE_REG_0, READ_REG_1,
4955 + * and WRITE_REG_1 macros throughout the driver are used
4956 + * to permit better debugging.
4958 + for(i=0; i<4; i++) {
4959 + unsigned long mem_start, mem_len;
4960 + mem_start = pci_resource_start(sc->sc_pcidev, i);
4961 + mem_len = pci_resource_len(sc->sc_pcidev, i);
4962 + sc->sc_barphy[i] = (caddr_t)mem_start;
4963 + sc->sc_bar[i] = (ocf_iomem_t) ioremap(mem_start, mem_len);
4964 + if (!sc->sc_bar[i]) {
4965 + device_printf(sc->sc_dev, "cannot map bar%d register space\n", i);
4970 + //hipp_reset_board(sc, 0);
4971 + pci_set_master(sc->sc_pcidev);
4974 + * Arrange the interrupt line.
4976 + rc = request_irq(dev->irq, hipp_intr, IRQF_SHARED, "hifn", sc);
4978 + device_printf(sc->sc_dev, "could not map interrupt: %d\n", rc);
4981 + sc->sc_irq = dev->irq;
4983 + rev = READ_REG_1(sc, HIPP_1_REVID) & 0xffff;
4987 + device_printf(sc->sc_dev, "%s, rev %u",
4988 + hipp_partname(sc, b, sizeof(b)), rev);
4992 + if (sc->sc_flags & HIFN_IS_7956)
4993 + printf(", pll=0x%x<%s clk, %ux mult>",
4995 + sc->sc_pllconfig & HIFN_PLL_REF_SEL ? "ext" : "pci",
4996 + 2 + 2*((sc->sc_pllconfig & HIFN_PLL_ND) >> 11));
5000 + sc->sc_cid = crypto_get_driverid(softc_get_device(sc),CRYPTOCAP_F_HARDWARE);
5001 + if (sc->sc_cid < 0) {
5002 + device_printf(sc->sc_dev, "could not get crypto driver id\n");
5006 +#if 0 /* cannot work with a non-GPL module */
5007 + /* make a sysfs entry to let the world know what entry we got */
5008 + sysfs_create_file(&sc->sc_pcidev->dev.kobj, &hipp_dev_cryptoid.attr);
5012 + init_timer(&sc->sc_tickto);
5013 + sc->sc_tickto.function = hifn_tick;
5014 + sc->sc_tickto.data = (unsigned long) sc->sc_num;
5015 + mod_timer(&sc->sc_tickto, jiffies + HZ);
5018 +#if 0 /* no code here yet ?? */
5019 + crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
5025 + if (sc->sc_cid >= 0)
5026 + crypto_unregister_all(sc->sc_cid);
5027 + if (sc->sc_irq != -1)
5028 + free_irq(sc->sc_irq, sc);
5032 + /* Turn off DMA polling */
5033 + WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
5034 + HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
5036 + pci_free_consistent(sc->sc_pcidev,
5037 + sizeof(*sc->sc_dma),
5038 + sc->sc_dma, sc->sc_dma_physaddr);
5046 + * Detach an interface that successfully probed.
5049 +hipp_remove(struct pci_dev *dev)
5051 + struct hipp_softc *sc = pci_get_drvdata(dev);
5052 + unsigned long l_flags;
5054 + DPRINTF("%s()\n", __FUNCTION__);
5056 + /* disable interrupts */
5060 + WRITE_REG_1(sc, HIFN_1_DMA_IER, 0);
5063 + /*XXX other resources */
5064 + del_timer_sync(&sc->sc_tickto);
5066 + /* Turn off DMA polling */
5067 + WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
5068 + HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
5071 + crypto_unregister_all(sc->sc_cid);
5073 + free_irq(sc->sc_irq, sc);
5076 + pci_free_consistent(sc->sc_pcidev, sizeof(*sc->sc_dma),
5077 + sc->sc_dma, sc->sc_dma_physaddr);
5081 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
5082 +static irqreturn_t hipp_intr(int irq, void *arg)
5084 +static irqreturn_t hipp_intr(int irq, void *arg, struct pt_regs *regs)
5087 + struct hipp_softc *sc = arg;
5089 + sc = sc; /* shut up compiler */
5091 + return IRQ_HANDLED;
5094 +static struct pci_device_id hipp_pci_tbl[] = {
5095 + { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7855,
5096 + PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
5097 + { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_8155,
5098 + PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
5100 +MODULE_DEVICE_TABLE(pci, hipp_pci_tbl);
5102 +static struct pci_driver hipp_driver = {
5104 + .id_table = hipp_pci_tbl,
5105 + .probe = hipp_probe,
5106 + .remove = hipp_remove,
5107 + /* add PM stuff here one day */
5110 +static int __init hipp_init (void)
5112 + struct hipp_softc *sc = NULL;
5115 + DPRINTF("%s(%p)\n", __FUNCTION__, hipp_init);
5117 + rc = pci_register_driver(&hipp_driver);
5118 + pci_register_driver_compat(&hipp_driver, rc);
5123 +static void __exit hipp_exit (void)
5125 + pci_unregister_driver(&hipp_driver);
5128 +module_init(hipp_init);
5129 +module_exit(hipp_exit);
5131 +MODULE_LICENSE("BSD");
5132 +MODULE_AUTHOR("Michael Richardson <mcr@xelerance.com>");
5133 +MODULE_DESCRIPTION("OCF driver for hifn HIPP-I/II PCI crypto devices");
5134 --- /dev/null 2007-07-04 13:54:27.000000000 +1000
5135 +++ linux/crypto/ocf/hifn/hifnHIPPreg.h 2007-07-25 10:11:22.000000000 +1000
5138 + * Hifn HIPP-I/HIPP-II (7855/8155) driver.
5139 + * Copyright (c) 2006 Michael Richardson <mcr@xelerance.com>
5141 + * Redistribution and use in source and binary forms, with or without
5142 + * modification, are permitted provided that the following conditions
5145 + * 1. Redistributions of source code must retain the above copyright
5146 + * notice, this list of conditions and the following disclaimer.
5147 + * 2. Redistributions in binary form must reproduce the above copyright
5148 + * notice, this list of conditions and the following disclaimer in the
5149 + * documentation and/or other materials provided with the distribution.
5150 + * 3. The name of the author may not be used to endorse or promote products
5151 + * derived from this software without specific prior written permission.
5154 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
5155 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
5156 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
5157 + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
5158 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
5159 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
5160 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
5161 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
5162 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
5163 + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5165 + * Effort sponsored by Hifn inc.
5169 +#ifndef __HIFNHIPP_H__
5170 +#define __HIFNHIPP_H__
5173 + * PCI vendor and device identifiers
5175 +#define PCI_VENDOR_HIFN 0x13a3 /* Hifn */
5176 +#define PCI_PRODUCT_HIFN_6500 0x0006 /* 6500 */
5177 +#define PCI_PRODUCT_HIFN_7855 0x001f /* 7855 */
5178 +#define PCI_PRODUCT_HIFN_8155 0x999 /* XXX 8155 */
5180 +#define HIPP_1_REVID 0x01 /* BOGUS */
5182 +#endif /* __HIPP_H__ */
5183 --- /dev/null 2007-07-04 13:54:27.000000000 +1000
5184 +++ linux/crypto/ocf/hifn/hifnHIPPvar.h 2007-07-25 13:47:04.000000000 +1000
5187 + * Hifn HIPP-I/HIPP-II (7855/8155) driver.
5188 + * Copyright (c) 2006 Michael Richardson <mcr@xelerance.com> *
5190 + * Redistribution and use in source and binary forms, with or without
5191 + * modification, are permitted provided that the following conditions
5194 + * 1. Redistributions of source code must retain the above copyright
5195 + * notice, this list of conditions and the following disclaimer.
5196 + * 2. Redistributions in binary form must reproduce the above copyright
5197 + * notice, this list of conditions and the following disclaimer in the
5198 + * documentation and/or other materials provided with the distribution.
5199 + * 3. The name of the author may not be used to endorse or promote products
5200 + * derived from this software without specific prior written permission.
5203 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
5204 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
5205 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
5206 + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
5207 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
5208 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
5209 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
5210 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
5211 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
5212 + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5214 + * Effort sponsored by Hifn inc.
5218 +#ifndef __HIFNHIPPVAR_H__
5219 +#define __HIFNHIPPVAR_H__
5221 +#define HIPP_MAX_CHIPS 8
5224 + * Holds data specific to a single Hifn HIPP-I board.
5226 +struct hipp_softc {
5227 + softc_device_decl sc_dev;
5229 + struct pci_dev *sc_pcidev; /* device backpointer */
5230 + ocf_iomem_t sc_bar[5];
5231 + caddr_t sc_barphy[5]; /* physical address */
5232 + int sc_num; /* for multiple devs */
5233 + spinlock_t sc_mtx; /* per-instance lock */
5239 + u_int32_t sc_dmaier;
5240 + u_int32_t sc_drammodel; /* 1=dram, 0=sram */
5241 + u_int32_t sc_pllconfig; /* 7954/7955/7956 PLL config */
5243 + struct hifn_dma *sc_dma;
5244 + dma_addr_t sc_dma_physaddr;/* physical address of sc_dma */
5249 + struct hifn_session *sc_sessions;
5252 +#define HIFN_HAS_RNG 0x1 /* includes random number generator */
5253 +#define HIFN_HAS_PUBLIC 0x2 /* includes public key support */
5254 +#define HIFN_HAS_AES 0x4 /* includes AES support */
5255 +#define HIFN_IS_7811 0x8 /* Hifn 7811 part */
5256 +#define HIFN_IS_7956 0x10 /* Hifn 7956/7955 don't have SDRAM */
5258 + struct timer_list sc_tickto; /* for managing DMA */
5261 + int sc_rnghz; /* RNG polling frequency */
5263 + int sc_c_busy; /* command ring busy */
5264 + int sc_s_busy; /* source data ring busy */
5265 + int sc_d_busy; /* destination data ring busy */
5266 + int sc_r_busy; /* result ring busy */
5267 + int sc_active; /* for initial countdown */
5268 + int sc_needwakeup; /* ops q'd wating on resources */
5269 + int sc_curbatch; /* # ops submitted w/o int */
5271 + struct miscdevice sc_miscdev;
5275 +#define HIPP_LOCK(_sc) spin_lock_irqsave(&(_sc)->sc_mtx, l_flags)
5276 +#define HIPP_UNLOCK(_sc) spin_unlock_irqrestore(&(_sc)->sc_mtx, l_flags)
5278 +#endif /* __HIFNHIPPVAR_H__ */
5279 --- /dev/null 2007-07-04 13:54:27.000000000 +1000
5280 +++ linux/crypto/ocf/safe/md5.c 2005-05-20 10:30:52.000000000 +1000
5282 +/* $KAME: md5.c,v 1.5 2000/11/08 06:13:08 itojun Exp $ */
5284 + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
5285 + * All rights reserved.
5287 + * Redistribution and use in source and binary forms, with or without
5288 + * modification, are permitted provided that the following conditions
5290 + * 1. Redistributions of source code must retain the above copyright
5291 + * notice, this list of conditions and the following disclaimer.
5292 + * 2. Redistributions in binary form must reproduce the above copyright
5293 + * notice, this list of conditions and the following disclaimer in the
5294 + * documentation and/or other materials provided with the distribution.
5295 + * 3. Neither the name of the project nor the names of its contributors
5296 + * may be used to endorse or promote products derived from this software
5297 + * without specific prior written permission.
5299 + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
5300 + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
5301 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
5302 + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
5303 + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
5304 + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
5305 + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
5306 + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
5307 + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
5308 + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
5313 +#include <sys/cdefs.h>
5314 +__FBSDID("$FreeBSD: src/sys/crypto/md5.c,v 1.9 2004/01/27 19:49:19 des Exp $");
5316 +#include <sys/types.h>
5317 +#include <sys/cdefs.h>
5318 +#include <sys/time.h>
5319 +#include <sys/systm.h>
5320 +#include <crypto/md5.h>
5323 +#define SHIFT(X, s) (((X) << (s)) | ((X) >> (32 - (s))))
5325 +#define F(X, Y, Z) (((X) & (Y)) | ((~X) & (Z)))
5326 +#define G(X, Y, Z) (((X) & (Z)) | ((Y) & (~Z)))
5327 +#define H(X, Y, Z) ((X) ^ (Y) ^ (Z))
5328 +#define I(X, Y, Z) ((Y) ^ ((X) | (~Z)))
5330 +#define ROUND1(a, b, c, d, k, s, i) { \
5331 + (a) = (a) + F((b), (c), (d)) + X[(k)] + T[(i)]; \
5332 + (a) = SHIFT((a), (s)); \
5333 + (a) = (b) + (a); \
5336 +#define ROUND2(a, b, c, d, k, s, i) { \
5337 + (a) = (a) + G((b), (c), (d)) + X[(k)] + T[(i)]; \
5338 + (a) = SHIFT((a), (s)); \
5339 + (a) = (b) + (a); \
5342 +#define ROUND3(a, b, c, d, k, s, i) { \
5343 + (a) = (a) + H((b), (c), (d)) + X[(k)] + T[(i)]; \
5344 + (a) = SHIFT((a), (s)); \
5345 + (a) = (b) + (a); \
5348 +#define ROUND4(a, b, c, d, k, s, i) { \
5349 + (a) = (a) + I((b), (c), (d)) + X[(k)] + T[(i)]; \
5350 + (a) = SHIFT((a), (s)); \
5351 + (a) = (b) + (a); \
5374 +#define MD5_A0 0x67452301
5375 +#define MD5_B0 0xefcdab89
5376 +#define MD5_C0 0x98badcfe
5377 +#define MD5_D0 0x10325476
5379 +/* Integer part of 4294967296 times abs(sin(i)), where i is in radians. */
5380 +static const u_int32_t T[65] = {
5382 + 0xd76aa478, 0xe8c7b756, 0x242070db, 0xc1bdceee,
5383 + 0xf57c0faf, 0x4787c62a, 0xa8304613, 0xfd469501,
5384 + 0x698098d8, 0x8b44f7af, 0xffff5bb1, 0x895cd7be,
5385 + 0x6b901122, 0xfd987193, 0xa679438e, 0x49b40821,
5387 + 0xf61e2562, 0xc040b340, 0x265e5a51, 0xe9b6c7aa,
5388 + 0xd62f105d, 0x2441453, 0xd8a1e681, 0xe7d3fbc8,
5389 + 0x21e1cde6, 0xc33707d6, 0xf4d50d87, 0x455a14ed,
5390 + 0xa9e3e905, 0xfcefa3f8, 0x676f02d9, 0x8d2a4c8a,
5392 + 0xfffa3942, 0x8771f681, 0x6d9d6122, 0xfde5380c,
5393 + 0xa4beea44, 0x4bdecfa9, 0xf6bb4b60, 0xbebfbc70,
5394 + 0x289b7ec6, 0xeaa127fa, 0xd4ef3085, 0x4881d05,
5395 + 0xd9d4d039, 0xe6db99e5, 0x1fa27cf8, 0xc4ac5665,
5397 + 0xf4292244, 0x432aff97, 0xab9423a7, 0xfc93a039,
5398 + 0x655b59c3, 0x8f0ccc92, 0xffeff47d, 0x85845dd1,
5399 + 0x6fa87e4f, 0xfe2ce6e0, 0xa3014314, 0x4e0811a1,
5400 + 0xf7537e82, 0xbd3af235, 0x2ad7d2bb, 0xeb86d391,
5403 +static const u_int8_t md5_paddat[MD5_BUFLEN] = {
5404 + 0x80, 0, 0, 0, 0, 0, 0, 0,
5405 + 0, 0, 0, 0, 0, 0, 0, 0,
5406 + 0, 0, 0, 0, 0, 0, 0, 0,
5407 + 0, 0, 0, 0, 0, 0, 0, 0,
5408 + 0, 0, 0, 0, 0, 0, 0, 0,
5409 + 0, 0, 0, 0, 0, 0, 0, 0,
5410 + 0, 0, 0, 0, 0, 0, 0, 0,
5411 + 0, 0, 0, 0, 0, 0, 0, 0,
5414 +static void md5_calc(u_int8_t *, md5_ctxt *);
5416 +void md5_init(ctxt)
5421 + ctxt->md5_sta = MD5_A0;
5422 + ctxt->md5_stb = MD5_B0;
5423 + ctxt->md5_stc = MD5_C0;
5424 + ctxt->md5_std = MD5_D0;
5425 + bzero(ctxt->md5_buf, sizeof(ctxt->md5_buf));
5428 +void md5_loop(ctxt, input, len)
5431 + u_int len; /* number of bytes */
5435 + ctxt->md5_n += len * 8; /* byte to bit */
5436 + gap = MD5_BUFLEN - ctxt->md5_i;
5439 + bcopy((void *)input, (void *)(ctxt->md5_buf + ctxt->md5_i),
5441 + md5_calc(ctxt->md5_buf, ctxt);
5443 + for (i = gap; i + MD5_BUFLEN <= len; i += MD5_BUFLEN) {
5444 + md5_calc((u_int8_t *)(input + i), ctxt);
5447 + ctxt->md5_i = len - i;
5448 + bcopy((void *)(input + i), (void *)ctxt->md5_buf, ctxt->md5_i);
5450 + bcopy((void *)input, (void *)(ctxt->md5_buf + ctxt->md5_i),
5452 + ctxt->md5_i += len;
5461 + /* Don't count up padding. Keep md5_n. */
5462 + gap = MD5_BUFLEN - ctxt->md5_i;
5465 + (void *)(ctxt->md5_buf + ctxt->md5_i),
5466 + gap - sizeof(ctxt->md5_n));
5468 + /* including gap == 8 */
5469 + bcopy(md5_paddat, (void *)(ctxt->md5_buf + ctxt->md5_i),
5471 + md5_calc(ctxt->md5_buf, ctxt);
5472 + bcopy((md5_paddat + gap),
5473 + (void *)ctxt->md5_buf,
5474 + MD5_BUFLEN - sizeof(ctxt->md5_n));
5478 +#if BYTE_ORDER == LITTLE_ENDIAN
5479 + bcopy(&ctxt->md5_n8[0], &ctxt->md5_buf[56], 8);
5481 +#if BYTE_ORDER == BIG_ENDIAN
5482 + ctxt->md5_buf[56] = ctxt->md5_n8[7];
5483 + ctxt->md5_buf[57] = ctxt->md5_n8[6];
5484 + ctxt->md5_buf[58] = ctxt->md5_n8[5];
5485 + ctxt->md5_buf[59] = ctxt->md5_n8[4];
5486 + ctxt->md5_buf[60] = ctxt->md5_n8[3];
5487 + ctxt->md5_buf[61] = ctxt->md5_n8[2];
5488 + ctxt->md5_buf[62] = ctxt->md5_n8[1];
5489 + ctxt->md5_buf[63] = ctxt->md5_n8[0];
5492 + md5_calc(ctxt->md5_buf, ctxt);
5495 +void md5_result(digest, ctxt)
5499 + /* 4 byte words */
5500 +#if BYTE_ORDER == LITTLE_ENDIAN
5501 + bcopy(&ctxt->md5_st8[0], digest, 16);
5503 +#if BYTE_ORDER == BIG_ENDIAN
5504 + digest[ 0] = ctxt->md5_st8[ 3]; digest[ 1] = ctxt->md5_st8[ 2];
5505 + digest[ 2] = ctxt->md5_st8[ 1]; digest[ 3] = ctxt->md5_st8[ 0];
5506 + digest[ 4] = ctxt->md5_st8[ 7]; digest[ 5] = ctxt->md5_st8[ 6];
5507 + digest[ 6] = ctxt->md5_st8[ 5]; digest[ 7] = ctxt->md5_st8[ 4];
5508 + digest[ 8] = ctxt->md5_st8[11]; digest[ 9] = ctxt->md5_st8[10];
5509 + digest[10] = ctxt->md5_st8[ 9]; digest[11] = ctxt->md5_st8[ 8];
5510 + digest[12] = ctxt->md5_st8[15]; digest[13] = ctxt->md5_st8[14];
5511 + digest[14] = ctxt->md5_st8[13]; digest[15] = ctxt->md5_st8[12];
5515 +static void md5_calc(b64, ctxt)
5519 + u_int32_t A = ctxt->md5_sta;
5520 + u_int32_t B = ctxt->md5_stb;
5521 + u_int32_t C = ctxt->md5_stc;
5522 + u_int32_t D = ctxt->md5_std;
5523 +#if BYTE_ORDER == LITTLE_ENDIAN
5524 + u_int32_t *X = (u_int32_t *)b64;
5526 +#if BYTE_ORDER == BIG_ENDIAN
5527 + /* 4 byte words */
5528 + /* what a brute force but fast! */
5530 + u_int8_t *y = (u_int8_t *)X;
5531 + y[ 0] = b64[ 3]; y[ 1] = b64[ 2]; y[ 2] = b64[ 1]; y[ 3] = b64[ 0];
5532 + y[ 4] = b64[ 7]; y[ 5] = b64[ 6]; y[ 6] = b64[ 5]; y[ 7] = b64[ 4];
5533 + y[ 8] = b64[11]; y[ 9] = b64[10]; y[10] = b64[ 9]; y[11] = b64[ 8];
5534 + y[12] = b64[15]; y[13] = b64[14]; y[14] = b64[13]; y[15] = b64[12];
5535 + y[16] = b64[19]; y[17] = b64[18]; y[18] = b64[17]; y[19] = b64[16];
5536 + y[20] = b64[23]; y[21] = b64[22]; y[22] = b64[21]; y[23] = b64[20];
5537 + y[24] = b64[27]; y[25] = b64[26]; y[26] = b64[25]; y[27] = b64[24];
5538 + y[28] = b64[31]; y[29] = b64[30]; y[30] = b64[29]; y[31] = b64[28];
5539 + y[32] = b64[35]; y[33] = b64[34]; y[34] = b64[33]; y[35] = b64[32];
5540 + y[36] = b64[39]; y[37] = b64[38]; y[38] = b64[37]; y[39] = b64[36];
5541 + y[40] = b64[43]; y[41] = b64[42]; y[42] = b64[41]; y[43] = b64[40];
5542 + y[44] = b64[47]; y[45] = b64[46]; y[46] = b64[45]; y[47] = b64[44];
5543 + y[48] = b64[51]; y[49] = b64[50]; y[50] = b64[49]; y[51] = b64[48];
5544 + y[52] = b64[55]; y[53] = b64[54]; y[54] = b64[53]; y[55] = b64[52];
5545 + y[56] = b64[59]; y[57] = b64[58]; y[58] = b64[57]; y[59] = b64[56];
5546 + y[60] = b64[63]; y[61] = b64[62]; y[62] = b64[61]; y[63] = b64[60];
5549 + ROUND1(A, B, C, D, 0, Sa, 1); ROUND1(D, A, B, C, 1, Sb, 2);
5550 + ROUND1(C, D, A, B, 2, Sc, 3); ROUND1(B, C, D, A, 3, Sd, 4);
5551 + ROUND1(A, B, C, D, 4, Sa, 5); ROUND1(D, A, B, C, 5, Sb, 6);
5552 + ROUND1(C, D, A, B, 6, Sc, 7); ROUND1(B, C, D, A, 7, Sd, 8);
5553 + ROUND1(A, B, C, D, 8, Sa, 9); ROUND1(D, A, B, C, 9, Sb, 10);
5554 + ROUND1(C, D, A, B, 10, Sc, 11); ROUND1(B, C, D, A, 11, Sd, 12);
5555 + ROUND1(A, B, C, D, 12, Sa, 13); ROUND1(D, A, B, C, 13, Sb, 14);
5556 + ROUND1(C, D, A, B, 14, Sc, 15); ROUND1(B, C, D, A, 15, Sd, 16);
5558 + ROUND2(A, B, C, D, 1, Se, 17); ROUND2(D, A, B, C, 6, Sf, 18);
5559 + ROUND2(C, D, A, B, 11, Sg, 19); ROUND2(B, C, D, A, 0, Sh, 20);
5560 + ROUND2(A, B, C, D, 5, Se, 21); ROUND2(D, A, B, C, 10, Sf, 22);
5561 + ROUND2(C, D, A, B, 15, Sg, 23); ROUND2(B, C, D, A, 4, Sh, 24);
5562 + ROUND2(A, B, C, D, 9, Se, 25); ROUND2(D, A, B, C, 14, Sf, 26);
5563 + ROUND2(C, D, A, B, 3, Sg, 27); ROUND2(B, C, D, A, 8, Sh, 28);
5564 + ROUND2(A, B, C, D, 13, Se, 29); ROUND2(D, A, B, C, 2, Sf, 30);
5565 + ROUND2(C, D, A, B, 7, Sg, 31); ROUND2(B, C, D, A, 12, Sh, 32);
5567 + ROUND3(A, B, C, D, 5, Si, 33); ROUND3(D, A, B, C, 8, Sj, 34);
5568 + ROUND3(C, D, A, B, 11, Sk, 35); ROUND3(B, C, D, A, 14, Sl, 36);
5569 + ROUND3(A, B, C, D, 1, Si, 37); ROUND3(D, A, B, C, 4, Sj, 38);
5570 + ROUND3(C, D, A, B, 7, Sk, 39); ROUND3(B, C, D, A, 10, Sl, 40);
5571 + ROUND3(A, B, C, D, 13, Si, 41); ROUND3(D, A, B, C, 0, Sj, 42);
5572 + ROUND3(C, D, A, B, 3, Sk, 43); ROUND3(B, C, D, A, 6, Sl, 44);
5573 + ROUND3(A, B, C, D, 9, Si, 45); ROUND3(D, A, B, C, 12, Sj, 46);
5574 + ROUND3(C, D, A, B, 15, Sk, 47); ROUND3(B, C, D, A, 2, Sl, 48);
5576 + ROUND4(A, B, C, D, 0, Sm, 49); ROUND4(D, A, B, C, 7, Sn, 50);
5577 + ROUND4(C, D, A, B, 14, So, 51); ROUND4(B, C, D, A, 5, Sp, 52);
5578 + ROUND4(A, B, C, D, 12, Sm, 53); ROUND4(D, A, B, C, 3, Sn, 54);
5579 + ROUND4(C, D, A, B, 10, So, 55); ROUND4(B, C, D, A, 1, Sp, 56);
5580 + ROUND4(A, B, C, D, 8, Sm, 57); ROUND4(D, A, B, C, 15, Sn, 58);
5581 + ROUND4(C, D, A, B, 6, So, 59); ROUND4(B, C, D, A, 13, Sp, 60);
5582 + ROUND4(A, B, C, D, 4, Sm, 61); ROUND4(D, A, B, C, 11, Sn, 62);
5583 + ROUND4(C, D, A, B, 2, So, 63); ROUND4(B, C, D, A, 9, Sp, 64);
5585 + ctxt->md5_sta += A;
5586 + ctxt->md5_stb += B;
5587 + ctxt->md5_stc += C;
5588 + ctxt->md5_std += D;
5590 --- /dev/null 2007-07-04 13:54:27.000000000 +1000
5591 +++ linux/crypto/ocf/safe/md5.h 2005-05-20 10:30:52.000000000 +1000
5593 +/* $FreeBSD: src/sys/crypto/md5.h,v 1.4 2002/03/20 05:13:50 alfred Exp $ */
5594 +/* $KAME: md5.h,v 1.4 2000/03/27 04:36:22 sumikawa Exp $ */
5597 + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
5598 + * All rights reserved.
5600 + * Redistribution and use in source and binary forms, with or without
5601 + * modification, are permitted provided that the following conditions
5603 + * 1. Redistributions of source code must retain the above copyright
5604 + * notice, this list of conditions and the following disclaimer.
5605 + * 2. Redistributions in binary form must reproduce the above copyright
5606 + * notice, this list of conditions and the following disclaimer in the
5607 + * documentation and/or other materials provided with the distribution.
5608 + * 3. Neither the name of the project nor the names of its contributors
5609 + * may be used to endorse or promote products derived from this software
5610 + * without specific prior written permission.
5612 + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
5613 + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
5614 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
5615 + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
5616 + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
5617 + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
5618 + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
5619 + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
5620 + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
5621 + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
5625 +#ifndef _NETINET6_MD5_H_
5626 +#define _NETINET6_MD5_H_
5628 +#define MD5_BUFLEN 64
5632 + u_int32_t md5_state32[4];
5633 + u_int8_t md5_state8[16];
5636 +#define md5_sta md5_st.md5_state32[0]
5637 +#define md5_stb md5_st.md5_state32[1]
5638 +#define md5_stc md5_st.md5_state32[2]
5639 +#define md5_std md5_st.md5_state32[3]
5640 +#define md5_st8 md5_st.md5_state8
5643 + u_int64_t md5_count64;
5644 + u_int8_t md5_count8[8];
5646 +#define md5_n md5_count.md5_count64
5647 +#define md5_n8 md5_count.md5_count8
5650 + u_int8_t md5_buf[MD5_BUFLEN];
5653 +extern void md5_init(md5_ctxt *);
5654 +extern void md5_loop(md5_ctxt *, u_int8_t *, u_int);
5655 +extern void md5_pad(md5_ctxt *);
5656 +extern void md5_result(u_int8_t *, md5_ctxt *);
5658 +/* compatibility */
5659 +#define MD5_CTX md5_ctxt
5660 +#define MD5Init(x) md5_init((x))
5661 +#define MD5Update(x, y, z) md5_loop((x), (y), (z))
5662 +#define MD5Final(x, y) \
5665 + md5_result((x), (y)); \
5668 +#endif /* ! _NETINET6_MD5_H_*/
5669 --- /dev/null 2007-07-04 13:54:27.000000000 +1000
5670 +++ linux/crypto/ocf/safe/safe.c 2007-07-27 11:34:59.000000000 +1000
5673 + * Linux port done by David McCullough <david_mccullough@securecomputing.com>
5674 + * Copyright (C) 2004-2007 David McCullough
5675 + * The license and original author are listed below.
5677 + * Copyright (c) 2003 Sam Leffler, Errno Consulting
5678 + * Copyright (c) 2003 Global Technology Associates, Inc.
5679 + * All rights reserved.
5681 + * Redistribution and use in source and binary forms, with or without
5682 + * modification, are permitted provided that the following conditions
5684 + * 1. Redistributions of source code must retain the above copyright
5685 + * notice, this list of conditions and the following disclaimer.
5686 + * 2. Redistributions in binary form must reproduce the above copyright
5687 + * notice, this list of conditions and the following disclaimer in the
5688 + * documentation and/or other materials provided with the distribution.
5690 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
5691 + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
5692 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
5693 + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
5694 + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
5695 + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
5696 + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
5697 + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
5698 + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
5699 + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
5702 +__FBSDID("$FreeBSD: src/sys/dev/safe/safe.c,v 1.18 2007/03/21 03:42:50 sam Exp $");
5705 +#ifndef AUTOCONF_INCLUDED
5706 +#include <linux/config.h>
5708 +#include <linux/module.h>
5709 +#include <linux/kernel.h>
5710 +#include <linux/init.h>
5711 +#include <linux/list.h>
5712 +#include <linux/slab.h>
5713 +#include <linux/wait.h>
5714 +#include <linux/sched.h>
5715 +#include <linux/pci.h>
5716 +#include <linux/delay.h>
5717 +#include <linux/interrupt.h>
5718 +#include <linux/spinlock.h>
5719 +#include <linux/random.h>
5720 +#include <linux/version.h>
5721 +#include <linux/skbuff.h>
5722 +#include <asm/io.h>
5725 + * SafeNet SafeXcel-1141 hardware crypto accelerator
5728 +#include <cryptodev.h>
5730 +#include <safe/safereg.h>
5731 +#include <safe/safevar.h>
5734 +#define DPRINTF(a) do { \
5736 + printk("%s: ", sc ? \
5737 + device_get_nameunit(sc->sc_dev) : "safe"); \
5746 + * until we find a cleaner way, include the BSD md5/sha1 code
5749 +#define HMAC_HACK 1
5751 +#define LITTLE_ENDIAN 1234
5752 +#define BIG_ENDIAN 4321
5753 +#ifdef __LITTLE_ENDIAN
5754 +#define BYTE_ORDER LITTLE_ENDIAN
5756 +#ifdef __BIG_ENDIAN
5757 +#define BYTE_ORDER BIG_ENDIAN
5759 +#include <safe/md5.h>
5760 +#include <safe/md5.c>
5761 +#include <safe/sha1.h>
5762 +#include <safe/sha1.c>
5764 +u_int8_t hmac_ipad_buffer[64] = {
5765 + 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
5766 + 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
5767 + 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
5768 + 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
5769 + 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
5770 + 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
5771 + 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
5772 + 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36
5775 +u_int8_t hmac_opad_buffer[64] = {
5776 + 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
5777 + 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
5778 + 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
5779 + 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
5780 + 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
5781 + 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
5782 + 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
5783 + 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C
5785 +#endif /* HMAC_HACK */
5787 +/* add proc entry for this */
5788 +struct safe_stats safestats;
5790 +#define debug safe_debug
5791 +int safe_debug = 0;
5792 +module_param(safe_debug, int, 0644);
5793 +MODULE_PARM_DESC(safe_debug, "Enable debug");
5795 +static void safe_callback(struct safe_softc *, struct safe_ringentry *);
5796 +static void safe_feed(struct safe_softc *, struct safe_ringentry *);
5797 +#if defined(CONFIG_OCF_RANDOMHARVEST) && !defined(SAFE_NO_RNG)
5798 +static void safe_rng_init(struct safe_softc *);
5799 +int safe_rngbufsize = 8; /* 32 bytes each read */
5800 +module_param(safe_rngbufsize, int, 0644);
5801 +MODULE_PARM_DESC(safe_rngbufsize, "RNG polling buffer size (32-bit words)");
5802 +int safe_rngmaxalarm = 8; /* max alarms before reset */
5803 +module_param(safe_rngmaxalarm, int, 0644);
5804 +MODULE_PARM_DESC(safe_rngmaxalarm, "RNG max alarms before reset");
5805 +#endif /* SAFE_NO_RNG */
5807 +static void safe_totalreset(struct safe_softc *sc);
5808 +static int safe_dmamap_aligned(struct safe_softc *sc, const struct safe_operand *op);
5809 +static int safe_dmamap_uniform(struct safe_softc *sc, const struct safe_operand *op);
5810 +static int safe_free_entry(struct safe_softc *sc, struct safe_ringentry *re);
5811 +static int safe_kprocess(device_t dev, struct cryptkop *krp, int hint);
5812 +static int safe_kstart(struct safe_softc *sc);
5813 +static int safe_ksigbits(struct safe_softc *sc, struct crparam *cr);
5814 +static void safe_kfeed(struct safe_softc *sc);
5815 +static void safe_kpoll(unsigned long arg);
5816 +static void safe_kload_reg(struct safe_softc *sc, u_int32_t off,
5817 + u_int32_t len, struct crparam *n);
5819 +static int safe_newsession(device_t, u_int32_t *, struct cryptoini *);
5820 +static int safe_freesession(device_t, u_int64_t);
5821 +static int safe_process(device_t, struct cryptop *, int);
5823 +static device_method_t safe_methods = {
5824 + /* crypto device methods */
5825 + DEVMETHOD(cryptodev_newsession, safe_newsession),
5826 + DEVMETHOD(cryptodev_freesession,safe_freesession),
5827 + DEVMETHOD(cryptodev_process, safe_process),
5828 + DEVMETHOD(cryptodev_kprocess, safe_kprocess),
5831 +#define READ_REG(sc,r) readl((sc)->sc_base_addr + (r))
5832 +#define WRITE_REG(sc,r,val) writel((val), (sc)->sc_base_addr + (r))
5834 +#define SAFE_MAX_CHIPS 8
5835 +static struct safe_softc *safe_chip_idx[SAFE_MAX_CHIPS];
5838 + * split our buffers up into safe DMAable byte fragments to avoid lockup
5839 + * bug in 1141 HW on rev 1.0.
5844 + struct safe_softc *sc,
5845 + struct safe_operand *buf,
5850 + int chunk, tlen = len;
5852 + tmp = pci_map_single(sc->sc_pcidev, addr, len, PCI_DMA_BIDIRECTIONAL);
5854 + buf->mapsize += len;
5856 + chunk = (len > sc->sc_max_dsize) ? sc->sc_max_dsize : len;
5857 + buf->segs[buf->nsegs].ds_addr = tmp;
5858 + buf->segs[buf->nsegs].ds_len = chunk;
5859 + buf->segs[buf->nsegs].ds_tlen = tlen;
5869 + * map in a given uio buffer (great on some arches :-)
5873 +pci_map_uio(struct safe_softc *sc, struct safe_operand *buf, struct uio *uio)
5875 + struct iovec *iov = uio->uio_iov;
5878 + DPRINTF(("%s()\n", __FUNCTION__));
5883 + for (n = 0; n < uio->uio_iovcnt; n++) {
5884 + pci_map_linear(sc, buf, iov->iov_base, iov->iov_len);
5888 + /* identify this buffer by the first segment */
5889 + buf->map = (void *) buf->segs[0].ds_addr;
5894 + * map in a given sk_buff
5898 +pci_map_skb(struct safe_softc *sc,struct safe_operand *buf,struct sk_buff *skb)
5902 + DPRINTF(("%s()\n", __FUNCTION__));
5907 + pci_map_linear(sc, buf, skb->data, skb_headlen(skb));
5909 + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5910 + pci_map_linear(sc, buf,
5911 + page_address(skb_shinfo(skb)->frags[i].page) +
5912 + skb_shinfo(skb)->frags[i].page_offset,
5913 + skb_shinfo(skb)->frags[i].size);
5916 + /* identify this buffer by the first segment */
5917 + buf->map = (void *) buf->segs[0].ds_addr;
5922 +#if 0 /* not needed at this time */
5924 +pci_sync_operand(struct safe_softc *sc, struct safe_operand *buf)
5928 + DPRINTF(("%s()\n", __FUNCTION__));
5929 + for (i = 0; i < buf->nsegs; i++)
5930 + pci_dma_sync_single_for_cpu(sc->sc_pcidev, buf->segs[i].ds_addr,
5931 + buf->segs[i].ds_len, PCI_DMA_BIDIRECTIONAL);
5936 +pci_unmap_operand(struct safe_softc *sc, struct safe_operand *buf)
5939 + DPRINTF(("%s()\n", __FUNCTION__));
5940 + for (i = 0; i < buf->nsegs; i++) {
5941 + if (buf->segs[i].ds_tlen) {
5942 + DPRINTF(("%s - unmap %d 0x%x %d\n", __FUNCTION__, i, buf->segs[i].ds_addr, buf->segs[i].ds_tlen));
5943 + pci_unmap_single(sc->sc_pcidev, buf->segs[i].ds_addr,
5944 + buf->segs[i].ds_tlen, PCI_DMA_BIDIRECTIONAL);
5945 + DPRINTF(("%s - unmap %d 0x%x %d done\n", __FUNCTION__, i, buf->segs[i].ds_addr, buf->segs[i].ds_tlen));
5947 + buf->segs[i].ds_addr = 0;
5948 + buf->segs[i].ds_len = 0;
5949 + buf->segs[i].ds_tlen = 0;
5958 + * SafeXcel Interrupt routine
5961 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
5962 +safe_intr(int irq, void *arg)
5964 +safe_intr(int irq, void *arg, struct pt_regs *regs)
5967 + struct safe_softc *sc = arg;
5969 + unsigned long flags;
5971 + stat = READ_REG(sc, SAFE_HM_STAT);
5973 + DPRINTF(("%s(stat=0x%x)\n", __FUNCTION__, stat));
5975 + if (stat == 0) /* shared irq, not for us */
5978 + WRITE_REG(sc, SAFE_HI_CLR, stat); /* IACK */
5980 + if ((stat & SAFE_INT_PE_DDONE)) {
5982 + * Descriptor(s) done; scan the ring and
5983 + * process completed operations.
5985 + spin_lock_irqsave(&sc->sc_ringmtx, flags);
5986 + while (sc->sc_back != sc->sc_front) {
5987 + struct safe_ringentry *re = sc->sc_back;
5991 + safe_dump_ringstate(sc, __func__);
5992 + safe_dump_request(sc, __func__, re);
5996 + * safe_process marks ring entries that were allocated
5997 + * but not used with a csr of zero. This insures the
5998 + * ring front pointer never needs to be set backwards
5999 + * in the event that an entry is allocated but not used
6000 + * because of a setup error.
6002 + DPRINTF(("%s re->re_desc.d_csr=0x%x\n", __FUNCTION__, re->re_desc.d_csr));
6003 + if (re->re_desc.d_csr != 0) {
6004 + if (!SAFE_PE_CSR_IS_DONE(re->re_desc.d_csr)) {
6005 + DPRINTF(("%s !CSR_IS_DONE\n", __FUNCTION__));
6008 + if (!SAFE_PE_LEN_IS_DONE(re->re_desc.d_len)) {
6009 + DPRINTF(("%s !LEN_IS_DONE\n", __FUNCTION__));
6013 + safe_callback(sc, re);
6015 + if (++(sc->sc_back) == sc->sc_ringtop)
6016 + sc->sc_back = sc->sc_ring;
6018 + spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
6022 + * Check to see if we got any DMA Error
6024 + if (stat & SAFE_INT_PE_ERROR) {
6025 + printk("%s: dmaerr dmastat %08x\n", device_get_nameunit(sc->sc_dev),
6026 + (int)READ_REG(sc, SAFE_PE_DMASTAT));
6027 + safestats.st_dmaerr++;
6028 + safe_totalreset(sc);
6034 + if (sc->sc_needwakeup) { /* XXX check high watermark */
6035 + int wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ);
6036 + DPRINTF(("%s: wakeup crypto %x\n", __func__,
6037 + sc->sc_needwakeup));
6038 + sc->sc_needwakeup &= ~wakeup;
6039 + crypto_unblock(sc->sc_cid, wakeup);
6042 + return IRQ_HANDLED;
6046 + * safe_feed() - post a request to chip
6049 +safe_feed(struct safe_softc *sc, struct safe_ringentry *re)
6051 + DPRINTF(("%s()\n", __FUNCTION__));
6054 + safe_dump_ringstate(sc, __func__);
6055 + safe_dump_request(sc, __func__, re);
6059 + if (sc->sc_nqchip > safestats.st_maxqchip)
6060 + safestats.st_maxqchip = sc->sc_nqchip;
6061 + /* poke h/w to check descriptor ring, any value can be written */
6062 + WRITE_REG(sc, SAFE_HI_RD_DESCR, 0);
6065 +#define N(a) (sizeof(a) / sizeof (a[0]))
6067 +safe_setup_enckey(struct safe_session *ses, caddr_t key)
6071 + bcopy(key, ses->ses_key, ses->ses_klen / 8);
6073 + /* PE is little-endian, insure proper byte order */
6074 + for (i = 0; i < N(ses->ses_key); i++)
6075 + ses->ses_key[i] = htole32(ses->ses_key[i]);
6079 +safe_setup_mackey(struct safe_session *ses, int algo, caddr_t key, int klen)
6087 + for (i = 0; i < klen; i++)
6088 + key[i] ^= HMAC_IPAD_VAL;
6090 + if (algo == CRYPTO_MD5_HMAC) {
6092 + MD5Update(&md5ctx, key, klen);
6093 + MD5Update(&md5ctx, hmac_ipad_buffer, MD5_HMAC_BLOCK_LEN - klen);
6094 + bcopy(md5ctx.md5_st8, ses->ses_hminner, sizeof(md5ctx.md5_st8));
6096 + SHA1Init(&sha1ctx);
6097 + SHA1Update(&sha1ctx, key, klen);
6098 + SHA1Update(&sha1ctx, hmac_ipad_buffer,
6099 + SHA1_HMAC_BLOCK_LEN - klen);
6100 + bcopy(sha1ctx.h.b32, ses->ses_hminner, sizeof(sha1ctx.h.b32));
6103 + for (i = 0; i < klen; i++)
6104 + key[i] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);
6106 + if (algo == CRYPTO_MD5_HMAC) {
6108 + MD5Update(&md5ctx, key, klen);
6109 + MD5Update(&md5ctx, hmac_opad_buffer, MD5_HMAC_BLOCK_LEN - klen);
6110 + bcopy(md5ctx.md5_st8, ses->ses_hmouter, sizeof(md5ctx.md5_st8));
6112 + SHA1Init(&sha1ctx);
6113 + SHA1Update(&sha1ctx, key, klen);
6114 + SHA1Update(&sha1ctx, hmac_opad_buffer,
6115 + SHA1_HMAC_BLOCK_LEN - klen);
6116 + bcopy(sha1ctx.h.b32, ses->ses_hmouter, sizeof(sha1ctx.h.b32));
6119 + for (i = 0; i < klen; i++)
6120 + key[i] ^= HMAC_OPAD_VAL;
6124 + * this code prevents SHA working on a BE host,
6125 + * so it is obviously wrong. I think the byte
6126 + * swap setup we do with the chip fixes this for us
6129 + /* PE is little-endian, insure proper byte order */
6130 + for (i = 0; i < N(ses->ses_hminner); i++) {
6131 + ses->ses_hminner[i] = htole32(ses->ses_hminner[i]);
6132 + ses->ses_hmouter[i] = htole32(ses->ses_hmouter[i]);
6135 +#else /* HMAC_HACK */
6136 + printk("safe: md5/sha not implemented\n");
6137 +#endif /* HMAC_HACK */
6142 + * Allocate a new 'session' and return an encoded session id. 'sidp'
6143 + * contains our registration id, and should contain an encoded session
6144 + * id on successful allocation.
6147 +safe_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
6149 + struct safe_softc *sc = device_get_softc(dev);
6150 + struct cryptoini *c, *encini = NULL, *macini = NULL;
6151 + struct safe_session *ses = NULL;
6154 + DPRINTF(("%s()\n", __FUNCTION__));
6156 + if (sidp == NULL || cri == NULL || sc == NULL)
6159 + for (c = cri; c != NULL; c = c->cri_next) {
6160 + if (c->cri_alg == CRYPTO_MD5_HMAC ||
6161 + c->cri_alg == CRYPTO_SHA1_HMAC ||
6162 + c->cri_alg == CRYPTO_NULL_HMAC) {
6166 + } else if (c->cri_alg == CRYPTO_DES_CBC ||
6167 + c->cri_alg == CRYPTO_3DES_CBC ||
6168 + c->cri_alg == CRYPTO_AES_CBC ||
6169 + c->cri_alg == CRYPTO_NULL_CBC) {
6176 + if (encini == NULL && macini == NULL)
6178 + if (encini) { /* validate key length */
6179 + switch (encini->cri_alg) {
6180 + case CRYPTO_DES_CBC:
6181 + if (encini->cri_klen != 64)
6184 + case CRYPTO_3DES_CBC:
6185 + if (encini->cri_klen != 192)
6188 + case CRYPTO_AES_CBC:
6189 + if (encini->cri_klen != 128 &&
6190 + encini->cri_klen != 192 &&
6191 + encini->cri_klen != 256)
6197 + if (sc->sc_sessions == NULL) {
6198 + ses = sc->sc_sessions = (struct safe_session *)
6199 + kmalloc(sizeof(struct safe_session), SLAB_ATOMIC);
6202 + memset(ses, 0, sizeof(struct safe_session));
6204 + sc->sc_nsessions = 1;
6206 + for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
6207 + if (sc->sc_sessions[sesn].ses_used == 0) {
6208 + ses = &sc->sc_sessions[sesn];
6213 + if (ses == NULL) {
6214 + sesn = sc->sc_nsessions;
6215 + ses = (struct safe_session *)
6216 + kmalloc((sesn + 1) * sizeof(struct safe_session), SLAB_ATOMIC);
6219 + memset(ses, 0, (sesn + 1) * sizeof(struct safe_session));
6220 + bcopy(sc->sc_sessions, ses, sesn *
6221 + sizeof(struct safe_session));
6222 + bzero(sc->sc_sessions, sesn *
6223 + sizeof(struct safe_session));
6224 + kfree(sc->sc_sessions);
6225 + sc->sc_sessions = ses;
6226 + ses = &sc->sc_sessions[sesn];
6227 + sc->sc_nsessions++;
6231 + bzero(ses, sizeof(struct safe_session));
6232 + ses->ses_used = 1;
6236 + /* XXX may read fewer than requested */
6237 + read_random(ses->ses_iv, sizeof(ses->ses_iv));
6239 + ses->ses_klen = encini->cri_klen;
6240 + if (encini->cri_key != NULL)
6241 + safe_setup_enckey(ses, encini->cri_key);
6245 + ses->ses_mlen = macini->cri_mlen;
6246 + if (ses->ses_mlen == 0) {
6247 + if (macini->cri_alg == CRYPTO_MD5_HMAC)
6248 + ses->ses_mlen = MD5_HASH_LEN;
6250 + ses->ses_mlen = SHA1_HASH_LEN;
6253 + if (macini->cri_key != NULL) {
6254 + safe_setup_mackey(ses, macini->cri_alg, macini->cri_key,
6255 + macini->cri_klen / 8);
6259 + *sidp = SAFE_SID(device_get_unit(sc->sc_dev), sesn);
6264 + * Deallocate a session.
6267 +safe_freesession(device_t dev, u_int64_t tid)
6269 + struct safe_softc *sc = device_get_softc(dev);
6271 + u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;
6273 + DPRINTF(("%s()\n", __FUNCTION__));
6278 + session = SAFE_SESSION(sid);
6279 + if (session < sc->sc_nsessions) {
6280 + bzero(&sc->sc_sessions[session], sizeof(sc->sc_sessions[session]));
6289 +safe_process(device_t dev, struct cryptop *crp, int hint)
6291 + struct safe_softc *sc = device_get_softc(dev);
6292 + int err = 0, i, nicealign, uniform;
6293 + struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
6294 + int bypass, oplen, ivsize;
6297 + struct safe_session *ses;
6298 + struct safe_ringentry *re;
6299 + struct safe_sarec *sa;
6300 + struct safe_pdesc *pd;
6301 + u_int32_t cmd0, cmd1, staterec;
6302 + unsigned long flags;
6304 + DPRINTF(("%s()\n", __FUNCTION__));
6306 + if (crp == NULL || crp->crp_callback == NULL || sc == NULL) {
6307 + safestats.st_invalid++;
6310 + if (SAFE_SESSION(crp->crp_sid) >= sc->sc_nsessions) {
6311 + safestats.st_badsession++;
6315 + spin_lock_irqsave(&sc->sc_ringmtx, flags);
6316 + if (sc->sc_front == sc->sc_back && sc->sc_nqchip != 0) {
6317 + safestats.st_ringfull++;
6318 + sc->sc_needwakeup |= CRYPTO_SYMQ;
6319 + spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
6320 + return (ERESTART);
6322 + re = sc->sc_front;
6324 + staterec = re->re_sa.sa_staterec; /* save */
6325 + /* NB: zero everything but the PE descriptor */
6326 + bzero(&re->re_sa, sizeof(struct safe_ringentry) - sizeof(re->re_desc));
6327 + re->re_sa.sa_staterec = staterec; /* restore */
6330 + re->re_sesn = SAFE_SESSION(crp->crp_sid);
6332 + re->re_src.nsegs = 0;
6333 + re->re_dst.nsegs = 0;
6335 + if (crp->crp_flags & CRYPTO_F_SKBUF) {
6336 + re->re_src_skb = (struct sk_buff *)crp->crp_buf;
6337 + re->re_dst_skb = (struct sk_buff *)crp->crp_buf;
6338 + } else if (crp->crp_flags & CRYPTO_F_IOV) {
6339 + re->re_src_io = (struct uio *)crp->crp_buf;
6340 + re->re_dst_io = (struct uio *)crp->crp_buf;
6342 + safestats.st_badflags++;
6344 + goto errout; /* XXX we don't handle contiguous blocks! */
6348 + ses = &sc->sc_sessions[re->re_sesn];
6350 + crd1 = crp->crp_desc;
6351 + if (crd1 == NULL) {
6352 + safestats.st_nodesc++;
6356 + crd2 = crd1->crd_next;
6358 + cmd0 = SAFE_SA_CMD0_BASIC; /* basic group operation */
6360 + if (crd2 == NULL) {
6361 + if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
6362 + crd1->crd_alg == CRYPTO_SHA1_HMAC ||
6363 + crd1->crd_alg == CRYPTO_NULL_HMAC) {
6366 + cmd0 |= SAFE_SA_CMD0_OP_HASH;
6367 + } else if (crd1->crd_alg == CRYPTO_DES_CBC ||
6368 + crd1->crd_alg == CRYPTO_3DES_CBC ||
6369 + crd1->crd_alg == CRYPTO_AES_CBC ||
6370 + crd1->crd_alg == CRYPTO_NULL_CBC) {
6373 + cmd0 |= SAFE_SA_CMD0_OP_CRYPT;
6375 + safestats.st_badalg++;
6380 + if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
6381 + crd1->crd_alg == CRYPTO_SHA1_HMAC ||
6382 + crd1->crd_alg == CRYPTO_NULL_HMAC) &&
6383 + (crd2->crd_alg == CRYPTO_DES_CBC ||
6384 + crd2->crd_alg == CRYPTO_3DES_CBC ||
6385 + crd2->crd_alg == CRYPTO_AES_CBC ||
6386 + crd2->crd_alg == CRYPTO_NULL_CBC) &&
6387 + ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
6390 + } else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
6391 + crd1->crd_alg == CRYPTO_3DES_CBC ||
6392 + crd1->crd_alg == CRYPTO_AES_CBC ||
6393 + crd1->crd_alg == CRYPTO_NULL_CBC) &&
6394 + (crd2->crd_alg == CRYPTO_MD5_HMAC ||
6395 + crd2->crd_alg == CRYPTO_SHA1_HMAC ||
6396 + crd2->crd_alg == CRYPTO_NULL_HMAC) &&
6397 + (crd1->crd_flags & CRD_F_ENCRYPT)) {
6401 + safestats.st_badalg++;
6405 + cmd0 |= SAFE_SA_CMD0_OP_BOTH;
6409 + if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT)
6410 + safe_setup_enckey(ses, enccrd->crd_key);
6412 + if (enccrd->crd_alg == CRYPTO_DES_CBC) {
6413 + cmd0 |= SAFE_SA_CMD0_DES;
6414 + cmd1 |= SAFE_SA_CMD1_CBC;
6415 + ivsize = 2*sizeof(u_int32_t);
6416 + } else if (enccrd->crd_alg == CRYPTO_3DES_CBC) {
6417 + cmd0 |= SAFE_SA_CMD0_3DES;
6418 + cmd1 |= SAFE_SA_CMD1_CBC;
6419 + ivsize = 2*sizeof(u_int32_t);
6420 + } else if (enccrd->crd_alg == CRYPTO_AES_CBC) {
6421 + cmd0 |= SAFE_SA_CMD0_AES;
6422 + cmd1 |= SAFE_SA_CMD1_CBC;
6423 + if (ses->ses_klen == 128)
6424 + cmd1 |= SAFE_SA_CMD1_AES128;
6425 + else if (ses->ses_klen == 192)
6426 + cmd1 |= SAFE_SA_CMD1_AES192;
6428 + cmd1 |= SAFE_SA_CMD1_AES256;
6429 + ivsize = 4*sizeof(u_int32_t);
6431 + cmd0 |= SAFE_SA_CMD0_CRYPT_NULL;
6436 + * Setup encrypt/decrypt state. When using basic ops
6437 + * we can't use an inline IV because hash/crypt offset
6438 + * must be from the end of the IV to the start of the
6439 + * crypt data and this leaves out the preceding header
6440 + * from the hash calculation. Instead we place the IV
6441 + * in the state record and set the hash/crypt offset to
6442 + * copy both the header+IV.
6444 + if (enccrd->crd_flags & CRD_F_ENCRYPT) {
6445 + cmd0 |= SAFE_SA_CMD0_OUTBOUND;
6447 + if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
6448 + iv = enccrd->crd_iv;
6450 + iv = (caddr_t) ses->ses_iv;
6451 + if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) {
6452 + crypto_copyback(crp->crp_flags, crp->crp_buf,
6453 + enccrd->crd_inject, ivsize, iv);
6455 + bcopy(iv, re->re_sastate.sa_saved_iv, ivsize);
6457 + for (i = 0; i < ivsize/sizeof(re->re_sastate.sa_saved_iv[0]); i++)
6458 + re->re_sastate.sa_saved_iv[i] =
6459 + cpu_to_le32(re->re_sastate.sa_saved_iv[i]);
6460 + cmd0 |= SAFE_SA_CMD0_IVLD_STATE | SAFE_SA_CMD0_SAVEIV;
6461 + re->re_flags |= SAFE_QFLAGS_COPYOUTIV;
6463 + cmd0 |= SAFE_SA_CMD0_INBOUND;
6465 + if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) {
6466 + bcopy(enccrd->crd_iv,
6467 + re->re_sastate.sa_saved_iv, ivsize);
6469 + crypto_copydata(crp->crp_flags, crp->crp_buf,
6470 + enccrd->crd_inject, ivsize,
6471 + (caddr_t)re->re_sastate.sa_saved_iv);
6474 + for (i = 0; i < ivsize/sizeof(re->re_sastate.sa_saved_iv[0]); i++)
6475 + re->re_sastate.sa_saved_iv[i] =
6476 + cpu_to_le32(re->re_sastate.sa_saved_iv[i]);
6477 + cmd0 |= SAFE_SA_CMD0_IVLD_STATE;
6480 + * For basic encryption use the zero pad algorithm.
6481 + * This pads results to an 8-byte boundary and
6482 + * suppresses padding verification for inbound (i.e.
6483 + * decrypt) operations.
6485 + * NB: Not sure if the 8-byte pad boundary is a problem.
6487 + cmd0 |= SAFE_SA_CMD0_PAD_ZERO;
6489 + /* XXX assert key bufs have the same size */
6490 + bcopy(ses->ses_key, sa->sa_key, sizeof(sa->sa_key));
6494 + if (maccrd->crd_flags & CRD_F_KEY_EXPLICIT) {
6495 + safe_setup_mackey(ses, maccrd->crd_alg,
6496 + maccrd->crd_key, maccrd->crd_klen / 8);
6499 + if (maccrd->crd_alg == CRYPTO_MD5_HMAC) {
6500 + cmd0 |= SAFE_SA_CMD0_MD5;
6501 + cmd1 |= SAFE_SA_CMD1_HMAC; /* NB: enable HMAC */
6502 + } else if (maccrd->crd_alg == CRYPTO_SHA1_HMAC) {
6503 + cmd0 |= SAFE_SA_CMD0_SHA1;
6504 + cmd1 |= SAFE_SA_CMD1_HMAC; /* NB: enable HMAC */
6506 + cmd0 |= SAFE_SA_CMD0_HASH_NULL;
6509 + * Digest data is loaded from the SA and the hash
6510 + * result is saved to the state block where we
6511 + * retrieve it for return to the caller.
6513 + /* XXX assert digest bufs have the same size */
6514 + bcopy(ses->ses_hminner, sa->sa_indigest,
6515 + sizeof(sa->sa_indigest));
6516 + bcopy(ses->ses_hmouter, sa->sa_outdigest,
6517 + sizeof(sa->sa_outdigest));
6519 + cmd0 |= SAFE_SA_CMD0_HSLD_SA | SAFE_SA_CMD0_SAVEHASH;
6520 + re->re_flags |= SAFE_QFLAGS_COPYOUTICV;
6523 + if (enccrd && maccrd) {
6525 + * The offset from hash data to the start of
6526 + * crypt data is the difference in the skips.
6528 + bypass = maccrd->crd_skip;
6529 + coffset = enccrd->crd_skip - maccrd->crd_skip;
6530 + if (coffset < 0) {
6531 + DPRINTF(("%s: hash does not precede crypt; "
6532 + "mac skip %u enc skip %u\n",
6533 + __func__, maccrd->crd_skip, enccrd->crd_skip));
6534 + safestats.st_skipmismatch++;
6538 + oplen = enccrd->crd_skip + enccrd->crd_len;
6539 + if (maccrd->crd_skip + maccrd->crd_len != oplen) {
6540 + DPRINTF(("%s: hash amount %u != crypt amount %u\n",
6541 + __func__, maccrd->crd_skip + maccrd->crd_len,
6543 + safestats.st_lenmismatch++;
6549 + printf("mac: skip %d, len %d, inject %d\n",
6550 + maccrd->crd_skip, maccrd->crd_len,
6551 + maccrd->crd_inject);
6552 + printf("enc: skip %d, len %d, inject %d\n",
6553 + enccrd->crd_skip, enccrd->crd_len,
6554 + enccrd->crd_inject);
6555 + printf("bypass %d coffset %d oplen %d\n",
6556 + bypass, coffset, oplen);
6559 + if (coffset & 3) { /* offset must be 32-bit aligned */
6560 + DPRINTF(("%s: coffset %u misaligned\n",
6561 + __func__, coffset));
6562 + safestats.st_coffmisaligned++;
6567 + if (coffset > 255) { /* offset must be <256 dwords */
6568 + DPRINTF(("%s: coffset %u too big\n",
6569 + __func__, coffset));
6570 + safestats.st_cofftoobig++;
6575 + * Tell the hardware to copy the header to the output.
6576 + * The header is defined as the data from the end of
6577 + * the bypass to the start of data to be encrypted.
6578 + * Typically this is the inline IV. Note that you need
6579 + * to do this even if src+dst are the same; it appears
6580 + * that w/o this bit the crypted data is written
6581 + * immediately after the bypass data.
6583 + cmd1 |= SAFE_SA_CMD1_HDRCOPY;
6585 + * Disable IP header mutable bit handling. This is
6586 + * needed to get correct HMAC calculations.
6588 + cmd1 |= SAFE_SA_CMD1_MUTABLE;
6591 + bypass = enccrd->crd_skip;
6592 + oplen = bypass + enccrd->crd_len;
6594 + bypass = maccrd->crd_skip;
6595 + oplen = bypass + maccrd->crd_len;
6599 + /* XXX verify multiple of 4 when using s/g */
6600 + if (bypass > 96) { /* bypass offset must be <= 96 bytes */
6601 + DPRINTF(("%s: bypass %u too big\n", __func__, bypass));
6602 + safestats.st_bypasstoobig++;
6607 + if (crp->crp_flags & CRYPTO_F_SKBUF) {
6608 + if (pci_map_skb(sc, &re->re_src, re->re_src_skb)) {
6609 + safestats.st_noload++;
6613 + } else if (crp->crp_flags & CRYPTO_F_IOV) {
6614 + if (pci_map_uio(sc, &re->re_src, re->re_src_io)) {
6615 + safestats.st_noload++;
6620 + nicealign = safe_dmamap_aligned(sc, &re->re_src);
6621 + uniform = safe_dmamap_uniform(sc, &re->re_src);
6623 + DPRINTF(("src nicealign %u uniform %u nsegs %u\n",
6624 + nicealign, uniform, re->re_src.nsegs));
6625 + if (re->re_src.nsegs > 1) {
6626 + re->re_desc.d_src = sc->sc_spalloc.dma_paddr +
6627 + ((caddr_t) sc->sc_spfree - (caddr_t) sc->sc_spring);
6628 + for (i = 0; i < re->re_src_nsegs; i++) {
6629 + /* NB: no need to check if there's space */
6630 + pd = sc->sc_spfree;
6631 + if (++(sc->sc_spfree) == sc->sc_springtop)
6632 + sc->sc_spfree = sc->sc_spring;
6634 + KASSERT((pd->pd_flags&3) == 0 ||
6635 + (pd->pd_flags&3) == SAFE_PD_DONE,
6636 + ("bogus source particle descriptor; flags %x",
6638 + pd->pd_addr = re->re_src_segs[i].ds_addr;
6639 + pd->pd_size = re->re_src_segs[i].ds_len;
6640 + pd->pd_flags = SAFE_PD_READY;
6642 + cmd0 |= SAFE_SA_CMD0_IGATHER;
6645 + * No need for gather, reference the operand directly.
6647 + re->re_desc.d_src = re->re_src_segs[0].ds_addr;
6650 + if (enccrd == NULL && maccrd != NULL) {
6652 + * Hash op; no destination needed.
6655 + if (crp->crp_flags & (CRYPTO_F_IOV|CRYPTO_F_SKBUF)) {
6657 + safestats.st_iovmisaligned++;
6661 + if (uniform != 1) {
6662 + device_printf(sc->sc_dev, "!uniform source\n");
6665 + * There's no way to handle the DMA
6666 + * requirements with this uio. We
6667 + * could create a separate DMA area for
6668 + * the result and then copy it back,
6669 + * but for now we just bail and return
6670 + * an error. Note that uio requests
6671 + * > SAFE_MAX_DSIZE are handled because
6672 + * the DMA map and segment list for the
6673 + * destination will result in a
6674 + * destination particle list that does
6675 + * the necessary scatter DMA.
6677 + safestats.st_iovnotuniform++;
6682 + re->re_dst = re->re_src;
6684 + safestats.st_badflags++;
6689 + if (re->re_dst.nsegs > 1) {
6690 + re->re_desc.d_dst = sc->sc_dpalloc.dma_paddr +
6691 + ((caddr_t) sc->sc_dpfree - (caddr_t) sc->sc_dpring);
6692 + for (i = 0; i < re->re_dst_nsegs; i++) {
6693 + pd = sc->sc_dpfree;
6694 + KASSERT((pd->pd_flags&3) == 0 ||
6695 + (pd->pd_flags&3) == SAFE_PD_DONE,
6696 + ("bogus dest particle descriptor; flags %x",
6698 + if (++(sc->sc_dpfree) == sc->sc_dpringtop)
6699 + sc->sc_dpfree = sc->sc_dpring;
6700 + pd->pd_addr = re->re_dst_segs[i].ds_addr;
6701 + pd->pd_flags = SAFE_PD_READY;
6703 + cmd0 |= SAFE_SA_CMD0_OSCATTER;
6706 + * No need for scatter, reference the operand directly.
6708 + re->re_desc.d_dst = re->re_dst_segs[0].ds_addr;
6713 + * All done with setup; fillin the SA command words
6714 + * and the packet engine descriptor. The operation
6715 + * is now ready for submission to the hardware.
6717 + sa->sa_cmd0 = cmd0 | SAFE_SA_CMD0_IPCI | SAFE_SA_CMD0_OPCI;
6718 + sa->sa_cmd1 = cmd1
6719 + | (coffset << SAFE_SA_CMD1_OFFSET_S)
6720 + | SAFE_SA_CMD1_SAREV1 /* Rev 1 SA data structure */
6721 + | SAFE_SA_CMD1_SRPCI
6724 + * NB: the order of writes is important here. In case the
6725 + * chip is scanning the ring because of an outstanding request
6726 + * it might nab this one too. In that case we need to make
6727 + * sure the setup is complete before we write the length
6728 + * field of the descriptor as it signals the descriptor is
6729 + * ready for processing.
6731 + re->re_desc.d_csr = SAFE_PE_CSR_READY | SAFE_PE_CSR_SAPCI;
6733 + re->re_desc.d_csr |= SAFE_PE_CSR_LOADSA | SAFE_PE_CSR_HASHFINAL;
6735 + re->re_desc.d_len = oplen
6736 + | SAFE_PE_LEN_READY
6737 + | (bypass << SAFE_PE_LEN_BYPASS_S)
6740 + safestats.st_ipackets++;
6741 + safestats.st_ibytes += oplen;
6743 + if (++(sc->sc_front) == sc->sc_ringtop)
6744 + sc->sc_front = sc->sc_ring;
6746 + /* XXX honor batching */
6747 + safe_feed(sc, re);
6748 + spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
6752 + if (re->re_src.map != re->re_dst.map)
6753 + pci_unmap_operand(sc, &re->re_dst);
6754 + if (re->re_src.map)
6755 + pci_unmap_operand(sc, &re->re_src);
6756 + spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
6757 + if (err != ERESTART) {
6758 + crp->crp_etype = err;
6761 + sc->sc_needwakeup |= CRYPTO_SYMQ;
6767 +safe_callback(struct safe_softc *sc, struct safe_ringentry *re)
6769 + struct cryptop *crp = (struct cryptop *)re->re_crp;
6770 + struct cryptodesc *crd;
6772 + DPRINTF(("%s()\n", __FUNCTION__));
6774 + safestats.st_opackets++;
6775 + safestats.st_obytes += re->re_dst.mapsize;
6777 + if (re->re_desc.d_csr & SAFE_PE_CSR_STATUS) {
6778 + device_printf(sc->sc_dev, "csr 0x%x cmd0 0x%x cmd1 0x%x\n",
6779 + re->re_desc.d_csr,
6780 + re->re_sa.sa_cmd0, re->re_sa.sa_cmd1);
6781 + safestats.st_peoperr++;
6782 + crp->crp_etype = EIO; /* something more meaningful? */
6785 + if (re->re_dst.map != NULL && re->re_dst.map != re->re_src.map)
6786 + pci_unmap_operand(sc, &re->re_dst);
6787 + pci_unmap_operand(sc, &re->re_src);
6790 + * If result was written to a different mbuf chain, swap
6791 + * it in as the return value and reclaim the original.
6793 + if ((crp->crp_flags & CRYPTO_F_SKBUF) && re->re_src_skb != re->re_dst_skb) {
6794 + device_printf(sc->sc_dev, "no CRYPTO_F_SKBUF swapping support\n");
6795 + /* kfree_skb(skb) */
6796 + /* crp->crp_buf = (caddr_t)re->re_dst_skb */
6800 + if (re->re_flags & SAFE_QFLAGS_COPYOUTIV) {
6801 + /* copy out IV for future use */
6802 + for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
6806 + if (crd->crd_alg == CRYPTO_DES_CBC ||
6807 + crd->crd_alg == CRYPTO_3DES_CBC) {
6808 + ivsize = 2*sizeof(u_int32_t);
6809 + } else if (crd->crd_alg == CRYPTO_AES_CBC) {
6810 + ivsize = 4*sizeof(u_int32_t);
6813 + crypto_copydata(crp->crp_flags, crp->crp_buf,
6814 + crd->crd_skip + crd->crd_len - ivsize, ivsize,
6815 + (caddr_t)sc->sc_sessions[re->re_sesn].ses_iv);
6817 + i < ivsize/sizeof(sc->sc_sessions[re->re_sesn].ses_iv[0]);
6819 + sc->sc_sessions[re->re_sesn].ses_iv[i] =
6820 + cpu_to_le32(sc->sc_sessions[re->re_sesn].ses_iv[i]);
6825 + if (re->re_flags & SAFE_QFLAGS_COPYOUTICV) {
6826 + /* copy out ICV result */
6827 + for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
6828 + if (!(crd->crd_alg == CRYPTO_MD5_HMAC ||
6829 + crd->crd_alg == CRYPTO_SHA1_HMAC ||
6830 + crd->crd_alg == CRYPTO_NULL_HMAC))
6832 + if (crd->crd_alg == CRYPTO_SHA1_HMAC) {
6834 + * SHA-1 ICV's are byte-swapped; fix 'em up
6835 + * before copying them to their destination.
6837 + re->re_sastate.sa_saved_indigest[0] =
6838 + cpu_to_be32(re->re_sastate.sa_saved_indigest[0]);
6839 + re->re_sastate.sa_saved_indigest[1] =
6840 + cpu_to_be32(re->re_sastate.sa_saved_indigest[1]);
6841 + re->re_sastate.sa_saved_indigest[2] =
6842 + cpu_to_be32(re->re_sastate.sa_saved_indigest[2]);
6844 + re->re_sastate.sa_saved_indigest[0] =
6845 + cpu_to_le32(re->re_sastate.sa_saved_indigest[0]);
6846 + re->re_sastate.sa_saved_indigest[1] =
6847 + cpu_to_le32(re->re_sastate.sa_saved_indigest[1]);
6848 + re->re_sastate.sa_saved_indigest[2] =
6849 + cpu_to_le32(re->re_sastate.sa_saved_indigest[2]);
6851 + crypto_copyback(crp->crp_flags, crp->crp_buf,
6853 + sc->sc_sessions[re->re_sesn].ses_mlen,
6854 + (caddr_t)re->re_sastate.sa_saved_indigest);
6862 +#if defined(CONFIG_OCF_RANDOMHARVEST) && !defined(SAFE_NO_RNG)
6863 +#define SAFE_RNG_MAXWAIT 1000
6866 +safe_rng_init(struct safe_softc *sc)
6871 + DPRINTF(("%s()\n", __FUNCTION__));
6873 + WRITE_REG(sc, SAFE_RNG_CTRL, 0);
6874 + /* use default value according to the manual */
6875 + WRITE_REG(sc, SAFE_RNG_CNFG, 0x834); /* magic from SafeNet */
6876 + WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);
6879 + * There is a bug in rev 1.0 of the 1140 that when the RNG
6880 + * is brought out of reset the ready status flag does not
6881 + * work until the RNG has finished its internal initialization.
6883 + * So in order to determine the device is through its
6884 + * initialization we must read the data register, using the
6885 + * status reg in the read in case it is initialized. Then read
6886 + * the data register until it changes from the first read.
6887 + * Once it changes read the data register until it changes
6888 + * again. At this time the RNG is considered initialized.
6889 + * This could take between 750ms - 1000ms in time.
6892 + w = READ_REG(sc, SAFE_RNG_OUT);
6894 + v = READ_REG(sc, SAFE_RNG_OUT);
6900 + } while (++i < SAFE_RNG_MAXWAIT);
6902 + /* Wait Until data changes again */
6905 + v = READ_REG(sc, SAFE_RNG_OUT);
6909 + } while (++i < SAFE_RNG_MAXWAIT);
6912 +static __inline void
6913 +safe_rng_disable_short_cycle(struct safe_softc *sc)
6915 + DPRINTF(("%s()\n", __FUNCTION__));
6917 + WRITE_REG(sc, SAFE_RNG_CTRL,
6918 + READ_REG(sc, SAFE_RNG_CTRL) &~ SAFE_RNG_CTRL_SHORTEN);
6921 +static __inline void
6922 +safe_rng_enable_short_cycle(struct safe_softc *sc)
6924 + DPRINTF(("%s()\n", __FUNCTION__));
6926 + WRITE_REG(sc, SAFE_RNG_CTRL,
6927 + READ_REG(sc, SAFE_RNG_CTRL) | SAFE_RNG_CTRL_SHORTEN);
6930 +static __inline u_int32_t
6931 +safe_rng_read(struct safe_softc *sc)
6936 + while (READ_REG(sc, SAFE_RNG_STAT) != 0 && ++i < SAFE_RNG_MAXWAIT)
6938 + return READ_REG(sc, SAFE_RNG_OUT);
6942 +safe_read_random(void *arg, u_int32_t *buf, int maxwords)
6944 + struct safe_softc *sc = (struct safe_softc *) arg;
6947 + DPRINTF(("%s()\n", __FUNCTION__));
6949 + safestats.st_rng++;
6951 + * Fetch the next block of data.
6953 + if (maxwords > safe_rngbufsize)
6954 + maxwords = safe_rngbufsize;
6955 + if (maxwords > SAFE_RNG_MAXBUFSIZ)
6956 + maxwords = SAFE_RNG_MAXBUFSIZ;
6958 + /* read as much as we can */
6959 + for (rc = 0; rc < maxwords; rc++) {
6960 + if (READ_REG(sc, SAFE_RNG_STAT) != 0)
6962 + buf[rc] = READ_REG(sc, SAFE_RNG_OUT);
6967 + * Check the comparator alarm count and reset the h/w if
6968 + * it exceeds our threshold. This guards against the
6969 + * hardware oscillators resonating with external signals.
6971 + if (READ_REG(sc, SAFE_RNG_ALM_CNT) > safe_rngmaxalarm) {
6972 + u_int32_t freq_inc, w;
6974 + DPRINTF(("%s: alarm count %u exceeds threshold %u\n", __func__,
6975 + (unsigned)READ_REG(sc, SAFE_RNG_ALM_CNT), safe_rngmaxalarm));
6976 + safestats.st_rngalarm++;
6977 + safe_rng_enable_short_cycle(sc);
6979 + for (i = 0; i < 64; i++) {
6980 + w = READ_REG(sc, SAFE_RNG_CNFG);
6981 + freq_inc = ((w + freq_inc) & 0x3fL);
6982 + w = ((w & ~0x3fL) | freq_inc);
6983 + WRITE_REG(sc, SAFE_RNG_CNFG, w);
6985 + WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);
6987 + (void) safe_rng_read(sc);
6990 + if (READ_REG(sc, SAFE_RNG_ALM_CNT) == 0) {
6991 + safe_rng_disable_short_cycle(sc);
6996 + safe_rng_disable_short_cycle(sc);
6998 + WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);
7002 +#endif /* defined(CONFIG_OCF_RANDOMHARVEST) && !defined(SAFE_NO_RNG) */
7006 + * Resets the board. Values in the registers are left as is
7007 + * from the reset (i.e. initial values are assigned elsewhere).
7010 +safe_reset_board(struct safe_softc *sc)
7014 + * Reset the device. The manual says no delay
7015 + * is needed between marking and clearing reset.
7017 + DPRINTF(("%s()\n", __FUNCTION__));
7019 + v = READ_REG(sc, SAFE_PE_DMACFG) &~
7020 + (SAFE_PE_DMACFG_PERESET | SAFE_PE_DMACFG_PDRRESET |
7021 + SAFE_PE_DMACFG_SGRESET);
7022 + WRITE_REG(sc, SAFE_PE_DMACFG, v
7023 + | SAFE_PE_DMACFG_PERESET
7024 + | SAFE_PE_DMACFG_PDRRESET
7025 + | SAFE_PE_DMACFG_SGRESET);
7026 + WRITE_REG(sc, SAFE_PE_DMACFG, v);
7030 + * Initialize registers we need to touch only once.
7033 +safe_init_board(struct safe_softc *sc)
7035 + u_int32_t v, dwords;
7037 + DPRINTF(("%s()\n", __FUNCTION__));
7039 + v = READ_REG(sc, SAFE_PE_DMACFG);
7040 + v &=~ ( SAFE_PE_DMACFG_PEMODE
7041 + | SAFE_PE_DMACFG_FSENA /* failsafe enable */
7042 + | SAFE_PE_DMACFG_GPRPCI /* gather ring on PCI */
7043 + | SAFE_PE_DMACFG_SPRPCI /* scatter ring on PCI */
7044 + | SAFE_PE_DMACFG_ESDESC /* endian-swap descriptors */
7045 + | SAFE_PE_DMACFG_ESPDESC /* endian-swap part. desc's */
7046 + | SAFE_PE_DMACFG_ESSA /* endian-swap SA's */
7047 + | SAFE_PE_DMACFG_ESPACKET /* swap the packet data */
7049 + v |= SAFE_PE_DMACFG_FSENA /* failsafe enable */
7050 + | SAFE_PE_DMACFG_GPRPCI /* gather ring on PCI */
7051 + | SAFE_PE_DMACFG_SPRPCI /* scatter ring on PCI */
7052 + | SAFE_PE_DMACFG_ESDESC /* endian-swap descriptors */
7053 + | SAFE_PE_DMACFG_ESPDESC /* endian-swap part. desc's */
7054 + | SAFE_PE_DMACFG_ESSA /* endian-swap SA's */
7056 + | SAFE_PE_DMACFG_ESPACKET /* swap the packet data */
7059 + WRITE_REG(sc, SAFE_PE_DMACFG, v);
7061 +#ifdef __BIG_ENDIAN
7062 + /* tell the safenet that we are 4321 and not 1234 */
7063 + WRITE_REG(sc, SAFE_ENDIAN, 0xe4e41b1b);
7066 + if (sc->sc_chiprev == SAFE_REV(1,0)) {
7068 + * Avoid large PCI DMA transfers. Rev 1.0 has a bug where
7069 + * "target mode transfers" done while the chip is DMA'ing
7070 + * >1020 bytes cause the hardware to lockup. To avoid this
7071 + * we reduce the max PCI transfer size and use small source
7072 + * particle descriptors (<= 256 bytes).
7074 + WRITE_REG(sc, SAFE_DMA_CFG, 256);
7075 + device_printf(sc->sc_dev,
7076 + "Reduce max DMA size to %u words for rev %u.%u WAR\n",
7077 + (unsigned) ((READ_REG(sc, SAFE_DMA_CFG)>>2) & 0xff),
7078 + (unsigned) SAFE_REV_MAJ(sc->sc_chiprev),
7079 + (unsigned) SAFE_REV_MIN(sc->sc_chiprev));
7080 + sc->sc_max_dsize = 256;
7082 + sc->sc_max_dsize = SAFE_MAX_DSIZE;
7085 + /* NB: operands+results are overlaid */
7086 + WRITE_REG(sc, SAFE_PE_PDRBASE, sc->sc_ringalloc.dma_paddr);
7087 + WRITE_REG(sc, SAFE_PE_RDRBASE, sc->sc_ringalloc.dma_paddr);
7089 + * Configure ring entry size and number of items in the ring.
7091 + KASSERT((sizeof(struct safe_ringentry) % sizeof(u_int32_t)) == 0,
7092 + ("PE ring entry not 32-bit aligned!"));
7093 + dwords = sizeof(struct safe_ringentry) / sizeof(u_int32_t);
7094 + WRITE_REG(sc, SAFE_PE_RINGCFG,
7095 + (dwords << SAFE_PE_RINGCFG_OFFSET_S) | SAFE_MAX_NQUEUE);
7096 + WRITE_REG(sc, SAFE_PE_RINGPOLL, 0); /* disable polling */
7098 + WRITE_REG(sc, SAFE_PE_GRNGBASE, sc->sc_spalloc.dma_paddr);
7099 + WRITE_REG(sc, SAFE_PE_SRNGBASE, sc->sc_dpalloc.dma_paddr);
7100 + WRITE_REG(sc, SAFE_PE_PARTSIZE,
7101 + (SAFE_TOTAL_DPART<<16) | SAFE_TOTAL_SPART);
7103 + * NB: destination particles are fixed size. We use
7104 + * an mbuf cluster and require all results go to
7105 + * clusters or smaller.
7107 + WRITE_REG(sc, SAFE_PE_PARTCFG, sc->sc_max_dsize);
7109 + /* it's now safe to enable PE mode, do it */
7110 + WRITE_REG(sc, SAFE_PE_DMACFG, v | SAFE_PE_DMACFG_PEMODE);
7113 + * Configure hardware to use level-triggered interrupts and
7114 + * to interrupt after each descriptor is processed.
7116 + WRITE_REG(sc, SAFE_HI_CFG, SAFE_HI_CFG_LEVEL);
7117 + WRITE_REG(sc, SAFE_HI_CLR, 0xffffffff);
7118 + WRITE_REG(sc, SAFE_HI_DESC_CNT, 1);
7119 + WRITE_REG(sc, SAFE_HI_MASK, SAFE_INT_PE_DDONE | SAFE_INT_PE_ERROR);
7124 + * Clean up after a chip crash.
7125 + * It is assumed that the caller in splimp()
7128 +safe_cleanchip(struct safe_softc *sc)
7130 + DPRINTF(("%s()\n", __FUNCTION__));
7132 + if (sc->sc_nqchip != 0) {
7133 + struct safe_ringentry *re = sc->sc_back;
7135 + while (re != sc->sc_front) {
7136 + if (re->re_desc.d_csr != 0)
7137 + safe_free_entry(sc, re);
7138 + if (++re == sc->sc_ringtop)
7142 + sc->sc_nqchip = 0;
7148 + * It is assumed that the caller is within splimp().
7151 +safe_free_entry(struct safe_softc *sc, struct safe_ringentry *re)
7153 + struct cryptop *crp;
7155 + DPRINTF(("%s()\n", __FUNCTION__));
7160 + if ((re->re_dst_skb != NULL) && (re->re_src_skb != re->re_dst_skb))
7162 + m_freem(re->re_dst_m);
7164 + printk("%s,%d: SKB not supported\n", __FILE__, __LINE__);
7167 + crp = (struct cryptop *)re->re_crp;
7169 + re->re_desc.d_csr = 0;
7171 + crp->crp_etype = EFAULT;
7177 + * Routine to reset the chip and clean up.
7178 + * It is assumed that the caller is in splimp()
7181 +safe_totalreset(struct safe_softc *sc)
7183 + DPRINTF(("%s()\n", __FUNCTION__));
7185 + safe_reset_board(sc);
7186 + safe_init_board(sc);
7187 + safe_cleanchip(sc);
7191 + * Is the operand suitably aligned for direct DMA. Each
7192 + * segment must be aligned on a 32-bit boundary and all
7193 + * but the last segment must be a multiple of 4 bytes.
7196 +safe_dmamap_aligned(struct safe_softc *sc, const struct safe_operand *op)
7200 + DPRINTF(("%s()\n", __FUNCTION__));
7202 + for (i = 0; i < op->nsegs; i++) {
7203 + if (op->segs[i].ds_addr & 3)
7205 + if (i != (op->nsegs - 1) && (op->segs[i].ds_len & 3))
7212 + * Is the operand suitable for direct DMA as the destination
7213 + * of an operation. The hardware requires that each ``particle''
7214 + * but the last in an operation result have the same size. We
7215 + * fix that size at SAFE_MAX_DSIZE bytes. This routine returns
7216 + * 0 if some segment is not a multiple of this size, 1 if all
7217 + * segments are exactly this size, or 2 if segments are at worst
7218 + * a multiple of this size.
7221 +safe_dmamap_uniform(struct safe_softc *sc, const struct safe_operand *op)
7225 + DPRINTF(("%s()\n", __FUNCTION__));
7227 + if (op->nsegs > 0) {
7230 + for (i = 0; i < op->nsegs-1; i++) {
7231 + if (op->segs[i].ds_len % sc->sc_max_dsize)
7233 + if (op->segs[i].ds_len != sc->sc_max_dsize)
7241 +safe_kprocess(device_t dev, struct cryptkop *krp, int hint)
7243 + struct safe_softc *sc = device_get_softc(dev);
7244 + struct safe_pkq *q;
7245 + unsigned long flags;
7247 + DPRINTF(("%s()\n", __FUNCTION__));
7250 + krp->krp_status = EINVAL;
7254 + if (krp->krp_op != CRK_MOD_EXP) {
7255 + krp->krp_status = EOPNOTSUPP;
7259 + q = (struct safe_pkq *) kmalloc(sizeof(*q), GFP_KERNEL);
7261 + krp->krp_status = ENOMEM;
7264 + memset(q, 0, sizeof(*q));
7266 + INIT_LIST_HEAD(&q->pkq_list);
7268 + spin_lock_irqsave(&sc->sc_pkmtx, flags);
7269 + list_add_tail(&q->pkq_list, &sc->sc_pkq);
7271 + spin_unlock_irqrestore(&sc->sc_pkmtx, flags);
7275 + crypto_kdone(krp);
7279 +#define SAFE_CRK_PARAM_BASE 0
7280 +#define SAFE_CRK_PARAM_EXP 1
7281 +#define SAFE_CRK_PARAM_MOD 2
7284 +safe_kstart(struct safe_softc *sc)
7286 + struct cryptkop *krp = sc->sc_pkq_cur->pkq_krp;
7287 + int exp_bits, mod_bits, base_bits;
7288 + u_int32_t op, a_off, b_off, c_off, d_off;
7290 + DPRINTF(("%s()\n", __FUNCTION__));
7292 + if (krp->krp_iparams < 3 || krp->krp_oparams != 1) {
7293 + krp->krp_status = EINVAL;
7297 + base_bits = safe_ksigbits(sc, &krp->krp_param[SAFE_CRK_PARAM_BASE]);
7298 + if (base_bits > 2048)
7300 + if (base_bits <= 0) /* 5. base not zero */
7303 + exp_bits = safe_ksigbits(sc, &krp->krp_param[SAFE_CRK_PARAM_EXP]);
7304 + if (exp_bits > 2048)
7306 + if (exp_bits <= 0) /* 1. exponent word length > 0 */
7307 + goto too_small; /* 4. exponent not zero */
7309 + mod_bits = safe_ksigbits(sc, &krp->krp_param[SAFE_CRK_PARAM_MOD]);
7310 + if (mod_bits > 2048)
7312 + if (mod_bits <= 32) /* 2. modulus word length > 1 */
7313 + goto too_small; /* 8. MSW of modulus != zero */
7314 + if (mod_bits < exp_bits) /* 3 modulus len >= exponent len */
7316 + if ((krp->krp_param[SAFE_CRK_PARAM_MOD].crp_p[0] & 1) == 0)
7317 + goto bad_domain; /* 6. modulus is odd */
7318 + if (mod_bits > krp->krp_param[krp->krp_iparams].crp_nbits)
7319 + goto too_small; /* make sure result will fit */
7321 + /* 7. modulus > base */
7322 + if (mod_bits < base_bits)
7324 + if (mod_bits == base_bits) {
7325 + u_int8_t *basep, *modp;
7328 + basep = krp->krp_param[SAFE_CRK_PARAM_BASE].crp_p +
7329 + ((base_bits + 7) / 8) - 1;
7330 + modp = krp->krp_param[SAFE_CRK_PARAM_MOD].crp_p +
7331 + ((mod_bits + 7) / 8) - 1;
7333 + for (i = 0; i < (mod_bits + 7) / 8; i++, basep--, modp--) {
7334 + if (*modp < *basep)
7336 + if (*modp > *basep)
7341 + /* And on the 9th step, he rested. */
7343 + WRITE_REG(sc, SAFE_PK_A_LEN, (exp_bits + 31) / 32);
7344 + WRITE_REG(sc, SAFE_PK_B_LEN, (mod_bits + 31) / 32);
7345 + if (mod_bits > 1024) {
7346 + op = SAFE_PK_FUNC_EXP4;
7352 + op = SAFE_PK_FUNC_EXP16;
7358 + sc->sc_pk_reslen = b_off - a_off;
7359 + sc->sc_pk_resoff = d_off;
7361 + /* A is exponent, B is modulus, C is base, D is result */
7362 + safe_kload_reg(sc, a_off, b_off - a_off,
7363 + &krp->krp_param[SAFE_CRK_PARAM_EXP]);
7364 + WRITE_REG(sc, SAFE_PK_A_ADDR, a_off >> 2);
7365 + safe_kload_reg(sc, b_off, b_off - a_off,
7366 + &krp->krp_param[SAFE_CRK_PARAM_MOD]);
7367 + WRITE_REG(sc, SAFE_PK_B_ADDR, b_off >> 2);
7368 + safe_kload_reg(sc, c_off, b_off - a_off,
7369 + &krp->krp_param[SAFE_CRK_PARAM_BASE]);
7370 + WRITE_REG(sc, SAFE_PK_C_ADDR, c_off >> 2);
7371 + WRITE_REG(sc, SAFE_PK_D_ADDR, d_off >> 2);
7373 + WRITE_REG(sc, SAFE_PK_FUNC, op | SAFE_PK_FUNC_RUN);
7378 + krp->krp_status = E2BIG;
7381 + krp->krp_status = ERANGE;
7384 + krp->krp_status = EDOM;
7389 +safe_ksigbits(struct safe_softc *sc, struct crparam *cr)
7391 + u_int plen = (cr->crp_nbits + 7) / 8;
7392 + int i, sig = plen * 8;
7393 + u_int8_t c, *p = cr->crp_p;
7395 + DPRINTF(("%s()\n", __FUNCTION__));
7397 + for (i = plen - 1; i >= 0; i--) {
7400 + while ((c & 0x80) == 0) {
7412 +safe_kfeed(struct safe_softc *sc)
7414 + struct safe_pkq *q, *tmp;
7416 + DPRINTF(("%s()\n", __FUNCTION__));
7418 + if (list_empty(&sc->sc_pkq) && sc->sc_pkq_cur == NULL)
7420 + if (sc->sc_pkq_cur != NULL)
7422 + list_for_each_entry_safe(q, tmp, &sc->sc_pkq, pkq_list) {
7423 + sc->sc_pkq_cur = q;
7424 + list_del(&q->pkq_list);
7425 + if (safe_kstart(sc) != 0) {
7426 + crypto_kdone(q->pkq_krp);
7428 + sc->sc_pkq_cur = NULL;
7430 + /* op started, start polling */
7431 + mod_timer(&sc->sc_pkto, jiffies + 1);
7438 +safe_kpoll(unsigned long arg)
7440 + struct safe_softc *sc = NULL;
7441 + struct safe_pkq *q;
7442 + struct crparam *res;
7444 + u_int32_t buf[64];
7445 + unsigned long flags;
7447 + DPRINTF(("%s()\n", __FUNCTION__));
7449 + if (arg >= SAFE_MAX_CHIPS)
7451 + sc = safe_chip_idx[arg];
7453 + DPRINTF(("%s() - bad callback\n", __FUNCTION__));
7457 + spin_lock_irqsave(&sc->sc_pkmtx, flags);
7458 + if (sc->sc_pkq_cur == NULL)
7460 + if (READ_REG(sc, SAFE_PK_FUNC) & SAFE_PK_FUNC_RUN) {
7461 + /* still running, check back later */
7462 + mod_timer(&sc->sc_pkto, jiffies + 1);
7466 + q = sc->sc_pkq_cur;
7467 + res = &q->pkq_krp->krp_param[q->pkq_krp->krp_iparams];
7468 + bzero(buf, sizeof(buf));
7469 + bzero(res->crp_p, (res->crp_nbits + 7) / 8);
7470 + for (i = 0; i < sc->sc_pk_reslen >> 2; i++)
7471 + buf[i] = le32_to_cpu(READ_REG(sc, SAFE_PK_RAM_START +
7472 + sc->sc_pk_resoff + (i << 2)));
7473 + bcopy(buf, res->crp_p, (res->crp_nbits + 7) / 8);
7475 + * reduce the bits that need copying if possible
7477 + res->crp_nbits = min(res->crp_nbits,sc->sc_pk_reslen * 8);
7478 + res->crp_nbits = safe_ksigbits(sc, res);
7480 + for (i = SAFE_PK_RAM_START; i < SAFE_PK_RAM_END; i += 4)
7481 + WRITE_REG(sc, i, 0);
7483 + crypto_kdone(q->pkq_krp);
7485 + sc->sc_pkq_cur = NULL;
7489 + spin_unlock_irqrestore(&sc->sc_pkmtx, flags);
7493 +safe_kload_reg(struct safe_softc *sc, u_int32_t off, u_int32_t len,
7494 + struct crparam *n)
7496 + u_int32_t buf[64], i;
7498 + DPRINTF(("%s()\n", __FUNCTION__));
7500 + bzero(buf, sizeof(buf));
7501 + bcopy(n->crp_p, buf, (n->crp_nbits + 7) / 8);
7503 + for (i = 0; i < len >> 2; i++)
7504 + WRITE_REG(sc, SAFE_PK_RAM_START + off + (i << 2),
7505 + cpu_to_le32(buf[i]));
7510 +safe_dump_dmastatus(struct safe_softc *sc, const char *tag)
7512 + printf("%s: ENDIAN 0x%x SRC 0x%x DST 0x%x STAT 0x%x\n"
7514 + , READ_REG(sc, SAFE_DMA_ENDIAN)
7515 + , READ_REG(sc, SAFE_DMA_SRCADDR)
7516 + , READ_REG(sc, SAFE_DMA_DSTADDR)
7517 + , READ_REG(sc, SAFE_DMA_STAT)
7522 +safe_dump_intrstate(struct safe_softc *sc, const char *tag)
7524 + printf("%s: HI_CFG 0x%x HI_MASK 0x%x HI_DESC_CNT 0x%x HU_STAT 0x%x HM_STAT 0x%x\n"
7526 + , READ_REG(sc, SAFE_HI_CFG)
7527 + , READ_REG(sc, SAFE_HI_MASK)
7528 + , READ_REG(sc, SAFE_HI_DESC_CNT)
7529 + , READ_REG(sc, SAFE_HU_STAT)
7530 + , READ_REG(sc, SAFE_HM_STAT)
7535 +safe_dump_ringstate(struct safe_softc *sc, const char *tag)
7537 + u_int32_t estat = READ_REG(sc, SAFE_PE_ERNGSTAT);
7539 + /* NB: assume caller has lock on ring */
7540 + printf("%s: ERNGSTAT %x (next %u) back %lu front %lu\n",
7542 + estat, (estat >> SAFE_PE_ERNGSTAT_NEXT_S),
7543 + (unsigned long)(sc->sc_back - sc->sc_ring),
7544 + (unsigned long)(sc->sc_front - sc->sc_ring));
7548 +safe_dump_request(struct safe_softc *sc, const char* tag, struct safe_ringentry *re)
7552 + ix = re - sc->sc_ring;
7553 + printf("%s: %p (%u): csr %x src %x dst %x sa %x len %x\n"
7556 + , re->re_desc.d_csr
7557 + , re->re_desc.d_src
7558 + , re->re_desc.d_dst
7559 + , re->re_desc.d_sa
7560 + , re->re_desc.d_len
7562 + if (re->re_src.nsegs > 1) {
7563 + ix = (re->re_desc.d_src - sc->sc_spalloc.dma_paddr) /
7564 + sizeof(struct safe_pdesc);
7565 + for (nsegs = re->re_src.nsegs; nsegs; nsegs--) {
7566 + printf(" spd[%u] %p: %p size %u flags %x"
7567 + , ix, &sc->sc_spring[ix]
7568 + , (caddr_t)(uintptr_t) sc->sc_spring[ix].pd_addr
7569 + , sc->sc_spring[ix].pd_size
7570 + , sc->sc_spring[ix].pd_flags
7572 + if (sc->sc_spring[ix].pd_size == 0)
7573 + printf(" (zero!)");
7575 + if (++ix == SAFE_TOTAL_SPART)
7579 + if (re->re_dst.nsegs > 1) {
7580 + ix = (re->re_desc.d_dst - sc->sc_dpalloc.dma_paddr) /
7581 + sizeof(struct safe_pdesc);
7582 + for (nsegs = re->re_dst.nsegs; nsegs; nsegs--) {
7583 + printf(" dpd[%u] %p: %p flags %x\n"
7584 + , ix, &sc->sc_dpring[ix]
7585 + , (caddr_t)(uintptr_t) sc->sc_dpring[ix].pd_addr
7586 + , sc->sc_dpring[ix].pd_flags
7588 + if (++ix == SAFE_TOTAL_DPART)
7592 + printf("sa: cmd0 %08x cmd1 %08x staterec %x\n",
7593 + re->re_sa.sa_cmd0, re->re_sa.sa_cmd1, re->re_sa.sa_staterec);
7594 + printf("sa: key %x %x %x %x %x %x %x %x\n"
7595 + , re->re_sa.sa_key[0]
7596 + , re->re_sa.sa_key[1]
7597 + , re->re_sa.sa_key[2]
7598 + , re->re_sa.sa_key[3]
7599 + , re->re_sa.sa_key[4]
7600 + , re->re_sa.sa_key[5]
7601 + , re->re_sa.sa_key[6]
7602 + , re->re_sa.sa_key[7]
7604 + printf("sa: indigest %x %x %x %x %x\n"
7605 + , re->re_sa.sa_indigest[0]
7606 + , re->re_sa.sa_indigest[1]
7607 + , re->re_sa.sa_indigest[2]
7608 + , re->re_sa.sa_indigest[3]
7609 + , re->re_sa.sa_indigest[4]
7611 + printf("sa: outdigest %x %x %x %x %x\n"
7612 + , re->re_sa.sa_outdigest[0]
7613 + , re->re_sa.sa_outdigest[1]
7614 + , re->re_sa.sa_outdigest[2]
7615 + , re->re_sa.sa_outdigest[3]
7616 + , re->re_sa.sa_outdigest[4]
7618 + printf("sr: iv %x %x %x %x\n"
7619 + , re->re_sastate.sa_saved_iv[0]
7620 + , re->re_sastate.sa_saved_iv[1]
7621 + , re->re_sastate.sa_saved_iv[2]
7622 + , re->re_sastate.sa_saved_iv[3]
7624 + printf("sr: hashbc %u indigest %x %x %x %x %x\n"
7625 + , re->re_sastate.sa_saved_hashbc
7626 + , re->re_sastate.sa_saved_indigest[0]
7627 + , re->re_sastate.sa_saved_indigest[1]
7628 + , re->re_sastate.sa_saved_indigest[2]
7629 + , re->re_sastate.sa_saved_indigest[3]
7630 + , re->re_sastate.sa_saved_indigest[4]
7635 +safe_dump_ring(struct safe_softc *sc, const char *tag)
7637 + unsigned long flags;
7639 + spin_lock_irqsave(&sc->sc_ringmtx, flags);
7640 + printf("\nSafeNet Ring State:\n");
7641 + safe_dump_intrstate(sc, tag);
7642 + safe_dump_dmastatus(sc, tag);
7643 + safe_dump_ringstate(sc, tag);
7644 + if (sc->sc_nqchip) {
7645 + struct safe_ringentry *re = sc->sc_back;
7647 + safe_dump_request(sc, tag, re);
7648 + if (++re == sc->sc_ringtop)
7650 + } while (re != sc->sc_front);
7652 + spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
7654 +#endif /* SAFE_DEBUG */
7657 +static int safe_probe(struct pci_dev *dev, const struct pci_device_id *ent)
7659 + struct safe_softc *sc = NULL;
7660 + u32 mem_start, mem_len, cmd;
7661 + int i, rc, devinfo;
7663 + static int num_chips = 0;
7665 + DPRINTF(("%s()\n", __FUNCTION__));
7667 + if (pci_enable_device(dev) < 0)
7671 + printk("safe: found device with no IRQ assigned. check BIOS settings!");
7672 + pci_disable_device(dev);
7676 + if (pci_set_mwi(dev)) {
7677 + printk("safe: pci_set_mwi failed!");
7681 + sc = (struct safe_softc *) kmalloc(sizeof(*sc), GFP_KERNEL);
7684 + memset(sc, 0, sizeof(*sc));
7686 + softc_device_init(sc, "safe", num_chips, safe_methods);
7690 + sc->sc_pcidev = dev;
7691 + if (num_chips < SAFE_MAX_CHIPS) {
7692 + safe_chip_idx[device_get_unit(sc->sc_dev)] = sc;
7696 + INIT_LIST_HEAD(&sc->sc_pkq);
7697 + spin_lock_init(&sc->sc_pkmtx);
7699 + pci_set_drvdata(sc->sc_pcidev, sc);
7701 + /* we read its hardware registers as memory */
7702 + mem_start = pci_resource_start(sc->sc_pcidev, 0);
7703 + mem_len = pci_resource_len(sc->sc_pcidev, 0);
7705 + sc->sc_base_addr = (ocf_iomem_t) ioremap(mem_start, mem_len);
7706 + if (!sc->sc_base_addr) {
7707 + device_printf(sc->sc_dev, "failed to ioremap 0x%x-0x%x\n",
7708 + mem_start, mem_start + mem_len - 1);
7712 + /* fix up the bus size */
7713 + if (pci_set_dma_mask(sc->sc_pcidev, DMA_32BIT_MASK)) {
7714 + device_printf(sc->sc_dev, "No usable DMA configuration, aborting.\n");
7717 + if (pci_set_consistent_dma_mask(sc->sc_pcidev, DMA_32BIT_MASK)) {
7718 + device_printf(sc->sc_dev, "No usable consistent DMA configuration, aborting.\n");
7722 + pci_set_master(sc->sc_pcidev);
7724 + pci_read_config_dword(sc->sc_pcidev, PCI_COMMAND, &cmd);
7726 + if (!(cmd & PCI_COMMAND_MEMORY)) {
7727 + device_printf(sc->sc_dev, "failed to enable memory mapping\n");
7731 + if (!(cmd & PCI_COMMAND_MASTER)) {
7732 + device_printf(sc->sc_dev, "failed to enable bus mastering\n");
7736 + rc = request_irq(dev->irq, safe_intr, IRQF_SHARED, "safe", sc);
7738 + device_printf(sc->sc_dev, "failed to hook irq %d\n", sc->sc_irq);
7741 + sc->sc_irq = dev->irq;
7743 + sc->sc_chiprev = READ_REG(sc, SAFE_DEVINFO) &
7744 + (SAFE_DEVINFO_REV_MAJ | SAFE_DEVINFO_REV_MIN);
7747 + * Allocate packet engine descriptors.
7749 + sc->sc_ringalloc.dma_vaddr = pci_alloc_consistent(sc->sc_pcidev,
7750 + SAFE_MAX_NQUEUE * sizeof (struct safe_ringentry),
7751 + &sc->sc_ringalloc.dma_paddr);
7752 + if (!sc->sc_ringalloc.dma_vaddr) {
7753 + device_printf(sc->sc_dev, "cannot allocate PE descriptor ring\n");
7758 + * Hookup the static portion of all our data structures.
7760 + sc->sc_ring = (struct safe_ringentry *) sc->sc_ringalloc.dma_vaddr;
7761 + sc->sc_ringtop = sc->sc_ring + SAFE_MAX_NQUEUE;
7762 + sc->sc_front = sc->sc_ring;
7763 + sc->sc_back = sc->sc_ring;
7764 + raddr = sc->sc_ringalloc.dma_paddr;
7765 + bzero(sc->sc_ring, SAFE_MAX_NQUEUE * sizeof(struct safe_ringentry));
7766 + for (i = 0; i < SAFE_MAX_NQUEUE; i++) {
7767 + struct safe_ringentry *re = &sc->sc_ring[i];
7769 + re->re_desc.d_sa = raddr +
7770 + offsetof(struct safe_ringentry, re_sa);
7771 + re->re_sa.sa_staterec = raddr +
7772 + offsetof(struct safe_ringentry, re_sastate);
7774 + raddr += sizeof (struct safe_ringentry);
7776 + spin_lock_init(&sc->sc_ringmtx);
7779 + * Allocate scatter and gather particle descriptors.
7781 + sc->sc_spalloc.dma_vaddr = pci_alloc_consistent(sc->sc_pcidev,
7782 + SAFE_TOTAL_SPART * sizeof (struct safe_pdesc),
7783 + &sc->sc_spalloc.dma_paddr);
7784 + if (!sc->sc_spalloc.dma_vaddr) {
7785 + device_printf(sc->sc_dev, "cannot allocate source particle descriptor ring\n");
7788 + sc->sc_spring = (struct safe_pdesc *) sc->sc_spalloc.dma_vaddr;
7789 + sc->sc_springtop = sc->sc_spring + SAFE_TOTAL_SPART;
7790 + sc->sc_spfree = sc->sc_spring;
7791 + bzero(sc->sc_spring, SAFE_TOTAL_SPART * sizeof(struct safe_pdesc));
7793 + sc->sc_dpalloc.dma_vaddr = pci_alloc_consistent(sc->sc_pcidev,
7794 + SAFE_TOTAL_DPART * sizeof (struct safe_pdesc),
7795 + &sc->sc_dpalloc.dma_paddr);
7796 + if (!sc->sc_dpalloc.dma_vaddr) {
7797 + device_printf(sc->sc_dev, "cannot allocate destination particle descriptor ring\n");
7800 + sc->sc_dpring = (struct safe_pdesc *) sc->sc_dpalloc.dma_vaddr;
7801 + sc->sc_dpringtop = sc->sc_dpring + SAFE_TOTAL_DPART;
7802 + sc->sc_dpfree = sc->sc_dpring;
7803 + bzero(sc->sc_dpring, SAFE_TOTAL_DPART * sizeof(struct safe_pdesc));
7805 + sc->sc_cid = crypto_get_driverid(softc_get_device(sc), CRYPTOCAP_F_HARDWARE);
7806 + if (sc->sc_cid < 0) {
7807 + device_printf(sc->sc_dev, "could not get crypto driver id\n");
7811 + printf("%s:", device_get_nameunit(sc->sc_dev));
7813 + devinfo = READ_REG(sc, SAFE_DEVINFO);
7814 + if (devinfo & SAFE_DEVINFO_RNG) {
7815 + sc->sc_flags |= SAFE_FLAGS_RNG;
7818 + if (devinfo & SAFE_DEVINFO_PKEY) {
7820 + sc->sc_flags |= SAFE_FLAGS_KEY;
7821 + crypto_kregister(sc->sc_cid, CRK_MOD_EXP, 0);
7823 + crypto_kregister(sc->sc_cid, CRK_MOD_EXP_CRT, 0);
7825 + init_timer(&sc->sc_pkto);
7826 + sc->sc_pkto.function = safe_kpoll;
7827 + sc->sc_pkto.data = (unsigned long) device_get_unit(sc->sc_dev);
7829 + if (devinfo & SAFE_DEVINFO_DES) {
7830 + printf(" des/3des");
7831 + crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
7832 + crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
7834 + if (devinfo & SAFE_DEVINFO_AES) {
7836 + crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
7838 + if (devinfo & SAFE_DEVINFO_MD5) {
7840 + crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
7842 + if (devinfo & SAFE_DEVINFO_SHA1) {
7844 + crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
7847 + crypto_register(sc->sc_cid, CRYPTO_NULL_CBC, 0, 0);
7848 + crypto_register(sc->sc_cid, CRYPTO_NULL_HMAC, 0, 0);
7849 + /* XXX other supported algorithms */
7852 + safe_reset_board(sc); /* reset h/w */
7853 + safe_init_board(sc); /* init h/w */
7855 +#if defined(CONFIG_OCF_RANDOMHARVEST) && !defined(SAFE_NO_RNG)
7856 + if (sc->sc_flags & SAFE_FLAGS_RNG) {
7857 + safe_rng_init(sc);
7858 + crypto_rregister(sc->sc_cid, safe_read_random, sc);
7860 +#endif /* SAFE_NO_RNG */
7865 + if (sc->sc_cid >= 0)
7866 + crypto_unregister_all(sc->sc_cid);
7867 + if (sc->sc_irq != -1)
7868 + free_irq(sc->sc_irq, sc);
7869 + if (sc->sc_ringalloc.dma_vaddr)
7870 + pci_free_consistent(sc->sc_pcidev,
7871 + SAFE_MAX_NQUEUE * sizeof (struct safe_ringentry),
7872 + sc->sc_ringalloc.dma_vaddr, sc->sc_ringalloc.dma_paddr);
7873 + if (sc->sc_spalloc.dma_vaddr)
7874 + pci_free_consistent(sc->sc_pcidev,
7875 + SAFE_TOTAL_DPART * sizeof (struct safe_pdesc),
7876 + sc->sc_spalloc.dma_vaddr, sc->sc_spalloc.dma_paddr);
7877 + if (sc->sc_dpalloc.dma_vaddr)
7878 + pci_free_consistent(sc->sc_pcidev,
7879 + SAFE_TOTAL_DPART * sizeof (struct safe_pdesc),
7880 + sc->sc_dpalloc.dma_vaddr, sc->sc_dpalloc.dma_paddr);
7885 +static void safe_remove(struct pci_dev *dev)
7887 + struct safe_softc *sc = pci_get_drvdata(dev);
7889 + DPRINTF(("%s()\n", __FUNCTION__));
7891 + /* XXX wait/abort active ops */
7893 + WRITE_REG(sc, SAFE_HI_MASK, 0); /* disable interrupts */
7895 + del_timer_sync(&sc->sc_pkto);
7897 + crypto_unregister_all(sc->sc_cid);
7899 + safe_cleanchip(sc);
7901 + if (sc->sc_irq != -1)
7902 + free_irq(sc->sc_irq, sc);
7903 + if (sc->sc_ringalloc.dma_vaddr)
7904 + pci_free_consistent(sc->sc_pcidev,
7905 + SAFE_MAX_NQUEUE * sizeof (struct safe_ringentry),
7906 + sc->sc_ringalloc.dma_vaddr, sc->sc_ringalloc.dma_paddr);
7907 + if (sc->sc_spalloc.dma_vaddr)
7908 + pci_free_consistent(sc->sc_pcidev,
7909 + SAFE_TOTAL_DPART * sizeof (struct safe_pdesc),
7910 + sc->sc_spalloc.dma_vaddr, sc->sc_spalloc.dma_paddr);
7911 + if (sc->sc_dpalloc.dma_vaddr)
7912 + pci_free_consistent(sc->sc_pcidev,
7913 + SAFE_TOTAL_DPART * sizeof (struct safe_pdesc),
7914 + sc->sc_dpalloc.dma_vaddr, sc->sc_dpalloc.dma_paddr);
7916 + sc->sc_ringalloc.dma_vaddr = NULL;
7917 + sc->sc_spalloc.dma_vaddr = NULL;
7918 + sc->sc_dpalloc.dma_vaddr = NULL;
7921 +static struct pci_device_id safe_pci_tbl[] = {
7922 + { PCI_VENDOR_SAFENET, PCI_PRODUCT_SAFEXCEL,
7923 + PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
7926 +MODULE_DEVICE_TABLE(pci, safe_pci_tbl);
7928 +static struct pci_driver safe_driver = {
7930 + .id_table = safe_pci_tbl,
7931 + .probe = safe_probe,
7932 + .remove = safe_remove,
7933 + /* add PM stuff here one day */
7936 +static int __init safe_init (void)
7938 + struct safe_softc *sc = NULL;
7941 + DPRINTF(("%s(%p)\n", __FUNCTION__, safe_init));
7943 + rc = pci_register_driver(&safe_driver);
7944 + pci_register_driver_compat(&safe_driver, rc);
7949 +static void __exit safe_exit (void)
7951 + pci_unregister_driver(&safe_driver);
7954 +module_init(safe_init);
7955 +module_exit(safe_exit);
7957 +MODULE_LICENSE("BSD");
7958 +MODULE_AUTHOR("David McCullough <david_mccullough@securecomputing.com>");
7959 +MODULE_DESCRIPTION("OCF driver for safenet PCI crypto devices");
7960 --- /dev/null 2007-07-04 13:54:27.000000000 +1000
7961 +++ linux/crypto/ocf/safe/sha1.c 2005-05-20 10:30:53.000000000 +1000
7963 +/* $KAME: sha1.c,v 1.5 2000/11/08 06:13:08 itojun Exp $ */
7965 + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
7966 + * All rights reserved.
7968 + * Redistribution and use in source and binary forms, with or without
7969 + * modification, are permitted provided that the following conditions
7971 + * 1. Redistributions of source code must retain the above copyright
7972 + * notice, this list of conditions and the following disclaimer.
7973 + * 2. Redistributions in binary form must reproduce the above copyright
7974 + * notice, this list of conditions and the following disclaimer in the
7975 + * documentation and/or other materials provided with the distribution.
7976 + * 3. Neither the name of the project nor the names of its contributors
7977 + * may be used to endorse or promote products derived from this software
7978 + * without specific prior written permission.
7980 + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
7981 + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
7982 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
7983 + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
7984 + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
7985 + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
7986 + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
7987 + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
7988 + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
7989 + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
7994 + * FIPS pub 180-1: Secure Hash Algorithm (SHA-1)
7995 + * based on: http://csrc.nist.gov/fips/fip180-1.txt
7996 + * implemented by Jun-ichiro itojun Itoh <itojun@itojun.org>
8000 +#include <sys/cdefs.h>
8001 +__FBSDID("$FreeBSD: src/sys/crypto/sha1.c,v 1.9 2003/06/10 21:36:57 obrien Exp $");
8003 +#include <sys/types.h>
8004 +#include <sys/cdefs.h>
8005 +#include <sys/time.h>
8006 +#include <sys/systm.h>
8008 +#include <crypto/sha1.h>
8012 +#if BYTE_ORDER != BIG_ENDIAN
8013 +# if BYTE_ORDER != LITTLE_ENDIAN
8014 +# define unsupported 1
8018 +#ifndef unsupported
8020 +/* constant table */
8021 +static u_int32_t _K[] = { 0x5a827999, 0x6ed9eba1, 0x8f1bbcdc, 0xca62c1d6 };
8022 +#define K(t) _K[(t) / 20]
8024 +#define F0(b, c, d) (((b) & (c)) | ((~(b)) & (d)))
8025 +#define F1(b, c, d) (((b) ^ (c)) ^ (d))
8026 +#define F2(b, c, d) (((b) & (c)) | ((b) & (d)) | ((c) & (d)))
8027 +#define F3(b, c, d) (((b) ^ (c)) ^ (d))
8029 +#define S(n, x) (((x) << (n)) | ((x) >> (32 - n)))
8032 +#define H(n) (ctxt->h.b32[(n)])
8033 +#define COUNT (ctxt->count)
8034 +#define BCOUNT (ctxt->c.b64[0] / 8)
8035 +#define W(n) (ctxt->m.b32[(n)])
8037 +#define PUTBYTE(x) { \
8038 + ctxt->m.b8[(COUNT % 64)] = (x); \
8041 + ctxt->c.b64[0] += 8; \
8042 + if (COUNT % 64 == 0) \
8043 + sha1_step(ctxt); \
8046 +#define PUTPAD(x) { \
8047 + ctxt->m.b8[(COUNT % 64)] = (x); \
8050 + if (COUNT % 64 == 0) \
8051 + sha1_step(ctxt); \
8054 +static void sha1_step(struct sha1_ctxt *);
8058 + struct sha1_ctxt *ctxt;
8060 + u_int32_t a, b, c, d, e;
8064 +#if BYTE_ORDER == LITTLE_ENDIAN
8065 + struct sha1_ctxt tctxt;
8066 + bcopy(&ctxt->m.b8[0], &tctxt.m.b8[0], 64);
8067 + ctxt->m.b8[0] = tctxt.m.b8[3]; ctxt->m.b8[1] = tctxt.m.b8[2];
8068 + ctxt->m.b8[2] = tctxt.m.b8[1]; ctxt->m.b8[3] = tctxt.m.b8[0];
8069 + ctxt->m.b8[4] = tctxt.m.b8[7]; ctxt->m.b8[5] = tctxt.m.b8[6];
8070 + ctxt->m.b8[6] = tctxt.m.b8[5]; ctxt->m.b8[7] = tctxt.m.b8[4];
8071 + ctxt->m.b8[8] = tctxt.m.b8[11]; ctxt->m.b8[9] = tctxt.m.b8[10];
8072 + ctxt->m.b8[10] = tctxt.m.b8[9]; ctxt->m.b8[11] = tctxt.m.b8[8];
8073 + ctxt->m.b8[12] = tctxt.m.b8[15]; ctxt->m.b8[13] = tctxt.m.b8[14];
8074 + ctxt->m.b8[14] = tctxt.m.b8[13]; ctxt->m.b8[15] = tctxt.m.b8[12];
8075 + ctxt->m.b8[16] = tctxt.m.b8[19]; ctxt->m.b8[17] = tctxt.m.b8[18];
8076 + ctxt->m.b8[18] = tctxt.m.b8[17]; ctxt->m.b8[19] = tctxt.m.b8[16];
8077 + ctxt->m.b8[20] = tctxt.m.b8[23]; ctxt->m.b8[21] = tctxt.m.b8[22];
8078 + ctxt->m.b8[22] = tctxt.m.b8[21]; ctxt->m.b8[23] = tctxt.m.b8[20];
8079 + ctxt->m.b8[24] = tctxt.m.b8[27]; ctxt->m.b8[25] = tctxt.m.b8[26];
8080 + ctxt->m.b8[26] = tctxt.m.b8[25]; ctxt->m.b8[27] = tctxt.m.b8[24];
8081 + ctxt->m.b8[28] = tctxt.m.b8[31]; ctxt->m.b8[29] = tctxt.m.b8[30];
8082 + ctxt->m.b8[30] = tctxt.m.b8[29]; ctxt->m.b8[31] = tctxt.m.b8[28];
8083 + ctxt->m.b8[32] = tctxt.m.b8[35]; ctxt->m.b8[33] = tctxt.m.b8[34];
8084 + ctxt->m.b8[34] = tctxt.m.b8[33]; ctxt->m.b8[35] = tctxt.m.b8[32];
8085 + ctxt->m.b8[36] = tctxt.m.b8[39]; ctxt->m.b8[37] = tctxt.m.b8[38];
8086 + ctxt->m.b8[38] = tctxt.m.b8[37]; ctxt->m.b8[39] = tctxt.m.b8[36];
8087 + ctxt->m.b8[40] = tctxt.m.b8[43]; ctxt->m.b8[41] = tctxt.m.b8[42];
8088 + ctxt->m.b8[42] = tctxt.m.b8[41]; ctxt->m.b8[43] = tctxt.m.b8[40];
8089 + ctxt->m.b8[44] = tctxt.m.b8[47]; ctxt->m.b8[45] = tctxt.m.b8[46];
8090 + ctxt->m.b8[46] = tctxt.m.b8[45]; ctxt->m.b8[47] = tctxt.m.b8[44];
8091 + ctxt->m.b8[48] = tctxt.m.b8[51]; ctxt->m.b8[49] = tctxt.m.b8[50];
8092 + ctxt->m.b8[50] = tctxt.m.b8[49]; ctxt->m.b8[51] = tctxt.m.b8[48];
8093 + ctxt->m.b8[52] = tctxt.m.b8[55]; ctxt->m.b8[53] = tctxt.m.b8[54];
8094 + ctxt->m.b8[54] = tctxt.m.b8[53]; ctxt->m.b8[55] = tctxt.m.b8[52];
8095 + ctxt->m.b8[56] = tctxt.m.b8[59]; ctxt->m.b8[57] = tctxt.m.b8[58];
8096 + ctxt->m.b8[58] = tctxt.m.b8[57]; ctxt->m.b8[59] = tctxt.m.b8[56];
8097 + ctxt->m.b8[60] = tctxt.m.b8[63]; ctxt->m.b8[61] = tctxt.m.b8[62];
8098 + ctxt->m.b8[62] = tctxt.m.b8[61]; ctxt->m.b8[63] = tctxt.m.b8[60];
8101 + a = H(0); b = H(1); c = H(2); d = H(3); e = H(4);
8103 + for (t = 0; t < 20; t++) {
8106 + W(s) = S(1, W((s+13) & 0x0f) ^ W((s+8) & 0x0f) ^ W((s+2) & 0x0f) ^ W(s));
8108 + tmp = S(5, a) + F0(b, c, d) + e + W(s) + K(t);
8109 + e = d; d = c; c = S(30, b); b = a; a = tmp;
8111 + for (t = 20; t < 40; t++) {
8113 + W(s) = S(1, W((s+13) & 0x0f) ^ W((s+8) & 0x0f) ^ W((s+2) & 0x0f) ^ W(s));
8114 + tmp = S(5, a) + F1(b, c, d) + e + W(s) + K(t);
8115 + e = d; d = c; c = S(30, b); b = a; a = tmp;
8117 + for (t = 40; t < 60; t++) {
8119 + W(s) = S(1, W((s+13) & 0x0f) ^ W((s+8) & 0x0f) ^ W((s+2) & 0x0f) ^ W(s));
8120 + tmp = S(5, a) + F2(b, c, d) + e + W(s) + K(t);
8121 + e = d; d = c; c = S(30, b); b = a; a = tmp;
8123 + for (t = 60; t < 80; t++) {
8125 + W(s) = S(1, W((s+13) & 0x0f) ^ W((s+8) & 0x0f) ^ W((s+2) & 0x0f) ^ W(s));
8126 + tmp = S(5, a) + F3(b, c, d) + e + W(s) + K(t);
8127 + e = d; d = c; c = S(30, b); b = a; a = tmp;
8136 + bzero(&ctxt->m.b8[0], 64);
8139 +/*------------------------------------------------------------*/
8143 + struct sha1_ctxt *ctxt;
8145 + bzero(ctxt, sizeof(struct sha1_ctxt));
8146 + H(0) = 0x67452301;
8147 + H(1) = 0xefcdab89;
8148 + H(2) = 0x98badcfe;
8149 + H(3) = 0x10325476;
8150 + H(4) = 0xc3d2e1f0;
8155 + struct sha1_ctxt *ctxt;
8157 + size_t padlen; /*pad length in bytes*/
8162 + padstart = COUNT % 64;
8163 + padlen = 64 - padstart;
8165 + bzero(&ctxt->m.b8[padstart], padlen);
8169 + padstart = COUNT % 64; /* should be 0 */
8170 + padlen = 64 - padstart; /* should be 64 */
8172 + bzero(&ctxt->m.b8[padstart], padlen - 8);
8173 + COUNT += (padlen - 8);
8175 +#if BYTE_ORDER == BIG_ENDIAN
8176 + PUTPAD(ctxt->c.b8[0]); PUTPAD(ctxt->c.b8[1]);
8177 + PUTPAD(ctxt->c.b8[2]); PUTPAD(ctxt->c.b8[3]);
8178 + PUTPAD(ctxt->c.b8[4]); PUTPAD(ctxt->c.b8[5]);
8179 + PUTPAD(ctxt->c.b8[6]); PUTPAD(ctxt->c.b8[7]);
8181 + PUTPAD(ctxt->c.b8[7]); PUTPAD(ctxt->c.b8[6]);
8182 + PUTPAD(ctxt->c.b8[5]); PUTPAD(ctxt->c.b8[4]);
8183 + PUTPAD(ctxt->c.b8[3]); PUTPAD(ctxt->c.b8[2]);
8184 + PUTPAD(ctxt->c.b8[1]); PUTPAD(ctxt->c.b8[0]);
8189 +sha1_loop(ctxt, input, len)
8190 + struct sha1_ctxt *ctxt;
8191 + const u_int8_t *input;
8201 + while (off < len) {
8202 + gapstart = COUNT % 64;
8203 + gaplen = 64 - gapstart;
8205 + copysiz = (gaplen < len - off) ? gaplen : len - off;
8206 + bcopy(&input[off], &ctxt->m.b8[gapstart], copysiz);
8209 + ctxt->c.b64[0] += copysiz * 8;
8210 + if (COUNT % 64 == 0)
8217 +sha1_result(ctxt, digest0)
8218 + struct sha1_ctxt *ctxt;
8223 + digest = (u_int8_t *)digest0;
8225 +#if BYTE_ORDER == BIG_ENDIAN
8226 + bcopy(&ctxt->h.b8[0], digest, 20);
8228 + digest[0] = ctxt->h.b8[3]; digest[1] = ctxt->h.b8[2];
8229 + digest[2] = ctxt->h.b8[1]; digest[3] = ctxt->h.b8[0];
8230 + digest[4] = ctxt->h.b8[7]; digest[5] = ctxt->h.b8[6];
8231 + digest[6] = ctxt->h.b8[5]; digest[7] = ctxt->h.b8[4];
8232 + digest[8] = ctxt->h.b8[11]; digest[9] = ctxt->h.b8[10];
8233 + digest[10] = ctxt->h.b8[9]; digest[11] = ctxt->h.b8[8];
8234 + digest[12] = ctxt->h.b8[15]; digest[13] = ctxt->h.b8[14];
8235 + digest[14] = ctxt->h.b8[13]; digest[15] = ctxt->h.b8[12];
8236 + digest[16] = ctxt->h.b8[19]; digest[17] = ctxt->h.b8[18];
8237 + digest[18] = ctxt->h.b8[17]; digest[19] = ctxt->h.b8[16];
8241 +#endif /*unsupported*/
8242 --- /dev/null 2007-07-04 13:54:27.000000000 +1000
8243 +++ linux/crypto/ocf/safe/sha1.h 2005-05-20 10:30:53.000000000 +1000
8245 +/* $FreeBSD: src/sys/crypto/sha1.h,v 1.8 2002/03/20 05:13:50 alfred Exp $ */
8246 +/* $KAME: sha1.h,v 1.5 2000/03/27 04:36:23 sumikawa Exp $ */
8249 + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
8250 + * All rights reserved.
8252 + * Redistribution and use in source and binary forms, with or without
8253 + * modification, are permitted provided that the following conditions
8255 + * 1. Redistributions of source code must retain the above copyright
8256 + * notice, this list of conditions and the following disclaimer.
8257 + * 2. Redistributions in binary form must reproduce the above copyright
8258 + * notice, this list of conditions and the following disclaimer in the
8259 + * documentation and/or other materials provided with the distribution.
8260 + * 3. Neither the name of the project nor the names of its contributors
8261 + * may be used to endorse or promote products derived from this software
8262 + * without specific prior written permission.
8264 + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
8265 + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
8266 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
8267 + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
8268 + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
8269 + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
8270 + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
8271 + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
8272 + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
8273 + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
8277 + * FIPS pub 180-1: Secure Hash Algorithm (SHA-1)
8278 + * based on: http://csrc.nist.gov/fips/fip180-1.txt
8279 + * implemented by Jun-ichiro itojun Itoh <itojun@itojun.org>
8282 +#ifndef _NETINET6_SHA1_H_
8283 +#define _NETINET6_SHA1_H_
8296 + u_int32_t b32[16];
8302 +extern void sha1_init(struct sha1_ctxt *);
8303 +extern void sha1_pad(struct sha1_ctxt *);
8304 +extern void sha1_loop(struct sha1_ctxt *, const u_int8_t *, size_t);
8305 +extern void sha1_result(struct sha1_ctxt *, caddr_t);
8307 +/* compatibility with other SHA1 source codes */
8308 +typedef struct sha1_ctxt SHA1_CTX;
8309 +#define SHA1Init(x) sha1_init((x))
8310 +#define SHA1Update(x, y, z) sha1_loop((x), (y), (z))
8311 +#define SHA1Final(x, y) sha1_result((y), (x))
8312 +#endif /* __KERNEL__ */
8314 +#define SHA1_RESULTLEN (160/8)
8316 +#endif /*_NETINET6_SHA1_H_*/
8317 --- /dev/null 2007-07-04 13:54:27.000000000 +1000
8318 +++ linux/crypto/ocf/safe/safereg.h 2005-03-16 15:19:57.000000000 +1000
8321 + * Copyright (c) 2003 Sam Leffler, Errno Consulting
8322 + * Copyright (c) 2003 Global Technology Associates, Inc.
8323 + * All rights reserved.
8325 + * Redistribution and use in source and binary forms, with or without
8326 + * modification, are permitted provided that the following conditions
8328 + * 1. Redistributions of source code must retain the above copyright
8329 + * notice, this list of conditions and the following disclaimer.
8330 + * 2. Redistributions in binary form must reproduce the above copyright
8331 + * notice, this list of conditions and the following disclaimer in the
8332 + * documentation and/or other materials provided with the distribution.
8334 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
8335 + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
8336 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
8337 + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
8338 + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
8339 + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
8340 + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
8341 + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
8342 + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
8343 + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
8346 + * $FreeBSD: src/sys/dev/safe/safereg.h,v 1.1 2003/07/21 21:46:07 sam Exp $
8348 +#ifndef _SAFE_SAFEREG_H_
8349 +#define _SAFE_SAFEREG_H_
8352 + * Register definitions for SafeNet SafeXcel-1141 crypto device.
8353 + * Definitions from revision 1.3 (Nov 6 2002) of the User's Manual.
8356 +#define BS_BAR 0x10 /* DMA base address register */
8357 +#define BS_TRDY_TIMEOUT 0x40 /* TRDY timeout */
8358 +#define BS_RETRY_TIMEOUT 0x41 /* DMA retry timeout */
8360 +#define PCI_VENDOR_SAFENET 0x16ae /* SafeNet, Inc. */
8363 +#define PCI_PRODUCT_SAFEXCEL 0x1141 /* 1141 */
8365 +#define SAFE_PE_CSR 0x0000 /* Packet Engine Ctrl/Status */
8366 +#define SAFE_PE_SRC 0x0004 /* Packet Engine Source */
8367 +#define SAFE_PE_DST 0x0008 /* Packet Engine Destination */
8368 +#define SAFE_PE_SA 0x000c /* Packet Engine SA */
8369 +#define SAFE_PE_LEN 0x0010 /* Packet Engine Length */
8370 +#define SAFE_PE_DMACFG 0x0040 /* Packet Engine DMA Configuration */
8371 +#define SAFE_PE_DMASTAT 0x0044 /* Packet Engine DMA Status */
8372 +#define SAFE_PE_PDRBASE 0x0048 /* Packet Engine Descriptor Ring Base */
8373 +#define SAFE_PE_RDRBASE 0x004c /* Packet Engine Result Ring Base */
8374 +#define SAFE_PE_RINGCFG 0x0050 /* Packet Engine Ring Configuration */
8375 +#define SAFE_PE_RINGPOLL 0x0054 /* Packet Engine Ring Poll */
8376 +#define SAFE_PE_IRNGSTAT 0x0058 /* Packet Engine Internal Ring Status */
8377 +#define SAFE_PE_ERNGSTAT 0x005c /* Packet Engine External Ring Status */
8378 +#define SAFE_PE_IOTHRESH 0x0060 /* Packet Engine I/O Threshold */
8379 +#define SAFE_PE_GRNGBASE 0x0064 /* Packet Engine Gather Ring Base */
8380 +#define SAFE_PE_SRNGBASE 0x0068 /* Packet Engine Scatter Ring Base */
8381 +#define SAFE_PE_PARTSIZE 0x006c /* Packet Engine Particle Ring Size */
8382 +#define SAFE_PE_PARTCFG 0x0070 /* Packet Engine Particle Ring Config */
8383 +#define SAFE_CRYPTO_CTRL 0x0080 /* Crypto Control */
8384 +#define SAFE_DEVID 0x0084 /* Device ID */
8385 +#define SAFE_DEVINFO 0x0088 /* Device Info */
8386 +#define SAFE_HU_STAT 0x00a0 /* Host Unmasked Status */
8387 +#define SAFE_HM_STAT 0x00a4 /* Host Masked Status (read-only) */
8388 +#define SAFE_HI_CLR 0x00a4 /* Host Clear Interrupt (write-only) */
8389 +#define SAFE_HI_MASK 0x00a8 /* Host Mask Control */
8390 +#define SAFE_HI_CFG 0x00ac /* Interrupt Configuration */
8391 +#define SAFE_HI_RD_DESCR 0x00b4 /* Force Descriptor Read */
8392 +#define SAFE_HI_DESC_CNT 0x00b8 /* Host Descriptor Done Count */
8393 +#define SAFE_DMA_ENDIAN 0x00c0 /* Master Endian Status */
8394 +#define SAFE_DMA_SRCADDR 0x00c4 /* DMA Source Address Status */
8395 +#define SAFE_DMA_DSTADDR 0x00c8 /* DMA Destination Address Status */
8396 +#define SAFE_DMA_STAT 0x00cc /* DMA Current Status */
8397 +#define SAFE_DMA_CFG 0x00d4 /* DMA Configuration/Status */
8398 +#define SAFE_ENDIAN 0x00e0 /* Endian Configuration */
8399 +#define SAFE_PK_A_ADDR 0x0800 /* Public Key A Address */
8400 +#define SAFE_PK_B_ADDR 0x0804 /* Public Key B Address */
8401 +#define SAFE_PK_C_ADDR 0x0808 /* Public Key C Address */
8402 +#define SAFE_PK_D_ADDR 0x080c /* Public Key D Address */
8403 +#define SAFE_PK_A_LEN 0x0810 /* Public Key A Length */
8404 +#define SAFE_PK_B_LEN 0x0814 /* Public Key B Length */
8405 +#define SAFE_PK_SHIFT 0x0818 /* Public Key Shift */
8406 +#define SAFE_PK_FUNC 0x081c /* Public Key Function */
8407 +#define SAFE_PK_RAM_START 0x1000 /* Public Key RAM start address */
8408 +#define SAFE_PK_RAM_END 0x1fff /* Public Key RAM end address */
8410 +#define SAFE_RNG_OUT 0x0100 /* RNG Output */
8411 +#define SAFE_RNG_STAT 0x0104 /* RNG Status */
8412 +#define SAFE_RNG_CTRL 0x0108 /* RNG Control */
8413 +#define SAFE_RNG_A 0x010c /* RNG A */
8414 +#define SAFE_RNG_B 0x0110 /* RNG B */
8415 +#define SAFE_RNG_X_LO 0x0114 /* RNG X [31:0] */
8416 +#define SAFE_RNG_X_MID 0x0118 /* RNG X [63:32] */
8417 +#define SAFE_RNG_X_HI 0x011c /* RNG X [80:64] */
8418 +#define SAFE_RNG_X_CNTR 0x0120 /* RNG Counter */
8419 +#define SAFE_RNG_ALM_CNT 0x0124 /* RNG Alarm Count */
8420 +#define SAFE_RNG_CNFG 0x0128 /* RNG Configuration */
8421 +#define SAFE_RNG_LFSR1_LO 0x012c /* RNG LFSR1 [31:0] */
8422 +#define SAFE_RNG_LFSR1_HI 0x0130 /* RNG LFSR1 [47:32] */
8423 +#define SAFE_RNG_LFSR2_LO 0x0134 /* RNG LFSR2 [31:0] */
8424 +#define SAFE_RNG_LFSR2_HI 0x0138 /* RNG LFSR2 [47:32] */
8426 +#define SAFE_PE_CSR_READY 0x00000001 /* ready for processing */
8427 +#define SAFE_PE_CSR_DONE 0x00000002 /* h/w completed processing */
8428 +#define SAFE_PE_CSR_LOADSA 0x00000004 /* load SA digests */
8429 +#define SAFE_PE_CSR_HASHFINAL 0x00000010 /* do hash pad & write result */
8430 +#define SAFE_PE_CSR_SABUSID 0x000000c0 /* bus id for SA */
8431 +#define SAFE_PE_CSR_SAPCI 0x00000040 /* PCI bus id for SA */
8432 +#define SAFE_PE_CSR_NXTHDR 0x0000ff00 /* next hdr value for IPsec */
8433 +#define SAFE_PE_CSR_FPAD 0x0000ff00 /* fixed pad for basic ops */
8434 +#define SAFE_PE_CSR_STATUS 0x00ff0000 /* operation result status */
8435 +#define SAFE_PE_CSR_AUTH_FAIL 0x00010000 /* ICV mismatch (inbound) */
8436 +#define SAFE_PE_CSR_PAD_FAIL 0x00020000 /* pad verify fail (inbound) */
8437 +#define SAFE_PE_CSR_SEQ_FAIL 0x00040000 /* sequence number (inbound) */
8438 +#define SAFE_PE_CSR_XERROR 0x00080000 /* extended error follows */
8439 +#define SAFE_PE_CSR_XECODE 0x00f00000 /* extended error code */
8440 +#define SAFE_PE_CSR_XECODE_S 20
8441 +#define SAFE_PE_CSR_XECODE_BADCMD 0 /* invalid command */
8442 +#define SAFE_PE_CSR_XECODE_BADALG 1 /* invalid algorithm */
8443 +#define SAFE_PE_CSR_XECODE_ALGDIS 2 /* algorithm disabled */
8444 +#define SAFE_PE_CSR_XECODE_ZEROLEN 3 /* zero packet length */
8445 +#define SAFE_PE_CSR_XECODE_DMAERR 4 /* bus DMA error */
8446 +#define SAFE_PE_CSR_XECODE_PIPEABORT 5 /* secondary bus DMA error */
8447 +#define SAFE_PE_CSR_XECODE_BADSPI 6 /* IPsec SPI mismatch */
8448 +#define SAFE_PE_CSR_XECODE_TIMEOUT 10 /* failsafe timeout */
8449 +#define SAFE_PE_CSR_PAD 0xff000000 /* ESP padding control/status */
8450 +#define SAFE_PE_CSR_PAD_MIN 0x00000000 /* minimum IPsec padding */
8451 +#define SAFE_PE_CSR_PAD_16 0x08000000 /* pad to 16-byte boundary */
8452 +#define SAFE_PE_CSR_PAD_32 0x10000000 /* pad to 32-byte boundary */
8453 +#define SAFE_PE_CSR_PAD_64 0x20000000 /* pad to 64-byte boundary */
8454 +#define SAFE_PE_CSR_PAD_128 0x40000000 /* pad to 128-byte boundary */
8455 +#define SAFE_PE_CSR_PAD_256 0x80000000 /* pad to 256-byte boundary */
8458 + * Check the CSR to see if the PE has returned ownership to
8459 + * the host. Note that before processing a descriptor this
8460 + * must be done followed by a check of the SAFE_PE_LEN register
8461 + * status bits to avoid premature processing of a descriptor
8462 + * on its way back to the host.
8464 +#define SAFE_PE_CSR_IS_DONE(_csr) \
8465 + (((_csr) & (SAFE_PE_CSR_READY | SAFE_PE_CSR_DONE)) == SAFE_PE_CSR_DONE)
8467 +#define SAFE_PE_LEN_LENGTH 0x000fffff /* total length (bytes) */
8468 +#define SAFE_PE_LEN_READY 0x00400000 /* ready for processing */
8469 +#define SAFE_PE_LEN_DONE 0x00800000 /* h/w completed processing */
8470 +#define SAFE_PE_LEN_BYPASS 0xff000000 /* bypass offset (bytes) */
8471 +#define SAFE_PE_LEN_BYPASS_S 24
8473 +#define SAFE_PE_LEN_IS_DONE(_len) \
8474 + (((_len) & (SAFE_PE_LEN_READY | SAFE_PE_LEN_DONE)) == SAFE_PE_LEN_DONE)
8476 +/* NB: these apply to HU_STAT, HM_STAT, HI_CLR, and HI_MASK */
8477 +#define SAFE_INT_PE_CDONE 0x00000002 /* PE context done */
8478 +#define SAFE_INT_PE_DDONE 0x00000008 /* PE descriptor done */
8479 +#define SAFE_INT_PE_ERROR 0x00000010 /* PE error */
8480 +#define SAFE_INT_PE_ODONE 0x00000020 /* PE operation done */
8482 +#define SAFE_HI_CFG_PULSE 0x00000001 /* use pulse interrupt */
8483 +#define SAFE_HI_CFG_LEVEL 0x00000000 /* use level interrupt */
8484 +#define SAFE_HI_CFG_AUTOCLR 0x00000002 /* auto-clear pulse interrupt */
8486 +#define SAFE_ENDIAN_PASS 0x000000e4 /* straight pass-thru */
8487 +#define SAFE_ENDIAN_SWAB 0x0000001b /* swap bytes in 32-bit word */
8489 +#define SAFE_PE_DMACFG_PERESET 0x00000001 /* reset packet engine */
8490 +#define SAFE_PE_DMACFG_PDRRESET 0x00000002 /* reset PDR counters/ptrs */
8491 +#define SAFE_PE_DMACFG_SGRESET 0x00000004 /* reset scatter/gather cache */
8492 +#define SAFE_PE_DMACFG_FSENA 0x00000008 /* enable failsafe reset */
8493 +#define SAFE_PE_DMACFG_PEMODE 0x00000100 /* packet engine mode */
8494 +#define SAFE_PE_DMACFG_SAPREC 0x00000200 /* SA precedes packet */
8495 +#define SAFE_PE_DMACFG_PKFOLL 0x00000400 /* packet follows descriptor */
8496 +#define SAFE_PE_DMACFG_GPRBID 0x00003000 /* gather particle ring busid */
8497 +#define SAFE_PE_DMACFG_GPRPCI 0x00001000 /* PCI gather particle ring */
8498 +#define SAFE_PE_DMACFG_SPRBID 0x0000c000 /* scatter part. ring busid */
8499 +#define SAFE_PE_DMACFG_SPRPCI 0x00004000 /* PCI scatter part. ring */
8500 +#define SAFE_PE_DMACFG_ESDESC 0x00010000 /* endian swap descriptors */
8501 +#define SAFE_PE_DMACFG_ESSA 0x00020000 /* endian swap SA data */
8502 +#define SAFE_PE_DMACFG_ESPACKET 0x00040000 /* endian swap packet data */
8503 +#define SAFE_PE_DMACFG_ESPDESC 0x00080000 /* endian swap particle desc. */
8504 +#define SAFE_PE_DMACFG_NOPDRUP 0x00100000 /* supp. PDR ownership update */
8505 +#define SAFE_PD_EDMACFG_PCIMODE 0x01000000 /* PCI target mode */
8507 +#define SAFE_PE_DMASTAT_PEIDONE 0x00000001 /* PE core input done */
8508 +#define SAFE_PE_DMASTAT_PEODONE 0x00000002 /* PE core output done */
8509 +#define SAFE_PE_DMASTAT_ENCDONE 0x00000004 /* encryption done */
8510 +#define SAFE_PE_DMASTAT_IHDONE 0x00000008 /* inner hash done */
8511 +#define SAFE_PE_DMASTAT_OHDONE 0x00000010 /* outer hash (HMAC) done */
8512 +#define SAFE_PE_DMASTAT_PADFLT 0x00000020 /* crypto pad fault */
8513 +#define SAFE_PE_DMASTAT_ICVFLT 0x00000040 /* ICV fault */
8514 +#define SAFE_PE_DMASTAT_SPIMIS 0x00000080 /* SPI mismatch */
8515 +#define SAFE_PE_DMASTAT_CRYPTO 0x00000100 /* crypto engine timeout */
8516 +#define SAFE_PE_DMASTAT_CQACT 0x00000200 /* command queue active */
8517 +#define SAFE_PE_DMASTAT_IRACT 0x00000400 /* input request active */
8518 +#define SAFE_PE_DMASTAT_ORACT 0x00000800 /* output request active */
8519 +#define SAFE_PE_DMASTAT_PEISIZE 0x003ff000 /* PE input size:32-bit words */
8520 +#define SAFE_PE_DMASTAT_PEOSIZE 0xffc00000 /* PE out. size:32-bit words */
8522 +#define SAFE_PE_RINGCFG_SIZE 0x000003ff /* ring size (descriptors) */
8523 +#define SAFE_PE_RINGCFG_OFFSET 0xffff0000 /* offset btw desc's (dwords) */
8524 +#define SAFE_PE_RINGCFG_OFFSET_S 16
8526 +#define SAFE_PE_RINGPOLL_POLL 0x00000fff /* polling frequency/divisor */
8527 +#define SAFE_PE_RINGPOLL_RETRY 0x03ff0000 /* polling frequency/divisor */
8528 +#define SAFE_PE_RINGPOLL_CONT 0x80000000 /* continuously poll */
8530 +#define SAFE_PE_IRNGSTAT_CQAVAIL 0x00000001 /* command queue available */
8532 +#define SAFE_PE_ERNGSTAT_NEXT 0x03ff0000 /* index of next packet desc. */
8533 +#define SAFE_PE_ERNGSTAT_NEXT_S 16
8535 +#define SAFE_PE_IOTHRESH_INPUT 0x000003ff /* input threshold (dwords) */
8536 +#define SAFE_PE_IOTHRESH_OUTPUT 0x03ff0000 /* output threshold (dwords) */
8538 +#define SAFE_PE_PARTCFG_SIZE 0x0000ffff /* scatter particle size */
8539 +#define SAFE_PE_PARTCFG_GBURST 0x00030000 /* gather particle burst */
8540 +#define SAFE_PE_PARTCFG_GBURST_2 0x00000000
8541 +#define SAFE_PE_PARTCFG_GBURST_4 0x00010000
8542 +#define SAFE_PE_PARTCFG_GBURST_8 0x00020000
8543 +#define SAFE_PE_PARTCFG_GBURST_16 0x00030000
8544 +#define SAFE_PE_PARTCFG_SBURST 0x000c0000 /* scatter particle burst */
8545 +#define SAFE_PE_PARTCFG_SBURST_2 0x00000000
8546 +#define SAFE_PE_PARTCFG_SBURST_4 0x00040000
8547 +#define SAFE_PE_PARTCFG_SBURST_8 0x00080000
8548 +#define SAFE_PE_PARTCFG_SBURST_16 0x000c0000
8550 +#define SAFE_PE_PARTSIZE_SCAT 0xffff0000 /* scatter particle ring size */
8551 +#define SAFE_PE_PARTSIZE_GATH 0x0000ffff /* gather particle ring size */
8553 +#define SAFE_CRYPTO_CTRL_3DES 0x00000001 /* enable 3DES support */
8554 +#define SAFE_CRYPTO_CTRL_PKEY 0x00010000 /* enable public key support */
8555 +#define SAFE_CRYPTO_CTRL_RNG 0x00020000 /* enable RNG support */
8557 +#define SAFE_DEVINFO_REV_MIN 0x0000000f /* minor rev for chip */
8558 +#define SAFE_DEVINFO_REV_MAJ 0x000000f0 /* major rev for chip */
8559 +#define SAFE_DEVINFO_REV_MAJ_S 4
8560 +#define SAFE_DEVINFO_DES 0x00000100 /* DES/3DES support present */
8561 +#define SAFE_DEVINFO_ARC4 0x00000200 /* ARC4 support present */
8562 +#define SAFE_DEVINFO_AES 0x00000400 /* AES support present */
8563 +#define SAFE_DEVINFO_MD5 0x00001000 /* MD5 support present */
8564 +#define SAFE_DEVINFO_SHA1 0x00002000 /* SHA-1 support present */
8565 +#define SAFE_DEVINFO_RIPEMD 0x00004000 /* RIPEMD support present */
8566 +#define SAFE_DEVINFO_DEFLATE 0x00010000 /* Deflate support present */
8567 +#define SAFE_DEVINFO_SARAM 0x00100000 /* on-chip SA RAM present */
8568 +#define SAFE_DEVINFO_EMIBUS 0x00200000 /* EMI bus present */
8569 +#define SAFE_DEVINFO_PKEY 0x00400000 /* public key support present */
8570 +#define SAFE_DEVINFO_RNG 0x00800000 /* RNG present */
8572 +#define SAFE_REV(_maj, _min) (((_maj) << SAFE_DEVINFO_REV_MAJ_S) | (_min))
8573 +#define SAFE_REV_MAJ(_chiprev) \
8574 + (((_chiprev) & SAFE_DEVINFO_REV_MAJ) >> SAFE_DEVINFO_REV_MAJ_S)
8575 +#define SAFE_REV_MIN(_chiprev) ((_chiprev) & SAFE_DEVINFO_REV_MIN)
8577 +#define SAFE_PK_FUNC_MULT 0x00000001 /* Multiply function */
8578 +#define SAFE_PK_FUNC_SQUARE 0x00000004 /* Square function */
8579 +#define SAFE_PK_FUNC_ADD 0x00000010 /* Add function */
8580 +#define SAFE_PK_FUNC_SUB 0x00000020 /* Subtract function */
8581 +#define SAFE_PK_FUNC_LSHIFT 0x00000040 /* Left-shift function */
8582 +#define SAFE_PK_FUNC_RSHIFT 0x00000080 /* Right-shift function */
8583 +#define SAFE_PK_FUNC_DIV 0x00000100 /* Divide function */
8584 +#define SAFE_PK_FUNC_CMP 0x00000400 /* Compare function */
8585 +#define SAFE_PK_FUNC_COPY 0x00000800 /* Copy function */
8586 +#define SAFE_PK_FUNC_EXP16 0x00002000 /* Exponentiate (4-bit ACT) */
8587 +#define SAFE_PK_FUNC_EXP4 0x00004000 /* Exponentiate (2-bit ACT) */
8588 +#define SAFE_PK_FUNC_RUN 0x00008000 /* start/status */
8590 +#define SAFE_RNG_STAT_BUSY 0x00000001 /* busy, data not valid */
8592 +#define SAFE_RNG_CTRL_PRE_LFSR 0x00000001 /* enable output pre-LFSR */
8593 +#define SAFE_RNG_CTRL_TST_MODE 0x00000002 /* enable test mode */
8594 +#define SAFE_RNG_CTRL_TST_RUN 0x00000004 /* start test state machine */
8595 +#define SAFE_RNG_CTRL_ENA_RING1 0x00000008 /* test entropy oscillator #1 */
8596 +#define SAFE_RNG_CTRL_ENA_RING2 0x00000010 /* test entropy oscillator #2 */
8597 +#define SAFE_RNG_CTRL_DIS_ALARM 0x00000020 /* disable RNG alarm reports */
8598 +#define SAFE_RNG_CTRL_TST_CLOCK 0x00000040 /* enable test clock */
8599 +#define SAFE_RNG_CTRL_SHORTEN 0x00000080 /* shorten state timers */
8600 +#define SAFE_RNG_CTRL_TST_ALARM 0x00000100 /* simulate alarm state */
8601 +#define SAFE_RNG_CTRL_RST_LFSR 0x00000200 /* reset LFSR */
8604 + * Packet engine descriptor. Note that d_csr is a copy of the
8605 + * SAFE_PE_CSR register and all definitions apply, and d_len
8606 + * is a copy of the SAFE_PE_LEN register and all definitions apply.
8607 + * d_src and d_dst may point directly to contiguous data or to a
8608 + * list of ``particle descriptors'' when using scatter/gather i/o.
8611 + u_int32_t d_csr; /* per-packet control/status */
8612 + u_int32_t d_src; /* source address */
8613 + u_int32_t d_dst; /* destination address */
8614 + u_int32_t d_sa; /* SA address */
8615 + u_int32_t d_len; /* length, bypass, status */
8619 + * Scatter/Gather particle descriptor.
8621 + * NB: scatter descriptors do not specify a size; this is fixed
8622 + * by the setting of the SAFE_PE_PARTCFG register.
8624 +struct safe_pdesc {
8625 + u_int32_t pd_addr; /* particle address */
8626 +#ifdef __BIG_ENDIAN
8627 + u_int16_t pd_flags; /* control word */
8628 + u_int16_t pd_size; /* particle size (bytes) */
8630 + u_int16_t pd_flags; /* control word */
8631 + u_int16_t pd_size; /* particle size (bytes) */
8635 +#define SAFE_PD_READY 0x0001 /* ready for processing */
8636 +#define SAFE_PD_DONE 0x0002 /* h/w completed processing */
8639 + * Security Association (SA) Record (Rev 1). One of these is
8640 + * required for each operation processed by the packet engine.
8642 +struct safe_sarec {
8643 + u_int32_t sa_cmd0;
8644 + u_int32_t sa_cmd1;
8645 + u_int32_t sa_resv0;
8646 + u_int32_t sa_resv1;
8647 + u_int32_t sa_key[8]; /* DES/3DES/AES key */
8648 + u_int32_t sa_indigest[5]; /* inner digest */
8649 + u_int32_t sa_outdigest[5]; /* outer digest */
8650 + u_int32_t sa_spi; /* SPI */
8651 + u_int32_t sa_seqnum; /* sequence number */
8652 + u_int32_t sa_seqmask[2]; /* sequence number mask */
8653 + u_int32_t sa_resv2;
8654 + u_int32_t sa_staterec; /* address of state record */
8655 + u_int32_t sa_resv3[2];
8656 + u_int32_t sa_samgmt0; /* SA management field 0 */
8657 +	u_int32_t sa_samgmt1;		/* SA management field 1 */
8660 +#define SAFE_SA_CMD0_OP 0x00000007 /* operation code */
8661 +#define SAFE_SA_CMD0_OP_CRYPT 0x00000000 /* encrypt/decrypt (basic) */
8662 +#define SAFE_SA_CMD0_OP_BOTH 0x00000001 /* encrypt-hash/hash-decrypt */
8663 +#define SAFE_SA_CMD0_OP_HASH 0x00000003 /* hash (outbound-only) */
8664 +#define SAFE_SA_CMD0_OP_ESP 0x00000000 /* ESP in/out (proto) */
8665 +#define SAFE_SA_CMD0_OP_AH 0x00000001 /* AH in/out (proto) */
8666 +#define SAFE_SA_CMD0_INBOUND 0x00000008 /* inbound operation */
8667 +#define SAFE_SA_CMD0_OUTBOUND 0x00000000 /* outbound operation */
8668 +#define SAFE_SA_CMD0_GROUP 0x00000030 /* operation group */
8669 +#define SAFE_SA_CMD0_BASIC 0x00000000 /* basic operation */
8670 +#define SAFE_SA_CMD0_PROTO 0x00000010 /* protocol/packet operation */
8671 +#define SAFE_SA_CMD0_BUNDLE 0x00000020 /* bundled operation (resvd) */
8672 +#define SAFE_SA_CMD0_PAD 0x000000c0 /* crypto pad method */
8673 +#define SAFE_SA_CMD0_PAD_IPSEC 0x00000000 /* IPsec padding */
8674 +#define SAFE_SA_CMD0_PAD_PKCS7 0x00000040 /* PKCS#7 padding */
8675 +#define SAFE_SA_CMD0_PAD_CONS 0x00000080 /* constant padding */
8676 +#define SAFE_SA_CMD0_PAD_ZERO 0x000000c0 /* zero padding */
8677 +#define SAFE_SA_CMD0_CRYPT_ALG 0x00000f00 /* symmetric crypto algorithm */
8678 +#define SAFE_SA_CMD0_DES 0x00000000 /* DES crypto algorithm */
8679 +#define SAFE_SA_CMD0_3DES 0x00000100 /* 3DES crypto algorithm */
8680 +#define SAFE_SA_CMD0_AES 0x00000300 /* AES crypto algorithm */
8681 +#define SAFE_SA_CMD0_CRYPT_NULL 0x00000f00 /* null crypto algorithm */
8682 +#define SAFE_SA_CMD0_HASH_ALG 0x0000f000 /* hash algorithm */
8683 +#define SAFE_SA_CMD0_MD5 0x00000000 /* MD5 hash algorithm */
8684 +#define SAFE_SA_CMD0_SHA1 0x00001000 /* SHA-1 hash algorithm */
8685 +#define SAFE_SA_CMD0_HASH_NULL 0x0000f000 /* null hash algorithm */
8686 +#define SAFE_SA_CMD0_HDR_PROC 0x00080000 /* header processing */
8687 +#define SAFE_SA_CMD0_IBUSID 0x00300000 /* input bus id */
8688 +#define SAFE_SA_CMD0_IPCI 0x00100000 /* PCI input bus id */
8689 +#define SAFE_SA_CMD0_OBUSID 0x00c00000 /* output bus id */
8690 +#define SAFE_SA_CMD0_OPCI 0x00400000 /* PCI output bus id */
8691 +#define SAFE_SA_CMD0_IVLD 0x03000000 /* IV loading */
8692 +#define SAFE_SA_CMD0_IVLD_NONE 0x00000000 /* IV no load (reuse) */
8693 +#define SAFE_SA_CMD0_IVLD_IBUF 0x01000000 /* IV load from input buffer */
8694 +#define SAFE_SA_CMD0_IVLD_STATE 0x02000000 /* IV load from state */
8695 +#define SAFE_SA_CMD0_HSLD 0x0c000000 /* hash state loading */
8696 +#define SAFE_SA_CMD0_HSLD_SA 0x00000000 /* hash state load from SA */
8697 +#define SAFE_SA_CMD0_HSLD_STATE 0x08000000 /* hash state load from state */
8698 +#define SAFE_SA_CMD0_HSLD_NONE 0x0c000000 /* hash state no load */
8699 +#define SAFE_SA_CMD0_SAVEIV 0x10000000 /* save IV */
8700 +#define SAFE_SA_CMD0_SAVEHASH 0x20000000 /* save hash state */
8701 +#define SAFE_SA_CMD0_IGATHER 0x40000000 /* input gather */
8702 +#define SAFE_SA_CMD0_OSCATTER 0x80000000 /* output scatter */
8704 +#define SAFE_SA_CMD1_HDRCOPY 0x00000002 /* copy header to output */
8705 +#define SAFE_SA_CMD1_PAYCOPY 0x00000004 /* copy payload to output */
8706 +#define SAFE_SA_CMD1_PADCOPY 0x00000008 /* copy pad to output */
8707 +#define SAFE_SA_CMD1_IPV4 0x00000000 /* IPv4 protocol */
8708 +#define SAFE_SA_CMD1_IPV6 0x00000010 /* IPv6 protocol */
8709 +#define SAFE_SA_CMD1_MUTABLE 0x00000020 /* mutable bit processing */
8710 +#define SAFE_SA_CMD1_SRBUSID 0x000000c0 /* state record bus id */
8711 +#define SAFE_SA_CMD1_SRPCI 0x00000040 /* state record from PCI */
8712 +#define SAFE_SA_CMD1_CRMODE 0x00000300 /* crypto mode */
8713 +#define SAFE_SA_CMD1_ECB 0x00000000 /* ECB crypto mode */
8714 +#define SAFE_SA_CMD1_CBC 0x00000100 /* CBC crypto mode */
8715 +#define SAFE_SA_CMD1_OFB 0x00000200 /* OFB crypto mode */
8716 +#define SAFE_SA_CMD1_CFB 0x00000300 /* CFB crypto mode */
8717 +#define SAFE_SA_CMD1_CRFEEDBACK 0x00000c00 /* crypto feedback mode */
8718 +#define SAFE_SA_CMD1_64BIT 0x00000000 /* 64-bit crypto feedback */
8719 +#define SAFE_SA_CMD1_8BIT 0x00000400 /* 8-bit crypto feedback */
8720 +#define SAFE_SA_CMD1_1BIT 0x00000800 /* 1-bit crypto feedback */
8721 +#define SAFE_SA_CMD1_128BIT 0x00000c00 /* 128-bit crypto feedback */
8722 +#define SAFE_SA_CMD1_OPTIONS 0x00001000 /* HMAC/options mutable bit */
8723 +#define SAFE_SA_CMD1_HMAC SAFE_SA_CMD1_OPTIONS
8724 +#define SAFE_SA_CMD1_SAREV1 0x00008000 /* SA Revision 1 */
8725 +#define SAFE_SA_CMD1_OFFSET 0x00ff0000 /* hash/crypto offset(dwords) */
8726 +#define SAFE_SA_CMD1_OFFSET_S 16
8727 +#define SAFE_SA_CMD1_AESKEYLEN 0x0f000000 /* AES key length */
8728 +#define SAFE_SA_CMD1_AES128 0x02000000 /* 128-bit AES key */
8729 +#define SAFE_SA_CMD1_AES192 0x03000000 /* 192-bit AES key */
8730 +#define SAFE_SA_CMD1_AES256 0x04000000 /* 256-bit AES key */
8733 + * Security Associate State Record (Rev 1).
8735 +struct safe_sastate {
8736 + u_int32_t sa_saved_iv[4]; /* saved IV (DES/3DES/AES) */
8737 + u_int32_t sa_saved_hashbc; /* saved hash byte count */
8738 + u_int32_t sa_saved_indigest[5]; /* saved inner digest */
8740 +#endif /* _SAFE_SAFEREG_H_ */
8741 --- /dev/null 2007-07-04 13:54:27.000000000 +1000
8742 +++ linux/crypto/ocf/safe/safevar.h 2007-07-03 09:46:58.000000000 +1000
8745 + * The linux port of this code done by David McCullough
8746 + * Copyright (C) 2004-2007 David McCullough <david_mccullough@securecomputing.com>
8747 + * The license and original author are listed below.
8749 + * Copyright (c) 2003 Sam Leffler, Errno Consulting
8750 + * Copyright (c) 2003 Global Technology Associates, Inc.
8751 + * All rights reserved.
8753 + * Redistribution and use in source and binary forms, with or without
8754 + * modification, are permitted provided that the following conditions
8756 + * 1. Redistributions of source code must retain the above copyright
8757 + * notice, this list of conditions and the following disclaimer.
8758 + * 2. Redistributions in binary form must reproduce the above copyright
8759 + * notice, this list of conditions and the following disclaimer in the
8760 + * documentation and/or other materials provided with the distribution.
8762 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
8763 + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
8764 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
8765 + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
8766 + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
8767 + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
8768 + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
8769 + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
8770 + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
8771 + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
8774 + * $FreeBSD: src/sys/dev/safe/safevar.h,v 1.2 2006/05/17 18:34:26 pjd Exp $
8776 +#ifndef _SAFE_SAFEVAR_H_
8777 +#define _SAFE_SAFEVAR_H_
8779 +/* Maximum queue length */
8780 +#ifndef SAFE_MAX_NQUEUE
8781 +#define SAFE_MAX_NQUEUE 60
8784 +#define SAFE_MAX_PART 64 /* Maximum scatter/gather depth */
8785 +#define SAFE_DMA_BOUNDARY 0 /* No boundary for source DMA ops */
8786 +#define SAFE_MAX_DSIZE 2048 /* MCLBYTES Fixed scatter particle size */
8787 +#define SAFE_MAX_SSIZE 0x0ffff /* Maximum gather particle size */
8788 +#define SAFE_MAX_DMA 0xfffff /* Maximum PE operand size (20 bits) */
8789 +/* total src+dst particle descriptors */
8790 +#define SAFE_TOTAL_DPART (SAFE_MAX_NQUEUE * SAFE_MAX_PART)
8791 +#define SAFE_TOTAL_SPART (SAFE_MAX_NQUEUE * SAFE_MAX_PART)
8793 +#define SAFE_RNG_MAXBUFSIZ 128 /* 32-bit words */
8795 +#define SAFE_CARD(sid) (((sid) & 0xf0000000) >> 28)
8796 +#define SAFE_SESSION(sid) ( (sid) & 0x0fffffff)
8797 +#define SAFE_SID(crd, sesn) (((crd) << 28) | ((sesn) & 0x0fffffff))
8799 +#define SAFE_DEF_RTY 0xff /* PCI Retry Timeout */
8800 +#define SAFE_DEF_TOUT 0xff /* PCI TRDY Timeout */
8801 +#define SAFE_DEF_CACHELINE 0x01 /* Cache Line setting */
8805 + * State associated with the allocation of each chunk
8806 + * of memory setup for DMA.
8808 +struct safe_dma_alloc {
8809 + dma_addr_t dma_paddr;
8814 + * Cryptographic operand state. One of these exists for each
8815 + * source and destination operand passed in from the crypto
8816 + * subsystem. When possible source and destination operands
8817 + * refer to the same memory. More often they are distinct.
8818 + * We track the virtual address of each operand as well as
8819 + * where each is mapped for DMA.
8821 +struct safe_operand {
8823 + struct sk_buff *skb;
8827 + int mapsize; /* total number of bytes in segs */
8829 + dma_addr_t ds_addr;
8832 + } segs[SAFE_MAX_PART];
8837 + * Packet engine ring entry and cryptographic operation state.
8838 + * The packet engine requires a ring of descriptors that contain
8839 + * pointers to various cryptographic state. However the ring
8840 + * configuration register allows you to specify an arbitrary size
8841 + * for ring entries. We use this feature to collect most of the
8842 + * state for each cryptographic request into one spot. Other than
8843 + * ring entries only the ``particle descriptors'' (scatter/gather
8844 + * lists) and the actual operand data are kept separate. The
8845 + * particle descriptors must also be organized in rings. The
8846 + * operand data can be located arbitrarily (modulo alignment constraints).
8848 + * Note that the descriptor ring is mapped onto the PCI bus so
8849 + * the hardware can DMA data. This means the entire ring must be
8852 +struct safe_ringentry {
8853 + struct safe_desc re_desc; /* command descriptor */
8854 + struct safe_sarec re_sa; /* SA record */
8855 + struct safe_sastate re_sastate; /* SA state record */
8857 + struct cryptop *re_crp; /* crypto operation */
8859 + struct safe_operand re_src; /* source operand */
8860 + struct safe_operand re_dst; /* destination operand */
8862 + int re_sesn; /* crypto session ID */
8864 +#define SAFE_QFLAGS_COPYOUTIV 0x1 /* copy back on completion */
8865 +#define SAFE_QFLAGS_COPYOUTICV 0x2 /* copy back on completion */
8868 +#define re_src_skb re_src.u.skb
8869 +#define re_src_io re_src.u.io
8870 +#define re_src_map re_src.map
8871 +#define re_src_nsegs re_src.nsegs
8872 +#define re_src_segs re_src.segs
8873 +#define re_src_mapsize re_src.mapsize
8875 +#define re_dst_skb re_dst.u.skb
8876 +#define re_dst_io re_dst.u.io
8877 +#define re_dst_map re_dst.map
8878 +#define re_dst_nsegs re_dst.nsegs
8879 +#define re_dst_segs re_dst.segs
8880 +#define re_dst_mapsize re_dst.mapsize
8882 +struct rndstate_test;
8884 +struct safe_session {
8885 + u_int32_t ses_used;
8886 + u_int32_t ses_klen; /* key length in bits */
8887 + u_int32_t ses_key[8]; /* DES/3DES/AES key */
8888 + u_int32_t ses_mlen; /* hmac length in bytes */
8889 + u_int32_t ses_hminner[5]; /* hmac inner state */
8890 + u_int32_t ses_hmouter[5]; /* hmac outer state */
8891 + u_int32_t ses_iv[4]; /* DES/3DES/AES iv */
8895 + struct list_head pkq_list;
8896 + struct cryptkop *pkq_krp;
8899 +struct safe_softc {
8900 + softc_device_decl sc_dev;
8903 + struct pci_dev *sc_pcidev;
8904 + ocf_iomem_t sc_base_addr;
8906 + u_int sc_chiprev; /* major/minor chip revision */
8907 + int sc_flags; /* device specific flags */
8908 +#define SAFE_FLAGS_KEY 0x01 /* has key accelerator */
8909 +#define SAFE_FLAGS_RNG 0x02 /* hardware rng */
8911 + int sc_needwakeup; /* notify crypto layer */
8912 + int32_t sc_cid; /* crypto tag */
8914 + struct safe_dma_alloc sc_ringalloc; /* PE ring allocation state */
8915 + struct safe_ringentry *sc_ring; /* PE ring */
8916 + struct safe_ringentry *sc_ringtop; /* PE ring top */
8917 + struct safe_ringentry *sc_front; /* next free entry */
8918 + struct safe_ringentry *sc_back; /* next pending entry */
8919 + int sc_nqchip; /* # passed to chip */
8920 + spinlock_t sc_ringmtx; /* PE ring lock */
8921 + struct safe_pdesc *sc_spring; /* src particle ring */
8922 + struct safe_pdesc *sc_springtop; /* src particle ring top */
8923 + struct safe_pdesc *sc_spfree; /* next free src particle */
8924 + struct safe_dma_alloc sc_spalloc; /* src particle ring state */
8925 + struct safe_pdesc *sc_dpring; /* dest particle ring */
8926 + struct safe_pdesc *sc_dpringtop; /* dest particle ring top */
8927 + struct safe_pdesc *sc_dpfree; /* next free dest particle */
8928 + struct safe_dma_alloc sc_dpalloc; /* dst particle ring state */
8929 + int sc_nsessions; /* # of sessions */
8930 + struct safe_session *sc_sessions; /* sessions */
8932 + struct timer_list sc_pkto; /* PK polling */
8933 + spinlock_t sc_pkmtx; /* PK lock */
8934 + struct list_head sc_pkq; /* queue of PK requests */
8935 + struct safe_pkq *sc_pkq_cur; /* current processing request */
8936 + u_int32_t sc_pk_reslen, sc_pk_resoff;
8938 + int sc_max_dsize; /* maximum safe DMA size */
8940 +#endif /* __KERNEL__ */
8942 +struct safe_stats {
8943 + u_int64_t st_ibytes;
8944 + u_int64_t st_obytes;
8945 + u_int32_t st_ipackets;
8946 + u_int32_t st_opackets;
8947 + u_int32_t st_invalid; /* invalid argument */
8948 + u_int32_t st_badsession; /* invalid session id */
8949 + u_int32_t st_badflags; /* flags indicate !(mbuf | uio) */
8950 + u_int32_t st_nodesc; /* op submitted w/o descriptors */
8951 + u_int32_t st_badalg; /* unsupported algorithm */
8952 + u_int32_t st_ringfull; /* PE descriptor ring full */
8953 + u_int32_t st_peoperr; /* PE marked error */
8954 + u_int32_t st_dmaerr; /* PE DMA error */
8955 + u_int32_t st_bypasstoobig; /* bypass > 96 bytes */
8956 + u_int32_t st_skipmismatch; /* enc part begins before auth part */
8957 + u_int32_t st_lenmismatch; /* enc length different auth length */
8958 + u_int32_t st_coffmisaligned; /* crypto offset not 32-bit aligned */
8959 + u_int32_t st_cofftoobig; /* crypto offset > 255 words */
8960 + u_int32_t st_iovmisaligned; /* iov op not aligned */
8961 + u_int32_t st_iovnotuniform; /* iov op not suitable */
8962 + u_int32_t st_unaligned; /* unaligned src caused copy */
8963 + u_int32_t st_notuniform; /* non-uniform src caused copy */
8964 + u_int32_t st_nomap; /* bus_dmamap_create failed */
8965 + u_int32_t st_noload; /* bus_dmamap_load_* failed */
8966 + u_int32_t st_nombuf; /* MGET* failed */
8967 + u_int32_t st_nomcl; /* MCLGET* failed */
8968 + u_int32_t st_maxqchip; /* max mcr1 ops out for processing */
8969 + u_int32_t st_rng; /* RNG requests */
8970 + u_int32_t st_rngalarm; /* RNG alarm requests */
8971 + u_int32_t st_noicvcopy; /* ICV data copies suppressed */
8973 +#endif /* _SAFE_SAFEVAR_H_ */
8974 --- /dev/null 2007-07-04 13:54:27.000000000 +1000
8975 +++ linux/crypto/ocf/crypto.c 2008-07-03 10:58:33.000000000 +1000
8978 + * Linux port done by David McCullough <david_mccullough@securecomputing.com>
8979 + * Copyright (C) 2006-2007 David McCullough
8980 + * Copyright (C) 2004-2005 Intel Corporation.
8981 + * The license and original author are listed below.
8983 + * Copyright (c) 2002-2006 Sam Leffler. All rights reserved.
8984 + * Redistribution and use in source and binary forms, with or without
8986 + * modification, are permitted provided that the following conditions
8988 + * 1. Redistributions of source code must retain the above copyright
8989 + * notice, this list of conditions and the following disclaimer.
8990 + * 2. Redistributions in binary form must reproduce the above copyright
8991 + * notice, this list of conditions and the following disclaimer in the
8992 + * documentation and/or other materials provided with the distribution.
8994 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
8995 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
8996 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
8997 + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
8998 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
8999 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
9000 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
9001 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
9002 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
9003 + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
9007 +#include <sys/cdefs.h>
9008 +__FBSDID("$FreeBSD: src/sys/opencrypto/crypto.c,v 1.27 2007/03/21 03:42:51 sam Exp $");
9012 + * Cryptographic Subsystem.
9014 + * This code is derived from the Openbsd Cryptographic Framework (OCF)
9015 + * that has the copyright shown below. Very little of the original
9019 + * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
9021 + * This code was written by Angelos D. Keromytis in Athens, Greece, in
9022 + * February 2000. Network Security Technologies Inc. (NSTI) kindly
9023 + * supported the development of this code.
9025 + * Copyright (c) 2000, 2001 Angelos D. Keromytis
9027 + * Permission to use, copy, and modify this software with or without fee
9028 + * is hereby granted, provided that this entire notice is included in
9029 + * all source code copies of any software which is or includes a copy or
9030 + * modification of this software.
9032 + * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
9033 + * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
9034 + * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
9035 + * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
9038 +__FBSDID("$FreeBSD: src/sys/opencrypto/crypto.c,v 1.16 2005/01/07 02:29:16 imp Exp $");
9042 +#ifndef AUTOCONF_INCLUDED
9043 +#include <linux/config.h>
9045 +#include <linux/module.h>
9046 +#include <linux/init.h>
9047 +#include <linux/list.h>
9048 +#include <linux/slab.h>
9049 +#include <linux/wait.h>
9050 +#include <linux/sched.h>
9051 +#include <linux/spinlock.h>
9052 +#include <linux/version.h>
9053 +#include <cryptodev.h>
9056 + * keep track of whether or not we have been initialised, a big
9057 + * issue if we are linked into the kernel and a driver gets started before
9060 +static int crypto_initted = 0;
9063 + * Crypto drivers register themselves by allocating a slot in the
9064 + * crypto_drivers table with crypto_get_driverid() and then registering
9065 + * each algorithm they support with crypto_register() and crypto_kregister().
9069 + * lock on driver table
9070 + * we track its state as spin_is_locked does not do anything on non-SMP boxes
9072 +static spinlock_t crypto_drivers_lock;
9073 +static int crypto_drivers_locked; /* for non-SMP boxes */
9075 +#define CRYPTO_DRIVER_LOCK() \
9077 + spin_lock_irqsave(&crypto_drivers_lock, d_flags); \
9078 + crypto_drivers_locked = 1; \
9079 + dprintk("%s,%d: DRIVER_LOCK()\n", __FILE__, __LINE__); \
9081 +#define CRYPTO_DRIVER_UNLOCK() \
9083 + dprintk("%s,%d: DRIVER_UNLOCK()\n", __FILE__, __LINE__); \
9084 + crypto_drivers_locked = 0; \
9085 + spin_unlock_irqrestore(&crypto_drivers_lock, d_flags); \
9087 +#define CRYPTO_DRIVER_ASSERT() \
9089 + if (!crypto_drivers_locked) { \
9090 + dprintk("%s,%d: DRIVER_ASSERT!\n", __FILE__, __LINE__); \
9095 + * Crypto device/driver capabilities structure.
9097 + * Synchronization:
9098 + * (d) - protected by CRYPTO_DRIVER_LOCK()
9099 + * (q) - protected by CRYPTO_Q_LOCK()
9100 + * Not tagged fields are read-only.
9103 + device_t cc_dev; /* (d) device/driver */
9104 + u_int32_t cc_sessions; /* (d) # of sessions */
9105 + u_int32_t cc_koperations; /* (d) # of asym operations */
9107 + * Largest possible operator length (in bits) for each type of
9108 + * encryption algorithm. XXX not used
9110 + u_int16_t cc_max_op_len[CRYPTO_ALGORITHM_MAX + 1];
9111 + u_int8_t cc_alg[CRYPTO_ALGORITHM_MAX + 1];
9112 + u_int8_t cc_kalg[CRK_ALGORITHM_MAX + 1];
9114 + int cc_flags; /* (d) flags */
9115 +#define CRYPTOCAP_F_CLEANUP 0x80000000 /* needs resource cleanup */
9116 + int cc_qblocked; /* (q) symmetric q blocked */
9117 + int cc_kqblocked; /* (q) asymmetric q blocked */
9119 +static struct cryptocap *crypto_drivers = NULL;
9120 +static int crypto_drivers_num = 0;
9123 + * There are two queues for crypto requests; one for symmetric (e.g.
9124 + * cipher) operations and one for asymmetric (e.g. MOD) operations.
9125 + * A single mutex is used to lock access to both queues. We could
9126 + * have one per-queue but having one simplifies handling of block/unblock
9129 +static int crp_sleep = 0;
9130 +static LIST_HEAD(crp_q); /* request queues */
9131 +static LIST_HEAD(crp_kq);
9133 +static spinlock_t crypto_q_lock;
9135 +int crypto_all_qblocked = 0; /* protect with Q_LOCK */
9136 +module_param(crypto_all_qblocked, int, 0444);
9137 +MODULE_PARM_DESC(crypto_all_qblocked, "Are all crypto queues blocked");
9139 +int crypto_all_kqblocked = 0; /* protect with Q_LOCK */
9140 +module_param(crypto_all_kqblocked, int, 0444);
9141 +MODULE_PARM_DESC(crypto_all_kqblocked, "Are all asym crypto queues blocked");
9143 +#define CRYPTO_Q_LOCK() \
9145 + spin_lock_irqsave(&crypto_q_lock, q_flags); \
9146 + dprintk("%s,%d: Q_LOCK()\n", __FILE__, __LINE__); \
9148 +#define CRYPTO_Q_UNLOCK() \
9150 + dprintk("%s,%d: Q_UNLOCK()\n", __FILE__, __LINE__); \
9151 + spin_unlock_irqrestore(&crypto_q_lock, q_flags); \
9155 + * There are two queues for processing completed crypto requests; one
9156 + * for the symmetric and one for the asymmetric ops. We only need one
9157 + * but have two to avoid type futzing (cryptop vs. cryptkop). A single
9158 + * mutex is used to lock access to both queues. Note that this lock
9159 + * must be separate from the lock on request queues to insure driver
9160 + * callbacks don't generate lock order reversals.
9162 +static LIST_HEAD(crp_ret_q); /* callback queues */
9163 +static LIST_HEAD(crp_ret_kq);
9165 +static spinlock_t crypto_ret_q_lock;
9166 +#define CRYPTO_RETQ_LOCK() \
9168 + spin_lock_irqsave(&crypto_ret_q_lock, r_flags); \
9169 + dprintk("%s,%d: RETQ_LOCK\n", __FILE__, __LINE__); \
9171 +#define CRYPTO_RETQ_UNLOCK() \
9173 + dprintk("%s,%d: RETQ_UNLOCK\n", __FILE__, __LINE__); \
9174 + spin_unlock_irqrestore(&crypto_ret_q_lock, r_flags); \
9176 +#define CRYPTO_RETQ_EMPTY() (list_empty(&crp_ret_q) && list_empty(&crp_ret_kq))
9178 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
9179 +static kmem_cache_t *cryptop_zone;
9180 +static kmem_cache_t *cryptodesc_zone;
9182 +static struct kmem_cache *cryptop_zone;
9183 +static struct kmem_cache *cryptodesc_zone;
9186 +#define debug crypto_debug
9187 +int crypto_debug = 0;
9188 +module_param(crypto_debug, int, 0644);
9189 +MODULE_PARM_DESC(crypto_debug, "Enable debug");
9190 +EXPORT_SYMBOL(crypto_debug);
9193 + * Maximum number of outstanding crypto requests before we start
9194 + * failing requests. We need this to prevent DOS when too many
9195 + * requests are arriving for us to keep up. Otherwise we will
9196 + * run the system out of memory. Since crypto is slow, we are
9197 + * usually the bottleneck that needs to say, enough is enough.
9199 + * We cannot print errors when this condition occurs, we are already too
9200 + * slow, printing anything will just kill us
9203 +static int crypto_q_cnt = 0;
9204 +module_param(crypto_q_cnt, int, 0444);
9205 +MODULE_PARM_DESC(crypto_q_cnt,
9206 + "Current number of outstanding crypto requests");
9208 +static int crypto_q_max = 1000;
9209 +module_param(crypto_q_max, int, 0644);
9210 +MODULE_PARM_DESC(crypto_q_max,
9211 + "Maximum number of outstanding crypto requests");
9213 +#define bootverbose crypto_verbose
9214 +static int crypto_verbose = 0;
9215 +module_param(crypto_verbose, int, 0644);
9216 +MODULE_PARM_DESC(crypto_verbose,
9217 + "Enable verbose crypto startup");
9219 +int crypto_usercrypto = 1; /* userland may do crypto reqs */
9220 +module_param(crypto_usercrypto, int, 0644);
9221 +MODULE_PARM_DESC(crypto_usercrypto,
9222 + "Enable/disable user-mode access to crypto support");
9224 +int crypto_userasymcrypto = 1; /* userland may do asym crypto reqs */
9225 +module_param(crypto_userasymcrypto, int, 0644);
9226 +MODULE_PARM_DESC(crypto_userasymcrypto,
9227 + "Enable/disable user-mode access to asymmetric crypto support");
9229 +int crypto_devallowsoft = 0; /* only use hardware crypto */
9230 +module_param(crypto_devallowsoft, int, 0644);
9231 +MODULE_PARM_DESC(crypto_devallowsoft,
9232 + "Enable/disable use of software crypto support");
9234 +static pid_t cryptoproc = (pid_t) -1;
9235 +static struct completion cryptoproc_exited;
9236 +static DECLARE_WAIT_QUEUE_HEAD(cryptoproc_wait);
9237 +static pid_t cryptoretproc = (pid_t) -1;
9238 +static struct completion cryptoretproc_exited;
9239 +static DECLARE_WAIT_QUEUE_HEAD(cryptoretproc_wait);
9241 +static int crypto_proc(void *arg);
9242 +static int crypto_ret_proc(void *arg);
9243 +static int crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint);
9244 +static int crypto_kinvoke(struct cryptkop *krp, int flags);
9245 +static void crypto_exit(void);
9246 +static int crypto_init(void);
9248 +static struct cryptostats cryptostats;
9250 +static struct cryptocap *
9251 +crypto_checkdriver(u_int32_t hid)
9253 + if (crypto_drivers == NULL)
9255 + return (hid >= crypto_drivers_num ? NULL : &crypto_drivers[hid]);
9259 + * Compare a driver's list of supported algorithms against another
9260 + * list; return non-zero if all algorithms are supported.
9263 +driver_suitable(const struct cryptocap *cap, const struct cryptoini *cri)
9265 + const struct cryptoini *cr;
9267 + /* See if all the algorithms are supported. */
9268 + for (cr = cri; cr; cr = cr->cri_next)
9269 + if (cap->cc_alg[cr->cri_alg] == 0)
9275 + * Select a driver for a new session that supports the specified
9276 + * algorithms and, optionally, is constrained according to the flags.
9277 + * The algorithm we use here is pretty stupid; just use the
9278 + * first driver that supports all the algorithms we need. If there
9279 + * are multiple drivers we choose the driver with the fewest active
9280 + * sessions. We prefer hardware-backed drivers to software ones.
9282 + * XXX We need more smarts here (in real life too, but that's
9283 + * XXX another story altogether).
9285 +static struct cryptocap *
9286 +crypto_select_driver(const struct cryptoini *cri, int flags)
9288 + struct cryptocap *cap, *best;
9291 + CRYPTO_DRIVER_ASSERT();
9294 + * Look first for hardware crypto devices if permitted.
9296 + if (flags & CRYPTOCAP_F_HARDWARE)
9297 + match = CRYPTOCAP_F_HARDWARE;
9299 + match = CRYPTOCAP_F_SOFTWARE;
9302 + for (hid = 0; hid < crypto_drivers_num; hid++) {
9303 + cap = &crypto_drivers[hid];
9305 + * If it's not initialized, is in the process of
9306 + * going away, or is not appropriate (hardware
9307 + * or software based on match), then skip.
9309 + if (cap->cc_dev == NULL ||
9310 + (cap->cc_flags & CRYPTOCAP_F_CLEANUP) ||
9311 + (cap->cc_flags & match) == 0)
9314 + /* verify all the algorithms are supported. */
9315 + if (driver_suitable(cap, cri)) {
9316 + if (best == NULL ||
9317 + cap->cc_sessions < best->cc_sessions)
9323 + if (match == CRYPTOCAP_F_HARDWARE && (flags & CRYPTOCAP_F_SOFTWARE)) {
9324 + /* sort of an Algol 68-style for loop */
9325 + match = CRYPTOCAP_F_SOFTWARE;
9332 + * Create a new session. The crid argument specifies a crypto
9333 + * driver to use or constraints on a driver to select (hardware
9334 + * only, software only, either). Whatever driver is selected
9335 + * must be capable of the requested crypto algorithms.
9338 +crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int crid)
9340 + struct cryptocap *cap;
9341 + u_int32_t hid, lid;
9343 + unsigned long d_flags;
9345 + CRYPTO_DRIVER_LOCK();
9346 + if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
9348 + * Use specified driver; verify it is capable.
9350 + cap = crypto_checkdriver(crid);
9351 + if (cap != NULL && !driver_suitable(cap, cri))
9355 + * No requested driver; select based on crid flags.
9357 + cap = crypto_select_driver(cri, crid);
9359 + * if NULL then can't do everything in one session.
9360 + * XXX Fix this. We need to inject a "virtual" session
9361 + * XXX layer right about here.
9364 + if (cap != NULL) {
9365 + /* Call the driver initialization routine. */
9366 + hid = cap - crypto_drivers;
9367 + lid = hid; /* Pass the driver ID. */
9368 + cap->cc_sessions++;
9369 + CRYPTO_DRIVER_UNLOCK();
9370 + err = CRYPTODEV_NEWSESSION(cap->cc_dev, &lid, cri);
9371 + CRYPTO_DRIVER_LOCK();
9373 + (*sid) = (cap->cc_flags & 0xff000000)
9374 + | (hid & 0x00ffffff);
9376 + (*sid) |= (lid & 0xffffffff);
9378 + cap->cc_sessions--;
9381 + CRYPTO_DRIVER_UNLOCK();
9386 +crypto_remove(struct cryptocap *cap)
9388 + CRYPTO_DRIVER_ASSERT();
9389 + if (cap->cc_sessions == 0 && cap->cc_koperations == 0)
9390 + bzero(cap, sizeof(*cap));
9394 + * Delete an existing session (or a reserved session on an unregistered
9398 +crypto_freesession(u_int64_t sid)
9400 + struct cryptocap *cap;
9403 + unsigned long d_flags;
9405 + dprintk("%s()\n", __FUNCTION__);
9406 + CRYPTO_DRIVER_LOCK();
9408 + if (crypto_drivers == NULL) {
9413 + /* Determine two IDs. */
9414 + hid = CRYPTO_SESID2HID(sid);
9416 + if (hid >= crypto_drivers_num) {
9417 + dprintk("%s - INVALID DRIVER NUM %d\n", __FUNCTION__, hid);
9421 + cap = &crypto_drivers[hid];
9423 + if (cap->cc_dev) {
9424 + CRYPTO_DRIVER_UNLOCK();
9425 + /* Call the driver cleanup routine, if available, unlocked. */
9426 + err = CRYPTODEV_FREESESSION(cap->cc_dev, sid);
9427 + CRYPTO_DRIVER_LOCK();
9430 + if (cap->cc_sessions)
9431 + cap->cc_sessions--;
9433 + if (cap->cc_flags & CRYPTOCAP_F_CLEANUP)
9434 + crypto_remove(cap);
9437 + CRYPTO_DRIVER_UNLOCK();
9442 + * Return an unused driver id. Used by drivers prior to registering
9443 + * support for the algorithms they handle.
9446 +crypto_get_driverid(device_t dev, int flags)
9448 + struct cryptocap *newdrv;
9450 + unsigned long d_flags;
9452 + if ((flags & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
9453 + printf("%s: no flags specified when registering driver\n",
9454 + device_get_nameunit(dev));
9458 + CRYPTO_DRIVER_LOCK();
9460 + for (i = 0; i < crypto_drivers_num; i++) {
9461 + if (crypto_drivers[i].cc_dev == NULL &&
9462 + (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP) == 0) {
9467 + /* Out of entries, allocate some more. */
9468 + if (i == crypto_drivers_num) {
9469 + /* Be careful about wrap-around. */
9470 + if (2 * crypto_drivers_num <= crypto_drivers_num) {
9471 + CRYPTO_DRIVER_UNLOCK();
9472 + printk("crypto: driver count wraparound!\n");
9476 + newdrv = kmalloc(2 * crypto_drivers_num * sizeof(struct cryptocap),
9478 + if (newdrv == NULL) {
9479 + CRYPTO_DRIVER_UNLOCK();
9480 + printk("crypto: no space to expand driver table!\n");
9484 + memcpy(newdrv, crypto_drivers,
9485 + crypto_drivers_num * sizeof(struct cryptocap));
9486 + memset(&newdrv[crypto_drivers_num], 0,
9487 + crypto_drivers_num * sizeof(struct cryptocap));
9489 + crypto_drivers_num *= 2;
9491 + kfree(crypto_drivers);
9492 + crypto_drivers = newdrv;
9495 + /* NB: state is zero'd on free */
9496 + crypto_drivers[i].cc_sessions = 1; /* Mark */
9497 + crypto_drivers[i].cc_dev = dev;
9498 + crypto_drivers[i].cc_flags = flags;
9500 + printf("crypto: assign %s driver id %u, flags %u\n",
9501 + device_get_nameunit(dev), i, flags);
9503 + CRYPTO_DRIVER_UNLOCK();
9509 + * Lookup a driver by name. We match against the full device
9510 + * name and unit, and against just the name. The latter gives
9511 + * us a simple widlcarding by device name. On success return the
9512 + * driver/hardware identifier; otherwise return -1.
9515 +crypto_find_driver(const char *match)
9517 + int i, len = strlen(match);
9518 + unsigned long d_flags;
9520 + CRYPTO_DRIVER_LOCK();
9521 + for (i = 0; i < crypto_drivers_num; i++) {
9522 + device_t dev = crypto_drivers[i].cc_dev;
9523 + if (dev == NULL ||
9524 + (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP))
9526 + if (strncmp(match, device_get_nameunit(dev), len) == 0 ||
9527 + strncmp(match, device_get_name(dev), len) == 0)
9530 + CRYPTO_DRIVER_UNLOCK();
9531 + return i < crypto_drivers_num ? i : -1;
9535 + * Return the device_t for the specified driver or NULL
9536 + * if the driver identifier is invalid.
9539 +crypto_find_device_byhid(int hid)
9541 + struct cryptocap *cap = crypto_checkdriver(hid);
9542 + return cap != NULL ? cap->cc_dev : NULL;
9546 + * Return the device/driver capabilities.
9549 +crypto_getcaps(int hid)
9551 + struct cryptocap *cap = crypto_checkdriver(hid);
9552 + return cap != NULL ? cap->cc_flags : 0;
9556 + * Register support for a key-related algorithm. This routine
9557 + * is called once for each algorithm supported a driver.
9560 +crypto_kregister(u_int32_t driverid, int kalg, u_int32_t flags)
9562 + struct cryptocap *cap;
9564 + unsigned long d_flags;
9566 + dprintk("%s()\n", __FUNCTION__);
9567 + CRYPTO_DRIVER_LOCK();
9569 + cap = crypto_checkdriver(driverid);
9570 + if (cap != NULL &&
9571 + (CRK_ALGORITM_MIN <= kalg && kalg <= CRK_ALGORITHM_MAX)) {
9573 + * XXX Do some performance testing to determine placing.
9574 + * XXX We probably need an auxiliary data structure that
9575 + * XXX describes relative performances.
9578 + cap->cc_kalg[kalg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
9580 + printf("crypto: %s registers key alg %u flags %u\n"
9581 + , device_get_nameunit(cap->cc_dev)
9589 + CRYPTO_DRIVER_UNLOCK();
9594 + * Register support for a non-key-related algorithm. This routine
9595 + * is called once for each such algorithm supported by a driver.
9598 +crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen,
9601 + struct cryptocap *cap;
9603 + unsigned long d_flags;
9605 + dprintk("%s(id=0x%x, alg=%d, maxoplen=%d, flags=0x%x)\n", __FUNCTION__,
9606 + driverid, alg, maxoplen, flags);
9608 + CRYPTO_DRIVER_LOCK();
9610 + cap = crypto_checkdriver(driverid);
9611 + /* NB: algorithms are in the range [1..max] */
9612 + if (cap != NULL &&
9613 + (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX)) {
9615 + * XXX Do some performance testing to determine placing.
9616 + * XXX We probably need an auxiliary data structure that
9617 + * XXX describes relative performances.
9620 + cap->cc_alg[alg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
9621 + cap->cc_max_op_len[alg] = maxoplen;
9623 + printf("crypto: %s registers alg %u flags %u maxoplen %u\n"
9624 + , device_get_nameunit(cap->cc_dev)
9629 + cap->cc_sessions = 0; /* Unmark */
9634 + CRYPTO_DRIVER_UNLOCK();
9639 +driver_finis(struct cryptocap *cap)
9641 + u_int32_t ses, kops;
9643 + CRYPTO_DRIVER_ASSERT();
9645 + ses = cap->cc_sessions;
9646 + kops = cap->cc_koperations;
9647 + bzero(cap, sizeof(*cap));
9648 + if (ses != 0 || kops != 0) {
9650 + * If there are pending sessions,
9651 + * just mark as invalid.
9653 + cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
9654 + cap->cc_sessions = ses;
9655 + cap->cc_koperations = kops;
9660 + * Unregister a crypto driver. If there are pending sessions using it,
9661 + * leave enough information around so that subsequent calls using those
9662 + * sessions will correctly detect the driver has been unregistered and
9663 + * reroute requests.
9666 +crypto_unregister(u_int32_t driverid, int alg)
9668 + struct cryptocap *cap;
9670 + unsigned long d_flags;
9672 + dprintk("%s()\n", __FUNCTION__);
9673 + CRYPTO_DRIVER_LOCK();
9675 + cap = crypto_checkdriver(driverid);
9676 + if (cap != NULL &&
9677 + (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX) &&
9678 + cap->cc_alg[alg] != 0) {
9679 + cap->cc_alg[alg] = 0;
9680 + cap->cc_max_op_len[alg] = 0;
9682 + /* Was this the last algorithm ? */
9683 + for (i = 1; i <= CRYPTO_ALGORITHM_MAX; i++)
9684 + if (cap->cc_alg[i] != 0)
9687 + if (i == CRYPTO_ALGORITHM_MAX + 1)
9688 + driver_finis(cap);
9692 + CRYPTO_DRIVER_UNLOCK();
9697 + * Unregister all algorithms associated with a crypto driver.
9698 + * If there are pending sessions using it, leave enough information
9699 + * around so that subsequent calls using those sessions will
9700 + * correctly detect the driver has been unregistered and reroute
9704 +crypto_unregister_all(u_int32_t driverid)
9706 + struct cryptocap *cap;
9708 + unsigned long d_flags;
9710 + dprintk("%s()\n", __FUNCTION__);
9711 + CRYPTO_DRIVER_LOCK();
9712 + cap = crypto_checkdriver(driverid);
9713 + if (cap != NULL) {
9714 + driver_finis(cap);
9718 + CRYPTO_DRIVER_UNLOCK();
9724 + * Clear blockage on a driver. The what parameter indicates whether
9725 + * the driver is now ready for cryptop's and/or cryptokop's.
9728 +crypto_unblock(u_int32_t driverid, int what)
9730 + struct cryptocap *cap;
9732 + unsigned long q_flags;
9735 + cap = crypto_checkdriver(driverid);
9736 + if (cap != NULL) {
9737 + if (what & CRYPTO_SYMQ) {
9738 + cap->cc_qblocked = 0;
9739 + crypto_all_qblocked = 0;
9741 + if (what & CRYPTO_ASYMQ) {
9742 + cap->cc_kqblocked = 0;
9743 + crypto_all_kqblocked = 0;
9746 + wake_up_interruptible(&cryptoproc_wait);
9750 + CRYPTO_Q_UNLOCK(); //DAVIDM should this be a driver lock
9756 + * Add a crypto request to a queue, to be processed by the kernel thread.
9759 +crypto_dispatch(struct cryptop *crp)
9761 + struct cryptocap *cap;
9763 + unsigned long q_flags;
9765 + dprintk("%s()\n", __FUNCTION__);
9767 + cryptostats.cs_ops++;
9770 + if (crypto_q_cnt >= crypto_q_max) {
9771 + CRYPTO_Q_UNLOCK();
9772 + cryptostats.cs_drops++;
9778 + * Caller marked the request to be processed immediately; dispatch
9779 + * it directly to the driver unless the driver is currently blocked.
9781 + if ((crp->crp_flags & CRYPTO_F_BATCH) == 0) {
9782 + int hid = CRYPTO_SESID2HID(crp->crp_sid);
9783 + cap = crypto_checkdriver(hid);
9784 + /* Driver cannot disappear when there is an active session. */
9785 + KASSERT(cap != NULL, ("%s: Driver disappeared.", __func__));
9786 + if (!cap->cc_qblocked) {
9787 + crypto_all_qblocked = 0;
9788 + crypto_drivers[hid].cc_qblocked = 1;
9789 + CRYPTO_Q_UNLOCK();
9790 + result = crypto_invoke(cap, crp, 0);
9792 + if (result != ERESTART)
9793 + crypto_drivers[hid].cc_qblocked = 0;
9796 + if (result == ERESTART) {
9798 + * The driver ran out of resources, mark the
9799 + * driver ``blocked'' for cryptop's and put
9800 + * the request back in the queue. It would
9801 + * best to put the request back where we got
9802 + * it but that's hard so for now we put it
9803 + * at the front. This should be ok; putting
9804 + * it at the end does not work.
9806 + list_add(&crp->crp_next, &crp_q);
9807 + cryptostats.cs_blocks++;
9808 + } else if (result == -1) {
9809 + TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
9812 + wake_up_interruptible(&cryptoproc_wait);
9813 + CRYPTO_Q_UNLOCK();
9818 + * Add an asymetric crypto request to a queue,
9819 + * to be processed by the kernel thread.
9822 +crypto_kdispatch(struct cryptkop *krp)
9825 + unsigned long q_flags;
9827 + cryptostats.cs_kops++;
9829 + error = crypto_kinvoke(krp, krp->krp_crid);
9830 + if (error == ERESTART) {
9832 + TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next);
9834 + wake_up_interruptible(&cryptoproc_wait);
9835 + CRYPTO_Q_UNLOCK();
9842 + * Verify a driver is suitable for the specified operation.
9844 +static __inline int
9845 +kdriver_suitable(const struct cryptocap *cap, const struct cryptkop *krp)
9847 + return (cap->cc_kalg[krp->krp_op] & CRYPTO_ALG_FLAG_SUPPORTED) != 0;
9851 + * Select a driver for an asym operation. The driver must
9852 + * support the necessary algorithm. The caller can constrain
9853 + * which device is selected with the flags parameter. The
9854 + * algorithm we use here is pretty stupid; just use the first
9855 + * driver that supports the algorithms we need. If there are
9856 + * multiple suitable drivers we choose the driver with the
9857 + * fewest active operations. We prefer hardware-backed
9858 + * drivers to software ones when either may be used.
9860 +static struct cryptocap *
9861 +crypto_select_kdriver(const struct cryptkop *krp, int flags)
9863 + struct cryptocap *cap, *best, *blocked;
9866 + CRYPTO_DRIVER_ASSERT();
9869 + * Look first for hardware crypto devices if permitted.
9871 + if (flags & CRYPTOCAP_F_HARDWARE)
9872 + match = CRYPTOCAP_F_HARDWARE;
9874 + match = CRYPTOCAP_F_SOFTWARE;
9878 + for (hid = 0; hid < crypto_drivers_num; hid++) {
9879 + cap = &crypto_drivers[hid];
9881 + * If it's not initialized, is in the process of
9882 + * going away, or is not appropriate (hardware
9883 + * or software based on match), then skip.
9885 + if (cap->cc_dev == NULL ||
9886 + (cap->cc_flags & CRYPTOCAP_F_CLEANUP) ||
9887 + (cap->cc_flags & match) == 0)
9890 + /* verify all the algorithms are supported. */
9891 + if (kdriver_suitable(cap, krp)) {
9892 + if (best == NULL ||
9893 + cap->cc_koperations < best->cc_koperations)
9899 + if (match == CRYPTOCAP_F_HARDWARE && (flags & CRYPTOCAP_F_SOFTWARE)) {
9900 + /* sort of an Algol 68-style for loop */
9901 + match = CRYPTOCAP_F_SOFTWARE;
9908 + * Dispatch an assymetric crypto request.
9911 +crypto_kinvoke(struct cryptkop *krp, int crid)
9913 + struct cryptocap *cap = NULL;
9915 + unsigned long d_flags;
9917 + KASSERT(krp != NULL, ("%s: krp == NULL", __func__));
9918 + KASSERT(krp->krp_callback != NULL,
9919 + ("%s: krp->crp_callback == NULL", __func__));
9921 + CRYPTO_DRIVER_LOCK();
9922 + if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
9923 + cap = crypto_checkdriver(crid);
9924 + if (cap != NULL) {
9926 + * Driver present, it must support the necessary
9927 + * algorithm and, if s/w drivers are excluded,
9928 + * it must be registered as hardware-backed.
9930 + if (!kdriver_suitable(cap, krp) ||
9931 + (!crypto_devallowsoft &&
9932 + (cap->cc_flags & CRYPTOCAP_F_HARDWARE) == 0))
9937 + * No requested driver; select based on crid flags.
9939 + if (!crypto_devallowsoft) /* NB: disallow s/w drivers */
9940 + crid &= ~CRYPTOCAP_F_SOFTWARE;
9941 + cap = crypto_select_kdriver(krp, crid);
9943 + if (cap != NULL && !cap->cc_kqblocked) {
9944 + krp->krp_hid = cap - crypto_drivers;
9945 + cap->cc_koperations++;
9946 + CRYPTO_DRIVER_UNLOCK();
9947 + error = CRYPTODEV_KPROCESS(cap->cc_dev, krp, 0);
9948 + CRYPTO_DRIVER_LOCK();
9949 + if (error == ERESTART) {
9950 + cap->cc_koperations--;
9951 + CRYPTO_DRIVER_UNLOCK();
9954 + /* return the actual device used */
9955 + krp->krp_crid = krp->krp_hid;
9958 + * NB: cap is !NULL if device is blocked; in
9959 + * that case return ERESTART so the operation
9960 + * is resubmitted if possible.
9962 + error = (cap == NULL) ? ENODEV : ERESTART;
9964 + CRYPTO_DRIVER_UNLOCK();
9967 + krp->krp_status = error;
9968 + crypto_kdone(krp);
9975 + * Dispatch a crypto request to the appropriate crypto devices.
9978 +crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint)
9980 + KASSERT(crp != NULL, ("%s: crp == NULL", __func__));
9981 + KASSERT(crp->crp_callback != NULL,
9982 + ("%s: crp->crp_callback == NULL", __func__));
9983 + KASSERT(crp->crp_desc != NULL, ("%s: crp->crp_desc == NULL", __func__));
9985 + dprintk("%s()\n", __FUNCTION__);
9987 +#ifdef CRYPTO_TIMING
9988 + if (crypto_timing)
9989 + crypto_tstat(&cryptostats.cs_invoke, &crp->crp_tstamp);
9991 + if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) {
9992 + struct cryptodesc *crd;
9996 + * Driver has unregistered; migrate the session and return
9997 + * an error to the caller so they'll resubmit the op.
9999 + * XXX: What if there are more already queued requests for this
10002 + crypto_freesession(crp->crp_sid);
10004 + for (crd = crp->crp_desc; crd->crd_next; crd = crd->crd_next)
10005 + crd->CRD_INI.cri_next = &(crd->crd_next->CRD_INI);
10007 + /* XXX propagate flags from initial session? */
10008 + if (crypto_newsession(&nid, &(crp->crp_desc->CRD_INI),
10009 + CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE) == 0)
10010 + crp->crp_sid = nid;
10012 + crp->crp_etype = EAGAIN;
10013 + crypto_done(crp);
10017 + * Invoke the driver to process the request.
10019 + return CRYPTODEV_PROCESS(cap->cc_dev, crp, hint);
10024 + * Release a set of crypto descriptors.
10027 +crypto_freereq(struct cryptop *crp)
10029 + struct cryptodesc *crd;
10036 + struct cryptop *crp2;
10037 + unsigned long q_flags;
10040 + TAILQ_FOREACH(crp2, &crp_q, crp_next) {
10041 + KASSERT(crp2 != crp,
10042 + ("Freeing cryptop from the crypto queue (%p).",
10045 + CRYPTO_Q_UNLOCK();
10046 + CRYPTO_RETQ_LOCK();
10047 + TAILQ_FOREACH(crp2, &crp_ret_q, crp_next) {
10048 + KASSERT(crp2 != crp,
10049 + ("Freeing cryptop from the return queue (%p).",
10052 + CRYPTO_RETQ_UNLOCK();
10056 + while ((crd = crp->crp_desc) != NULL) {
10057 + crp->crp_desc = crd->crd_next;
10058 + kmem_cache_free(cryptodesc_zone, crd);
10060 + kmem_cache_free(cryptop_zone, crp);
10064 + * Acquire a set of crypto descriptors.
10067 +crypto_getreq(int num)
10069 + struct cryptodesc *crd;
10070 + struct cryptop *crp;
10072 + crp = kmem_cache_alloc(cryptop_zone, SLAB_ATOMIC);
10073 + if (crp != NULL) {
10074 + memset(crp, 0, sizeof(*crp));
10075 + INIT_LIST_HEAD(&crp->crp_next);
10076 + init_waitqueue_head(&crp->crp_waitq);
10078 + crd = kmem_cache_alloc(cryptodesc_zone, SLAB_ATOMIC);
10079 + if (crd == NULL) {
10080 + crypto_freereq(crp);
10083 + memset(crd, 0, sizeof(*crd));
10084 + crd->crd_next = crp->crp_desc;
10085 + crp->crp_desc = crd;
10092 + * Invoke the callback on behalf of the driver.
10095 +crypto_done(struct cryptop *crp)
10097 + unsigned long q_flags;
10099 + dprintk("%s()\n", __FUNCTION__);
10100 + if ((crp->crp_flags & CRYPTO_F_DONE) == 0) {
10101 + crp->crp_flags |= CRYPTO_F_DONE;
10104 + CRYPTO_Q_UNLOCK();
10106 + printk("crypto: crypto_done op already done, flags 0x%x",
10108 + if (crp->crp_etype != 0)
10109 + cryptostats.cs_errs++;
10111 + * CBIMM means unconditionally do the callback immediately;
10112 + * CBIFSYNC means do the callback immediately only if the
10113 + * operation was done synchronously. Both are used to avoid
10114 + * doing extraneous context switches; the latter is mostly
10115 + * used with the software crypto driver.
10117 + if ((crp->crp_flags & CRYPTO_F_CBIMM) ||
10118 + ((crp->crp_flags & CRYPTO_F_CBIFSYNC) &&
10119 + (CRYPTO_SESID2CAPS(crp->crp_sid) & CRYPTOCAP_F_SYNC))) {
10121 + * Do the callback directly. This is ok when the
10122 + * callback routine does very little (e.g. the
10123 + * /dev/crypto callback method just does a wakeup).
10125 + crp->crp_callback(crp);
10127 + unsigned long r_flags;
10129 + * Normal case; queue the callback for the thread.
10131 + CRYPTO_RETQ_LOCK();
10132 + if (CRYPTO_RETQ_EMPTY())
10133 + wake_up_interruptible(&cryptoretproc_wait);/* shared wait channel */
10134 + TAILQ_INSERT_TAIL(&crp_ret_q, crp, crp_next);
10135 + CRYPTO_RETQ_UNLOCK();
10140 + * Invoke the callback on behalf of the driver.
10143 +crypto_kdone(struct cryptkop *krp)
10145 + struct cryptocap *cap;
10146 + unsigned long d_flags;
10148 + if ((krp->krp_flags & CRYPTO_KF_DONE) != 0)
10149 + printk("crypto: crypto_kdone op already done, flags 0x%x",
10151 + krp->krp_flags |= CRYPTO_KF_DONE;
10152 + if (krp->krp_status != 0)
10153 + cryptostats.cs_kerrs++;
10155 + CRYPTO_DRIVER_LOCK();
10156 + /* XXX: What if driver is loaded in the meantime? */
10157 + if (krp->krp_hid < crypto_drivers_num) {
10158 + cap = &crypto_drivers[krp->krp_hid];
10159 + cap->cc_koperations--;
10160 + KASSERT(cap->cc_koperations >= 0, ("cc_koperations < 0"));
10161 + if (cap->cc_flags & CRYPTOCAP_F_CLEANUP)
10162 + crypto_remove(cap);
10164 + CRYPTO_DRIVER_UNLOCK();
10167 + * CBIMM means unconditionally do the callback immediately;
10168 + * This is used to avoid doing extraneous context switches
10170 + if ((krp->krp_flags & CRYPTO_KF_CBIMM)) {
10172 + * Do the callback directly. This is ok when the
10173 + * callback routine does very little (e.g. the
10174 + * /dev/crypto callback method just does a wakeup).
10176 + krp->krp_callback(krp);
10178 + unsigned long r_flags;
10180 + * Normal case; queue the callback for the thread.
10182 + CRYPTO_RETQ_LOCK();
10183 + if (CRYPTO_RETQ_EMPTY())
10184 + wake_up_interruptible(&cryptoretproc_wait);/* shared wait channel */
10185 + TAILQ_INSERT_TAIL(&crp_ret_kq, krp, krp_next);
10186 + CRYPTO_RETQ_UNLOCK();
10191 +crypto_getfeat(int *featp)
10193 + int hid, kalg, feat = 0;
10194 + unsigned long d_flags;
10196 + CRYPTO_DRIVER_LOCK();
10197 + for (hid = 0; hid < crypto_drivers_num; hid++) {
10198 + const struct cryptocap *cap = &crypto_drivers[hid];
10200 + if ((cap->cc_flags & CRYPTOCAP_F_SOFTWARE) &&
10201 + !crypto_devallowsoft) {
10204 + for (kalg = 0; kalg < CRK_ALGORITHM_MAX; kalg++)
10205 + if (cap->cc_kalg[kalg] & CRYPTO_ALG_FLAG_SUPPORTED)
10206 + feat |= 1 << kalg;
10208 + CRYPTO_DRIVER_UNLOCK();
10214 + * Crypto thread, dispatches crypto requests.
10217 +crypto_proc(void *arg)
10219 + struct cryptop *crp, *submit;
10220 + struct cryptkop *krp, *krpp;
10221 + struct cryptocap *cap;
10223 + int result, hint;
10224 + unsigned long q_flags;
10226 + ocf_daemonize("crypto");
10231 + * we need to make sure we don't get into a busy loop with nothing
10232 + * to do, the two crypto_all_*blocked vars help us find out when
10233 + * we are all full and can do nothing on any driver or Q. If so we
10234 + * wait for an unblock.
10236 + crypto_all_qblocked = !list_empty(&crp_q);
10239 + * Find the first element in the queue that can be
10240 + * processed and look-ahead to see if multiple ops
10241 + * are ready for the same driver.
10245 + list_for_each_entry(crp, &crp_q, crp_next) {
10246 + hid = CRYPTO_SESID2HID(crp->crp_sid);
10247 + cap = crypto_checkdriver(hid);
10249 + * Driver cannot disappear when there is an active
10252 + KASSERT(cap != NULL, ("%s:%u Driver disappeared.",
10253 + __func__, __LINE__));
10254 + if (cap == NULL || cap->cc_dev == NULL) {
10255 + /* Op needs to be migrated, process it. */
10256 + if (submit == NULL)
10260 + if (!cap->cc_qblocked) {
10261 + if (submit != NULL) {
10263 + * We stop on finding another op,
10264 + * regardless whether its for the same
10265 + * driver or not. We could keep
10266 + * searching the queue but it might be
10267 + * better to just use a per-driver
10270 + if (CRYPTO_SESID2HID(submit->crp_sid) == hid)
10271 + hint = CRYPTO_HINT_MORE;
10275 + if ((submit->crp_flags & CRYPTO_F_BATCH) == 0)
10277 + /* keep scanning for more are q'd */
10281 + if (submit != NULL) {
10282 + hid = CRYPTO_SESID2HID(submit->crp_sid);
10283 + crypto_all_qblocked = 0;
10284 + list_del(&submit->crp_next);
10285 + crypto_drivers[hid].cc_qblocked = 1;
10286 + cap = crypto_checkdriver(hid);
10287 + CRYPTO_Q_UNLOCK();
10288 + KASSERT(cap != NULL, ("%s:%u Driver disappeared.",
10289 + __func__, __LINE__));
10290 + result = crypto_invoke(cap, submit, hint);
10292 + if (result == ERESTART) {
10294 + * The driver ran out of resources, mark the
10295 + * driver ``blocked'' for cryptop's and put
10296 + * the request back in the queue. It would
10297 + * best to put the request back where we got
10298 + * it but that's hard so for now we put it
10299 + * at the front. This should be ok; putting
10300 + * it at the end does not work.
10302 + /* XXX validate sid again? */
10303 + list_add(&submit->crp_next, &crp_q);
10304 + cryptostats.cs_blocks++;
10306 + crypto_drivers[hid].cc_qblocked=0;
10309 + crypto_all_kqblocked = !list_empty(&crp_kq);
10311 + /* As above, but for key ops */
10313 + list_for_each_entry(krpp, &crp_kq, krp_next) {
10314 + cap = crypto_checkdriver(krpp->krp_hid);
10315 + if (cap == NULL || cap->cc_dev == NULL) {
10317 + * Operation needs to be migrated, invalidate
10318 + * the assigned device so it will reselect a
10319 + * new one below. Propagate the original
10320 + * crid selection flags if supplied.
10322 + krp->krp_hid = krp->krp_crid &
10323 + (CRYPTOCAP_F_SOFTWARE|CRYPTOCAP_F_HARDWARE);
10324 + if (krp->krp_hid == 0)
10326 + CRYPTOCAP_F_SOFTWARE|CRYPTOCAP_F_HARDWARE;
10329 + if (!cap->cc_kqblocked) {
10334 + if (krp != NULL) {
10335 + crypto_all_kqblocked = 0;
10336 + list_del(&krp->krp_next);
10337 + crypto_drivers[krp->krp_hid].cc_kqblocked = 1;
10338 + CRYPTO_Q_UNLOCK();
10339 + result = crypto_kinvoke(krp, krp->krp_hid);
10341 + if (result == ERESTART) {
10343 + * The driver ran out of resources, mark the
10344 + * driver ``blocked'' for cryptkop's and put
10345 + * the request back in the queue. It would
10346 + * best to put the request back where we got
10347 + * it but that's hard so for now we put it
10348 + * at the front. This should be ok; putting
10349 + * it at the end does not work.
10351 + /* XXX validate sid again? */
10352 + list_add(&krp->krp_next, &crp_kq);
10353 + cryptostats.cs_kblocks++;
10355 + crypto_drivers[krp->krp_hid].cc_kqblocked = 0;
10358 + if (submit == NULL && krp == NULL) {
10360 + * Nothing more to be processed. Sleep until we're
10361 + * woken because there are more ops to process.
10362 + * This happens either by submission or by a driver
10363 + * becoming unblocked and notifying us through
10364 + * crypto_unblock. Note that when we wakeup we
10365 + * start processing each queue again from the
10366 + * front. It's not clear that it's important to
10367 + * preserve this ordering since ops may finish
10368 + * out of order if dispatched to different devices
10369 + * and some become blocked while others do not.
10371 + dprintk("%s - sleeping (qe=%d qb=%d kqe=%d kqb=%d)\n",
10373 + list_empty(&crp_q), crypto_all_qblocked,
10374 + list_empty(&crp_kq), crypto_all_kqblocked);
10375 + CRYPTO_Q_UNLOCK();
10377 + wait_event_interruptible(cryptoproc_wait,
10378 + !(list_empty(&crp_q) || crypto_all_qblocked) ||
10379 + !(list_empty(&crp_kq) || crypto_all_kqblocked) ||
10380 + cryptoproc == (pid_t) -1);
10382 + if (signal_pending (current)) {
10383 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
10384 + spin_lock_irq(¤t->sigmask_lock);
10386 + flush_signals(current);
10387 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
10388 + spin_unlock_irq(¤t->sigmask_lock);
10392 + dprintk("%s - awake\n", __FUNCTION__);
10393 + if (cryptoproc == (pid_t) -1)
10395 + cryptostats.cs_intrs++;
10398 + CRYPTO_Q_UNLOCK();
10399 + complete_and_exit(&cryptoproc_exited, 0);
10403 + * Crypto returns thread, does callbacks for processed crypto requests.
10404 + * Callbacks are done here, rather than in the crypto drivers, because
10405 + * callbacks typically are expensive and would slow interrupt handling.
10408 +crypto_ret_proc(void *arg)
10410 + struct cryptop *crpt;
10411 + struct cryptkop *krpt;
10412 + unsigned long r_flags;
10414 + ocf_daemonize("crypto_ret");
10416 + CRYPTO_RETQ_LOCK();
10418 + /* Harvest return q's for completed ops */
10420 + if (!list_empty(&crp_ret_q))
10421 + crpt = list_entry(crp_ret_q.next, typeof(*crpt), crp_next);
10422 + if (crpt != NULL)
10423 + list_del(&crpt->crp_next);
10426 + if (!list_empty(&crp_ret_kq))
10427 + krpt = list_entry(crp_ret_kq.next, typeof(*krpt), krp_next);
10428 + if (krpt != NULL)
10429 + list_del(&krpt->krp_next);
10431 + if (crpt != NULL || krpt != NULL) {
10432 + CRYPTO_RETQ_UNLOCK();
10434 + * Run callbacks unlocked.
10436 + if (crpt != NULL)
10437 + crpt->crp_callback(crpt);
10438 + if (krpt != NULL)
10439 + krpt->krp_callback(krpt);
10440 + CRYPTO_RETQ_LOCK();
10443 + * Nothing more to be processed. Sleep until we're
10444 + * woken because there are more returns to process.
10446 + dprintk("%s - sleeping\n", __FUNCTION__);
10447 + CRYPTO_RETQ_UNLOCK();
10448 + wait_event_interruptible(cryptoretproc_wait,
10449 + cryptoretproc == (pid_t) -1 ||
10450 + !list_empty(&crp_ret_q) ||
10451 + !list_empty(&crp_ret_kq));
10452 + if (signal_pending (current)) {
10453 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
10454 + spin_lock_irq(¤t->sigmask_lock);
10456 + flush_signals(current);
10457 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
10458 + spin_unlock_irq(¤t->sigmask_lock);
10461 + CRYPTO_RETQ_LOCK();
10462 + dprintk("%s - awake\n", __FUNCTION__);
10463 + if (cryptoretproc == (pid_t) -1) {
10464 + dprintk("%s - EXITING!\n", __FUNCTION__);
10467 + cryptostats.cs_rets++;
10470 + CRYPTO_RETQ_UNLOCK();
10471 + complete_and_exit(&cryptoretproc_exited, 0);
10475 +#if 0 /* should put this into /proc or something */
10477 +db_show_drivers(void)
10481 + db_printf("%12s %4s %4s %8s %2s %2s\n"
10489 + for (hid = 0; hid < crypto_drivers_num; hid++) {
10490 + const struct cryptocap *cap = &crypto_drivers[hid];
10491 + if (cap->cc_dev == NULL)
10493 + db_printf("%-12s %4u %4u %08x %2u %2u\n"
10494 + , device_get_nameunit(cap->cc_dev)
10495 + , cap->cc_sessions
10496 + , cap->cc_koperations
10498 + , cap->cc_qblocked
10499 + , cap->cc_kqblocked
10504 +DB_SHOW_COMMAND(crypto, db_show_crypto)
10506 + struct cryptop *crp;
10508 + db_show_drivers();
10511 + db_printf("%4s %8s %4s %4s %4s %4s %8s %8s\n",
10512 + "HID", "Caps", "Ilen", "Olen", "Etype", "Flags",
10513 + "Desc", "Callback");
10514 + TAILQ_FOREACH(crp, &crp_q, crp_next) {
10515 + db_printf("%4u %08x %4u %4u %4u %04x %8p %8p\n"
10516 + , (int) CRYPTO_SESID2HID(crp->crp_sid)
10517 + , (int) CRYPTO_SESID2CAPS(crp->crp_sid)
10518 + , crp->crp_ilen, crp->crp_olen
10522 + , crp->crp_callback
10525 + if (!TAILQ_EMPTY(&crp_ret_q)) {
10526 + db_printf("\n%4s %4s %4s %8s\n",
10527 + "HID", "Etype", "Flags", "Callback");
10528 + TAILQ_FOREACH(crp, &crp_ret_q, crp_next) {
10529 + db_printf("%4u %4u %04x %8p\n"
10530 + , (int) CRYPTO_SESID2HID(crp->crp_sid)
10533 + , crp->crp_callback
10539 +DB_SHOW_COMMAND(kcrypto, db_show_kcrypto)
10541 + struct cryptkop *krp;
10543 + db_show_drivers();
10546 + db_printf("%4s %5s %4s %4s %8s %4s %8s\n",
10547 + "Op", "Status", "#IP", "#OP", "CRID", "HID", "Callback");
10548 + TAILQ_FOREACH(krp, &crp_kq, krp_next) {
10549 + db_printf("%4u %5u %4u %4u %08x %4u %8p\n"
10551 + , krp->krp_status
10552 + , krp->krp_iparams, krp->krp_oparams
10553 + , krp->krp_crid, krp->krp_hid
10554 + , krp->krp_callback
10557 + if (!TAILQ_EMPTY(&crp_ret_q)) {
10558 + db_printf("%4s %5s %8s %4s %8s\n",
10559 + "Op", "Status", "CRID", "HID", "Callback");
10560 + TAILQ_FOREACH(krp, &crp_ret_kq, krp_next) {
10561 + db_printf("%4u %5u %08x %4u %8p\n"
10563 + , krp->krp_status
10564 + , krp->krp_crid, krp->krp_hid
10565 + , krp->krp_callback
10578 + dprintk("%s(0x%x)\n", __FUNCTION__, (int) crypto_init);
10580 + if (crypto_initted)
10582 + crypto_initted = 1;
10584 + spin_lock_init(&crypto_drivers_lock);
10585 + spin_lock_init(&crypto_q_lock);
10586 + spin_lock_init(&crypto_ret_q_lock);
10588 + cryptop_zone = kmem_cache_create("cryptop", sizeof(struct cryptop),
10589 + 0, SLAB_HWCACHE_ALIGN, NULL
10590 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
10595 + cryptodesc_zone = kmem_cache_create("cryptodesc", sizeof(struct cryptodesc),
10596 + 0, SLAB_HWCACHE_ALIGN, NULL
10597 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
10602 + if (cryptodesc_zone == NULL || cryptop_zone == NULL) {
10603 + printk("crypto: crypto_init cannot setup crypto zones\n");
10608 + crypto_drivers_num = CRYPTO_DRIVERS_INITIAL;
10609 + crypto_drivers = kmalloc(crypto_drivers_num * sizeof(struct cryptocap),
10611 + if (crypto_drivers == NULL) {
10612 + printk("crypto: crypto_init cannot setup crypto drivers\n");
10617 + memset(crypto_drivers, 0, crypto_drivers_num * sizeof(struct cryptocap));
10619 + init_completion(&cryptoproc_exited);
10620 + init_completion(&cryptoretproc_exited);
10622 + cryptoproc = 0; /* to avoid race condition where proc runs first */
10623 + cryptoproc = kernel_thread(crypto_proc, NULL, CLONE_FS|CLONE_FILES);
10624 + if (cryptoproc < 0) {
10625 + error = cryptoproc;
10626 + printk("crypto: crypto_init cannot start crypto thread; error %d",
10631 + cryptoretproc = 0; /* to avoid race condition where proc runs first */
10632 + cryptoretproc = kernel_thread(crypto_ret_proc, NULL, CLONE_FS|CLONE_FILES);
10633 + if (cryptoretproc < 0) {
10634 + error = cryptoretproc;
10635 + printk("crypto: crypto_init cannot start cryptoret thread; error %d",
10651 + unsigned long d_flags;
10653 + dprintk("%s()\n", __FUNCTION__);
10656 + * Terminate any crypto threads.
10659 + CRYPTO_DRIVER_LOCK();
10661 + cryptoproc = (pid_t) -1;
10662 + kill_proc(p, SIGTERM, 1);
10663 + wake_up_interruptible(&cryptoproc_wait);
10664 + CRYPTO_DRIVER_UNLOCK();
10666 + wait_for_completion(&cryptoproc_exited);
10668 + CRYPTO_DRIVER_LOCK();
10669 + p = cryptoretproc;
10670 + cryptoretproc = (pid_t) -1;
10671 + kill_proc(p, SIGTERM, 1);
10672 + wake_up_interruptible(&cryptoretproc_wait);
10673 + CRYPTO_DRIVER_UNLOCK();
10675 + wait_for_completion(&cryptoretproc_exited);
10677 + /* XXX flush queues??? */
10680 + * Reclaim dynamically allocated resources.
10682 + if (crypto_drivers != NULL)
10683 + kfree(crypto_drivers);
10685 + if (cryptodesc_zone != NULL)
10686 + kmem_cache_destroy(cryptodesc_zone);
10687 + if (cryptop_zone != NULL)
10688 + kmem_cache_destroy(cryptop_zone);
10692 +EXPORT_SYMBOL(crypto_newsession);
10693 +EXPORT_SYMBOL(crypto_freesession);
10694 +EXPORT_SYMBOL(crypto_get_driverid);
10695 +EXPORT_SYMBOL(crypto_kregister);
10696 +EXPORT_SYMBOL(crypto_register);
10697 +EXPORT_SYMBOL(crypto_unregister);
10698 +EXPORT_SYMBOL(crypto_unregister_all);
10699 +EXPORT_SYMBOL(crypto_unblock);
10700 +EXPORT_SYMBOL(crypto_dispatch);
10701 +EXPORT_SYMBOL(crypto_kdispatch);
10702 +EXPORT_SYMBOL(crypto_freereq);
10703 +EXPORT_SYMBOL(crypto_getreq);
10704 +EXPORT_SYMBOL(crypto_done);
10705 +EXPORT_SYMBOL(crypto_kdone);
10706 +EXPORT_SYMBOL(crypto_getfeat);
10707 +EXPORT_SYMBOL(crypto_userasymcrypto);
10708 +EXPORT_SYMBOL(crypto_getcaps);
10709 +EXPORT_SYMBOL(crypto_find_driver);
10710 +EXPORT_SYMBOL(crypto_find_device_byhid);
10712 +module_init(crypto_init);
10713 +module_exit(crypto_exit);
10715 +MODULE_LICENSE("BSD");
10716 +MODULE_AUTHOR("David McCullough <david_mccullough@securecomputing.com>");
10717 +MODULE_DESCRIPTION("OCF (OpenBSD Cryptographic Framework)");
10718 --- /dev/null 2007-07-04 13:54:27.000000000 +1000
10719 +++ linux/crypto/ocf/criov.c 2007-07-18 13:01:47.000000000 +1000
10721 +/* $OpenBSD: criov.c,v 1.9 2002/01/29 15:48:29 jason Exp $ */
10724 + * Linux port done by David McCullough <david_mccullough@securecomputing.com>
10725 + * Copyright (C) 2006-2007 David McCullough
10726 + * Copyright (C) 2004-2005 Intel Corporation.
10727 + * The license and original author are listed below.
10729 + * Copyright (c) 1999 Theo de Raadt
10731 + * Redistribution and use in source and binary forms, with or without
10732 + * modification, are permitted provided that the following conditions
10735 + * 1. Redistributions of source code must retain the above copyright
10736 + * notice, this list of conditions and the following disclaimer.
10737 + * 2. Redistributions in binary form must reproduce the above copyright
10738 + * notice, this list of conditions and the following disclaimer in the
10739 + * documentation and/or other materials provided with the distribution.
10740 + * 3. The name of the author may not be used to endorse or promote products
10741 + * derived from this software without specific prior written permission.
10743 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
10744 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
10745 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
10746 + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
10747 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
10748 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
10749 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
10750 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
10751 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
10752 + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
10754 +__FBSDID("$FreeBSD: src/sys/opencrypto/criov.c,v 1.5 2006/06/04 22:15:13 pjd Exp $");
10757 +#ifndef AUTOCONF_INCLUDED
10758 +#include <linux/config.h>
10760 +#include <linux/module.h>
10761 +#include <linux/init.h>
10762 +#include <linux/slab.h>
10763 +#include <linux/uio.h>
10764 +#include <linux/skbuff.h>
10765 +#include <linux/kernel.h>
10766 +#include <linux/mm.h>
10767 +#include <asm/io.h>
10770 +#include <cryptodev.h>
10773 + * This macro is only for avoiding code duplication, as we need to skip
10774 + * given number of bytes in the same way in three functions below.
10776 +#define CUIO_SKIP() do { \
10777 + KASSERT(off >= 0, ("%s: off %d < 0", __func__, off)); \
10778 + KASSERT(len >= 0, ("%s: len %d < 0", __func__, len)); \
10779 + while (off > 0) { \
10780 + KASSERT(iol >= 0, ("%s: empty in skip", __func__)); \
10781 + if (off < iov->iov_len) \
10783 + off -= iov->iov_len; \
10790 +cuio_copydata(struct uio* uio, int off, int len, caddr_t cp)
10792 + struct iovec *iov = uio->uio_iov;
10793 + int iol = uio->uio_iovcnt;
10797 + while (len > 0) {
10798 + KASSERT(iol >= 0, ("%s: empty", __func__));
10799 + count = min((int)(iov->iov_len - off), len);
10800 + memcpy(cp, ((caddr_t)iov->iov_base) + off, count);
10810 +cuio_copyback(struct uio* uio, int off, int len, caddr_t cp)
10812 + struct iovec *iov = uio->uio_iov;
10813 + int iol = uio->uio_iovcnt;
10817 + while (len > 0) {
10818 + KASSERT(iol >= 0, ("%s: empty", __func__));
10819 + count = min((int)(iov->iov_len - off), len);
10820 + memcpy(((caddr_t)iov->iov_base) + off, cp, count);
10830 + * Return a pointer to iov/offset of location in iovec list.
10833 +cuio_getptr(struct uio *uio, int loc, int *off)
10835 + struct iovec *iov = uio->uio_iov;
10836 + int iol = uio->uio_iovcnt;
10838 + while (loc >= 0) {
10839 + /* Normal end of search */
10840 + if (loc < iov->iov_len) {
10845 + loc -= iov->iov_len;
10848 + /* Point at the end of valid data */
10849 + *off = iov->iov_len;
10861 +EXPORT_SYMBOL(cuio_copyback);
10862 +EXPORT_SYMBOL(cuio_copydata);
10863 +EXPORT_SYMBOL(cuio_getptr);
10867 +skb_copy_bits_back(struct sk_buff *skb, int offset, caddr_t cp, int len)
10870 + if (offset < skb_headlen(skb)) {
10871 + memcpy(skb->data + offset, cp, min_t(int, skb_headlen(skb), len));
10872 + len -= skb_headlen(skb);
10873 + cp += skb_headlen(skb);
10875 + offset -= skb_headlen(skb);
10876 + for (i = 0; len > 0 && i < skb_shinfo(skb)->nr_frags; i++) {
10877 + if (offset < skb_shinfo(skb)->frags[i].size) {
10878 + memcpy(page_address(skb_shinfo(skb)->frags[i].page) +
10879 + skb_shinfo(skb)->frags[i].page_offset,
10880 + cp, min_t(int, skb_shinfo(skb)->frags[i].size, len));
10881 + len -= skb_shinfo(skb)->frags[i].size;
10882 + cp += skb_shinfo(skb)->frags[i].size;
10884 + offset -= skb_shinfo(skb)->frags[i].size;
10889 +crypto_copyback(int flags, caddr_t buf, int off, int size, caddr_t in)
10892 + if ((flags & CRYPTO_F_SKBUF) != 0)
10893 + skb_copy_bits_back((struct sk_buff *)buf, off, in, size);
10894 + else if ((flags & CRYPTO_F_IOV) != 0)
10895 + cuio_copyback((struct uio *)buf, off, size, in);
10897 + bcopy(in, buf + off, size);
10901 +crypto_copydata(int flags, caddr_t buf, int off, int size, caddr_t out)
10904 + if ((flags & CRYPTO_F_SKBUF) != 0)
10905 + skb_copy_bits((struct sk_buff *)buf, off, out, size);
10906 + else if ((flags & CRYPTO_F_IOV) != 0)
10907 + cuio_copydata((struct uio *)buf, off, size, out);
10909 + bcopy(buf + off, out, size);
10913 +crypto_apply(int flags, caddr_t buf, int off, int len,
10914 + int (*f)(void *, void *, u_int), void *arg)
10919 + if ((flags & CRYPTO_F_SKBUF) != 0)
10920 + error = XXXXXX((struct mbuf *)buf, off, len, f, arg);
10921 + else if ((flags & CRYPTO_F_IOV) != 0)
10922 + error = cuio_apply((struct uio *)buf, off, len, f, arg);
10924 + error = (*f)(arg, buf + off, len);
10927 + KASSERT(0, ("crypto_apply not implemented!\n"));
10932 +EXPORT_SYMBOL(crypto_copyback);
10933 +EXPORT_SYMBOL(crypto_copydata);
10934 +EXPORT_SYMBOL(crypto_apply);
10936 --- /dev/null 2007-07-04 13:54:27.000000000 +1000
10937 +++ linux/crypto/ocf/uio.h 2007-07-03 09:52:33.000000000 +1000
10939 +#ifndef _OCF_UIO_H_
10940 +#define _OCF_UIO_H_
10942 +#include <linux/uio.h>
10945 + * The linux uio.h doesn't have all we need. To be fully api compatible
10946 + * with the BSD cryptodev, we need to keep this around. Perhaps this can
10947 + * be moved back into the linux/uio.h
10949 + * Linux port done by David McCullough <david_mccullough@securecomputing.com>
10950 + * Copyright (C) 2006-2007 David McCullough
10951 + * Copyright (C) 2004-2005 Intel Corporation.
10955 + * The free distribution and use of this software in both source and binary
10956 + * form is allowed (with or without changes) provided that:
10958 + * 1. distributions of this source code include the above copyright
10959 + * notice, this list of conditions and the following disclaimer;
10961 + * 2. distributions in binary form include the above copyright
10962 + * notice, this list of conditions and the following disclaimer
10963 + * in the documentation and/or other associated materials;
10965 + * 3. the copyright holder's name is not used to endorse products
10966 + * built using this software without specific written permission.
10968 + * ALTERNATIVELY, provided that this notice is retained in full, this product
10969 + * may be distributed under the terms of the GNU General Public License (GPL),
10970 + * in which case the provisions of the GPL apply INSTEAD OF those given above.
10974 + * This software is provided 'as is' with no explicit or implied warranties
10975 + * in respect of its properties, including, but not limited to, correctness
10976 + * and/or fitness for purpose.
10977 + * ---------------------------------------------------------------------------
10981 + struct iovec *uio_iov;
10983 + off_t uio_offset;
10986 + enum uio_seg uio_segflg;
10987 + enum uio_rw uio_rw;
10988 + struct thread *uio_td;
10993 --- /dev/null 2007-07-04 13:54:27.000000000 +1000
10994 +++ linux/crypto/ocf/talitos/talitos.c 2008-04-01 15:48:31.000000000 +1000
10997 + * crypto/ocf/talitos/talitos.c
10999 + * An OCF-Linux module that uses Freescale's SEC to do the crypto.
11000 + * Based on crypto/ocf/hifn and crypto/ocf/safe OCF drivers
11002 + * Copyright (c) 2006 Freescale Semiconductor, Inc.
11004 + * This code written by Kim A. B. Phillips <kim.phillips@freescale.com>
11005 + * some code copied from files with the following:
11006 + * Copyright (C) 2004-2007 David McCullough <david_mccullough@securecomputing.com>
11008 + * Redistribution and use in source and binary forms, with or without
11009 + * modification, are permitted provided that the following conditions
11012 + * 1. Redistributions of source code must retain the above copyright
11013 + * notice, this list of conditions and the following disclaimer.
11014 + * 2. Redistributions in binary form must reproduce the above copyright
11015 + * notice, this list of conditions and the following disclaimer in the
11016 + * documentation and/or other materials provided with the distribution.
11017 + * 3. The name of the author may not be used to endorse or promote products
11018 + * derived from this software without specific prior written permission.
11020 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
11021 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
11022 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
11023 + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
11024 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
11025 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
11026 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
11027 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
11028 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
11029 + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
11031 + * ---------------------------------------------------------------------------
11035 + * The Freescale SEC (also known as 'talitos') resides on the
11036 + * internal bus, and runs asynchronous to the processor core. It has
11037 + * a wide gamut of cryptographic acceleration features, including single-
11038 + * pass IPsec (also known as algorithm chaining). To properly utilize
11039 + * all of the SEC's performance enhancing features, further reworking
11040 + * of higher level code (framework, applications) will be necessary.
11042 + * The following table shows which SEC version is present in which devices:
11044 + * Devices SEC version
11046 + * 8272, 8248 SEC 1.0
11047 + * 885, 875 SEC 1.2
11048 + * 8555E, 8541E SEC 2.0
11052 + * The following table shows the features offered by each SEC version:
11055 + * version Bus I/F Clock nels DEU AESU AFEU MDEU PKEU RNG KEU
11057 + * SEC 1.0 internal 64b 100MHz 4 1 1 1 1 1 1 0
11058 + * SEC 1.2 internal 32b 66MHz 1 1 1 0 1 0 0 0
11059 + * SEC 2.0 internal 64b 166MHz 4 1 1 1 1 1 1 0
11060 + * SEC 2.01 internal 64b 166MHz 4 1 1 1 1 1 1 0
11061 + * SEC 2.1 internal 64b 333MHz 4 1 1 1 1 1 1 1
11063 + * Each execution unit in the SEC has two modes of execution; channel and
11064 + * slave/debug. This driver employs the channel infrastructure in the
11065 + * device for convenience. Only the RNG is directly accessed due to the
11066 + * convenience of its random fifo pool. The relationship between the
11067 + * channels and execution units is depicted in the following diagram:
11069 + * ------- ------------
11070 + * ---| ch0 |---| |
11072 + * | |------+-------+-------+-------+------------
11073 + * ------- | | | | | | |
11074 + * ---| ch1 |---| | | | | | |
11075 + * ------- | | ------ ------ ------ ------ ------
11076 + * |controller| |DEU | |AESU| |MDEU| |PKEU| ... |RNG |
11077 + * ------- | | ------ ------ ------ ------ ------
11078 + * ---| ch2 |---| | | | | | |
11079 + * ------- | | | | | | |
11080 + * | |------+-------+-------+-------+------------
11082 + * ---| ch3 |---| |
11083 + * ------- ------------
11085 + * Channel ch0 may drive an aes operation to the aes unit (AESU),
11086 + * and, at the same time, ch1 may drive a message digest operation
11087 + * to the mdeu. Each channel has an input descriptor FIFO, and the
11088 + * FIFO can contain, e.g. on the 8541E, up to 24 entries, before a
11089 + * a buffer overrun error is triggered. The controller is responsible
11090 + * for fetching the data from descriptor pointers, and passing the
11091 + * data to the appropriate EUs. The controller also writes the
11092 + * cryptographic operation's result to memory. The SEC notifies
11093 + * completion by triggering an interrupt and/or setting the 1st byte
11094 + * of the hdr field to 0xff.
11097 + * o support more algorithms
11098 + * o support more versions of the SEC
11099 + * o add support for linux 2.4
11100 + * o scatter-gather (sg) support
11101 + * o add support for public key ops (PKEU)
11102 + * o add statistics
11105 +#ifndef AUTOCONF_INCLUDED
11106 +#include <linux/config.h>
11108 +#include <linux/module.h>
11109 +#include <linux/init.h>
11110 +#include <linux/interrupt.h>
11111 +#include <linux/spinlock.h>
11112 +#include <linux/random.h>
11113 +#include <linux/skbuff.h>
11114 +#include <asm/scatterlist.h>
11115 +#include <linux/dma-mapping.h> /* dma_map_single() */
11116 +#include <linux/moduleparam.h>
11118 +#include <linux/version.h>
11119 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)
11120 +#include <linux/platform_device.h>
11123 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
11124 +#include <linux/of_platform.h>
11127 +#include <cryptodev.h>
11130 +#define DRV_NAME "talitos"
11132 +#include "talitos_dev.h"
11133 +#include "talitos_soft.h"
11135 +#define read_random(p,l) get_random_bytes(p,l)
11137 +const char talitos_driver_name[] = "Talitos OCF";
11138 +const char talitos_driver_version[] = "0.2";
11140 +static int talitos_newsession(device_t dev, u_int32_t *sidp,
11141 + struct cryptoini *cri);
11142 +static int talitos_freesession(device_t dev, u_int64_t tid);
11143 +static int talitos_process(device_t dev, struct cryptop *crp, int hint);
11144 +static void dump_talitos_status(struct talitos_softc *sc);
11145 +static int talitos_submit(struct talitos_softc *sc, struct talitos_desc *td,
11147 +static void talitos_doneprocessing(struct talitos_softc *sc);
11148 +static void talitos_init_device(struct talitos_softc *sc);
11149 +static void talitos_reset_device_master(struct talitos_softc *sc);
11150 +static void talitos_reset_device(struct talitos_softc *sc);
11151 +static void talitos_errorprocessing(struct talitos_softc *sc);
11152 +#ifdef CONFIG_PPC_MERGE
11153 +static int talitos_probe(struct of_device *ofdev, const struct of_device_id *match);
11154 +static int talitos_remove(struct of_device *ofdev);
11156 +static int talitos_probe(struct platform_device *pdev);
11157 +static int talitos_remove(struct platform_device *pdev);
11159 +#ifdef CONFIG_OCF_RANDOMHARVEST
11160 +static int talitos_read_random(void *arg, u_int32_t *buf, int maxwords);
11161 +static void talitos_rng_init(struct talitos_softc *sc);
11164 +static device_method_t talitos_methods = {
11165 + /* crypto device methods */
11166 + DEVMETHOD(cryptodev_newsession, talitos_newsession),
11167 + DEVMETHOD(cryptodev_freesession,talitos_freesession),
11168 + DEVMETHOD(cryptodev_process, talitos_process),
11171 +#define debug talitos_debug
11172 +int talitos_debug = 0;
11173 +module_param(talitos_debug, int, 0644);
11174 +MODULE_PARM_DESC(talitos_debug, "Enable debug");
11176 +static inline void talitos_write(volatile unsigned *addr, u32 val)
11178 + out_be32(addr, val);
11181 +static inline u32 talitos_read(volatile unsigned *addr)
11184 + val = in_be32(addr);
11188 +static void dump_talitos_status(struct talitos_softc *sc)
11190 + unsigned int v, v_hi, i, *ptr;
11191 + v = talitos_read(sc->sc_base_addr + TALITOS_MCR);
11192 + v_hi = talitos_read(sc->sc_base_addr + TALITOS_MCR_HI);
11193 + printk(KERN_INFO "%s: MCR 0x%08x_%08x\n",
11194 + device_get_nameunit(sc->sc_cdev), v, v_hi);
11195 + v = talitos_read(sc->sc_base_addr + TALITOS_IMR);
11196 + v_hi = talitos_read(sc->sc_base_addr + TALITOS_IMR_HI);
11197 + printk(KERN_INFO "%s: IMR 0x%08x_%08x\n",
11198 + device_get_nameunit(sc->sc_cdev), v, v_hi);
11199 + v = talitos_read(sc->sc_base_addr + TALITOS_ISR);
11200 + v_hi = talitos_read(sc->sc_base_addr + TALITOS_ISR_HI);
11201 + printk(KERN_INFO "%s: ISR 0x%08x_%08x\n",
11202 + device_get_nameunit(sc->sc_cdev), v, v_hi);
11203 + for (i = 0; i < sc->sc_num_channels; i++) {
11204 + v = talitos_read(sc->sc_base_addr + i*TALITOS_CH_OFFSET +
11205 + TALITOS_CH_CDPR);
11206 + v_hi = talitos_read(sc->sc_base_addr + i*TALITOS_CH_OFFSET +
11207 + TALITOS_CH_CDPR_HI);
11208 + printk(KERN_INFO "%s: CDPR ch%d 0x%08x_%08x\n",
11209 + device_get_nameunit(sc->sc_cdev), i, v, v_hi);
11211 + for (i = 0; i < sc->sc_num_channels; i++) {
11212 + v = talitos_read(sc->sc_base_addr + i*TALITOS_CH_OFFSET +
11213 + TALITOS_CH_CCPSR);
11214 + v_hi = talitos_read(sc->sc_base_addr + i*TALITOS_CH_OFFSET +
11215 + TALITOS_CH_CCPSR_HI);
11216 + printk(KERN_INFO "%s: CCPSR ch%d 0x%08x_%08x\n",
11217 + device_get_nameunit(sc->sc_cdev), i, v, v_hi);
11219 + ptr = sc->sc_base_addr + TALITOS_CH_DESCBUF;
11220 + for (i = 0; i < 16; i++) {
11221 + v = talitos_read(ptr++); v_hi = talitos_read(ptr++);
11222 + printk(KERN_INFO "%s: DESCBUF ch0 0x%08x_%08x (tdp%02d)\n",
11223 + device_get_nameunit(sc->sc_cdev), v, v_hi, i);
11229 +#ifdef CONFIG_OCF_RANDOMHARVEST
11231 + * pull random numbers off the RNG FIFO, not exceeding amount available
11234 +talitos_read_random(void *arg, u_int32_t *buf, int maxwords)
11236 + struct talitos_softc *sc = (struct talitos_softc *) arg;
11240 + DPRINTF("%s()\n", __FUNCTION__);
11242 + /* check for things like FIFO underflow */
11243 + v = talitos_read(sc->sc_base_addr + TALITOS_RNGISR_HI);
11244 + if (unlikely(v)) {
11245 + printk(KERN_ERR "%s: RNGISR_HI error %08x\n",
11246 + device_get_nameunit(sc->sc_cdev), v);
11250 + * OFL is number of available 64-bit words,
11251 + * shift and convert to a 32-bit word count
11253 + v = talitos_read(sc->sc_base_addr + TALITOS_RNGSR_HI);
11254 + v = (v & TALITOS_RNGSR_HI_OFL) >> (16 - 1);
11255 + if (maxwords > v)
11257 + for (rc = 0; rc < maxwords; rc++) {
11258 + buf[rc] = talitos_read(sc->sc_base_addr +
11259 + TALITOS_RNG_FIFO + rc*sizeof(u_int32_t));
11261 + if (maxwords & 1) {
11263 + * RNG will complain with an AE in the RNGISR
11264 + * if we don't complete the pairs of 32-bit reads
11265 + * to its 64-bit register based FIFO
11267 + v = talitos_read(sc->sc_base_addr +
11268 + TALITOS_RNG_FIFO + rc*sizeof(u_int32_t));
11275 +talitos_rng_init(struct talitos_softc *sc)
11279 + DPRINTF("%s()\n", __FUNCTION__);
11280 + /* reset RNG EU */
11281 + v = talitos_read(sc->sc_base_addr + TALITOS_RNGRCR_HI);
11282 + v |= TALITOS_RNGRCR_HI_SR;
11283 + talitos_write(sc->sc_base_addr + TALITOS_RNGRCR_HI, v);
11284 + while ((talitos_read(sc->sc_base_addr + TALITOS_RNGSR_HI)
11285 + & TALITOS_RNGSR_HI_RD) == 0)
11288 + * we tell the RNG to start filling the RNG FIFO
11289 + * by writing the RNGDSR
11291 + v = talitos_read(sc->sc_base_addr + TALITOS_RNGDSR_HI);
11292 + talitos_write(sc->sc_base_addr + TALITOS_RNGDSR_HI, v);
11294 + * 64 bits of data will be pushed onto the FIFO every
11295 + * 256 SEC cycles until the FIFO is full. The RNG then
11296 + * attempts to keep the FIFO full.
11298 + v = talitos_read(sc->sc_base_addr + TALITOS_RNGISR_HI);
11300 + printk(KERN_ERR "%s: RNGISR_HI error %08x\n",
11301 + device_get_nameunit(sc->sc_cdev), v);
11305 + * n.b. we need to add a FIPS test here - if the RNG is going
11306 + * to fail, it's going to fail at reset time
11310 +#endif /* CONFIG_OCF_RANDOMHARVEST */
11313 + * Generate a new software session.
11316 +talitos_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
11318 + struct cryptoini *c, *encini = NULL, *macini = NULL;
11319 + struct talitos_softc *sc = device_get_softc(dev);
11320 + struct talitos_session *ses = NULL;
11323 + DPRINTF("%s()\n", __FUNCTION__);
11324 + if (sidp == NULL || cri == NULL || sc == NULL) {
11325 + DPRINTF("%s,%d - EINVAL\n", __FILE__, __LINE__);
11328 + for (c = cri; c != NULL; c = c->cri_next) {
11329 + if (c->cri_alg == CRYPTO_MD5 ||
11330 + c->cri_alg == CRYPTO_MD5_HMAC ||
11331 + c->cri_alg == CRYPTO_SHA1 ||
11332 + c->cri_alg == CRYPTO_SHA1_HMAC ||
11333 + c->cri_alg == CRYPTO_NULL_HMAC) {
11337 + } else if (c->cri_alg == CRYPTO_DES_CBC ||
11338 + c->cri_alg == CRYPTO_3DES_CBC ||
11339 + c->cri_alg == CRYPTO_AES_CBC ||
11340 + c->cri_alg == CRYPTO_NULL_CBC) {
11345 + DPRINTF("UNKNOWN c->cri_alg %d\n", encini->cri_alg);
11349 + if (encini == NULL && macini == NULL)
11352 + /* validate key length */
11353 + switch (encini->cri_alg) {
11354 + case CRYPTO_DES_CBC:
11355 + if (encini->cri_klen != 64)
11358 + case CRYPTO_3DES_CBC:
11359 + if (encini->cri_klen != 192) {
11363 + case CRYPTO_AES_CBC:
11364 + if (encini->cri_klen != 128 &&
11365 + encini->cri_klen != 192 &&
11366 + encini->cri_klen != 256)
11370 + DPRINTF("UNKNOWN encini->cri_alg %d\n",
11371 + encini->cri_alg);
11376 + if (sc->sc_sessions == NULL) {
11377 + ses = sc->sc_sessions = (struct talitos_session *)
11378 + kmalloc(sizeof(struct talitos_session), SLAB_ATOMIC);
11381 + memset(ses, 0, sizeof(struct talitos_session));
11383 + sc->sc_nsessions = 1;
11385 + for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
11386 + if (sc->sc_sessions[sesn].ses_used == 0) {
11387 + ses = &sc->sc_sessions[sesn];
11392 + if (ses == NULL) {
11393 + /* allocating session */
11394 + sesn = sc->sc_nsessions;
11395 + ses = (struct talitos_session *) kmalloc(
11396 + (sesn + 1) * sizeof(struct talitos_session),
11401 + (sesn + 1) * sizeof(struct talitos_session));
11402 + memcpy(ses, sc->sc_sessions,
11403 + sesn * sizeof(struct talitos_session));
11404 + memset(sc->sc_sessions, 0,
11405 + sesn * sizeof(struct talitos_session));
11406 + kfree(sc->sc_sessions);
11407 + sc->sc_sessions = ses;
11408 + ses = &sc->sc_sessions[sesn];
11409 + sc->sc_nsessions++;
11413 + ses->ses_used = 1;
11417 + /* XXX may read fewer than requested */
11418 + read_random(ses->ses_iv, sizeof(ses->ses_iv));
11420 + ses->ses_klen = (encini->cri_klen + 7) / 8;
11421 + memcpy(ses->ses_key, encini->cri_key, ses->ses_klen);
11423 + /* doing hash on top of cipher */
11424 + ses->ses_hmac_len = (macini->cri_klen + 7) / 8;
11425 + memcpy(ses->ses_hmac, macini->cri_key,
11426 + ses->ses_hmac_len);
11428 + } else if (macini) {
11430 + ses->ses_klen = (macini->cri_klen + 7) / 8;
11431 + memcpy(ses->ses_key, macini->cri_key, ses->ses_klen);
11434 + /* back compat way of determining MSC result len */
11436 + ses->ses_mlen = macini->cri_mlen;
11437 + if (ses->ses_mlen == 0) {
11438 + if (macini->cri_alg == CRYPTO_MD5_HMAC)
11439 + ses->ses_mlen = MD5_HASH_LEN;
11441 + ses->ses_mlen = SHA1_HASH_LEN;
11445 + /* really should make up a template td here,
11446 + * and only fill things like i/o and direction in process() */
11448 + /* assign session ID */
11449 + *sidp = TALITOS_SID(sc->sc_num, sesn);
11454 + * Deallocate a session.
11457 +talitos_freesession(device_t dev, u_int64_t tid)
11459 + struct talitos_softc *sc = device_get_softc(dev);
11460 + int session, ret;
11461 + u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;
11465 + session = TALITOS_SESSION(sid);
11466 + if (session < sc->sc_nsessions) {
11467 + memset(&sc->sc_sessions[session], 0,
11468 + sizeof(sc->sc_sessions[session]));
11476 + * launch device processing - it will come back with done notification
11477 + * in the form of an interrupt and/or HDR_DONE_BITS in header
11481 + struct talitos_softc *sc,
11482 + struct talitos_desc *td,
11487 + v = dma_map_single(NULL, td, sizeof(*td), DMA_TO_DEVICE);
11488 + talitos_write(sc->sc_base_addr +
11489 + chsel*TALITOS_CH_OFFSET + TALITOS_CH_FF, 0);
11490 + talitos_write(sc->sc_base_addr +
11491 + chsel*TALITOS_CH_OFFSET + TALITOS_CH_FF_HI, v);
11496 +talitos_process(device_t dev, struct cryptop *crp, int hint)
11498 + int i, err = 0, ivsize;
11499 + struct talitos_softc *sc = device_get_softc(dev);
11500 + struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
11502 + struct talitos_session *ses;
11503 + struct talitos_desc *td;
11504 + unsigned long flags;
11505 + /* descriptor mappings */
11506 + int hmac_key, hmac_data, cipher_iv, cipher_key,
11507 + in_fifo, out_fifo, cipher_iv_out;
11508 + static int chsel = -1;
11510 + DPRINTF("%s()\n", __FUNCTION__);
11512 + if (crp == NULL || crp->crp_callback == NULL || sc == NULL) {
11515 + crp->crp_etype = 0;
11516 + if (TALITOS_SESSION(crp->crp_sid) >= sc->sc_nsessions) {
11520 + ses = &sc->sc_sessions[TALITOS_SESSION(crp->crp_sid)];
11522 + /* enter the channel scheduler */
11523 + spin_lock_irqsave(&sc->sc_chnfifolock[sc->sc_num_channels], flags);
11525 + /* reuse channel that already had/has requests for the required EU */
11526 + for (i = 0; i < sc->sc_num_channels; i++) {
11527 + if (sc->sc_chnlastalg[i] == crp->crp_desc->crd_alg)
11530 + if (i == sc->sc_num_channels) {
11532 + * haven't seen this algo the last sc_num_channels or more
11533 + * use round robin in this case
11534 + * nb: sc->sc_num_channels must be power of 2
11536 + chsel = (chsel + 1) & (sc->sc_num_channels - 1);
11539 + * matches channel with same target execution unit;
11540 + * use same channel in this case
11544 + sc->sc_chnlastalg[chsel] = crp->crp_desc->crd_alg;
11546 + /* release the channel scheduler lock */
11547 + spin_unlock_irqrestore(&sc->sc_chnfifolock[sc->sc_num_channels], flags);
11549 + /* acquire the selected channel fifo lock */
11550 + spin_lock_irqsave(&sc->sc_chnfifolock[chsel], flags);
11552 + /* find and reserve next available descriptor-cryptop pair */
11553 + for (i = 0; i < sc->sc_chfifo_len; i++) {
11554 + if (sc->sc_chnfifo[chsel][i].cf_desc.hdr == 0) {
11556 + * ensure correct descriptor formation by
11557 + * avoiding inadvertently setting "optional" entries
11558 + * e.g. not using "optional" dptr2 for MD/HMAC descs
11560 + memset(&sc->sc_chnfifo[chsel][i].cf_desc,
11562 + /* reserve it with done notification request bit */
11563 + sc->sc_chnfifo[chsel][i].cf_desc.hdr |=
11564 + TALITOS_DONE_NOTIFY;
11568 + spin_unlock_irqrestore(&sc->sc_chnfifolock[chsel], flags);
11570 + if (i == sc->sc_chfifo_len) {
11576 + td = &sc->sc_chnfifo[chsel][i].cf_desc;
11577 + sc->sc_chnfifo[chsel][i].cf_crp = crp;
11579 + crd1 = crp->crp_desc;
11580 + if (crd1 == NULL) {
11584 + crd2 = crd1->crd_next;
11585 + /* prevent compiler warning */
11588 + if (crd2 == NULL) {
11589 + td->hdr |= TD_TYPE_COMMON_NONSNOOP_NO_AFEU;
11590 + /* assign descriptor dword ptr mappings for this desc. type */
11594 + cipher_iv_out = 5;
11595 + if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
11596 + crd1->crd_alg == CRYPTO_SHA1_HMAC ||
11597 + crd1->crd_alg == CRYPTO_SHA1 ||
11598 + crd1->crd_alg == CRYPTO_MD5) {
11602 + } else if (crd1->crd_alg == CRYPTO_DES_CBC ||
11603 + crd1->crd_alg == CRYPTO_3DES_CBC ||
11604 + crd1->crd_alg == CRYPTO_AES_CBC ||
11605 + crd1->crd_alg == CRYPTO_ARC4) {
11610 + DPRINTF("UNKNOWN crd1->crd_alg %d\n", crd1->crd_alg);
11615 + if (sc->sc_desc_types & TALITOS_HAS_DT_IPSEC_ESP) {
11616 + td->hdr |= TD_TYPE_IPSEC_ESP;
11618 + DPRINTF("unimplemented: multiple descriptor ipsec\n");
11622 + /* assign descriptor dword ptr mappings for this desc. type */
11629 + cipher_iv_out = 6;
11630 + if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
11631 + crd1->crd_alg == CRYPTO_SHA1_HMAC ||
11632 + crd1->crd_alg == CRYPTO_MD5 ||
11633 + crd1->crd_alg == CRYPTO_SHA1) &&
11634 + (crd2->crd_alg == CRYPTO_DES_CBC ||
11635 + crd2->crd_alg == CRYPTO_3DES_CBC ||
11636 + crd2->crd_alg == CRYPTO_AES_CBC ||
11637 + crd2->crd_alg == CRYPTO_ARC4) &&
11638 + ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
11641 + } else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
11642 + crd1->crd_alg == CRYPTO_ARC4 ||
11643 + crd1->crd_alg == CRYPTO_3DES_CBC ||
11644 + crd1->crd_alg == CRYPTO_AES_CBC) &&
11645 + (crd2->crd_alg == CRYPTO_MD5_HMAC ||
11646 + crd2->crd_alg == CRYPTO_SHA1_HMAC ||
11647 + crd2->crd_alg == CRYPTO_MD5 ||
11648 + crd2->crd_alg == CRYPTO_SHA1) &&
11649 + (crd1->crd_flags & CRD_F_ENCRYPT)) {
11653 + /* We cannot order the SEC as requested */
11654 + printk("%s: cannot do the order\n",
11655 + device_get_nameunit(sc->sc_cdev));
11660 + /* assign in_fifo and out_fifo based on input/output struct type */
11661 + if (crp->crp_flags & CRYPTO_F_SKBUF) {
11662 + /* using SKB buffers */
11663 + struct sk_buff *skb = (struct sk_buff *)crp->crp_buf;
11664 + if (skb_shinfo(skb)->nr_frags) {
11665 + printk("%s: skb frags unimplemented\n",
11666 + device_get_nameunit(sc->sc_cdev));
11670 + td->ptr[in_fifo].ptr = dma_map_single(NULL, skb->data,
11671 + skb->len, DMA_TO_DEVICE);
11672 + td->ptr[in_fifo].len = skb->len;
11673 + td->ptr[out_fifo].ptr = dma_map_single(NULL, skb->data,
11674 + skb->len, DMA_TO_DEVICE);
11675 + td->ptr[out_fifo].len = skb->len;
11676 + td->ptr[hmac_data].ptr = dma_map_single(NULL, skb->data,
11677 + skb->len, DMA_TO_DEVICE);
11678 + } else if (crp->crp_flags & CRYPTO_F_IOV) {
11679 + /* using IOV buffers */
11680 + struct uio *uiop = (struct uio *)crp->crp_buf;
11681 + if (uiop->uio_iovcnt > 1) {
11682 + printk("%s: iov frags unimplemented\n",
11683 + device_get_nameunit(sc->sc_cdev));
11687 + td->ptr[in_fifo].ptr = dma_map_single(NULL,
11688 + uiop->uio_iov->iov_base, crp->crp_ilen, DMA_TO_DEVICE);
11689 + td->ptr[in_fifo].len = crp->crp_ilen;
11690 + /* crp_olen is never set; always use crp_ilen */
11691 + td->ptr[out_fifo].ptr = dma_map_single(NULL,
11692 + uiop->uio_iov->iov_base,
11693 + crp->crp_ilen, DMA_TO_DEVICE);
11694 + td->ptr[out_fifo].len = crp->crp_ilen;
11696 + /* using contig buffers */
11697 + td->ptr[in_fifo].ptr = dma_map_single(NULL,
11698 + crp->crp_buf, crp->crp_ilen, DMA_TO_DEVICE);
11699 + td->ptr[in_fifo].len = crp->crp_ilen;
11700 + td->ptr[out_fifo].ptr = dma_map_single(NULL,
11701 + crp->crp_buf, crp->crp_ilen, DMA_TO_DEVICE);
11702 + td->ptr[out_fifo].len = crp->crp_ilen;
11705 + switch (enccrd->crd_alg) {
11706 + case CRYPTO_3DES_CBC:
11707 + td->hdr |= TALITOS_MODE0_DEU_3DES;
11708 + /* FALLTHROUGH */
11709 + case CRYPTO_DES_CBC:
11710 + td->hdr |= TALITOS_SEL0_DEU
11711 + | TALITOS_MODE0_DEU_CBC;
11712 + if (enccrd->crd_flags & CRD_F_ENCRYPT)
11713 + td->hdr |= TALITOS_MODE0_DEU_ENC;
11714 + ivsize = 2*sizeof(u_int32_t);
11715 + DPRINTF("%cDES ses %d ch %d len %d\n",
11716 + (td->hdr & TALITOS_MODE0_DEU_3DES)?'3':'1',
11717 + (u32)TALITOS_SESSION(crp->crp_sid),
11718 + chsel, td->ptr[in_fifo].len);
11720 + case CRYPTO_AES_CBC:
11721 + td->hdr |= TALITOS_SEL0_AESU
11722 + | TALITOS_MODE0_AESU_CBC;
11723 + if (enccrd->crd_flags & CRD_F_ENCRYPT)
11724 + td->hdr |= TALITOS_MODE0_AESU_ENC;
11725 + ivsize = 4*sizeof(u_int32_t);
11726 + DPRINTF("AES ses %d ch %d len %d\n",
11727 + (u32)TALITOS_SESSION(crp->crp_sid),
11728 + chsel, td->ptr[in_fifo].len);
11731 + printk("%s: unimplemented enccrd->crd_alg %d\n",
11732 + device_get_nameunit(sc->sc_cdev), enccrd->crd_alg);
11737 + * Setup encrypt/decrypt state. When using basic ops
11738 + * we can't use an inline IV because hash/crypt offset
11739 + * must be from the end of the IV to the start of the
11740 + * crypt data and this leaves out the preceding header
11741 + * from the hash calculation. Instead we place the IV
11742 + * in the state record and set the hash/crypt offset to
11743 + * copy both the header+IV.
11745 + if (enccrd->crd_flags & CRD_F_ENCRYPT) {
11746 + td->hdr |= TALITOS_DIR_OUTBOUND;
11747 + if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
11748 + iv = enccrd->crd_iv;
11750 + iv = (caddr_t) ses->ses_iv;
11751 + if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) {
11752 + crypto_copyback(crp->crp_flags, crp->crp_buf,
11753 + enccrd->crd_inject, ivsize, iv);
11756 + td->hdr |= TALITOS_DIR_INBOUND;
11757 + if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) {
11758 + iv = enccrd->crd_iv;
11759 + bcopy(enccrd->crd_iv, iv, ivsize);
11761 + iv = (caddr_t) ses->ses_iv;
11762 + crypto_copydata(crp->crp_flags, crp->crp_buf,
11763 + enccrd->crd_inject, ivsize, iv);
11766 + td->ptr[cipher_iv].ptr = dma_map_single(NULL, iv, ivsize,
11768 + td->ptr[cipher_iv].len = ivsize;
11770 + * we don't need the cipher iv out length/pointer
11771 + * field to do ESP IPsec. Therefore we set the len field as 0,
11772 + * which tells the SEC not to do anything with this len/ptr
11773 + * field. Previously, when the length/pointer was pointing to iv,
11774 + * it gave us corruption of packets.
11776 + td->ptr[cipher_iv_out].len = 0;
11778 + if (enccrd && maccrd) {
11779 + /* this is ipsec only for now */
11780 + td->hdr |= TALITOS_SEL1_MDEU
11781 + | TALITOS_MODE1_MDEU_INIT
11782 + | TALITOS_MODE1_MDEU_PAD;
11783 + switch (maccrd->crd_alg) {
11785 + td->hdr |= TALITOS_MODE1_MDEU_MD5;
11787 + case CRYPTO_MD5_HMAC:
11788 + td->hdr |= TALITOS_MODE1_MDEU_MD5_HMAC;
11790 + case CRYPTO_SHA1:
11791 + td->hdr |= TALITOS_MODE1_MDEU_SHA1;
11793 + case CRYPTO_SHA1_HMAC:
11794 + td->hdr |= TALITOS_MODE1_MDEU_SHA1_HMAC;
11797 + /* We cannot order the SEC as requested */
11798 + printk("%s: cannot do the order\n",
11799 + device_get_nameunit(sc->sc_cdev));
11803 + if ((maccrd->crd_alg == CRYPTO_MD5_HMAC) ||
11804 + (maccrd->crd_alg == CRYPTO_SHA1_HMAC)) {
11806 + * The offset from hash data to the start of
11807 + * crypt data is the difference in the skips.
11809 + /* ipsec only for now */
11810 + td->ptr[hmac_key].ptr = dma_map_single(NULL,
11811 + ses->ses_hmac, ses->ses_hmac_len, DMA_TO_DEVICE);
11812 + td->ptr[hmac_key].len = ses->ses_hmac_len;
11813 + td->ptr[in_fifo].ptr += enccrd->crd_skip;
11814 + td->ptr[in_fifo].len = enccrd->crd_len;
11815 + td->ptr[out_fifo].ptr += enccrd->crd_skip;
11816 + td->ptr[out_fifo].len = enccrd->crd_len;
11817 + /* bytes of HMAC to postpend to ciphertext */
11818 + td->ptr[out_fifo].extent = ses->ses_mlen;
11819 + td->ptr[hmac_data].ptr += maccrd->crd_skip;
11820 + td->ptr[hmac_data].len = enccrd->crd_skip - maccrd->crd_skip;
11822 + if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT) {
11823 + printk("%s: CRD_F_KEY_EXPLICIT unimplemented\n",
11824 + device_get_nameunit(sc->sc_cdev));
11827 + if (!enccrd && maccrd) {
11828 + /* single MD5 or SHA */
11829 + td->hdr |= TALITOS_SEL0_MDEU
11830 + | TALITOS_MODE0_MDEU_INIT
11831 + | TALITOS_MODE0_MDEU_PAD;
11832 + switch (maccrd->crd_alg) {
11834 + td->hdr |= TALITOS_MODE0_MDEU_MD5;
11835 + DPRINTF("MD5 ses %d ch %d len %d\n",
11836 + (u32)TALITOS_SESSION(crp->crp_sid),
11837 + chsel, td->ptr[in_fifo].len);
11839 + case CRYPTO_MD5_HMAC:
11840 + td->hdr |= TALITOS_MODE0_MDEU_MD5_HMAC;
11842 + case CRYPTO_SHA1:
11843 + td->hdr |= TALITOS_MODE0_MDEU_SHA1;
11844 + DPRINTF("SHA1 ses %d ch %d len %d\n",
11845 + (u32)TALITOS_SESSION(crp->crp_sid),
11846 + chsel, td->ptr[in_fifo].len);
11848 + case CRYPTO_SHA1_HMAC:
11849 + td->hdr |= TALITOS_MODE0_MDEU_SHA1_HMAC;
11852 + /* We cannot order the SEC as requested */
11853 + DPRINTF("cannot do the order\n");
11858 + if (crp->crp_flags & CRYPTO_F_IOV)
11859 + td->ptr[out_fifo].ptr += maccrd->crd_inject;
11861 + if ((maccrd->crd_alg == CRYPTO_MD5_HMAC) ||
11862 + (maccrd->crd_alg == CRYPTO_SHA1_HMAC)) {
11863 + td->ptr[hmac_key].ptr = dma_map_single(NULL,
11864 + ses->ses_hmac, ses->ses_hmac_len,
11866 + td->ptr[hmac_key].len = ses->ses_hmac_len;
11870 + /* using process key (session data has duplicate) */
11871 + td->ptr[cipher_key].ptr = dma_map_single(NULL,
11872 + enccrd->crd_key, (enccrd->crd_klen + 7) / 8,
11874 + td->ptr[cipher_key].len = (enccrd->crd_klen + 7) / 8;
11876 + /* descriptor complete - GO! */
11877 + return talitos_submit(sc, td, chsel);
11880 + if (err != ERESTART) {
11881 + crp->crp_etype = err;
11882 + crypto_done(crp);
11887 +/* go through all channels' descriptors, notifying OCF what has
11888 + * _and_hasn't_ successfully completed and reset the device
11889 + * (otherwise it's up to decoding desc hdrs!)
11891 +static void talitos_errorprocessing(struct talitos_softc *sc)
11893 + unsigned long flags;
11896 + /* disable further scheduling until under control */
11897 + spin_lock_irqsave(&sc->sc_chnfifolock[sc->sc_num_channels], flags);
11899 + if (debug) dump_talitos_status(sc);
11900 + /* go through descriptors, try and salvage those successfully done,
11901 + * and EIO those that weren't
11903 + for (i = 0; i < sc->sc_num_channels; i++) {
11904 + spin_lock_irqsave(&sc->sc_chnfifolock[i], flags);
11905 + for (j = 0; j < sc->sc_chfifo_len; j++) {
11906 + if (sc->sc_chnfifo[i][j].cf_desc.hdr) {
11907 + if ((sc->sc_chnfifo[i][j].cf_desc.hdr
11908 + & TALITOS_HDR_DONE_BITS)
11909 + != TALITOS_HDR_DONE_BITS) {
11910 + /* this one didn't finish */
11911 + /* signify in crp->etype */
11912 + sc->sc_chnfifo[i][j].cf_crp->crp_etype
11916 + continue; /* free entry */
11917 + /* either way, notify ocf */
11918 + crypto_done(sc->sc_chnfifo[i][j].cf_crp);
11919 + /* and tag it available again
11921 + * memset to ensure correct descriptor formation by
11922 + * avoiding inadvertently setting "optional" entries
11923 + * e.g. not using "optional" dptr2 MD/HMAC processing
11925 + memset(&sc->sc_chnfifo[i][j].cf_desc,
11926 + 0, sizeof(struct talitos_desc));
11928 + spin_unlock_irqrestore(&sc->sc_chnfifolock[i], flags);
11930 + /* reset and initialize the SEC h/w device */
11931 + talitos_reset_device(sc);
11932 + talitos_init_device(sc);
11933 +#ifdef CONFIG_OCF_RANDOMHARVEST
11934 + if (sc->sc_exec_units & TALITOS_HAS_EU_RNG)
11935 + talitos_rng_init(sc);
11938 + /* Okay. Stand by. */
11939 + spin_unlock_irqrestore(&sc->sc_chnfifolock[sc->sc_num_channels], flags);
11944 +/* go through all channels' descriptors, notifying OCF what's been done */
11945 +static void talitos_doneprocessing(struct talitos_softc *sc)
11947 + unsigned long flags;
11950 + /* go through descriptors looking for done bits */
11951 + for (i = 0; i < sc->sc_num_channels; i++) {
11952 + spin_lock_irqsave(&sc->sc_chnfifolock[i], flags);
11953 + for (j = 0; j < sc->sc_chfifo_len; j++) {
11954 + /* descriptor has done bits set? */
11955 + if ((sc->sc_chnfifo[i][j].cf_desc.hdr
11956 + & TALITOS_HDR_DONE_BITS)
11957 + == TALITOS_HDR_DONE_BITS) {
11959 + crypto_done(sc->sc_chnfifo[i][j].cf_crp);
11960 + /* and tag it available again
11962 + * memset to ensure correct descriptor formation by
11963 + * avoiding inadvertently setting "optional" entries
11964 + * e.g. not using "optional" dptr2 MD/HMAC processing
11966 + memset(&sc->sc_chnfifo[i][j].cf_desc,
11967 + 0, sizeof(struct talitos_desc));
11970 + spin_unlock_irqrestore(&sc->sc_chnfifolock[i], flags);
11975 +static irqreturn_t
11976 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
11977 +talitos_intr(int irq, void *arg)
11979 +talitos_intr(int irq, void *arg, struct pt_regs *regs)
11982 + struct talitos_softc *sc = arg;
11983 + u_int32_t v, v_hi;
11986 + v = talitos_read(sc->sc_base_addr + TALITOS_ISR);
11987 + v_hi = talitos_read(sc->sc_base_addr + TALITOS_ISR_HI);
11988 + talitos_write(sc->sc_base_addr + TALITOS_ICR, v);
11989 + talitos_write(sc->sc_base_addr + TALITOS_ICR_HI, v_hi);
11991 + if (unlikely(v & TALITOS_ISR_ERROR)) {
11992 + /* Okay, Houston, we've had a problem here. */
11993 + printk(KERN_DEBUG "%s: got error interrupt - ISR 0x%08x_%08x\n",
11994 + device_get_nameunit(sc->sc_cdev), v, v_hi);
11995 + talitos_errorprocessing(sc);
11997 + if (likely(v & TALITOS_ISR_DONE)) {
11998 + talitos_doneprocessing(sc);
12000 + return IRQ_HANDLED;
12004 + * Initialize registers we need to touch only once.
12007 +talitos_init_device(struct talitos_softc *sc)
12012 + DPRINTF("%s()\n", __FUNCTION__);
12014 + /* init all channels */
12015 + for (i = 0; i < sc->sc_num_channels; i++) {
12016 + v = talitos_read(sc->sc_base_addr +
12017 + i*TALITOS_CH_OFFSET + TALITOS_CH_CCCR_HI);
12018 + v |= TALITOS_CH_CCCR_HI_CDWE
12019 + | TALITOS_CH_CCCR_HI_CDIE; /* invoke interrupt if done */
12020 + talitos_write(sc->sc_base_addr +
12021 + i*TALITOS_CH_OFFSET + TALITOS_CH_CCCR_HI, v);
12023 + /* enable all interrupts */
12024 + v = talitos_read(sc->sc_base_addr + TALITOS_IMR);
12025 + v |= TALITOS_IMR_ALL;
12026 + talitos_write(sc->sc_base_addr + TALITOS_IMR, v);
12027 + v = talitos_read(sc->sc_base_addr + TALITOS_IMR_HI);
12028 + v |= TALITOS_IMR_HI_ERRONLY;
12029 + talitos_write(sc->sc_base_addr + TALITOS_IMR_HI, v);
12034 + * set the master reset bit on the device.
12037 +talitos_reset_device_master(struct talitos_softc *sc)
12041 + /* Reset the device by writing 1 to MCR:SWR and waiting 'til cleared */
12042 + v = talitos_read(sc->sc_base_addr + TALITOS_MCR);
12043 + talitos_write(sc->sc_base_addr + TALITOS_MCR, v | TALITOS_MCR_SWR);
12045 + while (talitos_read(sc->sc_base_addr + TALITOS_MCR) & TALITOS_MCR_SWR)
12052 + * Resets the device. Values in the registers are left as is
12053 + * from the reset (i.e. initial values are assigned elsewhere).
12056 +talitos_reset_device(struct talitos_softc *sc)
12061 + DPRINTF("%s()\n", __FUNCTION__);
12065 + * errata documentation: warning: certain SEC interrupts
12066 + * are not fully cleared by writing the MCR:SWR bit,
12067 + * set bit twice to completely reset
12069 + talitos_reset_device_master(sc); /* once */
12070 + talitos_reset_device_master(sc); /* and once again */
12072 + /* reset all channels */
12073 + for (i = 0; i < sc->sc_num_channels; i++) {
12074 + v = talitos_read(sc->sc_base_addr + i*TALITOS_CH_OFFSET +
12075 + TALITOS_CH_CCCR);
12076 + talitos_write(sc->sc_base_addr + i*TALITOS_CH_OFFSET +
12077 + TALITOS_CH_CCCR, v | TALITOS_CH_CCCR_RESET);
12081 +/* Set up the crypto device structure, private data,
12082 + * and anything else we need before we start */
12083 +#ifdef CONFIG_PPC_MERGE
12084 +static int talitos_probe(struct of_device *ofdev, const struct of_device_id *match)
12086 +static int talitos_probe(struct platform_device *pdev)
12089 + struct talitos_softc *sc = NULL;
12090 + struct resource *r;
12091 +#ifdef CONFIG_PPC_MERGE
12092 + struct device *device = &ofdev->dev;
12093 + struct device_node *np = ofdev->node;
12094 + const unsigned int *prop;
12096 + struct resource res;
12098 + static int num_chips = 0;
12102 + DPRINTF("%s()\n", __FUNCTION__);
12104 + sc = (struct talitos_softc *) kmalloc(sizeof(*sc), GFP_KERNEL);
12107 + memset(sc, 0, sizeof(*sc));
12109 + softc_device_init(sc, DRV_NAME, num_chips, talitos_methods);
12113 +#ifndef CONFIG_PPC_MERGE
12114 + sc->sc_dev = pdev;
12116 + sc->sc_num = num_chips++;
12118 +#ifdef CONFIG_PPC_MERGE
12119 + dev_set_drvdata(device, sc);
12121 + platform_set_drvdata(sc->sc_dev, sc);
12124 + /* get the irq line */
12125 +#ifdef CONFIG_PPC_MERGE
12126 + err = of_address_to_resource(np, 0, &res);
12131 + sc->sc_irq = irq_of_parse_and_map(np, 0);
12133 + /* get a pointer to the register memory */
12134 + r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
12136 + sc->sc_irq = platform_get_irq(pdev, 0);
12138 + rc = request_irq(sc->sc_irq, talitos_intr, 0,
12139 + device_get_nameunit(sc->sc_cdev), sc);
12141 + printk(KERN_ERR "%s: failed to hook irq %d\n",
12142 + device_get_nameunit(sc->sc_cdev), sc->sc_irq);
12147 + sc->sc_base_addr = (ocf_iomem_t) ioremap(r->start, (r->end - r->start));
12148 + if (!sc->sc_base_addr) {
12149 + printk(KERN_ERR "%s: failed to ioremap\n",
12150 + device_get_nameunit(sc->sc_cdev));
12154 + /* figure out our SEC's properties and capabilities */
12155 + sc->sc_chiprev = (u64)talitos_read(sc->sc_base_addr + TALITOS_ID) << 32
12156 + | talitos_read(sc->sc_base_addr + TALITOS_ID_HI);
12157 + DPRINTF("sec id 0x%llx\n", sc->sc_chiprev);
12159 +#ifdef CONFIG_PPC_MERGE
12160 + /* get SEC properties from device tree, defaulting to SEC 2.0 */
12162 + prop = of_get_property(np, "num-channels", NULL);
12163 + sc->sc_num_channels = prop ? *prop : TALITOS_NCHANNELS_SEC_2_0;
12165 + prop = of_get_property(np, "channel-fifo-len", NULL);
12166 + sc->sc_chfifo_len = prop ? *prop : TALITOS_CHFIFOLEN_SEC_2_0;
12168 + prop = of_get_property(np, "exec-units-mask", NULL);
12169 + sc->sc_exec_units = prop ? *prop : TALITOS_HAS_EUS_SEC_2_0;
12171 + prop = of_get_property(np, "descriptor-types-mask", NULL);
12172 + sc->sc_desc_types = prop ? *prop : TALITOS_HAS_DESCTYPES_SEC_2_0;
12174 + /* bulk should go away with openfirmware flat device tree support */
12175 + if (sc->sc_chiprev & TALITOS_ID_SEC_2_0) {
12176 + sc->sc_num_channels = TALITOS_NCHANNELS_SEC_2_0;
12177 + sc->sc_chfifo_len = TALITOS_CHFIFOLEN_SEC_2_0;
12178 + sc->sc_exec_units = TALITOS_HAS_EUS_SEC_2_0;
12179 + sc->sc_desc_types = TALITOS_HAS_DESCTYPES_SEC_2_0;
12181 + printk(KERN_ERR "%s: failed to id device\n",
12182 + device_get_nameunit(sc->sc_cdev));
12187 + /* + 1 is for the meta-channel lock used by the channel scheduler */
12188 + sc->sc_chnfifolock = (spinlock_t *) kmalloc(
12189 + (sc->sc_num_channels + 1) * sizeof(spinlock_t), GFP_KERNEL);
12190 + if (!sc->sc_chnfifolock)
12192 + for (i = 0; i < sc->sc_num_channels + 1; i++) {
12193 + spin_lock_init(&sc->sc_chnfifolock[i]);
12196 + sc->sc_chnlastalg = (int *) kmalloc(
12197 + sc->sc_num_channels * sizeof(int), GFP_KERNEL);
12198 + if (!sc->sc_chnlastalg)
12200 + memset(sc->sc_chnlastalg, 0, sc->sc_num_channels * sizeof(int));
12202 + sc->sc_chnfifo = (struct desc_cryptop_pair **) kmalloc(
12203 + sc->sc_num_channels * sizeof(struct desc_cryptop_pair *),
12205 + if (!sc->sc_chnfifo)
12207 + for (i = 0; i < sc->sc_num_channels; i++) {
12208 + sc->sc_chnfifo[i] = (struct desc_cryptop_pair *) kmalloc(
12209 + sc->sc_chfifo_len * sizeof(struct desc_cryptop_pair),
12211 + if (!sc->sc_chnfifo[i])
12213 + memset(sc->sc_chnfifo[i], 0,
12214 + sc->sc_chfifo_len * sizeof(struct desc_cryptop_pair));
12217 + /* reset and initialize the SEC h/w device */
12218 + talitos_reset_device(sc);
12219 + talitos_init_device(sc);
12221 + sc->sc_cid = crypto_get_driverid(softc_get_device(sc),CRYPTOCAP_F_HARDWARE);
12222 + if (sc->sc_cid < 0) {
12223 + printk(KERN_ERR "%s: could not get crypto driver id\n",
12224 + device_get_nameunit(sc->sc_cdev));
12228 + /* register algorithms with the framework */
12229 + printk("%s:", device_get_nameunit(sc->sc_cdev));
12231 + if (sc->sc_exec_units & TALITOS_HAS_EU_RNG) {
12233 +#ifdef CONFIG_OCF_RANDOMHARVEST
12234 + talitos_rng_init(sc);
12235 + crypto_rregister(sc->sc_cid, talitos_read_random, sc);
12238 + if (sc->sc_exec_units & TALITOS_HAS_EU_DEU) {
12239 + printk(" des/3des");
12240 + crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
12241 + crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
12243 + if (sc->sc_exec_units & TALITOS_HAS_EU_AESU) {
12245 + crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
12247 + if (sc->sc_exec_units & TALITOS_HAS_EU_MDEU) {
12249 + crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0);
12250 + /* HMAC support only with IPsec for now */
12251 + crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
12253 + crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0);
12254 + /* HMAC support only with IPsec for now */
12255 + crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
12261 +#ifndef CONFIG_PPC_MERGE
12262 + talitos_remove(pdev);
12267 +#ifdef CONFIG_PPC_MERGE
12268 +static int talitos_remove(struct of_device *ofdev)
12270 +static int talitos_remove(struct platform_device *pdev)
12273 +#ifdef CONFIG_PPC_MERGE
12274 + struct talitos_softc *sc = dev_get_drvdata(&ofdev->dev);
12276 + struct talitos_softc *sc = platform_get_drvdata(pdev);
12280 + DPRINTF("%s()\n", __FUNCTION__);
12281 + if (sc->sc_cid >= 0)
12282 + crypto_unregister_all(sc->sc_cid);
12283 + if (sc->sc_chnfifo) {
12284 + for (i = 0; i < sc->sc_num_channels; i++)
12285 + if (sc->sc_chnfifo[i])
12286 + kfree(sc->sc_chnfifo[i]);
12287 + kfree(sc->sc_chnfifo);
12289 + if (sc->sc_chnlastalg)
12290 + kfree(sc->sc_chnlastalg);
12291 + if (sc->sc_chnfifolock)
12292 + kfree(sc->sc_chnfifolock);
12293 + if (sc->sc_irq != -1)
12294 + free_irq(sc->sc_irq, sc);
12295 + if (sc->sc_base_addr)
12296 + iounmap((void *) sc->sc_base_addr);
12301 +#ifdef CONFIG_PPC_MERGE
12302 +static struct of_device_id talitos_match[] = {
12304 + .type = "crypto",
12305 + .compatible = "talitos",
12310 +MODULE_DEVICE_TABLE(of, talitos_match);
12312 +static struct of_platform_driver talitos_driver = {
12313 + .name = DRV_NAME,
12314 + .match_table = talitos_match,
12315 + .probe = talitos_probe,
12316 + .remove = talitos_remove,
12319 +static int __init talitos_init(void)
12321 + return of_register_platform_driver(&talitos_driver);
12324 +static void __exit talitos_exit(void)
12326 + of_unregister_platform_driver(&talitos_driver);
12329 +/* Structure for a platform device driver */
12330 +static struct platform_driver talitos_driver = {
12331 + .probe = talitos_probe,
12332 + .remove = talitos_remove,
12334 + .name = "fsl-sec2",
12338 +static int __init talitos_init(void)
12340 + return platform_driver_register(&talitos_driver);
12343 +static void __exit talitos_exit(void)
12345 + platform_driver_unregister(&talitos_driver);
12349 +module_init(talitos_init);
12350 +module_exit(talitos_exit);
12352 +MODULE_LICENSE("Dual BSD/GPL");
12353 +MODULE_AUTHOR("kim.phillips@freescale.com");
12354 +MODULE_DESCRIPTION("OCF driver for Freescale SEC (talitos)");
12355 --- /dev/null 2007-07-04 13:54:27.000000000 +1000
12356 +++ linux/crypto/ocf/talitos/talitos_soft.h 2007-07-20 11:47:16.000000000 +1000
12359 + * Freescale SEC data structures for integration with ocf-linux
12361 + * Copyright (c) 2006 Freescale Semiconductor, Inc.
12363 + * Redistribution and use in source and binary forms, with or without
12364 + * modification, are permitted provided that the following conditions
12367 + * 1. Redistributions of source code must retain the above copyright
12368 + * notice, this list of conditions and the following disclaimer.
12369 + * 2. Redistributions in binary form must reproduce the above copyright
12370 + * notice, this list of conditions and the following disclaimer in the
12371 + * documentation and/or other materials provided with the distribution.
12372 + * 3. The name of the author may not be used to endorse or promote products
12373 + * derived from this software without specific prior written permission.
12375 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
12376 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
12377 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
12378 + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
12379 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
12380 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
12381 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
12382 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
12383 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
12384 + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
12388 + * paired descriptor and associated crypto operation
12390 +struct desc_cryptop_pair {
12391 + struct talitos_desc cf_desc; /* descriptor ptr */
12392 + struct cryptop *cf_crp; /* cryptop ptr */
12396 + * Holds data specific to a single talitos device.
12398 +struct talitos_softc {
12399 + softc_device_decl sc_cdev;
12400 + struct platform_device *sc_dev; /* device backpointer */
12401 + ocf_iomem_t sc_base_addr;
12403 + int sc_num; /* if we have multiple chips */
12404 + int32_t sc_cid; /* crypto tag */
12405 + u64 sc_chiprev; /* major/minor chip revision */
12406 + int sc_nsessions;
12407 + struct talitos_session *sc_sessions;
12408 + int sc_num_channels;/* number of crypto channels */
12409 + int sc_chfifo_len; /* channel fetch fifo len */
12410 + int sc_exec_units; /* execution units mask */
12411 + int sc_desc_types; /* descriptor types mask */
12413 + * mutual exclusion for intra-channel resources, e.g. fetch fifos
12414 + * the last entry is a meta-channel lock used by the channel scheduler
12416 + spinlock_t *sc_chnfifolock;
12417 +	/* sc_chnlastalg contains last algorithm for that channel */
12418 + int *sc_chnlastalg;
12419 + /* sc_chnfifo holds pending descriptor--crypto operation pairs */
12420 + struct desc_cryptop_pair **sc_chnfifo;
12423 +struct talitos_session {
12424 + u_int32_t ses_used;
12425 + u_int32_t ses_klen; /* key length in bits */
12426 + u_int32_t ses_key[8]; /* DES/3DES/AES key */
12427 + u_int32_t ses_hmac[5]; /* hmac inner state */
12428 + u_int32_t ses_hmac_len; /* hmac length */
12429 + u_int32_t ses_iv[4]; /* DES/3DES/AES iv */
12430 + u_int32_t ses_mlen; /* desired hash result len (12=ipsec or 16) */
12433 +#define TALITOS_SESSION(sid) ((sid) & 0x0fffffff)
12434 +#define TALITOS_SID(crd, sesn) (((crd) << 28) | ((sesn) & 0x0fffffff))
12435 --- /dev/null 2007-07-04 13:54:27.000000000 +1000
12436 +++ linux/crypto/ocf/talitos/talitos_dev.h 2007-11-23 07:31:44.000000000 +1000
12439 + * Freescale SEC (talitos) device dependent data structures
12441 + * Copyright (c) 2006 Freescale Semiconductor, Inc.
12443 + * Redistribution and use in source and binary forms, with or without
12444 + * modification, are permitted provided that the following conditions
12447 + * 1. Redistributions of source code must retain the above copyright
12448 + * notice, this list of conditions and the following disclaimer.
12449 + * 2. Redistributions in binary form must reproduce the above copyright
12450 + * notice, this list of conditions and the following disclaimer in the
12451 + * documentation and/or other materials provided with the distribution.
12452 + * 3. The name of the author may not be used to endorse or promote products
12453 + * derived from this software without specific prior written permission.
12455 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
12456 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
12457 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
12458 + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
12459 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
12460 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
12461 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
12462 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
12463 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
12464 + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
12468 +/* device ID register values */
12469 +#define TALITOS_ID_SEC_2_0 0x40
12470 +#define TALITOS_ID_SEC_2_1 0x40 /* cross ref with IP block revision reg */
12473 + * following num_channels, channel-fifo-depth, exec-unit-mask, and
12474 + * descriptor-types-mask are for forward-compatibility with openfirmware
12475 + * flat device trees
12479 + * num_channels : the number of channels available in each SEC version.
12482 +/* n.b. this driver requires these values be a power of 2 */
12483 +#define TALITOS_NCHANNELS_SEC_1_0 4
12484 +#define TALITOS_NCHANNELS_SEC_1_2 1
12485 +#define TALITOS_NCHANNELS_SEC_2_0 4
12486 +#define TALITOS_NCHANNELS_SEC_2_01 4
12487 +#define TALITOS_NCHANNELS_SEC_2_1 4
12488 +#define TALITOS_NCHANNELS_SEC_2_4 4
12491 + * channel-fifo-depth : The number of descriptor
12492 + * pointers a channel fetch fifo can hold.
12494 +#define TALITOS_CHFIFOLEN_SEC_1_0 1
12495 +#define TALITOS_CHFIFOLEN_SEC_1_2 1
12496 +#define TALITOS_CHFIFOLEN_SEC_2_0 24
12497 +#define TALITOS_CHFIFOLEN_SEC_2_01 24
12498 +#define TALITOS_CHFIFOLEN_SEC_2_1 24
12499 +#define TALITOS_CHFIFOLEN_SEC_2_4 24
12502 + * exec-unit-mask : The bitmask representing what Execution Units (EUs)
12503 + * are available. EU information should be encoded following the SEC's
12504 + * EU_SEL0 bitfield documentation, i.e. as follows:
12506 + * bit 31 = set if SEC permits no-EU selection (should be always set)
12507 + * bit 30 = set if SEC has the ARC4 EU (AFEU)
12508 + * bit 29 = set if SEC has the des/3des EU (DEU)
12509 + * bit 28 = set if SEC has the message digest EU (MDEU)
12510 + * bit 27 = set if SEC has the random number generator EU (RNG)
12511 + * bit 26 = set if SEC has the public key EU (PKEU)
12512 + * bit 25 = set if SEC has the aes EU (AESU)
12513 + * bit 24 = set if SEC has the Kasumi EU (KEU)
12516 +#define TALITOS_HAS_EU_NONE (1<<0)
12517 +#define TALITOS_HAS_EU_AFEU (1<<1)
12518 +#define TALITOS_HAS_EU_DEU (1<<2)
12519 +#define TALITOS_HAS_EU_MDEU (1<<3)
12520 +#define TALITOS_HAS_EU_RNG (1<<4)
12521 +#define TALITOS_HAS_EU_PKEU (1<<5)
12522 +#define TALITOS_HAS_EU_AESU (1<<6)
12523 +#define TALITOS_HAS_EU_KEU (1<<7)
12525 +/* the corresponding masks for each SEC version */
12526 +#define TALITOS_HAS_EUS_SEC_1_0 0x7f
12527 +#define TALITOS_HAS_EUS_SEC_1_2 0x4d
12528 +#define TALITOS_HAS_EUS_SEC_2_0 0x7f
12529 +#define TALITOS_HAS_EUS_SEC_2_01 0x7f
12530 +#define TALITOS_HAS_EUS_SEC_2_1 0xff
12531 +#define TALITOS_HAS_EUS_SEC_2_4 0x7f
12534 + * descriptor-types-mask : The bitmask representing what descriptors
12535 + * are available. Descriptor type information should be encoded
12536 + * following the SEC's Descriptor Header Dword DESC_TYPE field
12537 + * documentation, i.e. as follows:
12539 + * bit 0 = set if SEC supports the aesu_ctr_nonsnoop desc. type
12540 + * bit 1 = set if SEC supports the ipsec_esp descriptor type
12541 + * bit 2 = set if SEC supports the common_nonsnoop desc. type
12542 + * bit 3 = set if SEC supports the 802.11i AES ccmp desc. type
12543 + * bit 4 = set if SEC supports the hmac_snoop_no_afeu desc. type
12544 + * bit 5 = set if SEC supports the srtp descriptor type
12545 + * bit 6 = set if SEC supports the non_hmac_snoop_no_afeu desc.type
12546 + * bit 7 = set if SEC supports the pkeu_assemble descriptor type
12547 + * bit 8 = set if SEC supports the aesu_key_expand_output desc.type
12548 + * bit 9 = set if SEC supports the pkeu_ptmul descriptor type
12549 + * bit 10 = set if SEC supports the common_nonsnoop_afeu desc. type
12550 + * bit 11 = set if SEC supports the pkeu_ptadd_dbl descriptor type
12552 + * ..and so on and so forth.
12554 +#define TALITOS_HAS_DT_AESU_CTR_NONSNOOP (1<<0)
12555 +#define TALITOS_HAS_DT_IPSEC_ESP (1<<1)
12556 +#define TALITOS_HAS_DT_COMMON_NONSNOOP (1<<2)
12558 +/* the corresponding masks for each SEC version */
12559 +#define TALITOS_HAS_DESCTYPES_SEC_2_0 0x01010ebf
12560 +#define TALITOS_HAS_DESCTYPES_SEC_2_1 0x012b0ebf
12563 + * a TALITOS_xxx_HI address points to the low data bits (32-63) of the register
12566 +/* global register offset addresses */
12567 +#define TALITOS_ID 0x1020
12568 +#define TALITOS_ID_HI 0x1024
12569 +#define TALITOS_MCR 0x1030 /* master control register */
12570 +#define TALITOS_MCR_HI 0x1038 /* master control register */
12571 +#define TALITOS_MCR_SWR 0x1
12572 +#define TALITOS_IMR 0x1008 /* interrupt mask register */
12573 +#define TALITOS_IMR_ALL 0x00010fff /* enable all interrupts mask */
12574 +#define TALITOS_IMR_ERRONLY 0x00010aaa /* enable error interrupts */
12575 +#define TALITOS_IMR_HI 0x100C /* interrupt mask register */
12576 +#define TALITOS_IMR_HI_ALL 0x00323333 /* enable all interrupts mask */
12577 +#define TALITOS_IMR_HI_ERRONLY 0x00222222 /* enable error interrupts */
12578 +#define TALITOS_ISR 0x1010 /* interrupt status register */
12579 +#define TALITOS_ISR_ERROR 0x00010faa /* errors mask */
12580 +#define TALITOS_ISR_DONE 0x00000055 /* channel(s) done mask */
12581 +#define TALITOS_ISR_HI 0x1014 /* interrupt status register */
12582 +#define TALITOS_ICR 0x1018 /* interrupt clear register */
12583 +#define TALITOS_ICR_HI 0x101C /* interrupt clear register */
12585 +/* channel register address stride */
12586 +#define TALITOS_CH_OFFSET 0x100
12588 +/* channel register offset addresses and bits */
12589 +#define TALITOS_CH_CCCR 0x1108 /* Crypto-Channel Config Register */
12590 +#define TALITOS_CH_CCCR_RESET 0x1 /* Channel Reset bit */
12591 +#define TALITOS_CH_CCCR_HI 0x110c /* Crypto-Channel Config Register */
12592 +#define TALITOS_CH_CCCR_HI_CDWE 0x10 /* Channel done writeback enable bit */
12593 +#define TALITOS_CH_CCCR_HI_NT 0x4 /* Notification type bit */
12594 +#define TALITOS_CH_CCCR_HI_CDIE 0x2 /* Channel Done Interrupt Enable bit */
12595 +#define TALITOS_CH_CCPSR 0x1110 /* Crypto-Channel Pointer Status Reg */
12596 +#define TALITOS_CH_CCPSR_HI 0x1114 /* Crypto-Channel Pointer Status Reg */
12597 +#define TALITOS_CH_FF 0x1148 /* Fetch FIFO */
12598 +#define TALITOS_CH_FF_HI 0x114c /* Fetch FIFO's FETCH_ADRS */
12599 +#define TALITOS_CH_CDPR 0x1140 /* Crypto-Channel Pointer Status Reg */
12600 +#define TALITOS_CH_CDPR_HI 0x1144 /* Crypto-Channel Pointer Status Reg */
12601 +#define TALITOS_CH_DESCBUF 0x1180 /* (thru 11bf) Crypto-Channel
12602 + * Descriptor Buffer (debug) */
12604 +/* execution unit register offset addresses and bits */
12605 +#define TALITOS_DEUSR 0x2028 /* DEU status register */
12606 +#define TALITOS_DEUSR_HI 0x202c /* DEU status register */
12607 +#define TALITOS_DEUISR 0x2030 /* DEU interrupt status register */
12608 +#define TALITOS_DEUISR_HI 0x2034 /* DEU interrupt status register */
12609 +#define TALITOS_DEUICR 0x2038 /* DEU interrupt control register */
12610 +#define TALITOS_DEUICR_HI 0x203c /* DEU interrupt control register */
12611 +#define TALITOS_AESUISR 0x4030 /* AESU interrupt status register */
12612 +#define TALITOS_AESUISR_HI 0x4034 /* AESU interrupt status register */
12613 +#define TALITOS_AESUICR 0x4038 /* AESU interrupt control register */
12614 +#define TALITOS_AESUICR_HI 0x403c /* AESU interrupt control register */
12615 +#define TALITOS_MDEUISR 0x6030 /* MDEU interrupt status register */
12616 +#define TALITOS_MDEUISR_HI 0x6034 /* MDEU interrupt status register */
12617 +#define TALITOS_RNGSR 0xa028 /* RNG status register */
12618 +#define TALITOS_RNGSR_HI 0xa02c /* RNG status register */
12619 +#define TALITOS_RNGSR_HI_RD 0x1 /* RNG Reset done */
12620 +#define TALITOS_RNGSR_HI_OFL 0xff0000/* number of dwords in RNG output FIFO*/
12621 +#define TALITOS_RNGDSR 0xa010 /* RNG data size register */
12622 +#define TALITOS_RNGDSR_HI 0xa014 /* RNG data size register */
12623 +#define TALITOS_RNG_FIFO 0xa800 /* RNG FIFO - pool of random numbers */
12624 +#define TALITOS_RNGISR 0xa030 /* RNG Interrupt status register */
12625 +#define TALITOS_RNGISR_HI 0xa034 /* RNG Interrupt status register */
12626 +#define TALITOS_RNGRCR 0xa018 /* RNG Reset control register */
12627 +#define TALITOS_RNGRCR_HI 0xa01c /* RNG Reset control register */
12628 +#define TALITOS_RNGRCR_HI_SR 0x1 /* RNG RNGRCR:Software Reset */
12630 +/* descriptor pointer entry */
12631 +struct talitos_desc_ptr {
12632 + u16 len; /* length */
12633 + u8 extent; /* jump (to s/g link table) and extent */
12634 + u8 res; /* reserved */
12635 + u32 ptr; /* pointer */
12639 +struct talitos_desc {
12640 + u32 hdr; /* header */
12641 + u32 res; /* reserved */
12642 + struct talitos_desc_ptr ptr[7]; /* ptr/len pair array */
12645 +/* talitos descriptor header (hdr) bits */
12647 +/* primary execution unit select */
12648 +#define TALITOS_SEL0_AFEU 0x10000000
12649 +#define TALITOS_SEL0_DEU 0x20000000
12650 +#define TALITOS_SEL0_MDEU 0x30000000
12651 +#define TALITOS_SEL0_RNG 0x40000000
12652 +#define TALITOS_SEL0_PKEU 0x50000000
12653 +#define TALITOS_SEL0_AESU 0x60000000
12655 +/* primary execution unit mode (MODE0) and derivatives */
12656 +#define TALITOS_MODE0_AESU_CBC 0x00200000
12657 +#define TALITOS_MODE0_AESU_ENC 0x00100000
12658 +#define TALITOS_MODE0_DEU_CBC 0x00400000
12659 +#define TALITOS_MODE0_DEU_3DES 0x00200000
12660 +#define TALITOS_MODE0_DEU_ENC 0x00100000
12661 +#define TALITOS_MODE0_MDEU_INIT 0x01000000 /* init starting regs */
12662 +#define TALITOS_MODE0_MDEU_HMAC 0x00800000
12663 +#define TALITOS_MODE0_MDEU_PAD 0x00400000 /* PD */
12664 +#define TALITOS_MODE0_MDEU_MD5 0x00200000
12665 +#define TALITOS_MODE0_MDEU_SHA256 0x00100000
12666 +#define TALITOS_MODE0_MDEU_SHA1 0x00000000 /* SHA-160 */
12667 +#define TALITOS_MODE0_MDEU_MD5_HMAC \
12668 + (TALITOS_MODE0_MDEU_MD5 | TALITOS_MODE0_MDEU_HMAC)
12669 +#define TALITOS_MODE0_MDEU_SHA256_HMAC \
12670 + (TALITOS_MODE0_MDEU_SHA256 | TALITOS_MODE0_MDEU_HMAC)
12671 +#define TALITOS_MODE0_MDEU_SHA1_HMAC \
12672 + (TALITOS_MODE0_MDEU_SHA1 | TALITOS_MODE0_MDEU_HMAC)
12674 +/* secondary execution unit select (SEL1) */
12675 +/* it's MDEU or nothing */
12676 +#define TALITOS_SEL1_MDEU 0x00030000
12678 +/* secondary execution unit mode (MODE1) and derivatives */
12679 +#define TALITOS_MODE1_MDEU_INIT 0x00001000 /* init starting regs */
12680 +#define TALITOS_MODE1_MDEU_HMAC 0x00000800
12681 +#define TALITOS_MODE1_MDEU_PAD 0x00000400 /* PD */
12682 +#define TALITOS_MODE1_MDEU_MD5 0x00000200
12683 +#define TALITOS_MODE1_MDEU_SHA256 0x00000100
12684 +#define TALITOS_MODE1_MDEU_SHA1 0x00000000 /* SHA-160 */
12685 +#define TALITOS_MODE1_MDEU_MD5_HMAC \
12686 + (TALITOS_MODE1_MDEU_MD5 | TALITOS_MODE1_MDEU_HMAC)
12687 +#define TALITOS_MODE1_MDEU_SHA256_HMAC \
12688 + (TALITOS_MODE1_MDEU_SHA256 | TALITOS_MODE1_MDEU_HMAC)
12689 +#define TALITOS_MODE1_MDEU_SHA1_HMAC \
12690 + (TALITOS_MODE1_MDEU_SHA1 | TALITOS_MODE1_MDEU_HMAC)
12692 +/* direction of overall data flow (DIR) */
12693 +#define TALITOS_DIR_OUTBOUND 0x00000000
12694 +#define TALITOS_DIR_INBOUND 0x00000002
12696 +/* done notification (DN) */
12697 +#define TALITOS_DONE_NOTIFY 0x00000001
12699 +/* descriptor types */
12700 +/* odd numbers here are valid on SEC2 and greater only (e.g. ipsec_esp) */
12701 +#define TD_TYPE_AESU_CTR_NONSNOOP (0 << 3)
12702 +#define TD_TYPE_IPSEC_ESP (1 << 3)
12703 +#define TD_TYPE_COMMON_NONSNOOP_NO_AFEU (2 << 3)
12704 +#define TD_TYPE_HMAC_SNOOP_NO_AFEU (4 << 3)
12706 +#define TALITOS_HDR_DONE_BITS 0xff000000
12708 +#define DPRINTF(a...) do { \
12710 + printk("%s: ", sc ? \
12711 + device_get_nameunit(sc->sc_cdev) : "talitos"); \
12715 --- /dev/null 2007-07-04 13:54:27.000000000 +1000
12716 +++ linux/crypto/ocf/random.c 2007-12-04 13:56:03.000000000 +1000
12719 + * A system independent way of adding entropy to the kernel's pool
12720 + * this way the drivers can focus on the real work and we can take
12721 + * care of pushing it to the appropriate place in the kernel.
12723 + * This should be fast and callable from timers/interrupts
12725 + * Written by David McCullough <david_mccullough@securecomputing.com>
12726 + * Copyright (C) 2006-2007 David McCullough
12727 + * Copyright (C) 2004-2005 Intel Corporation.
12731 + * The free distribution and use of this software in both source and binary
12732 + * form is allowed (with or without changes) provided that:
12734 + * 1. distributions of this source code include the above copyright
12735 + * notice, this list of conditions and the following disclaimer;
12737 + * 2. distributions in binary form include the above copyright
12738 + * notice, this list of conditions and the following disclaimer
12739 + * in the documentation and/or other associated materials;
12741 + * 3. the copyright holder's name is not used to endorse products
12742 + * built using this software without specific written permission.
12744 + * ALTERNATIVELY, provided that this notice is retained in full, this product
12745 + * may be distributed under the terms of the GNU General Public License (GPL),
12746 + * in which case the provisions of the GPL apply INSTEAD OF those given above.
12750 + * This software is provided 'as is' with no explicit or implied warranties
12751 + * in respect of its properties, including, but not limited to, correctness
12752 + * and/or fitness for purpose.
12755 +#ifndef AUTOCONF_INCLUDED
12756 +#include <linux/config.h>
12758 +#include <linux/module.h>
12759 +#include <linux/init.h>
12760 +#include <linux/list.h>
12761 +#include <linux/slab.h>
12762 +#include <linux/wait.h>
12763 +#include <linux/sched.h>
12764 +#include <linux/spinlock.h>
12765 +#include <linux/version.h>
12766 +#include <linux/unistd.h>
12767 +#include <linux/poll.h>
12768 +#include <linux/random.h>
12769 +#include <cryptodev.h>
12771 +#ifdef CONFIG_OCF_FIPS
12772 +#include "rndtest.h"
12775 +#ifndef HAS_RANDOM_INPUT_WAIT
12776 +#error "Please do not enable OCF_RANDOMHARVEST unless you have applied patches"
12780 + * a hack to access the debug levels from the crypto driver
12782 +extern int crypto_debug;
12783 +#define debug crypto_debug
12786 + * a list of all registered random providers
12788 +static LIST_HEAD(random_ops);
12789 +static int started = 0;
12790 +static int initted = 0;
12792 +struct random_op {
12793 + struct list_head random_list;
12794 + u_int32_t driverid;
12795 + int (*read_random)(void *arg, u_int32_t *buf, int len);
12799 +static int random_proc(void *arg);
12801 +static pid_t randomproc = (pid_t) -1;
12802 +static spinlock_t random_lock;
12805 + * just init the spin locks
12808 +crypto_random_init(void)
12810 + spin_lock_init(&random_lock);
12816 + * Add the given random reader to our list (if not present)
12817 + * and start the thread (if not already started)
12819 + * we have to assume that driver id is ok for now
12823 + u_int32_t driverid,
12824 + int (*read_random)(void *arg, u_int32_t *buf, int len),
12827 + unsigned long flags;
12829 + struct random_op *rops, *tmp;
12831 + dprintk("%s,%d: %s(0x%x, %p, %p)\n", __FILE__, __LINE__,
12832 + __FUNCTION__, driverid, read_random, arg);
12835 + crypto_random_init();
12838 + struct cryptocap *cap;
12840 + cap = crypto_checkdriver(driverid);
12845 + list_for_each_entry_safe(rops, tmp, &random_ops, random_list) {
12846 + if (rops->driverid == driverid && rops->read_random == read_random)
12850 + rops = (struct random_op *) kmalloc(sizeof(*rops), GFP_KERNEL);
12854 + rops->driverid = driverid;
12855 + rops->read_random = read_random;
12858 + spin_lock_irqsave(&random_lock, flags);
12859 + list_add_tail(&rops->random_list, &random_ops);
12861 + randomproc = kernel_thread(random_proc, NULL, CLONE_FS|CLONE_FILES);
12862 + if (randomproc < 0) {
12863 + ret = randomproc;
12864 + printk("crypto: crypto_rregister cannot start random thread; "
12865 + "error %d", ret);
12869 + spin_unlock_irqrestore(&random_lock, flags);
12873 +EXPORT_SYMBOL(crypto_rregister);
12876 +crypto_runregister_all(u_int32_t driverid)
12878 + struct random_op *rops, *tmp;
12879 + unsigned long flags;
12881 + dprintk("%s,%d: %s(0x%x)\n", __FILE__, __LINE__, __FUNCTION__, driverid);
12883 + list_for_each_entry_safe(rops, tmp, &random_ops, random_list) {
12884 + if (rops->driverid == driverid) {
12885 + list_del(&rops->random_list);
12890 + spin_lock_irqsave(&random_lock, flags);
12891 + if (list_empty(&random_ops) && started)
12892 + kill_proc(randomproc, SIGKILL, 1);
12893 + spin_unlock_irqrestore(&random_lock, flags);
12896 +EXPORT_SYMBOL(crypto_runregister_all);
12899 + * while we can add entropy to random.c continue to read random data from
12900 + * the drivers and push it to random.
12903 +random_proc(void *arg)
12911 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
12913 + spin_lock_irq(¤t->sigmask_lock);
12914 + sigemptyset(¤t->blocked);
12915 + recalc_sigpending(current);
12916 + spin_unlock_irq(¤t->sigmask_lock);
12917 + sprintf(current->comm, "ocf-random");
12919 + daemonize("ocf-random");
12920 + allow_signal(SIGKILL);
12924 + set_fs(get_ds());
12926 +#ifdef CONFIG_OCF_FIPS
12927 +#define NUM_INT (RNDTEST_NBYTES/sizeof(int))
12929 +#define NUM_INT 32
12933 + * some devices can transfer their RNG data directly into memory,
12934 + * so make sure it is device friendly
12936 + buf = kmalloc(NUM_INT * sizeof(int), GFP_DMA);
12937 + if (NULL == buf) {
12938 + printk("crypto: RNG could not allocate memory\n");
12939 + retval = -ENOMEM;
12943 + wantcnt = NUM_INT; /* start by adding some entropy */
12946 + * it's possible due to errors or driver removal that we no longer
12947 + * have anything to do, if so exit or we will consume all the CPU
12950 + while (!list_empty(&random_ops)) {
12951 + struct random_op *rops, *tmp;
12953 +#ifdef CONFIG_OCF_FIPS
12955 + wantcnt = NUM_INT; /* FIPS mode can do 20000 bits or none */
12958 + /* see if we can get enough entropy to make the world
12959 + * a better place.
12961 + while (bufcnt < wantcnt && bufcnt < NUM_INT) {
12962 + list_for_each_entry_safe(rops, tmp, &random_ops, random_list) {
12964 + n = (*rops->read_random)(rops->arg, &buf[bufcnt],
12965 + NUM_INT - bufcnt);
12967 + /* on failure remove the random number generator */
12969 + list_del(&rops->random_list);
12970 + printk("crypto: RNG (driverid=0x%x) failed, disabling\n",
12973 + } else if (n > 0)
12976 + /* give up CPU for a bit, just in case as this is a loop */
12981 +#ifdef CONFIG_OCF_FIPS
12982 + if (bufcnt > 0 && rndtest_buf((unsigned char *) &buf[0])) {
12983 + dprintk("crypto: buffer had fips errors, discarding\n");
12989 + * if we have a certified buffer, we can send some data
12990 + * to /dev/random and move along
12992 + if (bufcnt > 0) {
12993 + /* add what we have */
12994 + random_input_words(buf, bufcnt, bufcnt*sizeof(int)*8);
12998 + /* give up CPU for a bit so we don't hog while filling */
13001 + /* wait for needing more */
13002 + wantcnt = random_input_wait();
13004 + if (wantcnt <= 0)
13005 + wantcnt = 0; /* try to get some info again */
13007 + /* round up to one word or we can loop forever */
13008 + wantcnt = (wantcnt + (sizeof(int)*8)) / (sizeof(int)*8);
13009 + if (wantcnt > NUM_INT) {
13010 + wantcnt = NUM_INT;
13013 + if (signal_pending(current)) {
13014 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
13015 + spin_lock_irq(¤t->sigmask_lock);
13017 + flush_signals(current);
13018 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
13019 + spin_unlock_irq(¤t->sigmask_lock);
13027 + spin_lock_irq(&random_lock);
13028 + randomproc = (pid_t) -1;
13030 + spin_unlock_irq(&random_lock);
13035 --- /dev/null 2007-07-04 13:54:27.000000000 +1000
13036 +++ linux/crypto/ocf/ocf-bench.c 2007-07-23 22:26:12.000000000 +1000
13039 + * A loadable module that benchmarks the OCF crypto speed from kernel space.
13041 + * Copyright (C) 2004-2007 David McCullough <david_mccullough@securecomputing.com>
13045 + * The free distribution and use of this software in both source and binary
13046 + * form is allowed (with or without changes) provided that:
13048 + * 1. distributions of this source code include the above copyright
13049 + * notice, this list of conditions and the following disclaimer;
13051 + * 2. distributions in binary form include the above copyright
13052 + * notice, this list of conditions and the following disclaimer
13053 + * in the documentation and/or other associated materials;
13055 + * 3. the copyright holder's name is not used to endorse products
13056 + * built using this software without specific written permission.
13058 + * ALTERNATIVELY, provided that this notice is retained in full, this product
13059 + * may be distributed under the terms of the GNU General Public License (GPL),
13060 + * in which case the provisions of the GPL apply INSTEAD OF those given above.
13064 + * This software is provided 'as is' with no explicit or implied warranties
13065 + * in respect of its properties, including, but not limited to, correctness
13066 + * and/or fitness for purpose.
13070 +#ifndef AUTOCONF_INCLUDED
13071 +#include <linux/config.h>
13073 +#include <linux/module.h>
13074 +#include <linux/init.h>
13075 +#include <linux/list.h>
13076 +#include <linux/slab.h>
13077 +#include <linux/wait.h>
13078 +#include <linux/sched.h>
13079 +#include <linux/spinlock.h>
13080 +#include <linux/version.h>
13081 +#include <linux/interrupt.h>
13082 +#include <cryptodev.h>
13084 +#ifdef I_HAVE_AN_XSCALE_WITH_INTEL_SDK
13085 +#define BENCH_IXP_ACCESS_LIB 1
13087 +#ifdef BENCH_IXP_ACCESS_LIB
13088 +#include <IxTypes.h>
13089 +#include <IxOsBuffMgt.h>
13090 +#include <IxNpeDl.h>
13091 +#include <IxCryptoAcc.h>
13092 +#include <IxQMgr.h>
13093 +#include <IxOsServices.h>
13094 +#include <IxOsCacheMMU.h>
13098 + * support for access lib version 1.4
13100 +#ifndef IX_MBUF_PRIV
13101 +#define IX_MBUF_PRIV(x) ((x)->priv)
13105 + * the number of simultaneously active requests
13107 +static int request_q_len = 20;
13108 +module_param(request_q_len, int, 0);
13109 +MODULE_PARM_DESC(request_q_len, "Number of outstanding requests");
13111 + * how many requests we want to have processed
13113 +static int request_num = 1024;
13114 +module_param(request_num, int, 0);
13115 +MODULE_PARM_DESC(request_num, "run for at least this many requests");
13117 + * the size of each request
13119 +static int request_size = 1500;
13120 +module_param(request_size, int, 0);
13121 +MODULE_PARM_DESC(request_size, "size of each request");
13124 + * a structure for each request
13127 + struct work_struct work;
13128 +#ifdef BENCH_IXP_ACCESS_LIB
13131 + unsigned char *buffer;
13134 +static request_t *requests;
13136 +static int outstanding;
13139 +/*************************************************************************/
13141 + * OCF benchmark routines
13144 +static uint64_t ocf_cryptoid;
13145 +static int ocf_init(void);
13146 +static int ocf_cb(struct cryptop *crp);
13147 +static void ocf_request(void *arg);
13148 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
13149 +static void ocf_request_wq(struct work_struct *work);
13156 + struct cryptoini crie, cria;
13157 + struct cryptodesc crda, crde;
13159 + memset(&crie, 0, sizeof(crie));
13160 + memset(&cria, 0, sizeof(cria));
13161 + memset(&crde, 0, sizeof(crde));
13162 + memset(&crda, 0, sizeof(crda));
13164 + cria.cri_alg = CRYPTO_SHA1_HMAC;
13165 + cria.cri_klen = 20 * 8;
13166 + cria.cri_key = "0123456789abcdefghij";
13168 + crie.cri_alg = CRYPTO_3DES_CBC;
13169 + crie.cri_klen = 24 * 8;
13170 + crie.cri_key = "0123456789abcdefghijklmn";
13172 + crie.cri_next = &cria;
13174 + error = crypto_newsession(&ocf_cryptoid, &crie, 0);
13176 + printk("crypto_newsession failed %d\n", error);
13183 +ocf_cb(struct cryptop *crp)
13185 + request_t *r = (request_t *) crp->crp_opaque;
13187 + if (crp->crp_etype)
13188 + printk("Error in OCF processing: %d\n", crp->crp_etype);
13190 + crypto_freereq(crp);
13193 + if (total > request_num) {
13198 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
13199 + INIT_WORK(&r->work, ocf_request_wq);
13201 + INIT_WORK(&r->work, ocf_request, r);
13203 + schedule_work(&r->work);
13209 +ocf_request(void *arg)
13211 + request_t *r = arg;
13212 + struct cryptop *crp = crypto_getreq(2);
13213 + struct cryptodesc *crde, *crda;
13220 + crde = crp->crp_desc;
13221 + crda = crde->crd_next;
13223 + crda->crd_skip = 0;
13224 + crda->crd_flags = 0;
13225 + crda->crd_len = request_size;
13226 + crda->crd_inject = request_size;
13227 + crda->crd_alg = CRYPTO_SHA1_HMAC;
13228 + crda->crd_key = "0123456789abcdefghij";
13229 + crda->crd_klen = 20 * 8;
13231 + crde->crd_skip = 0;
13232 + crde->crd_flags = CRD_F_IV_EXPLICIT | CRD_F_ENCRYPT;
13233 + crde->crd_len = request_size;
13234 + crde->crd_inject = request_size;
13235 + crde->crd_alg = CRYPTO_3DES_CBC;
13236 + crde->crd_key = "0123456789abcdefghijklmn";
13237 + crde->crd_klen = 24 * 8;
13239 + crp->crp_ilen = request_size + 64;
13240 + crp->crp_flags = CRYPTO_F_CBIMM;
13241 + crp->crp_buf = (caddr_t) r->buffer;
13242 + crp->crp_callback = ocf_cb;
13243 + crp->crp_sid = ocf_cryptoid;
13244 + crp->crp_opaque = (caddr_t) r;
13245 + crypto_dispatch(crp);
13248 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
13250 +ocf_request_wq(struct work_struct *work)
13252 + request_t *r = container_of(work, request_t, work);
13257 +/*************************************************************************/
13258 +#ifdef BENCH_IXP_ACCESS_LIB
13259 +/*************************************************************************/
13261 + * CryptoAcc benchmark routines
13264 +static IxCryptoAccCtx ixp_ctx;
13265 +static UINT32 ixp_ctx_id;
13266 +static IX_MBUF ixp_pri;
13267 +static IX_MBUF ixp_sec;
13268 +static int ixp_registered = 0;
13270 +static void ixp_register_cb(UINT32 ctx_id, IX_MBUF *bufp,
13271 + IxCryptoAccStatus status);
13272 +static void ixp_perform_cb(UINT32 ctx_id, IX_MBUF *sbufp, IX_MBUF *dbufp,
13273 + IxCryptoAccStatus status);
13274 +static void ixp_request(void *arg);
13275 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
13276 +static void ixp_request_wq(struct work_struct *work);
13282 + IxCryptoAccStatus status;
13284 + ixp_ctx.cipherCtx.cipherAlgo = IX_CRYPTO_ACC_CIPHER_3DES;
13285 + ixp_ctx.cipherCtx.cipherMode = IX_CRYPTO_ACC_MODE_CBC;
13286 + ixp_ctx.cipherCtx.cipherKeyLen = 24;
13287 + ixp_ctx.cipherCtx.cipherBlockLen = IX_CRYPTO_ACC_DES_BLOCK_64;
13288 + ixp_ctx.cipherCtx.cipherInitialVectorLen = IX_CRYPTO_ACC_DES_IV_64;
13289 + memcpy(ixp_ctx.cipherCtx.key.cipherKey, "0123456789abcdefghijklmn", 24);
13291 + ixp_ctx.authCtx.authAlgo = IX_CRYPTO_ACC_AUTH_SHA1;
13292 + ixp_ctx.authCtx.authDigestLen = 12;
13293 + ixp_ctx.authCtx.aadLen = 0;
13294 + ixp_ctx.authCtx.authKeyLen = 20;
13295 + memcpy(ixp_ctx.authCtx.key.authKey, "0123456789abcdefghij", 20);
13297 + ixp_ctx.useDifferentSrcAndDestMbufs = 0;
13298 + ixp_ctx.operation = IX_CRYPTO_ACC_OP_ENCRYPT_AUTH ;
13300 + IX_MBUF_MLEN(&ixp_pri) = IX_MBUF_PKT_LEN(&ixp_pri) = 128;
13301 + IX_MBUF_MDATA(&ixp_pri) = (unsigned char *) kmalloc(128, SLAB_ATOMIC);
13302 + IX_MBUF_MLEN(&ixp_sec) = IX_MBUF_PKT_LEN(&ixp_sec) = 128;
13303 + IX_MBUF_MDATA(&ixp_sec) = (unsigned char *) kmalloc(128, SLAB_ATOMIC);
13305 + status = ixCryptoAccCtxRegister(&ixp_ctx, &ixp_pri, &ixp_sec,
13306 + ixp_register_cb, ixp_perform_cb, &ixp_ctx_id);
13308 + if (IX_CRYPTO_ACC_STATUS_SUCCESS == status) {
13309 + while (!ixp_registered)
13311 + return ixp_registered < 0 ? -1 : 0;
13314 + printk("ixp: ixCryptoAccCtxRegister failed %d\n", status);
13319 +ixp_register_cb(UINT32 ctx_id, IX_MBUF *bufp, IxCryptoAccStatus status)
13322 + IX_MBUF_MLEN(bufp) = IX_MBUF_PKT_LEN(bufp) = 0;
13323 + kfree(IX_MBUF_MDATA(bufp));
13324 + IX_MBUF_MDATA(bufp) = NULL;
13327 + if (IX_CRYPTO_ACC_STATUS_WAIT == status)
13329 + if (IX_CRYPTO_ACC_STATUS_SUCCESS == status)
13330 + ixp_registered = 1;
13332 + ixp_registered = -1;
13340 + IxCryptoAccStatus status)
13342 + request_t *r = NULL;
13345 + if (total > request_num) {
13350 + if (!sbufp || !(r = IX_MBUF_PRIV(sbufp))) {
13351 + printk("crappo %p %p\n", sbufp, r);
13356 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
13357 + INIT_WORK(&r->work, ixp_request_wq);
13359 + INIT_WORK(&r->work, ixp_request, r);
13361 + schedule_work(&r->work);
13365 +ixp_request(void *arg)
13367 + request_t *r = arg;
13368 + IxCryptoAccStatus status;
13370 + memset(&r->mbuf, 0, sizeof(r->mbuf));
13371 + IX_MBUF_MLEN(&r->mbuf) = IX_MBUF_PKT_LEN(&r->mbuf) = request_size + 64;
13372 + IX_MBUF_MDATA(&r->mbuf) = r->buffer;
13373 + IX_MBUF_PRIV(&r->mbuf) = r;
13374 + status = ixCryptoAccAuthCryptPerform(ixp_ctx_id, &r->mbuf, NULL,
13375 + 0, request_size, 0, request_size, request_size, r->buffer);
13376 + if (IX_CRYPTO_ACC_STATUS_SUCCESS != status) {
13377 + printk("status1 = %d\n", status);
13384 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
13386 +ixp_request_wq(struct work_struct *work)
13388 + request_t *r = container_of(work, request_t, work);
13393 +/*************************************************************************/
13394 +#endif /* BENCH_IXP_ACCESS_LIB */
13395 +/*************************************************************************/
13398 +ocfbench_init(void)
13400 + int i, jstart, jstop;
13402 + printk("Crypto Speed tests\n");
13404 + requests = kmalloc(sizeof(request_t) * request_q_len, GFP_KERNEL);
13406 + printk("malloc failed\n");
13410 + for (i = 0; i < request_q_len; i++) {
13411 + /* +64 for return data */
13412 + requests[i].buffer = kmalloc(request_size + 128, GFP_DMA);
13413 + if (!requests[i].buffer) {
13414 + printk("malloc failed\n");
13417 + memset(requests[i].buffer, '0' + i, request_size + 128);
13423 + printk("OCF: testing ...\n");
13425 + total = outstanding = 0;
13426 + jstart = jiffies;
13427 + for (i = 0; i < request_q_len; i++) {
13429 + ocf_request(&requests[i]);
13431 + while (outstanding > 0)
13435 + printk("OCF: %d requests of %d bytes in %d jiffies\n", total, request_size,
13438 +#ifdef BENCH_IXP_ACCESS_LIB
13442 + printk("IXP: testing ...\n");
13444 + total = outstanding = 0;
13445 + jstart = jiffies;
13446 + for (i = 0; i < request_q_len; i++) {
13448 + ixp_request(&requests[i]);
13450 + while (outstanding > 0)
13454 + printk("IXP: %d requests of %d bytes in %d jiffies\n", total, request_size,
13456 +#endif /* BENCH_IXP_ACCESS_LIB */
13458 + for (i = 0; i < request_q_len; i++)
13459 + kfree(requests[i].buffer);
13461 + return -EINVAL; /* always fail to load so it can be re-run quickly ;-) */
13464 +static void __exit ocfbench_exit(void)
13468 +module_init(ocfbench_init);
13469 +module_exit(ocfbench_exit);
13471 +MODULE_LICENSE("BSD");
13472 +MODULE_AUTHOR("David McCullough <david_mccullough@securecomputing.com>");
13473 +MODULE_DESCRIPTION("Benchmark various in-kernel crypto speeds");
13474 --- /dev/null 2007-07-04 13:54:27.000000000 +1000
13475 +++ linux/crypto/ocf/ixp4xx/ixp4xx.c 2008-07-03 10:28:05.000000000 +1000
13478 + * An OCF module that uses Intel's IXP CryptoAcc API to do the crypto.
13479 + * This driver requires the IXP400 Access Library that is available
13480 + * from Intel in order to operate (or compile).
13482 + * Written by David McCullough <david_mccullough@securecomputing.com>
13483 + * Copyright (C) 2006-2007 David McCullough
13484 + * Copyright (C) 2004-2005 Intel Corporation.
13488 + * The free distribution and use of this software in both source and binary
13489 + * form is allowed (with or without changes) provided that:
13491 + * 1. distributions of this source code include the above copyright
13492 + * notice, this list of conditions and the following disclaimer;
13494 + * 2. distributions in binary form include the above copyright
13495 + * notice, this list of conditions and the following disclaimer
13496 + * in the documentation and/or other associated materials;
13498 + * 3. the copyright holder's name is not used to endorse products
13499 + * built using this software without specific written permission.
13501 + * ALTERNATIVELY, provided that this notice is retained in full, this product
13502 + * may be distributed under the terms of the GNU General Public License (GPL),
13503 + * in which case the provisions of the GPL apply INSTEAD OF those given above.
13507 + * This software is provided 'as is' with no explicit or implied warranties
13508 + * in respect of its properties, including, but not limited to, correctness
13509 + * and/or fitness for purpose.
13512 +#ifndef AUTOCONF_INCLUDED
13513 +#include <linux/config.h>
13515 +#include <linux/module.h>
13516 +#include <linux/init.h>
13517 +#include <linux/list.h>
13518 +#include <linux/slab.h>
13519 +#include <linux/sched.h>
13520 +#include <linux/wait.h>
13521 +#include <linux/crypto.h>
13522 +#include <linux/interrupt.h>
13523 +#include <asm/scatterlist.h>
13525 +#include <IxTypes.h>
13526 +#include <IxOsBuffMgt.h>
13527 +#include <IxNpeDl.h>
13528 +#include <IxCryptoAcc.h>
13529 +#include <IxQMgr.h>
13530 +#include <IxOsServices.h>
13531 +#include <IxOsCacheMMU.h>
13533 +#include <cryptodev.h>
13536 +#ifndef IX_MBUF_PRIV
13537 +#define IX_MBUF_PRIV(x) ((x)->priv)
13543 + struct list_head ixp_q_list;
13544 + struct ixp_data *ixp_q_data;
13545 + struct cryptop *ixp_q_crp;
13546 + struct cryptodesc *ixp_q_ccrd;
13547 + struct cryptodesc *ixp_q_acrd;
13548 + IX_MBUF ixp_q_mbuf;
13549 + UINT8 *ixp_hash_dest; /* Location for hash in client buffer */
13550 + UINT8 *ixp_hash_src; /* Location of hash in internal buffer */
13551 + unsigned char ixp_q_iv_data[IX_CRYPTO_ACC_MAX_CIPHER_IV_LENGTH];
13552 + unsigned char *ixp_q_iv;
13556 + int ixp_registered; /* is the context registered */
13557 + int ixp_crd_flags; /* detect direction changes */
13559 + int ixp_cipher_alg;
13560 + int ixp_auth_alg;
13562 + UINT32 ixp_ctx_id;
13563 + UINT32 ixp_hash_key_id; /* used when hashing */
13564 + IxCryptoAccCtx ixp_ctx;
13565 + IX_MBUF ixp_pri_mbuf;
13566 + IX_MBUF ixp_sec_mbuf;
13568 + struct work_struct ixp_pending_work;
13569 + struct work_struct ixp_registration_work;
13570 + struct list_head ixp_q; /* unprocessed requests */
13575 +#define MAX_IOP_SIZE 64 /* words */
13576 +#define MAX_OOP_SIZE 128
13578 +#define MAX_PARAMS 3
13581 + struct list_head pkq_list;
13582 + struct cryptkop *pkq_krp;
13584 + IxCryptoAccPkeEauInOperands pkq_op;
13585 + IxCryptoAccPkeEauOpResult pkq_result;
13587 + UINT32 pkq_ibuf0[MAX_IOP_SIZE];
13588 + UINT32 pkq_ibuf1[MAX_IOP_SIZE];
13589 + UINT32 pkq_ibuf2[MAX_IOP_SIZE];
13590 + UINT32 pkq_obuf[MAX_OOP_SIZE];
13593 +static LIST_HEAD(ixp_pkq); /* current PK wait list */
13594 +static struct ixp_pkq *ixp_pk_cur;
13595 +static spinlock_t ixp_pkq_lock;
13597 +#endif /* __ixp46X */
13599 +static int ixp_blocked = 0;
13601 +static int32_t ixp_id = -1;
13602 +static struct ixp_data **ixp_sessions = NULL;
13603 +static u_int32_t ixp_sesnum = 0;
13605 +static int ixp_process(device_t, struct cryptop *, int);
13606 +static int ixp_newsession(device_t, u_int32_t *, struct cryptoini *);
13607 +static int ixp_freesession(device_t, u_int64_t);
13609 +static int ixp_kprocess(device_t, struct cryptkop *krp, int hint);
13612 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
13613 +static kmem_cache_t *qcache;
13615 +static struct kmem_cache *qcache;
13618 +#define debug ixp_debug
13619 +static int ixp_debug = 0;
13620 +module_param(ixp_debug, int, 0644);
13621 +MODULE_PARM_DESC(ixp_debug, "Enable debug");
13623 +static int ixp_init_crypto = 1;
13624 +module_param(ixp_init_crypto, int, 0444); /* RO after load/boot */
13625 +MODULE_PARM_DESC(ixp_init_crypto, "Call ixCryptoAccInit (default is 1)");
13627 +static void ixp_process_pending(void *arg);
13628 +static void ixp_registration(void *arg);
13629 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
13630 +static void ixp_process_pending_wq(struct work_struct *work);
13631 +static void ixp_registration_wq(struct work_struct *work);
13635 + * dummy device structure
13639 + softc_device_decl sc_dev;
13642 +static device_method_t ixp_methods = {
13643 + /* crypto device methods */
13644 + DEVMETHOD(cryptodev_newsession, ixp_newsession),
13645 + DEVMETHOD(cryptodev_freesession,ixp_freesession),
13646 + DEVMETHOD(cryptodev_process, ixp_process),
13648 + DEVMETHOD(cryptodev_kprocess, ixp_kprocess),
13653 + * Generate a new software session.
13656 +ixp_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
13658 + struct ixp_data *ixp;
13660 +#define AUTH_LEN(cri, def) \
13661 + (cri->cri_mlen ? cri->cri_mlen : (def))
13663 + dprintk("%s():alg %d\n", __FUNCTION__,cri->cri_alg);
13664 + if (sid == NULL || cri == NULL) {
13665 + dprintk("%s,%d - EINVAL\n", __FILE__, __LINE__);
13669 + if (ixp_sessions) {
13670 + for (i = 1; i < ixp_sesnum; i++)
13671 + if (ixp_sessions[i] == NULL)
13674 + i = 1; /* NB: to silence compiler warning */
13676 + if (ixp_sessions == NULL || i == ixp_sesnum) {
13677 + struct ixp_data **ixpd;
13679 + if (ixp_sessions == NULL) {
13680 + i = 1; /* We leave ixp_sessions[0] empty */
13681 + ixp_sesnum = CRYPTO_SW_SESSIONS;
13685 + ixpd = kmalloc(ixp_sesnum * sizeof(struct ixp_data *), SLAB_ATOMIC);
13686 + if (ixpd == NULL) {
13687 + /* Reset session number */
13688 + if (ixp_sesnum == CRYPTO_SW_SESSIONS)
13692 + dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
13695 + memset(ixpd, 0, ixp_sesnum * sizeof(struct ixp_data *));
13697 + /* Copy existing sessions */
13698 + if (ixp_sessions) {
13699 + memcpy(ixpd, ixp_sessions,
13700 + (ixp_sesnum / 2) * sizeof(struct ixp_data *));
13701 + kfree(ixp_sessions);
13704 + ixp_sessions = ixpd;
13707 + ixp_sessions[i] = (struct ixp_data *) kmalloc(sizeof(struct ixp_data),
13709 + if (ixp_sessions[i] == NULL) {
13710 + ixp_freesession(NULL, i);
13711 + dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
13717 + ixp = ixp_sessions[i];
13718 + memset(ixp, 0, sizeof(*ixp));
13720 + ixp->ixp_cipher_alg = -1;
13721 + ixp->ixp_auth_alg = -1;
13722 + ixp->ixp_ctx_id = -1;
13723 + INIT_LIST_HEAD(&ixp->ixp_q);
13725 + ixp->ixp_ctx.useDifferentSrcAndDestMbufs = 0;
13728 + switch (cri->cri_alg) {
13729 + case CRYPTO_DES_CBC:
13730 + ixp->ixp_cipher_alg = cri->cri_alg;
13731 + ixp->ixp_ctx.cipherCtx.cipherAlgo = IX_CRYPTO_ACC_CIPHER_DES;
13732 + ixp->ixp_ctx.cipherCtx.cipherMode = IX_CRYPTO_ACC_MODE_CBC;
13733 + ixp->ixp_ctx.cipherCtx.cipherKeyLen = (cri->cri_klen + 7) / 8;
13734 + ixp->ixp_ctx.cipherCtx.cipherBlockLen = IX_CRYPTO_ACC_DES_BLOCK_64;
13735 + ixp->ixp_ctx.cipherCtx.cipherInitialVectorLen =
13736 + IX_CRYPTO_ACC_DES_IV_64;
13737 + memcpy(ixp->ixp_ctx.cipherCtx.key.cipherKey,
13738 + cri->cri_key, (cri->cri_klen + 7) / 8);
13741 + case CRYPTO_3DES_CBC:
13742 + ixp->ixp_cipher_alg = cri->cri_alg;
13743 + ixp->ixp_ctx.cipherCtx.cipherAlgo = IX_CRYPTO_ACC_CIPHER_3DES;
13744 + ixp->ixp_ctx.cipherCtx.cipherMode = IX_CRYPTO_ACC_MODE_CBC;
13745 + ixp->ixp_ctx.cipherCtx.cipherKeyLen = (cri->cri_klen + 7) / 8;
13746 + ixp->ixp_ctx.cipherCtx.cipherBlockLen = IX_CRYPTO_ACC_DES_BLOCK_64;
13747 + ixp->ixp_ctx.cipherCtx.cipherInitialVectorLen =
13748 + IX_CRYPTO_ACC_DES_IV_64;
13749 + memcpy(ixp->ixp_ctx.cipherCtx.key.cipherKey,
13750 + cri->cri_key, (cri->cri_klen + 7) / 8);
13753 + case CRYPTO_RIJNDAEL128_CBC:
13754 + ixp->ixp_cipher_alg = cri->cri_alg;
13755 + ixp->ixp_ctx.cipherCtx.cipherAlgo = IX_CRYPTO_ACC_CIPHER_AES;
13756 + ixp->ixp_ctx.cipherCtx.cipherMode = IX_CRYPTO_ACC_MODE_CBC;
13757 + ixp->ixp_ctx.cipherCtx.cipherKeyLen = (cri->cri_klen + 7) / 8;
13758 + ixp->ixp_ctx.cipherCtx.cipherBlockLen = 16;
13759 + ixp->ixp_ctx.cipherCtx.cipherInitialVectorLen = 16;
13760 + memcpy(ixp->ixp_ctx.cipherCtx.key.cipherKey,
13761 + cri->cri_key, (cri->cri_klen + 7) / 8);
13765 + case CRYPTO_MD5_HMAC:
13766 + ixp->ixp_auth_alg = cri->cri_alg;
13767 + ixp->ixp_ctx.authCtx.authAlgo = IX_CRYPTO_ACC_AUTH_MD5;
13768 + ixp->ixp_ctx.authCtx.authDigestLen = AUTH_LEN(cri, MD5_HASH_LEN);
13769 + ixp->ixp_ctx.authCtx.aadLen = 0;
13770 + /* Only MD5_HMAC needs a key */
13771 + if (cri->cri_alg == CRYPTO_MD5_HMAC) {
13772 + ixp->ixp_ctx.authCtx.authKeyLen = (cri->cri_klen + 7) / 8;
13773 + if (ixp->ixp_ctx.authCtx.authKeyLen >
13774 + sizeof(ixp->ixp_ctx.authCtx.key.authKey)) {
13776 + "ixp4xx: Invalid key length for MD5_HMAC - %d bits\n",
13778 + ixp_freesession(NULL, i);
13781 + memcpy(ixp->ixp_ctx.authCtx.key.authKey,
13782 + cri->cri_key, (cri->cri_klen + 7) / 8);
13786 + case CRYPTO_SHA1:
13787 + case CRYPTO_SHA1_HMAC:
13788 + ixp->ixp_auth_alg = cri->cri_alg;
13789 + ixp->ixp_ctx.authCtx.authAlgo = IX_CRYPTO_ACC_AUTH_SHA1;
13790 + ixp->ixp_ctx.authCtx.authDigestLen = AUTH_LEN(cri, SHA1_HASH_LEN);
13791 + ixp->ixp_ctx.authCtx.aadLen = 0;
13792 + /* Only SHA1_HMAC needs a key */
13793 + if (cri->cri_alg == CRYPTO_SHA1_HMAC) {
13794 + ixp->ixp_ctx.authCtx.authKeyLen = (cri->cri_klen + 7) / 8;
13795 + if (ixp->ixp_ctx.authCtx.authKeyLen >
13796 + sizeof(ixp->ixp_ctx.authCtx.key.authKey)) {
13798 + "ixp4xx: Invalid key length for SHA1_HMAC - %d bits\n",
13800 + ixp_freesession(NULL, i);
13803 + memcpy(ixp->ixp_ctx.authCtx.key.authKey,
13804 + cri->cri_key, (cri->cri_klen + 7) / 8);
13809 + printk("ixp: unknown algo 0x%x\n", cri->cri_alg);
13810 + ixp_freesession(NULL, i);
13813 + cri = cri->cri_next;
13816 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
13817 + INIT_WORK(&ixp->ixp_pending_work, ixp_process_pending_wq);
13818 + INIT_WORK(&ixp->ixp_registration_work, ixp_registration_wq);
13820 + INIT_WORK(&ixp->ixp_pending_work, ixp_process_pending, ixp);
13821 + INIT_WORK(&ixp->ixp_registration_work, ixp_registration, ixp);
13829 + * Free a session.
13832 +ixp_freesession(device_t dev, u_int64_t tid)
13834 + u_int32_t sid = CRYPTO_SESID2LID(tid);
13836 + dprintk("%s()\n", __FUNCTION__);
13837 + if (sid > ixp_sesnum || ixp_sessions == NULL ||
13838 + ixp_sessions[sid] == NULL) {
13839 + dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
13843 + /* Silently accept and return */
13847 + if (ixp_sessions[sid]) {
13848 + if (ixp_sessions[sid]->ixp_ctx_id != -1) {
13849 + ixCryptoAccCtxUnregister(ixp_sessions[sid]->ixp_ctx_id);
13850 + ixp_sessions[sid]->ixp_ctx_id = -1;
13853 + flush_scheduled_work();
13855 + kfree(ixp_sessions[sid]);
13857 + ixp_sessions[sid] = NULL;
13858 + if (ixp_blocked) {
13860 + crypto_unblock(ixp_id, CRYPTO_SYMQ);
13867 + * callback for when hash processing is complete
13871 +ixp_hash_perform_cb(
13872 + UINT32 hash_key_id,
13874 + IxCryptoAccStatus status)
13878 + dprintk("%s(%u, %p, 0x%x)\n", __FUNCTION__, hash_key_id, bufp, status);
13880 + if (bufp == NULL) {
13881 + printk("ixp: NULL buf in %s\n", __FUNCTION__);
13885 + q = IX_MBUF_PRIV(bufp);
13887 + printk("ixp: NULL priv in %s\n", __FUNCTION__);
13891 + if (status == IX_CRYPTO_ACC_STATUS_SUCCESS) {
13892 + /* On success, need to copy hash back into original client buffer */
13893 + memcpy(q->ixp_hash_dest, q->ixp_hash_src,
13894 + (q->ixp_q_data->ixp_auth_alg == CRYPTO_SHA1) ?
13895 + SHA1_HASH_LEN : MD5_HASH_LEN);
13898 + printk("ixp: hash perform failed status=%d\n", status);
13899 + q->ixp_q_crp->crp_etype = EINVAL;
13902 + /* Free internal buffer used for hashing */
13903 + kfree(IX_MBUF_MDATA(&q->ixp_q_mbuf));
13905 + crypto_done(q->ixp_q_crp);
13906 + kmem_cache_free(qcache, q);
13910 + * setup a request and perform it
13913 +ixp_q_process(struct ixp_q *q)
13915 + IxCryptoAccStatus status;
13916 + struct ixp_data *ixp = q->ixp_q_data;
13917 + int auth_off = 0;
13918 + int auth_len = 0;
13919 + int crypt_off = 0;
13920 + int crypt_len = 0;
13922 + char *crypt_func;
13924 + dprintk("%s(%p)\n", __FUNCTION__, q);
13926 + if (q->ixp_q_ccrd) {
13927 + if (q->ixp_q_ccrd->crd_flags & CRD_F_IV_EXPLICIT) {
13928 + q->ixp_q_iv = q->ixp_q_ccrd->crd_iv;
13930 + q->ixp_q_iv = q->ixp_q_iv_data;
13931 + crypto_copydata(q->ixp_q_crp->crp_flags, q->ixp_q_crp->crp_buf,
13932 + q->ixp_q_ccrd->crd_inject,
13933 + ixp->ixp_ctx.cipherCtx.cipherInitialVectorLen,
13934 + (caddr_t) q->ixp_q_iv);
13937 + if (q->ixp_q_acrd) {
13938 + auth_off = q->ixp_q_acrd->crd_skip;
13939 + auth_len = q->ixp_q_acrd->crd_len;
13940 + icv_off = q->ixp_q_acrd->crd_inject;
13943 + crypt_off = q->ixp_q_ccrd->crd_skip;
13944 + crypt_len = q->ixp_q_ccrd->crd_len;
13945 + } else { /* if (q->ixp_q_acrd) */
13946 + auth_off = q->ixp_q_acrd->crd_skip;
13947 + auth_len = q->ixp_q_acrd->crd_len;
13948 + icv_off = q->ixp_q_acrd->crd_inject;
13951 + if (q->ixp_q_crp->crp_flags & CRYPTO_F_SKBUF) {
13952 + struct sk_buff *skb = (struct sk_buff *) q->ixp_q_crp->crp_buf;
13953 + if (skb_shinfo(skb)->nr_frags) {
13955 + * DAVIDM fix this limitation one day by using
13956 + * a buffer pool and chaining, it is not currently
13957 + * needed for current user/kernel space acceleration
13959 + printk("ixp: Cannot handle fragmented skb's yet !\n");
13960 + q->ixp_q_crp->crp_etype = ENOENT;
13963 + IX_MBUF_MLEN(&q->ixp_q_mbuf) =
13964 + IX_MBUF_PKT_LEN(&q->ixp_q_mbuf) = skb->len;
13965 + IX_MBUF_MDATA(&q->ixp_q_mbuf) = skb->data;
13966 + } else if (q->ixp_q_crp->crp_flags & CRYPTO_F_IOV) {
13967 + struct uio *uiop = (struct uio *) q->ixp_q_crp->crp_buf;
13968 + if (uiop->uio_iovcnt != 1) {
13970 + * DAVIDM fix this limitation one day by using
13971 + * a buffer pool and chaining, it is not currently
13972 + * needed for current user/kernel space acceleration
13974 + printk("ixp: Cannot handle more than 1 iovec yet !\n");
13975 + q->ixp_q_crp->crp_etype = ENOENT;
13978 + IX_MBUF_MLEN(&q->ixp_q_mbuf) =
13979 + IX_MBUF_PKT_LEN(&q->ixp_q_mbuf) = uiop->uio_iov[0].iov_len;
13980 + IX_MBUF_MDATA(&q->ixp_q_mbuf) = uiop->uio_iov[0].iov_base;
13981 + } else /* contig buffer */ {
13982 + IX_MBUF_MLEN(&q->ixp_q_mbuf) =
13983 + IX_MBUF_PKT_LEN(&q->ixp_q_mbuf) = q->ixp_q_crp->crp_ilen;
13984 + IX_MBUF_MDATA(&q->ixp_q_mbuf) = q->ixp_q_crp->crp_buf;
13987 + IX_MBUF_PRIV(&q->ixp_q_mbuf) = q;
13989 + if (ixp->ixp_auth_alg == CRYPTO_SHA1 || ixp->ixp_auth_alg == CRYPTO_MD5) {
13991 + * For SHA1 and MD5 hash, need to create an internal buffer that is big
13992 + * enough to hold the original data + the appropriate padding for the
13993 + * hash algorithm.
13995 + UINT8 *tbuf = NULL;
13997 + IX_MBUF_MLEN(&q->ixp_q_mbuf) = IX_MBUF_PKT_LEN(&q->ixp_q_mbuf) =
13998 + ((IX_MBUF_MLEN(&q->ixp_q_mbuf) * 8) + 72 + 511) / 8;
13999 + tbuf = kmalloc(IX_MBUF_MLEN(&q->ixp_q_mbuf), SLAB_ATOMIC);
14001 + if (IX_MBUF_MDATA(&q->ixp_q_mbuf) == NULL) {
14002 + printk("ixp: kmalloc(%u, SLAB_ATOMIC) failed\n",
14003 + IX_MBUF_MLEN(&q->ixp_q_mbuf));
14004 + q->ixp_q_crp->crp_etype = ENOMEM;
14007 + memcpy(tbuf, &(IX_MBUF_MDATA(&q->ixp_q_mbuf))[auth_off], auth_len);
14009 + /* Set location in client buffer to copy hash into */
14010 + q->ixp_hash_dest =
14011 + &(IX_MBUF_MDATA(&q->ixp_q_mbuf))[auth_off + auth_len];
14013 + IX_MBUF_MDATA(&q->ixp_q_mbuf) = tbuf;
14015 + /* Set location in internal buffer for where hash starts */
14016 + q->ixp_hash_src = &(IX_MBUF_MDATA(&q->ixp_q_mbuf))[auth_len];
14018 + crypt_func = "ixCryptoAccHashPerform";
14019 + status = ixCryptoAccHashPerform(ixp->ixp_ctx.authCtx.authAlgo,
14020 + &q->ixp_q_mbuf, ixp_hash_perform_cb, 0, auth_len, auth_len,
14021 + &ixp->ixp_hash_key_id);
14024 + crypt_func = "ixCryptoAccAuthCryptPerform";
14025 + status = ixCryptoAccAuthCryptPerform(ixp->ixp_ctx_id, &q->ixp_q_mbuf,
14026 + NULL, auth_off, auth_len, crypt_off, crypt_len, icv_off,
14030 + if (IX_CRYPTO_ACC_STATUS_SUCCESS == status)
14033 + if (IX_CRYPTO_ACC_STATUS_QUEUE_FULL == status) {
14034 + q->ixp_q_crp->crp_etype = ENOMEM;
14038 + printk("ixp: %s failed %u\n", crypt_func, status);
14039 + q->ixp_q_crp->crp_etype = EINVAL;
14042 + crypto_done(q->ixp_q_crp);
14043 + kmem_cache_free(qcache, q);
14048 + * because we cannot process the Q from the Register callback
14049 + * we do it here on a task Q.
14053 +ixp_process_pending(void *arg)
14055 + struct ixp_data *ixp = arg;
14056 + struct ixp_q *q = NULL;
14058 + dprintk("%s(%p)\n", __FUNCTION__, arg);
14063 + while (!list_empty(&ixp->ixp_q)) {
14064 + q = list_entry(ixp->ixp_q.next, struct ixp_q, ixp_q_list);
14065 + list_del(&q->ixp_q_list);
14066 + ixp_q_process(q);
14070 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
14072 +ixp_process_pending_wq(struct work_struct *work)
14074 + struct ixp_data *ixp = container_of(work, struct ixp_data,
14075 + ixp_pending_work);
14076 + ixp_process_pending(ixp);
14081 + * callback for when context registration is complete
14085 +ixp_register_cb(UINT32 ctx_id, IX_MBUF *bufp, IxCryptoAccStatus status)
14088 + struct ixp_data *ixp;
14091 + dprintk("%s(%d, %p, %d)\n", __FUNCTION__, ctx_id, bufp, status);
14094 + * free any buffer passed in to this routine
14097 + IX_MBUF_MLEN(bufp) = IX_MBUF_PKT_LEN(bufp) = 0;
14098 + kfree(IX_MBUF_MDATA(bufp));
14099 + IX_MBUF_MDATA(bufp) = NULL;
14102 + for (i = 0; i < ixp_sesnum; i++) {
14103 + ixp = ixp_sessions[i];
14104 + if (ixp && ixp->ixp_ctx_id == ctx_id)
14107 + if (i >= ixp_sesnum) {
14108 + printk("ixp: invalid context id %d\n", ctx_id);
14112 + if (IX_CRYPTO_ACC_STATUS_WAIT == status) {
14113 + /* this is normal to free the first of two buffers */
14114 + dprintk("ixp: register not finished yet.\n");
14118 + if (IX_CRYPTO_ACC_STATUS_SUCCESS != status) {
14119 + printk("ixp: register failed 0x%x\n", status);
14120 + while (!list_empty(&ixp->ixp_q)) {
14121 + q = list_entry(ixp->ixp_q.next, struct ixp_q, ixp_q_list);
14122 + list_del(&q->ixp_q_list);
14123 + q->ixp_q_crp->crp_etype = EINVAL;
14124 + crypto_done(q->ixp_q_crp);
14125 + kmem_cache_free(qcache, q);
14131 + * we are now registered, we cannot start processing the Q here
14132 + * or we get strange errors with AES (DES/3DES seem to be ok).
14134 + ixp->ixp_registered = 1;
14135 + schedule_work(&ixp->ixp_pending_work);
14140 + * callback for when data processing is complete
14148 + IxCryptoAccStatus status)
14152 + dprintk("%s(%d, %p, %p, 0x%x)\n", __FUNCTION__, ctx_id, sbufp,
14155 + if (sbufp == NULL) {
14156 + printk("ixp: NULL sbuf in ixp_perform_cb\n");
14160 + q = IX_MBUF_PRIV(sbufp);
14162 + printk("ixp: NULL priv in ixp_perform_cb\n");
14166 + if (status != IX_CRYPTO_ACC_STATUS_SUCCESS) {
14167 + printk("ixp: perform failed status=%d\n", status);
14168 + q->ixp_q_crp->crp_etype = EINVAL;
14171 + crypto_done(q->ixp_q_crp);
14172 + kmem_cache_free(qcache, q);
14177 + * registration is not callable at IRQ time, so we defer
14178 + * to a task queue, this routines completes the registration for us
14179 + * when the task queue runs
14181 + * Unfortunately this means we cannot tell OCF that the driver is blocked,
14182 + * we do that on the next request.
14186 +ixp_registration(void *arg)
14188 + struct ixp_data *ixp = arg;
14189 + struct ixp_q *q = NULL;
14190 + IX_MBUF *pri = NULL, *sec = NULL;
14191 + int status = IX_CRYPTO_ACC_STATUS_SUCCESS;
14194 + printk("ixp: ixp_registration with no arg\n");
14198 + if (ixp->ixp_ctx_id != -1) {
14199 + ixCryptoAccCtxUnregister(ixp->ixp_ctx_id);
14200 + ixp->ixp_ctx_id = -1;
14203 + if (list_empty(&ixp->ixp_q)) {
14204 + printk("ixp: ixp_registration with no Q\n");
14209 + * setup the primary and secondary buffers
14211 + q = list_entry(ixp->ixp_q.next, struct ixp_q, ixp_q_list);
14212 + if (q->ixp_q_acrd) {
14213 + pri = &ixp->ixp_pri_mbuf;
14214 + sec = &ixp->ixp_sec_mbuf;
14215 + IX_MBUF_MLEN(pri) = IX_MBUF_PKT_LEN(pri) = 128;
14216 + IX_MBUF_MDATA(pri) = (unsigned char *) kmalloc(128, SLAB_ATOMIC);
14217 + IX_MBUF_MLEN(sec) = IX_MBUF_PKT_LEN(sec) = 128;
14218 + IX_MBUF_MDATA(sec) = (unsigned char *) kmalloc(128, SLAB_ATOMIC);
14221 + /* Only need to register if a crypt op or HMAC op */
14222 + if (!(ixp->ixp_auth_alg == CRYPTO_SHA1 ||
14223 + ixp->ixp_auth_alg == CRYPTO_MD5)) {
14224 + status = ixCryptoAccCtxRegister(
14229 + &ixp->ixp_ctx_id);
14232 + /* Otherwise we start processing pending q */
14233 + schedule_work(&ixp->ixp_pending_work);
14236 + if (IX_CRYPTO_ACC_STATUS_SUCCESS == status)
14239 + if (IX_CRYPTO_ACC_STATUS_EXCEED_MAX_TUNNELS == status) {
14240 + printk("ixp: ixCryptoAccCtxRegister failed (out of tunnels)\n");
14242 + /* perhaps we should return EGAIN on queued ops ? */
14246 + printk("ixp: ixCryptoAccCtxRegister failed %d\n", status);
14247 + ixp->ixp_ctx_id = -1;
14250 + * everything waiting is toasted
14252 + while (!list_empty(&ixp->ixp_q)) {
14253 + q = list_entry(ixp->ixp_q.next, struct ixp_q, ixp_q_list);
14254 + list_del(&q->ixp_q_list);
14255 + q->ixp_q_crp->crp_etype = ENOENT;
14256 + crypto_done(q->ixp_q_crp);
14257 + kmem_cache_free(qcache, q);
14261 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
14263 +ixp_registration_wq(struct work_struct *work)
14265 + struct ixp_data *ixp = container_of(work, struct ixp_data,
14266 + ixp_registration_work);
14267 + ixp_registration(ixp);
14272 + * Process a request.
14275 +ixp_process(device_t dev, struct cryptop *crp, int hint)
14277 + struct ixp_data *ixp;
14278 + unsigned int lid;
14279 + struct ixp_q *q = NULL;
14282 + dprintk("%s()\n", __FUNCTION__);
14284 + /* Sanity check */
14285 + if (crp == NULL) {
14286 + dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
14290 + crp->crp_etype = 0;
14295 + if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
14296 + dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
14297 + crp->crp_etype = EINVAL;
14302 + * find the session we are using
14305 + lid = crp->crp_sid & 0xffffffff;
14306 + if (lid >= ixp_sesnum || lid == 0 || ixp_sessions == NULL ||
14307 + ixp_sessions[lid] == NULL) {
14308 + crp->crp_etype = ENOENT;
14309 + dprintk("%s,%d: ENOENT\n", __FILE__, __LINE__);
14312 + ixp = ixp_sessions[lid];
14315 + * setup a new request ready for queuing
14317 + q = kmem_cache_alloc(qcache, SLAB_ATOMIC);
14319 + dprintk("%s,%d: ENOMEM\n", __FILE__, __LINE__);
14320 + crp->crp_etype = ENOMEM;
14324 + * save some cycles by only zeroing the important bits
14326 + memset(&q->ixp_q_mbuf, 0, sizeof(q->ixp_q_mbuf));
14327 + q->ixp_q_ccrd = NULL;
14328 + q->ixp_q_acrd = NULL;
14329 + q->ixp_q_crp = crp;
14330 + q->ixp_q_data = ixp;
14333 + * point the cipher and auth descriptors appropriately
14334 + * check that we have something to do
14336 + if (crp->crp_desc->crd_alg == ixp->ixp_cipher_alg)
14337 + q->ixp_q_ccrd = crp->crp_desc;
14338 + else if (crp->crp_desc->crd_alg == ixp->ixp_auth_alg)
14339 + q->ixp_q_acrd = crp->crp_desc;
14341 + crp->crp_etype = ENOENT;
14342 + dprintk("%s,%d: bad desc match: ENOENT\n", __FILE__, __LINE__);
14345 + if (crp->crp_desc->crd_next) {
14346 + if (crp->crp_desc->crd_next->crd_alg == ixp->ixp_cipher_alg)
14347 + q->ixp_q_ccrd = crp->crp_desc->crd_next;
14348 + else if (crp->crp_desc->crd_next->crd_alg == ixp->ixp_auth_alg)
14349 + q->ixp_q_acrd = crp->crp_desc->crd_next;
14351 + crp->crp_etype = ENOENT;
14352 + dprintk("%s,%d: bad desc match: ENOENT\n", __FILE__, __LINE__);
14358 + * If there is a direction change for this context then we mark it as
14359 + * unregistered and re-register it for the new direction. This is not
14360 + * a very expensive operation and currently only tends to happen when
14361 + * user-space applications are doing benchmarks
14363 + * DM - we should be checking for pending requests before unregistering.
14365 + if (q->ixp_q_ccrd && ixp->ixp_registered &&
14366 + ixp->ixp_crd_flags != (q->ixp_q_ccrd->crd_flags & CRD_F_ENCRYPT)) {
14367 + dprintk("%s - detected direction change on session\n", __FUNCTION__);
14368 + ixp->ixp_registered = 0;
14372 + * if we are registered, call straight into the perform code
14374 + if (ixp->ixp_registered) {
14375 + ixp_q_process(q);
14380 + * the only part of the context not set in newsession is the direction
14381 + * dependent parts
14383 + if (q->ixp_q_ccrd) {
14384 + ixp->ixp_crd_flags = (q->ixp_q_ccrd->crd_flags & CRD_F_ENCRYPT);
14385 + if (q->ixp_q_ccrd->crd_flags & CRD_F_ENCRYPT) {
14386 + ixp->ixp_ctx.operation = q->ixp_q_acrd ?
14387 + IX_CRYPTO_ACC_OP_ENCRYPT_AUTH : IX_CRYPTO_ACC_OP_ENCRYPT;
14389 + ixp->ixp_ctx.operation = q->ixp_q_acrd ?
14390 + IX_CRYPTO_ACC_OP_AUTH_DECRYPT : IX_CRYPTO_ACC_OP_DECRYPT;
14393 + /* q->ixp_q_acrd must be set if we are here */
14394 + ixp->ixp_ctx.operation = IX_CRYPTO_ACC_OP_AUTH_CALC;
14397 + status = list_empty(&ixp->ixp_q);
14398 + list_add_tail(&q->ixp_q_list, &ixp->ixp_q);
14400 + schedule_work(&ixp->ixp_registration_work);
14405 + kmem_cache_free(qcache, q);
14406 + crypto_done(crp);
14413 + * key processing support for the ixp465
14418 + * copy a BN (LE) into a buffer (BE) and fill out the op appropriately
14419 + * assume zeroed and only copy bits that are significant
14423 +ixp_copy_ibuf(struct crparam *p, IxCryptoAccPkeEauOperand *op, UINT32 *buf)
14425 + unsigned char *src = (unsigned char *) p->crp_p;
14426 + unsigned char *dst;
14427 + int len, bits = p->crp_nbits;
14429 + dprintk("%s()\n", __FUNCTION__);
14431 + if (bits > MAX_IOP_SIZE * sizeof(UINT32) * 8) {
14432 + dprintk("%s - ibuf too big (%d > %d)\n", __FUNCTION__,
14433 + bits, MAX_IOP_SIZE * sizeof(UINT32) * 8);
14437 +	len = (bits + 31) / 32; /* the number of UINT32's needed */
14439 + dst = (unsigned char *) &buf[len];
14442 + while (bits > 0) {
14447 +#if 0 /* no need to zero remaining bits as it is done during request alloc */
14448 + while (dst > (unsigned char *) buf)
14453 + op->dataLen = len;
14458 + * copy out the result, be as forgiving as we can about small output buffers
14462 +ixp_copy_obuf(struct crparam *p, IxCryptoAccPkeEauOpResult *op, UINT32 *buf)
14464 + unsigned char *dst = (unsigned char *) p->crp_p;
14465 + unsigned char *src = (unsigned char *) buf;
14466 + int len, z, bits = p->crp_nbits;
14468 + dprintk("%s()\n", __FUNCTION__);
14470 + len = op->dataLen * sizeof(UINT32);
14472 + /* skip leading zeroes to be small buffer friendly */
14474 + while (z < len && src[z] == '\0')
14481 + while (len > 0 && bits > 0) {
14487 + while (bits > 0) {
14493 + dprintk("%s - obuf is %d (z=%d, ob=%d) bytes too small\n",
14494 + __FUNCTION__, len, z, p->crp_nbits / 8);
14503 + * the parameter offsets for exp_mod
14506 +#define IXP_PARAM_BASE 0
14507 +#define IXP_PARAM_EXP 1
14508 +#define IXP_PARAM_MOD 2
14509 +#define IXP_PARAM_RES 3
14512 + * key processing complete callback, is also used to start processing
14513 + * by passing a NULL for pResult
14518 + IxCryptoAccPkeEauOperation operation,
14519 + IxCryptoAccPkeEauOpResult *pResult,
14520 + BOOL carryOrBorrow,
14521 + IxCryptoAccStatus status)
14523 + struct ixp_pkq *q, *tmp;
14524 + unsigned long flags;
14526 + dprintk("%s(0x%x, %p, %d, 0x%x)\n", __FUNCTION__, operation, pResult,
14527 + carryOrBorrow, status);
14529 + /* handle a completed request */
14531 + if (ixp_pk_cur && &ixp_pk_cur->pkq_result == pResult) {
14533 + if (status != IX_CRYPTO_ACC_STATUS_SUCCESS) {
14534 + dprintk("%s() - op failed 0x%x\n", __FUNCTION__, status);
14535 + q->pkq_krp->krp_status = ERANGE; /* could do better */
14537 + /* copy out the result */
14538 + if (ixp_copy_obuf(&q->pkq_krp->krp_param[IXP_PARAM_RES],
14539 + &q->pkq_result, q->pkq_obuf))
14540 + q->pkq_krp->krp_status = ERANGE;
14542 + crypto_kdone(q->pkq_krp);
14544 + ixp_pk_cur = NULL;
14546 + printk("%s - callback with invalid result pointer\n", __FUNCTION__);
14549 + spin_lock_irqsave(&ixp_pkq_lock, flags);
14550 + if (ixp_pk_cur || list_empty(&ixp_pkq)) {
14551 + spin_unlock_irqrestore(&ixp_pkq_lock, flags);
14555 + list_for_each_entry_safe(q, tmp, &ixp_pkq, pkq_list) {
14557 + list_del(&q->pkq_list);
14560 + spin_unlock_irqrestore(&ixp_pkq_lock, flags);
14562 + status = ixCryptoAccPkeEauPerform(
14563 + IX_CRYPTO_ACC_OP_EAU_MOD_EXP,
14568 + if (status == IX_CRYPTO_ACC_STATUS_SUCCESS) {
14569 + dprintk("%s() - ixCryptoAccPkeEauPerform SUCCESS\n", __FUNCTION__);
14570 + return; /* callback will return here for callback */
14571 + } else if (status == IX_CRYPTO_ACC_STATUS_RETRY) {
14572 + printk("%s() - ixCryptoAccPkeEauPerform RETRY\n", __FUNCTION__);
14574 + printk("%s() - ixCryptoAccPkeEauPerform failed %d\n",
14575 + __FUNCTION__, status);
14577 + q->pkq_krp->krp_status = ERANGE; /* could do better */
14578 + crypto_kdone(q->pkq_krp);
14580 + spin_lock_irqsave(&ixp_pkq_lock, flags);
14582 + spin_unlock_irqrestore(&ixp_pkq_lock, flags);
14587 +ixp_kprocess(device_t dev, struct cryptkop *krp, int hint)
14589 + struct ixp_pkq *q;
14591 + unsigned long flags;
14593 + dprintk("%s l1=%d l2=%d l3=%d l4=%d\n", __FUNCTION__,
14594 + krp->krp_param[IXP_PARAM_BASE].crp_nbits,
14595 + krp->krp_param[IXP_PARAM_EXP].crp_nbits,
14596 + krp->krp_param[IXP_PARAM_MOD].crp_nbits,
14597 + krp->krp_param[IXP_PARAM_RES].crp_nbits);
14600 + if (krp->krp_op != CRK_MOD_EXP) {
14601 + krp->krp_status = EOPNOTSUPP;
14605 + q = (struct ixp_pkq *) kmalloc(sizeof(*q), GFP_KERNEL);
14607 + krp->krp_status = ENOMEM;
14612 + * The PKE engine does not appear to zero the output buffer
14613 + * appropriately, so we need to do it all here.
14615 + memset(q, 0, sizeof(*q));
14617 + q->pkq_krp = krp;
14618 + INIT_LIST_HEAD(&q->pkq_list);
14620 + if (ixp_copy_ibuf(&krp->krp_param[IXP_PARAM_BASE], &q->pkq_op.modExpOpr.M,
14623 + if (!rc && ixp_copy_ibuf(&krp->krp_param[IXP_PARAM_EXP],
14624 + &q->pkq_op.modExpOpr.e, q->pkq_ibuf1))
14626 + if (!rc && ixp_copy_ibuf(&krp->krp_param[IXP_PARAM_MOD],
14627 + &q->pkq_op.modExpOpr.N, q->pkq_ibuf2))
14632 + krp->krp_status = ERANGE;
14636 + q->pkq_result.pData = q->pkq_obuf;
14637 + q->pkq_result.dataLen =
14638 + (krp->krp_param[IXP_PARAM_RES].crp_nbits + 31) / 32;
14640 + spin_lock_irqsave(&ixp_pkq_lock, flags);
14641 + list_add_tail(&q->pkq_list, &ixp_pkq);
14642 + spin_unlock_irqrestore(&ixp_pkq_lock, flags);
14645 + ixp_kperform_cb(0, NULL, 0, 0);
14649 + crypto_kdone(krp);
14655 +#ifdef CONFIG_OCF_RANDOMHARVEST
14657 + * We run the random number generator output through SHA so that it
14658 + * is FIPS compliant.
14661 +static volatile int sha_done = 0;
14662 +static unsigned char sha_digest[20];
14665 +ixp_hash_cb(UINT8 *digest, IxCryptoAccStatus status)
14667 + dprintk("%s(%p, %d)\n", __FUNCTION__, digest, status);
14668 + if (sha_digest != digest)
14669 + printk("digest error\n");
14670 + if (IX_CRYPTO_ACC_STATUS_SUCCESS == status)
14673 + sha_done = -status;
14677 +ixp_read_random(void *arg, u_int32_t *buf, int maxwords)
14679 + IxCryptoAccStatus status;
14682 + dprintk("%s(%p, %d)\n", __FUNCTION__, buf, maxwords);
14683 + memset(buf, 0, maxwords * sizeof(*buf));
14684 + status = ixCryptoAccPkePseudoRandomNumberGet(maxwords, buf);
14685 + if (status != IX_CRYPTO_ACC_STATUS_SUCCESS) {
14686 + dprintk("%s: ixCryptoAccPkePseudoRandomNumberGet failed %d\n",
14687 + __FUNCTION__, status);
14692 + * run the random data through SHA to make it look more random
14695 + n = sizeof(sha_digest); /* process digest bytes at a time */
14698 + for (i = 0; i < maxwords; i += n / sizeof(*buf)) {
14699 + if ((maxwords - i) * sizeof(*buf) < n)
14700 + n = (maxwords - i) * sizeof(*buf);
14702 + status = ixCryptoAccPkeHashPerform(IX_CRYPTO_ACC_AUTH_SHA1,
14703 + (UINT8 *) &buf[i], n, ixp_hash_cb, sha_digest);
14704 + if (status != IX_CRYPTO_ACC_STATUS_SUCCESS) {
14705 + dprintk("ixCryptoAccPkeHashPerform failed %d\n", status);
14708 + while (!sha_done)
14710 + if (sha_done < 0) {
14711 + dprintk("ixCryptoAccPkeHashPerform failed CB %d\n", -sha_done);
14714 + memcpy(&buf[i], sha_digest, n);
14715 +	rc += n / sizeof(*buf);
14720 +#endif /* CONFIG_OCF_RANDOMHARVEST */
14722 +#endif /* __ixp46X */
14727 + * our driver startup and shutdown routines
14733 + dprintk("%s(%p)\n", __FUNCTION__, ixp_init);
14735 + if (ixp_init_crypto && ixCryptoAccInit() != IX_CRYPTO_ACC_STATUS_SUCCESS)
14736 + printk("ixCryptoAccInit failed, assuming already initialised!\n");
14738 + qcache = kmem_cache_create("ixp4xx_q", sizeof(struct ixp_q), 0,
14739 + SLAB_HWCACHE_ALIGN, NULL
14740 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
14745 + printk("failed to create Qcache\n");
14749 + memset(&ixpdev, 0, sizeof(ixpdev));
14750 + softc_device_init(&ixpdev, "ixp4xx", 0, ixp_methods);
14752 + ixp_id = crypto_get_driverid(softc_get_device(&ixpdev),
14753 + CRYPTOCAP_F_HARDWARE);
14755 + panic("IXP/OCF crypto device cannot initialize!");
14757 +#define REGISTER(alg) \
14758 + crypto_register(ixp_id,alg,0,0)
14760 + REGISTER(CRYPTO_DES_CBC);
14761 + REGISTER(CRYPTO_3DES_CBC);
14762 + REGISTER(CRYPTO_RIJNDAEL128_CBC);
14763 +#ifdef CONFIG_OCF_IXP4XX_SHA1_MD5
14764 + REGISTER(CRYPTO_MD5);
14765 + REGISTER(CRYPTO_SHA1);
14767 + REGISTER(CRYPTO_MD5_HMAC);
14768 + REGISTER(CRYPTO_SHA1_HMAC);
14772 + spin_lock_init(&ixp_pkq_lock);
14774 + * we do not enable the go fast options here as they can potentially
14775 + * allow timing based attacks
14777 + * http://www.openssl.org/news/secadv_20030219.txt
14779 + ixCryptoAccPkeEauExpConfig(0, 0);
14780 + crypto_kregister(ixp_id, CRK_MOD_EXP, 0);
14781 +#ifdef CONFIG_OCF_RANDOMHARVEST
14782 + crypto_rregister(ixp_id, ixp_read_random, NULL);
14792 + dprintk("%s()\n", __FUNCTION__);
14793 + crypto_unregister_all(ixp_id);
14795 + kmem_cache_destroy(qcache);
14799 +module_init(ixp_init);
14800 +module_exit(ixp_exit);
14802 +MODULE_LICENSE("Dual BSD/GPL");
14803 +MODULE_AUTHOR("David McCullough <dmccullough@cyberguard.com>");
14804 +MODULE_DESCRIPTION("ixp (OCF module for IXP4xx crypto)");
14805 --- /dev/null 2007-07-04 13:54:27.000000000 +1000
14806 +++ linux/crypto/ocf/cryptodev.c 2007-12-14 12:35:04.000000000 +1000
14808 +/* $OpenBSD: cryptodev.c,v 1.52 2002/06/19 07:22:46 deraadt Exp $ */
14811 + * Linux port done by David McCullough <david_mccullough@securecomputing.com>
14812 + * Copyright (C) 2006-2007 David McCullough
14813 + * Copyright (C) 2004-2005 Intel Corporation.
14814 + * The license and original author are listed below.
14816 + * Copyright (c) 2001 Theo de Raadt
14817 + * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
14819 + * Redistribution and use in source and binary forms, with or without
14820 + * modification, are permitted provided that the following conditions
14823 + * 1. Redistributions of source code must retain the above copyright
14824 + * notice, this list of conditions and the following disclaimer.
14825 + * 2. Redistributions in binary form must reproduce the above copyright
14826 + * notice, this list of conditions and the following disclaimer in the
14827 + * documentation and/or other materials provided with the distribution.
14828 + * 3. The name of the author may not be used to endorse or promote products
14829 + * derived from this software without specific prior written permission.
14831 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
14832 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
14833 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
14834 + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
14835 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
14836 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
14837 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
14838 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
14839 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
14840 + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
14842 + * Effort sponsored in part by the Defense Advanced Research Projects
14843 + * Agency (DARPA) and Air Force Research Laboratory, Air Force
14844 + * Materiel Command, USAF, under agreement number F30602-01-2-0537.
14846 +__FBSDID("$FreeBSD: src/sys/opencrypto/cryptodev.c,v 1.34 2007/05/09 19:37:02 gnn Exp $");
14849 +#ifndef AUTOCONF_INCLUDED
14850 +#include <linux/config.h>
14852 +#include <linux/types.h>
14853 +#include <linux/time.h>
14854 +#include <linux/delay.h>
14855 +#include <linux/list.h>
14856 +#include <linux/init.h>
14857 +#include <linux/sched.h>
14858 +#include <linux/unistd.h>
14859 +#include <linux/module.h>
14860 +#include <linux/wait.h>
14861 +#include <linux/slab.h>
14862 +#include <linux/fs.h>
14863 +#include <linux/dcache.h>
14864 +#include <linux/file.h>
14865 +#include <linux/mount.h>
14866 +#include <linux/miscdevice.h>
14867 +#include <linux/version.h>
14868 +#include <asm/uaccess.h>
14870 +#include <cryptodev.h>
14873 +extern asmlinkage long sys_dup(unsigned int fildes);
14875 +#define debug cryptodev_debug
14876 +int cryptodev_debug = 0;
14877 +module_param(cryptodev_debug, int, 0644);
14878 +MODULE_PARM_DESC(cryptodev_debug, "Enable cryptodev debug");
14880 +struct csession_info {
14881 + u_int16_t blocksize;
14882 + u_int16_t minkey, maxkey;
14884 + u_int16_t keysize;
14885 + /* u_int16_t hashsize; */
14886 + u_int16_t authsize;
14887 + /* u_int16_t ctxsize; */
14891 + struct list_head list;
14895 + wait_queue_head_t waitq;
14897 + u_int32_t cipher;
14903 + u_char tmp_iv[EALG_MAX_BLOCK_LEN];
14908 + struct csession_info info;
14910 + struct iovec iovec;
14916 + struct list_head csessions;
14920 +static struct csession *csefind(struct fcrypt *, u_int);
14921 +static int csedelete(struct fcrypt *, struct csession *);
14922 +static struct csession *cseadd(struct fcrypt *, struct csession *);
14923 +static struct csession *csecreate(struct fcrypt *, u_int64_t,
14924 + struct cryptoini *crie, struct cryptoini *cria, struct csession_info *);
14925 +static int csefree(struct csession *);
14927 +static int cryptodev_op(struct csession *, struct crypt_op *);
14928 +static int cryptodev_key(struct crypt_kop *);
14929 +static int cryptodev_find(struct crypt_find_op *);
14931 +static int cryptodev_cb(void *);
14932 +static int cryptodev_open(struct inode *inode, struct file *filp);
14935 + * Check a crypto identifier to see if it requested
14936 + * a valid crid and its capabilities match.
14939 +checkcrid(int crid)
14941 + int hid = crid & ~(CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_HARDWARE);
14942 + int typ = crid & (CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_HARDWARE);
14945 + /* if the user hasn't selected a driver, then just call newsession */
14946 + if (hid == 0 && typ != 0)
14949 + caps = crypto_getcaps(hid);
14951 + /* didn't find anything with capabilities */
14953 + dprintk("%s: hid=%x typ=%x not matched\n", __FUNCTION__, hid, typ);
14957 + /* the user didn't specify SW or HW, so the driver is ok */
14961 + /* if the type specified didn't match */
14962 + if (typ != (caps & (CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_HARDWARE))) {
14963 + dprintk("%s: hid=%x typ=%x caps=%x not matched\n", __FUNCTION__,
14972 +cryptodev_op(struct csession *cse, struct crypt_op *cop)
14974 + struct cryptop *crp = NULL;
14975 + struct cryptodesc *crde = NULL, *crda = NULL;
14978 + dprintk("%s()\n", __FUNCTION__);
14979 + if (cop->len > CRYPTO_MAX_DATA_LEN) {
14980 + dprintk("%s: %d > %d\n", __FUNCTION__, cop->len, CRYPTO_MAX_DATA_LEN);
14984 + if (cse->info.blocksize && (cop->len % cse->info.blocksize) != 0) {
14985 + dprintk("%s: blocksize=%d len=%d\n", __FUNCTION__, cse->info.blocksize,
14990 + cse->uio.uio_iov = &cse->iovec;
14991 + cse->uio.uio_iovcnt = 1;
14992 + cse->uio.uio_offset = 0;
14994 + cse->uio.uio_resid = cop->len;
14995 + cse->uio.uio_segflg = UIO_SYSSPACE;
14996 + cse->uio.uio_rw = UIO_WRITE;
14997 + cse->uio.uio_td = td;
14999 + cse->uio.uio_iov[0].iov_len = cop->len;
15000 + if (cse->info.authsize)
15001 + cse->uio.uio_iov[0].iov_len += cse->info.authsize;
15002 + cse->uio.uio_iov[0].iov_base = kmalloc(cse->uio.uio_iov[0].iov_len,
15005 + if (cse->uio.uio_iov[0].iov_base == NULL) {
15006 + dprintk("%s: iov_base kmalloc(%d) failed\n", __FUNCTION__,
15007 + cse->uio.uio_iov[0].iov_len);
15011 + crp = crypto_getreq((cse->info.blocksize != 0) + (cse->info.authsize != 0));
15012 + if (crp == NULL) {
15013 + dprintk("%s: ENOMEM\n", __FUNCTION__);
15018 + if (cse->info.authsize) {
15019 + crda = crp->crp_desc;
15020 + if (cse->info.blocksize)
15021 + crde = crda->crd_next;
15023 + if (cse->info.blocksize)
15024 + crde = crp->crp_desc;
15026 + dprintk("%s: bad request\n", __FUNCTION__);
15032 + if ((error = copy_from_user(cse->uio.uio_iov[0].iov_base, cop->src,
15034 + dprintk("%s: bad copy\n", __FUNCTION__);
15039 + crda->crd_skip = 0;
15040 + crda->crd_len = cop->len;
15041 + crda->crd_inject = cop->len;
15043 + crda->crd_alg = cse->mac;
15044 + crda->crd_key = cse->mackey;
15045 + crda->crd_klen = cse->mackeylen * 8;
15049 + if (cop->op == COP_ENCRYPT)
15050 + crde->crd_flags |= CRD_F_ENCRYPT;
15052 + crde->crd_flags &= ~CRD_F_ENCRYPT;
15053 + crde->crd_len = cop->len;
15054 + crde->crd_inject = 0;
15056 + crde->crd_alg = cse->cipher;
15057 + crde->crd_key = cse->key;
15058 + crde->crd_klen = cse->keylen * 8;
15061 + crp->crp_ilen = cse->uio.uio_iov[0].iov_len;
15062 + crp->crp_flags = CRYPTO_F_IOV | CRYPTO_F_CBIMM
15063 + | (cop->flags & COP_F_BATCH);
15064 + crp->crp_buf = (caddr_t)&cse->uio;
15065 + crp->crp_callback = (int (*) (struct cryptop *)) cryptodev_cb;
15066 + crp->crp_sid = cse->sid;
15067 + crp->crp_opaque = (void *)cse;
15070 + if (crde == NULL) {
15072 + dprintk("%s no crde\n", __FUNCTION__);
15075 + if (cse->cipher == CRYPTO_ARC4) { /* XXX use flag? */
15077 + dprintk("%s arc4 with IV\n", __FUNCTION__);
15080 + if ((error = copy_from_user(cse->tmp_iv, cop->iv,
15081 + cse->info.blocksize))) {
15082 + dprintk("%s bad iv copy\n", __FUNCTION__);
15085 + memcpy(crde->crd_iv, cse->tmp_iv, cse->info.blocksize);
15086 + crde->crd_flags |= CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT;
15087 + crde->crd_skip = 0;
15088 + } else if (cse->cipher == CRYPTO_ARC4) { /* XXX use flag? */
15089 + crde->crd_skip = 0;
15090 + } else if (crde) {
15091 + crde->crd_flags |= CRD_F_IV_PRESENT;
15092 + crde->crd_skip = cse->info.blocksize;
15093 + crde->crd_len -= cse->info.blocksize;
15096 + if (cop->mac && crda == NULL) {
15098 + dprintk("%s no crda\n", __FUNCTION__);
15103 + * Let the dispatch run unlocked, then, interlock against the
15104 + * callback before checking if the operation completed and going
15105 + * to sleep. This ensures drivers don't inherit our lock which
15106 + * results in a lock order reversal between crypto_dispatch forced
15107 + * entry and the crypto_done callback into us.
15109 + error = crypto_dispatch(crp);
15110 + if (error == 0) {
15111 + dprintk("%s about to WAIT\n", __FUNCTION__);
15113 + * we really need to wait for driver to complete to maintain
15114 + * state, luckily interrupts will be remembered
15117 + error = wait_event_interruptible(crp->crp_waitq,
15118 + ((crp->crp_flags & CRYPTO_F_DONE) != 0));
15120 + * we can't break out of this loop or we will leave behind
15121 + * a huge mess, however, staying here means if your driver
15122 + * is broken user applications can hang and not be killed.
15123 + * The solution, fix your driver :-)
15129 + } while ((crp->crp_flags & CRYPTO_F_DONE) == 0);
15130 + dprintk("%s finished WAITING error=%d\n", __FUNCTION__, error);
15133 + if (crp->crp_etype != 0) {
15134 + error = crp->crp_etype;
15135 + dprintk("%s error in crp processing\n", __FUNCTION__);
15139 + if (cse->error) {
15140 + error = cse->error;
15141 + dprintk("%s error in cse processing\n", __FUNCTION__);
15145 + if (cop->dst && (error = copy_to_user(cop->dst,
15146 + cse->uio.uio_iov[0].iov_base, cop->len))) {
15147 + dprintk("%s bad dst copy\n", __FUNCTION__);
15152 + (error=copy_to_user(cop->mac,
15153 + (caddr_t)cse->uio.uio_iov[0].iov_base + cop->len,
15154 + cse->info.authsize))) {
15155 + dprintk("%s bad mac copy\n", __FUNCTION__);
15161 + crypto_freereq(crp);
15162 + if (cse->uio.uio_iov[0].iov_base)
15163 + kfree(cse->uio.uio_iov[0].iov_base);
15169 +cryptodev_cb(void *op)
15171 + struct cryptop *crp = (struct cryptop *) op;
15172 + struct csession *cse = (struct csession *)crp->crp_opaque;
15175 + dprintk("%s()\n", __FUNCTION__);
15176 + error = crp->crp_etype;
15177 + if (error == EAGAIN) {
15178 + crp->crp_flags &= ~CRYPTO_F_DONE;
15181 + * DAVIDM I am fairly sure that we should turn this into a batch
15182 + * request to stop bad karma/lockup, revisit
15184 + crp->crp_flags |= CRYPTO_F_BATCH;
15186 + return crypto_dispatch(crp);
15188 + if (error != 0 || (crp->crp_flags & CRYPTO_F_DONE)) {
15189 + cse->error = error;
15190 + wake_up_interruptible(&crp->crp_waitq);
15196 +cryptodevkey_cb(void *op)
15198 + struct cryptkop *krp = (struct cryptkop *) op;
15199 + dprintk("%s()\n", __FUNCTION__);
15200 + wake_up_interruptible(&krp->krp_waitq);
15205 +cryptodev_key(struct crypt_kop *kop)
15207 + struct cryptkop *krp = NULL;
15208 + int error = EINVAL;
15209 + int in, out, size, i;
15211 + dprintk("%s()\n", __FUNCTION__);
15212 + if (kop->crk_iparams + kop->crk_oparams > CRK_MAXPARAM) {
15213 + dprintk("%s params too big\n", __FUNCTION__);
15217 + in = kop->crk_iparams;
15218 + out = kop->crk_oparams;
15219 + switch (kop->crk_op) {
15220 + case CRK_MOD_EXP:
15221 + if (in == 3 && out == 1)
15224 + case CRK_MOD_EXP_CRT:
15225 + if (in == 6 && out == 1)
15228 + case CRK_DSA_SIGN:
15229 + if (in == 5 && out == 2)
15232 + case CRK_DSA_VERIFY:
15233 + if (in == 7 && out == 0)
15236 + case CRK_DH_COMPUTE_KEY:
15237 + if (in == 3 && out == 1)
15244 + krp = (struct cryptkop *)kmalloc(sizeof *krp, GFP_KERNEL);
15247 + bzero(krp, sizeof *krp);
15248 + krp->krp_op = kop->crk_op;
15249 + krp->krp_status = kop->crk_status;
15250 + krp->krp_iparams = kop->crk_iparams;
15251 + krp->krp_oparams = kop->crk_oparams;
15252 + krp->krp_crid = kop->crk_crid;
15253 + krp->krp_status = 0;
15254 + krp->krp_flags = CRYPTO_KF_CBIMM;
15255 + krp->krp_callback = (int (*) (struct cryptkop *)) cryptodevkey_cb;
15256 + init_waitqueue_head(&krp->krp_waitq);
15258 + for (i = 0; i < CRK_MAXPARAM; i++)
15259 + krp->krp_param[i].crp_nbits = kop->crk_param[i].crp_nbits;
15260 + for (i = 0; i < krp->krp_iparams + krp->krp_oparams; i++) {
15261 + size = (krp->krp_param[i].crp_nbits + 7) / 8;
15264 + krp->krp_param[i].crp_p = (caddr_t) kmalloc(size, GFP_KERNEL);
15265 + if (i >= krp->krp_iparams)
15267 + error = copy_from_user(krp->krp_param[i].crp_p,
15268 + kop->crk_param[i].crp_p, size);
15273 + error = crypto_kdispatch(krp);
15278 + error = wait_event_interruptible(krp->krp_waitq,
15279 + ((krp->krp_flags & CRYPTO_KF_DONE) != 0));
15281 + * we can't break out of this loop or we will leave behind
15282 + * a huge mess, however, staying here means if your driver
15283 + * is broken user applications can hang and not be killed.
15284 + * The solution, fix your driver :-)
15290 + } while ((krp->krp_flags & CRYPTO_KF_DONE) == 0);
15292 + dprintk("%s finished WAITING error=%d\n", __FUNCTION__, error);
15294 + kop->crk_crid = krp->krp_crid; /* device that did the work */
15295 + if (krp->krp_status != 0) {
15296 + error = krp->krp_status;
15300 + for (i = krp->krp_iparams; i < krp->krp_iparams + krp->krp_oparams; i++) {
15301 + size = (krp->krp_param[i].crp_nbits + 7) / 8;
15304 + error = copy_to_user(kop->crk_param[i].crp_p, krp->krp_param[i].crp_p,
15312 + kop->crk_status = krp->krp_status;
15313 + for (i = 0; i < CRK_MAXPARAM; i++) {
15314 + if (krp->krp_param[i].crp_p)
15315 + kfree(krp->krp_param[i].crp_p);
15323 +cryptodev_find(struct crypt_find_op *find)
15327 + if (find->crid != -1) {
15328 + dev = crypto_find_device_byhid(find->crid);
15331 + strlcpy(find->name, device_get_nameunit(dev),
15332 + sizeof(find->name));
15334 + find->crid = crypto_find_driver(find->name);
15335 + if (find->crid == -1)
15341 +static struct csession *
15342 +csefind(struct fcrypt *fcr, u_int ses)
15344 + struct csession *cse;
15346 + dprintk("%s()\n", __FUNCTION__);
15347 + list_for_each_entry(cse, &fcr->csessions, list)
15348 + if (cse->ses == ses)
15354 +csedelete(struct fcrypt *fcr, struct csession *cse_del)
15356 + struct csession *cse;
15358 + dprintk("%s()\n", __FUNCTION__);
15359 + list_for_each_entry(cse, &fcr->csessions, list) {
15360 + if (cse == cse_del) {
15361 + list_del(&cse->list);
15368 +static struct csession *
15369 +cseadd(struct fcrypt *fcr, struct csession *cse)
15371 + dprintk("%s()\n", __FUNCTION__);
15372 + list_add_tail(&cse->list, &fcr->csessions);
15373 + cse->ses = fcr->sesn++;
15377 +static struct csession *
15378 +csecreate(struct fcrypt *fcr, u_int64_t sid, struct cryptoini *crie,
15379 + struct cryptoini *cria, struct csession_info *info)
15381 + struct csession *cse;
15383 + dprintk("%s()\n", __FUNCTION__);
15384 + cse = (struct csession *) kmalloc(sizeof(struct csession), GFP_KERNEL);
15387 + memset(cse, 0, sizeof(struct csession));
15389 + INIT_LIST_HEAD(&cse->list);
15390 + init_waitqueue_head(&cse->waitq);
15392 + cse->key = crie->cri_key;
15393 + cse->keylen = crie->cri_klen/8;
15394 + cse->mackey = cria->cri_key;
15395 + cse->mackeylen = cria->cri_klen/8;
15397 + cse->cipher = crie->cri_alg;
15398 + cse->mac = cria->cri_alg;
15399 + cse->info = *info;
15400 + cseadd(fcr, cse);
15405 +csefree(struct csession *cse)
15409 + dprintk("%s()\n", __FUNCTION__);
15410 + error = crypto_freesession(cse->sid);
15414 + kfree(cse->mackey);
15421 + struct inode *inode,
15422 + struct file *filp,
15423 + unsigned int cmd,
15424 + unsigned long arg)
15426 + struct cryptoini cria, crie;
15427 + struct fcrypt *fcr = filp->private_data;
15428 + struct csession *cse;
15429 + struct csession_info info;
15430 + struct session2_op sop;
15431 + struct crypt_op cop;
15432 + struct crypt_kop kop;
15433 + struct crypt_find_op fop;
15436 + int feat, fd, error = 0, crid;
15439 + dprintk("%s(cmd=%x arg=%lx)\n", __FUNCTION__, cmd, arg);
15444 + dprintk("%s(CRIOGET)\n", __FUNCTION__);
15446 + set_fs(get_ds());
15447 + for (fd = 0; fd < files_fdtable(current->files)->max_fds; fd++)
15448 + if (files_fdtable(current->files)->fd[fd] == filp)
15450 + fd = sys_dup(fd);
15452 + put_user(fd, (int *) arg);
15453 + return IS_ERR_VALUE(fd) ? fd : 0;
15456 +#define CIOCGSESSSTR (cmd == CIOCGSESSION ? "CIOCGSESSION" : "CIOCGSESSION2")
15457 + case CIOCGSESSION:
15458 + case CIOCGSESSION2:
15459 + dprintk("%s(%s)\n", __FUNCTION__, CIOCGSESSSTR);
15460 + memset(&crie, 0, sizeof(crie));
15461 + memset(&cria, 0, sizeof(cria));
15462 + memset(&info, 0, sizeof(info));
15463 + memset(&sop, 0, sizeof(sop));
15465 + if (copy_from_user(&sop, (void*)arg, (cmd == CIOCGSESSION) ?
15466 + sizeof(struct session_op) : sizeof(sop))) {
15467 + dprintk("%s(%s) - bad copy\n", __FUNCTION__, CIOCGSESSSTR);
15472 + switch (sop.cipher) {
15474 + dprintk("%s(%s) - no cipher\n", __FUNCTION__, CIOCGSESSSTR);
15476 + case CRYPTO_NULL_CBC:
15477 + info.blocksize = NULL_BLOCK_LEN;
15478 + info.minkey = NULL_MIN_KEY_LEN;
15479 + info.maxkey = NULL_MAX_KEY_LEN;
15481 + case CRYPTO_DES_CBC:
15482 + info.blocksize = DES_BLOCK_LEN;
15483 + info.minkey = DES_MIN_KEY_LEN;
15484 + info.maxkey = DES_MAX_KEY_LEN;
15486 + case CRYPTO_3DES_CBC:
15487 + info.blocksize = DES3_BLOCK_LEN;
15488 + info.minkey = DES3_MIN_KEY_LEN;
15489 + info.maxkey = DES3_MAX_KEY_LEN;
15491 + case CRYPTO_BLF_CBC:
15492 + info.blocksize = BLOWFISH_BLOCK_LEN;
15493 + info.minkey = BLOWFISH_MIN_KEY_LEN;
15494 + info.maxkey = BLOWFISH_MAX_KEY_LEN;
15496 + case CRYPTO_CAST_CBC:
15497 + info.blocksize = CAST128_BLOCK_LEN;
15498 + info.minkey = CAST128_MIN_KEY_LEN;
15499 + info.maxkey = CAST128_MAX_KEY_LEN;
15501 + case CRYPTO_SKIPJACK_CBC:
15502 + info.blocksize = SKIPJACK_BLOCK_LEN;
15503 + info.minkey = SKIPJACK_MIN_KEY_LEN;
15504 + info.maxkey = SKIPJACK_MAX_KEY_LEN;
15506 + case CRYPTO_AES_CBC:
15507 + info.blocksize = AES_BLOCK_LEN;
15508 + info.minkey = AES_MIN_KEY_LEN;
15509 + info.maxkey = AES_MAX_KEY_LEN;
15511 + case CRYPTO_ARC4:
15512 + info.blocksize = ARC4_BLOCK_LEN;
15513 + info.minkey = ARC4_MIN_KEY_LEN;
15514 + info.maxkey = ARC4_MAX_KEY_LEN;
15516 + case CRYPTO_CAMELLIA_CBC:
15517 + info.blocksize = CAMELLIA_BLOCK_LEN;
15518 + info.minkey = CAMELLIA_MIN_KEY_LEN;
15519 + info.maxkey = CAMELLIA_MAX_KEY_LEN;
15522 + dprintk("%s(%s) - bad cipher\n", __FUNCTION__, CIOCGSESSSTR);
15527 + switch (sop.mac) {
15529 + dprintk("%s(%s) - no mac\n", __FUNCTION__, CIOCGSESSSTR);
15531 + case CRYPTO_NULL_HMAC:
15532 + info.authsize = NULL_HASH_LEN;
15535 + info.authsize = MD5_HASH_LEN;
15537 + case CRYPTO_SHA1:
15538 + info.authsize = SHA1_HASH_LEN;
15540 + case CRYPTO_SHA2_256:
15541 + info.authsize = SHA2_256_HASH_LEN;
15543 + case CRYPTO_SHA2_384:
15544 + info.authsize = SHA2_384_HASH_LEN;
15546 + case CRYPTO_SHA2_512:
15547 + info.authsize = SHA2_512_HASH_LEN;
15549 + case CRYPTO_RIPEMD160:
15550 + info.authsize = RIPEMD160_HASH_LEN;
15552 + case CRYPTO_MD5_HMAC:
15553 + info.authsize = MD5_HASH_LEN;
15555 + case CRYPTO_SHA1_HMAC:
15556 + info.authsize = SHA1_HASH_LEN;
15558 + case CRYPTO_SHA2_256_HMAC:
15559 + info.authsize = SHA2_256_HASH_LEN;
15561 + case CRYPTO_SHA2_384_HMAC:
15562 + info.authsize = SHA2_384_HASH_LEN;
15564 + case CRYPTO_SHA2_512_HMAC:
15565 + info.authsize = SHA2_512_HASH_LEN;
15567 + case CRYPTO_RIPEMD160_HMAC:
15568 + info.authsize = RIPEMD160_HASH_LEN;
15571 + dprintk("%s(%s) - bad mac\n", __FUNCTION__, CIOCGSESSSTR);
15576 + if (info.blocksize) {
15577 + crie.cri_alg = sop.cipher;
15578 + crie.cri_klen = sop.keylen * 8;
15579 + if ((info.maxkey && sop.keylen > info.maxkey) ||
15580 + sop.keylen < info.minkey) {
15581 + dprintk("%s(%s) - bad key\n", __FUNCTION__, CIOCGSESSSTR);
15586 + crie.cri_key = (u_int8_t *) kmalloc(crie.cri_klen/8+1, GFP_KERNEL);
15587 + if (copy_from_user(crie.cri_key, sop.key,
15588 + crie.cri_klen/8)) {
15589 + dprintk("%s(%s) - bad copy\n", __FUNCTION__, CIOCGSESSSTR);
15593 + if (info.authsize)
15594 + crie.cri_next = &cria;
15597 + if (info.authsize) {
15598 + cria.cri_alg = sop.mac;
15599 + cria.cri_klen = sop.mackeylen * 8;
15600 + if ((info.maxkey && sop.mackeylen > info.maxkey) ||
15601 + sop.keylen < info.minkey) {
15602 + dprintk("%s(%s) - mackeylen %d\n", __FUNCTION__, CIOCGSESSSTR,
15608 + if (cria.cri_klen) {
15609 + cria.cri_key = (u_int8_t *) kmalloc(cria.cri_klen/8,GFP_KERNEL);
15610 + if (copy_from_user(cria.cri_key, sop.mackey,
15611 + cria.cri_klen / 8)) {
15612 + dprintk("%s(%s) - bad copy\n", __FUNCTION__, CIOCGSESSSTR);
15619 + /* NB: CIOGSESSION2 has the crid */
15620 + if (cmd == CIOCGSESSION2) {
15622 + error = checkcrid(crid);
15624 + dprintk("%s(%s) - checkcrid %x\n", __FUNCTION__,
15625 + CIOCGSESSSTR, error);
15629 + /* allow either HW or SW to be used */
15630 + crid = CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE;
15632 + error = crypto_newsession(&sid, (info.blocksize ? &crie : &cria), crid);
15634 + dprintk("%s(%s) - newsession %d\n",__FUNCTION__,CIOCGSESSSTR,error);
15638 + cse = csecreate(fcr, sid, &crie, &cria, &info);
15639 + if (cse == NULL) {
15640 + crypto_freesession(sid);
15642 + dprintk("%s(%s) - csecreate failed\n", __FUNCTION__, CIOCGSESSSTR);
15645 + sop.ses = cse->ses;
15647 + if (cmd == CIOCGSESSION2) {
15648 + /* return hardware/driver id */
15649 + sop.crid = CRYPTO_SESID2HID(cse->sid);
15652 + if (copy_to_user((void*)arg, &sop, (cmd == CIOCGSESSION) ?
15653 + sizeof(struct session_op) : sizeof(sop))) {
15654 + dprintk("%s(%s) - bad copy\n", __FUNCTION__, CIOCGSESSSTR);
15659 + dprintk("%s(%s) - bail %d\n", __FUNCTION__, CIOCGSESSSTR, error);
15660 + if (crie.cri_key)
15661 + kfree(crie.cri_key);
15662 + if (cria.cri_key)
15663 + kfree(cria.cri_key);
15666 + case CIOCFSESSION:
15667 + dprintk("%s(CIOCFSESSION)\n", __FUNCTION__);
15668 + get_user(ses, (uint32_t*)arg);
15669 + cse = csefind(fcr, ses);
15670 + if (cse == NULL) {
15672 + dprintk("%s(CIOCFSESSION) - Fail %d\n", __FUNCTION__, error);
15675 + csedelete(fcr, cse);
15676 + error = csefree(cse);
15679 + dprintk("%s(CIOCCRYPT)\n", __FUNCTION__);
15680 + if(copy_from_user(&cop, (void*)arg, sizeof(cop))) {
15681 + dprintk("%s(CIOCCRYPT) - bad copy\n", __FUNCTION__);
15685 + cse = csefind(fcr, cop.ses);
15686 + if (cse == NULL) {
15688 + dprintk("%s(CIOCCRYPT) - Fail %d\n", __FUNCTION__, error);
15691 + error = cryptodev_op(cse, &cop);
15692 + if(copy_to_user((void*)arg, &cop, sizeof(cop))) {
15693 + dprintk("%s(CIOCCRYPT) - bad return copy\n", __FUNCTION__);
15700 + dprintk("%s(CIOCKEY)\n", __FUNCTION__);
15701 + if (!crypto_userasymcrypto)
15702 + return (EPERM); /* XXX compat? */
15703 + if(copy_from_user(&kop, (void*)arg, sizeof(kop))) {
15704 + dprintk("%s(CIOCKEY) - bad copy\n", __FUNCTION__);
15708 + if (cmd == CIOCKEY) {
15709 + /* NB: crypto core enforces s/w driver use */
15711 + CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE;
15713 + error = cryptodev_key(&kop);
15714 + if(copy_to_user((void*)arg, &kop, sizeof(kop))) {
15715 + dprintk("%s(CIOCGKEY) - bad return copy\n", __FUNCTION__);
15720 + case CIOCASYMFEAT:
15721 + dprintk("%s(CIOCASYMFEAT)\n", __FUNCTION__);
15722 + if (!crypto_userasymcrypto) {
15724 + * NB: if user asym crypto operations are
15725 + * not permitted return "no algorithms"
15726 + * so well-behaved applications will just
15727 + * fallback to doing them in software.
15731 + error = crypto_getfeat(&feat);
15733 + error = copy_to_user((void*)arg, &feat, sizeof(feat));
15736 + case CIOCFINDDEV:
15737 + if (copy_from_user(&fop, (void*)arg, sizeof(fop))) {
15738 + dprintk("%s(CIOCFINDDEV) - bad copy\n", __FUNCTION__);
15742 + error = cryptodev_find(&fop);
15743 + if (copy_to_user((void*)arg, &fop, sizeof(fop))) {
15744 + dprintk("%s(CIOCFINDDEV) - bad return copy\n", __FUNCTION__);
15750 + dprintk("%s(unknown ioctl 0x%x)\n", __FUNCTION__, cmd);
15757 +#ifdef HAVE_UNLOCKED_IOCTL
15759 +cryptodev_unlocked_ioctl(
15760 + struct file *filp,
15761 + unsigned int cmd,
15762 + unsigned long arg)
15764 + return cryptodev_ioctl(NULL, filp, cmd, arg);
15769 +cryptodev_open(struct inode *inode, struct file *filp)
15771 + struct fcrypt *fcr;
15773 + dprintk("%s()\n", __FUNCTION__);
15774 + if (filp->private_data) {
15775 + printk("cryptodev: Private data already exists !\n");
15779 + fcr = kmalloc(sizeof(*fcr), GFP_KERNEL);
15781 + dprintk("%s() - malloc failed\n", __FUNCTION__);
15784 + memset(fcr, 0, sizeof(*fcr));
15786 + INIT_LIST_HEAD(&fcr->csessions);
15787 + filp->private_data = fcr;
15792 +cryptodev_release(struct inode *inode, struct file *filp)
15794 + struct fcrypt *fcr = filp->private_data;
15795 + struct csession *cse, *tmp;
15797 + dprintk("%s()\n", __FUNCTION__);
15799 + printk("cryptodev: No private data on release\n");
15803 + list_for_each_entry_safe(cse, tmp, &fcr->csessions, list) {
15804 + list_del(&cse->list);
15805 + (void)csefree(cse);
15807 + filp->private_data = NULL;
15812 +static struct file_operations cryptodev_fops = {
15813 + .owner = THIS_MODULE,
15814 + .open = cryptodev_open,
15815 + .release = cryptodev_release,
15816 + .ioctl = cryptodev_ioctl,
15817 +#ifdef HAVE_UNLOCKED_IOCTL
15818 + .unlocked_ioctl = cryptodev_unlocked_ioctl,
15822 +static struct miscdevice cryptodev = {
15823 + .minor = CRYPTODEV_MINOR,
15824 + .name = "crypto",
15825 + .fops = &cryptodev_fops,
15829 +cryptodev_init(void)
15833 + dprintk("%s(%p)\n", __FUNCTION__, cryptodev_init);
15834 + rc = misc_register(&cryptodev);
15836 + printk(KERN_ERR "cryptodev: registration of /dev/crypto failed\n");
15843 +static void __exit
15844 +cryptodev_exit(void)
15846 + dprintk("%s()\n", __FUNCTION__);
15847 + misc_deregister(&cryptodev);
15850 +module_init(cryptodev_init);
15851 +module_exit(cryptodev_exit);
15853 +MODULE_LICENSE("BSD");
15854 +MODULE_AUTHOR("David McCullough <david_mccullough@securecomputing.com>");
15855 +MODULE_DESCRIPTION("Cryptodev (user interface to OCF)");
15856 --- /dev/null 2007-07-04 13:54:27.000000000 +1000
15857 +++ linux/crypto/ocf/cryptodev.h 2007-09-26 22:15:05.000000000 +1000
15859 +/* $FreeBSD: src/sys/opencrypto/cryptodev.h,v 1.25 2007/05/09 19:37:02 gnn Exp $ */
15860 +/* $OpenBSD: cryptodev.h,v 1.31 2002/06/11 11:14:29 beck Exp $ */
15863 + * Linux port done by David McCullough <david_mccullough@securecomputing.com>
15864 + * Copyright (C) 2006-2007 David McCullough
15865 + * Copyright (C) 2004-2005 Intel Corporation.
15866 + * The license and original author are listed below.
15868 + * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
15869 + * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
15871 + * This code was written by Angelos D. Keromytis in Athens, Greece, in
15872 + * February 2000. Network Security Technologies Inc. (NSTI) kindly
15873 + * supported the development of this code.
15875 + * Copyright (c) 2000 Angelos D. Keromytis
15877 + * Permission to use, copy, and modify this software with or without fee
15878 + * is hereby granted, provided that this entire notice is included in
15879 + * all source code copies of any software which is or includes a copy or
15880 + * modification of this software.
15882 + * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
15883 + * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
15884 + * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
15885 + * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
15888 + * Copyright (c) 2001 Theo de Raadt
15890 + * Redistribution and use in source and binary forms, with or without
15891 + * modification, are permitted provided that the following conditions
15894 + * 1. Redistributions of source code must retain the above copyright
15895 + * notice, this list of conditions and the following disclaimer.
15896 + * 2. Redistributions in binary form must reproduce the above copyright
15897 + * notice, this list of conditions and the following disclaimer in the
15898 + * documentation and/or other materials provided with the distribution.
15899 + * 3. The name of the author may not be used to endorse or promote products
15900 + * derived from this software without specific prior written permission.
15902 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
15903 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
15904 + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
15905 + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
15906 + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
15907 + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
15908 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
15909 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
15910 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
15911 + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
15913 + * Effort sponsored in part by the Defense Advanced Research Projects
15914 + * Agency (DARPA) and Air Force Research Laboratory, Air Force
15915 + * Materiel Command, USAF, under agreement number F30602-01-2-0537.
15919 +#ifndef _CRYPTO_CRYPTO_H_
15920 +#define _CRYPTO_CRYPTO_H_
15922 +/* Some initial values */
15923 +#define CRYPTO_DRIVERS_INITIAL 4
15924 +#define CRYPTO_SW_SESSIONS 32
15927 +#define NULL_HASH_LEN 0
15928 +#define MD5_HASH_LEN 16
15929 +#define SHA1_HASH_LEN 20
15930 +#define RIPEMD160_HASH_LEN 20
15931 +#define SHA2_256_HASH_LEN 32
15932 +#define SHA2_384_HASH_LEN 48
15933 +#define SHA2_512_HASH_LEN 64
15934 +#define MD5_KPDK_HASH_LEN 16
15935 +#define SHA1_KPDK_HASH_LEN 20
15936 +/* Maximum hash algorithm result length */
15937 +#define HASH_MAX_LEN SHA2_512_HASH_LEN /* Keep this updated */
15940 +#define NULL_HMAC_BLOCK_LEN 1
15941 +#define MD5_HMAC_BLOCK_LEN 64
15942 +#define SHA1_HMAC_BLOCK_LEN 64
15943 +#define RIPEMD160_HMAC_BLOCK_LEN 64
15944 +#define SHA2_256_HMAC_BLOCK_LEN 64
15945 +#define SHA2_384_HMAC_BLOCK_LEN 128
15946 +#define SHA2_512_HMAC_BLOCK_LEN 128
15947 +/* Maximum HMAC block length */
15948 +#define HMAC_MAX_BLOCK_LEN SHA2_512_HMAC_BLOCK_LEN /* Keep this updated */
15949 +#define HMAC_IPAD_VAL 0x36
15950 +#define HMAC_OPAD_VAL 0x5C
15952 +/* Encryption algorithm block sizes */
15953 +#define NULL_BLOCK_LEN 1
15954 +#define DES_BLOCK_LEN 8
15955 +#define DES3_BLOCK_LEN 8
15956 +#define BLOWFISH_BLOCK_LEN 8
15957 +#define SKIPJACK_BLOCK_LEN 8
15958 +#define CAST128_BLOCK_LEN 8
15959 +#define RIJNDAEL128_BLOCK_LEN 16
15960 +#define AES_BLOCK_LEN RIJNDAEL128_BLOCK_LEN
15961 +#define CAMELLIA_BLOCK_LEN 16
15962 +#define ARC4_BLOCK_LEN 1
15963 +#define EALG_MAX_BLOCK_LEN AES_BLOCK_LEN /* Keep this updated */
15965 +/* Encryption algorithm min and max key sizes */
15966 +#define NULL_MIN_KEY_LEN 0
15967 +#define NULL_MAX_KEY_LEN 0
15968 +#define DES_MIN_KEY_LEN 8
15969 +#define DES_MAX_KEY_LEN 8
15970 +#define DES3_MIN_KEY_LEN 24
15971 +#define DES3_MAX_KEY_LEN 24
15972 +#define BLOWFISH_MIN_KEY_LEN 4
15973 +#define BLOWFISH_MAX_KEY_LEN 56
15974 +#define SKIPJACK_MIN_KEY_LEN 10
15975 +#define SKIPJACK_MAX_KEY_LEN 10
15976 +#define CAST128_MIN_KEY_LEN 5
15977 +#define CAST128_MAX_KEY_LEN 16
15978 +#define RIJNDAEL128_MIN_KEY_LEN 16
15979 +#define RIJNDAEL128_MAX_KEY_LEN 32
15980 +#define AES_MIN_KEY_LEN RIJNDAEL128_MIN_KEY_LEN
15981 +#define AES_MAX_KEY_LEN RIJNDAEL128_MAX_KEY_LEN
15982 +#define CAMELLIA_MIN_KEY_LEN 16
15983 +#define CAMELLIA_MAX_KEY_LEN 32
15984 +#define ARC4_MIN_KEY_LEN 1
15985 +#define ARC4_MAX_KEY_LEN 256
15987 +/* Max size of data that can be processed */
15988 +#define CRYPTO_MAX_DATA_LEN 64*1024 - 1
15990 +#define CRYPTO_ALGORITHM_MIN 1
15991 +#define CRYPTO_DES_CBC 1
15992 +#define CRYPTO_3DES_CBC 2
15993 +#define CRYPTO_BLF_CBC 3
15994 +#define CRYPTO_CAST_CBC 4
15995 +#define CRYPTO_SKIPJACK_CBC 5
15996 +#define CRYPTO_MD5_HMAC 6
15997 +#define CRYPTO_SHA1_HMAC 7
15998 +#define CRYPTO_RIPEMD160_HMAC 8
15999 +#define CRYPTO_MD5_KPDK 9
16000 +#define CRYPTO_SHA1_KPDK 10
16001 +#define CRYPTO_RIJNDAEL128_CBC 11 /* 128 bit blocksize */
16002 +#define CRYPTO_AES_CBC 11 /* 128 bit blocksize -- the same as above */
16003 +#define CRYPTO_ARC4 12
16004 +#define CRYPTO_MD5 13
16005 +#define CRYPTO_SHA1 14
16006 +#define CRYPTO_NULL_HMAC 15
16007 +#define CRYPTO_NULL_CBC 16
16008 +#define CRYPTO_DEFLATE_COMP 17 /* Deflate compression algorithm */
16009 +#define CRYPTO_SHA2_256_HMAC 18
16010 +#define CRYPTO_SHA2_384_HMAC 19
16011 +#define CRYPTO_SHA2_512_HMAC 20
16012 +#define CRYPTO_CAMELLIA_CBC 21
16013 +#define CRYPTO_SHA2_256 22
16014 +#define CRYPTO_SHA2_384 23
16015 +#define CRYPTO_SHA2_512 24
16016 +#define CRYPTO_RIPEMD160 25
16017 +#define CRYPTO_ALGORITHM_MAX 25 /* Keep updated - see below */
16019 +/* Algorithm flags */
16020 +#define CRYPTO_ALG_FLAG_SUPPORTED 0x01 /* Algorithm is supported */
16021 +#define CRYPTO_ALG_FLAG_RNG_ENABLE 0x02 /* Has HW RNG for DH/DSA */
16022 +#define CRYPTO_ALG_FLAG_DSA_SHA 0x04 /* Can do SHA on msg */
16025 + * Crypto driver/device flags. They can set in the crid
16026 + * parameter when creating a session or submitting a key
16027 + * op to affect the device/driver assigned. If neither
16028 + * of these are specified then the crid is assumed to hold
16029 + * the driver id of an existing (and suitable) device that
16030 + * must be used to satisfy the request.
16032 +#define CRYPTO_FLAG_HARDWARE 0x01000000 /* hardware accelerated */
16033 +#define CRYPTO_FLAG_SOFTWARE 0x02000000 /* software implementation */
16035 +/* NB: deprecated */
16036 +struct session_op {
16037 + u_int32_t cipher; /* ie. CRYPTO_DES_CBC */
16038 + u_int32_t mac; /* ie. CRYPTO_MD5_HMAC */
16040 + u_int32_t keylen; /* cipher key */
16042 + int mackeylen; /* mac key */
16045 + u_int32_t ses; /* returns: session # */
16048 +struct session2_op {
16049 + u_int32_t cipher; /* ie. CRYPTO_DES_CBC */
16050 + u_int32_t mac; /* ie. CRYPTO_MD5_HMAC */
16052 + u_int32_t keylen; /* cipher key */
16054 + int mackeylen; /* mac key */
16057 + u_int32_t ses; /* returns: session # */
16058 + int crid; /* driver id + flags (rw) */
16059 + int pad[4]; /* for future expansion */
16064 + u_int16_t op; /* i.e. COP_ENCRYPT */
16065 +#define COP_NONE 0
16066 +#define COP_ENCRYPT 1
16067 +#define COP_DECRYPT 2
16069 +#define COP_F_BATCH 0x0008 /* Batch op if possible */
16071 + caddr_t src, dst; /* become iov[] inside kernel */
16072 + caddr_t mac; /* must be big enough for chosen MAC */
16077 + * Parameters for looking up a crypto driver/device by
16078 + * device name or by id. The latter are returned for
16079 + * created sessions (crid) and completed key operations.
16081 +struct crypt_find_op {
16082 + int crid; /* driver id + flags */
16083 + char name[32]; /* device/driver name */
16086 +/* bignum parameter, in packed bytes, ... */
16092 +#define CRK_MAXPARAM 8
16094 +struct crypt_kop {
16095 + u_int crk_op; /* ie. CRK_MOD_EXP or other */
16096 + u_int crk_status; /* return status */
16097 + u_short crk_iparams; /* # of input parameters */
16098 + u_short crk_oparams; /* # of output parameters */
16099 + u_int crk_crid; /* NB: only used by CIOCKEY2 (rw) */
16100 + struct crparam crk_param[CRK_MAXPARAM];
16102 +#define CRK_ALGORITM_MIN 0
16103 +#define CRK_MOD_EXP 0
16104 +#define CRK_MOD_EXP_CRT 1
16105 +#define CRK_DSA_SIGN 2
16106 +#define CRK_DSA_VERIFY 3
16107 +#define CRK_DH_COMPUTE_KEY 4
16108 +#define CRK_ALGORITHM_MAX 4 /* Keep updated - see below */
16110 +#define CRF_MOD_EXP (1 << CRK_MOD_EXP)
16111 +#define CRF_MOD_EXP_CRT (1 << CRK_MOD_EXP_CRT)
16112 +#define CRF_DSA_SIGN (1 << CRK_DSA_SIGN)
16113 +#define CRF_DSA_VERIFY (1 << CRK_DSA_VERIFY)
16114 +#define CRF_DH_COMPUTE_KEY (1 << CRK_DH_COMPUTE_KEY)
16117 + * done against open of /dev/crypto, to get a cloned descriptor.
16118 + * Please use F_SETFD against the cloned descriptor.
16120 +#define CRIOGET _IOWR('c', 100, u_int32_t)
16121 +#define CRIOASYMFEAT CIOCASYMFEAT
16122 +#define CRIOFINDDEV CIOCFINDDEV
16124 +/* the following are done against the cloned descriptor */
16125 +#define CIOCGSESSION _IOWR('c', 101, struct session_op)
16126 +#define CIOCFSESSION _IOW('c', 102, u_int32_t)
16127 +#define CIOCCRYPT _IOWR('c', 103, struct crypt_op)
16128 +#define CIOCKEY _IOWR('c', 104, struct crypt_kop)
16129 +#define CIOCASYMFEAT _IOR('c', 105, u_int32_t)
16130 +#define CIOCGSESSION2 _IOWR('c', 106, struct session2_op)
16131 +#define CIOCKEY2 _IOWR('c', 107, struct crypt_kop)
16132 +#define CIOCFINDDEV _IOWR('c', 108, struct crypt_find_op)
16134 +struct cryptotstat {
16135 + struct timespec acc; /* total accumulated time */
16136 + struct timespec min; /* min time */
16137 + struct timespec max; /* max time */
16138 + u_int32_t count; /* number of observations */
16141 +struct cryptostats {
16142 + u_int32_t cs_ops; /* symmetric crypto ops submitted */
16143 + u_int32_t cs_errs; /* symmetric crypto ops that failed */
16144 + u_int32_t cs_kops; /* asymmetric/key ops submitted */
16145 + u_int32_t cs_kerrs; /* asymmetric/key ops that failed */
16146 + u_int32_t cs_intrs; /* crypto swi thread activations */
16147 + u_int32_t cs_rets; /* crypto return thread activations */
16148 + u_int32_t cs_blocks; /* symmetric op driver block */
16149 + u_int32_t cs_kblocks; /* symmetric op driver block */
16151 + * When CRYPTO_TIMING is defined at compile time and the
16152 + * sysctl debug.crypto is set to 1, the crypto system will
16153 + * accumulate statistics about how long it takes to process
16154 + * crypto requests at various points during processing.
16156 + struct cryptotstat cs_invoke; /* crypto_dispatch -> crypto_invoke */
16157 + struct cryptotstat cs_done; /* crypto_invoke -> crypto_done */
16158 + struct cryptotstat cs_cb; /* crypto_done -> callback */
16159 + struct cryptotstat cs_finis; /* callback -> callback return */
16161 + u_int32_t cs_drops; /* crypto ops dropped due to congestion */
16166 +/* Standard initialization structure beginning */
16167 +struct cryptoini {
16168 + int cri_alg; /* Algorithm to use */
16169 + int cri_klen; /* Key length, in bits */
16170 + int cri_mlen; /* Number of bytes we want from the
16171 + entire hash. 0 means all. */
16172 + caddr_t cri_key; /* key to use */
16173 + u_int8_t cri_iv[EALG_MAX_BLOCK_LEN]; /* IV to use */
16174 + struct cryptoini *cri_next;
16177 +/* Describe boundaries of a single crypto operation */
16178 +struct cryptodesc {
16179 + int crd_skip; /* How many bytes to ignore from start */
16180 + int crd_len; /* How many bytes to process */
16181 + int crd_inject; /* Where to inject results, if applicable */
16184 +#define CRD_F_ENCRYPT 0x01 /* Set when doing encryption */
16185 +#define CRD_F_IV_PRESENT 0x02 /* When encrypting, IV is already in
16186 + place, so don't copy. */
16187 +#define CRD_F_IV_EXPLICIT 0x04 /* IV explicitly provided */
16188 +#define CRD_F_DSA_SHA_NEEDED 0x08 /* Compute SHA-1 of buffer for DSA */
16189 +#define CRD_F_KEY_EXPLICIT 0x10 /* Key explicitly provided */
16190 +#define CRD_F_COMP 0x0f /* Set when doing compression */
16192 + struct cryptoini CRD_INI; /* Initialization/context data */
16193 +#define crd_iv CRD_INI.cri_iv
16194 +#define crd_key CRD_INI.cri_key
16195 +#define crd_alg CRD_INI.cri_alg
16196 +#define crd_klen CRD_INI.cri_klen
16198 + struct cryptodesc *crd_next;
16201 +/* Structure describing complete operation */
16203 + struct list_head crp_next;
16204 + wait_queue_head_t crp_waitq;
16206 + u_int64_t crp_sid; /* Session ID */
16207 + int crp_ilen; /* Input data total length */
16208 + int crp_olen; /* Result total length */
16210 + int crp_etype; /*
16211 + * Error type (zero means no error).
16212 + * All error codes except EAGAIN
16213 + * indicate possible data corruption (as in,
16214 + * the data have been touched). On all
16215 + * errors, the crp_sid may have changed
16216 + * (reset to a new one), so the caller
16217 + * should always check and use the new
16218 + * value on future requests.
16222 +#define CRYPTO_F_SKBUF 0x0001 /* Input/output are skbuf chains */
16223 +#define CRYPTO_F_IOV 0x0002 /* Input/output are uio */
16224 +#define CRYPTO_F_REL 0x0004 /* Must return data in same place */
16225 +#define CRYPTO_F_BATCH 0x0008 /* Batch op if possible */
16226 +#define CRYPTO_F_CBIMM 0x0010 /* Do callback immediately */
16227 +#define CRYPTO_F_DONE 0x0020 /* Operation completed */
16228 +#define CRYPTO_F_CBIFSYNC 0x0040 /* Do CBIMM if op is synchronous */
16230 + caddr_t crp_buf; /* Data to be processed */
16231 + caddr_t crp_opaque; /* Opaque pointer, passed along */
16232 + struct cryptodesc *crp_desc; /* Linked list of processing descriptors */
16234 + int (*crp_callback)(struct cryptop *); /* Callback function */
16237 +#define CRYPTO_BUF_CONTIG 0x0
16238 +#define CRYPTO_BUF_IOV 0x1
16239 +#define CRYPTO_BUF_SKBUF 0x2
16241 +#define CRYPTO_OP_DECRYPT 0x0
16242 +#define CRYPTO_OP_ENCRYPT 0x1
16245 + * Hints passed to process methods.
16247 +#define CRYPTO_HINT_MORE 0x1 /* more ops coming shortly */
16250 + struct list_head krp_next;
16251 + wait_queue_head_t krp_waitq;
16254 +#define CRYPTO_KF_DONE 0x0001 /* Operation completed */
16255 +#define CRYPTO_KF_CBIMM 0x0002 /* Do callback immediately */
16257 + u_int krp_op; /* ie. CRK_MOD_EXP or other */
16258 + u_int krp_status; /* return status */
16259 + u_short krp_iparams; /* # of input parameters */
16260 + u_short krp_oparams; /* # of output parameters */
16261 + u_int krp_crid; /* desired device, etc. */
16262 + u_int32_t krp_hid;
16263 + struct crparam krp_param[CRK_MAXPARAM]; /* kvm */
16264 + int (*krp_callback)(struct cryptkop *);
16267 +#include <ocf-compat.h>
16270 + * Session ids are 64 bits. The lower 32 bits contain a "local id" which
16271 + * is a driver-private session identifier. The upper 32 bits contain a
16272 + * "hardware id" used by the core crypto code to identify the driver and
16273 + * a copy of the driver's capabilities that can be used by client code to
16274 + * optimize operation.
16276 +#define CRYPTO_SESID2HID(_sid) (((_sid) >> 32) & 0x00ffffff)
16277 +#define CRYPTO_SESID2CAPS(_sid) (((_sid) >> 32) & 0xff000000)
16278 +#define CRYPTO_SESID2LID(_sid) (((u_int32_t) (_sid)) & 0xffffffff)
16280 +extern int crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int hard);
16281 +extern int crypto_freesession(u_int64_t sid);
16282 +#define CRYPTOCAP_F_HARDWARE CRYPTO_FLAG_HARDWARE
16283 +#define CRYPTOCAP_F_SOFTWARE CRYPTO_FLAG_SOFTWARE
16284 +#define CRYPTOCAP_F_SYNC 0x04000000 /* operates synchronously */
16285 +extern int32_t crypto_get_driverid(device_t dev, int flags);
16286 +extern int crypto_find_driver(const char *);
16287 +extern device_t crypto_find_device_byhid(int hid);
16288 +extern int crypto_getcaps(int hid);
16289 +extern int crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen,
16290 + u_int32_t flags);
16291 +extern int crypto_kregister(u_int32_t, int, u_int32_t);
16292 +extern int crypto_unregister(u_int32_t driverid, int alg);
16293 +extern int crypto_unregister_all(u_int32_t driverid);
16294 +extern int crypto_dispatch(struct cryptop *crp);
16295 +extern int crypto_kdispatch(struct cryptkop *);
16296 +#define CRYPTO_SYMQ 0x1
16297 +#define CRYPTO_ASYMQ 0x2
16298 +extern int crypto_unblock(u_int32_t, int);
16299 +extern void crypto_done(struct cryptop *crp);
16300 +extern void crypto_kdone(struct cryptkop *);
16301 +extern int crypto_getfeat(int *);
16303 +extern void crypto_freereq(struct cryptop *crp);
16304 +extern struct cryptop *crypto_getreq(int num);
16306 +extern int crypto_usercrypto; /* userland may do crypto requests */
16307 +extern int crypto_userasymcrypto; /* userland may do asym crypto reqs */
16308 +extern int crypto_devallowsoft; /* only use hardware crypto */
16311 + * random number support, crypto_unregister_all will unregister
16313 +extern int crypto_rregister(u_int32_t driverid,
16314 + int (*read_random)(void *arg, u_int32_t *buf, int len), void *arg);
16315 +extern int crypto_runregister_all(u_int32_t driverid);
16318 + * Crypto-related utility routines used mainly by drivers.
16320 + * XXX these don't really belong here; but for now they're
16321 + * kept apart from the rest of the system.
16324 +extern void cuio_copydata(struct uio* uio, int off, int len, caddr_t cp);
16325 +extern void cuio_copyback(struct uio* uio, int off, int len, caddr_t cp);
16326 +extern struct iovec *cuio_getptr(struct uio *uio, int loc, int *off);
16328 +extern void crypto_copyback(int flags, caddr_t buf, int off, int size,
16330 +extern void crypto_copydata(int flags, caddr_t buf, int off, int size,
16332 +extern int crypto_apply(int flags, caddr_t buf, int off, int len,
16333 + int (*f)(void *, void *, u_int), void *arg);
16335 +#endif /* __KERNEL__ */
16336 +#endif /* _CRYPTO_CRYPTO_H_ */
16337 --- /dev/null 2007-07-04 13:54:27.000000000 +1000
16338 +++ linux/crypto/ocf/ocfnull/ocfnull.c 2007-07-13 16:02:05.000000000 +1000
16341 + * An OCF module for determining the cost of crypto versus the cost of
16342 + * IPSec processing outside of OCF. This modules gives us the effect of
16343 + * zero cost encryption, of course you will need to run it at both ends
16344 + * since it does no crypto at all.
16346 + * Written by David McCullough <david_mccullough@securecomputing.com>
16347 + * Copyright (C) 2006-2007 David McCullough
16351 + * The free distribution and use of this software in both source and binary
16352 + * form is allowed (with or without changes) provided that:
16354 + * 1. distributions of this source code include the above copyright
16355 + * notice, this list of conditions and the following disclaimer;
16357 + * 2. distributions in binary form include the above copyright
16358 + * notice, this list of conditions and the following disclaimer
16359 + * in the documentation and/or other associated materials;
16361 + * 3. the copyright holder's name is not used to endorse products
16362 + * built using this software without specific written permission.
16364 + * ALTERNATIVELY, provided that this notice is retained in full, this product
16365 + * may be distributed under the terms of the GNU General Public License (GPL),
16366 + * in which case the provisions of the GPL apply INSTEAD OF those given above.
16370 + * This software is provided 'as is' with no explicit or implied warranties
16371 + * in respect of its properties, including, but not limited to, correctness
16372 + * and/or fitness for purpose.
16375 +#ifndef AUTOCONF_INCLUDED
16376 +#include <linux/config.h>
16378 +#include <linux/module.h>
16379 +#include <linux/init.h>
16380 +#include <linux/list.h>
16381 +#include <linux/slab.h>
16382 +#include <linux/sched.h>
16383 +#include <linux/wait.h>
16384 +#include <linux/crypto.h>
16385 +#include <linux/interrupt.h>
16387 +#include <cryptodev.h>
16390 +static int32_t null_id = -1;
16391 +static u_int32_t null_sesnum = 0;
16393 +static int null_process(device_t, struct cryptop *, int);
16394 +static int null_newsession(device_t, u_int32_t *, struct cryptoini *);
16395 +static int null_freesession(device_t, u_int64_t);
16397 +#define debug ocfnull_debug
16398 +int ocfnull_debug = 0;
16399 +module_param(ocfnull_debug, int, 0644);
16400 +MODULE_PARM_DESC(ocfnull_debug, "Enable debug");
16403 + * dummy device structure
16407 + softc_device_decl sc_dev;
16410 +static device_method_t null_methods = {
16411 + /* crypto device methods */
16412 + DEVMETHOD(cryptodev_newsession, null_newsession),
16413 + DEVMETHOD(cryptodev_freesession,null_freesession),
16414 + DEVMETHOD(cryptodev_process, null_process),
16418 + * Generate a new software session.
16421 +null_newsession(device_t arg, u_int32_t *sid, struct cryptoini *cri)
16423 + dprintk("%s()\n", __FUNCTION__);
16424 + if (sid == NULL || cri == NULL) {
16425 + dprintk("%s,%d - EINVAL\n", __FILE__, __LINE__);
16429 + if (null_sesnum == 0)
16431 + *sid = null_sesnum++;
16437 + * Free a session.
16440 +null_freesession(device_t arg, u_int64_t tid)
16442 + u_int32_t sid = CRYPTO_SESID2LID(tid);
16444 + dprintk("%s()\n", __FUNCTION__);
16445 + if (sid > null_sesnum) {
16446 + dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
16450 + /* Silently accept and return */
16458 + * Process a request.
16461 +null_process(device_t arg, struct cryptop *crp, int hint)
16463 + unsigned int lid;
16465 + dprintk("%s()\n", __FUNCTION__);
16467 + /* Sanity check */
16468 + if (crp == NULL) {
16469 + dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
16473 + crp->crp_etype = 0;
16475 + if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
16476 + dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
16477 + crp->crp_etype = EINVAL;
16482 + * find the session we are using
16485 + lid = crp->crp_sid & 0xffffffff;
16486 + if (lid >= null_sesnum || lid == 0) {
16487 + crp->crp_etype = ENOENT;
16488 + dprintk("%s,%d: ENOENT\n", __FILE__, __LINE__);
16493 + crypto_done(crp);
16499 + * our driver startup and shutdown routines
16505 + dprintk("%s(%p)\n", __FUNCTION__, null_init);
16507 + memset(&nulldev, 0, sizeof(nulldev));
16508 + softc_device_init(&nulldev, "ocfnull", 0, null_methods);
16510 + null_id = crypto_get_driverid(softc_get_device(&nulldev),
16511 + CRYPTOCAP_F_HARDWARE);
16513 + panic("ocfnull: crypto device cannot initialize!");
16515 +#define REGISTER(alg) \
16516 + crypto_register(null_id,alg,0,0)
16517 + REGISTER(CRYPTO_DES_CBC);
16518 + REGISTER(CRYPTO_3DES_CBC);
16519 + REGISTER(CRYPTO_RIJNDAEL128_CBC);
16520 + REGISTER(CRYPTO_MD5);
16521 + REGISTER(CRYPTO_SHA1);
16522 + REGISTER(CRYPTO_MD5_HMAC);
16523 + REGISTER(CRYPTO_SHA1_HMAC);
16532 + dprintk("%s()\n", __FUNCTION__);
16533 + crypto_unregister_all(null_id);
16537 +module_init(null_init);
16538 +module_exit(null_exit);
16540 +MODULE_LICENSE("Dual BSD/GPL");
16541 +MODULE_AUTHOR("David McCullough <david_mccullough@securecomputing.com>");
16542 +MODULE_DESCRIPTION("ocfnull - claims a lot but does nothing");
16543 --- /dev/null 2007-07-04 13:54:27.000000000 +1000
16544 +++ linux/crypto/ocf/cryptosoft.c 2008-02-14 14:59:01.000000000 +1000
16547 + * An OCF module that uses the linux kernel cryptoapi, based on the
16548 + * original cryptosoft for BSD by Angelos D. Keromytis (angelos@cis.upenn.edu)
16549 + * but is mostly unrecognisable,
16551 + * Written by David McCullough <david_mccullough@securecomputing.com>
16552 + * Copyright (C) 2004-2007 David McCullough
16553 + * Copyright (C) 2004-2005 Intel Corporation.
16557 + * The free distribution and use of this software in both source and binary
16558 + * form is allowed (with or without changes) provided that:
16560 + * 1. distributions of this source code include the above copyright
16561 + * notice, this list of conditions and the following disclaimer;
16563 + * 2. distributions in binary form include the above copyright
16564 + * notice, this list of conditions and the following disclaimer
16565 + * in the documentation and/or other associated materials;
16567 + * 3. the copyright holder's name is not used to endorse products
16568 + * built using this software without specific written permission.
16570 + * ALTERNATIVELY, provided that this notice is retained in full, this product
16571 + * may be distributed under the terms of the GNU General Public License (GPL),
16572 + * in which case the provisions of the GPL apply INSTEAD OF those given above.
16576 + * This software is provided 'as is' with no explicit or implied warranties
16577 + * in respect of its properties, including, but not limited to, correctness
16578 + * and/or fitness for purpose.
16579 + * ---------------------------------------------------------------------------
16582 +#ifndef AUTOCONF_INCLUDED
16583 +#include <linux/config.h>
16585 +#include <linux/module.h>
16586 +#include <linux/init.h>
16587 +#include <linux/list.h>
16588 +#include <linux/slab.h>
16589 +#include <linux/sched.h>
16590 +#include <linux/wait.h>
16591 +#include <linux/crypto.h>
16592 +#include <linux/mm.h>
16593 +#include <linux/skbuff.h>
16594 +#include <linux/random.h>
16595 +#include <asm/scatterlist.h>
16597 +#include <cryptodev.h>
16601 + softc_device_decl sc_dev;
16604 +#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)
16606 +/* Software session entry */
16608 +#define SW_TYPE_CIPHER 0
16609 +#define SW_TYPE_HMAC 1
16610 +#define SW_TYPE_AUTH2 2
16611 +#define SW_TYPE_HASH 3
16612 +#define SW_TYPE_COMP 4
16613 +#define SW_TYPE_BLKCIPHER 5
16615 +struct swcr_data {
16618 + struct crypto_tfm *sw_tfm;
16625 + void *sw_comp_buf;
16627 + struct swcr_data *sw_next;
16630 +#ifndef CRYPTO_TFM_MODE_CBC
16632 + * As of linux-2.6.21 this is no longer defined, and presumably no longer
16633 + * needed to be passed into the crypto core code.
16635 +#define CRYPTO_TFM_MODE_CBC 0
16636 +#define CRYPTO_TFM_MODE_ECB 0
16639 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
16641 + * Linux 2.6.19 introduced a new Crypto API, setup macro's to convert new
16642 + * API into old API.
16645 + /* Symmetric/Block Cipher */
16646 + struct blkcipher_desc
16648 + struct crypto_tfm *tfm;
16651 + #define ecb(X) #X
16652 + #define cbc(X) #X
16653 + #define crypto_has_blkcipher(X, Y, Z) crypto_alg_available(X, 0)
16654 + #define crypto_blkcipher_cast(X) X
16655 + #define crypto_blkcipher_tfm(X) X
16656 + #define crypto_alloc_blkcipher(X, Y, Z) crypto_alloc_tfm(X, mode)
16657 + #define crypto_blkcipher_ivsize(X) crypto_tfm_alg_ivsize(X)
16658 + #define crypto_blkcipher_blocksize(X) crypto_tfm_alg_blocksize(X)
16659 + #define crypto_blkcipher_setkey(X, Y, Z) crypto_cipher_setkey(X, Y, Z)
16660 + #define crypto_blkcipher_encrypt_iv(W, X, Y, Z) \
16661 + crypto_cipher_encrypt_iv((W)->tfm, X, Y, Z, (u8 *)((W)->info))
16662 + #define crypto_blkcipher_decrypt_iv(W, X, Y, Z) \
16663 + crypto_cipher_decrypt_iv((W)->tfm, X, Y, Z, (u8 *)((W)->info))
16665 + /* Hash/HMAC/Digest */
16668 + struct crypto_tfm *tfm;
16670 + #define hmac(X) #X
16671 + #define crypto_has_hash(X, Y, Z) crypto_alg_available(X, 0)
16672 + #define crypto_hash_cast(X) X
16673 + #define crypto_hash_tfm(X) X
16674 + #define crypto_alloc_hash(X, Y, Z) crypto_alloc_tfm(X, mode)
16675 + #define crypto_hash_digestsize(X) crypto_tfm_alg_digestsize(X)
16676 + #define crypto_hash_digest(W, X, Y, Z) \
16677 + crypto_digest_digest((W)->tfm, X, sg_num, Z)
16679 + /* Asymmetric Cipher */
16680 + #define crypto_has_cipher(X, Y, Z) crypto_alg_available(X, 0)
16682 + /* Compression */
16683 + #define crypto_has_comp(X, Y, Z) crypto_alg_available(X, 0)
16684 + #define crypto_comp_tfm(X) X
16685 + #define crypto_comp_cast(X) X
16686 + #define crypto_alloc_comp(X, Y, Z) crypto_alloc_tfm(X, mode)
16688 + #define ecb(X) "ecb(" #X ")"
16689 + #define cbc(X) "cbc(" #X ")"
16690 + #define hmac(X) "hmac(" #X ")"
16691 +#endif /* if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) */
16693 +struct crypto_details
16701 + * This needs to be kept updated with CRYPTO_xxx list (cryptodev.h).
16702 + * If the Algorithm is not supported, then insert a {NULL, 0, 0} entry.
16704 + * IMPORTANT: The index to the array IS CRYPTO_xxx.
16706 +static struct crypto_details crypto_details[CRYPTO_ALGORITHM_MAX + 1] = {
16708 + /* CRYPTO_xxx index starts at 1 */
16709 + { cbc(des), CRYPTO_TFM_MODE_CBC, SW_TYPE_BLKCIPHER },
16710 + { cbc(des3_ede), CRYPTO_TFM_MODE_CBC, SW_TYPE_BLKCIPHER },
16711 + { cbc(blowfish), CRYPTO_TFM_MODE_CBC, SW_TYPE_BLKCIPHER },
16712 + { cbc(cast5), CRYPTO_TFM_MODE_CBC, SW_TYPE_BLKCIPHER },
16713 + { cbc(skipjack), CRYPTO_TFM_MODE_CBC, SW_TYPE_BLKCIPHER },
16714 + { hmac(md5), 0, SW_TYPE_HMAC },
16715 + { hmac(sha1), 0, SW_TYPE_HMAC },
16716 + { hmac(ripemd160), 0, SW_TYPE_HMAC },
16717 + { "md5-kpdk??", 0, SW_TYPE_HASH },
16718 + { "sha1-kpdk??", 0, SW_TYPE_HASH },
16719 + { cbc(aes), CRYPTO_TFM_MODE_CBC, SW_TYPE_BLKCIPHER },
16720 + { ecb(arc4), CRYPTO_TFM_MODE_ECB, SW_TYPE_BLKCIPHER },
16721 + { "md5", 0, SW_TYPE_HASH },
16722 + { "sha1", 0, SW_TYPE_HASH },
16723 + { hmac(digest_null), 0, SW_TYPE_HMAC },
16724 + { cbc(cipher_null), CRYPTO_TFM_MODE_CBC, SW_TYPE_BLKCIPHER },
16725 + { "deflate", 0, SW_TYPE_COMP },
16726 + { hmac(sha256), 0, SW_TYPE_HMAC },
16727 + { hmac(sha384), 0, SW_TYPE_HMAC },
16728 + { hmac(sha512), 0, SW_TYPE_HMAC },
16729 + { cbc(camellia), CRYPTO_TFM_MODE_CBC, SW_TYPE_BLKCIPHER },
16730 + { "sha256", 0, SW_TYPE_HASH },
16731 + { "sha384", 0, SW_TYPE_HASH },
16732 + { "sha512", 0, SW_TYPE_HASH },
16733 + { "ripemd160", 0, SW_TYPE_HASH },
16736 +int32_t swcr_id = -1;
16737 +module_param(swcr_id, int, 0444);
16738 +MODULE_PARM_DESC(swcr_id, "Read-Only OCF ID for cryptosoft driver");
16740 +int swcr_fail_if_compression_grows = 1;
16741 +module_param(swcr_fail_if_compression_grows, int, 0644);
16742 +MODULE_PARM_DESC(swcr_fail_if_compression_grows,
16743 + "Treat compression that results in more data as a failure");
16745 +static struct swcr_data **swcr_sessions = NULL;
16746 +static u_int32_t swcr_sesnum = 0;
16748 +static int swcr_process(device_t, struct cryptop *, int);
16749 +static int swcr_newsession(device_t, u_int32_t *, struct cryptoini *);
16750 +static int swcr_freesession(device_t, u_int64_t);
16752 +static device_method_t swcr_methods = {
16753 + /* crypto device methods */
16754 + DEVMETHOD(cryptodev_newsession, swcr_newsession),
16755 + DEVMETHOD(cryptodev_freesession,swcr_freesession),
16756 + DEVMETHOD(cryptodev_process, swcr_process),
16759 +#define debug swcr_debug
16760 +int swcr_debug = 0;
16761 +module_param(swcr_debug, int, 0644);
16762 +MODULE_PARM_DESC(swcr_debug, "Enable debug");
16765 + * Generate a new software session.
16768 +swcr_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
16770 + struct swcr_data **swd;
16774 + int mode, sw_type;
16776 + dprintk("%s()\n", __FUNCTION__);
16777 + if (sid == NULL || cri == NULL) {
16778 + dprintk("%s,%d - EINVAL\n", __FILE__, __LINE__);
16782 + if (swcr_sessions) {
16783 + for (i = 1; i < swcr_sesnum; i++)
16784 + if (swcr_sessions[i] == NULL)
16787 + i = 1; /* NB: to silence compiler warning */
16789 + if (swcr_sessions == NULL || i == swcr_sesnum) {
16790 + if (swcr_sessions == NULL) {
16791 + i = 1; /* We leave swcr_sessions[0] empty */
16792 + swcr_sesnum = CRYPTO_SW_SESSIONS;
16794 + swcr_sesnum *= 2;
16796 + swd = kmalloc(swcr_sesnum * sizeof(struct swcr_data *), SLAB_ATOMIC);
16797 + if (swd == NULL) {
16798 + /* Reset session number */
16799 + if (swcr_sesnum == CRYPTO_SW_SESSIONS)
16802 + swcr_sesnum /= 2;
16803 + dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
16806 + memset(swd, 0, swcr_sesnum * sizeof(struct swcr_data *));
16808 + /* Copy existing sessions */
16809 + if (swcr_sessions) {
16810 + memcpy(swd, swcr_sessions,
16811 + (swcr_sesnum / 2) * sizeof(struct swcr_data *));
16812 + kfree(swcr_sessions);
16815 + swcr_sessions = swd;
16818 + swd = &swcr_sessions[i];
16822 + *swd = (struct swcr_data *) kmalloc(sizeof(struct swcr_data),
16824 + if (*swd == NULL) {
16825 + swcr_freesession(NULL, i);
16826 + dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
16829 + memset(*swd, 0, sizeof(struct swcr_data));
16831 + if (cri->cri_alg > CRYPTO_ALGORITHM_MAX) {
16832 + printk("cryptosoft: Unknown algorithm 0x%x\n", cri->cri_alg);
16833 + swcr_freesession(NULL, i);
16837 + algo = crypto_details[cri->cri_alg].alg_name;
16838 + if (!algo || !*algo) {
16839 + printk("cryptosoft: Unsupported algorithm 0x%x\n", cri->cri_alg);
16840 + swcr_freesession(NULL, i);
16844 + mode = crypto_details[cri->cri_alg].mode;
16845 + sw_type = crypto_details[cri->cri_alg].sw_type;
16847 + /* Algorithm specific configuration */
16848 + switch (cri->cri_alg) {
16849 + case CRYPTO_NULL_CBC:
16850 + cri->cri_klen = 0; /* make it work with crypto API */
16856 + if (sw_type == SW_TYPE_BLKCIPHER) {
16857 + dprintk("%s crypto_alloc_blkcipher(%s, 0x%x)\n", __FUNCTION__,
16860 + (*swd)->sw_tfm = crypto_blkcipher_tfm(
16861 + crypto_alloc_blkcipher(algo, 0,
16862 + CRYPTO_ALG_ASYNC));
16863 + if (!(*swd)->sw_tfm) {
16864 + dprintk("cryptosoft: crypto_alloc_blkcipher failed(%s,0x%x)\n",
16866 + swcr_freesession(NULL, i);
16871 + dprintk("%s key:cri->cri_klen=%d,(cri->cri_klen + 7)/8=%d",
16872 + __FUNCTION__,cri->cri_klen,(cri->cri_klen + 7)/8);
16873 + for (i = 0; i < (cri->cri_klen + 7) / 8; i++)
16875 + dprintk("%s0x%x", (i % 8) ? " " : "\n ",cri->cri_key[i]);
16879 + error = crypto_blkcipher_setkey(
16880 + crypto_blkcipher_cast((*swd)->sw_tfm), cri->cri_key,
16881 + (cri->cri_klen + 7) / 8);
16883 + printk("cryptosoft: setkey failed %d (crt_flags=0x%x)\n", error,
16884 + (*swd)->sw_tfm->crt_flags);
16885 + swcr_freesession(NULL, i);
16888 + } else if (sw_type == SW_TYPE_HMAC || sw_type == SW_TYPE_HASH) {
16889 + dprintk("%s crypto_alloc_hash(%s, 0x%x)\n", __FUNCTION__,
16892 + (*swd)->sw_tfm = crypto_hash_tfm(
16893 + crypto_alloc_hash(algo, 0, CRYPTO_ALG_ASYNC));
16895 + if (!(*swd)->sw_tfm) {
16896 + dprintk("cryptosoft: crypto_alloc_hash failed(%s,0x%x)\n",
16898 + swcr_freesession(NULL, i);
16902 + (*swd)->u.hmac.sw_klen = (cri->cri_klen + 7) / 8;
16903 + (*swd)->u.hmac.sw_key = (char *)kmalloc((*swd)->u.hmac.sw_klen,
16905 + if ((*swd)->u.hmac.sw_key == NULL) {
16906 + swcr_freesession(NULL, i);
16907 + dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
16910 + memcpy((*swd)->u.hmac.sw_key, cri->cri_key, (*swd)->u.hmac.sw_klen);
16911 + if (cri->cri_mlen) {
16912 + (*swd)->u.hmac.sw_mlen = cri->cri_mlen;
16914 + (*swd)->u.hmac.sw_mlen =
16915 + crypto_hash_digestsize(
16916 + crypto_hash_cast((*swd)->sw_tfm));
16918 + } else if (sw_type == SW_TYPE_COMP) {
16919 + (*swd)->sw_tfm = crypto_comp_tfm(
16920 + crypto_alloc_comp(algo, 0, CRYPTO_ALG_ASYNC));
16921 + if (!(*swd)->sw_tfm) {
16922 + dprintk("cryptosoft: crypto_alloc_comp failed(%s,0x%x)\n",
16924 + swcr_freesession(NULL, i);
16927 + (*swd)->u.sw_comp_buf = kmalloc(CRYPTO_MAX_DATA_LEN, SLAB_ATOMIC);
16928 + if ((*swd)->u.sw_comp_buf == NULL) {
16929 + swcr_freesession(NULL, i);
16930 + dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
16934 + printk("cryptosoft: Unhandled sw_type %d\n", sw_type);
16935 + swcr_freesession(NULL, i);
16939 + (*swd)->sw_alg = cri->cri_alg;
16940 + (*swd)->sw_type = sw_type;
16942 + cri = cri->cri_next;
16943 + swd = &((*swd)->sw_next);
16949 + * Free a session.
16952 +swcr_freesession(device_t dev, u_int64_t tid)
16954 + struct swcr_data *swd;
16955 + u_int32_t sid = CRYPTO_SESID2LID(tid);
16957 + dprintk("%s()\n", __FUNCTION__);
16958 + if (sid > swcr_sesnum || swcr_sessions == NULL ||
16959 + swcr_sessions[sid] == NULL) {
16960 + dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
16964 + /* Silently accept and return */
16968 + while ((swd = swcr_sessions[sid]) != NULL) {
16969 + swcr_sessions[sid] = swd->sw_next;
16971 + crypto_free_tfm(swd->sw_tfm);
16972 + if (swd->sw_type == SW_TYPE_COMP) {
16973 + if (swd->u.sw_comp_buf)
16974 + kfree(swd->u.sw_comp_buf);
16976 + if (swd->u.hmac.sw_key)
16977 + kfree(swd->u.hmac.sw_key);
16985 + * Process a software request.
16988 +swcr_process(device_t dev, struct cryptop *crp, int hint)
16990 + struct cryptodesc *crd;
16991 + struct swcr_data *sw;
16993 +#define SCATTERLIST_MAX 16
16994 + struct scatterlist sg[SCATTERLIST_MAX];
16995 + int sg_num, sg_len, skip;
16996 + struct sk_buff *skb = NULL;
16997 + struct uio *uiop = NULL;
16999 + dprintk("%s()\n", __FUNCTION__);
17000 + /* Sanity check */
17001 + if (crp == NULL) {
17002 + dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
17006 + crp->crp_etype = 0;
17008 + if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
17009 + dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
17010 + crp->crp_etype = EINVAL;
17014 + lid = crp->crp_sid & 0xffffffff;
17015 + if (lid >= swcr_sesnum || lid == 0 || swcr_sessions == NULL ||
17016 + swcr_sessions[lid] == NULL) {
17017 + crp->crp_etype = ENOENT;
17018 + dprintk("%s,%d: ENOENT\n", __FILE__, __LINE__);
17023 + * do some error checking outside of the loop for SKB and IOV processing
17024 + * this leaves us with valid skb or uiop pointers for later
17026 + if (crp->crp_flags & CRYPTO_F_SKBUF) {
17027 + skb = (struct sk_buff *) crp->crp_buf;
17028 + if (skb_shinfo(skb)->nr_frags >= SCATTERLIST_MAX) {
17029 + printk("%s,%d: %d nr_frags > SCATTERLIST_MAX", __FILE__, __LINE__,
17030 + skb_shinfo(skb)->nr_frags);
17033 + } else if (crp->crp_flags & CRYPTO_F_IOV) {
17034 + uiop = (struct uio *) crp->crp_buf;
17035 + if (uiop->uio_iovcnt > SCATTERLIST_MAX) {
17036 + printk("%s,%d: %d uio_iovcnt > SCATTERLIST_MAX", __FILE__, __LINE__,
17037 + uiop->uio_iovcnt);
17042 + /* Go through crypto descriptors, processing as we go */
17043 + for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
17045 + * Find the crypto context.
17047 + * XXX Note that the logic here prevents us from having
17048 + * XXX the same algorithm multiple times in a session
17049 + * XXX (or rather, we can but it won't give us the right
17050 + * XXX results). To do that, we'd need some way of differentiating
17051 + * XXX between the various instances of an algorithm (so we can
17052 + * XXX locate the correct crypto context).
17054 + for (sw = swcr_sessions[lid]; sw && sw->sw_alg != crd->crd_alg;
17055 + sw = sw->sw_next)
17058 + /* No such context ? */
17059 + if (sw == NULL) {
17060 + crp->crp_etype = EINVAL;
17061 + dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
17065 + skip = crd->crd_skip;
17068 + * setup the SG list skip from the start of the buffer
17070 + memset(sg, 0, sizeof(sg));
17071 + if (crp->crp_flags & CRYPTO_F_SKBUF) {
17077 + if (skip < skb_headlen(skb)) {
17078 + len = skb_headlen(skb) - skip;
17079 + if (len + sg_len > crd->crd_len)
17080 + len = crd->crd_len - sg_len;
17081 + sg_set_page(&sg[sg_num],
17082 + virt_to_page(skb->data + skip), len,
17083 + offset_in_page(skb->data + skip));
17088 + skip -= skb_headlen(skb);
17090 + for (i = 0; sg_len < crd->crd_len &&
17091 + i < skb_shinfo(skb)->nr_frags &&
17092 + sg_num < SCATTERLIST_MAX; i++) {
17093 + if (skip < skb_shinfo(skb)->frags[i].size) {
17094 + len = skb_shinfo(skb)->frags[i].size - skip;
17095 + if (len + sg_len > crd->crd_len)
17096 + len = crd->crd_len - sg_len;
17097 + sg_set_page(&sg[sg_num],
17098 + skb_shinfo(skb)->frags[i].page,
17100 + skb_shinfo(skb)->frags[i].page_offset + skip);
17105 + skip -= skb_shinfo(skb)->frags[i].size;
17107 + } else if (crp->crp_flags & CRYPTO_F_IOV) {
17111 + for (sg_num = 0; sg_len <= crd->crd_len &&
17112 + sg_num < uiop->uio_iovcnt &&
17113 + sg_num < SCATTERLIST_MAX; sg_num++) {
17114 + if (skip <= uiop->uio_iov[sg_num].iov_len) {
17115 + len = uiop->uio_iov[sg_num].iov_len - skip;
17116 + if (len + sg_len > crd->crd_len)
17117 + len = crd->crd_len - sg_len;
17118 + sg_set_page(&sg[sg_num],
17119 + virt_to_page(uiop->uio_iov[sg_num].iov_base+skip),
17121 + offset_in_page(uiop->uio_iov[sg_num].iov_base+skip));
17125 + skip -= uiop->uio_iov[sg_num].iov_len;
17128 + sg_len = (crp->crp_ilen - skip);
17129 + if (sg_len > crd->crd_len)
17130 + sg_len = crd->crd_len;
17131 + sg_set_page(&sg[0], virt_to_page(crp->crp_buf + skip),
17132 + sg_len, offset_in_page(crp->crp_buf + skip));
17137 + switch (sw->sw_type) {
17138 + case SW_TYPE_BLKCIPHER: {
17139 + unsigned char iv[EALG_MAX_BLOCK_LEN];
17140 + unsigned char *ivp = iv;
17142 + crypto_blkcipher_ivsize(crypto_blkcipher_cast(sw->sw_tfm));
17143 + struct blkcipher_desc desc;
17145 + if (sg_len < crypto_blkcipher_blocksize(
17146 + crypto_blkcipher_cast(sw->sw_tfm))) {
17147 + crp->crp_etype = EINVAL;
17148 + dprintk("%s,%d: EINVAL len %d < %d\n", __FILE__, __LINE__,
17149 + sg_len, crypto_blkcipher_blocksize(
17150 + crypto_blkcipher_cast(sw->sw_tfm)));
17154 + if (ivsize > sizeof(iv)) {
17155 + crp->crp_etype = EINVAL;
17156 + dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
17160 + if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
17164 + dprintk("%s key:", __FUNCTION__);
17165 + for (i = 0; i < (crd->crd_klen + 7) / 8; i++)
17166 + dprintk("%s0x%x", (i % 8) ? " " : "\n ",
17167 + crd->crd_key[i]);
17170 + error = crypto_blkcipher_setkey(
17171 + crypto_blkcipher_cast(sw->sw_tfm), crd->crd_key,
17172 + (crd->crd_klen + 7) / 8);
17174 + dprintk("cryptosoft: setkey failed %d (crt_flags=0x%x)\n",
17175 + error, sw->sw_tfm->crt_flags);
17176 + crp->crp_etype = -error;
17180 + memset(&desc, 0, sizeof(desc));
17181 + desc.tfm = crypto_blkcipher_cast(sw->sw_tfm);
17183 + if (crd->crd_flags & CRD_F_ENCRYPT) { /* encrypt */
17185 + if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
17186 + ivp = crd->crd_iv;
17188 + get_random_bytes(ivp, ivsize);
17191 + * do we have to copy the IV back to the buffer ?
17193 + if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0) {
17194 + crypto_copyback(crp->crp_flags, crp->crp_buf,
17195 + crd->crd_inject, ivsize, (caddr_t)ivp);
17198 + crypto_blkcipher_encrypt_iv(&desc, sg, sg, sg_len);
17200 + } else { /*decrypt */
17202 + if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
17203 + ivp = crd->crd_iv;
17205 + crypto_copydata(crp->crp_flags, crp->crp_buf,
17206 + crd->crd_inject, ivsize, (caddr_t)ivp);
17209 + crypto_blkcipher_decrypt_iv(&desc, sg, sg, sg_len);
17212 + case SW_TYPE_HMAC:
17213 + case SW_TYPE_HASH:
17215 + char result[HASH_MAX_LEN];
17216 + struct hash_desc desc;
17218 + /* check we have room for the result */
17219 + if (crp->crp_ilen - crd->crd_inject < sw->u.hmac.sw_mlen) {
17221 + "cryptosoft: EINVAL crp_ilen=%d, len=%d, inject=%d digestsize=%d\n",
17222 + crp->crp_ilen, crd->crd_skip + sg_len, crd->crd_inject,
17223 + sw->u.hmac.sw_mlen);
17224 + crp->crp_etype = EINVAL;
17228 + memset(&desc, 0, sizeof(desc));
17229 + desc.tfm = crypto_hash_cast(sw->sw_tfm);
17231 + memset(result, 0, sizeof(result));
17233 + if (sw->sw_type == SW_TYPE_HMAC) {
17234 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
17235 + crypto_hmac(sw->sw_tfm, sw->u.hmac.sw_key, &sw->u.hmac.sw_klen,
17236 + sg, sg_num, result);
17238 + crypto_hash_setkey(desc.tfm, sw->u.hmac.sw_key,
17239 + sw->u.hmac.sw_klen);
17240 + crypto_hash_digest(&desc, sg, sg_len, result);
17241 +#endif /* #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) */
17243 + } else { /* SW_TYPE_HASH */
17244 + crypto_hash_digest(&desc, sg, sg_len, result);
17247 + crypto_copyback(crp->crp_flags, crp->crp_buf,
17248 + crd->crd_inject, sw->u.hmac.sw_mlen, result);
17252 + case SW_TYPE_COMP: {
17253 + void *ibuf = NULL;
17254 + void *obuf = sw->u.sw_comp_buf;
17255 + int ilen = sg_len, olen = CRYPTO_MAX_DATA_LEN;
17259 + * we need to use an additional copy if there is more than one
17260 + * input chunk since the kernel comp routines do not handle
17261 + * SG yet. Otherwise we just use the input buffer as is.
17262 + * Rather than allocate another buffer we just split the tmp
17263 + * buffer we already have.
17264 + * Perhaps we should just use zlib directly ?
17266 + if (sg_num > 1) {
17270 + for (blk = 0; blk < sg_num; blk++) {
17271 + memcpy(obuf, sg_virt(&sg[blk]),
17273 + obuf += sg[blk].length;
17277 + ibuf = sg_virt(&sg[0]);
17279 + if (crd->crd_flags & CRD_F_ENCRYPT) { /* compress */
17280 + ret = crypto_comp_compress(crypto_comp_cast(sw->sw_tfm),
17281 + ibuf, ilen, obuf, &olen);
17282 + if (!ret && olen > crd->crd_len) {
17283 + dprintk("cryptosoft: ERANGE compress %d into %d\n",
17284 + crd->crd_len, olen);
17285 + if (swcr_fail_if_compression_grows)
17288 + } else { /* decompress */
17289 + ret = crypto_comp_decompress(crypto_comp_cast(sw->sw_tfm),
17290 + ibuf, ilen, obuf, &olen);
17291 + if (!ret && (olen + crd->crd_inject) > crp->crp_olen) {
17292 + dprintk("cryptosoft: ETOOSMALL decompress %d into %d, "
17293 + "space for %d,at offset %d\n",
17294 + crd->crd_len, olen, crp->crp_olen, crd->crd_inject);
17299 + dprintk("%s,%d: ret = %d\n", __FILE__, __LINE__, ret);
17302 + * on success copy result back,
17303 + * linux crypto API returns -errno, we need to fix that
17305 + crp->crp_etype = ret < 0 ? -ret : ret;
17307 + /* copy back the result and return its size */
17308 + crypto_copyback(crp->crp_flags, crp->crp_buf,
17309 + crd->crd_inject, olen, obuf);
17310 + crp->crp_olen = olen;
17317 + /* Unknown/unsupported algorithm */
17318 + dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
17319 + crp->crp_etype = EINVAL;
17325 + crypto_done(crp);
17330 +cryptosoft_init(void)
17332 + int i, sw_type, mode;
17335 + dprintk("%s(%p)\n", __FUNCTION__, cryptosoft_init);
17337 + softc_device_init(&swcr_softc, "cryptosoft", 0, swcr_methods);
17339 + swcr_id = crypto_get_driverid(softc_get_device(&swcr_softc),
17340 + CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC);
17341 + if (swcr_id < 0) {
17342 + printk("Software crypto device cannot initialize!");
17346 +#define REGISTER(alg) \
17347 + crypto_register(swcr_id, alg, 0,0);
17349 + for (i = CRYPTO_ALGORITHM_MIN; i <= CRYPTO_ALGORITHM_MAX; ++i)
17352 + algo = crypto_details[i].alg_name;
17353 + if (!algo || !*algo)
17355 + dprintk("%s:Algorithm %d not supported\n", __FUNCTION__, i);
17359 + mode = crypto_details[i].mode;
17360 + sw_type = crypto_details[i].sw_type;
17364 + case SW_TYPE_CIPHER:
17365 + if (crypto_has_cipher(algo, 0, CRYPTO_ALG_ASYNC))
17371 + dprintk("%s:CIPHER algorithm %d:'%s' not supported\n",
17372 + __FUNCTION__, i, algo);
17375 + case SW_TYPE_HMAC:
17376 + if (crypto_has_hash(algo, 0, CRYPTO_ALG_ASYNC))
17382 + dprintk("%s:HMAC algorithm %d:'%s' not supported\n",
17383 + __FUNCTION__, i, algo);
17386 + case SW_TYPE_HASH:
17387 + if (crypto_has_hash(algo, 0, CRYPTO_ALG_ASYNC))
17393 + dprintk("%s:HASH algorithm %d:'%s' not supported\n",
17394 + __FUNCTION__, i, algo);
17397 + case SW_TYPE_COMP:
17398 + if (crypto_has_comp(algo, 0, CRYPTO_ALG_ASYNC))
17404 + dprintk("%s:COMP algorithm %d:'%s' not supported\n",
17405 + __FUNCTION__, i, algo);
17408 + case SW_TYPE_BLKCIPHER:
17409 + if (crypto_has_blkcipher(algo, 0, CRYPTO_ALG_ASYNC))
17415 + dprintk("%s:BLKCIPHER algorithm %d:'%s' not supported\n",
17416 + __FUNCTION__, i, algo);
17421 + "%s:Algorithm Type %d not supported (algorithm %d:'%s')\n",
17422 + __FUNCTION__, sw_type, i, algo);
17431 +cryptosoft_exit(void)
17433 + dprintk("%s()\n", __FUNCTION__);
17434 + crypto_unregister_all(swcr_id);
17438 +module_init(cryptosoft_init);
17439 +module_exit(cryptosoft_exit);
17441 +MODULE_LICENSE("Dual BSD/GPL");
17442 +MODULE_AUTHOR("David McCullough <david_mccullough@securecomputing.com>");
17443 +MODULE_DESCRIPTION("Cryptosoft (OCF module for kernel crypto)");
17444 --- /dev/null 2007-07-04 13:54:27.000000000 +1000
17445 +++ linux/crypto/ocf/rndtest.c 2007-07-20 13:22:03.000000000 +1000
17450 + * OCF/Linux port done by David McCullough <david_mccullough@securecomputing.com>
17451 + * Copyright (C) 2006-2007 David McCullough
17452 + * Copyright (C) 2004-2005 Intel Corporation.
17453 + * The license and original author are listed below.
17455 + * Copyright (c) 2002 Jason L. Wright (jason@thought.net)
17456 + * All rights reserved.
17458 + * Redistribution and use in source and binary forms, with or without
17459 + * modification, are permitted provided that the following conditions
17461 + * 1. Redistributions of source code must retain the above copyright
17462 + * notice, this list of conditions and the following disclaimer.
17463 + * 2. Redistributions in binary form must reproduce the above copyright
17464 + * notice, this list of conditions and the following disclaimer in the
17465 + * documentation and/or other materials provided with the distribution.
17466 + * 3. All advertising materials mentioning features or use of this software
17467 + * must display the following acknowledgement:
17468 + * This product includes software developed by Jason L. Wright
17469 + * 4. The name of the author may not be used to endorse or promote products
17470 + * derived from this software without specific prior written permission.
17472 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17473 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17474 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
17475 + * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
17476 + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
17477 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
17478 + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
17479 + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
17480 + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
17481 + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
17482 + * POSSIBILITY OF SUCH DAMAGE.
17485 +#ifndef AUTOCONF_INCLUDED
17486 +#include <linux/config.h>
17488 +#include <linux/module.h>
17489 +#include <linux/list.h>
17490 +#include <linux/wait.h>
17491 +#include <linux/time.h>
17492 +#include <linux/version.h>
17493 +#include <linux/unistd.h>
17494 +#include <linux/kernel.h>
17495 +#include <linux/string.h>
17496 +#include <linux/time.h>
17497 +#include <cryptodev.h>
17498 +#include "rndtest.h"
17500 +static struct rndtest_stats rndstats;
17502 +static void rndtest_test(struct rndtest_state *);
17504 +/* The tests themselves */
17505 +static int rndtest_monobit(struct rndtest_state *);
17506 +static int rndtest_runs(struct rndtest_state *);
17507 +static int rndtest_longruns(struct rndtest_state *);
17508 +static int rndtest_chi_4(struct rndtest_state *);
17510 +static int rndtest_runs_check(struct rndtest_state *, int, int *);
17511 +static void rndtest_runs_record(struct rndtest_state *, int, int *);
17513 +static const struct rndtest_testfunc {
17514 + int (*test)(struct rndtest_state *);
17515 +} rndtest_funcs[] = {
17516 + { rndtest_monobit },
17517 + { rndtest_runs },
17518 + { rndtest_chi_4 },
17519 + { rndtest_longruns },
17522 +#define RNDTEST_NTESTS (sizeof(rndtest_funcs)/sizeof(rndtest_funcs[0]))
17525 +rndtest_test(struct rndtest_state *rsp)
17529 + rndstats.rst_tests++;
17530 + for (i = 0; i < RNDTEST_NTESTS; i++)
17531 + rv |= (*rndtest_funcs[i].test)(rsp);
17532 + rsp->rs_discard = (rv != 0);
17536 +extern int crypto_debug;
17537 +#define rndtest_verbose 2
17538 +#define rndtest_report(rsp, failure, fmt, a...) \
17539 + { if (failure || crypto_debug) { printk("rng_test: " fmt "\n", a); } else; }
17541 +#define RNDTEST_MONOBIT_MINONES 9725
17542 +#define RNDTEST_MONOBIT_MAXONES 10275
17545 +rndtest_monobit(struct rndtest_state *rsp)
17547 + int i, ones = 0, j;
17550 + for (i = 0; i < RNDTEST_NBYTES; i++) {
17551 + r = rsp->rs_buf[i];
17552 + for (j = 0; j < 8; j++, r <<= 1)
17556 + if (ones > RNDTEST_MONOBIT_MINONES &&
17557 + ones < RNDTEST_MONOBIT_MAXONES) {
17558 + if (rndtest_verbose > 1)
17559 + rndtest_report(rsp, 0, "monobit pass (%d < %d < %d)",
17560 + RNDTEST_MONOBIT_MINONES, ones,
17561 + RNDTEST_MONOBIT_MAXONES);
17564 + if (rndtest_verbose)
17565 + rndtest_report(rsp, 1,
17566 + "monobit failed (%d ones)", ones);
17567 + rndstats.rst_monobit++;
17572 +#define RNDTEST_RUNS_NINTERVAL 6
17574 +static const struct rndtest_runs_tabs {
17575 + u_int16_t min, max;
17576 +} rndtest_runs_tab[] = {
17586 +rndtest_runs(struct rndtest_state *rsp)
17588 + int i, j, ones, zeros, rv = 0;
17589 + int onei[RNDTEST_RUNS_NINTERVAL], zeroi[RNDTEST_RUNS_NINTERVAL];
17592 + bzero(onei, sizeof(onei));
17593 + bzero(zeroi, sizeof(zeroi));
17594 + ones = zeros = 0;
17595 + for (i = 0; i < RNDTEST_NBYTES; i++) {
17596 + c = rsp->rs_buf[i];
17597 + for (j = 0; j < 8; j++, c <<= 1) {
17600 + rndtest_runs_record(rsp, zeros, zeroi);
17604 + rndtest_runs_record(rsp, ones, onei);
17609 + rndtest_runs_record(rsp, ones, onei);
17610 + rndtest_runs_record(rsp, zeros, zeroi);
17612 + rv |= rndtest_runs_check(rsp, 0, zeroi);
17613 + rv |= rndtest_runs_check(rsp, 1, onei);
17616 + rndstats.rst_runs++;
17622 +rndtest_runs_record(struct rndtest_state *rsp, int len, int *intrv)
17626 + if (len > RNDTEST_RUNS_NINTERVAL)
17627 + len = RNDTEST_RUNS_NINTERVAL;
17633 +rndtest_runs_check(struct rndtest_state *rsp, int val, int *src)
17637 + for (i = 0; i < RNDTEST_RUNS_NINTERVAL; i++) {
17638 + if (src[i] < rndtest_runs_tab[i].min ||
17639 + src[i] > rndtest_runs_tab[i].max) {
17640 + rndtest_report(rsp, 1,
17641 + "%s interval %d failed (%d, %d-%d)",
17642 + val ? "ones" : "zeros",
17643 + i + 1, src[i], rndtest_runs_tab[i].min,
17644 + rndtest_runs_tab[i].max);
17647 + rndtest_report(rsp, 0,
17648 + "runs pass %s interval %d (%d < %d < %d)",
17649 + val ? "ones" : "zeros",
17650 + i + 1, rndtest_runs_tab[i].min, src[i],
17651 + rndtest_runs_tab[i].max);
17658 +rndtest_longruns(struct rndtest_state *rsp)
17660 + int i, j, ones = 0, zeros = 0, maxones = 0, maxzeros = 0;
17663 + for (i = 0; i < RNDTEST_NBYTES; i++) {
17664 + c = rsp->rs_buf[i];
17665 + for (j = 0; j < 8; j++, c <<= 1) {
17669 + if (ones > maxones)
17674 + if (zeros > maxzeros)
17675 + maxzeros = zeros;
17680 + if (maxones < 26 && maxzeros < 26) {
17681 + rndtest_report(rsp, 0, "longruns pass (%d ones, %d zeros)",
17682 + maxones, maxzeros);
17685 + rndtest_report(rsp, 1, "longruns fail (%d ones, %d zeros)",
17686 + maxones, maxzeros);
17687 + rndstats.rst_longruns++;
17693 + * chi^2 test over 4 bits: (this is called the poker test in FIPS 140-2,
17694 + * but it is really the chi^2 test over 4 bits (the poker test as described
17695 + * by Knuth vol 2 is something different, and I take him as authoritative
17696 + * on nomenclature over NIST).
17698 +#define RNDTEST_CHI4_K 16
17699 +#define RNDTEST_CHI4_K_MASK (RNDTEST_CHI4_K - 1)
17702 + * The unnormalized values are used so that we don't have to worry about
17703 + * fractional precision. The "real" value is found by:
17704 + * (V - 1562500) * (16 / 5000) = Vn (where V is the unnormalized value)
17706 +#define RNDTEST_CHI4_VMIN 1563181 /* 2.1792 */
17707 +#define RNDTEST_CHI4_VMAX 1576929 /* 46.1728 */
17710 +rndtest_chi_4(struct rndtest_state *rsp)
17712 + unsigned int freq[RNDTEST_CHI4_K], i, sum;
17714 + for (i = 0; i < RNDTEST_CHI4_K; i++)
17717 + /* Get number of occurrences of each 4 bit pattern */
17718 + for (i = 0; i < RNDTEST_NBYTES; i++) {
17719 + freq[(rsp->rs_buf[i] >> 4) & RNDTEST_CHI4_K_MASK]++;
17720 + freq[(rsp->rs_buf[i] >> 0) & RNDTEST_CHI4_K_MASK]++;
17723 + for (i = 0, sum = 0; i < RNDTEST_CHI4_K; i++)
17724 + sum += freq[i] * freq[i];
17726 + if (sum >= 1563181 && sum <= 1576929) {
17727 + rndtest_report(rsp, 0, "chi^2(4): pass (sum %u)", sum);
17730 + rndtest_report(rsp, 1, "chi^2(4): failed (sum %u)", sum);
17731 + rndstats.rst_chi++;
17737 +rndtest_buf(unsigned char *buf)
17739 + struct rndtest_state rsp;
17741 + memset(&rsp, 0, sizeof(rsp));
17742 + rsp.rs_buf = buf;
17743 + rndtest_test(&rsp);
17744 + return(rsp.rs_discard);
17747 --- /dev/null 2007-07-04 13:54:27.000000000 +1000
17748 +++ linux/crypto/ocf/rndtest.h 2005-05-20 10:28:26.000000000 +1000
17750 +/* $FreeBSD: src/sys/dev/rndtest/rndtest.h,v 1.1 2003/03/11 22:54:44 sam Exp $ */
17754 + * Copyright (c) 2002 Jason L. Wright (jason@thought.net)
17755 + * All rights reserved.
17757 + * Redistribution and use in source and binary forms, with or without
17758 + * modification, are permitted provided that the following conditions
17760 + * 1. Redistributions of source code must retain the above copyright
17761 + * notice, this list of conditions and the following disclaimer.
17762 + * 2. Redistributions in binary form must reproduce the above copyright
17763 + * notice, this list of conditions and the following disclaimer in the
17764 + * documentation and/or other materials provided with the distribution.
17765 + * 3. All advertising materials mentioning features or use of this software
17766 + * must display the following acknowledgement:
17767 + * This product includes software developed by Jason L. Wright
17768 + * 4. The name of the author may not be used to endorse or promote products
17769 + * derived from this software without specific prior written permission.
17771 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17772 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17773 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
17774 + * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
17775 + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
17776 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
17777 + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
17778 + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
17779 + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
17780 + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
17781 + * POSSIBILITY OF SUCH DAMAGE.
17785 +/* Some of the tests depend on these values */
17786 +#define RNDTEST_NBYTES 2500
17787 +#define RNDTEST_NBITS (8 * RNDTEST_NBYTES)
17789 +struct rndtest_state {
17790 + int rs_discard; /* discard/accept random data */
17791 + u_int8_t *rs_buf;
17794 +struct rndtest_stats {
17795 + u_int32_t rst_discard; /* number of bytes discarded */
17796 + u_int32_t rst_tests; /* number of test runs */
17797 + u_int32_t rst_monobit; /* monobit test failures */
17798 + u_int32_t rst_runs; /* 0/1 runs failures */
17799 + u_int32_t rst_longruns; /* longruns failures */
17800 + u_int32_t rst_chi; /* chi^2 failures */
17803 +extern int rndtest_buf(unsigned char *buf);
17804 --- /dev/null 2007-07-04 13:54:27.000000000 +1000
17805 +++ linux/crypto/ocf/ocf-compat.h 2008-04-27 09:30:47.000000000 +1000
17807 +#ifndef _BSD_COMPAT_H_
17808 +#define _BSD_COMPAT_H_ 1
17809 +/****************************************************************************/
17811 + * Provide compat routines for older linux kernels and BSD kernels
17813 + * Written by David McCullough <david_mccullough@securecomputing.com>
17814 + * Copyright (C) 2007 David McCullough <david_mccullough@securecomputing.com>
17818 + * The free distribution and use of this software in both source and binary
17819 + * form is allowed (with or without changes) provided that:
17821 + * 1. distributions of this source code include the above copyright
17822 + * notice, this list of conditions and the following disclaimer;
17824 + * 2. distributions in binary form include the above copyright
17825 + * notice, this list of conditions and the following disclaimer
17826 + * in the documentation and/or other associated materials;
17828 + * 3. the copyright holder's name is not used to endorse products
17829 + * built using this software without specific written permission.
17831 + * ALTERNATIVELY, provided that this notice is retained in full, this file
17832 + * may be distributed under the terms of the GNU General Public License (GPL),
17833 + * in which case the provisions of the GPL apply INSTEAD OF those given above.
17837 + * This software is provided 'as is' with no explicit or implied warranties
17838 + * in respect of its properties, including, but not limited to, correctness
17839 + * and/or fitness for purpose.
17841 +/****************************************************************************/
17844 + * fake some BSD driver interface stuff specifically for OCF use
17847 +typedef struct ocf_device *device_t;
17850 + int (*cryptodev_newsession)(device_t dev, u_int32_t *sidp, struct cryptoini *cri);
17851 + int (*cryptodev_freesession)(device_t dev, u_int64_t tid);
17852 + int (*cryptodev_process)(device_t dev, struct cryptop *crp, int hint);
17853 + int (*cryptodev_kprocess)(device_t dev, struct cryptkop *krp, int hint);
17854 +} device_method_t;
17855 +#define DEVMETHOD(id, func) id: func
17857 +struct ocf_device {
17858 + char name[32]; /* the driver name */
17859 + char nameunit[32]; /* the driver name + HW instance */
17861 + device_method_t methods;
17865 +#define CRYPTODEV_NEWSESSION(dev, sid, cri) \
17866 + ((*(dev)->methods.cryptodev_newsession)(dev,sid,cri))
17867 +#define CRYPTODEV_FREESESSION(dev, sid) \
17868 + ((*(dev)->methods.cryptodev_freesession)(dev, sid))
17869 +#define CRYPTODEV_PROCESS(dev, crp, hint) \
17870 + ((*(dev)->methods.cryptodev_process)(dev, crp, hint))
17871 +#define CRYPTODEV_KPROCESS(dev, krp, hint) \
17872 + ((*(dev)->methods.cryptodev_kprocess)(dev, krp, hint))
17874 +#define device_get_name(dev) ((dev)->name)
17875 +#define device_get_nameunit(dev) ((dev)->nameunit)
17876 +#define device_get_unit(dev) ((dev)->unit)
17877 +#define device_get_softc(dev) ((dev)->softc)
17879 +#define softc_device_decl \
17880 + struct ocf_device _device; \
17883 +#define softc_device_init(_sc, _name, _unit, _methods) \
17885 + strncpy((_sc)->_device.name, _name, sizeof((_sc)->_device.name) - 1); \
17886 + snprintf((_sc)->_device.nameunit, sizeof((_sc)->_device.name), "%s%d", _name, _unit); \
17887 + (_sc)->_device.unit = _unit; \
17888 + (_sc)->_device.methods = _methods; \
17889 + (_sc)->_device.softc = (void *) _sc; \
17890 + *(device_t *)((softc_get_device(_sc))+1) = &(_sc)->_device; \
17893 +#define softc_get_device(_sc) (&(_sc)->_device)
17896 + * iomem support for 2.4 and 2.6 kernels
17898 +#include <linux/version.h>
17899 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
17900 +#define ocf_iomem_t unsigned long
17903 + * implement simple workqueue like support for older kernels
17906 +#include <linux/tqueue.h>
17908 +#define work_struct tq_struct
17910 +#define INIT_WORK(wp, fp, ap) \
17912 + (wp)->sync = 0; \
17913 + (wp)->routine = (fp); \
17914 + (wp)->data = (ap); \
17917 +#define schedule_work(wp) \
17919 + queue_task((wp), &tq_immediate); \
17920 + mark_bh(IMMEDIATE_BH); \
17923 +#define flush_scheduled_work() run_task_queue(&tq_immediate)
17926 +#define ocf_iomem_t void __iomem *
17928 +#include <linux/workqueue.h>
17932 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
17933 +#define files_fdtable(files) (files)
17936 +#ifdef MODULE_PARM
17937 +#undef module_param /* just in case */
17938 +#define module_param(a,b,c) MODULE_PARM(a,"i")
17941 +#define bzero(s,l) memset(s,0,l)
17942 +#define bcopy(s,d,l) memcpy(d,s,l)
17943 +#define bcmp(x, y, l) memcmp(x,y,l)
17945 +#define MIN(x,y) ((x) < (y) ? (x) : (y))
17947 +#define device_printf(dev, a...) ({ \
17948 + printk("%s: ", device_get_nameunit(dev)); printk(a); \
17952 +#define printf(fmt...) printk(fmt)
17954 +#define KASSERT(c,p) if (!(c)) { printk p ; } else
17956 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
17957 +#define ocf_daemonize(str) \
17959 + spin_lock_irq(&current->sigmask_lock); \
17960 + sigemptyset(&current->blocked); \
17961 + recalc_sigpending(current); \
17962 + spin_unlock_irq(&current->sigmask_lock); \
17963 + sprintf(current->comm, str);
17965 +#define ocf_daemonize(str) daemonize(str);
17968 +#define TAILQ_INSERT_TAIL(q,d,m) list_add_tail(&(d)->m, (q))
17969 +#define TAILQ_EMPTY(q) list_empty(q)
17970 +#define TAILQ_FOREACH(v, q, m) list_for_each_entry(v, q, m)
17972 +#define read_random(p,l) get_random_bytes(p,l)
17974 +#define DELAY(x) ((x) > 2000 ? mdelay((x)/1000) : udelay(x))
17975 +#define strtoul simple_strtoul
17977 +#define pci_get_vendor(dev) ((dev)->vendor)
17978 +#define pci_get_device(dev) ((dev)->device)
17980 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
17981 +#define pci_set_consistent_dma_mask(dev, mask) (0)
17983 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10)
17984 +#define pci_dma_sync_single_for_cpu pci_dma_sync_single
17987 +#ifndef DMA_32BIT_MASK
17988 +#define DMA_32BIT_MASK 0x00000000ffffffffULL
17991 +#define htole32(x) cpu_to_le32(x)
17992 +#define htobe32(x) cpu_to_be32(x)
17993 +#define htole16(x) cpu_to_le16(x)
17994 +#define htobe16(x) cpu_to_be16(x)
17996 +/* older kernels don't have these */
18000 +#define IRQ_HANDLED
18001 +#define irqreturn_t void
18003 +#ifndef IRQF_SHARED
18004 +#define IRQF_SHARED SA_SHIRQ
18007 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
18008 +# define strlcpy(dest,src,len) \
18009 + ({strncpy(dest,src,(len)-1); ((char *)dest)[(len)-1] = '\0'; })
18013 +#define MAX_ERRNO 4095
18015 +#ifndef IS_ERR_VALUE
18016 +#define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)
18020 + * common debug for all
18023 +#define dprintk(a...) do { if (debug) printk(a); } while(0)
18025 +#define dprintk(a...)
18028 +#ifndef SLAB_ATOMIC
18029 +/* Changed in 2.6.20, must use GFP_ATOMIC now */
18030 +#define SLAB_ATOMIC GFP_ATOMIC
18034 + * need some additional support for older kernels */
18035 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,2)
18036 +#define pci_register_driver_compat(driver, rc) \
18038 + if ((rc) > 0) { \
18040 + } else if (rc == 0) { \
18041 + (rc) = -ENODEV; \
18043 + pci_unregister_driver(driver); \
18046 +#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10)
18047 +#define pci_register_driver_compat(driver,rc) ((rc) = (rc) < 0 ? (rc) : 0)
18049 +#define pci_register_driver_compat(driver,rc)
18052 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
18054 +#include <asm/scatterlist.h>
18056 +static inline void sg_set_page(struct scatterlist *sg, struct page *page,
18057 + unsigned int len, unsigned int offset)
18060 + sg->offset = offset;
18061 + sg->length = len;
18064 +static inline void *sg_virt(struct scatterlist *sg)
18066 + return page_address(sg->page) + sg->offset;
18071 +#endif /* __KERNEL__ */
18073 +/****************************************************************************/
18074 +#endif /* _BSD_COMPAT_H_ */
18075 --- /dev/null 2007-07-04 13:54:27.000000000 +1000
18076 +++ linux/crypto/ocf/pasemi/pasemi.c 2007-12-12 11:36:18.000000000 +1000
18079 + * Copyright (C) 2007 PA Semi, Inc
18081 + * Driver for the PA Semi PWRficient DMA Crypto Engine
18083 + * This program is free software; you can redistribute it and/or modify
18084 + * it under the terms of the GNU General Public License version 2 as
18085 + * published by the Free Software Foundation.
18087 + * This program is distributed in the hope that it will be useful,
18088 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
18089 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18090 + * GNU General Public License for more details.
18092 + * You should have received a copy of the GNU General Public License
18093 + * along with this program; if not, write to the Free Software
18094 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18097 +#ifndef AUTOCONF_INCLUDED
18098 +#include <linux/config.h>
18100 +#include <linux/module.h>
18101 +#include <linux/init.h>
18102 +#include <linux/interrupt.h>
18103 +#include <linux/timer.h>
18104 +#include <linux/random.h>
18105 +#include <linux/skbuff.h>
18106 +#include <asm/scatterlist.h>
18107 +#include <linux/moduleparam.h>
18108 +#include <linux/pci.h>
18109 +#include <cryptodev.h>
18111 +#include "pasemi_fnu.h"
18113 +#define DRV_NAME "pasemi"
18115 +#define TIMER_INTERVAL 1000
18117 +static void __devexit pasemi_dma_remove(struct pci_dev *pdev);
18118 +static struct pasdma_status volatile * dma_status;
18121 +module_param(debug, int, 0644);
18122 +MODULE_PARM_DESC(debug, "Enable debug");
18124 +static void pasemi_desc_start(struct pasemi_desc *desc, u64 hdr)
18126 + desc->postop = 0;
18127 + desc->quad[0] = hdr;
18128 + desc->quad_cnt = 1;
18132 +static void pasemi_desc_build(struct pasemi_desc *desc, u64 val)
18134 + desc->quad[desc->quad_cnt++] = val;
18135 + desc->size = (desc->quad_cnt + 1) / 2;
18138 +static void pasemi_desc_hdr(struct pasemi_desc *desc, u64 hdr)
18140 + desc->quad[0] |= hdr;
18143 +static int pasemi_desc_size(struct pasemi_desc *desc)
18145 + return desc->size;
18148 +static void pasemi_ring_add_desc(
18149 + struct pasemi_fnu_txring *ring,
18150 + struct pasemi_desc *desc,
18151 + struct cryptop *crp) {
18153 + int ring_index = 2 * (ring->next_to_fill & (TX_RING_SIZE-1));
18155 + TX_DESC_INFO(ring, ring->next_to_fill).desc_size = desc->size;
18156 + TX_DESC_INFO(ring, ring->next_to_fill).desc_postop = desc->postop;
18157 + TX_DESC_INFO(ring, ring->next_to_fill).cf_crp = crp;
18159 + for (i = 0; i < desc->quad_cnt; i += 2) {
18160 + ring_index = 2 * (ring->next_to_fill & (TX_RING_SIZE-1));
18161 + ring->desc[ring_index] = desc->quad[i];
18162 + ring->desc[ring_index + 1] = desc->quad[i + 1];
18163 + ring->next_to_fill++;
18166 + if (desc->quad_cnt & 1)
18167 + ring->desc[ring_index + 1] = 0;
18170 +static void pasemi_ring_incr(struct pasemi_softc *sc, int chan_index, int incr)
18172 + out_le32(sc->dma_regs + PAS_DMA_TXCHAN_INCR(sc->base_chan + chan_index),
18177 + * Generate a new software session.
18180 +pasemi_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
18182 + struct cryptoini *c, *encini = NULL, *macini = NULL;
18183 + struct pasemi_softc *sc = device_get_softc(dev);
18184 + struct pasemi_session *ses = NULL, **sespp;
18185 + int sesn, blksz = 0;
18187 + unsigned long flags;
18188 + struct pasemi_desc init_desc;
18189 + struct pasemi_fnu_txring *txring;
18191 + DPRINTF("%s()\n", __FUNCTION__);
18192 + if (sidp == NULL || cri == NULL || sc == NULL) {
18193 + DPRINTF("%s,%d - EINVAL\n", __FILE__, __LINE__);
18196 + for (c = cri; c != NULL; c = c->cri_next) {
18197 + if (ALG_IS_SIG(c->cri_alg)) {
18201 + } else if (ALG_IS_CIPHER(c->cri_alg)) {
18206 + DPRINTF("UNKNOWN c->cri_alg %d\n", c->cri_alg);
18210 + if (encini == NULL && macini == NULL)
18213 + /* validate key length */
18214 + switch (encini->cri_alg) {
18215 + case CRYPTO_DES_CBC:
18216 + if (encini->cri_klen != 64)
18218 + ccmd = DMA_CALGO_DES;
18220 + case CRYPTO_3DES_CBC:
18221 + if (encini->cri_klen != 192)
18223 + ccmd = DMA_CALGO_3DES;
18225 + case CRYPTO_AES_CBC:
18226 + if (encini->cri_klen != 128 &&
18227 + encini->cri_klen != 192 &&
18228 + encini->cri_klen != 256)
18230 + ccmd = DMA_CALGO_AES;
18232 + case CRYPTO_ARC4:
18233 + if (encini->cri_klen != 128)
18235 + ccmd = DMA_CALGO_ARC;
18238 + DPRINTF("UNKNOWN encini->cri_alg %d\n",
18239 + encini->cri_alg);
18245 + switch (macini->cri_alg) {
18247 + case CRYPTO_MD5_HMAC:
18250 + case CRYPTO_SHA1:
18251 + case CRYPTO_SHA1_HMAC:
18255 + DPRINTF("UNKNOWN macini->cri_alg %d\n",
18256 + macini->cri_alg);
18259 + if (((macini->cri_klen + 7) / 8) > blksz) {
18260 + DPRINTF("key length %d bigger than blksize %d not supported\n",
18261 + ((macini->cri_klen + 7) / 8), blksz);
18266 + for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
18267 + if (sc->sc_sessions[sesn] == NULL) {
18268 + sc->sc_sessions[sesn] = (struct pasemi_session *)
18269 + kzalloc(sizeof(struct pasemi_session), GFP_ATOMIC);
18270 + ses = sc->sc_sessions[sesn];
18272 + } else if (sc->sc_sessions[sesn]->used == 0) {
18273 + ses = sc->sc_sessions[sesn];
18278 + if (ses == NULL) {
18279 + sespp = (struct pasemi_session **)
18280 + kzalloc(sc->sc_nsessions * 2 *
18281 + sizeof(struct pasemi_session *), GFP_ATOMIC);
18282 + if (sespp == NULL)
18284 + memcpy(sespp, sc->sc_sessions,
18285 + sc->sc_nsessions * sizeof(struct pasemi_session *));
18286 + kfree(sc->sc_sessions);
18287 + sc->sc_sessions = sespp;
18288 + sesn = sc->sc_nsessions;
18289 + ses = sc->sc_sessions[sesn] = (struct pasemi_session *)
18290 + kzalloc(sizeof(struct pasemi_session), GFP_ATOMIC);
18293 + sc->sc_nsessions *= 2;
18298 + ses->dma_addr = pci_map_single(sc->dma_pdev, (void *) ses->civ,
18299 + sizeof(struct pasemi_session), DMA_TO_DEVICE);
18301 + /* enter the channel scheduler */
18302 + spin_lock_irqsave(&sc->sc_chnlock, flags);
18304 + /* ARC4 has to be processed by the even channel */
18305 + if (encini && (encini->cri_alg == CRYPTO_ARC4))
18306 + ses->chan = sc->sc_lastchn & ~1;
18308 + ses->chan = sc->sc_lastchn;
18309 + sc->sc_lastchn = (sc->sc_lastchn + 1) % sc->sc_num_channels;
18311 + spin_unlock_irqrestore(&sc->sc_chnlock, flags);
18313 + txring = &sc->tx[ses->chan];
18316 + ses->ccmd = ccmd;
18319 + /* XXX may read fewer than requested */
18320 + get_random_bytes(ses->civ, sizeof(ses->civ));
18322 + ses->keysz = (encini->cri_klen - 63) / 64;
18323 + memcpy(ses->key, encini->cri_key, (ses->keysz + 1) * 8);
18325 + pasemi_desc_start(&init_desc,
18326 + XCT_CTRL_HDR(ses->chan, (encini && macini) ? 0x68 : 0x40, DMA_FN_CIV0));
18327 + pasemi_desc_build(&init_desc,
18328 + XCT_FUN_SRC_PTR((encini && macini) ? 0x68 : 0x40, ses->dma_addr));
18331 + if (macini->cri_alg == CRYPTO_MD5_HMAC ||
18332 + macini->cri_alg == CRYPTO_SHA1_HMAC)
18333 + memcpy(ses->hkey, macini->cri_key, blksz);
18335 + /* Load initialization constants (RFC 1321, 3174) */
18336 + ses->hiv[0] = 0x67452301efcdab89ULL;
18337 + ses->hiv[1] = 0x98badcfe10325476ULL;
18338 + ses->hiv[2] = 0xc3d2e1f000000000ULL;
18340 + ses->hseq = 0ULL;
18343 + spin_lock_irqsave(&txring->fill_lock, flags);
18345 + if (((txring->next_to_fill + pasemi_desc_size(&init_desc)) -
18346 + txring->next_to_clean) > TX_RING_SIZE) {
18347 + spin_unlock_irqrestore(&txring->fill_lock, flags);
18352 + pasemi_ring_add_desc(txring, &init_desc, NULL);
18353 + pasemi_ring_incr(sc, ses->chan,
18354 + pasemi_desc_size(&init_desc));
18357 + txring->sesn = sesn;
18358 + spin_unlock_irqrestore(&txring->fill_lock, flags);
18360 + *sidp = PASEMI_SID(sesn);
18365 + * Deallocate a session.
18368 +pasemi_freesession(device_t dev, u_int64_t tid)
18370 + struct pasemi_softc *sc = device_get_softc(dev);
18372 + u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;
18374 + DPRINTF("%s()\n", __FUNCTION__);
18378 + session = PASEMI_SESSION(sid);
18379 + if (session >= sc->sc_nsessions || !sc->sc_sessions[session])
18382 + pci_unmap_single(sc->dma_pdev,
18383 + sc->sc_sessions[session]->dma_addr,
18384 + sizeof(struct pasemi_session), DMA_TO_DEVICE);
18385 + memset(sc->sc_sessions[session], 0,
18386 + sizeof(struct pasemi_session));
18392 +pasemi_process(device_t dev, struct cryptop *crp, int hint)
18395 + int err = 0, ivsize, srclen = 0, reinit = 0, reinit_size = 0, chsel;
18396 + struct pasemi_softc *sc = device_get_softc(dev);
18397 + struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
18399 + struct pasemi_desc init_desc, work_desc;
18400 + struct pasemi_session *ses;
18401 + struct sk_buff *skb;
18402 + struct uio *uiop;
18403 + unsigned long flags;
18404 + struct pasemi_fnu_txring *txring;
18406 + DPRINTF("%s()\n", __FUNCTION__);
18408 + if (crp == NULL || crp->crp_callback == NULL || sc == NULL)
18411 + crp->crp_etype = 0;
18412 + if (PASEMI_SESSION(crp->crp_sid) >= sc->sc_nsessions)
18415 + ses = sc->sc_sessions[PASEMI_SESSION(crp->crp_sid)];
18417 + crd1 = crp->crp_desc;
18418 + if (crd1 == NULL) {
18422 + crd2 = crd1->crd_next;
18424 + if (ALG_IS_SIG(crd1->crd_alg)) {
18426 + if (crd2 == NULL)
18428 + else if (ALG_IS_CIPHER(crd2->crd_alg) &&
18429 + (crd2->crd_flags & CRD_F_ENCRYPT) == 0)
18433 + } else if (ALG_IS_CIPHER(crd1->crd_alg)) {
18435 + if (crd2 == NULL)
18437 + else if (ALG_IS_SIG(crd2->crd_alg) &&
18438 + (crd1->crd_flags & CRD_F_ENCRYPT))
18445 + chsel = ses->chan;
18447 + txring = &sc->tx[chsel];
18449 + if (enccrd && !maccrd) {
18450 + if (enccrd->crd_alg == CRYPTO_ARC4)
18452 + reinit_size = 0x40;
18453 + srclen = crp->crp_ilen;
18455 + pasemi_desc_start(&work_desc, XCT_FUN_O | XCT_FUN_I
18456 + | XCT_FUN_FUN(chsel));
18457 + if (enccrd->crd_flags & CRD_F_ENCRYPT)
18458 + pasemi_desc_hdr(&work_desc, XCT_FUN_CRM_ENC);
18460 + pasemi_desc_hdr(&work_desc, XCT_FUN_CRM_DEC);
18461 + } else if (enccrd && maccrd) {
18462 + if (enccrd->crd_alg == CRYPTO_ARC4)
18464 + reinit_size = 0x68;
18466 + if (enccrd->crd_flags & CRD_F_ENCRYPT) {
18467 + /* Encrypt -> Authenticate */
18468 + pasemi_desc_start(&work_desc, XCT_FUN_O | XCT_FUN_I | XCT_FUN_CRM_ENC_SIG
18469 + | XCT_FUN_A | XCT_FUN_FUN(chsel));
18470 + srclen = maccrd->crd_skip + maccrd->crd_len;
18472 + /* Authenticate -> Decrypt */
18473 + pasemi_desc_start(&work_desc, XCT_FUN_O | XCT_FUN_I | XCT_FUN_CRM_SIG_DEC
18474 + | XCT_FUN_24BRES | XCT_FUN_FUN(chsel));
18475 + pasemi_desc_build(&work_desc, 0);
18476 + pasemi_desc_build(&work_desc, 0);
18477 + pasemi_desc_build(&work_desc, 0);
18478 + work_desc.postop = PASEMI_CHECK_SIG;
18479 + srclen = crp->crp_ilen;
18482 + pasemi_desc_hdr(&work_desc, XCT_FUN_SHL(maccrd->crd_skip / 4));
18483 + pasemi_desc_hdr(&work_desc, XCT_FUN_CHL(enccrd->crd_skip - maccrd->crd_skip));
18484 + } else if (!enccrd && maccrd) {
18485 + srclen = maccrd->crd_len;
18487 + pasemi_desc_start(&init_desc,
18488 + XCT_CTRL_HDR(chsel, 0x58, DMA_FN_HKEY0));
18489 + pasemi_desc_build(&init_desc,
18490 + XCT_FUN_SRC_PTR(0x58, ((struct pasemi_session *)ses->dma_addr)->hkey));
18492 + pasemi_desc_start(&work_desc, XCT_FUN_O | XCT_FUN_I | XCT_FUN_CRM_SIG
18493 + | XCT_FUN_A | XCT_FUN_FUN(chsel));
18497 + switch (enccrd->crd_alg) {
18498 + case CRYPTO_3DES_CBC:
18499 + pasemi_desc_hdr(&work_desc, XCT_FUN_ALG_3DES |
18500 + XCT_FUN_BCM_CBC);
18501 + ivsize = sizeof(u64);
18503 + case CRYPTO_DES_CBC:
18504 + pasemi_desc_hdr(&work_desc, XCT_FUN_ALG_DES |
18505 + XCT_FUN_BCM_CBC);
18506 + ivsize = sizeof(u64);
18508 + case CRYPTO_AES_CBC:
18509 + pasemi_desc_hdr(&work_desc, XCT_FUN_ALG_AES |
18510 + XCT_FUN_BCM_CBC);
18511 + ivsize = 2 * sizeof(u64);
18513 + case CRYPTO_ARC4:
18514 + pasemi_desc_hdr(&work_desc, XCT_FUN_ALG_ARC);
18518 + printk(DRV_NAME ": unimplemented enccrd->crd_alg %d\n",
18519 + enccrd->crd_alg);
18524 + ivp = (ivsize == sizeof(u64)) ? (caddr_t) &ses->civ[1] : (caddr_t) &ses->civ[0];
18525 + if (enccrd->crd_flags & CRD_F_ENCRYPT) {
18526 + if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
18527 + memcpy(ivp, enccrd->crd_iv, ivsize);
18528 + /* If IV is not present in the buffer already, it has to be copied there */
18529 + if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0)
18530 + crypto_copyback(crp->crp_flags, crp->crp_buf,
18531 + enccrd->crd_inject, ivsize, ivp);
18533 + if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
18534 + /* IV is provided explicitly in descriptor */
18535 + memcpy(ivp, enccrd->crd_iv, ivsize);
18537 + /* IV is provided in the packet */
18538 + crypto_copydata(crp->crp_flags, crp->crp_buf,
18539 + enccrd->crd_inject, ivsize,
18545 + switch (maccrd->crd_alg) {
18547 + pasemi_desc_hdr(&work_desc, XCT_FUN_SIG_MD5 |
18548 + XCT_FUN_HSZ((crp->crp_ilen - maccrd->crd_inject) / 4));
18550 + case CRYPTO_SHA1:
18551 + pasemi_desc_hdr(&work_desc, XCT_FUN_SIG_SHA1 |
18552 + XCT_FUN_HSZ((crp->crp_ilen - maccrd->crd_inject) / 4));
18554 + case CRYPTO_MD5_HMAC:
18555 + pasemi_desc_hdr(&work_desc, XCT_FUN_SIG_HMAC_MD5 |
18556 + XCT_FUN_HSZ((crp->crp_ilen - maccrd->crd_inject) / 4));
18558 + case CRYPTO_SHA1_HMAC:
18559 + pasemi_desc_hdr(&work_desc, XCT_FUN_SIG_HMAC_SHA1 |
18560 + XCT_FUN_HSZ((crp->crp_ilen - maccrd->crd_inject) / 4));
18563 + printk(DRV_NAME ": unimplemented maccrd->crd_alg %d\n",
18564 + maccrd->crd_alg);
18570 + if (crp->crp_flags & CRYPTO_F_SKBUF) {
18571 + /* using SKB buffers */
18572 + skb = (struct sk_buff *)crp->crp_buf;
18573 + if (skb_shinfo(skb)->nr_frags) {
18574 + printk(DRV_NAME ": skb frags unimplemented\n");
18578 + pasemi_desc_build(
18580 + XCT_FUN_DST_PTR(skb->len, pci_map_single(
18581 + sc->dma_pdev, skb->data,
18582 + skb->len, DMA_TO_DEVICE)));
18583 + pasemi_desc_build(
18586 + srclen, pci_map_single(
18587 + sc->dma_pdev, skb->data,
18588 + srclen, DMA_TO_DEVICE)));
18589 + pasemi_desc_hdr(&work_desc, XCT_FUN_LLEN(srclen));
18590 + } else if (crp->crp_flags & CRYPTO_F_IOV) {
18591 + /* using IOV buffers */
18592 + uiop = (struct uio *)crp->crp_buf;
18593 + if (uiop->uio_iovcnt > 1) {
18594 + printk(DRV_NAME ": iov frags unimplemented\n");
18599 + /* crp_olen is never set; always use crp_ilen */
18600 + pasemi_desc_build(
18602 + XCT_FUN_DST_PTR(crp->crp_ilen, pci_map_single(
18604 + uiop->uio_iov->iov_base,
18605 + crp->crp_ilen, DMA_TO_DEVICE)));
18606 + pasemi_desc_hdr(&work_desc, XCT_FUN_LLEN(srclen));
18608 + pasemi_desc_build(
18610 + XCT_FUN_SRC_PTR(srclen, pci_map_single(
18612 + uiop->uio_iov->iov_base,
18613 + srclen, DMA_TO_DEVICE)));
18615 + /* using contig buffers */
18616 + pasemi_desc_build(
18618 + XCT_FUN_DST_PTR(crp->crp_ilen, pci_map_single(
18621 + crp->crp_ilen, DMA_TO_DEVICE)));
18622 + pasemi_desc_build(
18624 + XCT_FUN_SRC_PTR(srclen, pci_map_single(
18626 + crp->crp_buf, srclen,
18627 + DMA_TO_DEVICE)));
18628 + pasemi_desc_hdr(&work_desc, XCT_FUN_LLEN(srclen));
18631 + spin_lock_irqsave(&txring->fill_lock, flags);
18633 + if (txring->sesn != PASEMI_SESSION(crp->crp_sid)) {
18634 + txring->sesn = PASEMI_SESSION(crp->crp_sid);
18639 + pasemi_desc_start(&init_desc,
18640 + XCT_CTRL_HDR(chsel, reinit ? reinit_size : 0x10, DMA_FN_CIV0));
18641 + pasemi_desc_build(&init_desc,
18642 + XCT_FUN_SRC_PTR(reinit ? reinit_size : 0x10, ses->dma_addr));
18645 + if (((txring->next_to_fill + pasemi_desc_size(&init_desc) +
18646 + pasemi_desc_size(&work_desc)) -
18647 + txring->next_to_clean) > TX_RING_SIZE) {
18648 + spin_unlock_irqrestore(&txring->fill_lock, flags);
18653 + pasemi_ring_add_desc(txring, &init_desc, NULL);
18654 + pasemi_ring_add_desc(txring, &work_desc, crp);
18656 + pasemi_ring_incr(sc, chsel,
18657 + pasemi_desc_size(&init_desc) +
18658 + pasemi_desc_size(&work_desc));
18660 + spin_unlock_irqrestore(&txring->fill_lock, flags);
18662 + mod_timer(&txring->crypto_timer, jiffies + TIMER_INTERVAL);
18667 + printk(DRV_NAME ": unsupported algorithm or algorithm order alg1 %d alg2 %d\n",
18668 + crd1->crd_alg, crd2->crd_alg);
18672 + if (err != ERESTART) {
18673 + crp->crp_etype = err;
18674 + crypto_done(crp);
18679 +static int pasemi_clean_tx(struct pasemi_softc *sc, int chan)
18681 + int i, j, ring_idx;
18682 + struct pasemi_fnu_txring *ring = &sc->tx[chan];
18684 + int flags, loops = 10;
18686 + struct cryptop *crp;
18688 + spin_lock_irqsave(&ring->clean_lock, flags);
18690 + while ((delta_cnt = (dma_status->tx_sta[sc->base_chan + chan]
18691 + & PAS_STATUS_PCNT_M) - ring->total_pktcnt)
18694 + for (i = 0; i < delta_cnt; i++) {
18695 + desc_size = TX_DESC_INFO(ring, ring->next_to_clean).desc_size;
18696 + crp = TX_DESC_INFO(ring, ring->next_to_clean).cf_crp;
18698 + ring_idx = 2 * (ring->next_to_clean & (TX_RING_SIZE-1));
18699 + if (TX_DESC_INFO(ring, ring->next_to_clean).desc_postop & PASEMI_CHECK_SIG) {
18700 + /* Need to make sure signature matched,
18701 + * if not - return error */
18702 + if (!(ring->desc[ring_idx + 1] & (1ULL << 63)))
18703 + crp->crp_etype = -EINVAL;
18705 + crypto_done(TX_DESC_INFO(ring,
18706 + ring->next_to_clean).cf_crp);
18707 + TX_DESC_INFO(ring, ring->next_to_clean).cf_crp = NULL;
18708 + pci_unmap_single(
18710 + XCT_PTR_ADDR_LEN(ring->desc[ring_idx + 1]),
18711 + PCI_DMA_TODEVICE);
18713 + ring->desc[ring_idx] = ring->desc[ring_idx + 1] = 0;
18715 + ring->next_to_clean++;
18716 + for (j = 1; j < desc_size; j++) {
18718 + (ring->next_to_clean &
18719 + (TX_RING_SIZE-1));
18720 + pci_unmap_single(
18722 + XCT_PTR_ADDR_LEN(ring->desc[ring_idx]),
18723 + PCI_DMA_TODEVICE);
18724 + if (ring->desc[ring_idx + 1])
18725 + pci_unmap_single(
18727 + XCT_PTR_ADDR_LEN(
18730 + PCI_DMA_TODEVICE);
18731 + ring->desc[ring_idx] =
18732 + ring->desc[ring_idx + 1] = 0;
18733 + ring->next_to_clean++;
18736 + for (j = 0; j < desc_size; j++) {
18737 + ring_idx = 2 * (ring->next_to_clean & (TX_RING_SIZE-1));
18738 + ring->desc[ring_idx] =
18739 + ring->desc[ring_idx + 1] = 0;
18740 + ring->next_to_clean++;
18745 + ring->total_pktcnt += delta_cnt;
18747 + spin_unlock_irqrestore(&ring->clean_lock, flags);
18752 +static void sweepup_tx(struct pasemi_softc *sc)
18756 + for (i = 0; i < sc->sc_num_channels; i++)
18757 + pasemi_clean_tx(sc, i);
18760 +static irqreturn_t pasemi_intr(int irq, void *arg, struct pt_regs *regs)
18762 + struct pasemi_softc *sc = arg;
18763 + unsigned int reg;
18764 + int chan = irq - sc->base_irq;
18765 + int chan_index = sc->base_chan + chan;
18766 + u64 stat = dma_status->tx_sta[chan_index];
18768 + DPRINTF("%s()\n", __FUNCTION__);
18770 + if (!(stat & PAS_STATUS_CAUSE_M))
18773 + pasemi_clean_tx(sc, chan);
18775 + stat = dma_status->tx_sta[chan_index];
18777 + reg = PAS_IOB_DMA_TXCH_RESET_PINTC |
18778 + PAS_IOB_DMA_TXCH_RESET_PCNT(sc->tx[chan].total_pktcnt);
18780 + if (stat & PAS_STATUS_SOFT)
18781 + reg |= PAS_IOB_DMA_RXCH_RESET_SINTC;
18783 + out_le32(sc->iob_regs + PAS_IOB_DMA_TXCH_RESET(chan_index), reg);
18786 + return IRQ_HANDLED;
18789 +static int pasemi_dma_setup_tx_resources(struct pasemi_softc *sc, int chan)
18792 + int chan_index = chan + sc->base_chan;
18794 + struct pasemi_fnu_txring *ring;
18796 + ring = &sc->tx[chan];
18798 + spin_lock_init(&ring->fill_lock);
18799 + spin_lock_init(&ring->clean_lock);
18801 + ring->desc_info = kzalloc(sizeof(struct pasemi_desc_info) *
18802 + TX_RING_SIZE, GFP_KERNEL);
18803 + if (!ring->desc_info)
18806 + /* Allocate descriptors */
18807 + ring->desc = dma_alloc_coherent(&sc->dma_pdev->dev,
18810 + &ring->dma, GFP_KERNEL);
18814 + memset((void *) ring->desc, 0, TX_RING_SIZE * 2 * sizeof(u64));
18816 + out_le32(sc->iob_regs + PAS_IOB_DMA_TXCH_RESET(chan_index), 0x30);
18818 + ring->total_pktcnt = 0;
18820 + out_le32(sc->dma_regs + PAS_DMA_TXCHAN_BASEL(chan_index),
18821 + PAS_DMA_TXCHAN_BASEL_BRBL(ring->dma));
18823 + val = PAS_DMA_TXCHAN_BASEU_BRBH(ring->dma >> 32);
18824 + val |= PAS_DMA_TXCHAN_BASEU_SIZ(TX_RING_SIZE >> 2);
18826 + out_le32(sc->dma_regs + PAS_DMA_TXCHAN_BASEU(chan_index), val);
18828 + out_le32(sc->dma_regs + PAS_DMA_TXCHAN_CFG(chan_index),
18829 + PAS_DMA_TXCHAN_CFG_TY_FUNC |
18830 + PAS_DMA_TXCHAN_CFG_TATTR(chan) |
18831 + PAS_DMA_TXCHAN_CFG_WT(2));
18833 + /* enable tx channel */
18834 + out_le32(sc->dma_regs +
18835 + PAS_DMA_TXCHAN_TCMDSTA(chan_index),
18836 + PAS_DMA_TXCHAN_TCMDSTA_EN);
18838 + out_le32(sc->iob_regs + PAS_IOB_DMA_TXCH_CFG(chan_index),
18839 + PAS_IOB_DMA_TXCH_CFG_CNTTH(1000));
18841 + ring->next_to_fill = 0;
18842 + ring->next_to_clean = 0;
18844 + snprintf(ring->irq_name, sizeof(ring->irq_name),
18845 + "%s%d", "crypto", chan);
18847 + ring->irq = irq_create_mapping(NULL, sc->base_irq + chan);
18848 + ret = request_irq(ring->irq, (irq_handler_t)
18849 + pasemi_intr, IRQF_DISABLED, ring->irq_name, sc);
18851 + printk(KERN_ERR DRV_NAME ": failed to hook irq %d ret %d\n",
18857 + setup_timer(&ring->crypto_timer, (void *) sweepup_tx, (unsigned long) sc);
18862 +static device_method_t pasemi_methods = {
18863 + /* crypto device methods */
18864 + DEVMETHOD(cryptodev_newsession, pasemi_newsession),
18865 + DEVMETHOD(cryptodev_freesession, pasemi_freesession),
18866 + DEVMETHOD(cryptodev_process, pasemi_process),
18869 +/* Set up the crypto device structure, private data,
18870 + * and anything else we need before we start */
18872 +static int __devinit
18873 +pasemi_dma_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
18875 + struct pasemi_softc *sc;
18878 + DPRINTF(KERN_ERR "%s()\n", __FUNCTION__);
18880 + sc = kzalloc(sizeof(*sc), GFP_KERNEL);
18884 + softc_device_init(sc, DRV_NAME, 1, pasemi_methods);
18886 + pci_set_drvdata(pdev, sc);
18888 + spin_lock_init(&sc->sc_chnlock);
18890 + sc->sc_sessions = (struct pasemi_session **)
18891 + kzalloc(PASEMI_INITIAL_SESSIONS *
18892 + sizeof(struct pasemi_session *), GFP_ATOMIC);
18893 + if (sc->sc_sessions == NULL) {
18898 + sc->sc_nsessions = PASEMI_INITIAL_SESSIONS;
18899 + sc->sc_lastchn = 0;
18900 + sc->base_irq = pdev->irq + 6;
18901 + sc->base_chan = 6;
18903 + sc->dma_pdev = pdev;
18905 + sc->iob_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL);
18906 + if (!sc->iob_pdev) {
18907 + dev_err(&pdev->dev, "Can't find I/O Bridge\n");
18912 + /* This is hardcoded and ugly, but we have some firmware versions
18913 + * that don't provide the register space in the device tree. Luckily
18914 + * they are at well-known locations so we can just do the math here.
18917 + ioremap(0xe0000000 + (sc->dma_pdev->devfn << 12), 0x2000);
18919 + ioremap(0xe0000000 + (sc->iob_pdev->devfn << 12), 0x2000);
18920 + if (!sc->dma_regs || !sc->iob_regs) {
18921 + dev_err(&pdev->dev, "Can't map registers\n");
18926 + dma_status = __ioremap(0xfd800000, 0x1000, 0);
18927 + if (!dma_status) {
18929 + dev_err(&pdev->dev, "Can't map dmastatus space\n");
18933 + sc->tx = (struct pasemi_fnu_txring *)
18934 + kzalloc(sizeof(struct pasemi_fnu_txring)
18935 + * 8, GFP_KERNEL);
18941 + /* Initialize the h/w */
18942 + out_le32(sc->dma_regs + PAS_DMA_COM_CFG,
18943 + (in_le32(sc->dma_regs + PAS_DMA_COM_CFG) |
18944 + PAS_DMA_COM_CFG_FWF));
18945 + out_le32(sc->dma_regs + PAS_DMA_COM_TXCMD, PAS_DMA_COM_TXCMD_EN);
18947 + for (i = 0; i < PASEMI_FNU_CHANNELS; i++) {
18948 + sc->sc_num_channels++;
18949 + ret = pasemi_dma_setup_tx_resources(sc, i);
18954 + sc->sc_cid = crypto_get_driverid(softc_get_device(sc),
18955 + CRYPTOCAP_F_HARDWARE);
18956 + if (sc->sc_cid < 0) {
18957 + printk(KERN_ERR DRV_NAME ": could not get crypto driver id\n");
18962 + /* register algorithms with the framework */
18963 + printk(DRV_NAME ":");
18965 + crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
18966 + crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
18967 + crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
18968 + crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0);
18969 + crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0);
18970 + crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0);
18971 + crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
18972 + crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
18977 + pasemi_dma_remove(pdev);
18981 +#define MAX_RETRIES 5000
18983 +static void pasemi_free_tx_resources(struct pasemi_softc *sc, int chan)
18985 + struct pasemi_fnu_txring *ring = &sc->tx[chan];
18986 + int chan_index = chan + sc->base_chan;
18990 + /* Stop the channel */
18991 + out_le32(sc->dma_regs +
18992 + PAS_DMA_TXCHAN_TCMDSTA(chan_index),
18993 + PAS_DMA_TXCHAN_TCMDSTA_ST);
18995 + for (retries = 0; retries < MAX_RETRIES; retries++) {
18996 + stat = in_le32(sc->dma_regs +
18997 + PAS_DMA_TXCHAN_TCMDSTA(chan_index));
18998 + if (!(stat & PAS_DMA_TXCHAN_TCMDSTA_ACT))
19003 + if (stat & PAS_DMA_TXCHAN_TCMDSTA_ACT)
19004 + dev_err(&sc->dma_pdev->dev, "Failed to stop tx channel %d\n",
19007 + /* Disable the channel */
19008 + out_le32(sc->dma_regs +
19009 + PAS_DMA_TXCHAN_TCMDSTA(chan_index),
19012 + if (ring->desc_info)
19013 + kfree((void *) ring->desc_info);
19015 + dma_free_coherent(&sc->dma_pdev->dev,
19018 + (void *) ring->desc, ring->dma);
19019 + if (ring->irq != -1)
19020 + free_irq(ring->irq, sc);
19022 + del_timer(&ring->crypto_timer);
19025 +static void __devexit pasemi_dma_remove(struct pci_dev *pdev)
19027 + struct pasemi_softc *sc = pci_get_drvdata(pdev);
19030 + DPRINTF("%s()\n", __FUNCTION__);
19032 + if (sc->sc_cid >= 0) {
19033 + crypto_unregister_all(sc->sc_cid);
19037 + for (i = 0; i < sc->sc_num_channels; i++)
19038 + pasemi_free_tx_resources(sc, i);
19042 + if (sc->sc_sessions) {
19043 + for (i = 0; i < sc->sc_nsessions; i++)
19044 + kfree(sc->sc_sessions[i]);
19045 + kfree(sc->sc_sessions);
19047 + if (sc->iob_pdev)
19048 + pci_dev_put(sc->iob_pdev);
19049 + if (sc->dma_regs)
19050 + iounmap(sc->dma_regs);
19051 + if (sc->iob_regs)
19052 + iounmap(sc->iob_regs);
19056 +static struct pci_device_id pasemi_dma_pci_tbl[] = {
19057 + { PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa007) },
19060 +MODULE_DEVICE_TABLE(pci, pasemi_dma_pci_tbl);
19062 +static struct pci_driver pasemi_dma_driver = {
19063 + .name = "pasemi_dma",
19064 + .id_table = pasemi_dma_pci_tbl,
19065 + .probe = pasemi_dma_probe,
19066 + .remove = __devexit_p(pasemi_dma_remove),
19069 +static void __exit pasemi_dma_cleanup_module(void)
19071 + pci_unregister_driver(&pasemi_dma_driver);
19072 + __iounmap(dma_status);
19073 + dma_status = NULL;
19076 +int pasemi_dma_init_module(void)
19078 + return pci_register_driver(&pasemi_dma_driver);
19081 +module_init(pasemi_dma_init_module);
19082 +module_exit(pasemi_dma_cleanup_module);
19084 +MODULE_LICENSE("Dual BSD/GPL");
19085 +MODULE_AUTHOR("Egor Martovetsky egor@pasemi.com");
19086 +MODULE_DESCRIPTION("OCF driver for PA Semi PWRficient DMA Crypto Engine");
19087 --- /dev/null 2007-07-04 13:54:27.000000000 +1000
19088 +++ linux/crypto/ocf/pasemi/pasemi_fnu.h 2007-12-12 11:36:18.000000000 +1000
19091 + * Copyright (C) 2007 PA Semi, Inc
19093 + * Driver for the PA Semi PWRficient DMA Crypto Engine, soft state and
19094 + * hardware register layouts.
19096 + * This program is free software; you can redistribute it and/or modify
19097 + * it under the terms of the GNU General Public License version 2 as
19098 + * published by the Free Software Foundation.
19100 + * This program is distributed in the hope that it will be useful,
19101 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
19102 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19103 + * GNU General Public License for more details.
19105 + * You should have received a copy of the GNU General Public License
19106 + * along with this program; if not, write to the Free Software
19107 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19110 +#ifndef PASEMI_FNU_H
19111 +#define PASEMI_FNU_H
19113 +#include <linux/spinlock.h>
19115 +#define PASEMI_SESSION(sid) ((sid) & 0xffffffff)
19116 +#define PASEMI_SID(sesn) ((sesn) & 0xffffffff)
19117 +#define DPRINTF(a...) if (debug) { printk(DRV_NAME ": " a); }
19119 +/* Must be a power of two */
19120 +#define RX_RING_SIZE 512
19121 +#define TX_RING_SIZE 512
19122 +#define TX_DESC(ring, num) ((ring)->desc[2 * (num & (TX_RING_SIZE-1))])
19123 +#define TX_DESC_INFO(ring, num) ((ring)->desc_info[(num) & (TX_RING_SIZE-1)])
19124 +#define MAX_DESC_SIZE 8
19125 +#define PASEMI_INITIAL_SESSIONS 10
19126 +#define PASEMI_FNU_CHANNELS 8
19128 +/* DMA descriptor */
19129 +struct pasemi_desc {
19130 + u64 quad[2*MAX_DESC_SIZE];
19137 + * Holds per descriptor data
19139 +struct pasemi_desc_info {
19142 +#define PASEMI_CHECK_SIG 0x1
19144 + struct cryptop *cf_crp;
19148 + * Holds per channel data
19150 +struct pasemi_fnu_txring {
19151 + volatile u64 *desc;
19153 + pasemi_desc_info *desc_info;
19155 + struct timer_list crypto_timer;
19156 + spinlock_t fill_lock;
19157 + spinlock_t clean_lock;
19158 + unsigned int next_to_fill;
19159 + unsigned int next_to_clean;
19160 + u16 total_pktcnt;
19163 + char irq_name[10];
19167 + * Holds data specific to a single pasemi device.
19169 +struct pasemi_softc {
19170 + softc_device_decl sc_cdev;
19171 + struct pci_dev *dma_pdev; /* device backpointer */
19172 + struct pci_dev *iob_pdev; /* device backpointer */
19173 + void __iomem *dma_regs;
19174 + void __iomem *iob_regs;
19177 + int32_t sc_cid; /* crypto tag */
19178 + int sc_nsessions;
19179 + struct pasemi_session **sc_sessions;
19180 + int sc_num_channels;/* number of crypto channels */
19182 + /* pointer to the array of txring datastructures, one txring per channel */
19183 + struct pasemi_fnu_txring *tx;
19186 + * mutual exclusion for the channel scheduler
19188 + spinlock_t sc_chnlock;
19189 + /* last channel used, for now use round-robin to allocate channels */
19193 +struct pasemi_session {
19204 + dma_addr_t dma_addr;
19208 +/* status register layout in IOB region, at 0xfd800000 */
19209 +struct pasdma_status {
19214 +#define ALG_IS_CIPHER(alg) ((alg == CRYPTO_DES_CBC) || \
19215 + (alg == CRYPTO_3DES_CBC) || \
19216 + (alg == CRYPTO_AES_CBC) || \
19217 + (alg == CRYPTO_ARC4) || \
19218 + (alg == CRYPTO_NULL_CBC))
19220 +#define ALG_IS_SIG(alg) ((alg == CRYPTO_MD5) || \
19221 + (alg == CRYPTO_MD5_HMAC) || \
19222 + (alg == CRYPTO_SHA1) || \
19223 + (alg == CRYPTO_SHA1_HMAC) || \
19224 + (alg == CRYPTO_NULL_HMAC))
19227 + PAS_DMA_COM_TXCMD = 0x100, /* Transmit Command Register */
19228 + PAS_DMA_COM_TXSTA = 0x104, /* Transmit Status Register */
19229 + PAS_DMA_COM_RXCMD = 0x108, /* Receive Command Register */
19230 + PAS_DMA_COM_RXSTA = 0x10c, /* Receive Status Register */
19231 + PAS_DMA_COM_CFG = 0x114, /* DMA Configuration Register */
19234 +/* All these registers live in the PCI configuration space for the DMA PCI
19235 + * device. Use the normal PCI config access functions for them.
19238 +#define PAS_DMA_COM_CFG_FWF 0x18000000
19240 +#define PAS_DMA_COM_TXCMD_EN 0x00000001 /* enable */
19241 +#define PAS_DMA_COM_TXSTA_ACT 0x00000001 /* active */
19242 +#define PAS_DMA_COM_RXCMD_EN 0x00000001 /* enable */
19243 +#define PAS_DMA_COM_RXSTA_ACT 0x00000001 /* active */
19245 +#define _PAS_DMA_TXCHAN_STRIDE 0x20 /* Size per channel */
19246 +#define _PAS_DMA_TXCHAN_TCMDSTA 0x300 /* Command / Status */
19247 +#define _PAS_DMA_TXCHAN_CFG 0x304 /* Configuration */
19248 +#define _PAS_DMA_TXCHAN_DSCRBU 0x308 /* Descriptor BU Allocation */
19249 +#define _PAS_DMA_TXCHAN_INCR 0x310 /* Descriptor increment */
19250 +#define _PAS_DMA_TXCHAN_CNT 0x314 /* Descriptor count/offset */
19251 +#define _PAS_DMA_TXCHAN_BASEL 0x318 /* Descriptor ring base (low) */
19252 +#define _PAS_DMA_TXCHAN_BASEU 0x31c /* (high) */
19253 +#define PAS_DMA_TXCHAN_TCMDSTA(c) (0x300+(c)*_PAS_DMA_TXCHAN_STRIDE)
19254 +#define PAS_DMA_TXCHAN_TCMDSTA_EN 0x00000001 /* Enabled */
19255 +#define PAS_DMA_TXCHAN_TCMDSTA_ST 0x00000002 /* Stop interface */
19256 +#define PAS_DMA_TXCHAN_TCMDSTA_ACT 0x00010000 /* Active */
19257 +#define PAS_DMA_TXCHAN_CFG(c) (0x304+(c)*_PAS_DMA_TXCHAN_STRIDE)
19258 +#define PAS_DMA_TXCHAN_CFG_TY_FUNC 0x00000002 /* Type = function */
19259 +#define PAS_DMA_TXCHAN_CFG_TY_IFACE 0x00000000 /* Type = interface */
19260 +#define PAS_DMA_TXCHAN_CFG_TATTR_M 0x0000003c
19261 +#define PAS_DMA_TXCHAN_CFG_TATTR_S 2
19262 +#define PAS_DMA_TXCHAN_CFG_TATTR(x) (((x) << PAS_DMA_TXCHAN_CFG_TATTR_S) & \
19263 + PAS_DMA_TXCHAN_CFG_TATTR_M)
19264 +#define PAS_DMA_TXCHAN_CFG_WT_M 0x000001c0
19265 +#define PAS_DMA_TXCHAN_CFG_WT_S 6
19266 +#define PAS_DMA_TXCHAN_CFG_WT(x) (((x) << PAS_DMA_TXCHAN_CFG_WT_S) & \
19267 + PAS_DMA_TXCHAN_CFG_WT_M)
19268 +#define PAS_DMA_TXCHAN_CFG_LPSQ_FAST 0x00000400
19269 +#define PAS_DMA_TXCHAN_CFG_LPDQ_FAST 0x00000800
19270 +#define PAS_DMA_TXCHAN_CFG_CF 0x00001000 /* Clean first line */
19271 +#define PAS_DMA_TXCHAN_CFG_CL 0x00002000 /* Clean last line */
19272 +#define PAS_DMA_TXCHAN_CFG_UP 0x00004000 /* update tx descr when sent */
19273 +#define PAS_DMA_TXCHAN_INCR(c) (0x310+(c)*_PAS_DMA_TXCHAN_STRIDE)
19274 +#define PAS_DMA_TXCHAN_BASEL(c) (0x318+(c)*_PAS_DMA_TXCHAN_STRIDE)
19275 +#define PAS_DMA_TXCHAN_BASEL_BRBL_M 0xffffffc0
19276 +#define PAS_DMA_TXCHAN_BASEL_BRBL_S 0
19277 +#define PAS_DMA_TXCHAN_BASEL_BRBL(x) (((x) << PAS_DMA_TXCHAN_BASEL_BRBL_S) & \
19278 + PAS_DMA_TXCHAN_BASEL_BRBL_M)
19279 +#define PAS_DMA_TXCHAN_BASEU(c) (0x31c+(c)*_PAS_DMA_TXCHAN_STRIDE)
19280 +#define PAS_DMA_TXCHAN_BASEU_BRBH_M 0x00000fff
19281 +#define PAS_DMA_TXCHAN_BASEU_BRBH_S 0
19282 +#define PAS_DMA_TXCHAN_BASEU_BRBH(x) (((x) << PAS_DMA_TXCHAN_BASEU_BRBH_S) & \
19283 + PAS_DMA_TXCHAN_BASEU_BRBH_M)
19284 +/* # of cache lines worth of buffer ring */
19285 +#define PAS_DMA_TXCHAN_BASEU_SIZ_M 0x3fff0000
19286 +#define PAS_DMA_TXCHAN_BASEU_SIZ_S 16 /* 0 = 16K */
19287 +#define PAS_DMA_TXCHAN_BASEU_SIZ(x) (((x) << PAS_DMA_TXCHAN_BASEU_SIZ_S) & \
19288 + PAS_DMA_TXCHAN_BASEU_SIZ_M)
19290 +#define PAS_STATUS_PCNT_M 0x000000000000ffffull
19291 +#define PAS_STATUS_PCNT_S 0
19292 +#define PAS_STATUS_DCNT_M 0x00000000ffff0000ull
19293 +#define PAS_STATUS_DCNT_S 16
19294 +#define PAS_STATUS_BPCNT_M 0x0000ffff00000000ull
19295 +#define PAS_STATUS_BPCNT_S 32
19296 +#define PAS_STATUS_CAUSE_M 0xf000000000000000ull
19297 +#define PAS_STATUS_TIMER 0x1000000000000000ull
19298 +#define PAS_STATUS_ERROR 0x2000000000000000ull
19299 +#define PAS_STATUS_SOFT 0x4000000000000000ull
19300 +#define PAS_STATUS_INT 0x8000000000000000ull
19302 +#define PAS_IOB_DMA_RXCH_CFG(i) (0x1100 + (i)*4)
19303 +#define PAS_IOB_DMA_RXCH_CFG_CNTTH_M 0x00000fff
19304 +#define PAS_IOB_DMA_RXCH_CFG_CNTTH_S 0
19305 +#define PAS_IOB_DMA_RXCH_CFG_CNTTH(x) (((x) << PAS_IOB_DMA_RXCH_CFG_CNTTH_S) & \
19306 + PAS_IOB_DMA_RXCH_CFG_CNTTH_M)
19307 +#define PAS_IOB_DMA_TXCH_CFG(i) (0x1200 + (i)*4)
19308 +#define PAS_IOB_DMA_TXCH_CFG_CNTTH_M 0x00000fff
19309 +#define PAS_IOB_DMA_TXCH_CFG_CNTTH_S 0
19310 +#define PAS_IOB_DMA_TXCH_CFG_CNTTH(x) (((x) << PAS_IOB_DMA_TXCH_CFG_CNTTH_S) & \
19311 + PAS_IOB_DMA_TXCH_CFG_CNTTH_M)
19312 +#define PAS_IOB_DMA_RXCH_STAT(i) (0x1300 + (i)*4)
19313 +#define PAS_IOB_DMA_RXCH_STAT_INTGEN 0x00001000
19314 +#define PAS_IOB_DMA_RXCH_STAT_CNTDEL_M 0x00000fff
19315 +#define PAS_IOB_DMA_RXCH_STAT_CNTDEL_S 0
19316 +#define PAS_IOB_DMA_RXCH_STAT_CNTDEL(x) (((x) << PAS_IOB_DMA_RXCH_STAT_CNTDEL_S) &\
19317 + PAS_IOB_DMA_RXCH_STAT_CNTDEL_M)
19318 +#define PAS_IOB_DMA_TXCH_STAT(i) (0x1400 + (i)*4)
19319 +#define PAS_IOB_DMA_TXCH_STAT_INTGEN 0x00001000
19320 +#define PAS_IOB_DMA_TXCH_STAT_CNTDEL_M 0x00000fff
19321 +#define PAS_IOB_DMA_TXCH_STAT_CNTDEL_S 0
19322 +#define PAS_IOB_DMA_TXCH_STAT_CNTDEL(x) (((x) << PAS_IOB_DMA_TXCH_STAT_CNTDEL_S) &\
19323 + PAS_IOB_DMA_TXCH_STAT_CNTDEL_M)
19324 +#define PAS_IOB_DMA_RXCH_RESET(i) (0x1500 + (i)*4)
19325 +#define PAS_IOB_DMA_RXCH_RESET_PCNT_M 0xffff0000
19326 +#define PAS_IOB_DMA_RXCH_RESET_PCNT_S 16
19327 +#define PAS_IOB_DMA_RXCH_RESET_PCNT(x) (((x) << PAS_IOB_DMA_RXCH_RESET_PCNT_S) & \
19328 + PAS_IOB_DMA_RXCH_RESET_PCNT_M)
19329 +#define PAS_IOB_DMA_RXCH_RESET_PCNTRST 0x00000020
19330 +#define PAS_IOB_DMA_RXCH_RESET_DCNTRST 0x00000010
19331 +#define PAS_IOB_DMA_RXCH_RESET_TINTC 0x00000008
19332 +#define PAS_IOB_DMA_RXCH_RESET_DINTC 0x00000004
19333 +#define PAS_IOB_DMA_RXCH_RESET_SINTC 0x00000002
19334 +#define PAS_IOB_DMA_RXCH_RESET_PINTC 0x00000001
19335 +#define PAS_IOB_DMA_TXCH_RESET(i) (0x1600 + (i)*4)
19336 +#define PAS_IOB_DMA_TXCH_RESET_PCNT_M 0xffff0000
19337 +#define PAS_IOB_DMA_TXCH_RESET_PCNT_S 16
19338 +#define PAS_IOB_DMA_TXCH_RESET_PCNT(x) (((x) << PAS_IOB_DMA_TXCH_RESET_PCNT_S) & \
19339 + PAS_IOB_DMA_TXCH_RESET_PCNT_M)
19340 +#define PAS_IOB_DMA_TXCH_RESET_PCNTRST 0x00000020
19341 +#define PAS_IOB_DMA_TXCH_RESET_DCNTRST 0x00000010
19342 +#define PAS_IOB_DMA_TXCH_RESET_TINTC 0x00000008
19343 +#define PAS_IOB_DMA_TXCH_RESET_DINTC 0x00000004
19344 +#define PAS_IOB_DMA_TXCH_RESET_SINTC 0x00000002
19345 +#define PAS_IOB_DMA_TXCH_RESET_PINTC 0x00000001
19347 +#define PAS_IOB_DMA_COM_TIMEOUTCFG 0x1700
19348 +#define PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_M 0x00ffffff
19349 +#define PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_S 0
19350 +#define PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(x) (((x) << PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_S) & \
19351 + PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_M)
19353 +/* Transmit descriptor fields */
19354 +#define XCT_MACTX_T 0x8000000000000000ull
19355 +#define XCT_MACTX_ST 0x4000000000000000ull
19356 +#define XCT_MACTX_NORES 0x0000000000000000ull
19357 +#define XCT_MACTX_8BRES 0x1000000000000000ull
19358 +#define XCT_MACTX_24BRES 0x2000000000000000ull
19359 +#define XCT_MACTX_40BRES 0x3000000000000000ull
19360 +#define XCT_MACTX_I 0x0800000000000000ull
19361 +#define XCT_MACTX_O 0x0400000000000000ull
19362 +#define XCT_MACTX_E 0x0200000000000000ull
19363 +#define XCT_MACTX_VLAN_M 0x0180000000000000ull
19364 +#define XCT_MACTX_VLAN_NOP 0x0000000000000000ull
19365 +#define XCT_MACTX_VLAN_REMOVE 0x0080000000000000ull
19366 +#define XCT_MACTX_VLAN_INSERT 0x0100000000000000ull
19367 +#define XCT_MACTX_VLAN_REPLACE 0x0180000000000000ull
19368 +#define XCT_MACTX_CRC_M 0x0060000000000000ull
19369 +#define XCT_MACTX_CRC_NOP 0x0000000000000000ull
19370 +#define XCT_MACTX_CRC_INSERT 0x0020000000000000ull
19371 +#define XCT_MACTX_CRC_PAD 0x0040000000000000ull
19372 +#define XCT_MACTX_CRC_REPLACE 0x0060000000000000ull
19373 +#define XCT_MACTX_SS 0x0010000000000000ull
19374 +#define XCT_MACTX_LLEN_M 0x00007fff00000000ull
19375 +#define XCT_MACTX_LLEN_S 32ull
19376 +#define XCT_MACTX_LLEN(x) ((((long)(x)) << XCT_MACTX_LLEN_S) & \
19377 + XCT_MACTX_LLEN_M)
19378 +#define XCT_MACTX_IPH_M 0x00000000f8000000ull
19379 +#define XCT_MACTX_IPH_S 27ull
19380 +#define XCT_MACTX_IPH(x) ((((long)(x)) << XCT_MACTX_IPH_S) & \
19382 +#define XCT_MACTX_IPO_M 0x0000000007c00000ull
19383 +#define XCT_MACTX_IPO_S 22ull
19384 +#define XCT_MACTX_IPO(x) ((((long)(x)) << XCT_MACTX_IPO_S) & \
19386 +#define XCT_MACTX_CSUM_M 0x0000000000000060ull
19387 +#define XCT_MACTX_CSUM_NOP 0x0000000000000000ull
19388 +#define XCT_MACTX_CSUM_TCP 0x0000000000000040ull
19389 +#define XCT_MACTX_CSUM_UDP 0x0000000000000060ull
19390 +#define XCT_MACTX_V6 0x0000000000000010ull
19391 +#define XCT_MACTX_C 0x0000000000000004ull
19392 +#define XCT_MACTX_AL2 0x0000000000000002ull
19394 +#define XCT_PTR_T 0x8000000000000000ull
19395 +#define XCT_PTR_LEN_M 0x7ffff00000000000ull
19396 +#define XCT_PTR_LEN_S 44
19397 +#define XCT_PTR_LEN(x)		((((long)(x)) << XCT_PTR_LEN_S) & \
19398 +				 XCT_PTR_LEN_M)
19399 +#define XCT_PTR_ADDR_M 0x00000fffffffffffull
19400 +#define XCT_PTR_ADDR_S 0
19401 +#define XCT_PTR_ADDR(x)		((((long)(x)) << XCT_PTR_ADDR_S) & \
19402 +				 XCT_PTR_ADDR_M)
19403 +
19404 +/* Function descriptor fields */
19405 +#define XCT_FUN_T 0x8000000000000000ull
19406 +#define XCT_FUN_ST 0x4000000000000000ull
19407 +#define XCT_FUN_NORES 0x0000000000000000ull
19408 +#define XCT_FUN_8BRES 0x1000000000000000ull
19409 +#define XCT_FUN_24BRES 0x2000000000000000ull
19410 +#define XCT_FUN_40BRES 0x3000000000000000ull
19411 +#define XCT_FUN_I 0x0800000000000000ull
19412 +#define XCT_FUN_O 0x0400000000000000ull
19413 +#define XCT_FUN_E 0x0200000000000000ull
19414 +#define XCT_FUN_FUN_S 54
19415 +#define XCT_FUN_FUN_M 0x01c0000000000000ull
19416 +#define XCT_FUN_FUN(num)	((((long)(num)) << XCT_FUN_FUN_S) & \
19417 +				 XCT_FUN_FUN_M)
19418 +#define XCT_FUN_CRM_NOP 0x0000000000000000ull
19419 +#define XCT_FUN_CRM_SIG 0x0008000000000000ull
19420 +#define XCT_FUN_CRM_ENC 0x0010000000000000ull
19421 +#define XCT_FUN_CRM_DEC 0x0018000000000000ull
19422 +#define XCT_FUN_CRM_SIG_ENC 0x0020000000000000ull
19423 +#define XCT_FUN_CRM_ENC_SIG 0x0028000000000000ull
19424 +#define XCT_FUN_CRM_SIG_DEC 0x0030000000000000ull
19425 +#define XCT_FUN_CRM_DEC_SIG 0x0038000000000000ull
19426 +#define XCT_FUN_LLEN_M 0x0007ffff00000000ull
19427 +#define XCT_FUN_LLEN_S 32ULL
19428 +#define XCT_FUN_LLEN(x)		((((long)(x)) << XCT_FUN_LLEN_S) & \
19429 +				 XCT_FUN_LLEN_M)
19430 +#define XCT_FUN_SHL_M 0x00000000f8000000ull
19431 +#define XCT_FUN_SHL_S 27ull
19432 +#define XCT_FUN_SHL(x)		((((long)(x)) << XCT_FUN_SHL_S) & \
19433 +				 XCT_FUN_SHL_M)
19434 +#define XCT_FUN_CHL_M 0x0000000007c00000ull
19435 +#define XCT_FUN_CHL_S 22ull
19436 +#define XCT_FUN_CHL(x)		((((long)(x)) << XCT_FUN_CHL_S) & \
19437 +				 XCT_FUN_CHL_M)
19438 +#define XCT_FUN_HSZ_M 0x00000000003c0000ull
19439 +#define XCT_FUN_HSZ_S 18ull
19440 +#define XCT_FUN_HSZ(x)		((((long)(x)) << XCT_FUN_HSZ_S) & \
19441 +				 XCT_FUN_HSZ_M)
19442 +#define XCT_FUN_ALG_DES 0x0000000000000000ull
19443 +#define XCT_FUN_ALG_3DES 0x0000000000008000ull
19444 +#define XCT_FUN_ALG_AES 0x0000000000010000ull
19445 +#define XCT_FUN_ALG_ARC 0x0000000000018000ull
19446 +#define XCT_FUN_ALG_KASUMI 0x0000000000020000ull
19447 +#define XCT_FUN_BCM_ECB 0x0000000000000000ull
19448 +#define XCT_FUN_BCM_CBC 0x0000000000001000ull
19449 +#define XCT_FUN_BCM_CFB 0x0000000000002000ull
19450 +#define XCT_FUN_BCM_OFB 0x0000000000003000ull
19451 +#define XCT_FUN_BCM_CNT 0x0000000000003800ull
19452 +#define XCT_FUN_BCM_KAS_F8 0x0000000000002800ull
19453 +#define XCT_FUN_BCM_KAS_F9 0x0000000000001800ull
19454 +#define XCT_FUN_BCP_NO_PAD 0x0000000000000000ull
19455 +#define XCT_FUN_BCP_ZRO 0x0000000000000200ull
19456 +#define XCT_FUN_BCP_PL 0x0000000000000400ull
19457 +#define XCT_FUN_BCP_INCR 0x0000000000000600ull
19458 +#define XCT_FUN_SIG_MD5 (0ull << 4)
19459 +#define XCT_FUN_SIG_SHA1 (2ull << 4)
19460 +#define XCT_FUN_SIG_HMAC_MD5 (8ull << 4)
19461 +#define XCT_FUN_SIG_HMAC_SHA1 (10ull << 4)
19462 +#define XCT_FUN_A 0x0000000000000008ull
19463 +#define XCT_FUN_C 0x0000000000000004ull
19464 +#define XCT_FUN_AL2 0x0000000000000002ull
19465 +#define XCT_FUN_SE		0x0000000000000001ull
19466 +
19467 +#define XCT_FUN_SRC_PTR(len, addr) (XCT_PTR_LEN(len) | XCT_PTR_ADDR(addr))
19468 +#define XCT_FUN_DST_PTR(len, addr) (XCT_FUN_SRC_PTR(len, addr) | \
19469 +					 0x8000000000000000ull)
19470 +
19471 +#define XCT_CTRL_HDR_FUN_NUM_M 0x01c0000000000000ull
19472 +#define XCT_CTRL_HDR_FUN_NUM_S 54
19473 +#define XCT_CTRL_HDR_LEN_M 0x0007ffff00000000ull
19474 +#define XCT_CTRL_HDR_LEN_S 32
19475 +#define XCT_CTRL_HDR_REG_M 0x00000000000000ffull
19476 +#define XCT_CTRL_HDR_REG_S	0
19477 +
19478 +#define XCT_CTRL_HDR(funcN,len,reg) (0x9400000000000000ull | \
19479 + ((((long)(funcN)) << XCT_CTRL_HDR_FUN_NUM_S) \
19480 + & XCT_CTRL_HDR_FUN_NUM_M) | \
19481 + ((((long)(len)) << \
19482 + XCT_CTRL_HDR_LEN_S) & XCT_CTRL_HDR_LEN_M) | \
19483 + ((((long)(reg)) << \
19484 +					 XCT_CTRL_HDR_REG_S) & XCT_CTRL_HDR_REG_M))
19485 +
19486 +/* Function config command options */
19487 +#define DMA_CALGO_DES 0x00
19488 +#define DMA_CALGO_3DES 0x01
19489 +#define DMA_CALGO_AES 0x02
19490 +#define DMA_CALGO_ARC		0x03
19491 +
19492 +#define DMA_FN_CIV0 0x02
19493 +#define DMA_FN_CIV1 0x03
19494 +#define DMA_FN_HKEY0		0x0a
19495 +
19496 +#define XCT_PTR_ADDR_LEN(ptr) ((ptr) & XCT_PTR_ADDR_M), \
19497 +					 (((ptr) & XCT_PTR_LEN_M) >> XCT_PTR_LEN_S)
19498 +
19499 +#endif /* PASEMI_FNU_H */