rpcd: iwinfo plugin fixes
[openwrt/svn-archive/archive.git] / target / linux / generic / files / crypto / ocf / pasemi / pasemi.c
1 /*
2 * Copyright (C) 2007 PA Semi, Inc
3 *
4 * Driver for the PA Semi PWRficient DMA Crypto Engine
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20 #include <linux/version.h>
21 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) && !defined(AUTOCONF_INCLUDED)
22 #include <linux/config.h>
23 #endif
24 #include <linux/module.h>
25 #include <linux/init.h>
26 #include <linux/interrupt.h>
27 #include <linux/timer.h>
28 #include <linux/random.h>
29 #include <linux/skbuff.h>
30 #include <asm/scatterlist.h>
31 #include <linux/moduleparam.h>
32 #include <linux/pci.h>
33 #include <cryptodev.h>
34 #include <uio.h>
35 #include "pasemi_fnu.h"
36
37 #define DRV_NAME "pasemi"
38
39 #define TIMER_INTERVAL 1000
40
41 static void pasemi_dma_remove(struct pci_dev *pdev);
42 static struct pasdma_status volatile * dma_status;
43
44 static int debug;
45 module_param(debug, int, 0644);
46 MODULE_PARM_DESC(debug, "Enable debug");
47
48 static void pasemi_desc_start(struct pasemi_desc *desc, u64 hdr)
49 {
50 desc->postop = 0;
51 desc->quad[0] = hdr;
52 desc->quad_cnt = 1;
53 desc->size = 1;
54 }
55
56 static void pasemi_desc_build(struct pasemi_desc *desc, u64 val)
57 {
58 desc->quad[desc->quad_cnt++] = val;
59 desc->size = (desc->quad_cnt + 1) / 2;
60 }
61
/* OR additional flag bits into the header quad of a started descriptor. */
static void pasemi_desc_hdr(struct pasemi_desc *desc, u64 hdr)
{
	desc->quad[0] |= hdr;
}
66
/* Size of the descriptor in 128-bit ring slots (as kept by
 * pasemi_desc_build()). */
static int pasemi_desc_size(struct pasemi_desc *desc)
{
	return desc->size;
}
71
/*
 * Copy a software-built descriptor into the channel's TX ring and record
 * its bookkeeping info (slot count, post-op flags, owning request) at the
 * first slot.  Quads are written two at a time because the ring is 128
 * bits wide.  Caller must hold the ring's fill_lock.
 */
static void pasemi_ring_add_desc(
	struct pasemi_fnu_txring *ring,
	struct pasemi_desc *desc,
	struct cryptop *crp) {
	int i;
	int ring_index = 2 * (ring->next_to_fill & (TX_RING_SIZE-1));

	TX_DESC_INFO(ring, ring->next_to_fill).desc_size = desc->size;
	TX_DESC_INFO(ring, ring->next_to_fill).desc_postop = desc->postop;
	TX_DESC_INFO(ring, ring->next_to_fill).cf_crp = crp;

	for (i = 0; i < desc->quad_cnt; i += 2) {
		ring_index = 2 * (ring->next_to_fill & (TX_RING_SIZE-1));
		ring->desc[ring_index] = desc->quad[i];
		/* For an odd quad_cnt the final iteration reads one quad
		 * past the valid data; that slot is overwritten with 0
		 * below before the hardware is kicked. */
		ring->desc[ring_index + 1] = desc->quad[i + 1];
		ring->next_to_fill++;
	}

	if (desc->quad_cnt & 1)
		ring->desc[ring_index + 1] = 0;
}
93
/* Kick the hardware: tell the DMA engine that `incr` more 128-bit
 * descriptor slots have been filled on this channel's TX ring. */
static void pasemi_ring_incr(struct pasemi_softc *sc, int chan_index, int incr)
{
	out_le32(sc->dma_regs + PAS_DMA_TXCHAN_INCR(sc->base_chan + chan_index),
		 incr);
}
99
100 /*
101 * Generate a new software session.
102 */
103 static int
104 pasemi_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
105 {
106 struct cryptoini *c, *encini = NULL, *macini = NULL;
107 struct pasemi_softc *sc = device_get_softc(dev);
108 struct pasemi_session *ses = NULL, **sespp;
109 int sesn, blksz = 0;
110 u64 ccmd = 0;
111 unsigned long flags;
112 struct pasemi_desc init_desc;
113 struct pasemi_fnu_txring *txring;
114
115 DPRINTF("%s()\n", __FUNCTION__);
116 if (sidp == NULL || cri == NULL || sc == NULL) {
117 DPRINTF("%s,%d - EINVAL\n", __FILE__, __LINE__);
118 return -EINVAL;
119 }
120 for (c = cri; c != NULL; c = c->cri_next) {
121 if (ALG_IS_SIG(c->cri_alg)) {
122 if (macini)
123 return -EINVAL;
124 macini = c;
125 } else if (ALG_IS_CIPHER(c->cri_alg)) {
126 if (encini)
127 return -EINVAL;
128 encini = c;
129 } else {
130 DPRINTF("UNKNOWN c->cri_alg %d\n", c->cri_alg);
131 return -EINVAL;
132 }
133 }
134 if (encini == NULL && macini == NULL)
135 return -EINVAL;
136 if (encini) {
137 /* validate key length */
138 switch (encini->cri_alg) {
139 case CRYPTO_DES_CBC:
140 if (encini->cri_klen != 64)
141 return -EINVAL;
142 ccmd = DMA_CALGO_DES;
143 break;
144 case CRYPTO_3DES_CBC:
145 if (encini->cri_klen != 192)
146 return -EINVAL;
147 ccmd = DMA_CALGO_3DES;
148 break;
149 case CRYPTO_AES_CBC:
150 if (encini->cri_klen != 128 &&
151 encini->cri_klen != 192 &&
152 encini->cri_klen != 256)
153 return -EINVAL;
154 ccmd = DMA_CALGO_AES;
155 break;
156 case CRYPTO_ARC4:
157 if (encini->cri_klen != 128)
158 return -EINVAL;
159 ccmd = DMA_CALGO_ARC;
160 break;
161 default:
162 DPRINTF("UNKNOWN encini->cri_alg %d\n",
163 encini->cri_alg);
164 return -EINVAL;
165 }
166 }
167
168 if (macini) {
169 switch (macini->cri_alg) {
170 case CRYPTO_MD5:
171 case CRYPTO_MD5_HMAC:
172 blksz = 16;
173 break;
174 case CRYPTO_SHA1:
175 case CRYPTO_SHA1_HMAC:
176 blksz = 20;
177 break;
178 default:
179 DPRINTF("UNKNOWN macini->cri_alg %d\n",
180 macini->cri_alg);
181 return -EINVAL;
182 }
183 if (((macini->cri_klen + 7) / 8) > blksz) {
184 DPRINTF("key length %d bigger than blksize %d not supported\n",
185 ((macini->cri_klen + 7) / 8), blksz);
186 return -EINVAL;
187 }
188 }
189
190 for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
191 if (sc->sc_sessions[sesn] == NULL) {
192 sc->sc_sessions[sesn] = (struct pasemi_session *)
193 kzalloc(sizeof(struct pasemi_session), GFP_ATOMIC);
194 ses = sc->sc_sessions[sesn];
195 break;
196 } else if (sc->sc_sessions[sesn]->used == 0) {
197 ses = sc->sc_sessions[sesn];
198 break;
199 }
200 }
201
202 if (ses == NULL) {
203 sespp = (struct pasemi_session **)
204 kzalloc(sc->sc_nsessions * 2 *
205 sizeof(struct pasemi_session *), GFP_ATOMIC);
206 if (sespp == NULL)
207 return -ENOMEM;
208 memcpy(sespp, sc->sc_sessions,
209 sc->sc_nsessions * sizeof(struct pasemi_session *));
210 kfree(sc->sc_sessions);
211 sc->sc_sessions = sespp;
212 sesn = sc->sc_nsessions;
213 ses = sc->sc_sessions[sesn] = (struct pasemi_session *)
214 kzalloc(sizeof(struct pasemi_session), GFP_ATOMIC);
215 if (ses == NULL)
216 return -ENOMEM;
217 sc->sc_nsessions *= 2;
218 }
219
220 ses->used = 1;
221
222 ses->dma_addr = pci_map_single(sc->dma_pdev, (void *) ses->civ,
223 sizeof(struct pasemi_session), DMA_TO_DEVICE);
224
225 /* enter the channel scheduler */
226 spin_lock_irqsave(&sc->sc_chnlock, flags);
227
228 /* ARC4 has to be processed by the even channel */
229 if (encini && (encini->cri_alg == CRYPTO_ARC4))
230 ses->chan = sc->sc_lastchn & ~1;
231 else
232 ses->chan = sc->sc_lastchn;
233 sc->sc_lastchn = (sc->sc_lastchn + 1) % sc->sc_num_channels;
234
235 spin_unlock_irqrestore(&sc->sc_chnlock, flags);
236
237 txring = &sc->tx[ses->chan];
238
239 if (encini) {
240 ses->ccmd = ccmd;
241 ses->keysz = (encini->cri_klen - 63) / 64;
242 memcpy(ses->key, encini->cri_key, (ses->keysz + 1) * 8);
243
244 pasemi_desc_start(&init_desc,
245 XCT_CTRL_HDR(ses->chan, (encini && macini) ? 0x68 : 0x40, DMA_FN_CIV0));
246 pasemi_desc_build(&init_desc,
247 XCT_FUN_SRC_PTR((encini && macini) ? 0x68 : 0x40, ses->dma_addr));
248 }
249 if (macini) {
250 if (macini->cri_alg == CRYPTO_MD5_HMAC ||
251 macini->cri_alg == CRYPTO_SHA1_HMAC)
252 memcpy(ses->hkey, macini->cri_key, blksz);
253 else {
254 /* Load initialization constants(RFC 1321, 3174) */
255 ses->hiv[0] = 0x67452301efcdab89ULL;
256 ses->hiv[1] = 0x98badcfe10325476ULL;
257 ses->hiv[2] = 0xc3d2e1f000000000ULL;
258 }
259 ses->hseq = 0ULL;
260 }
261
262 spin_lock_irqsave(&txring->fill_lock, flags);
263
264 if (((txring->next_to_fill + pasemi_desc_size(&init_desc)) -
265 txring->next_to_clean) > TX_RING_SIZE) {
266 spin_unlock_irqrestore(&txring->fill_lock, flags);
267 return ERESTART;
268 }
269
270 if (encini) {
271 pasemi_ring_add_desc(txring, &init_desc, NULL);
272 pasemi_ring_incr(sc, ses->chan,
273 pasemi_desc_size(&init_desc));
274 }
275
276 txring->sesn = sesn;
277 spin_unlock_irqrestore(&txring->fill_lock, flags);
278
279 *sidp = PASEMI_SID(sesn);
280 return 0;
281 }
282
283 /*
284 * Deallocate a session.
285 */
286 static int
287 pasemi_freesession(device_t dev, u_int64_t tid)
288 {
289 struct pasemi_softc *sc = device_get_softc(dev);
290 int session;
291 u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;
292
293 DPRINTF("%s()\n", __FUNCTION__);
294
295 if (sc == NULL)
296 return -EINVAL;
297 session = PASEMI_SESSION(sid);
298 if (session >= sc->sc_nsessions || !sc->sc_sessions[session])
299 return -EINVAL;
300
301 pci_unmap_single(sc->dma_pdev,
302 sc->sc_sessions[session]->dma_addr,
303 sizeof(struct pasemi_session), DMA_TO_DEVICE);
304 memset(sc->sc_sessions[session], 0,
305 sizeof(struct pasemi_session));
306
307 return 0;
308 }
309
/*
 * Process one crypto request (OCF cryptodev_process entry point).
 *
 * Builds an optional "init" descriptor (to reload keys/IVs into the
 * engine's function unit) plus a "work" descriptor describing the
 * source/destination buffer, then posts them on the session's TX ring.
 * Supported shapes: cipher-only, MAC-only, encrypt-then-sign, and
 * sign(verify)-then-decrypt - the engine's processing order is fixed, so
 * other combinations are rejected.
 *
 * Returns 0 on success, ERESTART when the ring is full (OCF requeues),
 * or a negative errno (also delivered via crp_etype / crypto_done()).
 */
static int
pasemi_process(device_t dev, struct cryptop *crp, int hint)
{

	int err = 0, ivsize, srclen = 0, reinit = 0, reinit_size = 0, chsel;
	struct pasemi_softc *sc = device_get_softc(dev);
	struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
	caddr_t ivp;
	struct pasemi_desc init_desc, work_desc;
	struct pasemi_session *ses;
	struct sk_buff *skb;
	struct uio *uiop;
	unsigned long flags;
	struct pasemi_fnu_txring *txring;

	DPRINTF("%s()\n", __FUNCTION__);

	if (crp == NULL || crp->crp_callback == NULL || sc == NULL)
		return -EINVAL;

	crp->crp_etype = 0;
	if (PASEMI_SESSION(crp->crp_sid) >= sc->sc_nsessions)
		return -EINVAL;

	ses = sc->sc_sessions[PASEMI_SESSION(crp->crp_sid)];

	crd1 = crp->crp_desc;
	if (crd1 == NULL) {
		err = -EINVAL;
		goto errout;
	}
	crd2 = crd1->crd_next;

	/* Classify the (at most two) descriptors: MAC-first requires
	 * decrypt, cipher-first requires encrypt. */
	if (ALG_IS_SIG(crd1->crd_alg)) {
		maccrd = crd1;
		if (crd2 == NULL)
			enccrd = NULL;
		else if (ALG_IS_CIPHER(crd2->crd_alg) &&
			 (crd2->crd_flags & CRD_F_ENCRYPT) == 0)
			enccrd = crd2;
		else
			goto erralg;
	} else if (ALG_IS_CIPHER(crd1->crd_alg)) {
		enccrd = crd1;
		if (crd2 == NULL)
			maccrd = NULL;
		else if (ALG_IS_SIG(crd2->crd_alg) &&
			 (crd1->crd_flags & CRD_F_ENCRYPT))
			maccrd = crd2;
		else
			goto erralg;
	} else
		goto erralg;

	chsel = ses->chan;

	txring = &sc->tx[chsel];

	if (enccrd && !maccrd) {
		/* Cipher only.  ARC4 is stateful, so its full context must
		 * be reloaded for every request. */
		if (enccrd->crd_alg == CRYPTO_ARC4)
			reinit = 1;
		reinit_size = 0x40;
		srclen = crp->crp_ilen;

		pasemi_desc_start(&work_desc, XCT_FUN_O | XCT_FUN_I
				  | XCT_FUN_FUN(chsel));
		if (enccrd->crd_flags & CRD_F_ENCRYPT)
			pasemi_desc_hdr(&work_desc, XCT_FUN_CRM_ENC);
		else
			pasemi_desc_hdr(&work_desc, XCT_FUN_CRM_DEC);
	} else if (enccrd && maccrd) {
		if (enccrd->crd_alg == CRYPTO_ARC4)
			reinit = 1;
		reinit_size = 0x68;

		if (enccrd->crd_flags & CRD_F_ENCRYPT) {
			/* Encrypt -> Authenticate */
			pasemi_desc_start(&work_desc, XCT_FUN_O | XCT_FUN_I | XCT_FUN_CRM_ENC_SIG
					  | XCT_FUN_A | XCT_FUN_FUN(chsel));
			srclen = maccrd->crd_skip + maccrd->crd_len;
		} else {
			/* Authenticate -> Decrypt */
			pasemi_desc_start(&work_desc, XCT_FUN_O | XCT_FUN_I | XCT_FUN_CRM_SIG_DEC
					  | XCT_FUN_24BRES | XCT_FUN_FUN(chsel));
			/* Three zero quads reserve space for the 24-byte
			 * result the engine writes back; bit 63 of the
			 * result is checked in pasemi_clean_tx(). */
			pasemi_desc_build(&work_desc, 0);
			pasemi_desc_build(&work_desc, 0);
			pasemi_desc_build(&work_desc, 0);
			work_desc.postop = PASEMI_CHECK_SIG;
			srclen = crp->crp_ilen;
		}

		/* Hash start offset (in 32-bit words) and the gap between
		 * hash start and cipher start. */
		pasemi_desc_hdr(&work_desc, XCT_FUN_SHL(maccrd->crd_skip / 4));
		pasemi_desc_hdr(&work_desc, XCT_FUN_CHL(enccrd->crd_skip - maccrd->crd_skip));
	} else if (!enccrd && maccrd) {
		/* MAC only: reload the hash key/IV area first.  The ->hkey
		 * access on the casted dma_addr computes the bus address of
		 * the hkey field (base + offset), not a CPU dereference. */
		srclen = maccrd->crd_len;

		pasemi_desc_start(&init_desc,
				  XCT_CTRL_HDR(chsel, 0x58, DMA_FN_HKEY0));
		pasemi_desc_build(&init_desc,
				  XCT_FUN_SRC_PTR(0x58, ((struct pasemi_session *)ses->dma_addr)->hkey));

		pasemi_desc_start(&work_desc, XCT_FUN_O | XCT_FUN_I | XCT_FUN_CRM_SIG
				  | XCT_FUN_A | XCT_FUN_FUN(chsel));
	}

	if (enccrd) {
		switch (enccrd->crd_alg) {
		case CRYPTO_3DES_CBC:
			pasemi_desc_hdr(&work_desc, XCT_FUN_ALG_3DES |
					XCT_FUN_BCM_CBC);
			ivsize = sizeof(u64);
			break;
		case CRYPTO_DES_CBC:
			pasemi_desc_hdr(&work_desc, XCT_FUN_ALG_DES |
					XCT_FUN_BCM_CBC);
			ivsize = sizeof(u64);
			break;
		case CRYPTO_AES_CBC:
			pasemi_desc_hdr(&work_desc, XCT_FUN_ALG_AES |
					XCT_FUN_BCM_CBC);
			ivsize = 2 * sizeof(u64);
			break;
		case CRYPTO_ARC4:
			pasemi_desc_hdr(&work_desc, XCT_FUN_ALG_ARC);
			ivsize = 0;
			break;
		default:
			printk(DRV_NAME ": unimplemented enccrd->crd_alg %d\n",
			       enccrd->crd_alg);
			err = -EINVAL;
			goto errout;
		}

		/* 8-byte IVs live in civ[1]; 16-byte IVs start at civ[0]. */
		ivp = (ivsize == sizeof(u64)) ? (caddr_t) &ses->civ[1] : (caddr_t) &ses->civ[0];
		if (enccrd->crd_flags & CRD_F_ENCRYPT) {
			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
				memcpy(ivp, enccrd->crd_iv, ivsize);
			else
				read_random(ivp, ivsize);
			/* If IV is not present in the buffer already, it has to be copied there */
			if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0)
				crypto_copyback(crp->crp_flags, crp->crp_buf,
						enccrd->crd_inject, ivsize, ivp);
		} else {
			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
				/* IV is provided explicitly in descriptor */
				memcpy(ivp, enccrd->crd_iv, ivsize);
			else
				/* IV is provided in the packet */
				crypto_copydata(crp->crp_flags, crp->crp_buf,
						enccrd->crd_inject, ivsize,
						ivp);
		}
	}

	if (maccrd) {
		/* HSZ encodes the hash-output offset in 32-bit words. */
		switch (maccrd->crd_alg) {
		case CRYPTO_MD5:
			pasemi_desc_hdr(&work_desc, XCT_FUN_SIG_MD5 |
					XCT_FUN_HSZ((crp->crp_ilen - maccrd->crd_inject) / 4));
			break;
		case CRYPTO_SHA1:
			pasemi_desc_hdr(&work_desc, XCT_FUN_SIG_SHA1 |
					XCT_FUN_HSZ((crp->crp_ilen - maccrd->crd_inject) / 4));
			break;
		case CRYPTO_MD5_HMAC:
			pasemi_desc_hdr(&work_desc, XCT_FUN_SIG_HMAC_MD5 |
					XCT_FUN_HSZ((crp->crp_ilen - maccrd->crd_inject) / 4));
			break;
		case CRYPTO_SHA1_HMAC:
			pasemi_desc_hdr(&work_desc, XCT_FUN_SIG_HMAC_SHA1 |
					XCT_FUN_HSZ((crp->crp_ilen - maccrd->crd_inject) / 4));
			break;
		default:
			printk(DRV_NAME ": unimplemented maccrd->crd_alg %d\n",
			       maccrd->crd_alg);
			err = -EINVAL;
			goto errout;
		}
	}

	/* In-place operation: source and destination map the same memory.
	 * NOTE(review): the destination is mapped DMA_TO_DEVICE even though
	 * the engine writes to it - confirm against DMA-API expectations. */
	if (crp->crp_flags & CRYPTO_F_SKBUF) {
		/* using SKB buffers */
		skb = (struct sk_buff *)crp->crp_buf;
		if (skb_shinfo(skb)->nr_frags) {
			printk(DRV_NAME ": skb frags unimplemented\n");
			err = -EINVAL;
			goto errout;
		}
		pasemi_desc_build(
			&work_desc,
			XCT_FUN_DST_PTR(skb->len, pci_map_single(
						sc->dma_pdev, skb->data,
						skb->len, DMA_TO_DEVICE)));
		pasemi_desc_build(
			&work_desc,
			XCT_FUN_SRC_PTR(
				srclen, pci_map_single(
					sc->dma_pdev, skb->data,
					srclen, DMA_TO_DEVICE)));
		pasemi_desc_hdr(&work_desc, XCT_FUN_LLEN(srclen));
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		/* using IOV buffers */
		uiop = (struct uio *)crp->crp_buf;
		if (uiop->uio_iovcnt > 1) {
			printk(DRV_NAME ": iov frags unimplemented\n");
			err = -EINVAL;
			goto errout;
		}

		/* crp_olen is never set; always use crp_ilen */
		pasemi_desc_build(
			&work_desc,
			XCT_FUN_DST_PTR(crp->crp_ilen, pci_map_single(
						sc->dma_pdev,
						uiop->uio_iov->iov_base,
						crp->crp_ilen, DMA_TO_DEVICE)));
		pasemi_desc_hdr(&work_desc, XCT_FUN_LLEN(srclen));

		pasemi_desc_build(
			&work_desc,
			XCT_FUN_SRC_PTR(srclen, pci_map_single(
						sc->dma_pdev,
						uiop->uio_iov->iov_base,
						srclen, DMA_TO_DEVICE)));
	} else {
		/* using contig buffers */
		pasemi_desc_build(
			&work_desc,
			XCT_FUN_DST_PTR(crp->crp_ilen, pci_map_single(
						sc->dma_pdev,
						crp->crp_buf,
						crp->crp_ilen, DMA_TO_DEVICE)));
		pasemi_desc_build(
			&work_desc,
			XCT_FUN_SRC_PTR(srclen, pci_map_single(
						sc->dma_pdev,
						crp->crp_buf, srclen,
						DMA_TO_DEVICE)));
		pasemi_desc_hdr(&work_desc, XCT_FUN_LLEN(srclen));
	}

	spin_lock_irqsave(&txring->fill_lock, flags);

	/* Another session used this ring since our last request: force a
	 * full context reload. */
	if (txring->sesn != PASEMI_SESSION(crp->crp_sid)) {
		txring->sesn = PASEMI_SESSION(crp->crp_sid);
		reinit = 1;
	}

	if (enccrd) {
		/* 0x10 reloads just the IVs; reinit_size reloads the whole
		 * key/IV context. */
		pasemi_desc_start(&init_desc,
				  XCT_CTRL_HDR(chsel, reinit ? reinit_size : 0x10, DMA_FN_CIV0));
		pasemi_desc_build(&init_desc,
				  XCT_FUN_SRC_PTR(reinit ? reinit_size : 0x10, ses->dma_addr));
	}

	if (((txring->next_to_fill + pasemi_desc_size(&init_desc) +
	      pasemi_desc_size(&work_desc)) -
	     txring->next_to_clean) > TX_RING_SIZE) {
		spin_unlock_irqrestore(&txring->fill_lock, flags);
		err = ERESTART;
		goto errout;
	}

	pasemi_ring_add_desc(txring, &init_desc, NULL);
	pasemi_ring_add_desc(txring, &work_desc, crp);

	pasemi_ring_incr(sc, chsel,
			 pasemi_desc_size(&init_desc) +
			 pasemi_desc_size(&work_desc));

	spin_unlock_irqrestore(&txring->fill_lock, flags);

	/* Backstop sweep in case the completion interrupt is missed. */
	mod_timer(&txring->crypto_timer, jiffies + TIMER_INTERVAL);

	return 0;

erralg:
	printk(DRV_NAME ": unsupported algorithm or algorithm order alg1 %d alg2 %d\n",
	       crd1->crd_alg, crd2->crd_alg);
	err = -EINVAL;

errout:
	/* ERESTART means "requeue"; anything else completes the request
	 * with an error status. */
	if (err != ERESTART) {
		crp->crp_etype = err;
		crypto_done(crp);
	}
	return err;
}
599
600 static int pasemi_clean_tx(struct pasemi_softc *sc, int chan)
601 {
602 int i, j, ring_idx;
603 struct pasemi_fnu_txring *ring = &sc->tx[chan];
604 u16 delta_cnt;
605 int flags, loops = 10;
606 int desc_size;
607 struct cryptop *crp;
608
609 spin_lock_irqsave(&ring->clean_lock, flags);
610
611 while ((delta_cnt = (dma_status->tx_sta[sc->base_chan + chan]
612 & PAS_STATUS_PCNT_M) - ring->total_pktcnt)
613 && loops--) {
614
615 for (i = 0; i < delta_cnt; i++) {
616 desc_size = TX_DESC_INFO(ring, ring->next_to_clean).desc_size;
617 crp = TX_DESC_INFO(ring, ring->next_to_clean).cf_crp;
618 if (crp) {
619 ring_idx = 2 * (ring->next_to_clean & (TX_RING_SIZE-1));
620 if (TX_DESC_INFO(ring, ring->next_to_clean).desc_postop & PASEMI_CHECK_SIG) {
621 /* Need to make sure signature matched,
622 * if not - return error */
623 if (!(ring->desc[ring_idx + 1] & (1ULL << 63)))
624 crp->crp_etype = -EINVAL;
625 }
626 crypto_done(TX_DESC_INFO(ring,
627 ring->next_to_clean).cf_crp);
628 TX_DESC_INFO(ring, ring->next_to_clean).cf_crp = NULL;
629 pci_unmap_single(
630 sc->dma_pdev,
631 XCT_PTR_ADDR_LEN(ring->desc[ring_idx + 1]),
632 PCI_DMA_TODEVICE);
633
634 ring->desc[ring_idx] = ring->desc[ring_idx + 1] = 0;
635
636 ring->next_to_clean++;
637 for (j = 1; j < desc_size; j++) {
638 ring_idx = 2 *
639 (ring->next_to_clean &
640 (TX_RING_SIZE-1));
641 pci_unmap_single(
642 sc->dma_pdev,
643 XCT_PTR_ADDR_LEN(ring->desc[ring_idx]),
644 PCI_DMA_TODEVICE);
645 if (ring->desc[ring_idx + 1])
646 pci_unmap_single(
647 sc->dma_pdev,
648 XCT_PTR_ADDR_LEN(
649 ring->desc[
650 ring_idx + 1]),
651 PCI_DMA_TODEVICE);
652 ring->desc[ring_idx] =
653 ring->desc[ring_idx + 1] = 0;
654 ring->next_to_clean++;
655 }
656 } else {
657 for (j = 0; j < desc_size; j++) {
658 ring_idx = 2 * (ring->next_to_clean & (TX_RING_SIZE-1));
659 ring->desc[ring_idx] =
660 ring->desc[ring_idx + 1] = 0;
661 ring->next_to_clean++;
662 }
663 }
664 }
665
666 ring->total_pktcnt += delta_cnt;
667 }
668 spin_unlock_irqrestore(&ring->clean_lock, flags);
669
670 return 0;
671 }
672
673 static void sweepup_tx(struct pasemi_softc *sc)
674 {
675 int i;
676
677 for (i = 0; i < sc->sc_num_channels; i++)
678 pasemi_clean_tx(sc, i);
679 }
680
/*
 * TX completion interrupt handler.  One IRQ per DMA channel; the channel
 * index is recovered as (irq - base_irq).  Reaps finished descriptors,
 * then acks by writing the packet-count/interrupt reset register.
 * Uses the pre-2.6.19 three-argument prototype; it is cast to
 * irq_handler_t at request_irq() time in pasemi_dma_setup_tx_resources().
 */
static irqreturn_t pasemi_intr(int irq, void *arg, struct pt_regs *regs)
{
	struct pasemi_softc *sc = arg;
	unsigned int reg;
	int chan = irq - sc->base_irq;
	int chan_index = sc->base_chan + chan;
	u64 stat = dma_status->tx_sta[chan_index];

	DPRINTF("%s()\n", __FUNCTION__);

	/* Not ours if no cause bits are set. */
	if (!(stat & PAS_STATUS_CAUSE_M))
		return IRQ_NONE;

	pasemi_clean_tx(sc, chan);

	/* Re-read status: more completions may have landed during clean. */
	stat = dma_status->tx_sta[chan_index];

	reg = PAS_IOB_DMA_TXCH_RESET_PINTC |
		PAS_IOB_DMA_TXCH_RESET_PCNT(sc->tx[chan].total_pktcnt);

	/* NOTE(review): an RXCH soft-interrupt-clear bit is used on a TXCH
	 * register here - confirm the bit layouts are identical. */
	if (stat & PAS_STATUS_SOFT)
		reg |= PAS_IOB_DMA_RXCH_RESET_SINTC;

	out_le32(sc->iob_regs + PAS_IOB_DMA_TXCH_RESET(chan_index), reg);


	return IRQ_HANDLED;
}
709
/*
 * Allocate and program one TX (function-unit) channel: per-slot
 * descriptor-info array, coherent descriptor ring, channel base/size and
 * config registers, the completion IRQ and the backstop sweep timer.
 * On failure the partially-initialized ring is released by the caller
 * (probe error path goes through pasemi_dma_remove ->
 * pasemi_free_tx_resources).
 */
static int pasemi_dma_setup_tx_resources(struct pasemi_softc *sc, int chan)
{
	u32 val;
	int chan_index = chan + sc->base_chan;
	int ret;
	struct pasemi_fnu_txring *ring;

	ring = &sc->tx[chan];

	spin_lock_init(&ring->fill_lock);
	spin_lock_init(&ring->clean_lock);

	ring->desc_info = kzalloc(sizeof(struct pasemi_desc_info) *
				  TX_RING_SIZE, GFP_KERNEL);
	if (!ring->desc_info)
		return -ENOMEM;

	/* Allocate descriptors */
	ring->desc = dma_alloc_coherent(&sc->dma_pdev->dev,
					TX_RING_SIZE *
					2 * sizeof(u64),
					&ring->dma, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	memset((void *) ring->desc, 0, TX_RING_SIZE * 2 * sizeof(u64));

	/* Clear stale interrupt/packet-count state before enabling. */
	out_le32(sc->iob_regs + PAS_IOB_DMA_TXCH_RESET(chan_index), 0x30);

	ring->total_pktcnt = 0;

	/* Program the ring's bus address (low then high word + size). */
	out_le32(sc->dma_regs + PAS_DMA_TXCHAN_BASEL(chan_index),
		 PAS_DMA_TXCHAN_BASEL_BRBL(ring->dma));

	val = PAS_DMA_TXCHAN_BASEU_BRBH(ring->dma >> 32);
	val |= PAS_DMA_TXCHAN_BASEU_SIZ(TX_RING_SIZE >> 2);

	out_le32(sc->dma_regs + PAS_DMA_TXCHAN_BASEU(chan_index), val);

	out_le32(sc->dma_regs + PAS_DMA_TXCHAN_CFG(chan_index),
		 PAS_DMA_TXCHAN_CFG_TY_FUNC |
		 PAS_DMA_TXCHAN_CFG_TATTR(chan) |
		 PAS_DMA_TXCHAN_CFG_WT(2));

	/* enable tx channel */
	out_le32(sc->dma_regs +
		 PAS_DMA_TXCHAN_TCMDSTA(chan_index),
		 PAS_DMA_TXCHAN_TCMDSTA_EN);

	out_le32(sc->iob_regs + PAS_IOB_DMA_TXCH_CFG(chan_index),
		 PAS_IOB_DMA_TXCH_CFG_CNTTH(1000));

	ring->next_to_fill = 0;
	ring->next_to_clean = 0;

	snprintf(ring->irq_name, sizeof(ring->irq_name),
		 "%s%d", "crypto", chan);

	ring->irq = irq_create_mapping(NULL, sc->base_irq + chan);
	ret = request_irq(ring->irq, (irq_handler_t)
			  pasemi_intr, 0, ring->irq_name, sc);
	if (ret) {
		printk(KERN_ERR DRV_NAME ": failed to hook irq %d ret %d\n",
		       ring->irq, ret);
		ring->irq = -1;
		return ret;
	}

	/* Backstop timer: sweeps all channels if an interrupt is missed. */
	setup_timer(&ring->crypto_timer, (void *) sweepup_tx, (unsigned long) sc);

	return 0;
}
782
/* OCF device method table: wires this driver's session management and
 * request processing into the cryptodev framework. */
static device_method_t pasemi_methods = {
	/* crypto device methods */
	DEVMETHOD(cryptodev_newsession, pasemi_newsession),
	DEVMETHOD(cryptodev_freesession, pasemi_freesession),
	DEVMETHOD(cryptodev_process, pasemi_process),
};
789
/* Set up the crypto device structure, private data,
 * and anything else we need before we start */

/*
 * PCI probe: allocates the softc and session table, maps the DMA and
 * I/O-bridge register windows and the shared DMA status page, sets up
 * the function-unit TX channels, then registers the supported
 * algorithms with OCF.  All error paths funnel through
 * pasemi_dma_remove(), which tolerates partial initialization.
 */
static int
pasemi_dma_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct pasemi_softc *sc;
	int ret, i;

	DPRINTF(KERN_ERR "%s()\n", __FUNCTION__);

	sc = kzalloc(sizeof(*sc), GFP_KERNEL);
	if (!sc)
		return -ENOMEM;

	softc_device_init(sc, DRV_NAME, 1, pasemi_methods);

	pci_set_drvdata(pdev, sc);

	spin_lock_init(&sc->sc_chnlock);

	sc->sc_sessions = (struct pasemi_session **)
		kzalloc(PASEMI_INITIAL_SESSIONS *
			sizeof(struct pasemi_session *), GFP_ATOMIC);
	if (sc->sc_sessions == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	/* The crypto function unit owns channels/IRQs starting at offset 6
	 * of this DMA device.  NOTE(review): pci_enable_device() is never
	 * called on pdev - confirm the platform makes it unnecessary. */
	sc->sc_nsessions = PASEMI_INITIAL_SESSIONS;
	sc->sc_lastchn = 0;
	sc->base_irq = pdev->irq + 6;
	sc->base_chan = 6;
	sc->sc_cid = -1;
	sc->dma_pdev = pdev;

	sc->iob_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL);
	if (!sc->iob_pdev) {
		dev_err(&pdev->dev, "Can't find I/O Bridge\n");
		ret = -ENODEV;
		goto out;
	}

	/* This is hardcoded and ugly, but we have some firmware versions
	 * who don't provide the register space in the device tree. Luckily
	 * they are at well-known locations so we can just do the math here.
	 */
	sc->dma_regs =
		ioremap(0xe0000000 + (sc->dma_pdev->devfn << 12), 0x2000);
	sc->iob_regs =
		ioremap(0xe0000000 + (sc->iob_pdev->devfn << 12), 0x2000);
	if (!sc->dma_regs || !sc->iob_regs) {
		dev_err(&pdev->dev, "Can't map registers\n");
		ret = -ENODEV;
		goto out;
	}

	/* Shared DMA status page (per-channel packet counters/cause bits),
	 * mapped uncached at its fixed physical address. */
	dma_status = __ioremap(0xfd800000, 0x1000, 0);
	if (!dma_status) {
		ret = -ENODEV;
		dev_err(&pdev->dev, "Can't map dmastatus space\n");
		goto out;
	}

	/* NOTE(review): 8 rings are allocated while the loop below sets up
	 * PASEMI_FNU_CHANNELS of them - confirm PASEMI_FNU_CHANNELS <= 8. */
	sc->tx = (struct pasemi_fnu_txring *)
		kzalloc(sizeof(struct pasemi_fnu_txring)
			* 8, GFP_KERNEL);
	if (!sc->tx) {
		ret = -ENOMEM;
		goto out;
	}

	/* Initialize the h/w */
	out_le32(sc->dma_regs + PAS_DMA_COM_CFG,
		 (in_le32(sc->dma_regs + PAS_DMA_COM_CFG) |
		  PAS_DMA_COM_CFG_FWF));
	out_le32(sc->dma_regs + PAS_DMA_COM_TXCMD, PAS_DMA_COM_TXCMD_EN);

	/* sc_num_channels is incremented before setup so the error path
	 * frees partially-initialized channels too. */
	for (i = 0; i < PASEMI_FNU_CHANNELS; i++) {
		sc->sc_num_channels++;
		ret = pasemi_dma_setup_tx_resources(sc, i);
		if (ret)
			goto out;
	}

	sc->sc_cid = crypto_get_driverid(softc_get_device(sc),
					 CRYPTOCAP_F_HARDWARE);
	if (sc->sc_cid < 0) {
		printk(KERN_ERR DRV_NAME ": could not get crypto driver id\n");
		ret = -ENXIO;
		goto out;
	}

	/* register algorithms with the framework */
	printk(DRV_NAME ":");

	crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);

	return 0;

out:
	pasemi_dma_remove(pdev);
	return ret;
}
901
902 #define MAX_RETRIES 5000
903
/*
 * Tear down one TX channel: ask the hardware to stop, poll (up to
 * MAX_RETRIES, yielding between reads) until it reports inactive,
 * disable the channel, then release the descriptor-info array, the
 * coherent ring, the IRQ and the sweep timer.  Safe on a partially
 * initialized ring (probe error path): unallocated fields are
 * NULL/-1-checked before release.
 */
static void pasemi_free_tx_resources(struct pasemi_softc *sc, int chan)
{
	struct pasemi_fnu_txring *ring = &sc->tx[chan];
	int chan_index = chan + sc->base_chan;
	int retries;
	u32 stat;

	/* Stop the channel */
	out_le32(sc->dma_regs +
		 PAS_DMA_TXCHAN_TCMDSTA(chan_index),
		 PAS_DMA_TXCHAN_TCMDSTA_ST);

	for (retries = 0; retries < MAX_RETRIES; retries++) {
		stat = in_le32(sc->dma_regs +
			       PAS_DMA_TXCHAN_TCMDSTA(chan_index));
		if (!(stat & PAS_DMA_TXCHAN_TCMDSTA_ACT))
			break;
		cond_resched();
	}

	if (stat & PAS_DMA_TXCHAN_TCMDSTA_ACT)
		dev_err(&sc->dma_pdev->dev, "Failed to stop tx channel %d\n",
			chan_index);

	/* Disable the channel */
	out_le32(sc->dma_regs +
		 PAS_DMA_TXCHAN_TCMDSTA(chan_index),
		 0);

	if (ring->desc_info)
		kfree((void *) ring->desc_info);
	if (ring->desc)
		dma_free_coherent(&sc->dma_pdev->dev,
				  TX_RING_SIZE *
				  2 * sizeof(u64),
				  (void *) ring->desc, ring->dma);
	if (ring->irq != -1)
		free_irq(ring->irq, sc);

	del_timer(&ring->crypto_timer);
}
945
/*
 * PCI remove (also the probe error path): unregister from OCF, tear down
 * every initialized channel, free the session table, drop the I/O-bridge
 * device reference and unmap the register windows.  Tolerates partial
 * initialization - each resource is checked before release.
 */
static void pasemi_dma_remove(struct pci_dev *pdev)
{
	struct pasemi_softc *sc = pci_get_drvdata(pdev);
	int i;

	DPRINTF("%s()\n", __FUNCTION__);

	if (sc->sc_cid >= 0) {
		crypto_unregister_all(sc->sc_cid);
	}

	if (sc->tx) {
		/* Only channels counted in sc_num_channels were set up. */
		for (i = 0; i < sc->sc_num_channels; i++)
			pasemi_free_tx_resources(sc, i);

		kfree(sc->tx);
	}
	if (sc->sc_sessions) {
		for (i = 0; i < sc->sc_nsessions; i++)
			kfree(sc->sc_sessions[i]);
		kfree(sc->sc_sessions);
	}
	if (sc->iob_pdev)
		pci_dev_put(sc->iob_pdev);
	if (sc->dma_regs)
		iounmap(sc->dma_regs);
	if (sc->iob_regs)
		iounmap(sc->iob_regs);
	kfree(sc);
}
976
977 static struct pci_device_id pasemi_dma_pci_tbl[] = {
978 { PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa007) },
979 };
980
981 MODULE_DEVICE_TABLE(pci, pasemi_dma_pci_tbl);
982
/* PCI glue: binds this driver to the PA Semi DMA function (0xa007). */
static struct pci_driver pasemi_dma_driver = {
	.name = "pasemi_dma",
	.id_table = pasemi_dma_pci_tbl,
	.probe = pasemi_dma_probe,
	.remove = pasemi_dma_remove,
};
989
990 static void __exit pasemi_dma_cleanup_module(void)
991 {
992 pci_unregister_driver(&pasemi_dma_driver);
993 __iounmap(dma_status);
994 dma_status = NULL;
995 }
996
/* Module entry point: register the PCI driver; all hardware setup is
 * done in pasemi_dma_probe().  Non-static, preserving original linkage. */
int pasemi_dma_init_module(void)
{
	return pci_register_driver(&pasemi_dma_driver);
}
1001
1002 module_init(pasemi_dma_init_module);
1003 module_exit(pasemi_dma_cleanup_module);
1004
1005 MODULE_LICENSE("Dual BSD/GPL");
1006 MODULE_AUTHOR("Egor Martovetsky egor@pasemi.com");
1007 MODULE_DESCRIPTION("OCF driver for PA Semi PWRficient DMA Crypto Engine");