target/linux/generic/files/crypto/ocf/pasemi/pasemi.c
/*
 * Copyright (C) 2007 PA Semi, Inc
 *
 * Driver for the PA Semi PWRficient DMA Crypto Engine
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/version.h>
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33))
#include <generated/autoconf.h>
#else
#include <linux/autoconf.h>
#endif
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <asm/scatterlist.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <cryptodev.h>
#include <uio.h>
#include "pasemi_fnu.h"

#define DRV_NAME "pasemi"

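/* Interval, in jiffies, for the per-ring cleanup timer armed in pasemi_process(). */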
#define TIMER_INTERVAL 1000

static void __devexit pasemi_dma_remove(struct pci_dev *pdev);
static struct pasdma_status volatile * dma_status;

static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Enable debug");

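/*
 * The pasemi_desc_* helpers build a software copy of a DMA function
 * descriptor as a sequence of 64-bit quads.  The hardware ring stores two
 * quads per entry, so desc->size is kept in 128-bit ring entries
 * (quad_cnt rounded up).
 */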
static void pasemi_desc_start(struct pasemi_desc *desc, u64 hdr)
{
	desc->postop = 0;
	desc->quad[0] = hdr;
	desc->quad_cnt = 1;
	desc->size = 1;
}

static void pasemi_desc_build(struct pasemi_desc *desc, u64 val)
{
	desc->quad[desc->quad_cnt++] = val;
	desc->size = (desc->quad_cnt + 1) / 2;
}

static void pasemi_desc_hdr(struct pasemi_desc *desc, u64 hdr)
{
	desc->quad[0] |= hdr;
}

static int pasemi_desc_size(struct pasemi_desc *desc)
{
	return desc->size;
}

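/*
 * Copy a software descriptor into the transmit ring at next_to_fill and
 * remember its size, postop flags and owning request so the cleanup path
 * can complete it later.  An odd quad count leaves the second half of the
 * last ring entry zeroed.
 */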
static void pasemi_ring_add_desc(
	struct pasemi_fnu_txring *ring,
	struct pasemi_desc *desc,
	struct cryptop *crp) {
	int i;
	int ring_index = 2 * (ring->next_to_fill & (TX_RING_SIZE-1));

	TX_DESC_INFO(ring, ring->next_to_fill).desc_size = desc->size;
	TX_DESC_INFO(ring, ring->next_to_fill).desc_postop = desc->postop;
	TX_DESC_INFO(ring, ring->next_to_fill).cf_crp = crp;

	for (i = 0; i < desc->quad_cnt; i += 2) {
		ring_index = 2 * (ring->next_to_fill & (TX_RING_SIZE-1));
		ring->desc[ring_index] = desc->quad[i];
		ring->desc[ring_index + 1] = desc->quad[i + 1];
		ring->next_to_fill++;
	}

	if (desc->quad_cnt & 1)
		ring->desc[ring_index + 1] = 0;
}

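/* Tell the DMA engine how many new ring entries were added to this channel. */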
static void pasemi_ring_incr(struct pasemi_softc *sc, int chan_index, int incr)
{
	out_le32(sc->dma_regs + PAS_DMA_TXCHAN_INCR(sc->base_chan + chan_index),
		 incr);
}

/*
 * Generate a new software session.
 */
static int
pasemi_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
{
	struct cryptoini *c, *encini = NULL, *macini = NULL;
	struct pasemi_softc *sc = device_get_softc(dev);
	struct pasemi_session *ses = NULL, **sespp;
	int sesn, blksz = 0;
	u64 ccmd = 0;
	unsigned long flags;
	struct pasemi_desc init_desc;
	struct pasemi_fnu_txring *txring;

	DPRINTF("%s()\n", __FUNCTION__);
	if (sidp == NULL || cri == NULL || sc == NULL) {
		DPRINTF("%s,%d - EINVAL\n", __FILE__, __LINE__);
		return -EINVAL;
	}
	for (c = cri; c != NULL; c = c->cri_next) {
		if (ALG_IS_SIG(c->cri_alg)) {
			if (macini)
				return -EINVAL;
			macini = c;
		} else if (ALG_IS_CIPHER(c->cri_alg)) {
			if (encini)
				return -EINVAL;
			encini = c;
		} else {
			DPRINTF("UNKNOWN c->cri_alg %d\n", c->cri_alg);
			return -EINVAL;
		}
	}
	if (encini == NULL && macini == NULL)
		return -EINVAL;
	if (encini) {
		/* validate key length */
		switch (encini->cri_alg) {
		case CRYPTO_DES_CBC:
			if (encini->cri_klen != 64)
				return -EINVAL;
			ccmd = DMA_CALGO_DES;
			break;
		case CRYPTO_3DES_CBC:
			if (encini->cri_klen != 192)
				return -EINVAL;
			ccmd = DMA_CALGO_3DES;
			break;
		case CRYPTO_AES_CBC:
			if (encini->cri_klen != 128 &&
			    encini->cri_klen != 192 &&
			    encini->cri_klen != 256)
				return -EINVAL;
			ccmd = DMA_CALGO_AES;
			break;
		case CRYPTO_ARC4:
			if (encini->cri_klen != 128)
				return -EINVAL;
			ccmd = DMA_CALGO_ARC;
			break;
		default:
			DPRINTF("UNKNOWN encini->cri_alg %d\n",
				encini->cri_alg);
			return -EINVAL;
		}
	}

	if (macini) {
		switch (macini->cri_alg) {
		case CRYPTO_MD5:
		case CRYPTO_MD5_HMAC:
			blksz = 16;
			break;
		case CRYPTO_SHA1:
		case CRYPTO_SHA1_HMAC:
			blksz = 20;
			break;
		default:
			DPRINTF("UNKNOWN macini->cri_alg %d\n",
				macini->cri_alg);
			return -EINVAL;
		}
		if (((macini->cri_klen + 7) / 8) > blksz) {
			DPRINTF("key length %d bigger than blksize %d not supported\n",
				((macini->cri_klen + 7) / 8), blksz);
			return -EINVAL;
		}
	}

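	/*
	 * Find a session slot: reuse the first unused entry, or grow the
	 * session table by doubling it when every slot is occupied.
	 */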
	for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
		if (sc->sc_sessions[sesn] == NULL) {
			sc->sc_sessions[sesn] = (struct pasemi_session *)
				kzalloc(sizeof(struct pasemi_session), GFP_ATOMIC);
			ses = sc->sc_sessions[sesn];
			break;
		} else if (sc->sc_sessions[sesn]->used == 0) {
			ses = sc->sc_sessions[sesn];
			break;
		}
	}

	if (ses == NULL) {
		sespp = (struct pasemi_session **)
			kzalloc(sc->sc_nsessions * 2 *
				sizeof(struct pasemi_session *), GFP_ATOMIC);
		if (sespp == NULL)
			return -ENOMEM;
		memcpy(sespp, sc->sc_sessions,
		       sc->sc_nsessions * sizeof(struct pasemi_session *));
		kfree(sc->sc_sessions);
		sc->sc_sessions = sespp;
		sesn = sc->sc_nsessions;
		ses = sc->sc_sessions[sesn] = (struct pasemi_session *)
			kzalloc(sizeof(struct pasemi_session), GFP_ATOMIC);
		if (ses == NULL)
			return -ENOMEM;
		sc->sc_nsessions *= 2;
	}

	ses->used = 1;

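	/*
	 * DMA-map the per-session key/IV block so the CTRL/SRC descriptors
	 * built below can point the engine at it.
	 */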
	ses->dma_addr = pci_map_single(sc->dma_pdev, (void *) ses->civ,
				       sizeof(struct pasemi_session), DMA_TO_DEVICE);

	/* enter the channel scheduler */
	spin_lock_irqsave(&sc->sc_chnlock, flags);

	/* ARC4 has to be processed by the even channel */
	if (encini && (encini->cri_alg == CRYPTO_ARC4))
		ses->chan = sc->sc_lastchn & ~1;
	else
		ses->chan = sc->sc_lastchn;
	sc->sc_lastchn = (sc->sc_lastchn + 1) % sc->sc_num_channels;

	spin_unlock_irqrestore(&sc->sc_chnlock, flags);

	txring = &sc->tx[ses->chan];

	if (encini) {
		ses->ccmd = ccmd;

		/* get an IV */
		/* XXX may read fewer than requested */
		get_random_bytes(ses->civ, sizeof(ses->civ));

		ses->keysz = (encini->cri_klen - 63) / 64;
		memcpy(ses->key, encini->cri_key, (ses->keysz + 1) * 8);

		pasemi_desc_start(&init_desc,
				  XCT_CTRL_HDR(ses->chan, (encini && macini) ? 0x68 : 0x40, DMA_FN_CIV0));
		pasemi_desc_build(&init_desc,
				  XCT_FUN_SRC_PTR((encini && macini) ? 0x68 : 0x40, ses->dma_addr));
	}
	if (macini) {
		if (macini->cri_alg == CRYPTO_MD5_HMAC ||
		    macini->cri_alg == CRYPTO_SHA1_HMAC)
			memcpy(ses->hkey, macini->cri_key, blksz);
		else {
			/* Load initialization constants (RFC 1321, RFC 3174) */
			ses->hiv[0] = 0x67452301efcdab89ULL;
			ses->hiv[1] = 0x98badcfe10325476ULL;
			ses->hiv[2] = 0xc3d2e1f000000000ULL;
		}
		ses->hseq = 0ULL;
	}

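	/*
	 * Reserve ring space for the key/IV init descriptor; if the ring is
	 * full, return ERESTART so the request is retried later.
	 */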
	spin_lock_irqsave(&txring->fill_lock, flags);

	if (((txring->next_to_fill + pasemi_desc_size(&init_desc)) -
	     txring->next_to_clean) > TX_RING_SIZE) {
		spin_unlock_irqrestore(&txring->fill_lock, flags);
		return ERESTART;
	}

	if (encini) {
		pasemi_ring_add_desc(txring, &init_desc, NULL);
		pasemi_ring_incr(sc, ses->chan,
				 pasemi_desc_size(&init_desc));
	}

	txring->sesn = sesn;
	spin_unlock_irqrestore(&txring->fill_lock, flags);

	*sidp = PASEMI_SID(sesn);
	return 0;
}

/*
 * Deallocate a session.
 */
static int
pasemi_freesession(device_t dev, u_int64_t tid)
{
	struct pasemi_softc *sc = device_get_softc(dev);
	int session;
	u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;

	DPRINTF("%s()\n", __FUNCTION__);

	if (sc == NULL)
		return -EINVAL;
	session = PASEMI_SESSION(sid);
	if (session >= sc->sc_nsessions || !sc->sc_sessions[session])
		return -EINVAL;

	pci_unmap_single(sc->dma_pdev,
			 sc->sc_sessions[session]->dma_addr,
			 sizeof(struct pasemi_session), DMA_TO_DEVICE);
	memset(sc->sc_sessions[session], 0,
	       sizeof(struct pasemi_session));

	return 0;
}

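/*
 * Process one crypto request: parse the cipher/MAC descriptors, build the
 * init and work DMA descriptors, map the data buffer (skb, iov or contiguous)
 * and queue everything on the session's channel.
 */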
static int
pasemi_process(device_t dev, struct cryptop *crp, int hint)
{
	int err = 0, ivsize, srclen = 0, reinit = 0, reinit_size = 0, chsel;
	struct pasemi_softc *sc = device_get_softc(dev);
	struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
	caddr_t ivp;
	struct pasemi_desc init_desc, work_desc;
	struct pasemi_session *ses;
	struct sk_buff *skb;
	struct uio *uiop;
	unsigned long flags;
	struct pasemi_fnu_txring *txring;

	DPRINTF("%s()\n", __FUNCTION__);

	if (crp == NULL || crp->crp_callback == NULL || sc == NULL)
		return -EINVAL;

	crp->crp_etype = 0;
	if (PASEMI_SESSION(crp->crp_sid) >= sc->sc_nsessions)
		return -EINVAL;

	ses = sc->sc_sessions[PASEMI_SESSION(crp->crp_sid)];

	crd1 = crp->crp_desc;
	if (crd1 == NULL) {
		err = -EINVAL;
		goto errout;
	}
	crd2 = crd1->crd_next;

	if (ALG_IS_SIG(crd1->crd_alg)) {
		maccrd = crd1;
		if (crd2 == NULL)
			enccrd = NULL;
		else if (ALG_IS_CIPHER(crd2->crd_alg) &&
			 (crd2->crd_flags & CRD_F_ENCRYPT) == 0)
			enccrd = crd2;
		else
			goto erralg;
	} else if (ALG_IS_CIPHER(crd1->crd_alg)) {
		enccrd = crd1;
		if (crd2 == NULL)
			maccrd = NULL;
		else if (ALG_IS_SIG(crd2->crd_alg) &&
			 (crd1->crd_flags & CRD_F_ENCRYPT))
			maccrd = crd2;
		else
			goto erralg;
	} else
		goto erralg;

	chsel = ses->chan;

	txring = &sc->tx[chsel];

	if (enccrd && !maccrd) {
		if (enccrd->crd_alg == CRYPTO_ARC4)
			reinit = 1;
		reinit_size = 0x40;
		srclen = crp->crp_ilen;

		pasemi_desc_start(&work_desc, XCT_FUN_O | XCT_FUN_I
				  | XCT_FUN_FUN(chsel));
		if (enccrd->crd_flags & CRD_F_ENCRYPT)
			pasemi_desc_hdr(&work_desc, XCT_FUN_CRM_ENC);
		else
			pasemi_desc_hdr(&work_desc, XCT_FUN_CRM_DEC);
	} else if (enccrd && maccrd) {
		if (enccrd->crd_alg == CRYPTO_ARC4)
			reinit = 1;
		reinit_size = 0x68;

		if (enccrd->crd_flags & CRD_F_ENCRYPT) {
			/* Encrypt -> Authenticate */
			pasemi_desc_start(&work_desc, XCT_FUN_O | XCT_FUN_I | XCT_FUN_CRM_ENC_SIG
					  | XCT_FUN_A | XCT_FUN_FUN(chsel));
			srclen = maccrd->crd_skip + maccrd->crd_len;
		} else {
			/* Authenticate -> Decrypt */
			pasemi_desc_start(&work_desc, XCT_FUN_O | XCT_FUN_I | XCT_FUN_CRM_SIG_DEC
					  | XCT_FUN_24BRES | XCT_FUN_FUN(chsel));
			pasemi_desc_build(&work_desc, 0);
			pasemi_desc_build(&work_desc, 0);
			pasemi_desc_build(&work_desc, 0);
			work_desc.postop = PASEMI_CHECK_SIG;
			srclen = crp->crp_ilen;
		}

		pasemi_desc_hdr(&work_desc, XCT_FUN_SHL(maccrd->crd_skip / 4));
		pasemi_desc_hdr(&work_desc, XCT_FUN_CHL(enccrd->crd_skip - maccrd->crd_skip));
	} else if (!enccrd && maccrd) {
		srclen = maccrd->crd_len;

		pasemi_desc_start(&init_desc,
				  XCT_CTRL_HDR(chsel, 0x58, DMA_FN_HKEY0));
		pasemi_desc_build(&init_desc,
				  XCT_FUN_SRC_PTR(0x58, ((struct pasemi_session *)ses->dma_addr)->hkey));

		pasemi_desc_start(&work_desc, XCT_FUN_O | XCT_FUN_I | XCT_FUN_CRM_SIG
				  | XCT_FUN_A | XCT_FUN_FUN(chsel));
	}

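	/*
	 * Cipher setup: pick the algorithm/block-mode bits and the IV size,
	 * then move the IV between the session state and the packet as
	 * dictated by the direction and the CRD_F_IV_* flags.
	 */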
	if (enccrd) {
		switch (enccrd->crd_alg) {
		case CRYPTO_3DES_CBC:
			pasemi_desc_hdr(&work_desc, XCT_FUN_ALG_3DES |
					XCT_FUN_BCM_CBC);
			ivsize = sizeof(u64);
			break;
		case CRYPTO_DES_CBC:
			pasemi_desc_hdr(&work_desc, XCT_FUN_ALG_DES |
					XCT_FUN_BCM_CBC);
			ivsize = sizeof(u64);
			break;
		case CRYPTO_AES_CBC:
			pasemi_desc_hdr(&work_desc, XCT_FUN_ALG_AES |
					XCT_FUN_BCM_CBC);
			ivsize = 2 * sizeof(u64);
			break;
		case CRYPTO_ARC4:
			pasemi_desc_hdr(&work_desc, XCT_FUN_ALG_ARC);
			ivsize = 0;
			break;
		default:
			printk(DRV_NAME ": unimplemented enccrd->crd_alg %d\n",
			       enccrd->crd_alg);
			err = -EINVAL;
			goto errout;
		}

		ivp = (ivsize == sizeof(u64)) ? (caddr_t) &ses->civ[1] : (caddr_t) &ses->civ[0];
		if (enccrd->crd_flags & CRD_F_ENCRYPT) {
			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
				memcpy(ivp, enccrd->crd_iv, ivsize);
			/* If IV is not present in the buffer already, it has to be copied there */
			if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0)
				crypto_copyback(crp->crp_flags, crp->crp_buf,
						enccrd->crd_inject, ivsize, ivp);
		} else {
			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
				/* IV is provided explicitly in the descriptor */
				memcpy(ivp, enccrd->crd_iv, ivsize);
			else
				/* IV is provided in the packet */
				crypto_copydata(crp->crp_flags, crp->crp_buf,
						enccrd->crd_inject, ivsize,
						ivp);
		}
	}

	if (maccrd) {
		switch (maccrd->crd_alg) {
		case CRYPTO_MD5:
			pasemi_desc_hdr(&work_desc, XCT_FUN_SIG_MD5 |
					XCT_FUN_HSZ((crp->crp_ilen - maccrd->crd_inject) / 4));
			break;
		case CRYPTO_SHA1:
			pasemi_desc_hdr(&work_desc, XCT_FUN_SIG_SHA1 |
					XCT_FUN_HSZ((crp->crp_ilen - maccrd->crd_inject) / 4));
			break;
		case CRYPTO_MD5_HMAC:
			pasemi_desc_hdr(&work_desc, XCT_FUN_SIG_HMAC_MD5 |
					XCT_FUN_HSZ((crp->crp_ilen - maccrd->crd_inject) / 4));
			break;
		case CRYPTO_SHA1_HMAC:
			pasemi_desc_hdr(&work_desc, XCT_FUN_SIG_HMAC_SHA1 |
					XCT_FUN_HSZ((crp->crp_ilen - maccrd->crd_inject) / 4));
			break;
		default:
			printk(DRV_NAME ": unimplemented maccrd->crd_alg %d\n",
			       maccrd->crd_alg);
			err = -EINVAL;
			goto errout;
		}
	}

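	/*
	 * Map the data buffer and add source/destination pointers to the work
	 * descriptor.  Only single-fragment skbs and single-entry iovecs are
	 * handled; anything else is rejected.
	 */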
	if (crp->crp_flags & CRYPTO_F_SKBUF) {
		/* using SKB buffers */
		skb = (struct sk_buff *)crp->crp_buf;
		if (skb_shinfo(skb)->nr_frags) {
			printk(DRV_NAME ": skb frags unimplemented\n");
			err = -EINVAL;
			goto errout;
		}
		pasemi_desc_build(
			&work_desc,
			XCT_FUN_DST_PTR(skb->len, pci_map_single(
						sc->dma_pdev, skb->data,
						skb->len, DMA_TO_DEVICE)));
		pasemi_desc_build(
			&work_desc,
			XCT_FUN_SRC_PTR(
				srclen, pci_map_single(
					sc->dma_pdev, skb->data,
					srclen, DMA_TO_DEVICE)));
		pasemi_desc_hdr(&work_desc, XCT_FUN_LLEN(srclen));
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		/* using IOV buffers */
		uiop = (struct uio *)crp->crp_buf;
		if (uiop->uio_iovcnt > 1) {
			printk(DRV_NAME ": iov frags unimplemented\n");
			err = -EINVAL;
			goto errout;
		}

		/* crp_olen is never set; always use crp_ilen */
		pasemi_desc_build(
			&work_desc,
			XCT_FUN_DST_PTR(crp->crp_ilen, pci_map_single(
						sc->dma_pdev,
						uiop->uio_iov->iov_base,
						crp->crp_ilen, DMA_TO_DEVICE)));
		pasemi_desc_hdr(&work_desc, XCT_FUN_LLEN(srclen));

		pasemi_desc_build(
			&work_desc,
			XCT_FUN_SRC_PTR(srclen, pci_map_single(
						sc->dma_pdev,
						uiop->uio_iov->iov_base,
						srclen, DMA_TO_DEVICE)));
	} else {
		/* using contig buffers */
		pasemi_desc_build(
			&work_desc,
			XCT_FUN_DST_PTR(crp->crp_ilen, pci_map_single(
						sc->dma_pdev,
						crp->crp_buf,
						crp->crp_ilen, DMA_TO_DEVICE)));
		pasemi_desc_build(
			&work_desc,
			XCT_FUN_SRC_PTR(srclen, pci_map_single(
						sc->dma_pdev,
						crp->crp_buf, srclen,
						DMA_TO_DEVICE)));
		pasemi_desc_hdr(&work_desc, XCT_FUN_LLEN(srclen));
	}

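	/*
	 * Queue the descriptors under the fill lock.  A session switch on this
	 * channel forces a full key/IV reload, and a full ring is reported
	 * back as ERESTART.
	 */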
	spin_lock_irqsave(&txring->fill_lock, flags);

	if (txring->sesn != PASEMI_SESSION(crp->crp_sid)) {
		txring->sesn = PASEMI_SESSION(crp->crp_sid);
		reinit = 1;
	}

	if (enccrd) {
		pasemi_desc_start(&init_desc,
				  XCT_CTRL_HDR(chsel, reinit ? reinit_size : 0x10, DMA_FN_CIV0));
		pasemi_desc_build(&init_desc,
				  XCT_FUN_SRC_PTR(reinit ? reinit_size : 0x10, ses->dma_addr));
	}

	if (((txring->next_to_fill + pasemi_desc_size(&init_desc) +
	      pasemi_desc_size(&work_desc)) -
	     txring->next_to_clean) > TX_RING_SIZE) {
		spin_unlock_irqrestore(&txring->fill_lock, flags);
		err = ERESTART;
		goto errout;
	}

	pasemi_ring_add_desc(txring, &init_desc, NULL);
	pasemi_ring_add_desc(txring, &work_desc, crp);

	pasemi_ring_incr(sc, chsel,
			 pasemi_desc_size(&init_desc) +
			 pasemi_desc_size(&work_desc));

	spin_unlock_irqrestore(&txring->fill_lock, flags);

	mod_timer(&txring->crypto_timer, jiffies + TIMER_INTERVAL);

	return 0;

erralg:
	printk(DRV_NAME ": unsupported algorithm or algorithm order alg1 %d alg2 %d\n",
	       crd1->crd_alg, crd2->crd_alg);
	err = -EINVAL;

errout:
	if (err != ERESTART) {
		crp->crp_etype = err;
		crypto_done(crp);
	}
	return err;
}

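/*
 * Reclaim completed descriptors on one channel.  The hardware packet count
 * in dma_status tells how many descriptors finished since the last pass;
 * for each one the DMA mappings are torn down, the signature-match bit is
 * checked when requested, and the owning crypto request is completed.
 */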
static int pasemi_clean_tx(struct pasemi_softc *sc, int chan)
{
	int i, j, ring_idx;
	struct pasemi_fnu_txring *ring = &sc->tx[chan];
	u16 delta_cnt;
	unsigned long flags;
	int loops = 10;
	int desc_size;
	struct cryptop *crp;

	spin_lock_irqsave(&ring->clean_lock, flags);

	while ((delta_cnt = (dma_status->tx_sta[sc->base_chan + chan]
			     & PAS_STATUS_PCNT_M) - ring->total_pktcnt)
	       && loops--) {

		for (i = 0; i < delta_cnt; i++) {
			desc_size = TX_DESC_INFO(ring, ring->next_to_clean).desc_size;
			crp = TX_DESC_INFO(ring, ring->next_to_clean).cf_crp;
			if (crp) {
				ring_idx = 2 * (ring->next_to_clean & (TX_RING_SIZE-1));
				if (TX_DESC_INFO(ring, ring->next_to_clean).desc_postop & PASEMI_CHECK_SIG) {
					/* Need to make sure signature matched,
					 * if not - return error */
					if (!(ring->desc[ring_idx + 1] & (1ULL << 63)))
						crp->crp_etype = -EINVAL;
				}
				crypto_done(TX_DESC_INFO(ring,
							 ring->next_to_clean).cf_crp);
				TX_DESC_INFO(ring, ring->next_to_clean).cf_crp = NULL;
				pci_unmap_single(
					sc->dma_pdev,
					XCT_PTR_ADDR_LEN(ring->desc[ring_idx + 1]),
					PCI_DMA_TODEVICE);

				ring->desc[ring_idx] = ring->desc[ring_idx + 1] = 0;

				ring->next_to_clean++;
				for (j = 1; j < desc_size; j++) {
					ring_idx = 2 *
						(ring->next_to_clean &
						 (TX_RING_SIZE-1));
					pci_unmap_single(
						sc->dma_pdev,
						XCT_PTR_ADDR_LEN(ring->desc[ring_idx]),
						PCI_DMA_TODEVICE);
					if (ring->desc[ring_idx + 1])
						pci_unmap_single(
							sc->dma_pdev,
							XCT_PTR_ADDR_LEN(
								ring->desc[
									ring_idx + 1]),
							PCI_DMA_TODEVICE);
					ring->desc[ring_idx] =
						ring->desc[ring_idx + 1] = 0;
					ring->next_to_clean++;
				}
			} else {
				for (j = 0; j < desc_size; j++) {
					ring_idx = 2 * (ring->next_to_clean & (TX_RING_SIZE-1));
					ring->desc[ring_idx] =
						ring->desc[ring_idx + 1] = 0;
					ring->next_to_clean++;
				}
			}
		}

		ring->total_pktcnt += delta_cnt;
	}
	spin_unlock_irqrestore(&ring->clean_lock, flags);

	return 0;
}

static void sweepup_tx(struct pasemi_softc *sc)
{
	int i;

	for (i = 0; i < sc->sc_num_channels; i++)
		pasemi_clean_tx(sc, i);
}

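/*
 * Per-channel interrupt handler: clean the ring, then acknowledge the
 * interrupt by writing the updated packet count back to the IOB TXCH
 * reset register.
 */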
static irqreturn_t pasemi_intr(int irq, void *arg, struct pt_regs *regs)
{
	struct pasemi_softc *sc = arg;
	unsigned int reg;
	int chan = irq - sc->base_irq;
	int chan_index = sc->base_chan + chan;
	u64 stat = dma_status->tx_sta[chan_index];

	DPRINTF("%s()\n", __FUNCTION__);

	if (!(stat & PAS_STATUS_CAUSE_M))
		return IRQ_NONE;

	pasemi_clean_tx(sc, chan);

	stat = dma_status->tx_sta[chan_index];

	reg = PAS_IOB_DMA_TXCH_RESET_PINTC |
		PAS_IOB_DMA_TXCH_RESET_PCNT(sc->tx[chan].total_pktcnt);

	if (stat & PAS_STATUS_SOFT)
		reg |= PAS_IOB_DMA_RXCH_RESET_SINTC;

	out_le32(sc->iob_regs + PAS_IOB_DMA_TXCH_RESET(chan_index), reg);

	return IRQ_HANDLED;
}

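/*
 * Allocate and program one transmit ring: descriptor memory, ring base and
 * size registers, channel configuration, the interrupt line and the periodic
 * cleanup timer.
 */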
static int pasemi_dma_setup_tx_resources(struct pasemi_softc *sc, int chan)
{
	u32 val;
	int chan_index = chan + sc->base_chan;
	int ret;
	struct pasemi_fnu_txring *ring;

	ring = &sc->tx[chan];

	spin_lock_init(&ring->fill_lock);
	spin_lock_init(&ring->clean_lock);

	ring->desc_info = kzalloc(sizeof(struct pasemi_desc_info) *
				  TX_RING_SIZE, GFP_KERNEL);
	if (!ring->desc_info)
		return -ENOMEM;

	/* Allocate descriptors */
	ring->desc = dma_alloc_coherent(&sc->dma_pdev->dev,
					TX_RING_SIZE *
					2 * sizeof(u64),
					&ring->dma, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	memset((void *) ring->desc, 0, TX_RING_SIZE * 2 * sizeof(u64));

	out_le32(sc->iob_regs + PAS_IOB_DMA_TXCH_RESET(chan_index), 0x30);

	ring->total_pktcnt = 0;

	out_le32(sc->dma_regs + PAS_DMA_TXCHAN_BASEL(chan_index),
		 PAS_DMA_TXCHAN_BASEL_BRBL(ring->dma));

	val = PAS_DMA_TXCHAN_BASEU_BRBH(ring->dma >> 32);
	val |= PAS_DMA_TXCHAN_BASEU_SIZ(TX_RING_SIZE >> 2);

	out_le32(sc->dma_regs + PAS_DMA_TXCHAN_BASEU(chan_index), val);

	out_le32(sc->dma_regs + PAS_DMA_TXCHAN_CFG(chan_index),
		 PAS_DMA_TXCHAN_CFG_TY_FUNC |
		 PAS_DMA_TXCHAN_CFG_TATTR(chan) |
		 PAS_DMA_TXCHAN_CFG_WT(2));

	/* enable tx channel */
	out_le32(sc->dma_regs +
		 PAS_DMA_TXCHAN_TCMDSTA(chan_index),
		 PAS_DMA_TXCHAN_TCMDSTA_EN);

	out_le32(sc->iob_regs + PAS_IOB_DMA_TXCH_CFG(chan_index),
		 PAS_IOB_DMA_TXCH_CFG_CNTTH(1000));

	ring->next_to_fill = 0;
	ring->next_to_clean = 0;

	snprintf(ring->irq_name, sizeof(ring->irq_name),
		 "%s%d", "crypto", chan);

	ring->irq = irq_create_mapping(NULL, sc->base_irq + chan);
	ret = request_irq(ring->irq, (irq_handler_t)
			  pasemi_intr, IRQF_DISABLED, ring->irq_name, sc);
	if (ret) {
		printk(KERN_ERR DRV_NAME ": failed to hook irq %d ret %d\n",
		       ring->irq, ret);
		ring->irq = -1;
		return ret;
	}

	setup_timer(&ring->crypto_timer, (void *) sweepup_tx, (unsigned long) sc);

	return 0;
}

static device_method_t pasemi_methods = {
	/* crypto device methods */
	DEVMETHOD(cryptodev_newsession, pasemi_newsession),
	DEVMETHOD(cryptodev_freesession, pasemi_freesession),
	DEVMETHOD(cryptodev_process, pasemi_process),
};

/* Set up the crypto device structure, private data,
 * and anything else we need before we start */

static int __devinit
pasemi_dma_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct pasemi_softc *sc;
	int ret, i;

	DPRINTF(KERN_ERR "%s()\n", __FUNCTION__);

	sc = kzalloc(sizeof(*sc), GFP_KERNEL);
	if (!sc)
		return -ENOMEM;

	softc_device_init(sc, DRV_NAME, 1, pasemi_methods);

	pci_set_drvdata(pdev, sc);

	spin_lock_init(&sc->sc_chnlock);

	sc->sc_sessions = (struct pasemi_session **)
		kzalloc(PASEMI_INITIAL_SESSIONS *
			sizeof(struct pasemi_session *), GFP_ATOMIC);
	if (sc->sc_sessions == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	sc->sc_nsessions = PASEMI_INITIAL_SESSIONS;
	sc->sc_lastchn = 0;
	sc->base_irq = pdev->irq + 6;
	sc->base_chan = 6;
	sc->sc_cid = -1;
	sc->dma_pdev = pdev;

	sc->iob_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL);
	if (!sc->iob_pdev) {
		dev_err(&pdev->dev, "Can't find I/O Bridge\n");
		ret = -ENODEV;
		goto out;
	}

	/* This is hardcoded and ugly, but we have some firmware versions
	 * that don't provide the register space in the device tree. Luckily
	 * they are at well-known locations, so we can just do the math here.
	 */
	sc->dma_regs =
		ioremap(0xe0000000 + (sc->dma_pdev->devfn << 12), 0x2000);
	sc->iob_regs =
		ioremap(0xe0000000 + (sc->iob_pdev->devfn << 12), 0x2000);
	if (!sc->dma_regs || !sc->iob_regs) {
		dev_err(&pdev->dev, "Can't map registers\n");
		ret = -ENODEV;
		goto out;
	}

	dma_status = __ioremap(0xfd800000, 0x1000, 0);
	if (!dma_status) {
		ret = -ENODEV;
		dev_err(&pdev->dev, "Can't map dmastatus space\n");
		goto out;
	}

	sc->tx = (struct pasemi_fnu_txring *)
		kzalloc(sizeof(struct pasemi_fnu_txring)
			* 8, GFP_KERNEL);
	if (!sc->tx) {
		ret = -ENOMEM;
		goto out;
	}

	/* Initialize the h/w */
	out_le32(sc->dma_regs + PAS_DMA_COM_CFG,
		 (in_le32(sc->dma_regs + PAS_DMA_COM_CFG) |
		  PAS_DMA_COM_CFG_FWF));
	out_le32(sc->dma_regs + PAS_DMA_COM_TXCMD, PAS_DMA_COM_TXCMD_EN);

	for (i = 0; i < PASEMI_FNU_CHANNELS; i++) {
		sc->sc_num_channels++;
		ret = pasemi_dma_setup_tx_resources(sc, i);
		if (ret)
			goto out;
	}

	sc->sc_cid = crypto_get_driverid(softc_get_device(sc),
					 CRYPTOCAP_F_HARDWARE);
	if (sc->sc_cid < 0) {
		printk(KERN_ERR DRV_NAME ": could not get crypto driver id\n");
		ret = -ENXIO;
		goto out;
	}

	/* register algorithms with the framework */
	printk(DRV_NAME ":");

	crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);

	return 0;

out:
	pasemi_dma_remove(pdev);
	return ret;
}

#define MAX_RETRIES 5000

static void pasemi_free_tx_resources(struct pasemi_softc *sc, int chan)
{
	struct pasemi_fnu_txring *ring = &sc->tx[chan];
	int chan_index = chan + sc->base_chan;
	int retries;
	u32 stat;

	/* Stop the channel */
	out_le32(sc->dma_regs +
		 PAS_DMA_TXCHAN_TCMDSTA(chan_index),
		 PAS_DMA_TXCHAN_TCMDSTA_ST);

	for (retries = 0; retries < MAX_RETRIES; retries++) {
		stat = in_le32(sc->dma_regs +
			       PAS_DMA_TXCHAN_TCMDSTA(chan_index));
		if (!(stat & PAS_DMA_TXCHAN_TCMDSTA_ACT))
			break;
		cond_resched();
	}

	if (stat & PAS_DMA_TXCHAN_TCMDSTA_ACT)
		dev_err(&sc->dma_pdev->dev, "Failed to stop tx channel %d\n",
			chan_index);

	/* Disable the channel */
	out_le32(sc->dma_regs +
		 PAS_DMA_TXCHAN_TCMDSTA(chan_index),
		 0);

	if (ring->desc_info)
		kfree((void *) ring->desc_info);
	if (ring->desc)
		dma_free_coherent(&sc->dma_pdev->dev,
				  TX_RING_SIZE *
				  2 * sizeof(u64),
				  (void *) ring->desc, ring->dma);
	if (ring->irq != -1)
		free_irq(ring->irq, sc);

	del_timer(&ring->crypto_timer);
}

static void __devexit pasemi_dma_remove(struct pci_dev *pdev)
{
	struct pasemi_softc *sc = pci_get_drvdata(pdev);
	int i;

	DPRINTF("%s()\n", __FUNCTION__);

	if (sc->sc_cid >= 0) {
		crypto_unregister_all(sc->sc_cid);
	}

	if (sc->tx) {
		for (i = 0; i < sc->sc_num_channels; i++)
			pasemi_free_tx_resources(sc, i);

		kfree(sc->tx);
	}
	if (sc->sc_sessions) {
		for (i = 0; i < sc->sc_nsessions; i++)
			kfree(sc->sc_sessions[i]);
		kfree(sc->sc_sessions);
	}
	if (sc->iob_pdev)
		pci_dev_put(sc->iob_pdev);
	if (sc->dma_regs)
		iounmap(sc->dma_regs);
	if (sc->iob_regs)
		iounmap(sc->iob_regs);
	kfree(sc);
}

static struct pci_device_id pasemi_dma_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa007) },
	{ },	/* terminating entry */
};

MODULE_DEVICE_TABLE(pci, pasemi_dma_pci_tbl);

static struct pci_driver pasemi_dma_driver = {
	.name		= "pasemi_dma",
	.id_table	= pasemi_dma_pci_tbl,
	.probe		= pasemi_dma_probe,
	.remove		= __devexit_p(pasemi_dma_remove),
};

static void __exit pasemi_dma_cleanup_module(void)
{
	pci_unregister_driver(&pasemi_dma_driver);
	__iounmap(dma_status);
	dma_status = NULL;
}

int pasemi_dma_init_module(void)
{
	return pci_register_driver(&pasemi_dma_driver);
}

module_init(pasemi_dma_init_module);
module_exit(pasemi_dma_cleanup_module);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Egor Martovetsky egor@pasemi.com");
MODULE_DESCRIPTION("OCF driver for PA Semi PWRficient DMA Crypto Engine");