8db6276587b98d22790e3f904f86165c421f6223
[openwrt/svn-archive/archive.git] / target / linux / ixp4xx-2.6 / patches / 100-npe_driver.patch
1 diff -Naur linux-2.6.19.orig/arch/arm/mach-ixp4xx/common.c linux-2.6.19/arch/arm/mach-ixp4xx/common.c
2 --- linux-2.6.19.orig/arch/arm/mach-ixp4xx/common.c 2006-11-29 14:57:37.000000000 -0700
3 +++ linux-2.6.19/arch/arm/mach-ixp4xx/common.c 2007-01-12 21:54:40.000000000 -0700
4 @@ -314,6 +314,90 @@
5 &ixp46x_i2c_controller
6 };
7
8 +static struct npe_plat_data npea = {
9 + .name = "NPE-A",
10 + .data_size = 0x800,
11 + .inst_size = 0x1000,
12 + .id = 0,
13 +};
14 +
15 +static struct npe_plat_data npeb = {
16 + .name = "NPE-B",
17 + .data_size = 0x800,
18 + .inst_size = 0x800,
19 + .id = 1,
20 +};
21 +
22 +static struct npe_plat_data npec = {
23 + .name = "NPE-C",
24 + .data_size = 0x800,
25 + .inst_size = 0x800,
26 + .id = 2,
27 +};
28 +
29 +static struct resource res_npea = {
30 + .start = IXP4XX_NPEA_BASE_PHYS,
31 + .end = IXP4XX_NPEA_BASE_PHYS + 0xfff,
32 + .flags = IORESOURCE_MEM,
33 +};
34 +
35 +static struct resource res_npeb = {
36 + .start = IXP4XX_NPEB_BASE_PHYS,
37 + .end = IXP4XX_NPEB_BASE_PHYS + 0xfff,
38 + .flags = IORESOURCE_MEM,
39 +};
40 +
41 +static struct resource res_npec = {
42 + .start = IXP4XX_NPEC_BASE_PHYS,
43 + .end = IXP4XX_NPEC_BASE_PHYS + 0xfff,
44 + .flags = IORESOURCE_MEM,
45 +};
46 +
47 +static struct platform_device dev_npea = {
48 + .name = "ixp4xx_npe",
49 + .id = 0,
50 + .dev.platform_data = &npea,
51 + .num_resources = 1,
52 + .resource = &res_npea,
53 +};
54 +
55 +static struct platform_device dev_npeb = {
56 + .name = "ixp4xx_npe",
57 + .id = 1,
58 + .dev.platform_data = &npeb,
59 + .num_resources = 1,
60 + .resource = &res_npeb,
61 +};
62 +
63 +static struct platform_device dev_npec = {
64 + .name = "ixp4xx_npe",
65 + .id = 2,
66 + .dev.platform_data = &npec,
67 + .num_resources = 1,
68 + .resource = &res_npec,
69 +};
70 +
71 +/* QMGR */
72 +static struct resource res_qmgr[] = {
73 +{
74 + .start = IXP4XX_QMGR_BASE_PHYS,
75 + .end = IXP4XX_QMGR_BASE_PHYS + IXP4XX_QMGR_REGION_SIZE -1,
76 + .flags = IORESOURCE_MEM,
77 +}, {
78 + .start = IRQ_IXP4XX_QM1,
79 + .flags = IORESOURCE_IRQ,
80 +} };
81 +
82 +static struct platform_device qmgr = {
83 + .name = "ixp4xx_qmgr",
84 + .id = 0,
85 + .dev = {
86 + .coherent_dma_mask = DMA_32BIT_MASK,
87 + },
88 + .num_resources = ARRAY_SIZE(res_qmgr),
89 + .resource = res_qmgr,
90 +};
91 +
92 unsigned long ixp4xx_exp_bus_size;
93 EXPORT_SYMBOL(ixp4xx_exp_bus_size);
94
95 @@ -333,8 +417,19 @@
96 break;
97 }
98 }
99 + npeb.inst_size = 0x1000;
100 + npec.inst_size = 0x1000;
101 }
102
103 + platform_device_register(&qmgr);
104 +
105 + if (ix_fuse() & IX_FUSE_NPEA)
106 + platform_device_register(&dev_npea);
107 + if (ix_fuse() & IX_FUSE_NPEB)
108 + platform_device_register(&dev_npeb);
109 + if (ix_fuse() & IX_FUSE_NPEC)
110 + platform_device_register(&dev_npec);
111 +
112 printk("IXP4xx: Using %luMiB expansion bus window size\n",
113 ixp4xx_exp_bus_size >> 20);
114 }
115 diff -Naur linux-2.6.19.orig/arch/arm/mach-ixp4xx/ixdp425-setup.c linux-2.6.19/arch/arm/mach-ixp4xx/ixdp425-setup.c
116 --- linux-2.6.19.orig/arch/arm/mach-ixp4xx/ixdp425-setup.c 2006-11-29 14:57:37.000000000 -0700
117 +++ linux-2.6.19/arch/arm/mach-ixp4xx/ixdp425-setup.c 2007-01-12 21:54:40.000000000 -0700
118 @@ -101,10 +101,59 @@
119 .resource = ixdp425_uart_resources
120 };
121
122 +/* MACs */
123 +static struct resource res_mac0 = {
124 + .start = IXP4XX_EthB_BASE_PHYS,
125 + .end = IXP4XX_EthB_BASE_PHYS + 0x1ff,
126 + .flags = IORESOURCE_MEM,
127 +};
128 +
129 +static struct resource res_mac1 = {
130 + .start = IXP4XX_EthC_BASE_PHYS,
131 + .end = IXP4XX_EthC_BASE_PHYS + 0x1ff,
132 + .flags = IORESOURCE_MEM,
133 +};
134 +
135 +static struct mac_plat_info plat_mac0 = {
136 + .npe_id = 1,
137 + .phy_id = 0,
138 + .eth_id = 0,
139 + .rxq_id = 27,
140 + .txq_id = 24,
141 + .rxdoneq_id = 4,
142 +};
143 +
144 +static struct mac_plat_info plat_mac1 = {
145 + .npe_id = 2,
146 + .phy_id = 1,
147 + .eth_id = 1,
148 + .rxq_id = 28,
149 + .txq_id = 25,
150 + .rxdoneq_id = 5,
151 +};
152 +
153 +static struct platform_device mac0 = {
154 + .name = "ixp4xx_mac",
155 + .id = 0,
156 + .dev.platform_data = &plat_mac0,
157 + .num_resources = 1,
158 + .resource = &res_mac0,
159 +};
160 +
161 +static struct platform_device mac1 = {
162 + .name = "ixp4xx_mac",
163 + .id = 1,
164 + .dev.platform_data = &plat_mac1,
165 + .num_resources = 1,
166 + .resource = &res_mac1,
167 +};
168 +
169 static struct platform_device *ixdp425_devices[] __initdata = {
170 &ixdp425_i2c_controller,
171 &ixdp425_flash,
172 - &ixdp425_uart
173 + &ixdp425_uart,
174 + &mac0,
175 + &mac1,
176 };
177
178 static void __init ixdp425_init(void)
179 diff -Naur linux-2.6.19.orig/Documentation/networking/ixp4xx/IxNpeMicrocode.h linux-2.6.19/Documentation/networking/ixp4xx/IxNpeMicrocode.h
180 --- linux-2.6.19.orig/Documentation/networking/ixp4xx/IxNpeMicrocode.h 1969-12-31 17:00:00.000000000 -0700
181 +++ linux-2.6.19/Documentation/networking/ixp4xx/IxNpeMicrocode.h 2007-01-12 21:54:40.000000000 -0700
182 @@ -0,0 +1,143 @@
183 +/*
184 + * IxNpeMicrocode.h - Header file for compiling the Intel microcode C file
185 + *
186 + * Copyright (C) 2006 Christian Hohnstaedt <chohnstaedt@innominate.com>
187 + *
188 + * This file is released under the GPLv2
189 + *
190 + *
191 + * compile with
192 + *
193 + * gcc -Wall IxNpeMicrocode.c -o IxNpeMicrocode
194 + *
195 + * Executing the resulting binary on your build-host creates the
196 + * "NPE-[ABC].xxxxxxxx" files containing the selected microcode
197 + *
198 + * fetch the IxNpeMicrocode.c from the Intel Access Library.
199 + * It will include this header.
200 + *
201 + * select Images for every NPE from the following
202 + * (used C++ comments for easy uncommenting ....)
203 + */
204 +
205 +// #define IX_NPEDL_NPEIMAGE_NPEA_ETH_SPAN_MASK_FIREWALL_VLAN_QOS_HDR_CONV_EXTMIB
206 +// #define IX_NPEDL_NPEIMAGE_NPEA_ETH_SPAN_VLAN_QOS_HDR_CONV_EXTMIB
207 +// #define IX_NPEDL_NPEIMAGE_NPEA_ETH_LEARN_FILTER_SPAN_MASK_FIREWALL_VLAN_QOS_EXTMIB
208 +// #define IX_NPEDL_NPEIMAGE_NPEA_HSS_TSLOT_SWITCH
209 +// #define IX_NPEDL_NPEIMAGE_NPEA_ETH_SPAN_FIREWALL_VLAN_QOS_HDR_CONV
210 +// #define IX_NPEDL_NPEIMAGE_NPEA_ETH_LEARN_FILTER_SPAN_FIREWALL_VLAN_QOS
211 +// #define IX_NPEDL_NPEIMAGE_NPEA_ETH_LEARN_FILTER_SPAN_FIREWALL
212 +// #define IX_NPEDL_NPEIMAGE_NPEA_HSS_2_PORT
213 +// #define IX_NPEDL_NPEIMAGE_NPEA_DMA
214 +// #define IX_NPEDL_NPEIMAGE_NPEA_ATM_MPHY_12_PORT
215 +// #define IX_NPEDL_NPEIMAGE_NPEA_HSS0_ATM_MPHY_1_PORT
216 +// #define IX_NPEDL_NPEIMAGE_NPEA_HSS0_ATM_SPHY_1_PORT
217 +// #define IX_NPEDL_NPEIMAGE_NPEA_HSS0
218 +// #define IX_NPEDL_NPEIMAGE_NPEA_WEP
219 +
220 +
221 +// #define IX_NPEDL_NPEIMAGE_NPEB_ETH_SPAN_MASK_FIREWALL_VLAN_QOS_HDR_CONV_EXTMIB
222 +//#define IX_NPEDL_NPEIMAGE_NPEB_ETH_SPAN_VLAN_QOS_HDR_CONV_EXTMIB
223 +// #define IX_NPEDL_NPEIMAGE_NPEB_ETH_LEARN_FILTER_SPAN_MASK_FIREWALL_VLAN_QOS_EXTMIB
224 +// #define IX_NPEDL_NPEIMAGE_NPEB_DMA
225 +// #define IX_NPEDL_NPEIMAGE_NPEB_ETH_SPAN_FIREWALL_VLAN_QOS_HDR_CONV
226 +// #define IX_NPEDL_NPEIMAGE_NPEB_ETH_LEARN_FILTER_SPAN_FIREWALL_VLAN_QOS
227 + #define IX_NPEDL_NPEIMAGE_NPEB_ETH_LEARN_FILTER_SPAN_FIREWALL
228 +
229 +
230 +// #define IX_NPEDL_NPEIMAGE_NPEC_ETH_SPAN_MASK_FIREWALL_VLAN_QOS_HDR_CONV_EXTMIB
231 +// #define IX_NPEDL_NPEIMAGE_NPEC_ETH_SPAN_VLAN_QOS_HDR_CONV_EXTMIB
232 +// #define IX_NPEDL_NPEIMAGE_NPEC_ETH_LEARN_FILTER_SPAN_MASK_FIREWALL_VLAN_QOS_EXTMIB
233 +// #define IX_NPEDL_NPEIMAGE_NPEC_DMA
234 +// #define IX_NPEDL_NPEIMAGE_NPEC_CRYPTO_AES_ETH_LEARN_FILTER_SPAN
235 +// #define IX_NPEDL_NPEIMAGE_NPEC_CRYPTO_AES_ETH_LEARN_FILTER_FIREWALL
236 + #define IX_NPEDL_NPEIMAGE_NPEC_CRYPTO_AES_CCM_ETH
237 +// #define IX_NPEDL_NPEIMAGE_NPEC_CRYPTO_ETH_LEARN_FILTER_SPAN_FIREWALL
238 +// #define IX_NPEDL_NPEIMAGE_NPEC_ETH_SPAN_FIREWALL_VLAN_QOS_HDR_CONV
239 +// #define IX_NPEDL_NPEIMAGE_NPEC_ETH_LEARN_FILTER_SPAN_FIREWALL_VLAN_QOS
240 +// #define IX_NPEDL_NPEIMAGE_NPEC_ETH_LEARN_FILTER_SPAN_FIREWALL
241 +
242 +
243 +#include <stdio.h>
244 +#include <unistd.h>
245 +#include <stdlib.h>
246 +#include <netinet/in.h>
247 +#include <sys/types.h>
248 +#include <sys/stat.h>
249 +#include <fcntl.h>
250 +#include <errno.h>
251 +#include <endian.h>
252 +#include <byteswap.h>
253 +#include <string.h>
254 +
255 +#if __BYTE_ORDER == __LITTLE_ENDIAN
256 +#define to_le32(x) (x)
257 +#define to_be32(x) bswap_32(x)
258 +#else
259 +#define to_be32(x) (x)
260 +#define to_le32(x) bswap_32(x)
261 +#endif
262 +
263 +struct dl_image {
264 + unsigned magic;
265 + unsigned id;
266 + unsigned size;
267 + unsigned data[0];
268 +};
269 +
270 +const unsigned IxNpeMicrocode_array[];
271 +
272 +int main(int argc, char *argv[])
273 +{
274 + struct dl_image *image = (struct dl_image *)IxNpeMicrocode_array;
275 + int imgsiz, i, fd, cnt;
276 + const unsigned *arrayptr = IxNpeMicrocode_array;
277 + const char *names[] = { "IXP425", "IXP465", "unknown" };
278 + int bigendian = 1;
279 +
280 + if (argc > 1) {
281 + if (!strcmp(argv[1], "-le"))
282 + bigendian = 0;
283 + else if (!strcmp(argv[1], "-be"))
284 + bigendian = 1;
285 + else {
286 + printf("Usage: %s <-le|-be>\n", argv[0]);
287 + return EXIT_FAILURE;
288 + }
289 + }
290 +
291 + for (image = (struct dl_image *)arrayptr, cnt=0;
292 + (image->id != 0xfeedf00d) && (image->magic == 0xfeedf00d);
293 + image = (struct dl_image *)(arrayptr), cnt++)
294 + {
295 + unsigned char field[4];
296 + imgsiz = image->size + 3;
297 + *(unsigned*)field = to_be32(image->id);
298 + char filename[40], slnk[10];
299 +
300 + sprintf(filename, "NPE-%c.%08x", (field[0] & 0xf) + 'A',
301 + image->id);
302 + sprintf(slnk, "NPE-%c", (field[0] & 0xf) + 'A');
303 + printf("Writing image: %s.NPE_%c Func: %2x Rev: %02x.%02x "
304 + "Size: %5d to: '%s'\n",
305 + names[field[0] >> 4], (field[0] & 0xf) + 'A',
306 + field[1], field[2], field[3], imgsiz*4, filename);
307 + fd = open(filename, O_CREAT | O_RDWR | O_TRUNC, 0644);
308 + if (fd >= 0) {
309 + for (i=0; i<imgsiz; i++) {
310 + *(unsigned*)field = bigendian ?
311 + to_be32(arrayptr[i]) :
312 + to_le32(arrayptr[i]);
313 + write(fd, field, sizeof(field));
314 + }
315 + close(fd);
316 + unlink(slnk);
317 + symlink(filename, slnk);
318 + } else {
319 + perror(filename);
320 + }
321 + arrayptr += imgsiz;
322 + }
323 + close(fd);
324 + return 0;
325 +}
326 diff -Naur linux-2.6.19.orig/Documentation/networking/ixp4xx/mc_grab.c linux-2.6.19/Documentation/networking/ixp4xx/mc_grab.c
327 --- linux-2.6.19.orig/Documentation/networking/ixp4xx/mc_grab.c 1969-12-31 17:00:00.000000000 -0700
328 +++ linux-2.6.19/Documentation/networking/ixp4xx/mc_grab.c 2007-01-12 21:54:40.000000000 -0700
329 @@ -0,0 +1,97 @@
330 +/*
331 + * mc_grab.c - grabs IXP4XX microcode from a binary datastream
332 + * e.g. The redboot bootloader....
333 + *
334 + * usage: mc_grab 1010200 2010200 < /dev/mtd/0 > /dev/misc/npe
335 + *
336 + * Copyright (C) 2006 Christian Hohnstaedt <chohnstaedt@innominate.com>
337 + *
338 + * This file is released under the GPLv2
339 + */
340 +
341 +
342 +#include <stdlib.h>
343 +#include <stdio.h>
344 +#include <unistd.h>
345 +#include <netinet/in.h>
346 +#include <sys/types.h>
347 +#include <sys/stat.h>
348 +#include <fcntl.h>
349 +#include <errno.h>
350 +#include <string.h>
351 +
352 +#define MAX_IMG 6
353 +
354 +static void print_mc_info(unsigned id, int siz)
355 +{
356 + unsigned char buf[sizeof(unsigned)];
357 + *(unsigned*)buf = id;
358 + unsigned idx;
359 + const char *names[] = { "IXP425", "IXP465", "unknown" };
360 +
361 + idx = (buf[0] >> 4) < 2 ? (buf[0] >> 4) : 2;
362 +
363 + fprintf(stderr, "Device: %s:NPE_%c Func: %2x Rev: %02x.%02x "
364 + "Size: %5d bytes ID:%08x\n", names[idx], (buf[0] & 0xf)+'A',
365 + buf[1], buf[2], buf[3], siz*4, ntohl(id));
366 +}
367 +
368 +int main(int argc, char *argv[])
369 +{
370 + int i,j;
371 + unsigned char buf[sizeof(unsigned)];
372 + unsigned magic = htonl(0xfeedf00d);
373 + unsigned id, my_ids[MAX_IMG+1], siz, sizbe;
374 + int ret=1, verbose=0;
375 +
376 + for (i=0, j=0; i<argc-1 && j<MAX_IMG; i++) {
377 + if (!strcmp(argv[i+1], "-v"))
378 + verbose = 1;
379 + else
380 + my_ids[j++] = htonl(strtoul(argv[i+1], NULL, 16));
381 + }
382 + my_ids[j] = 0;
383 + if (my_ids[0] == 0 && !verbose) {
384 + fprintf(stderr, "Usage: %s <-v> [ID1] [ID2] [IDn]\n", argv[0]);
385 + return 1;
386 + }
387 +
388 + while ((ret=read(0, buf, sizeof(unsigned))) == sizeof(unsigned)) {
389 + if (*(unsigned*)buf != magic)
390 + continue;
391 + if ((ret=read(0, buf, sizeof(unsigned))) != sizeof(unsigned) )
392 + break;
393 + id = *(unsigned*)buf;
394 +
395 + if (read(0, buf, sizeof(siz)) != sizeof(siz) )
396 + break;
397 + sizbe = *(unsigned*)buf;
398 + siz = ntohl(sizbe);
399 +
400 + if (verbose)
401 + print_mc_info(id, siz);
402 +
403 + for(i=0; my_ids[i]; i++)
404 + if (id == my_ids[i])
405 + break;
406 + if (!my_ids[i])
407 + continue;
408 +
409 + if (!verbose)
410 + print_mc_info(id, siz);
411 +
412 + write(1, &magic, sizeof(magic));
413 + write(1, &id, sizeof(id));
414 + write(1, &sizbe, sizeof(sizbe));
415 + for (i=0; i<siz; i++) {
416 + if (read(0, buf, sizeof(unsigned)) != sizeof(unsigned))
417 + break;
418 + write(1, buf, sizeof(unsigned));
419 + }
420 + if (i != siz)
421 + break;
422 + }
423 + if (ret)
424 + fprintf(stderr, "Error reading Microcode\n");
425 + return ret;
426 +}
427 diff -Naur linux-2.6.19.orig/Documentation/networking/ixp4xx/README linux-2.6.19/Documentation/networking/ixp4xx/README
428 --- linux-2.6.19.orig/Documentation/networking/ixp4xx/README 1969-12-31 17:00:00.000000000 -0700
429 +++ linux-2.6.19/Documentation/networking/ixp4xx/README 2007-01-12 21:54:40.000000000 -0700
430 @@ -0,0 +1,62 @@
431 +Information about the networking driver using the IXP4XX CPU internal NPEs
432 +and Queue manager.
433 +
434 +If this driver is used, the IAL (Intel Access Library) must not be loaded.
435 +However, the IAL may be loaded if these modules are unloaded:
436 + ixp4xx_npe.ko, ixp4xx_qmgr.ko ixp4xx_mac.ko
437 +
438 +This also means that HW crypto acceleration does NOT work when using this
439 +driver, unless I have finished my crypto driver for NPE-C
440 +
441 +
442 +Adapting to your custom board:
443 +------------------------------
444 +use "arch/arm/mach-ixp4xx/ixdp425-setup.c" as template:
445 +
446 +in "static struct mac_plat_info" adapt the entry "phy_id" to your needs
447 +(Ask your hardware designer about the PHY id)
448 +
449 +The order of "&mac0" and "&mac1" in the "struct platform_device"
450 +determines which of them becomes eth0 and eth1
451 +
452 +
453 +The Microcode:
454 +---------------
455 +Solution 1)
456 + Configure "CONFIG_HOTPLUG" and "CONFIG_FW_LOADER" and configure
457 + IXP4XX_NPE as module.
458 + The default hotplug script will load the Firmware from
459 + /usr/lib/hotplug/firmware/NPE-[ABC]
460 + see Documentation/firmware_class/hotplug-script
461 +
462 + You should take care, that $ACTION is "add" and $SUBSYSTEM is "firmware"
463 + to avoid unnecessary calls:
464 + test $ACTION = "remove" -o $SUBSYSTEM != "firmware" && exit
465 +
466 +Solution 2)
467 + create a char-dev: "mknod /dev/misc/npe c 10 184"
468 + cat the Microcode into it:
469 + cat /usr/lib/hotplug/firmware/NPE-* > /dev/misc/npe
470 + This also works if the driver is linked to the kernel
471 +
472 + Having a mix of both (e.g. solution 1 for NPE-B and solution 2 for NPE-C)
473 + is perfectly ok and works.
474 +
475 + The state of the NPEs can be seen and changed at:
476 + /sys/bus/platform/devices/ixp4xx_npe.X/state
477 +
478 +
479 +Obtaining the Microcode:
480 +------------------------
481 +1) IxNpeMicrocode.h in this directory:
482 + Download IPL_IXP400NPELIBRARYWITHCRYPTO-2_1.ZIP from Intel
483 + It unpacks the Microcode IxNpeMicrocode.c
484 + Read the licence!
485 + Compile it with "gcc -Wall IxNpeMicrocode.c -o IxNpeMicrocode" on your host.
486 + The resulting images can be moved to "/usr/lib/hotplug/firmware"
487 +
488 +2) mc_grab.c in this directory:
489 + Compile and execute it either on the host or on the target
490 + to grab the microcode from a binary image like the RedBoot bootloader.
491 +
492 +
493 diff -Naur linux-2.6.19.orig/drivers/net/ixp4xx/ixp4xx_crypto.c linux-2.6.19/drivers/net/ixp4xx/ixp4xx_crypto.c
494 --- linux-2.6.19.orig/drivers/net/ixp4xx/ixp4xx_crypto.c 1969-12-31 17:00:00.000000000 -0700
495 +++ linux-2.6.19/drivers/net/ixp4xx/ixp4xx_crypto.c 2007-01-12 21:54:40.000000000 -0700
496 @@ -0,0 +1,851 @@
497 +/*
498 + * ixp4xx_crypto.c - interface to the HW crypto
499 + *
500 + * Copyright (C) 2006 Christian Hohnstaedt <chohnstaedt@innominate.com>
501 + *
502 + * This file is released under the GPLv2
503 + */
504 +
505 +#include <linux/ixp_qmgr.h>
506 +#include <linux/ixp_npe.h>
507 +#include <linux/dma-mapping.h>
508 +#include <linux/dmapool.h>
509 +#include <linux/device.h>
510 +#include <linux/delay.h>
511 +#include <linux/slab.h>
512 +#include <linux/kernel.h>
513 +#include <linux/ixp_crypto.h>
514 +
515 +#define SEND_QID 29
516 +#define RECV_QID 30
517 +
518 +#define NPE_ID 2 /* NPE C */
519 +
520 +#define QUEUE_SIZE 64
521 +#define MY_VERSION "0.0.1"
522 +
523 +/* local head for all sa_ctx */
524 +static struct ix_sa_master sa_master;
525 +
526 +static const struct ix_hash_algo _hash_algos[] = {
527 +{
528 + .name = "MD5",
529 + .cfgword = 0xAA010004,
530 + .digest_len = 16,
531 + .icv = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
532 + "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
533 + .type = HASH_TYPE_MD5,
534 +},{
535 + .name = "SHA1",
536 + .cfgword = 0x00000005,
537 + .digest_len = 20,
538 + .icv = "\x67\x45\x23\x01\xEF\xCD\xAB\x89\x98\xBA"
539 + "\xDC\xFE\x10\x32\x54\x76\xC3\xD2\xE1\xF0",
540 + .type = HASH_TYPE_SHA1,
541 +#if 0
542 +},{
543 + .name = "CBC MAC",
544 + .digest_len = 64,
545 + .aad_len = 48,
546 + .type = HASH_TYPE_CBCMAC,
547 +#endif
548 +} };
549 +
550 +static const struct ix_cipher_algo _cipher_algos[] = {
551 +{
552 + .name = "DES ECB",
553 + .cfgword_enc = CIPH_ENCR | MOD_DES | MOD_ECB | KEYLEN_192,
554 + .cfgword_dec = CIPH_DECR | MOD_DES | MOD_ECB | KEYLEN_192,
555 + .block_len = 8,
556 + .type = CIPHER_TYPE_DES,
557 + .mode = CIPHER_MODE_ECB,
558 +},{
559 + .name = "DES CBC",
560 + .cfgword_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
561 + .cfgword_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
562 + .iv_len = 8,
563 + .block_len = 8,
564 + .type = CIPHER_TYPE_DES,
565 + .mode = CIPHER_MODE_CBC,
566 +},{
567 + .name = "3DES ECB",
568 + .cfgword_enc = CIPH_ENCR | MOD_TDEA3 | MOD_ECB | KEYLEN_192,
569 + .cfgword_dec = CIPH_DECR | MOD_TDEA3 | MOD_ECB | KEYLEN_192,
570 + .block_len = 8,
571 + .type = CIPHER_TYPE_3DES,
572 + .mode = CIPHER_MODE_ECB,
573 +},{
574 + .name = "3DES CBC",
575 + .cfgword_enc = CIPH_ENCR | MOD_TDEA3 | MOD_CBC_ENC | KEYLEN_192,
576 + .cfgword_dec = CIPH_DECR | MOD_TDEA3 | MOD_CBC_DEC | KEYLEN_192,
577 + .iv_len = 8,
578 + .block_len = 8,
579 + .type = CIPHER_TYPE_3DES,
580 + .mode = CIPHER_MODE_CBC,
581 +},{
582 + .name = "AES ECB",
583 + .cfgword_enc = CIPH_ENCR | ALGO_AES | MOD_ECB,
584 + .cfgword_dec = CIPH_DECR | ALGO_AES | MOD_ECB,
585 + .block_len = 16,
586 + .type = CIPHER_TYPE_AES,
587 + .mode = CIPHER_MODE_ECB,
588 +},{
589 + .name = "AES CBC",
590 + .cfgword_enc = CIPH_ENCR | ALGO_AES | MOD_CBC_ENC,
591 + .cfgword_dec = CIPH_DECR | ALGO_AES | MOD_CBC_DEC,
592 + .block_len = 16,
593 + .iv_len = 16,
594 + .type = CIPHER_TYPE_AES,
595 + .mode = CIPHER_MODE_CBC,
596 +},{
597 + .name = "AES CTR",
598 + .cfgword_enc = CIPH_ENCR | ALGO_AES | MOD_CTR,
599 + .cfgword_dec = CIPH_ENCR | ALGO_AES | MOD_CTR,
600 + .block_len = 16,
601 + .iv_len = 16,
602 + .type = CIPHER_TYPE_AES,
603 + .mode = CIPHER_MODE_CTR,
604 +#if 0
605 +},{
606 + .name = "AES CCM",
607 + .cfgword_enc = CIPH_ENCR | ALGO_AES | MOD_CCM_ENC,
608 + .cfgword_dec = CIPH_ENCR | ALGO_AES | MOD_CCM_DEC,
609 + .block_len = 16,
610 + .iv_len = 16,
611 + .type = CIPHER_TYPE_AES,
612 + .mode = CIPHER_MODE_CCM,
613 +#endif
614 +} };
615 +
616 +const struct ix_hash_algo *ix_hash_by_id(int type)
617 +{
618 + int i;
619 +
620 + for(i=0; i<ARRAY_SIZE(_hash_algos); i++) {
621 + if (_hash_algos[i].type == type)
622 + return _hash_algos + i;
623 + }
624 + return NULL;
625 +}
626 +
627 +const struct ix_cipher_algo *ix_cipher_by_id(int type, int mode)
628 +{
629 + int i;
630 +
631 + for(i=0; i<ARRAY_SIZE(_cipher_algos); i++) {
632 + if (_cipher_algos[i].type==type && _cipher_algos[i].mode==mode)
633 + return _cipher_algos + i;
634 + }
635 + return NULL;
636 +}
637 +
638 +static void irqcb_recv(struct qm_queue *queue);
639 +
640 +static int init_sa_master(struct ix_sa_master *master)
641 +{
642 + struct npe_info *npe;
643 + int ret = -ENODEV;
644 +
645 + if (! (ix_fuse() & (IX_FUSE_HASH | IX_FUSE_AES | IX_FUSE_DES))) {
646 + printk(KERN_ERR "ixp_crypto: No HW crypto available\n");
647 + return ret;
648 + }
649 + memset(master, 0, sizeof(struct ix_sa_master));
650 + master->npe_dev = get_npe_by_id(NPE_ID);
651 + if (! master->npe_dev)
652 + goto err;
653 +
654 + npe = dev_get_drvdata(master->npe_dev);
655 +
656 + if (npe_status(npe) & IX_NPEDL_EXCTL_STATUS_RUN) {
657 + switch (npe->img_info[1]) {
658 + case 4:
659 + printk(KERN_INFO "Crypto AES avaialable\n");
660 + break;
661 + case 5:
662 + printk(KERN_INFO "Crypto AES and CCM avaialable\n");
663 + break;
664 + default:
665 + printk(KERN_WARNING "Current microcode for %s has no"
666 + " crypto capabilities\n", npe->plat->name);
667 + break;
668 + }
669 + }
670 + rwlock_init(&master->lock);
671 + master->dmapool = dma_pool_create("ixp4xx_crypto", master->npe_dev,
672 + sizeof(struct npe_crypt_cont), 32, 0);
673 + if (!master->dmapool) {
674 + ret = -ENOMEM;
675 + goto err;
676 + }
677 + master->sendq = request_queue(SEND_QID, QUEUE_SIZE);
678 + if (IS_ERR(master->sendq)) {
679 + printk(KERN_ERR "ixp4xx_crypto: Error requesting Q: %d\n",
680 + SEND_QID);
681 + ret = PTR_ERR(master->sendq);
682 + goto err;
683 + }
684 + master->recvq = request_queue(RECV_QID, QUEUE_SIZE);
685 + if (IS_ERR(master->recvq)) {
686 + printk(KERN_ERR "ixp4xx_crypto: Error requesting Q: %d\n",
687 + RECV_QID);
688 + ret = PTR_ERR(master->recvq);
689 + release_queue(master->sendq);
690 + goto err;
691 + }
692 +
693 + master->recvq->irq_cb = irqcb_recv;
694 + queue_set_watermarks(master->recvq, 0, 0);
695 + queue_set_irq_src(master->recvq, Q_IRQ_ID_NOT_E);
696 + queue_enable_irq(master->recvq);
697 + printk(KERN_INFO "ixp4xx_crypto " MY_VERSION " registered successfully\n");
698 +
699 + return 0;
700 +err:
701 + if (master->dmapool)
702 + dma_pool_destroy(master->dmapool);
703 + if (! master->npe_dev)
704 + put_device(master->npe_dev);
705 + return ret;
706 +
707 +}
708 +
709 +static void release_sa_master(struct ix_sa_master *master)
710 +{
711 + struct npe_crypt_cont *cont;
712 + unsigned long flags;
713 +
714 + write_lock_irqsave(&master->lock, flags);
715 + while (master->pool) {
716 + cont = master->pool;
717 + master->pool = cont->next;
718 + dma_pool_free(master->dmapool, cont, cont->phys);
719 + master->pool_size--;
720 + }
721 + write_unlock_irqrestore(&master->lock, flags);
722 + if (master->pool_size) {
723 + printk(KERN_ERR "ixp4xx_crypto: %d items lost from DMA pool\n",
724 + master->pool_size);
725 + }
726 +
727 + dma_pool_destroy(master->dmapool);
728 + release_queue(master->sendq);
729 + release_queue(master->recvq);
730 + return_npe_dev(master->npe_dev);
731 +}
732 +
733 +static struct npe_crypt_cont *ix_sa_get_cont(struct ix_sa_master *master)
734 +{
735 + unsigned long flags;
736 + struct npe_crypt_cont *cont;
737 + dma_addr_t handle;
738 +
739 + write_lock_irqsave(&master->lock, flags);
740 + if (!master->pool) {
741 + cont = dma_pool_alloc(master->dmapool, GFP_ATOMIC, &handle);
742 + if (cont) {
743 + master->pool_size++;
744 + cont->phys = handle;
745 + cont->virt = cont;
746 + }
747 + } else {
748 + cont = master->pool;
749 + master->pool = cont->next;
750 + }
751 + write_unlock_irqrestore(&master->lock, flags);
752 + return cont;
753 +}
754 +
755 +static void
756 +ix_sa_return_cont(struct ix_sa_master *master,struct npe_crypt_cont *cont)
757 +{
758 + unsigned long flags;
759 +
760 + write_lock_irqsave(&master->lock, flags);
761 + cont->next = master->pool;
762 + master->pool = cont;
763 + write_unlock_irqrestore(&master->lock, flags);
764 +}
765 +
766 +static void free_sa_dir(struct ix_sa_ctx *sa_ctx, struct ix_sa_dir *dir)
767 +{
768 + memset(dir->npe_ctx, 0, NPE_CTX_LEN);
769 + dma_pool_free(sa_ctx->master->dmapool, dir->npe_ctx,
770 + dir->npe_ctx_phys);
771 +}
772 +
773 +static void ix_sa_ctx_destroy(struct ix_sa_ctx *sa_ctx)
774 +{
775 + BUG_ON(sa_ctx->state != STATE_UNLOADING);
776 + free_sa_dir(sa_ctx, &sa_ctx->encrypt);
777 + free_sa_dir(sa_ctx, &sa_ctx->decrypt);
778 + kfree(sa_ctx);
779 + module_put(THIS_MODULE);
780 +}
781 +
782 +static void recv_pack(struct qm_queue *queue, u32 phys)
783 +{
784 + struct ix_sa_ctx *sa_ctx;
785 + struct npe_crypt_cont *cr_cont;
786 + struct npe_cont *cont;
787 + int failed;
788 +
789 + failed = phys & 0x1;
790 + phys &= ~0x3;
791 +
792 + cr_cont = dma_to_virt(queue->dev, phys);
793 + cr_cont = cr_cont->virt;
794 + sa_ctx = cr_cont->ctl.crypt.sa_ctx;
795 +
796 + phys = npe_to_cpu32(cr_cont->ctl.crypt.src_buf);
797 + if (phys) {
798 + cont = dma_to_virt(queue->dev, phys);
799 + cont = cont->virt;
800 + } else {
801 + cont = NULL;
802 + }
803 + if (cr_cont->ctl.crypt.oper_type == OP_PERFORM) {
804 + dma_unmap_single(sa_ctx->master->npe_dev,
805 + cont->eth.phys_addr,
806 + cont->eth.buf_len,
807 + DMA_BIDIRECTIONAL);
808 + if (sa_ctx->perf_cb)
809 + sa_ctx->perf_cb(sa_ctx, cont->data, failed);
810 + qmgr_return_cont(dev_get_drvdata(queue->dev), cont);
811 + ix_sa_return_cont(sa_ctx->master, cr_cont);
812 + if (atomic_dec_and_test(&sa_ctx->use_cnt))
813 + ix_sa_ctx_destroy(sa_ctx);
814 + return;
815 + }
816 +
817 + /* We are registering */
818 + switch (cr_cont->ctl.crypt.mode) {
819 + case NPE_OP_HASH_GEN_ICV:
820 + /* 1 out of 2 HMAC preparation operations completed */
821 + dma_unmap_single(sa_ctx->master->npe_dev,
822 + cont->eth.phys_addr,
823 + cont->eth.buf_len,
824 + DMA_TO_DEVICE);
825 + kfree(cont->data);
826 + qmgr_return_cont(dev_get_drvdata(queue->dev), cont);
827 + break;
828 + case NPE_OP_ENC_GEN_KEY:
829 + memcpy(sa_ctx->decrypt.npe_ctx + sizeof(u32),
830 + sa_ctx->rev_aes->ctl.rev_aes_key + sizeof(u32),
831 + sa_ctx->c_key.len);
832 + /* REV AES data not needed anymore, free it */
833 + ix_sa_return_cont(sa_ctx->master, sa_ctx->rev_aes);
834 + sa_ctx->rev_aes = NULL;
835 + break;
836 + default:
837 + printk(KERN_ERR "Unknown crypt-register mode: %x\n",
838 + cr_cont->ctl.crypt.mode);
839 +
840 + }
841 + if (cr_cont->ctl.crypt.oper_type == OP_REG_DONE) {
842 + if (sa_ctx->state == STATE_UNREGISTERED)
843 + sa_ctx->state = STATE_REGISTERED;
844 + if (sa_ctx->reg_cb)
845 + sa_ctx->reg_cb(sa_ctx, failed);
846 + }
847 + ix_sa_return_cont(sa_ctx->master, cr_cont);
848 + if (atomic_dec_and_test(&sa_ctx->use_cnt))
849 + ix_sa_ctx_destroy(sa_ctx);
850 +}
851 +
852 +static void irqcb_recv(struct qm_queue *queue)
853 +{
854 + u32 phys;
855 +
856 + queue_ack_irq(queue);
857 + while ((phys = queue_get_entry(queue)))
858 + recv_pack(queue, phys);
859 +}
860 +
861 +static int init_sa_dir(struct ix_sa_ctx *sa_ctx, struct ix_sa_dir *dir)
862 +{
863 + dir->npe_ctx = dma_pool_alloc(sa_ctx->master->dmapool,
864 + sa_ctx->gfp_flags, &dir->npe_ctx_phys);
865 + if (!dir->npe_ctx) {
866 + return 1;
867 + }
868 + memset(dir->npe_ctx, 0, NPE_CTX_LEN);
869 + return 0;
870 +}
871 +
872 +struct ix_sa_ctx *ix_sa_ctx_new(int priv_len, gfp_t flags)
873 +{
874 + struct ix_sa_ctx *sa_ctx;
875 + struct ix_sa_master *master = &sa_master;
876 + struct npe_info *npe = dev_get_drvdata(master->npe_dev);
877 +
878 + /* first check if Microcode was downloaded into this NPE */
879 + if (!( npe_status(npe) & IX_NPEDL_EXCTL_STATUS_RUN)) {
880 + printk(KERN_ERR "%s not running\n", npe->plat->name);
881 + return NULL;
882 + }
883 + switch (npe->img_info[1]) {
884 + case 4:
885 + case 5:
886 + break;
887 + default:
888 + /* No crypto Microcode */
889 + return NULL;
890 + }
891 + if (!try_module_get(THIS_MODULE)) {
892 + return NULL;
893 + }
894 +
895 + sa_ctx = kzalloc(sizeof(struct ix_sa_ctx) + priv_len, flags);
896 + if (!sa_ctx) {
897 + goto err_put;
898 + }
899 +
900 + sa_ctx->master = master;
901 + sa_ctx->gfp_flags = flags;
902 +
903 + if (init_sa_dir(sa_ctx, &sa_ctx->encrypt))
904 + goto err_free;
905 + if (init_sa_dir(sa_ctx, &sa_ctx->decrypt)) {
906 + free_sa_dir(sa_ctx, &sa_ctx->encrypt);
907 + goto err_free;
908 + }
909 + if (priv_len)
910 + sa_ctx->priv = sa_ctx + 1;
911 +
912 + atomic_set(&sa_ctx->use_cnt, 1);
913 + return sa_ctx;
914 +
915 +err_free:
916 + kfree(sa_ctx);
917 +err_put:
918 + module_put(THIS_MODULE);
919 + return NULL;
920 +}
921 +
922 +void ix_sa_ctx_free(struct ix_sa_ctx *sa_ctx)
923 +{
924 + sa_ctx->state = STATE_UNLOADING;
925 + if (atomic_dec_and_test(&sa_ctx->use_cnt))
926 + ix_sa_ctx_destroy(sa_ctx);
927 + else
928 + printk("ix_sa_ctx_free -> delayed: %p %d\n",
929 + sa_ctx, atomic_read(&sa_ctx->use_cnt));
930 +}
931 +
932 +/* http://www.ietf.org/rfc/rfc2104.txt */
933 +#define HMAC_IPAD_VALUE 0x36
934 +#define HMAC_OPAD_VALUE 0x5C
935 +#define PAD_BLOCKLEN 64
936 +
937 +static int register_chain_var(struct ix_sa_ctx *sa_ctx,
938 + unsigned char *pad, u32 target, int init_len, u32 ctx_addr, int oper)
939 +{
940 + struct npe_crypt_cont *cr_cont;
941 + struct npe_cont *cont;
942 +
943 + cr_cont = ix_sa_get_cont(sa_ctx->master);
944 + if (!cr_cont)
945 + return -ENOMEM;
946 +
947 + cr_cont->ctl.crypt.sa_ctx = sa_ctx;
948 + cr_cont->ctl.crypt.auth_offs = 0;
949 + cr_cont->ctl.crypt.auth_len =cpu_to_npe16(PAD_BLOCKLEN);
950 + cr_cont->ctl.crypt.crypto_ctx = cpu_to_npe32(ctx_addr);
951 +
952 + cont = qmgr_get_cont(dev_get_drvdata(sa_ctx->master->sendq->dev));
953 + if (!cont) {
954 + ix_sa_return_cont(sa_ctx->master, cr_cont);
955 + return -ENOMEM;
956 + }
957 +
958 + cont->data = pad;
959 + cont->eth.next = 0;
960 + cont->eth.buf_len = cpu_to_npe16(PAD_BLOCKLEN);
961 + cont->eth.pkt_len = 0;
962 +
963 + cont->eth.phys_addr = cpu_to_npe32(dma_map_single(
964 + sa_ctx->master->npe_dev, pad, PAD_BLOCKLEN, DMA_TO_DEVICE));
965 +
966 + cr_cont->ctl.crypt.src_buf = cpu_to_npe32(cont->phys);
967 + cr_cont->ctl.crypt.oper_type = oper;
968 +
969 + cr_cont->ctl.crypt.addr.icv = cpu_to_npe32(target);
970 + cr_cont->ctl.crypt.mode = NPE_OP_HASH_GEN_ICV;
971 + cr_cont->ctl.crypt.init_len = init_len;
972 +
973 + atomic_inc(&sa_ctx->use_cnt);
974 + queue_put_entry(sa_ctx->master->sendq, cr_cont->phys);
975 + if (queue_stat(sa_ctx->master->sendq) == 2) { /* overflow */
976 + atomic_dec(&sa_ctx->use_cnt);
977 + qmgr_return_cont(dev_get_drvdata(sa_ctx->master->sendq->dev),
978 + cont);
979 + ix_sa_return_cont(sa_ctx->master, cr_cont);
980 + return -ENOMEM;
981 + }
982 + return 0;
983 +}
984 +
985 +/* Return value
986 + * 0 if nothing registered,
987 + * 1 if something registered and
988 + * < 0 on error
989 + */
990 +static int ix_sa_ctx_setup_auth(struct ix_sa_ctx *sa_ctx,
991 + const struct ix_hash_algo *algo, int len, int oper, int encrypt)
992 +{
993 + unsigned char *ipad, *opad;
994 + u32 itarget, otarget, ctx_addr;
995 + unsigned char *cinfo;
996 + int init_len, i, ret = 0;
997 + struct qm_qmgr *qmgr;
998 + struct ix_sa_dir *dir;
999 + u32 cfgword;
1000 +
1001 + dir = encrypt ? &sa_ctx->encrypt : &sa_ctx->decrypt;
1002 + cinfo = dir->npe_ctx + dir->npe_ctx_idx;
1003 +
1004 + qmgr = dev_get_drvdata(sa_ctx->master->sendq->dev);
1005 +
1006 + cinfo = dir->npe_ctx + dir->npe_ctx_idx;
1007 + sa_ctx->h_algo = algo;
1008 +
1009 + if (!algo) {
1010 + dir->npe_mode |= NPE_OP_HMAC_DISABLE;
1011 + return 0;
1012 + }
1013 + if (algo->type == HASH_TYPE_CBCMAC) {
1014 + dir->npe_mode |= NPE_OP_CCM_ENABLE | NPE_OP_HMAC_DISABLE;
1015 + return 0;
1016 + }
1017 + if (sa_ctx->h_key.len > 64 || sa_ctx->h_key.len < algo->digest_len)
1018 + return -EINVAL;
1019 + if (len > algo->digest_len || (len % 4))
1020 + return -EINVAL;
1021 + if (!len)
1022 + len = algo->digest_len;
1023 +
1024 + sa_ctx->digest_len = len;
1025 +
1026 + /* write cfg word to cryptinfo */
1027 + cfgword = algo->cfgword | ((len/4) << 8);
1028 + *(u32*)cinfo = cpu_to_be32(cfgword);
1029 + cinfo += sizeof(cfgword);
1030 +
1031 + /* write ICV to cryptinfo */
1032 + memcpy(cinfo, algo->icv, algo->digest_len);
1033 + cinfo += algo->digest_len;
1034 +
1035 + itarget = dir->npe_ctx_phys + dir->npe_ctx_idx
1036 + + sizeof(algo->cfgword);
1037 + otarget = itarget + algo->digest_len;
1038 +
1039 + opad = kzalloc(PAD_BLOCKLEN, sa_ctx->gfp_flags | GFP_DMA);
1040 + if (!opad) {
1041 + return -ENOMEM;
1042 + }
1043 + ipad = kzalloc(PAD_BLOCKLEN, sa_ctx->gfp_flags | GFP_DMA);
1044 + if (!ipad) {
1045 + kfree(opad);
1046 + return -ENOMEM;
1047 + }
1048 + memcpy(ipad, sa_ctx->h_key.key, sa_ctx->h_key.len);
1049 + memcpy(opad, sa_ctx->h_key.key, sa_ctx->h_key.len);
1050 + for (i = 0; i < PAD_BLOCKLEN; i++) {
1051 + ipad[i] ^= HMAC_IPAD_VALUE;
1052 + opad[i] ^= HMAC_OPAD_VALUE;
1053 + }
1054 + init_len = cinfo - (dir->npe_ctx + dir->npe_ctx_idx);
1055 + ctx_addr = dir->npe_ctx_phys + dir->npe_ctx_idx;
1056 +
1057 + dir->npe_ctx_idx += init_len;
1058 + dir->npe_mode |= NPE_OP_HASH_ENABLE;
1059 +
1060 + if (!encrypt)
1061 + dir->npe_mode |= NPE_OP_HASH_VERIFY;
1062 +
1063 + /* register first chainvar */
1064 + ret = register_chain_var(sa_ctx, opad, otarget,
1065 + init_len, ctx_addr, OP_REGISTER);
1066 + if (ret) {
1067 + kfree(ipad);
1068 + kfree(opad);
1069 + return ret;
1070 + }
1071 +
1072 + /* register second chainvar */
1073 + ret = register_chain_var(sa_ctx, ipad, itarget,
1074 + init_len, ctx_addr, oper);
1075 + if (ret) {
1076 + kfree(ipad);
1077 + return ret;
1078 + }
1079 +
1080 + return 1;
1081 +}
1082 +
1083 +static int gen_rev_aes_key(struct ix_sa_ctx *sa_ctx,
1084 + u32 keylen_cfg, int cipher_op)
1085 +{
1086 + unsigned char *cinfo;
1087 + struct npe_crypt_cont *cr_cont;
1088 +
1089 + keylen_cfg |= CIPH_ENCR | ALGO_AES | MOD_ECB;
1090 + sa_ctx->rev_aes = ix_sa_get_cont(sa_ctx->master);
1091 + if (!sa_ctx->rev_aes)
1092 + return -ENOMEM;
1093 +
1094 + cinfo = sa_ctx->rev_aes->ctl.rev_aes_key;
1095 + *(u32*)cinfo = cpu_to_be32(keylen_cfg);
1096 + cinfo += sizeof(keylen_cfg);
1097 +
1098 + memcpy(cinfo, sa_ctx->c_key.key, sa_ctx->c_key.len);
1099 +
1100 + cr_cont = ix_sa_get_cont(sa_ctx->master);
1101 + if (!cr_cont) {
1102 + ix_sa_return_cont(sa_ctx->master, sa_ctx->rev_aes);
1103 + sa_ctx->rev_aes = NULL;
1104 + return -ENOMEM;
1105 + }
1106 + cr_cont->ctl.crypt.sa_ctx = sa_ctx;
1107 + cr_cont->ctl.crypt.oper_type = cipher_op;
1108 +
1109 + cr_cont->ctl.crypt.crypt_offs = 0;
1110 + cr_cont->ctl.crypt.crypt_len = cpu_to_npe16(AES_BLOCK128);
1111 + cr_cont->ctl.crypt.addr.rev_aes = cpu_to_npe32(
1112 + sa_ctx->rev_aes->phys + sizeof(keylen_cfg));
1113 +
1114 + cr_cont->ctl.crypt.src_buf = 0;
1115 + cr_cont->ctl.crypt.crypto_ctx = cpu_to_npe32(sa_ctx->rev_aes->phys);
1116 + cr_cont->ctl.crypt.mode = NPE_OP_ENC_GEN_KEY;
1117 + cr_cont->ctl.crypt.init_len = sa_ctx->decrypt.npe_ctx_idx;
1118 +
1119 + atomic_inc(&sa_ctx->use_cnt);
1120 + queue_put_entry(sa_ctx->master->sendq, cr_cont->phys);
1121 + if (queue_stat(sa_ctx->master->sendq) == 2) { /* overflow */
1122 + atomic_dec(&sa_ctx->use_cnt);
1123 + ix_sa_return_cont(sa_ctx->master, cr_cont);
1124 + ix_sa_return_cont(sa_ctx->master, sa_ctx->rev_aes);
1125 + sa_ctx->rev_aes = NULL;
1126 + return -ENOMEM;
1127 + }
1128 +
1129 + return 1;
1130 +}
1131 +
1132 +/* Return value
1133 + * 0 if nothing registered,
1134 + * 1 if something registered and
1135 + * < 0 on error
1136 + */
1137 +static int ix_sa_ctx_setup_cipher(struct ix_sa_ctx *sa_ctx,
1138 + const struct ix_cipher_algo *algo, int cipher_op, int encrypt)
1139 +{
1140 + unsigned char *cinfo;
1141 + int keylen, init_len;
1142 + u32 cipher_cfg;
1143 + u32 keylen_cfg = 0;
1144 + struct ix_sa_dir *dir;
1145 +
1146 + dir = encrypt ? &sa_ctx->encrypt : &sa_ctx->decrypt;
1147 + cinfo = dir->npe_ctx + dir->npe_ctx_idx;
1148 +
1149 + sa_ctx->c_algo = algo;
1150 +
1151 + if (!algo)
1152 + return 0;
1153 +
1154 + if (algo->type == CIPHER_TYPE_DES && sa_ctx->c_key.len != 8)
1155 + return -EINVAL;
1156 +
1157 + if (algo->type == CIPHER_TYPE_3DES && sa_ctx->c_key.len != 24)
1158 + return -EINVAL;
1159 +
1160 + keylen = 24;
1161 +
1162 + if (encrypt) {
1163 + cipher_cfg = algo->cfgword_enc;
1164 + dir->npe_mode |= NPE_OP_CRYPT_ENCRYPT;
1165 + } else {
1166 + cipher_cfg = algo->cfgword_dec;
1167 + }
1168 + if (algo->type == CIPHER_TYPE_AES) {
1169 + switch (sa_ctx->c_key.len) {
1170 + case 16: keylen_cfg = MOD_AES128 | KEYLEN_128; break;
1171 + case 24: keylen_cfg = MOD_AES192 | KEYLEN_192; break;
1172 + case 32: keylen_cfg = MOD_AES256 | KEYLEN_256; break;
1173 + default: return -EINVAL;
1174 + }
1175 + keylen = sa_ctx->c_key.len;
1176 + cipher_cfg |= keylen_cfg;
1177 + }
1178 +
1179 + /* write cfg word to cryptinfo */
1180 + *(u32*)cinfo = cpu_to_be32(cipher_cfg);
1181 + cinfo += sizeof(cipher_cfg);
1182 +
1183 + /* write cipher key to cryptinfo */
1184 + memcpy(cinfo, sa_ctx->c_key.key, sa_ctx->c_key.len);
1185 + cinfo += keylen;
1186 +
1187 + init_len = cinfo - (dir->npe_ctx + dir->npe_ctx_idx);
1188 + dir->npe_ctx_idx += init_len;
1189 +
1190 + dir->npe_mode |= NPE_OP_CRYPT_ENABLE;
1191 +
1192 + if (algo->type == CIPHER_TYPE_AES && !encrypt) {
1193 + return gen_rev_aes_key(sa_ctx, keylen_cfg, cipher_op);
1194 + }
1195 +
1196 + return 0;
1197 +}
1198 +
1199 +/* returns 0 on OK, <0 on error and 1 on overflow */
1200 +int ix_sa_crypto_perform(struct ix_sa_ctx *sa_ctx, u8 *data, void *ptr,
1201 + int datalen, int c_offs, int c_len, int a_offs, int a_len,
1202 + int hmac, char *iv, int encrypt)
1203 +{
1204 + struct npe_crypt_cont *cr_cont;
1205 + struct npe_cont *cont;
1206 + u32 data_phys;
1207 + int ret = -ENOMEM;
1208 + struct ix_sa_dir *dir;
1209 +
1210 + dir = encrypt ? &sa_ctx->encrypt : &sa_ctx->decrypt;
1211 +
1212 + if (sa_ctx->state != STATE_REGISTERED)
1213 + return -ENOENT;
1214 +
1215 + cr_cont = ix_sa_get_cont(sa_ctx->master);
1216 + if (!cr_cont)
1217 + return ret;
1218 +
1219 + cr_cont->ctl.crypt.sa_ctx = sa_ctx;
1220 + cr_cont->ctl.crypt.crypto_ctx = cpu_to_npe32(dir->npe_ctx_phys);
1221 + cr_cont->ctl.crypt.oper_type = OP_PERFORM;
1222 + cr_cont->ctl.crypt.mode = dir->npe_mode;
1223 + cr_cont->ctl.crypt.init_len = dir->npe_ctx_idx;
1224 +
1225 + if (sa_ctx->c_algo) {
1226 + cr_cont->ctl.crypt.crypt_offs = cpu_to_npe16(c_offs);
1227 + cr_cont->ctl.crypt.crypt_len = cpu_to_npe16(c_len);
1228 + if (sa_ctx->c_algo->iv_len) {
1229 + if (!iv) {
1230 + ret = -EINVAL;
1231 + goto err_cr;
1232 + }
1233 + memcpy(cr_cont->ctl.crypt.iv, iv,
1234 + sa_ctx->c_algo->iv_len);
1235 + }
1236 + }
1237 +
1238 + if (sa_ctx->h_algo) {
1239 + /* prepare hashing */
1240 + cr_cont->ctl.crypt.auth_offs = cpu_to_npe16(a_offs);
1241 + cr_cont->ctl.crypt.auth_len = cpu_to_npe16(a_len);
1242 + }
1243 +
1244 + data_phys = dma_map_single(sa_ctx->master->npe_dev,
1245 + data, datalen, DMA_BIDIRECTIONAL);
1246 + if (hmac)
1247 + cr_cont->ctl.crypt.addr.icv = cpu_to_npe32(data_phys + hmac);
1248 +
1249 + /* Prepare the data ptr */
1250 + cont = qmgr_get_cont(dev_get_drvdata(sa_ctx->master->sendq->dev));
1251 + if (!cont) {
1252 + goto err_unmap;
1253 + }
1254 +
1255 + cont->data = ptr;
1256 + cont->eth.next = 0;
1257 + cont->eth.buf_len = cpu_to_npe16(datalen);
1258 + cont->eth.pkt_len = 0;
1259 +
1260 + cont->eth.phys_addr = cpu_to_npe32(data_phys);
1261 + cr_cont->ctl.crypt.src_buf = cpu_to_npe32(cont->phys);
1262 +
1263 + atomic_inc(&sa_ctx->use_cnt);
1264 + queue_put_entry(sa_ctx->master->sendq, cr_cont->phys);
1265 + if (queue_stat(sa_ctx->master->sendq) != 2) {
1266 + return 0;
1267 + }
1268 +
1269 + /* overflow */
1270 + printk("%s: Overflow\n", __FUNCTION__);
1271 + ret = -EAGAIN;
1272 + atomic_dec(&sa_ctx->use_cnt);
1273 + qmgr_return_cont(dev_get_drvdata(sa_ctx->master->sendq->dev), cont);
1274 +
1275 +err_unmap:
1276 + dma_unmap_single(sa_ctx->master->npe_dev, data_phys, datalen,
1277 + DMA_BIDIRECTIONAL);
1278 +err_cr:
1279 + ix_sa_return_cont(sa_ctx->master, cr_cont);
1280 +
1281 + return ret;
1282 +}
1283 +
1284 +int ix_sa_ctx_setup_cipher_auth(struct ix_sa_ctx *sa_ctx,
1285 + const struct ix_cipher_algo *cipher,
1286 + const struct ix_hash_algo *auth, int len)
1287 +{
1288 + int ret = 0, sum = 0;
1289 + int cipher_op;
1290 +
1291 + if (sa_ctx->state != STATE_UNREGISTERED)
1292 + return -ENOENT;
1293 +
1294 + atomic_inc(&sa_ctx->use_cnt);
1295 +
1296 + cipher_op = auth ? OP_REGISTER : OP_REG_DONE;
1297 + if ((ret = ix_sa_ctx_setup_cipher(sa_ctx, cipher, OP_REGISTER, 1)) < 0)
1298 + goto out;
1299 + sum += ret;
1300 + if ((ret = ix_sa_ctx_setup_cipher(sa_ctx, cipher, cipher_op, 0)) < 0)
1301 + goto out;
1302 + sum += ret;
1303 + if ((ret = ix_sa_ctx_setup_auth(sa_ctx, auth, len, OP_REGISTER, 1)) < 0)
1304 + goto out;
1305 + sum += ret;
1306 + if ((ret = ix_sa_ctx_setup_auth(sa_ctx, auth, len, OP_REG_DONE, 0)) < 0)
1307 + goto out;
1308 + sum += ret;
1309 +
1310 + /* Nothing registered ?
1311 + * Ok, then we are done and call the callback here.
1312 + */
1313 + if (!sum) {
1314 + if (sa_ctx->state == STATE_UNREGISTERED)
1315 + sa_ctx->state = STATE_REGISTERED;
1316 + if (sa_ctx->reg_cb)
1317 + sa_ctx->reg_cb(sa_ctx, 0);
1318 + }
1319 +out:
1320 + atomic_dec(&sa_ctx->use_cnt);
1321 + return ret;
1322 +}
1323 +
1324 +static int __init init_crypto(void)
1325 +{
1326 + return init_sa_master(&sa_master);
1327 +}
1328 +
1329 +static void __exit finish_crypto(void)
1330 +{
1331 + release_sa_master(&sa_master);
1332 +}
1333 +
1334 +MODULE_LICENSE("GPL");
1335 +MODULE_AUTHOR("Christian Hohnstaedt <chohnstaedt@innominate.com>");
1336 +
1337 +EXPORT_SYMBOL(ix_hash_by_id);
1338 +EXPORT_SYMBOL(ix_cipher_by_id);
1339 +
1340 +EXPORT_SYMBOL(ix_sa_ctx_new);
1341 +EXPORT_SYMBOL(ix_sa_ctx_free);
1342 +EXPORT_SYMBOL(ix_sa_ctx_setup_cipher_auth);
1343 +EXPORT_SYMBOL(ix_sa_crypto_perform);
1344 +
1345 +module_init(init_crypto);
1346 +module_exit(finish_crypto);
1347 +
1348 diff -Naur linux-2.6.19.orig/drivers/net/ixp4xx/ixp4xx_qmgr.c linux-2.6.19/drivers/net/ixp4xx/ixp4xx_qmgr.c
1349 --- linux-2.6.19.orig/drivers/net/ixp4xx/ixp4xx_qmgr.c 1969-12-31 17:00:00.000000000 -0700
1350 +++ linux-2.6.19/drivers/net/ixp4xx/ixp4xx_qmgr.c 2007-01-12 21:54:40.000000000 -0700
1351 @@ -0,0 +1,474 @@
1352 +/*
1353 + * qmgr.c - reimplementation of the queue configuration interface.
1354 + *
1355 + * Copyright (C) 2006 Christian Hohnstaedt <chohnstaedt@innominate.com>
1356 + *
1357 + * This file is released under the GPLv2
1358 + */
1359 +
1360 +#include <linux/kernel.h>
1361 +#include <linux/module.h>
1362 +#include <linux/platform_device.h>
1363 +#include <linux/fs.h>
1364 +#include <linux/init.h>
1365 +#include <linux/slab.h>
1366 +#include <linux/dmapool.h>
1367 +#include <linux/interrupt.h>
1368 +#include <linux/err.h>
1369 +#include <linux/delay.h>
1370 +#include <asm/uaccess.h>
1371 +#include <asm/io.h>
1372 +
1373 +#include <linux/ixp_qmgr.h>
1374 +#include <linux/ixp_npe.h>
1375 +
1376 +#define IXQMGR_VERSION "IXP4XX Q Manager 0.2.1"
1377 +
1378 +static struct device *qmgr_dev = NULL;
1379 +
1380 +static int poll_freq = 4000;
1381 +static int poll_enable = 0;
1382 +static u32 timer_countup_ticks;
1383 +
1384 +module_param(poll_freq, int, 0644);
1385 +module_param(poll_enable, int, 0644);
1386 +
1387 +int queue_len(struct qm_queue *queue)
1388 +{
1389 + struct qm_qmgr *qmgr = dev_get_drvdata(queue->dev);
1390 + int diff, offs;
1391 + u32 val;
1392 +
1393 + offs = queue->id/8 + QUE_LOW_STAT0;
1394 + val = *(qmgr->addr + IX_QMGR_QCFG_BASE + queue->id);
1395 +
1396 + diff = (val - (val >> 7)) & 0x7f;
1397 + if (!diff) {
1398 + /* diff == 0 means either empty or full, must look at STAT0 */
1399 + if ((*(qmgr->addr + offs) >> ((queue->id % 8)*4)) & 0x04)
1400 + diff = queue->len;
1401 + }
1402 + return diff;
1403 +}
1404 +
1405 +static int request_pool(struct device *dev, int count)
1406 +{
1407 + int i;
1408 + struct npe_cont *cont;
1409 + struct qm_qmgr *qmgr = dev_get_drvdata(dev);
1410 + dma_addr_t handle;
1411 +
1412 + for (i=0; i<count; i++) {
1413 + cont = dma_pool_alloc(qmgr->dmapool, GFP_KERNEL, &handle);
1414 + if (!cont) {
1415 + return -ENOMEM;
1416 + }
1417 + cont->phys = handle;
1418 + cont->virt = cont;
1419 + write_lock(&qmgr->lock);
1420 + cont->next = qmgr->pool;
1421 + qmgr->pool = cont;
1422 + write_unlock(&qmgr->lock);
1423 + }
1424 + return 0;
1425 +}
1426 +
1427 +static int free_pool(struct device *dev, int count)
1428 +{
1429 + int i;
1430 + struct npe_cont *cont;
1431 + struct qm_qmgr *qmgr = dev_get_drvdata(dev);
1432 +
1433 + for (i=0; i<count; i++) {
1434 + write_lock(&qmgr->lock);
1435 + cont = qmgr->pool;
1436 + if (!cont) {
1437 + write_unlock(&qmgr->lock);
1438 + return -1;
1439 + }
1440 + qmgr->pool = cont->next;
1441 + write_unlock(&qmgr->lock);
1442 + dma_pool_free(qmgr->dmapool, cont, cont->phys);
1443 + }
1444 + return 0;
1445 +}
1446 +
1447 +static int get_free_qspace(struct qm_qmgr *qmgr, int len)
1448 +{
1449 + int words = (qmgr->res->end - qmgr->res->start + 1) / 4 -
1450 + IX_QMGR_SRAM_SPACE;
1451 + int i,q;
1452 +
1453 + for (i=0; i<words; i+=len) {
1454 + for (q=0; q<MAX_QUEUES; q++) {
1455 + struct qm_queue *qu = qmgr->queues[q];
1456 + if (!qu)
1457 + continue;
1458 + if ((qu->addr + qu->len > i) && (qu->addr < i + len))
1459 + break;
1460 + }
1461 + if (q == MAX_QUEUES) {
1462 + /* we have a free address */
1463 + return i;
1464 + }
1465 + }
1466 + return -1;
1467 +}
1468 +
1469 +static inline int _log2(int x)
1470 +{
1471 + int r=0;
1472 + while(x>>=1)
1473 + r++;
1474 + return r;
1475 +}
1476 +
1477 +/*
1478 + * 32bit Config registers at IX_QMGR_QUECONFIG_BASE_OFFSET[Qid]
1479 + * 0 - 6 WRPTR Word offset to baseaddr (index 0 .. BSIZE-1)
1480 + * 7 -13 RDPTR ''
1481 + * 14 -21 BADDR baseaddr = (offset to IX_QMGR_QUEBUFFER_SPACE_OFFSET) >> 6
1482 + * 22 -23 ESIZE entrySizeInWords (always 00 because entrySizeInWords==1)
1483 + * 24 -25 BSIZE qSizeInWords 00=16,01=32,10=64,11=128
1484 + * 26 -28 NE nearly empty
1485 + * 29 -31 NF nearly full
1486 + */
1487 +static int conf_q_regs(struct qm_queue *queue)
1488 +{
1489 + int bsize = _log2(queue->len/16);
1490 + int baddr = queue->addr + IX_QMGR_QCFG_SIZE;
1491 +
1492 + /* +2, because baddr is in words and not in bytes */
1493 + queue_write_cfg_reg(queue, (bsize << 24) | (baddr<<(14-6+2)) );
1494 +
1495 + return 0;
1496 +}
1497 +
1498 +static void pmu_timer_restart(void)
1499 +{
1500 + unsigned long flags;
1501 +
1502 + local_irq_save(flags);
1503 +
1504 + __asm__(" mcr p14,0,%0,c1,c1,0\n" /* write current counter */
1505 + : : "r" (timer_countup_ticks));
1506 +
1507 + __asm__(" mrc p14,0,r1,c4,c1,0; " /* get int enable register */
1508 + " orr r1,r1,#1; "
1509 + " mcr p14,0,r1,c5,c1,0; " /* clear overflow */
1510 + " mcr p14,0,r1,c4,c1,0\n" /* enable interrupts */
1511 + : : : "r1");
1512 +
1513 + local_irq_restore(flags);
1514 +}
1515 +
1516 +static void pmu_timer_init(void)
1517 +{
1518 + u32 controlRegisterMask =
1519 + BIT(0) | /* enable counters */
1520 + BIT(2); /* reset clock counter; */
1521 +
1522 + /*
1523 + * Compute the number of xscale cycles needed between each
1524 + * PMU IRQ. This is done from the result of an OS calibration loop.
1525 + *
1526 + * For 533MHz CPU, 533000000 tick/s / 4000 times/sec = 138250
1527 + * 4000 times/sec = 37 mbufs/interrupt at line rate
1528 + * The pmu timer is reset to -138250 = 0xfffde3f6, to trigger an IRQ
1529 + * when this up counter overflows.
1530 + *
1531 + * The multiplication gives a number of instructions per second.
1532 + * which is close to the processor frequency, and then close to the
1533 + * PMU clock rate.
1534 + *
1535 + * 2 is the number of instructions per loop
1536 + *
1537 + */
1538 +
1539 + timer_countup_ticks = - ((loops_per_jiffy * HZ * 2) / poll_freq);
1540 +
1541 + /* enable the CCNT (clock count) timer from the PMU */
1542 + __asm__(" mcr p14,0,%0,c0,c1,0\n"
1543 + : : "r" (controlRegisterMask));
1544 +}
1545 +
1546 +static void pmu_timer_disable(void)
1547 +{
1548 + unsigned long flags;
1549 +
1550 + local_irq_save(flags);
1551 +
1552 + __asm__(" mrc p14,0,r1,c4,c1,0; " /* get int enable register */
1553 + " and r1,r1,#0x1e; "
1554 + " mcr p14,0,r1,c4,c1,0\n" /* disable interrupts */
1555 + : : : "r1");
1556 + local_irq_restore(flags);
1557 +}
1558 +
1559 +void queue_set_watermarks(struct qm_queue *queue, unsigned ne, unsigned nf)
1560 +{
1561 + u32 val;
1562 + /* calculate the register values
1563 +	 * 0->0, 1->1, 2->2, 4->3, 8->4, 16->5...*/
1564 + ne = _log2(ne<<1) & 0x7;
1565 + nf = _log2(nf<<1) & 0x7;
1566 +
1567 + /* Mask out old watermarks */
1568 + val = queue_read_cfg_reg(queue) & ~0xfc000000;
1569 + queue_write_cfg_reg(queue, val | (ne << 26) | (nf << 29));
1570 +}
1571 +
1572 +int queue_set_irq_src(struct qm_queue *queue, int flag)
1573 +{
1574 + struct qm_qmgr *qmgr = dev_get_drvdata(queue->dev);
1575 + u32 reg;
1576 + int offs, bitoffs;
1577 +
1578 + /* Q 0-7 are in REG0, 8-15 are in REG1, etc. They occupy 4 bits/Q */
1579 + offs = queue->id/8 + INT0_SRC_SELREG0;
1580 + bitoffs = (queue->id % 8)*4;
1581 +
1582 + reg = *(qmgr->addr + offs) & ~(0xf << bitoffs);
1583 + *(qmgr->addr + offs) = reg | (flag << bitoffs);
1584 +
1585 + return 0;
1586 +}
1587 +
1588 +static irqreturn_t irq_qm1(int irq, void *dev_id)
1589 +{
1590 + struct qm_qmgr *qmgr = dev_id;
1591 + int offs, reg;
1592 + struct qm_queue *queue;
1593 +
1594 + if (poll_enable)
1595 + pmu_timer_restart();
1596 +
1597 + reg = *(qmgr->addr + QUE_INT_REG0);
1598 + while(reg) {
1599 + /*
1600 + * count leading zeros. "offs" gets
1601 + * the amount of leading 0 in "reg"
1602 + */
1603 + asm ("clz %0, %1;" : "=r"(offs) : "r"(reg));
1604 + offs = 31 - offs;
1605 + reg &= ~(1 << offs);
1606 + queue = qmgr->queues[offs];
1607 + if (likely(queue)) {
1608 + if (likely(queue->irq_cb)) {
1609 + queue->irq_cb(queue);
1610 + } else {
1611 + printk(KERN_ERR "Missing callback for Q %d\n",
1612 + offs);
1613 + }
1614 + } else {
1615 + printk(KERN_ERR "IRQ for unregistered Q %d\n", offs);
1616 + }
1617 + }
1618 + return IRQ_HANDLED;
1619 +}
1620 +
1621 +struct qm_queue *request_queue(int qid, int len)
1622 +{
1623 + int ram;
1624 + struct qm_qmgr *qmgr;
1625 + struct qm_queue *queue;
1626 +
1627 + if (!qmgr_dev)
1628 + return ERR_PTR(-ENODEV);
1629 +
1630 + if ((qid < 0) || (qid > MAX_QUEUES))
1631 + return ERR_PTR(-ERANGE);
1632 +
1633 + switch (len) {
1634 + case 16:
1635 + case 32:
1636 + case 64:
1637 + case 128: break;
1638 + default : return ERR_PTR(-EINVAL);
1639 + }
1640 +
1641 + qmgr = dev_get_drvdata(qmgr_dev);
1642 +
1643 + if (qmgr->queues[qid]) {
1644 + /* not an error, just in use already */
1645 + return NULL;
1646 + }
1647 + if ((ram = get_free_qspace(qmgr, len)) < 0) {
1648 + printk(KERN_ERR "No free SRAM space for this queue\n");
1649 + return ERR_PTR(-ENOMEM);
1650 + }
1651 + if (!(queue = kzalloc(sizeof(struct qm_queue), GFP_KERNEL)))
1652 + return ERR_PTR(-ENOMEM);
1653 +
1654 + if (!try_module_get(THIS_MODULE)) {
1655 + kfree(queue);
1656 + return ERR_PTR(-ENODEV);
1657 + }
1658 +
1659 + queue->addr = ram;
1660 + queue->len = len;
1661 + queue->id = qid;
1662 + queue->dev = get_device(qmgr_dev);
1663 + queue->acc_reg = qmgr->addr + (4 * qid);
1664 + qmgr->queues[qid] = queue;
1665 + if (request_pool(qmgr_dev, len)) {
1666 + printk(KERN_ERR "Failed to request DMA pool of Q %d\n", qid);
1667 + }
1668 +
1669 + conf_q_regs(queue);
1670 + return queue;
1671 +}
1672 +
1673 +void release_queue(struct qm_queue *queue)
1674 +{
1675 + struct qm_qmgr *qmgr = dev_get_drvdata(queue->dev);
1676 +
1677 + BUG_ON(qmgr->queues[queue->id] != queue);
1678 + qmgr->queues[queue->id] = NULL;
1679 +
1680 + if (free_pool(queue->dev, queue->len)) {
1681 + printk(KERN_ERR "Failed to release DMA pool of Q %d\n",
1682 + queue->id);
1683 + }
1684 + queue_disable_irq(queue);
1685 + queue_write_cfg_reg(queue, 0);
1686 +
1687 + module_put(THIS_MODULE);
1688 + put_device(queue->dev);
1689 + kfree(queue);
1690 +}
1691 +
1692 +
1693 +
1694 +
1695 +static int qmgr_probe(struct platform_device *pdev)
1696 +{
1697 + struct resource *res;
1698 + struct qm_qmgr *qmgr;
1699 + int size, ret=0, i;
1700 +
1701 + if (!(res = platform_get_resource(pdev, IORESOURCE_MEM, 0)))
1702 + return -EIO;
1703 +
1704 + if ((i = platform_get_irq(pdev, 0)) < 0)
1705 + return -EIO;
1706 +
1707 + if (!(qmgr = kzalloc(sizeof(struct qm_qmgr), GFP_KERNEL)))
1708 + return -ENOMEM;
1709 +
1710 + qmgr->irq = i;
1711 + size = res->end - res->start +1;
1712 + qmgr->res = request_mem_region(res->start, size, "ixp_qmgr");
1713 + if (!qmgr->res) {
1714 + ret = -EBUSY;
1715 + goto out_free;
1716 + }
1717 +
1718 + qmgr->addr = ioremap(res->start, size);
1719 + if (!qmgr->addr) {
1720 + ret = -ENOMEM;
1721 + goto out_rel;
1722 + }
1723 +
1724 + /* Reset Q registers */
1725 + for (i=0; i<4; i++)
1726 + *(qmgr->addr + QUE_LOW_STAT0 +i) = 0x33333333;
1727 + for (i=0; i<10; i++)
1728 + *(qmgr->addr + QUE_UO_STAT0 +i) = 0x0;
1729 + for (i=0; i<4; i++)
1730 + *(qmgr->addr + INT0_SRC_SELREG0 +i) = 0x0;
1731 + for (i=0; i<2; i++) {
1732 + *(qmgr->addr + QUE_IE_REG0 +i) = 0x00;
1733 + *(qmgr->addr + QUE_INT_REG0 +i) = 0xffffffff;
1734 + }
1735 + for (i=0; i<64; i++) {
1736 + *(qmgr->addr + IX_QMGR_QCFG_BASE + i) = 0x0;
1737 + }
1738 +
1739 + if (poll_enable) {
1740 + pmu_timer_init();
1741 + qmgr->irq = IRQ_IXP4XX_XSCALE_PMU;
1742 + }
1743 + ret = request_irq(qmgr->irq, irq_qm1, SA_SHIRQ | SA_INTERRUPT,
1744 + "qmgr", qmgr);
1745 + if (ret) {
1746 + printk(KERN_ERR "Failed to request IRQ(%d)\n", qmgr->irq);
1747 + ret = -EIO;
1748 + goto out_rel;
1749 + }
1750 + if (poll_enable)
1751 + pmu_timer_restart();
1752 +
1753 + rwlock_init(&qmgr->lock);
1754 + qmgr->dmapool = dma_pool_create("qmgr", &pdev->dev,
1755 + sizeof(struct npe_cont), 32, 0);
1756 + platform_set_drvdata(pdev, qmgr);
1757 +
1758 + qmgr_dev = &pdev->dev;
1759 +
1760 + printk(KERN_INFO IXQMGR_VERSION " initialized.\n");
1761 +
1762 + return 0;
1763 +
1764 +out_rel:
1765 + release_resource(qmgr->res);
1766 +out_free:
1767 + kfree(qmgr);
1768 + return ret;
1769 +}
1770 +
1771 +static int qmgr_remove(struct platform_device *pdev)
1772 +{
1773 + struct qm_qmgr *qmgr = platform_get_drvdata(pdev);
1774 + int i;
1775 +
1776 + for (i=0; i<MAX_QUEUES; i++) {
1777 + if (qmgr->queues[i]) {
1778 + printk(KERN_ERR "WARNING Unreleased Q: %d\n", i);
1779 + release_queue(qmgr->queues[i]);
1780 + }
1781 + }
1782 +
1783 + if (poll_enable)
1784 + pmu_timer_disable();
1785 +
1786 + synchronize_irq (qmgr->irq);
1787 + free_irq(qmgr->irq, qmgr);
1788 +
1789 + dma_pool_destroy(qmgr->dmapool);
1790 + iounmap(qmgr->addr);
1791 + release_resource(qmgr->res);
1792 + platform_set_drvdata(pdev, NULL);
1793 + qmgr_dev = NULL;
1794 + kfree(qmgr);
1795 + return 0;
1796 +}
1797 +
1798 +static struct platform_driver ixp4xx_qmgr = {
1799 + .driver.name = "ixp4xx_qmgr",
1800 + .probe = qmgr_probe,
1801 + .remove = qmgr_remove,
1802 +};
1803 +
1804 +
1805 +static int __init init_qmgr(void)
1806 +{
1807 + return platform_driver_register(&ixp4xx_qmgr);
1808 +}
1809 +
1810 +static void __exit finish_qmgr(void)
1811 +{
1812 + platform_driver_unregister(&ixp4xx_qmgr);
1813 +}
1814 +
1815 +module_init(init_qmgr);
1816 +module_exit(finish_qmgr);
1817 +
1818 +MODULE_LICENSE("GPL");
1819 +MODULE_AUTHOR("Christian Hohnstaedt <chohnstaedt@innominate.com>");
1820 +
1821 +EXPORT_SYMBOL(request_queue);
1822 +EXPORT_SYMBOL(release_queue);
1823 +EXPORT_SYMBOL(queue_set_irq_src);
1824 +EXPORT_SYMBOL(queue_set_watermarks);
1825 +EXPORT_SYMBOL(queue_len);
1826 diff -Naur linux-2.6.19.orig/drivers/net/ixp4xx/Kconfig linux-2.6.19/drivers/net/ixp4xx/Kconfig
1827 --- linux-2.6.19.orig/drivers/net/ixp4xx/Kconfig 1969-12-31 17:00:00.000000000 -0700
1828 +++ linux-2.6.19/drivers/net/ixp4xx/Kconfig 2007-01-12 21:54:40.000000000 -0700
1829 @@ -0,0 +1,48 @@
1830 +config IXP4XX_QMGR
1831 + tristate "IXP4xx Queue Manager support"
1832 + depends on ARCH_IXP4XX
1833 + depends on NET_ETHERNET
1834 + help
1835 + The IXP4XX Queue manager is a configurable hardware ringbuffer.
1836 + It is used by the NPEs to exchange data from and to the CPU.
1837 + You can either use this OR the Intel Access Library (IAL)
1838 +
1839 +config IXP4XX_NPE
1840 + tristate "IXP4xx NPE support"
1841 + depends on ARCH_IXP4XX
1842 + depends on NET_ETHERNET
1843 + help
1844 + The IXP4XX NPE driver supports the 3 CPU co-processors called
1845 +	  "Network Processing Engines" (NPE). It adds support for downloading
1846 + the Microcode (firmware) via Hotplug or character-special-device.
1847 + More about this at: Documentation/networking/ixp4xx/README.
1848 + You can either use this OR the Intel Access Library (IAL)
1849 +
1850 +config IXP4XX_FW_LOAD
1851 + bool "Use Firmware hotplug for Microcode download"
1852 + depends on IXP4XX_NPE
1853 + select HOTPLUG
1854 + select FW_LOADER
1855 + help
1856 + The default hotplug script will load the Firmware from
1857 + /usr/lib/hotplug/firmware/NPE-[ABC]
1858 + see Documentation/firmware_class/hotplug-script
1859 +
1860 +config IXP4XX_MAC
1861 + tristate "IXP4xx MAC support"
1862 + depends on IXP4XX_NPE
1863 + depends on IXP4XX_QMGR
1864 + depends on NET_ETHERNET
1865 + select MII
1866 + help
1867 + The IXP4XX MAC driver supports the MACs on the IXP4XX CPUs.
1868 + There are 2 on ixp425 and up to 5 on ixdp465.
1869 + You can either use this OR the Intel Access Library (IAL)
1870 +
1871 +config IXP4XX_CRYPTO
1872 + tristate "IXP4xx crypto support"
1873 + depends on IXP4XX_NPE
1874 + depends on IXP4XX_QMGR
1875 + help
1876 + This driver is a generic NPE-crypto access layer.
1877 + You need additional code in OCF for example.
1878 diff -Naur linux-2.6.19.orig/drivers/net/ixp4xx/mac_driver.c linux-2.6.19/drivers/net/ixp4xx/mac_driver.c
1879 --- linux-2.6.19.orig/drivers/net/ixp4xx/mac_driver.c 1969-12-31 17:00:00.000000000 -0700
1880 +++ linux-2.6.19/drivers/net/ixp4xx/mac_driver.c 2007-01-12 21:54:40.000000000 -0700
1881 @@ -0,0 +1,849 @@
1882 +/*
1883 + * mac_driver.c - provide a network interface for each MAC
1884 + *
1885 + * Copyright (C) 2006 Christian Hohnstaedt <chohnstaedt@innominate.com>
1886 + *
1887 + * This file is released under the GPLv2
1888 + */
1889 +
1890 +#include <linux/kernel.h>
1891 +#include <linux/module.h>
1892 +#include <linux/platform_device.h>
1893 +#include <linux/netdevice.h>
1894 +#include <linux/etherdevice.h>
1895 +#include <linux/ethtool.h>
1896 +#include <linux/slab.h>
1897 +#include <linux/delay.h>
1898 +#include <linux/err.h>
1899 +#include <linux/dma-mapping.h>
1900 +#include <linux/workqueue.h>
1901 +#include <asm/io.h>
1902 +#include <asm/irq.h>
1903 +
1904 +
1905 +#include <linux/ixp_qmgr.h>
1906 +#include <linux/ixp_npe.h>
1907 +#include "mac.h"
1908 +
1909 +#define MDIO_INTERVAL (3*HZ)
1910 +#define RX_QUEUE_PREFILL 64
1911 +#define TX_QUEUE_PREFILL 16
1912 +
1913 +#define IXMAC_NAME "ixp4xx_mac"
1914 +#define IXMAC_VERSION "0.3.1"
1915 +
1916 +#define MAC_DEFAULT_REG(mac, name) \
1917 + mac_write_reg(mac, MAC_ ## name, MAC_ ## name ## _DEFAULT)
1918 +
1919 +#define TX_DONE_QID 31
1920 +
1921 +#define DMA_ALLOC_SIZE 2048
1922 +#define DMA_HDR_SIZE (sizeof(struct npe_cont))
1923 +#define DMA_BUF_SIZE (DMA_ALLOC_SIZE - DMA_HDR_SIZE)
1924 +
1925 +/* Since the NPEs use 1 Return Q for sent frames, we need a device
1926 + * independent return Q. We call it tx_doneq.
1927 + * It will be initialized during module load and uninitialized
1928 + * during module unload. Evil hack, but there is no choice :-(
1929 + */
1930 +
1931 +static struct qm_queue *tx_doneq = NULL;
1932 +static int debug = -1;
1933 +module_param(debug, int, 0);
1934 +
1935 +static int init_buffer(struct qm_queue *queue, int count)
1936 +{
1937 + int i;
1938 + struct npe_cont *cont;
1939 +
1940 + for (i=0; i<count; i++) {
1941 + cont = kmalloc(DMA_ALLOC_SIZE, GFP_KERNEL | GFP_DMA);
1942 + if (!cont)
1943 + goto err;
1944 +
1945 + cont->phys = dma_map_single(queue->dev, cont, DMA_ALLOC_SIZE,
1946 + DMA_BIDIRECTIONAL);
1947 + if (dma_mapping_error(cont->phys))
1948 + goto err;
1949 +
1950 + cont->data = cont+1;
1951 + /* now the buffer is on a 32 bit boundary.
1952 + * we add 2 bytes for good alignment to SKB */
1953 + cont->data+=2;
1954 + cont->eth.next = 0;
1955 + cont->eth.buf_len = cpu_to_npe16(DMA_BUF_SIZE);
1956 + cont->eth.pkt_len = 0;
1957 + /* also add 2 alignment bytes from cont->data*/
1958 + cont->eth.phys_addr = cpu_to_npe32(cont->phys+ DMA_HDR_SIZE+ 2);
1959 +
1960 + dma_sync_single(queue->dev, cont->phys, DMA_HDR_SIZE,
1961 + DMA_TO_DEVICE);
1962 +
1963 + queue_put_entry(queue, cont->phys);
1964 + if (queue_stat(queue) == 2) { /* overflow */
1965 + dma_unmap_single(queue->dev, cont->phys, DMA_ALLOC_SIZE,
1966 + DMA_BIDIRECTIONAL);
1967 + goto err;
1968 + }
1969 + }
1970 + return i;
1971 +err:
1972 + if (cont)
1973 + kfree(cont);
1974 + return i;
1975 +}
1976 +
1977 +static int destroy_buffer(struct qm_queue *queue, int count)
1978 +{
1979 + u32 phys;
1980 + int i;
1981 + struct npe_cont *cont;
1982 +
1983 + for (i=0; i<count; i++) {
1984 + phys = queue_get_entry(queue) & ~0xf;
1985 + if (!phys)
1986 + break;
1987 + dma_unmap_single(queue->dev, phys, DMA_ALLOC_SIZE,
1988 + DMA_BIDIRECTIONAL);
1989 + cont = dma_to_virt(queue->dev, phys);
1990 + kfree(cont);
1991 + }
1992 + return i;
1993 +}
1994 +
1995 +static void mac_init(struct mac_info *mac)
1996 +{
1997 + MAC_DEFAULT_REG(mac, TX_CNTRL2);
1998 + MAC_DEFAULT_REG(mac, RANDOM_SEED);
1999 + MAC_DEFAULT_REG(mac, THRESH_P_EMPTY);
2000 + MAC_DEFAULT_REG(mac, THRESH_P_FULL);
2001 + MAC_DEFAULT_REG(mac, TX_DEFER);
2002 + MAC_DEFAULT_REG(mac, TX_TWO_DEFER_1);
2003 + MAC_DEFAULT_REG(mac, TX_TWO_DEFER_2);
2004 + MAC_DEFAULT_REG(mac, SLOT_TIME);
2005 + MAC_DEFAULT_REG(mac, INT_CLK_THRESH);
2006 + MAC_DEFAULT_REG(mac, BUF_SIZE_TX);
2007 + MAC_DEFAULT_REG(mac, TX_CNTRL1);
2008 + MAC_DEFAULT_REG(mac, RX_CNTRL1);
2009 +}
2010 +
2011 +static void mac_set_uniaddr(struct net_device *dev)
2012 +{
2013 + int i;
2014 + struct mac_info *mac = netdev_priv(dev);
2015 + struct npe_info *npe = dev_get_drvdata(mac->npe_dev);
2016 +
2017 + /* check for multicast */
2018 + if (dev->dev_addr[0] & 1)
2019 + return;
2020 +
2021 + npe_mh_setportaddr(npe, mac->plat, dev->dev_addr);
2022 + npe_mh_disable_firewall(npe, mac->plat);
2023 + for (i=0; i<dev->addr_len; i++)
2024 + mac_write_reg(mac, MAC_UNI_ADDR + i, dev->dev_addr[i]);
2025 +}
2026 +
2027 +static void update_duplex_mode(struct net_device *dev)
2028 +{
2029 + struct mac_info *mac = netdev_priv(dev);
2030 + if (netif_msg_link(mac)) {
2031 + printk(KERN_DEBUG "Link of %s is %s-duplex\n", dev->name,
2032 + mac->mii.full_duplex ? "full" : "half");
2033 + }
2034 + if (mac->mii.full_duplex) {
2035 + mac_reset_regbit(mac, MAC_TX_CNTRL1, TX_CNTRL1_DUPLEX);
2036 + } else {
2037 + mac_set_regbit(mac, MAC_TX_CNTRL1, TX_CNTRL1_DUPLEX);
2038 + }
2039 +}
2040 +
2041 +static int media_check(struct net_device *dev, int init)
2042 +{
2043 + struct mac_info *mac = netdev_priv(dev);
2044 +
2045 + if (mii_check_media(&mac->mii, netif_msg_link(mac), init)) {
2046 + update_duplex_mode(dev);
2047 + return 1;
2048 + }
2049 + return 0;
2050 +}
2051 +
2052 +static void get_npe_stats(struct mac_info *mac, u32 *buf, int len, int reset)
2053 +{
2054 +	struct npe_info *npe = dev_get_drvdata(mac->npe_dev);
2055 +	u32 phys;
2056 +
2057 +	memset(buf, 0, len);
2058 +	phys = dma_map_single(mac->npe_dev, buf, len, DMA_BIDIRECTIONAL);
2059 +	npe_mh_get_stats(npe, mac->plat, phys, reset);
2060 +	dma_unmap_single(mac->npe_dev, phys, len, DMA_BIDIRECTIONAL);
2061 +}
2062 +
2063 +static void irqcb_recv(struct qm_queue *queue)
2064 +{
2065 + struct net_device *dev = queue->cb_data;
2066 +
2067 + queue_ack_irq(queue);
2068 + queue_disable_irq(queue);
2069 + if (netif_running(dev))
2070 + netif_rx_schedule(dev);
2071 +}
2072 +
2073 +int ix_recv(struct net_device *dev, int *budget, struct qm_queue *queue)
2074 +{
2075 +	struct mac_info *mac = netdev_priv(dev);
2076 +	struct sk_buff *skb;
2077 +	u32 phys;
2078 +	struct npe_cont *cont;
2079 +
2080 +	while (*budget > 0 && netif_running(dev) ) {
2081 +		int len;
2082 +		phys = queue_get_entry(queue) & ~0xf;
2083 +		if (!phys)
2084 +			break;
2085 +		dma_sync_single(queue->dev, phys, DMA_HDR_SIZE,
2086 +				DMA_FROM_DEVICE);
2087 +		cont = dma_to_virt(queue->dev, phys);
2088 +		len = npe_to_cpu16(cont->eth.pkt_len) -4; /* strip FCS */
2089 +
2090 +		if (unlikely(netif_msg_rx_status(mac))) {
2091 +			printk(KERN_DEBUG "%s: RX packet size: %u\n",
2092 +				dev->name, len);
2093 +			queue_state(mac->rxq);
2094 +			queue_state(mac->rxdoneq);
2095 +		}
2096 +		skb = dev_alloc_skb(len + 2);
2097 +		if (likely(skb)) {
2098 +			skb->dev = dev;
2099 +			skb_reserve(skb, 2);
2100 +			dma_sync_single(queue->dev, cont->eth.phys_addr, len,
2101 +					DMA_FROM_DEVICE);
2102 +#ifdef CONFIG_NPE_ADDRESS_COHERENT
2103 +			/* swap the payload of the SKB */
2104 +			{
2105 +				u32 *t = (u32*)(skb->data-2);
2106 +				u32 *s = (u32*)(cont->data-2);
2107 +				int i, j = (len+5)/4;
2108 +				for (i=0; i<j; i++)
2109 +					t[i] = cpu_to_be32(s[i]);
2110 +			}
2111 +#else
2112 +			eth_copy_and_sum(skb, cont->data, len, 0);
2113 +#endif
2114 +			skb_put(skb, len);
2115 +			skb->protocol = eth_type_trans(skb, dev);
2116 +			dev->last_rx = jiffies;
2117 +			mac->stat.rx_packets++;
2118 +			mac->stat.rx_bytes += skb->len;
2119 +			netif_receive_skb(skb); /* consumes skb: account first */
2120 +		} else {
2121 +			mac->stat.rx_dropped++;
2122 +		}
2123 +		cont->eth.buf_len = cpu_to_npe16(DMA_BUF_SIZE);
2124 +		cont->eth.pkt_len = 0;
2125 +		dma_sync_single(queue->dev, phys, DMA_HDR_SIZE, DMA_TO_DEVICE);
2126 +		queue_put_entry(mac->rxq, phys);
2127 +		dev->quota--;
2128 +		(*budget)--;
2129 +	}
2130 +
2131 +	return !*budget; /* was !budget: pointer test, so always false */
2132 +}
2133 +
2134 +static int ix_poll(struct net_device *dev, int *budget)
2135 +{
2136 + struct mac_info *mac = netdev_priv(dev);
2137 + struct qm_queue *queue = mac->rxdoneq;
2138 +
2139 + for (;;) {
2140 + if (ix_recv(dev, budget, queue))
2141 + return 1;
2142 + netif_rx_complete(dev);
2143 + queue_enable_irq(queue);
2144 + if (!queue_len(queue))
2145 + break;
2146 + queue_disable_irq(queue);
2147 + if (netif_rx_reschedule(dev, 0))
2148 + break;
2149 + }
2150 + return 0;
2151 +}
2152 +
2153 +static void ixmac_set_rx_mode (struct net_device *dev)
2154 +{
2155 + struct mac_info *mac = netdev_priv(dev);
2156 + struct dev_mc_list *mclist;
2157 + u8 aset[dev->addr_len], aclear[dev->addr_len];
2158 + int i,j;
2159 +
2160 + if (dev->flags & IFF_PROMISC) {
2161 + mac_reset_regbit(mac, MAC_RX_CNTRL1, RX_CNTRL1_ADDR_FLTR_EN);
2162 + } else {
2163 + mac_set_regbit(mac, MAC_RX_CNTRL1, RX_CNTRL1_ADDR_FLTR_EN);
2164 +
2165 + mclist = dev->mc_list;
2166 + memset(aset, 0xff, dev->addr_len);
2167 + memset(aclear, 0x00, dev->addr_len);
2168 + for (i = 0; mclist && i < dev->mc_count; i++) {
2169 + for (j=0; j< dev->addr_len; j++) {
2170 + aset[j] &= mclist->dmi_addr[j];
2171 + aclear[j] |= mclist->dmi_addr[j];
2172 + }
2173 + mclist = mclist->next;
2174 + }
2175 + for (j=0; j< dev->addr_len; j++) {
2176 + aclear[j] = aset[j] | ~aclear[j];
2177 + }
2178 + for (i=0; i<dev->addr_len; i++) {
2179 + mac_write_reg(mac, MAC_ADDR + i, aset[i]);
2180 + mac_write_reg(mac, MAC_ADDR_MASK + i, aclear[i]);
2181 + }
2182 + }
2183 +}
2184 +
2185 +static int ixmac_open (struct net_device *dev)
2186 +{
2187 + struct mac_info *mac = netdev_priv(dev);
2188 + struct npe_info *npe = dev_get_drvdata(mac->npe_dev);
2189 + u32 buf[NPE_STAT_NUM];
2190 + int i;
2191 + u32 phys;
2192 +
2193 + /* first check if the NPE is up and running */
2194 + if (!( npe_status(npe) & IX_NPEDL_EXCTL_STATUS_RUN)) {
2195 + printk(KERN_ERR "%s: %s not running\n", dev->name,
2196 + npe->plat->name);
2197 + return -EIO;
2198 + }
2199 + if (npe_mh_status(npe)) {
2200 + printk(KERN_ERR "%s: %s not responding\n", dev->name,
2201 + npe->plat->name);
2202 + return -EIO;
2203 + }
2204 + mac->txq_pkt += init_buffer(mac->txq, TX_QUEUE_PREFILL - mac->txq_pkt);
2205 + mac->rxq_pkt += init_buffer(mac->rxq, RX_QUEUE_PREFILL - mac->rxq_pkt);
2206 +
2207 + queue_enable_irq(mac->rxdoneq);
2208 +
2209 +	/* drain all buffers from the RX-done-q to make the IRQ happen */
2210 + while ((phys = queue_get_entry(mac->rxdoneq) & ~0xf)) {
2211 + struct npe_cont *cont;
2212 + cont = dma_to_virt(mac->rxdoneq->dev, phys);
2213 + cont->eth.buf_len = cpu_to_npe16(DMA_BUF_SIZE);
2214 + cont->eth.pkt_len = 0;
2215 + dma_sync_single(mac->rxdoneq->dev, phys, DMA_HDR_SIZE,
2216 + DMA_TO_DEVICE);
2217 + queue_put_entry(mac->rxq, phys);
2218 + }
2219 + mac_init(mac);
2220 + npe_mh_set_rxqid(npe, mac->plat, mac->plat->rxdoneq_id);
2221 + get_npe_stats(mac, buf, sizeof(buf), 1); /* reset stats */
2222 + get_npe_stats(mac, buf, sizeof(buf), 0);
2223 + /*
2224 + * if the extended stats contain random values
2225 + * the NPE image lacks extendet statistic counters
2226 + */
2227 + for (i=NPE_STAT_NUM_BASE; i<NPE_STAT_NUM; i++) {
2228 + if (buf[i] >10000)
2229 + break;
2230 + }
2231 + mac->npe_stat_num = i<NPE_STAT_NUM ? NPE_STAT_NUM_BASE : NPE_STAT_NUM;
2232 + mac->npe_stat_num += NPE_Q_STAT_NUM;
2233 +
2234 + mac_set_uniaddr(dev);
2235 + media_check(dev, 1);
2236 + ixmac_set_rx_mode(dev);
2237 + netif_start_queue(dev);
2238 + schedule_delayed_work(&mac->mdio_thread, MDIO_INTERVAL);
2239 + if (netif_msg_ifup(mac)) {
2240 + printk(KERN_DEBUG "%s: open " IXMAC_NAME
2241 + " RX queue %d bufs, TX queue %d bufs\n",
2242 + dev->name, mac->rxq_pkt, mac->txq_pkt);
2243 + }
2244 + return 0;
2245 +}
2246 +
2247 +static int ixmac_start_xmit (struct sk_buff *skb, struct net_device *dev)
2248 +{
2249 +	struct mac_info *mac = netdev_priv(dev);
2250 +	struct npe_cont *cont;
2251 +	u32 phys;
2252 +	struct qm_queue *queue = mac->txq;
2253 +
2254 +	if (unlikely(skb->len > DMA_BUF_SIZE)) {
2255 +		dev_kfree_skb(skb);
2256 +		mac->stat.tx_errors++;
2257 +		return NETDEV_TX_OK;
2258 +	}
2259 +	phys = queue_get_entry(tx_doneq) & ~0xf;
2260 +	if (!phys)
2261 +		goto busy;
2262 +	cont = dma_to_virt(queue->dev, phys);
2263 +#ifdef CONFIG_NPE_ADDRESS_COHERENT
2264 +	/* swap the payload of the SKB */
2265 +	{
2266 +		u32 *s = (u32*)(skb->data-2);
2267 +		u32 *t = (u32*)(cont->data-2);
2268 +		int i,j = (skb->len+5) / 4;
2269 +		for (i=0; i<j; i++)
2270 +			t[i] = cpu_to_be32(s[i]);
2271 +	}
2272 +#else
2273 +	//skb_copy_and_csum_dev(skb, cont->data);
2274 +	memcpy(cont->data, skb->data, skb->len);
2275 +#endif
2276 +	cont->eth.buf_len = cpu_to_npe16(DMA_BUF_SIZE);
2277 +	cont->eth.pkt_len = cpu_to_npe16(skb->len);
2278 +	/* disable VLAN functions in NPE image for now */
2279 +	cont->eth.flags = 0;
2280 +	dma_sync_single(queue->dev, phys, skb->len + DMA_HDR_SIZE,
2281 +			DMA_TO_DEVICE);
2282 +	queue_put_entry(queue, phys);
2283 +	if (queue_stat(queue) == 2) { /* overflow */
2284 +		queue_put_entry(tx_doneq, phys);
2285 +		goto busy;
2286 +	}
2287 +	mac->stat.tx_packets++;
2288 +	mac->stat.tx_bytes += skb->len;
2289 +	dev->trans_start = jiffies;
2290 +	if (netif_msg_tx_queued(mac)) {
2291 +		printk(KERN_DEBUG "%s: TX packet size %u\n",
2292 +			dev->name, skb->len);
2293 +		queue_state(mac->txq);
2294 +		queue_state(tx_doneq);
2295 +	}
2296 +	/* free only after the last skb->len access (was a use-after-free) */
2297 +	dev_kfree_skb(skb);
2298 +	return NETDEV_TX_OK;
2299 +busy:
2300 +	return NETDEV_TX_BUSY;
2301 +}
2302 +
2303 +static int ixmac_close (struct net_device *dev)
2304 +{
2305 + struct mac_info *mac = netdev_priv(dev);
2306 +
2307 + netif_stop_queue (dev);
2308 + queue_disable_irq(mac->rxdoneq);
2309 +
2310 + mac->txq_pkt -= destroy_buffer(tx_doneq, mac->txq_pkt);
2311 + mac->rxq_pkt -= destroy_buffer(mac->rxq, mac->rxq_pkt);
2312 +
2313 + cancel_rearming_delayed_work(&(mac->mdio_thread));
2314 +
2315 + if (netif_msg_ifdown(mac)) {
2316 + printk(KERN_DEBUG "%s: close " IXMAC_NAME
2317 + " RX queue %d bufs, TX queue %d bufs\n",
2318 + dev->name, mac->rxq_pkt, mac->txq_pkt);
2319 + }
2320 + return 0;
2321 +}
2322 +
2323 +static int ixmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2324 +{
2325 + struct mac_info *mac = netdev_priv(dev);
2326 + int rc, duplex_changed;
2327 +
2328 + if (!netif_running(dev))
2329 + return -EINVAL;
2330 + if (!try_module_get(THIS_MODULE))
2331 + return -ENODEV;
2332 + rc = generic_mii_ioctl(&mac->mii, if_mii(rq), cmd, &duplex_changed);
2333 + module_put(THIS_MODULE);
2334 + if (duplex_changed)
2335 + update_duplex_mode(dev);
2336 + return rc;
2337 +}
2338 +
2339 +static struct net_device_stats *ixmac_stats (struct net_device *dev)
2340 +{
2341 + struct mac_info *mac = netdev_priv(dev);
2342 + return &mac->stat;
2343 +}
2344 +
2345 +static void ixmac_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2346 +{
2347 + struct mac_info *mac = netdev_priv(dev);
2348 + struct npe_info *npe = dev_get_drvdata(mac->npe_dev);
2349 +
2350 + strcpy(info->driver, IXMAC_NAME);
2351 + strcpy(info->version, IXMAC_VERSION);
2352 + if (npe_status(npe) & IX_NPEDL_EXCTL_STATUS_RUN) {
2353 + snprintf(info->fw_version, 32, "%d.%d func [%d]",
2354 + npe->img_info[2], npe->img_info[3], npe->img_info[1]);
2355 + }
2356 + strncpy(info->bus_info, npe->plat->name, ETHTOOL_BUSINFO_LEN);
2357 +}
2358 +
2359 +static int ixmac_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2360 +{
2361 + struct mac_info *mac = netdev_priv(dev);
2362 + mii_ethtool_gset(&mac->mii, cmd);
2363 + return 0;
2364 +}
2365 +
2366 +static int ixmac_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2367 +{
2368 + struct mac_info *mac = netdev_priv(dev);
2369 + int rc;
2370 + rc = mii_ethtool_sset(&mac->mii, cmd);
2371 + return rc;
2372 +}
2373 +
2374 +static int ixmac_nway_reset(struct net_device *dev)
2375 +{
2376 + struct mac_info *mac = netdev_priv(dev);
2377 + return mii_nway_restart(&mac->mii);
2378 +}
2379 +
2380 +static u32 ixmac_get_link(struct net_device *dev)
2381 +{
2382 + struct mac_info *mac = netdev_priv(dev);
2383 + return mii_link_ok(&mac->mii);
2384 +}
2385 +
2386 +static const int mac_reg_list[] = MAC_REG_LIST;
2387 +
2388 +static int ixmac_get_regs_len(struct net_device *dev)
2389 +{
2390 + return ARRAY_SIZE(mac_reg_list);
2391 +}
2392 +
2393 +static void
2394 +ixmac_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *regbuf)
2395 +{
2396 + int i;
2397 + struct mac_info *mac = netdev_priv(dev);
2398 + u8 *buf = regbuf;
2399 +
2400 + for (i=0; i<regs->len; i++) {
2401 + buf[i] = mac_read_reg(mac, mac_reg_list[i]);
2402 + }
2403 +}
2404 +
2405 +static struct {
2406 + const char str[ETH_GSTRING_LEN];
2407 +} ethtool_stats_keys[NPE_STAT_NUM + NPE_Q_STAT_NUM] = {
2408 + NPE_Q_STAT_STRINGS
2409 + NPE_STAT_STRINGS
2410 +};
2411 +
2412 +static void ixmac_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2413 +{
2414 + struct mac_info *mac = netdev_priv(dev);
2415 + memcpy(data, ethtool_stats_keys, mac->npe_stat_num * ETH_GSTRING_LEN);
2416 +}
2417 +
2418 +static int ixmac_get_stats_count(struct net_device *dev)
2419 +{
2420 + struct mac_info *mac = netdev_priv(dev);
2421 + return mac->npe_stat_num;
2422 +}
2423 +
2424 +static u32 ixmac_get_msglevel(struct net_device *dev)
2425 +{
2426 + struct mac_info *mac = netdev_priv(dev);
2427 + return mac->msg_enable;
2428 +}
2429 +
2430 +static void ixmac_set_msglevel(struct net_device *dev, u32 datum)
2431 +{
2432 + struct mac_info *mac = netdev_priv(dev);
2433 + mac->msg_enable = datum;
2434 +}
2435 +
2436 +static void ixmac_get_ethtool_stats(struct net_device *dev,
2437 + struct ethtool_stats *stats, u64 *data)
2438 +{
2439 + int i;
2440 + struct mac_info *mac = netdev_priv(dev);
2441 + u32 buf[NPE_STAT_NUM];
2442 +
2443 + data[0] = queue_len(mac->rxq);
2444 + data[1] = queue_len(mac->rxdoneq);
2445 + data[2] = queue_len(mac->txq);
2446 + data[3] = queue_len(tx_doneq);
2447 +
2448 + get_npe_stats(mac, buf, sizeof(buf), 0);
2449 +
2450 + for (i=0; i<stats->n_stats-4; i++) {
2451 + data[i+4] = npe_to_cpu32(buf[i]);
2452 + }
2453 +}
2454 +
2455 +static struct ethtool_ops ixmac_ethtool_ops = {
2456 + .get_drvinfo = ixmac_get_drvinfo,
2457 + .get_settings = ixmac_get_settings,
2458 + .set_settings = ixmac_set_settings,
2459 + .nway_reset = ixmac_nway_reset,
2460 + .get_link = ixmac_get_link,
2461 + .get_msglevel = ixmac_get_msglevel,
2462 + .set_msglevel = ixmac_set_msglevel,
2463 + .get_regs_len = ixmac_get_regs_len,
2464 + .get_regs = ixmac_get_regs,
2465 + .get_perm_addr = ethtool_op_get_perm_addr,
2466 + .get_strings = ixmac_get_strings,
2467 + .get_stats_count = ixmac_get_stats_count,
2468 + .get_ethtool_stats = ixmac_get_ethtool_stats,
2469 +};
2470 +
2471 +static void mac_mdio_thread(void *_data)
2472 +{
2473 + struct net_device *dev = _data;
2474 + struct mac_info *mac = netdev_priv(dev);
2475 +
2476 + media_check(dev, 0);
2477 + schedule_delayed_work(&mac->mdio_thread, MDIO_INTERVAL);
2478 +}
2479 +
2480 +static int mac_probe(struct platform_device *pdev)
2481 +{
2482 +	struct resource *res;
2483 +	struct mac_info *mac;
2484 +	struct net_device *dev;
2485 +	struct npe_info *npe;
2486 +	struct mac_plat_info *plat = pdev->dev.platform_data;
2487 +	int size, ret;
2488 +
2489 +	if (!(res = platform_get_resource(pdev, IORESOURCE_MEM, 0))) {
2490 +		return -EIO;
2491 +	}
2492 +	if (!(dev = alloc_etherdev (sizeof(struct mac_info)))) {
2493 +		return -ENOMEM;
2494 +	}
2495 +	SET_MODULE_OWNER(dev);
2496 +	SET_NETDEV_DEV(dev, &pdev->dev);
2497 +	mac = netdev_priv(dev);
2498 +	mac->netdev = dev;
2499 +
2500 +	size = res->end - res->start +1;
2501 +	mac->res = request_mem_region(res->start, size, IXMAC_NAME);
2502 +	if (!mac->res) {
2503 +		ret = -EBUSY;
2504 +		goto out_free;
2505 +	}
2506 +
2507 +	mac->addr = ioremap(res->start, size);
2508 +	if (!mac->addr) {
2509 +		ret = -ENOMEM;
2510 +		goto out_rel;
2511 +	}
2512 +
2513 +	dev->open = ixmac_open;
2514 +	dev->hard_start_xmit = ixmac_start_xmit;
2515 +	dev->poll = ix_poll;
2516 +	dev->stop = ixmac_close;
2517 +	dev->get_stats = ixmac_stats;
2518 +	dev->do_ioctl = ixmac_ioctl;
2519 +	dev->set_multicast_list = ixmac_set_rx_mode;
2520 +	dev->ethtool_ops = &ixmac_ethtool_ops;
2521 +
2522 +	dev->weight = 16;
2523 +	dev->tx_queue_len = 100;
2524 +
2525 +	mac->npe_dev = get_npe_by_id(plat->npe_id);
2526 +	if (!mac->npe_dev) {
2527 +		ret = -EIO;
2528 +		goto out_unmap;
2529 +	}
2530 +	npe = dev_get_drvdata(mac->npe_dev);
2531 +
2532 +	mac->rxq = request_queue(plat->rxq_id, 128);
2533 +	if (IS_ERR(mac->rxq)) {
2534 +		printk(KERN_ERR "Error requesting Q: %d\n", plat->rxq_id);
2535 +		ret = -EBUSY;
2536 +		goto out_putmod;
2537 +	}
2538 +	mac->txq = request_queue(plat->txq_id, 128);
2539 +	if (IS_ERR(mac->txq)) {
2540 +		printk(KERN_ERR "Error requesting Q: %d\n", plat->txq_id);
2541 +		ret = -EBUSY;
2542 +		goto out_putmod;
2543 +	}
2544 +	mac->rxdoneq = request_queue(plat->rxdoneq_id, 128);
2545 +	if (IS_ERR(mac->rxdoneq)) {
2546 +		printk(KERN_ERR "Error requesting Q: %d\n", plat->rxdoneq_id);
2547 +		ret = -EBUSY;
2548 +		goto out_putmod;
2549 +	}
2550 +	mac->rxdoneq->irq_cb = irqcb_recv;
2551 +	mac->rxdoneq->cb_data = dev;
2552 +	queue_set_watermarks(mac->rxdoneq, 0, 0);
2553 +	queue_set_irq_src(mac->rxdoneq, Q_IRQ_ID_NOT_E);
2554 +
2555 +	mac->qmgr = dev_get_drvdata(mac->rxq->dev);
2556 +	if (register_netdev (dev)) {
2557 +		ret = -EIO;
2558 +		goto out_putmod;
2559 +	}
2560 +
2561 +	mac->plat = plat;
2562 +	mac->npe_stat_num = NPE_STAT_NUM_BASE;
2563 +	mac->msg_enable = netif_msg_init(debug, MAC_DEF_MSG_ENABLE);
2564 +
2565 +	platform_set_drvdata(pdev, dev);
2566 +
2567 +	mac_write_reg(mac, MAC_CORE_CNTRL, CORE_RESET);
2568 +	udelay(500);
2569 +	mac_write_reg(mac, MAC_CORE_CNTRL, CORE_MDC_EN);
2570 +
2571 +	init_mdio(dev, plat->phy_id);
2572 +
2573 +	INIT_WORK(&mac->mdio_thread, mac_mdio_thread, dev);
2574 +
2575 +	/* The place of the MAC address is very system dependent.
2576 +	 * Here we use a random one to be replaced by one of the
2577 +	 * following commands:
2578 +	 *   "ip link set address 02:03:04:04:04:01 dev eth0"
2579 +	 *   "ifconfig eth0 hw ether 02:03:04:04:04:07"
2580 +	*/
2581 +
2582 +	if (is_zero_ether_addr(plat->hwaddr)) {
2583 +		random_ether_addr(dev->dev_addr);
2584 +		dev->dev_addr[5] = plat->phy_id;
2585 +	}
2586 +	else
2587 +		memcpy(dev->dev_addr, plat->hwaddr, 6);
2588 +
2589 +	printk(KERN_INFO IXMAC_NAME " driver " IXMAC_VERSION
2590 +			": %s on %s with PHY[%d] initialized\n",
2591 +			dev->name, npe->plat->name, plat->phy_id);
2592 +
2593 +	return 0;
2594 +
2595 +out_putmod:
2596 +	if (mac->rxq && !IS_ERR(mac->rxq))
2597 +		release_queue(mac->rxq);
2598 +	if (mac->txq && !IS_ERR(mac->txq))
2599 +		release_queue(mac->txq);
2600 +	if (mac->rxdoneq && !IS_ERR(mac->rxdoneq))
2601 +		release_queue(mac->rxdoneq);
2602 +	module_put(mac->npe_dev->driver->owner);
2603 +out_unmap:
2604 +	iounmap(mac->addr);
2605 +out_rel:
2606 +	release_resource(mac->res);
2607 +out_free:
2608 +	free_netdev(dev); /* mac is netdev_priv(dev): kfree(mac) was wrong */
2609 +	return ret;
2610 +}
2611 +
2612 +static void drain_npe(struct mac_info *mac)
2613 +{
2614 +	struct npe_info *npe = dev_get_drvdata(mac->npe_dev);
2615 +	struct npe_cont *cont;
2616 +	u32 phys;
2617 +	int loop = 0;
2618 +
2619 +	/* Now there are some skb hold by the NPE.
2620 +	 * We switch the MAC in loopback mode and send a pseudo packet
2621 +	 * that will be returned by the NPE in its last SKB.
2622 +	 * We will also try to isolate the PHY to keep the packets internal.
2623 +	 */
2624 +
2625 +	if (mac->txq_pkt <2)
2626 +		mac->txq_pkt += init_buffer(tx_doneq, 5);
2627 +
2628 +	if (npe_status(npe) & IX_NPEDL_EXCTL_STATUS_RUN) {
2629 +		mac_reset_regbit(mac, MAC_CORE_CNTRL, CORE_MDC_EN);
2630 +		mac_set_regbit(mac, MAC_RX_CNTRL1, RX_CNTRL1_LOOP_EN);
2631 +
2632 +		npe_mh_npe_loopback_mode(npe, mac->plat, 1);
2633 +		mdelay(200);
2634 +
2635 +		while (mac->rxq_pkt && loop++ < 2000 ) {
2636 +			phys = queue_get_entry(tx_doneq) & ~0xf;
2637 +			if (!phys)
2638 +				break;
2639 +			cont = dma_to_virt(tx_doneq->dev, phys);
2640 +			/* actually the packets should never leave the system,
2641 +			 * but if they do, they shall contain 0s instead of
2642 +			 * interesting random data....
2643 +			 */
2644 +			memset(cont->data, 0, 64);
2645 +			cont->eth.pkt_len = cpu_to_npe16(64);
2646 +			dma_sync_single(mac->txq->dev, phys, 64 + DMA_HDR_SIZE,
2647 +					DMA_TO_DEVICE);
2648 +			queue_put_entry(mac->txq, phys);
2649 +			if (queue_stat(mac->txq) == 2) { /* overflow */
2650 +				queue_put_entry(tx_doneq, phys);
2651 +				break;
2652 +			}
2653 +			mdelay(1);
2654 +			mac->rxq_pkt -= destroy_buffer(mac->rxdoneq,
2655 +					mac->rxq_pkt);
2656 +		}
2657 +		npe_mh_npe_loopback_mode(npe, mac->plat, 0);
2658 +	}
2659 +	/* Flush MAC TX fifo to drain the bogus packets */
2660 +	mac_set_regbit(mac, MAC_CORE_CNTRL, CORE_TX_FIFO_FLUSH);
2661 +	mac_reset_regbit(mac, MAC_RX_CNTRL1, RX_CNTRL1_RX_EN);
2662 +	mac_reset_regbit(mac, MAC_TX_CNTRL1, TX_CNTRL1_TX_EN);
2663 +	mac_reset_regbit(mac, MAC_RX_CNTRL1, RX_CNTRL1_LOOP_EN);
2664 +	mac_reset_regbit(mac, MAC_CORE_CNTRL, CORE_TX_FIFO_FLUSH);
2665 +	mac_reset_regbit(mac, MAC_CORE_CNTRL, CORE_TX_FIFO_FLUSH);
2666 +}
2667 +
2668 +static int mac_remove(struct platform_device *pdev)
2669 +{
2670 +	struct net_device* dev = platform_get_drvdata(pdev);
2671 +	struct mac_info *mac = netdev_priv(dev);
2672 +
2673 +	unregister_netdev(dev);
2674 +
2675 +	mac->rxq_pkt -= destroy_buffer(mac->rxq, mac->rxq_pkt);
2676 +	if (mac->rxq_pkt)
2677 +		drain_npe(mac);
2678 +
2679 +	mac->txq_pkt -= destroy_buffer(mac->txq, mac->txq_pkt);
2680 +	mac->txq_pkt -= destroy_buffer(tx_doneq, mac->txq_pkt);
2681 +
2682 +	if (mac->rxq_pkt || mac->txq_pkt)
2683 +		printk(KERN_WARNING "Buffers lost in NPE: RX:%d, TX:%d\n",
2684 +			mac->rxq_pkt, mac->txq_pkt);
2685 +
2686 +	release_queue(mac->txq);
2687 +	release_queue(mac->rxq);
2688 +	release_queue(mac->rxdoneq);
2689 +
2690 +	flush_scheduled_work();
2691 +	return_npe_dev(mac->npe_dev);
2692 +
2693 +	iounmap(mac->addr);
2694 +	release_resource(mac->res);
2695 +	platform_set_drvdata(pdev, NULL);
2696 +	free_netdev(dev);
2697 +	return 0;
2698 +}
2699 +
2700 +static struct platform_driver ixp4xx_mac = {
2701 + .driver.name = IXMAC_NAME,
2702 + .probe = mac_probe,
2703 + .remove = mac_remove,
2704 +};
2705 +
2706 +static int __init init_mac(void)
2707 +{
2708 + /* The TX done Queue handles skbs sent out by the NPE */
2709 + tx_doneq = request_queue(TX_DONE_QID, 128);
2710 + if (IS_ERR(tx_doneq)) {
2711 + printk(KERN_ERR "Error requesting Q: %d\n", TX_DONE_QID);
2712 + return -EBUSY;
2713 + }
2714 + return platform_driver_register(&ixp4xx_mac);
2715 +}
2716 +
2717 +static void __exit finish_mac(void)
2718 +{
2719 + platform_driver_unregister(&ixp4xx_mac);
2720 + if (tx_doneq) {
2721 + release_queue(tx_doneq);
2722 + }
2723 +}
2724 +
2725 +module_init(init_mac);
2726 +module_exit(finish_mac);
2727 +
2728 +MODULE_LICENSE("GPL");
2729 +MODULE_AUTHOR("Christian Hohnstaedt <chohnstaedt@innominate.com>");
2730 +
2731 diff -Naur linux-2.6.19.orig/drivers/net/ixp4xx/mac.h linux-2.6.19/drivers/net/ixp4xx/mac.h
2732 --- linux-2.6.19.orig/drivers/net/ixp4xx/mac.h 1969-12-31 17:00:00.000000000 -0700
2733 +++ linux-2.6.19/drivers/net/ixp4xx/mac.h 2007-01-12 21:54:40.000000000 -0700
2734 @@ -0,0 +1,275 @@
2735 +/*
2736 + * Copyright (C) 2002-2006 Christian Hohnstaedt <chohnstaedt@innominate.com>
2737 + *
2738 + * This file is released under the GPLv2
2739 + */
2740 +
2741 +#include <linux/resource.h>
2742 +#include <linux/netdevice.h>
2743 +#include <linux/io.h>
2744 +#include <linux/mii.h>
2745 +#include <linux/workqueue.h>
2746 +#include <asm/hardware.h>
2747 +#include <linux/ixp_qmgr.h>
2748 +
2749 +/* 32 bit offsets to be added to u32 *pointers */
2750 +#define MAC_TX_CNTRL1 0x00 // 0x000
2751 +#define MAC_TX_CNTRL2 0x01 // 0x004
2752 +#define MAC_RX_CNTRL1 0x04 // 0x010
2753 +#define MAC_RX_CNTRL2 0x05 // 0x014
2754 +#define MAC_RANDOM_SEED 0x08 // 0x020
2755 +#define MAC_THRESH_P_EMPTY 0x0c // 0x030
2756 +#define MAC_THRESH_P_FULL 0x0e // 0x038
2757 +#define MAC_BUF_SIZE_TX 0x10 // 0x040
2758 +#define MAC_TX_DEFER 0x14 // 0x050
2759 +#define MAC_RX_DEFER 0x15 // 0x054
2760 +#define MAC_TX_TWO_DEFER_1 0x18 // 0x060
2761 +#define MAC_TX_TWO_DEFER_2 0x19 // 0x064
2762 +#define MAC_SLOT_TIME 0x1c // 0x070
2763 +#define MAC_MDIO_CMD 0x20 // 0x080 4 registers 0x20 - 0x23
2764 +#define MAC_MDIO_STS 0x24 // 0x090 4 registers 0x24 - 0x27
2765 +#define MAC_ADDR_MASK 0x28 // 0x0A0 6 registers 0x28 - 0x2d
2766 +#define MAC_ADDR 0x30 // 0x0C0 6 registers 0x30 - 0x35
2767 +#define MAC_INT_CLK_THRESH 0x38 // 0x0E0 1 register
2768 +#define MAC_UNI_ADDR 0x3c // 0x0F0 6 registers 0x3c - 0x41
2769 +#define MAC_CORE_CNTRL 0x7f // 0x1fC
2770 +
2771 +/* TX Control Register 1*/
2772 +
2773 +#define TX_CNTRL1_TX_EN BIT(0)
2774 +#define TX_CNTRL1_DUPLEX BIT(1)
2775 +#define TX_CNTRL1_RETRY BIT(2)
2776 +#define TX_CNTRL1_PAD_EN BIT(3)
2777 +#define TX_CNTRL1_FCS_EN BIT(4)
2778 +#define TX_CNTRL1_2DEFER BIT(5)
2779 +#define TX_CNTRL1_RMII BIT(6)
2780 +
2781 +/* TX Control Register 2 */
2782 +#define TX_CNTRL2_RETRIES_MASK 0xf
2783 +
2784 +/* RX Control Register 1 */
2785 +#define RX_CNTRL1_RX_EN BIT(0)
2786 +#define RX_CNTRL1_PADSTRIP_EN BIT(1)
2787 +#define RX_CNTRL1_CRC_EN BIT(2)
2788 +#define RX_CNTRL1_PAUSE_EN BIT(3)
2789 +#define RX_CNTRL1_LOOP_EN BIT(4)
2790 +#define RX_CNTRL1_ADDR_FLTR_EN BIT(5)
2791 +#define RX_CNTRL1_RX_RUNT_EN BIT(6)
2792 +#define RX_CNTRL1_BCAST_DIS BIT(7)
2793 +
2794 +/* RX Control Register 2 */
2795 +#define RX_CNTRL2_DEFER_EN BIT(0)
2796 +
2797 +/* Core Control Register */
2798 +#define CORE_RESET BIT(0)
2799 +#define CORE_RX_FIFO_FLUSH BIT(1)
2800 +#define CORE_TX_FIFO_FLUSH BIT(2)
2801 +#define CORE_SEND_JAM BIT(3)
2802 +#define CORE_MDC_EN BIT(4)
2803 +
2804 +/* Definitions for MII access routines*/
2805 +
2806 +#define MII_REG_SHL 16
2807 +#define MII_ADDR_SHL 21
2808 +
2809 +#define MII_GO BIT(31)
2810 +#define MII_WRITE BIT(26)
2811 +#define MII_READ_FAIL BIT(31)
2812 +
2813 +#define MII_TIMEOUT_10TH_SECS 5
2814 +#define MII_10TH_SEC_IN_MILLIS 100
2815 +
2816 +/*
2817 + *
2818 + * Default values
2819 + *
2820 + */
2821 +
2822 +#define MAC_DEF_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
2823 +
2824 +#define MAC_TX_CNTRL1_DEFAULT (\
2825 + TX_CNTRL1_TX_EN | \
2826 + TX_CNTRL1_RETRY | \
2827 + TX_CNTRL1_FCS_EN | \
2828 + TX_CNTRL1_2DEFER | \
2829 + TX_CNTRL1_PAD_EN )
2830 +
2831 +#define MAC_TX_MAX_RETRIES_DEFAULT 0x0f
2832 +
2833 +#define MAC_RX_CNTRL1_DEFAULT ( \
2834 + RX_CNTRL1_PADSTRIP_EN | \
2835 + RX_CNTRL1_CRC_EN | \
2836 + RX_CNTRL1_RX_EN )
2837 +
2838 +#define MAC_RX_CNTRL2_DEFAULT 0x0
2839 +#define MAC_TX_CNTRL2_DEFAULT TX_CNTRL2_RETRIES_MASK
2840 +
2841 +/* Thresholds determined by NPE firmware FS */
2842 +#define MAC_THRESH_P_EMPTY_DEFAULT 0x12
2843 +#define MAC_THRESH_P_FULL_DEFAULT 0x30
2844 +
2845 +/* Number of bytes that must be in the tx fifo before
2846 + * transmission commences */
2847 +#define MAC_BUF_SIZE_TX_DEFAULT 0x8
2848 +
2849 +/* One-part deferral values */
2850 +#define MAC_TX_DEFER_DEFAULT 0x15
2851 +#define MAC_RX_DEFER_DEFAULT 0x16
2852 +
2853 +/* Two-part deferral values... */
2854 +#define MAC_TX_TWO_DEFER_1_DEFAULT 0x08
2855 +#define MAC_TX_TWO_DEFER_2_DEFAULT 0x07
2856 +
2857 +/* This value applies to MII */
2858 +#define MAC_SLOT_TIME_DEFAULT 0x80
2859 +
2860 +/* This value applies to RMII */
2861 +#define MAC_SLOT_TIME_RMII_DEFAULT 0xFF
2862 +
2863 +#define MAC_ADDR_MASK_DEFAULT 0xFF
2864 +
2865 +#define MAC_INT_CLK_THRESH_DEFAULT 0x1
2866 +/* The following is a value chosen at random */
2867 +#define MAC_RANDOM_SEED_DEFAULT 0x8
2868 +
2869 +/* By default we must configure the MAC to generate the MDC clock*/
2870 +#define CORE_DEFAULT (CORE_MDC_EN)
2871 +
2872 +/* End of Intel provided register information */
2873 +
2874 +extern int
2875 +mdio_read_register(struct net_device *dev, int phy_addr, int phy_reg);
2876 +extern void
2877 +mdio_write_register(struct net_device *dev, int phy_addr, int phy_reg, int val);
2878 +extern void init_mdio(struct net_device *dev, int phy_id);
2879 +
2880 +struct mac_info {
2881 + u32 __iomem *addr;
2882 + struct resource *res;
2883 + struct device *npe_dev;
2884 + struct net_device *netdev;
2885 + struct qm_qmgr *qmgr;
2886 + struct qm_queue *rxq;
2887 + struct qm_queue *txq;
2888 + struct qm_queue *rxdoneq;
2889 + u32 irqflags;
2890 + struct net_device_stats stat;
2891 + struct mii_if_info mii;
2892 + struct work_struct mdio_thread;
2893 + int rxq_pkt;
2894 + int txq_pkt;
2895 + int unloading;
2896 + struct mac_plat_info *plat;
2897 + int npe_stat_num;
2898 + spinlock_t rx_lock;
2899 + u32 msg_enable;
2900 +};
2901 +
2902 +static inline void mac_write_reg(struct mac_info *mac, int offset, u32 val)
2903 +{
2904 + *(mac->addr + offset) = val;
2905 +}
2906 +static inline u32 mac_read_reg(struct mac_info *mac, int offset)
2907 +{
2908 + return *(mac->addr + offset);
2909 +}
2910 +static inline void mac_set_regbit(struct mac_info *mac, int offset, u32 bit)
2911 +{
2912 + mac_write_reg(mac, offset, mac_read_reg(mac, offset) | bit);
2913 +}
2914 +static inline void mac_reset_regbit(struct mac_info *mac, int offset, u32 bit)
2915 +{
2916 + mac_write_reg(mac, offset, mac_read_reg(mac, offset) & ~bit);
2917 +}
2918 +
2919 +static inline void mac_mdio_cmd_write(struct mac_info *mac, u32 cmd)
2920 +{
2921 + int i;
2922 + for(i=0; i<4; i++) {
2923 + mac_write_reg(mac, MAC_MDIO_CMD + i, cmd & 0xff);
2924 + cmd >>=8;
2925 + }
2926 +}
2927 +
2928 +#define mac_mdio_cmd_read(mac) mac_mdio_read((mac), MAC_MDIO_CMD)
2929 +#define mac_mdio_status_read(mac) mac_mdio_read((mac), MAC_MDIO_STS)
2930 +static inline u32 mac_mdio_read(struct mac_info *mac, int offset)
2931 +{
2932 + int i;
2933 + u32 data = 0;
2934 + for(i=0; i<4; i++) {
2935 + data |= (mac_read_reg(mac, offset + i) & 0xff) << (i*8);
2936 + }
2937 + return data;
2938 +}
2939 +
2940 +static inline u32 mdio_cmd(int phy_addr, int phy_reg)
2941 +{
2942 + return phy_addr << MII_ADDR_SHL |
2943 + phy_reg << MII_REG_SHL |
2944 + MII_GO;
2945 +}
2946 +
2947 +#define MAC_REG_LIST { \
2948 + MAC_TX_CNTRL1, MAC_TX_CNTRL2, \
2949 + MAC_RX_CNTRL1, MAC_RX_CNTRL2, \
2950 + MAC_RANDOM_SEED, MAC_THRESH_P_EMPTY, MAC_THRESH_P_FULL, \
2951 + MAC_BUF_SIZE_TX, MAC_TX_DEFER, MAC_RX_DEFER, \
2952 + MAC_TX_TWO_DEFER_1, MAC_TX_TWO_DEFER_2, MAC_SLOT_TIME, \
2953 + MAC_ADDR_MASK +0, MAC_ADDR_MASK +1, MAC_ADDR_MASK +2, \
2954 + MAC_ADDR_MASK +3, MAC_ADDR_MASK +4, MAC_ADDR_MASK +5, \
2955 + MAC_ADDR +0, MAC_ADDR +1, MAC_ADDR +2, \
2956 + MAC_ADDR +3, MAC_ADDR +4, MAC_ADDR +5, \
2957 + MAC_INT_CLK_THRESH, \
2958 + MAC_UNI_ADDR +0, MAC_UNI_ADDR +1, MAC_UNI_ADDR +2, \
2959 + MAC_UNI_ADDR +3, MAC_UNI_ADDR +4, MAC_UNI_ADDR +5, \
2960 + MAC_CORE_CNTRL \
2961 +}
2962 +
2963 +#define NPE_STAT_NUM 34
2964 +#define NPE_STAT_NUM_BASE 22
2965 +#define NPE_Q_STAT_NUM 4
2966 +
2967 +#define NPE_Q_STAT_STRINGS \
2968 + {"RX ready to use queue len "}, \
2969 + {"RX received queue len "}, \
2970 + {"TX to be send queue len "}, \
2971 + {"TX done queue len "},
2972 +
2973 +#define NPE_STAT_STRINGS \
2974 + {"StatsAlignmentErrors "}, \
2975 + {"StatsFCSErrors "}, \
2976 + {"StatsInternalMacReceiveErrors "}, \
2977 + {"RxOverrunDiscards "}, \
2978 + {"RxLearnedEntryDiscards "}, \
2979 + {"RxLargeFramesDiscards "}, \
2980 + {"RxSTPBlockedDiscards "}, \
2981 + {"RxVLANTypeFilterDiscards "}, \
2982 + {"RxVLANIdFilterDiscards "}, \
2983 + {"RxInvalidSourceDiscards "}, \
2984 + {"RxBlackListDiscards "}, \
2985 + {"RxWhiteListDiscards "}, \
2986 + {"RxUnderflowEntryDiscards "}, \
2987 + {"StatsSingleCollisionFrames "}, \
2988 + {"StatsMultipleCollisionFrames "}, \
2989 + {"StatsDeferredTransmissions "}, \
2990 + {"StatsLateCollisions "}, \
2991 + {"StatsExcessiveCollsions "}, \
2992 + {"StatsInternalMacTransmitErrors"}, \
2993 + {"StatsCarrierSenseErrors "}, \
2994 + {"TxLargeFrameDiscards "}, \
2995 + {"TxVLANIdFilterDiscards "}, \
2996 +\
2997 + {"RxValidFramesTotalOctets "}, \
2998 + {"RxUcastPkts "}, \
2999 + {"RxBcastPkts "}, \
3000 + {"RxMcastPkts "}, \
3001 + {"RxPkts64Octets "}, \
3002 + {"RxPkts65to127Octets "}, \
3003 + {"RxPkts128to255Octets "}, \
3004 + {"RxPkts256to511Octets "}, \
3005 + {"RxPkts512to1023Octets "}, \
3006 + {"RxPkts1024to1518Octets "}, \
3007 + {"RxInternalNPEReceiveErrors "}, \
3008 + {"TxInternalNPETransmitErrors "}
3009 +
3010 diff -Naur linux-2.6.19.orig/drivers/net/ixp4xx/Makefile linux-2.6.19/drivers/net/ixp4xx/Makefile
3011 --- linux-2.6.19.orig/drivers/net/ixp4xx/Makefile 1969-12-31 17:00:00.000000000 -0700
3012 +++ linux-2.6.19/drivers/net/ixp4xx/Makefile 2007-01-12 21:54:40.000000000 -0700
3013 @@ -0,0 +1,7 @@
3014 +obj-$(CONFIG_IXP4XX_QMGR) += ixp4xx_qmgr.o
3015 +obj-$(CONFIG_IXP4XX_NPE) += ixp4xx_npe.o
3016 +obj-$(CONFIG_IXP4XX_MAC) += ixp4xx_mac.o
3017 +obj-$(CONFIG_IXP4XX_CRYPTO) += ixp4xx_crypto.o
3018 +
3019 +ixp4xx_npe-objs := ucode_dl.o npe_mh.o npe.o
3020 +ixp4xx_mac-objs := mac_driver.o phy.o
3021 diff -Naur linux-2.6.19.orig/drivers/net/ixp4xx/npe.c linux-2.6.19/drivers/net/ixp4xx/npe.c
3022 --- linux-2.6.19.orig/drivers/net/ixp4xx/npe.c 1969-12-31 17:00:00.000000000 -0700
3023 +++ linux-2.6.19/drivers/net/ixp4xx/npe.c 2007-01-12 21:54:40.000000000 -0700
3024 @@ -0,0 +1,291 @@
3025 +
3026 +#include <linux/ixp_npe.h>
3027 +#include <asm/hardware.h>
3028 +
3029 +#define RESET_NPE_PARITY 0x0800
3030 +#define PARITY_BIT_MASK 0x3F00FFFF
3031 +#define CONFIG_CTRL_REG_MASK 0x3F3FFFFF
3032 +#define MAX_RETRIES 1000000
3033 +#define NPE_PHYS_REG 32
3034 +#define RESET_MBST_VAL 0x0000F0F0
3035 +#define NPE_REGMAP 0x0000001E
3036 +#define INSTR_WR_REG_SHORT 0x0000C000
3037 +#define INSTR_WR_REG_BYTE 0x00004000
3038 +#define MASK_ECS_REG_0_NEXTPC 0x1FFF0000
3039 +
3040 +#define INSTR_RD_FIFO 0x0F888220
3041 +#define INSTR_RESET_MBOX 0x0FAC8210
3042 +
3043 +#define ECS_REG_0_LDUR 8
3044 +#define ECS_REG_1_CCTXT 16
3045 +#define ECS_REG_1_SELCTXT 0
3046 +
3047 +#define ECS_BG_CTXT_REG_0 0x00
3048 +#define ECS_BG_CTXT_REG_1 0x01
3049 +#define ECS_BG_CTXT_REG_2 0x02
3050 +#define ECS_PRI_1_CTXT_REG_0 0x04
3051 +#define ECS_PRI_1_CTXT_REG_1 0x05
3052 +#define ECS_PRI_1_CTXT_REG_2 0x06
3053 +#define ECS_PRI_2_CTXT_REG_0 0x08
3054 +#define ECS_PRI_2_CTXT_REG_1 0x09
3055 +#define ECS_PRI_2_CTXT_REG_2 0x0A
3056 +#define ECS_DBG_CTXT_REG_0 0x0C
3057 +#define ECS_DBG_CTXT_REG_1 0x0D
3058 +#define ECS_DBG_CTXT_REG_2 0x0E
3059 +#define ECS_INSTRUCT_REG 0x11
3060 +
3061 +#define ECS_BG_CTXT_REG_0_RESET 0xA0000000
3062 +#define ECS_BG_CTXT_REG_1_RESET 0x01000000
3063 +#define ECS_BG_CTXT_REG_2_RESET 0x00008000
3064 +#define ECS_PRI_1_CTXT_REG_0_RESET 0x20000080
3065 +#define ECS_PRI_1_CTXT_REG_1_RESET 0x01000000
3066 +#define ECS_PRI_1_CTXT_REG_2_RESET 0x00008000
3067 +#define ECS_PRI_2_CTXT_REG_0_RESET 0x20000080
3068 +#define ECS_PRI_2_CTXT_REG_1_RESET 0x01000000
3069 +#define ECS_PRI_2_CTXT_REG_2_RESET 0x00008000
3070 +#define ECS_DBG_CTXT_REG_0_RESET 0x20000000
3071 +#define ECS_DBG_CTXT_REG_1_RESET 0x00000000
3072 +#define ECS_DBG_CTXT_REG_2_RESET 0x001E0000
3073 +#define ECS_INSTRUCT_REG_RESET 0x1003C00F
3074 +
3075 +static struct { u32 reg; u32 val; } ecs_reset[] =
3076 +{
3077 + { ECS_BG_CTXT_REG_0, ECS_BG_CTXT_REG_0_RESET },
3078 + { ECS_BG_CTXT_REG_1, ECS_BG_CTXT_REG_1_RESET },
3079 + { ECS_BG_CTXT_REG_2, ECS_BG_CTXT_REG_2_RESET },
3080 + { ECS_PRI_1_CTXT_REG_0, ECS_PRI_1_CTXT_REG_0_RESET },
3081 + { ECS_PRI_1_CTXT_REG_1, ECS_PRI_1_CTXT_REG_1_RESET },
3082 + { ECS_PRI_1_CTXT_REG_2, ECS_PRI_1_CTXT_REG_2_RESET },
3083 + { ECS_PRI_2_CTXT_REG_0, ECS_PRI_2_CTXT_REG_0_RESET },
3084 + { ECS_PRI_2_CTXT_REG_1, ECS_PRI_2_CTXT_REG_1_RESET },
3085 + { ECS_PRI_2_CTXT_REG_2, ECS_PRI_2_CTXT_REG_2_RESET },
3086 + { ECS_DBG_CTXT_REG_0, ECS_DBG_CTXT_REG_0_RESET },
3087 + { ECS_DBG_CTXT_REG_1, ECS_DBG_CTXT_REG_1_RESET },
3088 + { ECS_DBG_CTXT_REG_2, ECS_DBG_CTXT_REG_2_RESET },
3089 + { ECS_INSTRUCT_REG, ECS_INSTRUCT_REG_RESET }
3090 +};
3091 +
3092 +/* actually I have no idea what I'm doing here !!
3093 + * I only rewrite the "reset" sequence the way Intel does it.
3094 + */
3095 +
3096 +static void npe_debg_preexec(struct npe_info *npe)
3097 +{
3098 + u32 r = IX_NPEDL_MASK_ECS_DBG_REG_2_IF | IX_NPEDL_MASK_ECS_DBG_REG_2_IE;
3099 +
3100 + npe->exec_count = npe_reg_read(npe, IX_NPEDL_REG_OFFSET_EXCT);
3101 + npe_reg_write(npe, IX_NPEDL_REG_OFFSET_EXCT, 0);
3102 + npe->ctx_reg2 = npe_read_ecs_reg(npe, ECS_DBG_CTXT_REG_2);
3103 + npe_write_ecs_reg(npe, ECS_DBG_CTXT_REG_2, npe->ctx_reg2 | r);
3104 +}
3105 +
3106 +static void npe_debg_postexec(struct npe_info *npe)
3107 +{
3108 + npe_write_ecs_reg(npe, ECS_DBG_CTXT_REG_0, 0);
3109 + npe_write_exctl(npe, IX_NPEDL_EXCTL_CMD_NPE_CLR_PIPE);
3110 + npe_reg_write(npe, IX_NPEDL_REG_OFFSET_EXCT, npe->exec_count);
3111 + npe_write_ecs_reg(npe, ECS_DBG_CTXT_REG_2, npe->ctx_reg2);
3112 +}
3113 +
3114 +static int
3115 +npe_debg_inst_exec(struct npe_info *npe, u32 instr, u32 ctx, u32 ldur)
3116 +{
3117 + u32 regval, wc;
3118 + int c = 0;
3119 +
3120 + regval = IX_NPEDL_MASK_ECS_REG_0_ACTIVE |
3121 + (ldur << ECS_REG_0_LDUR);
3122 + npe_write_ecs_reg(npe, ECS_DBG_CTXT_REG_0 , regval);
3123 + /* set CCTXT at ECS DEBUG L3 to specify in which context
3124 + * to execute the instruction
3125 + */
3126 + regval = (ctx << ECS_REG_1_CCTXT) |
3127 + (ctx << ECS_REG_1_SELCTXT);
3128 + npe_write_ecs_reg(npe, ECS_DBG_CTXT_REG_1, regval);
3129 +
3130 + /* clear the pipeline */
3131 + npe_write_exctl(npe, IX_NPEDL_EXCTL_CMD_NPE_CLR_PIPE);
3132 +
3133 + /* load NPE instruction into the instruction register */
3134 + npe_write_ecs_reg(npe, ECS_INSTRUCT_REG, instr);
3135 + /* we need this value later to wait for
3136 + * completion of NPE execution step
3137 + */
3138 + wc = npe_reg_read(npe, IX_NPEDL_REG_OFFSET_WC);
3139 + npe_write_exctl(npe, IX_NPEDL_EXCTL_CMD_NPE_STEP);
3140 +
3141 + /* Watch Count register increments when NPE completes an instruction */
3142 + while (wc == npe_reg_read(npe, IX_NPEDL_REG_OFFSET_WC) &&
3143 + ++c < MAX_RETRIES);
3144 +
3145 + if (c >= MAX_RETRIES) {
3146 + printk(KERN_ERR "%s reset:npe_debg_inst_exec(): Timeout\n",
3147 + npe->plat->name);
3148 + return 1;
3149 + }
3150 + return 0;
3151 +}
3152 +
3153 +static int npe_logical_reg_write8(struct npe_info *npe, u32 addr, u32 val)
3154 +{
3155 + u32 instr;
3156 + val &= 0xff;
3157 + /* here we build the NPE assembler instruction:
3158 + * mov8 d0, #0 */
3159 + instr = INSTR_WR_REG_BYTE | /* OpCode */
3160 + addr << 9 | /* base Operand */
3161 + (val & 0x1f) << 4 | /* lower 5 bits to immediate data */
3162 + (val & ~0x1f) << (18-5);/* higher 3 bits to CoProc instr. */
3163 + /* and execute it */
3164 + return npe_debg_inst_exec(npe, instr, 0, 1);
3165 +}
3166 +
3167 +static int npe_logical_reg_write16(struct npe_info *npe, u32 addr, u32 val)
3168 +{
3169 + u32 instr;
3170 + /* here we build the NPE assembler instruction:
3171 + * mov16 d0, #0 */
3172 + val &= 0xffff;
3173 + instr = INSTR_WR_REG_SHORT | /* OpCode */
3174 + addr << 9 | /* base Operand */
3175 + (val & 0x1f) << 4 | /* lower 5 bits to immediate data */
3176 + (val & ~0x1f) << (18-5);/* higher 11 bits to CoProc instr. */
3177 + /* and execute it */
3178 + return npe_debg_inst_exec(npe, instr, 0, 1);
3179 +}
3180 +
3181 +static int npe_logical_reg_write32(struct npe_info *npe, u32 addr, u32 val)
3182 +{
3183 + /* write in 16 bit steps first the high and then the low value */
3184 + npe_logical_reg_write16(npe, addr, val >> 16);
3185 + return npe_logical_reg_write16(npe, addr+2, val & 0xffff);
3186 +}
3187 +
3188 +void npe_reset(struct npe_info *npe)
3189 +{
3190 + u32 reg, cfg_ctrl;
3191 + int i;
3192 + struct { u32 reset; int addr; int size; } ctx_reg[] = {
3193 + { 0x80, 0x1b, 8 },
3194 + { 0, 0x1c, 16 },
3195 + { 0x820, 0x1e, 16 },
3196 + { 0, 0x1f, 8 }
3197 + }, *cr;
3198 +
3199 + cfg_ctrl = npe_reg_read(npe, IX_NPEDL_REG_OFFSET_CTL);
3200 + cfg_ctrl |= 0x3F000000;
3201 + /* disable the parity interrupt */
3202 + npe_reg_write(npe, IX_NPEDL_REG_OFFSET_CTL, cfg_ctrl & PARITY_BIT_MASK);
3203 +
3204 + npe_debg_preexec(npe);
3205 +
3206 + /* clear the FIFOs */
3207 + while (npe_reg_read(npe, IX_NPEDL_REG_OFFSET_WFIFO) ==
3208 + IX_NPEDL_MASK_WFIFO_VALID);
3209 + while (npe_reg_read(npe, IX_NPEDL_REG_OFFSET_STAT) ==
3210 + IX_NPEDL_MASK_STAT_OFNE)
3211 + {
3212 + u32 reg;
3213 + reg = npe_reg_read(npe, IX_NPEDL_REG_OFFSET_FIFO);
3214 + printk("%s reset: Read FIFO:=%x\n", npe->plat->name, reg);
3215 + }
3216 + while (npe_reg_read(npe, IX_NPEDL_REG_OFFSET_STAT) ==
3217 + IX_NPEDL_MASK_STAT_IFNE) {
3218 + npe_debg_inst_exec(npe, INSTR_RD_FIFO, 0, 0);
3219 + }
3220 +
3221 + /* Reset the mailbox reg */
3222 + npe_reg_write(npe, IX_NPEDL_REG_OFFSET_MBST, RESET_MBST_VAL);
3223 + npe_debg_inst_exec(npe, INSTR_RESET_MBOX, 0, 0);
3224 +
3225 + /* Reset the physical registers in the NPE register file */
3226 + for (i=0; i<NPE_PHYS_REG; i++) {
3227 + npe_logical_reg_write16(npe, NPE_REGMAP, i >> 1);
3228 + npe_logical_reg_write32(npe, (i&1) *4, 0);
3229 + }
3230 +
3231 + /* Reset the context store. Iterate over the 16 contexts */
3232 + for(i=0; i<16; i++) {
3233 + for (reg=0; reg<4; reg++) {
3234 + /* There is no (STEVT) register for Context 0.
3235 + * ignore if register=0 and ctx=0 */
3236 + if (!(reg || i))
3237 + continue;
3238 + /* Context 0 has no STARTPC. Instead, this value is
3239 + * used to set NextPC for Background ECS,
3240 + * to set where NPE starts executing code
3241 + */
3242 + if (!i && reg==1) {
3243 + u32 r;
3244 + r = npe_read_ecs_reg(npe, ECS_BG_CTXT_REG_0);
3245 + r &= ~MASK_ECS_REG_0_NEXTPC;
3246 + r |= (cr->reset << 16) & MASK_ECS_REG_0_NEXTPC;
3247 + continue;
3248 + }
3249 + cr = ctx_reg + reg;
3250 + switch (cr->size) {
3251 + case 8:
3252 + npe_logical_reg_write8(npe, cr->addr,
3253 + cr->reset);
3254 + break;
3255 + case 16:
3256 + npe_logical_reg_write16(npe, cr->addr,
3257 + cr->reset);
3258 + }
3259 + }
3260 + }
3261 + npe_debg_postexec(npe);
3262 +
3263 + for (i=0; i< ARRAY_SIZE(ecs_reset); i++) {
3264 + npe_write_ecs_reg(npe, ecs_reset[i].reg, ecs_reset[i].val);
3265 + }
3266 + npe_write_exctl(npe, IX_NPEDL_EXCTL_CMD_CLR_PROFILE_CNT);
3267 +
3268 + for (i=IX_NPEDL_REG_OFFSET_EXCT; i<=IX_NPEDL_REG_OFFSET_AP3; i+=4) {
3269 + npe_reg_write(npe, i, 0);
3270 + }
3271 +
3272 + npe_reg_write(npe, IX_NPEDL_REG_OFFSET_WC, 0);
3273 +
3274 + reg = *IXP4XX_EXP_CFG2;
3275 + reg |= 0x800 << npe->plat->id; /* IX_FUSE_NPE[ABC] */
3276 + *IXP4XX_EXP_CFG2 = reg;
3277 + reg &= ~(0x800 << npe->plat->id); /* IX_FUSE_NPE[ABC] */
3278 + *IXP4XX_EXP_CFG2 = reg;
3279 +
3280 + npe_stop(npe);
3281 +
3282 + npe_reg_write(npe, IX_NPEDL_REG_OFFSET_CTL,
3283 + cfg_ctrl & CONFIG_CTRL_REG_MASK);
3284 + npe->loaded = 0;
3285 +}
3286 +
3287 +
3288 +void npe_stop(struct npe_info *npe)
3289 +{
3290 + npe_write_exctl(npe, IX_NPEDL_EXCTL_CMD_NPE_STOP);
3291 + npe_write_exctl(npe, IX_NPEDL_EXCTL_CMD_NPE_CLR_PIPE);
3292 +}
3293 +
3294 +static void npe_reset_active(struct npe_info *npe, u32 reg)
3295 +{
3296 + u32 regval;
3297 +
3298 + regval = npe_read_ecs_reg(npe, reg);
3299 + regval &= ~IX_NPEDL_MASK_ECS_REG_0_ACTIVE;
3300 + npe_write_ecs_reg(npe, reg, regval);
3301 +}
3302 +
3303 +void npe_start(struct npe_info *npe)
3304 +{
3305 + npe_reset_active(npe, IX_NPEDL_ECS_PRI_1_CTXT_REG_0);
3306 + npe_reset_active(npe, IX_NPEDL_ECS_PRI_2_CTXT_REG_0);
3307 + npe_reset_active(npe, IX_NPEDL_ECS_DBG_CTXT_REG_0);
3308 +
3309 + npe_write_exctl(npe, IX_NPEDL_EXCTL_CMD_NPE_CLR_PIPE);
3310 + npe_write_exctl(npe, IX_NPEDL_EXCTL_CMD_NPE_START);
3311 +}
3312 +
3313 +EXPORT_SYMBOL(npe_stop);
3314 +EXPORT_SYMBOL(npe_start);
3315 +EXPORT_SYMBOL(npe_reset);
3316 diff -Naur linux-2.6.19.orig/drivers/net/ixp4xx/npe_mh.c linux-2.6.19/drivers/net/ixp4xx/npe_mh.c
3317 --- linux-2.6.19.orig/drivers/net/ixp4xx/npe_mh.c 1969-12-31 17:00:00.000000000 -0700
3318 +++ linux-2.6.19/drivers/net/ixp4xx/npe_mh.c 2007-01-12 21:54:40.000000000 -0700
3319 @@ -0,0 +1,170 @@
3320 +/*
3321 + * npe_mh.c - NPE message handler.
3322 + *
3323 + * Copyright (C) 2006 Christian Hohnstaedt <chohnstaedt@innominate.com>
3324 + *
3325 + * This file is released under the GPLv2
3326 + */
3327 +
3328 +#include <linux/ixp_npe.h>
3329 +#include <linux/slab.h>
3330 +
3331 +#define MAX_RETRY 200
3332 +
3333 +struct npe_mh_msg {
3334 + union {
3335 + u8 byte[8]; /* Very descriptive name, I know ... */
3336 + u32 data[2];
3337 + } u;
3338 +};
3339 +
3340 +/*
3341 + * The whole code in this function must be reworked.
3342 + * It is in a state that works but is not rock solid
3343 + */
3344 +static int send_message(struct npe_info *npe, struct npe_mh_msg *msg)
3345 +{
3346 + int i,j;
3347 + u32 send[2], recv[2];
3348 +
3349 + for (i=0; i<2; i++)
3350 + send[i] = be32_to_cpu(msg->u.data[i]);
3351 +
3352 + if ((npe_reg_read(npe, IX_NPEDL_REG_OFFSET_STAT) &
3353 + IX_NPEMH_NPE_STAT_IFNE))
3354 + return -1;
3355 +
3356 + npe_reg_write(npe, IX_NPEDL_REG_OFFSET_FIFO, send[0]);
3357 + for(i=0; i<MAX_RETRY; i++) {
3358 + /* if the IFNF status bit is unset then the inFIFO is full */
3359 + if (npe_reg_read(npe, IX_NPEDL_REG_OFFSET_STAT) &
3360 + IX_NPEMH_NPE_STAT_IFNF)
3361 + break;
3362 + }
3363 + if (i>=MAX_RETRY)
3364 + return -1;
3365 + npe_reg_write(npe, IX_NPEDL_REG_OFFSET_FIFO, send[1]);
3366 + i=0;
3367 + while (!(npe_reg_read(npe, IX_NPEDL_REG_OFFSET_STAT) &
3368 + IX_NPEMH_NPE_STAT_OFNE)) {
3369 + if (i++>MAX_RETRY) {
3370 + printk("Waiting for Output FIFO NotEmpty failed\n");
3371 + return -1;
3372 + }
3373 + }
3374 + //printk("Output FIFO Not Empty. Loops: %d\n", i);
3375 + j=0;
3376 + while (npe_reg_read(npe, IX_NPEDL_REG_OFFSET_STAT) &
3377 + IX_NPEMH_NPE_STAT_OFNE) {
3378 + recv[j&1] = npe_reg_read(npe,IX_NPEDL_REG_OFFSET_FIFO);
3379 + j++;
3380 + }
3381 + if ((recv[0] != send[0]) || (recv[1] != send[1])) {
3382 + if (send[0] || send[1]) {
3383 + /* all CMDs return the complete message as answer,
3384 + * only GETSTATUS returns the ImageID of the NPE
3385 + */
3386 + printk("Unexpected answer: "
3387 + "Send %08x:%08x Ret %08x:%08x\n",
3388 + send[0], send[1], recv[0], recv[1]);
3389 + }
3390 + }
3391 + return 0;
3392 +}
3393 +
3394 +#define CMD 0
3395 +#define PORT 1
3396 +#define MAC 2
3397 +
3398 +#define IX_ETHNPE_NPE_GETSTATUS 0x00
3399 +#define IX_ETHNPE_EDB_SETPORTADDRESS 0x01
3400 +#define IX_ETHNPE_GETSTATS 0x04
3401 +#define IX_ETHNPE_RESETSTATS 0x05
3402 +#define IX_ETHNPE_FW_SETFIREWALLMODE 0x0E
3403 +#define IX_ETHNPE_VLAN_SETRXQOSENTRY 0x0B
3404 +#define IX_ETHNPE_SETLOOPBACK_MODE 0x12
3405 +
3406 +#define logical_id(mp) (((mp)->npe_id << 4) | ((mp)->port_id & 0xf))
3407 +
3408 +int npe_mh_status(struct npe_info *npe)
3409 +{
3410 + struct npe_mh_msg msg;
3411 +
3412 + memset(&msg, 0, sizeof(msg));
3413 + msg.u.byte[CMD] = IX_ETHNPE_NPE_GETSTATUS;
3414 + return send_message(npe, &msg);
3415 +}
3416 +
3417 +int npe_mh_setportaddr(struct npe_info *npe, struct mac_plat_info *mp,
3418 + u8 *macaddr)
3419 +{
3420 + struct npe_mh_msg msg;
3421 +
3422 + msg.u.byte[CMD] = IX_ETHNPE_EDB_SETPORTADDRESS;
3423 + msg.u.byte[PORT] = mp->eth_id;
3424 + memcpy(msg.u.byte + MAC, macaddr, 6);
3425 +
3426 + return send_message(npe, &msg);
3427 +}
3428 +
3429 +int npe_mh_disable_firewall(struct npe_info *npe, struct mac_plat_info *mp)
3430 +{
3431 + struct npe_mh_msg msg;
3432 +
3433 + memset(&msg, 0, sizeof(msg));
3434 + msg.u.byte[CMD] = IX_ETHNPE_FW_SETFIREWALLMODE;
3435 + msg.u.byte[PORT] = logical_id(mp);
3436 +
3437 + return send_message(npe, &msg);
3438 +}
3439 +
3440 +int npe_mh_npe_loopback_mode(struct npe_info *npe, struct mac_plat_info *mp,
3441 + int enable)
3442 +{
3443 + struct npe_mh_msg msg;
3444 +
3445 + memset(&msg, 0, sizeof(msg));
3446 + msg.u.byte[CMD] = IX_ETHNPE_SETLOOPBACK_MODE;
3447 + msg.u.byte[PORT] = logical_id(mp);
3448 + msg.u.byte[3] = enable ? 1 : 0;
3449 +
3450 + return send_message(npe, &msg);
3451 +}
3452 +
3453 +int npe_mh_set_rxqid(struct npe_info *npe, struct mac_plat_info *mp, int qid)
3454 +{
3455 + struct npe_mh_msg msg;
3456 + int i, ret;
3457 +
3458 + memset(&msg, 0, sizeof(msg));
3459 + msg.u.byte[CMD] = IX_ETHNPE_VLAN_SETRXQOSENTRY;
3460 + msg.u.byte[PORT] = logical_id(mp);
3461 + msg.u.byte[5] = qid | 0x80;
3462 + msg.u.byte[7] = qid<<4;
3463 + for(i=0; i<8; i++) {
3464 + msg.u.byte[3] = i;
3465 + if ((ret = send_message(npe, &msg)))
3466 + return ret;
3467 + }
3468 + return 0;
3469 +}
3470 +
3471 +int npe_mh_get_stats(struct npe_info *npe, struct mac_plat_info *mp, u32 phys,
3472 + int reset)
3473 +{
3474 + struct npe_mh_msg msg;
3475 + memset(&msg, 0, sizeof(msg));
3476 + msg.u.byte[CMD] = reset ? IX_ETHNPE_RESETSTATS : IX_ETHNPE_GETSTATS;
3477 + msg.u.byte[PORT] = logical_id(mp);
3478 + msg.u.data[1] = cpu_to_npe32(cpu_to_be32(phys));
3479 +
3480 + return send_message(npe, &msg);
3481 +}
3482 +
3483 +
3484 +EXPORT_SYMBOL(npe_mh_status);
3485 +EXPORT_SYMBOL(npe_mh_setportaddr);
3486 +EXPORT_SYMBOL(npe_mh_disable_firewall);
3487 +EXPORT_SYMBOL(npe_mh_set_rxqid);
3488 +EXPORT_SYMBOL(npe_mh_npe_loopback_mode);
3489 +EXPORT_SYMBOL(npe_mh_get_stats);
3490 diff -Naur linux-2.6.19.orig/drivers/net/ixp4xx/phy.c linux-2.6.19/drivers/net/ixp4xx/phy.c
3491 --- linux-2.6.19.orig/drivers/net/ixp4xx/phy.c 1969-12-31 17:00:00.000000000 -0700
3492 +++ linux-2.6.19/drivers/net/ixp4xx/phy.c 2007-01-12 21:54:40.000000000 -0700
3493 @@ -0,0 +1,113 @@
3494 +/*
3495 + * phy.c - MDIO functions and mii initialisation
3496 + *
3497 + * Copyright (C) 2006 Christian Hohnstaedt <chohnstaedt@innominate.com>
3498 + *
3499 + * This file is released under the GPLv2
3500 + */
3501 +
3502 +
3503 +#include <linux/mutex.h>
3504 +#include "mac.h"
3505 +
3506 +#define MAX_PHYS (1<<5)
3507 +
3508 +/*
3509 + * We must always use the same MAC for accessing the MDIO
3510 + * We may not use each MAC for its PHY :-(
3511 + */
3512 +
3513 +static struct net_device *phy_dev = NULL;
3514 +static struct mutex mtx;
3515 +
3516 +/* here we remember if the PHY is alive, to avoid log dumping */
3517 +static int phy_works[MAX_PHYS];
3518 +
3519 +int mdio_read_register(struct net_device *dev, int phy_addr, int phy_reg)
3520 +{
3521 + struct mac_info *mac;
3522 + u32 cmd, reg;
3523 + int cnt = 0;
3524 +
3525 + if (!phy_dev)
3526 + return 0;
3527 +
3528 + mac = netdev_priv(phy_dev);
3529 + cmd = mdio_cmd(phy_addr, phy_reg);
3530 + mutex_lock_interruptible(&mtx);
3531 + mac_mdio_cmd_write(mac, cmd);
3532 + while((cmd = mac_mdio_cmd_read(mac)) & MII_GO) {
3533 + if (++cnt >= 100) {
3534 + printk("%s: PHY[%d] access failed\n",
3535 + dev->name, phy_addr);
3536 + break;
3537 + }
3538 + schedule();
3539 + }
3540 + reg = mac_mdio_status_read(mac);
3541 + mutex_unlock(&mtx);
3542 + if (reg & MII_READ_FAIL) {
3543 + if (phy_works[phy_addr]) {
3544 + printk("%s: PHY[%d] unresponsive\n",
3545 + dev->name, phy_addr);
3546 + }
3547 + reg = 0;
3548 + phy_works[phy_addr] = 0;
3549 + } else {
3550 + if ( !phy_works[phy_addr]) {
3551 + printk("%s: PHY[%d] responsive again\n",
3552 + dev->name, phy_addr);
3553 + }
3554 + phy_works[phy_addr] = 1;
3555 + }
3556 + return reg & 0xffff;
3557 +}
3558 +
3559 +void
3560 +mdio_write_register(struct net_device *dev, int phy_addr, int phy_reg, int val)
3561 +{
3562 + struct mac_info *mac;
3563 + u32 cmd;
3564 + int cnt=0;
3565 +
3566 + if (!phy_dev)
3567 + return;
3568 +
3569 + mac = netdev_priv(phy_dev);
3570 + cmd = mdio_cmd(phy_addr, phy_reg) | MII_WRITE | val;
3571 +
3572 + mutex_lock_interruptible(&mtx);
3573 + mac_mdio_cmd_write(mac, cmd);
3574 + while((cmd = mac_mdio_cmd_read(mac)) & MII_GO) {
3575 + if (++cnt >= 100) {
3576 + printk("%s: PHY[%d] access failed\n",
3577 + dev->name, phy_addr);
3578 + break;
3579 + }
3580 + schedule();
3581 + }
3582 + mutex_unlock(&mtx);
3583 +}
3584 +
3585 +void init_mdio(struct net_device *dev, int phy_id)
3586 +{
3587 + struct mac_info *mac = netdev_priv(dev);
3588 + int i;
3589 +
3590 + /* All phy operations should use the same MAC
3591 + * (my experience)
3592 + */
3593 + if (mac->plat->eth_id == 0) {
3594 + mutex_init(&mtx);
3595 + phy_dev = dev;
3596 + for (i=0; i<MAX_PHYS; i++)
3597 + phy_works[i] = 1;
3598 + }
3599 + mac->mii.dev = dev;
3600 + mac->mii.phy_id = phy_id;
3601 + mac->mii.phy_id_mask = MAX_PHYS - 1;
3602 + mac->mii.reg_num_mask = 0x1f;
3603 + mac->mii.mdio_read = mdio_read_register;
3604 + mac->mii.mdio_write = mdio_write_register;
3605 +}
3606 +
3607 diff -Naur linux-2.6.19.orig/drivers/net/ixp4xx/ucode_dl.c linux-2.6.19/drivers/net/ixp4xx/ucode_dl.c
3608 --- linux-2.6.19.orig/drivers/net/ixp4xx/ucode_dl.c 1969-12-31 17:00:00.000000000 -0700
3609 +++ linux-2.6.19/drivers/net/ixp4xx/ucode_dl.c 2007-01-12 21:54:40.000000000 -0700
3610 @@ -0,0 +1,479 @@
3611 +/*
3612 + * ucode_dl.c - provide an NPE device and a char-dev for microcode download
3613 + *
3614 + * Copyright (C) 2006 Christian Hohnstaedt <chohnstaedt@innominate.com>
3615 + *
3616 + * This file is released under the GPLv2
3617 + */
3618 +
3619 +#include <linux/kernel.h>
3620 +#include <linux/module.h>
3621 +#include <linux/miscdevice.h>
3622 +#include <linux/platform_device.h>
3623 +#include <linux/fs.h>
3624 +#include <linux/init.h>
3625 +#include <linux/slab.h>
3626 +#include <linux/firmware.h>
3627 +#include <linux/dma-mapping.h>
3628 +#include <linux/byteorder/swab.h>
3629 +#include <asm/uaccess.h>
3630 +#include <asm/io.h>
3631 +
3632 +#include <linux/ixp_npe.h>
3633 +
3634 +#define IXNPE_VERSION "IXP4XX NPE driver Version 0.3.0"
3635 +
3636 +#define DL_MAGIC 0xfeedf00d
3637 +#define DL_MAGIC_SWAP 0x0df0edfe
3638 +
3639 +#define EOF_BLOCK 0xf
3640 +#define IMG_SIZE(image) (((image)->size * sizeof(u32)) + \
3641 + sizeof(struct dl_image))
3642 +
3643 +#define BT_INSTR 0
3644 +#define BT_DATA 1
3645 +
3646 +enum blk_type {
3647 + instruction,
3648 + data,
3649 +};
3650 +
3651 +struct dl_block {
3652 + u32 type;
3653 + u32 offset;
3654 +};
3655 +
3656 +struct dl_image {
3657 + u32 magic;
3658 + u32 id;
3659 + u32 size;
3660 + union {
3661 + u32 data[0];
3662 + struct dl_block block[0];
3663 + } u;
3664 +};
3665 +
3666 +struct dl_codeblock {
3667 + u32 npe_addr;
3668 + u32 size;
3669 + u32 data[0];
3670 +};
3671 +
3672 +static struct platform_driver ixp4xx_npe_driver;
3673 +
3674 +static int match_by_npeid(struct device *dev, void *id)
3675 +{
3676 + struct npe_info *npe = dev_get_drvdata(dev);
3677 + if (!npe->plat)
3678 + return 0;
3679 + return (npe->plat->id == *(int*)id);
3680 +}
3681 +
3682 +struct device *get_npe_by_id(int id)
3683 +{
3684 + struct device *dev = driver_find_device(&ixp4xx_npe_driver.driver,
3685 + NULL, &id, match_by_npeid);
3686 + if (dev) {
3687 + struct npe_info *npe = dev_get_drvdata(dev);
3688 + if (!try_module_get(THIS_MODULE)) {
3689 + put_device(dev);
3690 + return NULL;
3691 + }
3692 + npe->usage++;
3693 + }
3694 + return dev;
3695 +}
3696 +
3697 +void return_npe_dev(struct device *dev)
3698 +{
3699 + struct npe_info *npe = dev_get_drvdata(dev);
3700 + put_device(dev);
3701 + module_put(THIS_MODULE);
3702 + npe->usage--;
3703 +}
3704 +
3705 +static int
3706 +download_block(struct npe_info *npe, struct dl_codeblock *cb, unsigned type)
3707 +{
3708 + int i;
3709 + int cmd;
3710 +
3711 + switch (type) {
3712 + case BT_DATA:
3713 + cmd = IX_NPEDL_EXCTL_CMD_WR_DATA_MEM;
3714 + if (cb->npe_addr + cb->size > npe->plat->data_size) {
3715 + printk(KERN_INFO "Data size too large: %d+%d > %d\n",
3716 + cb->npe_addr, cb->size, npe->plat->data_size);
3717 + return -EIO;
3718 + }
3719 + break;
3720 + case BT_INSTR:
3721 + cmd = IX_NPEDL_EXCTL_CMD_WR_INS_MEM;
3722 + if (cb->npe_addr + cb->size > npe->plat->inst_size) {
3723 + printk(KERN_INFO "Instr size too large: %d+%d > %d\n",
3724 + cb->npe_addr, cb->size, npe->plat->inst_size);
3725 + return -EIO;
3726 + }
3727 + break;
3728 + default:
3729 + printk(KERN_INFO "Unknown CMD: %d\n", type);
3730 + return -EIO;
3731 + }
3732 +
3733 + for (i=0; i < cb->size; i++) {
3734 + npe_write_cmd(npe, cb->npe_addr + i, cb->data[i], cmd);
3735 + }
3736 +
3737 + return 0;
3738 +}
3739 +
3740 +static int store_npe_image(struct dl_image *image, struct device *dev)
3741 +{
3742 + struct dl_block *blk;
3743 + struct dl_codeblock *cb;
3744 + struct npe_info *npe;
3745 + int ret=0;
3746 +
3747 + if (!dev) {
3748 + dev = get_npe_by_id( (image->id >> 24) & 0xf);
3749 + return_npe_dev(dev);
3750 + }
3751 + if (!dev)
3752 + return -ENODEV;
3753 +
3754 + npe = dev_get_drvdata(dev);
3755 + if (npe->loaded && (npe->usage > 0)) {
3756 + printk(KERN_INFO "Cowardly refusing to reload an Image "
3757 + "into the used and running %s\n", npe->plat->name);
3758 + return 0; /* indicate success anyway... */
3759 + }
3760 + if (!cpu_is_ixp46x() && ((image->id >> 28) & 0xf)) {
3761 + printk(KERN_INFO "IXP46x NPE image ignored on IXP42x\n");
3762 + return -EIO;
3763 + }
3764 +
3765 + npe_stop(npe);
3766 + npe_reset(npe);
3767 +
3768 + for (blk = image->u.block; blk->type != EOF_BLOCK; blk++) {
3769 + if (blk->offset > image->size) {
3770 + printk(KERN_INFO "Block offset out of range\n");
3771 + return -EIO;
3772 + }
3773 + cb = (struct dl_codeblock*)&image->u.data[blk->offset];
3774 + if (blk->offset + cb->size + 2 > image->size) {
3775 + printk(KERN_INFO "Codeblock size out of range\n");
3776 + return -EIO;
3777 + }
3778 + if ((ret = download_block(npe, cb, blk->type)))
3779 + return ret;
3780 + }
3781 + *(u32*)npe->img_info = cpu_to_be32(image->id);
3782 + npe_start(npe);
3783 +
3784 + printk(KERN_INFO "Image loaded to %s Func:%x, Rel: %x:%x, Status: %x\n",
3785 + npe->plat->name, npe->img_info[1], npe->img_info[2],
3786 + npe->img_info[3], npe_status(npe));
3787 + if (npe_mh_status(npe)) {
3788 + printk(KERN_ERR "%s not responding\n", npe->plat->name);
3789 + }
3790 + npe->loaded = 1;
3791 + return 0;
3792 +}
3793 +
3794 +static int ucode_open(struct inode *inode, struct file *file)
3795 +{
3796 + file->private_data = kmalloc(sizeof(struct dl_image), GFP_KERNEL);
3797 + if (!file->private_data)
3798 + return -ENOMEM;
3799 + return 0;
3800 +}
3801 +
3802 +static int ucode_close(struct inode *inode, struct file *file)
3803 +{
3804 + kfree(file->private_data);
3805 + return 0;
3806 +}
3807 +
3808 +static ssize_t ucode_write(struct file *file, const char __user *buf,
3809 + size_t count, loff_t *ppos)
3810 +{
3811 + union {
3812 + char *data;
3813 + struct dl_image *image;
3814 + } u;
3815 + const char __user *cbuf = buf;
3816 +
3817 + u.data = file->private_data;
3818 +
3819 + while (count) {
3820 + int len;
3821 + if (*ppos < sizeof(struct dl_image)) {
3822 + len = sizeof(struct dl_image) - *ppos;
3823 + len = len > count ? count : len;
3824 + if (copy_from_user(u.data + *ppos, cbuf, len))
3825 + return -EFAULT;
3826 + count -= len;
3827 + *ppos += len;
3828 + cbuf += len;
3829 + continue;
3830 + } else if (*ppos == sizeof(struct dl_image)) {
3831 + void *data;
3832 + if (u.image->magic == DL_MAGIC_SWAP) {
3833 + printk(KERN_INFO "swapped image found\n");
3834 + u.image->id = swab32(u.image->id);
3835 + u.image->size = swab32(u.image->size);
3836 + } else if (u.image->magic != DL_MAGIC) {
3837 + printk(KERN_INFO "Bad magic:%x\n",
3838 + u.image->magic);
3839 + return -EFAULT;
3840 + }
3841 + len = IMG_SIZE(u.image);
3842 + data = kmalloc(len, GFP_KERNEL);
3843 + if (!data)
3844 + return -ENOMEM;
3845 + memcpy(data, u.data, *ppos);
3846 + kfree(u.data);
3847 + u.data = (char*)data;
3848 + file->private_data = data;
3849 + }
3850 + len = IMG_SIZE(u.image) - *ppos;
3851 + len = len > count ? count : len;
3852 + if (copy_from_user(u.data + *ppos, cbuf, len))
3853 + return -EFAULT;
3854 + count -= len;
3855 + *ppos += len;
3856 + cbuf += len;
3857 + if (*ppos == IMG_SIZE(u.image)) {
3858 + int ret, i;
3859 + *ppos = 0;
3860 + if (u.image->magic == DL_MAGIC_SWAP) {
3861 + for (i=0; i<u.image->size; i++) {
3862 + u.image->u.data[i] =
3863 + swab32(u.image->u.data[i]);
3864 + }
3865 + u.image->magic = swab32(u.image->magic);
3866 + }
3867 + ret = store_npe_image(u.image, NULL);
3868 + if (ret) {
3869 + printk(KERN_INFO "Error in NPE image: %x\n",
3870 + u.image->id);
3871 + return ret;
3872 + }
3873 + }
3874 + }
3875 + return (cbuf-buf);
3876 +}
3877 +
3878 +static void npe_firmware_probe(struct device *dev)
3879 +{
3880 +#if (defined(CONFIG_FW_LOADER) || defined(CONFIG_FW_LOADER_MODULE)) \
3881 + && defined(MODULE)
3882 + const struct firmware *fw_entry;
3883 + struct npe_info *npe = dev_get_drvdata(dev);
3884 + struct dl_image *image;
3885 + int ret = -1, i;
3886 +
3887 + if (request_firmware(&fw_entry, npe->plat->name, dev) != 0) {
3888 + return;
3889 + }
3890 + image = (struct dl_image*)fw_entry->data;
3891 + /* Sanity checks */
3892 + if (fw_entry->size < sizeof(struct dl_image)) {
3893 + printk(KERN_ERR "Firmware error: too small\n");
3894 + goto out;
3895 + }
3896 + if (image->magic == DL_MAGIC_SWAP) {
3897 + printk(KERN_INFO "swapped image found\n");
3898 + image->id = swab32(image->id);
3899 + image->size = swab32(image->size);
3900 + } else if (image->magic != DL_MAGIC) {
3901 + printk(KERN_ERR "Bad magic:%x\n", image->magic);
3902 + goto out;
3903 + }
3904 + if (IMG_SIZE(image) != fw_entry->size) {
3905 + printk(KERN_ERR "Firmware error: bad size\n");
3906 + goto out;
3907 + }
3908 + if (((image->id >> 24) & 0xf) != npe->plat->id) {
3909 + printk(KERN_ERR "NPE id missmatch\n");
3910 + goto out;
3911 + }
3912 + if (image->magic == DL_MAGIC_SWAP) {
3913 + for (i=0; i<image->size; i++) {
3914 + image->u.data[i] = swab32(image->u.data[i]);
3915 + }
3916 + image->magic = swab32(image->magic);
3917 + }
3918 +
3919 + ret = store_npe_image(image, dev);
3920 +out:
3921 + if (ret) {
3922 + printk(KERN_ERR "Error downloading Firmware for %s\n",
3923 + npe->plat->name);
3924 + }
3925 + release_firmware(fw_entry);
3926 +#endif
3927 +}
3928 +
3929 +static void disable_npe_irq(struct npe_info *npe)
3930 +{
3931 + u32 reg;
3932 + reg = npe_reg_read(npe, IX_NPEDL_REG_OFFSET_CTL);
3933 + reg &= ~(IX_NPEMH_NPE_CTL_OFE | IX_NPEMH_NPE_CTL_IFE);
3934 + reg |= IX_NPEMH_NPE_CTL_OFEWE | IX_NPEMH_NPE_CTL_IFEWE;
3935 + npe_reg_write(npe, IX_NPEDL_REG_OFFSET_CTL, reg);
3936 +}
3937 +
3938 +static ssize_t show_npe_state(struct device *dev, struct device_attribute *attr,
3939 + char *buf)
3940 +{
3941 + struct npe_info *npe = dev_get_drvdata(dev);
3942 +
3943 + strcpy(buf, npe_status(npe) & IX_NPEDL_EXCTL_STATUS_RUN ?
3944 + "start\n" : "stop\n");
3945 + return strlen(buf);
3946 +}
3947 +
3948 +static ssize_t set_npe_state(struct device *dev, struct device_attribute *attr,
3949 + const char *buf, size_t count)
3950 +{
3951 + struct npe_info *npe = dev_get_drvdata(dev);
3952 +
3953 + if (npe->usage) {
3954 + printk("%s in use: read-only\n", npe->plat->name);
3955 + return count;
3956 + }
3957 + if (!strncmp(buf, "start", 5)) {
3958 + npe_start(npe);
3959 + }
3960 + if (!strncmp(buf, "stop", 4)) {
3961 + npe_stop(npe);
3962 + }
3963 + if (!strncmp(buf, "reset", 5)) {
3964 + npe_stop(npe);
3965 + npe_reset(npe);
3966 + }
3967 + return count;
3968 +}
3969 +
3970 +static DEVICE_ATTR(state, S_IRUGO | S_IWUSR, show_npe_state, set_npe_state);
3971 +
3972 +static int npe_probe(struct platform_device *pdev)
3973 +{
3974 + struct resource *res;
3975 + struct npe_info *npe;
3976 + struct npe_plat_data *plat = pdev->dev.platform_data;
3977 + int err, size, ret=0;
3978 +
3979 + if (!(res = platform_get_resource(pdev, IORESOURCE_MEM, 0)))
3980 + return -EIO;
3981 +
3982 + if (!(npe = kzalloc(sizeof(struct npe_info), GFP_KERNEL)))
3983 + return -ENOMEM;
3984 +
3985 + size = res->end - res->start +1;
3986 + npe->res = request_mem_region(res->start, size, plat->name);
3987 + if (!npe->res) {
3988 + ret = -EBUSY;
3989 + printk(KERN_ERR "Failed to get memregion(%x, %x)\n",
3990 + res->start, size);
3991 + goto out_free;
3992 + }
3993 +
3994 + npe->addr = ioremap(res->start, size);
3995 + if (!npe->addr) {
3996 + ret = -ENOMEM;
3997 + printk(KERN_ERR "Failed to ioremap(%x, %x)\n",
3998 + res->start, size);
3999 + goto out_rel;
4000 + }
4001 +
4002 + pdev->dev.coherent_dma_mask = DMA_32BIT_MASK;
4003 +
4004 + platform_set_drvdata(pdev, npe);
4005 +
4006 + err = device_create_file(&pdev->dev, &dev_attr_state);
4007 + if (err)
4008 + goto out_rel;
4009 +
4010 + npe->plat = plat;
4011 + disable_npe_irq(npe);
4012 + npe->usage = 0;
4013 + npe_reset(npe);
4014 + npe_firmware_probe(&pdev->dev);
4015 +
4016 + return 0;
4017 +
4018 +out_rel:
4019 + release_resource(npe->res);
4020 +out_free:
4021 + kfree(npe);
4022 + return ret;
4023 +}
4024 +
4025 +static struct file_operations ucode_dl_fops = {
4026 + .owner = THIS_MODULE,
4027 + .write = ucode_write,
4028 + .open = ucode_open,
4029 + .release = ucode_close,
4030 +};
4031 +
4032 +static struct miscdevice ucode_dl_dev = {
4033 + .minor = MICROCODE_MINOR,
4034 + .name = "ixp4xx_ucode",
4035 + .fops = &ucode_dl_fops,
4036 +};
4037 +
4038 +static int npe_remove(struct platform_device *pdev)
4039 +{
4040 + struct npe_info *npe = platform_get_drvdata(pdev);
4041 +
4042 + device_remove_file(&pdev->dev, &dev_attr_state);
4043 +
4044 + iounmap(npe->addr);
4045 + release_resource(npe->res);
4046 + kfree(npe);
4047 + return 0;
4048 +}
4049 +
4050 +static struct platform_driver ixp4xx_npe_driver = {
4051 + .driver = {
4052 + .name = "ixp4xx_npe",
4053 + .owner = THIS_MODULE,
4054 + },
4055 + .probe = npe_probe,
4056 + .remove = npe_remove,
4057 +};
4058 +
4059 +static int __init init_npedriver(void)
4060 +{
4061 + int ret;
4062 + if ((ret = misc_register(&ucode_dl_dev))){
4063 + printk(KERN_ERR "Failed to register misc device %d\n",
4064 + MICROCODE_MINOR);
4065 + return ret;
4066 + }
4067 + if ((ret = platform_driver_register(&ixp4xx_npe_driver)))
4068 + misc_deregister(&ucode_dl_dev);
4069 + else
4070 + printk(KERN_INFO IXNPE_VERSION " initialized\n");
4071 +
4072 + return ret;
4073 +
4074 +}
4075 +
4076 +static void __exit finish_npedriver(void)
4077 +{
4078 + misc_deregister(&ucode_dl_dev);
4079 + platform_driver_unregister(&ixp4xx_npe_driver);
4080 +}
4081 +
4082 +module_init(init_npedriver);
4083 +module_exit(finish_npedriver);
4084 +
4085 +MODULE_LICENSE("GPL");
4086 +MODULE_AUTHOR("Christian Hohnstaedt <chohnstaedt@innominate.com>");
4087 +
4088 +EXPORT_SYMBOL(get_npe_by_id);
4089 +EXPORT_SYMBOL(return_npe_dev);
4090 diff -Naur linux-2.6.19.orig/drivers/net/Kconfig linux-2.6.19/drivers/net/Kconfig
4091 --- linux-2.6.19.orig/drivers/net/Kconfig 2006-11-29 14:57:37.000000000 -0700
4092 +++ linux-2.6.19/drivers/net/Kconfig 2007-01-12 21:54:40.000000000 -0700
4093 @@ -190,6 +190,8 @@
4094
4095 source "drivers/net/arm/Kconfig"
4096
4097 +source "drivers/net/ixp4xx/Kconfig"
4098 +
4099 config MACE
4100 tristate "MACE (Power Mac ethernet) support"
4101 depends on NET_ETHERNET && PPC_PMAC && PPC32
4102 diff -Naur linux-2.6.19.orig/drivers/net/Makefile linux-2.6.19/drivers/net/Makefile
4103 --- linux-2.6.19.orig/drivers/net/Makefile 2006-11-29 14:57:37.000000000 -0700
4104 +++ linux-2.6.19/drivers/net/Makefile 2007-01-12 21:54:40.000000000 -0700
4105 @@ -209,6 +209,7 @@
4106 obj-$(CONFIG_IRDA) += irda/
4107 obj-$(CONFIG_ETRAX_ETHERNET) += cris/
4108 obj-$(CONFIG_ENP2611_MSF_NET) += ixp2000/
4109 +obj-$(CONFIG_IXP4XX_NPE) += ixp4xx/
4110
4111 obj-$(CONFIG_NETCONSOLE) += netconsole.o
4112
4113 diff -Naur linux-2.6.19.orig/include/asm-arm/arch-ixp4xx/ixp4xx-regs.h linux-2.6.19/include/asm-arm/arch-ixp4xx/ixp4xx-regs.h
4114 --- linux-2.6.19.orig/include/asm-arm/arch-ixp4xx/ixp4xx-regs.h 2006-11-29 14:57:37.000000000 -0700
4115 +++ linux-2.6.19/include/asm-arm/arch-ixp4xx/ixp4xx-regs.h 2007-01-12 21:54:40.000000000 -0700
4116 @@ -22,6 +22,8 @@
4117 #ifndef _ASM_ARM_IXP4XX_H_
4118 #define _ASM_ARM_IXP4XX_H_
4119
4120 +#include "npe_regs.h"
4121 +
4122 /*
4123 * IXP4xx Linux Memory Map:
4124 *
4125 @@ -44,6 +46,12 @@
4126 */
4127
4128 /*
4129 + * PCI Memory Space
4130 + */
4131 +#define IXP4XX_PCIMEM_BASE_PHYS (0x48000000)
4132 +#define IXP4XX_PCIMEM_REGION_SIZE (0x04000000)
4133 +#define IXP4XX_PCIMEM_BAR_SIZE (0x01000000)
4134 +/*
4135 * Queue Manager
4136 */
4137 #define IXP4XX_QMGR_BASE_PHYS (0x60000000)
4138 @@ -322,7 +330,13 @@
4139 #define PCI_ATPDMA0_LENADDR_OFFSET 0x48
4140 #define PCI_ATPDMA1_AHBADDR_OFFSET 0x4C
4141 #define PCI_ATPDMA1_PCIADDR_OFFSET 0x50
4142 -#define PCI_ATPDMA1_LENADDR_OFFSET 0x54
4143 +#define PCI_ATPDMA1_LENADDR_OFFSET 0x54
4144 +#define PCI_PTADMA0_AHBADDR_OFFSET 0x58
4145 +#define PCI_PTADMA0_PCIADDR_OFFSET 0x5c
4146 +#define PCI_PTADMA0_LENADDR_OFFSET 0x60
4147 +#define PCI_PTADMA1_AHBADDR_OFFSET 0x64
4148 +#define PCI_PTADMA1_PCIADDR_OFFSET 0x68
4149 +#define PCI_PTADMA1_LENADDR_OFFSET 0x6c
4150
4151 /*
4152 * PCI Control/Status Registers
4153 @@ -351,6 +365,12 @@
4154 #define PCI_ATPDMA1_AHBADDR IXP4XX_PCI_CSR(PCI_ATPDMA1_AHBADDR_OFFSET)
4155 #define PCI_ATPDMA1_PCIADDR IXP4XX_PCI_CSR(PCI_ATPDMA1_PCIADDR_OFFSET)
4156 #define PCI_ATPDMA1_LENADDR IXP4XX_PCI_CSR(PCI_ATPDMA1_LENADDR_OFFSET)
4157 +#define PCI_PTADMA0_AHBADDR IXP4XX_PCI_CSR(PCI_PTADMA0_AHBADDR_OFFSET)
4158 +#define PCI_PTADMA0_PCIADDR IXP4XX_PCI_CSR(PCI_PTADMA0_PCIADDR_OFFSET)
4159 +#define PCI_PTADMA0_LENADDR IXP4XX_PCI_CSR(PCI_PTADMA0_LENADDR_OFFSET)
4160 +#define PCI_PTADMA1_AHBADDR IXP4XX_PCI_CSR(PCI_PTADMA1_AHBADDR_OFFSET)
4161 +#define PCI_PTADMA1_PCIADDR IXP4XX_PCI_CSR(PCI_PTADMA1_PCIADDR_OFFSET)
4162 +#define PCI_PTADMA1_LENADDR IXP4XX_PCI_CSR(PCI_PTADMA1_LENADDR_OFFSET)
4163
4164 /*
4165 * PCI register values and bit definitions
4166 @@ -607,6 +627,34 @@
4167
4168 #define DCMD_LENGTH 0x01fff /* length mask (max = 8K - 1) */
4169
4170 +
4171 +/* Fuse Bits of IXP_EXP_CFG2 */
4172 +#define IX_FUSE_RCOMP (1 << 0)
4173 +#define IX_FUSE_USB (1 << 1)
4174 +#define IX_FUSE_HASH (1 << 2)
4175 +#define IX_FUSE_AES (1 << 3)
4176 +#define IX_FUSE_DES (1 << 4)
4177 +#define IX_FUSE_HDLC (1 << 5)
4178 +#define IX_FUSE_AAL (1 << 6)
4179 +#define IX_FUSE_HSS (1 << 7)
4180 +#define IX_FUSE_UTOPIA (1 << 8)
4181 +#define IX_FUSE_ETH0 (1 << 9)
4182 +#define IX_FUSE_ETH1 (1 << 10)
4183 +#define IX_FUSE_NPEA (1 << 11)
4184 +#define IX_FUSE_NPEB (1 << 12)
4185 +#define IX_FUSE_NPEC (1 << 13)
4186 +#define IX_FUSE_PCI (1 << 14)
4187 +#define IX_FUSE_ECC (1 << 15)
4188 +#define IX_FUSE_UTOPIA_PHY_LIMIT (3 << 16)
4189 +#define IX_FUSE_USB_HOST (1 << 18)
4190 +#define IX_FUSE_NPEA_ETH (1 << 19)
4191 +#define IX_FUSE_NPEB_ETH (1 << 20)
4192 +#define IX_FUSE_RSA (1 << 21)
4193 +#define IX_FUSE_XSCALE_MAX_FREQ (3 << 22)
4194 +
4195 +#define IX_FUSE_IXP46X_ONLY (IX_FUSE_XSCALE_MAX_FREQ | IX_FUSE_RSA | \
4196 +	IX_FUSE_NPEB_ETH | IX_FUSE_NPEA_ETH | IX_FUSE_USB_HOST | IX_FUSE_ECC)
4197 +
4198 #ifndef __ASSEMBLY__
4199 static inline int cpu_is_ixp46x(void)
4200 {
4201 @@ -620,6 +668,15 @@
4202 #endif
4203 return 0;
4204 }
4205 +
4206 +static inline u32 ix_fuse(void)
4207 +{
4208 + unsigned int fuses = ~(*IXP4XX_EXP_CFG2);
4209 + if (!cpu_is_ixp46x())
4210 + fuses &= ~IX_FUSE_IXP46X_ONLY;
4211 +
4212 + return fuses;
4213 +}
4214 #endif
4215
4216 #endif
4217 diff -Naur linux-2.6.19.orig/include/asm-arm/arch-ixp4xx/npe_regs.h linux-2.6.19/include/asm-arm/arch-ixp4xx/npe_regs.h
4218 --- linux-2.6.19.orig/include/asm-arm/arch-ixp4xx/npe_regs.h 1969-12-31 17:00:00.000000000 -0700
4219 +++ linux-2.6.19/include/asm-arm/arch-ixp4xx/npe_regs.h 2007-01-12 21:54:40.000000000 -0700
4220 @@ -0,0 +1,82 @@
4221 +#ifndef NPE_REGS_H
4222 +#define NPE_REGS_H
4223 +
4224 +/* Execution Address */
4225 +#define IX_NPEDL_REG_OFFSET_EXAD 0x00
4226 +/* Execution Data */
4227 +#define IX_NPEDL_REG_OFFSET_EXDATA 0x04
4228 +/* Execution Control */
4229 +#define IX_NPEDL_REG_OFFSET_EXCTL 0x08
4230 +/* Execution Count */
4231 +#define IX_NPEDL_REG_OFFSET_EXCT 0x0C
4232 +/* Action Point 0 */
4233 +#define IX_NPEDL_REG_OFFSET_AP0 0x10
4234 +/* Action Point 1 */
4235 +#define IX_NPEDL_REG_OFFSET_AP1 0x14
4236 +/* Action Point 2 */
4237 +#define IX_NPEDL_REG_OFFSET_AP2 0x18
4238 +/* Action Point 3 */
4239 +#define IX_NPEDL_REG_OFFSET_AP3 0x1C
4240 +/* Watchpoint FIFO */
4241 +#define IX_NPEDL_REG_OFFSET_WFIFO 0x20
4242 +/* Watch Count */
4243 +#define IX_NPEDL_REG_OFFSET_WC 0x24
4244 +/* Profile Count */
4245 +#define IX_NPEDL_REG_OFFSET_PROFCT 0x28
4246 +
4247 +/* Messaging Status */
4248 +#define IX_NPEDL_REG_OFFSET_STAT 0x2C
4249 +/* Messaging Control */
4250 +#define IX_NPEDL_REG_OFFSET_CTL 0x30
4251 +/* Mailbox Status */
4252 +#define IX_NPEDL_REG_OFFSET_MBST 0x34
4253 +/* messaging in/out FIFO */
4254 +#define IX_NPEDL_REG_OFFSET_FIFO 0x38
4255 +
4256 +
4257 +#define IX_NPEDL_MASK_ECS_DBG_REG_2_IF 0x00100000
4258 +#define IX_NPEDL_MASK_ECS_DBG_REG_2_IE 0x00080000
4259 +#define IX_NPEDL_MASK_ECS_REG_0_ACTIVE 0x80000000
4260 +
4261 +#define IX_NPEDL_EXCTL_CMD_NPE_STEP 0x01
4262 +#define IX_NPEDL_EXCTL_CMD_NPE_START 0x02
4263 +#define IX_NPEDL_EXCTL_CMD_NPE_STOP 0x03
4264 +#define IX_NPEDL_EXCTL_CMD_NPE_CLR_PIPE 0x04
4265 +#define IX_NPEDL_EXCTL_CMD_CLR_PROFILE_CNT 0x0C
4266 +#define IX_NPEDL_EXCTL_CMD_RD_INS_MEM 0x10
4267 +#define IX_NPEDL_EXCTL_CMD_WR_INS_MEM 0x11
4268 +#define IX_NPEDL_EXCTL_CMD_RD_DATA_MEM 0x12
4269 +#define IX_NPEDL_EXCTL_CMD_WR_DATA_MEM 0x13
4270 +#define IX_NPEDL_EXCTL_CMD_RD_ECS_REG 0x14
4271 +#define IX_NPEDL_EXCTL_CMD_WR_ECS_REG 0x15
4272 +
4273 +#define IX_NPEDL_EXCTL_STATUS_RUN 0x80000000
4274 +#define IX_NPEDL_EXCTL_STATUS_STOP 0x40000000
4275 +#define IX_NPEDL_EXCTL_STATUS_CLEAR 0x20000000
4276 +
4277 +#define IX_NPEDL_MASK_WFIFO_VALID 0x80000000
4278 +#define IX_NPEDL_MASK_STAT_OFNE 0x00010000
4279 +#define IX_NPEDL_MASK_STAT_IFNE 0x00080000
4280 +
4281 +#define IX_NPEDL_ECS_DBG_CTXT_REG_0 0x0C
4282 +#define IX_NPEDL_ECS_PRI_1_CTXT_REG_0 0x04
4283 +#define IX_NPEDL_ECS_PRI_2_CTXT_REG_0 0x08
4284 +
4285 +/* NPE control register bit definitions */
4286 +#define IX_NPEMH_NPE_CTL_OFE (1 << 16) /**< OutFifoEnable */
4287 +#define IX_NPEMH_NPE_CTL_IFE (1 << 17) /**< InFifoEnable */
4288 +#define IX_NPEMH_NPE_CTL_OFEWE (1 << 24) /**< OutFifoEnableWriteEnable */
4289 +#define IX_NPEMH_NPE_CTL_IFEWE (1 << 25) /**< InFifoEnableWriteEnable */
4290 +
4291 +/* NPE status register bit definitions */
4292 +#define IX_NPEMH_NPE_STAT_OFNE (1 << 16) /**< OutFifoNotEmpty */
4293 +#define IX_NPEMH_NPE_STAT_IFNF (1 << 17) /**< InFifoNotFull */
4294 +#define IX_NPEMH_NPE_STAT_OFNF (1 << 18) /**< OutFifoNotFull */
4295 +#define IX_NPEMH_NPE_STAT_IFNE (1 << 19) /**< InFifoNotEmpty */
4296 +#define IX_NPEMH_NPE_STAT_MBINT (1 << 20) /**< Mailbox interrupt */
4297 +#define IX_NPEMH_NPE_STAT_IFINT (1 << 21) /**< InFifo interrupt */
4298 +#define IX_NPEMH_NPE_STAT_OFINT (1 << 22) /**< OutFifo interrupt */
4299 +#define IX_NPEMH_NPE_STAT_WFINT (1 << 23) /**< WatchFifo interrupt */
4300 +
4301 +#endif
4302 +
4303 diff -Naur linux-2.6.19.orig/include/asm-arm/arch-ixp4xx/platform.h linux-2.6.19/include/asm-arm/arch-ixp4xx/platform.h
4304 --- linux-2.6.19.orig/include/asm-arm/arch-ixp4xx/platform.h 2006-11-29 14:57:37.000000000 -0700
4305 +++ linux-2.6.19/include/asm-arm/arch-ixp4xx/platform.h 2007-01-12 21:54:40.000000000 -0700
4306 @@ -89,6 +89,25 @@
4307
4308 struct sys_timer;
4309
4310 +struct npe_plat_data {
4311 + const char *name;
4312 + int data_size;
4313 + int inst_size;
4314 + int id; /* Node ID */
4315 +};
4316 +
4317 +struct mac_plat_info {
4318 + int npe_id; /* Node ID of the NPE for this port */
4319 + int port_id; /* Port ID for NPE-B @ ixp465 */
4320 + int eth_id; /* Physical ID */
4321 + int phy_id; /* ID of the connected PHY (PCB/platform dependent) */
4322 + int rxq_id; /* Queue ID of the RX-free q */
4323 + int rxdoneq_id; /* where incoming packets are returned */
4324 + int txq_id; /* Where to push the outgoing packets */
4325 + unsigned char hwaddr[6]; /* Desired hardware address */
4326 +
4327 +};
4328 +
4329 /*
4330 * Frequency of clock used for primary clocksource
4331 */
4332 diff -Naur linux-2.6.19.orig/include/linux/ixp_crypto.h linux-2.6.19/include/linux/ixp_crypto.h
4333 --- linux-2.6.19.orig/include/linux/ixp_crypto.h 1969-12-31 17:00:00.000000000 -0700
4334 +++ linux-2.6.19/include/linux/ixp_crypto.h 2007-01-12 21:54:40.000000000 -0700
4335 @@ -0,0 +1,192 @@
4336 +
4337 +#ifndef IX_CRYPTO_H
4338 +#define IX_CRYPTO_H
4339 +
4340 +#define MAX_KEYLEN 64
4341 +#define NPE_CTX_LEN 80
4342 +#define AES_BLOCK128 16
4343 +
4344 +#define NPE_OP_HASH_GEN_ICV 0x50
4345 +#define NPE_OP_ENC_GEN_KEY 0xc9
4346 +
4347 +
4348 +#define NPE_OP_HASH_VERIFY 0x01
4349 +#define NPE_OP_CCM_ENABLE 0x04
4350 +#define NPE_OP_CRYPT_ENABLE 0x08
4351 +#define NPE_OP_HASH_ENABLE 0x10
4352 +#define NPE_OP_NOT_IN_PLACE 0x20
4353 +#define NPE_OP_HMAC_DISABLE 0x40
4354 +#define NPE_OP_CRYPT_ENCRYPT 0x80
4355 +
4356 +#define MOD_ECB 0x0000
4357 +#define MOD_CTR 0x1000
4358 +#define MOD_CBC_ENC 0x2000
4359 +#define MOD_CBC_DEC 0x3000
4360 +#define MOD_CCM_ENC 0x4000
4361 +#define MOD_CCM_DEC 0x5000
4362 +
4363 +#define ALGO_AES 0x0800
4364 +#define CIPH_DECR 0x0000
4365 +#define CIPH_ENCR 0x0400
4366 +
4367 +#define MOD_DES 0x0000
4368 +#define MOD_TDEA2 0x0100
4369 +#define MOD_TDEA3 0x0200
4370 +#define MOD_AES128 0x0000
4371 +#define MOD_AES192 0x0100
4372 +#define MOD_AES256 0x0200
4373 +
4374 +#define KEYLEN_128 4
4375 +#define KEYLEN_192 6
4376 +#define KEYLEN_256 8
4377 +
4378 +#define CIPHER_TYPE_NULL 0
4379 +#define CIPHER_TYPE_DES 1
4380 +#define CIPHER_TYPE_3DES 2
4381 +#define CIPHER_TYPE_AES 3
4382 +
4383 +#define CIPHER_MODE_ECB 1
4384 +#define CIPHER_MODE_CTR 2
4385 +#define CIPHER_MODE_CBC 3
4386 +#define CIPHER_MODE_CCM 4
4387 +
4388 +#define HASH_TYPE_NULL 0
4389 +#define HASH_TYPE_MD5 1
4390 +#define HASH_TYPE_SHA1 2
4391 +#define HASH_TYPE_CBCMAC 3
4392 +
4393 +#define OP_REG_DONE 1
4394 +#define OP_REGISTER 2
4395 +#define OP_PERFORM 3
4396 +
4397 +#define STATE_UNREGISTERED 0
4398 +#define STATE_REGISTERED 1
4399 +#define STATE_UNLOADING 2
4400 +
4401 +struct crypt_ctl {
4402 +#ifndef CONFIG_NPE_ADDRESS_COHERENT
4403 + u8 mode; /* NPE operation */
4404 + u8 init_len;
4405 + u16 reserved;
4406 +#else
4407 + u16 reserved;
4408 + u8 init_len;
4409 + u8 mode; /* NPE operation */
4410 +#endif
4411 + u8 iv[16]; /* IV for CBC mode or CTR IV for CTR mode */
4412 + union {
4413 + u32 icv;
4414 + u32 rev_aes;
4415 + } addr;
4416 + u32 src_buf;
4417 + u32 dest_buf;
4418 +#ifndef CONFIG_NPE_ADDRESS_COHERENT
4419 + u16 auth_offs; /* Authentication start offset */
4420 + u16 auth_len; /* Authentication data length */
4421 + u16 crypt_offs; /* Cryption start offset */
4422 + u16 crypt_len; /* Cryption data length */
4423 +#else
4424 + u16 auth_len; /* Authentication data length */
4425 + u16 auth_offs; /* Authentication start offset */
4426 + u16 crypt_len; /* Cryption data length */
4427 + u16 crypt_offs; /* Cryption start offset */
4428 +#endif
4429 + u32 aadAddr; /* Additional Auth Data Addr for CCM mode */
4430 + u32 crypto_ctx; /* NPE Crypto Param structure address */
4431 +
4432 + /* Used by Host */
4433 + struct ix_sa_ctx *sa_ctx;
4434 + int oper_type;
4435 +};
4436 +
4437 +struct npe_crypt_cont {
4438 + union {
4439 + struct crypt_ctl crypt;
4440 + u8 rev_aes_key[NPE_CTX_LEN];
4441 + } ctl;
4442 + struct npe_crypt_cont *next;
4443 + struct npe_crypt_cont *virt;
4444 + dma_addr_t phys;
4445 +};
4446 +
4447 +struct ix_hash_algo {
4448 + char *name;
4449 + u32 cfgword;
4450 + int digest_len;
4451 + int aad_len;
4452 + unsigned char *icv;
4453 + int type;
4454 +};
4455 +
4456 +struct ix_cipher_algo {
4457 + char *name;
4458 + u32 cfgword_enc;
4459 + u32 cfgword_dec;
4460 + int block_len;
4461 + int iv_len;
4462 + int type;
4463 + int mode;
4464 +};
4465 +
4466 +struct ix_key {
4467 + u8 key[MAX_KEYLEN];
4468 + int len;
4469 +};
4470 +
4471 +struct ix_sa_master {
4472 + struct device *npe_dev;
4473 + struct qm_queue *sendq;
4474 + struct qm_queue *recvq;
4475 + struct dma_pool *dmapool;
4476 + struct npe_crypt_cont *pool;
4477 + int pool_size;
4478 + rwlock_t lock;
4479 +};
4480 +
4481 +struct ix_sa_dir {
4482 + unsigned char *npe_ctx;
4483 + dma_addr_t npe_ctx_phys;
4484 + int npe_ctx_idx;
4485 + u8 npe_mode;
4486 +};
4487 +
4488 +struct ix_sa_ctx {
4489 + struct list_head list;
4490 + struct ix_sa_master *master;
4491 +
4492 + const struct ix_hash_algo *h_algo;
4493 + const struct ix_cipher_algo *c_algo;
4494 + struct ix_key c_key;
4495 + struct ix_key h_key;
4496 +
4497 + int digest_len;
4498 +
4499 + struct ix_sa_dir encrypt;
4500 + struct ix_sa_dir decrypt;
4501 +
4502 + struct npe_crypt_cont *rev_aes;
4503 + gfp_t gfp_flags;
4504 +
4505 + int state;
4506 + void *priv;
4507 +
4508 + void(*reg_cb)(struct ix_sa_ctx*, int);
4509 + void(*perf_cb)(struct ix_sa_ctx*, void*, int);
4510 + atomic_t use_cnt;
4511 +};
4512 +
4513 +const struct ix_hash_algo *ix_hash_by_id(int type);
4514 +const struct ix_cipher_algo *ix_cipher_by_id(int type, int mode);
4515 +
4516 +struct ix_sa_ctx *ix_sa_ctx_new(int priv_len, gfp_t flags);
4517 +void ix_sa_ctx_free(struct ix_sa_ctx *sa_ctx);
4518 +
4519 +int ix_sa_crypto_perform(struct ix_sa_ctx *sa_ctx, u8 *data, void *ptr,
4520 + int datalen, int c_offs, int c_len, int a_offs, int a_len,
4521 + int hmac, char *iv, int encrypt);
4522 +
4523 +int ix_sa_ctx_setup_cipher_auth(struct ix_sa_ctx *sa_ctx,
4524 + const struct ix_cipher_algo *cipher,
4525 + const struct ix_hash_algo *auth, int len);
4526 +
4527 +#endif
4528 diff -Naur linux-2.6.19.orig/include/linux/ixp_npe.h linux-2.6.19/include/linux/ixp_npe.h
4529 --- linux-2.6.19.orig/include/linux/ixp_npe.h 1969-12-31 17:00:00.000000000 -0700
4530 +++ linux-2.6.19/include/linux/ixp_npe.h 2007-01-12 21:54:40.000000000 -0700
4531 @@ -0,0 +1,117 @@
4532 +/*
4533 + * Copyright (C) 2006 Christian Hohnstaedt <chohnstaedt@innominate.com>
4534 + *
4535 + * This file is released under the GPLv2
4536 + */
4537 +
4538 +#ifndef NPE_DEVICE_H
4539 +#define NPE_DEVICE_H
4540 +
4541 +#include <linux/miscdevice.h>
4542 +#include <asm/hardware.h>
4543 +
4544 +#ifdef __ARMEB__
4545 +#undef CONFIG_NPE_ADDRESS_COHERENT
4546 +#else
4547 +#define CONFIG_NPE_ADDRESS_COHERENT
4548 +#endif
4549 +
4550 +#if defined(__ARMEB__) || defined (CONFIG_NPE_ADDRESS_COHERENT)
4551 +#define npe_to_cpu32(x) (x)
4552 +#define npe_to_cpu16(x) (x)
4553 +#define cpu_to_npe32(x) (x)
4554 +#define cpu_to_npe16(x) (x)
4555 +#else
4556 +#error NPE_DATA_COHERENT
4557 +#define NPE_DATA_COHERENT
4558 +#define npe_to_cpu32(x) be32_to_cpu(x)
4559 +#define npe_to_cpu16(x) be16_to_cpu(x)
4560 +#define cpu_to_npe32(x) cpu_to_be32(x)
4561 +#define cpu_to_npe16(x) cpu_to_be16(x)
4562 +#endif
4563 +
4564 +
4565 +struct npe_info {
4566 + struct resource *res;
4567 + void __iomem *addr;
4568 + struct npe_plat_data *plat;
4569 + u8 img_info[4];
4570 + int usage;
4571 + int loaded;
4572 + u32 exec_count;
4573 + u32 ctx_reg2;
4574 +};
4575 +
4576 +
4577 +static inline void npe_reg_write(struct npe_info *npe, u32 reg, u32 val)
4578 +{
4579 + *(volatile u32*)((u8*)(npe->addr) + reg) = val;
4580 +}
4581 +
4582 +static inline u32 npe_reg_read(struct npe_info *npe, u32 reg)
4583 +{
4584 + return *(volatile u32*)((u8*)(npe->addr) + reg);
4585 +}
4586 +
4587 +static inline u32 npe_status(struct npe_info *npe)
4588 +{
4589 + return npe_reg_read(npe, IX_NPEDL_REG_OFFSET_EXCTL);
4590 +}
4591 +
4592 +/* ixNpeDlNpeMgrCommandIssue */
4593 +static inline void npe_write_exctl(struct npe_info *npe, u32 cmd)
4594 +{
4595 + npe_reg_write(npe, IX_NPEDL_REG_OFFSET_EXCTL, cmd);
4596 +}
4597 +/* ixNpeDlNpeMgrWriteCommandIssue */
4598 +static inline void
4599 +npe_write_cmd(struct npe_info *npe, u32 addr, u32 data, int cmd)
4600 +{
4601 + npe_reg_write(npe, IX_NPEDL_REG_OFFSET_EXDATA, data);
4602 + npe_reg_write(npe, IX_NPEDL_REG_OFFSET_EXAD, addr);
4603 + npe_reg_write(npe, IX_NPEDL_REG_OFFSET_EXCTL, cmd);
4604 +}
4605 +/* ixNpeDlNpeMgrReadCommandIssue */
4606 +static inline u32
4607 +npe_read_cmd(struct npe_info *npe, u32 addr, int cmd)
4608 +{
4609 + npe_reg_write(npe, IX_NPEDL_REG_OFFSET_EXAD, addr);
4610 + npe_reg_write(npe, IX_NPEDL_REG_OFFSET_EXCTL, cmd);
4611 + /* Intel reads the data twice - so do we... */
4612 + npe_reg_read(npe, IX_NPEDL_REG_OFFSET_EXDATA);
4613 + return npe_reg_read(npe, IX_NPEDL_REG_OFFSET_EXDATA);
4614 +}
4615 +
4616 +/* ixNpeDlNpeMgrExecAccRegWrite */
4617 +static inline void npe_write_ecs_reg(struct npe_info *npe, u32 addr, u32 data)
4618 +{
4619 + npe_write_cmd(npe, addr, data, IX_NPEDL_EXCTL_CMD_WR_ECS_REG);
4620 +}
4621 +/* ixNpeDlNpeMgrExecAccRegRead */
4622 +static inline u32 npe_read_ecs_reg(struct npe_info *npe, u32 addr)
4623 +{
4624 + return npe_read_cmd(npe, addr, IX_NPEDL_EXCTL_CMD_RD_ECS_REG);
4625 +}
4626 +
4627 +extern void npe_stop(struct npe_info *npe);
4628 +extern void npe_start(struct npe_info *npe);
4629 +extern void npe_reset(struct npe_info *npe);
4630 +
4631 +extern struct device *get_npe_by_id(int id);
4632 +extern void return_npe_dev(struct device *dev);
4633 +
4634 +/* NPE Messages */
4635 +extern int
4636 +npe_mh_status(struct npe_info *npe);
4637 +extern int
4638 +npe_mh_setportaddr(struct npe_info *npe, struct mac_plat_info *mp, u8 *macaddr);
4639 +extern int
4640 +npe_mh_disable_firewall(struct npe_info *npe, struct mac_plat_info *mp);
4641 +extern int
4642 +npe_mh_set_rxqid(struct npe_info *npe, struct mac_plat_info *mp, int qid);
4643 +extern int
4644 +npe_mh_npe_loopback_mode(struct npe_info *npe, struct mac_plat_info *mp, int enable);
4645 +extern int
4646 +npe_mh_get_stats(struct npe_info *npe, struct mac_plat_info *mp, u32 phys, int reset);
4647 +
4648 +#endif
4649 diff -Naur linux-2.6.19.orig/include/linux/ixp_qmgr.h linux-2.6.19/include/linux/ixp_qmgr.h
4650 --- linux-2.6.19.orig/include/linux/ixp_qmgr.h 1969-12-31 17:00:00.000000000 -0700
4651 +++ linux-2.6.19/include/linux/ixp_qmgr.h 2007-01-12 21:54:40.000000000 -0700
4652 @@ -0,0 +1,202 @@
4653 +/*
4654 + * Copyright (C) 2006 Christian Hohnstaedt <chohnstaedt@innominate.com>
4655 + *
4656 + * This file is released under the GPLv2
4657 + */
4658 +
4659 +#ifndef IX_QMGR_H
4660 +#define IX_QMGR_H
4661 +
4662 +#include <linux/skbuff.h>
4663 +#include <linux/list.h>
4664 +#include <linux/if_ether.h>
4665 +#include <linux/spinlock.h>
4666 +#include <linux/platform_device.h>
4667 +#include <linux/ixp_npe.h>
4668 +#include <asm/atomic.h>
4669 +
4670 +/* All offsets are in 32bit words */
4671 +#define QUE_LOW_STAT0 0x100 /* 4x Status of the 32 lower queues 0-31 */
4672 +#define QUE_UO_STAT0 0x104 /* 2x Underflow/Overflow status bits*/
4673 +#define QUE_UPP_STAT0 0x106 /* 2x Status of thew 32 upper queues 32-63 */
4674 +#define INT0_SRC_SELREG0 0x108 /* 4x */
4675 +#define QUE_IE_REG0 0x10c /* 2x */
4676 +#define QUE_INT_REG0 0x10e /* 2x IRQ reg, write 1 to reset IRQ */
4677 +
4678 +#define IX_QMGR_QCFG_BASE 0x800
4679 +#define IX_QMGR_QCFG_SIZE 0x40
4680 +#define IX_QMGR_SRAM_SPACE (IX_QMGR_QCFG_BASE + IX_QMGR_QCFG_SIZE)
4681 +
4682 +#define MAX_QUEUES 32 /* first, we only support the lower 32 queues */
4683 +#define MAX_NPES 3
4684 +
4685 +enum {
4686 + Q_IRQ_ID_E = 0, /* Queue Empty due to last read */
4687 + Q_IRQ_ID_NE, /* Queue Nearly Empty due to last read */
4688 + Q_IRQ_ID_NF, /* Queue Nearly Full due to last write */
4689 + Q_IRQ_ID_F, /* Queue Full due to last write */
4690 + Q_IRQ_ID_NOT_E, /* Queue Not Empty due to last write */
4691 + Q_IRQ_ID_NOT_NE, /* Queue Not Nearly Empty due to last write */
4692 + Q_IRQ_ID_NOT_NF, /* Queue Not Nearly Full due to last read */
4693 + Q_IRQ_ID_NOT_F /* Queue Not Full due to last read */
4694 +};
4695 +
4696 +extern struct qm_queue *request_queue(int qid, int len);
4697 +extern void release_queue(struct qm_queue *queue);
4698 +extern int queue_set_irq_src(struct qm_queue *queue, int flag);
4699 +extern void queue_set_watermarks(struct qm_queue *, unsigned ne, unsigned nf);
4700 +extern int queue_len(struct qm_queue *queue);
4701 +
4702 +struct qm_qmgr;
4703 +struct qm_queue;
4704 +
4705 +typedef void(*queue_cb)(struct qm_queue *);
4706 +
4707 +struct qm_queue {
4708 + int addr; /* word offset from IX_QMGR_SRAM_SPACE */
4709 + int len; /* size in words */
4710 + int id; /* Q Id */
4711 + u32 __iomem *acc_reg;
4712 + struct device *dev;
4713 + atomic_t use;
4714 + queue_cb irq_cb;
4715 + void *cb_data;
4716 +};
4717 +
4718 +#ifndef CONFIG_NPE_ADDRESS_COHERENT
4719 +struct eth_ctl {
4720 + u32 next;
4721 + u16 buf_len;
4722 + u16 pkt_len;
4723 + u32 phys_addr;
4724 + u8 dest_id;
4725 + u8 src_id;
4726 + u16 flags;
4727 + u8 qos;
4728 + u8 padlen;
4729 + u16 vlan_tci;
4730 + u8 dest_mac[ETH_ALEN];
4731 + u8 src_mac[ETH_ALEN];
4732 +};
4733 +
4734 +#else
4735 +struct eth_ctl {
4736 + u32 next;
4737 + u16 pkt_len;
4738 + u16 buf_len;
4739 + u32 phys_addr;
4740 + u16 flags;
4741 + u8 src_id;
4742 + u8 dest_id;
4743 + u16 vlan_tci;
4744 + u8 padlen;
4745 + u8 qos;
4746 + u8 dest_mac[ETH_ALEN];
4747 + u8 src_mac[ETH_ALEN];
4748 +};
4749 +#endif
4750 +
4751 +struct npe_cont {
4752 + struct eth_ctl eth;
4753 + void *data;
4754 + struct npe_cont *next;
4755 + struct npe_cont *virt;
4756 + dma_addr_t phys;
4757 +};
4758 +
4759 +struct qm_qmgr {
4760 + u32 __iomem *addr;
4761 + struct resource *res;
4762 + struct qm_queue *queues[MAX_QUEUES];
4763 + rwlock_t lock;
4764 + struct npe_cont *pool;
4765 + struct dma_pool *dmapool;
4766 + int irq;
4767 +};
4768 +
4769 +static inline void queue_write_cfg_reg(struct qm_queue *queue, u32 val)
4770 +{
4771 + struct qm_qmgr *qmgr = dev_get_drvdata(queue->dev);
4772 + *(qmgr->addr + IX_QMGR_QCFG_BASE + queue->id) = val;
4773 +}
4774 +static inline u32 queue_read_cfg_reg(struct qm_queue *queue)
4775 +{
4776 + struct qm_qmgr *qmgr = dev_get_drvdata(queue->dev);
4777 + return *(qmgr->addr + IX_QMGR_QCFG_BASE + queue->id);
4778 +}
4779 +
4780 +static inline void queue_ack_irq(struct qm_queue *queue)
4781 +{
4782 + struct qm_qmgr *qmgr = dev_get_drvdata(queue->dev);
4783 + *(qmgr->addr + QUE_INT_REG0) = 1 << queue->id;
4784 +}
4785 +
4786 +static inline void queue_enable_irq(struct qm_queue *queue)
4787 +{
4788 + struct qm_qmgr *qmgr = dev_get_drvdata(queue->dev);
4789 + *(qmgr->addr + QUE_IE_REG0) |= 1 << queue->id;
4790 +}
4791 +
4792 +static inline void queue_disable_irq(struct qm_queue *queue)
4793 +{
4794 + struct qm_qmgr *qmgr = dev_get_drvdata(queue->dev);
4795 + *(qmgr->addr + QUE_IE_REG0) &= ~(1 << queue->id);
4796 +}
4797 +
4798 +static inline void queue_put_entry(struct qm_queue *queue, u32 entry)
4799 +{
4800 +	*(queue->acc_reg) = cpu_to_npe32(entry);
4801 +}
4802 +
4803 +static inline u32 queue_get_entry(struct qm_queue *queue)
4804 +{
4805 +	return npe_to_cpu32(*queue->acc_reg);
4806 +}
4807 +
4808 +static inline struct npe_cont *qmgr_get_cont(struct qm_qmgr *qmgr)
4809 +{
4810 + unsigned long flags;
4811 + struct npe_cont *cont;
4812 +
4813 + if (!qmgr->pool)
4814 + return NULL;
4815 + write_lock_irqsave(&qmgr->lock, flags);
4816 + cont = qmgr->pool;
4817 + qmgr->pool = cont->next;
4818 + write_unlock_irqrestore(&qmgr->lock, flags);
4819 + return cont;
4820 +}
4821 +
4822 +static inline void qmgr_return_cont(struct qm_qmgr *qmgr,struct npe_cont *cont)
4823 +{
4824 + unsigned long flags;
4825 +
4826 + write_lock_irqsave(&qmgr->lock, flags);
4827 + cont->next = qmgr->pool;
4828 + qmgr->pool = cont;
4829 + write_unlock_irqrestore(&qmgr->lock, flags);
4830 +}
4831 +
4832 +static inline int queue_stat(struct qm_queue *queue)
4833 +{
4834 +	struct qm_qmgr *qmgr = dev_get_drvdata(queue->dev);
4835 +	u32 reg = *(qmgr->addr + QUE_UO_STAT0 + (queue->id >> 4));
4836 +	return (reg >> ((queue->id & 0xf) << 1)) & 3;
4837 +}
4838 +
4839 +/* Prints the queue state, which is very, very helpfull for debugging */
4840 +static inline void queue_state(struct qm_queue *queue)
4841 +{
4842 + u32 val=0, lstat=0;
4843 + int offs;
4844 + struct qm_qmgr *qmgr = dev_get_drvdata(queue->dev);
4845 +
4846 + offs = queue->id/8 + QUE_LOW_STAT0;
4847 + val = *(qmgr->addr + IX_QMGR_QCFG_BASE + queue->id);
4848 + lstat = (*(qmgr->addr + offs) >> ((queue->id % 8)*4)) & 0x0f;
4849 +
4850 + printk("Qid[%02d]: Wptr=%4x, Rptr=%4x, diff=%4x, Stat:%x\n", queue->id,
4851 + val&0x7f, (val>>7) &0x7f, (val - (val >> 7)) & 0x7f, lstat);
4852 +}
4853 +
4854 +#endif