openwrt/svn-archive/archive.git: target/linux/generic-2.6/patches/130-netfilter_ipset.patch
1 Index: linux-2.6.21.7/include/linux/netfilter_ipv4/ip_set.h
2 ===================================================================
3 --- /dev/null
4 +++ linux-2.6.21.7/include/linux/netfilter_ipv4/ip_set.h
5 @@ -0,0 +1,498 @@
6 +#ifndef _IP_SET_H
7 +#define _IP_SET_H
8 +
9 +/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
10 + * Patrick Schaaf <bof@bof.de>
11 + * Martin Josefsson <gandalf@wlug.westbo.se>
12 + * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
13 + *
14 + * This program is free software; you can redistribute it and/or modify
15 + * it under the terms of the GNU General Public License version 2 as
16 + * published by the Free Software Foundation.
17 + */
18 +
19 +#if 0
20 +#define IP_SET_DEBUG
21 +#endif
22 +
23 +/*
24 + * A sockopt of such quality has hardly ever been seen before on the open
25 + * market! This little beauty, hardly ever used: above 64, so it's
26 + * traditionally used for firewalling, not touched (even once!) by the
27 + * 2.0, 2.2 and 2.4 kernels!
28 + *
29 + * Comes with its own certificate of authenticity, valid anywhere in the
30 + * Free world!
31 + *
32 + * Rusty, 19.4.2000
33 + */
34 +#define SO_IP_SET 83
35 +
36 +/*
37 + * Heavily modified by Joakim Axelsson 08.03.2002
38 + * - Made it more module-based
39 + *
40 + * Additional heavy modifications by Jozsef Kadlecsik 22.02.2004
41 + * - bindings added
42 + * - in order to "deal with" backward compatibility, renamed to ipset
43 + */
44 +
45 +/*
46 + * Used so that the kernel module and ipset-binary can match their versions
47 + */
48 +#define IP_SET_PROTOCOL_VERSION 2
49 +
50 +#define IP_SET_MAXNAMELEN 32 /* set names and set typenames */
51 +
52 +/* Let's work with our own typedef for representing an IP address.
53 + * We hope to make the code more portable, possibly to IPv6...
54 + *
55 + * The representation works in HOST byte order, because most set types
56 + * will perform arithmetic operations and compare operations.
57 + *
58 + * For now the type is a uint32_t.
59 + *
60 + * Make sure to ONLY use the functions when translating and parsing
61 + * in order to keep the host byte order and make it more portable:
62 + * parse_ip()
63 + * parse_mask()
64 + * parse_ipandmask()
65 + * ip_tostring()
66 + * (Joakim: where are they???)
67 + */
68 +
69 +typedef uint32_t ip_set_ip_t;
70 +
71 +/* Sets are identified by an id in kernel space. Tweak with ip_set_id_t
72 + * and IP_SET_INVALID_ID if you want to increase the max number of sets.
73 + */
74 +typedef uint16_t ip_set_id_t;
75 +
76 +#define IP_SET_INVALID_ID 65535
77 +
78 +/* How deep we follow bindings */
79 +#define IP_SET_MAX_BINDINGS 6
80 +
81 +/*
82 + * Option flags for kernel operations (ipt_set_info)
83 + */
84 +#define IPSET_SRC 0x01 /* Source match/add */
85 +#define IPSET_DST 0x02 /* Destination match/add */
86 +#define IPSET_MATCH_INV 0x04 /* Inverse matching */
87 +
88 +/*
89 + * Set features
90 + */
91 +#define IPSET_TYPE_IP 0x01 /* IP address type of set */
92 +#define IPSET_TYPE_PORT 0x02 /* Port type of set */
93 +#define IPSET_DATA_SINGLE 0x04 /* Single data storage */
94 +#define IPSET_DATA_DOUBLE 0x08 /* Double data storage */
95 +
96 +/* Reserved keywords */
97 +#define IPSET_TOKEN_DEFAULT ":default:"
98 +#define IPSET_TOKEN_ALL ":all:"
99 +
100 +/* SO_IP_SET operation constants, and their request struct types.
101 + *
102 + * Operation ids:
103 + * 0-99: commands with version checking
104 + * 100-199: add/del/test/bind/unbind
105 + * 200-299: list, save, restore
106 + */
107 +
108 +/* Single shot operations:
109 + * version, create, destroy, flush, rename and swap
110 + *
111 + * Sets are identified by name.
112 + */
113 +
114 +#define IP_SET_REQ_STD \
115 + unsigned op; \
116 + unsigned version; \
117 + char name[IP_SET_MAXNAMELEN]
118 +
119 +#define IP_SET_OP_CREATE 0x00000001 /* Create a new (empty) set */
120 +struct ip_set_req_create {
121 + IP_SET_REQ_STD;
122 + char typename[IP_SET_MAXNAMELEN];
123 +};
124 +
125 +#define IP_SET_OP_DESTROY	0x00000002	/* Remove an (empty) set */
126 +struct ip_set_req_std {
127 + IP_SET_REQ_STD;
128 +};
129 +
130 +#define IP_SET_OP_FLUSH 0x00000003 /* Remove all IPs in a set */
131 +/* Uses ip_set_req_std */
132 +
133 +#define IP_SET_OP_RENAME 0x00000004 /* Rename a set */
134 +/* Uses ip_set_req_create */
135 +
136 +#define IP_SET_OP_SWAP 0x00000005 /* Swap two sets */
137 +/* Uses ip_set_req_create */
138 +
139 +union ip_set_name_index {
140 + char name[IP_SET_MAXNAMELEN];
141 + ip_set_id_t index;
142 +};
143 +
144 +#define IP_SET_OP_GET_BYNAME 0x00000006 /* Get set index by name */
145 +struct ip_set_req_get_set {
146 + unsigned op;
147 + unsigned version;
148 + union ip_set_name_index set;
149 +};
150 +
151 +#define IP_SET_OP_GET_BYINDEX 0x00000007 /* Get set name by index */
152 +/* Uses ip_set_req_get_set */
153 +
154 +#define IP_SET_OP_VERSION 0x00000100 /* Ask kernel version */
155 +struct ip_set_req_version {
156 + unsigned op;
157 + unsigned version;
158 +};
159 +
160 +/* Double shot operations:
161 + * add, del, test, bind and unbind.
162 + *
163 + * First we query the kernel to get the index and type of the target set,
164 + * then issue the command. The validity of the IP is checked in the kernel
165 + * in order to minimize sockopt operations.
166 + */
167 +
168 +/* Get minimal set data for add/del/test/bind/unbind IP */
169 +#define IP_SET_OP_ADT_GET 0x00000010 /* Get set and type */
170 +struct ip_set_req_adt_get {
171 + unsigned op;
172 + unsigned version;
173 + union ip_set_name_index set;
174 + char typename[IP_SET_MAXNAMELEN];
175 +};
176 +
177 +#define IP_SET_REQ_BYINDEX \
178 + unsigned op; \
179 + ip_set_id_t index;
180 +
181 +struct ip_set_req_adt {
182 + IP_SET_REQ_BYINDEX;
183 +};
184 +
185 +#define IP_SET_OP_ADD_IP 0x00000101 /* Add an IP to a set */
186 +/* Uses ip_set_req_adt, with type specific addage */
187 +
188 +#define IP_SET_OP_DEL_IP 0x00000102 /* Remove an IP from a set */
189 +/* Uses ip_set_req_adt, with type specific addage */
190 +
191 +#define IP_SET_OP_TEST_IP 0x00000103 /* Test an IP in a set */
192 +/* Uses ip_set_req_adt, with type specific addage */
193 +
194 +#define IP_SET_OP_BIND_SET 0x00000104 /* Bind an IP to a set */
195 +/* Uses ip_set_req_bind, with type specific addage */
196 +struct ip_set_req_bind {
197 + IP_SET_REQ_BYINDEX;
198 + char binding[IP_SET_MAXNAMELEN];
199 +};
200 +
201 +#define IP_SET_OP_UNBIND_SET 0x00000105 /* Unbind an IP from a set */
202 +/* Uses ip_set_req_bind, with type specific addage
203 + * index = 0 means unbinding for all sets */
204 +
205 +#define IP_SET_OP_TEST_BIND_SET 0x00000106 /* Test binding an IP to a set */
206 +/* Uses ip_set_req_bind, with type specific addage */
207 +
208 +/* Multiple shot operations: list, save, restore.
209 + *
210 + * - check kernel version and query the max number of sets
211 + * - get the basic information on all sets
212 + * and size required for the next step
213 + * - get actual set data: header, data, bindings
214 + */
215 +
216 +/* Get max_sets and the index of a queried set
217 + */
218 +#define IP_SET_OP_MAX_SETS 0x00000020
219 +struct ip_set_req_max_sets {
220 + unsigned op;
221 + unsigned version;
222 +	ip_set_id_t max_sets;		/* maximum number of sets */
223 + ip_set_id_t sets; /* real number of sets */
224 + union ip_set_name_index set; /* index of set if name used */
225 +};
226 +
227 +/* Get the id and name of the sets plus size for next step */
228 +#define IP_SET_OP_LIST_SIZE 0x00000201
229 +#define IP_SET_OP_SAVE_SIZE 0x00000202
230 +struct ip_set_req_setnames {
231 + unsigned op;
232 + ip_set_id_t index; /* set to list/save */
233 + size_t size; /* size to get setdata/bindings */
234 + /* followed by sets number of struct ip_set_name_list */
235 +};
236 +
237 +struct ip_set_name_list {
238 + char name[IP_SET_MAXNAMELEN];
239 + char typename[IP_SET_MAXNAMELEN];
240 + ip_set_id_t index;
241 + ip_set_id_t id;
242 +};
243 +
244 +/* The actual list operation */
245 +#define IP_SET_OP_LIST 0x00000203
246 +struct ip_set_req_list {
247 + IP_SET_REQ_BYINDEX;
248 + /* sets number of struct ip_set_list in reply */
249 +};
250 +
251 +struct ip_set_list {
252 + ip_set_id_t index;
253 + ip_set_id_t binding;
254 + u_int32_t ref;
255 + size_t header_size; /* Set header data of header_size */
256 + size_t members_size; /* Set members data of members_size */
257 + size_t bindings_size; /* Set bindings data of bindings_size */
258 +};
259 +
260 +struct ip_set_hash_list {
261 + ip_set_ip_t ip;
262 + ip_set_id_t binding;
263 +};
264 +
265 +/* The save operation */
266 +#define IP_SET_OP_SAVE 0x00000204
267 +/* Uses ip_set_req_list, in the reply replaced by
268 + * sets number of struct ip_set_save plus a marker
269 + * ip_set_save followed by ip_set_hash_save structures.
270 + */
271 +struct ip_set_save {
272 + ip_set_id_t index;
273 + ip_set_id_t binding;
274 + size_t header_size; /* Set header data of header_size */
275 + size_t members_size; /* Set members data of members_size */
276 +};
277 +
278 +/* At restoring, ip == 0 means default binding for the given set: */
279 +struct ip_set_hash_save {
280 + ip_set_ip_t ip;
281 + ip_set_id_t id;
282 + ip_set_id_t binding;
283 +};
284 +
285 +/* The restore operation */
286 +#define IP_SET_OP_RESTORE 0x00000205
287 +/* Uses ip_set_req_setnames followed by ip_set_restore structures
288 + * plus a marker ip_set_restore, followed by ip_set_hash_save
289 + * structures.
290 + */
291 +struct ip_set_restore {
292 + char name[IP_SET_MAXNAMELEN];
293 + char typename[IP_SET_MAXNAMELEN];
294 + ip_set_id_t index;
295 + size_t header_size; /* Create data of header_size */
296 + size_t members_size; /* Set members data of members_size */
297 +};
298 +
299 +static inline int bitmap_bytes(ip_set_ip_t a, ip_set_ip_t b)
300 +{
301 + return 4 * ((((b - a + 8) / 8) + 3) / 4);
302 +}
303 +
304 +#ifdef __KERNEL__
305 +
306 +#define ip_set_printk(format, args...) \
307 + do { \
308 + printk("%s: %s: ", __FILE__, __FUNCTION__); \
309 + printk(format "\n" , ## args); \
310 + } while (0)
311 +
312 +#if defined(IP_SET_DEBUG)
313 +#define DP(format, args...) \
314 + do { \
315 + printk("%s: %s (DBG): ", __FILE__, __FUNCTION__);\
316 + printk(format "\n" , ## args); \
317 + } while (0)
318 +#define IP_SET_ASSERT(x) \
319 + do { \
320 + if (!(x)) \
321 + printk("IP_SET_ASSERT: %s:%i(%s)\n", \
322 + __FILE__, __LINE__, __FUNCTION__); \
323 + } while (0)
324 +#else
325 +#define DP(format, args...)
326 +#define IP_SET_ASSERT(x)
327 +#endif
328 +
329 +struct ip_set;
330 +
331 +/*
332 + * The ip_set_type definition - one per set type, e.g. "ipmap".
333 + *
334 + * Each individual set has a pointer, set->type, going to one
335 + * of these structures. Function pointers inside the structure implement
336 + * the real behaviour of the sets.
337 + *
338 + * If not mentioned differently, the implementation behind the function
339 + * pointers of a set_type is expected to return 0 if ok, and a negative
340 + * errno (e.g. -EINVAL) on error.
341 + */
342 +struct ip_set_type {
343 + struct list_head list; /* next in list of set types */
344 +
345 + /* test for IP in set (kernel: iptables -m set src|dst)
346 + * return 0 if not in set, 1 if in set.
347 + */
348 + int (*testip_kernel) (struct ip_set *set,
349 + const struct sk_buff * skb,
350 + ip_set_ip_t *ip,
351 + const u_int32_t *flags,
352 + unsigned char index);
353 +
354 + /* test for IP in set (userspace: ipset -T set IP)
355 + * return 0 if not in set, 1 if in set.
356 + */
357 + int (*testip) (struct ip_set *set,
358 + const void *data, size_t size,
359 + ip_set_ip_t *ip);
360 +
361 + /*
362 +	 * Size of the data structure passed in when
363 +	 * adding/deleting/testing an entry.
364 + */
365 + size_t reqsize;
366 +
367 + /* Add IP into set (userspace: ipset -A set IP)
368 + * Return -EEXIST if the address is already in the set,
369 + * and -ERANGE if the address lies outside the set bounds.
370 + * If the address was not already in the set, 0 is returned.
371 + */
372 + int (*addip) (struct ip_set *set,
373 + const void *data, size_t size,
374 + ip_set_ip_t *ip);
375 +
376 + /* Add IP into set (kernel: iptables ... -j SET set src|dst)
377 + * Return -EEXIST if the address is already in the set,
378 + * and -ERANGE if the address lies outside the set bounds.
379 + * If the address was not already in the set, 0 is returned.
380 + */
381 + int (*addip_kernel) (struct ip_set *set,
382 + const struct sk_buff * skb,
383 + ip_set_ip_t *ip,
384 + const u_int32_t *flags,
385 + unsigned char index);
386 +
387 + /* remove IP from set (userspace: ipset -D set --entry x)
388 + * Return -EEXIST if the address is NOT in the set,
389 + * and -ERANGE if the address lies outside the set bounds.
390 + * If the address really was in the set, 0 is returned.
391 + */
392 + int (*delip) (struct ip_set *set,
393 + const void *data, size_t size,
394 + ip_set_ip_t *ip);
395 +
396 + /* remove IP from set (kernel: iptables ... -j SET --entry x)
397 + * Return -EEXIST if the address is NOT in the set,
398 + * and -ERANGE if the address lies outside the set bounds.
399 + * If the address really was in the set, 0 is returned.
400 + */
401 + int (*delip_kernel) (struct ip_set *set,
402 + const struct sk_buff * skb,
403 + ip_set_ip_t *ip,
404 + const u_int32_t *flags,
405 + unsigned char index);
406 +
407 +	/* new set creation - allocate type specific items
408 + */
409 + int (*create) (struct ip_set *set,
410 + const void *data, size_t size);
411 +
412 + /* retry the operation after successfully tweaking the set
413 + */
414 + int (*retry) (struct ip_set *set);
415 +
416 + /* set destruction - free type specific items
417 + * There is no return value.
418 + * Can be called only when child sets are destroyed.
419 + */
420 + void (*destroy) (struct ip_set *set);
421 +
422 + /* set flushing - reset all bits in the set, or something similar.
423 + * There is no return value.
424 + */
425 + void (*flush) (struct ip_set *set);
426 +
427 + /* Listing: size needed for header
428 + */
429 + size_t header_size;
430 +
431 + /* Listing: Get the header
432 + *
433 + * Fill in the information in "data".
434 +	 * This function is always run after list_header_size() under a
435 +	 * writelock on the set, therefore the length of "data" is always
436 +	 * correct.
437 + */
438 + void (*list_header) (const struct ip_set *set,
439 + void *data);
440 +
441 + /* Listing: Get the size for the set members
442 + */
443 + int (*list_members_size) (const struct ip_set *set);
444 +
445 + /* Listing: Get the set members
446 + *
447 + * Fill in the information in "data".
448 +	 * This function is always run after list_members_size() under a
449 +	 * writelock on the set, therefore the length of "data" is always
450 +	 * correct.
451 + */
452 + void (*list_members) (const struct ip_set *set,
453 + void *data);
454 +
455 + char typename[IP_SET_MAXNAMELEN];
456 + unsigned char features;
457 + int protocol_version;
458 +
459 + /* Set this to THIS_MODULE if you are a module, otherwise NULL */
460 + struct module *me;
461 +};
462 +
463 +extern int ip_set_register_set_type(struct ip_set_type *set_type);
464 +extern void ip_set_unregister_set_type(struct ip_set_type *set_type);
465 +
466 +/* A generic ipset */
467 +struct ip_set {
468 + char name[IP_SET_MAXNAMELEN]; /* the name of the set */
469 + rwlock_t lock; /* lock for concurrency control */
470 + ip_set_id_t id; /* set id for swapping */
471 + ip_set_id_t binding; /* default binding for the set */
472 + atomic_t ref; /* in kernel and in hash references */
473 +	struct ip_set_type *type; 	/* the set type */
474 +	void *data;			/* type-specific data */
475 +};
476 +
477 +/* Structure to bind set elements to sets */
478 +struct ip_set_hash {
479 + struct list_head list; /* list of clashing entries in hash */
480 + ip_set_ip_t ip; /* ip from set */
481 + ip_set_id_t id; /* set id */
482 + ip_set_id_t binding; /* set we bind the element to */
483 +};
484 +
485 +/* register and unregister set references */
486 +extern ip_set_id_t ip_set_get_byname(const char name[IP_SET_MAXNAMELEN]);
487 +extern ip_set_id_t ip_set_get_byindex(ip_set_id_t id);
488 +extern void ip_set_put(ip_set_id_t id);
489 +
490 +/* API for iptables set match, and SET target */
491 +extern void ip_set_addip_kernel(ip_set_id_t id,
492 + const struct sk_buff *skb,
493 + const u_int32_t *flags);
494 +extern void ip_set_delip_kernel(ip_set_id_t id,
495 + const struct sk_buff *skb,
496 + const u_int32_t *flags);
497 +extern int ip_set_testip_kernel(ip_set_id_t id,
498 + const struct sk_buff *skb,
499 + const u_int32_t *flags);
500 +
501 +#endif /* __KERNEL__ */
502 +
503 +#endif /*_IP_SET_H*/
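
For orientation, the request/reply structures above are exchanged with the kernel through the SO_IP_SET socket option. The following minimal userspace sketch asks for the kernel's protocol version with struct ip_set_req_version; the raw-socket-plus-getsockopt() plumbing mirrors how a tool built against these headers would typically talk to the interface, but it is an assumption of this sketch rather than something the patch itself spells out, and it needs root to open the socket.

/* Illustrative sketch only: query the ipset protocol version via SO_IP_SET.
 * Assumes the patched <linux/netfilter_ipv4/ip_set.h> is on the include path
 * and that the kernel answers IP_SET_OP_VERSION through getsockopt(). */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/netfilter_ipv4/ip_set.h>

int main(void)
{
	struct ip_set_req_version req;
	socklen_t size = sizeof(req);
	int sockfd = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);

	if (sockfd < 0) {
		perror("socket");
		return 1;
	}
	memset(&req, 0, sizeof(req));
	req.op = IP_SET_OP_VERSION;
	req.version = IP_SET_PROTOCOL_VERSION;

	if (getsockopt(sockfd, IPPROTO_IP, SO_IP_SET, &req, &size) < 0) {
		perror("getsockopt(SO_IP_SET)");
		return 1;
	}
	printf("kernel protocol version %u, tool built for %u\n",
	       req.version, IP_SET_PROTOCOL_VERSION);
	return 0;
}
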
504 Index: linux-2.6.21.7/include/linux/netfilter_ipv4/ip_set_iphash.h
505 ===================================================================
506 --- /dev/null
507 +++ linux-2.6.21.7/include/linux/netfilter_ipv4/ip_set_iphash.h
508 @@ -0,0 +1,30 @@
509 +#ifndef __IP_SET_IPHASH_H
510 +#define __IP_SET_IPHASH_H
511 +
512 +#include <linux/netfilter_ipv4/ip_set.h>
513 +
514 +#define SETTYPE_NAME "iphash"
515 +#define MAX_RANGE 0x0000FFFF
516 +
517 +struct ip_set_iphash {
518 + ip_set_ip_t *members; /* the iphash proper */
519 + uint32_t elements; /* number of elements */
520 + uint32_t hashsize; /* hash size */
521 + uint16_t probes; /* max number of probes */
522 + uint16_t resize; /* resize factor in percent */
523 + ip_set_ip_t netmask; /* netmask */
524 + void *initval[0]; /* initvals for jhash_1word */
525 +};
526 +
527 +struct ip_set_req_iphash_create {
528 + uint32_t hashsize;
529 + uint16_t probes;
530 + uint16_t resize;
531 + ip_set_ip_t netmask;
532 +};
533 +
534 +struct ip_set_req_iphash {
535 + ip_set_ip_t ip;
536 +};
537 +
538 +#endif /* __IP_SET_IPHASH_H */
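
As a concrete illustration of the create parameters above, this sketch fills the type-specific part of an iphash create request. The numeric values (1024 buckets, 8 probes, 50% resize, /24 netmask) are arbitrary examples, and the sockopt plumbing that would carry the request is omitted.

/* Illustrative sketch only: type-specific payload of an iphash create. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <sys/types.h>
#include <arpa/inet.h>
#include <linux/netfilter_ipv4/ip_set_iphash.h>

static void fill_iphash_create(struct ip_set_req_iphash_create *req)
{
	memset(req, 0, sizeof(*req));
	req->hashsize = 1024;		/* initial number of buckets */
	req->probes = 8;		/* max probes before a resize */
	req->resize = 50;		/* grow the table by 50 percent */
	/* store /24 networks instead of single addresses (host byte order) */
	req->netmask = ntohl(inet_addr("255.255.255.0"));
}

int main(void)
{
	struct ip_set_req_iphash_create req;

	fill_iphash_create(&req);
	printf("hashsize %u, netmask 0x%08X\n", req.hashsize, req.netmask);
	return 0;
}
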
539 Index: linux-2.6.21.7/include/linux/netfilter_ipv4/ip_set_ipmap.h
540 ===================================================================
541 --- /dev/null
542 +++ linux-2.6.21.7/include/linux/netfilter_ipv4/ip_set_ipmap.h
543 @@ -0,0 +1,56 @@
544 +#ifndef __IP_SET_IPMAP_H
545 +#define __IP_SET_IPMAP_H
546 +
547 +#include <linux/netfilter_ipv4/ip_set.h>
548 +
549 +#define SETTYPE_NAME "ipmap"
550 +#define MAX_RANGE 0x0000FFFF
551 +
552 +struct ip_set_ipmap {
553 + void *members; /* the ipmap proper */
554 + ip_set_ip_t first_ip; /* host byte order, included in range */
555 + ip_set_ip_t last_ip; /* host byte order, included in range */
556 + ip_set_ip_t netmask; /* subnet netmask */
557 + ip_set_ip_t sizeid; /* size of set in IPs */
558 + ip_set_ip_t hosts; /* number of hosts in a subnet */
559 +};
560 +
561 +struct ip_set_req_ipmap_create {
562 + ip_set_ip_t from;
563 + ip_set_ip_t to;
564 + ip_set_ip_t netmask;
565 +};
566 +
567 +struct ip_set_req_ipmap {
568 + ip_set_ip_t ip;
569 +};
570 +
571 +unsigned int
572 +mask_to_bits(ip_set_ip_t mask)
573 +{
574 + unsigned int bits = 32;
575 + ip_set_ip_t maskaddr;
576 +
577 + if (mask == 0xFFFFFFFF)
578 + return bits;
579 +
580 + maskaddr = 0xFFFFFFFE;
581 + while (--bits >= 0 && maskaddr != mask)
582 + maskaddr <<= 1;
583 +
584 + return bits;
585 +}
586 +
587 +ip_set_ip_t
588 +range_to_mask(ip_set_ip_t from, ip_set_ip_t to, unsigned int *bits)
589 +{
590 + ip_set_ip_t mask = 0xFFFFFFFE;
591 +
592 + *bits = 32;
593 + while (--(*bits) >= 0 && mask && (to & mask) != from)
594 + mask <<= 1;
595 +
596 + return mask;
597 +}
598 +
599 +#endif /* __IP_SET_IPMAP_H */
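
range_to_mask() and mask_to_bits() above convert between an inclusive host-byte-order address range and a netmask. A small standalone sketch of the round trip, assuming the patched headers are visible to userspace with <stdint.h> and <sys/types.h> pulled in first:

/* Illustrative sketch only: 192.168.1.0-192.168.1.255 <-> /24. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <sys/types.h>
#include <linux/netfilter_ipv4/ip_set_ipmap.h>

int main(void)
{
	unsigned int bits;
	ip_set_ip_t mask = range_to_mask(0xC0A80100, 0xC0A801FF, &bits);

	printf("mask 0x%08X = /%u\n", mask, bits);	/* 0xFFFFFF00 = /24 */
	printf("bits from mask: %u\n", mask_to_bits(mask));	/* 24 */
	return 0;
}
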
600 Index: linux-2.6.21.7/include/linux/netfilter_ipv4/ip_set_ipporthash.h
601 ===================================================================
602 --- /dev/null
603 +++ linux-2.6.21.7/include/linux/netfilter_ipv4/ip_set_ipporthash.h
604 @@ -0,0 +1,34 @@
605 +#ifndef __IP_SET_IPPORTHASH_H
606 +#define __IP_SET_IPPORTHASH_H
607 +
608 +#include <linux/netfilter_ipv4/ip_set.h>
609 +
610 +#define SETTYPE_NAME "ipporthash"
611 +#define MAX_RANGE 0x0000FFFF
612 +#define INVALID_PORT (MAX_RANGE + 1)
613 +
614 +struct ip_set_ipporthash {
615 + ip_set_ip_t *members; /* the ipporthash proper */
616 + uint32_t elements; /* number of elements */
617 + uint32_t hashsize; /* hash size */
618 + uint16_t probes; /* max number of probes */
619 + uint16_t resize; /* resize factor in percent */
620 + ip_set_ip_t first_ip; /* host byte order, included in range */
621 + ip_set_ip_t last_ip; /* host byte order, included in range */
622 + void *initval[0]; /* initvals for jhash_1word */
623 +};
624 +
625 +struct ip_set_req_ipporthash_create {
626 + uint32_t hashsize;
627 + uint16_t probes;
628 + uint16_t resize;
629 + ip_set_ip_t from;
630 + ip_set_ip_t to;
631 +};
632 +
633 +struct ip_set_req_ipporthash {
634 + ip_set_ip_t ip;
635 + ip_set_ip_t port;
636 +};
637 +
638 +#endif /* __IP_SET_IPPORTHASH_H */
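
Requests against this set type carry both halves of the key, the two-component case hinted at by IPSET_DATA_DOUBLE in ip_set.h. A fragment of how the type-specific tail of an add/del/test request might be filled (values are arbitrary; byte order follows the host-order convention of ip_set.h):

/* Illustrative fragment only: ip + port payload for an ipporthash request. */
#include <stdint.h>
#include <sys/types.h>
#include <string.h>
#include <arpa/inet.h>
#include <linux/netfilter_ipv4/ip_set_ipporthash.h>

void fill_ipporthash_req(struct ip_set_req_ipporthash *req)
{
	req->ip = ntohl(inet_addr("192.168.1.10"));	/* host byte order */
	req->port = 80;					/* TCP/UDP port */
}
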
639 Index: linux-2.6.21.7/include/linux/netfilter_ipv4/ip_set_iptree.h
640 ===================================================================
641 --- /dev/null
642 +++ linux-2.6.21.7/include/linux/netfilter_ipv4/ip_set_iptree.h
643 @@ -0,0 +1,40 @@
644 +#ifndef __IP_SET_IPTREE_H
645 +#define __IP_SET_IPTREE_H
646 +
647 +#include <linux/netfilter_ipv4/ip_set.h>
648 +
649 +#define SETTYPE_NAME "iptree"
650 +#define MAX_RANGE 0x0000FFFF
651 +
652 +struct ip_set_iptreed {
653 + unsigned long expires[256]; /* x.x.x.ADDR */
654 +};
655 +
656 +struct ip_set_iptreec {
657 + struct ip_set_iptreed *tree[256]; /* x.x.ADDR.* */
658 +};
659 +
660 +struct ip_set_iptreeb {
661 + struct ip_set_iptreec *tree[256]; /* x.ADDR.*.* */
662 +};
663 +
664 +struct ip_set_iptree {
665 + unsigned int timeout;
666 + unsigned int gc_interval;
667 +#ifdef __KERNEL__
668 + uint32_t elements; /* number of elements */
669 + struct timer_list gc;
670 + struct ip_set_iptreeb *tree[256]; /* ADDR.*.*.* */
671 +#endif
672 +};
673 +
674 +struct ip_set_req_iptree_create {
675 + unsigned int timeout;
676 +};
677 +
678 +struct ip_set_req_iptree {
679 + ip_set_ip_t ip;
680 + unsigned int timeout;
681 +};
682 +
683 +#endif /* __IP_SET_IPTREE_H */
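
The three nested structures above spell out how an address a.b.c.d is resolved: the first octet indexes the top-level array, the b and c levels are allocated on demand, and the per-host expiry value lives in expires[d]. The sketch below walks that layout; the top-level array is declared locally because in the patch it sits in the kernel-only part of struct ip_set_iptree, and expiry is reduced to "nonzero means present".

/* Illustrative sketch only: index the iptree levels by octet. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <sys/types.h>
#include <linux/netfilter_ipv4/ip_set_iptree.h>

static struct ip_set_iptreeb *top[256];		/* stands in for ADDR.*.*.* */

static int iptree_test(ip_set_ip_t ip)
{
	unsigned char a = ip >> 24, b = ip >> 16, c = ip >> 8, d = ip;
	struct ip_set_iptreeb *bt = top[a];
	struct ip_set_iptreec *ct;
	struct ip_set_iptreed *dt;

	if (!bt || !(ct = bt->tree[b]) || !(dt = ct->tree[c]))
		return 0;
	return dt->expires[d] != 0;	/* real code also checks the timeout */
}

int main(void)
{
	printf("192.168.1.1 present: %d\n", iptree_test(0xC0A80101));
	return 0;
}
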
684 Index: linux-2.6.21.7/include/linux/netfilter_ipv4/ip_set_iptreemap.h
685 ===================================================================
686 --- /dev/null
687 +++ linux-2.6.21.7/include/linux/netfilter_ipv4/ip_set_iptreemap.h
688 @@ -0,0 +1,40 @@
689 +#ifndef __IP_SET_IPTREEMAP_H
690 +#define __IP_SET_IPTREEMAP_H
691 +
692 +#include <linux/netfilter_ipv4/ip_set.h>
693 +
694 +#define SETTYPE_NAME "iptreemap"
695 +
696 +#ifdef __KERNEL__
697 +struct ip_set_iptreemap_d {
698 + unsigned char bitmap[32]; /* x.x.x.y */
699 +};
700 +
701 +struct ip_set_iptreemap_c {
702 + struct ip_set_iptreemap_d *tree[256]; /* x.x.y.x */
703 +};
704 +
705 +struct ip_set_iptreemap_b {
706 + struct ip_set_iptreemap_c *tree[256]; /* x.y.x.x */
707 + unsigned char dirty[32];
708 +};
709 +#endif
710 +
711 +struct ip_set_iptreemap {
712 + unsigned int gc_interval;
713 +#ifdef __KERNEL__
714 + struct timer_list gc;
715 + struct ip_set_iptreemap_b *tree[256]; /* y.x.x.x */
716 +#endif
717 +};
718 +
719 +struct ip_set_req_iptreemap_create {
720 + unsigned int gc_interval;
721 +};
722 +
723 +struct ip_set_req_iptreemap {
724 + ip_set_ip_t start;
725 + ip_set_ip_t end;
726 +};
727 +
728 +#endif /* __IP_SET_IPTREEMAP_H */
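
iptreemap replaces the per-host timestamps of iptree with a 256-bit bitmap (bitmap[32]) at the last level, plus a dirty bitmap one level up. The leaf struct is repeated below so the sketch stands alone; the bit ordering within a byte is an assumption made for illustration, not something this header fixes.

/* Illustrative sketch only: one bit per final octet in the iptreemap leaf. */
#include <stdio.h>

struct ip_set_iptreemap_d {
	unsigned char bitmap[32];	/* x.x.x.y, 256 bits */
};

static void leaf_set(struct ip_set_iptreemap_d *leaf, unsigned char d)
{
	leaf->bitmap[d >> 3] |= 1 << (d & 7);
}

static int leaf_test(const struct ip_set_iptreemap_d *leaf, unsigned char d)
{
	return (leaf->bitmap[d >> 3] >> (d & 7)) & 1;
}

int main(void)
{
	struct ip_set_iptreemap_d leaf = { { 0 } };

	leaf_set(&leaf, 42);		/* mark x.x.x.42 */
	printf("42: %d, 43: %d\n", leaf_test(&leaf, 42), leaf_test(&leaf, 43));
	return 0;
}
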
729 Index: linux-2.6.21.7/include/linux/netfilter_ipv4/ip_set_jhash.h
730 ===================================================================
731 --- /dev/null
732 +++ linux-2.6.21.7/include/linux/netfilter_ipv4/ip_set_jhash.h
733 @@ -0,0 +1,148 @@
734 +#ifndef _LINUX_IPSET_JHASH_H
735 +#define _LINUX_IPSET_JHASH_H
736 +
737 +/* This is a copy of linux/jhash.h but the types u32/u8 are changed
738 + * to __u32/__u8 so that the header file can be included into
739 + * userspace code as well. Jozsef Kadlecsik (kadlec@blackhole.kfki.hu)
740 + */
741 +
742 +/* jhash.h: Jenkins hash support.
743 + *
744 + * Copyright (C) 1996 Bob Jenkins (bob_jenkins@burtleburtle.net)
745 + *
746 + * http://burtleburtle.net/bob/hash/
747 + *
748 + * These are the credits from Bob's sources:
749 + *
750 + * lookup2.c, by Bob Jenkins, December 1996, Public Domain.
751 + * hash(), hash2(), hash3, and mix() are externally useful functions.
752 + * Routines to test the hash are included if SELF_TEST is defined.
753 + * You can use this free for any purpose. It has no warranty.
754 + *
755 + * Copyright (C) 2003 David S. Miller (davem@redhat.com)
756 + *
757 + * I've modified Bob's hash to be useful in the Linux kernel, and
758 + * any bugs present are surely my fault. -DaveM
759 + */
760 +
761 +/* NOTE: Arguments are modified. */
762 +#define __jhash_mix(a, b, c) \
763 +{ \
764 + a -= b; a -= c; a ^= (c>>13); \
765 + b -= c; b -= a; b ^= (a<<8); \
766 + c -= a; c -= b; c ^= (b>>13); \
767 + a -= b; a -= c; a ^= (c>>12); \
768 + b -= c; b -= a; b ^= (a<<16); \
769 + c -= a; c -= b; c ^= (b>>5); \
770 + a -= b; a -= c; a ^= (c>>3); \
771 + b -= c; b -= a; b ^= (a<<10); \
772 + c -= a; c -= b; c ^= (b>>15); \
773 +}
774 +
775 +/* The golden ratio: an arbitrary value */
776 +#define JHASH_GOLDEN_RATIO 0x9e3779b9
777 +
778 +/* The most generic version, hashes an arbitrary sequence
779 + * of bytes. No alignment or length assumptions are made about
780 + * the input key.
781 + */
782 +static inline __u32 jhash(void *key, __u32 length, __u32 initval)
783 +{
784 + __u32 a, b, c, len;
785 + __u8 *k = key;
786 +
787 + len = length;
788 + a = b = JHASH_GOLDEN_RATIO;
789 + c = initval;
790 +
791 + while (len >= 12) {
792 + a += (k[0] +((__u32)k[1]<<8) +((__u32)k[2]<<16) +((__u32)k[3]<<24));
793 + b += (k[4] +((__u32)k[5]<<8) +((__u32)k[6]<<16) +((__u32)k[7]<<24));
794 + c += (k[8] +((__u32)k[9]<<8) +((__u32)k[10]<<16)+((__u32)k[11]<<24));
795 +
796 + __jhash_mix(a,b,c);
797 +
798 + k += 12;
799 + len -= 12;
800 + }
801 +
802 + c += length;
803 + switch (len) {
804 + case 11: c += ((__u32)k[10]<<24);
805 + case 10: c += ((__u32)k[9]<<16);
806 + case 9 : c += ((__u32)k[8]<<8);
807 + case 8 : b += ((__u32)k[7]<<24);
808 + case 7 : b += ((__u32)k[6]<<16);
809 + case 6 : b += ((__u32)k[5]<<8);
810 + case 5 : b += k[4];
811 + case 4 : a += ((__u32)k[3]<<24);
812 + case 3 : a += ((__u32)k[2]<<16);
813 + case 2 : a += ((__u32)k[1]<<8);
814 + case 1 : a += k[0];
815 + };
816 +
817 + __jhash_mix(a,b,c);
818 +
819 + return c;
820 +}
821 +
822 +/* A special optimized version that handles 1 or more __u32s.
823 + * The length parameter here is the number of __u32s in the key.
824 + */
825 +static inline __u32 jhash2(__u32 *k, __u32 length, __u32 initval)
826 +{
827 + __u32 a, b, c, len;
828 +
829 + a = b = JHASH_GOLDEN_RATIO;
830 + c = initval;
831 + len = length;
832 +
833 + while (len >= 3) {
834 + a += k[0];
835 + b += k[1];
836 + c += k[2];
837 + __jhash_mix(a, b, c);
838 + k += 3; len -= 3;
839 + }
840 +
841 + c += length * 4;
842 +
843 + switch (len) {
844 + case 2 : b += k[1];
845 + case 1 : a += k[0];
846 + };
847 +
848 + __jhash_mix(a,b,c);
849 +
850 + return c;
851 +}
852 +
853 +
854 +/* A special ultra-optimized version that knows it is hashing exactly
855 + * 3, 2 or 1 word(s).
856 + *
857 + * NOTE: In particular the "c += length; __jhash_mix(a,b,c);" normally
858 + * done at the end is not done here.
859 + */
860 +static inline __u32 jhash_3words(__u32 a, __u32 b, __u32 c, __u32 initval)
861 +{
862 + a += JHASH_GOLDEN_RATIO;
863 + b += JHASH_GOLDEN_RATIO;
864 + c += initval;
865 +
866 + __jhash_mix(a, b, c);
867 +
868 + return c;
869 +}
870 +
871 +static inline __u32 jhash_2words(__u32 a, __u32 b, __u32 initval)
872 +{
873 + return jhash_3words(a, b, 0, initval);
874 +}
875 +
876 +static inline __u32 jhash_1word(__u32 a, __u32 initval)
877 +{
878 + return jhash_3words(a, 0, 0, initval);
879 +}
880 +
881 +#endif /* _LINUX_IPSET_JHASH_H */
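
The hash-based set types use these helpers to pick a bucket: the key goes through one of the jhash_*() helpers with a per-probe random initval and the result is reduced modulo the table size. A userspace sketch of that pattern, relying on the header's stated property of being includable outside the kernel (the initval and table size here are arbitrary stand-ins):

/* Illustrative sketch only: bucket selection in the style of iphash. */
#include <stdio.h>
#include <linux/types.h>
#include <linux/netfilter_ipv4/ip_set_jhash.h>

static __u32 bucket_for(__u32 ip, __u32 initval, __u32 hashsize)
{
	return jhash_1word(ip, initval) % hashsize;
}

int main(void)
{
	__u32 ip = 0xC0A80101;	/* 192.168.1.1, host byte order */

	printf("bucket %u of 1024\n", bucket_for(ip, 0x12345678, 1024));
	return 0;
}
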
882 Index: linux-2.6.21.7/include/linux/netfilter_ipv4/ip_set_macipmap.h
883 ===================================================================
884 --- /dev/null
885 +++ linux-2.6.21.7/include/linux/netfilter_ipv4/ip_set_macipmap.h
886 @@ -0,0 +1,38 @@
887 +#ifndef __IP_SET_MACIPMAP_H
888 +#define __IP_SET_MACIPMAP_H
889 +
890 +#include <linux/netfilter_ipv4/ip_set.h>
891 +
892 +#define SETTYPE_NAME "macipmap"
893 +#define MAX_RANGE 0x0000FFFF
894 +
895 +/* general flags */
896 +#define IPSET_MACIP_MATCHUNSET 1
897 +
898 +/* per ip flags */
899 +#define IPSET_MACIP_ISSET 1
900 +
901 +struct ip_set_macipmap {
902 + void *members; /* the macipmap proper */
903 + ip_set_ip_t first_ip; /* host byte order, included in range */
904 + ip_set_ip_t last_ip; /* host byte order, included in range */
905 + u_int32_t flags;
906 +};
907 +
908 +struct ip_set_req_macipmap_create {
909 + ip_set_ip_t from;
910 + ip_set_ip_t to;
911 + u_int32_t flags;
912 +};
913 +
914 +struct ip_set_req_macipmap {
915 + ip_set_ip_t ip;
916 + unsigned char ethernet[ETH_ALEN];
917 +};
918 +
919 +struct ip_set_macip {
920 + unsigned short flags;
921 + unsigned char ethernet[ETH_ALEN];
922 +};
923 +
924 +#endif /* __IP_SET_MACIPMAP_H */
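
An add/test request for this set type carries the address plus the MAC that must be bound to it. A fragment filling that payload (the MAC is a made-up example; <linux/if_ether.h> is included first because the header relies on ETH_ALEN without pulling it in itself):

/* Illustrative fragment only: ip + MAC payload for a macipmap request. */
#include <stdint.h>
#include <sys/types.h>
#include <string.h>
#include <arpa/inet.h>
#include <linux/if_ether.h>
#include <linux/netfilter_ipv4/ip_set_macipmap.h>

void fill_macipmap_req(struct ip_set_req_macipmap *req)
{
	static const unsigned char mac[ETH_ALEN] =
		{ 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	req->ip = ntohl(inet_addr("192.168.1.10"));	/* host byte order */
	memcpy(req->ethernet, mac, ETH_ALEN);
}
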
925 Index: linux-2.6.21.7/include/linux/netfilter_ipv4/ip_set_malloc.h
926 ===================================================================
927 --- /dev/null
928 +++ linux-2.6.21.7/include/linux/netfilter_ipv4/ip_set_malloc.h
929 @@ -0,0 +1,116 @@
930 +#ifndef _IP_SET_MALLOC_H
931 +#define _IP_SET_MALLOC_H
932 +
933 +#ifdef __KERNEL__
934 +
935 +/* Memory allocation and deallocation */
936 +static size_t max_malloc_size = 0;
937 +
938 +static inline void init_max_malloc_size(void)
939 +{
940 +#define CACHE(x) max_malloc_size = x;
941 +#include <linux/kmalloc_sizes.h>
942 +#undef CACHE
943 +}
944 +
945 +static inline void * ip_set_malloc(size_t bytes)
946 +{
947 + if (bytes > max_malloc_size)
948 + return vmalloc(bytes);
949 + else
950 + return kmalloc(bytes, GFP_KERNEL);
951 +}
952 +
953 +static inline void ip_set_free(void * data, size_t bytes)
954 +{
955 + if (bytes > max_malloc_size)
956 + vfree(data);
957 + else
958 + kfree(data);
959 +}
960 +
961 +struct harray {
962 + size_t max_elements;
963 + void *arrays[0];
964 +};
965 +
966 +static inline void *
967 +harray_malloc(size_t hashsize, size_t typesize, int flags)
968 +{
969 + struct harray *harray;
970 + size_t max_elements, size, i, j;
971 +
972 + if (!max_malloc_size)
973 + init_max_malloc_size();
974 +
975 + if (typesize > max_malloc_size)
976 + return NULL;
977 +
978 + max_elements = max_malloc_size/typesize;
979 + size = hashsize/max_elements;
980 + if (hashsize % max_elements)
981 + size++;
982 +
983 + /* Last pointer signals end of arrays */
984 + harray = kmalloc(sizeof(struct harray) + (size + 1) * sizeof(void *),
985 + flags);
986 +
987 + if (!harray)
988 + return NULL;
989 +
990 + for (i = 0; i < size - 1; i++) {
991 + harray->arrays[i] = kmalloc(max_elements * typesize, flags);
992 + if (!harray->arrays[i])
993 + goto undo;
994 + memset(harray->arrays[i], 0, max_elements * typesize);
995 + }
996 + harray->arrays[i] = kmalloc((hashsize - i * max_elements) * typesize,
997 + flags);
998 + if (!harray->arrays[i])
999 + goto undo;
1000 + memset(harray->arrays[i], 0, (hashsize - i * max_elements) * typesize);
1001 +
1002 + harray->max_elements = max_elements;
1003 + harray->arrays[size] = NULL;
1004 +
1005 + return (void *)harray;
1006 +
1007 + undo:
1008 + for (j = 0; j < i; j++) {
1009 + kfree(harray->arrays[j]);
1010 + }
1011 + kfree(harray);
1012 + return NULL;
1013 +}
1014 +
1015 +static inline void harray_free(void *h)
1016 +{
1017 + struct harray *harray = (struct harray *) h;
1018 + size_t i;
1019 +
1020 + for (i = 0; harray->arrays[i] != NULL; i++)
1021 + kfree(harray->arrays[i]);
1022 + kfree(harray);
1023 +}
1024 +
1025 +static inline void harray_flush(void *h, size_t hashsize, size_t typesize)
1026 +{
1027 + struct harray *harray = (struct harray *) h;
1028 + size_t i;
1029 +
1030 + for (i = 0; harray->arrays[i+1] != NULL; i++)
1031 + memset(harray->arrays[i], 0, harray->max_elements * typesize);
1032 + memset(harray->arrays[i], 0,
1033 + (hashsize - i * harray->max_elements) * typesize);
1034 +}
1035 +
1036 +#define HARRAY_ELEM(h, type, which) \
1037 +({ \
1038 + struct harray *__h = (struct harray *)(h); \
1039 + ((type)((__h)->arrays[(which)/(__h)->max_elements]) \
1040 + + (which)%(__h)->max_elements); \
1041 +})
1042 +
1043 +#endif /* __KERNEL__ */
1044 +
1045 +#endif /*_IP_SET_MALLOC_H*/
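
harray_malloc() splits one logical hash table into chunks no larger than the biggest kmalloc size, and HARRAY_ELEM() hides the resulting two-level indexing. The kernel-side fragment below shows the intended calling pattern for a table of ip_set_ip_t slots; it is a sketch only, with the full include list, error handling and locking of a real set type trimmed away.

/* Illustrative kernel-side fragment only: chunked member table access. */
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/spinlock.h>
#include <linux/netfilter_ipv4/ip_set.h>
#include <linux/netfilter_ipv4/ip_set_malloc.h>

static void *example_alloc_members(size_t hashsize)
{
	/* one ip_set_ip_t per bucket, spread over kmalloc-sized chunks */
	return harray_malloc(hashsize, sizeof(ip_set_ip_t), GFP_KERNEL);
}

static int example_slot_empty(void *members, u_int32_t id)
{
	ip_set_ip_t *elem = HARRAY_ELEM(members, ip_set_ip_t *, id);

	return *elem == 0;
}
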
1046 Index: linux-2.6.21.7/include/linux/netfilter_ipv4/ip_set_nethash.h
1047 ===================================================================
1048 --- /dev/null
1049 +++ linux-2.6.21.7/include/linux/netfilter_ipv4/ip_set_nethash.h
1050 @@ -0,0 +1,55 @@
1051 +#ifndef __IP_SET_NETHASH_H
1052 +#define __IP_SET_NETHASH_H
1053 +
1054 +#include <linux/netfilter_ipv4/ip_set.h>
1055 +
1056 +#define SETTYPE_NAME "nethash"
1057 +#define MAX_RANGE 0x0000FFFF
1058 +
1059 +struct ip_set_nethash {
1060 + ip_set_ip_t *members; /* the nethash proper */
1061 + uint32_t elements; /* number of elements */
1062 + uint32_t hashsize; /* hash size */
1063 + uint16_t probes; /* max number of probes */
1064 + uint16_t resize; /* resize factor in percent */
1065 + unsigned char cidr[30]; /* CIDR sizes */
1066 + void *initval[0]; /* initvals for jhash_1word */
1067 +};
1068 +
1069 +struct ip_set_req_nethash_create {
1070 + uint32_t hashsize;
1071 + uint16_t probes;
1072 + uint16_t resize;
1073 +};
1074 +
1075 +struct ip_set_req_nethash {
1076 + ip_set_ip_t ip;
1077 + unsigned char cidr;
1078 +};
1079 +
1080 +static unsigned char shifts[] = {255, 253, 249, 241, 225, 193, 129, 1};
1081 +
1082 +static inline ip_set_ip_t
1083 +pack(ip_set_ip_t ip, unsigned char cidr)
1084 +{
1085 + ip_set_ip_t addr, *paddr = &addr;
1086 + unsigned char n, t, *a;
1087 +
1088 + addr = htonl(ip & (0xFFFFFFFF << (32 - (cidr))));
1089 +#ifdef __KERNEL__
1090 + DP("ip:%u.%u.%u.%u/%u", NIPQUAD(addr), cidr);
1091 +#endif
1092 + n = cidr / 8;
1093 + t = cidr % 8;
1094 + a = &((unsigned char *)paddr)[n];
1095 + *a = *a /(1 << (8 - t)) + shifts[t];
1096 +#ifdef __KERNEL__
1097 + DP("n: %u, t: %u, a: %u", n, t, *a);
1098 + DP("ip:%u.%u.%u.%u/%u, %u.%u.%u.%u",
1099 + HIPQUAD(ip), cidr, NIPQUAD(addr));
1100 +#endif
1101 +
1102 + return ntohl(addr);
1103 +}
1104 +
1105 +#endif /* __IP_SET_NETHASH_H */
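
pack() folds an address and its prefix length into a single ip_set_ip_t so that differently sized networks can share one hash table. The packed value is an internal encoding, so the sketch below only demonstrates the calling convention (userspace build assumed, host-byte-order address, cidr between 1 and 31):

/* Illustrative sketch only: key 192.168.0.0/16 the way nethash would. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <sys/types.h>
#include <arpa/inet.h>
#include <linux/netfilter_ipv4/ip_set_nethash.h>

int main(void)
{
	ip_set_ip_t key = pack(0xC0A80000, 16);

	printf("packed key: 0x%08X\n", key);
	return 0;
}
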
1106 Index: linux-2.6.21.7/include/linux/netfilter_ipv4/ip_set_portmap.h
1107 ===================================================================
1108 --- /dev/null
1109 +++ linux-2.6.21.7/include/linux/netfilter_ipv4/ip_set_portmap.h
1110 @@ -0,0 +1,25 @@
1111 +#ifndef __IP_SET_PORTMAP_H
1112 +#define __IP_SET_PORTMAP_H
1113 +
1114 +#include <linux/netfilter_ipv4/ip_set.h>
1115 +
1116 +#define SETTYPE_NAME "portmap"
1117 +#define MAX_RANGE 0x0000FFFF
1118 +#define INVALID_PORT (MAX_RANGE + 1)
1119 +
1120 +struct ip_set_portmap {
1121 + void *members; /* the portmap proper */
1122 + ip_set_ip_t first_port; /* host byte order, included in range */
1123 + ip_set_ip_t last_port; /* host byte order, included in range */
1124 +};
1125 +
1126 +struct ip_set_req_portmap_create {
1127 + ip_set_ip_t from;
1128 + ip_set_ip_t to;
1129 +};
1130 +
1131 +struct ip_set_req_portmap {
1132 + ip_set_ip_t port;
1133 +};
1134 +
1135 +#endif /* __IP_SET_PORTMAP_H */
1136 Index: linux-2.6.21.7/include/linux/netfilter_ipv4/ipt_set.h
1137 ===================================================================
1138 --- /dev/null
1139 +++ linux-2.6.21.7/include/linux/netfilter_ipv4/ipt_set.h
1140 @@ -0,0 +1,21 @@
1141 +#ifndef _IPT_SET_H
1142 +#define _IPT_SET_H
1143 +
1144 +#include <linux/netfilter_ipv4/ip_set.h>
1145 +
1146 +struct ipt_set_info {
1147 + ip_set_id_t index;
1148 + u_int32_t flags[IP_SET_MAX_BINDINGS + 1];
1149 +};
1150 +
1151 +/* match info */
1152 +struct ipt_set_info_match {
1153 + struct ipt_set_info match_set;
1154 +};
1155 +
1156 +struct ipt_set_info_target {
1157 + struct ipt_set_info add_set;
1158 + struct ipt_set_info del_set;
1159 +};
1160 +
1161 +#endif /*_IPT_SET_H*/
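
On the iptables side the match and target hand one of these structures to the kernel: index holds the set id resolved via IP_SET_OP_GET_BYNAME, and flags[] lists, level by level along the binding chain, whether the source or destination address should be extracted (IPSET_SRC/IPSET_DST from ip_set.h, with IPSET_MATCH_INV available to the match), terminated by a zero entry. The fragment below is a sketch of how a rule like "-m set --set foo src dst" might populate the match info; the index value 3 is a made-up example.

/* Illustrative fragment only: ipt_set match info for "--set foo src dst". */
#include <stdint.h>
#include <sys/types.h>
#include <string.h>
#include <linux/netfilter_ipv4/ipt_set.h>

void fill_match_info(struct ipt_set_info_match *info)
{
	memset(info, 0, sizeof(*info));
	info->match_set.index = 3;		/* id from IP_SET_OP_GET_BYNAME */
	info->match_set.flags[0] = IPSET_SRC;	/* first level: source address */
	info->match_set.flags[1] = IPSET_DST;	/* then follow the binding, dst */
	/* remaining flags stay 0 and terminate the list */
}
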
1162 Index: linux-2.6.21.7/net/ipv4/netfilter/ip_set.c
1163 ===================================================================
1164 --- /dev/null
1165 +++ linux-2.6.21.7/net/ipv4/netfilter/ip_set.c
1166 @@ -0,0 +1,2003 @@
1167 +/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
1168 + * Patrick Schaaf <bof@bof.de>
1169 + * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
1170 + *
1171 + * This program is free software; you can redistribute it and/or modify
1172 + * it under the terms of the GNU General Public License version 2 as
1173 + * published by the Free Software Foundation.
1174 + */
1175 +
1176 +/* Kernel module for IP set management */
1177 +
1178 +#include <linux/version.h>
1179 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
1180 +#include <linux/config.h>
1181 +#endif
1182 +#include <linux/module.h>
1183 +#include <linux/moduleparam.h>
1184 +#include <linux/kmod.h>
1185 +#include <linux/ip.h>
1186 +#include <linux/skbuff.h>
1187 +#include <linux/random.h>
1188 +#include <linux/jhash.h>
1189 +#include <linux/netfilter_ipv4/ip_tables.h>
1190 +#include <linux/errno.h>
1191 +#include <asm/uaccess.h>
1192 +#include <asm/bitops.h>
1193 +#include <asm/semaphore.h>
1194 +#include <linux/spinlock.h>
1195 +#include <linux/vmalloc.h>
1196 +
1197 +#define ASSERT_READ_LOCK(x)
1198 +#define ASSERT_WRITE_LOCK(x)
1199 +#include <linux/netfilter_ipv4/ip_set.h>
1200 +
1201 +static struct list_head set_type_list; /* all registered sets */
1202 +static struct ip_set **ip_set_list; /* all individual sets */
1203 +static DEFINE_RWLOCK(ip_set_lock); /* protects the lists and the hash */
1204 +static DECLARE_MUTEX(ip_set_app_mutex); /* serializes user access */
1205 +static ip_set_id_t ip_set_max = CONFIG_IP_NF_SET_MAX;
1206 +static ip_set_id_t ip_set_bindings_hash_size = CONFIG_IP_NF_SET_HASHSIZE;
1207 +static struct list_head *ip_set_hash; /* hash of bindings */
1208 +static unsigned int ip_set_hash_random; /* random seed */
1209 +
1210 +/*
1211 + * Sets are identified either by the index in ip_set_list or by id.
1212 + * The id never changes and is used to find a key in the hash.
1213 + * The index may change by swapping and is used at all other places
1214 + * (set/SET netfilter modules, binding value, etc.)
1215 + *
1216 + * Userspace requests are serialized by ip_set_mutex and sets can
1217 + * be deleted only from userspace. Therefore ip_set_list locking
1218 + * must obey the following rules:
1219 + *
1220 + * - kernel requests: read and write locking mandatory
1221 + * - user requests: read locking optional, write locking mandatory
1222 + */
1223 +
1224 +static inline void
1225 +__ip_set_get(ip_set_id_t index)
1226 +{
1227 + atomic_inc(&ip_set_list[index]->ref);
1228 +}
1229 +
1230 +static inline void
1231 +__ip_set_put(ip_set_id_t index)
1232 +{
1233 + atomic_dec(&ip_set_list[index]->ref);
1234 +}
1235 +
1236 +/*
1237 + * Binding routines
1238 + */
1239 +
1240 +static inline struct ip_set_hash *
1241 +__ip_set_find(u_int32_t key, ip_set_id_t id, ip_set_ip_t ip)
1242 +{
1243 + struct ip_set_hash *set_hash;
1244 +
1245 + list_for_each_entry(set_hash, &ip_set_hash[key], list)
1246 + if (set_hash->id == id && set_hash->ip == ip)
1247 + return set_hash;
1248 +
1249 + return NULL;
1250 +}
1251 +
1252 +static ip_set_id_t
1253 +ip_set_find_in_hash(ip_set_id_t id, ip_set_ip_t ip)
1254 +{
1255 + u_int32_t key = jhash_2words(id, ip, ip_set_hash_random)
1256 + % ip_set_bindings_hash_size;
1257 + struct ip_set_hash *set_hash;
1258 +
1259 + ASSERT_READ_LOCK(&ip_set_lock);
1260 + IP_SET_ASSERT(ip_set_list[id]);
1261 + DP("set: %s, ip: %u.%u.%u.%u", ip_set_list[id]->name, HIPQUAD(ip));
1262 +
1263 + set_hash = __ip_set_find(key, id, ip);
1264 +
1265 + DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
1266 + HIPQUAD(ip),
1267 + set_hash != NULL ? ip_set_list[set_hash->binding]->name : "");
1268 +
1269 + return (set_hash != NULL ? set_hash->binding : IP_SET_INVALID_ID);
1270 +}
1271 +
1272 +static inline void
1273 +__set_hash_del(struct ip_set_hash *set_hash)
1274 +{
1275 + ASSERT_WRITE_LOCK(&ip_set_lock);
1276 + IP_SET_ASSERT(ip_set_list[set_hash->binding]);
1277 +
1278 + __ip_set_put(set_hash->binding);
1279 + list_del(&set_hash->list);
1280 + kfree(set_hash);
1281 +}
1282 +
1283 +static int
1284 +ip_set_hash_del(ip_set_id_t id, ip_set_ip_t ip)
1285 +{
1286 + u_int32_t key = jhash_2words(id, ip, ip_set_hash_random)
1287 + % ip_set_bindings_hash_size;
1288 + struct ip_set_hash *set_hash;
1289 +
1290 + IP_SET_ASSERT(ip_set_list[id]);
1291 + DP("set: %s, ip: %u.%u.%u.%u", ip_set_list[id]->name, HIPQUAD(ip));
1292 + write_lock_bh(&ip_set_lock);
1293 + set_hash = __ip_set_find(key, id, ip);
1294 + DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
1295 + HIPQUAD(ip),
1296 + set_hash != NULL ? ip_set_list[set_hash->binding]->name : "");
1297 +
1298 + if (set_hash != NULL)
1299 + __set_hash_del(set_hash);
1300 + write_unlock_bh(&ip_set_lock);
1301 + return 0;
1302 +}
1303 +
1304 +static int
1305 +ip_set_hash_add(ip_set_id_t id, ip_set_ip_t ip, ip_set_id_t binding)
1306 +{
1307 + u_int32_t key = jhash_2words(id, ip, ip_set_hash_random)
1308 + % ip_set_bindings_hash_size;
1309 + struct ip_set_hash *set_hash;
1310 + int ret = 0;
1311 +
1312 + IP_SET_ASSERT(ip_set_list[id]);
1313 + IP_SET_ASSERT(ip_set_list[binding]);
1314 + DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
1315 + HIPQUAD(ip), ip_set_list[binding]->name);
1316 + write_lock_bh(&ip_set_lock);
1317 + set_hash = __ip_set_find(key, id, ip);
1318 + if (!set_hash) {
1319 + set_hash = kmalloc(sizeof(struct ip_set_hash), GFP_ATOMIC);
1320 + if (!set_hash) {
1321 + ret = -ENOMEM;
1322 + goto unlock;
1323 + }
1324 + INIT_LIST_HEAD(&set_hash->list);
1325 + set_hash->id = id;
1326 + set_hash->ip = ip;
1327 + list_add(&set_hash->list, &ip_set_hash[key]);
1328 + } else {
1329 + IP_SET_ASSERT(ip_set_list[set_hash->binding]);
1330 + DP("overwrite binding: %s",
1331 + ip_set_list[set_hash->binding]->name);
1332 + __ip_set_put(set_hash->binding);
1333 + }
1334 + set_hash->binding = binding;
1335 + __ip_set_get(set_hash->binding);
1336 + DP("stored: key %u, id %u (%s), ip %u.%u.%u.%u, binding %u (%s)",
1337 + key, id, ip_set_list[id]->name,
1338 + HIPQUAD(ip), binding, ip_set_list[binding]->name);
1339 + unlock:
1340 + write_unlock_bh(&ip_set_lock);
1341 + return ret;
1342 +}
1343 +
1344 +#define FOREACH_HASH_DO(fn, args...) \
1345 +({ \
1346 + ip_set_id_t __key; \
1347 + struct ip_set_hash *__set_hash; \
1348 + \
1349 + for (__key = 0; __key < ip_set_bindings_hash_size; __key++) { \
1350 + list_for_each_entry(__set_hash, &ip_set_hash[__key], list) \
1351 + fn(__set_hash , ## args); \
1352 + } \
1353 +})
1354 +
1355 +#define FOREACH_HASH_RW_DO(fn, args...) \
1356 +({ \
1357 + ip_set_id_t __key; \
1358 + struct ip_set_hash *__set_hash, *__n; \
1359 + \
1360 + ASSERT_WRITE_LOCK(&ip_set_lock); \
1361 + for (__key = 0; __key < ip_set_bindings_hash_size; __key++) { \
1362 + list_for_each_entry_safe(__set_hash, __n, &ip_set_hash[__key], list)\
1363 + fn(__set_hash , ## args); \
1364 + } \
1365 +})
1366 +
1367 +/* Add, del and test set entries from kernel */
1368 +
1369 +#define follow_bindings(index, set, ip) \
1370 +((index = ip_set_find_in_hash((set)->id, ip)) != IP_SET_INVALID_ID \
1371 + || (index = (set)->binding) != IP_SET_INVALID_ID)
1372 +
1373 +int
1374 +ip_set_testip_kernel(ip_set_id_t index,
1375 + const struct sk_buff *skb,
1376 + const u_int32_t *flags)
1377 +{
1378 + struct ip_set *set;
1379 + ip_set_ip_t ip;
1380 + int res;
1381 + unsigned char i = 0;
1382 +
1383 + IP_SET_ASSERT(flags[i]);
1384 + read_lock_bh(&ip_set_lock);
1385 + do {
1386 + set = ip_set_list[index];
1387 + IP_SET_ASSERT(set);
1388 + DP("set %s, index %u", set->name, index);
1389 + read_lock_bh(&set->lock);
1390 + res = set->type->testip_kernel(set, skb, &ip, flags, i++);
1391 + read_unlock_bh(&set->lock);
1392 + i += !!(set->type->features & IPSET_DATA_DOUBLE);
1393 + } while (res > 0
1394 + && flags[i]
1395 + && follow_bindings(index, set, ip));
1396 + read_unlock_bh(&ip_set_lock);
1397 +
1398 + return res;
1399 +}
1400 +
1401 +void
1402 +ip_set_addip_kernel(ip_set_id_t index,
1403 + const struct sk_buff *skb,
1404 + const u_int32_t *flags)
1405 +{
1406 + struct ip_set *set;
1407 + ip_set_ip_t ip;
1408 + int res;
1409 + unsigned char i = 0;
1410 +
1411 + IP_SET_ASSERT(flags[i]);
1412 + retry:
1413 + read_lock_bh(&ip_set_lock);
1414 + do {
1415 + set = ip_set_list[index];
1416 + IP_SET_ASSERT(set);
1417 + DP("set %s, index %u", set->name, index);
1418 + write_lock_bh(&set->lock);
1419 + res = set->type->addip_kernel(set, skb, &ip, flags, i++);
1420 + write_unlock_bh(&set->lock);
1421 + i += !!(set->type->features & IPSET_DATA_DOUBLE);
1422 + } while ((res == 0 || res == -EEXIST)
1423 + && flags[i]
1424 + && follow_bindings(index, set, ip));
1425 + read_unlock_bh(&ip_set_lock);
1426 +
1427 + if (res == -EAGAIN
1428 + && set->type->retry
1429 + && (res = set->type->retry(set)) == 0)
1430 + goto retry;
1431 +}
1432 +
1433 +void
1434 +ip_set_delip_kernel(ip_set_id_t index,
1435 + const struct sk_buff *skb,
1436 + const u_int32_t *flags)
1437 +{
1438 + struct ip_set *set;
1439 + ip_set_ip_t ip;
1440 + int res;
1441 + unsigned char i = 0;
1442 +
1443 + IP_SET_ASSERT(flags[i]);
1444 + read_lock_bh(&ip_set_lock);
1445 + do {
1446 + set = ip_set_list[index];
1447 + IP_SET_ASSERT(set);
1448 + DP("set %s, index %u", set->name, index);
1449 + write_lock_bh(&set->lock);
1450 + res = set->type->delip_kernel(set, skb, &ip, flags, i++);
1451 + write_unlock_bh(&set->lock);
1452 + i += !!(set->type->features & IPSET_DATA_DOUBLE);
1453 + } while ((res == 0 || res == -EEXIST)
1454 + && flags[i]
1455 + && follow_bindings(index, set, ip));
1456 + read_unlock_bh(&ip_set_lock);
1457 +}
1458 +
1459 +/* Register and deregister settype */
1460 +
1461 +static inline struct ip_set_type *
1462 +find_set_type(const char *name)
1463 +{
1464 + struct ip_set_type *set_type;
1465 +
1466 + list_for_each_entry(set_type, &set_type_list, list)
1467 + if (!strncmp(set_type->typename, name, IP_SET_MAXNAMELEN - 1))
1468 + return set_type;
1469 + return NULL;
1470 +}
1471 +
1472 +int
1473 +ip_set_register_set_type(struct ip_set_type *set_type)
1474 +{
1475 + int ret = 0;
1476 +
1477 + if (set_type->protocol_version != IP_SET_PROTOCOL_VERSION) {
1478 + ip_set_printk("'%s' uses wrong protocol version %u (want %u)",
1479 + set_type->typename,
1480 + set_type->protocol_version,
1481 + IP_SET_PROTOCOL_VERSION);
1482 + return -EINVAL;
1483 + }
1484 +
1485 + write_lock_bh(&ip_set_lock);
1486 + if (find_set_type(set_type->typename)) {
1487 + /* Duplicate! */
1488 + ip_set_printk("'%s' already registered!",
1489 + set_type->typename);
1490 + ret = -EINVAL;
1491 + goto unlock;
1492 + }
1493 + if (!try_module_get(THIS_MODULE)) {
1494 + ret = -EFAULT;
1495 + goto unlock;
1496 + }
1497 + list_add(&set_type->list, &set_type_list);
1498 + DP("'%s' registered.", set_type->typename);
1499 + unlock:
1500 + write_unlock_bh(&ip_set_lock);
1501 + return ret;
1502 +}
1503 +
1504 +void
1505 +ip_set_unregister_set_type(struct ip_set_type *set_type)
1506 +{
1507 + write_lock_bh(&ip_set_lock);
1508 + if (!find_set_type(set_type->typename)) {
1509 + ip_set_printk("'%s' not registered?",
1510 + set_type->typename);
1511 + goto unlock;
1512 + }
1513 + list_del(&set_type->list);
1514 + module_put(THIS_MODULE);
1515 + DP("'%s' unregistered.", set_type->typename);
1516 + unlock:
1517 + write_unlock_bh(&ip_set_lock);
1518 +
1519 +}
1520 +
1521 +/*
1522 + * Userspace routines
1523 + */
1524 +
1525 +/*
1526 + * Find set by name, reference it once. The reference makes sure the
1527 + * thing pointed to does not go away under our feet. Drop the reference
1528 + * later, using ip_set_put().
1529 + */
1530 +ip_set_id_t
1531 +ip_set_get_byname(const char *name)
1532 +{
1533 + ip_set_id_t i, index = IP_SET_INVALID_ID;
1534 +
1535 + down(&ip_set_app_mutex);
1536 + for (i = 0; i < ip_set_max; i++) {
1537 + if (ip_set_list[i] != NULL
1538 + && strcmp(ip_set_list[i]->name, name) == 0) {
1539 + __ip_set_get(i);
1540 + index = i;
1541 + break;
1542 + }
1543 + }
1544 + up(&ip_set_app_mutex);
1545 + return index;
1546 +}
1547 +
1548 +/*
1549 + * Find set by index, reference it once. The reference makes sure the
1550 + * thing pointed to does not go away under our feet. Drop the reference
1551 + * later, using ip_set_put().
1552 + */
1553 +ip_set_id_t
1554 +ip_set_get_byindex(ip_set_id_t index)
1555 +{
1556 + down(&ip_set_app_mutex);
1557 +
1558 +	if (index >= ip_set_max) {
1559 +		up(&ip_set_app_mutex);	/* don't return with the mutex held */
1560 +		return IP_SET_INVALID_ID;
1561 +	}
1562 +	if (ip_set_list[index])
1563 +		__ip_set_get(index);
1564 +	else
1565 +		index = IP_SET_INVALID_ID;
1566 + up(&ip_set_app_mutex);
1567 + return index;
1568 +}
1569 +
1570 +/*
1571 + * If the given index refers to a valid set, decrement its
1572 + * reference count by 1. The caller shall not assume the index
1573 + * to be valid after calling this function.
1574 + */
1575 +void ip_set_put(ip_set_id_t index)
1576 +{
1577 + down(&ip_set_app_mutex);
1578 + if (ip_set_list[index])
1579 + __ip_set_put(index);
1580 + up(&ip_set_app_mutex);
1581 +}
1582 +
1583 +/* Find a set by name or index */
1584 +static ip_set_id_t
1585 +ip_set_find_byname(const char *name)
1586 +{
1587 + ip_set_id_t i, index = IP_SET_INVALID_ID;
1588 +
1589 + for (i = 0; i < ip_set_max; i++) {
1590 + if (ip_set_list[i] != NULL
1591 + && strcmp(ip_set_list[i]->name, name) == 0) {
1592 + index = i;
1593 + break;
1594 + }
1595 + }
1596 + return index;
1597 +}
1598 +
1599 +static ip_set_id_t
1600 +ip_set_find_byindex(ip_set_id_t index)
1601 +{
1602 + if (index >= ip_set_max || ip_set_list[index] == NULL)
1603 + index = IP_SET_INVALID_ID;
1604 +
1605 + return index;
1606 +}
1607 +
1608 +/*
1609 + * Add, del, test, bind and unbind
1610 + */
1611 +
1612 +static inline int
1613 +__ip_set_testip(struct ip_set *set,
1614 + const void *data,
1615 + size_t size,
1616 + ip_set_ip_t *ip)
1617 +{
1618 + int res;
1619 +
1620 + read_lock_bh(&set->lock);
1621 + res = set->type->testip(set, data, size, ip);
1622 + read_unlock_bh(&set->lock);
1623 +
1624 + return res;
1625 +}
1626 +
1627 +static int
1628 +__ip_set_addip(ip_set_id_t index,
1629 + const void *data,
1630 + size_t size)
1631 +{
1632 + struct ip_set *set = ip_set_list[index];
1633 + ip_set_ip_t ip;
1634 + int res;
1635 +
1636 + IP_SET_ASSERT(set);
1637 + do {
1638 + write_lock_bh(&set->lock);
1639 + res = set->type->addip(set, data, size, &ip);
1640 + write_unlock_bh(&set->lock);
1641 + } while (res == -EAGAIN
1642 + && set->type->retry
1643 + && (res = set->type->retry(set)) == 0);
1644 +
1645 + return res;
1646 +}
1647 +
1648 +static int
1649 +ip_set_addip(ip_set_id_t index,
1650 + const void *data,
1651 + size_t size)
1652 +{
1653 +
1654 + return __ip_set_addip(index,
1655 + data + sizeof(struct ip_set_req_adt),
1656 + size - sizeof(struct ip_set_req_adt));
1657 +}
1658 +
1659 +static int
1660 +ip_set_delip(ip_set_id_t index,
1661 + const void *data,
1662 + size_t size)
1663 +{
1664 + struct ip_set *set = ip_set_list[index];
1665 + ip_set_ip_t ip;
1666 + int res;
1667 +
1668 + IP_SET_ASSERT(set);
1669 + write_lock_bh(&set->lock);
1670 + res = set->type->delip(set,
1671 + data + sizeof(struct ip_set_req_adt),
1672 + size - sizeof(struct ip_set_req_adt),
1673 + &ip);
1674 + write_unlock_bh(&set->lock);
1675 +
1676 + return res;
1677 +}
1678 +
1679 +static int
1680 +ip_set_testip(ip_set_id_t index,
1681 + const void *data,
1682 + size_t size)
1683 +{
1684 + struct ip_set *set = ip_set_list[index];
1685 + ip_set_ip_t ip;
1686 + int res;
1687 +
1688 + IP_SET_ASSERT(set);
1689 + res = __ip_set_testip(set,
1690 + data + sizeof(struct ip_set_req_adt),
1691 + size - sizeof(struct ip_set_req_adt),
1692 + &ip);
1693 +
1694 + return (res > 0 ? -EEXIST : res);
1695 +}
1696 +
1697 +static int
1698 +ip_set_bindip(ip_set_id_t index,
1699 + const void *data,
1700 + size_t size)
1701 +{
1702 + struct ip_set *set = ip_set_list[index];
1703 + struct ip_set_req_bind *req_bind;
1704 + ip_set_id_t binding;
1705 + ip_set_ip_t ip;
1706 + int res;
1707 +
1708 + IP_SET_ASSERT(set);
1709 + if (size < sizeof(struct ip_set_req_bind))
1710 + return -EINVAL;
1711 +
1712 + req_bind = (struct ip_set_req_bind *) data;
1713 + req_bind->binding[IP_SET_MAXNAMELEN - 1] = '\0';
1714 +
1715 + if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
1716 + /* Default binding of a set */
1717 + char *binding_name;
1718 +
1719 + if (size != sizeof(struct ip_set_req_bind) + IP_SET_MAXNAMELEN)
1720 + return -EINVAL;
1721 +
1722 + binding_name = (char *)(data + sizeof(struct ip_set_req_bind));
1723 + binding_name[IP_SET_MAXNAMELEN - 1] = '\0';
1724 +
1725 + binding = ip_set_find_byname(binding_name);
1726 + if (binding == IP_SET_INVALID_ID)
1727 + return -ENOENT;
1728 +
1729 + write_lock_bh(&ip_set_lock);
1730 + /* Sets as binding values are referenced */
1731 + if (set->binding != IP_SET_INVALID_ID)
1732 + __ip_set_put(set->binding);
1733 + set->binding = binding;
1734 + __ip_set_get(set->binding);
1735 + write_unlock_bh(&ip_set_lock);
1736 +
1737 + return 0;
1738 + }
1739 + binding = ip_set_find_byname(req_bind->binding);
1740 + if (binding == IP_SET_INVALID_ID)
1741 + return -ENOENT;
1742 +
1743 + res = __ip_set_testip(set,
1744 + data + sizeof(struct ip_set_req_bind),
1745 + size - sizeof(struct ip_set_req_bind),
1746 + &ip);
1747 + DP("set %s, ip: %u.%u.%u.%u, binding %s",
1748 + set->name, HIPQUAD(ip), ip_set_list[binding]->name);
1749 +
1750 + if (res >= 0)
1751 + res = ip_set_hash_add(set->id, ip, binding);
1752 +
1753 + return res;
1754 +}
1755 +
1756 +#define FOREACH_SET_DO(fn, args...) \
1757 +({ \
1758 + ip_set_id_t __i; \
1759 + struct ip_set *__set; \
1760 + \
1761 + for (__i = 0; __i < ip_set_max; __i++) { \
1762 + __set = ip_set_list[__i]; \
1763 + if (__set != NULL) \
1764 + fn(__set , ##args); \
1765 + } \
1766 +})
1767 +
1768 +static inline void
1769 +__set_hash_del_byid(struct ip_set_hash *set_hash, ip_set_id_t id)
1770 +{
1771 + if (set_hash->id == id)
1772 + __set_hash_del(set_hash);
1773 +}
1774 +
1775 +static inline void
1776 +__unbind_default(struct ip_set *set)
1777 +{
1778 + if (set->binding != IP_SET_INVALID_ID) {
1779 + /* Sets as binding values are referenced */
1780 + __ip_set_put(set->binding);
1781 + set->binding = IP_SET_INVALID_ID;
1782 + }
1783 +}
1784 +
1785 +static int
1786 +ip_set_unbindip(ip_set_id_t index,
1787 + const void *data,
1788 + size_t size)
1789 +{
1790 + struct ip_set *set;
1791 + struct ip_set_req_bind *req_bind;
1792 + ip_set_ip_t ip;
1793 + int res;
1794 +
1795 + DP("");
1796 + if (size < sizeof(struct ip_set_req_bind))
1797 + return -EINVAL;
1798 +
1799 + req_bind = (struct ip_set_req_bind *) data;
1800 + req_bind->binding[IP_SET_MAXNAMELEN - 1] = '\0';
1801 +
1802 + DP("%u %s", index, req_bind->binding);
1803 + if (index == IP_SET_INVALID_ID) {
1804 + /* unbind :all: */
1805 + if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
1806 + /* Default binding of sets */
1807 + write_lock_bh(&ip_set_lock);
1808 + FOREACH_SET_DO(__unbind_default);
1809 + write_unlock_bh(&ip_set_lock);
1810 + return 0;
1811 + } else if (strcmp(req_bind->binding, IPSET_TOKEN_ALL) == 0) {
1812 +			/* Flush all bindings of all sets */
1813 + write_lock_bh(&ip_set_lock);
1814 + FOREACH_HASH_RW_DO(__set_hash_del);
1815 + write_unlock_bh(&ip_set_lock);
1816 + return 0;
1817 + }
1818 + DP("unreachable reached!");
1819 + return -EINVAL;
1820 + }
1821 +
1822 + set = ip_set_list[index];
1823 + IP_SET_ASSERT(set);
1824 + if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
1825 + /* Default binding of set */
1826 + ip_set_id_t binding = ip_set_find_byindex(set->binding);
1827 +
1828 + if (binding == IP_SET_INVALID_ID)
1829 + return -ENOENT;
1830 +
1831 + write_lock_bh(&ip_set_lock);
1832 + /* Sets in hash values are referenced */
1833 + __ip_set_put(set->binding);
1834 + set->binding = IP_SET_INVALID_ID;
1835 + write_unlock_bh(&ip_set_lock);
1836 +
1837 + return 0;
1838 + } else if (strcmp(req_bind->binding, IPSET_TOKEN_ALL) == 0) {
1839 + /* Flush all bindings */
1840 +
1841 + write_lock_bh(&ip_set_lock);
1842 + FOREACH_HASH_RW_DO(__set_hash_del_byid, set->id);
1843 + write_unlock_bh(&ip_set_lock);
1844 + return 0;
1845 + }
1846 +
1847 + res = __ip_set_testip(set,
1848 + data + sizeof(struct ip_set_req_bind),
1849 + size - sizeof(struct ip_set_req_bind),
1850 + &ip);
1851 +
1852 + DP("set %s, ip: %u.%u.%u.%u", set->name, HIPQUAD(ip));
1853 + if (res >= 0)
1854 + res = ip_set_hash_del(set->id, ip);
1855 +
1856 + return res;
1857 +}
1858 +
1859 +static int
1860 +ip_set_testbind(ip_set_id_t index,
1861 + const void *data,
1862 + size_t size)
1863 +{
1864 + struct ip_set *set = ip_set_list[index];
1865 + struct ip_set_req_bind *req_bind;
1866 + ip_set_id_t binding;
1867 + ip_set_ip_t ip;
1868 + int res;
1869 +
1870 + IP_SET_ASSERT(set);
1871 + if (size < sizeof(struct ip_set_req_bind))
1872 + return -EINVAL;
1873 +
1874 + req_bind = (struct ip_set_req_bind *) data;
1875 + req_bind->binding[IP_SET_MAXNAMELEN - 1] = '\0';
1876 +
1877 + if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
1878 + /* Default binding of set */
1879 + char *binding_name;
1880 +
1881 + if (size != sizeof(struct ip_set_req_bind) + IP_SET_MAXNAMELEN)
1882 + return -EINVAL;
1883 +
1884 + binding_name = (char *)(data + sizeof(struct ip_set_req_bind));
1885 + binding_name[IP_SET_MAXNAMELEN - 1] = '\0';
1886 +
1887 + binding = ip_set_find_byname(binding_name);
1888 + if (binding == IP_SET_INVALID_ID)
1889 + return -ENOENT;
1890 +
1891 + res = (set->binding == binding) ? -EEXIST : 0;
1892 +
1893 + return res;
1894 + }
1895 + binding = ip_set_find_byname(req_bind->binding);
1896 + if (binding == IP_SET_INVALID_ID)
1897 + return -ENOENT;
1898 +
1899 +
1900 + res = __ip_set_testip(set,
1901 + data + sizeof(struct ip_set_req_bind),
1902 + size - sizeof(struct ip_set_req_bind),
1903 + &ip);
1904 + DP("set %s, ip: %u.%u.%u.%u, binding %s",
1905 + set->name, HIPQUAD(ip), ip_set_list[binding]->name);
1906 +
1907 + if (res >= 0)
1908 + res = (ip_set_find_in_hash(set->id, ip) == binding)
1909 + ? -EEXIST : 0;
1910 +
1911 + return res;
1912 +}
1913 +
1914 +static struct ip_set_type *
1915 +find_set_type_rlock(const char *typename)
1916 +{
1917 + struct ip_set_type *type;
1918 +
1919 + read_lock_bh(&ip_set_lock);
1920 + type = find_set_type(typename);
1921 + if (type == NULL)
1922 + read_unlock_bh(&ip_set_lock);
1923 +
1924 + return type;
1925 +}
1926 +
1927 +static int
1928 +find_free_id(const char *name,
1929 + ip_set_id_t *index,
1930 + ip_set_id_t *id)
1931 +{
1932 + ip_set_id_t i;
1933 +
1934 + *id = IP_SET_INVALID_ID;
1935 + for (i = 0; i < ip_set_max; i++) {
1936 + if (ip_set_list[i] == NULL) {
1937 + if (*id == IP_SET_INVALID_ID)
1938 + *id = *index = i;
1939 + } else if (strcmp(name, ip_set_list[i]->name) == 0)
1940 + /* Name clash */
1941 + return -EEXIST;
1942 + }
1943 + if (*id == IP_SET_INVALID_ID)
1944 + /* No free slot remained */
1945 + return -ERANGE;
1946 + /* Check that index is usable as id (swapping) */
1947 + check:
1948 + for (i = 0; i < ip_set_max; i++) {
1949 + if (ip_set_list[i] != NULL
1950 + && ip_set_list[i]->id == *id) {
1951 + *id = i;
1952 + goto check;
1953 + }
1954 + }
1955 + return 0;
1956 +}
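+
+/*
+ * Annotation (not part of the original patch): slot index and set id
+ * can differ once ip_set_swap() has exchanged two slots, because ids
+ * stay attached to the private data.  find_free_id() therefore picks
+ * the first free slot as both candidate index and id, then bumps the
+ * id to the slot number of any live set that already owns it,
+ * rescanning until the id clashes with no existing set.
+ */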
1957 +
1958 +/*
1959 + * Create a set
1960 + */
1961 +static int
1962 +ip_set_create(const char *name,
1963 + const char *typename,
1964 + ip_set_id_t restore,
1965 + const void *data,
1966 + size_t size)
1967 +{
1968 + struct ip_set *set;
1969 + ip_set_id_t index = 0, id;
1970 + int res = 0;
1971 +
1972 + DP("setname: %s, typename: %s, id: %u", name, typename, restore);
1973 + /*
1974 + * First, and without any locks, allocate and initialize
1975 + * a normal base set structure.
1976 + */
1977 + set = kmalloc(sizeof(struct ip_set), GFP_KERNEL);
1978 + if (!set)
1979 + return -ENOMEM;
1980 + set->lock = RW_LOCK_UNLOCKED;
1981 + strncpy(set->name, name, IP_SET_MAXNAMELEN);
1982 + set->binding = IP_SET_INVALID_ID;
1983 + atomic_set(&set->ref, 0);
1984 +
1985 + /*
1986 + * Next, take the &ip_set_lock, check that we know the type,
1987 + * and take a reference on the type, to make sure it
1988 + * stays available while constructing our new set.
1989 + *
1990 + * After referencing the type, we drop the &ip_set_lock,
1991 + * and let the new set construction run without locks.
1992 + */
1993 + set->type = find_set_type_rlock(typename);
1994 + if (set->type == NULL) {
1995 + /* Try loading the module */
1996 + char modulename[IP_SET_MAXNAMELEN + strlen("ip_set_") + 1];
1997 + strcpy(modulename, "ip_set_");
1998 + strcat(modulename, typename);
1999 + DP("try to load %s", modulename);
2000 + request_module(modulename);
2001 + set->type = find_set_type_rlock(typename);
2002 + }
2003 + if (set->type == NULL) {
2004 + ip_set_printk("no set type '%s', set '%s' not created",
2005 + typename, name);
2006 + res = -ENOENT;
2007 + goto out;
2008 + }
2009 + if (!try_module_get(set->type->me)) {
2010 + read_unlock_bh(&ip_set_lock);
2011 + res = -EFAULT;
2012 + goto out;
2013 + }
2014 + read_unlock_bh(&ip_set_lock);
2015 +
2016 + /*
2017 + * Without holding any locks, create private part.
2018 + */
2019 + res = set->type->create(set, data, size);
2020 + if (res != 0)
2021 + goto put_out;
2022 +
2023 + /* BTW, res==0 here. */
2024 +
2025 + /*
2026 + * Here, we have a valid, constructed set. &ip_set_lock again,
2027 + * find free id/index and check that it is not already in
2028 + * ip_set_list.
2029 + */
2030 + write_lock_bh(&ip_set_lock);
2031 + if ((res = find_free_id(set->name, &index, &id)) != 0) {
2032 + DP("no free id!");
2033 + goto cleanup;
2034 + }
2035 +
2036 + /* Make sure restore gets the same index */
2037 + if (restore != IP_SET_INVALID_ID && index != restore) {
2038 + DP("Can't restore, sets are screwed up");
2039 + res = -ERANGE;
2040 + goto cleanup;
2041 + }
2042 +
2043 + /*
2044 + * Finally! Add our shiny new set to the list, and be done.
2045 + */
2046 + DP("create: '%s' created with index %u, id %u!", set->name, index, id);
2047 + set->id = id;
2048 + ip_set_list[index] = set;
2049 + write_unlock_bh(&ip_set_lock);
2050 + return res;
2051 +
2052 + cleanup:
2053 + write_unlock_bh(&ip_set_lock);
2054 + set->type->destroy(set);
2055 + put_out:
2056 + module_put(set->type->me);
2057 + out:
2058 + kfree(set);
2059 + return res;
2060 +}
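+
+/*
+ * Annotation (not part of the original patch): the error labels above
+ * unwind in reverse order of construction -- "cleanup" destroys the
+ * type-private data, "put_out" drops the module reference taken on the
+ * set type, and "out" frees the bare struct ip_set.  On success the new
+ * set is published in ip_set_list[index] with a reference count of zero.
+ */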
2061 +
2062 +/*
2063 + * Destroy a given existing set
2064 + */
2065 +static void
2066 +ip_set_destroy_set(ip_set_id_t index)
2067 +{
2068 + struct ip_set *set = ip_set_list[index];
2069 +
2070 + IP_SET_ASSERT(set);
2071 + DP("set: %s", set->name);
2072 + write_lock_bh(&ip_set_lock);
2073 + FOREACH_HASH_RW_DO(__set_hash_del_byid, set->id);
2074 + if (set->binding != IP_SET_INVALID_ID)
2075 + __ip_set_put(set->binding);
2076 + ip_set_list[index] = NULL;
2077 + write_unlock_bh(&ip_set_lock);
2078 +
2079 + /* Must call it without holding any lock */
2080 + set->type->destroy(set);
2081 + module_put(set->type->me);
2082 + kfree(set);
2083 +}
2084 +
2085 +/*
2086 + * Destroy a set - or all sets
2087 + * Sets must not be referenced/used.
2088 + */
2089 +static int
2090 +ip_set_destroy(ip_set_id_t index)
2091 +{
2092 + ip_set_id_t i;
2093 +
2094 + /* ref modification always protected by the mutex */
2095 + if (index != IP_SET_INVALID_ID) {
2096 + if (atomic_read(&ip_set_list[index]->ref))
2097 + return -EBUSY;
2098 + ip_set_destroy_set(index);
2099 + } else {
2100 + for (i = 0; i < ip_set_max; i++) {
2101 + if (ip_set_list[i] != NULL
2102 + && (atomic_read(&ip_set_list[i]->ref)))
2103 + return -EBUSY;
2104 + }
2105 +
2106 + for (i = 0; i < ip_set_max; i++) {
2107 + if (ip_set_list[i] != NULL)
2108 + ip_set_destroy_set(i);
2109 + }
2110 + }
2111 + return 0;
2112 +}
2113 +
2114 +static void
2115 +ip_set_flush_set(struct ip_set *set)
2116 +{
2117 + DP("set: %s %u", set->name, set->id);
2118 +
2119 + write_lock_bh(&set->lock);
2120 + set->type->flush(set);
2121 + write_unlock_bh(&set->lock);
2122 +}
2123 +
2124 +/*
2125 + * Flush data in a set - or in all sets
2126 + */
2127 +static int
2128 +ip_set_flush(ip_set_id_t index)
2129 +{
2130 + if (index != IP_SET_INVALID_ID) {
2131 + IP_SET_ASSERT(ip_set_list[index]);
2132 + ip_set_flush_set(ip_set_list[index]);
2133 + } else
2134 + FOREACH_SET_DO(ip_set_flush_set);
2135 +
2136 + return 0;
2137 +}
2138 +
2139 +/* Rename a set */
2140 +static int
2141 +ip_set_rename(ip_set_id_t index, const char *name)
2142 +{
2143 + struct ip_set *set = ip_set_list[index];
2144 + ip_set_id_t i;
2145 + int res = 0;
2146 +
2147 + DP("set: %s to %s", set->name, name);
2148 + write_lock_bh(&ip_set_lock);
2149 + for (i = 0; i < ip_set_max; i++) {
2150 + if (ip_set_list[i] != NULL
2151 + && strncmp(ip_set_list[i]->name,
2152 + name,
2153 + IP_SET_MAXNAMELEN - 1) == 0) {
2154 + res = -EEXIST;
2155 + goto unlock;
2156 + }
2157 + }
2158 + strncpy(set->name, name, IP_SET_MAXNAMELEN);
2159 + unlock:
2160 + write_unlock_bh(&ip_set_lock);
2161 + return res;
2162 +}
2163 +
2164 +/*
2165 + * Swap two sets so that name/index points to the other.
2166 + * References are also swapped.
2167 + */
2168 +static int
2169 +ip_set_swap(ip_set_id_t from_index, ip_set_id_t to_index)
2170 +{
2171 + struct ip_set *from = ip_set_list[from_index];
2172 + struct ip_set *to = ip_set_list[to_index];
2173 + char from_name[IP_SET_MAXNAMELEN];
2174 + u_int32_t from_ref;
2175 +
2176 + DP("set: %s to %s", from->name, to->name);
2177 +	/* Features must not change. Artificial restriction. */
2178 + if (from->type->features != to->type->features)
2179 + return -ENOEXEC;
2180 +
2181 + /* No magic here: ref munging protected by the mutex */
2182 + write_lock_bh(&ip_set_lock);
2183 + strncpy(from_name, from->name, IP_SET_MAXNAMELEN);
2184 + from_ref = atomic_read(&from->ref);
2185 +
2186 + strncpy(from->name, to->name, IP_SET_MAXNAMELEN);
2187 + atomic_set(&from->ref, atomic_read(&to->ref));
2188 + strncpy(to->name, from_name, IP_SET_MAXNAMELEN);
2189 + atomic_set(&to->ref, from_ref);
2190 +
2191 + ip_set_list[from_index] = to;
2192 + ip_set_list[to_index] = from;
2193 +
2194 + write_unlock_bh(&ip_set_lock);
2195 + return 0;
2196 +}
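+
+/*
+ * Annotation (not part of the original patch): after a swap, a name and
+ * its external index refer to the other set's contents, while the
+ * internal ids stay attached to the private data they were created
+ * with.  Bindings in the hash are keyed by id, so they keep following
+ * the data rather than the name; the reference counts are exchanged
+ * along with the names.
+ */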
2197 +
2198 +/*
2199 + * List set data
2200 + */
2201 +
2202 +static inline void
2203 +__set_hash_bindings_size_list(struct ip_set_hash *set_hash,
2204 + ip_set_id_t id, size_t *size)
2205 +{
2206 + if (set_hash->id == id)
2207 + *size += sizeof(struct ip_set_hash_list);
2208 +}
2209 +
2210 +static inline void
2211 +__set_hash_bindings_size_save(struct ip_set_hash *set_hash,
2212 + ip_set_id_t id, size_t *size)
2213 +{
2214 + if (set_hash->id == id)
2215 + *size += sizeof(struct ip_set_hash_save);
2216 +}
2217 +
2218 +static inline void
2219 +__set_hash_bindings(struct ip_set_hash *set_hash,
2220 + ip_set_id_t id, void *data, int *used)
2221 +{
2222 + if (set_hash->id == id) {
2223 + struct ip_set_hash_list *hash_list =
2224 + (struct ip_set_hash_list *)(data + *used);
2225 +
2226 + hash_list->ip = set_hash->ip;
2227 + hash_list->binding = set_hash->binding;
2228 + *used += sizeof(struct ip_set_hash_list);
2229 + }
2230 +}
2231 +
2232 +static int ip_set_list_set(ip_set_id_t index,
2233 + void *data,
2234 + int *used,
2235 + int len)
2236 +{
2237 + struct ip_set *set = ip_set_list[index];
2238 + struct ip_set_list *set_list;
2239 +
2240 + /* Pointer to our header */
2241 + set_list = (struct ip_set_list *) (data + *used);
2242 +
2243 + DP("set: %s, used: %d %p %p", set->name, *used, data, data + *used);
2244 +
2245 + /* Get and ensure header size */
2246 + if (*used + sizeof(struct ip_set_list) > len)
2247 + goto not_enough_mem;
2248 + *used += sizeof(struct ip_set_list);
2249 +
2250 + read_lock_bh(&set->lock);
2251 + /* Get and ensure set specific header size */
2252 + set_list->header_size = set->type->header_size;
2253 + if (*used + set_list->header_size > len)
2254 + goto unlock_set;
2255 +
2256 + /* Fill in the header */
2257 + set_list->index = index;
2258 + set_list->binding = set->binding;
2259 + set_list->ref = atomic_read(&set->ref);
2260 +
2261 +	/* Fill in set specific header data */
2262 + set->type->list_header(set, data + *used);
2263 + *used += set_list->header_size;
2264 +
2265 + /* Get and ensure set specific members size */
2266 + set_list->members_size = set->type->list_members_size(set);
2267 + if (*used + set_list->members_size > len)
2268 + goto unlock_set;
2269 +
2270 +	/* Fill in set specific members data */
2271 + set->type->list_members(set, data + *used);
2272 + *used += set_list->members_size;
2273 + read_unlock_bh(&set->lock);
2274 +
2275 + /* Bindings */
2276 +
2277 + /* Get and ensure set specific bindings size */
2278 + set_list->bindings_size = 0;
2279 + FOREACH_HASH_DO(__set_hash_bindings_size_list,
2280 + set->id, &set_list->bindings_size);
2281 + if (*used + set_list->bindings_size > len)
2282 + goto not_enough_mem;
2283 +
2284 +	/* Fill in set specific bindings data */
2285 + FOREACH_HASH_DO(__set_hash_bindings, set->id, data, used);
2286 +
2287 + return 0;
2288 +
2289 + unlock_set:
2290 + read_unlock_bh(&set->lock);
2291 + not_enough_mem:
2292 + DP("not enough mem, try again");
2293 + return -EAGAIN;
2294 +}
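+
+/*
+ * Annotation (not part of the original patch): one LIST record as
+ * assembled above is laid out back to back in the user buffer as
+ *
+ *	struct ip_set_list        (index, binding, ref, sizes)
+ *	type-specific header      (header_size bytes)
+ *	type-specific members     (members_size bytes)
+ *	struct ip_set_hash_list[] (bindings_size bytes)
+ *
+ * and -EAGAIN asks userspace to retry with a larger buffer.
+ */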
2295 +
2296 +/*
2297 + * Save sets
2298 + */
2299 +static int ip_set_save_set(ip_set_id_t index,
2300 + void *data,
2301 + int *used,
2302 + int len)
2303 +{
2304 + struct ip_set *set;
2305 + struct ip_set_save *set_save;
2306 +
2307 + /* Pointer to our header */
2308 + set_save = (struct ip_set_save *) (data + *used);
2309 +
2310 + /* Get and ensure header size */
2311 + if (*used + sizeof(struct ip_set_save) > len)
2312 + goto not_enough_mem;
2313 + *used += sizeof(struct ip_set_save);
2314 +
2315 + set = ip_set_list[index];
2316 + DP("set: %s, used: %u(%u) %p %p", set->name, *used, len,
2317 + data, data + *used);
2318 +
2319 + read_lock_bh(&set->lock);
2320 + /* Get and ensure set specific header size */
2321 + set_save->header_size = set->type->header_size;
2322 + if (*used + set_save->header_size > len)
2323 + goto unlock_set;
2324 +
2325 + /* Fill in the header */
2326 + set_save->index = index;
2327 + set_save->binding = set->binding;
2328 +
2329 +	/* Fill in set specific header data */
2330 + set->type->list_header(set, data + *used);
2331 + *used += set_save->header_size;
2332 +
2333 + DP("set header filled: %s, used: %u(%u) %p %p", set->name, *used,
2334 + set_save->header_size, data, data + *used);
2335 + /* Get and ensure set specific members size */
2336 + set_save->members_size = set->type->list_members_size(set);
2337 + if (*used + set_save->members_size > len)
2338 + goto unlock_set;
2339 +
2340 +	/* Fill in set specific members data */
2341 + set->type->list_members(set, data + *used);
2342 + *used += set_save->members_size;
2343 + read_unlock_bh(&set->lock);
2344 + DP("set members filled: %s, used: %u(%u) %p %p", set->name, *used,
2345 + set_save->members_size, data, data + *used);
2346 + return 0;
2347 +
2348 + unlock_set:
2349 + read_unlock_bh(&set->lock);
2350 + not_enough_mem:
2351 + DP("not enough mem, try again");
2352 + return -EAGAIN;
2353 +}
2354 +
2355 +static inline void
2356 +__set_hash_save_bindings(struct ip_set_hash *set_hash,
2357 + ip_set_id_t id,
2358 + void *data,
2359 + int *used,
2360 + int len,
2361 + int *res)
2362 +{
2363 + if (*res == 0
2364 + && (id == IP_SET_INVALID_ID || set_hash->id == id)) {
2365 + struct ip_set_hash_save *hash_save =
2366 + (struct ip_set_hash_save *)(data + *used);
2367 + /* Ensure bindings size */
2368 + if (*used + sizeof(struct ip_set_hash_save) > len) {
2369 + *res = -ENOMEM;
2370 + return;
2371 + }
2372 + hash_save->id = set_hash->id;
2373 + hash_save->ip = set_hash->ip;
2374 + hash_save->binding = set_hash->binding;
2375 + *used += sizeof(struct ip_set_hash_save);
2376 + }
2377 +}
2378 +
2379 +static int ip_set_save_bindings(ip_set_id_t index,
2380 + void *data,
2381 + int *used,
2382 + int len)
2383 +{
2384 + int res = 0;
2385 + struct ip_set_save *set_save;
2386 +
2387 + DP("used %u, len %u", *used, len);
2388 + /* Get and ensure header size */
2389 + if (*used + sizeof(struct ip_set_save) > len)
2390 + return -ENOMEM;
2391 +
2392 + /* Marker */
2393 + set_save = (struct ip_set_save *) (data + *used);
2394 + set_save->index = IP_SET_INVALID_ID;
2395 + set_save->header_size = 0;
2396 + set_save->members_size = 0;
2397 + *used += sizeof(struct ip_set_save);
2398 +
2399 + DP("marker added used %u, len %u", *used, len);
2400 + /* Fill in bindings data */
2401 + if (index != IP_SET_INVALID_ID)
2402 + /* Sets are identified by id in hash */
2403 + index = ip_set_list[index]->id;
2404 + FOREACH_HASH_DO(__set_hash_save_bindings, index, data, used, len, &res);
2405 +
2406 + return res;
2407 +}
2408 +
2409 +/*
2410 + * Restore sets
2411 + */
2412 +static int ip_set_restore(void *data,
2413 + int len)
2414 +{
2415 + int res = 0;
2416 + int line = 0, used = 0, members_size;
2417 + struct ip_set *set;
2418 + struct ip_set_hash_save *hash_save;
2419 + struct ip_set_restore *set_restore;
2420 + ip_set_id_t index;
2421 +
2422 + /* Loop to restore sets */
2423 + while (1) {
2424 + line++;
2425 +
2426 + DP("%u %u %u", used, sizeof(struct ip_set_restore), len);
2427 + /* Get and ensure header size */
2428 + if (used + sizeof(struct ip_set_restore) > len)
2429 + return line;
2430 + set_restore = (struct ip_set_restore *) (data + used);
2431 + used += sizeof(struct ip_set_restore);
2432 +
2433 + /* Ensure data size */
2434 + if (used
2435 + + set_restore->header_size
2436 + + set_restore->members_size > len)
2437 + return line;
2438 +
2439 + /* Check marker */
2440 + if (set_restore->index == IP_SET_INVALID_ID) {
2441 + line--;
2442 + goto bindings;
2443 + }
2444 +
2445 + /* Try to create the set */
2446 + DP("restore %s %s", set_restore->name, set_restore->typename);
2447 + res = ip_set_create(set_restore->name,
2448 + set_restore->typename,
2449 + set_restore->index,
2450 + data + used,
2451 + set_restore->header_size);
2452 +
2453 + if (res != 0)
2454 + return line;
2455 + used += set_restore->header_size;
2456 +
2457 + index = ip_set_find_byindex(set_restore->index);
2458 + DP("index %u, restore_index %u", index, set_restore->index);
2459 + if (index != set_restore->index)
2460 + return line;
2461 + /* Try to restore members data */
2462 + set = ip_set_list[index];
2463 + members_size = 0;
2464 + DP("members_size %u reqsize %u",
2465 + set_restore->members_size, set->type->reqsize);
2466 + while (members_size + set->type->reqsize <=
2467 + set_restore->members_size) {
2468 + line++;
2469 + DP("members: %u, line %u", members_size, line);
2470 + res = __ip_set_addip(index,
2471 + data + used + members_size,
2472 + set->type->reqsize);
2473 + if (!(res == 0 || res == -EEXIST))
2474 + return line;
2475 + members_size += set->type->reqsize;
2476 + }
2477 +
2478 + DP("members_size %u %u",
2479 + set_restore->members_size, members_size);
2480 + if (members_size != set_restore->members_size)
2481 + return line++;
2482 + used += set_restore->members_size;
2483 + }
2484 +
2485 + bindings:
2486 + /* Loop to restore bindings */
2487 + while (used < len) {
2488 + line++;
2489 +
2490 + DP("restore binding, line %u", line);
2491 + /* Get and ensure size */
2492 + if (used + sizeof(struct ip_set_hash_save) > len)
2493 + return line;
2494 + hash_save = (struct ip_set_hash_save *) (data + used);
2495 + used += sizeof(struct ip_set_hash_save);
2496 +
2497 + /* hash_save->id is used to store the index */
2498 + index = ip_set_find_byindex(hash_save->id);
2499 + DP("restore binding index %u, id %u, %u -> %u",
2500 + index, hash_save->id, hash_save->ip, hash_save->binding);
2501 + if (index != hash_save->id)
2502 + return line;
2503 + if (ip_set_find_byindex(hash_save->binding) == IP_SET_INVALID_ID) {
2504 + DP("corrupt binding set index %u", hash_save->binding);
2505 + return line;
2506 + }
2507 + set = ip_set_list[hash_save->id];
2508 + /* Null valued IP means default binding */
2509 + if (hash_save->ip)
2510 + res = ip_set_hash_add(set->id,
2511 + hash_save->ip,
2512 + hash_save->binding);
2513 + else {
2514 + IP_SET_ASSERT(set->binding == IP_SET_INVALID_ID);
2515 + write_lock_bh(&ip_set_lock);
2516 + set->binding = hash_save->binding;
2517 + __ip_set_get(set->binding);
2518 + write_unlock_bh(&ip_set_lock);
2519 + DP("default binding: %u", set->binding);
2520 + }
2521 + if (res != 0)
2522 + return line;
2523 + }
2524 + if (used != len)
2525 + return line;
2526 +
2527 + return 0;
2528 +}
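+
+/*
+ * Annotation (not part of the original patch): the restore stream is
+ * the mirror image of SAVE -- a sequence of struct ip_set_restore
+ * records, each followed by its type header and members, terminated by
+ * a marker record whose index is IP_SET_INVALID_ID, and then raw
+ * struct ip_set_hash_save entries for the bindings.  A non-zero return
+ * value is the 1-based line of the restore data that failed, which
+ * ip_set_sockfn_get() passes back to userspace in req_restore->size.
+ */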
2529 +
2530 +static int
2531 +ip_set_sockfn_set(struct sock *sk, int optval, void *user, unsigned int len)
2532 +{
2533 + void *data;
2534 + int res = 0; /* Assume OK */
2535 + unsigned *op;
2536 + struct ip_set_req_adt *req_adt;
2537 + ip_set_id_t index = IP_SET_INVALID_ID;
2538 + int (*adtfn)(ip_set_id_t index,
2539 + const void *data, size_t size);
2540 + struct fn_table {
2541 + int (*fn)(ip_set_id_t index,
2542 + const void *data, size_t size);
2543 + } adtfn_table[] =
2544 +	{ { ip_set_addip }, { ip_set_delip }, { ip_set_testip },
2545 +	  { ip_set_bindip }, { ip_set_unbindip }, { ip_set_testbind },
2546 + };
2547 +
2548 + DP("optval=%d, user=%p, len=%d", optval, user, len);
2549 + if (!capable(CAP_NET_ADMIN))
2550 + return -EPERM;
2551 + if (optval != SO_IP_SET)
2552 + return -EBADF;
2553 + if (len <= sizeof(unsigned)) {
2554 + ip_set_printk("short userdata (want >%zu, got %u)",
2555 + sizeof(unsigned), len);
2556 + return -EINVAL;
2557 + }
2558 + data = vmalloc(len);
2559 + if (!data) {
2560 + DP("out of mem for %u bytes", len);
2561 + return -ENOMEM;
2562 + }
2563 + if (copy_from_user(data, user, len) != 0) {
2564 + res = -EFAULT;
2565 + goto done;
2566 + }
2567 + if (down_interruptible(&ip_set_app_mutex)) {
2568 + res = -EINTR;
2569 + goto done;
2570 + }
2571 +
2572 + op = (unsigned *)data;
2573 + DP("op=%x", *op);
2574 +
2575 + if (*op < IP_SET_OP_VERSION) {
2576 + /* Check the version at the beginning of operations */
2577 + struct ip_set_req_version *req_version =
2578 + (struct ip_set_req_version *) data;
2579 + if (req_version->version != IP_SET_PROTOCOL_VERSION) {
2580 + res = -EPROTO;
2581 + goto done;
2582 + }
2583 + }
2584 +
2585 + switch (*op) {
2586 + case IP_SET_OP_CREATE:{
2587 + struct ip_set_req_create *req_create
2588 + = (struct ip_set_req_create *) data;
2589 +
2590 + if (len < sizeof(struct ip_set_req_create)) {
2591 + ip_set_printk("short CREATE data (want >=%zu, got %u)",
2592 + sizeof(struct ip_set_req_create), len);
2593 + res = -EINVAL;
2594 + goto done;
2595 + }
2596 + req_create->name[IP_SET_MAXNAMELEN - 1] = '\0';
2597 + req_create->typename[IP_SET_MAXNAMELEN - 1] = '\0';
2598 + res = ip_set_create(req_create->name,
2599 + req_create->typename,
2600 + IP_SET_INVALID_ID,
2601 + data + sizeof(struct ip_set_req_create),
2602 + len - sizeof(struct ip_set_req_create));
2603 + goto done;
2604 + }
2605 + case IP_SET_OP_DESTROY:{
2606 + struct ip_set_req_std *req_destroy
2607 + = (struct ip_set_req_std *) data;
2608 +
2609 + if (len != sizeof(struct ip_set_req_std)) {
2610 + ip_set_printk("invalid DESTROY data (want %zu, got %u)",
2611 + sizeof(struct ip_set_req_std), len);
2612 + res = -EINVAL;
2613 + goto done;
2614 + }
2615 + if (strcmp(req_destroy->name, IPSET_TOKEN_ALL) == 0) {
2616 + /* Destroy all sets */
2617 + index = IP_SET_INVALID_ID;
2618 + } else {
2619 + req_destroy->name[IP_SET_MAXNAMELEN - 1] = '\0';
2620 + index = ip_set_find_byname(req_destroy->name);
2621 +
2622 + if (index == IP_SET_INVALID_ID) {
2623 + res = -ENOENT;
2624 + goto done;
2625 + }
2626 + }
2627 +
2628 + res = ip_set_destroy(index);
2629 + goto done;
2630 + }
2631 + case IP_SET_OP_FLUSH:{
2632 + struct ip_set_req_std *req_flush =
2633 + (struct ip_set_req_std *) data;
2634 +
2635 + if (len != sizeof(struct ip_set_req_std)) {
2636 + ip_set_printk("invalid FLUSH data (want %zu, got %u)",
2637 + sizeof(struct ip_set_req_std), len);
2638 + res = -EINVAL;
2639 + goto done;
2640 + }
2641 + if (strcmp(req_flush->name, IPSET_TOKEN_ALL) == 0) {
2642 + /* Flush all sets */
2643 + index = IP_SET_INVALID_ID;
2644 + } else {
2645 + req_flush->name[IP_SET_MAXNAMELEN - 1] = '\0';
2646 + index = ip_set_find_byname(req_flush->name);
2647 +
2648 + if (index == IP_SET_INVALID_ID) {
2649 + res = -ENOENT;
2650 + goto done;
2651 + }
2652 + }
2653 + res = ip_set_flush(index);
2654 + goto done;
2655 + }
2656 + case IP_SET_OP_RENAME:{
2657 + struct ip_set_req_create *req_rename
2658 + = (struct ip_set_req_create *) data;
2659 +
2660 + if (len != sizeof(struct ip_set_req_create)) {
2661 + ip_set_printk("invalid RENAME data (want %zu, got %u)",
2662 + sizeof(struct ip_set_req_create), len);
2663 + res = -EINVAL;
2664 + goto done;
2665 + }
2666 +
2667 + req_rename->name[IP_SET_MAXNAMELEN - 1] = '\0';
2668 + req_rename->typename[IP_SET_MAXNAMELEN - 1] = '\0';
2669 +
2670 + index = ip_set_find_byname(req_rename->name);
2671 + if (index == IP_SET_INVALID_ID) {
2672 + res = -ENOENT;
2673 + goto done;
2674 + }
2675 + res = ip_set_rename(index, req_rename->typename);
2676 + goto done;
2677 + }
2678 + case IP_SET_OP_SWAP:{
2679 + struct ip_set_req_create *req_swap
2680 + = (struct ip_set_req_create *) data;
2681 + ip_set_id_t to_index;
2682 +
2683 + if (len != sizeof(struct ip_set_req_create)) {
2684 + ip_set_printk("invalid SWAP data (want %zu, got %u)",
2685 + sizeof(struct ip_set_req_create), len);
2686 + res = -EINVAL;
2687 + goto done;
2688 + }
2689 +
2690 + req_swap->name[IP_SET_MAXNAMELEN - 1] = '\0';
2691 + req_swap->typename[IP_SET_MAXNAMELEN - 1] = '\0';
2692 +
2693 + index = ip_set_find_byname(req_swap->name);
2694 + if (index == IP_SET_INVALID_ID) {
2695 + res = -ENOENT;
2696 + goto done;
2697 + }
2698 + to_index = ip_set_find_byname(req_swap->typename);
2699 + if (to_index == IP_SET_INVALID_ID) {
2700 + res = -ENOENT;
2701 + goto done;
2702 + }
2703 + res = ip_set_swap(index, to_index);
2704 + goto done;
2705 + }
2706 + default:
2707 + break; /* Set identified by id */
2708 + }
2709 +
2710 + /* There we may have add/del/test/bind/unbind/test_bind operations */
2711 + if (*op < IP_SET_OP_ADD_IP || *op > IP_SET_OP_TEST_BIND_SET) {
2712 + res = -EBADMSG;
2713 + goto done;
2714 + }
2715 + adtfn = adtfn_table[*op - IP_SET_OP_ADD_IP].fn;
2716 +
2717 + if (len < sizeof(struct ip_set_req_adt)) {
2718 + ip_set_printk("short data in adt request (want >=%zu, got %u)",
2719 + sizeof(struct ip_set_req_adt), len);
2720 + res = -EINVAL;
2721 + goto done;
2722 + }
2723 + req_adt = (struct ip_set_req_adt *) data;
2724 +
2725 + /* -U :all: :all:|:default: uses IP_SET_INVALID_ID */
2726 + if (!(*op == IP_SET_OP_UNBIND_SET
2727 + && req_adt->index == IP_SET_INVALID_ID)) {
2728 + index = ip_set_find_byindex(req_adt->index);
2729 + if (index == IP_SET_INVALID_ID) {
2730 + res = -ENOENT;
2731 + goto done;
2732 + }
2733 + }
2734 + res = adtfn(index, data, len);
2735 +
2736 + done:
2737 + up(&ip_set_app_mutex);
2738 + vfree(data);
2739 + if (res > 0)
2740 + res = 0;
2741 + DP("final result %d", res);
2742 + return res;
2743 +}
2744 +
2745 +static int
2746 +ip_set_sockfn_get(struct sock *sk, int optval, void *user, int *len)
2747 +{
2748 + int res = 0;
2749 + unsigned *op;
2750 + ip_set_id_t index = IP_SET_INVALID_ID;
2751 + void *data;
2752 + int copylen = *len;
2753 +
2754 + DP("optval=%d, user=%p, len=%d", optval, user, *len);
2755 + if (!capable(CAP_NET_ADMIN))
2756 + return -EPERM;
2757 + if (optval != SO_IP_SET)
2758 + return -EBADF;
2759 + if (*len < sizeof(unsigned)) {
2760 + ip_set_printk("short userdata (want >=%zu, got %d)",
2761 + sizeof(unsigned), *len);
2762 + return -EINVAL;
2763 + }
2764 + data = vmalloc(*len);
2765 + if (!data) {
2766 + DP("out of mem for %d bytes", *len);
2767 + return -ENOMEM;
2768 + }
2769 + if (copy_from_user(data, user, *len) != 0) {
2770 + res = -EFAULT;
2771 + goto done;
2772 + }
2773 + if (down_interruptible(&ip_set_app_mutex)) {
2774 + res = -EINTR;
2775 + goto done;
2776 + }
2777 +
2778 + op = (unsigned *) data;
2779 + DP("op=%x", *op);
2780 +
2781 + if (*op < IP_SET_OP_VERSION) {
2782 + /* Check the version at the beginning of operations */
2783 + struct ip_set_req_version *req_version =
2784 + (struct ip_set_req_version *) data;
2785 + if (req_version->version != IP_SET_PROTOCOL_VERSION) {
2786 + res = -EPROTO;
2787 + goto done;
2788 + }
2789 + }
2790 +
2791 + switch (*op) {
2792 + case IP_SET_OP_VERSION: {
2793 + struct ip_set_req_version *req_version =
2794 + (struct ip_set_req_version *) data;
2795 +
2796 + if (*len != sizeof(struct ip_set_req_version)) {
2797 + ip_set_printk("invalid VERSION (want %zu, got %d)",
2798 + sizeof(struct ip_set_req_version),
2799 + *len);
2800 + res = -EINVAL;
2801 + goto done;
2802 + }
2803 +
2804 + req_version->version = IP_SET_PROTOCOL_VERSION;
2805 + res = copy_to_user(user, req_version,
2806 + sizeof(struct ip_set_req_version));
2807 + goto done;
2808 + }
2809 + case IP_SET_OP_GET_BYNAME: {
2810 + struct ip_set_req_get_set *req_get
2811 + = (struct ip_set_req_get_set *) data;
2812 +
2813 + if (*len != sizeof(struct ip_set_req_get_set)) {
2814 + ip_set_printk("invalid GET_BYNAME (want %zu, got %d)",
2815 + sizeof(struct ip_set_req_get_set), *len);
2816 + res = -EINVAL;
2817 + goto done;
2818 + }
2819 + req_get->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
2820 + index = ip_set_find_byname(req_get->set.name);
2821 + req_get->set.index = index;
2822 + goto copy;
2823 + }
2824 + case IP_SET_OP_GET_BYINDEX: {
2825 + struct ip_set_req_get_set *req_get
2826 + = (struct ip_set_req_get_set *) data;
2827 +
2828 + if (*len != sizeof(struct ip_set_req_get_set)) {
2829 + ip_set_printk("invalid GET_BYINDEX (want %zu, got %d)",
2830 + sizeof(struct ip_set_req_get_set), *len);
2831 + res = -EINVAL;
2832 + goto done;
2833 + }
2834 + req_get->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
2835 + index = ip_set_find_byindex(req_get->set.index);
2836 + strncpy(req_get->set.name,
2837 + index == IP_SET_INVALID_ID ? ""
2838 + : ip_set_list[index]->name, IP_SET_MAXNAMELEN);
2839 + goto copy;
2840 + }
2841 + case IP_SET_OP_ADT_GET: {
2842 + struct ip_set_req_adt_get *req_get
2843 + = (struct ip_set_req_adt_get *) data;
2844 +
2845 + if (*len != sizeof(struct ip_set_req_adt_get)) {
2846 + ip_set_printk("invalid ADT_GET (want %zu, got %d)",
2847 + sizeof(struct ip_set_req_adt_get), *len);
2848 + res = -EINVAL;
2849 + goto done;
2850 + }
2851 + req_get->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
2852 + index = ip_set_find_byname(req_get->set.name);
2853 + if (index != IP_SET_INVALID_ID) {
2854 + req_get->set.index = index;
2855 + strncpy(req_get->typename,
2856 + ip_set_list[index]->type->typename,
2857 + IP_SET_MAXNAMELEN - 1);
2858 + } else {
2859 + res = -ENOENT;
2860 + goto done;
2861 + }
2862 + goto copy;
2863 + }
2864 + case IP_SET_OP_MAX_SETS: {
2865 + struct ip_set_req_max_sets *req_max_sets
2866 + = (struct ip_set_req_max_sets *) data;
2867 + ip_set_id_t i;
2868 +
2869 + if (*len != sizeof(struct ip_set_req_max_sets)) {
2870 + ip_set_printk("invalid MAX_SETS (want %zu, got %d)",
2871 + sizeof(struct ip_set_req_max_sets), *len);
2872 + res = -EINVAL;
2873 + goto done;
2874 + }
2875 +
2876 + if (strcmp(req_max_sets->set.name, IPSET_TOKEN_ALL) == 0) {
2877 + req_max_sets->set.index = IP_SET_INVALID_ID;
2878 + } else {
2879 + req_max_sets->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
2880 + req_max_sets->set.index =
2881 + ip_set_find_byname(req_max_sets->set.name);
2882 + if (req_max_sets->set.index == IP_SET_INVALID_ID) {
2883 + res = -ENOENT;
2884 + goto done;
2885 + }
2886 + }
2887 + req_max_sets->max_sets = ip_set_max;
2888 + req_max_sets->sets = 0;
2889 + for (i = 0; i < ip_set_max; i++) {
2890 + if (ip_set_list[i] != NULL)
2891 + req_max_sets->sets++;
2892 + }
2893 + goto copy;
2894 + }
2895 + case IP_SET_OP_LIST_SIZE:
2896 + case IP_SET_OP_SAVE_SIZE: {
2897 + struct ip_set_req_setnames *req_setnames
2898 + = (struct ip_set_req_setnames *) data;
2899 + struct ip_set_name_list *name_list;
2900 + struct ip_set *set;
2901 + ip_set_id_t i;
2902 + int used;
2903 +
2904 + if (*len < sizeof(struct ip_set_req_setnames)) {
2905 + ip_set_printk("short LIST_SIZE (want >=%zu, got %d)",
2906 + sizeof(struct ip_set_req_setnames), *len);
2907 + res = -EINVAL;
2908 + goto done;
2909 + }
2910 +
2911 + req_setnames->size = 0;
2912 + used = sizeof(struct ip_set_req_setnames);
2913 + for (i = 0; i < ip_set_max; i++) {
2914 + if (ip_set_list[i] == NULL)
2915 + continue;
2916 + name_list = (struct ip_set_name_list *)
2917 + (data + used);
2918 + used += sizeof(struct ip_set_name_list);
2919 + if (used > copylen) {
2920 + res = -EAGAIN;
2921 + goto done;
2922 + }
2923 + set = ip_set_list[i];
2924 + /* Fill in index, name, etc. */
2925 + name_list->index = i;
2926 + name_list->id = set->id;
2927 + strncpy(name_list->name,
2928 + set->name,
2929 + IP_SET_MAXNAMELEN - 1);
2930 + strncpy(name_list->typename,
2931 + set->type->typename,
2932 + IP_SET_MAXNAMELEN - 1);
2933 + DP("filled %s of type %s, index %u\n",
2934 + name_list->name, name_list->typename,
2935 + name_list->index);
2936 + if (!(req_setnames->index == IP_SET_INVALID_ID
2937 + || req_setnames->index == i))
2938 + continue;
2939 + /* Update size */
2940 + switch (*op) {
2941 + case IP_SET_OP_LIST_SIZE: {
2942 + req_setnames->size += sizeof(struct ip_set_list)
2943 + + set->type->header_size
2944 + + set->type->list_members_size(set);
2945 + /* Sets are identified by id in the hash */
2946 + FOREACH_HASH_DO(__set_hash_bindings_size_list,
2947 + set->id, &req_setnames->size);
2948 + break;
2949 + }
2950 + case IP_SET_OP_SAVE_SIZE: {
2951 + req_setnames->size += sizeof(struct ip_set_save)
2952 + + set->type->header_size
2953 + + set->type->list_members_size(set);
2954 + FOREACH_HASH_DO(__set_hash_bindings_size_save,
2955 + set->id, &req_setnames->size);
2956 + break;
2957 + }
2958 + default:
2959 + break;
2960 + }
2961 + }
2962 + if (copylen != used) {
2963 + res = -EAGAIN;
2964 + goto done;
2965 + }
2966 + goto copy;
2967 + }
2968 + case IP_SET_OP_LIST: {
2969 + struct ip_set_req_list *req_list
2970 + = (struct ip_set_req_list *) data;
2971 + ip_set_id_t i;
2972 + int used;
2973 +
2974 + if (*len < sizeof(struct ip_set_req_list)) {
2975 + ip_set_printk("short LIST (want >=%zu, got %d)",
2976 + sizeof(struct ip_set_req_list), *len);
2977 + res = -EINVAL;
2978 + goto done;
2979 + }
2980 + index = req_list->index;
2981 + if (index != IP_SET_INVALID_ID
2982 + && ip_set_find_byindex(index) != index) {
2983 + res = -ENOENT;
2984 + goto done;
2985 + }
2986 + used = 0;
2987 + if (index == IP_SET_INVALID_ID) {
2988 + /* List all sets */
2989 + for (i = 0; i < ip_set_max && res == 0; i++) {
2990 + if (ip_set_list[i] != NULL)
2991 + res = ip_set_list_set(i, data, &used, *len);
2992 + }
2993 + } else {
2994 + /* List an individual set */
2995 + res = ip_set_list_set(index, data, &used, *len);
2996 + }
2997 + if (res != 0)
2998 + goto done;
2999 + else if (copylen != used) {
3000 + res = -EAGAIN;
3001 + goto done;
3002 + }
3003 + goto copy;
3004 + }
3005 + case IP_SET_OP_SAVE: {
3006 + struct ip_set_req_list *req_save
3007 + = (struct ip_set_req_list *) data;
3008 + ip_set_id_t i;
3009 + int used;
3010 +
3011 + if (*len < sizeof(struct ip_set_req_list)) {
3012 + ip_set_printk("short SAVE (want >=%zu, got %d)",
3013 + sizeof(struct ip_set_req_list), *len);
3014 + res = -EINVAL;
3015 + goto done;
3016 + }
3017 + index = req_save->index;
3018 + if (index != IP_SET_INVALID_ID
3019 + && ip_set_find_byindex(index) != index) {
3020 + res = -ENOENT;
3021 + goto done;
3022 + }
3023 + used = 0;
3024 + if (index == IP_SET_INVALID_ID) {
3025 + /* Save all sets */
3026 + for (i = 0; i < ip_set_max && res == 0; i++) {
3027 + if (ip_set_list[i] != NULL)
3028 + res = ip_set_save_set(i, data, &used, *len);
3029 + }
3030 + } else {
3031 + /* Save an individual set */
3032 + res = ip_set_save_set(index, data, &used, *len);
3033 + }
3034 + if (res == 0)
3035 + res = ip_set_save_bindings(index, data, &used, *len);
3036 +
3037 + if (res != 0)
3038 + goto done;
3039 + else if (copylen != used) {
3040 + res = -EAGAIN;
3041 + goto done;
3042 + }
3043 + goto copy;
3044 + }
3045 + case IP_SET_OP_RESTORE: {
3046 + struct ip_set_req_setnames *req_restore
3047 + = (struct ip_set_req_setnames *) data;
3048 + int line;
3049 +
3050 + if (*len < sizeof(struct ip_set_req_setnames)
3051 + || *len != req_restore->size) {
3052 + ip_set_printk("invalid RESTORE (want =%zu, got %d)",
3053 + req_restore->size, *len);
3054 + res = -EINVAL;
3055 + goto done;
3056 + }
3057 + line = ip_set_restore(data + sizeof(struct ip_set_req_setnames),
3058 + req_restore->size - sizeof(struct ip_set_req_setnames));
3059 + DP("ip_set_restore: %u", line);
3060 + if (line != 0) {
3061 + res = -EAGAIN;
3062 + req_restore->size = line;
3063 + copylen = sizeof(struct ip_set_req_setnames);
3064 + goto copy;
3065 + }
3066 + goto done;
3067 + }
3068 + default:
3069 + res = -EBADMSG;
3070 + goto done;
3071 + } /* end of switch(op) */
3072 +
3073 + copy:
3074 + DP("set %s, copylen %u", index != IP_SET_INVALID_ID
3075 + && ip_set_list[index]
3076 + ? ip_set_list[index]->name
3077 + : ":all:", copylen);
3078 + res = copy_to_user(user, data, copylen);
3079 +
3080 + done:
3081 + up(&ip_set_app_mutex);
3082 + vfree(data);
3083 + if (res > 0)
3084 + res = 0;
3085 + DP("final result %d", res);
3086 + return res;
3087 +}
3088 +
3089 +static struct nf_sockopt_ops so_set = {
3090 + .pf = PF_INET,
3091 + .set_optmin = SO_IP_SET,
3092 + .set_optmax = SO_IP_SET + 1,
3093 + .set = &ip_set_sockfn_set,
3094 + .get_optmin = SO_IP_SET,
3095 + .get_optmax = SO_IP_SET + 1,
3096 + .get = &ip_set_sockfn_get,
3097 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
3098 + .owner = THIS_MODULE,
3099 +#endif
3100 +};
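+
+/*
+ * Annotation (not part of the original patch): every command travels
+ * over this single sockopt.  A rough userspace sketch (the ipset tool
+ * uses a raw AF_INET socket; buffer layout only indicated here):
+ *
+ *	int s = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);
+ *	... fill buffer: leading "unsigned" op code, then the matching
+ *	    struct ip_set_req_* for that op ...
+ *	setsockopt(s, SOL_IP, SO_IP_SET, buffer, len);    modifying commands
+ *	getsockopt(s, SOL_IP, SO_IP_SET, buffer, &len);   queries, LIST/SAVE/RESTORE
+ *
+ * Both paths require CAP_NET_ADMIN and check IP_SET_PROTOCOL_VERSION
+ * for the low-numbered operations.
+ */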
3101 +
3102 +static int max_sets, hash_size;
3103 +module_param(max_sets, int, 0600);
3104 +MODULE_PARM_DESC(max_sets, "maximal number of sets");
3105 +module_param(hash_size, int, 0600);
3106 +MODULE_PARM_DESC(hash_size, "hash size for bindings");
3107 +MODULE_LICENSE("GPL");
3108 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
3109 +MODULE_DESCRIPTION("module implementing core IP set support");
3110 +
3111 +static int __init ip_set_init(void)
3112 +{
3113 + int res;
3114 + ip_set_id_t i;
3115 +
3116 + get_random_bytes(&ip_set_hash_random, 4);
3117 + if (max_sets)
3118 + ip_set_max = max_sets;
3119 + ip_set_list = vmalloc(sizeof(struct ip_set *) * ip_set_max);
3120 + if (!ip_set_list) {
3121 + printk(KERN_ERR "Unable to create ip_set_list\n");
3122 + return -ENOMEM;
3123 + }
3124 + memset(ip_set_list, 0, sizeof(struct ip_set *) * ip_set_max);
3125 + if (hash_size)
3126 + ip_set_bindings_hash_size = hash_size;
3127 + ip_set_hash = vmalloc(sizeof(struct list_head) * ip_set_bindings_hash_size);
3128 + if (!ip_set_hash) {
3129 + printk(KERN_ERR "Unable to create ip_set_hash\n");
3130 + vfree(ip_set_list);
3131 + return -ENOMEM;
3132 + }
3133 + for (i = 0; i < ip_set_bindings_hash_size; i++)
3134 + INIT_LIST_HEAD(&ip_set_hash[i]);
3135 +
3136 + INIT_LIST_HEAD(&set_type_list);
3137 +
3138 + res = nf_register_sockopt(&so_set);
3139 + if (res != 0) {
3140 + ip_set_printk("SO_SET registry failed: %d", res);
3141 + vfree(ip_set_list);
3142 + vfree(ip_set_hash);
3143 + return res;
3144 + }
3145 + return 0;
3146 +}
3147 +
3148 +static void __exit ip_set_fini(void)
3149 +{
3150 + /* There can't be any existing set or binding */
3151 + nf_unregister_sockopt(&so_set);
3152 + vfree(ip_set_list);
3153 + vfree(ip_set_hash);
3154 + DP("these are the famous last words");
3155 +}
3156 +
3157 +EXPORT_SYMBOL(ip_set_register_set_type);
3158 +EXPORT_SYMBOL(ip_set_unregister_set_type);
3159 +
3160 +EXPORT_SYMBOL(ip_set_get_byname);
3161 +EXPORT_SYMBOL(ip_set_get_byindex);
3162 +EXPORT_SYMBOL(ip_set_put);
3163 +
3164 +EXPORT_SYMBOL(ip_set_addip_kernel);
3165 +EXPORT_SYMBOL(ip_set_delip_kernel);
3166 +EXPORT_SYMBOL(ip_set_testip_kernel);
3167 +
3168 +module_init(ip_set_init);
3169 +module_exit(ip_set_fini);
3170 Index: linux-2.6.21.7/net/ipv4/netfilter/ip_set_iphash.c
3171 ===================================================================
3172 --- /dev/null
3173 +++ linux-2.6.21.7/net/ipv4/netfilter/ip_set_iphash.c
3174 @@ -0,0 +1,429 @@
3175 +/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
3176 + *
3177 + * This program is free software; you can redistribute it and/or modify
3178 + * it under the terms of the GNU General Public License version 2 as
3179 + * published by the Free Software Foundation.
3180 + */
3181 +
3182 +/* Kernel module implementing an ip hash set */
3183 +
3184 +#include <linux/module.h>
3185 +#include <linux/ip.h>
3186 +#include <linux/skbuff.h>
3187 +#include <linux/version.h>
3188 +#include <linux/jhash.h>
3189 +#include <linux/netfilter_ipv4/ip_tables.h>
3190 +#include <linux/netfilter_ipv4/ip_set.h>
3191 +#include <linux/errno.h>
3192 +#include <asm/uaccess.h>
3193 +#include <asm/bitops.h>
3194 +#include <linux/spinlock.h>
3195 +#include <linux/vmalloc.h>
3196 +#include <linux/random.h>
3197 +
3198 +#include <net/ip.h>
3199 +
3200 +#include <linux/netfilter_ipv4/ip_set_malloc.h>
3201 +#include <linux/netfilter_ipv4/ip_set_iphash.h>
3202 +
3203 +static int limit = MAX_RANGE;
3204 +
3205 +static inline __u32
3206 +jhash_ip(const struct ip_set_iphash *map, uint16_t i, ip_set_ip_t ip)
3207 +{
3208 + return jhash_1word(ip, *(((uint32_t *) map->initval) + i));
3209 +}
3210 +
3211 +static inline __u32
3212 +hash_id(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
3213 +{
3214 + struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
3215 + __u32 id;
3216 + u_int16_t i;
3217 + ip_set_ip_t *elem;
3218 +
3219 + *hash_ip = ip & map->netmask;
3220 + DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u, %u.%u.%u.%u",
3221 + set->name, HIPQUAD(ip), HIPQUAD(*hash_ip), HIPQUAD(map->netmask));
3222 +
3223 + for (i = 0; i < map->probes; i++) {
3224 + id = jhash_ip(map, i, *hash_ip) % map->hashsize;
3225 + DP("hash key: %u", id);
3226 + elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
3227 + if (*elem == *hash_ip)
3228 + return id;
3229 +		/* No shortcut when testing - there can be deleted
3230 + * entries. */
3231 + }
3232 + return UINT_MAX;
3233 +}
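+
+/*
+ * Annotation (not part of the original patch): iphash is an open hash
+ * without chaining -- each lookup computes up to "probes" independent
+ * jhash values (one random initval per probe) and checks a single slot
+ * for each.  An empty slot does not end a lookup, because deletion
+ * simply zeroes a slot; UINT_MAX means "not found".
+ */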
3234 +
3235 +static inline int
3236 +__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
3237 +{
3238 + return (ip && hash_id(set, ip, hash_ip) != UINT_MAX);
3239 +}
3240 +
3241 +static int
3242 +testip(struct ip_set *set, const void *data, size_t size,
3243 + ip_set_ip_t *hash_ip)
3244 +{
3245 + struct ip_set_req_iphash *req =
3246 + (struct ip_set_req_iphash *) data;
3247 +
3248 + if (size != sizeof(struct ip_set_req_iphash)) {
3249 + ip_set_printk("data length wrong (want %zu, have %zu)",
3250 + sizeof(struct ip_set_req_iphash),
3251 + size);
3252 + return -EINVAL;
3253 + }
3254 + return __testip(set, req->ip, hash_ip);
3255 +}
3256 +
3257 +static int
3258 +testip_kernel(struct ip_set *set,
3259 + const struct sk_buff *skb,
3260 + ip_set_ip_t *hash_ip,
3261 + const u_int32_t *flags,
3262 + unsigned char index)
3263 +{
3264 + return __testip(set,
3265 + ntohl(flags[index] & IPSET_SRC
3266 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
3267 + ? ip_hdr(skb)->saddr
3268 + : ip_hdr(skb)->daddr),
3269 +#else
3270 + ? skb->nh.iph->saddr
3271 + : skb->nh.iph->daddr),
3272 +#endif
3273 + hash_ip);
3274 +}
3275 +
3276 +static inline int
3277 +__addip(struct ip_set_iphash *map, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
3278 +{
3279 + __u32 probe;
3280 + u_int16_t i;
3281 + ip_set_ip_t *elem;
3282 +
3283 + if (!ip || map->elements >= limit)
3284 + return -ERANGE;
3285 +
3286 + *hash_ip = ip & map->netmask;
3287 +
3288 + for (i = 0; i < map->probes; i++) {
3289 + probe = jhash_ip(map, i, *hash_ip) % map->hashsize;
3290 + elem = HARRAY_ELEM(map->members, ip_set_ip_t *, probe);
3291 + if (*elem == *hash_ip)
3292 + return -EEXIST;
3293 + if (!*elem) {
3294 + *elem = *hash_ip;
3295 + map->elements++;
3296 + return 0;
3297 + }
3298 + }
3299 + /* Trigger rehashing */
3300 + return -EAGAIN;
3301 +}
3302 +
3303 +static int
3304 +addip(struct ip_set *set, const void *data, size_t size,
3305 + ip_set_ip_t *hash_ip)
3306 +{
3307 + struct ip_set_req_iphash *req =
3308 + (struct ip_set_req_iphash *) data;
3309 +
3310 + if (size != sizeof(struct ip_set_req_iphash)) {
3311 + ip_set_printk("data length wrong (want %zu, have %zu)",
3312 + sizeof(struct ip_set_req_iphash),
3313 + size);
3314 + return -EINVAL;
3315 + }
3316 + return __addip((struct ip_set_iphash *) set->data, req->ip, hash_ip);
3317 +}
3318 +
3319 +static int
3320 +addip_kernel(struct ip_set *set,
3321 + const struct sk_buff *skb,
3322 + ip_set_ip_t *hash_ip,
3323 + const u_int32_t *flags,
3324 + unsigned char index)
3325 +{
3326 + return __addip((struct ip_set_iphash *) set->data,
3327 + ntohl(flags[index] & IPSET_SRC
3328 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
3329 + ? ip_hdr(skb)->saddr
3330 + : ip_hdr(skb)->daddr),
3331 +#else
3332 + ? skb->nh.iph->saddr
3333 + : skb->nh.iph->daddr),
3334 +#endif
3335 + hash_ip);
3336 +}
3337 +
3338 +static int retry(struct ip_set *set)
3339 +{
3340 + struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
3341 + ip_set_ip_t hash_ip, *elem;
3342 + void *members;
3343 + u_int32_t i, hashsize = map->hashsize;
3344 + int res;
3345 + struct ip_set_iphash *tmp;
3346 +
3347 + if (map->resize == 0)
3348 + return -ERANGE;
3349 +
3350 + again:
3351 + res = 0;
3352 +
3353 + /* Calculate new hash size */
3354 + hashsize += (hashsize * map->resize)/100;
3355 + if (hashsize == map->hashsize)
3356 + hashsize++;
3357 +
3358 + ip_set_printk("rehashing of set %s triggered: "
3359 + "hashsize grows from %u to %u",
3360 + set->name, map->hashsize, hashsize);
3361 +
3362 + tmp = kmalloc(sizeof(struct ip_set_iphash)
3363 + + map->probes * sizeof(uint32_t), GFP_ATOMIC);
3364 + if (!tmp) {
3365 + DP("out of memory for %d bytes",
3366 + sizeof(struct ip_set_iphash)
3367 + + map->probes * sizeof(uint32_t));
3368 + return -ENOMEM;
3369 + }
3370 + tmp->members = harray_malloc(hashsize, sizeof(ip_set_ip_t), GFP_ATOMIC);
3371 + if (!tmp->members) {
3372 + DP("out of memory for %d bytes", hashsize * sizeof(ip_set_ip_t));
3373 + kfree(tmp);
3374 + return -ENOMEM;
3375 + }
3376 + tmp->hashsize = hashsize;
3377 + tmp->elements = 0;
3378 + tmp->probes = map->probes;
3379 + tmp->resize = map->resize;
3380 + tmp->netmask = map->netmask;
3381 + memcpy(tmp->initval, map->initval, map->probes * sizeof(uint32_t));
3382 +
3383 + write_lock_bh(&set->lock);
3384 + map = (struct ip_set_iphash *) set->data; /* Play safe */
3385 + for (i = 0; i < map->hashsize && res == 0; i++) {
3386 + elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
3387 + if (*elem)
3388 + res = __addip(tmp, *elem, &hash_ip);
3389 + }
3390 + if (res) {
3391 + /* Failure, try again */
3392 + write_unlock_bh(&set->lock);
3393 + harray_free(tmp->members);
3394 + kfree(tmp);
3395 + goto again;
3396 + }
3397 +
3398 + /* Success at resizing! */
3399 + members = map->members;
3400 +
3401 + map->hashsize = tmp->hashsize;
3402 + map->members = tmp->members;
3403 + write_unlock_bh(&set->lock);
3404 +
3405 + harray_free(members);
3406 + kfree(tmp);
3407 +
3408 + return 0;
3409 +}
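+
+/*
+ * Annotation (not part of the original patch): retry() is the rehash
+ * path behind the -EAGAIN that __addip() returns when all probe slots
+ * are taken.  The table is grown by "resize" percent (at least one
+ * slot), the old members are reinserted into the new array under the
+ * set lock, and the whole attempt is repeated with a larger size if
+ * even the grown table still collides.
+ */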
3410 +
3411 +static inline int
3412 +__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
3413 +{
3414 + struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
3415 + ip_set_ip_t id, *elem;
3416 +
3417 + if (!ip)
3418 + return -ERANGE;
3419 +
3420 + id = hash_id(set, ip, hash_ip);
3421 + if (id == UINT_MAX)
3422 + return -EEXIST;
3423 +
3424 + elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
3425 + *elem = 0;
3426 + map->elements--;
3427 +
3428 + return 0;
3429 +}
3430 +
3431 +static int
3432 +delip(struct ip_set *set, const void *data, size_t size,
3433 + ip_set_ip_t *hash_ip)
3434 +{
3435 + struct ip_set_req_iphash *req =
3436 + (struct ip_set_req_iphash *) data;
3437 +
3438 + if (size != sizeof(struct ip_set_req_iphash)) {
3439 + ip_set_printk("data length wrong (want %zu, have %zu)",
3440 + sizeof(struct ip_set_req_iphash),
3441 + size);
3442 + return -EINVAL;
3443 + }
3444 + return __delip(set, req->ip, hash_ip);
3445 +}
3446 +
3447 +static int
3448 +delip_kernel(struct ip_set *set,
3449 + const struct sk_buff *skb,
3450 + ip_set_ip_t *hash_ip,
3451 + const u_int32_t *flags,
3452 + unsigned char index)
3453 +{
3454 + return __delip(set,
3455 + ntohl(flags[index] & IPSET_SRC
3456 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
3457 + ? ip_hdr(skb)->saddr
3458 + : ip_hdr(skb)->daddr),
3459 +#else
3460 + ? skb->nh.iph->saddr
3461 + : skb->nh.iph->daddr),
3462 +#endif
3463 + hash_ip);
3464 +}
3465 +
3466 +static int create(struct ip_set *set, const void *data, size_t size)
3467 +{
3468 + struct ip_set_req_iphash_create *req =
3469 + (struct ip_set_req_iphash_create *) data;
3470 + struct ip_set_iphash *map;
3471 + uint16_t i;
3472 +
3473 + if (size != sizeof(struct ip_set_req_iphash_create)) {
3474 + ip_set_printk("data length wrong (want %zu, have %zu)",
3475 + sizeof(struct ip_set_req_iphash_create),
3476 + size);
3477 + return -EINVAL;
3478 + }
3479 +
3480 + if (req->hashsize < 1) {
3481 + ip_set_printk("hashsize too small");
3482 + return -ENOEXEC;
3483 + }
3484 +
3485 + if (req->probes < 1) {
3486 + ip_set_printk("probes too small");
3487 + return -ENOEXEC;
3488 + }
3489 +
3490 + map = kmalloc(sizeof(struct ip_set_iphash)
3491 + + req->probes * sizeof(uint32_t), GFP_KERNEL);
3492 + if (!map) {
3493 + DP("out of memory for %d bytes",
3494 + sizeof(struct ip_set_iphash)
3495 + + req->probes * sizeof(uint32_t));
3496 + return -ENOMEM;
3497 + }
3498 + for (i = 0; i < req->probes; i++)
3499 + get_random_bytes(((uint32_t *) map->initval)+i, 4);
3500 + map->elements = 0;
3501 + map->hashsize = req->hashsize;
3502 + map->probes = req->probes;
3503 + map->resize = req->resize;
3504 + map->netmask = req->netmask;
3505 + map->members = harray_malloc(map->hashsize, sizeof(ip_set_ip_t), GFP_KERNEL);
3506 + if (!map->members) {
3507 + DP("out of memory for %d bytes", map->hashsize * sizeof(ip_set_ip_t));
3508 + kfree(map);
3509 + return -ENOMEM;
3510 + }
3511 +
3512 + set->data = map;
3513 + return 0;
3514 +}
3515 +
3516 +static void destroy(struct ip_set *set)
3517 +{
3518 + struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
3519 +
3520 + harray_free(map->members);
3521 + kfree(map);
3522 +
3523 + set->data = NULL;
3524 +}
3525 +
3526 +static void flush(struct ip_set *set)
3527 +{
3528 + struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
3529 + harray_flush(map->members, map->hashsize, sizeof(ip_set_ip_t));
3530 + map->elements = 0;
3531 +}
3532 +
3533 +static void list_header(const struct ip_set *set, void *data)
3534 +{
3535 + struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
3536 + struct ip_set_req_iphash_create *header =
3537 + (struct ip_set_req_iphash_create *) data;
3538 +
3539 + header->hashsize = map->hashsize;
3540 + header->probes = map->probes;
3541 + header->resize = map->resize;
3542 + header->netmask = map->netmask;
3543 +}
3544 +
3545 +static int list_members_size(const struct ip_set *set)
3546 +{
3547 + struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
3548 +
3549 + return (map->hashsize * sizeof(ip_set_ip_t));
3550 +}
3551 +
3552 +static void list_members(const struct ip_set *set, void *data)
3553 +{
3554 + struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
3555 + ip_set_ip_t i, *elem;
3556 +
3557 + for (i = 0; i < map->hashsize; i++) {
3558 + elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
3559 + ((ip_set_ip_t *)data)[i] = *elem;
3560 + }
3561 +}
3562 +
3563 +static struct ip_set_type ip_set_iphash = {
3564 + .typename = SETTYPE_NAME,
3565 + .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
3566 + .protocol_version = IP_SET_PROTOCOL_VERSION,
3567 + .create = &create,
3568 + .destroy = &destroy,
3569 + .flush = &flush,
3570 + .reqsize = sizeof(struct ip_set_req_iphash),
3571 + .addip = &addip,
3572 + .addip_kernel = &addip_kernel,
3573 + .retry = &retry,
3574 + .delip = &delip,
3575 + .delip_kernel = &delip_kernel,
3576 + .testip = &testip,
3577 + .testip_kernel = &testip_kernel,
3578 + .header_size = sizeof(struct ip_set_req_iphash_create),
3579 + .list_header = &list_header,
3580 + .list_members_size = &list_members_size,
3581 + .list_members = &list_members,
3582 + .me = THIS_MODULE,
3583 +};
3584 +
3585 +MODULE_LICENSE("GPL");
3586 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
3587 +MODULE_DESCRIPTION("iphash type of IP sets");
3588 +module_param(limit, int, 0600);
3589 +MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
3590 +
3591 +static int __init ip_set_iphash_init(void)
3592 +{
3593 + return ip_set_register_set_type(&ip_set_iphash);
3594 +}
3595 +
3596 +static void __exit ip_set_iphash_fini(void)
3597 +{
3598 + /* FIXME: possible race with ip_set_create() */
3599 + ip_set_unregister_set_type(&ip_set_iphash);
3600 +}
3601 +
3602 +module_init(ip_set_iphash_init);
3603 +module_exit(ip_set_iphash_fini);
3604 Index: linux-2.6.21.7/net/ipv4/netfilter/ip_set_ipmap.c
3605 ===================================================================
3606 --- /dev/null
3607 +++ linux-2.6.21.7/net/ipv4/netfilter/ip_set_ipmap.c
3608 @@ -0,0 +1,336 @@
3609 +/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
3610 + * Patrick Schaaf <bof@bof.de>
3611 + * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
3612 + *
3613 + * This program is free software; you can redistribute it and/or modify
3614 + * it under the terms of the GNU General Public License version 2 as
3615 + * published by the Free Software Foundation.
3616 + */
3617 +
3618 +/* Kernel module implementing an IP set type: the single bitmap type */
3619 +
3620 +#include <linux/module.h>
3621 +#include <linux/ip.h>
3622 +#include <linux/skbuff.h>
3623 +#include <linux/version.h>
3624 +#include <linux/netfilter_ipv4/ip_tables.h>
3625 +#include <linux/netfilter_ipv4/ip_set.h>
3626 +#include <linux/errno.h>
3627 +#include <asm/uaccess.h>
3628 +#include <asm/bitops.h>
3629 +#include <linux/spinlock.h>
3630 +
3631 +#include <linux/netfilter_ipv4/ip_set_ipmap.h>
3632 +
3633 +static inline ip_set_ip_t
3634 +ip_to_id(const struct ip_set_ipmap *map, ip_set_ip_t ip)
3635 +{
3636 + return (ip - map->first_ip)/map->hosts;
3637 +}
3638 +
3639 +static inline int
3640 +__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
3641 +{
3642 + struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
3643 +
3644 + if (ip < map->first_ip || ip > map->last_ip)
3645 + return -ERANGE;
3646 +
3647 + *hash_ip = ip & map->netmask;
3648 + DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u",
3649 + set->name, HIPQUAD(ip), HIPQUAD(*hash_ip));
3650 + return !!test_bit(ip_to_id(map, *hash_ip), map->members);
3651 +}
3652 +
3653 +static int
3654 +testip(struct ip_set *set, const void *data, size_t size,
3655 + ip_set_ip_t *hash_ip)
3656 +{
3657 + struct ip_set_req_ipmap *req =
3658 + (struct ip_set_req_ipmap *) data;
3659 +
3660 + if (size != sizeof(struct ip_set_req_ipmap)) {
3661 + ip_set_printk("data length wrong (want %zu, have %zu)",
3662 + sizeof(struct ip_set_req_ipmap),
3663 + size);
3664 + return -EINVAL;
3665 + }
3666 + return __testip(set, req->ip, hash_ip);
3667 +}
3668 +
3669 +static int
3670 +testip_kernel(struct ip_set *set,
3671 + const struct sk_buff *skb,
3672 + ip_set_ip_t *hash_ip,
3673 + const u_int32_t *flags,
3674 + unsigned char index)
3675 +{
3676 + int res = __testip(set,
3677 + ntohl(flags[index] & IPSET_SRC
3678 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
3679 + ? ip_hdr(skb)->saddr
3680 + : ip_hdr(skb)->daddr),
3681 +#else
3682 + ? skb->nh.iph->saddr
3683 + : skb->nh.iph->daddr),
3684 +#endif
3685 + hash_ip);
3686 + return (res < 0 ? 0 : res);
3687 +}
3688 +
3689 +static inline int
3690 +__addip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
3691 +{
3692 + struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
3693 +
3694 + if (ip < map->first_ip || ip > map->last_ip)
3695 + return -ERANGE;
3696 +
3697 + *hash_ip = ip & map->netmask;
3698 + DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
3699 + if (test_and_set_bit(ip_to_id(map, *hash_ip), map->members))
3700 + return -EEXIST;
3701 +
3702 + return 0;
3703 +}
3704 +
3705 +static int
3706 +addip(struct ip_set *set, const void *data, size_t size,
3707 + ip_set_ip_t *hash_ip)
3708 +{
3709 + struct ip_set_req_ipmap *req =
3710 + (struct ip_set_req_ipmap *) data;
3711 +
3712 + if (size != sizeof(struct ip_set_req_ipmap)) {
3713 + ip_set_printk("data length wrong (want %zu, have %zu)",
3714 + sizeof(struct ip_set_req_ipmap),
3715 + size);
3716 + return -EINVAL;
3717 + }
3718 + DP("%u.%u.%u.%u", HIPQUAD(req->ip));
3719 + return __addip(set, req->ip, hash_ip);
3720 +}
3721 +
3722 +static int
3723 +addip_kernel(struct ip_set *set,
3724 + const struct sk_buff *skb,
3725 + ip_set_ip_t *hash_ip,
3726 + const u_int32_t *flags,
3727 + unsigned char index)
3728 +{
3729 + return __addip(set,
3730 + ntohl(flags[index] & IPSET_SRC
3731 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
3732 + ? ip_hdr(skb)->saddr
3733 + : ip_hdr(skb)->daddr),
3734 +#else
3735 + ? skb->nh.iph->saddr
3736 + : skb->nh.iph->daddr),
3737 +#endif
3738 + hash_ip);
3739 +}
3740 +
3741 +static inline int
3742 +__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
3743 +{
3744 + struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
3745 +
3746 + if (ip < map->first_ip || ip > map->last_ip)
3747 + return -ERANGE;
3748 +
3749 + *hash_ip = ip & map->netmask;
3750 + DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
3751 + if (!test_and_clear_bit(ip_to_id(map, *hash_ip), map->members))
3752 + return -EEXIST;
3753 +
3754 + return 0;
3755 +}
3756 +
3757 +static int
3758 +delip(struct ip_set *set, const void *data, size_t size,
3759 + ip_set_ip_t *hash_ip)
3760 +{
3761 + struct ip_set_req_ipmap *req =
3762 + (struct ip_set_req_ipmap *) data;
3763 +
3764 + if (size != sizeof(struct ip_set_req_ipmap)) {
3765 + ip_set_printk("data length wrong (want %zu, have %zu)",
3766 + sizeof(struct ip_set_req_ipmap),
3767 + size);
3768 + return -EINVAL;
3769 + }
3770 + return __delip(set, req->ip, hash_ip);
3771 +}
3772 +
3773 +static int
3774 +delip_kernel(struct ip_set *set,
3775 + const struct sk_buff *skb,
3776 + ip_set_ip_t *hash_ip,
3777 + const u_int32_t *flags,
3778 + unsigned char index)
3779 +{
3780 + return __delip(set,
3781 + ntohl(flags[index] & IPSET_SRC
3782 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
3783 + ? ip_hdr(skb)->saddr
3784 + : ip_hdr(skb)->daddr),
3785 +#else
3786 + ? skb->nh.iph->saddr
3787 + : skb->nh.iph->daddr),
3788 +#endif
3789 + hash_ip);
3790 +}
3791 +
3792 +static int create(struct ip_set *set, const void *data, size_t size)
3793 +{
3794 + int newbytes;
3795 + struct ip_set_req_ipmap_create *req =
3796 + (struct ip_set_req_ipmap_create *) data;
3797 + struct ip_set_ipmap *map;
3798 +
3799 + if (size != sizeof(struct ip_set_req_ipmap_create)) {
3800 + ip_set_printk("data length wrong (want %zu, have %zu)",
3801 + sizeof(struct ip_set_req_ipmap_create),
3802 + size);
3803 + return -EINVAL;
3804 + }
3805 +
3806 + DP("from %u.%u.%u.%u to %u.%u.%u.%u",
3807 + HIPQUAD(req->from), HIPQUAD(req->to));
3808 +
3809 + if (req->from > req->to) {
3810 + DP("bad ip range");
3811 + return -ENOEXEC;
3812 + }
3813 +
3814 + map = kmalloc(sizeof(struct ip_set_ipmap), GFP_KERNEL);
3815 + if (!map) {
3816 +		DP("out of memory for %zu bytes",
3817 + sizeof(struct ip_set_ipmap));
3818 + return -ENOMEM;
3819 + }
3820 + map->first_ip = req->from;
3821 + map->last_ip = req->to;
3822 + map->netmask = req->netmask;
3823 +
3824 + if (req->netmask == 0xFFFFFFFF) {
3825 + map->hosts = 1;
3826 + map->sizeid = map->last_ip - map->first_ip + 1;
3827 + } else {
3828 + unsigned int mask_bits, netmask_bits;
3829 + ip_set_ip_t mask;
3830 +
3831 +		map->first_ip &= map->netmask;	/* Or should we complain instead of silently masking? */
3832 +
3833 + mask = range_to_mask(map->first_ip, map->last_ip, &mask_bits);
3834 + netmask_bits = mask_to_bits(map->netmask);
3835 +
3836 + if ((!mask && (map->first_ip || map->last_ip != 0xFFFFFFFF))
3837 + || netmask_bits <= mask_bits)
3838 + return -ENOEXEC;
3839 +
3840 + DP("mask_bits %u, netmask_bits %u",
3841 + mask_bits, netmask_bits);
3842 + map->hosts = 2 << (32 - netmask_bits - 1);
3843 + map->sizeid = 2 << (netmask_bits - mask_bits - 1);
3844 + }
3845 + if (map->sizeid > MAX_RANGE + 1) {
3846 + ip_set_printk("range too big (max %d addresses)",
3847 + MAX_RANGE+1);
3848 + kfree(map);
3849 + return -ENOEXEC;
3850 + }
3851 + DP("hosts %u, sizeid %u", map->hosts, map->sizeid);
3852 + newbytes = bitmap_bytes(0, map->sizeid - 1);
3853 + map->members = kmalloc(newbytes, GFP_KERNEL);
3854 + if (!map->members) {
3855 + DP("out of memory for %d bytes", newbytes);
3856 + kfree(map);
3857 + return -ENOMEM;
3858 + }
3859 + memset(map->members, 0, newbytes);
3860 +
3861 + set->data = map;
3862 + return 0;
3863 +}
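
The hosts/sizeid arithmetic in create() above is easier to follow with concrete numbers. Below is a minimal userspace sketch, not taken from the patch: the range, the requested netmask and the /22 covering prefix returned by range_to_mask() are assumed example values. With a netmask narrower than /32, every bitmap slot stands for a whole block of addresses.

#include <stdio.h>

int main(void)
{
	/* assumed example: range 10.0.0.0 - 10.0.3.255, user netmask /26 */
	unsigned int mask_bits = 22;	/* prefix of the smallest network covering the range */
	unsigned int netmask_bits = 26;	/* prefix length of the requested netmask */

	/* same expressions as in create(): 2 << (n - 1) == 2^n */
	unsigned int hosts = 2u << (32 - netmask_bits - 1);		/* addresses per slot: 64 */
	unsigned int sizeid = 2u << (netmask_bits - mask_bits - 1);	/* bitmap slots: 16 */

	printf("hosts=%u sizeid=%u\n", hosts, sizeid);
	return 0;
}

So this assumed set would hold 16 members, each representing a /26 block of 64 addresses.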
3864 +
3865 +static void destroy(struct ip_set *set)
3866 +{
3867 + struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
3868 +
3869 + kfree(map->members);
3870 + kfree(map);
3871 +
3872 + set->data = NULL;
3873 +}
3874 +
3875 +static void flush(struct ip_set *set)
3876 +{
3877 + struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
3878 + memset(map->members, 0, bitmap_bytes(0, map->sizeid - 1));
3879 +}
3880 +
3881 +static void list_header(const struct ip_set *set, void *data)
3882 +{
3883 + struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
3884 + struct ip_set_req_ipmap_create *header =
3885 + (struct ip_set_req_ipmap_create *) data;
3886 +
3887 + header->from = map->first_ip;
3888 + header->to = map->last_ip;
3889 + header->netmask = map->netmask;
3890 +}
3891 +
3892 +static int list_members_size(const struct ip_set *set)
3893 +{
3894 + struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
3895 +
3896 + return bitmap_bytes(0, map->sizeid - 1);
3897 +}
3898 +
3899 +static void list_members(const struct ip_set *set, void *data)
3900 +{
3901 + struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
3902 + int bytes = bitmap_bytes(0, map->sizeid - 1);
3903 +
3904 + memcpy(data, map->members, bytes);
3905 +}
3906 +
3907 +static struct ip_set_type ip_set_ipmap = {
3908 + .typename = SETTYPE_NAME,
3909 + .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
3910 + .protocol_version = IP_SET_PROTOCOL_VERSION,
3911 + .create = &create,
3912 + .destroy = &destroy,
3913 + .flush = &flush,
3914 + .reqsize = sizeof(struct ip_set_req_ipmap),
3915 + .addip = &addip,
3916 + .addip_kernel = &addip_kernel,
3917 + .delip = &delip,
3918 + .delip_kernel = &delip_kernel,
3919 + .testip = &testip,
3920 + .testip_kernel = &testip_kernel,
3921 + .header_size = sizeof(struct ip_set_req_ipmap_create),
3922 + .list_header = &list_header,
3923 + .list_members_size = &list_members_size,
3924 + .list_members = &list_members,
3925 + .me = THIS_MODULE,
3926 +};
3927 +
3928 +MODULE_LICENSE("GPL");
3929 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
3930 +MODULE_DESCRIPTION("ipmap type of IP sets");
3931 +
3932 +static int __init ip_set_ipmap_init(void)
3933 +{
3934 + return ip_set_register_set_type(&ip_set_ipmap);
3935 +}
3936 +
3937 +static void __exit ip_set_ipmap_fini(void)
3938 +{
3939 + /* FIXME: possible race with ip_set_create() */
3940 + ip_set_unregister_set_type(&ip_set_ipmap);
3941 +}
3942 +
3943 +module_init(ip_set_ipmap_init);
3944 +module_exit(ip_set_ipmap_fini);
3945 Index: linux-2.6.21.7/net/ipv4/netfilter/ip_set_ipporthash.c
3946 ===================================================================
3947 --- /dev/null
3948 +++ linux-2.6.21.7/net/ipv4/netfilter/ip_set_ipporthash.c
3949 @@ -0,0 +1,581 @@
3950 +/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
3951 + *
3952 + * This program is free software; you can redistribute it and/or modify
3953 + * it under the terms of the GNU General Public License version 2 as
3954 + * published by the Free Software Foundation.
3955 + */
3956 +
3957 +/* Kernel module implementing an ip+port hash set */
3958 +
3959 +#include <linux/module.h>
3960 +#include <linux/ip.h>
3961 +#include <linux/tcp.h>
3962 +#include <linux/udp.h>
3963 +#include <linux/skbuff.h>
3964 +#include <linux/version.h>
3965 +#include <linux/jhash.h>
3966 +#include <linux/netfilter_ipv4/ip_tables.h>
3967 +#include <linux/netfilter_ipv4/ip_set.h>
3968 +#include <linux/errno.h>
3969 +#include <asm/uaccess.h>
3970 +#include <asm/bitops.h>
3971 +#include <linux/spinlock.h>
3972 +#include <linux/vmalloc.h>
3973 +#include <linux/random.h>
3974 +
3975 +#include <net/ip.h>
3976 +
3977 +#include <linux/netfilter_ipv4/ip_set_malloc.h>
3978 +#include <linux/netfilter_ipv4/ip_set_ipporthash.h>
3979 +
3980 +static int limit = MAX_RANGE;
3981 +
3982 +/* We must handle non-linear skbs */
3983 +static inline ip_set_ip_t
3984 +get_port(const struct sk_buff *skb, u_int32_t flags)
3985 +{
3986 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
3987 + struct iphdr *iph = ip_hdr(skb);
3988 +#else
3989 + struct iphdr *iph = skb->nh.iph;
3990 +#endif
3991 + u_int16_t offset = ntohs(iph->frag_off) & IP_OFFSET;
3992 +
3993 + switch (iph->protocol) {
3994 + case IPPROTO_TCP: {
3995 + struct tcphdr tcph;
3996 +
3997 + /* See comments at tcp_match in ip_tables.c */
3998 + if (offset)
3999 + return INVALID_PORT;
4000 +
4001 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
4002 + if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &tcph, sizeof(tcph)) < 0)
4003 +#else
4004 + if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &tcph, sizeof(tcph)) < 0)
4005 +#endif
4006 + /* No choice either */
4007 + return INVALID_PORT;
4008 +
4009 + return ntohs(flags & IPSET_SRC ?
4010 + tcph.source : tcph.dest);
4011 + }
4012 + case IPPROTO_UDP: {
4013 + struct udphdr udph;
4014 +
4015 + if (offset)
4016 + return INVALID_PORT;
4017 +
4018 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
4019 + if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &udph, sizeof(udph)) < 0)
4020 +#else
4021 + if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &udph, sizeof(udph)) < 0)
4022 +#endif
4023 + /* No choice either */
4024 + return INVALID_PORT;
4025 +
4026 + return ntohs(flags & IPSET_SRC ?
4027 + udph.source : udph.dest);
4028 + }
4029 + default:
4030 + return INVALID_PORT;
4031 + }
4032 +}
4033 +
4034 +static inline __u32
4035 +jhash_ip(const struct ip_set_ipporthash *map, uint16_t i, ip_set_ip_t ip)
4036 +{
4037 + return jhash_1word(ip, *(((uint32_t *) map->initval) + i));
4038 +}
4039 +
4040 +#define HASH_IP(map, ip, port) (port + ((ip - ((map)->first_ip)) << 16))
4041 +
4042 +static inline __u32
4043 +hash_id(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t port,
4044 + ip_set_ip_t *hash_ip)
4045 +{
4046 + struct ip_set_ipporthash *map =
4047 + (struct ip_set_ipporthash *) set->data;
4048 + __u32 id;
4049 + u_int16_t i;
4050 + ip_set_ip_t *elem;
4051 +
4052 + *hash_ip = HASH_IP(map, ip, port);
4053 + DP("set: %s, ipport:%u.%u.%u.%u:%u, %u.%u.%u.%u",
4054 + set->name, HIPQUAD(ip), port, HIPQUAD(*hash_ip));
4055 +
4056 + for (i = 0; i < map->probes; i++) {
4057 + id = jhash_ip(map, i, *hash_ip) % map->hashsize;
4058 + DP("hash key: %u", id);
4059 + elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
4060 + if (*elem == *hash_ip)
4061 + return id;
4062 + /* No shortcut at testing - there can be deleted
4063 + * entries. */
4064 + }
4065 + return UINT_MAX;
4066 +}
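
A rough userspace sketch of the key that hash_id() probes for; the addresses below are invented for illustration. HASH_IP() above packs the offset of the address within the set's range into the upper 16 bits and the port into the lower 16, so the offset has to fit in 16 bits for keys to stay unique.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t first_ip = 0x0a000000;		/* 10.0.0.0, the set's first_ip (assumed) */
	uint32_t ip = 0x0a000007;		/* 10.0.0.7 */
	uint16_t port = 443;

	/* mirrors HASH_IP(): offset-in-range in the high 16 bits, port in the low 16 */
	uint32_t key = port + ((ip - first_ip) << 16);

	printf("key = 0x%08x\n", key);		/* 0x000701bb */
	return 0;
}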
4067 +
4068 +static inline int
4069 +__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t port,
4070 + ip_set_ip_t *hash_ip)
4071 +{
4072 + struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
4073 +
4074 + if (ip < map->first_ip || ip > map->last_ip)
4075 + return -ERANGE;
4076 +
4077 + return (hash_id(set, ip, port, hash_ip) != UINT_MAX);
4078 +}
4079 +
4080 +static int
4081 +testip(struct ip_set *set, const void *data, size_t size,
4082 + ip_set_ip_t *hash_ip)
4083 +{
4084 + struct ip_set_req_ipporthash *req =
4085 + (struct ip_set_req_ipporthash *) data;
4086 +
4087 + if (size != sizeof(struct ip_set_req_ipporthash)) {
4088 + ip_set_printk("data length wrong (want %zu, have %zu)",
4089 + sizeof(struct ip_set_req_ipporthash),
4090 + size);
4091 + return -EINVAL;
4092 + }
4093 + return __testip(set, req->ip, req->port, hash_ip);
4094 +}
4095 +
4096 +static int
4097 +testip_kernel(struct ip_set *set,
4098 + const struct sk_buff *skb,
4099 + ip_set_ip_t *hash_ip,
4100 + const u_int32_t *flags,
4101 + unsigned char index)
4102 +{
4103 + ip_set_ip_t port;
4104 + int res;
4105 +
4106 + if (flags[index+1] == 0)
4107 + return 0;
4108 +
4109 + port = get_port(skb, flags[index+1]);
4110 +
4111 + DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
4112 + flags[index] & IPSET_SRC ? "SRC" : "DST",
4113 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
4114 + NIPQUAD(ip_hdr(skb)->saddr),
4115 + NIPQUAD(ip_hdr(skb)->daddr));
4116 +#else
4117 + NIPQUAD(skb->nh.iph->saddr),
4118 + NIPQUAD(skb->nh.iph->daddr));
4119 +#endif
4120 + DP("flag %s port %u",
4121 + flags[index+1] & IPSET_SRC ? "SRC" : "DST",
4122 + port);
4123 + if (port == INVALID_PORT)
4124 + return 0;
4125 +
4126 + res = __testip(set,
4127 + ntohl(flags[index] & IPSET_SRC
4128 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
4129 + ? ip_hdr(skb)->saddr
4130 + : ip_hdr(skb)->daddr),
4131 +#else
4132 + ? skb->nh.iph->saddr
4133 + : skb->nh.iph->daddr),
4134 +#endif
4135 + port,
4136 + hash_ip);
4137 + return (res < 0 ? 0 : res);
4138 +
4139 +}
4140 +
4141 +static inline int
4142 +__add_haship(struct ip_set_ipporthash *map, ip_set_ip_t hash_ip)
4143 +{
4144 + __u32 probe;
4145 + u_int16_t i;
4146 + ip_set_ip_t *elem;
4147 +
4148 + for (i = 0; i < map->probes; i++) {
4149 + probe = jhash_ip(map, i, hash_ip) % map->hashsize;
4150 + elem = HARRAY_ELEM(map->members, ip_set_ip_t *, probe);
4151 + if (*elem == hash_ip)
4152 + return -EEXIST;
4153 + if (!*elem) {
4154 + *elem = hash_ip;
4155 + map->elements++;
4156 + return 0;
4157 + }
4158 + }
4159 + /* Trigger rehashing */
4160 + return -EAGAIN;
4161 +}
4162 +
4163 +static inline int
4164 +__addip(struct ip_set_ipporthash *map, ip_set_ip_t ip, ip_set_ip_t port,
4165 + ip_set_ip_t *hash_ip)
4166 +{
4167 + if (map->elements > limit)
4168 + return -ERANGE;
4169 + if (ip < map->first_ip || ip > map->last_ip)
4170 + return -ERANGE;
4171 +
4172 + *hash_ip = HASH_IP(map, ip, port);
4173 +
4174 + return __add_haship(map, *hash_ip);
4175 +}
4176 +
4177 +static int
4178 +addip(struct ip_set *set, const void *data, size_t size,
4179 + ip_set_ip_t *hash_ip)
4180 +{
4181 + struct ip_set_req_ipporthash *req =
4182 + (struct ip_set_req_ipporthash *) data;
4183 +
4184 + if (size != sizeof(struct ip_set_req_ipporthash)) {
4185 + ip_set_printk("data length wrong (want %zu, have %zu)",
4186 + sizeof(struct ip_set_req_ipporthash),
4187 + size);
4188 + return -EINVAL;
4189 + }
4190 + return __addip((struct ip_set_ipporthash *) set->data,
4191 + req->ip, req->port, hash_ip);
4192 +}
4193 +
4194 +static int
4195 +addip_kernel(struct ip_set *set,
4196 + const struct sk_buff *skb,
4197 + ip_set_ip_t *hash_ip,
4198 + const u_int32_t *flags,
4199 + unsigned char index)
4200 +{
4201 + ip_set_ip_t port;
4202 +
4203 + if (flags[index+1] == 0)
4204 + return -EINVAL;
4205 +
4206 + port = get_port(skb, flags[index+1]);
4207 +
4208 + DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
4209 + flags[index] & IPSET_SRC ? "SRC" : "DST",
4210 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
4211 + NIPQUAD(ip_hdr(skb)->saddr),
4212 + NIPQUAD(ip_hdr(skb)->daddr));
4213 +#else
4214 + NIPQUAD(skb->nh.iph->saddr),
4215 + NIPQUAD(skb->nh.iph->daddr));
4216 +#endif
4217 + DP("flag %s port %u",
4218 + flags[index+1] & IPSET_SRC ? "SRC" : "DST",
4219 + port);
4220 + if (port == INVALID_PORT)
4221 + return -EINVAL;
4222 +
4223 + return __addip((struct ip_set_ipporthash *) set->data,
4224 + ntohl(flags[index] & IPSET_SRC
4225 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
4226 + ? ip_hdr(skb)->saddr
4227 + : ip_hdr(skb)->daddr),
4228 +#else
4229 + ? skb->nh.iph->saddr
4230 + : skb->nh.iph->daddr),
4231 +#endif
4232 + port,
4233 + hash_ip);
4234 +}
4235 +
4236 +static int retry(struct ip_set *set)
4237 +{
4238 + struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
4239 + ip_set_ip_t *elem;
4240 + void *members;
4241 + u_int32_t i, hashsize = map->hashsize;
4242 + int res;
4243 + struct ip_set_ipporthash *tmp;
4244 +
4245 + if (map->resize == 0)
4246 + return -ERANGE;
4247 +
4248 + again:
4249 + res = 0;
4250 +
4251 + /* Calculate new hash size */
4252 + hashsize += (hashsize * map->resize)/100;
4253 + if (hashsize == map->hashsize)
4254 + hashsize++;
4255 +
4256 + ip_set_printk("rehashing of set %s triggered: "
4257 + "hashsize grows from %u to %u",
4258 + set->name, map->hashsize, hashsize);
4259 +
4260 + tmp = kmalloc(sizeof(struct ip_set_ipporthash)
4261 + + map->probes * sizeof(uint32_t), GFP_ATOMIC);
4262 + if (!tmp) {
4263 +		DP("out of memory for %zu bytes",
4264 + sizeof(struct ip_set_ipporthash)
4265 + + map->probes * sizeof(uint32_t));
4266 + return -ENOMEM;
4267 + }
4268 + tmp->members = harray_malloc(hashsize, sizeof(ip_set_ip_t), GFP_ATOMIC);
4269 + if (!tmp->members) {
4270 +		DP("out of memory for %zu bytes", hashsize * sizeof(ip_set_ip_t));
4271 + kfree(tmp);
4272 + return -ENOMEM;
4273 + }
4274 + tmp->hashsize = hashsize;
4275 + tmp->elements = 0;
4276 + tmp->probes = map->probes;
4277 + tmp->resize = map->resize;
4278 + tmp->first_ip = map->first_ip;
4279 + tmp->last_ip = map->last_ip;
4280 + memcpy(tmp->initval, map->initval, map->probes * sizeof(uint32_t));
4281 +
4282 + write_lock_bh(&set->lock);
4283 + map = (struct ip_set_ipporthash *) set->data; /* Play safe */
4284 + for (i = 0; i < map->hashsize && res == 0; i++) {
4285 + elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
4286 + if (*elem)
4287 + res = __add_haship(tmp, *elem);
4288 + }
4289 + if (res) {
4290 + /* Failure, try again */
4291 + write_unlock_bh(&set->lock);
4292 + harray_free(tmp->members);
4293 + kfree(tmp);
4294 + goto again;
4295 + }
4296 +
4297 + /* Success at resizing! */
4298 + members = map->members;
4299 +
4300 + map->hashsize = tmp->hashsize;
4301 + map->members = tmp->members;
4302 + write_unlock_bh(&set->lock);
4303 +
4304 + harray_free(members);
4305 + kfree(tmp);
4306 +
4307 + return 0;
4308 +}
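
For a feel of the growth schedule retry() applies, here is a small standalone sketch; the starting hashsize and resize value are made up. Each failed insert grows the table by resize percent, with a one-slot bump guarding against growth rounding down to zero.

#include <stdio.h>

int main(void)
{
	unsigned int hashsize = 1024, resize = 50;	/* assumed creation parameters */
	unsigned int prev;
	int attempt;

	for (attempt = 1; attempt <= 4; attempt++) {
		prev = hashsize;
		hashsize += (hashsize * resize) / 100;	/* same formula as retry() */
		if (hashsize == prev)			/* resize too small to matter */
			hashsize++;
		printf("attempt %d: hashsize %u\n", attempt, hashsize);
	}
	return 0;	/* prints 1536, 2304, 3456, 5184 */
}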
4309 +
4310 +static inline int
4311 +__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t port,
4312 + ip_set_ip_t *hash_ip)
4313 +{
4314 + struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
4315 + ip_set_ip_t id;
4316 + ip_set_ip_t *elem;
4317 +
4318 + if (ip < map->first_ip || ip > map->last_ip)
4319 + return -ERANGE;
4320 +
4321 + id = hash_id(set, ip, port, hash_ip);
4322 +
4323 + if (id == UINT_MAX)
4324 + return -EEXIST;
4325 +
4326 + elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
4327 + *elem = 0;
4328 + map->elements--;
4329 +
4330 + return 0;
4331 +}
4332 +
4333 +static int
4334 +delip(struct ip_set *set, const void *data, size_t size,
4335 + ip_set_ip_t *hash_ip)
4336 +{
4337 + struct ip_set_req_ipporthash *req =
4338 + (struct ip_set_req_ipporthash *) data;
4339 +
4340 + if (size != sizeof(struct ip_set_req_ipporthash)) {
4341 + ip_set_printk("data length wrong (want %zu, have %zu)",
4342 + sizeof(struct ip_set_req_ipporthash),
4343 + size);
4344 + return -EINVAL;
4345 + }
4346 + return __delip(set, req->ip, req->port, hash_ip);
4347 +}
4348 +
4349 +static int
4350 +delip_kernel(struct ip_set *set,
4351 + const struct sk_buff *skb,
4352 + ip_set_ip_t *hash_ip,
4353 + const u_int32_t *flags,
4354 + unsigned char index)
4355 +{
4356 + ip_set_ip_t port;
4357 +
4358 + if (flags[index+1] == 0)
4359 + return -EINVAL;
4360 +
4361 + port = get_port(skb, flags[index+1]);
4362 +
4363 + DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
4364 + flags[index] & IPSET_SRC ? "SRC" : "DST",
4365 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
4366 + NIPQUAD(ip_hdr(skb)->saddr),
4367 + NIPQUAD(ip_hdr(skb)->daddr));
4368 +#else
4369 + NIPQUAD(skb->nh.iph->saddr),
4370 + NIPQUAD(skb->nh.iph->daddr));
4371 +#endif
4372 + DP("flag %s port %u",
4373 + flags[index+1] & IPSET_SRC ? "SRC" : "DST",
4374 + port);
4375 + if (port == INVALID_PORT)
4376 + return -EINVAL;
4377 +
4378 + return __delip(set,
4379 + ntohl(flags[index] & IPSET_SRC
4380 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
4381 + ? ip_hdr(skb)->saddr
4382 + : ip_hdr(skb)->daddr),
4383 +#else
4384 + ? skb->nh.iph->saddr
4385 + : skb->nh.iph->daddr),
4386 +#endif
4387 + port,
4388 + hash_ip);
4389 +}
4390 +
4391 +static int create(struct ip_set *set, const void *data, size_t size)
4392 +{
4393 + struct ip_set_req_ipporthash_create *req =
4394 + (struct ip_set_req_ipporthash_create *) data;
4395 + struct ip_set_ipporthash *map;
4396 + uint16_t i;
4397 +
4398 + if (size != sizeof(struct ip_set_req_ipporthash_create)) {
4399 + ip_set_printk("data length wrong (want %zu, have %zu)",
4400 + sizeof(struct ip_set_req_ipporthash_create),
4401 + size);
4402 + return -EINVAL;
4403 + }
4404 +
4405 + if (req->hashsize < 1) {
4406 + ip_set_printk("hashsize too small");
4407 + return -ENOEXEC;
4408 + }
4409 +
4410 + if (req->probes < 1) {
4411 + ip_set_printk("probes too small");
4412 + return -ENOEXEC;
4413 + }
4414 +
4415 + map = kmalloc(sizeof(struct ip_set_ipporthash)
4416 + + req->probes * sizeof(uint32_t), GFP_KERNEL);
4417 + if (!map) {
4418 +		DP("out of memory for %zu bytes",
4419 + sizeof(struct ip_set_ipporthash)
4420 + + req->probes * sizeof(uint32_t));
4421 + return -ENOMEM;
4422 + }
4423 + for (i = 0; i < req->probes; i++)
4424 + get_random_bytes(((uint32_t *) map->initval)+i, 4);
4425 + map->elements = 0;
4426 + map->hashsize = req->hashsize;
4427 + map->probes = req->probes;
4428 + map->resize = req->resize;
4429 + map->first_ip = req->from;
4430 + map->last_ip = req->to;
4431 + map->members = harray_malloc(map->hashsize, sizeof(ip_set_ip_t), GFP_KERNEL);
4432 + if (!map->members) {
4433 +		DP("out of memory for %zu bytes", map->hashsize * sizeof(ip_set_ip_t));
4434 + kfree(map);
4435 + return -ENOMEM;
4436 + }
4437 +
4438 + set->data = map;
4439 + return 0;
4440 +}
4441 +
4442 +static void destroy(struct ip_set *set)
4443 +{
4444 + struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
4445 +
4446 + harray_free(map->members);
4447 + kfree(map);
4448 +
4449 + set->data = NULL;
4450 +}
4451 +
4452 +static void flush(struct ip_set *set)
4453 +{
4454 + struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
4455 + harray_flush(map->members, map->hashsize, sizeof(ip_set_ip_t));
4456 + map->elements = 0;
4457 +}
4458 +
4459 +static void list_header(const struct ip_set *set, void *data)
4460 +{
4461 + struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
4462 + struct ip_set_req_ipporthash_create *header =
4463 + (struct ip_set_req_ipporthash_create *) data;
4464 +
4465 + header->hashsize = map->hashsize;
4466 + header->probes = map->probes;
4467 + header->resize = map->resize;
4468 + header->from = map->first_ip;
4469 + header->to = map->last_ip;
4470 +}
4471 +
4472 +static int list_members_size(const struct ip_set *set)
4473 +{
4474 + struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
4475 +
4476 + return (map->hashsize * sizeof(ip_set_ip_t));
4477 +}
4478 +
4479 +static void list_members(const struct ip_set *set, void *data)
4480 +{
4481 + struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
4482 + ip_set_ip_t i, *elem;
4483 +
4484 + for (i = 0; i < map->hashsize; i++) {
4485 + elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
4486 + ((ip_set_ip_t *)data)[i] = *elem;
4487 + }
4488 +}
4489 +
4490 +static struct ip_set_type ip_set_ipporthash = {
4491 + .typename = SETTYPE_NAME,
4492 + .features = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_DATA_DOUBLE,
4493 + .protocol_version = IP_SET_PROTOCOL_VERSION,
4494 + .create = &create,
4495 + .destroy = &destroy,
4496 + .flush = &flush,
4497 + .reqsize = sizeof(struct ip_set_req_ipporthash),
4498 + .addip = &addip,
4499 + .addip_kernel = &addip_kernel,
4500 + .retry = &retry,
4501 + .delip = &delip,
4502 + .delip_kernel = &delip_kernel,
4503 + .testip = &testip,
4504 + .testip_kernel = &testip_kernel,
4505 + .header_size = sizeof(struct ip_set_req_ipporthash_create),
4506 + .list_header = &list_header,
4507 + .list_members_size = &list_members_size,
4508 + .list_members = &list_members,
4509 + .me = THIS_MODULE,
4510 +};
4511 +
4512 +MODULE_LICENSE("GPL");
4513 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
4514 +MODULE_DESCRIPTION("ipporthash type of IP sets");
4515 +module_param(limit, int, 0600);
4516 +MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
4517 +
4518 +static int __init ip_set_ipporthash_init(void)
4519 +{
4520 + return ip_set_register_set_type(&ip_set_ipporthash);
4521 +}
4522 +
4523 +static void __exit ip_set_ipporthash_fini(void)
4524 +{
4525 + /* FIXME: possible race with ip_set_create() */
4526 + ip_set_unregister_set_type(&ip_set_ipporthash);
4527 +}
4528 +
4529 +module_init(ip_set_ipporthash_init);
4530 +module_exit(ip_set_ipporthash_fini);
4531 Index: linux-2.6.21.7/net/ipv4/netfilter/ip_set_iptree.c
4532 ===================================================================
4533 --- /dev/null
4534 +++ linux-2.6.21.7/net/ipv4/netfilter/ip_set_iptree.c
4535 @@ -0,0 +1,612 @@
4536 +/* Copyright (C) 2005 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
4537 + *
4538 + * This program is free software; you can redistribute it and/or modify
4539 + * it under the terms of the GNU General Public License version 2 as
4540 + * published by the Free Software Foundation.
4541 + */
4542 +
4543 +/* Kernel module implementing an IP set type: the iptree type */
4544 +
4545 +#include <linux/version.h>
4546 +#include <linux/module.h>
4547 +#include <linux/ip.h>
4548 +#include <linux/skbuff.h>
4549 +#include <linux/slab.h>
4550 +#include <linux/delay.h>
4551 +#include <linux/netfilter_ipv4/ip_tables.h>
4552 +#include <linux/netfilter_ipv4/ip_set.h>
4553 +#include <linux/errno.h>
4554 +#include <asm/uaccess.h>
4555 +#include <asm/bitops.h>
4556 +#include <linux/spinlock.h>
4557 +
4558 +/* Backward compatibility */
4559 +#ifndef __nocast
4560 +#define __nocast
4561 +#endif
4562 +
4563 +#include <linux/netfilter_ipv4/ip_set_iptree.h>
4564 +
4565 +static int limit = MAX_RANGE;
4566 +
4567 +/* Garbage collection interval in seconds: */
4568 +#define IPTREE_GC_TIME 5*60
4569 +/* Sleep this many milliseconds before trying again
4570 + * to delete the gc timer when destroying or flushing a set */
4571 +#define IPTREE_DESTROY_SLEEP 100
4572 +
4573 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
4574 +static struct kmem_cache *branch_cachep;
4575 +static struct kmem_cache *leaf_cachep;
4576 +#else
4577 +static kmem_cache_t *branch_cachep;
4578 +static kmem_cache_t *leaf_cachep;
4579 +#endif
4580 +
4581 +#if defined(__LITTLE_ENDIAN)
4582 +#define ABCD(a,b,c,d,addrp) do { \
4583 + a = ((unsigned char *)addrp)[3]; \
4584 + b = ((unsigned char *)addrp)[2]; \
4585 + c = ((unsigned char *)addrp)[1]; \
4586 + d = ((unsigned char *)addrp)[0]; \
4587 +} while (0)
4588 +#elif defined(__BIG_ENDIAN)
4589 +#define ABCD(a,b,c,d,addrp) do { \
4590 + a = ((unsigned char *)addrp)[0]; \
4591 + b = ((unsigned char *)addrp)[1]; \
4592 + c = ((unsigned char *)addrp)[2]; \
4593 + d = ((unsigned char *)addrp)[3]; \
4594 +} while (0)
4595 +#else
4596 +#error "Please fix asm/byteorder.h"
4597 +#endif /* __LITTLE_ENDIAN */
4598 +
4599 +#define TESTIP_WALK(map, elem, branch) do { \
4600 + if ((map)->tree[elem]) { \
4601 + branch = (map)->tree[elem]; \
4602 + } else \
4603 + return 0; \
4604 +} while (0)
4605 +
4606 +static inline int
4607 +__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
4608 +{
4609 + struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
4610 + struct ip_set_iptreeb *btree;
4611 + struct ip_set_iptreec *ctree;
4612 + struct ip_set_iptreed *dtree;
4613 + unsigned char a,b,c,d;
4614 +
4615 + if (!ip)
4616 + return -ERANGE;
4617 +
4618 + *hash_ip = ip;
4619 + ABCD(a, b, c, d, hash_ip);
4620 + DP("%u %u %u %u timeout %u", a, b, c, d, map->timeout);
4621 + TESTIP_WALK(map, a, btree);
4622 + TESTIP_WALK(btree, b, ctree);
4623 + TESTIP_WALK(ctree, c, dtree);
4624 + DP("%lu %lu", dtree->expires[d], jiffies);
4625 + return dtree->expires[d]
4626 + && (!map->timeout
4627 + || time_after(dtree->expires[d], jiffies));
4628 +}
4629 +
4630 +static int
4631 +testip(struct ip_set *set, const void *data, size_t size,
4632 + ip_set_ip_t *hash_ip)
4633 +{
4634 + struct ip_set_req_iptree *req =
4635 + (struct ip_set_req_iptree *) data;
4636 +
4637 + if (size != sizeof(struct ip_set_req_iptree)) {
4638 + ip_set_printk("data length wrong (want %zu, have %zu)",
4639 + sizeof(struct ip_set_req_iptree),
4640 + size);
4641 + return -EINVAL;
4642 + }
4643 + return __testip(set, req->ip, hash_ip);
4644 +}
4645 +
4646 +static int
4647 +testip_kernel(struct ip_set *set,
4648 + const struct sk_buff *skb,
4649 + ip_set_ip_t *hash_ip,
4650 + const u_int32_t *flags,
4651 + unsigned char index)
4652 +{
4653 + int res;
4654 +
4655 + DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
4656 + flags[index] & IPSET_SRC ? "SRC" : "DST",
4657 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
4658 + NIPQUAD(ip_hdr(skb)->saddr),
4659 + NIPQUAD(ip_hdr(skb)->daddr));
4660 +#else
4661 + NIPQUAD(skb->nh.iph->saddr),
4662 + NIPQUAD(skb->nh.iph->daddr));
4663 +#endif
4664 +
4665 + res = __testip(set,
4666 + ntohl(flags[index] & IPSET_SRC
4667 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
4668 + ? ip_hdr(skb)->saddr
4669 + : ip_hdr(skb)->daddr),
4670 +#else
4671 + ? skb->nh.iph->saddr
4672 + : skb->nh.iph->daddr),
4673 +#endif
4674 + hash_ip);
4675 + return (res < 0 ? 0 : res);
4676 +}
4677 +
4678 +#define ADDIP_WALK(map, elem, branch, type, cachep) do { \
4679 + if ((map)->tree[elem]) { \
4680 + DP("found %u", elem); \
4681 + branch = (map)->tree[elem]; \
4682 + } else { \
4683 + branch = (type *) \
4684 + kmem_cache_alloc(cachep, GFP_ATOMIC); \
4685 + if (branch == NULL) \
4686 + return -ENOMEM; \
4687 + memset(branch, 0, sizeof(*branch)); \
4688 + (map)->tree[elem] = branch; \
4689 + DP("alloc %u", elem); \
4690 + } \
4691 +} while (0)
4692 +
4693 +static inline int
4694 +__addip(struct ip_set *set, ip_set_ip_t ip, unsigned int timeout,
4695 + ip_set_ip_t *hash_ip)
4696 +{
4697 + struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
4698 + struct ip_set_iptreeb *btree;
4699 + struct ip_set_iptreec *ctree;
4700 + struct ip_set_iptreed *dtree;
4701 + unsigned char a,b,c,d;
4702 + int ret = 0;
4703 +
4704 + if (!ip || map->elements >= limit)
4705 + /* We could call the garbage collector
4706 + * but it's probably overkill */
4707 + return -ERANGE;
4708 +
4709 + *hash_ip = ip;
4710 + ABCD(a, b, c, d, hash_ip);
4711 + DP("%u %u %u %u timeout %u", a, b, c, d, timeout);
4712 + ADDIP_WALK(map, a, btree, struct ip_set_iptreeb, branch_cachep);
4713 + ADDIP_WALK(btree, b, ctree, struct ip_set_iptreec, branch_cachep);
4714 + ADDIP_WALK(ctree, c, dtree, struct ip_set_iptreed, leaf_cachep);
4715 + if (dtree->expires[d]
4716 + && (!map->timeout || time_after(dtree->expires[d], jiffies)))
4717 + ret = -EEXIST;
4718 + dtree->expires[d] = map->timeout ? (timeout * HZ + jiffies) : 1;
4719 +	/* 0 would mean "empty slot": if the expiry above wrapped to 0, bump it to 1 */
4720 + if (dtree->expires[d] == 0)
4721 + dtree->expires[d] = 1;
4722 + DP("%u %lu", d, dtree->expires[d]);
4723 + if (ret == 0)
4724 + map->elements++;
4725 + return ret;
4726 +}
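
The expiry bookkeeping above boils down to plain jiffies arithmetic. A minimal sketch follows, with time_after() reproduced in its usual wrap-safe kernel form and the HZ/jiffies/timeout values invented for the example.

#include <stdio.h>

#define time_after(a, b)	((long)((b) - (a)) < 0)

int main(void)
{
	unsigned long hz = 100, now = 1000;	/* stand-ins for HZ and jiffies */
	unsigned long timeout = 30;		/* seconds, as passed to __addip() */
	unsigned long expires = timeout * hz + now;

	/* a leaf slot is "set" while expires != 0 and, for sets with a
	 * timeout, while the expiry still lies in the future */
	printf("alive: %d\n", expires != 0 && time_after(expires, now));
	return 0;
}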
4727 +
4728 +static int
4729 +addip(struct ip_set *set, const void *data, size_t size,
4730 + ip_set_ip_t *hash_ip)
4731 +{
4732 + struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
4733 + struct ip_set_req_iptree *req =
4734 + (struct ip_set_req_iptree *) data;
4735 +
4736 + if (size != sizeof(struct ip_set_req_iptree)) {
4737 + ip_set_printk("data length wrong (want %zu, have %zu)",
4738 + sizeof(struct ip_set_req_iptree),
4739 + size);
4740 + return -EINVAL;
4741 + }
4742 + DP("%u.%u.%u.%u %u", HIPQUAD(req->ip), req->timeout);
4743 + return __addip(set, req->ip,
4744 + req->timeout ? req->timeout : map->timeout,
4745 + hash_ip);
4746 +}
4747 +
4748 +static int
4749 +addip_kernel(struct ip_set *set,
4750 + const struct sk_buff *skb,
4751 + ip_set_ip_t *hash_ip,
4752 + const u_int32_t *flags,
4753 + unsigned char index)
4754 +{
4755 + struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
4756 +
4757 + return __addip(set,
4758 + ntohl(flags[index] & IPSET_SRC
4759 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
4760 + ? ip_hdr(skb)->saddr
4761 + : ip_hdr(skb)->daddr),
4762 +#else
4763 + ? skb->nh.iph->saddr
4764 + : skb->nh.iph->daddr),
4765 +#endif
4766 + map->timeout,
4767 + hash_ip);
4768 +}
4769 +
4770 +#define DELIP_WALK(map, elem, branch) do { \
4771 + if ((map)->tree[elem]) { \
4772 + branch = (map)->tree[elem]; \
4773 + } else \
4774 + return -EEXIST; \
4775 +} while (0)
4776 +
4777 +static inline int
4778 +__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
4779 +{
4780 + struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
4781 + struct ip_set_iptreeb *btree;
4782 + struct ip_set_iptreec *ctree;
4783 + struct ip_set_iptreed *dtree;
4784 + unsigned char a,b,c,d;
4785 +
4786 + if (!ip)
4787 + return -ERANGE;
4788 +
4789 + *hash_ip = ip;
4790 + ABCD(a, b, c, d, hash_ip);
4791 + DELIP_WALK(map, a, btree);
4792 + DELIP_WALK(btree, b, ctree);
4793 + DELIP_WALK(ctree, c, dtree);
4794 +
4795 + if (dtree->expires[d]) {
4796 + dtree->expires[d] = 0;
4797 + map->elements--;
4798 + return 0;
4799 + }
4800 + return -EEXIST;
4801 +}
4802 +
4803 +static int
4804 +delip(struct ip_set *set, const void *data, size_t size,
4805 + ip_set_ip_t *hash_ip)
4806 +{
4807 + struct ip_set_req_iptree *req =
4808 + (struct ip_set_req_iptree *) data;
4809 +
4810 + if (size != sizeof(struct ip_set_req_iptree)) {
4811 + ip_set_printk("data length wrong (want %zu, have %zu)",
4812 + sizeof(struct ip_set_req_iptree),
4813 + size);
4814 + return -EINVAL;
4815 + }
4816 + return __delip(set, req->ip, hash_ip);
4817 +}
4818 +
4819 +static int
4820 +delip_kernel(struct ip_set *set,
4821 + const struct sk_buff *skb,
4822 + ip_set_ip_t *hash_ip,
4823 + const u_int32_t *flags,
4824 + unsigned char index)
4825 +{
4826 + return __delip(set,
4827 + ntohl(flags[index] & IPSET_SRC
4828 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
4829 + ? ip_hdr(skb)->saddr
4830 + : ip_hdr(skb)->daddr),
4831 +#else
4832 + ? skb->nh.iph->saddr
4833 + : skb->nh.iph->daddr),
4834 +#endif
4835 + hash_ip);
4836 +}
4837 +
4838 +#define LOOP_WALK_BEGIN(map, i, branch) \
4839 + for (i = 0; i < 256; i++) { \
4840 + if (!(map)->tree[i]) \
4841 + continue; \
4842 + branch = (map)->tree[i]
4843 +
4844 +#define LOOP_WALK_END }
4845 +
4846 +static void ip_tree_gc(unsigned long ul_set)
4847 +{
4848 + struct ip_set *set = (void *) ul_set;
4849 + struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
4850 + struct ip_set_iptreeb *btree;
4851 + struct ip_set_iptreec *ctree;
4852 + struct ip_set_iptreed *dtree;
4853 + unsigned int a,b,c,d;
4854 + unsigned char i,j,k;
4855 +
4856 + i = j = k = 0;
4857 + DP("gc: %s", set->name);
4858 + write_lock_bh(&set->lock);
4859 + LOOP_WALK_BEGIN(map, a, btree);
4860 + LOOP_WALK_BEGIN(btree, b, ctree);
4861 + LOOP_WALK_BEGIN(ctree, c, dtree);
4862 + for (d = 0; d < 256; d++) {
4863 + if (dtree->expires[d]) {
4864 + DP("gc: %u %u %u %u: expires %lu jiffies %lu",
4865 + a, b, c, d,
4866 + dtree->expires[d], jiffies);
4867 + if (map->timeout
4868 + && time_before(dtree->expires[d], jiffies)) {
4869 + dtree->expires[d] = 0;
4870 + map->elements--;
4871 + } else
4872 + k = 1;
4873 + }
4874 + }
4875 + if (k == 0) {
4876 + DP("gc: %s: leaf %u %u %u empty",
4877 + set->name, a, b, c);
4878 + kmem_cache_free(leaf_cachep, dtree);
4879 + ctree->tree[c] = NULL;
4880 + } else {
4881 + DP("gc: %s: leaf %u %u %u not empty",
4882 + set->name, a, b, c);
4883 + j = 1;
4884 + k = 0;
4885 + }
4886 + LOOP_WALK_END;
4887 + if (j == 0) {
4888 + DP("gc: %s: branch %u %u empty",
4889 + set->name, a, b);
4890 + kmem_cache_free(branch_cachep, ctree);
4891 + btree->tree[b] = NULL;
4892 + } else {
4893 + DP("gc: %s: branch %u %u not empty",
4894 + set->name, a, b);
4895 + i = 1;
4896 + j = k = 0;
4897 + }
4898 + LOOP_WALK_END;
4899 + if (i == 0) {
4900 + DP("gc: %s: branch %u empty",
4901 + set->name, a);
4902 + kmem_cache_free(branch_cachep, btree);
4903 + map->tree[a] = NULL;
4904 + } else {
4905 + DP("gc: %s: branch %u not empty",
4906 + set->name, a);
4907 + i = j = k = 0;
4908 + }
4909 + LOOP_WALK_END;
4910 + write_unlock_bh(&set->lock);
4911 +
4912 + map->gc.expires = jiffies + map->gc_interval * HZ;
4913 + add_timer(&map->gc);
4914 +}
4915 +
4916 +static inline void init_gc_timer(struct ip_set *set)
4917 +{
4918 + struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
4919 +
4920 + /* Even if there is no timeout for the entries,
4921 + * we still have to call gc because delete
4922 +	 * does not clean up empty branches */
4923 + map->gc_interval = IPTREE_GC_TIME;
4924 + init_timer(&map->gc);
4925 + map->gc.data = (unsigned long) set;
4926 + map->gc.function = ip_tree_gc;
4927 + map->gc.expires = jiffies + map->gc_interval * HZ;
4928 + add_timer(&map->gc);
4929 +}
4930 +
4931 +static int create(struct ip_set *set, const void *data, size_t size)
4932 +{
4933 + struct ip_set_req_iptree_create *req =
4934 + (struct ip_set_req_iptree_create *) data;
4935 + struct ip_set_iptree *map;
4936 +
4937 + if (size != sizeof(struct ip_set_req_iptree_create)) {
4938 + ip_set_printk("data length wrong (want %zu, have %zu)",
4939 + sizeof(struct ip_set_req_iptree_create),
4940 + size);
4941 + return -EINVAL;
4942 + }
4943 +
4944 + map = kmalloc(sizeof(struct ip_set_iptree), GFP_KERNEL);
4945 + if (!map) {
4946 +		DP("out of memory for %zu bytes",
4947 + sizeof(struct ip_set_iptree));
4948 + return -ENOMEM;
4949 + }
4950 + memset(map, 0, sizeof(*map));
4951 + map->timeout = req->timeout;
4952 + map->elements = 0;
4953 + set->data = map;
4954 +
4955 + init_gc_timer(set);
4956 +
4957 + return 0;
4958 +}
4959 +
4960 +static void __flush(struct ip_set_iptree *map)
4961 +{
4962 + struct ip_set_iptreeb *btree;
4963 + struct ip_set_iptreec *ctree;
4964 + struct ip_set_iptreed *dtree;
4965 + unsigned int a,b,c;
4966 +
4967 + LOOP_WALK_BEGIN(map, a, btree);
4968 + LOOP_WALK_BEGIN(btree, b, ctree);
4969 + LOOP_WALK_BEGIN(ctree, c, dtree);
4970 + kmem_cache_free(leaf_cachep, dtree);
4971 + LOOP_WALK_END;
4972 + kmem_cache_free(branch_cachep, ctree);
4973 + LOOP_WALK_END;
4974 + kmem_cache_free(branch_cachep, btree);
4975 + LOOP_WALK_END;
4976 + map->elements = 0;
4977 +}
4978 +
4979 +static void destroy(struct ip_set *set)
4980 +{
4981 + struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
4982 +
4983 + /* gc might be running */
4984 + while (!del_timer(&map->gc))
4985 + msleep(IPTREE_DESTROY_SLEEP);
4986 + __flush(map);
4987 + kfree(map);
4988 + set->data = NULL;
4989 +}
4990 +
4991 +static void flush(struct ip_set *set)
4992 +{
4993 + struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
4994 + unsigned int timeout = map->timeout;
4995 +
4996 + /* gc might be running */
4997 + while (!del_timer(&map->gc))
4998 + msleep(IPTREE_DESTROY_SLEEP);
4999 + __flush(map);
5000 + memset(map, 0, sizeof(*map));
5001 + map->timeout = timeout;
5002 +
5003 + init_gc_timer(set);
5004 +}
5005 +
5006 +static void list_header(const struct ip_set *set, void *data)
5007 +{
5008 + struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
5009 + struct ip_set_req_iptree_create *header =
5010 + (struct ip_set_req_iptree_create *) data;
5011 +
5012 + header->timeout = map->timeout;
5013 +}
5014 +
5015 +static int list_members_size(const struct ip_set *set)
5016 +{
5017 + struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
5018 + struct ip_set_iptreeb *btree;
5019 + struct ip_set_iptreec *ctree;
5020 + struct ip_set_iptreed *dtree;
5021 + unsigned int a,b,c,d;
5022 + unsigned int count = 0;
5023 +
5024 + LOOP_WALK_BEGIN(map, a, btree);
5025 + LOOP_WALK_BEGIN(btree, b, ctree);
5026 + LOOP_WALK_BEGIN(ctree, c, dtree);
5027 + for (d = 0; d < 256; d++) {
5028 + if (dtree->expires[d]
5029 + && (!map->timeout || time_after(dtree->expires[d], jiffies)))
5030 + count++;
5031 + }
5032 + LOOP_WALK_END;
5033 + LOOP_WALK_END;
5034 + LOOP_WALK_END;
5035 +
5036 + DP("members %u", count);
5037 + return (count * sizeof(struct ip_set_req_iptree));
5038 +}
5039 +
5040 +static void list_members(const struct ip_set *set, void *data)
5041 +{
5042 + struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
5043 + struct ip_set_iptreeb *btree;
5044 + struct ip_set_iptreec *ctree;
5045 + struct ip_set_iptreed *dtree;
5046 + unsigned int a,b,c,d;
5047 + size_t offset = 0;
5048 + struct ip_set_req_iptree *entry;
5049 +
5050 + LOOP_WALK_BEGIN(map, a, btree);
5051 + LOOP_WALK_BEGIN(btree, b, ctree);
5052 + LOOP_WALK_BEGIN(ctree, c, dtree);
5053 + for (d = 0; d < 256; d++) {
5054 + if (dtree->expires[d]
5055 + && (!map->timeout || time_after(dtree->expires[d], jiffies))) {
5056 + entry = (struct ip_set_req_iptree *)(data + offset);
5057 + entry->ip = ((a << 24) | (b << 16) | (c << 8) | d);
5058 + entry->timeout = !map->timeout ? 0
5059 + : (dtree->expires[d] - jiffies)/HZ;
5060 + offset += sizeof(struct ip_set_req_iptree);
5061 + }
5062 + }
5063 + LOOP_WALK_END;
5064 + LOOP_WALK_END;
5065 + LOOP_WALK_END;
5066 +}
5067 +
5068 +static struct ip_set_type ip_set_iptree = {
5069 + .typename = SETTYPE_NAME,
5070 + .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
5071 + .protocol_version = IP_SET_PROTOCOL_VERSION,
5072 + .create = &create,
5073 + .destroy = &destroy,
5074 + .flush = &flush,
5075 + .reqsize = sizeof(struct ip_set_req_iptree),
5076 + .addip = &addip,
5077 + .addip_kernel = &addip_kernel,
5078 + .delip = &delip,
5079 + .delip_kernel = &delip_kernel,
5080 + .testip = &testip,
5081 + .testip_kernel = &testip_kernel,
5082 + .header_size = sizeof(struct ip_set_req_iptree_create),
5083 + .list_header = &list_header,
5084 + .list_members_size = &list_members_size,
5085 + .list_members = &list_members,
5086 + .me = THIS_MODULE,
5087 +};
5088 +
5089 +MODULE_LICENSE("GPL");
5090 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
5091 +MODULE_DESCRIPTION("iptree type of IP sets");
5092 +module_param(limit, int, 0600);
5093 +MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
5094 +
5095 +static int __init ip_set_iptree_init(void)
5096 +{
5097 + int ret;
5098 +
5099 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
5100 + branch_cachep = kmem_cache_create("ip_set_iptreeb",
5101 + sizeof(struct ip_set_iptreeb),
5102 + 0, 0, NULL);
5103 +#else
5104 + branch_cachep = kmem_cache_create("ip_set_iptreeb",
5105 + sizeof(struct ip_set_iptreeb),
5106 + 0, 0, NULL, NULL);
5107 +#endif
5108 + if (!branch_cachep) {
5109 + printk(KERN_ERR "Unable to create ip_set_iptreeb slab cache\n");
5110 + ret = -ENOMEM;
5111 + goto out;
5112 + }
5113 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
5114 + leaf_cachep = kmem_cache_create("ip_set_iptreed",
5115 + sizeof(struct ip_set_iptreed),
5116 + 0, 0, NULL);
5117 +#else
5118 + leaf_cachep = kmem_cache_create("ip_set_iptreed",
5119 + sizeof(struct ip_set_iptreed),
5120 + 0, 0, NULL, NULL);
5121 +#endif
5122 + if (!leaf_cachep) {
5123 + printk(KERN_ERR "Unable to create ip_set_iptreed slab cache\n");
5124 + ret = -ENOMEM;
5125 + goto free_branch;
5126 + }
5127 + ret = ip_set_register_set_type(&ip_set_iptree);
5128 + if (ret == 0)
5129 + goto out;
5130 +
5131 + kmem_cache_destroy(leaf_cachep);
5132 + free_branch:
5133 + kmem_cache_destroy(branch_cachep);
5134 + out:
5135 + return ret;
5136 +}
5137 +
5138 +static void __exit ip_set_iptree_fini(void)
5139 +{
5140 + /* FIXME: possible race with ip_set_create() */
5141 + ip_set_unregister_set_type(&ip_set_iptree);
5142 + kmem_cache_destroy(leaf_cachep);
5143 + kmem_cache_destroy(branch_cachep);
5144 +}
5145 +
5146 +module_init(ip_set_iptree_init);
5147 +module_exit(ip_set_iptree_fini);
5148 Index: linux-2.6.21.7/net/ipv4/netfilter/ip_set_iptreemap.c
5149 ===================================================================
5150 --- /dev/null
5151 +++ linux-2.6.21.7/net/ipv4/netfilter/ip_set_iptreemap.c
5152 @@ -0,0 +1,829 @@
5153 +/* Copyright (C) 2007 Sven Wegener <sven.wegener@stealer.net>
5154 + *
5155 + * This program is free software; you can redistribute it and/or modify it
5156 + * under the terms of the GNU General Public License version 2 as published by
5157 + * the Free Software Foundation.
5158 + */
5159 +
5160 +/* This module implements the iptreemap ipset type. It uses bitmaps to
5161 + * represent every single IPv4 address as a single bit. The bitmaps are managed
5162 + * in a tree structure, where the first three octets of an address are used
5163 + * as an index to find the bitmap and the last octet is used as the bit number.
5164 + */
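
A compact userspace illustration of that layout (the address is an arbitrary example): the first three octets select nested tree slots and the last octet is the bit tested in the 256-bit leaf bitmap, exactly the split the ABCD() macro below performs on a little-endian host.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t ip = 0xc0a80119;	/* 192.168.1.25 in host byte order */
	uint8_t a = ip >> 24, b = ip >> 16, c = ip >> 8, d = ip;

	/* conceptually: test_bit(d, map->tree[a]->tree[b]->tree[c]->bitmap) */
	printf("%u.%u.%u.%u -> tree[%u][%u][%u], bit %u\n",
	       a, b, c, d, a, b, c, d);
	return 0;
}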
5165 +
5166 +#include <linux/version.h>
5167 +#include <linux/module.h>
5168 +#include <linux/ip.h>
5169 +#include <linux/skbuff.h>
5170 +#include <linux/slab.h>
5171 +#include <linux/delay.h>
5172 +#include <linux/netfilter_ipv4/ip_tables.h>
5173 +#include <linux/netfilter_ipv4/ip_set.h>
5174 +#include <linux/errno.h>
5175 +#include <asm/uaccess.h>
5176 +#include <asm/bitops.h>
5177 +#include <linux/spinlock.h>
5178 +
5179 +#include <linux/netfilter_ipv4/ip_set_iptreemap.h>
5180 +
5181 +#define IPTREEMAP_DEFAULT_GC_TIME (5 * 60)
5182 +#define IPTREEMAP_DESTROY_SLEEP (100)
5183 +
5184 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
5185 +static struct kmem_cache *cachep_b;
5186 +static struct kmem_cache *cachep_c;
5187 +static struct kmem_cache *cachep_d;
5188 +#else
5189 +static kmem_cache_t *cachep_b;
5190 +static kmem_cache_t *cachep_c;
5191 +static kmem_cache_t *cachep_d;
5192 +#endif
5193 +
5194 +static struct ip_set_iptreemap_d *fullbitmap_d;
5195 +static struct ip_set_iptreemap_c *fullbitmap_c;
5196 +static struct ip_set_iptreemap_b *fullbitmap_b;
5197 +
5198 +#if defined(__LITTLE_ENDIAN)
5199 +#define ABCD(a, b, c, d, addr) \
5200 + do { \
5201 + a = ((unsigned char *)addr)[3]; \
5202 + b = ((unsigned char *)addr)[2]; \
5203 + c = ((unsigned char *)addr)[1]; \
5204 + d = ((unsigned char *)addr)[0]; \
5205 + } while (0)
5206 +#elif defined(__BIG_ENDIAN)
5207 +#define ABCD(a,b,c,d,addrp) do { \
5208 + a = ((unsigned char *)addrp)[0]; \
5209 + b = ((unsigned char *)addrp)[1]; \
5210 + c = ((unsigned char *)addrp)[2]; \
5211 + d = ((unsigned char *)addrp)[3]; \
5212 +} while (0)
5213 +#else
5214 +#error "Please fix asm/byteorder.h"
5215 +#endif /* __LITTLE_ENDIAN */
5216 +
5217 +#define TESTIP_WALK(map, elem, branch, full) \
5218 + do { \
5219 + branch = (map)->tree[elem]; \
5220 + if (!branch) \
5221 + return 0; \
5222 + else if (branch == full) \
5223 + return 1; \
5224 + } while (0)
5225 +
5226 +#define ADDIP_WALK(map, elem, branch, type, cachep, full) \
5227 + do { \
5228 + branch = (map)->tree[elem]; \
5229 + if (!branch) { \
5230 + branch = (type *) kmem_cache_alloc(cachep, GFP_ATOMIC); \
5231 + if (!branch) \
5232 + return -ENOMEM; \
5233 + memset(branch, 0, sizeof(*branch)); \
5234 + (map)->tree[elem] = branch; \
5235 + } else if (branch == full) { \
5236 + return -EEXIST; \
5237 + } \
5238 + } while (0)
5239 +
5240 +#define ADDIP_RANGE_LOOP(map, a, a1, a2, hint, branch, full, cachep, free) \
5241 + for (a = a1; a <= a2; a++) { \
5242 + branch = (map)->tree[a]; \
5243 + if (branch != full) { \
5244 + if ((a > a1 && a < a2) || (hint)) { \
5245 + if (branch) \
5246 + free(branch); \
5247 + (map)->tree[a] = full; \
5248 + continue; \
5249 + } else if (!branch) { \
5250 + branch = kmem_cache_alloc(cachep, GFP_ATOMIC); \
5251 + if (!branch) \
5252 + return -ENOMEM; \
5253 + memset(branch, 0, sizeof(*branch)); \
5254 + (map)->tree[a] = branch; \
5255 + }
5256 +
5257 +#define ADDIP_RANGE_LOOP_END() \
5258 + } \
5259 + }
5260 +
5261 +#define DELIP_WALK(map, elem, branch, cachep, full, flags) \
5262 + do { \
5263 + branch = (map)->tree[elem]; \
5264 + if (!branch) { \
5265 + return -EEXIST; \
5266 + } else if (branch == full) { \
5267 + branch = kmem_cache_alloc(cachep, flags); \
5268 + if (!branch) \
5269 + return -ENOMEM; \
5270 + memcpy(branch, full, sizeof(*full)); \
5271 + (map)->tree[elem] = branch; \
5272 + } \
5273 + } while (0)
5274 +
5275 +#define DELIP_RANGE_LOOP(map, a, a1, a2, hint, branch, full, cachep, free, flags) \
5276 + for (a = a1; a <= a2; a++) { \
5277 + branch = (map)->tree[a]; \
5278 + if (branch) { \
5279 + if ((a > a1 && a < a2) || (hint)) { \
5280 + if (branch != full) \
5281 + free(branch); \
5282 + (map)->tree[a] = NULL; \
5283 + continue; \
5284 + } else if (branch == full) { \
5285 + branch = kmem_cache_alloc(cachep, flags); \
5286 + if (!branch) \
5287 + return -ENOMEM; \
5288 + memcpy(branch, full, sizeof(*branch)); \
5289 + (map)->tree[a] = branch; \
5290 + }
5291 +
5292 +#define DELIP_RANGE_LOOP_END() \
5293 + } \
5294 + }
5295 +
5296 +#define LOOP_WALK_BEGIN(map, i, branch) \
5297 + for (i = 0; i < 256; i++) { \
5298 + branch = (map)->tree[i]; \
5299 + if (likely(!branch)) \
5300 + continue;
5301 +
5302 +#define LOOP_WALK_END() \
5303 + }
5304 +
5305 +#define LOOP_WALK_BEGIN_GC(map, i, branch, full, cachep, count) \
5306 + count = -256; \
5307 + for (i = 0; i < 256; i++) { \
5308 + branch = (map)->tree[i]; \
5309 + if (likely(!branch)) \
5310 + continue; \
5311 + count++; \
5312 + if (branch == full) { \
5313 + count++; \
5314 + continue; \
5315 + }
5316 +
5317 +#define LOOP_WALK_END_GC(map, i, branch, full, cachep, count) \
5318 + if (-256 == count) { \
5319 + kmem_cache_free(cachep, branch); \
5320 + (map)->tree[i] = NULL; \
5321 + } else if (256 == count) { \
5322 + kmem_cache_free(cachep, branch); \
5323 + (map)->tree[i] = full; \
5324 + } \
5325 + }
5326 +
5327 +#define LOOP_WALK_BEGIN_COUNT(map, i, branch, inrange, count) \
5328 + for (i = 0; i < 256; i++) { \
5329 + if (!(map)->tree[i]) { \
5330 + if (inrange) { \
5331 + count++; \
5332 + inrange = 0; \
5333 + } \
5334 + continue; \
5335 + } \
5336 + branch = (map)->tree[i];
5337 +
5338 +#define LOOP_WALK_END_COUNT() \
5339 + }
5340 +
5341 +#define MIN(a, b) ((a) < (b) ? (a) : (b))
5342 +#define MAX(a, b) ((a) > (b) ? (a) : (b))
5343 +
5344 +#define GETVALUE1(a, a1, b1, r) \
5345 + (a == a1 ? b1 : r)
5346 +
5347 +#define GETVALUE2(a, b, a1, b1, c1, r) \
5348 + (a == a1 && b == b1 ? c1 : r)
5349 +
5350 +#define GETVALUE3(a, b, c, a1, b1, c1, d1, r) \
5351 + (a == a1 && b == b1 && c == c1 ? d1 : r)
5352 +
5353 +#define CHECK1(a, a1, a2, b1, b2, c1, c2, d1, d2) \
5354 + ( \
5355 + GETVALUE1(a, a1, b1, 0) == 0 \
5356 + && GETVALUE1(a, a2, b2, 255) == 255 \
5357 + && c1 == 0 \
5358 + && c2 == 255 \
5359 + && d1 == 0 \
5360 + && d2 == 255 \
5361 + )
5362 +
5363 +#define CHECK2(a, b, a1, a2, b1, b2, c1, c2, d1, d2) \
5364 + ( \
5365 + GETVALUE2(a, b, a1, b1, c1, 0) == 0 \
5366 + && GETVALUE2(a, b, a2, b2, c2, 255) == 255 \
5367 + && d1 == 0 \
5368 + && d2 == 255 \
5369 + )
5370 +
5371 +#define CHECK3(a, b, c, a1, a2, b1, b2, c1, c2, d1, d2) \
5372 + ( \
5373 + GETVALUE3(a, b, c, a1, b1, c1, d1, 0) == 0 \
5374 + && GETVALUE3(a, b, c, a2, b2, c2, d2, 255) == 255 \
5375 + )
5376 +
5377 +
5378 +static inline void
5379 +free_d(struct ip_set_iptreemap_d *map)
5380 +{
5381 + kmem_cache_free(cachep_d, map);
5382 +}
5383 +
5384 +static inline void
5385 +free_c(struct ip_set_iptreemap_c *map)
5386 +{
5387 + struct ip_set_iptreemap_d *dtree;
5388 + unsigned int i;
5389 +
5390 + LOOP_WALK_BEGIN(map, i, dtree) {
5391 + if (dtree != fullbitmap_d)
5392 + free_d(dtree);
5393 + } LOOP_WALK_END();
5394 +
5395 + kmem_cache_free(cachep_c, map);
5396 +}
5397 +
5398 +static inline void
5399 +free_b(struct ip_set_iptreemap_b *map)
5400 +{
5401 + struct ip_set_iptreemap_c *ctree;
5402 + unsigned int i;
5403 +
5404 + LOOP_WALK_BEGIN(map, i, ctree) {
5405 + if (ctree != fullbitmap_c)
5406 + free_c(ctree);
5407 + } LOOP_WALK_END();
5408 +
5409 + kmem_cache_free(cachep_b, map);
5410 +}
5411 +
5412 +static inline int
5413 +__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
5414 +{
5415 + struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
5416 + struct ip_set_iptreemap_b *btree;
5417 + struct ip_set_iptreemap_c *ctree;
5418 + struct ip_set_iptreemap_d *dtree;
5419 + unsigned char a, b, c, d;
5420 +
5421 + *hash_ip = ip;
5422 +
5423 + ABCD(a, b, c, d, hash_ip);
5424 +
5425 + TESTIP_WALK(map, a, btree, fullbitmap_b);
5426 + TESTIP_WALK(btree, b, ctree, fullbitmap_c);
5427 + TESTIP_WALK(ctree, c, dtree, fullbitmap_d);
5428 +
5429 + return !!test_bit(d, (void *) dtree->bitmap);
5430 +}
5431 +
5432 +static int
5433 +testip(struct ip_set *set, const void *data, size_t size, ip_set_ip_t *hash_ip)
5434 +{
5435 + struct ip_set_req_iptreemap *req = (struct ip_set_req_iptreemap *) data;
5436 +
5437 + if (size != sizeof(struct ip_set_req_iptreemap)) {
5438 + ip_set_printk("data length wrong (want %zu, have %zu)", sizeof(struct ip_set_req_iptreemap), size);
5439 + return -EINVAL;
5440 + }
5441 +
5442 + return __testip(set, req->start, hash_ip);
5443 +}
5444 +
5445 +static int
5446 +testip_kernel(struct ip_set *set, const struct sk_buff *skb, ip_set_ip_t *hash_ip, const u_int32_t *flags, unsigned char index)
5447 +{
5448 + int res;
5449 +
5450 + res = __testip(set,
5451 + ntohl(flags[index] & IPSET_SRC
5452 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
5453 + ? ip_hdr(skb)->saddr
5454 + : ip_hdr(skb)->daddr),
5455 +#else
5456 + ? skb->nh.iph->saddr
5457 + : skb->nh.iph->daddr),
5458 +#endif
5459 + hash_ip);
5460 +
5461 + return (res < 0 ? 0 : res);
5462 +}
5463 +
5464 +static inline int
5465 +__addip_single(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
5466 +{
5467 + struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
5468 + struct ip_set_iptreemap_b *btree;
5469 + struct ip_set_iptreemap_c *ctree;
5470 + struct ip_set_iptreemap_d *dtree;
5471 + unsigned char a, b, c, d;
5472 +
5473 + *hash_ip = ip;
5474 +
5475 + ABCD(a, b, c, d, hash_ip);
5476 +
5477 + ADDIP_WALK(map, a, btree, struct ip_set_iptreemap_b, cachep_b, fullbitmap_b);
5478 + ADDIP_WALK(btree, b, ctree, struct ip_set_iptreemap_c, cachep_c, fullbitmap_c);
5479 + ADDIP_WALK(ctree, c, dtree, struct ip_set_iptreemap_d, cachep_d, fullbitmap_d);
5480 +
5481 + if (test_and_set_bit(d, (void *) dtree->bitmap))
5482 + return -EEXIST;
5483 +
5484 + set_bit(b, (void *) btree->dirty);
5485 +
5486 + return 0;
5487 +}
5488 +
5489 +static inline int
5490 +__addip_range(struct ip_set *set, ip_set_ip_t start, ip_set_ip_t end, ip_set_ip_t *hash_ip)
5491 +{
5492 + struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
5493 + struct ip_set_iptreemap_b *btree;
5494 + struct ip_set_iptreemap_c *ctree;
5495 + struct ip_set_iptreemap_d *dtree;
5496 + unsigned int a, b, c, d;
5497 + unsigned char a1, b1, c1, d1;
5498 + unsigned char a2, b2, c2, d2;
5499 +
5500 + if (start == end)
5501 + return __addip_single(set, start, hash_ip);
5502 +
5503 + *hash_ip = start;
5504 +
5505 + ABCD(a1, b1, c1, d1, &start);
5506 + ABCD(a2, b2, c2, d2, &end);
5507 +
5508 + /* This is sooo ugly... */
5509 + ADDIP_RANGE_LOOP(map, a, a1, a2, CHECK1(a, a1, a2, b1, b2, c1, c2, d1, d2), btree, fullbitmap_b, cachep_b, free_b) {
5510 + ADDIP_RANGE_LOOP(btree, b, GETVALUE1(a, a1, b1, 0), GETVALUE1(a, a2, b2, 255), CHECK2(a, b, a1, a2, b1, b2, c1, c2, d1, d2), ctree, fullbitmap_c, cachep_c, free_c) {
5511 + ADDIP_RANGE_LOOP(ctree, c, GETVALUE2(a, b, a1, b1, c1, 0), GETVALUE2(a, b, a2, b2, c2, 255), CHECK3(a, b, c, a1, a2, b1, b2, c1, c2, d1, d2), dtree, fullbitmap_d, cachep_d, free_d) {
5512 + for (d = GETVALUE3(a, b, c, a1, b1, c1, d1, 0); d <= GETVALUE3(a, b, c, a2, b2, c2, d2, 255); d++)
5513 + set_bit(d, (void *) dtree->bitmap);
5514 + set_bit(b, (void *) btree->dirty);
5515 + } ADDIP_RANGE_LOOP_END();
5516 + } ADDIP_RANGE_LOOP_END();
5517 + } ADDIP_RANGE_LOOP_END();
5518 +
5519 + return 0;
5520 +}
5521 +
5522 +static int
5523 +addip(struct ip_set *set, const void *data, size_t size, ip_set_ip_t *hash_ip)
5524 +{
5525 + struct ip_set_req_iptreemap *req = (struct ip_set_req_iptreemap *) data;
5526 +
5527 + if (size != sizeof(struct ip_set_req_iptreemap)) {
5528 + ip_set_printk("data length wrong (want %zu, have %zu)", sizeof(struct ip_set_req_iptreemap), size);
5529 + return -EINVAL;
5530 + }
5531 +
5532 + return __addip_range(set, MIN(req->start, req->end), MAX(req->start, req->end), hash_ip);
5533 +}
5534 +
5535 +static int
5536 +addip_kernel(struct ip_set *set, const struct sk_buff *skb, ip_set_ip_t *hash_ip, const u_int32_t *flags, unsigned char index)
5537 +{
5538 +
5539 + return __addip_single(set,
5540 + ntohl(flags[index] & IPSET_SRC
5541 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
5542 + ? ip_hdr(skb)->saddr
5543 + : ip_hdr(skb)->daddr),
5544 +#else
5545 + ? skb->nh.iph->saddr
5546 + : skb->nh.iph->daddr),
5547 +#endif
5548 + hash_ip);
5549 +}
5550 +
5551 +static inline int
5552 +__delip_single(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip, unsigned int __nocast flags)
5553 +{
5554 + struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
5555 + struct ip_set_iptreemap_b *btree;
5556 + struct ip_set_iptreemap_c *ctree;
5557 + struct ip_set_iptreemap_d *dtree;
5558 + unsigned char a,b,c,d;
5559 +
5560 + *hash_ip = ip;
5561 +
5562 + ABCD(a, b, c, d, hash_ip);
5563 +
5564 + DELIP_WALK(map, a, btree, cachep_b, fullbitmap_b, flags);
5565 + DELIP_WALK(btree, b, ctree, cachep_c, fullbitmap_c, flags);
5566 + DELIP_WALK(ctree, c, dtree, cachep_d, fullbitmap_d, flags);
5567 +
5568 + if (!test_and_clear_bit(d, (void *) dtree->bitmap))
5569 + return -EEXIST;
5570 +
5571 + set_bit(b, (void *) btree->dirty);
5572 +
5573 + return 0;
5574 +}
5575 +
5576 +static inline int
5577 +__delip_range(struct ip_set *set, ip_set_ip_t start, ip_set_ip_t end, ip_set_ip_t *hash_ip, unsigned int __nocast flags)
5578 +{
5579 + struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
5580 + struct ip_set_iptreemap_b *btree;
5581 + struct ip_set_iptreemap_c *ctree;
5582 + struct ip_set_iptreemap_d *dtree;
5583 + unsigned int a, b, c, d;
5584 + unsigned char a1, b1, c1, d1;
5585 + unsigned char a2, b2, c2, d2;
5586 +
5587 + if (start == end)
5588 + return __delip_single(set, start, hash_ip, flags);
5589 +
5590 + *hash_ip = start;
5591 +
5592 + ABCD(a1, b1, c1, d1, &start);
5593 + ABCD(a2, b2, c2, d2, &end);
5594 +
5595 + /* This is sooo ugly... */
5596 + DELIP_RANGE_LOOP(map, a, a1, a2, CHECK1(a, a1, a2, b1, b2, c1, c2, d1, d2), btree, fullbitmap_b, cachep_b, free_b, flags) {
5597 + DELIP_RANGE_LOOP(btree, b, GETVALUE1(a, a1, b1, 0), GETVALUE1(a, a2, b2, 255), CHECK2(a, b, a1, a2, b1, b2, c1, c2, d1, d2), ctree, fullbitmap_c, cachep_c, free_c, flags) {
5598 + DELIP_RANGE_LOOP(ctree, c, GETVALUE2(a, b, a1, b1, c1, 0), GETVALUE2(a, b, a2, b2, c2, 255), CHECK3(a, b, c, a1, a2, b1, b2, c1, c2, d1, d2), dtree, fullbitmap_d, cachep_d, free_d, flags) {
5599 + for (d = GETVALUE3(a, b, c, a1, b1, c1, d1, 0); d <= GETVALUE3(a, b, c, a2, b2, c2, d2, 255); d++)
5600 + clear_bit(d, (void *) dtree->bitmap);
5601 + set_bit(b, (void *) btree->dirty);
5602 + } DELIP_RANGE_LOOP_END();
5603 + } DELIP_RANGE_LOOP_END();
5604 + } DELIP_RANGE_LOOP_END();
5605 +
5606 + return 0;
5607 +}
5608 +
5609 +static int
5610 +delip(struct ip_set *set, const void *data, size_t size, ip_set_ip_t *hash_ip)
5611 +{
5612 + struct ip_set_req_iptreemap *req = (struct ip_set_req_iptreemap *) data;
5613 +
5614 + if (size != sizeof(struct ip_set_req_iptreemap)) {
5615 + ip_set_printk("data length wrong (want %zu, have %zu)", sizeof(struct ip_set_req_iptreemap), size);
5616 + return -EINVAL;
5617 + }
5618 +
5619 + return __delip_range(set, MIN(req->start, req->end), MAX(req->start, req->end), hash_ip, GFP_KERNEL);
5620 +}
5621 +
5622 +static int
5623 +delip_kernel(struct ip_set *set, const struct sk_buff *skb, ip_set_ip_t *hash_ip, const u_int32_t *flags, unsigned char index)
5624 +{
5625 + return __delip_single(set,
5626 + ntohl(flags[index] & IPSET_SRC
5627 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
5628 + ? ip_hdr(skb)->saddr
5629 + : ip_hdr(skb)->daddr),
5630 +#else
5631 + ? skb->nh.iph->saddr
5632 + : skb->nh.iph->daddr),
5633 +#endif
5634 + hash_ip,
5635 + GFP_ATOMIC);
5636 +}
5637 +
5638 +/* Check the status of the bitmap
5639 + * -1 == all bits cleared
5640 + * 1 == all bits set
5641 + * 0 == anything else
5642 + */
5643 +static inline int
5644 +bitmap_status(struct ip_set_iptreemap_d *dtree)
5645 +{
5646 + unsigned char first = dtree->bitmap[0];
5647 + int a;
5648 +
5649 + for (a = 1; a < 32; a++)
5650 + if (dtree->bitmap[a] != first)
5651 + return 0;
5652 +
5653 + return (first == 0 ? -1 : (first == 255 ? 1 : 0));
5654 +}
5655 +
5656 +static void
5657 +gc(unsigned long addr)
5658 +{
5659 + struct ip_set *set = (struct ip_set *) addr;
5660 + struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
5661 + struct ip_set_iptreemap_b *btree;
5662 + struct ip_set_iptreemap_c *ctree;
5663 + struct ip_set_iptreemap_d *dtree;
5664 + unsigned int a, b, c;
5665 + int i, j, k;
5666 +
5667 + write_lock_bh(&set->lock);
5668 +
5669 + LOOP_WALK_BEGIN_GC(map, a, btree, fullbitmap_b, cachep_b, i) {
5670 + LOOP_WALK_BEGIN_GC(btree, b, ctree, fullbitmap_c, cachep_c, j) {
5671 + if (!test_and_clear_bit(b, (void *) btree->dirty))
5672 + continue;
5673 + LOOP_WALK_BEGIN_GC(ctree, c, dtree, fullbitmap_d, cachep_d, k) {
5674 + switch (bitmap_status(dtree)) {
5675 + case -1:
5676 + kmem_cache_free(cachep_d, dtree);
5677 + ctree->tree[c] = NULL;
5678 + k--;
5679 + break;
5680 + case 1:
5681 + kmem_cache_free(cachep_d, dtree);
5682 + ctree->tree[c] = fullbitmap_d;
5683 + k++;
5684 + break;
5685 + }
5686 + } LOOP_WALK_END();
5687 + } LOOP_WALK_END_GC(btree, b, ctree, fullbitmap_c, cachep_c, k);
5688 + } LOOP_WALK_END_GC(map, a, btree, fullbitmap_b, cachep_b, j);
5689 +
5690 + write_unlock_bh(&set->lock);
5691 +
5692 + map->gc.expires = jiffies + map->gc_interval * HZ;
5693 + add_timer(&map->gc);
5694 +}
5695 +
5696 +static inline void
5697 +init_gc_timer(struct ip_set *set)
5698 +{
5699 + struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
5700 +
5701 + init_timer(&map->gc);
5702 + map->gc.data = (unsigned long) set;
5703 + map->gc.function = gc;
5704 + map->gc.expires = jiffies + map->gc_interval * HZ;
5705 + add_timer(&map->gc);
5706 +}
5707 +
5708 +static int create(struct ip_set *set, const void *data, size_t size)
5709 +{
5710 + struct ip_set_req_iptreemap_create *req = (struct ip_set_req_iptreemap_create *) data;
5711 + struct ip_set_iptreemap *map;
5712 +
5713 + if (size != sizeof(struct ip_set_req_iptreemap_create)) {
5714 + ip_set_printk("data length wrong (want %zu, have %zu)", sizeof(struct ip_set_req_iptreemap_create), size);
5715 + return -EINVAL;
5716 + }
5717 +
5718 + map = kzalloc(sizeof(*map), GFP_KERNEL);
5719 + if (!map)
5720 + return -ENOMEM;
5721 +
5722 + map->gc_interval = req->gc_interval ? req->gc_interval : IPTREEMAP_DEFAULT_GC_TIME;
5723 + set->data = map;
5724 +
5725 + init_gc_timer(set);
5726 +
5727 + return 0;
5728 +}
5729 +
5730 +static inline void __flush(struct ip_set_iptreemap *map)
5731 +{
5732 + struct ip_set_iptreemap_b *btree;
5733 + unsigned int a;
5734 +
5735 + LOOP_WALK_BEGIN(map, a, btree);
5736 + if (btree != fullbitmap_b)
5737 + free_b(btree);
5738 + LOOP_WALK_END();
5739 +}
5740 +
5741 +static void destroy(struct ip_set *set)
5742 +{
5743 + struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
5744 +
5745 + while (!del_timer(&map->gc))
5746 + msleep(IPTREEMAP_DESTROY_SLEEP);
5747 +
5748 + __flush(map);
5749 + kfree(map);
5750 +
5751 + set->data = NULL;
5752 +}
5753 +
5754 +static void flush(struct ip_set *set)
5755 +{
5756 + struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
5757 +
5758 + while (!del_timer(&map->gc))
5759 + msleep(IPTREEMAP_DESTROY_SLEEP);
5760 +
5761 + __flush(map);
5762 +
5763 + memset(map, 0, sizeof(*map));
5764 +
5765 + init_gc_timer(set);
5766 +}
5767 +
5768 +static void list_header(const struct ip_set *set, void *data)
5769 +{
5770 + struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
5771 + struct ip_set_req_iptreemap_create *header = (struct ip_set_req_iptreemap_create *) data;
5772 +
5773 + header->gc_interval = map->gc_interval;
5774 +}
5775 +
5776 +static int list_members_size(const struct ip_set *set)
5777 +{
5778 + struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
5779 + struct ip_set_iptreemap_b *btree;
5780 + struct ip_set_iptreemap_c *ctree;
5781 + struct ip_set_iptreemap_d *dtree;
5782 + unsigned int a, b, c, d, inrange = 0, count = 0;
5783 +
5784 + LOOP_WALK_BEGIN_COUNT(map, a, btree, inrange, count) {
5785 + LOOP_WALK_BEGIN_COUNT(btree, b, ctree, inrange, count) {
5786 + LOOP_WALK_BEGIN_COUNT(ctree, c, dtree, inrange, count) {
5787 + for (d = 0; d < 256; d++) {
5788 + if (test_bit(d, (void *) dtree->bitmap)) {
5789 + inrange = 1;
5790 + } else if (inrange) {
5791 + count++;
5792 + inrange = 0;
5793 + }
5794 + }
5795 + } LOOP_WALK_END_COUNT();
5796 + } LOOP_WALK_END_COUNT();
5797 + } LOOP_WALK_END_COUNT();
5798 +
5799 + if (inrange)
5800 + count++;
5801 +
5802 + return (count * sizeof(struct ip_set_req_iptreemap));
5803 +}
5804 +
5805 +static inline size_t add_member(void *data, size_t offset, ip_set_ip_t start, ip_set_ip_t end)
5806 +{
5807 + struct ip_set_req_iptreemap *entry = (struct ip_set_req_iptreemap *) (data + offset);
5808 +
5809 + entry->start = start;
5810 + entry->end = end;
5811 +
5812 + return sizeof(*entry);
5813 +}
5814 +
5815 +static void list_members(const struct ip_set *set, void *data)
5816 +{
5817 + struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
5818 + struct ip_set_iptreemap_b *btree;
5819 + struct ip_set_iptreemap_c *ctree;
5820 + struct ip_set_iptreemap_d *dtree;
5821 + unsigned int a, b, c, d, inrange = 0;
5822 + size_t offset = 0;
5823 + ip_set_ip_t start = 0, end = 0, ip;
5824 +
5825 + LOOP_WALK_BEGIN(map, a, btree) {
5826 + LOOP_WALK_BEGIN(btree, b, ctree) {
5827 + LOOP_WALK_BEGIN(ctree, c, dtree) {
5828 + for (d = 0; d < 256; d++) {
5829 + if (test_bit(d, (void *) dtree->bitmap)) {
5830 + ip = ((a << 24) | (b << 16) | (c << 8) | d);
5831 + if (!inrange) {
5832 + inrange = 1;
5833 + start = ip;
5834 + } else if (end < ip - 1) {
5835 + offset += add_member(data, offset, start, end);
5836 + start = ip;
5837 + }
5838 + end = ip;
5839 + } else if (inrange) {
5840 + offset += add_member(data, offset, start, end);
5841 + inrange = 0;
5842 + }
5843 + }
5844 + } LOOP_WALK_END();
5845 + } LOOP_WALK_END();
5846 + } LOOP_WALK_END();
5847 +
5848 + if (inrange)
5849 + add_member(data, offset, start, end);
5850 +}
5851 +
5852 +static struct ip_set_type ip_set_iptreemap = {
5853 + .typename = SETTYPE_NAME,
5854 + .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
5855 + .protocol_version = IP_SET_PROTOCOL_VERSION,
5856 + .create = create,
5857 + .destroy = destroy,
5858 + .flush = flush,
5859 + .reqsize = sizeof(struct ip_set_req_iptreemap),
5860 + .addip = addip,
5861 + .addip_kernel = addip_kernel,
5862 + .delip = delip,
5863 + .delip_kernel = delip_kernel,
5864 + .testip = testip,
5865 + .testip_kernel = testip_kernel,
5866 + .header_size = sizeof(struct ip_set_req_iptreemap_create),
5867 + .list_header = list_header,
5868 + .list_members_size = list_members_size,
5869 + .list_members = list_members,
5870 + .me = THIS_MODULE,
5871 +};
5872 +
5873 +MODULE_LICENSE("GPL");
5874 +MODULE_AUTHOR("Sven Wegener <sven.wegener@stealer.net>");
5875 +MODULE_DESCRIPTION("iptreemap type of IP sets");
5876 +
5877 +static int __init ip_set_iptreemap_init(void)
5878 +{
5879 + int ret = -ENOMEM;
5880 + int a;
5881 +
5882 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
5883 + cachep_b = kmem_cache_create("ip_set_iptreemap_b",
5884 + sizeof(struct ip_set_iptreemap_b),
5885 + 0, 0, NULL);
5886 +#else
5887 + cachep_b = kmem_cache_create("ip_set_iptreemap_b",
5888 + sizeof(struct ip_set_iptreemap_b),
5889 + 0, 0, NULL, NULL);
5890 +#endif
5891 + if (!cachep_b) {
5892 + ip_set_printk("Unable to create ip_set_iptreemap_b slab cache");
5893 + goto out;
5894 + }
5895 +
5896 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
5897 + cachep_c = kmem_cache_create("ip_set_iptreemap_c",
5898 + sizeof(struct ip_set_iptreemap_c),
5899 + 0, 0, NULL);
5900 +#else
5901 + cachep_c = kmem_cache_create("ip_set_iptreemap_c",
5902 + sizeof(struct ip_set_iptreemap_c),
5903 + 0, 0, NULL, NULL);
5904 +#endif
5905 + if (!cachep_c) {
5906 + ip_set_printk("Unable to create ip_set_iptreemap_c slab cache");
5907 + goto outb;
5908 + }
5909 +
5910 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
5911 + cachep_d = kmem_cache_create("ip_set_iptreemap_d",
5912 + sizeof(struct ip_set_iptreemap_d),
5913 + 0, 0, NULL);
5914 +#else
5915 + cachep_d = kmem_cache_create("ip_set_iptreemap_d",
5916 + sizeof(struct ip_set_iptreemap_d),
5917 + 0, 0, NULL, NULL);
5918 +#endif
5919 + if (!cachep_d) {
5920 + ip_set_printk("Unable to create ip_set_iptreemap_d slab cache");
5921 + goto outc;
5922 + }
5923 +
5924 + fullbitmap_d = kmem_cache_alloc(cachep_d, GFP_KERNEL);
5925 + if (!fullbitmap_d)
5926 + goto outd;
5927 +
5928 + fullbitmap_c = kmem_cache_alloc(cachep_c, GFP_KERNEL);
5929 + if (!fullbitmap_c)
5930 + goto outbitmapd;
5931 +
5932 + fullbitmap_b = kmem_cache_alloc(cachep_b, GFP_KERNEL);
5933 + if (!fullbitmap_b)
5934 + goto outbitmapc;
5935 +
5936 + ret = ip_set_register_set_type(&ip_set_iptreemap);
5937 + if (ret < 0)
5938 + goto outbitmapb;
5939 +
5940 + /* Now init our global bitmaps */
5941 + memset(fullbitmap_d->bitmap, 0xff, sizeof(fullbitmap_d->bitmap));
5942 +
5943 + for (a = 0; a < 256; a++)
5944 + fullbitmap_c->tree[a] = fullbitmap_d;
5945 +
5946 + for (a = 0; a < 256; a++)
5947 + fullbitmap_b->tree[a] = fullbitmap_c;
5948 + memset(fullbitmap_b->dirty, 0, sizeof(fullbitmap_b->dirty));
5949 +
5950 + return 0;
5951 +
5952 +outbitmapb:
5953 + kmem_cache_free(cachep_b, fullbitmap_b);
5954 +outbitmapc:
5955 + kmem_cache_free(cachep_c, fullbitmap_c);
5956 +outbitmapd:
5957 + kmem_cache_free(cachep_d, fullbitmap_d);
5958 +outd:
5959 + kmem_cache_destroy(cachep_d);
5960 +outc:
5961 + kmem_cache_destroy(cachep_c);
5962 +outb:
5963 + kmem_cache_destroy(cachep_b);
5964 +out:
5965 +
5966 + return ret;
5967 +}
5968 +
5969 +static void __exit ip_set_iptreemap_fini(void)
5970 +{
5971 + ip_set_unregister_set_type(&ip_set_iptreemap);
5972 + kmem_cache_free(cachep_d, fullbitmap_d);
5973 + kmem_cache_free(cachep_c, fullbitmap_c);
5974 + kmem_cache_free(cachep_b, fullbitmap_b);
5975 + kmem_cache_destroy(cachep_d);
5976 + kmem_cache_destroy(cachep_c);
5977 + kmem_cache_destroy(cachep_b);
5978 +}
5979 +
5980 +module_init(ip_set_iptreemap_init);
5981 +module_exit(ip_set_iptreemap_fini);
5982 Index: linux-2.6.21.7/net/ipv4/netfilter/ip_set_macipmap.c
5983 ===================================================================
5984 --- /dev/null
5985 +++ linux-2.6.21.7/net/ipv4/netfilter/ip_set_macipmap.c
5986 @@ -0,0 +1,375 @@
5987 +/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
5988 + * Patrick Schaaf <bof@bof.de>
5989 + * Martin Josefsson <gandalf@wlug.westbo.se>
5990 + * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
5991 + *
5992 + * This program is free software; you can redistribute it and/or modify
5993 + * it under the terms of the GNU General Public License version 2 as
5994 + * published by the Free Software Foundation.
5995 + */
5996 +
5997 +/* Kernel module implementing an IP set type: the macipmap type */
5998 +
5999 +#include <linux/module.h>
6000 +#include <linux/ip.h>
6001 +#include <linux/skbuff.h>
6002 +#include <linux/version.h>
6003 +#include <linux/netfilter_ipv4/ip_tables.h>
6004 +#include <linux/netfilter_ipv4/ip_set.h>
6005 +#include <linux/errno.h>
6006 +#include <asm/uaccess.h>
6007 +#include <asm/bitops.h>
6008 +#include <linux/spinlock.h>
6009 +#include <linux/if_ether.h>
6010 +#include <linux/vmalloc.h>
6011 +
6012 +#include <linux/netfilter_ipv4/ip_set_malloc.h>
6013 +#include <linux/netfilter_ipv4/ip_set_macipmap.h>
6014 +
6015 +static int
6016 +testip(struct ip_set *set, const void *data, size_t size, ip_set_ip_t *hash_ip)
6017 +{
6018 + struct ip_set_macipmap *map = (struct ip_set_macipmap *) set->data;
6019 + struct ip_set_macip *table = (struct ip_set_macip *) map->members;
6020 + struct ip_set_req_macipmap *req = (struct ip_set_req_macipmap *) data;
6021 +
6022 + if (size != sizeof(struct ip_set_req_macipmap)) {
6023 + ip_set_printk("data length wrong (want %zu, have %zu)",
6024 + sizeof(struct ip_set_req_macipmap),
6025 + size);
6026 + return -EINVAL;
6027 + }
6028 +
6029 + if (req->ip < map->first_ip || req->ip > map->last_ip)
6030 + return -ERANGE;
6031 +
6032 + *hash_ip = req->ip;
6033 + DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u",
6034 + set->name, HIPQUAD(req->ip), HIPQUAD(*hash_ip));
6035 + if (test_bit(IPSET_MACIP_ISSET,
6036 + (void *) &table[req->ip - map->first_ip].flags)) {
6037 + return (memcmp(req->ethernet,
6038 + &table[req->ip - map->first_ip].ethernet,
6039 + ETH_ALEN) == 0);
6040 + } else {
6041 + return (map->flags & IPSET_MACIP_MATCHUNSET ? 1 : 0);
6042 + }
6043 +}
6044 +
6045 +static int
6046 +testip_kernel(struct ip_set *set,
6047 + const struct sk_buff *skb,
6048 + ip_set_ip_t *hash_ip,
6049 + const u_int32_t *flags,
6050 + unsigned char index)
6051 +{
6052 + struct ip_set_macipmap *map =
6053 + (struct ip_set_macipmap *) set->data;
6054 + struct ip_set_macip *table =
6055 + (struct ip_set_macip *) map->members;
6056 + ip_set_ip_t ip;
6057 +
6058 + ip = ntohl(flags[index] & IPSET_SRC
6059 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
6060 + ? ip_hdr(skb)->saddr
6061 + : ip_hdr(skb)->daddr);
6062 +#else
6063 + ? skb->nh.iph->saddr
6064 + : skb->nh.iph->daddr);
6065 +#endif
6066 +
6067 + if (ip < map->first_ip || ip > map->last_ip)
6068 + return 0;
6069 +
6070 + *hash_ip = ip;
6071 + DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u",
6072 + set->name, HIPQUAD(ip), HIPQUAD(*hash_ip));
6073 + if (test_bit(IPSET_MACIP_ISSET,
6074 + (void *) &table[ip - map->first_ip].flags)) {
6075 + /* Is mac pointer valid?
6076 + * If so, compare... */
6077 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
6078 + return (skb_mac_header(skb) >= skb->head
6079 + && (skb_mac_header(skb) + ETH_HLEN) <= skb->data
6080 +#else
6081 + return (skb->mac.raw >= skb->head
6082 + && (skb->mac.raw + ETH_HLEN) <= skb->data
6083 +#endif
6084 + && (memcmp(eth_hdr(skb)->h_source,
6085 + &table[ip - map->first_ip].ethernet,
6086 + ETH_ALEN) == 0));
6087 + } else {
6088 + return (map->flags & IPSET_MACIP_MATCHUNSET ? 1 : 0);
6089 + }
6090 +}
6091 +
6092 +/* returns 0 on success */
6093 +static inline int
6094 +__addip(struct ip_set *set,
6095 + ip_set_ip_t ip, unsigned char *ethernet, ip_set_ip_t *hash_ip)
6096 +{
6097 + struct ip_set_macipmap *map =
6098 + (struct ip_set_macipmap *) set->data;
6099 + struct ip_set_macip *table =
6100 + (struct ip_set_macip *) map->members;
6101 +
6102 + if (ip < map->first_ip || ip > map->last_ip)
6103 + return -ERANGE;
6104 + if (test_and_set_bit(IPSET_MACIP_ISSET,
6105 + (void *) &table[ip - map->first_ip].flags))
6106 + return -EEXIST;
6107 +
6108 + *hash_ip = ip;
6109 + DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
6110 + memcpy(&table[ip - map->first_ip].ethernet, ethernet, ETH_ALEN);
6111 + return 0;
6112 +}
6113 +
6114 +static int
6115 +addip(struct ip_set *set, const void *data, size_t size,
6116 + ip_set_ip_t *hash_ip)
6117 +{
6118 + struct ip_set_req_macipmap *req =
6119 + (struct ip_set_req_macipmap *) data;
6120 +
6121 + if (size != sizeof(struct ip_set_req_macipmap)) {
6122 + ip_set_printk("data length wrong (want %zu, have %zu)",
6123 + sizeof(struct ip_set_req_macipmap),
6124 + size);
6125 + return -EINVAL;
6126 + }
6127 + return __addip(set, req->ip, req->ethernet, hash_ip);
6128 +}
6129 +
6130 +static int
6131 +addip_kernel(struct ip_set *set,
6132 + const struct sk_buff *skb,
6133 + ip_set_ip_t *hash_ip,
6134 + const u_int32_t *flags,
6135 + unsigned char index)
6136 +{
6137 + ip_set_ip_t ip;
6138 +
6139 + ip = ntohl(flags[index] & IPSET_SRC
6140 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
6141 + ? ip_hdr(skb)->saddr
6142 + : ip_hdr(skb)->daddr);
6143 +#else
6144 + ? skb->nh.iph->saddr
6145 + : skb->nh.iph->daddr);
6146 +#endif
6147 +
6148 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
6149 + if (!(skb_mac_header(skb) >= skb->head
6150 + && (skb_mac_header(skb) + ETH_HLEN) <= skb->data))
6151 +#else
6152 + if (!(skb->mac.raw >= skb->head
6153 + && (skb->mac.raw + ETH_HLEN) <= skb->data))
6154 +#endif
6155 + return -EINVAL;
6156 +
6157 + return __addip(set, ip, eth_hdr(skb)->h_source, hash_ip);
6158 +}
6159 +
6160 +static inline int
6161 +__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
6162 +{
6163 + struct ip_set_macipmap *map =
6164 + (struct ip_set_macipmap *) set->data;
6165 + struct ip_set_macip *table =
6166 + (struct ip_set_macip *) map->members;
6167 +
6168 + if (ip < map->first_ip || ip > map->last_ip)
6169 + return -ERANGE;
6170 + if (!test_and_clear_bit(IPSET_MACIP_ISSET,
6171 + (void *)&table[ip - map->first_ip].flags))
6172 + return -EEXIST;
6173 +
6174 + *hash_ip = ip;
6175 + DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
6176 + return 0;
6177 +}
6178 +
6179 +static int
6180 +delip(struct ip_set *set, const void *data, size_t size,
6181 + ip_set_ip_t *hash_ip)
6182 +{
6183 + struct ip_set_req_macipmap *req =
6184 + (struct ip_set_req_macipmap *) data;
6185 +
6186 + if (size != sizeof(struct ip_set_req_macipmap)) {
6187 + ip_set_printk("data length wrong (want %zu, have %zu)",
6188 + sizeof(struct ip_set_req_macipmap),
6189 + size);
6190 + return -EINVAL;
6191 + }
6192 + return __delip(set, req->ip, hash_ip);
6193 +}
6194 +
6195 +static int
6196 +delip_kernel(struct ip_set *set,
6197 + const struct sk_buff *skb,
6198 + ip_set_ip_t *hash_ip,
6199 + const u_int32_t *flags,
6200 + unsigned char index)
6201 +{
6202 + return __delip(set,
6203 + ntohl(flags[index] & IPSET_SRC
6204 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
6205 + ? ip_hdr(skb)->saddr
6206 + : ip_hdr(skb)->daddr),
6207 +#else
6208 + ? skb->nh.iph->saddr
6209 + : skb->nh.iph->daddr),
6210 +#endif
6211 + hash_ip);
6212 +}
6213 +
6214 +static inline size_t members_size(ip_set_id_t from, ip_set_id_t to)
6215 +{
6216 + return (size_t)((to - from + 1) * sizeof(struct ip_set_macip));
6217 +}
6218 +
6219 +static int create(struct ip_set *set, const void *data, size_t size)
6220 +{
6221 + int newbytes;
6222 + struct ip_set_req_macipmap_create *req =
6223 + (struct ip_set_req_macipmap_create *) data;
6224 + struct ip_set_macipmap *map;
6225 +
6226 + if (size != sizeof(struct ip_set_req_macipmap_create)) {
6227 + ip_set_printk("data length wrong (want %zu, have %zu)",
6228 + sizeof(struct ip_set_req_macipmap_create),
6229 + size);
6230 + return -EINVAL;
6231 + }
6232 +
6233 + DP("from %u.%u.%u.%u to %u.%u.%u.%u",
6234 + HIPQUAD(req->from), HIPQUAD(req->to));
6235 +
6236 + if (req->from > req->to) {
6237 + DP("bad ip range");
6238 + return -ENOEXEC;
6239 + }
6240 +
6241 + if (req->to - req->from > MAX_RANGE) {
6242 + ip_set_printk("range too big (max %d addresses)",
6243 + MAX_RANGE+1);
6244 + return -ENOEXEC;
6245 + }
6246 +
6247 + map = kmalloc(sizeof(struct ip_set_macipmap), GFP_KERNEL);
6248 + if (!map) {
6249 + DP("out of memory for %d bytes",
6250 + sizeof(struct ip_set_macipmap));
6251 + return -ENOMEM;
6252 + }
6253 + map->flags = req->flags;
6254 + map->first_ip = req->from;
6255 + map->last_ip = req->to;
6256 + newbytes = members_size(map->first_ip, map->last_ip);
6257 + map->members = ip_set_malloc(newbytes);
6258 + DP("members: %u %p", newbytes, map->members);
6259 + if (!map->members) {
6260 + DP("out of memory for %d bytes", newbytes);
6261 + kfree(map);
6262 + return -ENOMEM;
6263 + }
6264 + memset(map->members, 0, newbytes);
6265 +
6266 + set->data = map;
6267 + return 0;
6268 +}
6269 +
6270 +static void destroy(struct ip_set *set)
6271 +{
6272 + struct ip_set_macipmap *map =
6273 + (struct ip_set_macipmap *) set->data;
6274 +
6275 + ip_set_free(map->members, members_size(map->first_ip, map->last_ip));
6276 + kfree(map);
6277 +
6278 + set->data = NULL;
6279 +}
6280 +
6281 +static void flush(struct ip_set *set)
6282 +{
6283 + struct ip_set_macipmap *map =
6284 + (struct ip_set_macipmap *) set->data;
6285 + memset(map->members, 0, members_size(map->first_ip, map->last_ip));
6286 +}
6287 +
6288 +static void list_header(const struct ip_set *set, void *data)
6289 +{
6290 + struct ip_set_macipmap *map =
6291 + (struct ip_set_macipmap *) set->data;
6292 + struct ip_set_req_macipmap_create *header =
6293 + (struct ip_set_req_macipmap_create *) data;
6294 +
6295 + DP("list_header %x %x %u", map->first_ip, map->last_ip,
6296 + map->flags);
6297 +
6298 + header->from = map->first_ip;
6299 + header->to = map->last_ip;
6300 + header->flags = map->flags;
6301 +}
6302 +
6303 +static int list_members_size(const struct ip_set *set)
6304 +{
6305 + struct ip_set_macipmap *map =
6306 + (struct ip_set_macipmap *) set->data;
6307 +
6308 + DP("%u", members_size(map->first_ip, map->last_ip));
6309 + return members_size(map->first_ip, map->last_ip);
6310 +}
6311 +
6312 +static void list_members(const struct ip_set *set, void *data)
6313 +{
6314 + struct ip_set_macipmap *map =
6315 + (struct ip_set_macipmap *) set->data;
6316 +
6317 + int bytes = members_size(map->first_ip, map->last_ip);
6318 +
6319 + DP("members: %u %p", bytes, map->members);
6320 + memcpy(data, map->members, bytes);
6321 +}
6322 +
6323 +static struct ip_set_type ip_set_macipmap = {
6324 + .typename = SETTYPE_NAME,
6325 + .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
6326 + .protocol_version = IP_SET_PROTOCOL_VERSION,
6327 + .create = &create,
6328 + .destroy = &destroy,
6329 + .flush = &flush,
6330 + .reqsize = sizeof(struct ip_set_req_macipmap),
6331 + .addip = &addip,
6332 + .addip_kernel = &addip_kernel,
6333 + .delip = &delip,
6334 + .delip_kernel = &delip_kernel,
6335 + .testip = &testip,
6336 + .testip_kernel = &testip_kernel,
6337 + .header_size = sizeof(struct ip_set_req_macipmap_create),
6338 + .list_header = &list_header,
6339 + .list_members_size = &list_members_size,
6340 + .list_members = &list_members,
6341 + .me = THIS_MODULE,
6342 +};
6343 +
6344 +MODULE_LICENSE("GPL");
6345 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
6346 +MODULE_DESCRIPTION("macipmap type of IP sets");
6347 +
6348 +static int __init ip_set_macipmap_init(void)
6349 +{
6350 + init_max_malloc_size();
6351 + return ip_set_register_set_type(&ip_set_macipmap);
6352 +}
6353 +
6354 +static void __exit ip_set_macipmap_fini(void)
6355 +{
6356 + /* FIXME: possible race with ip_set_create() */
6357 + ip_set_unregister_set_type(&ip_set_macipmap);
6358 +}
6359 +
6360 +module_init(ip_set_macipmap_init);
6361 +module_exit(ip_set_macipmap_fini);
6362 Index: linux-2.6.21.7/net/ipv4/netfilter/ip_set_nethash.c
6363 ===================================================================
6364 --- /dev/null
6365 +++ linux-2.6.21.7/net/ipv4/netfilter/ip_set_nethash.c
6366 @@ -0,0 +1,497 @@
6367 +/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
6368 + *
6369 + * This program is free software; you can redistribute it and/or modify
6370 + * it under the terms of the GNU General Public License version 2 as
6371 + * published by the Free Software Foundation.
6372 + */
6373 +
6374 +/* Kernel module implementing a cidr nethash set */
6375 +
6376 +#include <linux/module.h>
6377 +#include <linux/ip.h>
6378 +#include <linux/skbuff.h>
6379 +#include <linux/version.h>
6380 +#include <linux/jhash.h>
6381 +#include <linux/netfilter_ipv4/ip_tables.h>
6382 +#include <linux/netfilter_ipv4/ip_set.h>
6383 +#include <linux/errno.h>
6384 +#include <asm/uaccess.h>
6385 +#include <asm/bitops.h>
6386 +#include <linux/spinlock.h>
6387 +#include <linux/vmalloc.h>
6388 +#include <linux/random.h>
6389 +
6390 +#include <net/ip.h>
6391 +
6392 +#include <linux/netfilter_ipv4/ip_set_malloc.h>
6393 +#include <linux/netfilter_ipv4/ip_set_nethash.h>
6394 +
6395 +static int limit = MAX_RANGE;
6396 +
6397 +static inline __u32
6398 +jhash_ip(const struct ip_set_nethash *map, uint16_t i, ip_set_ip_t ip)
6399 +{
6400 + return jhash_1word(ip, *(((uint32_t *) map->initval) + i));
6401 +}
6402 +
6403 +static inline __u32
6404 +hash_id_cidr(struct ip_set_nethash *map,
6405 + ip_set_ip_t ip,
6406 + unsigned char cidr,
6407 + ip_set_ip_t *hash_ip)
6408 +{
6409 + __u32 id;
6410 + u_int16_t i;
6411 + ip_set_ip_t *elem;
6412 +
6413 + *hash_ip = pack(ip, cidr);
6414 +
6415 + for (i = 0; i < map->probes; i++) {
6416 + id = jhash_ip(map, i, *hash_ip) % map->hashsize;
6417 + DP("hash key: %u", id);
6418 + elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
6419 + if (*elem == *hash_ip)
6420 + return id;
6421 + }
6422 + return UINT_MAX;
6423 +}
6424 +
6425 +static inline __u32
6426 +hash_id(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
6427 +{
6428 + struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
6429 + __u32 id = UINT_MAX;
6430 + int i;
6431 +
6432 + for (i = 0; i < 30 && map->cidr[i]; i++) {
6433 + id = hash_id_cidr(map, ip, map->cidr[i], hash_ip);
6434 + if (id != UINT_MAX)
6435 + break;
6436 + }
6437 + return id;
6438 +}
6439 +
6440 +static inline int
6441 +__testip_cidr(struct ip_set *set, ip_set_ip_t ip, unsigned char cidr,
6442 + ip_set_ip_t *hash_ip)
6443 +{
6444 + struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
6445 +
6446 + return (ip && hash_id_cidr(map, ip, cidr, hash_ip) != UINT_MAX);
6447 +}
6448 +
6449 +static inline int
6450 +__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
6451 +{
6452 + return (ip && hash_id(set, ip, hash_ip) != UINT_MAX);
6453 +}
6454 +
6455 +static int
6456 +testip(struct ip_set *set, const void *data, size_t size,
6457 + ip_set_ip_t *hash_ip)
6458 +{
6459 + struct ip_set_req_nethash *req =
6460 + (struct ip_set_req_nethash *) data;
6461 +
6462 + if (size != sizeof(struct ip_set_req_nethash)) {
6463 + ip_set_printk("data length wrong (want %zu, have %zu)",
6464 + sizeof(struct ip_set_req_nethash),
6465 + size);
6466 + return -EINVAL;
6467 + }
6468 + return (req->cidr == 32 ? __testip(set, req->ip, hash_ip)
6469 + : __testip_cidr(set, req->ip, req->cidr, hash_ip));
6470 +}
6471 +
6472 +static int
6473 +testip_kernel(struct ip_set *set,
6474 + const struct sk_buff *skb,
6475 + ip_set_ip_t *hash_ip,
6476 + const u_int32_t *flags,
6477 + unsigned char index)
6478 +{
6479 + return __testip(set,
6480 + ntohl(flags[index] & IPSET_SRC
6481 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
6482 + ? ip_hdr(skb)->saddr
6483 + : ip_hdr(skb)->daddr),
6484 +#else
6485 + ? skb->nh.iph->saddr
6486 + : skb->nh.iph->daddr),
6487 +#endif
6488 + hash_ip);
6489 +}
6490 +
6491 +static inline int
6492 +__addip_base(struct ip_set_nethash *map, ip_set_ip_t ip)
6493 +{
6494 + __u32 probe;
6495 + u_int16_t i;
6496 + ip_set_ip_t *elem;
6497 +
6498 + for (i = 0; i < map->probes; i++) {
6499 + probe = jhash_ip(map, i, ip) % map->hashsize;
6500 + elem = HARRAY_ELEM(map->members, ip_set_ip_t *, probe);
6501 + if (*elem == ip)
6502 + return -EEXIST;
6503 + if (!*elem) {
6504 + *elem = ip;
6505 + map->elements++;
6506 + return 0;
6507 + }
6508 + }
6509 + /* Trigger rehashing */
6510 + return -EAGAIN;
6511 +}
6512 +
6513 +static inline int
6514 +__addip(struct ip_set_nethash *map, ip_set_ip_t ip, unsigned char cidr,
6515 + ip_set_ip_t *hash_ip)
6516 +{
6517 + if (!ip || map->elements >= limit)
6518 + return -ERANGE;
6519 +
6520 + *hash_ip = pack(ip, cidr);
6521 + DP("%u.%u.%u.%u/%u, %u.%u.%u.%u", HIPQUAD(ip), cidr, HIPQUAD(*hash_ip));
6522 +
6523 + return __addip_base(map, *hash_ip);
6524 +}
6525 +
6526 +static void
6527 +update_cidr_sizes(struct ip_set_nethash *map, unsigned char cidr)
6528 +{
6529 + unsigned char next;
6530 + int i;
6531 +
6532 + for (i = 0; i < 30 && map->cidr[i]; i++) {
6533 + if (map->cidr[i] == cidr) {
6534 + return;
6535 + } else if (map->cidr[i] < cidr) {
6536 + next = map->cidr[i];
6537 + map->cidr[i] = cidr;
6538 + cidr = next;
6539 + }
6540 + }
6541 + if (i < 30)
6542 + map->cidr[i] = cidr;
6543 +}
6544 +
6545 +static int
6546 +addip(struct ip_set *set, const void *data, size_t size,
6547 + ip_set_ip_t *hash_ip)
6548 +{
6549 + struct ip_set_req_nethash *req =
6550 + (struct ip_set_req_nethash *) data;
6551 + int ret;
6552 +
6553 + if (size != sizeof(struct ip_set_req_nethash)) {
6554 + ip_set_printk("data length wrong (want %zu, have %zu)",
6555 + sizeof(struct ip_set_req_nethash),
6556 + size);
6557 + return -EINVAL;
6558 + }
6559 + ret = __addip((struct ip_set_nethash *) set->data,
6560 + req->ip, req->cidr, hash_ip);
6561 +
6562 + if (ret == 0)
6563 + update_cidr_sizes((struct ip_set_nethash *) set->data,
6564 + req->cidr);
6565 +
6566 + return ret;
6567 +}
6568 +
6569 +static int
6570 +addip_kernel(struct ip_set *set,
6571 + const struct sk_buff *skb,
6572 + ip_set_ip_t *hash_ip,
6573 + const u_int32_t *flags,
6574 + unsigned char index)
6575 +{
6576 + struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
6577 + int ret = -ERANGE;
6578 + ip_set_ip_t ip = ntohl(flags[index] & IPSET_SRC
6579 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
6580 + ? ip_hdr(skb)->saddr
6581 + : ip_hdr(skb)->daddr);
6582 +#else
6583 + ? skb->nh.iph->saddr
6584 + : skb->nh.iph->daddr);
6585 +#endif
6586 +
6587 + if (map->cidr[0])
6588 + ret = __addip(map, ip, map->cidr[0], hash_ip);
6589 +
6590 + return ret;
6591 +}
6592 +
6593 +static int retry(struct ip_set *set)
6594 +{
6595 + struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
6596 + ip_set_ip_t *elem;
6597 + void *members;
6598 + u_int32_t i, hashsize = map->hashsize;
6599 + int res;
6600 + struct ip_set_nethash *tmp;
6601 +
6602 + if (map->resize == 0)
6603 + return -ERANGE;
6604 +
6605 + again:
6606 + res = 0;
6607 +
6608 + /* Calculate new parameters */
6609 + hashsize += (hashsize * map->resize)/100;
6610 + if (hashsize == map->hashsize)
6611 + hashsize++;
6612 +
6613 + ip_set_printk("rehashing of set %s triggered: "
6614 + "hashsize grows from %u to %u",
6615 + set->name, map->hashsize, hashsize);
6616 +
6617 + tmp = kmalloc(sizeof(struct ip_set_nethash)
6618 + + map->probes * sizeof(uint32_t), GFP_ATOMIC);
6619 + if (!tmp) {
6620 + DP("out of memory for %d bytes",
6621 + sizeof(struct ip_set_nethash)
6622 + + map->probes * sizeof(uint32_t));
6623 + return -ENOMEM;
6624 + }
6625 + tmp->members = harray_malloc(hashsize, sizeof(ip_set_ip_t), GFP_ATOMIC);
6626 + if (!tmp->members) {
6627 + DP("out of memory for %d bytes", hashsize * sizeof(ip_set_ip_t));
6628 + kfree(tmp);
6629 + return -ENOMEM;
6630 + }
6631 + tmp->hashsize = hashsize;
6632 + tmp->elements = 0;
6633 + tmp->probes = map->probes;
6634 + tmp->resize = map->resize;
6635 + memcpy(tmp->initval, map->initval, map->probes * sizeof(uint32_t));
6636 + memcpy(tmp->cidr, map->cidr, 30 * sizeof(unsigned char));
6637 +
6638 + write_lock_bh(&set->lock);
6639 + map = (struct ip_set_nethash *) set->data; /* Play safe */
6640 + for (i = 0; i < map->hashsize && res == 0; i++) {
6641 + elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
6642 + if (*elem)
6643 + res = __addip_base(tmp, *elem);
6644 + }
6645 + if (res) {
6646 + /* Failure, try again */
6647 + write_unlock_bh(&set->lock);
6648 + harray_free(tmp->members);
6649 + kfree(tmp);
6650 + goto again;
6651 + }
6652 +
6653 + /* Success at resizing! */
6654 + members = map->members;
6655 +
6656 + map->hashsize = tmp->hashsize;
6657 + map->members = tmp->members;
6658 + write_unlock_bh(&set->lock);
6659 +
6660 + harray_free(members);
6661 + kfree(tmp);
6662 +
6663 + return 0;
6664 +}
6665 +
6666 +static inline int
6667 +__delip(struct ip_set_nethash *map, ip_set_ip_t ip, unsigned char cidr,
6668 + ip_set_ip_t *hash_ip)
6669 +{
6670 + ip_set_ip_t id, *elem;
6671 +
6672 + if (!ip)
6673 + return -ERANGE;
6674 +
6675 + id = hash_id_cidr(map, ip, cidr, hash_ip);
6676 + if (id == UINT_MAX)
6677 + return -EEXIST;
6678 +
6679 + elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
6680 + *elem = 0;
6681 + map->elements--;
6682 + return 0;
6683 +}
6684 +
6685 +static int
6686 +delip(struct ip_set *set, const void *data, size_t size,
6687 + ip_set_ip_t *hash_ip)
6688 +{
6689 + struct ip_set_req_nethash *req =
6690 + (struct ip_set_req_nethash *) data;
6691 +
6692 + if (size != sizeof(struct ip_set_req_nethash)) {
6693 + ip_set_printk("data length wrong (want %zu, have %zu)",
6694 + sizeof(struct ip_set_req_nethash),
6695 + size);
6696 + return -EINVAL;
6697 + }
6698 + /* TODO: no garbage collection in map->cidr */
6699 + return __delip((struct ip_set_nethash *) set->data,
6700 + req->ip, req->cidr, hash_ip);
6701 +}
6702 +
6703 +static int
6704 +delip_kernel(struct ip_set *set,
6705 + const struct sk_buff *skb,
6706 + ip_set_ip_t *hash_ip,
6707 + const u_int32_t *flags,
6708 + unsigned char index)
6709 +{
6710 + struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
6711 + int ret = -ERANGE;
6712 + ip_set_ip_t ip = ntohl(flags[index] & IPSET_SRC
6713 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
6714 + ? ip_hdr(skb)->saddr
6715 + : ip_hdr(skb)->daddr);
6716 +#else
6717 + ? skb->nh.iph->saddr
6718 + : skb->nh.iph->daddr);
6719 +#endif
6720 +
6721 + if (map->cidr[0])
6722 + ret = __delip(map, ip, map->cidr[0], hash_ip);
6723 +
6724 + return ret;
6725 +}
6726 +
6727 +static int create(struct ip_set *set, const void *data, size_t size)
6728 +{
6729 + struct ip_set_req_nethash_create *req =
6730 + (struct ip_set_req_nethash_create *) data;
6731 + struct ip_set_nethash *map;
6732 + uint16_t i;
6733 +
6734 + if (size != sizeof(struct ip_set_req_nethash_create)) {
6735 + ip_set_printk("data length wrong (want %zu, have %zu)",
6736 + sizeof(struct ip_set_req_nethash_create),
6737 + size);
6738 + return -EINVAL;
6739 + }
6740 +
6741 + if (req->hashsize < 1) {
6742 + ip_set_printk("hashsize too small");
6743 + return -ENOEXEC;
6744 + }
6745 + if (req->probes < 1) {
6746 + ip_set_printk("probes too small");
6747 + return -ENOEXEC;
6748 + }
6749 +
6750 + map = kmalloc(sizeof(struct ip_set_nethash)
6751 + + req->probes * sizeof(uint32_t), GFP_KERNEL);
6752 + if (!map) {
6753 + DP("out of memory for %d bytes",
6754 + sizeof(struct ip_set_nethash)
6755 + + req->probes * sizeof(uint32_t));
6756 + return -ENOMEM;
6757 + }
6758 + for (i = 0; i < req->probes; i++)
6759 + get_random_bytes(((uint32_t *) map->initval)+i, 4);
6760 + map->elements = 0;
6761 + map->hashsize = req->hashsize;
6762 + map->probes = req->probes;
6763 + map->resize = req->resize;
6764 + memset(map->cidr, 0, 30 * sizeof(unsigned char));
6765 + map->members = harray_malloc(map->hashsize, sizeof(ip_set_ip_t), GFP_KERNEL);
6766 + if (!map->members) {
6767 + DP("out of memory for %d bytes", map->hashsize * sizeof(ip_set_ip_t));
6768 + kfree(map);
6769 + return -ENOMEM;
6770 + }
6771 +
6772 + set->data = map;
6773 + return 0;
6774 +}
6775 +
6776 +static void destroy(struct ip_set *set)
6777 +{
6778 + struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
6779 +
6780 + harray_free(map->members);
6781 + kfree(map);
6782 +
6783 + set->data = NULL;
6784 +}
6785 +
6786 +static void flush(struct ip_set *set)
6787 +{
6788 + struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
6789 + harray_flush(map->members, map->hashsize, sizeof(ip_set_ip_t));
6790 + memset(map->cidr, 0, 30 * sizeof(unsigned char));
6791 + map->elements = 0;
6792 +}
6793 +
6794 +static void list_header(const struct ip_set *set, void *data)
6795 +{
6796 + struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
6797 + struct ip_set_req_nethash_create *header =
6798 + (struct ip_set_req_nethash_create *) data;
6799 +
6800 + header->hashsize = map->hashsize;
6801 + header->probes = map->probes;
6802 + header->resize = map->resize;
6803 +}
6804 +
6805 +static int list_members_size(const struct ip_set *set)
6806 +{
6807 + struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
6808 +
6809 + return (map->hashsize * sizeof(ip_set_ip_t));
6810 +}
6811 +
6812 +static void list_members(const struct ip_set *set, void *data)
6813 +{
6814 + struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
6815 + ip_set_ip_t i, *elem;
6816 +
6817 + for (i = 0; i < map->hashsize; i++) {
6818 + elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
6819 + ((ip_set_ip_t *)data)[i] = *elem;
6820 + }
6821 +}
6822 +
6823 +static struct ip_set_type ip_set_nethash = {
6824 + .typename = SETTYPE_NAME,
6825 + .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
6826 + .protocol_version = IP_SET_PROTOCOL_VERSION,
6827 + .create = &create,
6828 + .destroy = &destroy,
6829 + .flush = &flush,
6830 + .reqsize = sizeof(struct ip_set_req_nethash),
6831 + .addip = &addip,
6832 + .addip_kernel = &addip_kernel,
6833 + .retry = &retry,
6834 + .delip = &delip,
6835 + .delip_kernel = &delip_kernel,
6836 + .testip = &testip,
6837 + .testip_kernel = &testip_kernel,
6838 + .header_size = sizeof(struct ip_set_req_nethash_create),
6839 + .list_header = &list_header,
6840 + .list_members_size = &list_members_size,
6841 + .list_members = &list_members,
6842 + .me = THIS_MODULE,
6843 +};
6844 +
6845 +MODULE_LICENSE("GPL");
6846 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
6847 +MODULE_DESCRIPTION("nethash type of IP sets");
6848 +module_param(limit, int, 0600);
6849 +MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
6850 +
6851 +static int __init ip_set_nethash_init(void)
6852 +{
6853 + return ip_set_register_set_type(&ip_set_nethash);
6854 +}
6855 +
6856 +static void __exit ip_set_nethash_fini(void)
6857 +{
6858 + /* FIXME: possible race with ip_set_create() */
6859 + ip_set_unregister_set_type(&ip_set_nethash);
6860 +}
6861 +
6862 +module_init(ip_set_nethash_init);
6863 +module_exit(ip_set_nethash_fini);
6864 Index: linux-2.6.21.7/net/ipv4/netfilter/ip_set_portmap.c
6865 ===================================================================
6866 --- /dev/null
6867 +++ linux-2.6.21.7/net/ipv4/netfilter/ip_set_portmap.c
6868 @@ -0,0 +1,346 @@
6869 +/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
6870 + *
6871 + * This program is free software; you can redistribute it and/or modify
6872 + * it under the terms of the GNU General Public License version 2 as
6873 + * published by the Free Software Foundation.
6874 + */
6875 +
6876 +/* Kernel module implementing a port set type as a bitmap */
6877 +
6878 +#include <linux/module.h>
6879 +#include <linux/ip.h>
6880 +#include <linux/tcp.h>
6881 +#include <linux/udp.h>
6882 +#include <linux/skbuff.h>
6883 +#include <linux/version.h>
6884 +#include <linux/netfilter_ipv4/ip_tables.h>
6885 +#include <linux/netfilter_ipv4/ip_set.h>
6886 +#include <linux/errno.h>
6887 +#include <asm/uaccess.h>
6888 +#include <asm/bitops.h>
6889 +#include <linux/spinlock.h>
6890 +
6891 +#include <net/ip.h>
6892 +
6893 +#include <linux/netfilter_ipv4/ip_set_portmap.h>
6894 +
6895 +/* We must handle non-linear skbs */
6896 +static inline ip_set_ip_t
6897 +get_port(const struct sk_buff *skb, u_int32_t flags)
6898 +{
6899 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
6900 + struct iphdr *iph = ip_hdr(skb);
6901 +#else
6902 + struct iphdr *iph = skb->nh.iph;
6903 +#endif
6904 + u_int16_t offset = ntohs(iph->frag_off) & IP_OFFSET;
6905 + switch (iph->protocol) {
6906 + case IPPROTO_TCP: {
6907 + struct tcphdr tcph;
6908 +
6909 + /* See comments at tcp_match in ip_tables.c */
6910 + if (offset)
6911 + return INVALID_PORT;
6912 +
6913 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
6914 + if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &tcph, sizeof(tcph)) < 0)
6915 +#else
6916 + if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &tcph, sizeof(tcph)) < 0)
6917 +#endif
6918 + /* No choice either */
6919 + return INVALID_PORT;
6920 +
6921 + return ntohs(flags & IPSET_SRC ?
6922 + tcph.source : tcph.dest);
6923 + }
6924 + case IPPROTO_UDP: {
6925 + struct udphdr udph;
6926 +
6927 + if (offset)
6928 + return INVALID_PORT;
6929 +
6930 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
6931 + if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &udph, sizeof(udph)) < 0)
6932 +#else
6933 + if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &udph, sizeof(udph)) < 0)
6934 +#endif
6935 + /* No choice either */
6936 + return INVALID_PORT;
6937 +
6938 + return ntohs(flags & IPSET_SRC ?
6939 + udph.source : udph.dest);
6940 + }
6941 + default:
6942 + return INVALID_PORT;
6943 + }
6944 +}
6945 +
6946 +static inline int
6947 +__testport(struct ip_set *set, ip_set_ip_t port, ip_set_ip_t *hash_port)
6948 +{
6949 + struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
6950 +
6951 + if (port < map->first_port || port > map->last_port)
6952 + return -ERANGE;
6953 +
6954 + *hash_port = port;
6955 + DP("set: %s, port:%u, %u", set->name, port, *hash_port);
6956 + return !!test_bit(port - map->first_port, map->members);
6957 +}
6958 +
6959 +static int
6960 +testport(struct ip_set *set, const void *data, size_t size,
6961 + ip_set_ip_t *hash_port)
6962 +{
6963 + struct ip_set_req_portmap *req =
6964 + (struct ip_set_req_portmap *) data;
6965 +
6966 + if (size != sizeof(struct ip_set_req_portmap)) {
6967 + ip_set_printk("data length wrong (want %zu, have %zu)",
6968 + sizeof(struct ip_set_req_portmap),
6969 + size);
6970 + return -EINVAL;
6971 + }
6972 + return __testport(set, req->port, hash_port);
6973 +}
6974 +
6975 +static int
6976 +testport_kernel(struct ip_set *set,
6977 + const struct sk_buff *skb,
6978 + ip_set_ip_t *hash_port,
6979 + const u_int32_t *flags,
6980 + unsigned char index)
6981 +{
6982 + int res;
6983 + ip_set_ip_t port = get_port(skb, flags[index]);
6984 +
6985 + DP("flag %s port %u", flags[index] & IPSET_SRC ? "SRC" : "DST", port);
6986 + if (port == INVALID_PORT)
6987 + return 0;
6988 +
6989 + res = __testport(set, port, hash_port);
6990 +
6991 + return (res < 0 ? 0 : res);
6992 +}
6993 +
6994 +static inline int
6995 +__addport(struct ip_set *set, ip_set_ip_t port, ip_set_ip_t *hash_port)
6996 +{
6997 + struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
6998 +
6999 + if (port < map->first_port || port > map->last_port)
7000 + return -ERANGE;
7001 + if (test_and_set_bit(port - map->first_port, map->members))
7002 + return -EEXIST;
7003 +
7004 + *hash_port = port;
7005 + DP("port %u", port);
7006 + return 0;
7007 +}
7008 +
7009 +static int
7010 +addport(struct ip_set *set, const void *data, size_t size,
7011 + ip_set_ip_t *hash_port)
7012 +{
7013 + struct ip_set_req_portmap *req =
7014 + (struct ip_set_req_portmap *) data;
7015 +
7016 + if (size != sizeof(struct ip_set_req_portmap)) {
7017 + ip_set_printk("data length wrong (want %zu, have %zu)",
7018 + sizeof(struct ip_set_req_portmap),
7019 + size);
7020 + return -EINVAL;
7021 + }
7022 + return __addport(set, req->port, hash_port);
7023 +}
7024 +
7025 +static int
7026 +addport_kernel(struct ip_set *set,
7027 + const struct sk_buff *skb,
7028 + ip_set_ip_t *hash_port,
7029 + const u_int32_t *flags,
7030 + unsigned char index)
7031 +{
7032 + ip_set_ip_t port = get_port(skb, flags[index]);
7033 +
7034 + if (port == INVALID_PORT)
7035 + return -EINVAL;
7036 +
7037 + return __addport(set, port, hash_port);
7038 +}
7039 +
7040 +static inline int
7041 +__delport(struct ip_set *set, ip_set_ip_t port, ip_set_ip_t *hash_port)
7042 +{
7043 + struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
7044 +
7045 + if (port < map->first_port || port > map->last_port)
7046 + return -ERANGE;
7047 + if (!test_and_clear_bit(port - map->first_port, map->members))
7048 + return -EEXIST;
7049 +
7050 + *hash_port = port;
7051 + DP("port %u", port);
7052 + return 0;
7053 +}
7054 +
7055 +static int
7056 +delport(struct ip_set *set, const void *data, size_t size,
7057 + ip_set_ip_t *hash_port)
7058 +{
7059 + struct ip_set_req_portmap *req =
7060 + (struct ip_set_req_portmap *) data;
7061 +
7062 + if (size != sizeof(struct ip_set_req_portmap)) {
7063 + ip_set_printk("data length wrong (want %zu, have %zu)",
7064 + sizeof(struct ip_set_req_portmap),
7065 + size);
7066 + return -EINVAL;
7067 + }
7068 + return __delport(set, req->port, hash_port);
7069 +}
7070 +
7071 +static int
7072 +delport_kernel(struct ip_set *set,
7073 + const struct sk_buff *skb,
7074 + ip_set_ip_t *hash_port,
7075 + const u_int32_t *flags,
7076 + unsigned char index)
7077 +{
7078 + ip_set_ip_t port = get_port(skb, flags[index]);
7079 +
7080 + if (port == INVALID_PORT)
7081 + return -EINVAL;
7082 +
7083 + return __delport(set, port, hash_port);
7084 +}
7085 +
7086 +static int create(struct ip_set *set, const void *data, size_t size)
7087 +{
7088 + int newbytes;
7089 + struct ip_set_req_portmap_create *req =
7090 + (struct ip_set_req_portmap_create *) data;
7091 + struct ip_set_portmap *map;
7092 +
7093 + if (size != sizeof(struct ip_set_req_portmap_create)) {
7094 + ip_set_printk("data length wrong (want %zu, have %zu)",
7095 + sizeof(struct ip_set_req_portmap_create),
7096 + size);
7097 + return -EINVAL;
7098 + }
7099 +
7100 + DP("from %u to %u", req->from, req->to);
7101 +
7102 + if (req->from > req->to) {
7103 + DP("bad port range");
7104 + return -ENOEXEC;
7105 + }
7106 +
7107 + if (req->to - req->from > MAX_RANGE) {
7108 + ip_set_printk("range too big (max %d ports)",
7109 + MAX_RANGE+1);
7110 + return -ENOEXEC;
7111 + }
7112 +
7113 + map = kmalloc(sizeof(struct ip_set_portmap), GFP_KERNEL);
7114 + if (!map) {
7115 + DP("out of memory for %d bytes",
7116 + sizeof(struct ip_set_portmap));
7117 + return -ENOMEM;
7118 + }
7119 + map->first_port = req->from;
7120 + map->last_port = req->to;
7121 + newbytes = bitmap_bytes(req->from, req->to);
7122 + map->members = kmalloc(newbytes, GFP_KERNEL);
7123 + if (!map->members) {
7124 + DP("out of memory for %d bytes", newbytes);
7125 + kfree(map);
7126 + return -ENOMEM;
7127 + }
7128 + memset(map->members, 0, newbytes);
7129 +
7130 + set->data = map;
7131 + return 0;
7132 +}
7133 +
7134 +static void destroy(struct ip_set *set)
7135 +{
7136 + struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
7137 +
7138 + kfree(map->members);
7139 + kfree(map);
7140 +
7141 + set->data = NULL;
7142 +}
7143 +
7144 +static void flush(struct ip_set *set)
7145 +{
7146 + struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
7147 + memset(map->members, 0, bitmap_bytes(map->first_port, map->last_port));
7148 +}
7149 +
7150 +static void list_header(const struct ip_set *set, void *data)
7151 +{
7152 + struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
7153 + struct ip_set_req_portmap_create *header =
7154 + (struct ip_set_req_portmap_create *) data;
7155 +
7156 + DP("list_header %u %u", map->first_port, map->last_port);
7157 +
7158 + header->from = map->first_port;
7159 + header->to = map->last_port;
7160 +}
7161 +
7162 +static int list_members_size(const struct ip_set *set)
7163 +{
7164 + struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
7165 +
7166 + return bitmap_bytes(map->first_port, map->last_port);
7167 +}
7168 +
7169 +static void list_members(const struct ip_set *set, void *data)
7170 +{
7171 + struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
7172 + int bytes = bitmap_bytes(map->first_port, map->last_port);
7173 +
7174 + memcpy(data, map->members, bytes);
7175 +}
7176 +
7177 +static struct ip_set_type ip_set_portmap = {
7178 + .typename = SETTYPE_NAME,
7179 + .features = IPSET_TYPE_PORT | IPSET_DATA_SINGLE,
7180 + .protocol_version = IP_SET_PROTOCOL_VERSION,
7181 + .create = &create,
7182 + .destroy = &destroy,
7183 + .flush = &flush,
7184 + .reqsize = sizeof(struct ip_set_req_portmap),
7185 + .addip = &addport,
7186 + .addip_kernel = &addport_kernel,
7187 + .delip = &delport,
7188 + .delip_kernel = &delport_kernel,
7189 + .testip = &testport,
7190 + .testip_kernel = &testport_kernel,
7191 + .header_size = sizeof(struct ip_set_req_portmap_create),
7192 + .list_header = &list_header,
7193 + .list_members_size = &list_members_size,
7194 + .list_members = &list_members,
7195 + .me = THIS_MODULE,
7196 +};
7197 +
7198 +MODULE_LICENSE("GPL");
7199 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
7200 +MODULE_DESCRIPTION("portmap type of IP sets");
7201 +
7202 +static int __init ip_set_portmap_init(void)
7203 +{
7204 + return ip_set_register_set_type(&ip_set_portmap);
7205 +}
7206 +
7207 +static void __exit ip_set_portmap_fini(void)
7208 +{
7209 + /* FIXME: possible race with ip_set_create() */
7210 + ip_set_unregister_set_type(&ip_set_portmap);
7211 +}
7212 +
7213 +module_init(ip_set_portmap_init);
7214 +module_exit(ip_set_portmap_fini);
7215 Index: linux-2.6.21.7/net/ipv4/netfilter/ipt_set.c
7216 ===================================================================
7217 --- /dev/null
7218 +++ linux-2.6.21.7/net/ipv4/netfilter/ipt_set.c
7219 @@ -0,0 +1,160 @@
7220 +/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
7221 + * Patrick Schaaf <bof@bof.de>
7222 + * Martin Josefsson <gandalf@wlug.westbo.se>
7223 + * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
7224 + *
7225 + * This program is free software; you can redistribute it and/or modify
7226 + * it under the terms of the GNU General Public License version 2 as
7227 + * published by the Free Software Foundation.
7228 + */
7229 +
7230 +/* Kernel module to match an IP set. */
7231 +
7232 +#include <linux/module.h>
7233 +#include <linux/ip.h>
7234 +#include <linux/skbuff.h>
7235 +#include <linux/version.h>
7236 +
7237 +#include <linux/netfilter_ipv4/ip_tables.h>
7238 +#include <linux/netfilter_ipv4/ip_set.h>
7239 +#include <linux/netfilter_ipv4/ipt_set.h>
7240 +
7241 +static inline int
7242 +match_set(const struct ipt_set_info *info,
7243 + const struct sk_buff *skb,
7244 + int inv)
7245 +{
7246 + if (ip_set_testip_kernel(info->index, skb, info->flags))
7247 + inv = !inv;
7248 + return inv;
7249 +}
7250 +
7251 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
7252 +static bool
7253 +#else
7254 +static int
7255 +#endif
7256 +match(const struct sk_buff *skb,
7257 + const struct net_device *in,
7258 + const struct net_device *out,
7259 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
7260 + const struct xt_match *match,
7261 +#endif
7262 + const void *matchinfo,
7263 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
7264 + int offset, unsigned int protoff, bool *hotdrop)
7265 +#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
7266 + int offset, unsigned int protoff, int *hotdrop)
7267 +#else
7268 + int offset, int *hotdrop)
7269 +#endif
7270 +{
7271 + const struct ipt_set_info_match *info = matchinfo;
7272 +
7273 + return match_set(&info->match_set,
7274 + skb,
7275 + info->match_set.flags[0] & IPSET_MATCH_INV);
7276 +}
7277 +
7278 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
7279 +static bool
7280 +#else
7281 +static int
7282 +#endif
7283 +checkentry(const char *tablename,
7284 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
7285 + const void *inf,
7286 +#else
7287 + const struct ipt_ip *ip,
7288 +#endif
7289 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
7290 + const struct xt_match *match,
7291 +#endif
7292 + void *matchinfo,
7293 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
7294 + unsigned int matchsize,
7295 +#endif
7296 + unsigned int hook_mask)
7297 +{
7298 + struct ipt_set_info_match *info =
7299 + (struct ipt_set_info_match *) matchinfo;
7300 + ip_set_id_t index;
7301 +
7302 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
7303 + if (matchsize != IPT_ALIGN(sizeof(struct ipt_set_info_match))) {
7304 + ip_set_printk("invalid matchsize %d", matchsize);
7305 + return 0;
7306 + }
7307 +#endif
7308 +
7309 + index = ip_set_get_byindex(info->match_set.index);
7310 +
7311 + if (index == IP_SET_INVALID_ID) {
7312 + ip_set_printk("Cannot find set indentified by id %u to match",
7313 + info->match_set.index);
7314 + return 0; /* error */
7315 + }
7316 + if (info->match_set.flags[IP_SET_MAX_BINDINGS] != 0) {
7317 + ip_set_printk("That's nasty!");
7318 + return 0; /* error */
7319 + }
7320 +
7321 + return 1;
7322 +}
7323 +
7324 +static void destroy(
7325 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
7326 + const struct xt_match *match,
7327 +#endif
7328 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
7329 + void *matchinfo, unsigned int matchsize)
7330 +#else
7331 + void *matchinfo)
7332 +#endif
7333 +{
7334 + struct ipt_set_info_match *info = matchinfo;
7335 +
7336 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
7337 + if (matchsize != IPT_ALIGN(sizeof(struct ipt_set_info_match))) {
7338 + ip_set_printk("invalid matchsize %d", matchsize);
7339 + return;
7340 + }
7341 +#endif
7342 + ip_set_put(info->match_set.index);
7343 +}
7344 +
7345 +static struct ipt_match set_match = {
7346 + .name = "set",
7347 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
7348 + .family = AF_INET,
7349 +#endif
7350 + .match = &match,
7351 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
7352 + .matchsize = sizeof(struct ipt_set_info_match),
7353 +#endif
7354 + .checkentry = &checkentry,
7355 + .destroy = &destroy,
7356 + .me = THIS_MODULE
7357 +};
7358 +
7359 +MODULE_LICENSE("GPL");
7360 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
7361 +MODULE_DESCRIPTION("iptables IP set match module");
7362 +
7363 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
7364 +#define ipt_register_match xt_register_match
7365 +#define ipt_unregister_match xt_unregister_match
7366 +#endif
7367 +
7368 +static int __init ipt_ipset_init(void)
7369 +{
7370 + return ipt_register_match(&set_match);
7371 +}
7372 +
7373 +static void __exit ipt_ipset_fini(void)
7374 +{
7375 + ipt_unregister_match(&set_match);
7376 +}
7377 +
7378 +module_init(ipt_ipset_init);
7379 +module_exit(ipt_ipset_fini);
7380 Index: linux-2.6.21.7/net/ipv4/netfilter/ipt_SET.c
7381 ===================================================================
7382 --- /dev/null
7383 +++ linux-2.6.21.7/net/ipv4/netfilter/ipt_SET.c
7384 @@ -0,0 +1,172 @@
7385 +/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
7386 + * Patrick Schaaf <bof@bof.de>
7387 + * Martin Josefsson <gandalf@wlug.westbo.se>
7388 + * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
7389 + *
7390 + * This program is free software; you can redistribute it and/or modify
7391 + * it under the terms of the GNU General Public License version 2 as
7392 + * published by the Free Software Foundation.
7393 + */
7394 +
7395 +/* ipt_SET.c - netfilter target to manipulate IP sets */
7396 +
7397 +#include <linux/types.h>
7398 +#include <linux/ip.h>
7399 +#include <linux/timer.h>
7400 +#include <linux/module.h>
7401 +#include <linux/netfilter.h>
7402 +#include <linux/netdevice.h>
7403 +#include <linux/if.h>
7404 +#include <linux/inetdevice.h>
7405 +#include <linux/version.h>
7406 +#include <net/protocol.h>
7407 +#include <net/checksum.h>
7408 +#include <linux/netfilter_ipv4.h>
7409 +#include <linux/netfilter_ipv4/ip_tables.h>
7410 +#include <linux/netfilter_ipv4/ipt_set.h>
7411 +
7412 +static unsigned int
7413 +target(struct sk_buff **pskb,
7414 + const struct net_device *in,
7415 + const struct net_device *out,
7416 + unsigned int hooknum,
7417 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
7418 + const struct xt_target *target,
7419 +#endif
7420 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
7421 + const void *targinfo,
7422 + void *userinfo)
7423 +#else
7424 + const void *targinfo)
7425 +#endif
7426 +{
7427 + const struct ipt_set_info_target *info = targinfo;
7428 +
7429 + if (info->add_set.index != IP_SET_INVALID_ID)
7430 + ip_set_addip_kernel(info->add_set.index,
7431 + *pskb,
7432 + info->add_set.flags);
7433 + if (info->del_set.index != IP_SET_INVALID_ID)
7434 + ip_set_delip_kernel(info->del_set.index,
7435 + *pskb,
7436 + info->del_set.flags);
7437 +
7438 + return IPT_CONTINUE;
7439 +}
7440 +
7441 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
7442 +static bool
7443 +#else
7444 +static int
7445 +#endif
7446 +checkentry(const char *tablename,
7447 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
7448 + const void *e,
7449 +#else
7450 + const struct ipt_entry *e,
7451 +#endif
7452 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
7453 + const struct xt_target *target,
7454 +#endif
7455 + void *targinfo,
7456 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
7457 + unsigned int targinfosize,
7458 +#endif
7459 + unsigned int hook_mask)
7460 +{
7461 + struct ipt_set_info_target *info =
7462 + (struct ipt_set_info_target *) targinfo;
7463 + ip_set_id_t index;
7464 +
7465 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
7466 + if (targinfosize != IPT_ALIGN(sizeof(*info))) {
7467 + DP("bad target info size %u", targinfosize);
7468 + return 0;
7469 + }
7470 +#endif
7471 +
7472 + if (info->add_set.index != IP_SET_INVALID_ID) {
7473 + index = ip_set_get_byindex(info->add_set.index);
7474 + if (index == IP_SET_INVALID_ID) {
7475 + ip_set_printk("cannot find add_set index %u as target",
7476 + info->add_set.index);
7477 + return 0; /* error */
7478 + }
7479 + }
7480 +
7481 + if (info->del_set.index != IP_SET_INVALID_ID) {
7482 + index = ip_set_get_byindex(info->del_set.index);
7483 + if (index == IP_SET_INVALID_ID) {
7484 + ip_set_printk("cannot find del_set index %u as target",
7485 + info->del_set.index);
7486 + return 0; /* error */
7487 + }
7488 + }
7489 + if (info->add_set.flags[IP_SET_MAX_BINDINGS] != 0
7490 + || info->del_set.flags[IP_SET_MAX_BINDINGS] != 0) {
7491 + ip_set_printk("That's nasty!");
7492 + return 0; /* error */
7493 + }
7494 +
7495 + return 1;
7496 +}
7497 +
7498 +static void destroy(
7499 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
7500 + const struct xt_target *target,
7501 +#endif
7502 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
7503 + void *targetinfo, unsigned int targetsize)
7504 +#else
7505 + void *targetinfo)
7506 +#endif
7507 +{
7508 + struct ipt_set_info_target *info = targetinfo;
7509 +
7510 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
7511 + if (targetsize != IPT_ALIGN(sizeof(struct ipt_set_info_target))) {
7512 + ip_set_printk("invalid targetsize %d", targetsize);
7513 + return;
7514 + }
7515 +#endif
7516 + if (info->add_set.index != IP_SET_INVALID_ID)
7517 + ip_set_put(info->add_set.index);
7518 + if (info->del_set.index != IP_SET_INVALID_ID)
7519 + ip_set_put(info->del_set.index);
7520 +}
7521 +
7522 +static struct ipt_target SET_target = {
7523 + .name = "SET",
7524 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
7525 + .family = AF_INET,
7526 +#endif
7527 + .target = target,
7528 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
7529 + .targetsize = sizeof(struct ipt_set_info_target),
7530 +#endif
7531 + .checkentry = checkentry,
7532 + .destroy = destroy,
7533 + .me = THIS_MODULE
7534 +};
7535 +
7536 +MODULE_LICENSE("GPL");
7537 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
7538 +MODULE_DESCRIPTION("iptables IP set target module");
7539 +
7540 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
7541 +#define ipt_register_target xt_register_target
7542 +#define ipt_unregister_target xt_unregister_target
7543 +#endif
7544 +
7545 +static int __init ipt_SET_init(void)
7546 +{
7547 + return ipt_register_target(&SET_target);
7548 +}
7549 +
7550 +static void __exit ipt_SET_fini(void)
7551 +{
7552 + ipt_unregister_target(&SET_target);
7553 +}
7554 +
7555 +module_init(ipt_SET_init);
7556 +module_exit(ipt_SET_fini);
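
checkentry() and destroy() above follow a simple reference-counting contract: every successful ip_set_get_byindex() taken when the rule is installed must be matched by an ip_set_put() when the rule is removed. Below is a condensed sketch of that pairing, assuming the >= 2.6.19 prototypes; set_target_hold() and set_target_release() are hypothetical names used only for this sketch, not functions in the patch:

#include <linux/netfilter_ipv4/ip_set.h>
#include <linux/netfilter_ipv4/ipt_set.h>

/* Take a reference on every set the rule names; on failure, release
 * anything already taken so the refcounts stay balanced. */
static int set_target_hold(struct ipt_set_info_target *info)
{
	if (info->add_set.index != IP_SET_INVALID_ID
	    && ip_set_get_byindex(info->add_set.index) == IP_SET_INVALID_ID)
		return 0;	/* add_set does not exist (any more) */

	if (info->del_set.index != IP_SET_INVALID_ID
	    && ip_set_get_byindex(info->del_set.index) == IP_SET_INVALID_ID) {
		if (info->add_set.index != IP_SET_INVALID_ID)
			ip_set_put(info->add_set.index);	/* undo the first get */
		return 0;
	}
	return 1;
}

/* Mirror operation: drop whatever set_target_hold() acquired. */
static void set_target_release(struct ipt_set_info_target *info)
{
	if (info->add_set.index != IP_SET_INVALID_ID)
		ip_set_put(info->add_set.index);
	if (info->del_set.index != IP_SET_INVALID_ID)
		ip_set_put(info->del_set.index);
}

In the module itself, checkentry() plays the hold role and destroy() plays the release role for the add_set and del_set indices carried in struct ipt_set_info_target.
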
7557 Index: linux-2.6.21.7/net/ipv4/netfilter/Kconfig
7558 ===================================================================
7559 --- linux-2.6.21.7.orig/net/ipv4/netfilter/Kconfig
7560 +++ linux-2.6.21.7/net/ipv4/netfilter/Kconfig
7561 @@ -663,5 +663,122 @@ config IP_NF_ARP_MANGLE
7562 Allows altering the ARP packet payload: source and destination
7563 hardware and network addresses.
7564
7565 +config IP_NF_SET
7566 + tristate "IP set support"
7567 + depends on INET && NETFILTER
7568 + help
7569 + This option adds IP set support to the kernel.
7570 + In order to define and use sets, you need the userspace utility
7571 + ipset(8).
7572 +
7573 + To compile it as a module, choose M here. If unsure, say N.
7574 +
7575 +config IP_NF_SET_MAX
7576 + int "Maximum number of IP sets"
7577 + default 256
7578 + range 2 65534
7579 + depends on IP_NF_SET
7580 + help
7581 + You can define here the default value of the maximum number
7582 + of IP sets for the kernel.
7583 +
7584 + The value can be overridden by the 'max_sets' module
7585 + parameter of the 'ip_set' module.
7586 +
7587 +config IP_NF_SET_HASHSIZE
7588 + int "Hash size for bindings of IP sets"
7589 + default 1024
7590 + depends on IP_NF_SET
7591 + help
7592 + You can define here the default value of the hash size for
7593 + bindings of IP sets.
7594 +
7595 + The value can be overridden by the 'hash_size' module
7596 + parameter of the 'ip_set' module.
7597 +
7598 +config IP_NF_SET_IPMAP
7599 + tristate "ipmap set support"
7600 + depends on IP_NF_SET
7601 + help
7602 + This option adds the ipmap set type support.
7603 +
7604 + To compile it as a module, choose M here. If unsure, say N.
7605 +
7606 +config IP_NF_SET_MACIPMAP
7607 + tristate "macipmap set support"
7608 + depends on IP_NF_SET
7609 + help
7610 + This option adds the macipmap set type support.
7611 +
7612 + To compile it as a module, choose M here. If unsure, say N.
7613 +
7614 +config IP_NF_SET_PORTMAP
7615 + tristate "portmap set support"
7616 + depends on IP_NF_SET
7617 + help
7618 + This option adds the portmap set type support.
7619 +
7620 + To compile it as a module, choose M here. If unsure, say N.
7621 +
7622 +config IP_NF_SET_IPHASH
7623 + tristate "iphash set support"
7624 + depends on IP_NF_SET
7625 + help
7626 + This option adds the iphash set type support.
7627 +
7628 + To compile it as a module, choose M here. If unsure, say N.
7629 +
7630 +config IP_NF_SET_NETHASH
7631 + tristate "nethash set support"
7632 + depends on IP_NF_SET
7633 + help
7634 + This option adds the nethash set type support.
7635 +
7636 + To compile it as a module, choose M here. If unsure, say N.
7637 +
7638 +config IP_NF_SET_IPPORTHASH
7639 + tristate "ipporthash set support"
7640 + depends on IP_NF_SET
7641 + help
7642 + This option adds the ipporthash set type support.
7643 +
7644 + To compile it as a module, choose M here. If unsure, say N.
7645 +
7646 +config IP_NF_SET_IPTREE
7647 + tristate "iptree set support"
7648 + depends on IP_NF_SET
7649 + help
7650 + This option adds the iptree set type support.
7651 +
7652 + To compile it as a module, choose M here. If unsure, say N.
7653 +
7654 +config IP_NF_SET_IPTREEMAP
7655 + tristate "iptreemap set support"
7656 + depends on IP_NF_SET
7657 + help
7658 + This option adds the iptreemap set type support.
7659 +
7660 + To compile it as a module, choose M here. If unsure, say N.
7661 +
7662 +config IP_NF_MATCH_SET
7663 + tristate "set match support"
7664 + depends on IP_NF_SET
7665 + help
7666 + Set matching matches packets against the given IP sets.
7667 + You need the ipset utility to create and set up the sets.
7668 +
7669 + To compile it as a module, choose M here. If unsure, say N.
7670 +
7671 +config IP_NF_TARGET_SET
7672 + tristate "SET target support"
7673 + depends on IP_NF_SET
7674 + help
7675 + The SET target makes it possible to add/delete entries
7676 + in IP sets.
7677 + You need the ipset utility to create and set up the sets.
7678 +
7679 + To compile it as a module, choose M here. If unsure, say N.
7680 +
7681 +
7682 endmenu
7683
7684 Index: linux-2.6.21.7/net/ipv4/netfilter/Makefile
7685 ===================================================================
7686 --- linux-2.6.21.7.orig/net/ipv4/netfilter/Makefile
7687 +++ linux-2.6.21.7/net/ipv4/netfilter/Makefile
7688 @@ -90,6 +90,7 @@ obj-$(CONFIG_IP_NF_MATCH_RECENT) += ipt_
7689 obj-$(CONFIG_IP_NF_MATCH_ECN) += ipt_ecn.o
7690 obj-$(CONFIG_IP_NF_MATCH_AH) += ipt_ah.o
7691 obj-$(CONFIG_IP_NF_MATCH_TTL) += ipt_ttl.o
7692 +obj-$(CONFIG_IP_NF_MATCH_SET) += ipt_set.o
7693 obj-$(CONFIG_IP_NF_MATCH_ADDRTYPE) += ipt_addrtype.o
7694 obj-$(CONFIG_IP_NF_MATCH_IPP2P) += ipt_ipp2p.o
7695
7696 @@ -106,6 +107,18 @@ obj-$(CONFIG_IP_NF_TARGET_LOG) += ipt_LO
7697 obj-$(CONFIG_IP_NF_TARGET_ULOG) += ipt_ULOG.o
7698 obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o
7699 obj-$(CONFIG_IP_NF_TARGET_TTL) += ipt_TTL.o
7700 +obj-$(CONFIG_IP_NF_TARGET_SET) += ipt_SET.o
7701 +
7702 +# sets
7703 +obj-$(CONFIG_IP_NF_SET) += ip_set.o
7704 +obj-$(CONFIG_IP_NF_SET_IPMAP) += ip_set_ipmap.o
7705 +obj-$(CONFIG_IP_NF_SET_PORTMAP) += ip_set_portmap.o
7706 +obj-$(CONFIG_IP_NF_SET_MACIPMAP) += ip_set_macipmap.o
7707 +obj-$(CONFIG_IP_NF_SET_IPHASH) += ip_set_iphash.o
7708 +obj-$(CONFIG_IP_NF_SET_NETHASH) += ip_set_nethash.o
7709 +obj-$(CONFIG_IP_NF_SET_IPPORTHASH) += ip_set_ipporthash.o
7710 +obj-$(CONFIG_IP_NF_SET_IPTREE) += ip_set_iptree.o
7711 +obj-$(CONFIG_IP_NF_SET_IPTREEMAP) += ip_set_iptreemap.o
7712
7713 # generic ARP tables
7714 obj-$(CONFIG_IP_NF_ARPTABLES) += arp_tables.o