fix ipset patch to have headers installed when using 'make -C $(KDIR) headers_install'
[openwrt/staging/chunkeey.git] target/linux/generic-2.6/patches-2.6.21/130-netfilter_ipset.patch
1 --- a/include/linux/netfilter_ipv4/Kbuild
2 +++ b/include/linux/netfilter_ipv4/Kbuild
3 @@ -59,3 +59,14 @@
4 unifdef-y += ip_nat_rule.h
5 unifdef-y += ip_queue.h
6 unifdef-y += ip_tables.h
7 +
8 +unifdef-y += ip_set.h
9 +header-y += ip_set_iphash.h
10 +header-y += ip_set_ipmap.h
11 +header-y += ip_set_ipporthash.h
12 +unifdef-y += ip_set_iptree.h
13 +unifdef-y += ip_set_iptreemap.h
14 +header-y += ip_set_jhash.h
15 +header-y += ip_set_macipmap.h
16 +unifdef-y += ip_set_nethash.h
17 +header-y += ip_set_portmap.h
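This Kbuild hunk is the actual fix: without the header-y/unifdef-y entries, 'make -C $(KDIR) headers_install' skips the ipset headers and userspace has to carry stale copies. Once exported, the headers compile in ordinary userspace code; a minimal smoke test, as a sketch that assumes the installed headers are on the include path:

	/* sketch: the exported headers are usable from plain userspace C */
	#include <stdint.h>
	#include <sys/types.h>
	#include <linux/netfilter_ipv4/ip_set.h>

	int main(void)
	{
		struct ip_set_req_version req = { .op = IP_SET_OP_VERSION };

		return req.op == IP_SET_OP_VERSION ? 0 : 1;
	}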
18 Index: linux-2.6.21.7/include/linux/netfilter_ipv4/ip_set.h
19 ===================================================================
20 --- /dev/null
21 +++ linux-2.6.21.7/include/linux/netfilter_ipv4/ip_set.h
22 @@ -0,0 +1,498 @@
23 +#ifndef _IP_SET_H
24 +#define _IP_SET_H
25 +
26 +/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
27 + * Patrick Schaaf <bof@bof.de>
28 + * Martin Josefsson <gandalf@wlug.westbo.se>
29 + * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
30 + *
31 + * This program is free software; you can redistribute it and/or modify
32 + * it under the terms of the GNU General Public License version 2 as
33 + * published by the Free Software Foundation.
34 + */
35 +
36 +#if 0
37 +#define IP_SET_DEBUG
38 +#endif
39 +
40 +/*
41 + * A sockopt of such quality has hardly ever been seen before on the open
42 + * market! This little beauty, hardly ever used: above 64, so it's
43 + * traditionally used for firewalling, not touched (even once!) by the
44 + * 2.0, 2.2 and 2.4 kernels!
45 + *
46 + * Comes with its own certificate of authenticity, valid anywhere in the
47 + * Free world!
48 + *
49 + * Rusty, 19.4.2000
50 + */
51 +#define SO_IP_SET 83
52 +
53 +/*
54 + * Heavily modified by Joakim Axelsson 08.03.2002
55 + * - Made it more module-based
56 + *
57 + * Additional heavy modifications by Jozsef Kadlecsik 22.02.2004
58 + * - bindings added
59 + * - in order to "deal with" backward compatibility, renamed to ipset
60 + */
61 +
62 +/*
63 + * Used so that the kernel module and ipset-binary can match their versions
64 + */
65 +#define IP_SET_PROTOCOL_VERSION 2
66 +
67 +#define IP_SET_MAXNAMELEN 32 /* set names and set typenames */
68 +
69 +/* Lets work with our own typedef for representing an IP address.
70 + * We hope to make the code more portable, possibly to IPv6...
71 + *
72 + * The representation works in HOST byte order, because most set types
73 + * will perform arithmetic operations and compare operations.
74 + *
75 + * For now the type is an uint32_t.
76 + *
77 + * Make sure to ONLY use the functions when translating and parsing
78 + * in order to keep the host byte order and make it more portable:
79 + * parse_ip()
80 + * parse_mask()
81 + * parse_ipandmask()
82 + * ip_tostring()
83 + * (Joakim: where are they???)
84 + */
85 +
86 +typedef uint32_t ip_set_ip_t;
87 +
88 +/* Sets are identified by an id in kernel space. Tweak with ip_set_id_t
89 + * and IP_SET_INVALID_ID if you want to increase the max number of sets.
90 + */
91 +typedef uint16_t ip_set_id_t;
92 +
93 +#define IP_SET_INVALID_ID 65535
94 +
95 +/* How deep we follow bindings */
96 +#define IP_SET_MAX_BINDINGS 6
97 +
98 +/*
99 + * Option flags for kernel operations (ipt_set_info)
100 + */
101 +#define IPSET_SRC 0x01 /* Source match/add */
102 +#define IPSET_DST 0x02 /* Destination match/add */
103 +#define IPSET_MATCH_INV 0x04 /* Inverse matching */
104 +
105 +/*
106 + * Set features
107 + */
108 +#define IPSET_TYPE_IP 0x01 /* IP address type of set */
109 +#define IPSET_TYPE_PORT 0x02 /* Port type of set */
110 +#define IPSET_DATA_SINGLE 0x04 /* Single data storage */
111 +#define IPSET_DATA_DOUBLE 0x08 /* Double data storage */
112 +
113 +/* Reserved keywords */
114 +#define IPSET_TOKEN_DEFAULT ":default:"
115 +#define IPSET_TOKEN_ALL ":all:"
116 +
117 +/* SO_IP_SET operation constants, and their request struct types.
118 + *
119 + * Operation ids:
120 + * 0-99: commands with version checking
121 + * 100-199: add/del/test/bind/unbind
122 + * 200-299: list, save, restore
123 + */
124 +
125 +/* Single shot operations:
126 + * version, create, destroy, flush, rename and swap
127 + *
128 + * Sets are identified by name.
129 + */
130 +
131 +#define IP_SET_REQ_STD \
132 + unsigned op; \
133 + unsigned version; \
134 + char name[IP_SET_MAXNAMELEN]
135 +
136 +#define IP_SET_OP_CREATE 0x00000001 /* Create a new (empty) set */
137 +struct ip_set_req_create {
138 + IP_SET_REQ_STD;
139 + char typename[IP_SET_MAXNAMELEN];
140 +};
141 +
142 +#define IP_SET_OP_DESTROY 0x00000002 /* Remove an (empty) set */
143 +struct ip_set_req_std {
144 + IP_SET_REQ_STD;
145 +};
146 +
147 +#define IP_SET_OP_FLUSH 0x00000003 /* Remove all IPs in a set */
148 +/* Uses ip_set_req_std */
149 +
150 +#define IP_SET_OP_RENAME 0x00000004 /* Rename a set */
151 +/* Uses ip_set_req_create */
152 +
153 +#define IP_SET_OP_SWAP 0x00000005 /* Swap two sets */
154 +/* Uses ip_set_req_create */
155 +
156 +union ip_set_name_index {
157 + char name[IP_SET_MAXNAMELEN];
158 + ip_set_id_t index;
159 +};
160 +
161 +#define IP_SET_OP_GET_BYNAME 0x00000006 /* Get set index by name */
162 +struct ip_set_req_get_set {
163 + unsigned op;
164 + unsigned version;
165 + union ip_set_name_index set;
166 +};
167 +
168 +#define IP_SET_OP_GET_BYINDEX 0x00000007 /* Get set name by index */
169 +/* Uses ip_set_req_get_set */
170 +
171 +#define IP_SET_OP_VERSION 0x00000100 /* Ask kernel version */
172 +struct ip_set_req_version {
173 + unsigned op;
174 + unsigned version;
175 +};
176 +
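The version handshake is the smallest example of the sockopt protocol sketched above: fill in the op field, hand the buffer to getsockopt() with SO_IP_SET, and the kernel writes its protocol version back. A hedged userspace sketch follows; the raw-socket/SOL_IP calling convention mirrors how the ipset utility is commonly built and should be treated as an assumption, not a verified ABI reference.

	/* sketch: query the kernel's ipset protocol version over SO_IP_SET */
	#include <stdint.h>
	#include <string.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/types.h>
	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <linux/netfilter_ipv4/ip_set.h>

	int ipset_protocol_version(void)
	{
		struct ip_set_req_version req;
		socklen_t size = sizeof(req);
		int ret, sockfd = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);

		if (sockfd < 0)
			return -1;
		memset(&req, 0, sizeof(req));
		req.op = IP_SET_OP_VERSION;
		ret = getsockopt(sockfd, SOL_IP, SO_IP_SET, &req, &size);
		close(sockfd);
		if (ret < 0)
			return -1;
		printf("kernel ipset protocol %u\n", req.version);
		return req.version;	/* should equal IP_SET_PROTOCOL_VERSION */
	}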
177 +/* Double shots operations:
178 + * add, del, test, bind and unbind.
179 + *
180 + * First we query the kernel to get the index and type of the target set,
181 + * then issue the command. Validity of the IP is checked in the kernel in order
182 + * to minimize sockopt operations.
183 + */
184 +
185 +/* Get minimal set data for add/del/test/bind/unbind IP */
186 +#define IP_SET_OP_ADT_GET 0x00000010 /* Get set and type */
187 +struct ip_set_req_adt_get {
188 + unsigned op;
189 + unsigned version;
190 + union ip_set_name_index set;
191 + char typename[IP_SET_MAXNAMELEN];
192 +};
193 +
194 +#define IP_SET_REQ_BYINDEX \
195 + unsigned op; \
196 + ip_set_id_t index;
197 +
198 +struct ip_set_req_adt {
199 + IP_SET_REQ_BYINDEX;
200 +};
201 +
202 +#define IP_SET_OP_ADD_IP 0x00000101 /* Add an IP to a set */
203 +/* Uses ip_set_req_adt, with type specific addage */
204 +
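Putting the two halves together, a "double shot" add first resolves the set with IP_SET_OP_ADT_GET (getsockopt), then sends a struct ip_set_req_adt followed immediately by the set type's own request structure (setsockopt), which matches how the kernel strips sizeof(struct ip_set_req_adt) before handing the remainder to the set type. A hedged sketch for an ipmap set; the socket level and exact buffer layout are assumptions for illustration:

	/* sketch: add one address to an existing ipmap set */
	#include <stdint.h>
	#include <string.h>
	#include <sys/types.h>
	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <linux/netfilter_ipv4/ip_set.h>
	#include <linux/netfilter_ipv4/ip_set_ipmap.h>

	int ipset_add_ipmap(int sockfd, const char *setname, ip_set_ip_t ip)
	{
		struct ip_set_req_adt_get get;
		socklen_t size = sizeof(get);
		struct {
			struct ip_set_req_adt adt;	/* generic part: op + index */
			struct ip_set_req_ipmap ipmap;	/* set-type specific part   */
		} req;

		/* step 1: resolve the set name to an index (and learn its type) */
		memset(&get, 0, sizeof(get));
		get.op = IP_SET_OP_ADT_GET;
		get.version = IP_SET_PROTOCOL_VERSION;
		strncpy(get.set.name, setname, IP_SET_MAXNAMELEN - 1);
		if (getsockopt(sockfd, SOL_IP, SO_IP_SET, &get, &size) < 0)
			return -1;

		/* step 2: issue the add against the returned index */
		memset(&req, 0, sizeof(req));
		req.adt.op = IP_SET_OP_ADD_IP;
		req.adt.index = get.set.index;
		req.ipmap.ip = ip;		/* host byte order, per ip_set_ip_t */
		return setsockopt(sockfd, SOL_IP, SO_IP_SET, &req, sizeof(req));
	}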
205 +#define IP_SET_OP_DEL_IP 0x00000102 /* Remove an IP from a set */
206 +/* Uses ip_set_req_adt, with type specific addage */
207 +
208 +#define IP_SET_OP_TEST_IP 0x00000103 /* Test an IP in a set */
209 +/* Uses ip_set_req_adt, with type specific addage */
210 +
211 +#define IP_SET_OP_BIND_SET 0x00000104 /* Bind an IP to a set */
212 +/* Uses ip_set_req_bind, with type specific addage */
213 +struct ip_set_req_bind {
214 + IP_SET_REQ_BYINDEX;
215 + char binding[IP_SET_MAXNAMELEN];
216 +};
217 +
218 +#define IP_SET_OP_UNBIND_SET 0x00000105 /* Unbind an IP from a set */
219 +/* Uses ip_set_req_bind, with type specific addage
220 + * index = 0 means unbinding for all sets */
221 +
222 +#define IP_SET_OP_TEST_BIND_SET 0x00000106 /* Test binding an IP to a set */
223 +/* Uses ip_set_req_bind, with type specific addage */
224 +
225 +/* Multiple shots operations: list, save, restore.
226 + *
227 + * - check kernel version and query the max number of sets
228 + * - get the basic information on all sets
229 + * and size required for the next step
230 + * - get actual set data: header, data, bindings
231 + */
232 +
233 +/* Get max_sets and the index of a queried set
234 + */
235 +#define IP_SET_OP_MAX_SETS 0x00000020
236 +struct ip_set_req_max_sets {
237 + unsigned op;
238 + unsigned version;
239 + ip_set_id_t max_sets; /* maximum number of sets */
240 + ip_set_id_t sets; /* real number of sets */
241 + union ip_set_name_index set; /* index of set if name used */
242 +};
243 +
244 +/* Get the id and name of the sets plus size for next step */
245 +#define IP_SET_OP_LIST_SIZE 0x00000201
246 +#define IP_SET_OP_SAVE_SIZE 0x00000202
247 +struct ip_set_req_setnames {
248 + unsigned op;
249 + ip_set_id_t index; /* set to list/save */
250 + size_t size; /* size to get setdata/bindings */
251 + /* followed by sets number of struct ip_set_name_list */
252 +};
253 +
254 +struct ip_set_name_list {
255 + char name[IP_SET_MAXNAMELEN];
256 + char typename[IP_SET_MAXNAMELEN];
257 + ip_set_id_t index;
258 + ip_set_id_t id;
259 +};
260 +
261 +/* The actual list operation */
262 +#define IP_SET_OP_LIST 0x00000203
263 +struct ip_set_req_list {
264 + IP_SET_REQ_BYINDEX;
265 + /* sets number of struct ip_set_list in reply */
266 +};
267 +
268 +struct ip_set_list {
269 + ip_set_id_t index;
270 + ip_set_id_t binding;
271 + u_int32_t ref;
272 + size_t header_size; /* Set header data of header_size */
273 + size_t members_size; /* Set members data of members_size */
274 + size_t bindings_size; /* Set bindings data of bindings_size */
275 +};
276 +
277 +struct ip_set_hash_list {
278 + ip_set_ip_t ip;
279 + ip_set_id_t binding;
280 +};
281 +
282 +/* The save operation */
283 +#define IP_SET_OP_SAVE 0x00000204
284 +/* Uses ip_set_req_list, in the reply replaced by
285 + * sets number of struct ip_set_save plus a marker
286 + * ip_set_save followed by ip_set_hash_save structures.
287 + */
288 +struct ip_set_save {
289 + ip_set_id_t index;
290 + ip_set_id_t binding;
291 + size_t header_size; /* Set header data of header_size */
292 + size_t members_size; /* Set members data of members_size */
293 +};
294 +
295 +/* At restoring, ip == 0 means default binding for the given set: */
296 +struct ip_set_hash_save {
297 + ip_set_ip_t ip;
298 + ip_set_id_t id;
299 + ip_set_id_t binding;
300 +};
301 +
302 +/* The restore operation */
303 +#define IP_SET_OP_RESTORE 0x00000205
304 +/* Uses ip_set_req_setnames followed by ip_set_restore structures
305 + * plus a marker ip_set_restore, followed by ip_set_hash_save
306 + * structures.
307 + */
308 +struct ip_set_restore {
309 + char name[IP_SET_MAXNAMELEN];
310 + char typename[IP_SET_MAXNAMELEN];
311 + ip_set_id_t index;
312 + size_t header_size; /* Create data of header_size */
313 + size_t members_size; /* Set members data of members_size */
314 +};
315 +
316 +static inline int bitmap_bytes(ip_set_ip_t a, ip_set_ip_t b)
317 +{
318 + return 4 * ((((b - a + 8) / 8) + 3) / 4);
319 +}
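bitmap_bytes() returns the storage needed for a one-bit-per-address bitmap over the inclusive range [a, b], rounded up to a multiple of four bytes. Two worked values, for orientation:

	/* bitmap_bytes(0, 255)   == 32    bytes (256 bits, a full /24)        */
	/* bitmap_bytes(0, 65535) == 8192  bytes (65536 bits, MAX_RANGE + 1)   */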
320 +
321 +#ifdef __KERNEL__
322 +
323 +#define ip_set_printk(format, args...) \
324 + do { \
325 + printk("%s: %s: ", __FILE__, __FUNCTION__); \
326 + printk(format "\n" , ## args); \
327 + } while (0)
328 +
329 +#if defined(IP_SET_DEBUG)
330 +#define DP(format, args...) \
331 + do { \
332 + printk("%s: %s (DBG): ", __FILE__, __FUNCTION__);\
333 + printk(format "\n" , ## args); \
334 + } while (0)
335 +#define IP_SET_ASSERT(x) \
336 + do { \
337 + if (!(x)) \
338 + printk("IP_SET_ASSERT: %s:%i(%s)\n", \
339 + __FILE__, __LINE__, __FUNCTION__); \
340 + } while (0)
341 +#else
342 +#define DP(format, args...)
343 +#define IP_SET_ASSERT(x)
344 +#endif
345 +
346 +struct ip_set;
347 +
348 +/*
349 + * The ip_set_type definition - one per set type, e.g. "ipmap".
350 + *
351 + * Each individual set has a pointer, set->type, going to one
352 + * of these structures. Function pointers inside the structure implement
353 + * the real behaviour of the sets.
354 + *
355 + * If not mentioned differently, the implementation behind the function
356 + * pointers of a set_type, is expected to return 0 if ok, and a negative
357 + * errno (e.g. -EINVAL) on error.
358 + */
359 +struct ip_set_type {
360 + struct list_head list; /* next in list of set types */
361 +
362 + /* test for IP in set (kernel: iptables -m set src|dst)
363 + * return 0 if not in set, 1 if in set.
364 + */
365 + int (*testip_kernel) (struct ip_set *set,
366 + const struct sk_buff * skb,
367 + ip_set_ip_t *ip,
368 + const u_int32_t *flags,
369 + unsigned char index);
370 +
371 + /* test for IP in set (userspace: ipset -T set IP)
372 + * return 0 if not in set, 1 if in set.
373 + */
374 + int (*testip) (struct ip_set *set,
375 + const void *data, size_t size,
376 + ip_set_ip_t *ip);
377 +
378 + /*
379 + * Size of the data structure passed in when
380 + * adding/deleting/testing an entry.
381 + */
382 + size_t reqsize;
383 +
384 + /* Add IP into set (userspace: ipset -A set IP)
385 + * Return -EEXIST if the address is already in the set,
386 + * and -ERANGE if the address lies outside the set bounds.
387 + * If the address was not already in the set, 0 is returned.
388 + */
389 + int (*addip) (struct ip_set *set,
390 + const void *data, size_t size,
391 + ip_set_ip_t *ip);
392 +
393 + /* Add IP into set (kernel: iptables ... -j SET set src|dst)
394 + * Return -EEXIST if the address is already in the set,
395 + * and -ERANGE if the address lies outside the set bounds.
396 + * If the address was not already in the set, 0 is returned.
397 + */
398 + int (*addip_kernel) (struct ip_set *set,
399 + const struct sk_buff * skb,
400 + ip_set_ip_t *ip,
401 + const u_int32_t *flags,
402 + unsigned char index);
403 +
404 + /* remove IP from set (userspace: ipset -D set --entry x)
405 + * Return -EEXIST if the address is NOT in the set,
406 + * and -ERANGE if the address lies outside the set bounds.
407 + * If the address really was in the set, 0 is returned.
408 + */
409 + int (*delip) (struct ip_set *set,
410 + const void *data, size_t size,
411 + ip_set_ip_t *ip);
412 +
413 + /* remove IP from set (kernel: iptables ... -j SET --entry x)
414 + * Return -EEXIST if the address is NOT in the set,
415 + * and -ERANGE if the address lies outside the set bounds.
416 + * If the address really was in the set, 0 is returned.
417 + */
418 + int (*delip_kernel) (struct ip_set *set,
419 + const struct sk_buff * skb,
420 + ip_set_ip_t *ip,
421 + const u_int32_t *flags,
422 + unsigned char index);
423 +
424 + /* new set creation - allocate type specific items
425 + */
426 + int (*create) (struct ip_set *set,
427 + const void *data, size_t size);
428 +
429 + /* retry the operation after successfully tweaking the set
430 + */
431 + int (*retry) (struct ip_set *set);
432 +
433 + /* set destruction - free type specific items
434 + * There is no return value.
435 + * Can be called only when child sets are destroyed.
436 + */
437 + void (*destroy) (struct ip_set *set);
438 +
439 + /* set flushing - reset all bits in the set, or something similar.
440 + * There is no return value.
441 + */
442 + void (*flush) (struct ip_set *set);
443 +
444 + /* Listing: size needed for header
445 + */
446 + size_t header_size;
447 +
448 + /* Listing: Get the header
449 + *
450 + * Fill in the information in "data".
451 + * This function is always run after list_header_size() under a
452 + * writelock on the set. Therefore the length of "data" is always
453 + * correct.
454 + */
455 + void (*list_header) (const struct ip_set *set,
456 + void *data);
457 +
458 + /* Listing: Get the size for the set members
459 + */
460 + int (*list_members_size) (const struct ip_set *set);
461 +
462 + /* Listing: Get the set members
463 + *
464 + * Fill in the information in "data".
465 + * This function is always run after list_member_size() under a
466 + * writelock on the set. Therefore the length of "data" is always
467 + * correct.
468 + */
469 + void (*list_members) (const struct ip_set *set,
470 + void *data);
471 +
472 + char typename[IP_SET_MAXNAMELEN];
473 + unsigned char features;
474 + int protocol_version;
475 +
476 + /* Set this to THIS_MODULE if you are a module, otherwise NULL */
477 + struct module *me;
478 +};
479 +
480 +extern int ip_set_register_set_type(struct ip_set_type *set_type);
481 +extern void ip_set_unregister_set_type(struct ip_set_type *set_type);
482 +
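For reference, a set type module wires its handlers into one static struct ip_set_type and registers it at init time. A minimal, hedged skeleton of that boilerplate is sketched below; "mytype" and its omitted handlers are illustrative names, not part of this patch.

	/* sketch: skeleton of a set type module built on the API above */
	#include <linux/module.h>
	#include <linux/netfilter_ipv4/ip_set.h>

	static struct ip_set_type ip_set_mytype = {
		.typename		= "mytype",
		.features		= IPSET_TYPE_IP | IPSET_DATA_SINGLE,
		.protocol_version	= IP_SET_PROTOCOL_VERSION,
		/* .create, .destroy, .flush, .addip(_kernel), .delip(_kernel),
		 * .testip(_kernel), .list_* and .reqsize/.header_size point at
		 * the type's handlers and request sizes (omitted in this sketch) */
		.me			= THIS_MODULE,
	};

	static int __init ip_set_mytype_init(void)
	{
		return ip_set_register_set_type(&ip_set_mytype);
	}

	static void __exit ip_set_mytype_fini(void)
	{
		ip_set_unregister_set_type(&ip_set_mytype);
	}

	module_init(ip_set_mytype_init);
	module_exit(ip_set_mytype_fini);
	MODULE_LICENSE("GPL");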
483 +/* A generic ipset */
484 +struct ip_set {
485 + char name[IP_SET_MAXNAMELEN]; /* the name of the set */
486 + rwlock_t lock; /* lock for concurrency control */
487 + ip_set_id_t id; /* set id for swapping */
488 + ip_set_id_t binding; /* default binding for the set */
489 + atomic_t ref; /* in kernel and in hash references */
490 + struct ip_set_type *type; /* the set's type */
491 + void *data; /* pooltype specific data */
492 +};
493 +
494 +/* Structure to bind set elements to sets */
495 +struct ip_set_hash {
496 + struct list_head list; /* list of clashing entries in hash */
497 + ip_set_ip_t ip; /* ip from set */
498 + ip_set_id_t id; /* set id */
499 + ip_set_id_t binding; /* set we bind the element to */
500 +};
501 +
502 +/* register and unregister set references */
503 +extern ip_set_id_t ip_set_get_byname(const char name[IP_SET_MAXNAMELEN]);
504 +extern ip_set_id_t ip_set_get_byindex(ip_set_id_t id);
505 +extern void ip_set_put(ip_set_id_t id);
506 +
507 +/* API for iptables set match, and SET target */
508 +extern void ip_set_addip_kernel(ip_set_id_t id,
509 + const struct sk_buff *skb,
510 + const u_int32_t *flags);
511 +extern void ip_set_delip_kernel(ip_set_id_t id,
512 + const struct sk_buff *skb,
513 + const u_int32_t *flags);
514 +extern int ip_set_testip_kernel(ip_set_id_t id,
515 + const struct sk_buff *skb,
516 + const u_int32_t *flags);
517 +
518 +#endif /* __KERNEL__ */
519 +
520 +#endif /*_IP_SET_H*/
521 Index: linux-2.6.21.7/include/linux/netfilter_ipv4/ip_set_iphash.h
522 ===================================================================
523 --- /dev/null
524 +++ linux-2.6.21.7/include/linux/netfilter_ipv4/ip_set_iphash.h
525 @@ -0,0 +1,30 @@
526 +#ifndef __IP_SET_IPHASH_H
527 +#define __IP_SET_IPHASH_H
528 +
529 +#include <linux/netfilter_ipv4/ip_set.h>
530 +
531 +#define SETTYPE_NAME "iphash"
532 +#define MAX_RANGE 0x0000FFFF
533 +
534 +struct ip_set_iphash {
535 + ip_set_ip_t *members; /* the iphash proper */
536 + uint32_t elements; /* number of elements */
537 + uint32_t hashsize; /* hash size */
538 + uint16_t probes; /* max number of probes */
539 + uint16_t resize; /* resize factor in percent */
540 + ip_set_ip_t netmask; /* netmask */
541 + void *initval[0]; /* initvals for jhash_1word */
542 +};
543 +
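The hashsize/probes/initval trio drives open hashing with multiple probes: an address is hashed up to 'probes' times, each time with a different initval, and the first matching or free slot wins; when every probe collides the set is resized by 'resize' percent and the operation retried. A hedged sketch of the lookup idea, treating members as a flat array for clarity (the real set type implementation goes through the harray helpers in ip_set_malloc.h, and iphash_probe here is an illustrative name):

	#include <linux/netfilter_ipv4/ip_set_iphash.h>
	#include <linux/netfilter_ipv4/ip_set_jhash.h>

	/* sketch: multi-probe lookup over the hash of ip_set_ip_t slots */
	static ip_set_ip_t *
	iphash_probe(struct ip_set_iphash *map, ip_set_ip_t ip)
	{
		uint32_t *initval = (uint32_t *) map->initval;
		ip_set_ip_t *slot;
		uint16_t probe;

		for (probe = 0; probe < map->probes; probe++) {
			/* a different initval per probe spreads collisions */
			slot = &map->members[jhash_1word(ip, initval[probe]) % map->hashsize];
			if (*slot == ip || *slot == 0)
				return slot;	/* hit, or a free slot to claim */
		}
		return NULL;	/* every probe collided: resize and retry */
	}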
544 +struct ip_set_req_iphash_create {
545 + uint32_t hashsize;
546 + uint16_t probes;
547 + uint16_t resize;
548 + ip_set_ip_t netmask;
549 +};
550 +
551 +struct ip_set_req_iphash {
552 + ip_set_ip_t ip;
553 +};
554 +
555 +#endif /* __IP_SET_IPHASH_H */
556 Index: linux-2.6.21.7/include/linux/netfilter_ipv4/ip_set_ipmap.h
557 ===================================================================
558 --- /dev/null
559 +++ linux-2.6.21.7/include/linux/netfilter_ipv4/ip_set_ipmap.h
560 @@ -0,0 +1,56 @@
561 +#ifndef __IP_SET_IPMAP_H
562 +#define __IP_SET_IPMAP_H
563 +
564 +#include <linux/netfilter_ipv4/ip_set.h>
565 +
566 +#define SETTYPE_NAME "ipmap"
567 +#define MAX_RANGE 0x0000FFFF
568 +
569 +struct ip_set_ipmap {
570 + void *members; /* the ipmap proper */
571 + ip_set_ip_t first_ip; /* host byte order, included in range */
572 + ip_set_ip_t last_ip; /* host byte order, included in range */
573 + ip_set_ip_t netmask; /* subnet netmask */
574 + ip_set_ip_t sizeid; /* size of set in IPs */
575 + ip_set_ip_t hosts; /* number of hosts in a subnet */
576 +};
577 +
578 +struct ip_set_req_ipmap_create {
579 + ip_set_ip_t from;
580 + ip_set_ip_t to;
581 + ip_set_ip_t netmask;
582 +};
583 +
584 +struct ip_set_req_ipmap {
585 + ip_set_ip_t ip;
586 +};
587 +
588 +unsigned int
589 +mask_to_bits(ip_set_ip_t mask)
590 +{
591 + unsigned int bits = 32;
592 + ip_set_ip_t maskaddr;
593 +
594 + if (mask == 0xFFFFFFFF)
595 + return bits;
596 +
597 + maskaddr = 0xFFFFFFFE;
598 + while (--bits > 0 && maskaddr != mask)
599 + maskaddr <<= 1;
600 +
601 + return bits;
602 +}
603 +
604 +ip_set_ip_t
605 +range_to_mask(ip_set_ip_t from, ip_set_ip_t to, unsigned int *bits)
606 +{
607 + ip_set_ip_t mask = 0xFFFFFFFE;
608 +
609 + *bits = 32;
610 + while (--(*bits) > 0 && mask && (to & mask) != from)
611 + mask <<= 1;
612 +
613 + return mask;
614 +}
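As a worked example of the two helpers above, take the range 192.168.0.0-192.168.0.255 (host byte order 0xC0A80000-0xC0A800FF): range_to_mask() settles on the /24 mask, and mask_to_bits() maps it back to the prefix length.

	/* range_to_mask(0xC0A80000, 0xC0A800FF, &bits) -> 0xFFFFFF00, bits == 24 */
	/* mask_to_bits(0xFFFFFF00)                     -> 24                     */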
615 +
616 +#endif /* __IP_SET_IPMAP_H */
617 Index: linux-2.6.21.7/include/linux/netfilter_ipv4/ip_set_ipporthash.h
618 ===================================================================
619 --- /dev/null
620 +++ linux-2.6.21.7/include/linux/netfilter_ipv4/ip_set_ipporthash.h
621 @@ -0,0 +1,34 @@
622 +#ifndef __IP_SET_IPPORTHASH_H
623 +#define __IP_SET_IPPORTHASH_H
624 +
625 +#include <linux/netfilter_ipv4/ip_set.h>
626 +
627 +#define SETTYPE_NAME "ipporthash"
628 +#define MAX_RANGE 0x0000FFFF
629 +#define INVALID_PORT (MAX_RANGE + 1)
630 +
631 +struct ip_set_ipporthash {
632 + ip_set_ip_t *members; /* the ipporthash proper */
633 + uint32_t elements; /* number of elements */
634 + uint32_t hashsize; /* hash size */
635 + uint16_t probes; /* max number of probes */
636 + uint16_t resize; /* resize factor in percent */
637 + ip_set_ip_t first_ip; /* host byte order, included in range */
638 + ip_set_ip_t last_ip; /* host byte order, included in range */
639 + void *initval[0]; /* initvals for jhash_1word */
640 +};
641 +
642 +struct ip_set_req_ipporthash_create {
643 + uint32_t hashsize;
644 + uint16_t probes;
645 + uint16_t resize;
646 + ip_set_ip_t from;
647 + ip_set_ip_t to;
648 +};
649 +
650 +struct ip_set_req_ipporthash {
651 + ip_set_ip_t ip;
652 + ip_set_ip_t port;
653 +};
654 +
655 +#endif /* __IP_SET_IPPORTHASH_H */
656 Index: linux-2.6.21.7/include/linux/netfilter_ipv4/ip_set_iptree.h
657 ===================================================================
658 --- /dev/null
659 +++ linux-2.6.21.7/include/linux/netfilter_ipv4/ip_set_iptree.h
660 @@ -0,0 +1,40 @@
661 +#ifndef __IP_SET_IPTREE_H
662 +#define __IP_SET_IPTREE_H
663 +
664 +#include <linux/netfilter_ipv4/ip_set.h>
665 +
666 +#define SETTYPE_NAME "iptree"
667 +#define MAX_RANGE 0x0000FFFF
668 +
669 +struct ip_set_iptreed {
670 + unsigned long expires[256]; /* x.x.x.ADDR */
671 +};
672 +
673 +struct ip_set_iptreec {
674 + struct ip_set_iptreed *tree[256]; /* x.x.ADDR.* */
675 +};
676 +
677 +struct ip_set_iptreeb {
678 + struct ip_set_iptreec *tree[256]; /* x.ADDR.*.* */
679 +};
680 +
681 +struct ip_set_iptree {
682 + unsigned int timeout;
683 + unsigned int gc_interval;
684 +#ifdef __KERNEL__
685 + uint32_t elements; /* number of elements */
686 + struct timer_list gc;
687 + struct ip_set_iptreeb *tree[256]; /* ADDR.*.*.* */
688 +#endif
689 +};
690 +
691 +struct ip_set_req_iptree_create {
692 + unsigned int timeout;
693 +};
694 +
695 +struct ip_set_req_iptree {
696 + ip_set_ip_t ip;
697 + unsigned int timeout;
698 +};
699 +
700 +#endif /* __IP_SET_IPTREE_H */
701 Index: linux-2.6.21.7/include/linux/netfilter_ipv4/ip_set_iptreemap.h
702 ===================================================================
703 --- /dev/null
704 +++ linux-2.6.21.7/include/linux/netfilter_ipv4/ip_set_iptreemap.h
705 @@ -0,0 +1,40 @@
706 +#ifndef __IP_SET_IPTREEMAP_H
707 +#define __IP_SET_IPTREEMAP_H
708 +
709 +#include <linux/netfilter_ipv4/ip_set.h>
710 +
711 +#define SETTYPE_NAME "iptreemap"
712 +
713 +#ifdef __KERNEL__
714 +struct ip_set_iptreemap_d {
715 + unsigned char bitmap[32]; /* x.x.x.y */
716 +};
717 +
718 +struct ip_set_iptreemap_c {
719 + struct ip_set_iptreemap_d *tree[256]; /* x.x.y.x */
720 +};
721 +
722 +struct ip_set_iptreemap_b {
723 + struct ip_set_iptreemap_c *tree[256]; /* x.y.x.x */
724 + unsigned char dirty[32];
725 +};
726 +#endif
727 +
728 +struct ip_set_iptreemap {
729 + unsigned int gc_interval;
730 +#ifdef __KERNEL__
731 + struct timer_list gc;
732 + struct ip_set_iptreemap_b *tree[256]; /* y.x.x.x */
733 +#endif
734 +};
735 +
736 +struct ip_set_req_iptreemap_create {
737 + unsigned int gc_interval;
738 +};
739 +
740 +struct ip_set_req_iptreemap {
741 + ip_set_ip_t start;
742 + ip_set_ip_t end;
743 +};
744 +
745 +#endif /* __IP_SET_IPTREEMAP_H */
746 Index: linux-2.6.21.7/include/linux/netfilter_ipv4/ip_set_jhash.h
747 ===================================================================
748 --- /dev/null
749 +++ linux-2.6.21.7/include/linux/netfilter_ipv4/ip_set_jhash.h
750 @@ -0,0 +1,148 @@
751 +#ifndef _LINUX_IPSET_JHASH_H
752 +#define _LINUX_IPSET_JHASH_H
753 +
754 +/* This is a copy of linux/jhash.h but the types u32/u8 are changed
755 + * to __u32/__u8 so that the header file can be included into
756 + * userspace code as well. Jozsef Kadlecsik (kadlec@blackhole.kfki.hu)
757 + */
758 +
759 +/* jhash.h: Jenkins hash support.
760 + *
761 + * Copyright (C) 1996 Bob Jenkins (bob_jenkins@burtleburtle.net)
762 + *
763 + * http://burtleburtle.net/bob/hash/
764 + *
765 + * These are the credits from Bob's sources:
766 + *
767 + * lookup2.c, by Bob Jenkins, December 1996, Public Domain.
768 + * hash(), hash2(), hash3, and mix() are externally useful functions.
769 + * Routines to test the hash are included if SELF_TEST is defined.
770 + * You can use this free for any purpose. It has no warranty.
771 + *
772 + * Copyright (C) 2003 David S. Miller (davem@redhat.com)
773 + *
774 + * I've modified Bob's hash to be useful in the Linux kernel, and
775 + * any bugs present are surely my fault. -DaveM
776 + */
777 +
778 +/* NOTE: Arguments are modified. */
779 +#define __jhash_mix(a, b, c) \
780 +{ \
781 + a -= b; a -= c; a ^= (c>>13); \
782 + b -= c; b -= a; b ^= (a<<8); \
783 + c -= a; c -= b; c ^= (b>>13); \
784 + a -= b; a -= c; a ^= (c>>12); \
785 + b -= c; b -= a; b ^= (a<<16); \
786 + c -= a; c -= b; c ^= (b>>5); \
787 + a -= b; a -= c; a ^= (c>>3); \
788 + b -= c; b -= a; b ^= (a<<10); \
789 + c -= a; c -= b; c ^= (b>>15); \
790 +}
791 +
792 +/* The golden ratio: an arbitrary value */
793 +#define JHASH_GOLDEN_RATIO 0x9e3779b9
794 +
795 +/* The most generic version: hashes an arbitrary sequence
796 + * of bytes. No alignment or length assumptions are made about
797 + * the input key.
798 + */
799 +static inline __u32 jhash(void *key, __u32 length, __u32 initval)
800 +{
801 + __u32 a, b, c, len;
802 + __u8 *k = key;
803 +
804 + len = length;
805 + a = b = JHASH_GOLDEN_RATIO;
806 + c = initval;
807 +
808 + while (len >= 12) {
809 + a += (k[0] +((__u32)k[1]<<8) +((__u32)k[2]<<16) +((__u32)k[3]<<24));
810 + b += (k[4] +((__u32)k[5]<<8) +((__u32)k[6]<<16) +((__u32)k[7]<<24));
811 + c += (k[8] +((__u32)k[9]<<8) +((__u32)k[10]<<16)+((__u32)k[11]<<24));
812 +
813 + __jhash_mix(a,b,c);
814 +
815 + k += 12;
816 + len -= 12;
817 + }
818 +
819 + c += length;
820 + switch (len) {
821 + case 11: c += ((__u32)k[10]<<24);
822 + case 10: c += ((__u32)k[9]<<16);
823 + case 9 : c += ((__u32)k[8]<<8);
824 + case 8 : b += ((__u32)k[7]<<24);
825 + case 7 : b += ((__u32)k[6]<<16);
826 + case 6 : b += ((__u32)k[5]<<8);
827 + case 5 : b += k[4];
828 + case 4 : a += ((__u32)k[3]<<24);
829 + case 3 : a += ((__u32)k[2]<<16);
830 + case 2 : a += ((__u32)k[1]<<8);
831 + case 1 : a += k[0];
832 + };
833 +
834 + __jhash_mix(a,b,c);
835 +
836 + return c;
837 +}
838 +
839 +/* A special optimized version that handles 1 or more __u32s.
840 + * The length parameter here is the number of __u32s in the key.
841 + */
842 +static inline __u32 jhash2(__u32 *k, __u32 length, __u32 initval)
843 +{
844 + __u32 a, b, c, len;
845 +
846 + a = b = JHASH_GOLDEN_RATIO;
847 + c = initval;
848 + len = length;
849 +
850 + while (len >= 3) {
851 + a += k[0];
852 + b += k[1];
853 + c += k[2];
854 + __jhash_mix(a, b, c);
855 + k += 3; len -= 3;
856 + }
857 +
858 + c += length * 4;
859 +
860 + switch (len) {
861 + case 2 : b += k[1];
862 + case 1 : a += k[0];
863 + };
864 +
865 + __jhash_mix(a,b,c);
866 +
867 + return c;
868 +}
869 +
870 +
871 +/* Special ultra-optimized versions that know they are hashing exactly
872 + * 3, 2 or 1 word(s).
873 + *
874 + * NOTE: In particular the "c += length; __jhash_mix(a,b,c);" normally
875 + * done at the end is not done here.
876 + */
877 +static inline __u32 jhash_3words(__u32 a, __u32 b, __u32 c, __u32 initval)
878 +{
879 + a += JHASH_GOLDEN_RATIO;
880 + b += JHASH_GOLDEN_RATIO;
881 + c += initval;
882 +
883 + __jhash_mix(a, b, c);
884 +
885 + return c;
886 +}
887 +
888 +static inline __u32 jhash_2words(__u32 a, __u32 b, __u32 initval)
889 +{
890 + return jhash_3words(a, b, 0, initval);
891 +}
892 +
893 +static inline __u32 jhash_1word(__u32 a, __u32 initval)
894 +{
895 + return jhash_3words(a, 0, 0, initval);
896 +}
897 +
898 +#endif /* _LINUX_IPSET_JHASH_H */
899 Index: linux-2.6.21.7/include/linux/netfilter_ipv4/ip_set_macipmap.h
900 ===================================================================
901 --- /dev/null
902 +++ linux-2.6.21.7/include/linux/netfilter_ipv4/ip_set_macipmap.h
903 @@ -0,0 +1,38 @@
904 +#ifndef __IP_SET_MACIPMAP_H
905 +#define __IP_SET_MACIPMAP_H
906 +
907 +#include <linux/netfilter_ipv4/ip_set.h>
908 +
909 +#define SETTYPE_NAME "macipmap"
910 +#define MAX_RANGE 0x0000FFFF
911 +
912 +/* general flags */
913 +#define IPSET_MACIP_MATCHUNSET 1
914 +
915 +/* per ip flags */
916 +#define IPSET_MACIP_ISSET 1
917 +
918 +struct ip_set_macipmap {
919 + void *members; /* the macipmap proper */
920 + ip_set_ip_t first_ip; /* host byte order, included in range */
921 + ip_set_ip_t last_ip; /* host byte order, included in range */
922 + u_int32_t flags;
923 +};
924 +
925 +struct ip_set_req_macipmap_create {
926 + ip_set_ip_t from;
927 + ip_set_ip_t to;
928 + u_int32_t flags;
929 +};
930 +
931 +struct ip_set_req_macipmap {
932 + ip_set_ip_t ip;
933 + unsigned char ethernet[ETH_ALEN];
934 +};
935 +
936 +struct ip_set_macip {
937 + unsigned short flags;
938 + unsigned char ethernet[ETH_ALEN];
939 +};
940 +
941 +#endif /* __IP_SET_MACIPMAP_H */
942 Index: linux-2.6.21.7/include/linux/netfilter_ipv4/ip_set_malloc.h
943 ===================================================================
944 --- /dev/null
945 +++ linux-2.6.21.7/include/linux/netfilter_ipv4/ip_set_malloc.h
946 @@ -0,0 +1,116 @@
947 +#ifndef _IP_SET_MALLOC_H
948 +#define _IP_SET_MALLOC_H
949 +
950 +#ifdef __KERNEL__
951 +
952 +/* Memory allocation and deallocation */
953 +static size_t max_malloc_size = 0;
954 +
955 +static inline void init_max_malloc_size(void)
956 +{
957 +#define CACHE(x) max_malloc_size = x;
958 +#include <linux/kmalloc_sizes.h>
959 +#undef CACHE
960 +}
961 +
962 +static inline void * ip_set_malloc(size_t bytes)
963 +{
964 + if (bytes > max_malloc_size)
965 + return vmalloc(bytes);
966 + else
967 + return kmalloc(bytes, GFP_KERNEL);
968 +}
969 +
970 +static inline void ip_set_free(void * data, size_t bytes)
971 +{
972 + if (bytes > max_malloc_size)
973 + vfree(data);
974 + else
975 + kfree(data);
976 +}
977 +
978 +struct harray {
979 + size_t max_elements;
980 + void *arrays[0];
981 +};
982 +
983 +static inline void *
984 +harray_malloc(size_t hashsize, size_t typesize, int flags)
985 +{
986 + struct harray *harray;
987 + size_t max_elements, size, i, j;
988 +
989 + if (!max_malloc_size)
990 + init_max_malloc_size();
991 +
992 + if (typesize > max_malloc_size)
993 + return NULL;
994 +
995 + max_elements = max_malloc_size/typesize;
996 + size = hashsize/max_elements;
997 + if (hashsize % max_elements)
998 + size++;
999 +
1000 + /* Last pointer signals end of arrays */
1001 + harray = kmalloc(sizeof(struct harray) + (size + 1) * sizeof(void *),
1002 + flags);
1003 +
1004 + if (!harray)
1005 + return NULL;
1006 +
1007 + for (i = 0; i < size - 1; i++) {
1008 + harray->arrays[i] = kmalloc(max_elements * typesize, flags);
1009 + if (!harray->arrays[i])
1010 + goto undo;
1011 + memset(harray->arrays[i], 0, max_elements * typesize);
1012 + }
1013 + harray->arrays[i] = kmalloc((hashsize - i * max_elements) * typesize,
1014 + flags);
1015 + if (!harray->arrays[i])
1016 + goto undo;
1017 + memset(harray->arrays[i], 0, (hashsize - i * max_elements) * typesize);
1018 +
1019 + harray->max_elements = max_elements;
1020 + harray->arrays[size] = NULL;
1021 +
1022 + return (void *)harray;
1023 +
1024 + undo:
1025 + for (j = 0; j < i; j++) {
1026 + kfree(harray->arrays[j]);
1027 + }
1028 + kfree(harray);
1029 + return NULL;
1030 +}
1031 +
1032 +static inline void harray_free(void *h)
1033 +{
1034 + struct harray *harray = (struct harray *) h;
1035 + size_t i;
1036 +
1037 + for (i = 0; harray->arrays[i] != NULL; i++)
1038 + kfree(harray->arrays[i]);
1039 + kfree(harray);
1040 +}
1041 +
1042 +static inline void harray_flush(void *h, size_t hashsize, size_t typesize)
1043 +{
1044 + struct harray *harray = (struct harray *) h;
1045 + size_t i;
1046 +
1047 + for (i = 0; harray->arrays[i+1] != NULL; i++)
1048 + memset(harray->arrays[i], 0, harray->max_elements * typesize);
1049 + memset(harray->arrays[i], 0,
1050 + (hashsize - i * harray->max_elements) * typesize);
1051 +}
1052 +
1053 +#define HARRAY_ELEM(h, type, which) \
1054 +({ \
1055 + struct harray *__h = (struct harray *)(h); \
1056 + ((type)((__h)->arrays[(which)/(__h)->max_elements]) \
1057 + + (which)%(__h)->max_elements); \
1058 +})
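The harray helpers exist because a large hash cannot always be kmalloc'ed in one piece: harray_malloc() splits it into chunks of at most max_malloc_size bytes, and HARRAY_ELEM() hides the resulting two-level indexing. A small kernel-side sketch of the intended usage (example_store and its parameters are illustrative only):

	/* sketch: a chunked hash of 'hashsize' slots, indexed through HARRAY_ELEM */
	static int example_store(size_t hashsize, uint32_t id, ip_set_ip_t ip)
	{
		void *members = harray_malloc(hashsize, sizeof(ip_set_ip_t), GFP_KERNEL);
		ip_set_ip_t *elem;

		if (!members)
			return -ENOMEM;
		elem = HARRAY_ELEM(members, ip_set_ip_t *, id);	/* id < hashsize */
		*elem = ip;
		harray_free(members);
		return 0;
	}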
1059 +
1060 +#endif /* __KERNEL__ */
1061 +
1062 +#endif /*_IP_SET_MALLOC_H*/
1063 Index: linux-2.6.21.7/include/linux/netfilter_ipv4/ip_set_nethash.h
1064 ===================================================================
1065 --- /dev/null
1066 +++ linux-2.6.21.7/include/linux/netfilter_ipv4/ip_set_nethash.h
1067 @@ -0,0 +1,55 @@
1068 +#ifndef __IP_SET_NETHASH_H
1069 +#define __IP_SET_NETHASH_H
1070 +
1071 +#include <linux/netfilter_ipv4/ip_set.h>
1072 +
1073 +#define SETTYPE_NAME "nethash"
1074 +#define MAX_RANGE 0x0000FFFF
1075 +
1076 +struct ip_set_nethash {
1077 + ip_set_ip_t *members; /* the nethash proper */
1078 + uint32_t elements; /* number of elements */
1079 + uint32_t hashsize; /* hash size */
1080 + uint16_t probes; /* max number of probes */
1081 + uint16_t resize; /* resize factor in percent */
1082 + unsigned char cidr[30]; /* CIDR sizes */
1083 + void *initval[0]; /* initvals for jhash_1word */
1084 +};
1085 +
1086 +struct ip_set_req_nethash_create {
1087 + uint32_t hashsize;
1088 + uint16_t probes;
1089 + uint16_t resize;
1090 +};
1091 +
1092 +struct ip_set_req_nethash {
1093 + ip_set_ip_t ip;
1094 + unsigned char cidr;
1095 +};
1096 +
1097 +static unsigned char shifts[] = {255, 253, 249, 241, 225, 193, 129, 1};
1098 +
1099 +static inline ip_set_ip_t
1100 +pack(ip_set_ip_t ip, unsigned char cidr)
1101 +{
1102 + ip_set_ip_t addr, *paddr = &addr;
1103 + unsigned char n, t, *a;
1104 +
1105 + addr = htonl(ip & (0xFFFFFFFF << (32 - (cidr))));
1106 +#ifdef __KERNEL__
1107 + DP("ip:%u.%u.%u.%u/%u", NIPQUAD(addr), cidr);
1108 +#endif
1109 + n = cidr / 8;
1110 + t = cidr % 8;
1111 + a = &((unsigned char *)paddr)[n];
1112 + *a = *a /(1 << (8 - t)) + shifts[t];
1113 +#ifdef __KERNEL__
1114 + DP("n: %u, t: %u, a: %u", n, t, *a);
1115 + DP("ip:%u.%u.%u.%u/%u, %u.%u.%u.%u",
1116 + HIPQUAD(ip), cidr, NIPQUAD(addr));
1117 +#endif
1118 +
1119 + return ntohl(addr);
1120 +}
1121 +
1122 +#endif /* __IP_SET_NETHASH_H */
1123 Index: linux-2.6.21.7/include/linux/netfilter_ipv4/ip_set_portmap.h
1124 ===================================================================
1125 --- /dev/null
1126 +++ linux-2.6.21.7/include/linux/netfilter_ipv4/ip_set_portmap.h
1127 @@ -0,0 +1,25 @@
1128 +#ifndef __IP_SET_PORTMAP_H
1129 +#define __IP_SET_PORTMAP_H
1130 +
1131 +#include <linux/netfilter_ipv4/ip_set.h>
1132 +
1133 +#define SETTYPE_NAME "portmap"
1134 +#define MAX_RANGE 0x0000FFFF
1135 +#define INVALID_PORT (MAX_RANGE + 1)
1136 +
1137 +struct ip_set_portmap {
1138 + void *members; /* the portmap proper */
1139 + ip_set_ip_t first_port; /* host byte order, included in range */
1140 + ip_set_ip_t last_port; /* host byte order, included in range */
1141 +};
1142 +
1143 +struct ip_set_req_portmap_create {
1144 + ip_set_ip_t from;
1145 + ip_set_ip_t to;
1146 +};
1147 +
1148 +struct ip_set_req_portmap {
1149 + ip_set_ip_t port;
1150 +};
1151 +
1152 +#endif /* __IP_SET_PORTMAP_H */
1153 Index: linux-2.6.21.7/include/linux/netfilter_ipv4/ipt_set.h
1154 ===================================================================
1155 --- /dev/null
1156 +++ linux-2.6.21.7/include/linux/netfilter_ipv4/ipt_set.h
1157 @@ -0,0 +1,21 @@
1158 +#ifndef _IPT_SET_H
1159 +#define _IPT_SET_H
1160 +
1161 +#include <linux/netfilter_ipv4/ip_set.h>
1162 +
1163 +struct ipt_set_info {
1164 + ip_set_id_t index;
1165 + u_int32_t flags[IP_SET_MAX_BINDINGS + 1];
1166 +};
1167 +
1168 +/* match info */
1169 +struct ipt_set_info_match {
1170 + struct ipt_set_info match_set;
1171 +};
1172 +
1173 +struct ipt_set_info_target {
1174 + struct ipt_set_info add_set;
1175 + struct ipt_set_info del_set;
1176 +};
1177 +
1178 +#endif /*_IPT_SET_H*/
1179 Index: linux-2.6.21.7/net/ipv4/netfilter/ip_set.c
1180 ===================================================================
1181 --- /dev/null
1182 +++ linux-2.6.21.7/net/ipv4/netfilter/ip_set.c
1183 @@ -0,0 +1,2003 @@
1184 +/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
1185 + * Patrick Schaaf <bof@bof.de>
1186 + * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
1187 + *
1188 + * This program is free software; you can redistribute it and/or modify
1189 + * it under the terms of the GNU General Public License version 2 as
1190 + * published by the Free Software Foundation.
1191 + */
1192 +
1193 +/* Kernel module for IP set management */
1194 +
1195 +#include <linux/version.h>
1196 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
1197 +#include <linux/config.h>
1198 +#endif
1199 +#include <linux/module.h>
1200 +#include <linux/moduleparam.h>
1201 +#include <linux/kmod.h>
1202 +#include <linux/ip.h>
1203 +#include <linux/skbuff.h>
1204 +#include <linux/random.h>
1205 +#include <linux/jhash.h>
1206 +#include <linux/netfilter_ipv4/ip_tables.h>
1207 +#include <linux/errno.h>
1208 +#include <asm/uaccess.h>
1209 +#include <asm/bitops.h>
1210 +#include <asm/semaphore.h>
1211 +#include <linux/spinlock.h>
1212 +#include <linux/vmalloc.h>
1213 +
1214 +#define ASSERT_READ_LOCK(x)
1215 +#define ASSERT_WRITE_LOCK(x)
1216 +#include <linux/netfilter_ipv4/ip_set.h>
1217 +
1218 +static struct list_head set_type_list; /* all registered sets */
1219 +static struct ip_set **ip_set_list; /* all individual sets */
1220 +static DEFINE_RWLOCK(ip_set_lock); /* protects the lists and the hash */
1221 +static DECLARE_MUTEX(ip_set_app_mutex); /* serializes user access */
1222 +static ip_set_id_t ip_set_max = CONFIG_IP_NF_SET_MAX;
1223 +static ip_set_id_t ip_set_bindings_hash_size = CONFIG_IP_NF_SET_HASHSIZE;
1224 +static struct list_head *ip_set_hash; /* hash of bindings */
1225 +static unsigned int ip_set_hash_random; /* random seed */
1226 +
1227 +/*
1228 + * Sets are identified either by the index in ip_set_list or by id.
1229 + * The id never changes and is used to find a key in the hash.
1230 + * The index may change by swapping and is used at all other places
1231 + * (set/SET netfilter modules, binding value, etc.)
1232 + *
1233 + * Userspace requests are serialized by ip_set_app_mutex and sets can
1234 + * be deleted only from userspace. Therefore ip_set_list locking
1235 + * must obey the following rules:
1236 + *
1237 + * - kernel requests: read and write locking mandatory
1238 + * - user requests: read locking optional, write locking mandatory
1239 + */
1240 +
1241 +static inline void
1242 +__ip_set_get(ip_set_id_t index)
1243 +{
1244 + atomic_inc(&ip_set_list[index]->ref);
1245 +}
1246 +
1247 +static inline void
1248 +__ip_set_put(ip_set_id_t index)
1249 +{
1250 + atomic_dec(&ip_set_list[index]->ref);
1251 +}
1252 +
1253 +/*
1254 + * Binding routines
1255 + */
1256 +
1257 +static inline struct ip_set_hash *
1258 +__ip_set_find(u_int32_t key, ip_set_id_t id, ip_set_ip_t ip)
1259 +{
1260 + struct ip_set_hash *set_hash;
1261 +
1262 + list_for_each_entry(set_hash, &ip_set_hash[key], list)
1263 + if (set_hash->id == id && set_hash->ip == ip)
1264 + return set_hash;
1265 +
1266 + return NULL;
1267 +}
1268 +
1269 +static ip_set_id_t
1270 +ip_set_find_in_hash(ip_set_id_t id, ip_set_ip_t ip)
1271 +{
1272 + u_int32_t key = jhash_2words(id, ip, ip_set_hash_random)
1273 + % ip_set_bindings_hash_size;
1274 + struct ip_set_hash *set_hash;
1275 +
1276 + ASSERT_READ_LOCK(&ip_set_lock);
1277 + IP_SET_ASSERT(ip_set_list[id]);
1278 + DP("set: %s, ip: %u.%u.%u.%u", ip_set_list[id]->name, HIPQUAD(ip));
1279 +
1280 + set_hash = __ip_set_find(key, id, ip);
1281 +
1282 + DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
1283 + HIPQUAD(ip),
1284 + set_hash != NULL ? ip_set_list[set_hash->binding]->name : "");
1285 +
1286 + return (set_hash != NULL ? set_hash->binding : IP_SET_INVALID_ID);
1287 +}
1288 +
1289 +static inline void
1290 +__set_hash_del(struct ip_set_hash *set_hash)
1291 +{
1292 + ASSERT_WRITE_LOCK(&ip_set_lock);
1293 + IP_SET_ASSERT(ip_set_list[set_hash->binding]);
1294 +
1295 + __ip_set_put(set_hash->binding);
1296 + list_del(&set_hash->list);
1297 + kfree(set_hash);
1298 +}
1299 +
1300 +static int
1301 +ip_set_hash_del(ip_set_id_t id, ip_set_ip_t ip)
1302 +{
1303 + u_int32_t key = jhash_2words(id, ip, ip_set_hash_random)
1304 + % ip_set_bindings_hash_size;
1305 + struct ip_set_hash *set_hash;
1306 +
1307 + IP_SET_ASSERT(ip_set_list[id]);
1308 + DP("set: %s, ip: %u.%u.%u.%u", ip_set_list[id]->name, HIPQUAD(ip));
1309 + write_lock_bh(&ip_set_lock);
1310 + set_hash = __ip_set_find(key, id, ip);
1311 + DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
1312 + HIPQUAD(ip),
1313 + set_hash != NULL ? ip_set_list[set_hash->binding]->name : "");
1314 +
1315 + if (set_hash != NULL)
1316 + __set_hash_del(set_hash);
1317 + write_unlock_bh(&ip_set_lock);
1318 + return 0;
1319 +}
1320 +
1321 +static int
1322 +ip_set_hash_add(ip_set_id_t id, ip_set_ip_t ip, ip_set_id_t binding)
1323 +{
1324 + u_int32_t key = jhash_2words(id, ip, ip_set_hash_random)
1325 + % ip_set_bindings_hash_size;
1326 + struct ip_set_hash *set_hash;
1327 + int ret = 0;
1328 +
1329 + IP_SET_ASSERT(ip_set_list[id]);
1330 + IP_SET_ASSERT(ip_set_list[binding]);
1331 + DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
1332 + HIPQUAD(ip), ip_set_list[binding]->name);
1333 + write_lock_bh(&ip_set_lock);
1334 + set_hash = __ip_set_find(key, id, ip);
1335 + if (!set_hash) {
1336 + set_hash = kmalloc(sizeof(struct ip_set_hash), GFP_ATOMIC);
1337 + if (!set_hash) {
1338 + ret = -ENOMEM;
1339 + goto unlock;
1340 + }
1341 + INIT_LIST_HEAD(&set_hash->list);
1342 + set_hash->id = id;
1343 + set_hash->ip = ip;
1344 + list_add(&set_hash->list, &ip_set_hash[key]);
1345 + } else {
1346 + IP_SET_ASSERT(ip_set_list[set_hash->binding]);
1347 + DP("overwrite binding: %s",
1348 + ip_set_list[set_hash->binding]->name);
1349 + __ip_set_put(set_hash->binding);
1350 + }
1351 + set_hash->binding = binding;
1352 + __ip_set_get(set_hash->binding);
1353 + DP("stored: key %u, id %u (%s), ip %u.%u.%u.%u, binding %u (%s)",
1354 + key, id, ip_set_list[id]->name,
1355 + HIPQUAD(ip), binding, ip_set_list[binding]->name);
1356 + unlock:
1357 + write_unlock_bh(&ip_set_lock);
1358 + return ret;
1359 +}
1360 +
1361 +#define FOREACH_HASH_DO(fn, args...) \
1362 +({ \
1363 + ip_set_id_t __key; \
1364 + struct ip_set_hash *__set_hash; \
1365 + \
1366 + for (__key = 0; __key < ip_set_bindings_hash_size; __key++) { \
1367 + list_for_each_entry(__set_hash, &ip_set_hash[__key], list) \
1368 + fn(__set_hash , ## args); \
1369 + } \
1370 +})
1371 +
1372 +#define FOREACH_HASH_RW_DO(fn, args...) \
1373 +({ \
1374 + ip_set_id_t __key; \
1375 + struct ip_set_hash *__set_hash, *__n; \
1376 + \
1377 + ASSERT_WRITE_LOCK(&ip_set_lock); \
1378 + for (__key = 0; __key < ip_set_bindings_hash_size; __key++) { \
1379 + list_for_each_entry_safe(__set_hash, __n, &ip_set_hash[__key], list)\
1380 + fn(__set_hash , ## args); \
1381 + } \
1382 +})
1383 +
1384 +/* Add, del and test set entries from kernel */
1385 +
1386 +#define follow_bindings(index, set, ip) \
1387 +((index = ip_set_find_in_hash((set)->id, ip)) != IP_SET_INVALID_ID \
1388 + || (index = (set)->binding) != IP_SET_INVALID_ID)
1389 +
1390 +int
1391 +ip_set_testip_kernel(ip_set_id_t index,
1392 + const struct sk_buff *skb,
1393 + const u_int32_t *flags)
1394 +{
1395 + struct ip_set *set;
1396 + ip_set_ip_t ip;
1397 + int res;
1398 + unsigned char i = 0;
1399 +
1400 + IP_SET_ASSERT(flags[i]);
1401 + read_lock_bh(&ip_set_lock);
1402 + do {
1403 + set = ip_set_list[index];
1404 + IP_SET_ASSERT(set);
1405 + DP("set %s, index %u", set->name, index);
1406 + read_lock_bh(&set->lock);
1407 + res = set->type->testip_kernel(set, skb, &ip, flags, i++);
1408 + read_unlock_bh(&set->lock);
1409 + i += !!(set->type->features & IPSET_DATA_DOUBLE);
1410 + } while (res > 0
1411 + && flags[i]
1412 + && follow_bindings(index, set, ip));
1413 + read_unlock_bh(&ip_set_lock);
1414 +
1415 + return res;
1416 +}
1417 +
1418 +void
1419 +ip_set_addip_kernel(ip_set_id_t index,
1420 + const struct sk_buff *skb,
1421 + const u_int32_t *flags)
1422 +{
1423 + struct ip_set *set;
1424 + ip_set_ip_t ip;
1425 + int res;
1426 + unsigned char i = 0;
1427 +
1428 + IP_SET_ASSERT(flags[i]);
1429 + retry:
1430 + read_lock_bh(&ip_set_lock);
1431 + do {
1432 + set = ip_set_list[index];
1433 + IP_SET_ASSERT(set);
1434 + DP("set %s, index %u", set->name, index);
1435 + write_lock_bh(&set->lock);
1436 + res = set->type->addip_kernel(set, skb, &ip, flags, i++);
1437 + write_unlock_bh(&set->lock);
1438 + i += !!(set->type->features & IPSET_DATA_DOUBLE);
1439 + } while ((res == 0 || res == -EEXIST)
1440 + && flags[i]
1441 + && follow_bindings(index, set, ip));
1442 + read_unlock_bh(&ip_set_lock);
1443 +
1444 + if (res == -EAGAIN
1445 + && set->type->retry
1446 + && (res = set->type->retry(set)) == 0)
1447 + goto retry;
1448 +}
1449 +
1450 +void
1451 +ip_set_delip_kernel(ip_set_id_t index,
1452 + const struct sk_buff *skb,
1453 + const u_int32_t *flags)
1454 +{
1455 + struct ip_set *set;
1456 + ip_set_ip_t ip;
1457 + int res;
1458 + unsigned char i = 0;
1459 +
1460 + IP_SET_ASSERT(flags[i]);
1461 + read_lock_bh(&ip_set_lock);
1462 + do {
1463 + set = ip_set_list[index];
1464 + IP_SET_ASSERT(set);
1465 + DP("set %s, index %u", set->name, index);
1466 + write_lock_bh(&set->lock);
1467 + res = set->type->delip_kernel(set, skb, &ip, flags, i++);
1468 + write_unlock_bh(&set->lock);
1469 + i += !!(set->type->features & IPSET_DATA_DOUBLE);
1470 + } while ((res == 0 || res == -EEXIST)
1471 + && flags[i]
1472 + && follow_bindings(index, set, ip));
1473 + read_unlock_bh(&ip_set_lock);
1474 +}
1475 +
1476 +/* Register and deregister settype */
1477 +
1478 +static inline struct ip_set_type *
1479 +find_set_type(const char *name)
1480 +{
1481 + struct ip_set_type *set_type;
1482 +
1483 + list_for_each_entry(set_type, &set_type_list, list)
1484 + if (!strncmp(set_type->typename, name, IP_SET_MAXNAMELEN - 1))
1485 + return set_type;
1486 + return NULL;
1487 +}
1488 +
1489 +int
1490 +ip_set_register_set_type(struct ip_set_type *set_type)
1491 +{
1492 + int ret = 0;
1493 +
1494 + if (set_type->protocol_version != IP_SET_PROTOCOL_VERSION) {
1495 + ip_set_printk("'%s' uses wrong protocol version %u (want %u)",
1496 + set_type->typename,
1497 + set_type->protocol_version,
1498 + IP_SET_PROTOCOL_VERSION);
1499 + return -EINVAL;
1500 + }
1501 +
1502 + write_lock_bh(&ip_set_lock);
1503 + if (find_set_type(set_type->typename)) {
1504 + /* Duplicate! */
1505 + ip_set_printk("'%s' already registered!",
1506 + set_type->typename);
1507 + ret = -EINVAL;
1508 + goto unlock;
1509 + }
1510 + if (!try_module_get(THIS_MODULE)) {
1511 + ret = -EFAULT;
1512 + goto unlock;
1513 + }
1514 + list_add(&set_type->list, &set_type_list);
1515 + DP("'%s' registered.", set_type->typename);
1516 + unlock:
1517 + write_unlock_bh(&ip_set_lock);
1518 + return ret;
1519 +}
1520 +
1521 +void
1522 +ip_set_unregister_set_type(struct ip_set_type *set_type)
1523 +{
1524 + write_lock_bh(&ip_set_lock);
1525 + if (!find_set_type(set_type->typename)) {
1526 + ip_set_printk("'%s' not registered?",
1527 + set_type->typename);
1528 + goto unlock;
1529 + }
1530 + list_del(&set_type->list);
1531 + module_put(THIS_MODULE);
1532 + DP("'%s' unregistered.", set_type->typename);
1533 + unlock:
1534 + write_unlock_bh(&ip_set_lock);
1535 +
1536 +}
1537 +
1538 +/*
1539 + * Userspace routines
1540 + */
1541 +
1542 +/*
1543 + * Find set by name, reference it once. The reference makes sure the
1544 + * thing pointed to does not go away under our feet. Drop the reference
1545 + * later, using ip_set_put().
1546 + */
1547 +ip_set_id_t
1548 +ip_set_get_byname(const char *name)
1549 +{
1550 + ip_set_id_t i, index = IP_SET_INVALID_ID;
1551 +
1552 + down(&ip_set_app_mutex);
1553 + for (i = 0; i < ip_set_max; i++) {
1554 + if (ip_set_list[i] != NULL
1555 + && strcmp(ip_set_list[i]->name, name) == 0) {
1556 + __ip_set_get(i);
1557 + index = i;
1558 + break;
1559 + }
1560 + }
1561 + up(&ip_set_app_mutex);
1562 + return index;
1563 +}
1564 +
1565 +/*
1566 + * Find set by index, reference it once. The reference makes sure the
1567 + * thing pointed to does not go away under our feet. Drop the reference
1568 + * later, using ip_set_put().
1569 + */
1570 +ip_set_id_t
1571 +ip_set_get_byindex(ip_set_id_t index)
1572 +{
1573 + if (index >= ip_set_max)
1574 + return IP_SET_INVALID_ID;
1575 +
1576 + down(&ip_set_app_mutex);
1577 +
1578 + if (ip_set_list[index])
1579 + __ip_set_get(index);
1580 + else
1581 + index = IP_SET_INVALID_ID;
1582 +
1583 + up(&ip_set_app_mutex);
1584 + return index;
1585 +}
1586 +
1587 +/*
1588 + * If the given set pointer points to a valid set, decrement
1589 + * reference count by 1. The caller shall not assume the index
1590 + * to be valid, after calling this function.
1591 + */
1592 +void ip_set_put(ip_set_id_t index)
1593 +{
1594 + down(&ip_set_app_mutex);
1595 + if (ip_set_list[index])
1596 + __ip_set_put(index);
1597 + up(&ip_set_app_mutex);
1598 +}
1599 +
1600 +/* Find a set by name or index */
1601 +static ip_set_id_t
1602 +ip_set_find_byname(const char *name)
1603 +{
1604 + ip_set_id_t i, index = IP_SET_INVALID_ID;
1605 +
1606 + for (i = 0; i < ip_set_max; i++) {
1607 + if (ip_set_list[i] != NULL
1608 + && strcmp(ip_set_list[i]->name, name) == 0) {
1609 + index = i;
1610 + break;
1611 + }
1612 + }
1613 + return index;
1614 +}
1615 +
1616 +static ip_set_id_t
1617 +ip_set_find_byindex(ip_set_id_t index)
1618 +{
1619 + if (index >= ip_set_max || ip_set_list[index] == NULL)
1620 + index = IP_SET_INVALID_ID;
1621 +
1622 + return index;
1623 +}
1624 +
1625 +/*
1626 + * Add, del, test, bind and unbind
1627 + */
1628 +
1629 +static inline int
1630 +__ip_set_testip(struct ip_set *set,
1631 + const void *data,
1632 + size_t size,
1633 + ip_set_ip_t *ip)
1634 +{
1635 + int res;
1636 +
1637 + read_lock_bh(&set->lock);
1638 + res = set->type->testip(set, data, size, ip);
1639 + read_unlock_bh(&set->lock);
1640 +
1641 + return res;
1642 +}
1643 +
1644 +static int
1645 +__ip_set_addip(ip_set_id_t index,
1646 + const void *data,
1647 + size_t size)
1648 +{
1649 + struct ip_set *set = ip_set_list[index];
1650 + ip_set_ip_t ip;
1651 + int res;
1652 +
1653 + IP_SET_ASSERT(set);
1654 + do {
1655 + write_lock_bh(&set->lock);
1656 + res = set->type->addip(set, data, size, &ip);
1657 + write_unlock_bh(&set->lock);
1658 + } while (res == -EAGAIN
1659 + && set->type->retry
1660 + && (res = set->type->retry(set)) == 0);
1661 +
1662 + return res;
1663 +}
1664 +
1665 +static int
1666 +ip_set_addip(ip_set_id_t index,
1667 + const void *data,
1668 + size_t size)
1669 +{
1670 +
1671 + return __ip_set_addip(index,
1672 + data + sizeof(struct ip_set_req_adt),
1673 + size - sizeof(struct ip_set_req_adt));
1674 +}
1675 +
1676 +static int
1677 +ip_set_delip(ip_set_id_t index,
1678 + const void *data,
1679 + size_t size)
1680 +{
1681 + struct ip_set *set = ip_set_list[index];
1682 + ip_set_ip_t ip;
1683 + int res;
1684 +
1685 + IP_SET_ASSERT(set);
1686 + write_lock_bh(&set->lock);
1687 + res = set->type->delip(set,
1688 + data + sizeof(struct ip_set_req_adt),
1689 + size - sizeof(struct ip_set_req_adt),
1690 + &ip);
1691 + write_unlock_bh(&set->lock);
1692 +
1693 + return res;
1694 +}
1695 +
1696 +static int
1697 +ip_set_testip(ip_set_id_t index,
1698 + const void *data,
1699 + size_t size)
1700 +{
1701 + struct ip_set *set = ip_set_list[index];
1702 + ip_set_ip_t ip;
1703 + int res;
1704 +
1705 + IP_SET_ASSERT(set);
1706 + res = __ip_set_testip(set,
1707 + data + sizeof(struct ip_set_req_adt),
1708 + size - sizeof(struct ip_set_req_adt),
1709 + &ip);
1710 +
1711 + return (res > 0 ? -EEXIST : res);
1712 +}
1713 +
1714 +static int
1715 +ip_set_bindip(ip_set_id_t index,
1716 + const void *data,
1717 + size_t size)
1718 +{
1719 + struct ip_set *set = ip_set_list[index];
1720 + struct ip_set_req_bind *req_bind;
1721 + ip_set_id_t binding;
1722 + ip_set_ip_t ip;
1723 + int res;
1724 +
1725 + IP_SET_ASSERT(set);
1726 + if (size < sizeof(struct ip_set_req_bind))
1727 + return -EINVAL;
1728 +
1729 + req_bind = (struct ip_set_req_bind *) data;
1730 + req_bind->binding[IP_SET_MAXNAMELEN - 1] = '\0';
1731 +
1732 + if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
1733 + /* Default binding of a set */
1734 + char *binding_name;
1735 +
1736 + if (size != sizeof(struct ip_set_req_bind) + IP_SET_MAXNAMELEN)
1737 + return -EINVAL;
1738 +
1739 + binding_name = (char *)(data + sizeof(struct ip_set_req_bind));
1740 + binding_name[IP_SET_MAXNAMELEN - 1] = '\0';
1741 +
1742 + binding = ip_set_find_byname(binding_name);
1743 + if (binding == IP_SET_INVALID_ID)
1744 + return -ENOENT;
1745 +
1746 + write_lock_bh(&ip_set_lock);
1747 + /* Sets as binding values are referenced */
1748 + if (set->binding != IP_SET_INVALID_ID)
1749 + __ip_set_put(set->binding);
1750 + set->binding = binding;
1751 + __ip_set_get(set->binding);
1752 + write_unlock_bh(&ip_set_lock);
1753 +
1754 + return 0;
1755 + }
1756 + binding = ip_set_find_byname(req_bind->binding);
1757 + if (binding == IP_SET_INVALID_ID)
1758 + return -ENOENT;
1759 +
1760 + res = __ip_set_testip(set,
1761 + data + sizeof(struct ip_set_req_bind),
1762 + size - sizeof(struct ip_set_req_bind),
1763 + &ip);
1764 + DP("set %s, ip: %u.%u.%u.%u, binding %s",
1765 + set->name, HIPQUAD(ip), ip_set_list[binding]->name);
1766 +
1767 + if (res >= 0)
1768 + res = ip_set_hash_add(set->id, ip, binding);
1769 +
1770 + return res;
1771 +}
1772 +
1773 +#define FOREACH_SET_DO(fn, args...) \
1774 +({ \
1775 + ip_set_id_t __i; \
1776 + struct ip_set *__set; \
1777 + \
1778 + for (__i = 0; __i < ip_set_max; __i++) { \
1779 + __set = ip_set_list[__i]; \
1780 + if (__set != NULL) \
1781 + fn(__set , ##args); \
1782 + } \
1783 +})
1784 +
1785 +static inline void
1786 +__set_hash_del_byid(struct ip_set_hash *set_hash, ip_set_id_t id)
1787 +{
1788 + if (set_hash->id == id)
1789 + __set_hash_del(set_hash);
1790 +}
1791 +
1792 +static inline void
1793 +__unbind_default(struct ip_set *set)
1794 +{
1795 + if (set->binding != IP_SET_INVALID_ID) {
1796 + /* Sets as binding values are referenced */
1797 + __ip_set_put(set->binding);
1798 + set->binding = IP_SET_INVALID_ID;
1799 + }
1800 +}
1801 +
1802 +static int
1803 +ip_set_unbindip(ip_set_id_t index,
1804 + const void *data,
1805 + size_t size)
1806 +{
1807 + struct ip_set *set;
1808 + struct ip_set_req_bind *req_bind;
1809 + ip_set_ip_t ip;
1810 + int res;
1811 +
1812 + DP("");
1813 + if (size < sizeof(struct ip_set_req_bind))
1814 + return -EINVAL;
1815 +
1816 + req_bind = (struct ip_set_req_bind *) data;
1817 + req_bind->binding[IP_SET_MAXNAMELEN - 1] = '\0';
1818 +
1819 + DP("%u %s", index, req_bind->binding);
1820 + if (index == IP_SET_INVALID_ID) {
1821 + /* unbind :all: */
1822 + if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
1823 + /* Default binding of sets */
1824 + write_lock_bh(&ip_set_lock);
1825 + FOREACH_SET_DO(__unbind_default);
1826 + write_unlock_bh(&ip_set_lock);
1827 + return 0;
1828 + } else if (strcmp(req_bind->binding, IPSET_TOKEN_ALL) == 0) {
1829 + /* Flush all bindings of all sets*/
1830 + write_lock_bh(&ip_set_lock);
1831 + FOREACH_HASH_RW_DO(__set_hash_del);
1832 + write_unlock_bh(&ip_set_lock);
1833 + return 0;
1834 + }
1835 + DP("unreachable reached!");
1836 + return -EINVAL;
1837 + }
1838 +
1839 + set = ip_set_list[index];
1840 + IP_SET_ASSERT(set);
1841 + if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
1842 + /* Default binding of set */
1843 + ip_set_id_t binding = ip_set_find_byindex(set->binding);
1844 +
1845 + if (binding == IP_SET_INVALID_ID)
1846 + return -ENOENT;
1847 +
1848 + write_lock_bh(&ip_set_lock);
1849 + /* Sets in hash values are referenced */
1850 + __ip_set_put(set->binding);
1851 + set->binding = IP_SET_INVALID_ID;
1852 + write_unlock_bh(&ip_set_lock);
1853 +
1854 + return 0;
1855 + } else if (strcmp(req_bind->binding, IPSET_TOKEN_ALL) == 0) {
1856 + /* Flush all bindings */
1857 +
1858 + write_lock_bh(&ip_set_lock);
1859 + FOREACH_HASH_RW_DO(__set_hash_del_byid, set->id);
1860 + write_unlock_bh(&ip_set_lock);
1861 + return 0;
1862 + }
1863 +
1864 + res = __ip_set_testip(set,
1865 + data + sizeof(struct ip_set_req_bind),
1866 + size - sizeof(struct ip_set_req_bind),
1867 + &ip);
1868 +
1869 + DP("set %s, ip: %u.%u.%u.%u", set->name, HIPQUAD(ip));
1870 + if (res >= 0)
1871 + res = ip_set_hash_del(set->id, ip);
1872 +
1873 + return res;
1874 +}
1875 +
1876 +static int
1877 +ip_set_testbind(ip_set_id_t index,
1878 + const void *data,
1879 + size_t size)
1880 +{
1881 + struct ip_set *set = ip_set_list[index];
1882 + struct ip_set_req_bind *req_bind;
1883 + ip_set_id_t binding;
1884 + ip_set_ip_t ip;
1885 + int res;
1886 +
1887 + IP_SET_ASSERT(set);
1888 + if (size < sizeof(struct ip_set_req_bind))
1889 + return -EINVAL;
1890 +
1891 + req_bind = (struct ip_set_req_bind *) data;
1892 + req_bind->binding[IP_SET_MAXNAMELEN - 1] = '\0';
1893 +
1894 + if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
1895 + /* Default binding of set */
1896 + char *binding_name;
1897 +
1898 + if (size != sizeof(struct ip_set_req_bind) + IP_SET_MAXNAMELEN)
1899 + return -EINVAL;
1900 +
1901 + binding_name = (char *)(data + sizeof(struct ip_set_req_bind));
1902 + binding_name[IP_SET_MAXNAMELEN - 1] = '\0';
1903 +
1904 + binding = ip_set_find_byname(binding_name);
1905 + if (binding == IP_SET_INVALID_ID)
1906 + return -ENOENT;
1907 +
1908 + res = (set->binding == binding) ? -EEXIST : 0;
1909 +
1910 + return res;
1911 + }
1912 + binding = ip_set_find_byname(req_bind->binding);
1913 + if (binding == IP_SET_INVALID_ID)
1914 + return -ENOENT;
1915 +
1916 +
1917 + res = __ip_set_testip(set,
1918 + data + sizeof(struct ip_set_req_bind),
1919 + size - sizeof(struct ip_set_req_bind),
1920 + &ip);
1921 + DP("set %s, ip: %u.%u.%u.%u, binding %s",
1922 + set->name, HIPQUAD(ip), ip_set_list[binding]->name);
1923 +
1924 + if (res >= 0)
1925 + res = (ip_set_find_in_hash(set->id, ip) == binding)
1926 + ? -EEXIST : 0;
1927 +
1928 + return res;
1929 +}
1930 +
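+/*
+ * Look up a set type by name. On success the function returns with
+ * ip_set_lock held for reading (the caller must drop it); on failure
+ * the lock is released here and NULL is returned.
+ */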
1931 +static struct ip_set_type *
1932 +find_set_type_rlock(const char *typename)
1933 +{
1934 + struct ip_set_type *type;
1935 +
1936 + read_lock_bh(&ip_set_lock);
1937 + type = find_set_type(typename);
1938 + if (type == NULL)
1939 + read_unlock_bh(&ip_set_lock);
1940 +
1941 + return type;
1942 +}
1943 +
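+/*
+ * Find a free slot in ip_set_list for a new set called 'name':
+ * -EEXIST on a name clash, -ERANGE if no slot is left. The chosen
+ * index is then re-checked against the ids already in use (ids and
+ * indexes may differ after swapping) until a value not used as an
+ * id by any existing set is found.
+ */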
1944 +static int
1945 +find_free_id(const char *name,
1946 + ip_set_id_t *index,
1947 + ip_set_id_t *id)
1948 +{
1949 + ip_set_id_t i;
1950 +
1951 + *id = IP_SET_INVALID_ID;
1952 + for (i = 0; i < ip_set_max; i++) {
1953 + if (ip_set_list[i] == NULL) {
1954 + if (*id == IP_SET_INVALID_ID)
1955 + *id = *index = i;
1956 + } else if (strcmp(name, ip_set_list[i]->name) == 0)
1957 + /* Name clash */
1958 + return -EEXIST;
1959 + }
1960 + if (*id == IP_SET_INVALID_ID)
1961 + /* No free slot remained */
1962 + return -ERANGE;
1963 + /* Check that index is usable as id (swapping) */
1964 + check:
1965 + for (i = 0; i < ip_set_max; i++) {
1966 + if (ip_set_list[i] != NULL
1967 + && ip_set_list[i]->id == *id) {
1968 + *id = i;
1969 + goto check;
1970 + }
1971 + }
1972 + return 0;
1973 +}
1974 +
1975 +/*
1976 + * Create a set
1977 + */
1978 +static int
1979 +ip_set_create(const char *name,
1980 + const char *typename,
1981 + ip_set_id_t restore,
1982 + const void *data,
1983 + size_t size)
1984 +{
1985 + struct ip_set *set;
1986 + ip_set_id_t index = 0, id;
1987 + int res = 0;
1988 +
1989 + DP("setname: %s, typename: %s, id: %u", name, typename, restore);
1990 + /*
1991 + * First, and without any locks, allocate and initialize
1992 + * a normal base set structure.
1993 + */
1994 + set = kmalloc(sizeof(struct ip_set), GFP_KERNEL);
1995 + if (!set)
1996 + return -ENOMEM;
1997 + set->lock = RW_LOCK_UNLOCKED;
1998 + strncpy(set->name, name, IP_SET_MAXNAMELEN);
1999 + set->binding = IP_SET_INVALID_ID;
2000 + atomic_set(&set->ref, 0);
2001 +
2002 + /*
2003 + * Next, take the &ip_set_lock, check that we know the type,
2004 + * and take a reference on the type, to make sure it
2005 + * stays available while constructing our new set.
2006 + *
2007 + * After referencing the type, we drop the &ip_set_lock,
2008 + * and let the new set construction run without locks.
2009 + */
2010 + set->type = find_set_type_rlock(typename);
2011 + if (set->type == NULL) {
2012 + /* Try loading the module */
2013 + char modulename[IP_SET_MAXNAMELEN + strlen("ip_set_") + 1];
2014 + strcpy(modulename, "ip_set_");
2015 + strcat(modulename, typename);
2016 + DP("try to load %s", modulename);
2017 + request_module(modulename);
2018 + set->type = find_set_type_rlock(typename);
2019 + }
2020 + if (set->type == NULL) {
2021 + ip_set_printk("no set type '%s', set '%s' not created",
2022 + typename, name);
2023 + res = -ENOENT;
2024 + goto out;
2025 + }
2026 + if (!try_module_get(set->type->me)) {
2027 + read_unlock_bh(&ip_set_lock);
2028 + res = -EFAULT;
2029 + goto out;
2030 + }
2031 + read_unlock_bh(&ip_set_lock);
2032 +
2033 + /*
2034 + * Without holding any locks, create private part.
2035 + */
2036 + res = set->type->create(set, data, size);
2037 + if (res != 0)
2038 + goto put_out;
2039 +
2040 + /* BTW, res==0 here. */
2041 +
2042 + /*
2043 + * Here, we have a valid, constructed set. &ip_set_lock again,
2044 + * find free id/index and check that it is not already in
2045 + * ip_set_list.
2046 + */
2047 + write_lock_bh(&ip_set_lock);
2048 + if ((res = find_free_id(set->name, &index, &id)) != 0) {
2049 + DP("no free id!");
2050 + goto cleanup;
2051 + }
2052 +
2053 + /* Make sure restore gets the same index */
2054 + if (restore != IP_SET_INVALID_ID && index != restore) {
2055 + DP("Can't restore, sets are screwed up");
2056 + res = -ERANGE;
2057 + goto cleanup;
2058 + }
2059 +
2060 + /*
2061 + * Finally! Add our shiny new set to the list, and be done.
2062 + */
2063 + DP("create: '%s' created with index %u, id %u!", set->name, index, id);
2064 + set->id = id;
2065 + ip_set_list[index] = set;
2066 + write_unlock_bh(&ip_set_lock);
2067 + return res;
2068 +
2069 + cleanup:
2070 + write_unlock_bh(&ip_set_lock);
2071 + set->type->destroy(set);
2072 + put_out:
2073 + module_put(set->type->me);
2074 + out:
2075 + kfree(set);
2076 + return res;
2077 +}
2078 +
2079 +/*
2080 + * Destroy a given existing set
2081 + */
2082 +static void
2083 +ip_set_destroy_set(ip_set_id_t index)
2084 +{
2085 + struct ip_set *set = ip_set_list[index];
2086 +
2087 + IP_SET_ASSERT(set);
2088 + DP("set: %s", set->name);
2089 + write_lock_bh(&ip_set_lock);
2090 + FOREACH_HASH_RW_DO(__set_hash_del_byid, set->id);
2091 + if (set->binding != IP_SET_INVALID_ID)
2092 + __ip_set_put(set->binding);
2093 + ip_set_list[index] = NULL;
2094 + write_unlock_bh(&ip_set_lock);
2095 +
2096 + /* Must call it without holding any lock */
2097 + set->type->destroy(set);
2098 + module_put(set->type->me);
2099 + kfree(set);
2100 +}
2101 +
2102 +/*
2103 + * Destroy a set - or all sets
2104 + * Sets must not be referenced/used.
2105 + */
2106 +static int
2107 +ip_set_destroy(ip_set_id_t index)
2108 +{
2109 + ip_set_id_t i;
2110 +
2111 + /* ref modification always protected by the mutex */
2112 + if (index != IP_SET_INVALID_ID) {
2113 + if (atomic_read(&ip_set_list[index]->ref))
2114 + return -EBUSY;
2115 + ip_set_destroy_set(index);
2116 + } else {
2117 + for (i = 0; i < ip_set_max; i++) {
2118 + if (ip_set_list[i] != NULL
2119 + && (atomic_read(&ip_set_list[i]->ref)))
2120 + return -EBUSY;
2121 + }
2122 +
2123 + for (i = 0; i < ip_set_max; i++) {
2124 + if (ip_set_list[i] != NULL)
2125 + ip_set_destroy_set(i);
2126 + }
2127 + }
2128 + return 0;
2129 +}
2130 +
2131 +static void
2132 +ip_set_flush_set(struct ip_set *set)
2133 +{
2134 + DP("set: %s %u", set->name, set->id);
2135 +
2136 + write_lock_bh(&set->lock);
2137 + set->type->flush(set);
2138 + write_unlock_bh(&set->lock);
2139 +}
2140 +
2141 +/*
2142 + * Flush data in a set - or in all sets
2143 + */
2144 +static int
2145 +ip_set_flush(ip_set_id_t index)
2146 +{
2147 + if (index != IP_SET_INVALID_ID) {
2148 + IP_SET_ASSERT(ip_set_list[index]);
2149 + ip_set_flush_set(ip_set_list[index]);
2150 + } else
2151 + FOREACH_SET_DO(ip_set_flush_set);
2152 +
2153 + return 0;
2154 +}
2155 +
2156 +/* Rename a set */
2157 +static int
2158 +ip_set_rename(ip_set_id_t index, const char *name)
2159 +{
2160 + struct ip_set *set = ip_set_list[index];
2161 + ip_set_id_t i;
2162 + int res = 0;
2163 +
2164 + DP("set: %s to %s", set->name, name);
2165 + write_lock_bh(&ip_set_lock);
2166 + for (i = 0; i < ip_set_max; i++) {
2167 + if (ip_set_list[i] != NULL
2168 + && strncmp(ip_set_list[i]->name,
2169 + name,
2170 + IP_SET_MAXNAMELEN - 1) == 0) {
2171 + res = -EEXIST;
2172 + goto unlock;
2173 + }
2174 + }
2175 + strncpy(set->name, name, IP_SET_MAXNAMELEN);
2176 + unlock:
2177 + write_unlock_bh(&ip_set_lock);
2178 + return res;
2179 +}
2180 +
2181 +/*
2182 + * Swap two sets so that name/index points to the other.
2183 + * References are also swapped.
2184 + */
2185 +static int
2186 +ip_set_swap(ip_set_id_t from_index, ip_set_id_t to_index)
2187 +{
2188 + struct ip_set *from = ip_set_list[from_index];
2189 + struct ip_set *to = ip_set_list[to_index];
2190 + char from_name[IP_SET_MAXNAMELEN];
2191 + u_int32_t from_ref;
2192 +
2193 + DP("set: %s to %s", from->name, to->name);
2194 +	/* Features must not change. Artificial restriction. */
2195 + if (from->type->features != to->type->features)
2196 + return -ENOEXEC;
2197 +
2198 + /* No magic here: ref munging protected by the mutex */
2199 + write_lock_bh(&ip_set_lock);
2200 + strncpy(from_name, from->name, IP_SET_MAXNAMELEN);
2201 + from_ref = atomic_read(&from->ref);
2202 +
2203 + strncpy(from->name, to->name, IP_SET_MAXNAMELEN);
2204 + atomic_set(&from->ref, atomic_read(&to->ref));
2205 + strncpy(to->name, from_name, IP_SET_MAXNAMELEN);
2206 + atomic_set(&to->ref, from_ref);
2207 +
2208 + ip_set_list[from_index] = to;
2209 + ip_set_list[to_index] = from;
2210 +
2211 + write_unlock_bh(&ip_set_lock);
2212 + return 0;
2213 +}
2214 +
2215 +/*
2216 + * List set data
2217 + */
2218 +
2219 +static inline void
2220 +__set_hash_bindings_size_list(struct ip_set_hash *set_hash,
2221 + ip_set_id_t id, size_t *size)
2222 +{
2223 + if (set_hash->id == id)
2224 + *size += sizeof(struct ip_set_hash_list);
2225 +}
2226 +
2227 +static inline void
2228 +__set_hash_bindings_size_save(struct ip_set_hash *set_hash,
2229 + ip_set_id_t id, size_t *size)
2230 +{
2231 + if (set_hash->id == id)
2232 + *size += sizeof(struct ip_set_hash_save);
2233 +}
2234 +
2235 +static inline void
2236 +__set_hash_bindings(struct ip_set_hash *set_hash,
2237 + ip_set_id_t id, void *data, int *used)
2238 +{
2239 + if (set_hash->id == id) {
2240 + struct ip_set_hash_list *hash_list =
2241 + (struct ip_set_hash_list *)(data + *used);
2242 +
2243 + hash_list->ip = set_hash->ip;
2244 + hash_list->binding = set_hash->binding;
2245 + *used += sizeof(struct ip_set_hash_list);
2246 + }
2247 +}
2248 +
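+/*
+ * Emit the listing of one set into 'data' at offset '*used':
+ * a struct ip_set_list header, the type specific header, the member
+ * data and finally the bindings belonging to this set. Returns
+ * -EAGAIN when 'len' is too small, so that the caller can retry
+ * with a bigger buffer.
+ */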
2249 +static int ip_set_list_set(ip_set_id_t index,
2250 + void *data,
2251 + int *used,
2252 + int len)
2253 +{
2254 + struct ip_set *set = ip_set_list[index];
2255 + struct ip_set_list *set_list;
2256 +
2257 + /* Pointer to our header */
2258 + set_list = (struct ip_set_list *) (data + *used);
2259 +
2260 + DP("set: %s, used: %d %p %p", set->name, *used, data, data + *used);
2261 +
2262 + /* Get and ensure header size */
2263 + if (*used + sizeof(struct ip_set_list) > len)
2264 + goto not_enough_mem;
2265 + *used += sizeof(struct ip_set_list);
2266 +
2267 + read_lock_bh(&set->lock);
2268 + /* Get and ensure set specific header size */
2269 + set_list->header_size = set->type->header_size;
2270 + if (*used + set_list->header_size > len)
2271 + goto unlock_set;
2272 +
2273 + /* Fill in the header */
2274 + set_list->index = index;
2275 + set_list->binding = set->binding;
2276 + set_list->ref = atomic_read(&set->ref);
2277 +
2278 +	/* Fill in set specific header data */
2279 + set->type->list_header(set, data + *used);
2280 + *used += set_list->header_size;
2281 +
2282 + /* Get and ensure set specific members size */
2283 + set_list->members_size = set->type->list_members_size(set);
2284 + if (*used + set_list->members_size > len)
2285 + goto unlock_set;
2286 +
2287 +	/* Fill in set specific members data */
2288 + set->type->list_members(set, data + *used);
2289 + *used += set_list->members_size;
2290 + read_unlock_bh(&set->lock);
2291 +
2292 + /* Bindings */
2293 +
2294 + /* Get and ensure set specific bindings size */
2295 + set_list->bindings_size = 0;
2296 + FOREACH_HASH_DO(__set_hash_bindings_size_list,
2297 + set->id, &set_list->bindings_size);
2298 + if (*used + set_list->bindings_size > len)
2299 + goto not_enough_mem;
2300 +
2301 +	/* Fill in set specific bindings data */
2302 + FOREACH_HASH_DO(__set_hash_bindings, set->id, data, used);
2303 +
2304 + return 0;
2305 +
2306 + unlock_set:
2307 + read_unlock_bh(&set->lock);
2308 + not_enough_mem:
2309 + DP("not enough mem, try again");
2310 + return -EAGAIN;
2311 +}
2312 +
2313 +/*
2314 + * Save sets
2315 + */
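+/*
+ * Emit the saved form of one set: a struct ip_set_save header
+ * followed by the type specific header and the member data.
+ * Returns -EAGAIN when the supplied buffer is too small.
+ */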
2316 +static int ip_set_save_set(ip_set_id_t index,
2317 + void *data,
2318 + int *used,
2319 + int len)
2320 +{
2321 + struct ip_set *set;
2322 + struct ip_set_save *set_save;
2323 +
2324 + /* Pointer to our header */
2325 + set_save = (struct ip_set_save *) (data + *used);
2326 +
2327 + /* Get and ensure header size */
2328 + if (*used + sizeof(struct ip_set_save) > len)
2329 + goto not_enough_mem;
2330 + *used += sizeof(struct ip_set_save);
2331 +
2332 + set = ip_set_list[index];
2333 + DP("set: %s, used: %u(%u) %p %p", set->name, *used, len,
2334 + data, data + *used);
2335 +
2336 + read_lock_bh(&set->lock);
2337 + /* Get and ensure set specific header size */
2338 + set_save->header_size = set->type->header_size;
2339 + if (*used + set_save->header_size > len)
2340 + goto unlock_set;
2341 +
2342 + /* Fill in the header */
2343 + set_save->index = index;
2344 + set_save->binding = set->binding;
2345 +
2346 +	/* Fill in set specific header data */
2347 + set->type->list_header(set, data + *used);
2348 + *used += set_save->header_size;
2349 +
2350 + DP("set header filled: %s, used: %u(%u) %p %p", set->name, *used,
2351 + set_save->header_size, data, data + *used);
2352 + /* Get and ensure set specific members size */
2353 + set_save->members_size = set->type->list_members_size(set);
2354 + if (*used + set_save->members_size > len)
2355 + goto unlock_set;
2356 +
2357 +	/* Fill in set specific members data */
2358 + set->type->list_members(set, data + *used);
2359 + *used += set_save->members_size;
2360 + read_unlock_bh(&set->lock);
2361 + DP("set members filled: %s, used: %u(%u) %p %p", set->name, *used,
2362 + set_save->members_size, data, data + *used);
2363 + return 0;
2364 +
2365 + unlock_set:
2366 + read_unlock_bh(&set->lock);
2367 + not_enough_mem:
2368 + DP("not enough mem, try again");
2369 + return -EAGAIN;
2370 +}
2371 +
2372 +static inline void
2373 +__set_hash_save_bindings(struct ip_set_hash *set_hash,
2374 + ip_set_id_t id,
2375 + void *data,
2376 + int *used,
2377 + int len,
2378 + int *res)
2379 +{
2380 + if (*res == 0
2381 + && (id == IP_SET_INVALID_ID || set_hash->id == id)) {
2382 + struct ip_set_hash_save *hash_save =
2383 + (struct ip_set_hash_save *)(data + *used);
2384 + /* Ensure bindings size */
2385 + if (*used + sizeof(struct ip_set_hash_save) > len) {
2386 + *res = -ENOMEM;
2387 + return;
2388 + }
2389 + hash_save->id = set_hash->id;
2390 + hash_save->ip = set_hash->ip;
2391 + hash_save->binding = set_hash->binding;
2392 + *used += sizeof(struct ip_set_hash_save);
2393 + }
2394 +}
2395 +
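+/*
+ * Append the binding records: first a marker struct ip_set_save with
+ * index == IP_SET_INVALID_ID and zero sizes, then one
+ * struct ip_set_hash_save entry per matching binding.
+ */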
2396 +static int ip_set_save_bindings(ip_set_id_t index,
2397 + void *data,
2398 + int *used,
2399 + int len)
2400 +{
2401 + int res = 0;
2402 + struct ip_set_save *set_save;
2403 +
2404 + DP("used %u, len %u", *used, len);
2405 + /* Get and ensure header size */
2406 + if (*used + sizeof(struct ip_set_save) > len)
2407 + return -ENOMEM;
2408 +
2409 + /* Marker */
2410 + set_save = (struct ip_set_save *) (data + *used);
2411 + set_save->index = IP_SET_INVALID_ID;
2412 + set_save->header_size = 0;
2413 + set_save->members_size = 0;
2414 + *used += sizeof(struct ip_set_save);
2415 +
2416 + DP("marker added used %u, len %u", *used, len);
2417 + /* Fill in bindings data */
2418 + if (index != IP_SET_INVALID_ID)
2419 + /* Sets are identified by id in hash */
2420 + index = ip_set_list[index]->id;
2421 + FOREACH_HASH_DO(__set_hash_save_bindings, index, data, used, len, &res);
2422 +
2423 + return res;
2424 +}
2425 +
2426 +/*
2427 + * Restore sets
2428 + */
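+/*
+ * Parse the blob produced by save: set records (header plus members)
+ * up to the marker entry with index == IP_SET_INVALID_ID, then the
+ * binding records. Returns 0 on success, otherwise the number of the
+ * input record ("line") at which restoring failed.
+ */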
2429 +static int ip_set_restore(void *data,
2430 + int len)
2431 +{
2432 + int res = 0;
2433 + int line = 0, used = 0, members_size;
2434 + struct ip_set *set;
2435 + struct ip_set_hash_save *hash_save;
2436 + struct ip_set_restore *set_restore;
2437 + ip_set_id_t index;
2438 +
2439 + /* Loop to restore sets */
2440 + while (1) {
2441 + line++;
2442 +
2443 + DP("%u %u %u", used, sizeof(struct ip_set_restore), len);
2444 + /* Get and ensure header size */
2445 + if (used + sizeof(struct ip_set_restore) > len)
2446 + return line;
2447 + set_restore = (struct ip_set_restore *) (data + used);
2448 + used += sizeof(struct ip_set_restore);
2449 +
2450 + /* Ensure data size */
2451 + if (used
2452 + + set_restore->header_size
2453 + + set_restore->members_size > len)
2454 + return line;
2455 +
2456 + /* Check marker */
2457 + if (set_restore->index == IP_SET_INVALID_ID) {
2458 + line--;
2459 + goto bindings;
2460 + }
2461 +
2462 + /* Try to create the set */
2463 + DP("restore %s %s", set_restore->name, set_restore->typename);
2464 + res = ip_set_create(set_restore->name,
2465 + set_restore->typename,
2466 + set_restore->index,
2467 + data + used,
2468 + set_restore->header_size);
2469 +
2470 + if (res != 0)
2471 + return line;
2472 + used += set_restore->header_size;
2473 +
2474 + index = ip_set_find_byindex(set_restore->index);
2475 + DP("index %u, restore_index %u", index, set_restore->index);
2476 + if (index != set_restore->index)
2477 + return line;
2478 + /* Try to restore members data */
2479 + set = ip_set_list[index];
2480 + members_size = 0;
2481 + DP("members_size %u reqsize %u",
2482 + set_restore->members_size, set->type->reqsize);
2483 + while (members_size + set->type->reqsize <=
2484 + set_restore->members_size) {
2485 + line++;
2486 + DP("members: %u, line %u", members_size, line);
2487 + res = __ip_set_addip(index,
2488 + data + used + members_size,
2489 + set->type->reqsize);
2490 + if (!(res == 0 || res == -EEXIST))
2491 + return line;
2492 + members_size += set->type->reqsize;
2493 + }
2494 +
2495 + DP("members_size %u %u",
2496 + set_restore->members_size, members_size);
2497 + if (members_size != set_restore->members_size)
2498 + return line++;
2499 + used += set_restore->members_size;
2500 + }
2501 +
2502 + bindings:
2503 + /* Loop to restore bindings */
2504 + while (used < len) {
2505 + line++;
2506 +
2507 + DP("restore binding, line %u", line);
2508 + /* Get and ensure size */
2509 + if (used + sizeof(struct ip_set_hash_save) > len)
2510 + return line;
2511 + hash_save = (struct ip_set_hash_save *) (data + used);
2512 + used += sizeof(struct ip_set_hash_save);
2513 +
2514 + /* hash_save->id is used to store the index */
2515 + index = ip_set_find_byindex(hash_save->id);
2516 + DP("restore binding index %u, id %u, %u -> %u",
2517 + index, hash_save->id, hash_save->ip, hash_save->binding);
2518 + if (index != hash_save->id)
2519 + return line;
2520 + if (ip_set_find_byindex(hash_save->binding) == IP_SET_INVALID_ID) {
2521 + DP("corrupt binding set index %u", hash_save->binding);
2522 + return line;
2523 + }
2524 + set = ip_set_list[hash_save->id];
2525 + /* Null valued IP means default binding */
2526 + if (hash_save->ip)
2527 + res = ip_set_hash_add(set->id,
2528 + hash_save->ip,
2529 + hash_save->binding);
2530 + else {
2531 + IP_SET_ASSERT(set->binding == IP_SET_INVALID_ID);
2532 + write_lock_bh(&ip_set_lock);
2533 + set->binding = hash_save->binding;
2534 + __ip_set_get(set->binding);
2535 + write_unlock_bh(&ip_set_lock);
2536 + DP("default binding: %u", set->binding);
2537 + }
2538 + if (res != 0)
2539 + return line;
2540 + }
2541 + if (used != len)
2542 + return line;
2543 +
2544 + return 0;
2545 +}
2546 +
2547 +static int
2548 +ip_set_sockfn_set(struct sock *sk, int optval, void *user, unsigned int len)
2549 +{
2550 + void *data;
2551 + int res = 0; /* Assume OK */
2552 + unsigned *op;
2553 + struct ip_set_req_adt *req_adt;
2554 + ip_set_id_t index = IP_SET_INVALID_ID;
2555 + int (*adtfn)(ip_set_id_t index,
2556 + const void *data, size_t size);
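+	/* Table order must match the IP_SET_OP_ADD_IP ...
+	 * IP_SET_OP_TEST_BIND_SET opcodes: the opcode is used
+	 * directly as an index into this table below. */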
2557 + struct fn_table {
2558 + int (*fn)(ip_set_id_t index,
2559 + const void *data, size_t size);
2560 + } adtfn_table[] =
2561 + { { ip_set_addip }, { ip_set_delip }, { ip_set_testip},
2562 + { ip_set_bindip}, { ip_set_unbindip }, { ip_set_testbind },
2563 + };
2564 +
2565 + DP("optval=%d, user=%p, len=%d", optval, user, len);
2566 + if (!capable(CAP_NET_ADMIN))
2567 + return -EPERM;
2568 + if (optval != SO_IP_SET)
2569 + return -EBADF;
2570 + if (len <= sizeof(unsigned)) {
2571 + ip_set_printk("short userdata (want >%zu, got %u)",
2572 + sizeof(unsigned), len);
2573 + return -EINVAL;
2574 + }
2575 + data = vmalloc(len);
2576 + if (!data) {
2577 + DP("out of mem for %u bytes", len);
2578 + return -ENOMEM;
2579 + }
2580 + if (copy_from_user(data, user, len) != 0) {
2581 + res = -EFAULT;
2582 + goto done;
2583 + }
2584 + if (down_interruptible(&ip_set_app_mutex)) {
2585 + res = -EINTR;
2586 + goto done;
2587 + }
2588 +
2589 + op = (unsigned *)data;
2590 + DP("op=%x", *op);
2591 +
2592 + if (*op < IP_SET_OP_VERSION) {
2593 + /* Check the version at the beginning of operations */
2594 + struct ip_set_req_version *req_version =
2595 + (struct ip_set_req_version *) data;
2596 + if (req_version->version != IP_SET_PROTOCOL_VERSION) {
2597 + res = -EPROTO;
2598 + goto done;
2599 + }
2600 + }
2601 +
2602 + switch (*op) {
2603 + case IP_SET_OP_CREATE:{
2604 + struct ip_set_req_create *req_create
2605 + = (struct ip_set_req_create *) data;
2606 +
2607 + if (len < sizeof(struct ip_set_req_create)) {
2608 + ip_set_printk("short CREATE data (want >=%zu, got %u)",
2609 + sizeof(struct ip_set_req_create), len);
2610 + res = -EINVAL;
2611 + goto done;
2612 + }
2613 + req_create->name[IP_SET_MAXNAMELEN - 1] = '\0';
2614 + req_create->typename[IP_SET_MAXNAMELEN - 1] = '\0';
2615 + res = ip_set_create(req_create->name,
2616 + req_create->typename,
2617 + IP_SET_INVALID_ID,
2618 + data + sizeof(struct ip_set_req_create),
2619 + len - sizeof(struct ip_set_req_create));
2620 + goto done;
2621 + }
2622 + case IP_SET_OP_DESTROY:{
2623 + struct ip_set_req_std *req_destroy
2624 + = (struct ip_set_req_std *) data;
2625 +
2626 + if (len != sizeof(struct ip_set_req_std)) {
2627 + ip_set_printk("invalid DESTROY data (want %zu, got %u)",
2628 + sizeof(struct ip_set_req_std), len);
2629 + res = -EINVAL;
2630 + goto done;
2631 + }
2632 + if (strcmp(req_destroy->name, IPSET_TOKEN_ALL) == 0) {
2633 + /* Destroy all sets */
2634 + index = IP_SET_INVALID_ID;
2635 + } else {
2636 + req_destroy->name[IP_SET_MAXNAMELEN - 1] = '\0';
2637 + index = ip_set_find_byname(req_destroy->name);
2638 +
2639 + if (index == IP_SET_INVALID_ID) {
2640 + res = -ENOENT;
2641 + goto done;
2642 + }
2643 + }
2644 +
2645 + res = ip_set_destroy(index);
2646 + goto done;
2647 + }
2648 + case IP_SET_OP_FLUSH:{
2649 + struct ip_set_req_std *req_flush =
2650 + (struct ip_set_req_std *) data;
2651 +
2652 + if (len != sizeof(struct ip_set_req_std)) {
2653 + ip_set_printk("invalid FLUSH data (want %zu, got %u)",
2654 + sizeof(struct ip_set_req_std), len);
2655 + res = -EINVAL;
2656 + goto done;
2657 + }
2658 + if (strcmp(req_flush->name, IPSET_TOKEN_ALL) == 0) {
2659 + /* Flush all sets */
2660 + index = IP_SET_INVALID_ID;
2661 + } else {
2662 + req_flush->name[IP_SET_MAXNAMELEN - 1] = '\0';
2663 + index = ip_set_find_byname(req_flush->name);
2664 +
2665 + if (index == IP_SET_INVALID_ID) {
2666 + res = -ENOENT;
2667 + goto done;
2668 + }
2669 + }
2670 + res = ip_set_flush(index);
2671 + goto done;
2672 + }
2673 + case IP_SET_OP_RENAME:{
2674 + struct ip_set_req_create *req_rename
2675 + = (struct ip_set_req_create *) data;
2676 +
2677 + if (len != sizeof(struct ip_set_req_create)) {
2678 + ip_set_printk("invalid RENAME data (want %zu, got %u)",
2679 + sizeof(struct ip_set_req_create), len);
2680 + res = -EINVAL;
2681 + goto done;
2682 + }
2683 +
2684 + req_rename->name[IP_SET_MAXNAMELEN - 1] = '\0';
2685 + req_rename->typename[IP_SET_MAXNAMELEN - 1] = '\0';
2686 +
2687 + index = ip_set_find_byname(req_rename->name);
2688 + if (index == IP_SET_INVALID_ID) {
2689 + res = -ENOENT;
2690 + goto done;
2691 + }
2692 + res = ip_set_rename(index, req_rename->typename);
2693 + goto done;
2694 + }
2695 + case IP_SET_OP_SWAP:{
2696 + struct ip_set_req_create *req_swap
2697 + = (struct ip_set_req_create *) data;
2698 + ip_set_id_t to_index;
2699 +
2700 + if (len != sizeof(struct ip_set_req_create)) {
2701 + ip_set_printk("invalid SWAP data (want %zu, got %u)",
2702 + sizeof(struct ip_set_req_create), len);
2703 + res = -EINVAL;
2704 + goto done;
2705 + }
2706 +
2707 + req_swap->name[IP_SET_MAXNAMELEN - 1] = '\0';
2708 + req_swap->typename[IP_SET_MAXNAMELEN - 1] = '\0';
2709 +
2710 + index = ip_set_find_byname(req_swap->name);
2711 + if (index == IP_SET_INVALID_ID) {
2712 + res = -ENOENT;
2713 + goto done;
2714 + }
2715 + to_index = ip_set_find_byname(req_swap->typename);
2716 + if (to_index == IP_SET_INVALID_ID) {
2717 + res = -ENOENT;
2718 + goto done;
2719 + }
2720 + res = ip_set_swap(index, to_index);
2721 + goto done;
2722 + }
2723 + default:
2724 + break; /* Set identified by id */
2725 + }
2726 +
2727 + /* There we may have add/del/test/bind/unbind/test_bind operations */
2728 + if (*op < IP_SET_OP_ADD_IP || *op > IP_SET_OP_TEST_BIND_SET) {
2729 + res = -EBADMSG;
2730 + goto done;
2731 + }
2732 + adtfn = adtfn_table[*op - IP_SET_OP_ADD_IP].fn;
2733 +
2734 + if (len < sizeof(struct ip_set_req_adt)) {
2735 + ip_set_printk("short data in adt request (want >=%zu, got %u)",
2736 + sizeof(struct ip_set_req_adt), len);
2737 + res = -EINVAL;
2738 + goto done;
2739 + }
2740 + req_adt = (struct ip_set_req_adt *) data;
2741 +
2742 + /* -U :all: :all:|:default: uses IP_SET_INVALID_ID */
2743 + if (!(*op == IP_SET_OP_UNBIND_SET
2744 + && req_adt->index == IP_SET_INVALID_ID)) {
2745 + index = ip_set_find_byindex(req_adt->index);
2746 + if (index == IP_SET_INVALID_ID) {
2747 + res = -ENOENT;
2748 + goto done;
2749 + }
2750 + }
2751 + res = adtfn(index, data, len);
2752 +
2753 + done:
2754 + up(&ip_set_app_mutex);
2755 + vfree(data);
2756 + if (res > 0)
2757 + res = 0;
2758 + DP("final result %d", res);
2759 + return res;
2760 +}
2761 +
2762 +static int
2763 +ip_set_sockfn_get(struct sock *sk, int optval, void *user, int *len)
2764 +{
2765 + int res = 0;
2766 + unsigned *op;
2767 + ip_set_id_t index = IP_SET_INVALID_ID;
2768 + void *data;
2769 + int copylen = *len;
2770 +
2771 + DP("optval=%d, user=%p, len=%d", optval, user, *len);
2772 + if (!capable(CAP_NET_ADMIN))
2773 + return -EPERM;
2774 + if (optval != SO_IP_SET)
2775 + return -EBADF;
2776 + if (*len < sizeof(unsigned)) {
2777 + ip_set_printk("short userdata (want >=%zu, got %d)",
2778 + sizeof(unsigned), *len);
2779 + return -EINVAL;
2780 + }
2781 + data = vmalloc(*len);
2782 + if (!data) {
2783 + DP("out of mem for %d bytes", *len);
2784 + return -ENOMEM;
2785 + }
2786 + if (copy_from_user(data, user, *len) != 0) {
2787 + res = -EFAULT;
2788 + goto done;
2789 + }
2790 + if (down_interruptible(&ip_set_app_mutex)) {
2791 + res = -EINTR;
2792 + goto done;
2793 + }
2794 +
2795 + op = (unsigned *) data;
2796 + DP("op=%x", *op);
2797 +
2798 + if (*op < IP_SET_OP_VERSION) {
2799 + /* Check the version at the beginning of operations */
2800 + struct ip_set_req_version *req_version =
2801 + (struct ip_set_req_version *) data;
2802 + if (req_version->version != IP_SET_PROTOCOL_VERSION) {
2803 + res = -EPROTO;
2804 + goto done;
2805 + }
2806 + }
2807 +
2808 + switch (*op) {
2809 + case IP_SET_OP_VERSION: {
2810 + struct ip_set_req_version *req_version =
2811 + (struct ip_set_req_version *) data;
2812 +
2813 + if (*len != sizeof(struct ip_set_req_version)) {
2814 + ip_set_printk("invalid VERSION (want %zu, got %d)",
2815 + sizeof(struct ip_set_req_version),
2816 + *len);
2817 + res = -EINVAL;
2818 + goto done;
2819 + }
2820 +
2821 + req_version->version = IP_SET_PROTOCOL_VERSION;
2822 + res = copy_to_user(user, req_version,
2823 + sizeof(struct ip_set_req_version));
2824 + goto done;
2825 + }
2826 + case IP_SET_OP_GET_BYNAME: {
2827 + struct ip_set_req_get_set *req_get
2828 + = (struct ip_set_req_get_set *) data;
2829 +
2830 + if (*len != sizeof(struct ip_set_req_get_set)) {
2831 + ip_set_printk("invalid GET_BYNAME (want %zu, got %d)",
2832 + sizeof(struct ip_set_req_get_set), *len);
2833 + res = -EINVAL;
2834 + goto done;
2835 + }
2836 + req_get->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
2837 + index = ip_set_find_byname(req_get->set.name);
2838 + req_get->set.index = index;
2839 + goto copy;
2840 + }
2841 + case IP_SET_OP_GET_BYINDEX: {
2842 + struct ip_set_req_get_set *req_get
2843 + = (struct ip_set_req_get_set *) data;
2844 +
2845 + if (*len != sizeof(struct ip_set_req_get_set)) {
2846 + ip_set_printk("invalid GET_BYINDEX (want %zu, got %d)",
2847 + sizeof(struct ip_set_req_get_set), *len);
2848 + res = -EINVAL;
2849 + goto done;
2850 + }
2851 + req_get->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
2852 + index = ip_set_find_byindex(req_get->set.index);
2853 + strncpy(req_get->set.name,
2854 + index == IP_SET_INVALID_ID ? ""
2855 + : ip_set_list[index]->name, IP_SET_MAXNAMELEN);
2856 + goto copy;
2857 + }
2858 + case IP_SET_OP_ADT_GET: {
2859 + struct ip_set_req_adt_get *req_get
2860 + = (struct ip_set_req_adt_get *) data;
2861 +
2862 + if (*len != sizeof(struct ip_set_req_adt_get)) {
2863 + ip_set_printk("invalid ADT_GET (want %zu, got %d)",
2864 + sizeof(struct ip_set_req_adt_get), *len);
2865 + res = -EINVAL;
2866 + goto done;
2867 + }
2868 + req_get->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
2869 + index = ip_set_find_byname(req_get->set.name);
2870 + if (index != IP_SET_INVALID_ID) {
2871 + req_get->set.index = index;
2872 + strncpy(req_get->typename,
2873 + ip_set_list[index]->type->typename,
2874 + IP_SET_MAXNAMELEN - 1);
2875 + } else {
2876 + res = -ENOENT;
2877 + goto done;
2878 + }
2879 + goto copy;
2880 + }
2881 + case IP_SET_OP_MAX_SETS: {
2882 + struct ip_set_req_max_sets *req_max_sets
2883 + = (struct ip_set_req_max_sets *) data;
2884 + ip_set_id_t i;
2885 +
2886 + if (*len != sizeof(struct ip_set_req_max_sets)) {
2887 + ip_set_printk("invalid MAX_SETS (want %zu, got %d)",
2888 + sizeof(struct ip_set_req_max_sets), *len);
2889 + res = -EINVAL;
2890 + goto done;
2891 + }
2892 +
2893 + if (strcmp(req_max_sets->set.name, IPSET_TOKEN_ALL) == 0) {
2894 + req_max_sets->set.index = IP_SET_INVALID_ID;
2895 + } else {
2896 + req_max_sets->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
2897 + req_max_sets->set.index =
2898 + ip_set_find_byname(req_max_sets->set.name);
2899 + if (req_max_sets->set.index == IP_SET_INVALID_ID) {
2900 + res = -ENOENT;
2901 + goto done;
2902 + }
2903 + }
2904 + req_max_sets->max_sets = ip_set_max;
2905 + req_max_sets->sets = 0;
2906 + for (i = 0; i < ip_set_max; i++) {
2907 + if (ip_set_list[i] != NULL)
2908 + req_max_sets->sets++;
2909 + }
2910 + goto copy;
2911 + }
2912 + case IP_SET_OP_LIST_SIZE:
2913 + case IP_SET_OP_SAVE_SIZE: {
2914 + struct ip_set_req_setnames *req_setnames
2915 + = (struct ip_set_req_setnames *) data;
2916 + struct ip_set_name_list *name_list;
2917 + struct ip_set *set;
2918 + ip_set_id_t i;
2919 + int used;
2920 +
2921 + if (*len < sizeof(struct ip_set_req_setnames)) {
2922 + ip_set_printk("short LIST_SIZE (want >=%zu, got %d)",
2923 + sizeof(struct ip_set_req_setnames), *len);
2924 + res = -EINVAL;
2925 + goto done;
2926 + }
2927 +
2928 + req_setnames->size = 0;
2929 + used = sizeof(struct ip_set_req_setnames);
2930 + for (i = 0; i < ip_set_max; i++) {
2931 + if (ip_set_list[i] == NULL)
2932 + continue;
2933 + name_list = (struct ip_set_name_list *)
2934 + (data + used);
2935 + used += sizeof(struct ip_set_name_list);
2936 + if (used > copylen) {
2937 + res = -EAGAIN;
2938 + goto done;
2939 + }
2940 + set = ip_set_list[i];
2941 + /* Fill in index, name, etc. */
2942 + name_list->index = i;
2943 + name_list->id = set->id;
2944 + strncpy(name_list->name,
2945 + set->name,
2946 + IP_SET_MAXNAMELEN - 1);
2947 + strncpy(name_list->typename,
2948 + set->type->typename,
2949 + IP_SET_MAXNAMELEN - 1);
2950 + DP("filled %s of type %s, index %u\n",
2951 + name_list->name, name_list->typename,
2952 + name_list->index);
2953 + if (!(req_setnames->index == IP_SET_INVALID_ID
2954 + || req_setnames->index == i))
2955 + continue;
2956 + /* Update size */
2957 + switch (*op) {
2958 + case IP_SET_OP_LIST_SIZE: {
2959 + req_setnames->size += sizeof(struct ip_set_list)
2960 + + set->type->header_size
2961 + + set->type->list_members_size(set);
2962 + /* Sets are identified by id in the hash */
2963 + FOREACH_HASH_DO(__set_hash_bindings_size_list,
2964 + set->id, &req_setnames->size);
2965 + break;
2966 + }
2967 + case IP_SET_OP_SAVE_SIZE: {
2968 + req_setnames->size += sizeof(struct ip_set_save)
2969 + + set->type->header_size
2970 + + set->type->list_members_size(set);
2971 + FOREACH_HASH_DO(__set_hash_bindings_size_save,
2972 + set->id, &req_setnames->size);
2973 + break;
2974 + }
2975 + default:
2976 + break;
2977 + }
2978 + }
2979 + if (copylen != used) {
2980 + res = -EAGAIN;
2981 + goto done;
2982 + }
2983 + goto copy;
2984 + }
2985 + case IP_SET_OP_LIST: {
2986 + struct ip_set_req_list *req_list
2987 + = (struct ip_set_req_list *) data;
2988 + ip_set_id_t i;
2989 + int used;
2990 +
2991 + if (*len < sizeof(struct ip_set_req_list)) {
2992 + ip_set_printk("short LIST (want >=%zu, got %d)",
2993 + sizeof(struct ip_set_req_list), *len);
2994 + res = -EINVAL;
2995 + goto done;
2996 + }
2997 + index = req_list->index;
2998 + if (index != IP_SET_INVALID_ID
2999 + && ip_set_find_byindex(index) != index) {
3000 + res = -ENOENT;
3001 + goto done;
3002 + }
3003 + used = 0;
3004 + if (index == IP_SET_INVALID_ID) {
3005 + /* List all sets */
3006 + for (i = 0; i < ip_set_max && res == 0; i++) {
3007 + if (ip_set_list[i] != NULL)
3008 + res = ip_set_list_set(i, data, &used, *len);
3009 + }
3010 + } else {
3011 + /* List an individual set */
3012 + res = ip_set_list_set(index, data, &used, *len);
3013 + }
3014 + if (res != 0)
3015 + goto done;
3016 + else if (copylen != used) {
3017 + res = -EAGAIN;
3018 + goto done;
3019 + }
3020 + goto copy;
3021 + }
3022 + case IP_SET_OP_SAVE: {
3023 + struct ip_set_req_list *req_save
3024 + = (struct ip_set_req_list *) data;
3025 + ip_set_id_t i;
3026 + int used;
3027 +
3028 + if (*len < sizeof(struct ip_set_req_list)) {
3029 + ip_set_printk("short SAVE (want >=%zu, got %d)",
3030 + sizeof(struct ip_set_req_list), *len);
3031 + res = -EINVAL;
3032 + goto done;
3033 + }
3034 + index = req_save->index;
3035 + if (index != IP_SET_INVALID_ID
3036 + && ip_set_find_byindex(index) != index) {
3037 + res = -ENOENT;
3038 + goto done;
3039 + }
3040 + used = 0;
3041 + if (index == IP_SET_INVALID_ID) {
3042 + /* Save all sets */
3043 + for (i = 0; i < ip_set_max && res == 0; i++) {
3044 + if (ip_set_list[i] != NULL)
3045 + res = ip_set_save_set(i, data, &used, *len);
3046 + }
3047 + } else {
3048 + /* Save an individual set */
3049 + res = ip_set_save_set(index, data, &used, *len);
3050 + }
3051 + if (res == 0)
3052 + res = ip_set_save_bindings(index, data, &used, *len);
3053 +
3054 + if (res != 0)
3055 + goto done;
3056 + else if (copylen != used) {
3057 + res = -EAGAIN;
3058 + goto done;
3059 + }
3060 + goto copy;
3061 + }
3062 + case IP_SET_OP_RESTORE: {
3063 + struct ip_set_req_setnames *req_restore
3064 + = (struct ip_set_req_setnames *) data;
3065 + int line;
3066 +
3067 + if (*len < sizeof(struct ip_set_req_setnames)
3068 + || *len != req_restore->size) {
3069 + ip_set_printk("invalid RESTORE (want =%zu, got %d)",
3070 + req_restore->size, *len);
3071 + res = -EINVAL;
3072 + goto done;
3073 + }
3074 + line = ip_set_restore(data + sizeof(struct ip_set_req_setnames),
3075 + req_restore->size - sizeof(struct ip_set_req_setnames));
3076 + DP("ip_set_restore: %u", line);
3077 + if (line != 0) {
3078 + res = -EAGAIN;
3079 + req_restore->size = line;
3080 + copylen = sizeof(struct ip_set_req_setnames);
3081 + goto copy;
3082 + }
3083 + goto done;
3084 + }
3085 + default:
3086 + res = -EBADMSG;
3087 + goto done;
3088 + } /* end of switch(op) */
3089 +
3090 + copy:
3091 + DP("set %s, copylen %u", index != IP_SET_INVALID_ID
3092 + && ip_set_list[index]
3093 + ? ip_set_list[index]->name
3094 + : ":all:", copylen);
3095 + res = copy_to_user(user, data, copylen);
3096 +
3097 + done:
3098 + up(&ip_set_app_mutex);
3099 + vfree(data);
3100 + if (res > 0)
3101 + res = 0;
3102 + DP("final result %d", res);
3103 + return res;
3104 +}
3105 +
3106 +static struct nf_sockopt_ops so_set = {
3107 + .pf = PF_INET,
3108 + .set_optmin = SO_IP_SET,
3109 + .set_optmax = SO_IP_SET + 1,
3110 + .set = &ip_set_sockfn_set,
3111 + .get_optmin = SO_IP_SET,
3112 + .get_optmax = SO_IP_SET + 1,
3113 + .get = &ip_set_sockfn_get,
3114 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
3115 + .owner = THIS_MODULE,
3116 +#endif
3117 +};
3118 +
3119 +static int max_sets, hash_size;
3120 +module_param(max_sets, int, 0600);
3121 +MODULE_PARM_DESC(max_sets, "maximal number of sets");
3122 +module_param(hash_size, int, 0600);
3123 +MODULE_PARM_DESC(hash_size, "hash size for bindings");
3124 +MODULE_LICENSE("GPL");
3125 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
3126 +MODULE_DESCRIPTION("module implementing core IP set support");
3127 +
3128 +static int __init ip_set_init(void)
3129 +{
3130 + int res;
3131 + ip_set_id_t i;
3132 +
3133 + get_random_bytes(&ip_set_hash_random, 4);
3134 + if (max_sets)
3135 + ip_set_max = max_sets;
3136 + ip_set_list = vmalloc(sizeof(struct ip_set *) * ip_set_max);
3137 + if (!ip_set_list) {
3138 + printk(KERN_ERR "Unable to create ip_set_list\n");
3139 + return -ENOMEM;
3140 + }
3141 + memset(ip_set_list, 0, sizeof(struct ip_set *) * ip_set_max);
3142 + if (hash_size)
3143 + ip_set_bindings_hash_size = hash_size;
3144 + ip_set_hash = vmalloc(sizeof(struct list_head) * ip_set_bindings_hash_size);
3145 + if (!ip_set_hash) {
3146 + printk(KERN_ERR "Unable to create ip_set_hash\n");
3147 + vfree(ip_set_list);
3148 + return -ENOMEM;
3149 + }
3150 + for (i = 0; i < ip_set_bindings_hash_size; i++)
3151 + INIT_LIST_HEAD(&ip_set_hash[i]);
3152 +
3153 + INIT_LIST_HEAD(&set_type_list);
3154 +
3155 + res = nf_register_sockopt(&so_set);
3156 + if (res != 0) {
3157 + ip_set_printk("SO_SET registry failed: %d", res);
3158 + vfree(ip_set_list);
3159 + vfree(ip_set_hash);
3160 + return res;
3161 + }
3162 + return 0;
3163 +}
3164 +
3165 +static void __exit ip_set_fini(void)
3166 +{
3167 + /* There can't be any existing set or binding */
3168 + nf_unregister_sockopt(&so_set);
3169 + vfree(ip_set_list);
3170 + vfree(ip_set_hash);
3171 + DP("these are the famous last words");
3172 +}
3173 +
3174 +EXPORT_SYMBOL(ip_set_register_set_type);
3175 +EXPORT_SYMBOL(ip_set_unregister_set_type);
3176 +
3177 +EXPORT_SYMBOL(ip_set_get_byname);
3178 +EXPORT_SYMBOL(ip_set_get_byindex);
3179 +EXPORT_SYMBOL(ip_set_put);
3180 +
3181 +EXPORT_SYMBOL(ip_set_addip_kernel);
3182 +EXPORT_SYMBOL(ip_set_delip_kernel);
3183 +EXPORT_SYMBOL(ip_set_testip_kernel);
3184 +
3185 +module_init(ip_set_init);
3186 +module_exit(ip_set_fini);
3187 Index: linux-2.6.21.7/net/ipv4/netfilter/ip_set_iphash.c
3188 ===================================================================
3189 --- /dev/null
3190 +++ linux-2.6.21.7/net/ipv4/netfilter/ip_set_iphash.c
3191 @@ -0,0 +1,429 @@
3192 +/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
3193 + *
3194 + * This program is free software; you can redistribute it and/or modify
3195 + * it under the terms of the GNU General Public License version 2 as
3196 + * published by the Free Software Foundation.
3197 + */
3198 +
3199 +/* Kernel module implementing an ip hash set */
3200 +
3201 +#include <linux/module.h>
3202 +#include <linux/ip.h>
3203 +#include <linux/skbuff.h>
3204 +#include <linux/version.h>
3205 +#include <linux/jhash.h>
3206 +#include <linux/netfilter_ipv4/ip_tables.h>
3207 +#include <linux/netfilter_ipv4/ip_set.h>
3208 +#include <linux/errno.h>
3209 +#include <asm/uaccess.h>
3210 +#include <asm/bitops.h>
3211 +#include <linux/spinlock.h>
3212 +#include <linux/vmalloc.h>
3213 +#include <linux/random.h>
3214 +
3215 +#include <net/ip.h>
3216 +
3217 +#include <linux/netfilter_ipv4/ip_set_malloc.h>
3218 +#include <linux/netfilter_ipv4/ip_set_iphash.h>
3219 +
3220 +static int limit = MAX_RANGE;
3221 +
3222 +static inline __u32
3223 +jhash_ip(const struct ip_set_iphash *map, uint16_t i, ip_set_ip_t ip)
3224 +{
3225 + return jhash_1word(ip, *(((uint32_t *) map->initval) + i));
3226 +}
3227 +
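+/*
+ * Return the slot of 'ip' in the hash: each of the 'probes' jhash
+ * rounds (keyed by its own random initval) yields a candidate slot,
+ * and the first one holding the masked address wins. UINT_MAX means
+ * the address is not in the set.
+ */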
3228 +static inline __u32
3229 +hash_id(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
3230 +{
3231 + struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
3232 + __u32 id;
3233 + u_int16_t i;
3234 + ip_set_ip_t *elem;
3235 +
3236 + *hash_ip = ip & map->netmask;
3237 + DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u, %u.%u.%u.%u",
3238 + set->name, HIPQUAD(ip), HIPQUAD(*hash_ip), HIPQUAD(map->netmask));
3239 +
3240 + for (i = 0; i < map->probes; i++) {
3241 + id = jhash_ip(map, i, *hash_ip) % map->hashsize;
3242 + DP("hash key: %u", id);
3243 + elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
3244 + if (*elem == *hash_ip)
3245 + return id;
3246 + /* No shortcut at testing - there can be deleted
3247 + * entries. */
3248 + }
3249 + return UINT_MAX;
3250 +}
3251 +
3252 +static inline int
3253 +__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
3254 +{
3255 + return (ip && hash_id(set, ip, hash_ip) != UINT_MAX);
3256 +}
3257 +
3258 +static int
3259 +testip(struct ip_set *set, const void *data, size_t size,
3260 + ip_set_ip_t *hash_ip)
3261 +{
3262 + struct ip_set_req_iphash *req =
3263 + (struct ip_set_req_iphash *) data;
3264 +
3265 + if (size != sizeof(struct ip_set_req_iphash)) {
3266 + ip_set_printk("data length wrong (want %zu, have %zu)",
3267 + sizeof(struct ip_set_req_iphash),
3268 + size);
3269 + return -EINVAL;
3270 + }
3271 + return __testip(set, req->ip, hash_ip);
3272 +}
3273 +
3274 +static int
3275 +testip_kernel(struct ip_set *set,
3276 + const struct sk_buff *skb,
3277 + ip_set_ip_t *hash_ip,
3278 + const u_int32_t *flags,
3279 + unsigned char index)
3280 +{
3281 + return __testip(set,
3282 + ntohl(flags[index] & IPSET_SRC
3283 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
3284 + ? ip_hdr(skb)->saddr
3285 + : ip_hdr(skb)->daddr),
3286 +#else
3287 + ? skb->nh.iph->saddr
3288 + : skb->nh.iph->daddr),
3289 +#endif
3290 + hash_ip);
3291 +}
3292 +
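+/*
+ * Store the masked address in the first free probe slot: -EEXIST if
+ * it is already present, -ERANGE if the set is full or ip is zero,
+ * -EAGAIN when every probe slot is occupied, so that rehashing can
+ * be triggered via retry() below.
+ */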
3293 +static inline int
3294 +__addip(struct ip_set_iphash *map, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
3295 +{
3296 + __u32 probe;
3297 + u_int16_t i;
3298 + ip_set_ip_t *elem;
3299 +
3300 + if (!ip || map->elements >= limit)
3301 + return -ERANGE;
3302 +
3303 + *hash_ip = ip & map->netmask;
3304 +
3305 + for (i = 0; i < map->probes; i++) {
3306 + probe = jhash_ip(map, i, *hash_ip) % map->hashsize;
3307 + elem = HARRAY_ELEM(map->members, ip_set_ip_t *, probe);
3308 + if (*elem == *hash_ip)
3309 + return -EEXIST;
3310 + if (!*elem) {
3311 + *elem = *hash_ip;
3312 + map->elements++;
3313 + return 0;
3314 + }
3315 + }
3316 + /* Trigger rehashing */
3317 + return -EAGAIN;
3318 +}
3319 +
3320 +static int
3321 +addip(struct ip_set *set, const void *data, size_t size,
3322 + ip_set_ip_t *hash_ip)
3323 +{
3324 + struct ip_set_req_iphash *req =
3325 + (struct ip_set_req_iphash *) data;
3326 +
3327 + if (size != sizeof(struct ip_set_req_iphash)) {
3328 + ip_set_printk("data length wrong (want %zu, have %zu)",
3329 + sizeof(struct ip_set_req_iphash),
3330 + size);
3331 + return -EINVAL;
3332 + }
3333 + return __addip((struct ip_set_iphash *) set->data, req->ip, hash_ip);
3334 +}
3335 +
3336 +static int
3337 +addip_kernel(struct ip_set *set,
3338 + const struct sk_buff *skb,
3339 + ip_set_ip_t *hash_ip,
3340 + const u_int32_t *flags,
3341 + unsigned char index)
3342 +{
3343 + return __addip((struct ip_set_iphash *) set->data,
3344 + ntohl(flags[index] & IPSET_SRC
3345 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
3346 + ? ip_hdr(skb)->saddr
3347 + : ip_hdr(skb)->daddr),
3348 +#else
3349 + ? skb->nh.iph->saddr
3350 + : skb->nh.iph->daddr),
3351 +#endif
3352 + hash_ip);
3353 +}
3354 +
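+/*
+ * Rehash: grow the hash by 'resize' percent, reinsert every existing
+ * element into the new array and swap it in under set->lock. If the
+ * bigger table still overflows, grow it again.
+ */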
3355 +static int retry(struct ip_set *set)
3356 +{
3357 + struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
3358 + ip_set_ip_t hash_ip, *elem;
3359 + void *members;
3360 + u_int32_t i, hashsize = map->hashsize;
3361 + int res;
3362 + struct ip_set_iphash *tmp;
3363 +
3364 + if (map->resize == 0)
3365 + return -ERANGE;
3366 +
3367 + again:
3368 + res = 0;
3369 +
3370 + /* Calculate new hash size */
3371 + hashsize += (hashsize * map->resize)/100;
3372 + if (hashsize == map->hashsize)
3373 + hashsize++;
3374 +
3375 + ip_set_printk("rehashing of set %s triggered: "
3376 + "hashsize grows from %u to %u",
3377 + set->name, map->hashsize, hashsize);
3378 +
3379 + tmp = kmalloc(sizeof(struct ip_set_iphash)
3380 + + map->probes * sizeof(uint32_t), GFP_ATOMIC);
3381 + if (!tmp) {
3382 + DP("out of memory for %d bytes",
3383 + sizeof(struct ip_set_iphash)
3384 + + map->probes * sizeof(uint32_t));
3385 + return -ENOMEM;
3386 + }
3387 + tmp->members = harray_malloc(hashsize, sizeof(ip_set_ip_t), GFP_ATOMIC);
3388 + if (!tmp->members) {
3389 + DP("out of memory for %d bytes", hashsize * sizeof(ip_set_ip_t));
3390 + kfree(tmp);
3391 + return -ENOMEM;
3392 + }
3393 + tmp->hashsize = hashsize;
3394 + tmp->elements = 0;
3395 + tmp->probes = map->probes;
3396 + tmp->resize = map->resize;
3397 + tmp->netmask = map->netmask;
3398 + memcpy(tmp->initval, map->initval, map->probes * sizeof(uint32_t));
3399 +
3400 + write_lock_bh(&set->lock);
3401 + map = (struct ip_set_iphash *) set->data; /* Play safe */
3402 + for (i = 0; i < map->hashsize && res == 0; i++) {
3403 + elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
3404 + if (*elem)
3405 + res = __addip(tmp, *elem, &hash_ip);
3406 + }
3407 + if (res) {
3408 + /* Failure, try again */
3409 + write_unlock_bh(&set->lock);
3410 + harray_free(tmp->members);
3411 + kfree(tmp);
3412 + goto again;
3413 + }
3414 +
3415 + /* Success at resizing! */
3416 + members = map->members;
3417 +
3418 + map->hashsize = tmp->hashsize;
3419 + map->members = tmp->members;
3420 + write_unlock_bh(&set->lock);
3421 +
3422 + harray_free(members);
3423 + kfree(tmp);
3424 +
3425 + return 0;
3426 +}
3427 +
3428 +static inline int
3429 +__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
3430 +{
3431 + struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
3432 + ip_set_ip_t id, *elem;
3433 +
3434 + if (!ip)
3435 + return -ERANGE;
3436 +
3437 + id = hash_id(set, ip, hash_ip);
3438 + if (id == UINT_MAX)
3439 + return -EEXIST;
3440 +
3441 + elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
3442 + *elem = 0;
3443 + map->elements--;
3444 +
3445 + return 0;
3446 +}
3447 +
3448 +static int
3449 +delip(struct ip_set *set, const void *data, size_t size,
3450 + ip_set_ip_t *hash_ip)
3451 +{
3452 + struct ip_set_req_iphash *req =
3453 + (struct ip_set_req_iphash *) data;
3454 +
3455 + if (size != sizeof(struct ip_set_req_iphash)) {
3456 + ip_set_printk("data length wrong (want %zu, have %zu)",
3457 + sizeof(struct ip_set_req_iphash),
3458 + size);
3459 + return -EINVAL;
3460 + }
3461 + return __delip(set, req->ip, hash_ip);
3462 +}
3463 +
3464 +static int
3465 +delip_kernel(struct ip_set *set,
3466 + const struct sk_buff *skb,
3467 + ip_set_ip_t *hash_ip,
3468 + const u_int32_t *flags,
3469 + unsigned char index)
3470 +{
3471 + return __delip(set,
3472 + ntohl(flags[index] & IPSET_SRC
3473 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
3474 + ? ip_hdr(skb)->saddr
3475 + : ip_hdr(skb)->daddr),
3476 +#else
3477 + ? skb->nh.iph->saddr
3478 + : skb->nh.iph->daddr),
3479 +#endif
3480 + hash_ip);
3481 +}
3482 +
3483 +static int create(struct ip_set *set, const void *data, size_t size)
3484 +{
3485 + struct ip_set_req_iphash_create *req =
3486 + (struct ip_set_req_iphash_create *) data;
3487 + struct ip_set_iphash *map;
3488 + uint16_t i;
3489 +
3490 + if (size != sizeof(struct ip_set_req_iphash_create)) {
3491 + ip_set_printk("data length wrong (want %zu, have %zu)",
3492 + sizeof(struct ip_set_req_iphash_create),
3493 + size);
3494 + return -EINVAL;
3495 + }
3496 +
3497 + if (req->hashsize < 1) {
3498 + ip_set_printk("hashsize too small");
3499 + return -ENOEXEC;
3500 + }
3501 +
3502 + if (req->probes < 1) {
3503 + ip_set_printk("probes too small");
3504 + return -ENOEXEC;
3505 + }
3506 +
3507 + map = kmalloc(sizeof(struct ip_set_iphash)
3508 + + req->probes * sizeof(uint32_t), GFP_KERNEL);
3509 + if (!map) {
3510 + DP("out of memory for %d bytes",
3511 + sizeof(struct ip_set_iphash)
3512 + + req->probes * sizeof(uint32_t));
3513 + return -ENOMEM;
3514 + }
3515 + for (i = 0; i < req->probes; i++)
3516 + get_random_bytes(((uint32_t *) map->initval)+i, 4);
3517 + map->elements = 0;
3518 + map->hashsize = req->hashsize;
3519 + map->probes = req->probes;
3520 + map->resize = req->resize;
3521 + map->netmask = req->netmask;
3522 + map->members = harray_malloc(map->hashsize, sizeof(ip_set_ip_t), GFP_KERNEL);
3523 + if (!map->members) {
3524 + DP("out of memory for %d bytes", map->hashsize * sizeof(ip_set_ip_t));
3525 + kfree(map);
3526 + return -ENOMEM;
3527 + }
3528 +
3529 + set->data = map;
3530 + return 0;
3531 +}
3532 +
3533 +static void destroy(struct ip_set *set)
3534 +{
3535 + struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
3536 +
3537 + harray_free(map->members);
3538 + kfree(map);
3539 +
3540 + set->data = NULL;
3541 +}
3542 +
3543 +static void flush(struct ip_set *set)
3544 +{
3545 + struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
3546 + harray_flush(map->members, map->hashsize, sizeof(ip_set_ip_t));
3547 + map->elements = 0;
3548 +}
3549 +
3550 +static void list_header(const struct ip_set *set, void *data)
3551 +{
3552 + struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
3553 + struct ip_set_req_iphash_create *header =
3554 + (struct ip_set_req_iphash_create *) data;
3555 +
3556 + header->hashsize = map->hashsize;
3557 + header->probes = map->probes;
3558 + header->resize = map->resize;
3559 + header->netmask = map->netmask;
3560 +}
3561 +
3562 +static int list_members_size(const struct ip_set *set)
3563 +{
3564 + struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
3565 +
3566 + return (map->hashsize * sizeof(ip_set_ip_t));
3567 +}
3568 +
3569 +static void list_members(const struct ip_set *set, void *data)
3570 +{
3571 + struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
3572 + ip_set_ip_t i, *elem;
3573 +
3574 + for (i = 0; i < map->hashsize; i++) {
3575 + elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
3576 + ((ip_set_ip_t *)data)[i] = *elem;
3577 + }
3578 +}
3579 +
3580 +static struct ip_set_type ip_set_iphash = {
3581 + .typename = SETTYPE_NAME,
3582 + .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
3583 + .protocol_version = IP_SET_PROTOCOL_VERSION,
3584 + .create = &create,
3585 + .destroy = &destroy,
3586 + .flush = &flush,
3587 + .reqsize = sizeof(struct ip_set_req_iphash),
3588 + .addip = &addip,
3589 + .addip_kernel = &addip_kernel,
3590 + .retry = &retry,
3591 + .delip = &delip,
3592 + .delip_kernel = &delip_kernel,
3593 + .testip = &testip,
3594 + .testip_kernel = &testip_kernel,
3595 + .header_size = sizeof(struct ip_set_req_iphash_create),
3596 + .list_header = &list_header,
3597 + .list_members_size = &list_members_size,
3598 + .list_members = &list_members,
3599 + .me = THIS_MODULE,
3600 +};
3601 +
3602 +MODULE_LICENSE("GPL");
3603 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
3604 +MODULE_DESCRIPTION("iphash type of IP sets");
3605 +module_param(limit, int, 0600);
3606 +MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
3607 +
3608 +static int __init ip_set_iphash_init(void)
3609 +{
3610 + return ip_set_register_set_type(&ip_set_iphash);
3611 +}
3612 +
3613 +static void __exit ip_set_iphash_fini(void)
3614 +{
3615 + /* FIXME: possible race with ip_set_create() */
3616 + ip_set_unregister_set_type(&ip_set_iphash);
3617 +}
3618 +
3619 +module_init(ip_set_iphash_init);
3620 +module_exit(ip_set_iphash_fini);
3621 Index: linux-2.6.21.7/net/ipv4/netfilter/ip_set_ipmap.c
3622 ===================================================================
3623 --- /dev/null
3624 +++ linux-2.6.21.7/net/ipv4/netfilter/ip_set_ipmap.c
3625 @@ -0,0 +1,336 @@
3626 +/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
3627 + * Patrick Schaaf <bof@bof.de>
3628 + * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
3629 + *
3630 + * This program is free software; you can redistribute it and/or modify
3631 + * it under the terms of the GNU General Public License version 2 as
3632 + * published by the Free Software Foundation.
3633 + */
3634 +
3635 +/* Kernel module implementing an IP set type: the single bitmap type */
3636 +
3637 +#include <linux/module.h>
3638 +#include <linux/ip.h>
3639 +#include <linux/skbuff.h>
3640 +#include <linux/version.h>
3641 +#include <linux/netfilter_ipv4/ip_tables.h>
3642 +#include <linux/netfilter_ipv4/ip_set.h>
3643 +#include <linux/errno.h>
3644 +#include <asm/uaccess.h>
3645 +#include <asm/bitops.h>
3646 +#include <linux/spinlock.h>
3647 +
3648 +#include <linux/netfilter_ipv4/ip_set_ipmap.h>
3649 +
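+/* Map an address to its bit position: the offset from first_ip in
+ * units of 'hosts' (the size of one netmask block). */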
3650 +static inline ip_set_ip_t
3651 +ip_to_id(const struct ip_set_ipmap *map, ip_set_ip_t ip)
3652 +{
3653 + return (ip - map->first_ip)/map->hosts;
3654 +}
3655 +
3656 +static inline int
3657 +__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
3658 +{
3659 + struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
3660 +
3661 + if (ip < map->first_ip || ip > map->last_ip)
3662 + return -ERANGE;
3663 +
3664 + *hash_ip = ip & map->netmask;
3665 + DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u",
3666 + set->name, HIPQUAD(ip), HIPQUAD(*hash_ip));
3667 + return !!test_bit(ip_to_id(map, *hash_ip), map->members);
3668 +}
3669 +
3670 +static int
3671 +testip(struct ip_set *set, const void *data, size_t size,
3672 + ip_set_ip_t *hash_ip)
3673 +{
3674 + struct ip_set_req_ipmap *req =
3675 + (struct ip_set_req_ipmap *) data;
3676 +
3677 + if (size != sizeof(struct ip_set_req_ipmap)) {
3678 + ip_set_printk("data length wrong (want %zu, have %zu)",
3679 + sizeof(struct ip_set_req_ipmap),
3680 + size);
3681 + return -EINVAL;
3682 + }
3683 + return __testip(set, req->ip, hash_ip);
3684 +}
3685 +
3686 +static int
3687 +testip_kernel(struct ip_set *set,
3688 + const struct sk_buff *skb,
3689 + ip_set_ip_t *hash_ip,
3690 + const u_int32_t *flags,
3691 + unsigned char index)
3692 +{
3693 + int res = __testip(set,
3694 + ntohl(flags[index] & IPSET_SRC
3695 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
3696 + ? ip_hdr(skb)->saddr
3697 + : ip_hdr(skb)->daddr),
3698 +#else
3699 + ? skb->nh.iph->saddr
3700 + : skb->nh.iph->daddr),
3701 +#endif
3702 + hash_ip);
3703 + return (res < 0 ? 0 : res);
3704 +}
3705 +
3706 +static inline int
3707 +__addip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
3708 +{
3709 + struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
3710 +
3711 + if (ip < map->first_ip || ip > map->last_ip)
3712 + return -ERANGE;
3713 +
3714 + *hash_ip = ip & map->netmask;
3715 + DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
3716 + if (test_and_set_bit(ip_to_id(map, *hash_ip), map->members))
3717 + return -EEXIST;
3718 +
3719 + return 0;
3720 +}
3721 +
3722 +static int
3723 +addip(struct ip_set *set, const void *data, size_t size,
3724 + ip_set_ip_t *hash_ip)
3725 +{
3726 + struct ip_set_req_ipmap *req =
3727 + (struct ip_set_req_ipmap *) data;
3728 +
3729 + if (size != sizeof(struct ip_set_req_ipmap)) {
3730 + ip_set_printk("data length wrong (want %zu, have %zu)",
3731 + sizeof(struct ip_set_req_ipmap),
3732 + size);
3733 + return -EINVAL;
3734 + }
3735 + DP("%u.%u.%u.%u", HIPQUAD(req->ip));
3736 + return __addip(set, req->ip, hash_ip);
3737 +}
3738 +
3739 +static int
3740 +addip_kernel(struct ip_set *set,
3741 + const struct sk_buff *skb,
3742 + ip_set_ip_t *hash_ip,
3743 + const u_int32_t *flags,
3744 + unsigned char index)
3745 +{
3746 + return __addip(set,
3747 + ntohl(flags[index] & IPSET_SRC
3748 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
3749 + ? ip_hdr(skb)->saddr
3750 + : ip_hdr(skb)->daddr),
3751 +#else
3752 + ? skb->nh.iph->saddr
3753 + : skb->nh.iph->daddr),
3754 +#endif
3755 + hash_ip);
3756 +}
3757 +
3758 +static inline int
3759 +__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
3760 +{
3761 + struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
3762 +
3763 + if (ip < map->first_ip || ip > map->last_ip)
3764 + return -ERANGE;
3765 +
3766 + *hash_ip = ip & map->netmask;
3767 + DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
3768 + if (!test_and_clear_bit(ip_to_id(map, *hash_ip), map->members))
3769 + return -EEXIST;
3770 +
3771 + return 0;
3772 +}
3773 +
3774 +static int
3775 +delip(struct ip_set *set, const void *data, size_t size,
3776 + ip_set_ip_t *hash_ip)
3777 +{
3778 + struct ip_set_req_ipmap *req =
3779 + (struct ip_set_req_ipmap *) data;
3780 +
3781 + if (size != sizeof(struct ip_set_req_ipmap)) {
3782 + ip_set_printk("data length wrong (want %zu, have %zu)",
3783 + sizeof(struct ip_set_req_ipmap),
3784 + size);
3785 + return -EINVAL;
3786 + }
3787 + return __delip(set, req->ip, hash_ip);
3788 +}
3789 +
3790 +static int
3791 +delip_kernel(struct ip_set *set,
3792 + const struct sk_buff *skb,
3793 + ip_set_ip_t *hash_ip,
3794 + const u_int32_t *flags,
3795 + unsigned char index)
3796 +{
3797 + return __delip(set,
3798 + ntohl(flags[index] & IPSET_SRC
3799 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
3800 + ? ip_hdr(skb)->saddr
3801 + : ip_hdr(skb)->daddr),
3802 +#else
3803 + ? skb->nh.iph->saddr
3804 + : skb->nh.iph->daddr),
3805 +#endif
3806 + hash_ip);
3807 +}
3808 +
3809 +static int create(struct ip_set *set, const void *data, size_t size)
3810 +{
3811 + int newbytes;
3812 + struct ip_set_req_ipmap_create *req =
3813 + (struct ip_set_req_ipmap_create *) data;
3814 + struct ip_set_ipmap *map;
3815 +
3816 + if (size != sizeof(struct ip_set_req_ipmap_create)) {
3817 + ip_set_printk("data length wrong (want %zu, have %zu)",
3818 + sizeof(struct ip_set_req_ipmap_create),
3819 + size);
3820 + return -EINVAL;
3821 + }
3822 +
3823 + DP("from %u.%u.%u.%u to %u.%u.%u.%u",
3824 + HIPQUAD(req->from), HIPQUAD(req->to));
3825 +
3826 + if (req->from > req->to) {
3827 + DP("bad ip range");
3828 + return -ENOEXEC;
3829 + }
3830 +
3831 + map = kmalloc(sizeof(struct ip_set_ipmap), GFP_KERNEL);
3832 + if (!map) {
3833 + DP("out of memory for %d bytes",
3834 + sizeof(struct ip_set_ipmap));
3835 + return -ENOMEM;
3836 + }
3837 + map->first_ip = req->from;
3838 + map->last_ip = req->to;
3839 + map->netmask = req->netmask;
3840 +
3841 + if (req->netmask == 0xFFFFFFFF) {
3842 + map->hosts = 1;
3843 + map->sizeid = map->last_ip - map->first_ip + 1;
3844 + } else {
3845 + unsigned int mask_bits, netmask_bits;
3846 + ip_set_ip_t mask;
3847 +
3848 + map->first_ip &= map->netmask; /* Should we better bark? */
3849 +
3850 + mask = range_to_mask(map->first_ip, map->last_ip, &mask_bits);
3851 + netmask_bits = mask_to_bits(map->netmask);
3852 +
3853 +		if ((!mask && (map->first_ip || map->last_ip != 0xFFFFFFFF))
3854 +		    || netmask_bits <= mask_bits) {
+			kfree(map);
3855 +			return -ENOEXEC;
+		}
3856 +
3857 + DP("mask_bits %u, netmask_bits %u",
3858 + mask_bits, netmask_bits);
3859 + map->hosts = 2 << (32 - netmask_bits - 1);
3860 + map->sizeid = 2 << (netmask_bits - mask_bits - 1);
3861 + }
3862 + if (map->sizeid > MAX_RANGE + 1) {
3863 + ip_set_printk("range too big (max %d addresses)",
3864 + MAX_RANGE+1);
3865 + kfree(map);
3866 + return -ENOEXEC;
3867 + }
3868 + DP("hosts %u, sizeid %u", map->hosts, map->sizeid);
3869 + newbytes = bitmap_bytes(0, map->sizeid - 1);
3870 + map->members = kmalloc(newbytes, GFP_KERNEL);
3871 + if (!map->members) {
3872 + DP("out of memory for %d bytes", newbytes);
3873 + kfree(map);
3874 + return -ENOMEM;
3875 + }
3876 + memset(map->members, 0, newbytes);
3877 +
3878 + set->data = map;
3879 + return 0;
3880 +}
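+/*
+ * Worked example of the arithmetic in create() above, with illustrative
+ * values (not taken from the original patch): a set created with
+ * from = 10.0.0.0, to = 10.0.3.255 and netmask /30 gives
+ * mask_bits = 22 and netmask_bits = 30, hence
+ *   hosts  = 2 << (32 - 30 - 1) = 4    (addresses covered per bit)
+ *   sizeid = 2 << (30 - 22 - 1) = 256  (number of bits in the bitmap)
+ * and ip_to_id() then maps e.g. 10.0.1.8 (offset 264 from first_ip)
+ * to bit 264 / 4 = 66.
+ */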
3881 +
3882 +static void destroy(struct ip_set *set)
3883 +{
3884 + struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
3885 +
3886 + kfree(map->members);
3887 + kfree(map);
3888 +
3889 + set->data = NULL;
3890 +}
3891 +
3892 +static void flush(struct ip_set *set)
3893 +{
3894 + struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
3895 + memset(map->members, 0, bitmap_bytes(0, map->sizeid - 1));
3896 +}
3897 +
3898 +static void list_header(const struct ip_set *set, void *data)
3899 +{
3900 + struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
3901 + struct ip_set_req_ipmap_create *header =
3902 + (struct ip_set_req_ipmap_create *) data;
3903 +
3904 + header->from = map->first_ip;
3905 + header->to = map->last_ip;
3906 + header->netmask = map->netmask;
3907 +}
3908 +
3909 +static int list_members_size(const struct ip_set *set)
3910 +{
3911 + struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
3912 +
3913 + return bitmap_bytes(0, map->sizeid - 1);
3914 +}
3915 +
3916 +static void list_members(const struct ip_set *set, void *data)
3917 +{
3918 + struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
3919 + int bytes = bitmap_bytes(0, map->sizeid - 1);
3920 +
3921 + memcpy(data, map->members, bytes);
3922 +}
3923 +
3924 +static struct ip_set_type ip_set_ipmap = {
3925 + .typename = SETTYPE_NAME,
3926 + .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
3927 + .protocol_version = IP_SET_PROTOCOL_VERSION,
3928 + .create = &create,
3929 + .destroy = &destroy,
3930 + .flush = &flush,
3931 + .reqsize = sizeof(struct ip_set_req_ipmap),
3932 + .addip = &addip,
3933 + .addip_kernel = &addip_kernel,
3934 + .delip = &delip,
3935 + .delip_kernel = &delip_kernel,
3936 + .testip = &testip,
3937 + .testip_kernel = &testip_kernel,
3938 + .header_size = sizeof(struct ip_set_req_ipmap_create),
3939 + .list_header = &list_header,
3940 + .list_members_size = &list_members_size,
3941 + .list_members = &list_members,
3942 + .me = THIS_MODULE,
3943 +};
3944 +
3945 +MODULE_LICENSE("GPL");
3946 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
3947 +MODULE_DESCRIPTION("ipmap type of IP sets");
3948 +
3949 +static int __init ip_set_ipmap_init(void)
3950 +{
3951 + return ip_set_register_set_type(&ip_set_ipmap);
3952 +}
3953 +
3954 +static void __exit ip_set_ipmap_fini(void)
3955 +{
3956 + /* FIXME: possible race with ip_set_create() */
3957 + ip_set_unregister_set_type(&ip_set_ipmap);
3958 +}
3959 +
3960 +module_init(ip_set_ipmap_init);
3961 +module_exit(ip_set_ipmap_fini);
3962 Index: linux-2.6.21.7/net/ipv4/netfilter/ip_set_ipporthash.c
3963 ===================================================================
3964 --- /dev/null
3965 +++ linux-2.6.21.7/net/ipv4/netfilter/ip_set_ipporthash.c
3966 @@ -0,0 +1,581 @@
3967 +/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
3968 + *
3969 + * This program is free software; you can redistribute it and/or modify
3970 + * it under the terms of the GNU General Public License version 2 as
3971 + * published by the Free Software Foundation.
3972 + */
3973 +
3974 +/* Kernel module implementing an ip+port hash set */
3975 +
3976 +#include <linux/module.h>
3977 +#include <linux/ip.h>
3978 +#include <linux/tcp.h>
3979 +#include <linux/udp.h>
3980 +#include <linux/skbuff.h>
3981 +#include <linux/version.h>
3982 +#include <linux/jhash.h>
3983 +#include <linux/netfilter_ipv4/ip_tables.h>
3984 +#include <linux/netfilter_ipv4/ip_set.h>
3985 +#include <linux/errno.h>
3986 +#include <asm/uaccess.h>
3987 +#include <asm/bitops.h>
3988 +#include <linux/spinlock.h>
3989 +#include <linux/vmalloc.h>
3990 +#include <linux/random.h>
3991 +
3992 +#include <net/ip.h>
3993 +
3994 +#include <linux/netfilter_ipv4/ip_set_malloc.h>
3995 +#include <linux/netfilter_ipv4/ip_set_ipporthash.h>
3996 +
3997 +static int limit = MAX_RANGE;
3998 +
3999 +/* We must handle non-linear skbs */
4000 +static inline ip_set_ip_t
4001 +get_port(const struct sk_buff *skb, u_int32_t flags)
4002 +{
4003 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
4004 + struct iphdr *iph = ip_hdr(skb);
4005 +#else
4006 + struct iphdr *iph = skb->nh.iph;
4007 +#endif
4008 + u_int16_t offset = ntohs(iph->frag_off) & IP_OFFSET;
4009 +
4010 + switch (iph->protocol) {
4011 + case IPPROTO_TCP: {
4012 + struct tcphdr tcph;
4013 +
4014 + /* See comments at tcp_match in ip_tables.c */
4015 + if (offset)
4016 + return INVALID_PORT;
4017 +
4018 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
4019 + if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &tcph, sizeof(tcph)) < 0)
4020 +#else
4021 + if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &tcph, sizeof(tcph)) < 0)
4022 +#endif
4023 + /* No choice either */
4024 + return INVALID_PORT;
4025 +
4026 + return ntohs(flags & IPSET_SRC ?
4027 + tcph.source : tcph.dest);
4028 + }
4029 + case IPPROTO_UDP: {
4030 + struct udphdr udph;
4031 +
4032 + if (offset)
4033 + return INVALID_PORT;
4034 +
4035 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
4036 + if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &udph, sizeof(udph)) < 0)
4037 +#else
4038 + if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &udph, sizeof(udph)) < 0)
4039 +#endif
4040 + /* No choice either */
4041 + return INVALID_PORT;
4042 +
4043 + return ntohs(flags & IPSET_SRC ?
4044 + udph.source : udph.dest);
4045 + }
4046 + default:
4047 + return INVALID_PORT;
4048 + }
4049 +}
4050 +
4051 +static inline __u32
4052 +jhash_ip(const struct ip_set_ipporthash *map, uint16_t i, ip_set_ip_t ip)
4053 +{
4054 + return jhash_1word(ip, *(((uint32_t *) map->initval) + i));
4055 +}
4056 +
4057 +#define HASH_IP(map, ip, port) (port + ((ip - ((map)->first_ip)) << 16))
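+/*
+ * HASH_IP packs an (ip, port) pair into one ip_set_ip_t: the port sits
+ * in the low 16 bits and the offset of ip from first_ip in the bits
+ * above.  Illustrative values: with first_ip = 10.0.0.0, ip = 10.0.0.3
+ * and port = 8080 the packed key is (3 << 16) + 8080 = 204688.
+ */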
4058 +
4059 +static inline __u32
4060 +hash_id(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t port,
4061 + ip_set_ip_t *hash_ip)
4062 +{
4063 + struct ip_set_ipporthash *map =
4064 + (struct ip_set_ipporthash *) set->data;
4065 + __u32 id;
4066 + u_int16_t i;
4067 + ip_set_ip_t *elem;
4068 +
4069 + *hash_ip = HASH_IP(map, ip, port);
4070 + DP("set: %s, ipport:%u.%u.%u.%u:%u, %u.%u.%u.%u",
4071 + set->name, HIPQUAD(ip), port, HIPQUAD(*hash_ip));
4072 +
4073 + for (i = 0; i < map->probes; i++) {
4074 + id = jhash_ip(map, i, *hash_ip) % map->hashsize;
4075 + DP("hash key: %u", id);
4076 + elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
4077 + if (*elem == *hash_ip)
4078 + return id;
4079 + /* No shortcut at testing - there can be deleted
4080 + * entries. */
4081 + }
4082 + return UINT_MAX;
4083 +}
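+/*
+ * Each element may live in any of map->probes alternative slots, one
+ * per independent jhash seed in initval[].  With probes = 4 and
+ * hashsize = 1024 (illustrative values) the candidate slots are
+ * jhash_1word(key, initval[i]) % 1024 for i = 0..3.  A lookup has to
+ * try all of them: a slot emptied by delete may sit before the slot
+ * that actually holds the element.
+ */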
4084 +
4085 +static inline int
4086 +__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t port,
4087 + ip_set_ip_t *hash_ip)
4088 +{
4089 + struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
4090 +
4091 + if (ip < map->first_ip || ip > map->last_ip)
4092 + return -ERANGE;
4093 +
4094 + return (hash_id(set, ip, port, hash_ip) != UINT_MAX);
4095 +}
4096 +
4097 +static int
4098 +testip(struct ip_set *set, const void *data, size_t size,
4099 + ip_set_ip_t *hash_ip)
4100 +{
4101 + struct ip_set_req_ipporthash *req =
4102 + (struct ip_set_req_ipporthash *) data;
4103 +
4104 + if (size != sizeof(struct ip_set_req_ipporthash)) {
4105 + ip_set_printk("data length wrong (want %zu, have %zu)",
4106 + sizeof(struct ip_set_req_ipporthash),
4107 + size);
4108 + return -EINVAL;
4109 + }
4110 + return __testip(set, req->ip, req->port, hash_ip);
4111 +}
4112 +
4113 +static int
4114 +testip_kernel(struct ip_set *set,
4115 + const struct sk_buff *skb,
4116 + ip_set_ip_t *hash_ip,
4117 + const u_int32_t *flags,
4118 + unsigned char index)
4119 +{
4120 + ip_set_ip_t port;
4121 + int res;
4122 +
4123 + if (flags[index+1] == 0)
4124 + return 0;
4125 +
4126 + port = get_port(skb, flags[index+1]);
4127 +
4128 + DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
4129 + flags[index] & IPSET_SRC ? "SRC" : "DST",
4130 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
4131 + NIPQUAD(ip_hdr(skb)->saddr),
4132 + NIPQUAD(ip_hdr(skb)->daddr));
4133 +#else
4134 + NIPQUAD(skb->nh.iph->saddr),
4135 + NIPQUAD(skb->nh.iph->daddr));
4136 +#endif
4137 + DP("flag %s port %u",
4138 + flags[index+1] & IPSET_SRC ? "SRC" : "DST",
4139 + port);
4140 + if (port == INVALID_PORT)
4141 + return 0;
4142 +
4143 + res = __testip(set,
4144 + ntohl(flags[index] & IPSET_SRC
4145 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
4146 + ? ip_hdr(skb)->saddr
4147 + : ip_hdr(skb)->daddr),
4148 +#else
4149 + ? skb->nh.iph->saddr
4150 + : skb->nh.iph->daddr),
4151 +#endif
4152 + port,
4153 + hash_ip);
4154 + return (res < 0 ? 0 : res);
4155 +
4156 +}
4157 +
4158 +static inline int
4159 +__add_haship(struct ip_set_ipporthash *map, ip_set_ip_t hash_ip)
4160 +{
4161 + __u32 probe;
4162 + u_int16_t i;
4163 + ip_set_ip_t *elem;
4164 +
4165 + for (i = 0; i < map->probes; i++) {
4166 + probe = jhash_ip(map, i, hash_ip) % map->hashsize;
4167 + elem = HARRAY_ELEM(map->members, ip_set_ip_t *, probe);
4168 + if (*elem == hash_ip)
4169 + return -EEXIST;
4170 + if (!*elem) {
4171 + *elem = hash_ip;
4172 + map->elements++;
4173 + return 0;
4174 + }
4175 + }
4176 + /* Trigger rehashing */
4177 + return -EAGAIN;
4178 +}
4179 +
4180 +static inline int
4181 +__addip(struct ip_set_ipporthash *map, ip_set_ip_t ip, ip_set_ip_t port,
4182 + ip_set_ip_t *hash_ip)
4183 +{
4184 + if (map->elements > limit)
4185 + return -ERANGE;
4186 + if (ip < map->first_ip || ip > map->last_ip)
4187 + return -ERANGE;
4188 +
4189 + *hash_ip = HASH_IP(map, ip, port);
4190 +
4191 + return __add_haship(map, *hash_ip);
4192 +}
4193 +
4194 +static int
4195 +addip(struct ip_set *set, const void *data, size_t size,
4196 + ip_set_ip_t *hash_ip)
4197 +{
4198 + struct ip_set_req_ipporthash *req =
4199 + (struct ip_set_req_ipporthash *) data;
4200 +
4201 + if (size != sizeof(struct ip_set_req_ipporthash)) {
4202 + ip_set_printk("data length wrong (want %zu, have %zu)",
4203 + sizeof(struct ip_set_req_ipporthash),
4204 + size);
4205 + return -EINVAL;
4206 + }
4207 + return __addip((struct ip_set_ipporthash *) set->data,
4208 + req->ip, req->port, hash_ip);
4209 +}
4210 +
4211 +static int
4212 +addip_kernel(struct ip_set *set,
4213 + const struct sk_buff *skb,
4214 + ip_set_ip_t *hash_ip,
4215 + const u_int32_t *flags,
4216 + unsigned char index)
4217 +{
4218 + ip_set_ip_t port;
4219 +
4220 + if (flags[index+1] == 0)
4221 + return -EINVAL;
4222 +
4223 + port = get_port(skb, flags[index+1]);
4224 +
4225 + DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
4226 + flags[index] & IPSET_SRC ? "SRC" : "DST",
4227 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
4228 + NIPQUAD(ip_hdr(skb)->saddr),
4229 + NIPQUAD(ip_hdr(skb)->daddr));
4230 +#else
4231 + NIPQUAD(skb->nh.iph->saddr),
4232 + NIPQUAD(skb->nh.iph->daddr));
4233 +#endif
4234 + DP("flag %s port %u",
4235 + flags[index+1] & IPSET_SRC ? "SRC" : "DST",
4236 + port);
4237 + if (port == INVALID_PORT)
4238 + return -EINVAL;
4239 +
4240 + return __addip((struct ip_set_ipporthash *) set->data,
4241 + ntohl(flags[index] & IPSET_SRC
4242 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
4243 + ? ip_hdr(skb)->saddr
4244 + : ip_hdr(skb)->daddr),
4245 +#else
4246 + ? skb->nh.iph->saddr
4247 + : skb->nh.iph->daddr),
4248 +#endif
4249 + port,
4250 + hash_ip);
4251 +}
4252 +
4253 +static int retry(struct ip_set *set)
4254 +{
4255 + struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
4256 + ip_set_ip_t *elem;
4257 + void *members;
4258 + u_int32_t i, hashsize = map->hashsize;
4259 + int res;
4260 + struct ip_set_ipporthash *tmp;
4261 +
4262 + if (map->resize == 0)
4263 + return -ERANGE;
4264 +
4265 + again:
4266 + res = 0;
4267 +
4268 + /* Calculate new hash size */
4269 + hashsize += (hashsize * map->resize)/100;
4270 + if (hashsize == map->hashsize)
4271 + hashsize++;
4272 +
4273 + ip_set_printk("rehashing of set %s triggered: "
4274 + "hashsize grows from %u to %u",
4275 + set->name, map->hashsize, hashsize);
4276 +
4277 + tmp = kmalloc(sizeof(struct ip_set_ipporthash)
4278 + + map->probes * sizeof(uint32_t), GFP_ATOMIC);
4279 + if (!tmp) {
4280 + DP("out of memory for %d bytes",
4281 + sizeof(struct ip_set_ipporthash)
4282 + + map->probes * sizeof(uint32_t));
4283 + return -ENOMEM;
4284 + }
4285 + tmp->members = harray_malloc(hashsize, sizeof(ip_set_ip_t), GFP_ATOMIC);
4286 + if (!tmp->members) {
4287 + DP("out of memory for %d bytes", hashsize * sizeof(ip_set_ip_t));
4288 + kfree(tmp);
4289 + return -ENOMEM;
4290 + }
4291 + tmp->hashsize = hashsize;
4292 + tmp->elements = 0;
4293 + tmp->probes = map->probes;
4294 + tmp->resize = map->resize;
4295 + tmp->first_ip = map->first_ip;
4296 + tmp->last_ip = map->last_ip;
4297 + memcpy(tmp->initval, map->initval, map->probes * sizeof(uint32_t));
4298 +
4299 + write_lock_bh(&set->lock);
4300 + map = (struct ip_set_ipporthash *) set->data; /* Play safe */
4301 + for (i = 0; i < map->hashsize && res == 0; i++) {
4302 + elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
4303 + if (*elem)
4304 + res = __add_haship(tmp, *elem);
4305 + }
4306 + if (res) {
4307 + /* Failure, try again */
4308 + write_unlock_bh(&set->lock);
4309 + harray_free(tmp->members);
4310 + kfree(tmp);
4311 + goto again;
4312 + }
4313 +
4314 + /* Success at resizing! */
4315 + members = map->members;
4316 +
4317 + map->hashsize = tmp->hashsize;
4318 + map->members = tmp->members;
4319 + write_unlock_bh(&set->lock);
4320 +
4321 + harray_free(members);
4322 + kfree(tmp);
4323 +
4324 + return 0;
4325 +}
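+/*
+ * Resize example (illustrative values): with hashsize = 1024 and
+ * resize = 50 the table grows to 1536 on the first retry and to 2304
+ * on the next, until every stored element fits again; with resize = 0
+ * retry() above refuses to rehash and returns -ERANGE.
+ */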
4326 +
4327 +static inline int
4328 +__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t port,
4329 + ip_set_ip_t *hash_ip)
4330 +{
4331 + struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
4332 + ip_set_ip_t id;
4333 + ip_set_ip_t *elem;
4334 +
4335 + if (ip < map->first_ip || ip > map->last_ip)
4336 + return -ERANGE;
4337 +
4338 + id = hash_id(set, ip, port, hash_ip);
4339 +
4340 + if (id == UINT_MAX)
4341 + return -EEXIST;
4342 +
4343 + elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
4344 + *elem = 0;
4345 + map->elements--;
4346 +
4347 + return 0;
4348 +}
4349 +
4350 +static int
4351 +delip(struct ip_set *set, const void *data, size_t size,
4352 + ip_set_ip_t *hash_ip)
4353 +{
4354 + struct ip_set_req_ipporthash *req =
4355 + (struct ip_set_req_ipporthash *) data;
4356 +
4357 + if (size != sizeof(struct ip_set_req_ipporthash)) {
4358 + ip_set_printk("data length wrong (want %zu, have %zu)",
4359 + sizeof(struct ip_set_req_ipporthash),
4360 + size);
4361 + return -EINVAL;
4362 + }
4363 + return __delip(set, req->ip, req->port, hash_ip);
4364 +}
4365 +
4366 +static int
4367 +delip_kernel(struct ip_set *set,
4368 + const struct sk_buff *skb,
4369 + ip_set_ip_t *hash_ip,
4370 + const u_int32_t *flags,
4371 + unsigned char index)
4372 +{
4373 + ip_set_ip_t port;
4374 +
4375 + if (flags[index+1] == 0)
4376 + return -EINVAL;
4377 +
4378 + port = get_port(skb, flags[index+1]);
4379 +
4380 + DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
4381 + flags[index] & IPSET_SRC ? "SRC" : "DST",
4382 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
4383 + NIPQUAD(ip_hdr(skb)->saddr),
4384 + NIPQUAD(ip_hdr(skb)->daddr));
4385 +#else
4386 + NIPQUAD(skb->nh.iph->saddr),
4387 + NIPQUAD(skb->nh.iph->daddr));
4388 +#endif
4389 + DP("flag %s port %u",
4390 + flags[index+1] & IPSET_SRC ? "SRC" : "DST",
4391 + port);
4392 + if (port == INVALID_PORT)
4393 + return -EINVAL;
4394 +
4395 + return __delip(set,
4396 + ntohl(flags[index] & IPSET_SRC
4397 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
4398 + ? ip_hdr(skb)->saddr
4399 + : ip_hdr(skb)->daddr),
4400 +#else
4401 + ? skb->nh.iph->saddr
4402 + : skb->nh.iph->daddr),
4403 +#endif
4404 + port,
4405 + hash_ip);
4406 +}
4407 +
4408 +static int create(struct ip_set *set, const void *data, size_t size)
4409 +{
4410 + struct ip_set_req_ipporthash_create *req =
4411 + (struct ip_set_req_ipporthash_create *) data;
4412 + struct ip_set_ipporthash *map;
4413 + uint16_t i;
4414 +
4415 + if (size != sizeof(struct ip_set_req_ipporthash_create)) {
4416 + ip_set_printk("data length wrong (want %zu, have %zu)",
4417 + sizeof(struct ip_set_req_ipporthash_create),
4418 + size);
4419 + return -EINVAL;
4420 + }
4421 +
4422 + if (req->hashsize < 1) {
4423 + ip_set_printk("hashsize too small");
4424 + return -ENOEXEC;
4425 + }
4426 +
4427 + if (req->probes < 1) {
4428 + ip_set_printk("probes too small");
4429 + return -ENOEXEC;
4430 + }
4431 +
4432 + map = kmalloc(sizeof(struct ip_set_ipporthash)
4433 + + req->probes * sizeof(uint32_t), GFP_KERNEL);
4434 + if (!map) {
4435 + DP("out of memory for %d bytes",
4436 + sizeof(struct ip_set_ipporthash)
4437 + + req->probes * sizeof(uint32_t));
4438 + return -ENOMEM;
4439 + }
4440 + for (i = 0; i < req->probes; i++)
4441 + get_random_bytes(((uint32_t *) map->initval)+i, 4);
4442 + map->elements = 0;
4443 + map->hashsize = req->hashsize;
4444 + map->probes = req->probes;
4445 + map->resize = req->resize;
4446 + map->first_ip = req->from;
4447 + map->last_ip = req->to;
4448 + map->members = harray_malloc(map->hashsize, sizeof(ip_set_ip_t), GFP_KERNEL);
4449 + if (!map->members) {
4450 + DP("out of memory for %d bytes", map->hashsize * sizeof(ip_set_ip_t));
4451 + kfree(map);
4452 + return -ENOMEM;
4453 + }
4454 +
4455 + set->data = map;
4456 + return 0;
4457 +}
4458 +
4459 +static void destroy(struct ip_set *set)
4460 +{
4461 + struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
4462 +
4463 + harray_free(map->members);
4464 + kfree(map);
4465 +
4466 + set->data = NULL;
4467 +}
4468 +
4469 +static void flush(struct ip_set *set)
4470 +{
4471 + struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
4472 + harray_flush(map->members, map->hashsize, sizeof(ip_set_ip_t));
4473 + map->elements = 0;
4474 +}
4475 +
4476 +static void list_header(const struct ip_set *set, void *data)
4477 +{
4478 + struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
4479 + struct ip_set_req_ipporthash_create *header =
4480 + (struct ip_set_req_ipporthash_create *) data;
4481 +
4482 + header->hashsize = map->hashsize;
4483 + header->probes = map->probes;
4484 + header->resize = map->resize;
4485 + header->from = map->first_ip;
4486 + header->to = map->last_ip;
4487 +}
4488 +
4489 +static int list_members_size(const struct ip_set *set)
4490 +{
4491 + struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
4492 +
4493 + return (map->hashsize * sizeof(ip_set_ip_t));
4494 +}
4495 +
4496 +static void list_members(const struct ip_set *set, void *data)
4497 +{
4498 + struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
4499 + ip_set_ip_t i, *elem;
4500 +
4501 + for (i = 0; i < map->hashsize; i++) {
4502 + elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
4503 + ((ip_set_ip_t *)data)[i] = *elem;
4504 + }
4505 +}
4506 +
4507 +static struct ip_set_type ip_set_ipporthash = {
4508 + .typename = SETTYPE_NAME,
4509 + .features = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_DATA_DOUBLE,
4510 + .protocol_version = IP_SET_PROTOCOL_VERSION,
4511 + .create = &create,
4512 + .destroy = &destroy,
4513 + .flush = &flush,
4514 + .reqsize = sizeof(struct ip_set_req_ipporthash),
4515 + .addip = &addip,
4516 + .addip_kernel = &addip_kernel,
4517 + .retry = &retry,
4518 + .delip = &delip,
4519 + .delip_kernel = &delip_kernel,
4520 + .testip = &testip,
4521 + .testip_kernel = &testip_kernel,
4522 + .header_size = sizeof(struct ip_set_req_ipporthash_create),
4523 + .list_header = &list_header,
4524 + .list_members_size = &list_members_size,
4525 + .list_members = &list_members,
4526 + .me = THIS_MODULE,
4527 +};
4528 +
4529 +MODULE_LICENSE("GPL");
4530 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
4531 +MODULE_DESCRIPTION("ipporthash type of IP sets");
4532 +module_param(limit, int, 0600);
4533 +MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
4534 +
4535 +static int __init ip_set_ipporthash_init(void)
4536 +{
4537 + return ip_set_register_set_type(&ip_set_ipporthash);
4538 +}
4539 +
4540 +static void __exit ip_set_ipporthash_fini(void)
4541 +{
4542 + /* FIXME: possible race with ip_set_create() */
4543 + ip_set_unregister_set_type(&ip_set_ipporthash);
4544 +}
4545 +
4546 +module_init(ip_set_ipporthash_init);
4547 +module_exit(ip_set_ipporthash_fini);
4548 Index: linux-2.6.21.7/net/ipv4/netfilter/ip_set_iptree.c
4549 ===================================================================
4550 --- /dev/null
4551 +++ linux-2.6.21.7/net/ipv4/netfilter/ip_set_iptree.c
4552 @@ -0,0 +1,612 @@
4553 +/* Copyright (C) 2005 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
4554 + *
4555 + * This program is free software; you can redistribute it and/or modify
4556 + * it under the terms of the GNU General Public License version 2 as
4557 + * published by the Free Software Foundation.
4558 + */
4559 +
4560 +/* Kernel module implementing an IP set type: the iptree type */
4561 +
4562 +#include <linux/version.h>
4563 +#include <linux/module.h>
4564 +#include <linux/ip.h>
4565 +#include <linux/skbuff.h>
4566 +#include <linux/slab.h>
4567 +#include <linux/delay.h>
4568 +#include <linux/netfilter_ipv4/ip_tables.h>
4569 +#include <linux/netfilter_ipv4/ip_set.h>
4570 +#include <linux/errno.h>
4571 +#include <asm/uaccess.h>
4572 +#include <asm/bitops.h>
4573 +#include <linux/spinlock.h>
4574 +
4575 +/* Backward compatibility */
4576 +#ifndef __nocast
4577 +#define __nocast
4578 +#endif
4579 +
4580 +#include <linux/netfilter_ipv4/ip_set_iptree.h>
4581 +
4582 +static int limit = MAX_RANGE;
4583 +
4584 +/* Garbage collection interval in seconds: */
4585 +#define IPTREE_GC_TIME 5*60
4586 +/* Sleep this many milliseconds before trying again
4587 + * to delete the gc timer when destroying/flushing a set */
4588 +#define IPTREE_DESTROY_SLEEP 100
4589 +
4590 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
4591 +static struct kmem_cache *branch_cachep;
4592 +static struct kmem_cache *leaf_cachep;
4593 +#else
4594 +static kmem_cache_t *branch_cachep;
4595 +static kmem_cache_t *leaf_cachep;
4596 +#endif
4597 +
4598 +#if defined(__LITTLE_ENDIAN)
4599 +#define ABCD(a,b,c,d,addrp) do { \
4600 + a = ((unsigned char *)addrp)[3]; \
4601 + b = ((unsigned char *)addrp)[2]; \
4602 + c = ((unsigned char *)addrp)[1]; \
4603 + d = ((unsigned char *)addrp)[0]; \
4604 +} while (0)
4605 +#elif defined(__BIG_ENDIAN)
4606 +#define ABCD(a,b,c,d,addrp) do { \
4607 + a = ((unsigned char *)addrp)[0]; \
4608 + b = ((unsigned char *)addrp)[1]; \
4609 + c = ((unsigned char *)addrp)[2]; \
4610 + d = ((unsigned char *)addrp)[3]; \
4611 +} while (0)
4612 +#else
4613 +#error "Please fix asm/byteorder.h"
4614 +#endif /* __LITTLE_ENDIAN */
4615 +
4616 +#define TESTIP_WALK(map, elem, branch) do { \
4617 + if ((map)->tree[elem]) { \
4618 + branch = (map)->tree[elem]; \
4619 + } else \
4620 + return 0; \
4621 +} while (0)
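+/*
+ * Illustrative lookup: for ip 192.168.1.5 ABCD() yields a = 192,
+ * b = 168, c = 1, d = 5 (host byte order), so the walk below goes
+ * map->tree[192] -> tree[168] -> tree[1] and finally checks
+ * expires[5] in the leaf.
+ */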
4622 +
4623 +static inline int
4624 +__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
4625 +{
4626 + struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
4627 + struct ip_set_iptreeb *btree;
4628 + struct ip_set_iptreec *ctree;
4629 + struct ip_set_iptreed *dtree;
4630 + unsigned char a,b,c,d;
4631 +
4632 + if (!ip)
4633 + return -ERANGE;
4634 +
4635 + *hash_ip = ip;
4636 + ABCD(a, b, c, d, hash_ip);
4637 + DP("%u %u %u %u timeout %u", a, b, c, d, map->timeout);
4638 + TESTIP_WALK(map, a, btree);
4639 + TESTIP_WALK(btree, b, ctree);
4640 + TESTIP_WALK(ctree, c, dtree);
4641 + DP("%lu %lu", dtree->expires[d], jiffies);
4642 + return dtree->expires[d]
4643 + && (!map->timeout
4644 + || time_after(dtree->expires[d], jiffies));
4645 +}
4646 +
4647 +static int
4648 +testip(struct ip_set *set, const void *data, size_t size,
4649 + ip_set_ip_t *hash_ip)
4650 +{
4651 + struct ip_set_req_iptree *req =
4652 + (struct ip_set_req_iptree *) data;
4653 +
4654 + if (size != sizeof(struct ip_set_req_iptree)) {
4655 + ip_set_printk("data length wrong (want %zu, have %zu)",
4656 + sizeof(struct ip_set_req_iptree),
4657 + size);
4658 + return -EINVAL;
4659 + }
4660 + return __testip(set, req->ip, hash_ip);
4661 +}
4662 +
4663 +static int
4664 +testip_kernel(struct ip_set *set,
4665 + const struct sk_buff *skb,
4666 + ip_set_ip_t *hash_ip,
4667 + const u_int32_t *flags,
4668 + unsigned char index)
4669 +{
4670 + int res;
4671 +
4672 + DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
4673 + flags[index] & IPSET_SRC ? "SRC" : "DST",
4674 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
4675 + NIPQUAD(ip_hdr(skb)->saddr),
4676 + NIPQUAD(ip_hdr(skb)->daddr));
4677 +#else
4678 + NIPQUAD(skb->nh.iph->saddr),
4679 + NIPQUAD(skb->nh.iph->daddr));
4680 +#endif
4681 +
4682 + res = __testip(set,
4683 + ntohl(flags[index] & IPSET_SRC
4684 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
4685 + ? ip_hdr(skb)->saddr
4686 + : ip_hdr(skb)->daddr),
4687 +#else
4688 + ? skb->nh.iph->saddr
4689 + : skb->nh.iph->daddr),
4690 +#endif
4691 + hash_ip);
4692 + return (res < 0 ? 0 : res);
4693 +}
4694 +
4695 +#define ADDIP_WALK(map, elem, branch, type, cachep) do { \
4696 + if ((map)->tree[elem]) { \
4697 + DP("found %u", elem); \
4698 + branch = (map)->tree[elem]; \
4699 + } else { \
4700 + branch = (type *) \
4701 + kmem_cache_alloc(cachep, GFP_ATOMIC); \
4702 + if (branch == NULL) \
4703 + return -ENOMEM; \
4704 + memset(branch, 0, sizeof(*branch)); \
4705 + (map)->tree[elem] = branch; \
4706 + DP("alloc %u", elem); \
4707 + } \
4708 +} while (0)
4709 +
4710 +static inline int
4711 +__addip(struct ip_set *set, ip_set_ip_t ip, unsigned int timeout,
4712 + ip_set_ip_t *hash_ip)
4713 +{
4714 + struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
4715 + struct ip_set_iptreeb *btree;
4716 + struct ip_set_iptreec *ctree;
4717 + struct ip_set_iptreed *dtree;
4718 + unsigned char a,b,c,d;
4719 + int ret = 0;
4720 +
4721 + if (!ip || map->elements >= limit)
4722 + /* We could call the garbage collector
4723 + * but it's probably overkill */
4724 + return -ERANGE;
4725 +
4726 + *hash_ip = ip;
4727 + ABCD(a, b, c, d, hash_ip);
4728 + DP("%u %u %u %u timeout %u", a, b, c, d, timeout);
4729 + ADDIP_WALK(map, a, btree, struct ip_set_iptreeb, branch_cachep);
4730 + ADDIP_WALK(btree, b, ctree, struct ip_set_iptreec, branch_cachep);
4731 + ADDIP_WALK(ctree, c, dtree, struct ip_set_iptreed, leaf_cachep);
4732 + if (dtree->expires[d]
4733 + && (!map->timeout || time_after(dtree->expires[d], jiffies)))
4734 + ret = -EEXIST;
4735 + dtree->expires[d] = map->timeout ? (timeout * HZ + jiffies) : 1;
4736 + /* Lottery: I won! */
4737 + if (dtree->expires[d] == 0)
4738 + dtree->expires[d] = 1;
4739 + DP("%u %lu", d, dtree->expires[d]);
4740 + if (ret == 0)
4741 + map->elements++;
4742 + return ret;
4743 +}
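+/*
+ * An expires value of 0 means "slot empty", so when the computed
+ * jiffies value wraps to exactly 0 it is nudged to 1 (the "lottery"
+ * above); sets created without a timeout store the constant 1 as a
+ * plain presence marker.  Note that re-adding an existing entry still
+ * refreshes its expiry before -EEXIST is returned.
+ */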
4744 +
4745 +static int
4746 +addip(struct ip_set *set, const void *data, size_t size,
4747 + ip_set_ip_t *hash_ip)
4748 +{
4749 + struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
4750 + struct ip_set_req_iptree *req =
4751 + (struct ip_set_req_iptree *) data;
4752 +
4753 + if (size != sizeof(struct ip_set_req_iptree)) {
4754 + ip_set_printk("data length wrong (want %zu, have %zu)",
4755 + sizeof(struct ip_set_req_iptree),
4756 + size);
4757 + return -EINVAL;
4758 + }
4759 + DP("%u.%u.%u.%u %u", HIPQUAD(req->ip), req->timeout);
4760 + return __addip(set, req->ip,
4761 + req->timeout ? req->timeout : map->timeout,
4762 + hash_ip);
4763 +}
4764 +
4765 +static int
4766 +addip_kernel(struct ip_set *set,
4767 + const struct sk_buff *skb,
4768 + ip_set_ip_t *hash_ip,
4769 + const u_int32_t *flags,
4770 + unsigned char index)
4771 +{
4772 + struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
4773 +
4774 + return __addip(set,
4775 + ntohl(flags[index] & IPSET_SRC
4776 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
4777 + ? ip_hdr(skb)->saddr
4778 + : ip_hdr(skb)->daddr),
4779 +#else
4780 + ? skb->nh.iph->saddr
4781 + : skb->nh.iph->daddr),
4782 +#endif
4783 + map->timeout,
4784 + hash_ip);
4785 +}
4786 +
4787 +#define DELIP_WALK(map, elem, branch) do { \
4788 + if ((map)->tree[elem]) { \
4789 + branch = (map)->tree[elem]; \
4790 + } else \
4791 + return -EEXIST; \
4792 +} while (0)
4793 +
4794 +static inline int
4795 +__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
4796 +{
4797 + struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
4798 + struct ip_set_iptreeb *btree;
4799 + struct ip_set_iptreec *ctree;
4800 + struct ip_set_iptreed *dtree;
4801 + unsigned char a,b,c,d;
4802 +
4803 + if (!ip)
4804 + return -ERANGE;
4805 +
4806 + *hash_ip = ip;
4807 + ABCD(a, b, c, d, hash_ip);
4808 + DELIP_WALK(map, a, btree);
4809 + DELIP_WALK(btree, b, ctree);
4810 + DELIP_WALK(ctree, c, dtree);
4811 +
4812 + if (dtree->expires[d]) {
4813 + dtree->expires[d] = 0;
4814 + map->elements--;
4815 + return 0;
4816 + }
4817 + return -EEXIST;
4818 +}
4819 +
4820 +static int
4821 +delip(struct ip_set *set, const void *data, size_t size,
4822 + ip_set_ip_t *hash_ip)
4823 +{
4824 + struct ip_set_req_iptree *req =
4825 + (struct ip_set_req_iptree *) data;
4826 +
4827 + if (size != sizeof(struct ip_set_req_iptree)) {
4828 + ip_set_printk("data length wrong (want %zu, have %zu)",
4829 + sizeof(struct ip_set_req_iptree),
4830 + size);
4831 + return -EINVAL;
4832 + }
4833 + return __delip(set, req->ip, hash_ip);
4834 +}
4835 +
4836 +static int
4837 +delip_kernel(struct ip_set *set,
4838 + const struct sk_buff *skb,
4839 + ip_set_ip_t *hash_ip,
4840 + const u_int32_t *flags,
4841 + unsigned char index)
4842 +{
4843 + return __delip(set,
4844 + ntohl(flags[index] & IPSET_SRC
4845 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
4846 + ? ip_hdr(skb)->saddr
4847 + : ip_hdr(skb)->daddr),
4848 +#else
4849 + ? skb->nh.iph->saddr
4850 + : skb->nh.iph->daddr),
4851 +#endif
4852 + hash_ip);
4853 +}
4854 +
4855 +#define LOOP_WALK_BEGIN(map, i, branch) \
4856 + for (i = 0; i < 256; i++) { \
4857 + if (!(map)->tree[i]) \
4858 + continue; \
4859 + branch = (map)->tree[i]
4860 +
4861 +#define LOOP_WALK_END }
4862 +
4863 +static void ip_tree_gc(unsigned long ul_set)
4864 +{
4865 + struct ip_set *set = (void *) ul_set;
4866 + struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
4867 + struct ip_set_iptreeb *btree;
4868 + struct ip_set_iptreec *ctree;
4869 + struct ip_set_iptreed *dtree;
4870 + unsigned int a,b,c,d;
4871 + unsigned char i,j,k;
4872 +
4873 + i = j = k = 0;
4874 + DP("gc: %s", set->name);
4875 + write_lock_bh(&set->lock);
4876 + LOOP_WALK_BEGIN(map, a, btree);
4877 + LOOP_WALK_BEGIN(btree, b, ctree);
4878 + LOOP_WALK_BEGIN(ctree, c, dtree);
4879 + for (d = 0; d < 256; d++) {
4880 + if (dtree->expires[d]) {
4881 + DP("gc: %u %u %u %u: expires %lu jiffies %lu",
4882 + a, b, c, d,
4883 + dtree->expires[d], jiffies);
4884 + if (map->timeout
4885 + && time_before(dtree->expires[d], jiffies)) {
4886 + dtree->expires[d] = 0;
4887 + map->elements--;
4888 + } else
4889 + k = 1;
4890 + }
4891 + }
4892 + if (k == 0) {
4893 + DP("gc: %s: leaf %u %u %u empty",
4894 + set->name, a, b, c);
4895 + kmem_cache_free(leaf_cachep, dtree);
4896 + ctree->tree[c] = NULL;
4897 + } else {
4898 + DP("gc: %s: leaf %u %u %u not empty",
4899 + set->name, a, b, c);
4900 + j = 1;
4901 + k = 0;
4902 + }
4903 + LOOP_WALK_END;
4904 + if (j == 0) {
4905 + DP("gc: %s: branch %u %u empty",
4906 + set->name, a, b);
4907 + kmem_cache_free(branch_cachep, ctree);
4908 + btree->tree[b] = NULL;
4909 + } else {
4910 + DP("gc: %s: branch %u %u not empty",
4911 + set->name, a, b);
4912 + i = 1;
4913 + j = k = 0;
4914 + }
4915 + LOOP_WALK_END;
4916 + if (i == 0) {
4917 + DP("gc: %s: branch %u empty",
4918 + set->name, a);
4919 + kmem_cache_free(branch_cachep, btree);
4920 + map->tree[a] = NULL;
4921 + } else {
4922 + DP("gc: %s: branch %u not empty",
4923 + set->name, a);
4924 + i = j = k = 0;
4925 + }
4926 + LOOP_WALK_END;
4927 + write_unlock_bh(&set->lock);
4928 +
4929 + map->gc.expires = jiffies + map->gc_interval * HZ;
4930 + add_timer(&map->gc);
4931 +}
4932 +
4933 +static inline void init_gc_timer(struct ip_set *set)
4934 +{
4935 + struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
4936 +
4937 + /* Even if there is no timeout for the entries,
4938 +	 * we still have to call gc because delete
4939 +	 * does not clean up empty branches */
4940 + map->gc_interval = IPTREE_GC_TIME;
4941 + init_timer(&map->gc);
4942 + map->gc.data = (unsigned long) set;
4943 + map->gc.function = ip_tree_gc;
4944 + map->gc.expires = jiffies + map->gc_interval * HZ;
4945 + add_timer(&map->gc);
4946 +}
4947 +
4948 +static int create(struct ip_set *set, const void *data, size_t size)
4949 +{
4950 + struct ip_set_req_iptree_create *req =
4951 + (struct ip_set_req_iptree_create *) data;
4952 + struct ip_set_iptree *map;
4953 +
4954 + if (size != sizeof(struct ip_set_req_iptree_create)) {
4955 + ip_set_printk("data length wrong (want %zu, have %zu)",
4956 + sizeof(struct ip_set_req_iptree_create),
4957 + size);
4958 + return -EINVAL;
4959 + }
4960 +
4961 + map = kmalloc(sizeof(struct ip_set_iptree), GFP_KERNEL);
4962 + if (!map) {
4963 + DP("out of memory for %d bytes",
4964 + sizeof(struct ip_set_iptree));
4965 + return -ENOMEM;
4966 + }
4967 + memset(map, 0, sizeof(*map));
4968 + map->timeout = req->timeout;
4969 + map->elements = 0;
4970 + set->data = map;
4971 +
4972 + init_gc_timer(set);
4973 +
4974 + return 0;
4975 +}
4976 +
4977 +static void __flush(struct ip_set_iptree *map)
4978 +{
4979 + struct ip_set_iptreeb *btree;
4980 + struct ip_set_iptreec *ctree;
4981 + struct ip_set_iptreed *dtree;
4982 + unsigned int a,b,c;
4983 +
4984 + LOOP_WALK_BEGIN(map, a, btree);
4985 + LOOP_WALK_BEGIN(btree, b, ctree);
4986 + LOOP_WALK_BEGIN(ctree, c, dtree);
4987 + kmem_cache_free(leaf_cachep, dtree);
4988 + LOOP_WALK_END;
4989 + kmem_cache_free(branch_cachep, ctree);
4990 + LOOP_WALK_END;
4991 + kmem_cache_free(branch_cachep, btree);
4992 + LOOP_WALK_END;
4993 + map->elements = 0;
4994 +}
4995 +
4996 +static void destroy(struct ip_set *set)
4997 +{
4998 + struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
4999 +
5000 + /* gc might be running */
5001 + while (!del_timer(&map->gc))
5002 + msleep(IPTREE_DESTROY_SLEEP);
5003 + __flush(map);
5004 + kfree(map);
5005 + set->data = NULL;
5006 +}
5007 +
5008 +static void flush(struct ip_set *set)
5009 +{
5010 + struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
5011 + unsigned int timeout = map->timeout;
5012 +
5013 + /* gc might be running */
5014 + while (!del_timer(&map->gc))
5015 + msleep(IPTREE_DESTROY_SLEEP);
5016 + __flush(map);
5017 + memset(map, 0, sizeof(*map));
5018 + map->timeout = timeout;
5019 +
5020 + init_gc_timer(set);
5021 +}
5022 +
5023 +static void list_header(const struct ip_set *set, void *data)
5024 +{
5025 + struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
5026 + struct ip_set_req_iptree_create *header =
5027 + (struct ip_set_req_iptree_create *) data;
5028 +
5029 + header->timeout = map->timeout;
5030 +}
5031 +
5032 +static int list_members_size(const struct ip_set *set)
5033 +{
5034 + struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
5035 + struct ip_set_iptreeb *btree;
5036 + struct ip_set_iptreec *ctree;
5037 + struct ip_set_iptreed *dtree;
5038 + unsigned int a,b,c,d;
5039 + unsigned int count = 0;
5040 +
5041 + LOOP_WALK_BEGIN(map, a, btree);
5042 + LOOP_WALK_BEGIN(btree, b, ctree);
5043 + LOOP_WALK_BEGIN(ctree, c, dtree);
5044 + for (d = 0; d < 256; d++) {
5045 + if (dtree->expires[d]
5046 + && (!map->timeout || time_after(dtree->expires[d], jiffies)))
5047 + count++;
5048 + }
5049 + LOOP_WALK_END;
5050 + LOOP_WALK_END;
5051 + LOOP_WALK_END;
5052 +
5053 + DP("members %u", count);
5054 + return (count * sizeof(struct ip_set_req_iptree));
5055 +}
5056 +
5057 +static void list_members(const struct ip_set *set, void *data)
5058 +{
5059 + struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
5060 + struct ip_set_iptreeb *btree;
5061 + struct ip_set_iptreec *ctree;
5062 + struct ip_set_iptreed *dtree;
5063 + unsigned int a,b,c,d;
5064 + size_t offset = 0;
5065 + struct ip_set_req_iptree *entry;
5066 +
5067 + LOOP_WALK_BEGIN(map, a, btree);
5068 + LOOP_WALK_BEGIN(btree, b, ctree);
5069 + LOOP_WALK_BEGIN(ctree, c, dtree);
5070 + for (d = 0; d < 256; d++) {
5071 + if (dtree->expires[d]
5072 + && (!map->timeout || time_after(dtree->expires[d], jiffies))) {
5073 + entry = (struct ip_set_req_iptree *)(data + offset);
5074 + entry->ip = ((a << 24) | (b << 16) | (c << 8) | d);
5075 + entry->timeout = !map->timeout ? 0
5076 + : (dtree->expires[d] - jiffies)/HZ;
5077 + offset += sizeof(struct ip_set_req_iptree);
5078 + }
5079 + }
5080 + LOOP_WALK_END;
5081 + LOOP_WALK_END;
5082 + LOOP_WALK_END;
5083 +}
5084 +
5085 +static struct ip_set_type ip_set_iptree = {
5086 + .typename = SETTYPE_NAME,
5087 + .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
5088 + .protocol_version = IP_SET_PROTOCOL_VERSION,
5089 + .create = &create,
5090 + .destroy = &destroy,
5091 + .flush = &flush,
5092 + .reqsize = sizeof(struct ip_set_req_iptree),
5093 + .addip = &addip,
5094 + .addip_kernel = &addip_kernel,
5095 + .delip = &delip,
5096 + .delip_kernel = &delip_kernel,
5097 + .testip = &testip,
5098 + .testip_kernel = &testip_kernel,
5099 + .header_size = sizeof(struct ip_set_req_iptree_create),
5100 + .list_header = &list_header,
5101 + .list_members_size = &list_members_size,
5102 + .list_members = &list_members,
5103 + .me = THIS_MODULE,
5104 +};
5105 +
5106 +MODULE_LICENSE("GPL");
5107 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
5108 +MODULE_DESCRIPTION("iptree type of IP sets");
5109 +module_param(limit, int, 0600);
5110 +MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
5111 +
5112 +static int __init ip_set_iptree_init(void)
5113 +{
5114 + int ret;
5115 +
5116 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
5117 + branch_cachep = kmem_cache_create("ip_set_iptreeb",
5118 + sizeof(struct ip_set_iptreeb),
5119 + 0, 0, NULL);
5120 +#else
5121 + branch_cachep = kmem_cache_create("ip_set_iptreeb",
5122 + sizeof(struct ip_set_iptreeb),
5123 + 0, 0, NULL, NULL);
5124 +#endif
5125 + if (!branch_cachep) {
5126 + printk(KERN_ERR "Unable to create ip_set_iptreeb slab cache\n");
5127 + ret = -ENOMEM;
5128 + goto out;
5129 + }
5130 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
5131 + leaf_cachep = kmem_cache_create("ip_set_iptreed",
5132 + sizeof(struct ip_set_iptreed),
5133 + 0, 0, NULL);
5134 +#else
5135 + leaf_cachep = kmem_cache_create("ip_set_iptreed",
5136 + sizeof(struct ip_set_iptreed),
5137 + 0, 0, NULL, NULL);
5138 +#endif
5139 + if (!leaf_cachep) {
5140 + printk(KERN_ERR "Unable to create ip_set_iptreed slab cache\n");
5141 + ret = -ENOMEM;
5142 + goto free_branch;
5143 + }
5144 + ret = ip_set_register_set_type(&ip_set_iptree);
5145 + if (ret == 0)
5146 + goto out;
5147 +
5148 + kmem_cache_destroy(leaf_cachep);
5149 + free_branch:
5150 + kmem_cache_destroy(branch_cachep);
5151 + out:
5152 + return ret;
5153 +}
5154 +
5155 +static void __exit ip_set_iptree_fini(void)
5156 +{
5157 + /* FIXME: possible race with ip_set_create() */
5158 + ip_set_unregister_set_type(&ip_set_iptree);
5159 + kmem_cache_destroy(leaf_cachep);
5160 + kmem_cache_destroy(branch_cachep);
5161 +}
5162 +
5163 +module_init(ip_set_iptree_init);
5164 +module_exit(ip_set_iptree_fini);
5165 Index: linux-2.6.21.7/net/ipv4/netfilter/ip_set_iptreemap.c
5166 ===================================================================
5167 --- /dev/null
5168 +++ linux-2.6.21.7/net/ipv4/netfilter/ip_set_iptreemap.c
5169 @@ -0,0 +1,829 @@
5170 +/* Copyright (C) 2007 Sven Wegener <sven.wegener@stealer.net>
5171 + *
5172 + * This program is free software; you can redistribute it and/or modify it
5173 + * under the terms of the GNU General Public License version 2 as published by
5174 + * the Free Software Foundation.
5175 + */
5176 +
5177 +/* This module implements the iptreemap ipset type. It uses bitmaps to
5178 + * represent every single IPv4 address as a single bit. The bitmaps are managed
5179 + * in a tree structure, where the first three octets of an address are used
5180 + * as an index to find the bitmap and the last octet is used as the bit number.
5181 + */
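+/*
+ * Illustrative example: 10.20.30.40 is stored by walking
+ * map->tree[10] -> tree[20] -> tree[30] and setting bit 40 in the
+ * resulting 256-bit leaf bitmap; a branch equal to one of the shared
+ * fullbitmap_* objects stands for "every address below is set".
+ */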
5182 +
5183 +#include <linux/version.h>
5184 +#include <linux/module.h>
5185 +#include <linux/ip.h>
5186 +#include <linux/skbuff.h>
5187 +#include <linux/slab.h>
5188 +#include <linux/delay.h>
5189 +#include <linux/netfilter_ipv4/ip_tables.h>
5190 +#include <linux/netfilter_ipv4/ip_set.h>
5191 +#include <linux/errno.h>
5192 +#include <asm/uaccess.h>
5193 +#include <asm/bitops.h>
5194 +#include <linux/spinlock.h>
5195 +
5196 +#include <linux/netfilter_ipv4/ip_set_iptreemap.h>
5197 +
5198 +#define IPTREEMAP_DEFAULT_GC_TIME (5 * 60)
5199 +#define IPTREEMAP_DESTROY_SLEEP (100)
5200 +
5201 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
5202 +static struct kmem_cache *cachep_b;
5203 +static struct kmem_cache *cachep_c;
5204 +static struct kmem_cache *cachep_d;
5205 +#else
5206 +static kmem_cache_t *cachep_b;
5207 +static kmem_cache_t *cachep_c;
5208 +static kmem_cache_t *cachep_d;
5209 +#endif
5210 +
5211 +static struct ip_set_iptreemap_d *fullbitmap_d;
5212 +static struct ip_set_iptreemap_c *fullbitmap_c;
5213 +static struct ip_set_iptreemap_b *fullbitmap_b;
5214 +
5215 +#if defined(__LITTLE_ENDIAN)
5216 +#define ABCD(a, b, c, d, addr) \
5217 + do { \
5218 + a = ((unsigned char *)addr)[3]; \
5219 + b = ((unsigned char *)addr)[2]; \
5220 + c = ((unsigned char *)addr)[1]; \
5221 + d = ((unsigned char *)addr)[0]; \
5222 + } while (0)
5223 +#elif defined(__BIG_ENDIAN)
5224 +#define ABCD(a,b,c,d,addrp) do { \
5225 + a = ((unsigned char *)addrp)[0]; \
5226 + b = ((unsigned char *)addrp)[1]; \
5227 + c = ((unsigned char *)addrp)[2]; \
5228 + d = ((unsigned char *)addrp)[3]; \
5229 +} while (0)
5230 +#else
5231 +#error "Please fix asm/byteorder.h"
5232 +#endif /* __LITTLE_ENDIAN */
5233 +
5234 +#define TESTIP_WALK(map, elem, branch, full) \
5235 + do { \
5236 + branch = (map)->tree[elem]; \
5237 + if (!branch) \
5238 + return 0; \
5239 + else if (branch == full) \
5240 + return 1; \
5241 + } while (0)
5242 +
5243 +#define ADDIP_WALK(map, elem, branch, type, cachep, full) \
5244 + do { \
5245 + branch = (map)->tree[elem]; \
5246 + if (!branch) { \
5247 + branch = (type *) kmem_cache_alloc(cachep, GFP_ATOMIC); \
5248 + if (!branch) \
5249 + return -ENOMEM; \
5250 + memset(branch, 0, sizeof(*branch)); \
5251 + (map)->tree[elem] = branch; \
5252 + } else if (branch == full) { \
5253 + return -EEXIST; \
5254 + } \
5255 + } while (0)
5256 +
5257 +#define ADDIP_RANGE_LOOP(map, a, a1, a2, hint, branch, full, cachep, free) \
5258 + for (a = a1; a <= a2; a++) { \
5259 + branch = (map)->tree[a]; \
5260 + if (branch != full) { \
5261 + if ((a > a1 && a < a2) || (hint)) { \
5262 + if (branch) \
5263 + free(branch); \
5264 + (map)->tree[a] = full; \
5265 + continue; \
5266 + } else if (!branch) { \
5267 + branch = kmem_cache_alloc(cachep, GFP_ATOMIC); \
5268 + if (!branch) \
5269 + return -ENOMEM; \
5270 + memset(branch, 0, sizeof(*branch)); \
5271 + (map)->tree[a] = branch; \
5272 + }
5273 +
5274 +#define ADDIP_RANGE_LOOP_END() \
5275 + } \
5276 + }
5277 +
5278 +#define DELIP_WALK(map, elem, branch, cachep, full, flags) \
5279 + do { \
5280 + branch = (map)->tree[elem]; \
5281 + if (!branch) { \
5282 + return -EEXIST; \
5283 + } else if (branch == full) { \
5284 + branch = kmem_cache_alloc(cachep, flags); \
5285 + if (!branch) \
5286 + return -ENOMEM; \
5287 + memcpy(branch, full, sizeof(*full)); \
5288 + (map)->tree[elem] = branch; \
5289 + } \
5290 + } while (0)
5291 +
5292 +#define DELIP_RANGE_LOOP(map, a, a1, a2, hint, branch, full, cachep, free, flags) \
5293 + for (a = a1; a <= a2; a++) { \
5294 + branch = (map)->tree[a]; \
5295 + if (branch) { \
5296 + if ((a > a1 && a < a2) || (hint)) { \
5297 + if (branch != full) \
5298 + free(branch); \
5299 + (map)->tree[a] = NULL; \
5300 + continue; \
5301 + } else if (branch == full) { \
5302 + branch = kmem_cache_alloc(cachep, flags); \
5303 + if (!branch) \
5304 + return -ENOMEM; \
5305 + memcpy(branch, full, sizeof(*branch)); \
5306 + (map)->tree[a] = branch; \
5307 + }
5308 +
5309 +#define DELIP_RANGE_LOOP_END() \
5310 + } \
5311 + }
5312 +
5313 +#define LOOP_WALK_BEGIN(map, i, branch) \
5314 + for (i = 0; i < 256; i++) { \
5315 + branch = (map)->tree[i]; \
5316 + if (likely(!branch)) \
5317 + continue;
5318 +
5319 +#define LOOP_WALK_END() \
5320 + }
5321 +
5322 +#define LOOP_WALK_BEGIN_GC(map, i, branch, full, cachep, count) \
5323 + count = -256; \
5324 + for (i = 0; i < 256; i++) { \
5325 + branch = (map)->tree[i]; \
5326 + if (likely(!branch)) \
5327 + continue; \
5328 + count++; \
5329 + if (branch == full) { \
5330 + count++; \
5331 + continue; \
5332 + }
5333 +
5334 +#define LOOP_WALK_END_GC(map, i, branch, full, cachep, count) \
5335 + if (-256 == count) { \
5336 + kmem_cache_free(cachep, branch); \
5337 + (map)->tree[i] = NULL; \
5338 + } else if (256 == count) { \
5339 + kmem_cache_free(cachep, branch); \
5340 + (map)->tree[i] = full; \
5341 + } \
5342 + }
5343 +
5344 +#define LOOP_WALK_BEGIN_COUNT(map, i, branch, inrange, count) \
5345 + for (i = 0; i < 256; i++) { \
5346 + if (!(map)->tree[i]) { \
5347 + if (inrange) { \
5348 + count++; \
5349 + inrange = 0; \
5350 + } \
5351 + continue; \
5352 + } \
5353 + branch = (map)->tree[i];
5354 +
5355 +#define LOOP_WALK_END_COUNT() \
5356 + }
5357 +
5358 +#define MIN(a, b) (a < b ? a : b)
5359 +#define MAX(a, b) (a > b ? a : b)
5360 +
5361 +#define GETVALUE1(a, a1, b1, r) \
5362 + (a == a1 ? b1 : r)
5363 +
5364 +#define GETVALUE2(a, b, a1, b1, c1, r) \
5365 + (a == a1 && b == b1 ? c1 : r)
5366 +
5367 +#define GETVALUE3(a, b, c, a1, b1, c1, d1, r) \
5368 + (a == a1 && b == b1 && c == c1 ? d1 : r)
5369 +
5370 +#define CHECK1(a, a1, a2, b1, b2, c1, c2, d1, d2) \
5371 + ( \
5372 + GETVALUE1(a, a1, b1, 0) == 0 \
5373 + && GETVALUE1(a, a2, b2, 255) == 255 \
5374 + && c1 == 0 \
5375 + && c2 == 255 \
5376 + && d1 == 0 \
5377 + && d2 == 255 \
5378 + )
5379 +
5380 +#define CHECK2(a, b, a1, a2, b1, b2, c1, c2, d1, d2) \
5381 + ( \
5382 + GETVALUE2(a, b, a1, b1, c1, 0) == 0 \
5383 + && GETVALUE2(a, b, a2, b2, c2, 255) == 255 \
5384 + && d1 == 0 \
5385 + && d2 == 255 \
5386 + )
5387 +
5388 +#define CHECK3(a, b, c, a1, a2, b1, b2, c1, c2, d1, d2) \
5389 + ( \
5390 + GETVALUE3(a, b, c, a1, b1, c1, d1, 0) == 0 \
5391 + && GETVALUE3(a, b, c, a2, b2, c2, d2, 255) == 255 \
5392 + )
5393 +
5394 +
5395 +static inline void
5396 +free_d(struct ip_set_iptreemap_d *map)
5397 +{
5398 + kmem_cache_free(cachep_d, map);
5399 +}
5400 +
5401 +static inline void
5402 +free_c(struct ip_set_iptreemap_c *map)
5403 +{
5404 + struct ip_set_iptreemap_d *dtree;
5405 + unsigned int i;
5406 +
5407 + LOOP_WALK_BEGIN(map, i, dtree) {
5408 + if (dtree != fullbitmap_d)
5409 + free_d(dtree);
5410 + } LOOP_WALK_END();
5411 +
5412 + kmem_cache_free(cachep_c, map);
5413 +}
5414 +
5415 +static inline void
5416 +free_b(struct ip_set_iptreemap_b *map)
5417 +{
5418 + struct ip_set_iptreemap_c *ctree;
5419 + unsigned int i;
5420 +
5421 + LOOP_WALK_BEGIN(map, i, ctree) {
5422 + if (ctree != fullbitmap_c)
5423 + free_c(ctree);
5424 + } LOOP_WALK_END();
5425 +
5426 + kmem_cache_free(cachep_b, map);
5427 +}
5428 +
5429 +static inline int
5430 +__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
5431 +{
5432 + struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
5433 + struct ip_set_iptreemap_b *btree;
5434 + struct ip_set_iptreemap_c *ctree;
5435 + struct ip_set_iptreemap_d *dtree;
5436 + unsigned char a, b, c, d;
5437 +
5438 + *hash_ip = ip;
5439 +
5440 + ABCD(a, b, c, d, hash_ip);
5441 +
5442 + TESTIP_WALK(map, a, btree, fullbitmap_b);
5443 + TESTIP_WALK(btree, b, ctree, fullbitmap_c);
5444 + TESTIP_WALK(ctree, c, dtree, fullbitmap_d);
5445 +
5446 + return !!test_bit(d, (void *) dtree->bitmap);
5447 +}
5448 +
5449 +static int
5450 +testip(struct ip_set *set, const void *data, size_t size, ip_set_ip_t *hash_ip)
5451 +{
5452 + struct ip_set_req_iptreemap *req = (struct ip_set_req_iptreemap *) data;
5453 +
5454 + if (size != sizeof(struct ip_set_req_iptreemap)) {
5455 + ip_set_printk("data length wrong (want %zu, have %zu)", sizeof(struct ip_set_req_iptreemap), size);
5456 + return -EINVAL;
5457 + }
5458 +
5459 + return __testip(set, req->start, hash_ip);
5460 +}
5461 +
5462 +static int
5463 +testip_kernel(struct ip_set *set, const struct sk_buff *skb, ip_set_ip_t *hash_ip, const u_int32_t *flags, unsigned char index)
5464 +{
5465 + int res;
5466 +
5467 + res = __testip(set,
5468 + ntohl(flags[index] & IPSET_SRC
5469 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
5470 + ? ip_hdr(skb)->saddr
5471 + : ip_hdr(skb)->daddr),
5472 +#else
5473 + ? skb->nh.iph->saddr
5474 + : skb->nh.iph->daddr),
5475 +#endif
5476 + hash_ip);
5477 +
5478 + return (res < 0 ? 0 : res);
5479 +}
5480 +
5481 +static inline int
5482 +__addip_single(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
5483 +{
5484 + struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
5485 + struct ip_set_iptreemap_b *btree;
5486 + struct ip_set_iptreemap_c *ctree;
5487 + struct ip_set_iptreemap_d *dtree;
5488 + unsigned char a, b, c, d;
5489 +
5490 + *hash_ip = ip;
5491 +
5492 + ABCD(a, b, c, d, hash_ip);
5493 +
5494 + ADDIP_WALK(map, a, btree, struct ip_set_iptreemap_b, cachep_b, fullbitmap_b);
5495 + ADDIP_WALK(btree, b, ctree, struct ip_set_iptreemap_c, cachep_c, fullbitmap_c);
5496 + ADDIP_WALK(ctree, c, dtree, struct ip_set_iptreemap_d, cachep_d, fullbitmap_d);
5497 +
5498 + if (test_and_set_bit(d, (void *) dtree->bitmap))
5499 + return -EEXIST;
5500 +
5501 + set_bit(b, (void *) btree->dirty);
5502 +
5503 + return 0;
5504 +}
5505 +
5506 +static inline int
5507 +__addip_range(struct ip_set *set, ip_set_ip_t start, ip_set_ip_t end, ip_set_ip_t *hash_ip)
5508 +{
5509 + struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
5510 + struct ip_set_iptreemap_b *btree;
5511 + struct ip_set_iptreemap_c *ctree;
5512 + struct ip_set_iptreemap_d *dtree;
5513 + unsigned int a, b, c, d;
5514 + unsigned char a1, b1, c1, d1;
5515 + unsigned char a2, b2, c2, d2;
5516 +
5517 + if (start == end)
5518 + return __addip_single(set, start, hash_ip);
5519 +
5520 + *hash_ip = start;
5521 +
5522 + ABCD(a1, b1, c1, d1, &start);
5523 + ABCD(a2, b2, c2, d2, &end);
5524 +
5525 + /* This is sooo ugly... */
5526 + ADDIP_RANGE_LOOP(map, a, a1, a2, CHECK1(a, a1, a2, b1, b2, c1, c2, d1, d2), btree, fullbitmap_b, cachep_b, free_b) {
5527 + ADDIP_RANGE_LOOP(btree, b, GETVALUE1(a, a1, b1, 0), GETVALUE1(a, a2, b2, 255), CHECK2(a, b, a1, a2, b1, b2, c1, c2, d1, d2), ctree, fullbitmap_c, cachep_c, free_c) {
5528 + ADDIP_RANGE_LOOP(ctree, c, GETVALUE2(a, b, a1, b1, c1, 0), GETVALUE2(a, b, a2, b2, c2, 255), CHECK3(a, b, c, a1, a2, b1, b2, c1, c2, d1, d2), dtree, fullbitmap_d, cachep_d, free_d) {
5529 + for (d = GETVALUE3(a, b, c, a1, b1, c1, d1, 0); d <= GETVALUE3(a, b, c, a2, b2, c2, d2, 255); d++)
5530 + set_bit(d, (void *) dtree->bitmap);
5531 + set_bit(b, (void *) btree->dirty);
5532 + } ADDIP_RANGE_LOOP_END();
5533 + } ADDIP_RANGE_LOOP_END();
5534 + } ADDIP_RANGE_LOOP_END();
5535 +
5536 + return 0;
5537 +}
5538 +
5539 +static int
5540 +addip(struct ip_set *set, const void *data, size_t size, ip_set_ip_t *hash_ip)
5541 +{
5542 + struct ip_set_req_iptreemap *req = (struct ip_set_req_iptreemap *) data;
5543 +
5544 + if (size != sizeof(struct ip_set_req_iptreemap)) {
5545 + ip_set_printk("data length wrong (want %zu, have %zu)", sizeof(struct ip_set_req_iptreemap), size);
5546 + return -EINVAL;
5547 + }
5548 +
5549 + return __addip_range(set, MIN(req->start, req->end), MAX(req->start, req->end), hash_ip);
5550 +}
5551 +
5552 +static int
5553 +addip_kernel(struct ip_set *set, const struct sk_buff *skb, ip_set_ip_t *hash_ip, const u_int32_t *flags, unsigned char index)
5554 +{
5555 +
5556 + return __addip_single(set,
5557 + ntohl(flags[index] & IPSET_SRC
5558 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
5559 + ? ip_hdr(skb)->saddr
5560 + : ip_hdr(skb)->daddr),
5561 +#else
5562 + ? skb->nh.iph->saddr
5563 + : skb->nh.iph->daddr),
5564 +#endif
5565 + hash_ip);
5566 +}
5567 +
5568 +static inline int
5569 +__delip_single(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip, unsigned int __nocast flags)
5570 +{
5571 + struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
5572 + struct ip_set_iptreemap_b *btree;
5573 + struct ip_set_iptreemap_c *ctree;
5574 + struct ip_set_iptreemap_d *dtree;
5575 + unsigned char a,b,c,d;
5576 +
5577 + *hash_ip = ip;
5578 +
5579 + ABCD(a, b, c, d, hash_ip);
5580 +
5581 + DELIP_WALK(map, a, btree, cachep_b, fullbitmap_b, flags);
5582 + DELIP_WALK(btree, b, ctree, cachep_c, fullbitmap_c, flags);
5583 + DELIP_WALK(ctree, c, dtree, cachep_d, fullbitmap_d, flags);
5584 +
5585 + if (!test_and_clear_bit(d, (void *) dtree->bitmap))
5586 + return -EEXIST;
5587 +
5588 + set_bit(b, (void *) btree->dirty);
5589 +
5590 + return 0;
5591 +}
5592 +
5593 +static inline int
5594 +__delip_range(struct ip_set *set, ip_set_ip_t start, ip_set_ip_t end, ip_set_ip_t *hash_ip, unsigned int __nocast flags)
5595 +{
5596 + struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
5597 + struct ip_set_iptreemap_b *btree;
5598 + struct ip_set_iptreemap_c *ctree;
5599 + struct ip_set_iptreemap_d *dtree;
5600 + unsigned int a, b, c, d;
5601 + unsigned char a1, b1, c1, d1;
5602 + unsigned char a2, b2, c2, d2;
5603 +
5604 + if (start == end)
5605 + return __delip_single(set, start, hash_ip, flags);
5606 +
5607 + *hash_ip = start;
5608 +
5609 + ABCD(a1, b1, c1, d1, &start);
5610 + ABCD(a2, b2, c2, d2, &end);
5611 +
5612 + /* This is sooo ugly... */
5613 + DELIP_RANGE_LOOP(map, a, a1, a2, CHECK1(a, a1, a2, b1, b2, c1, c2, d1, d2), btree, fullbitmap_b, cachep_b, free_b, flags) {
5614 + DELIP_RANGE_LOOP(btree, b, GETVALUE1(a, a1, b1, 0), GETVALUE1(a, a2, b2, 255), CHECK2(a, b, a1, a2, b1, b2, c1, c2, d1, d2), ctree, fullbitmap_c, cachep_c, free_c, flags) {
5615 + DELIP_RANGE_LOOP(ctree, c, GETVALUE2(a, b, a1, b1, c1, 0), GETVALUE2(a, b, a2, b2, c2, 255), CHECK3(a, b, c, a1, a2, b1, b2, c1, c2, d1, d2), dtree, fullbitmap_d, cachep_d, free_d, flags) {
5616 + for (d = GETVALUE3(a, b, c, a1, b1, c1, d1, 0); d <= GETVALUE3(a, b, c, a2, b2, c2, d2, 255); d++)
5617 + clear_bit(d, (void *) dtree->bitmap);
5618 + set_bit(b, (void *) btree->dirty);
5619 + } DELIP_RANGE_LOOP_END();
5620 + } DELIP_RANGE_LOOP_END();
5621 + } DELIP_RANGE_LOOP_END();
5622 +
5623 + return 0;
5624 +}
5625 +
5626 +static int
5627 +delip(struct ip_set *set, const void *data, size_t size, ip_set_ip_t *hash_ip)
5628 +{
5629 + struct ip_set_req_iptreemap *req = (struct ip_set_req_iptreemap *) data;
5630 +
5631 + if (size != sizeof(struct ip_set_req_iptreemap)) {
5632 + ip_set_printk("data length wrong (want %zu, have %zu)", sizeof(struct ip_set_req_iptreemap), size);
5633 + return -EINVAL;
5634 + }
5635 +
5636 + return __delip_range(set, MIN(req->start, req->end), MAX(req->start, req->end), hash_ip, GFP_KERNEL);
5637 +}
5638 +
5639 +static int
5640 +delip_kernel(struct ip_set *set, const struct sk_buff *skb, ip_set_ip_t *hash_ip, const u_int32_t *flags, unsigned char index)
5641 +{
5642 + return __delip_single(set,
5643 + ntohl(flags[index] & IPSET_SRC
5644 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
5645 + ? ip_hdr(skb)->saddr
5646 + : ip_hdr(skb)->daddr),
5647 +#else
5648 + ? skb->nh.iph->saddr
5649 + : skb->nh.iph->daddr),
5650 +#endif
5651 + hash_ip,
5652 + GFP_ATOMIC);
5653 +}
5654 +
5655 +/* Check the status of the bitmap
5656 + * -1 == all bits cleared
5657 + * 1 == all bits set
5658 + * 0 == anything else
5659 + */
5660 +static inline int
5661 +bitmap_status(struct ip_set_iptreemap_d *dtree)
5662 +{
5663 + unsigned char first = dtree->bitmap[0];
5664 + int a;
5665 +
5666 + for (a = 1; a < 32; a++)
5667 + if (dtree->bitmap[a] != first)
5668 + return 0;
5669 +
5670 + return (first == 0 ? -1 : (first == 255 ? 1 : 0));
5671 +}
5672 +
5673 +static void
5674 +gc(unsigned long addr)
5675 +{
5676 + struct ip_set *set = (struct ip_set *) addr;
5677 + struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
5678 + struct ip_set_iptreemap_b *btree;
5679 + struct ip_set_iptreemap_c *ctree;
5680 + struct ip_set_iptreemap_d *dtree;
5681 + unsigned int a, b, c;
5682 + int i, j, k;
5683 +
5684 + write_lock_bh(&set->lock);
5685 +
5686 + LOOP_WALK_BEGIN_GC(map, a, btree, fullbitmap_b, cachep_b, i) {
5687 + LOOP_WALK_BEGIN_GC(btree, b, ctree, fullbitmap_c, cachep_c, j) {
5688 + if (!test_and_clear_bit(b, (void *) btree->dirty))
5689 + continue;
5690 + LOOP_WALK_BEGIN_GC(ctree, c, dtree, fullbitmap_d, cachep_d, k) {
5691 + switch (bitmap_status(dtree)) {
5692 + case -1:
5693 + kmem_cache_free(cachep_d, dtree);
5694 + ctree->tree[c] = NULL;
5695 + k--;
5696 + break;
5697 + case 1:
5698 + kmem_cache_free(cachep_d, dtree);
5699 + ctree->tree[c] = fullbitmap_d;
5700 + k++;
5701 + break;
5702 + }
5703 + } LOOP_WALK_END();
5704 + } LOOP_WALK_END_GC(btree, b, ctree, fullbitmap_c, cachep_c, k);
5705 + } LOOP_WALK_END_GC(map, a, btree, fullbitmap_b, cachep_b, j);
5706 +
5707 + write_unlock_bh(&set->lock);
5708 +
5709 + map->gc.expires = jiffies + map->gc_interval * HZ;
5710 + add_timer(&map->gc);
5711 +}
5712 +
5713 +static inline void
5714 +init_gc_timer(struct ip_set *set)
5715 +{
5716 + struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
5717 +
5718 + init_timer(&map->gc);
5719 + map->gc.data = (unsigned long) set;
5720 + map->gc.function = gc;
5721 + map->gc.expires = jiffies + map->gc_interval * HZ;
5722 + add_timer(&map->gc);
5723 +}
5724 +
5725 +static int create(struct ip_set *set, const void *data, size_t size)
5726 +{
5727 + struct ip_set_req_iptreemap_create *req = (struct ip_set_req_iptreemap_create *) data;
5728 + struct ip_set_iptreemap *map;
5729 +
5730 + if (size != sizeof(struct ip_set_req_iptreemap_create)) {
5731 + ip_set_printk("data length wrong (want %zu, have %zu)", sizeof(struct ip_set_req_iptreemap_create), size);
5732 + return -EINVAL;
5733 + }
5734 +
5735 + map = kzalloc(sizeof(*map), GFP_KERNEL);
5736 + if (!map)
5737 + return -ENOMEM;
5738 +
5739 + map->gc_interval = req->gc_interval ? req->gc_interval : IPTREEMAP_DEFAULT_GC_TIME;
5740 + set->data = map;
5741 +
5742 + init_gc_timer(set);
5743 +
5744 + return 0;
5745 +}
5746 +
5747 +static inline void __flush(struct ip_set_iptreemap *map)
5748 +{
5749 + struct ip_set_iptreemap_b *btree;
5750 + unsigned int a;
5751 +
5752 + LOOP_WALK_BEGIN(map, a, btree);
5753 + if (btree != fullbitmap_b)
5754 + free_b(btree);
5755 + LOOP_WALK_END();
5756 +}
5757 +
5758 +static void destroy(struct ip_set *set)
5759 +{
5760 + struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
5761 +
5762 + while (!del_timer(&map->gc))
5763 + msleep(IPTREEMAP_DESTROY_SLEEP);
5764 +
5765 + __flush(map);
5766 + kfree(map);
5767 +
5768 + set->data = NULL;
5769 +}
5770 +
5771 +static void flush(struct ip_set *set)
5772 +{
5773 + struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
5774 +
5775 + while (!del_timer(&map->gc))
5776 + msleep(IPTREEMAP_DESTROY_SLEEP);
5777 +
5778 + __flush(map);
5779 +
5780 + memset(map, 0, sizeof(*map));
5781 +
5782 + init_gc_timer(set);
5783 +}
5784 +
5785 +static void list_header(const struct ip_set *set, void *data)
5786 +{
5787 + struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
5788 + struct ip_set_req_iptreemap_create *header = (struct ip_set_req_iptreemap_create *) data;
5789 +
5790 + header->gc_interval = map->gc_interval;
5791 +}
5792 +
5793 +static int list_members_size(const struct ip_set *set)
5794 +{
5795 + struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
5796 + struct ip_set_iptreemap_b *btree;
5797 + struct ip_set_iptreemap_c *ctree;
5798 + struct ip_set_iptreemap_d *dtree;
5799 + unsigned int a, b, c, d, inrange = 0, count = 0;
5800 +
5801 + LOOP_WALK_BEGIN_COUNT(map, a, btree, inrange, count) {
5802 + LOOP_WALK_BEGIN_COUNT(btree, b, ctree, inrange, count) {
5803 + LOOP_WALK_BEGIN_COUNT(ctree, c, dtree, inrange, count) {
5804 + for (d = 0; d < 256; d++) {
5805 + if (test_bit(d, (void *) dtree->bitmap)) {
5806 + inrange = 1;
5807 + } else if (inrange) {
5808 + count++;
5809 + inrange = 0;
5810 + }
5811 + }
5812 + } LOOP_WALK_END_COUNT();
5813 + } LOOP_WALK_END_COUNT();
5814 + } LOOP_WALK_END_COUNT();
5815 +
5816 + if (inrange)
5817 + count++;
5818 +
5819 + return (count * sizeof(struct ip_set_req_iptreemap));
5820 +}
5821 +
5822 +static inline size_t add_member(void *data, size_t offset, ip_set_ip_t start, ip_set_ip_t end)
5823 +{
5824 + struct ip_set_req_iptreemap *entry = (struct ip_set_req_iptreemap *) (data + offset);
5825 +
5826 + entry->start = start;
5827 + entry->end = end;
5828 +
5829 + return sizeof(*entry);
5830 +}
5831 +
5832 +static void list_members(const struct ip_set *set, void *data)
5833 +{
5834 + struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
5835 + struct ip_set_iptreemap_b *btree;
5836 + struct ip_set_iptreemap_c *ctree;
5837 + struct ip_set_iptreemap_d *dtree;
5838 + unsigned int a, b, c, d, inrange = 0;
5839 + size_t offset = 0;
5840 + ip_set_ip_t start = 0, end = 0, ip;
5841 +
5842 + LOOP_WALK_BEGIN(map, a, btree) {
5843 + LOOP_WALK_BEGIN(btree, b, ctree) {
5844 + LOOP_WALK_BEGIN(ctree, c, dtree) {
5845 + for (d = 0; d < 256; d++) {
5846 + if (test_bit(d, (void *) dtree->bitmap)) {
5847 + ip = ((a << 24) | (b << 16) | (c << 8) | d);
5848 + if (!inrange) {
5849 + inrange = 1;
5850 + start = ip;
5851 + } else if (end < ip - 1) {
5852 + offset += add_member(data, offset, start, end);
5853 + start = ip;
5854 + }
5855 + end = ip;
5856 + } else if (inrange) {
5857 + offset += add_member(data, offset, start, end);
5858 + inrange = 0;
5859 + }
5860 + }
5861 + } LOOP_WALK_END();
5862 + } LOOP_WALK_END();
5863 + } LOOP_WALK_END();
5864 +
5865 + if (inrange)
5866 + add_member(data, offset, start, end);
5867 +}
5868 +
5869 +static struct ip_set_type ip_set_iptreemap = {
5870 + .typename = SETTYPE_NAME,
5871 + .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
5872 + .protocol_version = IP_SET_PROTOCOL_VERSION,
5873 + .create = create,
5874 + .destroy = destroy,
5875 + .flush = flush,
5876 + .reqsize = sizeof(struct ip_set_req_iptreemap),
5877 + .addip = addip,
5878 + .addip_kernel = addip_kernel,
5879 + .delip = delip,
5880 + .delip_kernel = delip_kernel,
5881 + .testip = testip,
5882 + .testip_kernel = testip_kernel,
5883 + .header_size = sizeof(struct ip_set_req_iptreemap_create),
5884 + .list_header = list_header,
5885 + .list_members_size = list_members_size,
5886 + .list_members = list_members,
5887 + .me = THIS_MODULE,
5888 +};
5889 +
5890 +MODULE_LICENSE("GPL");
5891 +MODULE_AUTHOR("Sven Wegener <sven.wegener@stealer.net>");
5892 +MODULE_DESCRIPTION("iptreemap type of IP sets");
5893 +
5894 +static int __init ip_set_iptreemap_init(void)
5895 +{
5896 + int ret = -ENOMEM;
5897 + int a;
5898 +
5899 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
5900 + cachep_b = kmem_cache_create("ip_set_iptreemap_b",
5901 + sizeof(struct ip_set_iptreemap_b),
5902 + 0, 0, NULL);
5903 +#else
5904 + cachep_b = kmem_cache_create("ip_set_iptreemap_b",
5905 + sizeof(struct ip_set_iptreemap_b),
5906 + 0, 0, NULL, NULL);
5907 +#endif
5908 + if (!cachep_b) {
5909 + ip_set_printk("Unable to create ip_set_iptreemap_b slab cache");
5910 + goto out;
5911 + }
5912 +
5913 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
5914 + cachep_c = kmem_cache_create("ip_set_iptreemap_c",
5915 + sizeof(struct ip_set_iptreemap_c),
5916 + 0, 0, NULL);
5917 +#else
5918 + cachep_c = kmem_cache_create("ip_set_iptreemap_c",
5919 + sizeof(struct ip_set_iptreemap_c),
5920 + 0, 0, NULL, NULL);
5921 +#endif
5922 + if (!cachep_c) {
5923 + ip_set_printk("Unable to create ip_set_iptreemap_c slab cache");
5924 + goto outb;
5925 + }
5926 +
5927 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
5928 + cachep_d = kmem_cache_create("ip_set_iptreemap_d",
5929 + sizeof(struct ip_set_iptreemap_d),
5930 + 0, 0, NULL);
5931 +#else
5932 + cachep_d = kmem_cache_create("ip_set_iptreemap_d",
5933 + sizeof(struct ip_set_iptreemap_d),
5934 + 0, 0, NULL, NULL);
5935 +#endif
5936 + if (!cachep_d) {
5937 + ip_set_printk("Unable to create ip_set_iptreemap_d slab cache");
5938 + goto outc;
5939 + }
5940 +
5941 + fullbitmap_d = kmem_cache_alloc(cachep_d, GFP_KERNEL);
5942 + if (!fullbitmap_d)
5943 + goto outd;
5944 +
5945 + fullbitmap_c = kmem_cache_alloc(cachep_c, GFP_KERNEL);
5946 + if (!fullbitmap_c)
5947 + goto outbitmapd;
5948 +
5949 + fullbitmap_b = kmem_cache_alloc(cachep_b, GFP_KERNEL);
5950 + if (!fullbitmap_b)
5951 + goto outbitmapc;
5952 +
5953 + ret = ip_set_register_set_type(&ip_set_iptreemap);
5954 + if (0 > ret)
5955 + goto outbitmapb;
5956 +
5957 + /* Now init our global bitmaps */
5958 + memset(fullbitmap_d->bitmap, 0xff, sizeof(fullbitmap_d->bitmap));
5959 +
5960 + for (a = 0; a < 256; a++)
5961 + fullbitmap_c->tree[a] = fullbitmap_d;
5962 +
5963 + for (a = 0; a < 256; a++)
5964 + fullbitmap_b->tree[a] = fullbitmap_c;
5965 + memset(fullbitmap_b->dirty, 0, sizeof(fullbitmap_b->dirty));
5966 +
5967 + return 0;
5968 +
5969 +outbitmapb:
5970 + kmem_cache_free(cachep_b, fullbitmap_b);
5971 +outbitmapc:
5972 + kmem_cache_free(cachep_c, fullbitmap_c);
5973 +outbitmapd:
5974 + kmem_cache_free(cachep_d, fullbitmap_d);
5975 +outd:
5976 + kmem_cache_destroy(cachep_d);
5977 +outc:
5978 + kmem_cache_destroy(cachep_c);
5979 +outb:
5980 + kmem_cache_destroy(cachep_b);
5981 +out:
5982 +
5983 + return ret;
5984 +}
5985 +
5986 +static void __exit ip_set_iptreemap_fini(void)
5987 +{
5988 + ip_set_unregister_set_type(&ip_set_iptreemap);
5989 + kmem_cache_free(cachep_d, fullbitmap_d);
5990 + kmem_cache_free(cachep_c, fullbitmap_c);
5991 + kmem_cache_free(cachep_b, fullbitmap_b);
5992 + kmem_cache_destroy(cachep_d);
5993 + kmem_cache_destroy(cachep_c);
5994 + kmem_cache_destroy(cachep_b);
5995 +}
5996 +
5997 +module_init(ip_set_iptreemap_init);
5998 +module_exit(ip_set_iptreemap_fini);
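
The list_members() walk above reports the iptreemap's contents by coalescing consecutive set bits into start/end ranges. A minimal user-space C sketch of that run-coalescing idea follows; the 256-bit bitmap and names are illustrative only, not part of the patch:

/* Stand-alone illustration of the run coalescing done by list_members():
 * scan a bitmap, emit one [start, end] range per run of consecutive bits.
 * Build with: cc -o runs runs.c
 */
#include <stdio.h>

int main(void)
{
	unsigned char bitmap[32] = { 0 };	/* 256 bits, like one "d" level node */
	int d, inrange = 0, start = 0;

	/* mark 10-13 and 200 as members */
	for (d = 10; d <= 13; d++)
		bitmap[d / 8] |= 1 << (d % 8);
	bitmap[200 / 8] |= 1 << (200 % 8);

	for (d = 0; d < 256; d++) {
		int bit = bitmap[d / 8] & (1 << (d % 8));

		if (bit && !inrange) {
			inrange = 1;
			start = d;
		} else if (!bit && inrange) {
			printf("range %d-%d\n", start, d - 1);
			inrange = 0;
		}
	}
	if (inrange)
		printf("range %d-255\n", start);	/* close a run ending at the last bit */

	return 0;
}
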
5999 Index: linux-2.6.21.7/net/ipv4/netfilter/ip_set_macipmap.c
6000 ===================================================================
6001 --- /dev/null
6002 +++ linux-2.6.21.7/net/ipv4/netfilter/ip_set_macipmap.c
6003 @@ -0,0 +1,375 @@
6004 +/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
6005 + * Patrick Schaaf <bof@bof.de>
6006 + * Martin Josefsson <gandalf@wlug.westbo.se>
6007 + * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
6008 + *
6009 + * This program is free software; you can redistribute it and/or modify
6010 + * it under the terms of the GNU General Public License version 2 as
6011 + * published by the Free Software Foundation.
6012 + */
6013 +
6014 +/* Kernel module implementing an IP set type: the macipmap type */
6015 +
6016 +#include <linux/module.h>
6017 +#include <linux/ip.h>
6018 +#include <linux/skbuff.h>
6019 +#include <linux/version.h>
6020 +#include <linux/netfilter_ipv4/ip_tables.h>
6021 +#include <linux/netfilter_ipv4/ip_set.h>
6022 +#include <linux/errno.h>
6023 +#include <asm/uaccess.h>
6024 +#include <asm/bitops.h>
6025 +#include <linux/spinlock.h>
6026 +#include <linux/if_ether.h>
6027 +#include <linux/vmalloc.h>
6028 +
6029 +#include <linux/netfilter_ipv4/ip_set_malloc.h>
6030 +#include <linux/netfilter_ipv4/ip_set_macipmap.h>
6031 +
6032 +static int
6033 +testip(struct ip_set *set, const void *data, size_t size, ip_set_ip_t *hash_ip)
6034 +{
6035 + struct ip_set_macipmap *map = (struct ip_set_macipmap *) set->data;
6036 + struct ip_set_macip *table = (struct ip_set_macip *) map->members;
6037 + struct ip_set_req_macipmap *req = (struct ip_set_req_macipmap *) data;
6038 +
6039 + if (size != sizeof(struct ip_set_req_macipmap)) {
6040 + ip_set_printk("data length wrong (want %zu, have %zu)",
6041 + sizeof(struct ip_set_req_macipmap),
6042 + size);
6043 + return -EINVAL;
6044 + }
6045 +
6046 + if (req->ip < map->first_ip || req->ip > map->last_ip)
6047 + return -ERANGE;
6048 +
6049 + *hash_ip = req->ip;
6050 + DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u",
6051 + set->name, HIPQUAD(req->ip), HIPQUAD(*hash_ip));
6052 + if (test_bit(IPSET_MACIP_ISSET,
6053 + (void *) &table[req->ip - map->first_ip].flags)) {
6054 + return (memcmp(req->ethernet,
6055 + &table[req->ip - map->first_ip].ethernet,
6056 + ETH_ALEN) == 0);
6057 + } else {
6058 + return (map->flags & IPSET_MACIP_MATCHUNSET ? 1 : 0);
6059 + }
6060 +}
6061 +
6062 +static int
6063 +testip_kernel(struct ip_set *set,
6064 + const struct sk_buff *skb,
6065 + ip_set_ip_t *hash_ip,
6066 + const u_int32_t *flags,
6067 + unsigned char index)
6068 +{
6069 + struct ip_set_macipmap *map =
6070 + (struct ip_set_macipmap *) set->data;
6071 + struct ip_set_macip *table =
6072 + (struct ip_set_macip *) map->members;
6073 + ip_set_ip_t ip;
6074 +
6075 + ip = ntohl(flags[index] & IPSET_SRC
6076 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
6077 + ? ip_hdr(skb)->saddr
6078 + : ip_hdr(skb)->daddr);
6079 +#else
6080 + ? skb->nh.iph->saddr
6081 + : skb->nh.iph->daddr);
6082 +#endif
6083 +
6084 + if (ip < map->first_ip || ip > map->last_ip)
6085 + return 0;
6086 +
6087 + *hash_ip = ip;
6088 + DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u",
6089 + set->name, HIPQUAD(ip), HIPQUAD(*hash_ip));
6090 + if (test_bit(IPSET_MACIP_ISSET,
6091 + (void *) &table[ip - map->first_ip].flags)) {
6092 + /* Is mac pointer valid?
6093 + * If so, compare... */
6094 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
6095 + return (skb_mac_header(skb) >= skb->head
6096 + && (skb_mac_header(skb) + ETH_HLEN) <= skb->data
6097 +#else
6098 + return (skb->mac.raw >= skb->head
6099 + && (skb->mac.raw + ETH_HLEN) <= skb->data
6100 +#endif
6101 + && (memcmp(eth_hdr(skb)->h_source,
6102 + &table[ip - map->first_ip].ethernet,
6103 + ETH_ALEN) == 0));
6104 + } else {
6105 + return (map->flags & IPSET_MACIP_MATCHUNSET ? 1 : 0);
6106 + }
6107 +}
6108 +
6109 +/* returns 0 on success */
6110 +static inline int
6111 +__addip(struct ip_set *set,
6112 + ip_set_ip_t ip, unsigned char *ethernet, ip_set_ip_t *hash_ip)
6113 +{
6114 + struct ip_set_macipmap *map =
6115 + (struct ip_set_macipmap *) set->data;
6116 + struct ip_set_macip *table =
6117 + (struct ip_set_macip *) map->members;
6118 +
6119 + if (ip < map->first_ip || ip > map->last_ip)
6120 + return -ERANGE;
6121 + if (test_and_set_bit(IPSET_MACIP_ISSET,
6122 + (void *) &table[ip - map->first_ip].flags))
6123 + return -EEXIST;
6124 +
6125 + *hash_ip = ip;
6126 + DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
6127 + memcpy(&table[ip - map->first_ip].ethernet, ethernet, ETH_ALEN);
6128 + return 0;
6129 +}
6130 +
6131 +static int
6132 +addip(struct ip_set *set, const void *data, size_t size,
6133 + ip_set_ip_t *hash_ip)
6134 +{
6135 + struct ip_set_req_macipmap *req =
6136 + (struct ip_set_req_macipmap *) data;
6137 +
6138 + if (size != sizeof(struct ip_set_req_macipmap)) {
6139 + ip_set_printk("data length wrong (want %zu, have %zu)",
6140 + sizeof(struct ip_set_req_macipmap),
6141 + size);
6142 + return -EINVAL;
6143 + }
6144 + return __addip(set, req->ip, req->ethernet, hash_ip);
6145 +}
6146 +
6147 +static int
6148 +addip_kernel(struct ip_set *set,
6149 + const struct sk_buff *skb,
6150 + ip_set_ip_t *hash_ip,
6151 + const u_int32_t *flags,
6152 + unsigned char index)
6153 +{
6154 + ip_set_ip_t ip;
6155 +
6156 + ip = ntohl(flags[index] & IPSET_SRC
6157 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
6158 + ? ip_hdr(skb)->saddr
6159 + : ip_hdr(skb)->daddr);
6160 +#else
6161 + ? skb->nh.iph->saddr
6162 + : skb->nh.iph->daddr);
6163 +#endif
6164 +
6165 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
6166 + if (!(skb_mac_header(skb) >= skb->head
6167 + && (skb_mac_header(skb) + ETH_HLEN) <= skb->data))
6168 +#else
6169 + if (!(skb->mac.raw >= skb->head
6170 + && (skb->mac.raw + ETH_HLEN) <= skb->data))
6171 +#endif
6172 + return -EINVAL;
6173 +
6174 + return __addip(set, ip, eth_hdr(skb)->h_source, hash_ip);
6175 +}
6176 +
6177 +static inline int
6178 +__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
6179 +{
6180 + struct ip_set_macipmap *map =
6181 + (struct ip_set_macipmap *) set->data;
6182 + struct ip_set_macip *table =
6183 + (struct ip_set_macip *) map->members;
6184 +
6185 + if (ip < map->first_ip || ip > map->last_ip)
6186 + return -ERANGE;
6187 + if (!test_and_clear_bit(IPSET_MACIP_ISSET,
6188 + (void *)&table[ip - map->first_ip].flags))
6189 + return -EEXIST;
6190 +
6191 + *hash_ip = ip;
6192 + DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
6193 + return 0;
6194 +}
6195 +
6196 +static int
6197 +delip(struct ip_set *set, const void *data, size_t size,
6198 + ip_set_ip_t *hash_ip)
6199 +{
6200 + struct ip_set_req_macipmap *req =
6201 + (struct ip_set_req_macipmap *) data;
6202 +
6203 + if (size != sizeof(struct ip_set_req_macipmap)) {
6204 + ip_set_printk("data length wrong (want %zu, have %zu)",
6205 + sizeof(struct ip_set_req_macipmap),
6206 + size);
6207 + return -EINVAL;
6208 + }
6209 + return __delip(set, req->ip, hash_ip);
6210 +}
6211 +
6212 +static int
6213 +delip_kernel(struct ip_set *set,
6214 + const struct sk_buff *skb,
6215 + ip_set_ip_t *hash_ip,
6216 + const u_int32_t *flags,
6217 + unsigned char index)
6218 +{
6219 + return __delip(set,
6220 + ntohl(flags[index] & IPSET_SRC
6221 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
6222 + ? ip_hdr(skb)->saddr
6223 + : ip_hdr(skb)->daddr),
6224 +#else
6225 + ? skb->nh.iph->saddr
6226 + : skb->nh.iph->daddr),
6227 +#endif
6228 + hash_ip);
6229 +}
6230 +
6231 +static inline size_t members_size(ip_set_id_t from, ip_set_id_t to)
6232 +{
6233 + return (size_t)((to - from + 1) * sizeof(struct ip_set_macip));
6234 +}
6235 +
6236 +static int create(struct ip_set *set, const void *data, size_t size)
6237 +{
6238 + int newbytes;
6239 + struct ip_set_req_macipmap_create *req =
6240 + (struct ip_set_req_macipmap_create *) data;
6241 + struct ip_set_macipmap *map;
6242 +
6243 + if (size != sizeof(struct ip_set_req_macipmap_create)) {
6244 + ip_set_printk("data length wrong (want %zu, have %zu)",
6245 + sizeof(struct ip_set_req_macipmap_create),
6246 + size);
6247 + return -EINVAL;
6248 + }
6249 +
6250 + DP("from %u.%u.%u.%u to %u.%u.%u.%u",
6251 + HIPQUAD(req->from), HIPQUAD(req->to));
6252 +
6253 + if (req->from > req->to) {
6254 + DP("bad ip range");
6255 + return -ENOEXEC;
6256 + }
6257 +
6258 + if (req->to - req->from > MAX_RANGE) {
6259 + ip_set_printk("range too big (max %d addresses)",
6260 + MAX_RANGE+1);
6261 + return -ENOEXEC;
6262 + }
6263 +
6264 + map = kmalloc(sizeof(struct ip_set_macipmap), GFP_KERNEL);
6265 + if (!map) {
6266 + DP("out of memory for %d bytes",
6267 + sizeof(struct ip_set_macipmap));
6268 + return -ENOMEM;
6269 + }
6270 + map->flags = req->flags;
6271 + map->first_ip = req->from;
6272 + map->last_ip = req->to;
6273 + newbytes = members_size(map->first_ip, map->last_ip);
6274 + map->members = ip_set_malloc(newbytes);
6275 + DP("members: %u %p", newbytes, map->members);
6276 + if (!map->members) {
6277 + DP("out of memory for %d bytes", newbytes);
6278 + kfree(map);
6279 + return -ENOMEM;
6280 + }
6281 + memset(map->members, 0, newbytes);
6282 +
6283 + set->data = map;
6284 + return 0;
6285 +}
6286 +
6287 +static void destroy(struct ip_set *set)
6288 +{
6289 + struct ip_set_macipmap *map =
6290 + (struct ip_set_macipmap *) set->data;
6291 +
6292 + ip_set_free(map->members, members_size(map->first_ip, map->last_ip));
6293 + kfree(map);
6294 +
6295 + set->data = NULL;
6296 +}
6297 +
6298 +static void flush(struct ip_set *set)
6299 +{
6300 + struct ip_set_macipmap *map =
6301 + (struct ip_set_macipmap *) set->data;
6302 + memset(map->members, 0, members_size(map->first_ip, map->last_ip));
6303 +}
6304 +
6305 +static void list_header(const struct ip_set *set, void *data)
6306 +{
6307 + struct ip_set_macipmap *map =
6308 + (struct ip_set_macipmap *) set->data;
6309 + struct ip_set_req_macipmap_create *header =
6310 + (struct ip_set_req_macipmap_create *) data;
6311 +
6312 + DP("list_header %x %x %u", map->first_ip, map->last_ip,
6313 + map->flags);
6314 +
6315 + header->from = map->first_ip;
6316 + header->to = map->last_ip;
6317 + header->flags = map->flags;
6318 +}
6319 +
6320 +static int list_members_size(const struct ip_set *set)
6321 +{
6322 + struct ip_set_macipmap *map =
6323 + (struct ip_set_macipmap *) set->data;
6324 +
6325 + DP("%u", members_size(map->first_ip, map->last_ip));
6326 + return members_size(map->first_ip, map->last_ip);
6327 +}
6328 +
6329 +static void list_members(const struct ip_set *set, void *data)
6330 +{
6331 + struct ip_set_macipmap *map =
6332 + (struct ip_set_macipmap *) set->data;
6333 +
6334 + int bytes = members_size(map->first_ip, map->last_ip);
6335 +
6336 + DP("members: %u %p", bytes, map->members);
6337 + memcpy(data, map->members, bytes);
6338 +}
6339 +
6340 +static struct ip_set_type ip_set_macipmap = {
6341 + .typename = SETTYPE_NAME,
6342 + .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
6343 + .protocol_version = IP_SET_PROTOCOL_VERSION,
6344 + .create = &create,
6345 + .destroy = &destroy,
6346 + .flush = &flush,
6347 + .reqsize = sizeof(struct ip_set_req_macipmap),
6348 + .addip = &addip,
6349 + .addip_kernel = &addip_kernel,
6350 + .delip = &delip,
6351 + .delip_kernel = &delip_kernel,
6352 + .testip = &testip,
6353 + .testip_kernel = &testip_kernel,
6354 + .header_size = sizeof(struct ip_set_req_macipmap_create),
6355 + .list_header = &list_header,
6356 + .list_members_size = &list_members_size,
6357 + .list_members = &list_members,
6358 + .me = THIS_MODULE,
6359 +};
6360 +
6361 +MODULE_LICENSE("GPL");
6362 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
6363 +MODULE_DESCRIPTION("macipmap type of IP sets");
6364 +
6365 +static int __init ip_set_macipmap_init(void)
6366 +{
6367 + init_max_malloc_size();
6368 + return ip_set_register_set_type(&ip_set_macipmap);
6369 +}
6370 +
6371 +static void __exit ip_set_macipmap_fini(void)
6372 +{
6373 + /* FIXME: possible race with ip_set_create() */
6374 + ip_set_unregister_set_type(&ip_set_macipmap);
6375 +}
6376 +
6377 +module_init(ip_set_macipmap_init);
6378 +module_exit(ip_set_macipmap_fini);
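
The macipmap lookup above matches a packet only when the per-address "set" flag is on and the stored MAC equals the packet's source MAC; unset slots match only if the map was created with IPSET_MACIP_MATCHUNSET. A small user-space sketch of that rule, where the struct and flag value are illustrative stand-ins rather than the patch's own types:

/* Sketch of the macipmap membership rule used by testip()/testip_kernel(). */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define MATCHUNSET 0x1		/* illustrative stand-in for IPSET_MACIP_MATCHUNSET */

struct macip {
	unsigned char set;	/* stand-in for the IPSET_MACIP_ISSET flag bit */
	unsigned char mac[6];
};

static int macip_test(const struct macip *table, uint32_t first_ip,
		      uint32_t last_ip, uint32_t flags,
		      uint32_t ip, const unsigned char *mac)
{
	if (ip < first_ip || ip > last_ip)
		return 0;
	if (!table[ip - first_ip].set)
		return (flags & MATCHUNSET) ? 1 : 0;
	return memcmp(table[ip - first_ip].mac, mac, 6) == 0;
}

int main(void)
{
	struct macip table[4] = { { 0 } };
	unsigned char mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	table[2].set = 1;
	memcpy(table[2].mac, mac, 6);

	printf("%d\n", macip_test(table, 100, 103, 0, 102, mac));	/* 1: IP and MAC match */
	printf("%d\n", macip_test(table, 100, 103, 0, 101, mac));	/* 0: slot unset, no MATCHUNSET */
	return 0;
}
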
6379 Index: linux-2.6.21.7/net/ipv4/netfilter/ip_set_nethash.c
6380 ===================================================================
6381 --- /dev/null
6382 +++ linux-2.6.21.7/net/ipv4/netfilter/ip_set_nethash.c
6383 @@ -0,0 +1,497 @@
6384 +/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
6385 + *
6386 + * This program is free software; you can redistribute it and/or modify
6387 + * it under the terms of the GNU General Public License version 2 as
6388 + * published by the Free Software Foundation.
6389 + */
6390 +
6391 +/* Kernel module implementing a cidr nethash set */
6392 +
6393 +#include <linux/module.h>
6394 +#include <linux/ip.h>
6395 +#include <linux/skbuff.h>
6396 +#include <linux/version.h>
6397 +#include <linux/jhash.h>
6398 +#include <linux/netfilter_ipv4/ip_tables.h>
6399 +#include <linux/netfilter_ipv4/ip_set.h>
6400 +#include <linux/errno.h>
6401 +#include <asm/uaccess.h>
6402 +#include <asm/bitops.h>
6403 +#include <linux/spinlock.h>
6404 +#include <linux/vmalloc.h>
6405 +#include <linux/random.h>
6406 +
6407 +#include <net/ip.h>
6408 +
6409 +#include <linux/netfilter_ipv4/ip_set_malloc.h>
6410 +#include <linux/netfilter_ipv4/ip_set_nethash.h>
6411 +
6412 +static int limit = MAX_RANGE;
6413 +
6414 +static inline __u32
6415 +jhash_ip(const struct ip_set_nethash *map, uint16_t i, ip_set_ip_t ip)
6416 +{
6417 + return jhash_1word(ip, *(((uint32_t *) map->initval) + i));
6418 +}
6419 +
6420 +static inline __u32
6421 +hash_id_cidr(struct ip_set_nethash *map,
6422 + ip_set_ip_t ip,
6423 + unsigned char cidr,
6424 + ip_set_ip_t *hash_ip)
6425 +{
6426 + __u32 id;
6427 + u_int16_t i;
6428 + ip_set_ip_t *elem;
6429 +
6430 + *hash_ip = pack(ip, cidr);
6431 +
6432 + for (i = 0; i < map->probes; i++) {
6433 + id = jhash_ip(map, i, *hash_ip) % map->hashsize;
6434 + DP("hash key: %u", id);
6435 + elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
6436 + if (*elem == *hash_ip)
6437 + return id;
6438 + }
6439 + return UINT_MAX;
6440 +}
6441 +
6442 +static inline __u32
6443 +hash_id(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
6444 +{
6445 + struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
6446 + __u32 id = UINT_MAX;
6447 + int i;
6448 +
6449 + for (i = 0; i < 30 && map->cidr[i]; i++) {
6450 + id = hash_id_cidr(map, ip, map->cidr[i], hash_ip);
6451 + if (id != UINT_MAX)
6452 + break;
6453 + }
6454 + return id;
6455 +}
6456 +
6457 +static inline int
6458 +__testip_cidr(struct ip_set *set, ip_set_ip_t ip, unsigned char cidr,
6459 + ip_set_ip_t *hash_ip)
6460 +{
6461 + struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
6462 +
6463 + return (ip && hash_id_cidr(map, ip, cidr, hash_ip) != UINT_MAX);
6464 +}
6465 +
6466 +static inline int
6467 +__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
6468 +{
6469 + return (ip && hash_id(set, ip, hash_ip) != UINT_MAX);
6470 +}
6471 +
6472 +static int
6473 +testip(struct ip_set *set, const void *data, size_t size,
6474 + ip_set_ip_t *hash_ip)
6475 +{
6476 + struct ip_set_req_nethash *req =
6477 + (struct ip_set_req_nethash *) data;
6478 +
6479 + if (size != sizeof(struct ip_set_req_nethash)) {
6480 + ip_set_printk("data length wrong (want %zu, have %zu)",
6481 + sizeof(struct ip_set_req_nethash),
6482 + size);
6483 + return -EINVAL;
6484 + }
6485 + return (req->cidr == 32 ? __testip(set, req->ip, hash_ip)
6486 + : __testip_cidr(set, req->ip, req->cidr, hash_ip));
6487 +}
6488 +
6489 +static int
6490 +testip_kernel(struct ip_set *set,
6491 + const struct sk_buff *skb,
6492 + ip_set_ip_t *hash_ip,
6493 + const u_int32_t *flags,
6494 + unsigned char index)
6495 +{
6496 + return __testip(set,
6497 + ntohl(flags[index] & IPSET_SRC
6498 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
6499 + ? ip_hdr(skb)->saddr
6500 + : ip_hdr(skb)->daddr),
6501 +#else
6502 + ? skb->nh.iph->saddr
6503 + : skb->nh.iph->daddr),
6504 +#endif
6505 + hash_ip);
6506 +}
6507 +
6508 +static inline int
6509 +__addip_base(struct ip_set_nethash *map, ip_set_ip_t ip)
6510 +{
6511 + __u32 probe;
6512 + u_int16_t i;
6513 + ip_set_ip_t *elem;
6514 +
6515 + for (i = 0; i < map->probes; i++) {
6516 + probe = jhash_ip(map, i, ip) % map->hashsize;
6517 + elem = HARRAY_ELEM(map->members, ip_set_ip_t *, probe);
6518 + if (*elem == ip)
6519 + return -EEXIST;
6520 + if (!*elem) {
6521 + *elem = ip;
6522 + map->elements++;
6523 + return 0;
6524 + }
6525 + }
6526 + /* Trigger rehashing */
6527 + return -EAGAIN;
6528 +}
6529 +
6530 +static inline int
6531 +__addip(struct ip_set_nethash *map, ip_set_ip_t ip, unsigned char cidr,
6532 + ip_set_ip_t *hash_ip)
6533 +{
6534 + if (!ip || map->elements >= limit)
6535 + return -ERANGE;
6536 +
6537 + *hash_ip = pack(ip, cidr);
6538 + DP("%u.%u.%u.%u/%u, %u.%u.%u.%u", HIPQUAD(ip), cidr, HIPQUAD(*hash_ip));
6539 +
6540 + return __addip_base(map, *hash_ip);
6541 +}
6542 +
6543 +static void
6544 +update_cidr_sizes(struct ip_set_nethash *map, unsigned char cidr)
6545 +{
6546 + unsigned char next;
6547 + int i;
6548 +
6549 + for (i = 0; i < 30 && map->cidr[i]; i++) {
6550 + if (map->cidr[i] == cidr) {
6551 + return;
6552 + } else if (map->cidr[i] < cidr) {
6553 + next = map->cidr[i];
6554 + map->cidr[i] = cidr;
6555 + cidr = next;
6556 + }
6557 + }
6558 + if (i < 30)
6559 + map->cidr[i] = cidr;
6560 +}
6561 +
6562 +static int
6563 +addip(struct ip_set *set, const void *data, size_t size,
6564 + ip_set_ip_t *hash_ip)
6565 +{
6566 + struct ip_set_req_nethash *req =
6567 + (struct ip_set_req_nethash *) data;
6568 + int ret;
6569 +
6570 + if (size != sizeof(struct ip_set_req_nethash)) {
6571 + ip_set_printk("data length wrong (want %zu, have %zu)",
6572 + sizeof(struct ip_set_req_nethash),
6573 + size);
6574 + return -EINVAL;
6575 + }
6576 + ret = __addip((struct ip_set_nethash *) set->data,
6577 + req->ip, req->cidr, hash_ip);
6578 +
6579 + if (ret == 0)
6580 + update_cidr_sizes((struct ip_set_nethash *) set->data,
6581 + req->cidr);
6582 +
6583 + return ret;
6584 +}
6585 +
6586 +static int
6587 +addip_kernel(struct ip_set *set,
6588 + const struct sk_buff *skb,
6589 + ip_set_ip_t *hash_ip,
6590 + const u_int32_t *flags,
6591 + unsigned char index)
6592 +{
6593 + struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
6594 + int ret = -ERANGE;
6595 + ip_set_ip_t ip = ntohl(flags[index] & IPSET_SRC
6596 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
6597 + ? ip_hdr(skb)->saddr
6598 + : ip_hdr(skb)->daddr);
6599 +#else
6600 + ? skb->nh.iph->saddr
6601 + : skb->nh.iph->daddr);
6602 +#endif
6603 +
6604 + if (map->cidr[0])
6605 + ret = __addip(map, ip, map->cidr[0], hash_ip);
6606 +
6607 + return ret;
6608 +}
6609 +
6610 +static int retry(struct ip_set *set)
6611 +{
6612 + struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
6613 + ip_set_ip_t *elem;
6614 + void *members;
6615 + u_int32_t i, hashsize = map->hashsize;
6616 + int res;
6617 + struct ip_set_nethash *tmp;
6618 +
6619 + if (map->resize == 0)
6620 + return -ERANGE;
6621 +
6622 + again:
6623 + res = 0;
6624 +
6625 + /* Calculate new parameters */
6626 + hashsize += (hashsize * map->resize)/100;
6627 + if (hashsize == map->hashsize)
6628 + hashsize++;
6629 +
6630 + ip_set_printk("rehashing of set %s triggered: "
6631 + "hashsize grows from %u to %u",
6632 + set->name, map->hashsize, hashsize);
6633 +
6634 + tmp = kmalloc(sizeof(struct ip_set_nethash)
6635 + + map->probes * sizeof(uint32_t), GFP_ATOMIC);
6636 + if (!tmp) {
6637 + DP("out of memory for %d bytes",
6638 + sizeof(struct ip_set_nethash)
6639 + + map->probes * sizeof(uint32_t));
6640 + return -ENOMEM;
6641 + }
6642 + tmp->members = harray_malloc(hashsize, sizeof(ip_set_ip_t), GFP_ATOMIC);
6643 + if (!tmp->members) {
6644 + DP("out of memory for %d bytes", hashsize * sizeof(ip_set_ip_t));
6645 + kfree(tmp);
6646 + return -ENOMEM;
6647 + }
6648 + tmp->hashsize = hashsize;
6649 + tmp->elements = 0;
6650 + tmp->probes = map->probes;
6651 + tmp->resize = map->resize;
6652 + memcpy(tmp->initval, map->initval, map->probes * sizeof(uint32_t));
6653 + memcpy(tmp->cidr, map->cidr, 30 * sizeof(unsigned char));
6654 +
6655 + write_lock_bh(&set->lock);
6656 + map = (struct ip_set_nethash *) set->data; /* Play safe */
6657 + for (i = 0; i < map->hashsize && res == 0; i++) {
6658 + elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
6659 + if (*elem)
6660 + res = __addip_base(tmp, *elem);
6661 + }
6662 + if (res) {
6663 + /* Failure, try again */
6664 + write_unlock_bh(&set->lock);
6665 + harray_free(tmp->members);
6666 + kfree(tmp);
6667 + goto again;
6668 + }
6669 +
6670 + /* Success at resizing! */
6671 + members = map->members;
6672 +
6673 + map->hashsize = tmp->hashsize;
6674 + map->members = tmp->members;
6675 + write_unlock_bh(&set->lock);
6676 +
6677 + harray_free(members);
6678 + kfree(tmp);
6679 +
6680 + return 0;
6681 +}
6682 +
6683 +static inline int
6684 +__delip(struct ip_set_nethash *map, ip_set_ip_t ip, unsigned char cidr,
6685 + ip_set_ip_t *hash_ip)
6686 +{
6687 + ip_set_ip_t id, *elem;
6688 +
6689 + if (!ip)
6690 + return -ERANGE;
6691 +
6692 + id = hash_id_cidr(map, ip, cidr, hash_ip);
6693 + if (id == UINT_MAX)
6694 + return -EEXIST;
6695 +
6696 + elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
6697 + *elem = 0;
6698 + map->elements--;
6699 + return 0;
6700 +}
6701 +
6702 +static int
6703 +delip(struct ip_set *set, const void *data, size_t size,
6704 + ip_set_ip_t *hash_ip)
6705 +{
6706 + struct ip_set_req_nethash *req =
6707 + (struct ip_set_req_nethash *) data;
6708 +
6709 + if (size != sizeof(struct ip_set_req_nethash)) {
6710 + ip_set_printk("data length wrong (want %zu, have %zu)",
6711 + sizeof(struct ip_set_req_nethash),
6712 + size);
6713 + return -EINVAL;
6714 + }
6715 + /* TODO: no garbage collection in map->cidr */
6716 + return __delip((struct ip_set_nethash *) set->data,
6717 + req->ip, req->cidr, hash_ip);
6718 +}
6719 +
6720 +static int
6721 +delip_kernel(struct ip_set *set,
6722 + const struct sk_buff *skb,
6723 + ip_set_ip_t *hash_ip,
6724 + const u_int32_t *flags,
6725 + unsigned char index)
6726 +{
6727 + struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
6728 + int ret = -ERANGE;
6729 + ip_set_ip_t ip = ntohl(flags[index] & IPSET_SRC
6730 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
6731 + ? ip_hdr(skb)->saddr
6732 + : ip_hdr(skb)->daddr);
6733 +#else
6734 + ? skb->nh.iph->saddr
6735 + : skb->nh.iph->daddr);
6736 +#endif
6737 +
6738 + if (map->cidr[0])
6739 + ret = __delip(map, ip, map->cidr[0], hash_ip);
6740 +
6741 + return ret;
6742 +}
6743 +
6744 +static int create(struct ip_set *set, const void *data, size_t size)
6745 +{
6746 + struct ip_set_req_nethash_create *req =
6747 + (struct ip_set_req_nethash_create *) data;
6748 + struct ip_set_nethash *map;
6749 + uint16_t i;
6750 +
6751 + if (size != sizeof(struct ip_set_req_nethash_create)) {
6752 + ip_set_printk("data length wrong (want %zu, have %zu)",
6753 + sizeof(struct ip_set_req_nethash_create),
6754 + size);
6755 + return -EINVAL;
6756 + }
6757 +
6758 + if (req->hashsize < 1) {
6759 + ip_set_printk("hashsize too small");
6760 + return -ENOEXEC;
6761 + }
6762 + if (req->probes < 1) {
6763 + ip_set_printk("probes too small");
6764 + return -ENOEXEC;
6765 + }
6766 +
6767 + map = kmalloc(sizeof(struct ip_set_nethash)
6768 + + req->probes * sizeof(uint32_t), GFP_KERNEL);
6769 + if (!map) {
6770 + DP("out of memory for %d bytes",
6771 + sizeof(struct ip_set_nethash)
6772 + + req->probes * sizeof(uint32_t));
6773 + return -ENOMEM;
6774 + }
6775 + for (i = 0; i < req->probes; i++)
6776 + get_random_bytes(((uint32_t *) map->initval)+i, 4);
6777 + map->elements = 0;
6778 + map->hashsize = req->hashsize;
6779 + map->probes = req->probes;
6780 + map->resize = req->resize;
6781 + memset(map->cidr, 0, 30 * sizeof(unsigned char));
6782 + map->members = harray_malloc(map->hashsize, sizeof(ip_set_ip_t), GFP_KERNEL);
6783 + if (!map->members) {
6784 + DP("out of memory for %d bytes", map->hashsize * sizeof(ip_set_ip_t));
6785 + kfree(map);
6786 + return -ENOMEM;
6787 + }
6788 +
6789 + set->data = map;
6790 + return 0;
6791 +}
6792 +
6793 +static void destroy(struct ip_set *set)
6794 +{
6795 + struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
6796 +
6797 + harray_free(map->members);
6798 + kfree(map);
6799 +
6800 + set->data = NULL;
6801 +}
6802 +
6803 +static void flush(struct ip_set *set)
6804 +{
6805 + struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
6806 + harray_flush(map->members, map->hashsize, sizeof(ip_set_ip_t));
6807 + memset(map->cidr, 0, 30 * sizeof(unsigned char));
6808 + map->elements = 0;
6809 +}
6810 +
6811 +static void list_header(const struct ip_set *set, void *data)
6812 +{
6813 + struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
6814 + struct ip_set_req_nethash_create *header =
6815 + (struct ip_set_req_nethash_create *) data;
6816 +
6817 + header->hashsize = map->hashsize;
6818 + header->probes = map->probes;
6819 + header->resize = map->resize;
6820 +}
6821 +
6822 +static int list_members_size(const struct ip_set *set)
6823 +{
6824 + struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
6825 +
6826 + return (map->hashsize * sizeof(ip_set_ip_t));
6827 +}
6828 +
6829 +static void list_members(const struct ip_set *set, void *data)
6830 +{
6831 + struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
6832 + ip_set_ip_t i, *elem;
6833 +
6834 + for (i = 0; i < map->hashsize; i++) {
6835 + elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
6836 + ((ip_set_ip_t *)data)[i] = *elem;
6837 + }
6838 +}
6839 +
6840 +static struct ip_set_type ip_set_nethash = {
6841 + .typename = SETTYPE_NAME,
6842 + .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
6843 + .protocol_version = IP_SET_PROTOCOL_VERSION,
6844 + .create = &create,
6845 + .destroy = &destroy,
6846 + .flush = &flush,
6847 + .reqsize = sizeof(struct ip_set_req_nethash),
6848 + .addip = &addip,
6849 + .addip_kernel = &addip_kernel,
6850 + .retry = &retry,
6851 + .delip = &delip,
6852 + .delip_kernel = &delip_kernel,
6853 + .testip = &testip,
6854 + .testip_kernel = &testip_kernel,
6855 + .header_size = sizeof(struct ip_set_req_nethash_create),
6856 + .list_header = &list_header,
6857 + .list_members_size = &list_members_size,
6858 + .list_members = &list_members,
6859 + .me = THIS_MODULE,
6860 +};
6861 +
6862 +MODULE_LICENSE("GPL");
6863 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
6864 +MODULE_DESCRIPTION("nethash type of IP sets");
6865 +module_param(limit, int, 0600);
6866 +MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
6867 +
6868 +static int __init ip_set_nethash_init(void)
6869 +{
6870 + return ip_set_register_set_type(&ip_set_nethash);
6871 +}
6872 +
6873 +static void __exit ip_set_nethash_fini(void)
6874 +{
6875 + /* FIXME: possible race with ip_set_create() */
6876 + ip_set_unregister_set_type(&ip_set_nethash);
6877 +}
6878 +
6879 +module_init(ip_set_nethash_init);
6880 +module_exit(ip_set_nethash_fini);
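
__addip_base() above implements multi-probe open addressing: each of map->probes jhash seeds selects a candidate slot, a duplicate returns -EEXIST, the first empty slot is claimed, and -EAGAIN tells the caller to grow and rehash via retry(). A stand-alone sketch of that scheme follows; mix32() is only an illustrative stand-in for the kernel's jhash_1word():

/* Sketch of the nethash multi-probe insert with resize-on-failure signal. */
#include <stdint.h>
#include <stdio.h>
#include <errno.h>

static uint32_t mix32(uint32_t v, uint32_t seed)
{
	v ^= seed;
	v *= 2654435761u;	/* Knuth multiplicative hash constant */
	return v ^ (v >> 16);
}

static int hash_add(uint32_t *slots, uint32_t hashsize,
		    const uint32_t *initval, int probes, uint32_t ip)
{
	int i;

	for (i = 0; i < probes; i++) {
		uint32_t id = mix32(ip, initval[i]) % hashsize;

		if (slots[id] == ip)
			return -EEXIST;
		if (!slots[id]) {
			slots[id] = ip;
			return 0;
		}
	}
	return -EAGAIN;		/* all probes collided: caller must resize and rehash */
}

int main(void)
{
	uint32_t slots[8] = { 0 };
	uint32_t initval[2] = { 0x12345678, 0x9abcdef0 };

	printf("%d\n", hash_add(slots, 8, initval, 2, 0x0a000001));	/* 0: inserted */
	printf("%d\n", hash_add(slots, 8, initval, 2, 0x0a000001));	/* -EEXIST: duplicate */
	return 0;
}
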
6881 Index: linux-2.6.21.7/net/ipv4/netfilter/ip_set_portmap.c
6882 ===================================================================
6883 --- /dev/null
6884 +++ linux-2.6.21.7/net/ipv4/netfilter/ip_set_portmap.c
6885 @@ -0,0 +1,346 @@
6886 +/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
6887 + *
6888 + * This program is free software; you can redistribute it and/or modify
6889 + * it under the terms of the GNU General Public License version 2 as
6890 + * published by the Free Software Foundation.
6891 + */
6892 +
6893 +/* Kernel module implementing a port set type as a bitmap */
6894 +
6895 +#include <linux/module.h>
6896 +#include <linux/ip.h>
6897 +#include <linux/tcp.h>
6898 +#include <linux/udp.h>
6899 +#include <linux/skbuff.h>
6900 +#include <linux/version.h>
6901 +#include <linux/netfilter_ipv4/ip_tables.h>
6902 +#include <linux/netfilter_ipv4/ip_set.h>
6903 +#include <linux/errno.h>
6904 +#include <asm/uaccess.h>
6905 +#include <asm/bitops.h>
6906 +#include <linux/spinlock.h>
6907 +
6908 +#include <net/ip.h>
6909 +
6910 +#include <linux/netfilter_ipv4/ip_set_portmap.h>
6911 +
6912 +/* We must handle non-linear skbs */
6913 +static inline ip_set_ip_t
6914 +get_port(const struct sk_buff *skb, u_int32_t flags)
6915 +{
6916 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
6917 + struct iphdr *iph = ip_hdr(skb);
6918 +#else
6919 + struct iphdr *iph = skb->nh.iph;
6920 +#endif
6921 + u_int16_t offset = ntohs(iph->frag_off) & IP_OFFSET;
6922 + switch (iph->protocol) {
6923 + case IPPROTO_TCP: {
6924 + struct tcphdr tcph;
6925 +
6926 + /* See comments at tcp_match in ip_tables.c */
6927 + if (offset)
6928 + return INVALID_PORT;
6929 +
6930 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
6931 + if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &tcph, sizeof(tcph)) < 0)
6932 +#else
6933 + if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &tcph, sizeof(tcph)) < 0)
6934 +#endif
6935 + /* No choice either */
6936 + return INVALID_PORT;
6937 +
6938 + return ntohs(flags & IPSET_SRC ?
6939 + tcph.source : tcph.dest);
6940 + }
6941 + case IPPROTO_UDP: {
6942 + struct udphdr udph;
6943 +
6944 + if (offset)
6945 + return INVALID_PORT;
6946 +
6947 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
6948 + if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &udph, sizeof(udph)) < 0)
6949 +#else
6950 + if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &udph, sizeof(udph)) < 0)
6951 +#endif
6952 + /* No choice either */
6953 + return INVALID_PORT;
6954 +
6955 + return ntohs(flags & IPSET_SRC ?
6956 + udph.source : udph.dest);
6957 + }
6958 + default:
6959 + return INVALID_PORT;
6960 + }
6961 +}
6962 +
6963 +static inline int
6964 +__testport(struct ip_set *set, ip_set_ip_t port, ip_set_ip_t *hash_port)
6965 +{
6966 + struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
6967 +
6968 + if (port < map->first_port || port > map->last_port)
6969 + return -ERANGE;
6970 +
6971 + *hash_port = port;
6972 + DP("set: %s, port:%u, %u", set->name, port, *hash_port);
6973 + return !!test_bit(port - map->first_port, map->members);
6974 +}
6975 +
6976 +static int
6977 +testport(struct ip_set *set, const void *data, size_t size,
6978 + ip_set_ip_t *hash_port)
6979 +{
6980 + struct ip_set_req_portmap *req =
6981 + (struct ip_set_req_portmap *) data;
6982 +
6983 + if (size != sizeof(struct ip_set_req_portmap)) {
6984 + ip_set_printk("data length wrong (want %zu, have %zu)",
6985 + sizeof(struct ip_set_req_portmap),
6986 + size);
6987 + return -EINVAL;
6988 + }
6989 + return __testport(set, req->port, hash_port);
6990 +}
6991 +
6992 +static int
6993 +testport_kernel(struct ip_set *set,
6994 + const struct sk_buff *skb,
6995 + ip_set_ip_t *hash_port,
6996 + const u_int32_t *flags,
6997 + unsigned char index)
6998 +{
6999 + int res;
7000 + ip_set_ip_t port = get_port(skb, flags[index]);
7001 +
7002 + DP("flag %s port %u", flags[index] & IPSET_SRC ? "SRC" : "DST", port);
7003 + if (port == INVALID_PORT)
7004 + return 0;
7005 +
7006 + res = __testport(set, port, hash_port);
7007 +
7008 + return (res < 0 ? 0 : res);
7009 +}
7010 +
7011 +static inline int
7012 +__addport(struct ip_set *set, ip_set_ip_t port, ip_set_ip_t *hash_port)
7013 +{
7014 + struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
7015 +
7016 + if (port < map->first_port || port > map->last_port)
7017 + return -ERANGE;
7018 + if (test_and_set_bit(port - map->first_port, map->members))
7019 + return -EEXIST;
7020 +
7021 + *hash_port = port;
7022 + DP("port %u", port);
7023 + return 0;
7024 +}
7025 +
7026 +static int
7027 +addport(struct ip_set *set, const void *data, size_t size,
7028 + ip_set_ip_t *hash_port)
7029 +{
7030 + struct ip_set_req_portmap *req =
7031 + (struct ip_set_req_portmap *) data;
7032 +
7033 + if (size != sizeof(struct ip_set_req_portmap)) {
7034 + ip_set_printk("data length wrong (want %zu, have %zu)",
7035 + sizeof(struct ip_set_req_portmap),
7036 + size);
7037 + return -EINVAL;
7038 + }
7039 + return __addport(set, req->port, hash_port);
7040 +}
7041 +
7042 +static int
7043 +addport_kernel(struct ip_set *set,
7044 + const struct sk_buff *skb,
7045 + ip_set_ip_t *hash_port,
7046 + const u_int32_t *flags,
7047 + unsigned char index)
7048 +{
7049 + ip_set_ip_t port = get_port(skb, flags[index]);
7050 +
7051 + if (port == INVALID_PORT)
7052 + return -EINVAL;
7053 +
7054 + return __addport(set, port, hash_port);
7055 +}
7056 +
7057 +static inline int
7058 +__delport(struct ip_set *set, ip_set_ip_t port, ip_set_ip_t *hash_port)
7059 +{
7060 + struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
7061 +
7062 + if (port < map->first_port || port > map->last_port)
7063 + return -ERANGE;
7064 + if (!test_and_clear_bit(port - map->first_port, map->members))
7065 + return -EEXIST;
7066 +
7067 + *hash_port = port;
7068 + DP("port %u", port);
7069 + return 0;
7070 +}
7071 +
7072 +static int
7073 +delport(struct ip_set *set, const void *data, size_t size,
7074 + ip_set_ip_t *hash_port)
7075 +{
7076 + struct ip_set_req_portmap *req =
7077 + (struct ip_set_req_portmap *) data;
7078 +
7079 + if (size != sizeof(struct ip_set_req_portmap)) {
7080 + ip_set_printk("data length wrong (want %zu, have %zu)",
7081 + sizeof(struct ip_set_req_portmap),
7082 + size);
7083 + return -EINVAL;
7084 + }
7085 + return __delport(set, req->port, hash_port);
7086 +}
7087 +
7088 +static int
7089 +delport_kernel(struct ip_set *set,
7090 + const struct sk_buff *skb,
7091 + ip_set_ip_t *hash_port,
7092 + const u_int32_t *flags,
7093 + unsigned char index)
7094 +{
7095 + ip_set_ip_t port = get_port(skb, flags[index]);
7096 +
7097 + if (port == INVALID_PORT)
7098 + return -EINVAL;
7099 +
7100 + return __delport(set, port, hash_port);
7101 +}
7102 +
7103 +static int create(struct ip_set *set, const void *data, size_t size)
7104 +{
7105 + int newbytes;
7106 + struct ip_set_req_portmap_create *req =
7107 + (struct ip_set_req_portmap_create *) data;
7108 + struct ip_set_portmap *map;
7109 +
7110 + if (size != sizeof(struct ip_set_req_portmap_create)) {
7111 + ip_set_printk("data length wrong (want %zu, have %zu)",
7112 + sizeof(struct ip_set_req_portmap_create),
7113 + size);
7114 + return -EINVAL;
7115 + }
7116 +
7117 + DP("from %u to %u", req->from, req->to);
7118 +
7119 + if (req->from > req->to) {
7120 + DP("bad port range");
7121 + return -ENOEXEC;
7122 + }
7123 +
7124 + if (req->to - req->from > MAX_RANGE) {
7125 + ip_set_printk("range too big (max %d ports)",
7126 + MAX_RANGE+1);
7127 + return -ENOEXEC;
7128 + }
7129 +
7130 + map = kmalloc(sizeof(struct ip_set_portmap), GFP_KERNEL);
7131 + if (!map) {
7132 + DP("out of memory for %d bytes",
7133 + sizeof(struct ip_set_portmap));
7134 + return -ENOMEM;
7135 + }
7136 + map->first_port = req->from;
7137 + map->last_port = req->to;
7138 + newbytes = bitmap_bytes(req->from, req->to);
7139 + map->members = kmalloc(newbytes, GFP_KERNEL);
7140 + if (!map->members) {
7141 + DP("out of memory for %d bytes", newbytes);
7142 + kfree(map);
7143 + return -ENOMEM;
7144 + }
7145 + memset(map->members, 0, newbytes);
7146 +
7147 + set->data = map;
7148 + return 0;
7149 +}
7150 +
7151 +static void destroy(struct ip_set *set)
7152 +{
7153 + struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
7154 +
7155 + kfree(map->members);
7156 + kfree(map);
7157 +
7158 + set->data = NULL;
7159 +}
7160 +
7161 +static void flush(struct ip_set *set)
7162 +{
7163 + struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
7164 + memset(map->members, 0, bitmap_bytes(map->first_port, map->last_port));
7165 +}
7166 +
7167 +static void list_header(const struct ip_set *set, void *data)
7168 +{
7169 + struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
7170 + struct ip_set_req_portmap_create *header =
7171 + (struct ip_set_req_portmap_create *) data;
7172 +
7173 + DP("list_header %u %u", map->first_port, map->last_port);
7174 +
7175 + header->from = map->first_port;
7176 + header->to = map->last_port;
7177 +}
7178 +
7179 +static int list_members_size(const struct ip_set *set)
7180 +{
7181 + struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
7182 +
7183 + return bitmap_bytes(map->first_port, map->last_port);
7184 +}
7185 +
7186 +static void list_members(const struct ip_set *set, void *data)
7187 +{
7188 + struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
7189 + int bytes = bitmap_bytes(map->first_port, map->last_port);
7190 +
7191 + memcpy(data, map->members, bytes);
7192 +}
7193 +
7194 +static struct ip_set_type ip_set_portmap = {
7195 + .typename = SETTYPE_NAME,
7196 + .features = IPSET_TYPE_PORT | IPSET_DATA_SINGLE,
7197 + .protocol_version = IP_SET_PROTOCOL_VERSION,
7198 + .create = &create,
7199 + .destroy = &destroy,
7200 + .flush = &flush,
7201 + .reqsize = sizeof(struct ip_set_req_portmap),
7202 + .addip = &addport,
7203 + .addip_kernel = &addport_kernel,
7204 + .delip = &delport,
7205 + .delip_kernel = &delport_kernel,
7206 + .testip = &testport,
7207 + .testip_kernel = &testport_kernel,
7208 + .header_size = sizeof(struct ip_set_req_portmap_create),
7209 + .list_header = &list_header,
7210 + .list_members_size = &list_members_size,
7211 + .list_members = &list_members,
7212 + .me = THIS_MODULE,
7213 +};
7214 +
7215 +MODULE_LICENSE("GPL");
7216 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
7217 +MODULE_DESCRIPTION("portmap type of IP sets");
7218 +
7219 +static int __init ip_set_portmap_init(void)
7220 +{
7221 + return ip_set_register_set_type(&ip_set_portmap);
7222 +}
7223 +
7224 +static void __exit ip_set_portmap_fini(void)
7225 +{
7226 + /* FIXME: possible race with ip_set_create() */
7227 + ip_set_unregister_set_type(&ip_set_portmap);
7228 +}
7229 +
7230 +module_init(ip_set_portmap_init);
7231 +module_exit(ip_set_portmap_fini);
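
The portmap type above is a plain bitmap indexed by (port - first_port), with -ERANGE outside the configured range and -EEXIST on duplicates. A minimal user-space sketch of the same add/test logic; FIRST, LAST and the return codes here are illustrative:

/* Sketch of the portmap bitmap used by __addport()/__testport(). */
#include <stdio.h>

#define FIRST 1024
#define LAST  2047

static unsigned char members[(LAST - FIRST + 1 + 7) / 8];

static int port_add(unsigned int port)
{
	unsigned int off;

	if (port < FIRST || port > LAST)
		return -1;			/* -ERANGE in the module */
	off = port - FIRST;
	if (members[off / 8] & (1 << (off % 8)))
		return -2;			/* -EEXIST in the module */
	members[off / 8] |= 1 << (off % 8);
	return 0;
}

static int port_test(unsigned int port)
{
	unsigned int off;

	if (port < FIRST || port > LAST)
		return 0;
	off = port - FIRST;
	return !!(members[off / 8] & (1 << (off % 8)));
}

int main(void)
{
	port_add(1080);
	printf("%d %d\n", port_test(1080), port_test(1081));	/* 1 0 */
	return 0;
}
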
7232 Index: linux-2.6.21.7/net/ipv4/netfilter/ipt_set.c
7233 ===================================================================
7234 --- /dev/null
7235 +++ linux-2.6.21.7/net/ipv4/netfilter/ipt_set.c
7236 @@ -0,0 +1,160 @@
7237 +/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
7238 + * Patrick Schaaf <bof@bof.de>
7239 + * Martin Josefsson <gandalf@wlug.westbo.se>
7240 + * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
7241 + *
7242 + * This program is free software; you can redistribute it and/or modify
7243 + * it under the terms of the GNU General Public License version 2 as
7244 + * published by the Free Software Foundation.
7245 + */
7246 +
7247 +/* Kernel module to match an IP set. */
7248 +
7249 +#include <linux/module.h>
7250 +#include <linux/ip.h>
7251 +#include <linux/skbuff.h>
7252 +#include <linux/version.h>
7253 +
7254 +#include <linux/netfilter_ipv4/ip_tables.h>
7255 +#include <linux/netfilter_ipv4/ip_set.h>
7256 +#include <linux/netfilter_ipv4/ipt_set.h>
7257 +
7258 +static inline int
7259 +match_set(const struct ipt_set_info *info,
7260 + const struct sk_buff *skb,
7261 + int inv)
7262 +{
7263 + if (ip_set_testip_kernel(info->index, skb, info->flags))
7264 + inv = !inv;
7265 + return inv;
7266 +}
7267 +
7268 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
7269 +static bool
7270 +#else
7271 +static int
7272 +#endif
7273 +match(const struct sk_buff *skb,
7274 + const struct net_device *in,
7275 + const struct net_device *out,
7276 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
7277 + const struct xt_match *match,
7278 +#endif
7279 + const void *matchinfo,
7280 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
7281 + int offset, unsigned int protoff, bool *hotdrop)
7282 +#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
7283 + int offset, unsigned int protoff, int *hotdrop)
7284 +#else
7285 + int offset, int *hotdrop)
7286 +#endif
7287 +{
7288 + const struct ipt_set_info_match *info = matchinfo;
7289 +
7290 + return match_set(&info->match_set,
7291 + skb,
7292 + info->match_set.flags[0] & IPSET_MATCH_INV);
7293 +}
7294 +
7295 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
7296 +bool
7297 +#else
7298 +static int
7299 +#endif
7300 +checkentry(const char *tablename,
7301 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
7302 + const void *inf,
7303 +#else
7304 + const struct ipt_ip *ip,
7305 +#endif
7306 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
7307 + const struct xt_match *match,
7308 +#endif
7309 + void *matchinfo,
7310 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
7311 + unsigned int matchsize,
7312 +#endif
7313 + unsigned int hook_mask)
7314 +{
7315 + struct ipt_set_info_match *info =
7316 + (struct ipt_set_info_match *) matchinfo;
7317 + ip_set_id_t index;
7318 +
7319 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
7320 + if (matchsize != IPT_ALIGN(sizeof(struct ipt_set_info_match))) {
7321 + ip_set_printk("invalid matchsize %d", matchsize);
7322 + return 0;
7323 + }
7324 +#endif
7325 +
7326 + index = ip_set_get_byindex(info->match_set.index);
7327 +
7328 + if (index == IP_SET_INVALID_ID) {
7329 +		ip_set_printk("Cannot find set identified by id %u to match",
7330 + info->match_set.index);
7331 + return 0; /* error */
7332 + }
7333 + if (info->match_set.flags[IP_SET_MAX_BINDINGS] != 0) {
7334 + ip_set_printk("That's nasty!");
7335 + return 0; /* error */
7336 + }
7337 +
7338 + return 1;
7339 +}
7340 +
7341 +static void destroy(
7342 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
7343 + const struct xt_match *match,
7344 +#endif
7345 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
7346 + void *matchinfo, unsigned int matchsize)
7347 +#else
7348 + void *matchinfo)
7349 +#endif
7350 +{
7351 + struct ipt_set_info_match *info = matchinfo;
7352 +
7353 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
7354 + if (matchsize != IPT_ALIGN(sizeof(struct ipt_set_info_match))) {
7355 + ip_set_printk("invalid matchsize %d", matchsize);
7356 + return;
7357 + }
7358 +#endif
7359 + ip_set_put(info->match_set.index);
7360 +}
7361 +
7362 +static struct ipt_match set_match = {
7363 + .name = "set",
7364 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
7365 + .family = AF_INET,
7366 +#endif
7367 + .match = &match,
7368 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
7369 + .matchsize = sizeof(struct ipt_set_info_match),
7370 +#endif
7371 + .checkentry = &checkentry,
7372 + .destroy = &destroy,
7373 + .me = THIS_MODULE
7374 +};
7375 +
7376 +MODULE_LICENSE("GPL");
7377 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
7378 +MODULE_DESCRIPTION("iptables IP set match module");
7379 +
7380 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
7381 +#define ipt_register_match xt_register_match
7382 +#define ipt_unregister_match xt_unregister_match
7383 +#endif
7384 +
7385 +static int __init ipt_ipset_init(void)
7386 +{
7387 + return ipt_register_match(&set_match);
7388 +}
7389 +
7390 +static void __exit ipt_ipset_fini(void)
7391 +{
7392 + ipt_unregister_match(&set_match);
7393 +}
7394 +
7395 +module_init(ipt_ipset_init);
7396 +module_exit(ipt_ipset_fini);
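
match_set() above folds the '!'-inversion flag into the result by flipping it whenever the packet is in the set, which yields the XOR truth table an inverted iptables match needs. The same logic in a tiny stand-alone form:

/* Sketch of the inversion trick used by match_set(). */
#include <stdio.h>

static int match_set(int in_set, int inv)
{
	if (in_set)
		inv = !inv;
	return inv;
}

int main(void)
{
	printf("%d %d %d %d\n",
	       match_set(1, 0),		/* in set, no '!'  -> match    (1) */
	       match_set(0, 0),		/* not in set      -> no match (0) */
	       match_set(1, 1),		/* in set, '!'     -> no match (0) */
	       match_set(0, 1));	/* not in set, '!' -> match    (1) */
	return 0;
}
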
7397 Index: linux-2.6.21.7/net/ipv4/netfilter/ipt_SET.c
7398 ===================================================================
7399 --- /dev/null
7400 +++ linux-2.6.21.7/net/ipv4/netfilter/ipt_SET.c
7401 @@ -0,0 +1,172 @@
7402 +/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
7403 + * Patrick Schaaf <bof@bof.de>
7404 + * Martin Josefsson <gandalf@wlug.westbo.se>
7405 + * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
7406 + *
7407 + * This program is free software; you can redistribute it and/or modify
7408 + * it under the terms of the GNU General Public License version 2 as
7409 + * published by the Free Software Foundation.
7410 + */
7411 +
7412 +/* ipt_SET.c - netfilter target to manipulate IP sets */
7413 +
7414 +#include <linux/types.h>
7415 +#include <linux/ip.h>
7416 +#include <linux/timer.h>
7417 +#include <linux/module.h>
7418 +#include <linux/netfilter.h>
7419 +#include <linux/netdevice.h>
7420 +#include <linux/if.h>
7421 +#include <linux/inetdevice.h>
7422 +#include <linux/version.h>
7423 +#include <net/protocol.h>
7424 +#include <net/checksum.h>
7425 +#include <linux/netfilter_ipv4.h>
7426 +#include <linux/netfilter_ipv4/ip_tables.h>
7427 +#include <linux/netfilter_ipv4/ipt_set.h>
7428 +
7429 +static unsigned int
7430 +target(struct sk_buff **pskb,
7431 + const struct net_device *in,
7432 + const struct net_device *out,
7433 + unsigned int hooknum,
7434 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
7435 + const struct xt_target *target,
7436 +#endif
7437 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
7438 + const void *targinfo,
7439 + void *userinfo)
7440 +#else
7441 + const void *targinfo)
7442 +#endif
7443 +{
7444 + const struct ipt_set_info_target *info = targinfo;
7445 +
7446 + if (info->add_set.index != IP_SET_INVALID_ID)
7447 + ip_set_addip_kernel(info->add_set.index,
7448 + *pskb,
7449 + info->add_set.flags);
7450 + if (info->del_set.index != IP_SET_INVALID_ID)
7451 + ip_set_delip_kernel(info->del_set.index,
7452 + *pskb,
7453 + info->del_set.flags);
7454 +
7455 + return IPT_CONTINUE;
7456 +}
7457 +
7458 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
7459 +static bool
7460 +#else
7461 +static int
7462 +#endif
7463 +checkentry(const char *tablename,
7464 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
7465 + const void *e,
7466 +#else
7467 + const struct ipt_entry *e,
7468 +#endif
7469 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
7470 + const struct xt_target *target,
7471 +#endif
7472 + void *targinfo,
7473 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
7474 + unsigned int targinfosize,
7475 +#endif
7476 + unsigned int hook_mask)
7477 +{
7478 + struct ipt_set_info_target *info =
7479 + (struct ipt_set_info_target *) targinfo;
7480 + ip_set_id_t index;
7481 +
7482 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
7483 + if (targinfosize != IPT_ALIGN(sizeof(*info))) {
7484 + DP("bad target info size %u", targinfosize);
7485 + return 0;
7486 + }
7487 +#endif
7488 +
7489 + if (info->add_set.index != IP_SET_INVALID_ID) {
7490 + index = ip_set_get_byindex(info->add_set.index);
7491 + if (index == IP_SET_INVALID_ID) {
7492 + ip_set_printk("cannot find add_set index %u as target",
7493 + info->add_set.index);
7494 + return 0; /* error */
7495 + }
7496 + }
7497 +
7498 + if (info->del_set.index != IP_SET_INVALID_ID) {
7499 + index = ip_set_get_byindex(info->del_set.index);
7500 + if (index == IP_SET_INVALID_ID) {
7501 + ip_set_printk("cannot find del_set index %u as target",
7502 + info->del_set.index);
7503 + return 0; /* error */
7504 + }
7505 + }
7506 + if (info->add_set.flags[IP_SET_MAX_BINDINGS] != 0
7507 + || info->del_set.flags[IP_SET_MAX_BINDINGS] != 0) {
7508 + ip_set_printk("SET target: flags array is not zero-terminated");
7509 + return 0; /* error */
7510 + }
7511 +
7512 + return 1;
7513 +}
7514 +
7515 +static void destroy(
7516 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
7517 + const struct xt_target *target,
7518 +#endif
7519 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
7520 + void *targetinfo, unsigned int targetsize)
7521 +#else
7522 + void *targetinfo)
7523 +#endif
7524 +{
7525 + struct ipt_set_info_target *info = targetinfo;
7526 +
7527 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
7528 + if (targetsize != IPT_ALIGN(sizeof(struct ipt_set_info_target))) {
7529 + ip_set_printk("invalid targetsize %d", targetsize);
7530 + return;
7531 + }
7532 +#endif
7533 + if (info->add_set.index != IP_SET_INVALID_ID)
7534 + ip_set_put(info->add_set.index);
7535 + if (info->del_set.index != IP_SET_INVALID_ID)
7536 + ip_set_put(info->del_set.index);
7537 +}
7538 +
7539 +static struct ipt_target SET_target = {
7540 + .name = "SET",
7541 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
7542 + .family = AF_INET,
7543 +#endif
7544 + .target = target,
7545 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
7546 + .targetsize = sizeof(struct ipt_set_info_target),
7547 +#endif
7548 + .checkentry = checkentry,
7549 + .destroy = destroy,
7550 + .me = THIS_MODULE
7551 +};
7552 +
7553 +MODULE_LICENSE("GPL");
7554 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
7555 +MODULE_DESCRIPTION("iptables IP set target module");
7556 +
7557 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
7558 +#define ipt_register_target xt_register_target
7559 +#define ipt_unregister_target xt_unregister_target
7560 +#endif
7561 +
7562 +static int __init ipt_SET_init(void)
7563 +{
7564 + return ipt_register_target(&SET_target);
7565 +}
7566 +
7567 +static void __exit ipt_SET_fini(void)
7568 +{
7569 + ipt_unregister_target(&SET_target);
7570 +}
7571 +
7572 +module_init(ipt_SET_init);
7573 +module_exit(ipt_SET_fini);
7574 Index: linux-2.6.21.7/net/ipv4/netfilter/Kconfig
7575 ===================================================================
7576 --- linux-2.6.21.7.orig/net/ipv4/netfilter/Kconfig
7577 +++ linux-2.6.21.7/net/ipv4/netfilter/Kconfig
7578 @@ -663,5 +663,122 @@ config IP_NF_ARP_MANGLE
7579 Allows altering the ARP packet payload: source and destination
7580 hardware and network addresses.
7581
7582 +config IP_NF_SET
7583 + tristate "IP set support"
7584 + depends on INET && NETFILTER
7585 + help
7586 + This option adds IP set support to the kernel.
7587 + In order to define and use sets, you need the userspace utility
7588 + ipset(8).
7589 +
7590 + To compile it as a module, choose M here. If unsure, say N.
7591 +
7592 +config IP_NF_SET_MAX
7593 + int "Maximum number of IP sets"
7594 + default 256
7595 + range 2 65534
7596 + depends on IP_NF_SET
7597 + help
7598 + Here you can define the default value for the maximum
7599 + number of IP sets supported by the kernel.
7600 +
7601 + The value can be overridden by the 'max_sets' module
7602 + parameter of the 'ip_set' module.
7603 +
7604 +config IP_NF_SET_HASHSIZE
7605 + int "Hash size for bindings of IP sets"
7606 + default 1024
7607 + depends on IP_NF_SET
7608 + help
7609 + Here you can define the default value of the hash size
7610 + used for bindings of IP sets.
7611 +
7612 + The value can be overridden by the 'hash_size' module
7613 + parameter of the 'ip_set' module.
7614 +
7615 +config IP_NF_SET_IPMAP
7616 + tristate "ipmap set support"
7617 + depends on IP_NF_SET
7618 + help
7619 + This option adds the ipmap set type support.
7620 +
7621 + To compile it as a module, choose M here. If unsure, say N.
7622 +
7623 +config IP_NF_SET_MACIPMAP
7624 + tristate "macipmap set support"
7625 + depends on IP_NF_SET
7626 + help
7627 + This option adds the macipmap set type support.
7628 +
7629 + To compile it as a module, choose M here. If unsure, say N.
7630 +
7631 +config IP_NF_SET_PORTMAP
7632 + tristate "portmap set support"
7633 + depends on IP_NF_SET
7634 + help
7635 + This option adds the portmap set type support.
7636 +
7637 + To compile it as a module, choose M here. If unsure, say N.
7638 +
7639 +config IP_NF_SET_IPHASH
7640 + tristate "iphash set support"
7641 + depends on IP_NF_SET
7642 + help
7643 + This option adds the iphash set type support.
7644 +
7645 + To compile it as a module, choose M here. If unsure, say N.
7646 +
7647 +config IP_NF_SET_NETHASH
7648 + tristate "nethash set support"
7649 + depends on IP_NF_SET
7650 + help
7651 + This option adds the nethash set type support.
7652 +
7653 + To compile it as a module, choose M here. If unsure, say N.
7654 +
7655 +config IP_NF_SET_IPPORTHASH
7656 + tristate "ipporthash set support"
7657 + depends on IP_NF_SET
7658 + help
7659 + This option adds the ipporthash set type support.
7660 +
7661 + To compile it as a module, choose M here. If unsure, say N.
7662 +
7663 +config IP_NF_SET_IPTREE
7664 + tristate "iptree set support"
7665 + depends on IP_NF_SET
7666 + help
7667 + This option adds the iptree set type support.
7668 +
7669 + To compile it as a module, choose M here. If unsure, say N.
7670 +
7671 +config IP_NF_SET_IPTREEMAP
7672 + tristate "iptreemap set support"
7673 + depends on IP_NF_SET
7674 + help
7675 + This option adds the iptreemap set type support.
7676 +
7677 + To compile it as a module, choose M here. If unsure, say N.
7678 +
7679 +config IP_NF_MATCH_SET
7680 + tristate "set match support"
7681 + depends on IP_NF_SET
7682 + help
7683 + The set match checks packets against the given IP sets.
7684 + You need the ipset utility to create and set up the sets.
7685 +
7686 + To compile it as a module, choose M here. If unsure, say N.
7687 +
7688 +config IP_NF_TARGET_SET
7689 + tristate "SET target support"
7690 + depends on IP_NF_SET
7691 + help
7692 + The SET target makes it possible to add and delete entries
7693 + in IP sets.
7694 + You need the ipset utility to create and set up the sets.
7695 +
7696 + To compile it as a module, choose M here. If unsure, say N.
7697 +
7698 +
7699 endmenu
7700
7701 Index: linux-2.6.21.7/net/ipv4/netfilter/Makefile
7702 ===================================================================
7703 --- linux-2.6.21.7.orig/net/ipv4/netfilter/Makefile
7704 +++ linux-2.6.21.7/net/ipv4/netfilter/Makefile
7705 @@ -90,6 +90,7 @@ obj-$(CONFIG_IP_NF_MATCH_RECENT) += ipt_
7706 obj-$(CONFIG_IP_NF_MATCH_ECN) += ipt_ecn.o
7707 obj-$(CONFIG_IP_NF_MATCH_AH) += ipt_ah.o
7708 obj-$(CONFIG_IP_NF_MATCH_TTL) += ipt_ttl.o
7709 +obj-$(CONFIG_IP_NF_MATCH_SET) += ipt_set.o
7710 obj-$(CONFIG_IP_NF_MATCH_ADDRTYPE) += ipt_addrtype.o
7711 obj-$(CONFIG_IP_NF_MATCH_IPP2P) += ipt_ipp2p.o
7712
7713 @@ -106,6 +107,18 @@ obj-$(CONFIG_IP_NF_TARGET_LOG) += ipt_LO
7714 obj-$(CONFIG_IP_NF_TARGET_ULOG) += ipt_ULOG.o
7715 obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o
7716 obj-$(CONFIG_IP_NF_TARGET_TTL) += ipt_TTL.o
7717 +obj-$(CONFIG_IP_NF_TARGET_SET) += ipt_SET.o
7718 +
7719 +# sets
7720 +obj-$(CONFIG_IP_NF_SET) += ip_set.o
7721 +obj-$(CONFIG_IP_NF_SET_IPMAP) += ip_set_ipmap.o
7722 +obj-$(CONFIG_IP_NF_SET_PORTMAP) += ip_set_portmap.o
7723 +obj-$(CONFIG_IP_NF_SET_MACIPMAP) += ip_set_macipmap.o
7724 +obj-$(CONFIG_IP_NF_SET_IPHASH) += ip_set_iphash.o
7725 +obj-$(CONFIG_IP_NF_SET_NETHASH) += ip_set_nethash.o
7726 +obj-$(CONFIG_IP_NF_SET_IPPORTHASH) += ip_set_ipporthash.o
7727 +obj-$(CONFIG_IP_NF_SET_IPTREE) += ip_set_iptree.o
7728 +obj-$(CONFIG_IP_NF_SET_IPTREEMAP) += ip_set_iptreemap.o
7729
7730 # generic ARP tables
7731 obj-$(CONFIG_IP_NF_ARPTABLES) += arp_tables.o