remove an unused #include statement that is causing compile issues on osx
[openwrt/svn-archive/archive.git] / target/linux/generic-2.6/patches-2.6.23/130-netfilter_ipset.patch
1 diff -Nru linux-2.6.23/include/linux/netfilter_ipv4/ip_set.h linux-2.6.23.pom2patch.set/include/linux/netfilter_ipv4/ip_set.h
2 --- linux-2.6.23/include/linux/netfilter_ipv4/ip_set.h 1970-01-01 01:00:00.000000000 +0100
3 +++ linux-2.6.23.pom2patch.set/include/linux/netfilter_ipv4/ip_set.h 2007-10-12 11:52:37.000000000 +0200
4 @@ -0,0 +1,498 @@
5 +#ifndef _IP_SET_H
6 +#define _IP_SET_H
7 +
8 +/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
9 + * Patrick Schaaf <bof@bof.de>
10 + * Martin Josefsson <gandalf@wlug.westbo.se>
11 + * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
12 + *
13 + * This program is free software; you can redistribute it and/or modify
14 + * it under the terms of the GNU General Public License version 2 as
15 + * published by the Free Software Foundation.
16 + */
17 +
18 +#if 0
19 +#define IP_SET_DEBUG
20 +#endif
21 +
22 +/*
23 + * A sockopt of such quality has hardly ever been seen before on the open
24 + * market! This little beauty, hardly ever used: above 64, so it's
25 + * traditionally used for firewalling, not touched (even once!) by the
26 + * 2.0, 2.2 and 2.4 kernels!
27 + *
28 + * Comes with its own certificate of authenticity, valid anywhere in the
29 + * Free world!
30 + *
31 + * Rusty, 19.4.2000
32 + */
33 +#define SO_IP_SET 83
34 +
35 +/*
36 + * Heavily modified by Joakim Axelsson 08.03.2002
37 + * - Made it more module-based
38 + *
39 + * Additional heavy modifications by Jozsef Kadlecsik 22.02.2004
40 + * - bindings added
41 + * - in order to "deal with" backward compatibility, renamed to ipset
42 + */
43 +
44 +/*
45 + * Used so that the kernel module and ipset-binary can match their versions
46 + */
47 +#define IP_SET_PROTOCOL_VERSION 2
48 +
49 +#define IP_SET_MAXNAMELEN 32 /* set names and set typenames */
50 +
51 +/* Lets work with our own typedef for representing an IP address.
52 + * We hope to make the code more portable, possibly to IPv6...
53 + *
54 + * The representation works in HOST byte order, because most set types
55 + * will perform arithmetic and comparison operations.
56 + *
57 + * For now the type is an uint32_t.
58 + *
59 + * Make sure to ONLY use the functions when translating and parsing
60 + * in order to keep the host byte order and make it more portable:
61 + * parse_ip()
62 + * parse_mask()
63 + * parse_ipandmask()
64 + * ip_tostring()
65 + * (Joakim: where are they???)
66 + */
67 +
68 +typedef uint32_t ip_set_ip_t;
69 +
70 +/* Sets are identified by an id in kernel space. Tweak with ip_set_id_t
71 + * and IP_SET_INVALID_ID if you want to increase the max number of sets.
72 + */
73 +typedef uint16_t ip_set_id_t;
74 +
75 +#define IP_SET_INVALID_ID 65535
76 +
77 +/* How deep we follow bindings */
78 +#define IP_SET_MAX_BINDINGS 6
79 +
80 +/*
81 + * Option flags for kernel operations (ipt_set_info)
82 + */
83 +#define IPSET_SRC 0x01 /* Source match/add */
84 +#define IPSET_DST 0x02 /* Destination match/add */
85 +#define IPSET_MATCH_INV 0x04 /* Inverse matching */
86 +
87 +/*
88 + * Set features
89 + */
90 +#define IPSET_TYPE_IP 0x01 /* IP address type of set */
91 +#define IPSET_TYPE_PORT 0x02 /* Port type of set */
92 +#define IPSET_DATA_SINGLE 0x04 /* Single data storage */
93 +#define IPSET_DATA_DOUBLE 0x08 /* Double data storage */
94 +
95 +/* Reserved keywords */
96 +#define IPSET_TOKEN_DEFAULT ":default:"
97 +#define IPSET_TOKEN_ALL ":all:"
98 +
99 +/* SO_IP_SET operation constants, and their request struct types.
100 + *
101 + * Operation ids:
102 + * 0-99: commands with version checking
103 + * 100-199: add/del/test/bind/unbind
104 + * 200-299: list, save, restore
105 + */
106 +
107 +/* Single shot operations:
108 + * version, create, destroy, flush, rename and swap
109 + *
110 + * Sets are identified by name.
111 + */
112 +
113 +#define IP_SET_REQ_STD \
114 + unsigned op; \
115 + unsigned version; \
116 + char name[IP_SET_MAXNAMELEN]
117 +
118 +#define IP_SET_OP_CREATE 0x00000001 /* Create a new (empty) set */
119 +struct ip_set_req_create {
120 + IP_SET_REQ_STD;
121 + char typename[IP_SET_MAXNAMELEN];
122 +};
123 +
124 +#define IP_SET_OP_DESTROY 0x00000002 /* Remove an (empty) set */
125 +struct ip_set_req_std {
126 + IP_SET_REQ_STD;
127 +};
128 +
129 +#define IP_SET_OP_FLUSH 0x00000003 /* Remove all IPs in a set */
130 +/* Uses ip_set_req_std */
131 +
132 +#define IP_SET_OP_RENAME 0x00000004 /* Rename a set */
133 +/* Uses ip_set_req_create */
134 +
135 +#define IP_SET_OP_SWAP 0x00000005 /* Swap two sets */
136 +/* Uses ip_set_req_create */
137 +
138 +union ip_set_name_index {
139 + char name[IP_SET_MAXNAMELEN];
140 + ip_set_id_t index;
141 +};
142 +
143 +#define IP_SET_OP_GET_BYNAME 0x00000006 /* Get set index by name */
144 +struct ip_set_req_get_set {
145 + unsigned op;
146 + unsigned version;
147 + union ip_set_name_index set;
148 +};
149 +
150 +#define IP_SET_OP_GET_BYINDEX 0x00000007 /* Get set name by index */
151 +/* Uses ip_set_req_get_set */
152 +
153 +#define IP_SET_OP_VERSION 0x00000100 /* Ask kernel version */
154 +struct ip_set_req_version {
155 + unsigned op;
156 + unsigned version;
157 +};
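/* Illustration: a userspace caller would drive the single shot operations
 * above through getsockopt() on the SO_IP_SET sockopt. A minimal protocol
 * version query might look roughly like this (a sketch; it assumes the
 * raw-socket interface the ipset utility uses and requires CAP_NET_ADMIN):
 *
 *	struct ip_set_req_version req;
 *	socklen_t size = sizeof(req);
 *	int sockfd = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);
 *
 *	memset(&req, 0, sizeof(req));
 *	req.op = IP_SET_OP_VERSION;
 *	req.version = IP_SET_PROTOCOL_VERSION;
 *
 *	if (sockfd >= 0 &&
 *	    getsockopt(sockfd, SOL_IP, SO_IP_SET, &req, &size) == 0)
 *		printf("kernel ipset protocol version: %u\n", req.version);
 */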
158 +
159 +/* Double shot operations:
160 + * add, del, test, bind and unbind.
161 + *
162 + * First we query the kernel to get the index and type of the target set,
163 + * then issue the command. Validity of the IP is checked in the kernel
164 + * in order to minimize sockopt operations.
165 + */
166 +
167 +/* Get minimal set data for add/del/test/bind/unbind IP */
168 +#define IP_SET_OP_ADT_GET 0x00000010 /* Get set and type */
169 +struct ip_set_req_adt_get {
170 + unsigned op;
171 + unsigned version;
172 + union ip_set_name_index set;
173 + char typename[IP_SET_MAXNAMELEN];
174 +};
175 +
176 +#define IP_SET_REQ_BYINDEX \
177 + unsigned op; \
178 + ip_set_id_t index;
179 +
180 +struct ip_set_req_adt {
181 + IP_SET_REQ_BYINDEX;
182 +};
183 +
184 +#define IP_SET_OP_ADD_IP 0x00000101 /* Add an IP to a set */
185 +/* Uses ip_set_req_adt, with type specific addage */
186 +
187 +#define IP_SET_OP_DEL_IP 0x00000102 /* Remove an IP from a set */
188 +/* Uses ip_set_req_adt, with type specific addage */
189 +
190 +#define IP_SET_OP_TEST_IP 0x00000103 /* Test an IP in a set */
191 +/* Uses ip_set_req_adt, with type specific addage */
192 +
193 +#define IP_SET_OP_BIND_SET 0x00000104 /* Bind an IP to a set */
194 +/* Uses ip_set_req_bind, with type specific addage */
195 +struct ip_set_req_bind {
196 + IP_SET_REQ_BYINDEX;
197 + char binding[IP_SET_MAXNAMELEN];
198 +};
199 +
200 +#define IP_SET_OP_UNBIND_SET 0x00000105 /* Unbind an IP from a set */
201 +/* Uses ip_set_req_bind, with type specific addage
202 + * index = 0 means unbinding for all sets */
203 +
204 +#define IP_SET_OP_TEST_BIND_SET 0x00000106 /* Test binding an IP to a set */
205 +/* Uses ip_set_req_bind, with type specific addage */
206 +
207 +/* Multiple shot operations: list, save, restore.
208 + *
209 + * - check kernel version and query the max number of sets
210 + * - get the basic information on all sets
211 + * and size required for the next step
212 + * - get actual set data: header, data, bindings
213 + */
214 +
215 +/* Get max_sets and the index of a queried set
216 + */
217 +#define IP_SET_OP_MAX_SETS 0x00000020
218 +struct ip_set_req_max_sets {
219 + unsigned op;
220 + unsigned version;
221 + ip_set_id_t max_sets; /* maximum number of sets */
222 + ip_set_id_t sets; /* real number of sets */
223 + union ip_set_name_index set; /* index of set if name used */
224 +};
225 +
226 +/* Get the id and name of the sets plus size for next step */
227 +#define IP_SET_OP_LIST_SIZE 0x00000201
228 +#define IP_SET_OP_SAVE_SIZE 0x00000202
229 +struct ip_set_req_setnames {
230 + unsigned op;
231 + ip_set_id_t index; /* set to list/save */
232 + size_t size; /* size to get setdata/bindings */
233 + /* followed by sets number of struct ip_set_name_list */
234 +};
235 +
236 +struct ip_set_name_list {
237 + char name[IP_SET_MAXNAMELEN];
238 + char typename[IP_SET_MAXNAMELEN];
239 + ip_set_id_t index;
240 + ip_set_id_t id;
241 +};
242 +
243 +/* The actual list operation */
244 +#define IP_SET_OP_LIST 0x00000203
245 +struct ip_set_req_list {
246 + IP_SET_REQ_BYINDEX;
247 + /* sets number of struct ip_set_list in reply */
248 +};
249 +
250 +struct ip_set_list {
251 + ip_set_id_t index;
252 + ip_set_id_t binding;
253 + u_int32_t ref;
254 + size_t header_size; /* Set header data of header_size */
255 + size_t members_size; /* Set members data of members_size */
256 + size_t bindings_size; /* Set bindings data of bindings_size */
257 +};
258 +
259 +struct ip_set_hash_list {
260 + ip_set_ip_t ip;
261 + ip_set_id_t binding;
262 +};
263 +
264 +/* The save operation */
265 +#define IP_SET_OP_SAVE 0x00000204
266 +/* Uses ip_set_req_list; in the reply it is replaced by as many
267 + * struct ip_set_save entries as there are sets, plus a marker
268 + * ip_set_save followed by ip_set_hash_save structures.
269 + */
270 +struct ip_set_save {
271 + ip_set_id_t index;
272 + ip_set_id_t binding;
273 + size_t header_size; /* Set header data of header_size */
274 + size_t members_size; /* Set members data of members_size */
275 +};
276 +
277 +/* At restoring, ip == 0 means default binding for the given set: */
278 +struct ip_set_hash_save {
279 + ip_set_ip_t ip;
280 + ip_set_id_t id;
281 + ip_set_id_t binding;
282 +};
283 +
284 +/* The restore operation */
285 +#define IP_SET_OP_RESTORE 0x00000205
286 +/* Uses ip_set_req_setnames followed by ip_set_restore structures
287 + * plus a marker ip_set_restore, followed by ip_set_hash_save
288 + * structures.
289 + */
290 +struct ip_set_restore {
291 + char name[IP_SET_MAXNAMELEN];
292 + char typename[IP_SET_MAXNAMELEN];
293 + ip_set_id_t index;
294 + size_t header_size; /* Create data of header_size */
295 + size_t members_size; /* Set members data of members_size */
296 +};
297 +
298 +static inline int bitmap_bytes(ip_set_ip_t a, ip_set_ip_t b)
299 +{
300 + return 4 * ((((b - a + 8) / 8) + 3) / 4);
301 +}
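/* Worked example of the rounding above: bitmap_bytes(a, a + 15) covers
 * 16 addresses, so (15 + 8) / 8 = 2 bytes are needed, rounded up to the
 * next multiple of 4, i.e. 4 bytes; bitmap_bytes(a, a + 255) yields
 * (255 + 8) / 8 = 32 bytes, already a multiple of 4.
 */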
302 +
303 +#ifdef __KERNEL__
304 +
305 +#define ip_set_printk(format, args...) \
306 + do { \
307 + printk("%s: %s: ", __FILE__, __FUNCTION__); \
308 + printk(format "\n" , ## args); \
309 + } while (0)
310 +
311 +#if defined(IP_SET_DEBUG)
312 +#define DP(format, args...) \
313 + do { \
314 + printk("%s: %s (DBG): ", __FILE__, __FUNCTION__);\
315 + printk(format "\n" , ## args); \
316 + } while (0)
317 +#define IP_SET_ASSERT(x) \
318 + do { \
319 + if (!(x)) \
320 + printk("IP_SET_ASSERT: %s:%i(%s)\n", \
321 + __FILE__, __LINE__, __FUNCTION__); \
322 + } while (0)
323 +#else
324 +#define DP(format, args...)
325 +#define IP_SET_ASSERT(x)
326 +#endif
327 +
328 +struct ip_set;
329 +
330 +/*
331 + * The ip_set_type definition - one per set type, e.g. "ipmap".
332 + *
333 + * Each individual set has a pointer, set->type, going to one
334 + * of these structures. Function pointers inside the structure implement
335 + * the real behaviour of the sets.
336 + *
337 + * Unless stated otherwise, the implementation behind the function
338 + * pointers of a set_type is expected to return 0 if ok, and a negative
339 + * errno (e.g. -EINVAL) on error.
340 + */
341 +struct ip_set_type {
342 + struct list_head list; /* next in list of set types */
343 +
344 + /* test for IP in set (kernel: iptables -m set src|dst)
345 + * return 0 if not in set, 1 if in set.
346 + */
347 + int (*testip_kernel) (struct ip_set *set,
348 + const struct sk_buff * skb,
349 + ip_set_ip_t *ip,
350 + const u_int32_t *flags,
351 + unsigned char index);
352 +
353 + /* test for IP in set (userspace: ipset -T set IP)
354 + * return 0 if not in set, 1 if in set.
355 + */
356 + int (*testip) (struct ip_set *set,
357 + const void *data, size_t size,
358 + ip_set_ip_t *ip);
359 +
360 + /*
361 + * Size of the data structure passed in when
362 + * adding/deleting/testing an entry.
363 + */
364 + size_t reqsize;
365 +
366 + /* Add IP into set (userspace: ipset -A set IP)
367 + * Return -EEXIST if the address is already in the set,
368 + * and -ERANGE if the address lies outside the set bounds.
369 + * If the address was not already in the set, 0 is returned.
370 + */
371 + int (*addip) (struct ip_set *set,
372 + const void *data, size_t size,
373 + ip_set_ip_t *ip);
374 +
375 + /* Add IP into set (kernel: iptables ... -j SET set src|dst)
376 + * Return -EEXIST if the address is already in the set,
377 + * and -ERANGE if the address lies outside the set bounds.
378 + * If the address was not already in the set, 0 is returned.
379 + */
380 + int (*addip_kernel) (struct ip_set *set,
381 + const struct sk_buff * skb,
382 + ip_set_ip_t *ip,
383 + const u_int32_t *flags,
384 + unsigned char index);
385 +
386 + /* remove IP from set (userspace: ipset -D set --entry x)
387 + * Return -EEXIST if the address is NOT in the set,
388 + * and -ERANGE if the address lies outside the set bounds.
389 + * If the address really was in the set, 0 is returned.
390 + */
391 + int (*delip) (struct ip_set *set,
392 + const void *data, size_t size,
393 + ip_set_ip_t *ip);
394 +
395 + /* remove IP from set (kernel: iptables ... -j SET --entry x)
396 + * Return -EEXIST if the address is NOT in the set,
397 + * and -ERANGE if the address lies outside the set bounds.
398 + * If the address really was in the set, 0 is returned.
399 + */
400 + int (*delip_kernel) (struct ip_set *set,
401 + const struct sk_buff * skb,
402 + ip_set_ip_t *ip,
403 + const u_int32_t *flags,
404 + unsigned char index);
405 +
406 + /* new set creation - allocate type specific items
407 + */
408 + int (*create) (struct ip_set *set,
409 + const void *data, size_t size);
410 +
411 + /* retry the operation after successfully tweaking the set
412 + */
413 + int (*retry) (struct ip_set *set);
414 +
415 + /* set destruction - free type specific items
416 + * There is no return value.
417 + * Can be called only when child sets are destroyed.
418 + */
419 + void (*destroy) (struct ip_set *set);
420 +
421 + /* set flushing - reset all bits in the set, or something similar.
422 + * There is no return value.
423 + */
424 + void (*flush) (struct ip_set *set);
425 +
426 + /* Listing: size needed for header
427 + */
428 + size_t header_size;
429 +
430 + /* Listing: Get the header
431 + *
432 + * Fill in the information in "data".
433 + * This function is always run after list_header_size() under a
434 + * writelock on the set. Therefore the length of "data" is
435 + * always correct.
436 + */
437 + void (*list_header) (const struct ip_set *set,
438 + void *data);
439 +
440 + /* Listing: Get the size for the set members
441 + */
442 + int (*list_members_size) (const struct ip_set *set);
443 +
444 + /* Listing: Get the set members
445 + *
446 + * Fill in the information in "data".
447 + * This function is always run after list_members_size() under a
448 + * writelock on the set. Therefore the length of "data" is
449 + * always correct.
450 + */
451 + void (*list_members) (const struct ip_set *set,
452 + void *data);
453 +
454 + char typename[IP_SET_MAXNAMELEN];
455 + unsigned char features;
456 + int protocol_version;
457 +
458 + /* Set this to THIS_MODULE if you are a module, otherwise NULL */
459 + struct module *me;
460 +};
461 +
462 +extern int ip_set_register_set_type(struct ip_set_type *set_type);
463 +extern void ip_set_unregister_set_type(struct ip_set_type *set_type);
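/* Illustration: a set type module fills in one static ip_set_type and
 * registers it from its module init. A sketch with hypothetical mytype_*
 * callbacks and request structures (not one of the types in this patch):
 *
 *	static struct ip_set_type ip_set_mytype = {
 *		.typename		= "mytype",
 *		.features		= IPSET_TYPE_IP | IPSET_DATA_SINGLE,
 *		.protocol_version	= IP_SET_PROTOCOL_VERSION,
 *		.create			= &mytype_create,
 *		.destroy		= &mytype_destroy,
 *		.flush			= &mytype_flush,
 *		.reqsize		= sizeof(struct ip_set_req_mytype),
 *		.addip			= &mytype_addip,
 *		.addip_kernel		= &mytype_addip_kernel,
 *		.delip			= &mytype_delip,
 *		.delip_kernel		= &mytype_delip_kernel,
 *		.testip			= &mytype_testip,
 *		.testip_kernel		= &mytype_testip_kernel,
 *		.header_size		= sizeof(struct ip_set_req_mytype_create),
 *		.list_header		= &mytype_list_header,
 *		.list_members_size	= &mytype_list_members_size,
 *		.list_members		= &mytype_list_members,
 *		.me			= THIS_MODULE,
 *	};
 *
 *	static int __init ip_set_mytype_init(void)
 *	{
 *		return ip_set_register_set_type(&ip_set_mytype);
 *	}
 *
 *	static void __exit ip_set_mytype_fini(void)
 *	{
 *		ip_set_unregister_set_type(&ip_set_mytype);
 *	}
 *
 *	module_init(ip_set_mytype_init);
 *	module_exit(ip_set_mytype_fini);
 */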
464 +
465 +/* A generic ipset */
466 +struct ip_set {
467 + char name[IP_SET_MAXNAMELEN]; /* the name of the set */
468 + rwlock_t lock; /* lock for concurrency control */
469 + ip_set_id_t id; /* set id for swapping */
470 + ip_set_id_t binding; /* default binding for the set */
471 + atomic_t ref; /* in kernel and in hash references */
472 + struct ip_set_type *type; /* the set type */
473 + void *data; /* pooltype specific data */
474 +};
475 +
476 +/* Structure to bind set elements to sets */
477 +struct ip_set_hash {
478 + struct list_head list; /* list of clashing entries in hash */
479 + ip_set_ip_t ip; /* ip from set */
480 + ip_set_id_t id; /* set id */
481 + ip_set_id_t binding; /* set we bind the element to */
482 +};
483 +
484 +/* register and unregister set references */
485 +extern ip_set_id_t ip_set_get_byname(const char name[IP_SET_MAXNAMELEN]);
486 +extern ip_set_id_t ip_set_get_byindex(ip_set_id_t id);
487 +extern void ip_set_put(ip_set_id_t id);
488 +
489 +/* API for iptables set match, and SET target */
490 +extern void ip_set_addip_kernel(ip_set_id_t id,
491 + const struct sk_buff *skb,
492 + const u_int32_t *flags);
493 +extern void ip_set_delip_kernel(ip_set_id_t id,
494 + const struct sk_buff *skb,
495 + const u_int32_t *flags);
496 +extern int ip_set_testip_kernel(ip_set_id_t id,
497 + const struct sk_buff *skb,
498 + const u_int32_t *flags);
499 +
500 +#endif /* __KERNEL__ */
501 +
502 +#endif /*_IP_SET_H*/
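/* Illustration of the kernel API above, roughly the way the set match and
 * SET target modules use it: resolve and reference the set when a rule is
 * installed, test it per packet, drop the reference when the rule is
 * removed. A sketch, not the actual ipt_set.c code:
 *
 *	static ip_set_id_t index;
 *	static const u_int32_t flags[IP_SET_MAX_BINDINGS + 1] = { IPSET_SRC, 0 };
 *
 *	at rule installation time (takes a reference):
 *		index = ip_set_get_byname(setname);
 *		if (index == IP_SET_INVALID_ID)
 *			return -ENOENT;
 *
 *	per packet (nonzero means the source address is in the set):
 *		matched = ip_set_testip_kernel(index, skb, flags);
 *
 *	at rule removal time (drops the reference):
 *		ip_set_put(index);
 */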
503 diff -Nru linux-2.6.23/include/linux/netfilter_ipv4/ip_set_iphash.h linux-2.6.23.pom2patch.set/include/linux/netfilter_ipv4/ip_set_iphash.h
504 --- linux-2.6.23/include/linux/netfilter_ipv4/ip_set_iphash.h 1970-01-01 01:00:00.000000000 +0100
505 +++ linux-2.6.23.pom2patch.set/include/linux/netfilter_ipv4/ip_set_iphash.h 2007-10-12 11:52:37.000000000 +0200
506 @@ -0,0 +1,30 @@
507 +#ifndef __IP_SET_IPHASH_H
508 +#define __IP_SET_IPHASH_H
509 +
510 +#include <linux/netfilter_ipv4/ip_set.h>
511 +
512 +#define SETTYPE_NAME "iphash"
513 +#define MAX_RANGE 0x0000FFFF
514 +
515 +struct ip_set_iphash {
516 + ip_set_ip_t *members; /* the iphash proper */
517 + uint32_t elements; /* number of elements */
518 + uint32_t hashsize; /* hash size */
519 + uint16_t probes; /* max number of probes */
520 + uint16_t resize; /* resize factor in percent */
521 + ip_set_ip_t netmask; /* netmask */
522 + void *initval[0]; /* initvals for jhash_1word */
523 +};
524 +
525 +struct ip_set_req_iphash_create {
526 + uint32_t hashsize;
527 + uint16_t probes;
528 + uint16_t resize;
529 + ip_set_ip_t netmask;
530 +};
531 +
532 +struct ip_set_req_iphash {
533 + ip_set_ip_t ip;
534 +};
535 +
536 +#endif /* __IP_SET_IPHASH_H */
537 diff -Nru linux-2.6.23/include/linux/netfilter_ipv4/ip_set_ipmap.h linux-2.6.23.pom2patch.set/include/linux/netfilter_ipv4/ip_set_ipmap.h
538 --- linux-2.6.23/include/linux/netfilter_ipv4/ip_set_ipmap.h 1970-01-01 01:00:00.000000000 +0100
539 +++ linux-2.6.23.pom2patch.set/include/linux/netfilter_ipv4/ip_set_ipmap.h 2007-10-12 11:52:37.000000000 +0200
540 @@ -0,0 +1,56 @@
541 +#ifndef __IP_SET_IPMAP_H
542 +#define __IP_SET_IPMAP_H
543 +
544 +#include <linux/netfilter_ipv4/ip_set.h>
545 +
546 +#define SETTYPE_NAME "ipmap"
547 +#define MAX_RANGE 0x0000FFFF
548 +
549 +struct ip_set_ipmap {
550 + void *members; /* the ipmap proper */
551 + ip_set_ip_t first_ip; /* host byte order, included in range */
552 + ip_set_ip_t last_ip; /* host byte order, included in range */
553 + ip_set_ip_t netmask; /* subnet netmask */
554 + ip_set_ip_t sizeid; /* size of set in IPs */
555 + ip_set_ip_t hosts; /* number of hosts in a subnet */
556 +};
557 +
558 +struct ip_set_req_ipmap_create {
559 + ip_set_ip_t from;
560 + ip_set_ip_t to;
561 + ip_set_ip_t netmask;
562 +};
563 +
564 +struct ip_set_req_ipmap {
565 + ip_set_ip_t ip;
566 +};
567 +
568 +unsigned int
569 +mask_to_bits(ip_set_ip_t mask)
570 +{
571 + unsigned int bits = 32;
572 + ip_set_ip_t maskaddr;
573 +
574 + if (mask == 0xFFFFFFFF)
575 + return bits;
576 +
577 + maskaddr = 0xFFFFFFFE;
578 + while (--bits >= 0 && maskaddr != mask)
579 + maskaddr <<= 1;
580 +
581 + return bits;
582 +}
583 +
584 +ip_set_ip_t
585 +range_to_mask(ip_set_ip_t from, ip_set_ip_t to, unsigned int *bits)
586 +{
587 + ip_set_ip_t mask = 0xFFFFFFFE;
588 +
589 + *bits = 32;
590 + while (--(*bits) >= 0 && mask && (to & mask) != from)
591 + mask <<= 1;
592 +
593 + return mask;
594 +}
595 +
596 +#endif /* __IP_SET_IPMAP_H */
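/* Worked example for the two helpers above (addresses in host byte order):
 *
 *	unsigned int bits;
 *	ip_set_ip_t mask;
 *
 *	mask = range_to_mask(0xC0A80000, 0xC0A800FF, &bits);
 *		(192.168.0.0 - 192.168.0.255: mask == 0xFFFFFF00, bits == 24)
 *
 *	bits = mask_to_bits(0xFFFFFF00);
 *		(bits == 24, i.e. a /24 netmask)
 */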
597 diff -Nru linux-2.6.23/include/linux/netfilter_ipv4/ip_set_ipporthash.h linux-2.6.23.pom2patch.set/include/linux/netfilter_ipv4/ip_set_ipporthash.h
598 --- linux-2.6.23/include/linux/netfilter_ipv4/ip_set_ipporthash.h 1970-01-01 01:00:00.000000000 +0100
599 +++ linux-2.6.23.pom2patch.set/include/linux/netfilter_ipv4/ip_set_ipporthash.h 2007-10-12 11:52:37.000000000 +0200
600 @@ -0,0 +1,34 @@
601 +#ifndef __IP_SET_IPPORTHASH_H
602 +#define __IP_SET_IPPORTHASH_H
603 +
604 +#include <linux/netfilter_ipv4/ip_set.h>
605 +
606 +#define SETTYPE_NAME "ipporthash"
607 +#define MAX_RANGE 0x0000FFFF
608 +#define INVALID_PORT (MAX_RANGE + 1)
609 +
610 +struct ip_set_ipporthash {
611 + ip_set_ip_t *members; /* the ipporthash proper */
612 + uint32_t elements; /* number of elements */
613 + uint32_t hashsize; /* hash size */
614 + uint16_t probes; /* max number of probes */
615 + uint16_t resize; /* resize factor in percent */
616 + ip_set_ip_t first_ip; /* host byte order, included in range */
617 + ip_set_ip_t last_ip; /* host byte order, included in range */
618 + void *initval[0]; /* initvals for jhash_1word */
619 +};
620 +
621 +struct ip_set_req_ipporthash_create {
622 + uint32_t hashsize;
623 + uint16_t probes;
624 + uint16_t resize;
625 + ip_set_ip_t from;
626 + ip_set_ip_t to;
627 +};
628 +
629 +struct ip_set_req_ipporthash {
630 + ip_set_ip_t ip;
631 + ip_set_ip_t port;
632 +};
633 +
634 +#endif /* __IP_SET_IPPORTHASH_H */
635 diff -Nru linux-2.6.23/include/linux/netfilter_ipv4/ip_set_iptree.h linux-2.6.23.pom2patch.set/include/linux/netfilter_ipv4/ip_set_iptree.h
636 --- linux-2.6.23/include/linux/netfilter_ipv4/ip_set_iptree.h 1970-01-01 01:00:00.000000000 +0100
637 +++ linux-2.6.23.pom2patch.set/include/linux/netfilter_ipv4/ip_set_iptree.h 2007-10-12 11:52:37.000000000 +0200
638 @@ -0,0 +1,40 @@
639 +#ifndef __IP_SET_IPTREE_H
640 +#define __IP_SET_IPTREE_H
641 +
642 +#include <linux/netfilter_ipv4/ip_set.h>
643 +
644 +#define SETTYPE_NAME "iptree"
645 +#define MAX_RANGE 0x0000FFFF
646 +
647 +struct ip_set_iptreed {
648 + unsigned long expires[256]; /* x.x.x.ADDR */
649 +};
650 +
651 +struct ip_set_iptreec {
652 + struct ip_set_iptreed *tree[256]; /* x.x.ADDR.* */
653 +};
654 +
655 +struct ip_set_iptreeb {
656 + struct ip_set_iptreec *tree[256]; /* x.ADDR.*.* */
657 +};
658 +
659 +struct ip_set_iptree {
660 + unsigned int timeout;
661 + unsigned int gc_interval;
662 +#ifdef __KERNEL__
663 + uint32_t elements; /* number of elements */
664 + struct timer_list gc;
665 + struct ip_set_iptreeb *tree[256]; /* ADDR.*.*.* */
666 +#endif
667 +};
668 +
669 +struct ip_set_req_iptree_create {
670 + unsigned int timeout;
671 +};
672 +
673 +struct ip_set_req_iptree {
674 + ip_set_ip_t ip;
675 + unsigned int timeout;
676 +};
677 +
678 +#endif /* __IP_SET_IPTREE_H */
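/* Illustration of the octet-indexed layout above: for an address a.b.c.d
 * (in host byte order, a == ip >> 24 ... d == ip & 0xFF) the per-address
 * timeout is reached as
 *
 *	set->tree[a]->tree[b]->tree[c]->expires[d]
 *
 * with the intermediate ip_set_iptreeb/c/d levels allocated on demand as
 * addresses are added.
 */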
679 diff -Nru linux-2.6.23/include/linux/netfilter_ipv4/ip_set_iptreemap.h linux-2.6.23.pom2patch.set/include/linux/netfilter_ipv4/ip_set_iptreemap.h
680 --- linux-2.6.23/include/linux/netfilter_ipv4/ip_set_iptreemap.h 1970-01-01 01:00:00.000000000 +0100
681 +++ linux-2.6.23.pom2patch.set/include/linux/netfilter_ipv4/ip_set_iptreemap.h 2007-10-12 11:52:37.000000000 +0200
682 @@ -0,0 +1,40 @@
683 +#ifndef __IP_SET_IPTREEMAP_H
684 +#define __IP_SET_IPTREEMAP_H
685 +
686 +#include <linux/netfilter_ipv4/ip_set.h>
687 +
688 +#define SETTYPE_NAME "iptreemap"
689 +
690 +#ifdef __KERNEL__
691 +struct ip_set_iptreemap_d {
692 + unsigned char bitmap[32]; /* x.x.x.y */
693 +};
694 +
695 +struct ip_set_iptreemap_c {
696 + struct ip_set_iptreemap_d *tree[256]; /* x.x.y.x */
697 +};
698 +
699 +struct ip_set_iptreemap_b {
700 + struct ip_set_iptreemap_c *tree[256]; /* x.y.x.x */
701 + unsigned char dirty[32];
702 +};
703 +#endif
704 +
705 +struct ip_set_iptreemap {
706 + unsigned int gc_interval;
707 +#ifdef __KERNEL__
708 + struct timer_list gc;
709 + struct ip_set_iptreemap_b *tree[256]; /* y.x.x.x */
710 +#endif
711 +};
712 +
713 +struct ip_set_req_iptreemap_create {
714 + unsigned int gc_interval;
715 +};
716 +
717 +struct ip_set_req_iptreemap {
718 + ip_set_ip_t start;
719 + ip_set_ip_t end;
720 +};
721 +
722 +#endif /* __IP_SET_IPTREEMAP_H */
723 diff -Nru linux-2.6.23/include/linux/netfilter_ipv4/ip_set_jhash.h linux-2.6.23.pom2patch.set/include/linux/netfilter_ipv4/ip_set_jhash.h
724 --- linux-2.6.23/include/linux/netfilter_ipv4/ip_set_jhash.h 1970-01-01 01:00:00.000000000 +0100
725 +++ linux-2.6.23.pom2patch.set/include/linux/netfilter_ipv4/ip_set_jhash.h 2007-10-12 11:52:37.000000000 +0200
726 @@ -0,0 +1,148 @@
727 +#ifndef _LINUX_IPSET_JHASH_H
728 +#define _LINUX_IPSET_JHASH_H
729 +
730 +/* This is a copy of linux/jhash.h but the types u32/u8 are changed
731 + * to __u32/__u8 so that the header file can be included into
732 + * userspace code as well. Jozsef Kadlecsik (kadlec@blackhole.kfki.hu)
733 + */
734 +
735 +/* jhash.h: Jenkins hash support.
736 + *
737 + * Copyright (C) 1996 Bob Jenkins (bob_jenkins@burtleburtle.net)
738 + *
739 + * http://burtleburtle.net/bob/hash/
740 + *
741 + * These are the credits from Bob's sources:
742 + *
743 + * lookup2.c, by Bob Jenkins, December 1996, Public Domain.
744 + * hash(), hash2(), hash3, and mix() are externally useful functions.
745 + * Routines to test the hash are included if SELF_TEST is defined.
746 + * You can use this free for any purpose. It has no warranty.
747 + *
748 + * Copyright (C) 2003 David S. Miller (davem@redhat.com)
749 + *
750 + * I've modified Bob's hash to be useful in the Linux kernel, and
751 + * any bugs present are surely my fault. -DaveM
752 + */
753 +
754 +/* NOTE: Arguments are modified. */
755 +#define __jhash_mix(a, b, c) \
756 +{ \
757 + a -= b; a -= c; a ^= (c>>13); \
758 + b -= c; b -= a; b ^= (a<<8); \
759 + c -= a; c -= b; c ^= (b>>13); \
760 + a -= b; a -= c; a ^= (c>>12); \
761 + b -= c; b -= a; b ^= (a<<16); \
762 + c -= a; c -= b; c ^= (b>>5); \
763 + a -= b; a -= c; a ^= (c>>3); \
764 + b -= c; b -= a; b ^= (a<<10); \
765 + c -= a; c -= b; c ^= (b>>15); \
766 +}
767 +
768 +/* The golden ratio: an arbitrary value */
769 +#define JHASH_GOLDEN_RATIO 0x9e3779b9
770 +
771 +/* The most generic version, hashes an arbitrary sequence
772 + * of bytes. No alignment or length assumptions are made about
773 + * the input key.
774 + */
775 +static inline __u32 jhash(void *key, __u32 length, __u32 initval)
776 +{
777 + __u32 a, b, c, len;
778 + __u8 *k = key;
779 +
780 + len = length;
781 + a = b = JHASH_GOLDEN_RATIO;
782 + c = initval;
783 +
784 + while (len >= 12) {
785 + a += (k[0] +((__u32)k[1]<<8) +((__u32)k[2]<<16) +((__u32)k[3]<<24));
786 + b += (k[4] +((__u32)k[5]<<8) +((__u32)k[6]<<16) +((__u32)k[7]<<24));
787 + c += (k[8] +((__u32)k[9]<<8) +((__u32)k[10]<<16)+((__u32)k[11]<<24));
788 +
789 + __jhash_mix(a,b,c);
790 +
791 + k += 12;
792 + len -= 12;
793 + }
794 +
795 + c += length;
796 + switch (len) {
797 + case 11: c += ((__u32)k[10]<<24);
798 + case 10: c += ((__u32)k[9]<<16);
799 + case 9 : c += ((__u32)k[8]<<8);
800 + case 8 : b += ((__u32)k[7]<<24);
801 + case 7 : b += ((__u32)k[6]<<16);
802 + case 6 : b += ((__u32)k[5]<<8);
803 + case 5 : b += k[4];
804 + case 4 : a += ((__u32)k[3]<<24);
805 + case 3 : a += ((__u32)k[2]<<16);
806 + case 2 : a += ((__u32)k[1]<<8);
807 + case 1 : a += k[0];
808 + };
809 +
810 + __jhash_mix(a,b,c);
811 +
812 + return c;
813 +}
814 +
815 +/* A special optimized version that handles 1 or more __u32s.
816 + * The length parameter here is the number of __u32s in the key.
817 + */
818 +static inline __u32 jhash2(__u32 *k, __u32 length, __u32 initval)
819 +{
820 + __u32 a, b, c, len;
821 +
822 + a = b = JHASH_GOLDEN_RATIO;
823 + c = initval;
824 + len = length;
825 +
826 + while (len >= 3) {
827 + a += k[0];
828 + b += k[1];
829 + c += k[2];
830 + __jhash_mix(a, b, c);
831 + k += 3; len -= 3;
832 + }
833 +
834 + c += length * 4;
835 +
836 + switch (len) {
837 + case 2 : b += k[1];
838 + case 1 : a += k[0];
839 + };
840 +
841 + __jhash_mix(a,b,c);
842 +
843 + return c;
844 +}
845 +
846 +
847 +/* A special ultra-optimized version that knows it is hashing exactly
848 + * 3, 2 or 1 word(s).
849 + *
850 + * NOTE: In particular the "c += length; __jhash_mix(a,b,c);" normally
851 + * done at the end is not done here.
852 + */
853 +static inline __u32 jhash_3words(__u32 a, __u32 b, __u32 c, __u32 initval)
854 +{
855 + a += JHASH_GOLDEN_RATIO;
856 + b += JHASH_GOLDEN_RATIO;
857 + c += initval;
858 +
859 + __jhash_mix(a, b, c);
860 +
861 + return c;
862 +}
863 +
864 +static inline __u32 jhash_2words(__u32 a, __u32 b, __u32 initval)
865 +{
866 + return jhash_3words(a, b, 0, initval);
867 +}
868 +
869 +static inline __u32 jhash_1word(__u32 a, __u32 initval)
870 +{
871 + return jhash_3words(a, 0, 0, initval);
872 +}
873 +
874 +#endif /* _LINUX_IPSET_JHASH_H */
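/* Illustration: the hash based set types in this patch use jhash_1word()
 * with a few per-probe random init values to pick slots in an open
 * addressed table. A sketch using the ip_set_iphash fields earlier in this
 * patch as plain variables (not the exact iphash/nethash code):
 *
 *	uint16_t i;
 *
 *	for (i = 0; i < probes; i++) {
 *		uint32_t slot = jhash_1word(ip, initval[i]) % hashsize;
 *
 *		if (members[slot] == ip)
 *			return 1;	(found)
 *	}
 *	return 0;			(not in the set)
 */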
875 diff -Nru linux-2.6.23/include/linux/netfilter_ipv4/ip_set_macipmap.h linux-2.6.23.pom2patch.set/include/linux/netfilter_ipv4/ip_set_macipmap.h
876 --- linux-2.6.23/include/linux/netfilter_ipv4/ip_set_macipmap.h 1970-01-01 01:00:00.000000000 +0100
877 +++ linux-2.6.23.pom2patch.set/include/linux/netfilter_ipv4/ip_set_macipmap.h 2007-10-12 11:52:37.000000000 +0200
878 @@ -0,0 +1,38 @@
879 +#ifndef __IP_SET_MACIPMAP_H
880 +#define __IP_SET_MACIPMAP_H
881 +
882 +#include <linux/netfilter_ipv4/ip_set.h>
883 +
884 +#define SETTYPE_NAME "macipmap"
885 +#define MAX_RANGE 0x0000FFFF
886 +
887 +/* general flags */
888 +#define IPSET_MACIP_MATCHUNSET 1
889 +
890 +/* per ip flags */
891 +#define IPSET_MACIP_ISSET 1
892 +
893 +struct ip_set_macipmap {
894 + void *members; /* the macipmap proper */
895 + ip_set_ip_t first_ip; /* host byte order, included in range */
896 + ip_set_ip_t last_ip; /* host byte order, included in range */
897 + u_int32_t flags;
898 +};
899 +
900 +struct ip_set_req_macipmap_create {
901 + ip_set_ip_t from;
902 + ip_set_ip_t to;
903 + u_int32_t flags;
904 +};
905 +
906 +struct ip_set_req_macipmap {
907 + ip_set_ip_t ip;
908 + unsigned char ethernet[ETH_ALEN];
909 +};
910 +
911 +struct ip_set_macip {
912 + unsigned short flags;
913 + unsigned char ethernet[ETH_ALEN];
914 +};
915 +
916 +#endif /* __IP_SET_MACIPMAP_H */
917 diff -Nru linux-2.6.23/include/linux/netfilter_ipv4/ip_set_malloc.h linux-2.6.23.pom2patch.set/include/linux/netfilter_ipv4/ip_set_malloc.h
918 --- linux-2.6.23/include/linux/netfilter_ipv4/ip_set_malloc.h 1970-01-01 01:00:00.000000000 +0100
919 +++ linux-2.6.23.pom2patch.set/include/linux/netfilter_ipv4/ip_set_malloc.h 2007-10-12 11:52:37.000000000 +0200
920 @@ -0,0 +1,116 @@
921 +#ifndef _IP_SET_MALLOC_H
922 +#define _IP_SET_MALLOC_H
923 +
924 +#ifdef __KERNEL__
925 +
926 +/* Memory allocation and deallocation */
927 +static size_t max_malloc_size = 0;
928 +
929 +static inline void init_max_malloc_size(void)
930 +{
931 +#define CACHE(x) max_malloc_size = x;
932 +#include <linux/kmalloc_sizes.h>
933 +#undef CACHE
934 +}
935 +
936 +static inline void * ip_set_malloc(size_t bytes)
937 +{
938 + if (bytes > max_malloc_size)
939 + return vmalloc(bytes);
940 + else
941 + return kmalloc(bytes, GFP_KERNEL);
942 +}
943 +
944 +static inline void ip_set_free(void * data, size_t bytes)
945 +{
946 + if (bytes > max_malloc_size)
947 + vfree(data);
948 + else
949 + kfree(data);
950 +}
951 +
952 +struct harray {
953 + size_t max_elements;
954 + void *arrays[0];
955 +};
956 +
957 +static inline void *
958 +harray_malloc(size_t hashsize, size_t typesize, int flags)
959 +{
960 + struct harray *harray;
961 + size_t max_elements, size, i, j;
962 +
963 + if (!max_malloc_size)
964 + init_max_malloc_size();
965 +
966 + if (typesize > max_malloc_size)
967 + return NULL;
968 +
969 + max_elements = max_malloc_size/typesize;
970 + size = hashsize/max_elements;
971 + if (hashsize % max_elements)
972 + size++;
973 +
974 + /* Last pointer signals end of arrays */
975 + harray = kmalloc(sizeof(struct harray) + (size + 1) * sizeof(void *),
976 + flags);
977 +
978 + if (!harray)
979 + return NULL;
980 +
981 + for (i = 0; i < size - 1; i++) {
982 + harray->arrays[i] = kmalloc(max_elements * typesize, flags);
983 + if (!harray->arrays[i])
984 + goto undo;
985 + memset(harray->arrays[i], 0, max_elements * typesize);
986 + }
987 + harray->arrays[i] = kmalloc((hashsize - i * max_elements) * typesize,
988 + flags);
989 + if (!harray->arrays[i])
990 + goto undo;
991 + memset(harray->arrays[i], 0, (hashsize - i * max_elements) * typesize);
992 +
993 + harray->max_elements = max_elements;
994 + harray->arrays[size] = NULL;
995 +
996 + return (void *)harray;
997 +
998 + undo:
999 + for (j = 0; j < i; j++) {
1000 + kfree(harray->arrays[j]);
1001 + }
1002 + kfree(harray);
1003 + return NULL;
1004 +}
1005 +
1006 +static inline void harray_free(void *h)
1007 +{
1008 + struct harray *harray = (struct harray *) h;
1009 + size_t i;
1010 +
1011 + for (i = 0; harray->arrays[i] != NULL; i++)
1012 + kfree(harray->arrays[i]);
1013 + kfree(harray);
1014 +}
1015 +
1016 +static inline void harray_flush(void *h, size_t hashsize, size_t typesize)
1017 +{
1018 + struct harray *harray = (struct harray *) h;
1019 + size_t i;
1020 +
1021 + for (i = 0; harray->arrays[i+1] != NULL; i++)
1022 + memset(harray->arrays[i], 0, harray->max_elements * typesize);
1023 + memset(harray->arrays[i], 0,
1024 + (hashsize - i * harray->max_elements) * typesize);
1025 +}
1026 +
1027 +#define HARRAY_ELEM(h, type, which) \
1028 +({ \
1029 + struct harray *__h = (struct harray *)(h); \
1030 + ((type)((__h)->arrays[(which)/(__h)->max_elements]) \
1031 + + (which)%(__h)->max_elements); \
1032 +})
1033 +
1034 +#endif /* __KERNEL__ */
1035 +
1036 +#endif /*_IP_SET_MALLOC_H*/
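/* Illustration: HARRAY_ELEM() gives flat-array style access to a harray
 * that may be split over several kmalloc'ed chunks. A sketch of how a
 * hash type might use it (hashsize, id and ip are placeholders):
 *
 *	void *members;
 *	ip_set_ip_t *elem;
 *
 *	members = harray_malloc(hashsize, sizeof(ip_set_ip_t), GFP_KERNEL);
 *	if (members != NULL) {
 *		elem = HARRAY_ELEM(members, ip_set_ip_t *, id);
 *		*elem = ip;
 *		harray_free(members);
 *	}
 */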
1037 diff -Nru linux-2.6.23/include/linux/netfilter_ipv4/ip_set_nethash.h linux-2.6.23.pom2patch.set/include/linux/netfilter_ipv4/ip_set_nethash.h
1038 --- linux-2.6.23/include/linux/netfilter_ipv4/ip_set_nethash.h 1970-01-01 01:00:00.000000000 +0100
1039 +++ linux-2.6.23.pom2patch.set/include/linux/netfilter_ipv4/ip_set_nethash.h 2007-10-12 11:52:37.000000000 +0200
1040 @@ -0,0 +1,55 @@
1041 +#ifndef __IP_SET_NETHASH_H
1042 +#define __IP_SET_NETHASH_H
1043 +
1044 +#include <linux/netfilter_ipv4/ip_set.h>
1045 +
1046 +#define SETTYPE_NAME "nethash"
1047 +#define MAX_RANGE 0x0000FFFF
1048 +
1049 +struct ip_set_nethash {
1050 + ip_set_ip_t *members; /* the nethash proper */
1051 + uint32_t elements; /* number of elements */
1052 + uint32_t hashsize; /* hash size */
1053 + uint16_t probes; /* max number of probes */
1054 + uint16_t resize; /* resize factor in percent */
1055 + unsigned char cidr[30]; /* CIDR sizes */
1056 + void *initval[0]; /* initvals for jhash_1word */
1057 +};
1058 +
1059 +struct ip_set_req_nethash_create {
1060 + uint32_t hashsize;
1061 + uint16_t probes;
1062 + uint16_t resize;
1063 +};
1064 +
1065 +struct ip_set_req_nethash {
1066 + ip_set_ip_t ip;
1067 + unsigned char cidr;
1068 +};
1069 +
1070 +static unsigned char shifts[] = {255, 253, 249, 241, 225, 193, 129, 1};
1071 +
1072 +static inline ip_set_ip_t
1073 +pack(ip_set_ip_t ip, unsigned char cidr)
1074 +{
1075 + ip_set_ip_t addr, *paddr = &addr;
1076 + unsigned char n, t, *a;
1077 +
1078 + addr = htonl(ip & (0xFFFFFFFF << (32 - (cidr))));
1079 +#ifdef __KERNEL__
1080 + DP("ip:%u.%u.%u.%u/%u", NIPQUAD(addr), cidr);
1081 +#endif
1082 + n = cidr / 8;
1083 + t = cidr % 8;
1084 + a = &((unsigned char *)paddr)[n];
1085 + *a = *a /(1 << (8 - t)) + shifts[t];
1086 +#ifdef __KERNEL__
1087 + DP("n: %u, t: %u, a: %u", n, t, *a);
1088 + DP("ip:%u.%u.%u.%u/%u, %u.%u.%u.%u",
1089 + HIPQUAD(ip), cidr, NIPQUAD(addr));
1090 +#endif
1091 +
1092 + return ntohl(addr);
1093 +}
1094 +
1095 +#endif /* __IP_SET_NETHASH_H */
1096 diff -Nru linux-2.6.23/include/linux/netfilter_ipv4/ip_set_portmap.h linux-2.6.23.pom2patch.set/include/linux/netfilter_ipv4/ip_set_portmap.h
1097 --- linux-2.6.23/include/linux/netfilter_ipv4/ip_set_portmap.h 1970-01-01 01:00:00.000000000 +0100
1098 +++ linux-2.6.23.pom2patch.set/include/linux/netfilter_ipv4/ip_set_portmap.h 2007-10-12 11:52:37.000000000 +0200
1099 @@ -0,0 +1,25 @@
1100 +#ifndef __IP_SET_PORTMAP_H
1101 +#define __IP_SET_PORTMAP_H
1102 +
1103 +#include <linux/netfilter_ipv4/ip_set.h>
1104 +
1105 +#define SETTYPE_NAME "portmap"
1106 +#define MAX_RANGE 0x0000FFFF
1107 +#define INVALID_PORT (MAX_RANGE + 1)
1108 +
1109 +struct ip_set_portmap {
1110 + void *members; /* the portmap proper */
1111 + ip_set_ip_t first_port; /* host byte order, included in range */
1112 + ip_set_ip_t last_port; /* host byte order, included in range */
1113 +};
1114 +
1115 +struct ip_set_req_portmap_create {
1116 + ip_set_ip_t from;
1117 + ip_set_ip_t to;
1118 +};
1119 +
1120 +struct ip_set_req_portmap {
1121 + ip_set_ip_t port;
1122 +};
1123 +
1124 +#endif /* __IP_SET_PORTMAP_H */
1125 diff -Nru linux-2.6.23/include/linux/netfilter_ipv4/ipt_set.h linux-2.6.23.pom2patch.set/include/linux/netfilter_ipv4/ipt_set.h
1126 --- linux-2.6.23/include/linux/netfilter_ipv4/ipt_set.h 1970-01-01 01:00:00.000000000 +0100
1127 +++ linux-2.6.23.pom2patch.set/include/linux/netfilter_ipv4/ipt_set.h 2007-10-12 11:52:37.000000000 +0200
1128 @@ -0,0 +1,21 @@
1129 +#ifndef _IPT_SET_H
1130 +#define _IPT_SET_H
1131 +
1132 +#include <linux/netfilter_ipv4/ip_set.h>
1133 +
1134 +struct ipt_set_info {
1135 + ip_set_id_t index;
1136 + u_int32_t flags[IP_SET_MAX_BINDINGS + 1];
1137 +};
1138 +
1139 +/* match info */
1140 +struct ipt_set_info_match {
1141 + struct ipt_set_info match_set;
1142 +};
1143 +
1144 +struct ipt_set_info_target {
1145 + struct ipt_set_info add_set;
1146 + struct ipt_set_info del_set;
1147 +};
1148 +
1149 +#endif /*_IPT_SET_H*/
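/* Illustration: flags[0] describes the initial lookup and each further
 * non-zero entry tells the kernel which direction to use for the next
 * bound set (see ip_set.c). A sketch of what userspace might fill in for
 * a two-level src,dst match (not the actual libipt_set code; the set
 * index comes from IP_SET_OP_GET_BYNAME):
 *
 *	struct ipt_set_info info;
 *
 *	memset(&info, 0, sizeof(info));
 *	info.index = index;
 *	info.flags[0] = IPSET_SRC;	(match the source address first)
 *	info.flags[1] = IPSET_DST;	(then the destination, in the bound set)
 *	(flags[2] stays 0: stop after two levels)
 */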
1150 diff -Nru linux-2.6.23/net/ipv4/netfilter/ip_set.c linux-2.6.23.pom2patch.set/net/ipv4/netfilter/ip_set.c
1151 --- linux-2.6.23/net/ipv4/netfilter/ip_set.c 1970-01-01 01:00:00.000000000 +0100
1152 +++ linux-2.6.23.pom2patch.set/net/ipv4/netfilter/ip_set.c 2007-10-12 11:52:37.000000000 +0200
1153 @@ -0,0 +1,2003 @@
1154 +/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
1155 + * Patrick Schaaf <bof@bof.de>
1156 + * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
1157 + *
1158 + * This program is free software; you can redistribute it and/or modify
1159 + * it under the terms of the GNU General Public License version 2 as
1160 + * published by the Free Software Foundation.
1161 + */
1162 +
1163 +/* Kernel module for IP set management */
1164 +
1165 +#include <linux/version.h>
1166 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
1167 +#include <linux/config.h>
1168 +#endif
1169 +#include <linux/module.h>
1170 +#include <linux/moduleparam.h>
1171 +#include <linux/kmod.h>
1172 +#include <linux/ip.h>
1173 +#include <linux/skbuff.h>
1174 +#include <linux/random.h>
1175 +#include <linux/jhash.h>
1176 +#include <linux/netfilter_ipv4/ip_tables.h>
1177 +#include <linux/errno.h>
1178 +#include <asm/uaccess.h>
1179 +#include <asm/bitops.h>
1180 +#include <asm/semaphore.h>
1181 +#include <linux/spinlock.h>
1182 +#include <linux/vmalloc.h>
1183 +
1184 +#define ASSERT_READ_LOCK(x)
1185 +#define ASSERT_WRITE_LOCK(x)
1186 +#include <linux/netfilter_ipv4/ip_set.h>
1187 +
1188 +static struct list_head set_type_list; /* all registered sets */
1189 +static struct ip_set **ip_set_list; /* all individual sets */
1190 +static DEFINE_RWLOCK(ip_set_lock); /* protects the lists and the hash */
1191 +static DECLARE_MUTEX(ip_set_app_mutex); /* serializes user access */
1192 +static ip_set_id_t ip_set_max = CONFIG_IP_NF_SET_MAX;
1193 +static ip_set_id_t ip_set_bindings_hash_size = CONFIG_IP_NF_SET_HASHSIZE;
1194 +static struct list_head *ip_set_hash; /* hash of bindings */
1195 +static unsigned int ip_set_hash_random; /* random seed */
1196 +
1197 +/*
1198 + * Sets are identified either by the index in ip_set_list or by id.
1199 + * The id never changes and is used to find a key in the hash.
1200 + * The index may change by swapping and used at all other places
1201 + * (set/SET netfilter modules, binding value, etc.)
1202 + *
1203 + * Userspace requests are serialized by ip_set_app_mutex and sets can
1204 + * be deleted only from userspace. Therefore ip_set_list locking
1205 + * must obey the following rules:
1206 + *
1207 + * - kernel requests: read and write locking mandatory
1208 + * - user requests: read locking optional, write locking mandatory
1209 + */
1210 +
1211 +static inline void
1212 +__ip_set_get(ip_set_id_t index)
1213 +{
1214 + atomic_inc(&ip_set_list[index]->ref);
1215 +}
1216 +
1217 +static inline void
1218 +__ip_set_put(ip_set_id_t index)
1219 +{
1220 + atomic_dec(&ip_set_list[index]->ref);
1221 +}
1222 +
1223 +/*
1224 + * Binding routines
1225 + */
1226 +
1227 +static inline struct ip_set_hash *
1228 +__ip_set_find(u_int32_t key, ip_set_id_t id, ip_set_ip_t ip)
1229 +{
1230 + struct ip_set_hash *set_hash;
1231 +
1232 + list_for_each_entry(set_hash, &ip_set_hash[key], list)
1233 + if (set_hash->id == id && set_hash->ip == ip)
1234 + return set_hash;
1235 +
1236 + return NULL;
1237 +}
1238 +
1239 +static ip_set_id_t
1240 +ip_set_find_in_hash(ip_set_id_t id, ip_set_ip_t ip)
1241 +{
1242 + u_int32_t key = jhash_2words(id, ip, ip_set_hash_random)
1243 + % ip_set_bindings_hash_size;
1244 + struct ip_set_hash *set_hash;
1245 +
1246 + ASSERT_READ_LOCK(&ip_set_lock);
1247 + IP_SET_ASSERT(ip_set_list[id]);
1248 + DP("set: %s, ip: %u.%u.%u.%u", ip_set_list[id]->name, HIPQUAD(ip));
1249 +
1250 + set_hash = __ip_set_find(key, id, ip);
1251 +
1252 + DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
1253 + HIPQUAD(ip),
1254 + set_hash != NULL ? ip_set_list[set_hash->binding]->name : "");
1255 +
1256 + return (set_hash != NULL ? set_hash->binding : IP_SET_INVALID_ID);
1257 +}
1258 +
1259 +static inline void
1260 +__set_hash_del(struct ip_set_hash *set_hash)
1261 +{
1262 + ASSERT_WRITE_LOCK(&ip_set_lock);
1263 + IP_SET_ASSERT(ip_set_list[set_hash->binding]);
1264 +
1265 + __ip_set_put(set_hash->binding);
1266 + list_del(&set_hash->list);
1267 + kfree(set_hash);
1268 +}
1269 +
1270 +static int
1271 +ip_set_hash_del(ip_set_id_t id, ip_set_ip_t ip)
1272 +{
1273 + u_int32_t key = jhash_2words(id, ip, ip_set_hash_random)
1274 + % ip_set_bindings_hash_size;
1275 + struct ip_set_hash *set_hash;
1276 +
1277 + IP_SET_ASSERT(ip_set_list[id]);
1278 + DP("set: %s, ip: %u.%u.%u.%u", ip_set_list[id]->name, HIPQUAD(ip));
1279 + write_lock_bh(&ip_set_lock);
1280 + set_hash = __ip_set_find(key, id, ip);
1281 + DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
1282 + HIPQUAD(ip),
1283 + set_hash != NULL ? ip_set_list[set_hash->binding]->name : "");
1284 +
1285 + if (set_hash != NULL)
1286 + __set_hash_del(set_hash);
1287 + write_unlock_bh(&ip_set_lock);
1288 + return 0;
1289 +}
1290 +
1291 +static int
1292 +ip_set_hash_add(ip_set_id_t id, ip_set_ip_t ip, ip_set_id_t binding)
1293 +{
1294 + u_int32_t key = jhash_2words(id, ip, ip_set_hash_random)
1295 + % ip_set_bindings_hash_size;
1296 + struct ip_set_hash *set_hash;
1297 + int ret = 0;
1298 +
1299 + IP_SET_ASSERT(ip_set_list[id]);
1300 + IP_SET_ASSERT(ip_set_list[binding]);
1301 + DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
1302 + HIPQUAD(ip), ip_set_list[binding]->name);
1303 + write_lock_bh(&ip_set_lock);
1304 + set_hash = __ip_set_find(key, id, ip);
1305 + if (!set_hash) {
1306 + set_hash = kmalloc(sizeof(struct ip_set_hash), GFP_ATOMIC);
1307 + if (!set_hash) {
1308 + ret = -ENOMEM;
1309 + goto unlock;
1310 + }
1311 + INIT_LIST_HEAD(&set_hash->list);
1312 + set_hash->id = id;
1313 + set_hash->ip = ip;
1314 + list_add(&set_hash->list, &ip_set_hash[key]);
1315 + } else {
1316 + IP_SET_ASSERT(ip_set_list[set_hash->binding]);
1317 + DP("overwrite binding: %s",
1318 + ip_set_list[set_hash->binding]->name);
1319 + __ip_set_put(set_hash->binding);
1320 + }
1321 + set_hash->binding = binding;
1322 + __ip_set_get(set_hash->binding);
1323 + DP("stored: key %u, id %u (%s), ip %u.%u.%u.%u, binding %u (%s)",
1324 + key, id, ip_set_list[id]->name,
1325 + HIPQUAD(ip), binding, ip_set_list[binding]->name);
1326 + unlock:
1327 + write_unlock_bh(&ip_set_lock);
1328 + return ret;
1329 +}
1330 +
1331 +#define FOREACH_HASH_DO(fn, args...) \
1332 +({ \
1333 + ip_set_id_t __key; \
1334 + struct ip_set_hash *__set_hash; \
1335 + \
1336 + for (__key = 0; __key < ip_set_bindings_hash_size; __key++) { \
1337 + list_for_each_entry(__set_hash, &ip_set_hash[__key], list) \
1338 + fn(__set_hash , ## args); \
1339 + } \
1340 +})
1341 +
1342 +#define FOREACH_HASH_RW_DO(fn, args...) \
1343 +({ \
1344 + ip_set_id_t __key; \
1345 + struct ip_set_hash *__set_hash, *__n; \
1346 + \
1347 + ASSERT_WRITE_LOCK(&ip_set_lock); \
1348 + for (__key = 0; __key < ip_set_bindings_hash_size; __key++) { \
1349 + list_for_each_entry_safe(__set_hash, __n, &ip_set_hash[__key], list)\
1350 + fn(__set_hash , ## args); \
1351 + } \
1352 +})
1353 +
1354 +/* Add, del and test set entries from kernel */
1355 +
1356 +#define follow_bindings(index, set, ip) \
1357 +((index = ip_set_find_in_hash((set)->id, ip)) != IP_SET_INVALID_ID \
1358 + || (index = (set)->binding) != IP_SET_INVALID_ID)
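/* Example of the chain walked by the functions below: with
 * flags = { IPSET_SRC, IPSET_DST, 0 } the first set is tested against the
 * source address; on a match, the binding of the matched element (or,
 * failing that, the set's default binding) names the next set, which is
 * then tested against the destination address. The walk stops when a test
 * fails, the next flags entry is 0, or no further binding exists.
 */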
1359 +
1360 +int
1361 +ip_set_testip_kernel(ip_set_id_t index,
1362 + const struct sk_buff *skb,
1363 + const u_int32_t *flags)
1364 +{
1365 + struct ip_set *set;
1366 + ip_set_ip_t ip;
1367 + int res;
1368 + unsigned char i = 0;
1369 +
1370 + IP_SET_ASSERT(flags[i]);
1371 + read_lock_bh(&ip_set_lock);
1372 + do {
1373 + set = ip_set_list[index];
1374 + IP_SET_ASSERT(set);
1375 + DP("set %s, index %u", set->name, index);
1376 + read_lock_bh(&set->lock);
1377 + res = set->type->testip_kernel(set, skb, &ip, flags, i++);
1378 + read_unlock_bh(&set->lock);
1379 + i += !!(set->type->features & IPSET_DATA_DOUBLE);
1380 + } while (res > 0
1381 + && flags[i]
1382 + && follow_bindings(index, set, ip));
1383 + read_unlock_bh(&ip_set_lock);
1384 +
1385 + return res;
1386 +}
1387 +
1388 +void
1389 +ip_set_addip_kernel(ip_set_id_t index,
1390 + const struct sk_buff *skb,
1391 + const u_int32_t *flags)
1392 +{
1393 + struct ip_set *set;
1394 + ip_set_ip_t ip;
1395 + int res;
1396 + unsigned char i = 0;
1397 +
1398 + IP_SET_ASSERT(flags[i]);
1399 + retry:
1400 + read_lock_bh(&ip_set_lock);
1401 + do {
1402 + set = ip_set_list[index];
1403 + IP_SET_ASSERT(set);
1404 + DP("set %s, index %u", set->name, index);
1405 + write_lock_bh(&set->lock);
1406 + res = set->type->addip_kernel(set, skb, &ip, flags, i++);
1407 + write_unlock_bh(&set->lock);
1408 + i += !!(set->type->features & IPSET_DATA_DOUBLE);
1409 + } while ((res == 0 || res == -EEXIST)
1410 + && flags[i]
1411 + && follow_bindings(index, set, ip));
1412 + read_unlock_bh(&ip_set_lock);
1413 +
1414 + if (res == -EAGAIN
1415 + && set->type->retry
1416 + && (res = set->type->retry(set)) == 0)
1417 + goto retry;
1418 +}
1419 +
1420 +void
1421 +ip_set_delip_kernel(ip_set_id_t index,
1422 + const struct sk_buff *skb,
1423 + const u_int32_t *flags)
1424 +{
1425 + struct ip_set *set;
1426 + ip_set_ip_t ip;
1427 + int res;
1428 + unsigned char i = 0;
1429 +
1430 + IP_SET_ASSERT(flags[i]);
1431 + read_lock_bh(&ip_set_lock);
1432 + do {
1433 + set = ip_set_list[index];
1434 + IP_SET_ASSERT(set);
1435 + DP("set %s, index %u", set->name, index);
1436 + write_lock_bh(&set->lock);
1437 + res = set->type->delip_kernel(set, skb, &ip, flags, i++);
1438 + write_unlock_bh(&set->lock);
1439 + i += !!(set->type->features & IPSET_DATA_DOUBLE);
1440 + } while ((res == 0 || res == -EEXIST)
1441 + && flags[i]
1442 + && follow_bindings(index, set, ip));
1443 + read_unlock_bh(&ip_set_lock);
1444 +}
1445 +
1446 +/* Register and deregister settype */
1447 +
1448 +static inline struct ip_set_type *
1449 +find_set_type(const char *name)
1450 +{
1451 + struct ip_set_type *set_type;
1452 +
1453 + list_for_each_entry(set_type, &set_type_list, list)
1454 + if (!strncmp(set_type->typename, name, IP_SET_MAXNAMELEN - 1))
1455 + return set_type;
1456 + return NULL;
1457 +}
1458 +
1459 +int
1460 +ip_set_register_set_type(struct ip_set_type *set_type)
1461 +{
1462 + int ret = 0;
1463 +
1464 + if (set_type->protocol_version != IP_SET_PROTOCOL_VERSION) {
1465 + ip_set_printk("'%s' uses wrong protocol version %u (want %u)",
1466 + set_type->typename,
1467 + set_type->protocol_version,
1468 + IP_SET_PROTOCOL_VERSION);
1469 + return -EINVAL;
1470 + }
1471 +
1472 + write_lock_bh(&ip_set_lock);
1473 + if (find_set_type(set_type->typename)) {
1474 + /* Duplicate! */
1475 + ip_set_printk("'%s' already registered!",
1476 + set_type->typename);
1477 + ret = -EINVAL;
1478 + goto unlock;
1479 + }
1480 + if (!try_module_get(THIS_MODULE)) {
1481 + ret = -EFAULT;
1482 + goto unlock;
1483 + }
1484 + list_add(&set_type->list, &set_type_list);
1485 + DP("'%s' registered.", set_type->typename);
1486 + unlock:
1487 + write_unlock_bh(&ip_set_lock);
1488 + return ret;
1489 +}
1490 +
1491 +void
1492 +ip_set_unregister_set_type(struct ip_set_type *set_type)
1493 +{
1494 + write_lock_bh(&ip_set_lock);
1495 + if (!find_set_type(set_type->typename)) {
1496 + ip_set_printk("'%s' not registered?",
1497 + set_type->typename);
1498 + goto unlock;
1499 + }
1500 + list_del(&set_type->list);
1501 + module_put(THIS_MODULE);
1502 + DP("'%s' unregistered.", set_type->typename);
1503 + unlock:
1504 + write_unlock_bh(&ip_set_lock);
1505 +
1506 +}
1507 +
1508 +/*
1509 + * Userspace routines
1510 + */
1511 +
1512 +/*
1513 + * Find set by name, reference it once. The reference makes sure the
1514 + * thing pointed to does not go away under our feet. Drop the reference
1515 + * later, using ip_set_put().
1516 + */
1517 +ip_set_id_t
1518 +ip_set_get_byname(const char *name)
1519 +{
1520 + ip_set_id_t i, index = IP_SET_INVALID_ID;
1521 +
1522 + down(&ip_set_app_mutex);
1523 + for (i = 0; i < ip_set_max; i++) {
1524 + if (ip_set_list[i] != NULL
1525 + && strcmp(ip_set_list[i]->name, name) == 0) {
1526 + __ip_set_get(i);
1527 + index = i;
1528 + break;
1529 + }
1530 + }
1531 + up(&ip_set_app_mutex);
1532 + return index;
1533 +}
1534 +
1535 +/*
1536 + * Find set by index, reference it once. The reference makes sure the
1537 + * thing pointed to does not go away under our feet. Drop the reference
1538 + * later, using ip_set_put().
1539 + */
1540 +ip_set_id_t
1541 +ip_set_get_byindex(ip_set_id_t index)
1542 +{
1543 + down(&ip_set_app_mutex);
1544 +
1545 + if (index >= ip_set_max)
1546 + return IP_SET_INVALID_ID;
1547 +
1548 + if (ip_set_list[index])
1549 + __ip_set_get(index);
1550 + else
1551 + index = IP_SET_INVALID_ID;
1552 +
1553 + up(&ip_set_app_mutex);
1554 + return index;
1555 +}
1556 +
1557 +/*
1558 + * If the given index points to a valid set, decrement the
1559 + * reference count by 1. The caller shall not assume the index
1560 + * to be valid after calling this function.
1561 + */
1562 +void ip_set_put(ip_set_id_t index)
1563 +{
1564 + down(&ip_set_app_mutex);
1565 + if (ip_set_list[index])
1566 + __ip_set_put(index);
1567 + up(&ip_set_app_mutex);
1568 +}
1569 +
1570 +/* Find a set by name or index */
1571 +static ip_set_id_t
1572 +ip_set_find_byname(const char *name)
1573 +{
1574 + ip_set_id_t i, index = IP_SET_INVALID_ID;
1575 +
1576 + for (i = 0; i < ip_set_max; i++) {
1577 + if (ip_set_list[i] != NULL
1578 + && strcmp(ip_set_list[i]->name, name) == 0) {
1579 + index = i;
1580 + break;
1581 + }
1582 + }
1583 + return index;
1584 +}
1585 +
1586 +static ip_set_id_t
1587 +ip_set_find_byindex(ip_set_id_t index)
1588 +{
1589 + if (index >= ip_set_max || ip_set_list[index] == NULL)
1590 + index = IP_SET_INVALID_ID;
1591 +
1592 + return index;
1593 +}
1594 +
1595 +/*
1596 + * Add, del, test, bind and unbind
1597 + */
1598 +
1599 +static inline int
1600 +__ip_set_testip(struct ip_set *set,
1601 + const void *data,
1602 + size_t size,
1603 + ip_set_ip_t *ip)
1604 +{
1605 + int res;
1606 +
1607 + read_lock_bh(&set->lock);
1608 + res = set->type->testip(set, data, size, ip);
1609 + read_unlock_bh(&set->lock);
1610 +
1611 + return res;
1612 +}
1613 +
1614 +static int
1615 +__ip_set_addip(ip_set_id_t index,
1616 + const void *data,
1617 + size_t size)
1618 +{
1619 + struct ip_set *set = ip_set_list[index];
1620 + ip_set_ip_t ip;
1621 + int res;
1622 +
1623 + IP_SET_ASSERT(set);
1624 + do {
1625 + write_lock_bh(&set->lock);
1626 + res = set->type->addip(set, data, size, &ip);
1627 + write_unlock_bh(&set->lock);
1628 + } while (res == -EAGAIN
1629 + && set->type->retry
1630 + && (res = set->type->retry(set)) == 0);
1631 +
1632 + return res;
1633 +}
1634 +
1635 +static int
1636 +ip_set_addip(ip_set_id_t index,
1637 + const void *data,
1638 + size_t size)
1639 +{
1640 +
1641 + return __ip_set_addip(index,
1642 + data + sizeof(struct ip_set_req_adt),
1643 + size - sizeof(struct ip_set_req_adt));
1644 +}
1645 +
1646 +static int
1647 +ip_set_delip(ip_set_id_t index,
1648 + const void *data,
1649 + size_t size)
1650 +{
1651 + struct ip_set *set = ip_set_list[index];
1652 + ip_set_ip_t ip;
1653 + int res;
1654 +
1655 + IP_SET_ASSERT(set);
1656 + write_lock_bh(&set->lock);
1657 + res = set->type->delip(set,
1658 + data + sizeof(struct ip_set_req_adt),
1659 + size - sizeof(struct ip_set_req_adt),
1660 + &ip);
1661 + write_unlock_bh(&set->lock);
1662 +
1663 + return res;
1664 +}
1665 +
1666 +static int
1667 +ip_set_testip(ip_set_id_t index,
1668 + const void *data,
1669 + size_t size)
1670 +{
1671 + struct ip_set *set = ip_set_list[index];
1672 + ip_set_ip_t ip;
1673 + int res;
1674 +
1675 + IP_SET_ASSERT(set);
1676 + res = __ip_set_testip(set,
1677 + data + sizeof(struct ip_set_req_adt),
1678 + size - sizeof(struct ip_set_req_adt),
1679 + &ip);
1680 +
1681 + return (res > 0 ? -EEXIST : res);
1682 +}
1683 +
1684 +static int
1685 +ip_set_bindip(ip_set_id_t index,
1686 + const void *data,
1687 + size_t size)
1688 +{
1689 + struct ip_set *set = ip_set_list[index];
1690 + struct ip_set_req_bind *req_bind;
1691 + ip_set_id_t binding;
1692 + ip_set_ip_t ip;
1693 + int res;
1694 +
1695 + IP_SET_ASSERT(set);
1696 + if (size < sizeof(struct ip_set_req_bind))
1697 + return -EINVAL;
1698 +
1699 + req_bind = (struct ip_set_req_bind *) data;
1700 + req_bind->binding[IP_SET_MAXNAMELEN - 1] = '\0';
1701 +
1702 + if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
1703 + /* Default binding of a set */
1704 + char *binding_name;
1705 +
1706 + if (size != sizeof(struct ip_set_req_bind) + IP_SET_MAXNAMELEN)
1707 + return -EINVAL;
1708 +
1709 + binding_name = (char *)(data + sizeof(struct ip_set_req_bind));
1710 + binding_name[IP_SET_MAXNAMELEN - 1] = '\0';
1711 +
1712 + binding = ip_set_find_byname(binding_name);
1713 + if (binding == IP_SET_INVALID_ID)
1714 + return -ENOENT;
1715 +
1716 + write_lock_bh(&ip_set_lock);
1717 + /* Sets as binding values are referenced */
1718 + if (set->binding != IP_SET_INVALID_ID)
1719 + __ip_set_put(set->binding);
1720 + set->binding = binding;
1721 + __ip_set_get(set->binding);
1722 + write_unlock_bh(&ip_set_lock);
1723 +
1724 + return 0;
1725 + }
1726 + binding = ip_set_find_byname(req_bind->binding);
1727 + if (binding == IP_SET_INVALID_ID)
1728 + return -ENOENT;
1729 +
1730 + res = __ip_set_testip(set,
1731 + data + sizeof(struct ip_set_req_bind),
1732 + size - sizeof(struct ip_set_req_bind),
1733 + &ip);
1734 + DP("set %s, ip: %u.%u.%u.%u, binding %s",
1735 + set->name, HIPQUAD(ip), ip_set_list[binding]->name);
1736 +
1737 + if (res >= 0)
1738 + res = ip_set_hash_add(set->id, ip, binding);
1739 +
1740 + return res;
1741 +}
1742 +
1743 +#define FOREACH_SET_DO(fn, args...) \
1744 +({ \
1745 + ip_set_id_t __i; \
1746 + struct ip_set *__set; \
1747 + \
1748 + for (__i = 0; __i < ip_set_max; __i++) { \
1749 + __set = ip_set_list[__i]; \
1750 + if (__set != NULL) \
1751 + fn(__set , ##args); \
1752 + } \
1753 +})
1754 +
1755 +static inline void
1756 +__set_hash_del_byid(struct ip_set_hash *set_hash, ip_set_id_t id)
1757 +{
1758 + if (set_hash->id == id)
1759 + __set_hash_del(set_hash);
1760 +}
1761 +
1762 +static inline void
1763 +__unbind_default(struct ip_set *set)
1764 +{
1765 + if (set->binding != IP_SET_INVALID_ID) {
1766 + /* Sets as binding values are referenced */
1767 + __ip_set_put(set->binding);
1768 + set->binding = IP_SET_INVALID_ID;
1769 + }
1770 +}
1771 +
1772 +static int
1773 +ip_set_unbindip(ip_set_id_t index,
1774 + const void *data,
1775 + size_t size)
1776 +{
1777 + struct ip_set *set;
1778 + struct ip_set_req_bind *req_bind;
1779 + ip_set_ip_t ip;
1780 + int res;
1781 +
1782 + DP("");
1783 + if (size < sizeof(struct ip_set_req_bind))
1784 + return -EINVAL;
1785 +
1786 + req_bind = (struct ip_set_req_bind *) data;
1787 + req_bind->binding[IP_SET_MAXNAMELEN - 1] = '\0';
1788 +
1789 + DP("%u %s", index, req_bind->binding);
1790 + if (index == IP_SET_INVALID_ID) {
1791 + /* unbind :all: */
1792 + if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
1793 + /* Default binding of sets */
1794 + write_lock_bh(&ip_set_lock);
1795 + FOREACH_SET_DO(__unbind_default);
1796 + write_unlock_bh(&ip_set_lock);
1797 + return 0;
1798 + } else if (strcmp(req_bind->binding, IPSET_TOKEN_ALL) == 0) {
1799 + /* Flush all bindings of all sets */
1800 + write_lock_bh(&ip_set_lock);
1801 + FOREACH_HASH_RW_DO(__set_hash_del);
1802 + write_unlock_bh(&ip_set_lock);
1803 + return 0;
1804 + }
1805 + DP("unreachable reached!");
1806 + return -EINVAL;
1807 + }
1808 +
1809 + set = ip_set_list[index];
1810 + IP_SET_ASSERT(set);
1811 + if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
1812 + /* Default binding of set */
1813 + ip_set_id_t binding = ip_set_find_byindex(set->binding);
1814 +
1815 + if (binding == IP_SET_INVALID_ID)
1816 + return -ENOENT;
1817 +
1818 + write_lock_bh(&ip_set_lock);
1819 + /* Sets in hash values are referenced */
1820 + __ip_set_put(set->binding);
1821 + set->binding = IP_SET_INVALID_ID;
1822 + write_unlock_bh(&ip_set_lock);
1823 +
1824 + return 0;
1825 + } else if (strcmp(req_bind->binding, IPSET_TOKEN_ALL) == 0) {
1826 + /* Flush all bindings */
1827 +
1828 + write_lock_bh(&ip_set_lock);
1829 + FOREACH_HASH_RW_DO(__set_hash_del_byid, set->id);
1830 + write_unlock_bh(&ip_set_lock);
1831 + return 0;
1832 + }
1833 +
1834 + res = __ip_set_testip(set,
1835 + data + sizeof(struct ip_set_req_bind),
1836 + size - sizeof(struct ip_set_req_bind),
1837 + &ip);
1838 +
1839 + DP("set %s, ip: %u.%u.%u.%u", set->name, HIPQUAD(ip));
1840 + if (res >= 0)
1841 + res = ip_set_hash_del(set->id, ip);
1842 +
1843 + return res;
1844 +}
1845 +
1846 +static int
1847 +ip_set_testbind(ip_set_id_t index,
1848 + const void *data,
1849 + size_t size)
1850 +{
1851 + struct ip_set *set = ip_set_list[index];
1852 + struct ip_set_req_bind *req_bind;
1853 + ip_set_id_t binding;
1854 + ip_set_ip_t ip;
1855 + int res;
1856 +
1857 + IP_SET_ASSERT(set);
1858 + if (size < sizeof(struct ip_set_req_bind))
1859 + return -EINVAL;
1860 +
1861 + req_bind = (struct ip_set_req_bind *) data;
1862 + req_bind->binding[IP_SET_MAXNAMELEN - 1] = '\0';
1863 +
1864 + if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
1865 + /* Default binding of set */
1866 + char *binding_name;
1867 +
1868 + if (size != sizeof(struct ip_set_req_bind) + IP_SET_MAXNAMELEN)
1869 + return -EINVAL;
1870 +
1871 + binding_name = (char *)(data + sizeof(struct ip_set_req_bind));
1872 + binding_name[IP_SET_MAXNAMELEN - 1] = '\0';
1873 +
1874 + binding = ip_set_find_byname(binding_name);
1875 + if (binding == IP_SET_INVALID_ID)
1876 + return -ENOENT;
1877 +
1878 + res = (set->binding == binding) ? -EEXIST : 0;
1879 +
1880 + return res;
1881 + }
1882 + binding = ip_set_find_byname(req_bind->binding);
1883 + if (binding == IP_SET_INVALID_ID)
1884 + return -ENOENT;
1885 +
1886 +
1887 + res = __ip_set_testip(set,
1888 + data + sizeof(struct ip_set_req_bind),
1889 + size - sizeof(struct ip_set_req_bind),
1890 + &ip);
1891 + DP("set %s, ip: %u.%u.%u.%u, binding %s",
1892 + set->name, HIPQUAD(ip), ip_set_list[binding]->name);
1893 +
1894 + if (res >= 0)
1895 + res = (ip_set_find_in_hash(set->id, ip) == binding)
1896 + ? -EEXIST : 0;
1897 +
1898 + return res;
1899 +}
1900 +
1901 +static struct ip_set_type *
1902 +find_set_type_rlock(const char *typename)
1903 +{
1904 + struct ip_set_type *type;
1905 +
1906 + read_lock_bh(&ip_set_lock);
1907 + type = find_set_type(typename);
1908 + if (type == NULL)
1909 + read_unlock_bh(&ip_set_lock);
1910 +
1911 + return type;
1912 +}
1913 +
1914 +static int
1915 +find_free_id(const char *name,
1916 + ip_set_id_t *index,
1917 + ip_set_id_t *id)
1918 +{
1919 + ip_set_id_t i;
1920 +
1921 + *id = IP_SET_INVALID_ID;
1922 + for (i = 0; i < ip_set_max; i++) {
1923 + if (ip_set_list[i] == NULL) {
1924 + if (*id == IP_SET_INVALID_ID)
1925 + *id = *index = i;
1926 + } else if (strcmp(name, ip_set_list[i]->name) == 0)
1927 + /* Name clash */
1928 + return -EEXIST;
1929 + }
1930 + if (*id == IP_SET_INVALID_ID)
1931 + /* No free slot remained */
1932 + return -ERANGE;
1933 + /* Check that index is usable as id (swapping) */
1934 + check:
1935 + for (i = 0; i < ip_set_max; i++) {
1936 + if (ip_set_list[i] != NULL
1937 + && ip_set_list[i]->id == *id) {
1938 + *id = i;
1939 + goto check;
1940 + }
1941 + }
1942 + return 0;
1943 +}
1944 +
1945 +/*
1946 + * Create a set
1947 + */
1948 +static int
1949 +ip_set_create(const char *name,
1950 + const char *typename,
1951 + ip_set_id_t restore,
1952 + const void *data,
1953 + size_t size)
1954 +{
1955 + struct ip_set *set;
1956 + ip_set_id_t index = 0, id;
1957 + int res = 0;
1958 +
1959 + DP("setname: %s, typename: %s, id: %u", name, typename, restore);
1960 + /*
1961 + * First, and without any locks, allocate and initialize
1962 + * a normal base set structure.
1963 + */
1964 + set = kmalloc(sizeof(struct ip_set), GFP_KERNEL);
1965 + if (!set)
1966 + return -ENOMEM;
1967 + set->lock = RW_LOCK_UNLOCKED;
1968 + strncpy(set->name, name, IP_SET_MAXNAMELEN);
1969 + set->binding = IP_SET_INVALID_ID;
1970 + atomic_set(&set->ref, 0);
1971 +
1972 + /*
1973 + * Next, take the &ip_set_lock, check that we know the type,
1974 + * and take a reference on the type, to make sure it
1975 + * stays available while constructing our new set.
1976 + *
1977 + * After referencing the type, we drop the &ip_set_lock,
1978 + * and let the new set construction run without locks.
1979 + */
1980 + set->type = find_set_type_rlock(typename);
1981 + if (set->type == NULL) {
1982 + /* Try loading the module */
1983 + char modulename[IP_SET_MAXNAMELEN + strlen("ip_set_") + 1];
1984 + strcpy(modulename, "ip_set_");
1985 + strcat(modulename, typename);
1986 + DP("try to load %s", modulename);
1987 + request_module(modulename);
1988 + set->type = find_set_type_rlock(typename);
1989 + }
1990 + if (set->type == NULL) {
1991 + ip_set_printk("no set type '%s', set '%s' not created",
1992 + typename, name);
1993 + res = -ENOENT;
1994 + goto out;
1995 + }
1996 + if (!try_module_get(set->type->me)) {
1997 + read_unlock_bh(&ip_set_lock);
1998 + res = -EFAULT;
1999 + goto out;
2000 + }
2001 + read_unlock_bh(&ip_set_lock);
2002 +
2003 + /*
2004 + * Without holding any locks, create private part.
2005 + */
2006 + res = set->type->create(set, data, size);
2007 + if (res != 0)
2008 + goto put_out;
2009 +
2010 + /* BTW, res==0 here. */
2011 +
2012 + /*
2013 + * Here, we have a valid, constructed set. &ip_set_lock again,
2014 + * find free id/index and check that it is not already in
2015 + * ip_set_list.
2016 + */
2017 + write_lock_bh(&ip_set_lock);
2018 + if ((res = find_free_id(set->name, &index, &id)) != 0) {
2019 + DP("no free id!");
2020 + goto cleanup;
2021 + }
2022 +
2023 + /* Make sure restore gets the same index */
2024 + if (restore != IP_SET_INVALID_ID && index != restore) {
2025 + DP("Can't restore, sets are screwed up");
2026 + res = -ERANGE;
2027 + goto cleanup;
2028 + }
2029 +
2030 + /*
2031 + * Finally! Add our shiny new set to the list, and be done.
2032 + */
2033 + DP("create: '%s' created with index %u, id %u!", set->name, index, id);
2034 + set->id = id;
2035 + ip_set_list[index] = set;
2036 + write_unlock_bh(&ip_set_lock);
2037 + return res;
2038 +
2039 + cleanup:
2040 + write_unlock_bh(&ip_set_lock);
2041 + set->type->destroy(set);
2042 + put_out:
2043 + module_put(set->type->me);
2044 + out:
2045 + kfree(set);
2046 + return res;
2047 +}
2048 +
2049 +/*
2050 + * Destroy a given existing set
2051 + */
2052 +static void
2053 +ip_set_destroy_set(ip_set_id_t index)
2054 +{
2055 + struct ip_set *set = ip_set_list[index];
2056 +
2057 + IP_SET_ASSERT(set);
2058 + DP("set: %s", set->name);
2059 + write_lock_bh(&ip_set_lock);
2060 + FOREACH_HASH_RW_DO(__set_hash_del_byid, set->id);
2061 + if (set->binding != IP_SET_INVALID_ID)
2062 + __ip_set_put(set->binding);
2063 + ip_set_list[index] = NULL;
2064 + write_unlock_bh(&ip_set_lock);
2065 +
2066 + /* Must call it without holding any lock */
2067 + set->type->destroy(set);
2068 + module_put(set->type->me);
2069 + kfree(set);
2070 +}
2071 +
2072 +/*
2073 + * Destroy a set - or all sets
2074 + * Sets must not be referenced/used.
2075 + */
2076 +static int
2077 +ip_set_destroy(ip_set_id_t index)
2078 +{
2079 + ip_set_id_t i;
2080 +
2081 + /* ref modification always protected by the mutex */
2082 + if (index != IP_SET_INVALID_ID) {
2083 + if (atomic_read(&ip_set_list[index]->ref))
2084 + return -EBUSY;
2085 + ip_set_destroy_set(index);
2086 + } else {
2087 + for (i = 0; i < ip_set_max; i++) {
2088 + if (ip_set_list[i] != NULL
2089 + && (atomic_read(&ip_set_list[i]->ref)))
2090 + return -EBUSY;
2091 + }
2092 +
2093 + for (i = 0; i < ip_set_max; i++) {
2094 + if (ip_set_list[i] != NULL)
2095 + ip_set_destroy_set(i);
2096 + }
2097 + }
2098 + return 0;
2099 +}
2100 +
2101 +static void
2102 +ip_set_flush_set(struct ip_set *set)
2103 +{
2104 + DP("set: %s %u", set->name, set->id);
2105 +
2106 + write_lock_bh(&set->lock);
2107 + set->type->flush(set);
2108 + write_unlock_bh(&set->lock);
2109 +}
2110 +
2111 +/*
2112 + * Flush data in a set - or in all sets
2113 + */
2114 +static int
2115 +ip_set_flush(ip_set_id_t index)
2116 +{
2117 + if (index != IP_SET_INVALID_ID) {
2118 + IP_SET_ASSERT(ip_set_list[index]);
2119 + ip_set_flush_set(ip_set_list[index]);
2120 + } else
2121 + FOREACH_SET_DO(ip_set_flush_set);
2122 +
2123 + return 0;
2124 +}
2125 +
2126 +/* Rename a set */
2127 +static int
2128 +ip_set_rename(ip_set_id_t index, const char *name)
2129 +{
2130 + struct ip_set *set = ip_set_list[index];
2131 + ip_set_id_t i;
2132 + int res = 0;
2133 +
2134 + DP("set: %s to %s", set->name, name);
2135 + write_lock_bh(&ip_set_lock);
2136 + for (i = 0; i < ip_set_max; i++) {
2137 + if (ip_set_list[i] != NULL
2138 + && strncmp(ip_set_list[i]->name,
2139 + name,
2140 + IP_SET_MAXNAMELEN - 1) == 0) {
2141 + res = -EEXIST;
2142 + goto unlock;
2143 + }
2144 + }
2145 + strncpy(set->name, name, IP_SET_MAXNAMELEN);
2146 + unlock:
2147 + write_unlock_bh(&ip_set_lock);
2148 + return res;
2149 +}
2150 +
2151 +/*
2152 + * Swap two sets so that name/index points to the other.
2153 + * References are also swapped.
2154 + */
2155 +static int
2156 +ip_set_swap(ip_set_id_t from_index, ip_set_id_t to_index)
2157 +{
2158 + struct ip_set *from = ip_set_list[from_index];
2159 + struct ip_set *to = ip_set_list[to_index];
2160 + char from_name[IP_SET_MAXNAMELEN];
2161 + u_int32_t from_ref;
2162 +
2163 + DP("set: %s to %s", from->name, to->name);
2164 +	/* Features must not change. Artificial restriction. */
2165 + if (from->type->features != to->type->features)
2166 + return -ENOEXEC;
2167 +
2168 + /* No magic here: ref munging protected by the mutex */
2169 + write_lock_bh(&ip_set_lock);
2170 + strncpy(from_name, from->name, IP_SET_MAXNAMELEN);
2171 + from_ref = atomic_read(&from->ref);
2172 +
2173 + strncpy(from->name, to->name, IP_SET_MAXNAMELEN);
2174 + atomic_set(&from->ref, atomic_read(&to->ref));
2175 + strncpy(to->name, from_name, IP_SET_MAXNAMELEN);
2176 + atomic_set(&to->ref, from_ref);
2177 +
2178 + ip_set_list[from_index] = to;
2179 + ip_set_list[to_index] = from;
2180 +
2181 + write_unlock_bh(&ip_set_lock);
2182 + return 0;
2183 +}
2184 +
2185 +/*
2186 + * List set data
2187 + */
2188 +
2189 +static inline void
2190 +__set_hash_bindings_size_list(struct ip_set_hash *set_hash,
2191 + ip_set_id_t id, size_t *size)
2192 +{
2193 + if (set_hash->id == id)
2194 + *size += sizeof(struct ip_set_hash_list);
2195 +}
2196 +
2197 +static inline void
2198 +__set_hash_bindings_size_save(struct ip_set_hash *set_hash,
2199 + ip_set_id_t id, size_t *size)
2200 +{
2201 + if (set_hash->id == id)
2202 + *size += sizeof(struct ip_set_hash_save);
2203 +}
2204 +
2205 +static inline void
2206 +__set_hash_bindings(struct ip_set_hash *set_hash,
2207 + ip_set_id_t id, void *data, int *used)
2208 +{
2209 + if (set_hash->id == id) {
2210 + struct ip_set_hash_list *hash_list =
2211 + (struct ip_set_hash_list *)(data + *used);
2212 +
2213 + hash_list->ip = set_hash->ip;
2214 + hash_list->binding = set_hash->binding;
2215 + *used += sizeof(struct ip_set_hash_list);
2216 + }
2217 +}
2218 +
2219 +static int ip_set_list_set(ip_set_id_t index,
2220 + void *data,
2221 + int *used,
2222 + int len)
2223 +{
2224 + struct ip_set *set = ip_set_list[index];
2225 + struct ip_set_list *set_list;
2226 +
2227 + /* Pointer to our header */
2228 + set_list = (struct ip_set_list *) (data + *used);
2229 +
2230 + DP("set: %s, used: %d %p %p", set->name, *used, data, data + *used);
2231 +
2232 + /* Get and ensure header size */
2233 + if (*used + sizeof(struct ip_set_list) > len)
2234 + goto not_enough_mem;
2235 + *used += sizeof(struct ip_set_list);
2236 +
2237 + read_lock_bh(&set->lock);
2238 + /* Get and ensure set specific header size */
2239 + set_list->header_size = set->type->header_size;
2240 + if (*used + set_list->header_size > len)
2241 + goto unlock_set;
2242 +
2243 + /* Fill in the header */
2244 + set_list->index = index;
2245 + set_list->binding = set->binding;
2246 + set_list->ref = atomic_read(&set->ref);
2247 +
2248 +	/* Fill in set specific header data */
2249 + set->type->list_header(set, data + *used);
2250 + *used += set_list->header_size;
2251 +
2252 + /* Get and ensure set specific members size */
2253 + set_list->members_size = set->type->list_members_size(set);
2254 + if (*used + set_list->members_size > len)
2255 + goto unlock_set;
2256 +
2257 +	/* Fill in set specific members data */
2258 + set->type->list_members(set, data + *used);
2259 + *used += set_list->members_size;
2260 + read_unlock_bh(&set->lock);
2261 +
2262 + /* Bindings */
2263 +
2264 + /* Get and ensure set specific bindings size */
2265 + set_list->bindings_size = 0;
2266 + FOREACH_HASH_DO(__set_hash_bindings_size_list,
2267 + set->id, &set_list->bindings_size);
2268 + if (*used + set_list->bindings_size > len)
2269 + goto not_enough_mem;
2270 +
2271 +	/* Fill in set specific bindings data */
2272 + FOREACH_HASH_DO(__set_hash_bindings, set->id, data, used);
2273 +
2274 + return 0;
2275 +
2276 + unlock_set:
2277 + read_unlock_bh(&set->lock);
2278 + not_enough_mem:
2279 + DP("not enough mem, try again");
2280 + return -EAGAIN;
2281 +}
2282 +
2283 +/*
2284 + * Save sets
2285 + */
2286 +static int ip_set_save_set(ip_set_id_t index,
2287 + void *data,
2288 + int *used,
2289 + int len)
2290 +{
2291 + struct ip_set *set;
2292 + struct ip_set_save *set_save;
2293 +
2294 + /* Pointer to our header */
2295 + set_save = (struct ip_set_save *) (data + *used);
2296 +
2297 + /* Get and ensure header size */
2298 + if (*used + sizeof(struct ip_set_save) > len)
2299 + goto not_enough_mem;
2300 + *used += sizeof(struct ip_set_save);
2301 +
2302 + set = ip_set_list[index];
2303 + DP("set: %s, used: %u(%u) %p %p", set->name, *used, len,
2304 + data, data + *used);
2305 +
2306 + read_lock_bh(&set->lock);
2307 + /* Get and ensure set specific header size */
2308 + set_save->header_size = set->type->header_size;
2309 + if (*used + set_save->header_size > len)
2310 + goto unlock_set;
2311 +
2312 + /* Fill in the header */
2313 + set_save->index = index;
2314 + set_save->binding = set->binding;
2315 +
2316 +	/* Fill in set specific header data */
2317 + set->type->list_header(set, data + *used);
2318 + *used += set_save->header_size;
2319 +
2320 + DP("set header filled: %s, used: %u(%u) %p %p", set->name, *used,
2321 + set_save->header_size, data, data + *used);
2322 + /* Get and ensure set specific members size */
2323 + set_save->members_size = set->type->list_members_size(set);
2324 + if (*used + set_save->members_size > len)
2325 + goto unlock_set;
2326 +
2327 +	/* Fill in set specific members data */
2328 + set->type->list_members(set, data + *used);
2329 + *used += set_save->members_size;
2330 + read_unlock_bh(&set->lock);
2331 + DP("set members filled: %s, used: %u(%u) %p %p", set->name, *used,
2332 + set_save->members_size, data, data + *used);
2333 + return 0;
2334 +
2335 + unlock_set:
2336 + read_unlock_bh(&set->lock);
2337 + not_enough_mem:
2338 + DP("not enough mem, try again");
2339 + return -EAGAIN;
2340 +}
2341 +
2342 +static inline void
2343 +__set_hash_save_bindings(struct ip_set_hash *set_hash,
2344 + ip_set_id_t id,
2345 + void *data,
2346 + int *used,
2347 + int len,
2348 + int *res)
2349 +{
2350 + if (*res == 0
2351 + && (id == IP_SET_INVALID_ID || set_hash->id == id)) {
2352 + struct ip_set_hash_save *hash_save =
2353 + (struct ip_set_hash_save *)(data + *used);
2354 + /* Ensure bindings size */
2355 + if (*used + sizeof(struct ip_set_hash_save) > len) {
2356 + *res = -ENOMEM;
2357 + return;
2358 + }
2359 + hash_save->id = set_hash->id;
2360 + hash_save->ip = set_hash->ip;
2361 + hash_save->binding = set_hash->binding;
2362 + *used += sizeof(struct ip_set_hash_save);
2363 + }
2364 +}
2365 +
2366 +static int ip_set_save_bindings(ip_set_id_t index,
2367 + void *data,
2368 + int *used,
2369 + int len)
2370 +{
2371 + int res = 0;
2372 + struct ip_set_save *set_save;
2373 +
2374 + DP("used %u, len %u", *used, len);
2375 + /* Get and ensure header size */
2376 + if (*used + sizeof(struct ip_set_save) > len)
2377 + return -ENOMEM;
2378 +
2379 + /* Marker */
2380 + set_save = (struct ip_set_save *) (data + *used);
2381 + set_save->index = IP_SET_INVALID_ID;
2382 + set_save->header_size = 0;
2383 + set_save->members_size = 0;
2384 + *used += sizeof(struct ip_set_save);
2385 +
2386 + DP("marker added used %u, len %u", *used, len);
2387 + /* Fill in bindings data */
2388 + if (index != IP_SET_INVALID_ID)
2389 + /* Sets are identified by id in hash */
2390 + index = ip_set_list[index]->id;
2391 + FOREACH_HASH_DO(__set_hash_save_bindings, index, data, used, len, &res);
2392 +
2393 + return res;
2394 +}
2395 +
2396 +/*
2397 + * Restore sets
2398 + */
2399 +static int ip_set_restore(void *data,
2400 + int len)
2401 +{
2402 + int res = 0;
2403 + int line = 0, used = 0, members_size;
2404 + struct ip_set *set;
2405 + struct ip_set_hash_save *hash_save;
2406 + struct ip_set_restore *set_restore;
2407 + ip_set_id_t index;
2408 +
2409 + /* Loop to restore sets */
2410 + while (1) {
2411 + line++;
2412 +
2413 +		DP("%u %zu %u", used, sizeof(struct ip_set_restore), len);
2414 + /* Get and ensure header size */
2415 + if (used + sizeof(struct ip_set_restore) > len)
2416 + return line;
2417 + set_restore = (struct ip_set_restore *) (data + used);
2418 + used += sizeof(struct ip_set_restore);
2419 +
2420 + /* Ensure data size */
2421 + if (used
2422 + + set_restore->header_size
2423 + + set_restore->members_size > len)
2424 + return line;
2425 +
2426 + /* Check marker */
2427 + if (set_restore->index == IP_SET_INVALID_ID) {
2428 + line--;
2429 + goto bindings;
2430 + }
2431 +
2432 + /* Try to create the set */
2433 + DP("restore %s %s", set_restore->name, set_restore->typename);
2434 + res = ip_set_create(set_restore->name,
2435 + set_restore->typename,
2436 + set_restore->index,
2437 + data + used,
2438 + set_restore->header_size);
2439 +
2440 + if (res != 0)
2441 + return line;
2442 + used += set_restore->header_size;
2443 +
2444 + index = ip_set_find_byindex(set_restore->index);
2445 + DP("index %u, restore_index %u", index, set_restore->index);
2446 + if (index != set_restore->index)
2447 + return line;
2448 + /* Try to restore members data */
2449 + set = ip_set_list[index];
2450 + members_size = 0;
2451 + DP("members_size %u reqsize %u",
2452 + set_restore->members_size, set->type->reqsize);
2453 + while (members_size + set->type->reqsize <=
2454 + set_restore->members_size) {
2455 + line++;
2456 + DP("members: %u, line %u", members_size, line);
2457 + res = __ip_set_addip(index,
2458 + data + used + members_size,
2459 + set->type->reqsize);
2460 + if (!(res == 0 || res == -EEXIST))
2461 + return line;
2462 + members_size += set->type->reqsize;
2463 + }
2464 +
2465 + DP("members_size %u %u",
2466 + set_restore->members_size, members_size);
2467 + if (members_size != set_restore->members_size)
2468 + return line++;
2469 + used += set_restore->members_size;
2470 + }
2471 +
2472 + bindings:
2473 + /* Loop to restore bindings */
2474 + while (used < len) {
2475 + line++;
2476 +
2477 + DP("restore binding, line %u", line);
2478 + /* Get and ensure size */
2479 + if (used + sizeof(struct ip_set_hash_save) > len)
2480 + return line;
2481 + hash_save = (struct ip_set_hash_save *) (data + used);
2482 + used += sizeof(struct ip_set_hash_save);
2483 +
2484 + /* hash_save->id is used to store the index */
2485 + index = ip_set_find_byindex(hash_save->id);
2486 + DP("restore binding index %u, id %u, %u -> %u",
2487 + index, hash_save->id, hash_save->ip, hash_save->binding);
2488 + if (index != hash_save->id)
2489 + return line;
2490 + if (ip_set_find_byindex(hash_save->binding) == IP_SET_INVALID_ID) {
2491 + DP("corrupt binding set index %u", hash_save->binding);
2492 + return line;
2493 + }
2494 + set = ip_set_list[hash_save->id];
2495 + /* Null valued IP means default binding */
2496 + if (hash_save->ip)
2497 + res = ip_set_hash_add(set->id,
2498 + hash_save->ip,
2499 + hash_save->binding);
2500 + else {
2501 + IP_SET_ASSERT(set->binding == IP_SET_INVALID_ID);
2502 + write_lock_bh(&ip_set_lock);
2503 + set->binding = hash_save->binding;
2504 + __ip_set_get(set->binding);
2505 + write_unlock_bh(&ip_set_lock);
2506 + DP("default binding: %u", set->binding);
2507 + }
2508 + if (res != 0)
2509 + return line;
2510 + }
2511 + if (used != len)
2512 + return line;
2513 +
2514 + return 0;
2515 +}
2516 +
2517 +static int
2518 +ip_set_sockfn_set(struct sock *sk, int optval, void *user, unsigned int len)
2519 +{
2520 + void *data;
2521 + int res = 0; /* Assume OK */
2522 + unsigned *op;
2523 + struct ip_set_req_adt *req_adt;
2524 + ip_set_id_t index = IP_SET_INVALID_ID;
2525 + int (*adtfn)(ip_set_id_t index,
2526 + const void *data, size_t size);
2527 + struct fn_table {
2528 + int (*fn)(ip_set_id_t index,
2529 + const void *data, size_t size);
2530 + } adtfn_table[] =
2531 +	{ { ip_set_addip }, { ip_set_delip }, { ip_set_testip },
2532 +	  { ip_set_bindip }, { ip_set_unbindip }, { ip_set_testbind },
2533 + };
2534 +
2535 + DP("optval=%d, user=%p, len=%d", optval, user, len);
2536 + if (!capable(CAP_NET_ADMIN))
2537 + return -EPERM;
2538 + if (optval != SO_IP_SET)
2539 + return -EBADF;
2540 + if (len <= sizeof(unsigned)) {
2541 + ip_set_printk("short userdata (want >%zu, got %u)",
2542 + sizeof(unsigned), len);
2543 + return -EINVAL;
2544 + }
2545 + data = vmalloc(len);
2546 + if (!data) {
2547 + DP("out of mem for %u bytes", len);
2548 + return -ENOMEM;
2549 + }
2550 + if (copy_from_user(data, user, len) != 0) {
2551 + res = -EFAULT;
2552 + goto done;
2553 + }
2554 + if (down_interruptible(&ip_set_app_mutex)) {
2555 + res = -EINTR;
2556 + goto done;
2557 + }
2558 +
2559 + op = (unsigned *)data;
2560 + DP("op=%x", *op);
2561 +
2562 + if (*op < IP_SET_OP_VERSION) {
2563 + /* Check the version at the beginning of operations */
2564 + struct ip_set_req_version *req_version =
2565 + (struct ip_set_req_version *) data;
2566 + if (req_version->version != IP_SET_PROTOCOL_VERSION) {
2567 + res = -EPROTO;
2568 + goto done;
2569 + }
2570 + }
2571 +
2572 + switch (*op) {
2573 + case IP_SET_OP_CREATE:{
2574 + struct ip_set_req_create *req_create
2575 + = (struct ip_set_req_create *) data;
2576 +
2577 + if (len < sizeof(struct ip_set_req_create)) {
2578 + ip_set_printk("short CREATE data (want >=%zu, got %u)",
2579 + sizeof(struct ip_set_req_create), len);
2580 + res = -EINVAL;
2581 + goto done;
2582 + }
2583 + req_create->name[IP_SET_MAXNAMELEN - 1] = '\0';
2584 + req_create->typename[IP_SET_MAXNAMELEN - 1] = '\0';
2585 + res = ip_set_create(req_create->name,
2586 + req_create->typename,
2587 + IP_SET_INVALID_ID,
2588 + data + sizeof(struct ip_set_req_create),
2589 + len - sizeof(struct ip_set_req_create));
2590 + goto done;
2591 + }
2592 + case IP_SET_OP_DESTROY:{
2593 + struct ip_set_req_std *req_destroy
2594 + = (struct ip_set_req_std *) data;
2595 +
2596 + if (len != sizeof(struct ip_set_req_std)) {
2597 + ip_set_printk("invalid DESTROY data (want %zu, got %u)",
2598 + sizeof(struct ip_set_req_std), len);
2599 + res = -EINVAL;
2600 + goto done;
2601 + }
2602 + if (strcmp(req_destroy->name, IPSET_TOKEN_ALL) == 0) {
2603 + /* Destroy all sets */
2604 + index = IP_SET_INVALID_ID;
2605 + } else {
2606 + req_destroy->name[IP_SET_MAXNAMELEN - 1] = '\0';
2607 + index = ip_set_find_byname(req_destroy->name);
2608 +
2609 + if (index == IP_SET_INVALID_ID) {
2610 + res = -ENOENT;
2611 + goto done;
2612 + }
2613 + }
2614 +
2615 + res = ip_set_destroy(index);
2616 + goto done;
2617 + }
2618 + case IP_SET_OP_FLUSH:{
2619 + struct ip_set_req_std *req_flush =
2620 + (struct ip_set_req_std *) data;
2621 +
2622 + if (len != sizeof(struct ip_set_req_std)) {
2623 + ip_set_printk("invalid FLUSH data (want %zu, got %u)",
2624 + sizeof(struct ip_set_req_std), len);
2625 + res = -EINVAL;
2626 + goto done;
2627 + }
2628 + if (strcmp(req_flush->name, IPSET_TOKEN_ALL) == 0) {
2629 + /* Flush all sets */
2630 + index = IP_SET_INVALID_ID;
2631 + } else {
2632 + req_flush->name[IP_SET_MAXNAMELEN - 1] = '\0';
2633 + index = ip_set_find_byname(req_flush->name);
2634 +
2635 + if (index == IP_SET_INVALID_ID) {
2636 + res = -ENOENT;
2637 + goto done;
2638 + }
2639 + }
2640 + res = ip_set_flush(index);
2641 + goto done;
2642 + }
2643 + case IP_SET_OP_RENAME:{
2644 + struct ip_set_req_create *req_rename
2645 + = (struct ip_set_req_create *) data;
2646 +
2647 + if (len != sizeof(struct ip_set_req_create)) {
2648 + ip_set_printk("invalid RENAME data (want %zu, got %u)",
2649 + sizeof(struct ip_set_req_create), len);
2650 + res = -EINVAL;
2651 + goto done;
2652 + }
2653 +
2654 + req_rename->name[IP_SET_MAXNAMELEN - 1] = '\0';
2655 + req_rename->typename[IP_SET_MAXNAMELEN - 1] = '\0';
2656 +
2657 + index = ip_set_find_byname(req_rename->name);
2658 + if (index == IP_SET_INVALID_ID) {
2659 + res = -ENOENT;
2660 + goto done;
2661 + }
2662 + res = ip_set_rename(index, req_rename->typename);
2663 + goto done;
2664 + }
2665 + case IP_SET_OP_SWAP:{
2666 + struct ip_set_req_create *req_swap
2667 + = (struct ip_set_req_create *) data;
2668 + ip_set_id_t to_index;
2669 +
2670 + if (len != sizeof(struct ip_set_req_create)) {
2671 + ip_set_printk("invalid SWAP data (want %zu, got %u)",
2672 + sizeof(struct ip_set_req_create), len);
2673 + res = -EINVAL;
2674 + goto done;
2675 + }
2676 +
2677 + req_swap->name[IP_SET_MAXNAMELEN - 1] = '\0';
2678 + req_swap->typename[IP_SET_MAXNAMELEN - 1] = '\0';
2679 +
2680 + index = ip_set_find_byname(req_swap->name);
2681 + if (index == IP_SET_INVALID_ID) {
2682 + res = -ENOENT;
2683 + goto done;
2684 + }
2685 + to_index = ip_set_find_byname(req_swap->typename);
2686 + if (to_index == IP_SET_INVALID_ID) {
2687 + res = -ENOENT;
2688 + goto done;
2689 + }
2690 + res = ip_set_swap(index, to_index);
2691 + goto done;
2692 + }
2693 + default:
2694 + break; /* Set identified by id */
2695 + }
2696 +
2697 +	/* Here we may have add/del/test/bind/unbind/test_bind operations */
2698 + if (*op < IP_SET_OP_ADD_IP || *op > IP_SET_OP_TEST_BIND_SET) {
2699 + res = -EBADMSG;
2700 + goto done;
2701 + }
2702 + adtfn = adtfn_table[*op - IP_SET_OP_ADD_IP].fn;
2703 +
2704 + if (len < sizeof(struct ip_set_req_adt)) {
2705 + ip_set_printk("short data in adt request (want >=%zu, got %u)",
2706 + sizeof(struct ip_set_req_adt), len);
2707 + res = -EINVAL;
2708 + goto done;
2709 + }
2710 + req_adt = (struct ip_set_req_adt *) data;
2711 +
2712 + /* -U :all: :all:|:default: uses IP_SET_INVALID_ID */
2713 + if (!(*op == IP_SET_OP_UNBIND_SET
2714 + && req_adt->index == IP_SET_INVALID_ID)) {
2715 + index = ip_set_find_byindex(req_adt->index);
2716 + if (index == IP_SET_INVALID_ID) {
2717 + res = -ENOENT;
2718 + goto done;
2719 + }
2720 + }
2721 + res = adtfn(index, data, len);
2722 +
2723 + done:
2724 + up(&ip_set_app_mutex);
2725 + vfree(data);
2726 + if (res > 0)
2727 + res = 0;
2728 + DP("final result %d", res);
2729 + return res;
2730 +}
2731 +
2732 +static int
2733 +ip_set_sockfn_get(struct sock *sk, int optval, void *user, int *len)
2734 +{
2735 + int res = 0;
2736 + unsigned *op;
2737 + ip_set_id_t index = IP_SET_INVALID_ID;
2738 + void *data;
2739 + int copylen = *len;
2740 +
2741 + DP("optval=%d, user=%p, len=%d", optval, user, *len);
2742 + if (!capable(CAP_NET_ADMIN))
2743 + return -EPERM;
2744 + if (optval != SO_IP_SET)
2745 + return -EBADF;
2746 + if (*len < sizeof(unsigned)) {
2747 + ip_set_printk("short userdata (want >=%zu, got %d)",
2748 + sizeof(unsigned), *len);
2749 + return -EINVAL;
2750 + }
2751 + data = vmalloc(*len);
2752 + if (!data) {
2753 + DP("out of mem for %d bytes", *len);
2754 + return -ENOMEM;
2755 + }
2756 + if (copy_from_user(data, user, *len) != 0) {
2757 + res = -EFAULT;
2758 + goto done;
2759 + }
2760 + if (down_interruptible(&ip_set_app_mutex)) {
2761 + res = -EINTR;
2762 + goto done;
2763 + }
2764 +
2765 + op = (unsigned *) data;
2766 + DP("op=%x", *op);
2767 +
2768 + if (*op < IP_SET_OP_VERSION) {
2769 + /* Check the version at the beginning of operations */
2770 + struct ip_set_req_version *req_version =
2771 + (struct ip_set_req_version *) data;
2772 + if (req_version->version != IP_SET_PROTOCOL_VERSION) {
2773 + res = -EPROTO;
2774 + goto done;
2775 + }
2776 + }
2777 +
2778 + switch (*op) {
2779 + case IP_SET_OP_VERSION: {
2780 + struct ip_set_req_version *req_version =
2781 + (struct ip_set_req_version *) data;
2782 +
2783 + if (*len != sizeof(struct ip_set_req_version)) {
2784 + ip_set_printk("invalid VERSION (want %zu, got %d)",
2785 + sizeof(struct ip_set_req_version),
2786 + *len);
2787 + res = -EINVAL;
2788 + goto done;
2789 + }
2790 +
2791 + req_version->version = IP_SET_PROTOCOL_VERSION;
2792 + res = copy_to_user(user, req_version,
2793 + sizeof(struct ip_set_req_version));
2794 + goto done;
2795 + }
2796 + case IP_SET_OP_GET_BYNAME: {
2797 + struct ip_set_req_get_set *req_get
2798 + = (struct ip_set_req_get_set *) data;
2799 +
2800 + if (*len != sizeof(struct ip_set_req_get_set)) {
2801 + ip_set_printk("invalid GET_BYNAME (want %zu, got %d)",
2802 + sizeof(struct ip_set_req_get_set), *len);
2803 + res = -EINVAL;
2804 + goto done;
2805 + }
2806 + req_get->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
2807 + index = ip_set_find_byname(req_get->set.name);
2808 + req_get->set.index = index;
2809 + goto copy;
2810 + }
2811 + case IP_SET_OP_GET_BYINDEX: {
2812 + struct ip_set_req_get_set *req_get
2813 + = (struct ip_set_req_get_set *) data;
2814 +
2815 + if (*len != sizeof(struct ip_set_req_get_set)) {
2816 + ip_set_printk("invalid GET_BYINDEX (want %zu, got %d)",
2817 + sizeof(struct ip_set_req_get_set), *len);
2818 + res = -EINVAL;
2819 + goto done;
2820 + }
2821 + req_get->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
2822 + index = ip_set_find_byindex(req_get->set.index);
2823 + strncpy(req_get->set.name,
2824 + index == IP_SET_INVALID_ID ? ""
2825 + : ip_set_list[index]->name, IP_SET_MAXNAMELEN);
2826 + goto copy;
2827 + }
2828 + case IP_SET_OP_ADT_GET: {
2829 + struct ip_set_req_adt_get *req_get
2830 + = (struct ip_set_req_adt_get *) data;
2831 +
2832 + if (*len != sizeof(struct ip_set_req_adt_get)) {
2833 + ip_set_printk("invalid ADT_GET (want %zu, got %d)",
2834 + sizeof(struct ip_set_req_adt_get), *len);
2835 + res = -EINVAL;
2836 + goto done;
2837 + }
2838 + req_get->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
2839 + index = ip_set_find_byname(req_get->set.name);
2840 + if (index != IP_SET_INVALID_ID) {
2841 + req_get->set.index = index;
2842 + strncpy(req_get->typename,
2843 + ip_set_list[index]->type->typename,
2844 + IP_SET_MAXNAMELEN - 1);
2845 + } else {
2846 + res = -ENOENT;
2847 + goto done;
2848 + }
2849 + goto copy;
2850 + }
2851 + case IP_SET_OP_MAX_SETS: {
2852 + struct ip_set_req_max_sets *req_max_sets
2853 + = (struct ip_set_req_max_sets *) data;
2854 + ip_set_id_t i;
2855 +
2856 + if (*len != sizeof(struct ip_set_req_max_sets)) {
2857 + ip_set_printk("invalid MAX_SETS (want %zu, got %d)",
2858 + sizeof(struct ip_set_req_max_sets), *len);
2859 + res = -EINVAL;
2860 + goto done;
2861 + }
2862 +
2863 + if (strcmp(req_max_sets->set.name, IPSET_TOKEN_ALL) == 0) {
2864 + req_max_sets->set.index = IP_SET_INVALID_ID;
2865 + } else {
2866 + req_max_sets->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
2867 + req_max_sets->set.index =
2868 + ip_set_find_byname(req_max_sets->set.name);
2869 + if (req_max_sets->set.index == IP_SET_INVALID_ID) {
2870 + res = -ENOENT;
2871 + goto done;
2872 + }
2873 + }
2874 + req_max_sets->max_sets = ip_set_max;
2875 + req_max_sets->sets = 0;
2876 + for (i = 0; i < ip_set_max; i++) {
2877 + if (ip_set_list[i] != NULL)
2878 + req_max_sets->sets++;
2879 + }
2880 + goto copy;
2881 + }
2882 + case IP_SET_OP_LIST_SIZE:
2883 + case IP_SET_OP_SAVE_SIZE: {
2884 + struct ip_set_req_setnames *req_setnames
2885 + = (struct ip_set_req_setnames *) data;
2886 + struct ip_set_name_list *name_list;
2887 + struct ip_set *set;
2888 + ip_set_id_t i;
2889 + int used;
2890 +
2891 + if (*len < sizeof(struct ip_set_req_setnames)) {
2892 + ip_set_printk("short LIST_SIZE (want >=%zu, got %d)",
2893 + sizeof(struct ip_set_req_setnames), *len);
2894 + res = -EINVAL;
2895 + goto done;
2896 + }
2897 +
2898 + req_setnames->size = 0;
2899 + used = sizeof(struct ip_set_req_setnames);
2900 + for (i = 0; i < ip_set_max; i++) {
2901 + if (ip_set_list[i] == NULL)
2902 + continue;
2903 + name_list = (struct ip_set_name_list *)
2904 + (data + used);
2905 + used += sizeof(struct ip_set_name_list);
2906 + if (used > copylen) {
2907 + res = -EAGAIN;
2908 + goto done;
2909 + }
2910 + set = ip_set_list[i];
2911 + /* Fill in index, name, etc. */
2912 + name_list->index = i;
2913 + name_list->id = set->id;
2914 + strncpy(name_list->name,
2915 + set->name,
2916 + IP_SET_MAXNAMELEN - 1);
2917 + strncpy(name_list->typename,
2918 + set->type->typename,
2919 + IP_SET_MAXNAMELEN - 1);
2920 + DP("filled %s of type %s, index %u\n",
2921 + name_list->name, name_list->typename,
2922 + name_list->index);
2923 + if (!(req_setnames->index == IP_SET_INVALID_ID
2924 + || req_setnames->index == i))
2925 + continue;
2926 + /* Update size */
2927 + switch (*op) {
2928 + case IP_SET_OP_LIST_SIZE: {
2929 + req_setnames->size += sizeof(struct ip_set_list)
2930 + + set->type->header_size
2931 + + set->type->list_members_size(set);
2932 + /* Sets are identified by id in the hash */
2933 + FOREACH_HASH_DO(__set_hash_bindings_size_list,
2934 + set->id, &req_setnames->size);
2935 + break;
2936 + }
2937 + case IP_SET_OP_SAVE_SIZE: {
2938 + req_setnames->size += sizeof(struct ip_set_save)
2939 + + set->type->header_size
2940 + + set->type->list_members_size(set);
2941 + FOREACH_HASH_DO(__set_hash_bindings_size_save,
2942 + set->id, &req_setnames->size);
2943 + break;
2944 + }
2945 + default:
2946 + break;
2947 + }
2948 + }
2949 + if (copylen != used) {
2950 + res = -EAGAIN;
2951 + goto done;
2952 + }
2953 + goto copy;
2954 + }
2955 + case IP_SET_OP_LIST: {
2956 + struct ip_set_req_list *req_list
2957 + = (struct ip_set_req_list *) data;
2958 + ip_set_id_t i;
2959 + int used;
2960 +
2961 + if (*len < sizeof(struct ip_set_req_list)) {
2962 + ip_set_printk("short LIST (want >=%zu, got %d)",
2963 + sizeof(struct ip_set_req_list), *len);
2964 + res = -EINVAL;
2965 + goto done;
2966 + }
2967 + index = req_list->index;
2968 + if (index != IP_SET_INVALID_ID
2969 + && ip_set_find_byindex(index) != index) {
2970 + res = -ENOENT;
2971 + goto done;
2972 + }
2973 + used = 0;
2974 + if (index == IP_SET_INVALID_ID) {
2975 + /* List all sets */
2976 + for (i = 0; i < ip_set_max && res == 0; i++) {
2977 + if (ip_set_list[i] != NULL)
2978 + res = ip_set_list_set(i, data, &used, *len);
2979 + }
2980 + } else {
2981 + /* List an individual set */
2982 + res = ip_set_list_set(index, data, &used, *len);
2983 + }
2984 + if (res != 0)
2985 + goto done;
2986 + else if (copylen != used) {
2987 + res = -EAGAIN;
2988 + goto done;
2989 + }
2990 + goto copy;
2991 + }
2992 + case IP_SET_OP_SAVE: {
2993 + struct ip_set_req_list *req_save
2994 + = (struct ip_set_req_list *) data;
2995 + ip_set_id_t i;
2996 + int used;
2997 +
2998 + if (*len < sizeof(struct ip_set_req_list)) {
2999 + ip_set_printk("short SAVE (want >=%zu, got %d)",
3000 + sizeof(struct ip_set_req_list), *len);
3001 + res = -EINVAL;
3002 + goto done;
3003 + }
3004 + index = req_save->index;
3005 + if (index != IP_SET_INVALID_ID
3006 + && ip_set_find_byindex(index) != index) {
3007 + res = -ENOENT;
3008 + goto done;
3009 + }
3010 + used = 0;
3011 + if (index == IP_SET_INVALID_ID) {
3012 + /* Save all sets */
3013 + for (i = 0; i < ip_set_max && res == 0; i++) {
3014 + if (ip_set_list[i] != NULL)
3015 + res = ip_set_save_set(i, data, &used, *len);
3016 + }
3017 + } else {
3018 + /* Save an individual set */
3019 + res = ip_set_save_set(index, data, &used, *len);
3020 + }
3021 + if (res == 0)
3022 + res = ip_set_save_bindings(index, data, &used, *len);
3023 +
3024 + if (res != 0)
3025 + goto done;
3026 + else if (copylen != used) {
3027 + res = -EAGAIN;
3028 + goto done;
3029 + }
3030 + goto copy;
3031 + }
3032 + case IP_SET_OP_RESTORE: {
3033 + struct ip_set_req_setnames *req_restore
3034 + = (struct ip_set_req_setnames *) data;
3035 + int line;
3036 +
3037 + if (*len < sizeof(struct ip_set_req_setnames)
3038 + || *len != req_restore->size) {
3039 + ip_set_printk("invalid RESTORE (want =%zu, got %d)",
3040 + req_restore->size, *len);
3041 + res = -EINVAL;
3042 + goto done;
3043 + }
3044 + line = ip_set_restore(data + sizeof(struct ip_set_req_setnames),
3045 + req_restore->size - sizeof(struct ip_set_req_setnames));
3046 + DP("ip_set_restore: %u", line);
3047 + if (line != 0) {
3048 + res = -EAGAIN;
3049 + req_restore->size = line;
3050 + copylen = sizeof(struct ip_set_req_setnames);
3051 + goto copy;
3052 + }
3053 + goto done;
3054 + }
3055 + default:
3056 + res = -EBADMSG;
3057 + goto done;
3058 + } /* end of switch(op) */
3059 +
3060 + copy:
3061 + DP("set %s, copylen %u", index != IP_SET_INVALID_ID
3062 + && ip_set_list[index]
3063 + ? ip_set_list[index]->name
3064 + : ":all:", copylen);
3065 + res = copy_to_user(user, data, copylen);
3066 +
3067 + done:
3068 + up(&ip_set_app_mutex);
3069 + vfree(data);
3070 + if (res > 0)
3071 + res = 0;
3072 + DP("final result %d", res);
3073 + return res;
3074 +}
3075 +
3076 +static struct nf_sockopt_ops so_set = {
3077 + .pf = PF_INET,
3078 + .set_optmin = SO_IP_SET,
3079 + .set_optmax = SO_IP_SET + 1,
3080 + .set = &ip_set_sockfn_set,
3081 + .get_optmin = SO_IP_SET,
3082 + .get_optmax = SO_IP_SET + 1,
3083 + .get = &ip_set_sockfn_get,
3084 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
3085 + .owner = THIS_MODULE,
3086 +#endif
3087 +};
3088 +
3089 +static int max_sets, hash_size;
3090 +module_param(max_sets, int, 0600);
3091 +MODULE_PARM_DESC(max_sets, "maximal number of sets");
3092 +module_param(hash_size, int, 0600);
3093 +MODULE_PARM_DESC(hash_size, "hash size for bindings");
3094 +MODULE_LICENSE("GPL");
3095 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
3096 +MODULE_DESCRIPTION("module implementing core IP set support");
3097 +
3098 +static int __init ip_set_init(void)
3099 +{
3100 + int res;
3101 + ip_set_id_t i;
3102 +
3103 + get_random_bytes(&ip_set_hash_random, 4);
3104 + if (max_sets)
3105 + ip_set_max = max_sets;
3106 + ip_set_list = vmalloc(sizeof(struct ip_set *) * ip_set_max);
3107 + if (!ip_set_list) {
3108 + printk(KERN_ERR "Unable to create ip_set_list\n");
3109 + return -ENOMEM;
3110 + }
3111 + memset(ip_set_list, 0, sizeof(struct ip_set *) * ip_set_max);
3112 + if (hash_size)
3113 + ip_set_bindings_hash_size = hash_size;
3114 + ip_set_hash = vmalloc(sizeof(struct list_head) * ip_set_bindings_hash_size);
3115 + if (!ip_set_hash) {
3116 + printk(KERN_ERR "Unable to create ip_set_hash\n");
3117 + vfree(ip_set_list);
3118 + return -ENOMEM;
3119 + }
3120 + for (i = 0; i < ip_set_bindings_hash_size; i++)
3121 + INIT_LIST_HEAD(&ip_set_hash[i]);
3122 +
3123 + INIT_LIST_HEAD(&set_type_list);
3124 +
3125 + res = nf_register_sockopt(&so_set);
3126 + if (res != 0) {
3127 + ip_set_printk("SO_SET registry failed: %d", res);
3128 + vfree(ip_set_list);
3129 + vfree(ip_set_hash);
3130 + return res;
3131 + }
3132 + return 0;
3133 +}
3134 +
3135 +static void __exit ip_set_fini(void)
3136 +{
3137 + /* There can't be any existing set or binding */
3138 + nf_unregister_sockopt(&so_set);
3139 + vfree(ip_set_list);
3140 + vfree(ip_set_hash);
3141 + DP("these are the famous last words");
3142 +}
3143 +
3144 +EXPORT_SYMBOL(ip_set_register_set_type);
3145 +EXPORT_SYMBOL(ip_set_unregister_set_type);
3146 +
3147 +EXPORT_SYMBOL(ip_set_get_byname);
3148 +EXPORT_SYMBOL(ip_set_get_byindex);
3149 +EXPORT_SYMBOL(ip_set_put);
3150 +
3151 +EXPORT_SYMBOL(ip_set_addip_kernel);
3152 +EXPORT_SYMBOL(ip_set_delip_kernel);
3153 +EXPORT_SYMBOL(ip_set_testip_kernel);
3154 +
3155 +module_init(ip_set_init);
3156 +module_exit(ip_set_fini);
3157 diff -Nru linux-2.6.23/net/ipv4/netfilter/ip_set_iphash.c linux-2.6.23.pom2patch.set/net/ipv4/netfilter/ip_set_iphash.c
3158 --- linux-2.6.23/net/ipv4/netfilter/ip_set_iphash.c 1970-01-01 01:00:00.000000000 +0100
3159 +++ linux-2.6.23.pom2patch.set/net/ipv4/netfilter/ip_set_iphash.c 2007-10-12 11:52:37.000000000 +0200
3160 @@ -0,0 +1,429 @@
3161 +/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
3162 + *
3163 + * This program is free software; you can redistribute it and/or modify
3164 + * it under the terms of the GNU General Public License version 2 as
3165 + * published by the Free Software Foundation.
3166 + */
3167 +
3168 +/* Kernel module implementing an ip hash set */
3169 +
3170 +#include <linux/module.h>
3171 +#include <linux/ip.h>
3172 +#include <linux/skbuff.h>
3173 +#include <linux/version.h>
3174 +#include <linux/jhash.h>
3175 +#include <linux/netfilter_ipv4/ip_tables.h>
3176 +#include <linux/netfilter_ipv4/ip_set.h>
3177 +#include <linux/errno.h>
3178 +#include <asm/uaccess.h>
3179 +#include <asm/bitops.h>
3180 +#include <linux/spinlock.h>
3181 +#include <linux/vmalloc.h>
3182 +#include <linux/random.h>
3183 +
3184 +#include <net/ip.h>
3185 +
3186 +#include <linux/netfilter_ipv4/ip_set_malloc.h>
3187 +#include <linux/netfilter_ipv4/ip_set_iphash.h>
3188 +
3189 +static int limit = MAX_RANGE;
3190 +
3191 +static inline __u32
3192 +jhash_ip(const struct ip_set_iphash *map, uint16_t i, ip_set_ip_t ip)
3193 +{
3194 + return jhash_1word(ip, *(((uint32_t *) map->initval) + i));
3195 +}
3196 +
3197 +static inline __u32
3198 +hash_id(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
3199 +{
3200 + struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
3201 + __u32 id;
3202 + u_int16_t i;
3203 + ip_set_ip_t *elem;
3204 +
3205 + *hash_ip = ip & map->netmask;
3206 + DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u, %u.%u.%u.%u",
3207 + set->name, HIPQUAD(ip), HIPQUAD(*hash_ip), HIPQUAD(map->netmask));
3208 +
3209 + for (i = 0; i < map->probes; i++) {
3210 + id = jhash_ip(map, i, *hash_ip) % map->hashsize;
3211 + DP("hash key: %u", id);
3212 + elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
3213 + if (*elem == *hash_ip)
3214 + return id;
3215 +		/* No shortcut when testing - there can be deleted
3216 +		 * entries. */
3217 + }
3218 + return UINT_MAX;
3219 +}
3220 +
3221 +static inline int
3222 +__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
3223 +{
3224 + return (ip && hash_id(set, ip, hash_ip) != UINT_MAX);
3225 +}
3226 +
3227 +static int
3228 +testip(struct ip_set *set, const void *data, size_t size,
3229 + ip_set_ip_t *hash_ip)
3230 +{
3231 + struct ip_set_req_iphash *req =
3232 + (struct ip_set_req_iphash *) data;
3233 +
3234 + if (size != sizeof(struct ip_set_req_iphash)) {
3235 + ip_set_printk("data length wrong (want %zu, have %zu)",
3236 + sizeof(struct ip_set_req_iphash),
3237 + size);
3238 + return -EINVAL;
3239 + }
3240 + return __testip(set, req->ip, hash_ip);
3241 +}
3242 +
3243 +static int
3244 +testip_kernel(struct ip_set *set,
3245 + const struct sk_buff *skb,
3246 + ip_set_ip_t *hash_ip,
3247 + const u_int32_t *flags,
3248 + unsigned char index)
3249 +{
3250 + return __testip(set,
3251 + ntohl(flags[index] & IPSET_SRC
3252 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
3253 + ? ip_hdr(skb)->saddr
3254 + : ip_hdr(skb)->daddr),
3255 +#else
3256 + ? skb->nh.iph->saddr
3257 + : skb->nh.iph->daddr),
3258 +#endif
3259 + hash_ip);
3260 +}
3261 +
3262 +static inline int
3263 +__addip(struct ip_set_iphash *map, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
3264 +{
3265 + __u32 probe;
3266 + u_int16_t i;
3267 + ip_set_ip_t *elem;
3268 +
3269 + if (!ip || map->elements >= limit)
3270 + return -ERANGE;
3271 +
3272 + *hash_ip = ip & map->netmask;
3273 +
3274 + for (i = 0; i < map->probes; i++) {
3275 + probe = jhash_ip(map, i, *hash_ip) % map->hashsize;
3276 + elem = HARRAY_ELEM(map->members, ip_set_ip_t *, probe);
3277 + if (*elem == *hash_ip)
3278 + return -EEXIST;
3279 + if (!*elem) {
3280 + *elem = *hash_ip;
3281 + map->elements++;
3282 + return 0;
3283 + }
3284 + }
3285 + /* Trigger rehashing */
3286 + return -EAGAIN;
3287 +}
3288 +
3289 +static int
3290 +addip(struct ip_set *set, const void *data, size_t size,
3291 + ip_set_ip_t *hash_ip)
3292 +{
3293 + struct ip_set_req_iphash *req =
3294 + (struct ip_set_req_iphash *) data;
3295 +
3296 + if (size != sizeof(struct ip_set_req_iphash)) {
3297 + ip_set_printk("data length wrong (want %zu, have %zu)",
3298 + sizeof(struct ip_set_req_iphash),
3299 + size);
3300 + return -EINVAL;
3301 + }
3302 + return __addip((struct ip_set_iphash *) set->data, req->ip, hash_ip);
3303 +}
3304 +
3305 +static int
3306 +addip_kernel(struct ip_set *set,
3307 + const struct sk_buff *skb,
3308 + ip_set_ip_t *hash_ip,
3309 + const u_int32_t *flags,
3310 + unsigned char index)
3311 +{
3312 + return __addip((struct ip_set_iphash *) set->data,
3313 + ntohl(flags[index] & IPSET_SRC
3314 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
3315 + ? ip_hdr(skb)->saddr
3316 + : ip_hdr(skb)->daddr),
3317 +#else
3318 + ? skb->nh.iph->saddr
3319 + : skb->nh.iph->daddr),
3320 +#endif
3321 + hash_ip);
3322 +}
3323 +
3324 +static int retry(struct ip_set *set)
3325 +{
3326 + struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
3327 + ip_set_ip_t hash_ip, *elem;
3328 + void *members;
3329 + u_int32_t i, hashsize = map->hashsize;
3330 + int res;
3331 + struct ip_set_iphash *tmp;
3332 +
3333 + if (map->resize == 0)
3334 + return -ERANGE;
3335 +
3336 + again:
3337 + res = 0;
3338 +
3339 + /* Calculate new hash size */
3340 + hashsize += (hashsize * map->resize)/100;
3341 + if (hashsize == map->hashsize)
3342 + hashsize++;
3343 +
3344 + ip_set_printk("rehashing of set %s triggered: "
3345 + "hashsize grows from %u to %u",
3346 + set->name, map->hashsize, hashsize);
3347 +
3348 + tmp = kmalloc(sizeof(struct ip_set_iphash)
3349 + + map->probes * sizeof(uint32_t), GFP_ATOMIC);
3350 + if (!tmp) {
3351 + DP("out of memory for %d bytes",
3352 + sizeof(struct ip_set_iphash)
3353 + + map->probes * sizeof(uint32_t));
3354 + return -ENOMEM;
3355 + }
3356 + tmp->members = harray_malloc(hashsize, sizeof(ip_set_ip_t), GFP_ATOMIC);
3357 + if (!tmp->members) {
3358 + DP("out of memory for %d bytes", hashsize * sizeof(ip_set_ip_t));
3359 + kfree(tmp);
3360 + return -ENOMEM;
3361 + }
3362 + tmp->hashsize = hashsize;
3363 + tmp->elements = 0;
3364 + tmp->probes = map->probes;
3365 + tmp->resize = map->resize;
3366 + tmp->netmask = map->netmask;
3367 + memcpy(tmp->initval, map->initval, map->probes * sizeof(uint32_t));
3368 +
3369 + write_lock_bh(&set->lock);
3370 + map = (struct ip_set_iphash *) set->data; /* Play safe */
3371 + for (i = 0; i < map->hashsize && res == 0; i++) {
3372 + elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
3373 + if (*elem)
3374 + res = __addip(tmp, *elem, &hash_ip);
3375 + }
3376 + if (res) {
3377 + /* Failure, try again */
3378 + write_unlock_bh(&set->lock);
3379 + harray_free(tmp->members);
3380 + kfree(tmp);
3381 + goto again;
3382 + }
3383 +
3384 + /* Success at resizing! */
3385 + members = map->members;
3386 +
3387 + map->hashsize = tmp->hashsize;
3388 + map->members = tmp->members;
3389 + write_unlock_bh(&set->lock);
3390 +
3391 + harray_free(members);
3392 + kfree(tmp);
3393 +
3394 + return 0;
3395 +}
3396 +
3397 +static inline int
3398 +__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
3399 +{
3400 + struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
3401 + ip_set_ip_t id, *elem;
3402 +
3403 + if (!ip)
3404 + return -ERANGE;
3405 +
3406 + id = hash_id(set, ip, hash_ip);
3407 + if (id == UINT_MAX)
3408 + return -EEXIST;
3409 +
3410 + elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
3411 + *elem = 0;
3412 + map->elements--;
3413 +
3414 + return 0;
3415 +}
3416 +
3417 +static int
3418 +delip(struct ip_set *set, const void *data, size_t size,
3419 + ip_set_ip_t *hash_ip)
3420 +{
3421 + struct ip_set_req_iphash *req =
3422 + (struct ip_set_req_iphash *) data;
3423 +
3424 + if (size != sizeof(struct ip_set_req_iphash)) {
3425 + ip_set_printk("data length wrong (want %zu, have %zu)",
3426 + sizeof(struct ip_set_req_iphash),
3427 + size);
3428 + return -EINVAL;
3429 + }
3430 + return __delip(set, req->ip, hash_ip);
3431 +}
3432 +
3433 +static int
3434 +delip_kernel(struct ip_set *set,
3435 + const struct sk_buff *skb,
3436 + ip_set_ip_t *hash_ip,
3437 + const u_int32_t *flags,
3438 + unsigned char index)
3439 +{
3440 + return __delip(set,
3441 + ntohl(flags[index] & IPSET_SRC
3442 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
3443 + ? ip_hdr(skb)->saddr
3444 + : ip_hdr(skb)->daddr),
3445 +#else
3446 + ? skb->nh.iph->saddr
3447 + : skb->nh.iph->daddr),
3448 +#endif
3449 + hash_ip);
3450 +}
3451 +
3452 +static int create(struct ip_set *set, const void *data, size_t size)
3453 +{
3454 + struct ip_set_req_iphash_create *req =
3455 + (struct ip_set_req_iphash_create *) data;
3456 + struct ip_set_iphash *map;
3457 + uint16_t i;
3458 +
3459 + if (size != sizeof(struct ip_set_req_iphash_create)) {
3460 + ip_set_printk("data length wrong (want %zu, have %zu)",
3461 + sizeof(struct ip_set_req_iphash_create),
3462 + size);
3463 + return -EINVAL;
3464 + }
3465 +
3466 + if (req->hashsize < 1) {
3467 + ip_set_printk("hashsize too small");
3468 + return -ENOEXEC;
3469 + }
3470 +
3471 + if (req->probes < 1) {
3472 + ip_set_printk("probes too small");
3473 + return -ENOEXEC;
3474 + }
3475 +
3476 + map = kmalloc(sizeof(struct ip_set_iphash)
3477 + + req->probes * sizeof(uint32_t), GFP_KERNEL);
3478 + if (!map) {
3479 + DP("out of memory for %d bytes",
3480 + sizeof(struct ip_set_iphash)
3481 + + req->probes * sizeof(uint32_t));
3482 + return -ENOMEM;
3483 + }
3484 + for (i = 0; i < req->probes; i++)
3485 + get_random_bytes(((uint32_t *) map->initval)+i, 4);
3486 + map->elements = 0;
3487 + map->hashsize = req->hashsize;
3488 + map->probes = req->probes;
3489 + map->resize = req->resize;
3490 + map->netmask = req->netmask;
3491 + map->members = harray_malloc(map->hashsize, sizeof(ip_set_ip_t), GFP_KERNEL);
3492 + if (!map->members) {
3493 + DP("out of memory for %d bytes", map->hashsize * sizeof(ip_set_ip_t));
3494 + kfree(map);
3495 + return -ENOMEM;
3496 + }
3497 +
3498 + set->data = map;
3499 + return 0;
3500 +}
3501 +
3502 +static void destroy(struct ip_set *set)
3503 +{
3504 + struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
3505 +
3506 + harray_free(map->members);
3507 + kfree(map);
3508 +
3509 + set->data = NULL;
3510 +}
3511 +
3512 +static void flush(struct ip_set *set)
3513 +{
3514 + struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
3515 + harray_flush(map->members, map->hashsize, sizeof(ip_set_ip_t));
3516 + map->elements = 0;
3517 +}
3518 +
3519 +static void list_header(const struct ip_set *set, void *data)
3520 +{
3521 + struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
3522 + struct ip_set_req_iphash_create *header =
3523 + (struct ip_set_req_iphash_create *) data;
3524 +
3525 + header->hashsize = map->hashsize;
3526 + header->probes = map->probes;
3527 + header->resize = map->resize;
3528 + header->netmask = map->netmask;
3529 +}
3530 +
3531 +static int list_members_size(const struct ip_set *set)
3532 +{
3533 + struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
3534 +
3535 + return (map->hashsize * sizeof(ip_set_ip_t));
3536 +}
3537 +
3538 +static void list_members(const struct ip_set *set, void *data)
3539 +{
3540 + struct ip_set_iphash *map = (struct ip_set_iphash *) set->data;
3541 + ip_set_ip_t i, *elem;
3542 +
3543 + for (i = 0; i < map->hashsize; i++) {
3544 + elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
3545 + ((ip_set_ip_t *)data)[i] = *elem;
3546 + }
3547 +}
3548 +
3549 +static struct ip_set_type ip_set_iphash = {
3550 + .typename = SETTYPE_NAME,
3551 + .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
3552 + .protocol_version = IP_SET_PROTOCOL_VERSION,
3553 + .create = &create,
3554 + .destroy = &destroy,
3555 + .flush = &flush,
3556 + .reqsize = sizeof(struct ip_set_req_iphash),
3557 + .addip = &addip,
3558 + .addip_kernel = &addip_kernel,
3559 + .retry = &retry,
3560 + .delip = &delip,
3561 + .delip_kernel = &delip_kernel,
3562 + .testip = &testip,
3563 + .testip_kernel = &testip_kernel,
3564 + .header_size = sizeof(struct ip_set_req_iphash_create),
3565 + .list_header = &list_header,
3566 + .list_members_size = &list_members_size,
3567 + .list_members = &list_members,
3568 + .me = THIS_MODULE,
3569 +};
3570 +
3571 +MODULE_LICENSE("GPL");
3572 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
3573 +MODULE_DESCRIPTION("iphash type of IP sets");
3574 +module_param(limit, int, 0600);
3575 +MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
3576 +
3577 +static int __init ip_set_iphash_init(void)
3578 +{
3579 + return ip_set_register_set_type(&ip_set_iphash);
3580 +}
3581 +
3582 +static void __exit ip_set_iphash_fini(void)
3583 +{
3584 + /* FIXME: possible race with ip_set_create() */
3585 + ip_set_unregister_set_type(&ip_set_iphash);
3586 +}
3587 +
3588 +module_init(ip_set_iphash_init);
3589 +module_exit(ip_set_iphash_fini);
3590 diff -Nru linux-2.6.23/net/ipv4/netfilter/ip_set_ipmap.c linux-2.6.23.pom2patch.set/net/ipv4/netfilter/ip_set_ipmap.c
3591 --- linux-2.6.23/net/ipv4/netfilter/ip_set_ipmap.c 1970-01-01 01:00:00.000000000 +0100
3592 +++ linux-2.6.23.pom2patch.set/net/ipv4/netfilter/ip_set_ipmap.c 2007-10-12 11:52:37.000000000 +0200
3593 @@ -0,0 +1,336 @@
3594 +/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
3595 + * Patrick Schaaf <bof@bof.de>
3596 + * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
3597 + *
3598 + * This program is free software; you can redistribute it and/or modify
3599 + * it under the terms of the GNU General Public License version 2 as
3600 + * published by the Free Software Foundation.
3601 + */
3602 +
3603 +/* Kernel module implementing an IP set type: the single bitmap type */
3604 +
3605 +#include <linux/module.h>
3606 +#include <linux/ip.h>
3607 +#include <linux/skbuff.h>
3608 +#include <linux/version.h>
3609 +#include <linux/netfilter_ipv4/ip_tables.h>
3610 +#include <linux/netfilter_ipv4/ip_set.h>
3611 +#include <linux/errno.h>
3612 +#include <asm/uaccess.h>
3613 +#include <asm/bitops.h>
3614 +#include <linux/spinlock.h>
3615 +
3616 +#include <linux/netfilter_ipv4/ip_set_ipmap.h>
3617 +
3618 +static inline ip_set_ip_t
3619 +ip_to_id(const struct ip_set_ipmap *map, ip_set_ip_t ip)
3620 +{
3621 + return (ip - map->first_ip)/map->hosts;
3622 +}
3623 +
3624 +static inline int
3625 +__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
3626 +{
3627 + struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
3628 +
3629 + if (ip < map->first_ip || ip > map->last_ip)
3630 + return -ERANGE;
3631 +
3632 + *hash_ip = ip & map->netmask;
3633 + DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u",
3634 + set->name, HIPQUAD(ip), HIPQUAD(*hash_ip));
3635 + return !!test_bit(ip_to_id(map, *hash_ip), map->members);
3636 +}
3637 +
3638 +static int
3639 +testip(struct ip_set *set, const void *data, size_t size,
3640 + ip_set_ip_t *hash_ip)
3641 +{
3642 + struct ip_set_req_ipmap *req =
3643 + (struct ip_set_req_ipmap *) data;
3644 +
3645 + if (size != sizeof(struct ip_set_req_ipmap)) {
3646 + ip_set_printk("data length wrong (want %zu, have %zu)",
3647 + sizeof(struct ip_set_req_ipmap),
3648 + size);
3649 + return -EINVAL;
3650 + }
3651 + return __testip(set, req->ip, hash_ip);
3652 +}
3653 +
3654 +static int
3655 +testip_kernel(struct ip_set *set,
3656 + const struct sk_buff *skb,
3657 + ip_set_ip_t *hash_ip,
3658 + const u_int32_t *flags,
3659 + unsigned char index)
3660 +{
3661 + int res = __testip(set,
3662 + ntohl(flags[index] & IPSET_SRC
3663 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
3664 + ? ip_hdr(skb)->saddr
3665 + : ip_hdr(skb)->daddr),
3666 +#else
3667 + ? skb->nh.iph->saddr
3668 + : skb->nh.iph->daddr),
3669 +#endif
3670 + hash_ip);
3671 + return (res < 0 ? 0 : res);
3672 +}
3673 +
3674 +static inline int
3675 +__addip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
3676 +{
3677 + struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
3678 +
3679 + if (ip < map->first_ip || ip > map->last_ip)
3680 + return -ERANGE;
3681 +
3682 + *hash_ip = ip & map->netmask;
3683 + DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
3684 + if (test_and_set_bit(ip_to_id(map, *hash_ip), map->members))
3685 + return -EEXIST;
3686 +
3687 + return 0;
3688 +}
3689 +
3690 +static int
3691 +addip(struct ip_set *set, const void *data, size_t size,
3692 + ip_set_ip_t *hash_ip)
3693 +{
3694 + struct ip_set_req_ipmap *req =
3695 + (struct ip_set_req_ipmap *) data;
3696 +
3697 + if (size != sizeof(struct ip_set_req_ipmap)) {
3698 + ip_set_printk("data length wrong (want %zu, have %zu)",
3699 + sizeof(struct ip_set_req_ipmap),
3700 + size);
3701 + return -EINVAL;
3702 + }
3703 + DP("%u.%u.%u.%u", HIPQUAD(req->ip));
3704 + return __addip(set, req->ip, hash_ip);
3705 +}
3706 +
3707 +static int
3708 +addip_kernel(struct ip_set *set,
3709 + const struct sk_buff *skb,
3710 + ip_set_ip_t *hash_ip,
3711 + const u_int32_t *flags,
3712 + unsigned char index)
3713 +{
3714 + return __addip(set,
3715 + ntohl(flags[index] & IPSET_SRC
3716 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
3717 + ? ip_hdr(skb)->saddr
3718 + : ip_hdr(skb)->daddr),
3719 +#else
3720 + ? skb->nh.iph->saddr
3721 + : skb->nh.iph->daddr),
3722 +#endif
3723 + hash_ip);
3724 +}
3725 +
3726 +static inline int
3727 +__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
3728 +{
3729 + struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
3730 +
3731 + if (ip < map->first_ip || ip > map->last_ip)
3732 + return -ERANGE;
3733 +
3734 + *hash_ip = ip & map->netmask;
3735 + DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
3736 + if (!test_and_clear_bit(ip_to_id(map, *hash_ip), map->members))
3737 + return -EEXIST;
3738 +
3739 + return 0;
3740 +}
3741 +
3742 +static int
3743 +delip(struct ip_set *set, const void *data, size_t size,
3744 + ip_set_ip_t *hash_ip)
3745 +{
3746 + struct ip_set_req_ipmap *req =
3747 + (struct ip_set_req_ipmap *) data;
3748 +
3749 + if (size != sizeof(struct ip_set_req_ipmap)) {
3750 + ip_set_printk("data length wrong (want %zu, have %zu)",
3751 + sizeof(struct ip_set_req_ipmap),
3752 + size);
3753 + return -EINVAL;
3754 + }
3755 + return __delip(set, req->ip, hash_ip);
3756 +}
3757 +
3758 +static int
3759 +delip_kernel(struct ip_set *set,
3760 + const struct sk_buff *skb,
3761 + ip_set_ip_t *hash_ip,
3762 + const u_int32_t *flags,
3763 + unsigned char index)
3764 +{
3765 + return __delip(set,
3766 + ntohl(flags[index] & IPSET_SRC
3767 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
3768 + ? ip_hdr(skb)->saddr
3769 + : ip_hdr(skb)->daddr),
3770 +#else
3771 + ? skb->nh.iph->saddr
3772 + : skb->nh.iph->daddr),
3773 +#endif
3774 + hash_ip);
3775 +}
3776 +
3777 +static int create(struct ip_set *set, const void *data, size_t size)
3778 +{
3779 + int newbytes;
3780 + struct ip_set_req_ipmap_create *req =
3781 + (struct ip_set_req_ipmap_create *) data;
3782 + struct ip_set_ipmap *map;
3783 +
3784 + if (size != sizeof(struct ip_set_req_ipmap_create)) {
3785 + ip_set_printk("data length wrong (want %zu, have %zu)",
3786 + sizeof(struct ip_set_req_ipmap_create),
3787 + size);
3788 + return -EINVAL;
3789 + }
3790 +
3791 + DP("from %u.%u.%u.%u to %u.%u.%u.%u",
3792 + HIPQUAD(req->from), HIPQUAD(req->to));
3793 +
3794 + if (req->from > req->to) {
3795 + DP("bad ip range");
3796 + return -ENOEXEC;
3797 + }
3798 +
3799 + map = kmalloc(sizeof(struct ip_set_ipmap), GFP_KERNEL);
3800 + if (!map) {
3801 + DP("out of memory for %d bytes",
3802 + sizeof(struct ip_set_ipmap));
3803 + return -ENOMEM;
3804 + }
3805 + map->first_ip = req->from;
3806 + map->last_ip = req->to;
3807 + map->netmask = req->netmask;
3808 +
3809 + if (req->netmask == 0xFFFFFFFF) {
3810 + map->hosts = 1;
3811 + map->sizeid = map->last_ip - map->first_ip + 1;
3812 + } else {
3813 + unsigned int mask_bits, netmask_bits;
3814 + ip_set_ip_t mask;
3815 +
3816 + map->first_ip &= map->netmask; /* Should we better bark? */
3817 +
3818 + mask = range_to_mask(map->first_ip, map->last_ip, &mask_bits);
3819 + netmask_bits = mask_to_bits(map->netmask);
3820 +
3821 + if ((!mask && (map->first_ip || map->last_ip != 0xFFFFFFFF))
3822 + || netmask_bits <= mask_bits)
3823 + return -ENOEXEC;
3824 +
3825 + DP("mask_bits %u, netmask_bits %u",
3826 + mask_bits, netmask_bits);
3827 + map->hosts = 2 << (32 - netmask_bits - 1);
3828 + map->sizeid = 2 << (netmask_bits - mask_bits - 1);
3829 + }
3830 + if (map->sizeid > MAX_RANGE + 1) {
3831 + ip_set_printk("range too big (max %d addresses)",
3832 + MAX_RANGE+1);
3833 + kfree(map);
3834 + return -ENOEXEC;
3835 + }
3836 + DP("hosts %u, sizeid %u", map->hosts, map->sizeid);
3837 + newbytes = bitmap_bytes(0, map->sizeid - 1);
3838 + map->members = kmalloc(newbytes, GFP_KERNEL);
3839 + if (!map->members) {
3840 + DP("out of memory for %d bytes", newbytes);
3841 + kfree(map);
3842 + return -ENOMEM;
3843 + }
3844 + memset(map->members, 0, newbytes);
3845 +
3846 + set->data = map;
3847 + return 0;
3848 +}
3849 +
3850 +static void destroy(struct ip_set *set)
3851 +{
3852 + struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
3853 +
3854 + kfree(map->members);
3855 + kfree(map);
3856 +
3857 + set->data = NULL;
3858 +}
3859 +
3860 +static void flush(struct ip_set *set)
3861 +{
3862 + struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
3863 + memset(map->members, 0, bitmap_bytes(0, map->sizeid - 1));
3864 +}
3865 +
3866 +static void list_header(const struct ip_set *set, void *data)
3867 +{
3868 + struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
3869 + struct ip_set_req_ipmap_create *header =
3870 + (struct ip_set_req_ipmap_create *) data;
3871 +
3872 + header->from = map->first_ip;
3873 + header->to = map->last_ip;
3874 + header->netmask = map->netmask;
3875 +}
3876 +
3877 +static int list_members_size(const struct ip_set *set)
3878 +{
3879 + struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
3880 +
3881 + return bitmap_bytes(0, map->sizeid - 1);
3882 +}
3883 +
3884 +static void list_members(const struct ip_set *set, void *data)
3885 +{
3886 + struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
3887 + int bytes = bitmap_bytes(0, map->sizeid - 1);
3888 +
3889 + memcpy(data, map->members, bytes);
3890 +}
3891 +
3892 +static struct ip_set_type ip_set_ipmap = {
3893 + .typename = SETTYPE_NAME,
3894 + .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
3895 + .protocol_version = IP_SET_PROTOCOL_VERSION,
3896 + .create = &create,
3897 + .destroy = &destroy,
3898 + .flush = &flush,
3899 + .reqsize = sizeof(struct ip_set_req_ipmap),
3900 + .addip = &addip,
3901 + .addip_kernel = &addip_kernel,
3902 + .delip = &delip,
3903 + .delip_kernel = &delip_kernel,
3904 + .testip = &testip,
3905 + .testip_kernel = &testip_kernel,
3906 + .header_size = sizeof(struct ip_set_req_ipmap_create),
3907 + .list_header = &list_header,
3908 + .list_members_size = &list_members_size,
3909 + .list_members = &list_members,
3910 + .me = THIS_MODULE,
3911 +};
3912 +
3913 +MODULE_LICENSE("GPL");
3914 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
3915 +MODULE_DESCRIPTION("ipmap type of IP sets");
3916 +
3917 +static int __init ip_set_ipmap_init(void)
3918 +{
3919 + return ip_set_register_set_type(&ip_set_ipmap);
3920 +}
3921 +
3922 +static void __exit ip_set_ipmap_fini(void)
3923 +{
3924 + /* FIXME: possible race with ip_set_create() */
3925 + ip_set_unregister_set_type(&ip_set_ipmap);
3926 +}
3927 +
3928 +module_init(ip_set_ipmap_init);
3929 +module_exit(ip_set_ipmap_fini);
3930 diff -Nru linux-2.6.23/net/ipv4/netfilter/ip_set_ipporthash.c linux-2.6.23.pom2patch.set/net/ipv4/netfilter/ip_set_ipporthash.c
3931 --- linux-2.6.23/net/ipv4/netfilter/ip_set_ipporthash.c 1970-01-01 01:00:00.000000000 +0100
3932 +++ linux-2.6.23.pom2patch.set/net/ipv4/netfilter/ip_set_ipporthash.c 2007-10-12 11:52:37.000000000 +0200
3933 @@ -0,0 +1,581 @@
3934 +/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
3935 + *
3936 + * This program is free software; you can redistribute it and/or modify
3937 + * it under the terms of the GNU General Public License version 2 as
3938 + * published by the Free Software Foundation.
3939 + */
3940 +
3941 +/* Kernel module implementing an ip+port hash set */
3942 +
3943 +#include <linux/module.h>
3944 +#include <linux/ip.h>
3945 +#include <linux/tcp.h>
3946 +#include <linux/udp.h>
3947 +#include <linux/skbuff.h>
3948 +#include <linux/version.h>
3949 +#include <linux/jhash.h>
3950 +#include <linux/netfilter_ipv4/ip_tables.h>
3951 +#include <linux/netfilter_ipv4/ip_set.h>
3952 +#include <linux/errno.h>
3953 +#include <asm/uaccess.h>
3954 +#include <asm/bitops.h>
3955 +#include <linux/spinlock.h>
3956 +#include <linux/vmalloc.h>
3957 +#include <linux/random.h>
3958 +
3959 +#include <net/ip.h>
3960 +
3961 +#include <linux/netfilter_ipv4/ip_set_malloc.h>
3962 +#include <linux/netfilter_ipv4/ip_set_ipporthash.h>
3963 +
3964 +static int limit = MAX_RANGE;
3965 +
3966 +/* We must handle non-linear skbs */
3967 +static inline ip_set_ip_t
3968 +get_port(const struct sk_buff *skb, u_int32_t flags)
3969 +{
3970 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
3971 + struct iphdr *iph = ip_hdr(skb);
3972 +#else
3973 + struct iphdr *iph = skb->nh.iph;
3974 +#endif
3975 + u_int16_t offset = ntohs(iph->frag_off) & IP_OFFSET;
3976 +
3977 + switch (iph->protocol) {
3978 + case IPPROTO_TCP: {
3979 + struct tcphdr tcph;
3980 +
3981 + /* See comments at tcp_match in ip_tables.c */
3982 + if (offset)
3983 + return INVALID_PORT;
3984 +
3985 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
3986 + if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &tcph, sizeof(tcph)) < 0)
3987 +#else
3988 + if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &tcph, sizeof(tcph)) < 0)
3989 +#endif
3990 + /* No choice either */
3991 + return INVALID_PORT;
3992 +
3993 + return ntohs(flags & IPSET_SRC ?
3994 + tcph.source : tcph.dest);
3995 + }
3996 + case IPPROTO_UDP: {
3997 + struct udphdr udph;
3998 +
3999 + if (offset)
4000 + return INVALID_PORT;
4001 +
4002 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
4003 + if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &udph, sizeof(udph)) < 0)
4004 +#else
4005 + if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &udph, sizeof(udph)) < 0)
4006 +#endif
4007 + /* No choice either */
4008 + return INVALID_PORT;
4009 +
4010 + return ntohs(flags & IPSET_SRC ?
4011 + udph.source : udph.dest);
4012 + }
4013 + default:
4014 + return INVALID_PORT;
4015 + }
4016 +}
4017 +
4018 +static inline __u32
4019 +jhash_ip(const struct ip_set_ipporthash *map, uint16_t i, ip_set_ip_t ip)
4020 +{
4021 + return jhash_1word(ip, *(((uint32_t *) map->initval) + i));
4022 +}
4023 +
4024 +#define HASH_IP(map, ip, port) (port + ((ip - ((map)->first_ip)) << 16))
4025 +
4026 +static inline __u32
4027 +hash_id(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t port,
4028 + ip_set_ip_t *hash_ip)
4029 +{
4030 + struct ip_set_ipporthash *map =
4031 + (struct ip_set_ipporthash *) set->data;
4032 + __u32 id;
4033 + u_int16_t i;
4034 + ip_set_ip_t *elem;
4035 +
4036 + *hash_ip = HASH_IP(map, ip, port);
4037 + DP("set: %s, ipport:%u.%u.%u.%u:%u, %u.%u.%u.%u",
4038 + set->name, HIPQUAD(ip), port, HIPQUAD(*hash_ip));
4039 +
4040 + for (i = 0; i < map->probes; i++) {
4041 + id = jhash_ip(map, i, *hash_ip) % map->hashsize;
4042 + DP("hash key: %u", id);
4043 + elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
4044 + if (*elem == *hash_ip)
4045 + return id;
4046 +		/* No shortcut when testing - there can be deleted
4047 + * entries. */
4048 + }
4049 + return UINT_MAX;
4050 +}
4051 +
4052 +static inline int
4053 +__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t port,
4054 + ip_set_ip_t *hash_ip)
4055 +{
4056 + struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
4057 +
4058 + if (ip < map->first_ip || ip > map->last_ip)
4059 + return -ERANGE;
4060 +
4061 + return (hash_id(set, ip, port, hash_ip) != UINT_MAX);
4062 +}
4063 +
4064 +static int
4065 +testip(struct ip_set *set, const void *data, size_t size,
4066 + ip_set_ip_t *hash_ip)
4067 +{
4068 + struct ip_set_req_ipporthash *req =
4069 + (struct ip_set_req_ipporthash *) data;
4070 +
4071 + if (size != sizeof(struct ip_set_req_ipporthash)) {
4072 + ip_set_printk("data length wrong (want %zu, have %zu)",
4073 + sizeof(struct ip_set_req_ipporthash),
4074 + size);
4075 + return -EINVAL;
4076 + }
4077 + return __testip(set, req->ip, req->port, hash_ip);
4078 +}
4079 +
4080 +static int
4081 +testip_kernel(struct ip_set *set,
4082 + const struct sk_buff *skb,
4083 + ip_set_ip_t *hash_ip,
4084 + const u_int32_t *flags,
4085 + unsigned char index)
4086 +{
4087 + ip_set_ip_t port;
4088 + int res;
4089 +
4090 + if (flags[index+1] == 0)
4091 + return 0;
4092 +
4093 + port = get_port(skb, flags[index+1]);
4094 +
4095 + DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
4096 + flags[index] & IPSET_SRC ? "SRC" : "DST",
4097 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
4098 + NIPQUAD(ip_hdr(skb)->saddr),
4099 + NIPQUAD(ip_hdr(skb)->daddr));
4100 +#else
4101 + NIPQUAD(skb->nh.iph->saddr),
4102 + NIPQUAD(skb->nh.iph->daddr));
4103 +#endif
4104 + DP("flag %s port %u",
4105 + flags[index+1] & IPSET_SRC ? "SRC" : "DST",
4106 + port);
4107 + if (port == INVALID_PORT)
4108 + return 0;
4109 +
4110 + res = __testip(set,
4111 + ntohl(flags[index] & IPSET_SRC
4112 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
4113 + ? ip_hdr(skb)->saddr
4114 + : ip_hdr(skb)->daddr),
4115 +#else
4116 + ? skb->nh.iph->saddr
4117 + : skb->nh.iph->daddr),
4118 +#endif
4119 + port,
4120 + hash_ip);
4121 + return (res < 0 ? 0 : res);
4122 +
4123 +}
4124 +
4125 +static inline int
4126 +__add_haship(struct ip_set_ipporthash *map, ip_set_ip_t hash_ip)
4127 +{
4128 + __u32 probe;
4129 + u_int16_t i;
4130 + ip_set_ip_t *elem;
4131 +
4132 + for (i = 0; i < map->probes; i++) {
4133 + probe = jhash_ip(map, i, hash_ip) % map->hashsize;
4134 + elem = HARRAY_ELEM(map->members, ip_set_ip_t *, probe);
4135 + if (*elem == hash_ip)
4136 + return -EEXIST;
4137 + if (!*elem) {
4138 + *elem = hash_ip;
4139 + map->elements++;
4140 + return 0;
4141 + }
4142 + }
4143 + /* Trigger rehashing */
4144 + return -EAGAIN;
4145 +}
4146 +
4147 +static inline int
4148 +__addip(struct ip_set_ipporthash *map, ip_set_ip_t ip, ip_set_ip_t port,
4149 + ip_set_ip_t *hash_ip)
4150 +{
4151 + if (map->elements > limit)
4152 + return -ERANGE;
4153 + if (ip < map->first_ip || ip > map->last_ip)
4154 + return -ERANGE;
4155 +
4156 + *hash_ip = HASH_IP(map, ip, port);
4157 +
4158 + return __add_haship(map, *hash_ip);
4159 +}
4160 +
4161 +static int
4162 +addip(struct ip_set *set, const void *data, size_t size,
4163 + ip_set_ip_t *hash_ip)
4164 +{
4165 + struct ip_set_req_ipporthash *req =
4166 + (struct ip_set_req_ipporthash *) data;
4167 +
4168 + if (size != sizeof(struct ip_set_req_ipporthash)) {
4169 + ip_set_printk("data length wrong (want %zu, have %zu)",
4170 + sizeof(struct ip_set_req_ipporthash),
4171 + size);
4172 + return -EINVAL;
4173 + }
4174 + return __addip((struct ip_set_ipporthash *) set->data,
4175 + req->ip, req->port, hash_ip);
4176 +}
4177 +
4178 +static int
4179 +addip_kernel(struct ip_set *set,
4180 + const struct sk_buff *skb,
4181 + ip_set_ip_t *hash_ip,
4182 + const u_int32_t *flags,
4183 + unsigned char index)
4184 +{
4185 + ip_set_ip_t port;
4186 +
4187 + if (flags[index+1] == 0)
4188 + return -EINVAL;
4189 +
4190 + port = get_port(skb, flags[index+1]);
4191 +
4192 + DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
4193 + flags[index] & IPSET_SRC ? "SRC" : "DST",
4194 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
4195 + NIPQUAD(ip_hdr(skb)->saddr),
4196 + NIPQUAD(ip_hdr(skb)->daddr));
4197 +#else
4198 + NIPQUAD(skb->nh.iph->saddr),
4199 + NIPQUAD(skb->nh.iph->daddr));
4200 +#endif
4201 + DP("flag %s port %u",
4202 + flags[index+1] & IPSET_SRC ? "SRC" : "DST",
4203 + port);
4204 + if (port == INVALID_PORT)
4205 + return -EINVAL;
4206 +
4207 + return __addip((struct ip_set_ipporthash *) set->data,
4208 + ntohl(flags[index] & IPSET_SRC
4209 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
4210 + ? ip_hdr(skb)->saddr
4211 + : ip_hdr(skb)->daddr),
4212 +#else
4213 + ? skb->nh.iph->saddr
4214 + : skb->nh.iph->daddr),
4215 +#endif
4216 + port,
4217 + hash_ip);
4218 +}
4219 +
4220 +static int retry(struct ip_set *set)
4221 +{
4222 + struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
4223 + ip_set_ip_t *elem;
4224 + void *members;
4225 + u_int32_t i, hashsize = map->hashsize;
4226 + int res;
4227 + struct ip_set_ipporthash *tmp;
4228 +
4229 + if (map->resize == 0)
4230 + return -ERANGE;
4231 +
4232 + again:
4233 + res = 0;
4234 +
4235 + /* Calculate new hash size */
4236 + hashsize += (hashsize * map->resize)/100;
4237 + if (hashsize == map->hashsize)
4238 + hashsize++;
4239 +
4240 + ip_set_printk("rehashing of set %s triggered: "
4241 + "hashsize grows from %u to %u",
4242 + set->name, map->hashsize, hashsize);
4243 +
4244 + tmp = kmalloc(sizeof(struct ip_set_ipporthash)
4245 + + map->probes * sizeof(uint32_t), GFP_ATOMIC);
4246 + if (!tmp) {
4247 + DP("out of memory for %d bytes",
4248 + sizeof(struct ip_set_ipporthash)
4249 + + map->probes * sizeof(uint32_t));
4250 + return -ENOMEM;
4251 + }
4252 + tmp->members = harray_malloc(hashsize, sizeof(ip_set_ip_t), GFP_ATOMIC);
4253 + if (!tmp->members) {
4254 + DP("out of memory for %d bytes", hashsize * sizeof(ip_set_ip_t));
4255 + kfree(tmp);
4256 + return -ENOMEM;
4257 + }
4258 + tmp->hashsize = hashsize;
4259 + tmp->elements = 0;
4260 + tmp->probes = map->probes;
4261 + tmp->resize = map->resize;
4262 + tmp->first_ip = map->first_ip;
4263 + tmp->last_ip = map->last_ip;
4264 + memcpy(tmp->initval, map->initval, map->probes * sizeof(uint32_t));
4265 +
4266 + write_lock_bh(&set->lock);
4267 + map = (struct ip_set_ipporthash *) set->data; /* Play safe */
4268 + for (i = 0; i < map->hashsize && res == 0; i++) {
4269 + elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
4270 + if (*elem)
4271 + res = __add_haship(tmp, *elem);
4272 + }
4273 + if (res) {
4274 + /* Failure, try again */
4275 + write_unlock_bh(&set->lock);
4276 + harray_free(tmp->members);
4277 + kfree(tmp);
4278 + goto again;
4279 + }
4280 +
4281 + /* Success at resizing! */
4282 + members = map->members;
4283 +
4284 + map->hashsize = tmp->hashsize;
4285 + map->members = tmp->members;
4286 + write_unlock_bh(&set->lock);
4287 +
4288 + harray_free(members);
4289 + kfree(tmp);
4290 +
4291 + return 0;
4292 +}
4293 +
4294 +static inline int
4295 +__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t port,
4296 + ip_set_ip_t *hash_ip)
4297 +{
4298 + struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
4299 + ip_set_ip_t id;
4300 + ip_set_ip_t *elem;
4301 +
4302 + if (ip < map->first_ip || ip > map->last_ip)
4303 + return -ERANGE;
4304 +
4305 + id = hash_id(set, ip, port, hash_ip);
4306 +
4307 + if (id == UINT_MAX)
4308 + return -EEXIST;
4309 +
4310 + elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
4311 + *elem = 0;
4312 + map->elements--;
4313 +
4314 + return 0;
4315 +}
4316 +
4317 +static int
4318 +delip(struct ip_set *set, const void *data, size_t size,
4319 + ip_set_ip_t *hash_ip)
4320 +{
4321 + struct ip_set_req_ipporthash *req =
4322 + (struct ip_set_req_ipporthash *) data;
4323 +
4324 + if (size != sizeof(struct ip_set_req_ipporthash)) {
4325 + ip_set_printk("data length wrong (want %zu, have %zu)",
4326 + sizeof(struct ip_set_req_ipporthash),
4327 + size);
4328 + return -EINVAL;
4329 + }
4330 + return __delip(set, req->ip, req->port, hash_ip);
4331 +}
4332 +
4333 +static int
4334 +delip_kernel(struct ip_set *set,
4335 + const struct sk_buff *skb,
4336 + ip_set_ip_t *hash_ip,
4337 + const u_int32_t *flags,
4338 + unsigned char index)
4339 +{
4340 + ip_set_ip_t port;
4341 +
4342 + if (flags[index+1] == 0)
4343 + return -EINVAL;
4344 +
4345 + port = get_port(skb, flags[index+1]);
4346 +
4347 + DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
4348 + flags[index] & IPSET_SRC ? "SRC" : "DST",
4349 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
4350 + NIPQUAD(ip_hdr(skb)->saddr),
4351 + NIPQUAD(ip_hdr(skb)->daddr));
4352 +#else
4353 + NIPQUAD(skb->nh.iph->saddr),
4354 + NIPQUAD(skb->nh.iph->daddr));
4355 +#endif
4356 + DP("flag %s port %u",
4357 + flags[index+1] & IPSET_SRC ? "SRC" : "DST",
4358 + port);
4359 + if (port == INVALID_PORT)
4360 + return -EINVAL;
4361 +
4362 + return __delip(set,
4363 + ntohl(flags[index] & IPSET_SRC
4364 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
4365 + ? ip_hdr(skb)->saddr
4366 + : ip_hdr(skb)->daddr),
4367 +#else
4368 + ? skb->nh.iph->saddr
4369 + : skb->nh.iph->daddr),
4370 +#endif
4371 + port,
4372 + hash_ip);
4373 +}
4374 +
4375 +static int create(struct ip_set *set, const void *data, size_t size)
4376 +{
4377 + struct ip_set_req_ipporthash_create *req =
4378 + (struct ip_set_req_ipporthash_create *) data;
4379 + struct ip_set_ipporthash *map;
4380 + uint16_t i;
4381 +
4382 + if (size != sizeof(struct ip_set_req_ipporthash_create)) {
4383 + ip_set_printk("data length wrong (want %zu, have %zu)",
4384 + sizeof(struct ip_set_req_ipporthash_create),
4385 + size);
4386 + return -EINVAL;
4387 + }
4388 +
4389 + if (req->hashsize < 1) {
4390 + ip_set_printk("hashsize too small");
4391 + return -ENOEXEC;
4392 + }
4393 +
4394 + if (req->probes < 1) {
4395 + ip_set_printk("probes too small");
4396 + return -ENOEXEC;
4397 + }
4398 +
4399 + map = kmalloc(sizeof(struct ip_set_ipporthash)
4400 + + req->probes * sizeof(uint32_t), GFP_KERNEL);
4401 + if (!map) {
4402 + DP("out of memory for %d bytes",
4403 + sizeof(struct ip_set_ipporthash)
4404 + + req->probes * sizeof(uint32_t));
4405 + return -ENOMEM;
4406 + }
4407 + for (i = 0; i < req->probes; i++)
4408 + get_random_bytes(((uint32_t *) map->initval)+i, 4);
4409 + map->elements = 0;
4410 + map->hashsize = req->hashsize;
4411 + map->probes = req->probes;
4412 + map->resize = req->resize;
4413 + map->first_ip = req->from;
4414 + map->last_ip = req->to;
4415 + map->members = harray_malloc(map->hashsize, sizeof(ip_set_ip_t), GFP_KERNEL);
4416 + if (!map->members) {
4417 + DP("out of memory for %d bytes", map->hashsize * sizeof(ip_set_ip_t));
4418 + kfree(map);
4419 + return -ENOMEM;
4420 + }
4421 +
4422 + set->data = map;
4423 + return 0;
4424 +}
4425 +
4426 +static void destroy(struct ip_set *set)
4427 +{
4428 + struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
4429 +
4430 + harray_free(map->members);
4431 + kfree(map);
4432 +
4433 + set->data = NULL;
4434 +}
4435 +
4436 +static void flush(struct ip_set *set)
4437 +{
4438 + struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
4439 + harray_flush(map->members, map->hashsize, sizeof(ip_set_ip_t));
4440 + map->elements = 0;
4441 +}
4442 +
4443 +static void list_header(const struct ip_set *set, void *data)
4444 +{
4445 + struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
4446 + struct ip_set_req_ipporthash_create *header =
4447 + (struct ip_set_req_ipporthash_create *) data;
4448 +
4449 + header->hashsize = map->hashsize;
4450 + header->probes = map->probes;
4451 + header->resize = map->resize;
4452 + header->from = map->first_ip;
4453 + header->to = map->last_ip;
4454 +}
4455 +
4456 +static int list_members_size(const struct ip_set *set)
4457 +{
4458 + struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
4459 +
4460 + return (map->hashsize * sizeof(ip_set_ip_t));
4461 +}
4462 +
4463 +static void list_members(const struct ip_set *set, void *data)
4464 +{
4465 + struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
4466 + ip_set_ip_t i, *elem;
4467 +
4468 + for (i = 0; i < map->hashsize; i++) {
4469 + elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
4470 + ((ip_set_ip_t *)data)[i] = *elem;
4471 + }
4472 +}
4473 +
4474 +static struct ip_set_type ip_set_ipporthash = {
4475 + .typename = SETTYPE_NAME,
4476 + .features = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_DATA_DOUBLE,
4477 + .protocol_version = IP_SET_PROTOCOL_VERSION,
4478 + .create = &create,
4479 + .destroy = &destroy,
4480 + .flush = &flush,
4481 + .reqsize = sizeof(struct ip_set_req_ipporthash),
4482 + .addip = &addip,
4483 + .addip_kernel = &addip_kernel,
4484 + .retry = &retry,
4485 + .delip = &delip,
4486 + .delip_kernel = &delip_kernel,
4487 + .testip = &testip,
4488 + .testip_kernel = &testip_kernel,
4489 + .header_size = sizeof(struct ip_set_req_ipporthash_create),
4490 + .list_header = &list_header,
4491 + .list_members_size = &list_members_size,
4492 + .list_members = &list_members,
4493 + .me = THIS_MODULE,
4494 +};
4495 +
4496 +MODULE_LICENSE("GPL");
4497 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
4498 +MODULE_DESCRIPTION("ipporthash type of IP sets");
4499 +module_param(limit, int, 0600);
4500 +MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
4501 +
4502 +static int __init ip_set_ipporthash_init(void)
4503 +{
4504 + return ip_set_register_set_type(&ip_set_ipporthash);
4505 +}
4506 +
4507 +static void __exit ip_set_ipporthash_fini(void)
4508 +{
4509 + /* FIXME: possible race with ip_set_create() */
4510 + ip_set_unregister_set_type(&ip_set_ipporthash);
4511 +}
4512 +
4513 +module_init(ip_set_ipporthash_init);
4514 +module_exit(ip_set_ipporthash_fini);
4515 diff -Nru linux-2.6.23/net/ipv4/netfilter/ip_set_iptree.c linux-2.6.23.pom2patch.set/net/ipv4/netfilter/ip_set_iptree.c
4516 --- linux-2.6.23/net/ipv4/netfilter/ip_set_iptree.c 1970-01-01 01:00:00.000000000 +0100
4517 +++ linux-2.6.23.pom2patch.set/net/ipv4/netfilter/ip_set_iptree.c 2007-10-12 11:52:37.000000000 +0200
4518 @@ -0,0 +1,612 @@
4519 +/* Copyright (C) 2005 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
4520 + *
4521 + * This program is free software; you can redistribute it and/or modify
4522 + * it under the terms of the GNU General Public License version 2 as
4523 + * published by the Free Software Foundation.
4524 + */
4525 +
4526 +/* Kernel module implementing an IP set type: the iptree type */
4527 +
4528 +#include <linux/version.h>
4529 +#include <linux/module.h>
4530 +#include <linux/ip.h>
4531 +#include <linux/skbuff.h>
4532 +#include <linux/slab.h>
4533 +#include <linux/delay.h>
4534 +#include <linux/netfilter_ipv4/ip_tables.h>
4535 +#include <linux/netfilter_ipv4/ip_set.h>
4536 +#include <linux/errno.h>
4537 +#include <asm/uaccess.h>
4538 +#include <asm/bitops.h>
4539 +#include <linux/spinlock.h>
4540 +
4541 +/* Backward compatibility */
4542 +#ifndef __nocast
4543 +#define __nocast
4544 +#endif
4545 +
4546 +#include <linux/netfilter_ipv4/ip_set_iptree.h>
4547 +
4548 +static int limit = MAX_RANGE;
4549 +
4550 +/* Garbage collection interval in seconds: */
4551 +#define IPTREE_GC_TIME 5*60
4552 +/* Sleep so many milliseconds before trying again
4553 + * to delete the gc timer when destroying/flushing a set */
4554 +#define IPTREE_DESTROY_SLEEP 100
4555 +
4556 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
4557 +static struct kmem_cache *branch_cachep;
4558 +static struct kmem_cache *leaf_cachep;
4559 +#else
4560 +static kmem_cache_t *branch_cachep;
4561 +static kmem_cache_t *leaf_cachep;
4562 +#endif
4563 +
4564 +#if defined(__LITTLE_ENDIAN)
4565 +#define ABCD(a,b,c,d,addrp) do { \
4566 + a = ((unsigned char *)addrp)[3]; \
4567 + b = ((unsigned char *)addrp)[2]; \
4568 + c = ((unsigned char *)addrp)[1]; \
4569 + d = ((unsigned char *)addrp)[0]; \
4570 +} while (0)
4571 +#elif defined(__BIG_ENDIAN)
4572 +#define ABCD(a,b,c,d,addrp) do { \
4573 + a = ((unsigned char *)addrp)[0]; \
4574 + b = ((unsigned char *)addrp)[1]; \
4575 + c = ((unsigned char *)addrp)[2]; \
4576 + d = ((unsigned char *)addrp)[3]; \
4577 +} while (0)
4578 +#else
4579 +#error "Please fix asm/byteorder.h"
4580 +#endif /* __LITTLE_ENDIAN */
4581 +
4582 +#define TESTIP_WALK(map, elem, branch) do { \
4583 + if ((map)->tree[elem]) { \
4584 + branch = (map)->tree[elem]; \
4585 + } else \
4586 + return 0; \
4587 +} while (0)
4588 +
4589 +static inline int
4590 +__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
4591 +{
4592 + struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
4593 + struct ip_set_iptreeb *btree;
4594 + struct ip_set_iptreec *ctree;
4595 + struct ip_set_iptreed *dtree;
4596 + unsigned char a,b,c,d;
4597 +
4598 + if (!ip)
4599 + return -ERANGE;
4600 +
4601 + *hash_ip = ip;
4602 + ABCD(a, b, c, d, hash_ip);
4603 + DP("%u %u %u %u timeout %u", a, b, c, d, map->timeout);
4604 + TESTIP_WALK(map, a, btree);
4605 + TESTIP_WALK(btree, b, ctree);
4606 + TESTIP_WALK(ctree, c, dtree);
4607 + DP("%lu %lu", dtree->expires[d], jiffies);
4608 + return dtree->expires[d]
4609 + && (!map->timeout
4610 + || time_after(dtree->expires[d], jiffies));
4611 +}
4612 +
4613 +static int
4614 +testip(struct ip_set *set, const void *data, size_t size,
4615 + ip_set_ip_t *hash_ip)
4616 +{
4617 + struct ip_set_req_iptree *req =
4618 + (struct ip_set_req_iptree *) data;
4619 +
4620 + if (size != sizeof(struct ip_set_req_iptree)) {
4621 + ip_set_printk("data length wrong (want %zu, have %zu)",
4622 + sizeof(struct ip_set_req_iptree),
4623 + size);
4624 + return -EINVAL;
4625 + }
4626 + return __testip(set, req->ip, hash_ip);
4627 +}
4628 +
4629 +static int
4630 +testip_kernel(struct ip_set *set,
4631 + const struct sk_buff *skb,
4632 + ip_set_ip_t *hash_ip,
4633 + const u_int32_t *flags,
4634 + unsigned char index)
4635 +{
4636 + int res;
4637 +
4638 + DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
4639 + flags[index] & IPSET_SRC ? "SRC" : "DST",
4640 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
4641 + NIPQUAD(ip_hdr(skb)->saddr),
4642 + NIPQUAD(ip_hdr(skb)->daddr));
4643 +#else
4644 + NIPQUAD(skb->nh.iph->saddr),
4645 + NIPQUAD(skb->nh.iph->daddr));
4646 +#endif
4647 +
4648 + res = __testip(set,
4649 + ntohl(flags[index] & IPSET_SRC
4650 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
4651 + ? ip_hdr(skb)->saddr
4652 + : ip_hdr(skb)->daddr),
4653 +#else
4654 + ? skb->nh.iph->saddr
4655 + : skb->nh.iph->daddr),
4656 +#endif
4657 + hash_ip);
4658 + return (res < 0 ? 0 : res);
4659 +}
4660 +
4661 +#define ADDIP_WALK(map, elem, branch, type, cachep) do { \
4662 + if ((map)->tree[elem]) { \
4663 + DP("found %u", elem); \
4664 + branch = (map)->tree[elem]; \
4665 + } else { \
4666 + branch = (type *) \
4667 + kmem_cache_alloc(cachep, GFP_ATOMIC); \
4668 + if (branch == NULL) \
4669 + return -ENOMEM; \
4670 + memset(branch, 0, sizeof(*branch)); \
4671 + (map)->tree[elem] = branch; \
4672 + DP("alloc %u", elem); \
4673 + } \
4674 +} while (0)
4675 +
4676 +static inline int
4677 +__addip(struct ip_set *set, ip_set_ip_t ip, unsigned int timeout,
4678 + ip_set_ip_t *hash_ip)
4679 +{
4680 + struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
4681 + struct ip_set_iptreeb *btree;
4682 + struct ip_set_iptreec *ctree;
4683 + struct ip_set_iptreed *dtree;
4684 + unsigned char a,b,c,d;
4685 + int ret = 0;
4686 +
4687 + if (!ip || map->elements >= limit)
4688 + /* We could call the garbage collector
4689 + * but it's probably overkill */
4690 + return -ERANGE;
4691 +
4692 + *hash_ip = ip;
4693 + ABCD(a, b, c, d, hash_ip);
4694 + DP("%u %u %u %u timeout %u", a, b, c, d, timeout);
4695 + ADDIP_WALK(map, a, btree, struct ip_set_iptreeb, branch_cachep);
4696 + ADDIP_WALK(btree, b, ctree, struct ip_set_iptreec, branch_cachep);
4697 + ADDIP_WALK(ctree, c, dtree, struct ip_set_iptreed, leaf_cachep);
4698 + if (dtree->expires[d]
4699 + && (!map->timeout || time_after(dtree->expires[d], jiffies)))
4700 + ret = -EEXIST;
4701 + dtree->expires[d] = map->timeout ? (timeout * HZ + jiffies) : 1;
4702 +	/* 0 means "no entry": if the expiry lands exactly on 0, bump it to 1 */
4703 + if (dtree->expires[d] == 0)
4704 + dtree->expires[d] = 1;
4705 + DP("%u %lu", d, dtree->expires[d]);
4706 + if (ret == 0)
4707 + map->elements++;
4708 + return ret;
4709 +}
4710 +
4711 +static int
4712 +addip(struct ip_set *set, const void *data, size_t size,
4713 + ip_set_ip_t *hash_ip)
4714 +{
4715 + struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
4716 + struct ip_set_req_iptree *req =
4717 + (struct ip_set_req_iptree *) data;
4718 +
4719 + if (size != sizeof(struct ip_set_req_iptree)) {
4720 + ip_set_printk("data length wrong (want %zu, have %zu)",
4721 + sizeof(struct ip_set_req_iptree),
4722 + size);
4723 + return -EINVAL;
4724 + }
4725 + DP("%u.%u.%u.%u %u", HIPQUAD(req->ip), req->timeout);
4726 + return __addip(set, req->ip,
4727 + req->timeout ? req->timeout : map->timeout,
4728 + hash_ip);
4729 +}
4730 +
4731 +static int
4732 +addip_kernel(struct ip_set *set,
4733 + const struct sk_buff *skb,
4734 + ip_set_ip_t *hash_ip,
4735 + const u_int32_t *flags,
4736 + unsigned char index)
4737 +{
4738 + struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
4739 +
4740 + return __addip(set,
4741 + ntohl(flags[index] & IPSET_SRC
4742 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
4743 + ? ip_hdr(skb)->saddr
4744 + : ip_hdr(skb)->daddr),
4745 +#else
4746 + ? skb->nh.iph->saddr
4747 + : skb->nh.iph->daddr),
4748 +#endif
4749 + map->timeout,
4750 + hash_ip);
4751 +}
4752 +
4753 +#define DELIP_WALK(map, elem, branch) do { \
4754 + if ((map)->tree[elem]) { \
4755 + branch = (map)->tree[elem]; \
4756 + } else \
4757 + return -EEXIST; \
4758 +} while (0)
4759 +
4760 +static inline int
4761 +__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
4762 +{
4763 + struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
4764 + struct ip_set_iptreeb *btree;
4765 + struct ip_set_iptreec *ctree;
4766 + struct ip_set_iptreed *dtree;
4767 + unsigned char a,b,c,d;
4768 +
4769 + if (!ip)
4770 + return -ERANGE;
4771 +
4772 + *hash_ip = ip;
4773 + ABCD(a, b, c, d, hash_ip);
4774 + DELIP_WALK(map, a, btree);
4775 + DELIP_WALK(btree, b, ctree);
4776 + DELIP_WALK(ctree, c, dtree);
4777 +
4778 + if (dtree->expires[d]) {
4779 + dtree->expires[d] = 0;
4780 + map->elements--;
4781 + return 0;
4782 + }
4783 + return -EEXIST;
4784 +}
4785 +
4786 +static int
4787 +delip(struct ip_set *set, const void *data, size_t size,
4788 + ip_set_ip_t *hash_ip)
4789 +{
4790 + struct ip_set_req_iptree *req =
4791 + (struct ip_set_req_iptree *) data;
4792 +
4793 + if (size != sizeof(struct ip_set_req_iptree)) {
4794 + ip_set_printk("data length wrong (want %zu, have %zu)",
4795 + sizeof(struct ip_set_req_iptree),
4796 + size);
4797 + return -EINVAL;
4798 + }
4799 + return __delip(set, req->ip, hash_ip);
4800 +}
4801 +
4802 +static int
4803 +delip_kernel(struct ip_set *set,
4804 + const struct sk_buff *skb,
4805 + ip_set_ip_t *hash_ip,
4806 + const u_int32_t *flags,
4807 + unsigned char index)
4808 +{
4809 + return __delip(set,
4810 + ntohl(flags[index] & IPSET_SRC
4811 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
4812 + ? ip_hdr(skb)->saddr
4813 + : ip_hdr(skb)->daddr),
4814 +#else
4815 + ? skb->nh.iph->saddr
4816 + : skb->nh.iph->daddr),
4817 +#endif
4818 + hash_ip);
4819 +}
4820 +
4821 +#define LOOP_WALK_BEGIN(map, i, branch) \
4822 + for (i = 0; i < 256; i++) { \
4823 + if (!(map)->tree[i]) \
4824 + continue; \
4825 + branch = (map)->tree[i]
4826 +
4827 +#define LOOP_WALK_END }
4828 +
4829 +static void ip_tree_gc(unsigned long ul_set)
4830 +{
4831 + struct ip_set *set = (void *) ul_set;
4832 + struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
4833 + struct ip_set_iptreeb *btree;
4834 + struct ip_set_iptreec *ctree;
4835 + struct ip_set_iptreed *dtree;
4836 + unsigned int a,b,c,d;
4837 + unsigned char i,j,k;
4838 +
4839 + i = j = k = 0;
4840 + DP("gc: %s", set->name);
4841 + write_lock_bh(&set->lock);
4842 + LOOP_WALK_BEGIN(map, a, btree);
4843 + LOOP_WALK_BEGIN(btree, b, ctree);
4844 + LOOP_WALK_BEGIN(ctree, c, dtree);
4845 + for (d = 0; d < 256; d++) {
4846 + if (dtree->expires[d]) {
4847 + DP("gc: %u %u %u %u: expires %lu jiffies %lu",
4848 + a, b, c, d,
4849 + dtree->expires[d], jiffies);
4850 + if (map->timeout
4851 + && time_before(dtree->expires[d], jiffies)) {
4852 + dtree->expires[d] = 0;
4853 + map->elements--;
4854 + } else
4855 + k = 1;
4856 + }
4857 + }
4858 + if (k == 0) {
4859 + DP("gc: %s: leaf %u %u %u empty",
4860 + set->name, a, b, c);
4861 + kmem_cache_free(leaf_cachep, dtree);
4862 + ctree->tree[c] = NULL;
4863 + } else {
4864 + DP("gc: %s: leaf %u %u %u not empty",
4865 + set->name, a, b, c);
4866 + j = 1;
4867 + k = 0;
4868 + }
4869 + LOOP_WALK_END;
4870 + if (j == 0) {
4871 + DP("gc: %s: branch %u %u empty",
4872 + set->name, a, b);
4873 + kmem_cache_free(branch_cachep, ctree);
4874 + btree->tree[b] = NULL;
4875 + } else {
4876 + DP("gc: %s: branch %u %u not empty",
4877 + set->name, a, b);
4878 + i = 1;
4879 + j = k = 0;
4880 + }
4881 + LOOP_WALK_END;
4882 + if (i == 0) {
4883 + DP("gc: %s: branch %u empty",
4884 + set->name, a);
4885 + kmem_cache_free(branch_cachep, btree);
4886 + map->tree[a] = NULL;
4887 + } else {
4888 + DP("gc: %s: branch %u not empty",
4889 + set->name, a);
4890 + i = j = k = 0;
4891 + }
4892 + LOOP_WALK_END;
4893 + write_unlock_bh(&set->lock);
4894 +
4895 + map->gc.expires = jiffies + map->gc_interval * HZ;
4896 + add_timer(&map->gc);
4897 +}
4898 +
4899 +static inline void init_gc_timer(struct ip_set *set)
4900 +{
4901 + struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
4902 +
4903 + /* Even if there is no timeout for the entries,
4904 + * we still have to call gc because delete
4905 +	 * does not clean up empty branches */
4906 + map->gc_interval = IPTREE_GC_TIME;
4907 + init_timer(&map->gc);
4908 + map->gc.data = (unsigned long) set;
4909 + map->gc.function = ip_tree_gc;
4910 + map->gc.expires = jiffies + map->gc_interval * HZ;
4911 + add_timer(&map->gc);
4912 +}
4913 +
4914 +static int create(struct ip_set *set, const void *data, size_t size)
4915 +{
4916 + struct ip_set_req_iptree_create *req =
4917 + (struct ip_set_req_iptree_create *) data;
4918 + struct ip_set_iptree *map;
4919 +
4920 + if (size != sizeof(struct ip_set_req_iptree_create)) {
4921 + ip_set_printk("data length wrong (want %zu, have %zu)",
4922 + sizeof(struct ip_set_req_iptree_create),
4923 + size);
4924 + return -EINVAL;
4925 + }
4926 +
4927 + map = kmalloc(sizeof(struct ip_set_iptree), GFP_KERNEL);
4928 + if (!map) {
4929 + DP("out of memory for %d bytes",
4930 + sizeof(struct ip_set_iptree));
4931 + return -ENOMEM;
4932 + }
4933 + memset(map, 0, sizeof(*map));
4934 + map->timeout = req->timeout;
4935 + map->elements = 0;
4936 + set->data = map;
4937 +
4938 + init_gc_timer(set);
4939 +
4940 + return 0;
4941 +}
4942 +
4943 +static void __flush(struct ip_set_iptree *map)
4944 +{
4945 + struct ip_set_iptreeb *btree;
4946 + struct ip_set_iptreec *ctree;
4947 + struct ip_set_iptreed *dtree;
4948 + unsigned int a,b,c;
4949 +
4950 + LOOP_WALK_BEGIN(map, a, btree);
4951 + LOOP_WALK_BEGIN(btree, b, ctree);
4952 + LOOP_WALK_BEGIN(ctree, c, dtree);
4953 + kmem_cache_free(leaf_cachep, dtree);
4954 + LOOP_WALK_END;
4955 + kmem_cache_free(branch_cachep, ctree);
4956 + LOOP_WALK_END;
4957 + kmem_cache_free(branch_cachep, btree);
4958 + LOOP_WALK_END;
4959 + map->elements = 0;
4960 +}
4961 +
4962 +static void destroy(struct ip_set *set)
4963 +{
4964 + struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
4965 +
4966 + /* gc might be running */
4967 + while (!del_timer(&map->gc))
4968 + msleep(IPTREE_DESTROY_SLEEP);
4969 + __flush(map);
4970 + kfree(map);
4971 + set->data = NULL;
4972 +}
4973 +
4974 +static void flush(struct ip_set *set)
4975 +{
4976 + struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
4977 + unsigned int timeout = map->timeout;
4978 +
4979 + /* gc might be running */
4980 + while (!del_timer(&map->gc))
4981 + msleep(IPTREE_DESTROY_SLEEP);
4982 + __flush(map);
4983 + memset(map, 0, sizeof(*map));
4984 + map->timeout = timeout;
4985 +
4986 + init_gc_timer(set);
4987 +}
4988 +
4989 +static void list_header(const struct ip_set *set, void *data)
4990 +{
4991 + struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
4992 + struct ip_set_req_iptree_create *header =
4993 + (struct ip_set_req_iptree_create *) data;
4994 +
4995 + header->timeout = map->timeout;
4996 +}
4997 +
4998 +static int list_members_size(const struct ip_set *set)
4999 +{
5000 + struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
5001 + struct ip_set_iptreeb *btree;
5002 + struct ip_set_iptreec *ctree;
5003 + struct ip_set_iptreed *dtree;
5004 + unsigned int a,b,c,d;
5005 + unsigned int count = 0;
5006 +
5007 + LOOP_WALK_BEGIN(map, a, btree);
5008 + LOOP_WALK_BEGIN(btree, b, ctree);
5009 + LOOP_WALK_BEGIN(ctree, c, dtree);
5010 + for (d = 0; d < 256; d++) {
5011 + if (dtree->expires[d]
5012 + && (!map->timeout || time_after(dtree->expires[d], jiffies)))
5013 + count++;
5014 + }
5015 + LOOP_WALK_END;
5016 + LOOP_WALK_END;
5017 + LOOP_WALK_END;
5018 +
5019 + DP("members %u", count);
5020 + return (count * sizeof(struct ip_set_req_iptree));
5021 +}
5022 +
5023 +static void list_members(const struct ip_set *set, void *data)
5024 +{
5025 + struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
5026 + struct ip_set_iptreeb *btree;
5027 + struct ip_set_iptreec *ctree;
5028 + struct ip_set_iptreed *dtree;
5029 + unsigned int a,b,c,d;
5030 + size_t offset = 0;
5031 + struct ip_set_req_iptree *entry;
5032 +
5033 + LOOP_WALK_BEGIN(map, a, btree);
5034 + LOOP_WALK_BEGIN(btree, b, ctree);
5035 + LOOP_WALK_BEGIN(ctree, c, dtree);
5036 + for (d = 0; d < 256; d++) {
5037 + if (dtree->expires[d]
5038 + && (!map->timeout || time_after(dtree->expires[d], jiffies))) {
5039 + entry = (struct ip_set_req_iptree *)(data + offset);
5040 + entry->ip = ((a << 24) | (b << 16) | (c << 8) | d);
5041 + entry->timeout = !map->timeout ? 0
5042 + : (dtree->expires[d] - jiffies)/HZ;
5043 + offset += sizeof(struct ip_set_req_iptree);
5044 + }
5045 + }
5046 + LOOP_WALK_END;
5047 + LOOP_WALK_END;
5048 + LOOP_WALK_END;
5049 +}
5050 +
5051 +static struct ip_set_type ip_set_iptree = {
5052 + .typename = SETTYPE_NAME,
5053 + .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
5054 + .protocol_version = IP_SET_PROTOCOL_VERSION,
5055 + .create = &create,
5056 + .destroy = &destroy,
5057 + .flush = &flush,
5058 + .reqsize = sizeof(struct ip_set_req_iptree),
5059 + .addip = &addip,
5060 + .addip_kernel = &addip_kernel,
5061 + .delip = &delip,
5062 + .delip_kernel = &delip_kernel,
5063 + .testip = &testip,
5064 + .testip_kernel = &testip_kernel,
5065 + .header_size = sizeof(struct ip_set_req_iptree_create),
5066 + .list_header = &list_header,
5067 + .list_members_size = &list_members_size,
5068 + .list_members = &list_members,
5069 + .me = THIS_MODULE,
5070 +};
5071 +
5072 +MODULE_LICENSE("GPL");
5073 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
5074 +MODULE_DESCRIPTION("iptree type of IP sets");
5075 +module_param(limit, int, 0600);
5076 +MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
5077 +
5078 +static int __init ip_set_iptree_init(void)
5079 +{
5080 + int ret;
5081 +
5082 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
5083 + branch_cachep = kmem_cache_create("ip_set_iptreeb",
5084 + sizeof(struct ip_set_iptreeb),
5085 + 0, 0, NULL);
5086 +#else
5087 + branch_cachep = kmem_cache_create("ip_set_iptreeb",
5088 + sizeof(struct ip_set_iptreeb),
5089 + 0, 0, NULL, NULL);
5090 +#endif
5091 + if (!branch_cachep) {
5092 + printk(KERN_ERR "Unable to create ip_set_iptreeb slab cache\n");
5093 + ret = -ENOMEM;
5094 + goto out;
5095 + }
5096 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
5097 + leaf_cachep = kmem_cache_create("ip_set_iptreed",
5098 + sizeof(struct ip_set_iptreed),
5099 + 0, 0, NULL);
5100 +#else
5101 + leaf_cachep = kmem_cache_create("ip_set_iptreed",
5102 + sizeof(struct ip_set_iptreed),
5103 + 0, 0, NULL, NULL);
5104 +#endif
5105 + if (!leaf_cachep) {
5106 + printk(KERN_ERR "Unable to create ip_set_iptreed slab cache\n");
5107 + ret = -ENOMEM;
5108 + goto free_branch;
5109 + }
5110 + ret = ip_set_register_set_type(&ip_set_iptree);
5111 + if (ret == 0)
5112 + goto out;
5113 +
5114 + kmem_cache_destroy(leaf_cachep);
5115 + free_branch:
5116 + kmem_cache_destroy(branch_cachep);
5117 + out:
5118 + return ret;
5119 +}
5120 +
5121 +static void __exit ip_set_iptree_fini(void)
5122 +{
5123 + /* FIXME: possible race with ip_set_create() */
5124 + ip_set_unregister_set_type(&ip_set_iptree);
5125 + kmem_cache_destroy(leaf_cachep);
5126 + kmem_cache_destroy(branch_cachep);
5127 +}
5128 +
5129 +module_init(ip_set_iptree_init);
5130 +module_exit(ip_set_iptree_fini);
5131 diff -Nru linux-2.6.23/net/ipv4/netfilter/ip_set_iptreemap.c linux-2.6.23.pom2patch.set/net/ipv4/netfilter/ip_set_iptreemap.c
5132 --- linux-2.6.23/net/ipv4/netfilter/ip_set_iptreemap.c 1970-01-01 01:00:00.000000000 +0100
5133 +++ linux-2.6.23.pom2patch.set/net/ipv4/netfilter/ip_set_iptreemap.c 2007-10-12 11:52:37.000000000 +0200
5134 @@ -0,0 +1,829 @@
5135 +/* Copyright (C) 2007 Sven Wegener <sven.wegener@stealer.net>
5136 + *
5137 + * This program is free software; you can redistribute it and/or modify it
5138 + * under the terms of the GNU General Public License version 2 as published by
5139 + * the Free Software Foundation.
5140 + */
5141 +
5142 +/* This module implements the iptreemap ipset type. It uses bitmaps to
5143 + * represent every single IPv4 address as a single bit. The bitmaps are managed
5144 + * in a tree structure, where the first three octets of an address are used
5145 + * as an index to find the bitmap and the last octet is used as the bit number.
5146 + */
5147 +
5148 +#include <linux/version.h>
5149 +#include <linux/module.h>
5150 +#include <linux/ip.h>
5151 +#include <linux/skbuff.h>
5152 +#include <linux/slab.h>
5153 +#include <linux/delay.h>
5154 +#include <linux/netfilter_ipv4/ip_tables.h>
5155 +#include <linux/netfilter_ipv4/ip_set.h>
5156 +#include <linux/errno.h>
5157 +#include <asm/uaccess.h>
5158 +#include <asm/bitops.h>
5159 +#include <linux/spinlock.h>
5160 +
5161 +#include <linux/netfilter_ipv4/ip_set_iptreemap.h>
5162 +
5163 +#define IPTREEMAP_DEFAULT_GC_TIME (5 * 60)
5164 +#define IPTREEMAP_DESTROY_SLEEP (100)
5165 +
5166 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
5167 +static struct kmem_cache *cachep_b;
5168 +static struct kmem_cache *cachep_c;
5169 +static struct kmem_cache *cachep_d;
5170 +#else
5171 +static kmem_cache_t *cachep_b;
5172 +static kmem_cache_t *cachep_c;
5173 +static kmem_cache_t *cachep_d;
5174 +#endif
5175 +
5176 +static struct ip_set_iptreemap_d *fullbitmap_d;
5177 +static struct ip_set_iptreemap_c *fullbitmap_c;
5178 +static struct ip_set_iptreemap_b *fullbitmap_b;
5179 +
5180 +#if defined(__LITTLE_ENDIAN)
5181 +#define ABCD(a, b, c, d, addr) \
5182 + do { \
5183 + a = ((unsigned char *)addr)[3]; \
5184 + b = ((unsigned char *)addr)[2]; \
5185 + c = ((unsigned char *)addr)[1]; \
5186 + d = ((unsigned char *)addr)[0]; \
5187 + } while (0)
5188 +#elif defined(__BIG_ENDIAN)
5189 +#define ABCD(a,b,c,d,addrp) do { \
5190 + a = ((unsigned char *)addrp)[0]; \
5191 + b = ((unsigned char *)addrp)[1]; \
5192 + c = ((unsigned char *)addrp)[2]; \
5193 + d = ((unsigned char *)addrp)[3]; \
5194 +} while (0)
5195 +#else
5196 +#error "Please fix asm/byteorder.h"
5197 +#endif /* __LITTLE_ENDIAN */
5198 +
5199 +#define TESTIP_WALK(map, elem, branch, full) \
5200 + do { \
5201 + branch = (map)->tree[elem]; \
5202 + if (!branch) \
5203 + return 0; \
5204 + else if (branch == full) \
5205 + return 1; \
5206 + } while (0)
5207 +
5208 +#define ADDIP_WALK(map, elem, branch, type, cachep, full) \
5209 + do { \
5210 + branch = (map)->tree[elem]; \
5211 + if (!branch) { \
5212 + branch = (type *) kmem_cache_alloc(cachep, GFP_ATOMIC); \
5213 + if (!branch) \
5214 + return -ENOMEM; \
5215 + memset(branch, 0, sizeof(*branch)); \
5216 + (map)->tree[elem] = branch; \
5217 + } else if (branch == full) { \
5218 + return -EEXIST; \
5219 + } \
5220 + } while (0)
5221 +
5222 +#define ADDIP_RANGE_LOOP(map, a, a1, a2, hint, branch, full, cachep, free) \
5223 + for (a = a1; a <= a2; a++) { \
5224 + branch = (map)->tree[a]; \
5225 + if (branch != full) { \
5226 + if ((a > a1 && a < a2) || (hint)) { \
5227 + if (branch) \
5228 + free(branch); \
5229 + (map)->tree[a] = full; \
5230 + continue; \
5231 + } else if (!branch) { \
5232 + branch = kmem_cache_alloc(cachep, GFP_ATOMIC); \
5233 + if (!branch) \
5234 + return -ENOMEM; \
5235 + memset(branch, 0, sizeof(*branch)); \
5236 + (map)->tree[a] = branch; \
5237 + }
5238 +
5239 +#define ADDIP_RANGE_LOOP_END() \
5240 + } \
5241 + }
5242 +
5243 +#define DELIP_WALK(map, elem, branch, cachep, full, flags) \
5244 + do { \
5245 + branch = (map)->tree[elem]; \
5246 + if (!branch) { \
5247 + return -EEXIST; \
5248 + } else if (branch == full) { \
5249 + branch = kmem_cache_alloc(cachep, flags); \
5250 + if (!branch) \
5251 + return -ENOMEM; \
5252 + memcpy(branch, full, sizeof(*full)); \
5253 + (map)->tree[elem] = branch; \
5254 + } \
5255 + } while (0)
5256 +
5257 +#define DELIP_RANGE_LOOP(map, a, a1, a2, hint, branch, full, cachep, free, flags) \
5258 + for (a = a1; a <= a2; a++) { \
5259 + branch = (map)->tree[a]; \
5260 + if (branch) { \
5261 + if ((a > a1 && a < a2) || (hint)) { \
5262 + if (branch != full) \
5263 + free(branch); \
5264 + (map)->tree[a] = NULL; \
5265 + continue; \
5266 + } else if (branch == full) { \
5267 + branch = kmem_cache_alloc(cachep, flags); \
5268 + if (!branch) \
5269 + return -ENOMEM; \
5270 + memcpy(branch, full, sizeof(*branch)); \
5271 + (map)->tree[a] = branch; \
5272 + }
5273 +
5274 +#define DELIP_RANGE_LOOP_END() \
5275 + } \
5276 + }
5277 +
5278 +#define LOOP_WALK_BEGIN(map, i, branch) \
5279 + for (i = 0; i < 256; i++) { \
5280 + branch = (map)->tree[i]; \
5281 + if (likely(!branch)) \
5282 + continue;
5283 +
5284 +#define LOOP_WALK_END() \
5285 + }
5286 +
5287 +#define LOOP_WALK_BEGIN_GC(map, i, branch, full, cachep, count) \
5288 + count = -256; \
5289 + for (i = 0; i < 256; i++) { \
5290 + branch = (map)->tree[i]; \
5291 + if (likely(!branch)) \
5292 + continue; \
5293 + count++; \
5294 + if (branch == full) { \
5295 + count++; \
5296 + continue; \
5297 + }
5298 +
5299 +#define LOOP_WALK_END_GC(map, i, branch, full, cachep, count) \
5300 + if (-256 == count) { \
5301 + kmem_cache_free(cachep, branch); \
5302 + (map)->tree[i] = NULL; \
5303 + } else if (256 == count) { \
5304 + kmem_cache_free(cachep, branch); \
5305 + (map)->tree[i] = full; \
5306 + } \
5307 + }
5308 +
5309 +#define LOOP_WALK_BEGIN_COUNT(map, i, branch, inrange, count) \
5310 + for (i = 0; i < 256; i++) { \
5311 + if (!(map)->tree[i]) { \
5312 + if (inrange) { \
5313 + count++; \
5314 + inrange = 0; \
5315 + } \
5316 + continue; \
5317 + } \
5318 + branch = (map)->tree[i];
5319 +
5320 +#define LOOP_WALK_END_COUNT() \
5321 + }
5322 +
5323 +#define MIN(a, b) (a < b ? a : b)
5324 +#define MAX(a, b) (a > b ? a : b)
5325 +
5326 +#define GETVALUE1(a, a1, b1, r) \
5327 + (a == a1 ? b1 : r)
5328 +
5329 +#define GETVALUE2(a, b, a1, b1, c1, r) \
5330 + (a == a1 && b == b1 ? c1 : r)
5331 +
5332 +#define GETVALUE3(a, b, c, a1, b1, c1, d1, r) \
5333 + (a == a1 && b == b1 && c == c1 ? d1 : r)
5334 +
5335 +#define CHECK1(a, a1, a2, b1, b2, c1, c2, d1, d2) \
5336 + ( \
5337 + GETVALUE1(a, a1, b1, 0) == 0 \
5338 + && GETVALUE1(a, a2, b2, 255) == 255 \
5339 + && c1 == 0 \
5340 + && c2 == 255 \
5341 + && d1 == 0 \
5342 + && d2 == 255 \
5343 + )
5344 +
5345 +#define CHECK2(a, b, a1, a2, b1, b2, c1, c2, d1, d2) \
5346 + ( \
5347 + GETVALUE2(a, b, a1, b1, c1, 0) == 0 \
5348 + && GETVALUE2(a, b, a2, b2, c2, 255) == 255 \
5349 + && d1 == 0 \
5350 + && d2 == 255 \
5351 + )
5352 +
5353 +#define CHECK3(a, b, c, a1, a2, b1, b2, c1, c2, d1, d2) \
5354 + ( \
5355 + GETVALUE3(a, b, c, a1, b1, c1, d1, 0) == 0 \
5356 + && GETVALUE3(a, b, c, a2, b2, c2, d2, 255) == 255 \
5357 + )
5358 +
5359 +
5360 +static inline void
5361 +free_d(struct ip_set_iptreemap_d *map)
5362 +{
5363 + kmem_cache_free(cachep_d, map);
5364 +}
5365 +
5366 +static inline void
5367 +free_c(struct ip_set_iptreemap_c *map)
5368 +{
5369 + struct ip_set_iptreemap_d *dtree;
5370 + unsigned int i;
5371 +
5372 + LOOP_WALK_BEGIN(map, i, dtree) {
5373 + if (dtree != fullbitmap_d)
5374 + free_d(dtree);
5375 + } LOOP_WALK_END();
5376 +
5377 + kmem_cache_free(cachep_c, map);
5378 +}
5379 +
5380 +static inline void
5381 +free_b(struct ip_set_iptreemap_b *map)
5382 +{
5383 + struct ip_set_iptreemap_c *ctree;
5384 + unsigned int i;
5385 +
5386 + LOOP_WALK_BEGIN(map, i, ctree) {
5387 + if (ctree != fullbitmap_c)
5388 + free_c(ctree);
5389 + } LOOP_WALK_END();
5390 +
5391 + kmem_cache_free(cachep_b, map);
5392 +}
5393 +
5394 +static inline int
5395 +__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
5396 +{
5397 + struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
5398 + struct ip_set_iptreemap_b *btree;
5399 + struct ip_set_iptreemap_c *ctree;
5400 + struct ip_set_iptreemap_d *dtree;
5401 + unsigned char a, b, c, d;
5402 +
5403 + *hash_ip = ip;
5404 +
5405 + ABCD(a, b, c, d, hash_ip);
5406 +
5407 + TESTIP_WALK(map, a, btree, fullbitmap_b);
5408 + TESTIP_WALK(btree, b, ctree, fullbitmap_c);
5409 + TESTIP_WALK(ctree, c, dtree, fullbitmap_d);
5410 +
5411 + return !!test_bit(d, (void *) dtree->bitmap);
5412 +}
5413 +
5414 +static int
5415 +testip(struct ip_set *set, const void *data, size_t size, ip_set_ip_t *hash_ip)
5416 +{
5417 + struct ip_set_req_iptreemap *req = (struct ip_set_req_iptreemap *) data;
5418 +
5419 + if (size != sizeof(struct ip_set_req_iptreemap)) {
5420 + ip_set_printk("data length wrong (want %zu, have %zu)", sizeof(struct ip_set_req_iptreemap), size);
5421 + return -EINVAL;
5422 + }
5423 +
5424 + return __testip(set, req->start, hash_ip);
5425 +}
5426 +
5427 +static int
5428 +testip_kernel(struct ip_set *set, const struct sk_buff *skb, ip_set_ip_t *hash_ip, const u_int32_t *flags, unsigned char index)
5429 +{
5430 + int res;
5431 +
5432 + res = __testip(set,
5433 + ntohl(flags[index] & IPSET_SRC
5434 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
5435 + ? ip_hdr(skb)->saddr
5436 + : ip_hdr(skb)->daddr),
5437 +#else
5438 + ? skb->nh.iph->saddr
5439 + : skb->nh.iph->daddr),
5440 +#endif
5441 + hash_ip);
5442 +
5443 + return (res < 0 ? 0 : res);
5444 +}
5445 +
5446 +static inline int
5447 +__addip_single(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
5448 +{
5449 + struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
5450 + struct ip_set_iptreemap_b *btree;
5451 + struct ip_set_iptreemap_c *ctree;
5452 + struct ip_set_iptreemap_d *dtree;
5453 + unsigned char a, b, c, d;
5454 +
5455 + *hash_ip = ip;
5456 +
5457 + ABCD(a, b, c, d, hash_ip);
5458 +
5459 + ADDIP_WALK(map, a, btree, struct ip_set_iptreemap_b, cachep_b, fullbitmap_b);
5460 + ADDIP_WALK(btree, b, ctree, struct ip_set_iptreemap_c, cachep_c, fullbitmap_c);
5461 + ADDIP_WALK(ctree, c, dtree, struct ip_set_iptreemap_d, cachep_d, fullbitmap_d);
5462 +
5463 + if (test_and_set_bit(d, (void *) dtree->bitmap))
5464 + return -EEXIST;
5465 +
5466 + set_bit(b, (void *) btree->dirty);
5467 +
5468 + return 0;
5469 +}
5470 +
5471 +static inline int
5472 +__addip_range(struct ip_set *set, ip_set_ip_t start, ip_set_ip_t end, ip_set_ip_t *hash_ip)
5473 +{
5474 + struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
5475 + struct ip_set_iptreemap_b *btree;
5476 + struct ip_set_iptreemap_c *ctree;
5477 + struct ip_set_iptreemap_d *dtree;
5478 + unsigned int a, b, c, d;
5479 + unsigned char a1, b1, c1, d1;
5480 + unsigned char a2, b2, c2, d2;
5481 +
5482 + if (start == end)
5483 + return __addip_single(set, start, hash_ip);
5484 +
5485 + *hash_ip = start;
5486 +
5487 + ABCD(a1, b1, c1, d1, &start);
5488 + ABCD(a2, b2, c2, d2, &end);
5489 +
5490 + /* This is sooo ugly... */
5491 + ADDIP_RANGE_LOOP(map, a, a1, a2, CHECK1(a, a1, a2, b1, b2, c1, c2, d1, d2), btree, fullbitmap_b, cachep_b, free_b) {
5492 + ADDIP_RANGE_LOOP(btree, b, GETVALUE1(a, a1, b1, 0), GETVALUE1(a, a2, b2, 255), CHECK2(a, b, a1, a2, b1, b2, c1, c2, d1, d2), ctree, fullbitmap_c, cachep_c, free_c) {
5493 + ADDIP_RANGE_LOOP(ctree, c, GETVALUE2(a, b, a1, b1, c1, 0), GETVALUE2(a, b, a2, b2, c2, 255), CHECK3(a, b, c, a1, a2, b1, b2, c1, c2, d1, d2), dtree, fullbitmap_d, cachep_d, free_d) {
5494 + for (d = GETVALUE3(a, b, c, a1, b1, c1, d1, 0); d <= GETVALUE3(a, b, c, a2, b2, c2, d2, 255); d++)
5495 + set_bit(d, (void *) dtree->bitmap);
5496 + set_bit(b, (void *) btree->dirty);
5497 + } ADDIP_RANGE_LOOP_END();
5498 + } ADDIP_RANGE_LOOP_END();
5499 + } ADDIP_RANGE_LOOP_END();
5500 +
5501 + return 0;
5502 +}
5503 +
5504 +static int
5505 +addip(struct ip_set *set, const void *data, size_t size, ip_set_ip_t *hash_ip)
5506 +{
5507 + struct ip_set_req_iptreemap *req = (struct ip_set_req_iptreemap *) data;
5508 +
5509 + if (size != sizeof(struct ip_set_req_iptreemap)) {
5510 + ip_set_printk("data length wrong (want %zu, have %zu)", sizeof(struct ip_set_req_iptreemap), size);
5511 + return -EINVAL;
5512 + }
5513 +
5514 + return __addip_range(set, MIN(req->start, req->end), MAX(req->start, req->end), hash_ip);
5515 +}
5516 +
5517 +static int
5518 +addip_kernel(struct ip_set *set, const struct sk_buff *skb, ip_set_ip_t *hash_ip, const u_int32_t *flags, unsigned char index)
5519 +{
5520 +
5521 + return __addip_single(set,
5522 + ntohl(flags[index] & IPSET_SRC
5523 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
5524 + ? ip_hdr(skb)->saddr
5525 + : ip_hdr(skb)->daddr),
5526 +#else
5527 + ? skb->nh.iph->saddr
5528 + : skb->nh.iph->daddr),
5529 +#endif
5530 + hash_ip);
5531 +}
5532 +
5533 +static inline int
5534 +__delip_single(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip, unsigned int __nocast flags)
5535 +{
5536 + struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
5537 + struct ip_set_iptreemap_b *btree;
5538 + struct ip_set_iptreemap_c *ctree;
5539 + struct ip_set_iptreemap_d *dtree;
5540 +	unsigned char a, b, c, d;
5541 +
5542 + *hash_ip = ip;
5543 +
5544 + ABCD(a, b, c, d, hash_ip);
5545 +
5546 + DELIP_WALK(map, a, btree, cachep_b, fullbitmap_b, flags);
5547 + DELIP_WALK(btree, b, ctree, cachep_c, fullbitmap_c, flags);
5548 + DELIP_WALK(ctree, c, dtree, cachep_d, fullbitmap_d, flags);
5549 +
5550 + if (!test_and_clear_bit(d, (void *) dtree->bitmap))
5551 + return -EEXIST;
5552 +
5553 + set_bit(b, (void *) btree->dirty);
5554 +
5555 + return 0;
5556 +}
5557 +
5558 +static inline int
5559 +__delip_range(struct ip_set *set, ip_set_ip_t start, ip_set_ip_t end, ip_set_ip_t *hash_ip, unsigned int __nocast flags)
5560 +{
5561 + struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
5562 + struct ip_set_iptreemap_b *btree;
5563 + struct ip_set_iptreemap_c *ctree;
5564 + struct ip_set_iptreemap_d *dtree;
5565 + unsigned int a, b, c, d;
5566 + unsigned char a1, b1, c1, d1;
5567 + unsigned char a2, b2, c2, d2;
5568 +
5569 + if (start == end)
5570 + return __delip_single(set, start, hash_ip, flags);
5571 +
5572 + *hash_ip = start;
5573 +
5574 + ABCD(a1, b1, c1, d1, &start);
5575 + ABCD(a2, b2, c2, d2, &end);
5576 +
5577 + /* This is sooo ugly... */
5578 + DELIP_RANGE_LOOP(map, a, a1, a2, CHECK1(a, a1, a2, b1, b2, c1, c2, d1, d2), btree, fullbitmap_b, cachep_b, free_b, flags) {
5579 + DELIP_RANGE_LOOP(btree, b, GETVALUE1(a, a1, b1, 0), GETVALUE1(a, a2, b2, 255), CHECK2(a, b, a1, a2, b1, b2, c1, c2, d1, d2), ctree, fullbitmap_c, cachep_c, free_c, flags) {
5580 + DELIP_RANGE_LOOP(ctree, c, GETVALUE2(a, b, a1, b1, c1, 0), GETVALUE2(a, b, a2, b2, c2, 255), CHECK3(a, b, c, a1, a2, b1, b2, c1, c2, d1, d2), dtree, fullbitmap_d, cachep_d, free_d, flags) {
5581 + for (d = GETVALUE3(a, b, c, a1, b1, c1, d1, 0); d <= GETVALUE3(a, b, c, a2, b2, c2, d2, 255); d++)
5582 + clear_bit(d, (void *) dtree->bitmap);
5583 + set_bit(b, (void *) btree->dirty);
5584 + } DELIP_RANGE_LOOP_END();
5585 + } DELIP_RANGE_LOOP_END();
5586 + } DELIP_RANGE_LOOP_END();
5587 +
5588 + return 0;
5589 +}
5590 +
5591 +static int
5592 +delip(struct ip_set *set, const void *data, size_t size, ip_set_ip_t *hash_ip)
5593 +{
5594 + struct ip_set_req_iptreemap *req = (struct ip_set_req_iptreemap *) data;
5595 +
5596 + if (size != sizeof(struct ip_set_req_iptreemap)) {
5597 + ip_set_printk("data length wrong (want %zu, have %zu)", sizeof(struct ip_set_req_iptreemap), size);
5598 + return -EINVAL;
5599 + }
5600 +
5601 + return __delip_range(set, MIN(req->start, req->end), MAX(req->start, req->end), hash_ip, GFP_KERNEL);
5602 +}
5603 +
5604 +static int
5605 +delip_kernel(struct ip_set *set, const struct sk_buff *skb, ip_set_ip_t *hash_ip, const u_int32_t *flags, unsigned char index)
5606 +{
5607 + return __delip_single(set,
5608 + ntohl(flags[index] & IPSET_SRC
5609 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
5610 + ? ip_hdr(skb)->saddr
5611 + : ip_hdr(skb)->daddr),
5612 +#else
5613 + ? skb->nh.iph->saddr
5614 + : skb->nh.iph->daddr),
5615 +#endif
5616 + hash_ip,
5617 + GFP_ATOMIC);
5618 +}
5619 +
5620 +/* Check the status of the bitmap
5621 + * -1 == all bits cleared
5622 + * 1 == all bits set
5623 + * 0 == anything else
5624 + */
5625 +static inline int
5626 +bitmap_status(struct ip_set_iptreemap_d *dtree)
5627 +{
5628 + unsigned char first = dtree->bitmap[0];
5629 + int a;
5630 +
5631 + for (a = 1; a < 32; a++)
5632 + if (dtree->bitmap[a] != first)
5633 + return 0;
5634 +
5635 + return (first == 0 ? -1 : (first == 255 ? 1 : 0));
5636 +}
5637 +
5638 +static void
5639 +gc(unsigned long addr)
5640 +{
5641 + struct ip_set *set = (struct ip_set *) addr;
5642 + struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
5643 + struct ip_set_iptreemap_b *btree;
5644 + struct ip_set_iptreemap_c *ctree;
5645 + struct ip_set_iptreemap_d *dtree;
5646 + unsigned int a, b, c;
5647 + int i, j, k;
5648 +
5649 + write_lock_bh(&set->lock);
5650 +
5651 + LOOP_WALK_BEGIN_GC(map, a, btree, fullbitmap_b, cachep_b, i) {
5652 + LOOP_WALK_BEGIN_GC(btree, b, ctree, fullbitmap_c, cachep_c, j) {
5653 + if (!test_and_clear_bit(b, (void *) btree->dirty))
5654 + continue;
5655 + LOOP_WALK_BEGIN_GC(ctree, c, dtree, fullbitmap_d, cachep_d, k) {
5656 + switch (bitmap_status(dtree)) {
5657 + case -1:
5658 + kmem_cache_free(cachep_d, dtree);
5659 + ctree->tree[c] = NULL;
5660 + k--;
5661 + break;
5662 + case 1:
5663 + kmem_cache_free(cachep_d, dtree);
5664 + ctree->tree[c] = fullbitmap_d;
5665 + k++;
5666 + break;
5667 + }
5668 + } LOOP_WALK_END();
5669 + } LOOP_WALK_END_GC(btree, b, ctree, fullbitmap_c, cachep_c, k);
5670 + } LOOP_WALK_END_GC(map, a, btree, fullbitmap_b, cachep_b, j);
5671 +
5672 + write_unlock_bh(&set->lock);
5673 +
5674 + map->gc.expires = jiffies + map->gc_interval * HZ;
5675 + add_timer(&map->gc);
5676 +}
5677 +
5678 +static inline void
5679 +init_gc_timer(struct ip_set *set)
5680 +{
5681 + struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
5682 +
5683 + init_timer(&map->gc);
5684 + map->gc.data = (unsigned long) set;
5685 + map->gc.function = gc;
5686 + map->gc.expires = jiffies + map->gc_interval * HZ;
5687 + add_timer(&map->gc);
5688 +}
5689 +
5690 +static int create(struct ip_set *set, const void *data, size_t size)
5691 +{
5692 + struct ip_set_req_iptreemap_create *req = (struct ip_set_req_iptreemap_create *) data;
5693 + struct ip_set_iptreemap *map;
5694 +
5695 + if (size != sizeof(struct ip_set_req_iptreemap_create)) {
5696 + ip_set_printk("data length wrong (want %zu, have %zu)", sizeof(struct ip_set_req_iptreemap_create), size);
5697 + return -EINVAL;
5698 + }
5699 +
5700 + map = kzalloc(sizeof(*map), GFP_KERNEL);
5701 + if (!map)
5702 + return -ENOMEM;
5703 +
5704 + map->gc_interval = req->gc_interval ? req->gc_interval : IPTREEMAP_DEFAULT_GC_TIME;
5705 + set->data = map;
5706 +
5707 + init_gc_timer(set);
5708 +
5709 + return 0;
5710 +}
5711 +
5712 +static inline void __flush(struct ip_set_iptreemap *map)
5713 +{
5714 + struct ip_set_iptreemap_b *btree;
5715 + unsigned int a;
5716 +
5717 + LOOP_WALK_BEGIN(map, a, btree);
5718 + if (btree != fullbitmap_b)
5719 + free_b(btree);
5720 + LOOP_WALK_END();
5721 +}
5722 +
5723 +static void destroy(struct ip_set *set)
5724 +{
5725 + struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
5726 +
5727 + while (!del_timer(&map->gc))
5728 + msleep(IPTREEMAP_DESTROY_SLEEP);
5729 +
5730 + __flush(map);
5731 + kfree(map);
5732 +
5733 + set->data = NULL;
5734 +}
5735 +
5736 +static void flush(struct ip_set *set)
5737 +{
5738 + struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
5739 +
5740 + while (!del_timer(&map->gc))
5741 + msleep(IPTREEMAP_DESTROY_SLEEP);
5742 +
5743 + __flush(map);
5744 +
5745 + memset(map, 0, sizeof(*map));
5746 +
5747 + init_gc_timer(set);
5748 +}
5749 +
5750 +static void list_header(const struct ip_set *set, void *data)
5751 +{
5752 + struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
5753 + struct ip_set_req_iptreemap_create *header = (struct ip_set_req_iptreemap_create *) data;
5754 +
5755 + header->gc_interval = map->gc_interval;
5756 +}
5757 +
5758 +static int list_members_size(const struct ip_set *set)
5759 +{
5760 + struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
5761 + struct ip_set_iptreemap_b *btree;
5762 + struct ip_set_iptreemap_c *ctree;
5763 + struct ip_set_iptreemap_d *dtree;
5764 + unsigned int a, b, c, d, inrange = 0, count = 0;
5765 +
5766 + LOOP_WALK_BEGIN_COUNT(map, a, btree, inrange, count) {
5767 + LOOP_WALK_BEGIN_COUNT(btree, b, ctree, inrange, count) {
5768 + LOOP_WALK_BEGIN_COUNT(ctree, c, dtree, inrange, count) {
5769 + for (d = 0; d < 256; d++) {
5770 + if (test_bit(d, (void *) dtree->bitmap)) {
5771 + inrange = 1;
5772 + } else if (inrange) {
5773 + count++;
5774 + inrange = 0;
5775 + }
5776 + }
5777 + } LOOP_WALK_END_COUNT();
5778 + } LOOP_WALK_END_COUNT();
5779 + } LOOP_WALK_END_COUNT();
5780 +
5781 + if (inrange)
5782 + count++;
5783 +
5784 + return (count * sizeof(struct ip_set_req_iptreemap));
5785 +}
5786 +
5787 +static inline size_t add_member(void *data, size_t offset, ip_set_ip_t start, ip_set_ip_t end)
5788 +{
5789 + struct ip_set_req_iptreemap *entry = (struct ip_set_req_iptreemap *) (data + offset);
5790 +
5791 + entry->start = start;
5792 + entry->end = end;
5793 +
5794 + return sizeof(*entry);
5795 +}
5796 +
5797 +static void list_members(const struct ip_set *set, void *data)
5798 +{
5799 + struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
5800 + struct ip_set_iptreemap_b *btree;
5801 + struct ip_set_iptreemap_c *ctree;
5802 + struct ip_set_iptreemap_d *dtree;
5803 + unsigned int a, b, c, d, inrange = 0;
5804 + size_t offset = 0;
5805 + ip_set_ip_t start = 0, end = 0, ip;
5806 +
5807 + LOOP_WALK_BEGIN(map, a, btree) {
5808 + LOOP_WALK_BEGIN(btree, b, ctree) {
5809 + LOOP_WALK_BEGIN(ctree, c, dtree) {
5810 + for (d = 0; d < 256; d++) {
5811 + if (test_bit(d, (void *) dtree->bitmap)) {
5812 + ip = ((a << 24) | (b << 16) | (c << 8) | d);
5813 + if (!inrange) {
5814 + inrange = 1;
5815 + start = ip;
5816 + } else if (end < ip - 1) {
5817 + offset += add_member(data, offset, start, end);
5818 + start = ip;
5819 + }
5820 + end = ip;
5821 + } else if (inrange) {
5822 + offset += add_member(data, offset, start, end);
5823 + inrange = 0;
5824 + }
5825 + }
5826 + } LOOP_WALK_END();
5827 + } LOOP_WALK_END();
5828 + } LOOP_WALK_END();
5829 +
5830 + if (inrange)
5831 + add_member(data, offset, start, end);
5832 +}
5833 +
5834 +static struct ip_set_type ip_set_iptreemap = {
5835 + .typename = SETTYPE_NAME,
5836 + .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
5837 + .protocol_version = IP_SET_PROTOCOL_VERSION,
5838 + .create = create,
5839 + .destroy = destroy,
5840 + .flush = flush,
5841 + .reqsize = sizeof(struct ip_set_req_iptreemap),
5842 + .addip = addip,
5843 + .addip_kernel = addip_kernel,
5844 + .delip = delip,
5845 + .delip_kernel = delip_kernel,
5846 + .testip = testip,
5847 + .testip_kernel = testip_kernel,
5848 + .header_size = sizeof(struct ip_set_req_iptreemap_create),
5849 + .list_header = list_header,
5850 + .list_members_size = list_members_size,
5851 + .list_members = list_members,
5852 + .me = THIS_MODULE,
5853 +};
5854 +
5855 +MODULE_LICENSE("GPL");
5856 +MODULE_AUTHOR("Sven Wegener <sven.wegener@stealer.net>");
5857 +MODULE_DESCRIPTION("iptreemap type of IP sets");
5858 +
5859 +static int __init ip_set_iptreemap_init(void)
5860 +{
5861 + int ret = -ENOMEM;
5862 + int a;
5863 +
5864 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
5865 + cachep_b = kmem_cache_create("ip_set_iptreemap_b",
5866 + sizeof(struct ip_set_iptreemap_b),
5867 + 0, 0, NULL);
5868 +#else
5869 + cachep_b = kmem_cache_create("ip_set_iptreemap_b",
5870 + sizeof(struct ip_set_iptreemap_b),
5871 + 0, 0, NULL, NULL);
5872 +#endif
5873 + if (!cachep_b) {
5874 + ip_set_printk("Unable to create ip_set_iptreemap_b slab cache");
5875 + goto out;
5876 + }
5877 +
5878 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
5879 + cachep_c = kmem_cache_create("ip_set_iptreemap_c",
5880 + sizeof(struct ip_set_iptreemap_c),
5881 + 0, 0, NULL);
5882 +#else
5883 + cachep_c = kmem_cache_create("ip_set_iptreemap_c",
5884 + sizeof(struct ip_set_iptreemap_c),
5885 + 0, 0, NULL, NULL);
5886 +#endif
5887 + if (!cachep_c) {
5888 + ip_set_printk("Unable to create ip_set_iptreemap_c slab cache");
5889 + goto outb;
5890 + }
5891 +
5892 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
5893 + cachep_d = kmem_cache_create("ip_set_iptreemap_d",
5894 + sizeof(struct ip_set_iptreemap_d),
5895 + 0, 0, NULL);
5896 +#else
5897 + cachep_d = kmem_cache_create("ip_set_iptreemap_d",
5898 + sizeof(struct ip_set_iptreemap_d),
5899 + 0, 0, NULL, NULL);
5900 +#endif
5901 + if (!cachep_d) {
5902 + ip_set_printk("Unable to create ip_set_iptreemap_d slab cache");
5903 + goto outc;
5904 + }
5905 +
5906 + fullbitmap_d = kmem_cache_alloc(cachep_d, GFP_KERNEL);
5907 + if (!fullbitmap_d)
5908 + goto outd;
5909 +
5910 + fullbitmap_c = kmem_cache_alloc(cachep_c, GFP_KERNEL);
5911 + if (!fullbitmap_c)
5912 + goto outbitmapd;
5913 +
5914 + fullbitmap_b = kmem_cache_alloc(cachep_b, GFP_KERNEL);
5915 + if (!fullbitmap_b)
5916 + goto outbitmapc;
5917 +
5918 + ret = ip_set_register_set_type(&ip_set_iptreemap);
5919 + if (0 > ret)
5920 + goto outbitmapb;
5921 +
5922 + /* Now init our global bitmaps */
5923 + memset(fullbitmap_d->bitmap, 0xff, sizeof(fullbitmap_d->bitmap));
5924 +
5925 + for (a = 0; a < 256; a++)
5926 + fullbitmap_c->tree[a] = fullbitmap_d;
5927 +
5928 + for (a = 0; a < 256; a++)
5929 + fullbitmap_b->tree[a] = fullbitmap_c;
5930 + memset(fullbitmap_b->dirty, 0, sizeof(fullbitmap_b->dirty));
5931 +
5932 + return 0;
5933 +
5934 +outbitmapb:
5935 + kmem_cache_free(cachep_b, fullbitmap_b);
5936 +outbitmapc:
5937 + kmem_cache_free(cachep_c, fullbitmap_c);
5938 +outbitmapd:
5939 + kmem_cache_free(cachep_d, fullbitmap_d);
5940 +outd:
5941 + kmem_cache_destroy(cachep_d);
5942 +outc:
5943 + kmem_cache_destroy(cachep_c);
5944 +outb:
5945 + kmem_cache_destroy(cachep_b);
5946 +out:
5947 +
5948 + return ret;
5949 +}
5950 +
5951 +static void __exit ip_set_iptreemap_fini(void)
5952 +{
5953 + ip_set_unregister_set_type(&ip_set_iptreemap);
5954 + kmem_cache_free(cachep_d, fullbitmap_d);
5955 + kmem_cache_free(cachep_c, fullbitmap_c);
5956 + kmem_cache_free(cachep_b, fullbitmap_b);
5957 + kmem_cache_destroy(cachep_d);
5958 + kmem_cache_destroy(cachep_c);
5959 + kmem_cache_destroy(cachep_b);
5960 +}
5961 +
5962 +module_init(ip_set_iptreemap_init);
5963 +module_exit(ip_set_iptreemap_fini);
5964 diff -Nru linux-2.6.23/net/ipv4/netfilter/ip_set_macipmap.c linux-2.6.23.pom2patch.set/net/ipv4/netfilter/ip_set_macipmap.c
5965 --- linux-2.6.23/net/ipv4/netfilter/ip_set_macipmap.c 1970-01-01 01:00:00.000000000 +0100
5966 +++ linux-2.6.23.pom2patch.set/net/ipv4/netfilter/ip_set_macipmap.c 2007-10-12 11:52:37.000000000 +0200
5967 @@ -0,0 +1,375 @@
5968 +/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
5969 + * Patrick Schaaf <bof@bof.de>
5970 + * Martin Josefsson <gandalf@wlug.westbo.se>
5971 + * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
5972 + *
5973 + * This program is free software; you can redistribute it and/or modify
5974 + * it under the terms of the GNU General Public License version 2 as
5975 + * published by the Free Software Foundation.
5976 + */
5977 +
5978 +/* Kernel module implementing an IP set type: the macipmap type */
5979 +
5980 +#include <linux/module.h>
5981 +#include <linux/ip.h>
5982 +#include <linux/skbuff.h>
5983 +#include <linux/version.h>
5984 +#include <linux/netfilter_ipv4/ip_tables.h>
5985 +#include <linux/netfilter_ipv4/ip_set.h>
5986 +#include <linux/errno.h>
5987 +#include <asm/uaccess.h>
5988 +#include <asm/bitops.h>
5989 +#include <linux/spinlock.h>
5990 +#include <linux/if_ether.h>
5991 +#include <linux/vmalloc.h>
5992 +
5993 +#include <linux/netfilter_ipv4/ip_set_malloc.h>
5994 +#include <linux/netfilter_ipv4/ip_set_macipmap.h>
5995 +
5996 +static int
5997 +testip(struct ip_set *set, const void *data, size_t size, ip_set_ip_t *hash_ip)
5998 +{
5999 + struct ip_set_macipmap *map = (struct ip_set_macipmap *) set->data;
6000 + struct ip_set_macip *table = (struct ip_set_macip *) map->members;
6001 + struct ip_set_req_macipmap *req = (struct ip_set_req_macipmap *) data;
6002 +
6003 + if (size != sizeof(struct ip_set_req_macipmap)) {
6004 + ip_set_printk("data length wrong (want %zu, have %zu)",
6005 + sizeof(struct ip_set_req_macipmap),
6006 + size);
6007 + return -EINVAL;
6008 + }
6009 +
6010 + if (req->ip < map->first_ip || req->ip > map->last_ip)
6011 + return -ERANGE;
6012 +
6013 + *hash_ip = req->ip;
6014 + DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u",
6015 + set->name, HIPQUAD(req->ip), HIPQUAD(*hash_ip));
6016 + if (test_bit(IPSET_MACIP_ISSET,
6017 + (void *) &table[req->ip - map->first_ip].flags)) {
6018 + return (memcmp(req->ethernet,
6019 + &table[req->ip - map->first_ip].ethernet,
6020 + ETH_ALEN) == 0);
6021 + } else {
6022 + return (map->flags & IPSET_MACIP_MATCHUNSET ? 1 : 0);
6023 + }
6024 +}
6025 +
6026 +static int
6027 +testip_kernel(struct ip_set *set,
6028 + const struct sk_buff *skb,
6029 + ip_set_ip_t *hash_ip,
6030 + const u_int32_t *flags,
6031 + unsigned char index)
6032 +{
6033 + struct ip_set_macipmap *map =
6034 + (struct ip_set_macipmap *) set->data;
6035 + struct ip_set_macip *table =
6036 + (struct ip_set_macip *) map->members;
6037 + ip_set_ip_t ip;
6038 +
6039 + ip = ntohl(flags[index] & IPSET_SRC
6040 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
6041 + ? ip_hdr(skb)->saddr
6042 + : ip_hdr(skb)->daddr);
6043 +#else
6044 + ? skb->nh.iph->saddr
6045 + : skb->nh.iph->daddr);
6046 +#endif
6047 +
6048 + if (ip < map->first_ip || ip > map->last_ip)
6049 + return 0;
6050 +
6051 + *hash_ip = ip;
6052 + DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u",
6053 + set->name, HIPQUAD(ip), HIPQUAD(*hash_ip));
6054 + if (test_bit(IPSET_MACIP_ISSET,
6055 + (void *) &table[ip - map->first_ip].flags)) {
6056 + /* Is mac pointer valid?
6057 + * If so, compare... */
6058 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
6059 + return (skb_mac_header(skb) >= skb->head
6060 + && (skb_mac_header(skb) + ETH_HLEN) <= skb->data
6061 +#else
6062 + return (skb->mac.raw >= skb->head
6063 + && (skb->mac.raw + ETH_HLEN) <= skb->data
6064 +#endif
6065 + && (memcmp(eth_hdr(skb)->h_source,
6066 + &table[ip - map->first_ip].ethernet,
6067 + ETH_ALEN) == 0));
6068 + } else {
6069 + return (map->flags & IPSET_MACIP_MATCHUNSET ? 1 : 0);
6070 + }
6071 +}
6072 +
6073 +/* returns 0 on success */
6074 +static inline int
6075 +__addip(struct ip_set *set,
6076 + ip_set_ip_t ip, unsigned char *ethernet, ip_set_ip_t *hash_ip)
6077 +{
6078 + struct ip_set_macipmap *map =
6079 + (struct ip_set_macipmap *) set->data;
6080 + struct ip_set_macip *table =
6081 + (struct ip_set_macip *) map->members;
6082 +
6083 + if (ip < map->first_ip || ip > map->last_ip)
6084 + return -ERANGE;
6085 + if (test_and_set_bit(IPSET_MACIP_ISSET,
6086 + (void *) &table[ip - map->first_ip].flags))
6087 + return -EEXIST;
6088 +
6089 + *hash_ip = ip;
6090 + DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
6091 + memcpy(&table[ip - map->first_ip].ethernet, ethernet, ETH_ALEN);
6092 + return 0;
6093 +}
6094 +
6095 +static int
6096 +addip(struct ip_set *set, const void *data, size_t size,
6097 + ip_set_ip_t *hash_ip)
6098 +{
6099 + struct ip_set_req_macipmap *req =
6100 + (struct ip_set_req_macipmap *) data;
6101 +
6102 + if (size != sizeof(struct ip_set_req_macipmap)) {
6103 + ip_set_printk("data length wrong (want %zu, have %zu)",
6104 + sizeof(struct ip_set_req_macipmap),
6105 + size);
6106 + return -EINVAL;
6107 + }
6108 + return __addip(set, req->ip, req->ethernet, hash_ip);
6109 +}
6110 +
6111 +static int
6112 +addip_kernel(struct ip_set *set,
6113 + const struct sk_buff *skb,
6114 + ip_set_ip_t *hash_ip,
6115 + const u_int32_t *flags,
6116 + unsigned char index)
6117 +{
6118 + ip_set_ip_t ip;
6119 +
6120 + ip = ntohl(flags[index] & IPSET_SRC
6121 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
6122 + ? ip_hdr(skb)->saddr
6123 + : ip_hdr(skb)->daddr);
6124 +#else
6125 + ? skb->nh.iph->saddr
6126 + : skb->nh.iph->daddr);
6127 +#endif
6128 +
6129 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
6130 + if (!(skb_mac_header(skb) >= skb->head
6131 + && (skb_mac_header(skb) + ETH_HLEN) <= skb->data))
6132 +#else
6133 + if (!(skb->mac.raw >= skb->head
6134 + && (skb->mac.raw + ETH_HLEN) <= skb->data))
6135 +#endif
6136 + return -EINVAL;
6137 +
6138 + return __addip(set, ip, eth_hdr(skb)->h_source, hash_ip);
6139 +}
6140 +
6141 +static inline int
6142 +__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
6143 +{
6144 + struct ip_set_macipmap *map =
6145 + (struct ip_set_macipmap *) set->data;
6146 + struct ip_set_macip *table =
6147 + (struct ip_set_macip *) map->members;
6148 +
6149 + if (ip < map->first_ip || ip > map->last_ip)
6150 + return -ERANGE;
6151 + if (!test_and_clear_bit(IPSET_MACIP_ISSET,
6152 + (void *)&table[ip - map->first_ip].flags))
6153 + return -EEXIST;
6154 +
6155 + *hash_ip = ip;
6156 + DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
6157 + return 0;
6158 +}
6159 +
6160 +static int
6161 +delip(struct ip_set *set, const void *data, size_t size,
6162 + ip_set_ip_t *hash_ip)
6163 +{
6164 + struct ip_set_req_macipmap *req =
6165 + (struct ip_set_req_macipmap *) data;
6166 +
6167 + if (size != sizeof(struct ip_set_req_macipmap)) {
6168 + ip_set_printk("data length wrong (want %zu, have %zu)",
6169 + sizeof(struct ip_set_req_macipmap),
6170 + size);
6171 + return -EINVAL;
6172 + }
6173 + return __delip(set, req->ip, hash_ip);
6174 +}
6175 +
6176 +static int
6177 +delip_kernel(struct ip_set *set,
6178 + const struct sk_buff *skb,
6179 + ip_set_ip_t *hash_ip,
6180 + const u_int32_t *flags,
6181 + unsigned char index)
6182 +{
6183 + return __delip(set,
6184 + ntohl(flags[index] & IPSET_SRC
6185 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
6186 + ? ip_hdr(skb)->saddr
6187 + : ip_hdr(skb)->daddr),
6188 +#else
6189 + ? skb->nh.iph->saddr
6190 + : skb->nh.iph->daddr),
6191 +#endif
6192 + hash_ip);
6193 +}
6194 +
6195 +static inline size_t members_size(ip_set_id_t from, ip_set_id_t to)
6196 +{
6197 + return (size_t)((to - from + 1) * sizeof(struct ip_set_macip));
6198 +}
6199 +
6200 +static int create(struct ip_set *set, const void *data, size_t size)
6201 +{
6202 + int newbytes;
6203 + struct ip_set_req_macipmap_create *req =
6204 + (struct ip_set_req_macipmap_create *) data;
6205 + struct ip_set_macipmap *map;
6206 +
6207 + if (size != sizeof(struct ip_set_req_macipmap_create)) {
6208 + ip_set_printk("data length wrong (want %zu, have %zu)",
6209 + sizeof(struct ip_set_req_macipmap_create),
6210 + size);
6211 + return -EINVAL;
6212 + }
6213 +
6214 + DP("from %u.%u.%u.%u to %u.%u.%u.%u",
6215 + HIPQUAD(req->from), HIPQUAD(req->to));
6216 +
6217 + if (req->from > req->to) {
6218 + DP("bad ip range");
6219 + return -ENOEXEC;
6220 + }
6221 +
6222 + if (req->to - req->from > MAX_RANGE) {
6223 + ip_set_printk("range too big (max %d addresses)",
6224 + MAX_RANGE+1);
6225 + return -ENOEXEC;
6226 + }
6227 +
6228 + map = kmalloc(sizeof(struct ip_set_macipmap), GFP_KERNEL);
6229 + if (!map) {
6230 + DP("out of memory for %d bytes",
6231 + sizeof(struct ip_set_macipmap));
6232 + return -ENOMEM;
6233 + }
6234 + map->flags = req->flags;
6235 + map->first_ip = req->from;
6236 + map->last_ip = req->to;
6237 + newbytes = members_size(map->first_ip, map->last_ip);
6238 + map->members = ip_set_malloc(newbytes);
6239 + DP("members: %u %p", newbytes, map->members);
6240 + if (!map->members) {
6241 + DP("out of memory for %d bytes", newbytes);
6242 + kfree(map);
6243 + return -ENOMEM;
6244 + }
6245 + memset(map->members, 0, newbytes);
6246 +
6247 + set->data = map;
6248 + return 0;
6249 +}
6250 +
6251 +static void destroy(struct ip_set *set)
6252 +{
6253 + struct ip_set_macipmap *map =
6254 + (struct ip_set_macipmap *) set->data;
6255 +
6256 + ip_set_free(map->members, members_size(map->first_ip, map->last_ip));
6257 + kfree(map);
6258 +
6259 + set->data = NULL;
6260 +}
6261 +
6262 +static void flush(struct ip_set *set)
6263 +{
6264 + struct ip_set_macipmap *map =
6265 + (struct ip_set_macipmap *) set->data;
6266 + memset(map->members, 0, members_size(map->first_ip, map->last_ip));
6267 +}
6268 +
6269 +static void list_header(const struct ip_set *set, void *data)
6270 +{
6271 + struct ip_set_macipmap *map =
6272 + (struct ip_set_macipmap *) set->data;
6273 + struct ip_set_req_macipmap_create *header =
6274 + (struct ip_set_req_macipmap_create *) data;
6275 +
6276 + DP("list_header %x %x %u", map->first_ip, map->last_ip,
6277 + map->flags);
6278 +
6279 + header->from = map->first_ip;
6280 + header->to = map->last_ip;
6281 + header->flags = map->flags;
6282 +}
6283 +
6284 +static int list_members_size(const struct ip_set *set)
6285 +{
6286 + struct ip_set_macipmap *map =
6287 + (struct ip_set_macipmap *) set->data;
6288 +
6289 + DP("%u", members_size(map->first_ip, map->last_ip));
6290 + return members_size(map->first_ip, map->last_ip);
6291 +}
6292 +
6293 +static void list_members(const struct ip_set *set, void *data)
6294 +{
6295 + struct ip_set_macipmap *map =
6296 + (struct ip_set_macipmap *) set->data;
6297 +
6298 + int bytes = members_size(map->first_ip, map->last_ip);
6299 +
6300 + DP("members: %u %p", bytes, map->members);
6301 + memcpy(data, map->members, bytes);
6302 +}
6303 +
6304 +static struct ip_set_type ip_set_macipmap = {
6305 + .typename = SETTYPE_NAME,
6306 + .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
6307 + .protocol_version = IP_SET_PROTOCOL_VERSION,
6308 + .create = &create,
6309 + .destroy = &destroy,
6310 + .flush = &flush,
6311 + .reqsize = sizeof(struct ip_set_req_macipmap),
6312 + .addip = &addip,
6313 + .addip_kernel = &addip_kernel,
6314 + .delip = &delip,
6315 + .delip_kernel = &delip_kernel,
6316 + .testip = &testip,
6317 + .testip_kernel = &testip_kernel,
6318 + .header_size = sizeof(struct ip_set_req_macipmap_create),
6319 + .list_header = &list_header,
6320 + .list_members_size = &list_members_size,
6321 + .list_members = &list_members,
6322 + .me = THIS_MODULE,
6323 +};
6324 +
6325 +MODULE_LICENSE("GPL");
6326 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
6327 +MODULE_DESCRIPTION("macipmap type of IP sets");
6328 +
6329 +static int __init ip_set_macipmap_init(void)
6330 +{
6331 + init_max_malloc_size();
6332 + return ip_set_register_set_type(&ip_set_macipmap);
6333 +}
6334 +
6335 +static void __exit ip_set_macipmap_fini(void)
6336 +{
6337 + /* FIXME: possible race with ip_set_create() */
6338 + ip_set_unregister_set_type(&ip_set_macipmap);
6339 +}
6340 +
6341 +module_init(ip_set_macipmap_init);
6342 +module_exit(ip_set_macipmap_fini);
6343 diff -Nru linux-2.6.23/net/ipv4/netfilter/ip_set_nethash.c linux-2.6.23.pom2patch.set/net/ipv4/netfilter/ip_set_nethash.c
6344 --- linux-2.6.23/net/ipv4/netfilter/ip_set_nethash.c 1970-01-01 01:00:00.000000000 +0100
6345 +++ linux-2.6.23.pom2patch.set/net/ipv4/netfilter/ip_set_nethash.c 2007-10-12 11:52:37.000000000 +0200
6346 @@ -0,0 +1,497 @@
6347 +/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
6348 + *
6349 + * This program is free software; you can redistribute it and/or modify
6350 + * it under the terms of the GNU General Public License version 2 as
6351 + * published by the Free Software Foundation.
6352 + */
6353 +
6354 +/* Kernel module implementing a cidr nethash set */
6355 +
6356 +#include <linux/module.h>
6357 +#include <linux/ip.h>
6358 +#include <linux/skbuff.h>
6359 +#include <linux/version.h>
6360 +#include <linux/jhash.h>
6361 +#include <linux/netfilter_ipv4/ip_tables.h>
6362 +#include <linux/netfilter_ipv4/ip_set.h>
6363 +#include <linux/errno.h>
6364 +#include <asm/uaccess.h>
6365 +#include <asm/bitops.h>
6366 +#include <linux/spinlock.h>
6367 +#include <linux/vmalloc.h>
6368 +#include <linux/random.h>
6369 +
6370 +#include <net/ip.h>
6371 +
6372 +#include <linux/netfilter_ipv4/ip_set_malloc.h>
6373 +#include <linux/netfilter_ipv4/ip_set_nethash.h>
6374 +
6375 +static int limit = MAX_RANGE;
6376 +
6377 +static inline __u32
6378 +jhash_ip(const struct ip_set_nethash *map, uint16_t i, ip_set_ip_t ip)
6379 +{
6380 + return jhash_1word(ip, *(((uint32_t *) map->initval) + i));
6381 +}
6382 +
6383 +static inline __u32
6384 +hash_id_cidr(struct ip_set_nethash *map,
6385 + ip_set_ip_t ip,
6386 + unsigned char cidr,
6387 + ip_set_ip_t *hash_ip)
6388 +{
6389 + __u32 id;
6390 + u_int16_t i;
6391 + ip_set_ip_t *elem;
6392 +
6393 + *hash_ip = pack(ip, cidr);
6394 +
6395 + for (i = 0; i < map->probes; i++) {
6396 + id = jhash_ip(map, i, *hash_ip) % map->hashsize;
6397 + DP("hash key: %u", id);
6398 + elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
6399 + if (*elem == *hash_ip)
6400 + return id;
6401 + }
6402 + return UINT_MAX;
6403 +}
6404 +
6405 +static inline __u32
6406 +hash_id(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
6407 +{
6408 + struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
6409 + __u32 id = UINT_MAX;
6410 + int i;
6411 +
6412 + for (i = 0; i < 30 && map->cidr[i]; i++) {
6413 + id = hash_id_cidr(map, ip, map->cidr[i], hash_ip);
6414 + if (id != UINT_MAX)
6415 + break;
6416 + }
6417 + return id;
6418 +}
6419 +
6420 +static inline int
6421 +__testip_cidr(struct ip_set *set, ip_set_ip_t ip, unsigned char cidr,
6422 + ip_set_ip_t *hash_ip)
6423 +{
6424 + struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
6425 +
6426 + return (ip && hash_id_cidr(map, ip, cidr, hash_ip) != UINT_MAX);
6427 +}
6428 +
6429 +static inline int
6430 +__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
6431 +{
6432 + return (ip && hash_id(set, ip, hash_ip) != UINT_MAX);
6433 +}
6434 +
6435 +static int
6436 +testip(struct ip_set *set, const void *data, size_t size,
6437 + ip_set_ip_t *hash_ip)
6438 +{
6439 + struct ip_set_req_nethash *req =
6440 + (struct ip_set_req_nethash *) data;
6441 +
6442 + if (size != sizeof(struct ip_set_req_nethash)) {
6443 + ip_set_printk("data length wrong (want %zu, have %zu)",
6444 + sizeof(struct ip_set_req_nethash),
6445 + size);
6446 + return -EINVAL;
6447 + }
6448 + return (req->cidr == 32 ? __testip(set, req->ip, hash_ip)
6449 + : __testip_cidr(set, req->ip, req->cidr, hash_ip));
6450 +}
6451 +
6452 +static int
6453 +testip_kernel(struct ip_set *set,
6454 + const struct sk_buff *skb,
6455 + ip_set_ip_t *hash_ip,
6456 + const u_int32_t *flags,
6457 + unsigned char index)
6458 +{
6459 + return __testip(set,
6460 + ntohl(flags[index] & IPSET_SRC
6461 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
6462 + ? ip_hdr(skb)->saddr
6463 + : ip_hdr(skb)->daddr),
6464 +#else
6465 + ? skb->nh.iph->saddr
6466 + : skb->nh.iph->daddr),
6467 +#endif
6468 + hash_ip);
6469 +}
6470 +
6471 +static inline int
6472 +__addip_base(struct ip_set_nethash *map, ip_set_ip_t ip)
6473 +{
6474 + __u32 probe;
6475 + u_int16_t i;
6476 + ip_set_ip_t *elem;
6477 +
6478 + for (i = 0; i < map->probes; i++) {
6479 + probe = jhash_ip(map, i, ip) % map->hashsize;
6480 + elem = HARRAY_ELEM(map->members, ip_set_ip_t *, probe);
6481 + if (*elem == ip)
6482 + return -EEXIST;
6483 + if (!*elem) {
6484 + *elem = ip;
6485 + map->elements++;
6486 + return 0;
6487 + }
6488 + }
6489 + /* Trigger rehashing */
6490 + return -EAGAIN;
6491 +}
6492 +
6493 +static inline int
6494 +__addip(struct ip_set_nethash *map, ip_set_ip_t ip, unsigned char cidr,
6495 + ip_set_ip_t *hash_ip)
6496 +{
6497 + if (!ip || map->elements >= limit)
6498 + return -ERANGE;
6499 +
6500 + *hash_ip = pack(ip, cidr);
6501 + DP("%u.%u.%u.%u/%u, %u.%u.%u.%u", HIPQUAD(ip), cidr, HIPQUAD(*hash_ip));
6502 +
6503 + return __addip_base(map, *hash_ip);
6504 +}
6505 +
6506 +static void
6507 +update_cidr_sizes(struct ip_set_nethash *map, unsigned char cidr)
6508 +{
6509 + unsigned char next;
6510 + int i;
6511 +
6512 + for (i = 0; i < 30 && map->cidr[i]; i++) {
6513 + if (map->cidr[i] == cidr) {
6514 + return;
6515 + } else if (map->cidr[i] < cidr) {
6516 + next = map->cidr[i];
6517 + map->cidr[i] = cidr;
6518 + cidr = next;
6519 + }
6520 + }
6521 + if (i < 30)
6522 + map->cidr[i] = cidr;
6523 +}
6524 +
6525 +static int
6526 +addip(struct ip_set *set, const void *data, size_t size,
6527 + ip_set_ip_t *hash_ip)
6528 +{
6529 + struct ip_set_req_nethash *req =
6530 + (struct ip_set_req_nethash *) data;
6531 + int ret;
6532 +
6533 + if (size != sizeof(struct ip_set_req_nethash)) {
6534 + ip_set_printk("data length wrong (want %zu, have %zu)",
6535 + sizeof(struct ip_set_req_nethash),
6536 + size);
6537 + return -EINVAL;
6538 + }
6539 + ret = __addip((struct ip_set_nethash *) set->data,
6540 + req->ip, req->cidr, hash_ip);
6541 +
6542 + if (ret == 0)
6543 + update_cidr_sizes((struct ip_set_nethash *) set->data,
6544 + req->cidr);
6545 +
6546 + return ret;
6547 +}
6548 +
6549 +static int
6550 +addip_kernel(struct ip_set *set,
6551 + const struct sk_buff *skb,
6552 + ip_set_ip_t *hash_ip,
6553 + const u_int32_t *flags,
6554 + unsigned char index)
6555 +{
6556 + struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
6557 + int ret = -ERANGE;
6558 + ip_set_ip_t ip = ntohl(flags[index] & IPSET_SRC
6559 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
6560 + ? ip_hdr(skb)->saddr
6561 + : ip_hdr(skb)->daddr);
6562 +#else
6563 + ? skb->nh.iph->saddr
6564 + : skb->nh.iph->daddr);
6565 +#endif
6566 +
6567 + if (map->cidr[0])
6568 + ret = __addip(map, ip, map->cidr[0], hash_ip);
6569 +
6570 + return ret;
6571 +}
6572 +
6573 +static int retry(struct ip_set *set)
6574 +{
6575 + struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
6576 + ip_set_ip_t *elem;
6577 + void *members;
6578 + u_int32_t i, hashsize = map->hashsize;
6579 + int res;
6580 + struct ip_set_nethash *tmp;
6581 +
6582 + if (map->resize == 0)
6583 + return -ERANGE;
6584 +
6585 + again:
6586 + res = 0;
6587 +
6588 + /* Calculate new parameters */
6589 + hashsize += (hashsize * map->resize)/100;
6590 + if (hashsize == map->hashsize)
6591 + hashsize++;
6592 +
6593 + ip_set_printk("rehashing of set %s triggered: "
6594 + "hashsize grows from %u to %u",
6595 + set->name, map->hashsize, hashsize);
6596 +
6597 + tmp = kmalloc(sizeof(struct ip_set_nethash)
6598 + + map->probes * sizeof(uint32_t), GFP_ATOMIC);
6599 + if (!tmp) {
6600 + DP("out of memory for %d bytes",
6601 + sizeof(struct ip_set_nethash)
6602 + + map->probes * sizeof(uint32_t));
6603 + return -ENOMEM;
6604 + }
6605 + tmp->members = harray_malloc(hashsize, sizeof(ip_set_ip_t), GFP_ATOMIC);
6606 + if (!tmp->members) {
6607 + DP("out of memory for %d bytes", hashsize * sizeof(ip_set_ip_t));
6608 + kfree(tmp);
6609 + return -ENOMEM;
6610 + }
6611 + tmp->hashsize = hashsize;
6612 + tmp->elements = 0;
6613 + tmp->probes = map->probes;
6614 + tmp->resize = map->resize;
6615 + memcpy(tmp->initval, map->initval, map->probes * sizeof(uint32_t));
6616 + memcpy(tmp->cidr, map->cidr, 30 * sizeof(unsigned char));
6617 +
6618 + write_lock_bh(&set->lock);
6619 + map = (struct ip_set_nethash *) set->data; /* Play safe */
6620 + for (i = 0; i < map->hashsize && res == 0; i++) {
6621 + elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
6622 + if (*elem)
6623 + res = __addip_base(tmp, *elem);
6624 + }
6625 + if (res) {
6626 + /* Failure, try again */
6627 + write_unlock_bh(&set->lock);
6628 + harray_free(tmp->members);
6629 + kfree(tmp);
6630 + goto again;
6631 + }
6632 +
6633 + /* Success at resizing! */
6634 + members = map->members;
6635 +
6636 + map->hashsize = tmp->hashsize;
6637 + map->members = tmp->members;
6638 + write_unlock_bh(&set->lock);
6639 +
6640 + harray_free(members);
6641 + kfree(tmp);
6642 +
6643 + return 0;
6644 +}
6645 +
6646 +static inline int
6647 +__delip(struct ip_set_nethash *map, ip_set_ip_t ip, unsigned char cidr,
6648 + ip_set_ip_t *hash_ip)
6649 +{
6650 + ip_set_ip_t id, *elem;
6651 +
6652 + if (!ip)
6653 + return -ERANGE;
6654 +
6655 + id = hash_id_cidr(map, ip, cidr, hash_ip);
6656 + if (id == UINT_MAX)
6657 + return -EEXIST;
6658 +
6659 + elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
6660 + *elem = 0;
6661 + map->elements--;
6662 + return 0;
6663 +}
6664 +
6665 +static int
6666 +delip(struct ip_set *set, const void *data, size_t size,
6667 + ip_set_ip_t *hash_ip)
6668 +{
6669 + struct ip_set_req_nethash *req =
6670 + (struct ip_set_req_nethash *) data;
6671 +
6672 + if (size != sizeof(struct ip_set_req_nethash)) {
6673 + ip_set_printk("data length wrong (want %zu, have %zu)",
6674 + sizeof(struct ip_set_req_nethash),
6675 + size);
6676 + return -EINVAL;
6677 + }
6678 + /* TODO: no garbage collection in map->cidr */
6679 + return __delip((struct ip_set_nethash *) set->data,
6680 + req->ip, req->cidr, hash_ip);
6681 +}
6682 +
6683 +static int
6684 +delip_kernel(struct ip_set *set,
6685 + const struct sk_buff *skb,
6686 + ip_set_ip_t *hash_ip,
6687 + const u_int32_t *flags,
6688 + unsigned char index)
6689 +{
6690 + struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
6691 + int ret = -ERANGE;
6692 + ip_set_ip_t ip = ntohl(flags[index] & IPSET_SRC
6693 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
6694 + ? ip_hdr(skb)->saddr
6695 + : ip_hdr(skb)->daddr);
6696 +#else
6697 + ? skb->nh.iph->saddr
6698 + : skb->nh.iph->daddr);
6699 +#endif
6700 +
6701 + if (map->cidr[0])
6702 + ret = __delip(map, ip, map->cidr[0], hash_ip);
6703 +
6704 + return ret;
6705 +}
6706 +
6707 +static int create(struct ip_set *set, const void *data, size_t size)
6708 +{
6709 + struct ip_set_req_nethash_create *req =
6710 + (struct ip_set_req_nethash_create *) data;
6711 + struct ip_set_nethash *map;
6712 + uint16_t i;
6713 +
6714 + if (size != sizeof(struct ip_set_req_nethash_create)) {
6715 + ip_set_printk("data length wrong (want %zu, have %zu)",
6716 + sizeof(struct ip_set_req_nethash_create),
6717 + size);
6718 + return -EINVAL;
6719 + }
6720 +
6721 + if (req->hashsize < 1) {
6722 + ip_set_printk("hashsize too small");
6723 + return -ENOEXEC;
6724 + }
6725 + if (req->probes < 1) {
6726 + ip_set_printk("probes too small");
6727 + return -ENOEXEC;
6728 + }
6729 +
6730 + map = kmalloc(sizeof(struct ip_set_nethash)
6731 + + req->probes * sizeof(uint32_t), GFP_KERNEL);
6732 + if (!map) {
6733 + DP("out of memory for %d bytes",
6734 + sizeof(struct ip_set_nethash)
6735 + + req->probes * sizeof(uint32_t));
6736 + return -ENOMEM;
6737 + }
6738 + for (i = 0; i < req->probes; i++)
6739 + get_random_bytes(((uint32_t *) map->initval)+i, 4);
6740 + map->elements = 0;
6741 + map->hashsize = req->hashsize;
6742 + map->probes = req->probes;
6743 + map->resize = req->resize;
6744 + memset(map->cidr, 0, 30 * sizeof(unsigned char));
6745 + map->members = harray_malloc(map->hashsize, sizeof(ip_set_ip_t), GFP_KERNEL);
6746 + if (!map->members) {
6747 + DP("out of memory for %d bytes", map->hashsize * sizeof(ip_set_ip_t));
6748 + kfree(map);
6749 + return -ENOMEM;
6750 + }
6751 +
6752 + set->data = map;
6753 + return 0;
6754 +}
6755 +
6756 +static void destroy(struct ip_set *set)
6757 +{
6758 + struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
6759 +
6760 + harray_free(map->members);
6761 + kfree(map);
6762 +
6763 + set->data = NULL;
6764 +}
6765 +
6766 +static void flush(struct ip_set *set)
6767 +{
6768 + struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
6769 + harray_flush(map->members, map->hashsize, sizeof(ip_set_ip_t));
6770 + memset(map->cidr, 0, 30 * sizeof(unsigned char));
6771 + map->elements = 0;
6772 +}
6773 +
6774 +static void list_header(const struct ip_set *set, void *data)
6775 +{
6776 + struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
6777 + struct ip_set_req_nethash_create *header =
6778 + (struct ip_set_req_nethash_create *) data;
6779 +
6780 + header->hashsize = map->hashsize;
6781 + header->probes = map->probes;
6782 + header->resize = map->resize;
6783 +}
6784 +
6785 +static int list_members_size(const struct ip_set *set)
6786 +{
6787 + struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
6788 +
6789 + return (map->hashsize * sizeof(ip_set_ip_t));
6790 +}
6791 +
6792 +static void list_members(const struct ip_set *set, void *data)
6793 +{
6794 + struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
6795 + ip_set_ip_t i, *elem;
6796 +
6797 + for (i = 0; i < map->hashsize; i++) {
6798 + elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
6799 + ((ip_set_ip_t *)data)[i] = *elem;
6800 + }
6801 +}
6802 +
6803 +static struct ip_set_type ip_set_nethash = {
6804 + .typename = SETTYPE_NAME,
6805 + .features = IPSET_TYPE_IP | IPSET_DATA_SINGLE,
6806 + .protocol_version = IP_SET_PROTOCOL_VERSION,
6807 + .create = &create,
6808 + .destroy = &destroy,
6809 + .flush = &flush,
6810 + .reqsize = sizeof(struct ip_set_req_nethash),
6811 + .addip = &addip,
6812 + .addip_kernel = &addip_kernel,
6813 + .retry = &retry,
6814 + .delip = &delip,
6815 + .delip_kernel = &delip_kernel,
6816 + .testip = &testip,
6817 + .testip_kernel = &testip_kernel,
6818 + .header_size = sizeof(struct ip_set_req_nethash_create),
6819 + .list_header = &list_header,
6820 + .list_members_size = &list_members_size,
6821 + .list_members = &list_members,
6822 + .me = THIS_MODULE,
6823 +};
6824 +
6825 +MODULE_LICENSE("GPL");
6826 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
6827 +MODULE_DESCRIPTION("nethash type of IP sets");
6828 +module_param(limit, int, 0600);
6829 +MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
6830 +
6831 +static int __init ip_set_nethash_init(void)
6832 +{
6833 + return ip_set_register_set_type(&ip_set_nethash);
6834 +}
6835 +
6836 +static void __exit ip_set_nethash_fini(void)
6837 +{
6838 + /* FIXME: possible race with ip_set_create() */
6839 + ip_set_unregister_set_type(&ip_set_nethash);
6840 +}
6841 +
6842 +module_init(ip_set_nethash_init);
6843 +module_exit(ip_set_nethash_fini);
6844 diff -Nru linux-2.6.23/net/ipv4/netfilter/ip_set_portmap.c linux-2.6.23.pom2patch.set/net/ipv4/netfilter/ip_set_portmap.c
6845 --- linux-2.6.23/net/ipv4/netfilter/ip_set_portmap.c 1970-01-01 01:00:00.000000000 +0100
6846 +++ linux-2.6.23.pom2patch.set/net/ipv4/netfilter/ip_set_portmap.c 2007-10-12 11:52:37.000000000 +0200
6847 @@ -0,0 +1,346 @@
6848 +/* Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
6849 + *
6850 + * This program is free software; you can redistribute it and/or modify
6851 + * it under the terms of the GNU General Public License version 2 as
6852 + * published by the Free Software Foundation.
6853 + */
6854 +
6855 +/* Kernel module implementing a port set type as a bitmap */
6856 +
6857 +#include <linux/module.h>
6858 +#include <linux/ip.h>
6859 +#include <linux/tcp.h>
6860 +#include <linux/udp.h>
6861 +#include <linux/skbuff.h>
6862 +#include <linux/version.h>
6863 +#include <linux/netfilter_ipv4/ip_tables.h>
6864 +#include <linux/netfilter_ipv4/ip_set.h>
6865 +#include <linux/errno.h>
6866 +#include <asm/uaccess.h>
6867 +#include <asm/bitops.h>
6868 +#include <linux/spinlock.h>
6869 +
6870 +#include <net/ip.h>
6871 +
6872 +#include <linux/netfilter_ipv4/ip_set_portmap.h>
6873 +
6874 +/* We must handle non-linear skbs */
6875 +static inline ip_set_ip_t
6876 +get_port(const struct sk_buff *skb, u_int32_t flags)
6877 +{
6878 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
6879 + struct iphdr *iph = ip_hdr(skb);
6880 +#else
6881 + struct iphdr *iph = skb->nh.iph;
6882 +#endif
6883 + u_int16_t offset = ntohs(iph->frag_off) & IP_OFFSET;
6884 + switch (iph->protocol) {
6885 + case IPPROTO_TCP: {
6886 + struct tcphdr tcph;
6887 +
6888 + /* See comments at tcp_match in ip_tables.c */
6889 + if (offset)
6890 + return INVALID_PORT;
6891 +
6892 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
6893 + if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &tcph, sizeof(tcph)) < 0)
6894 +#else
6895 + if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &tcph, sizeof(tcph)) < 0)
6896 +#endif
6897 + /* No choice either */
6898 + return INVALID_PORT;
6899 +
6900 + return ntohs(flags & IPSET_SRC ?
6901 + tcph.source : tcph.dest);
6902 + }
6903 + case IPPROTO_UDP: {
6904 + struct udphdr udph;
6905 +
6906 + if (offset)
6907 + return INVALID_PORT;
6908 +
6909 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
6910 + if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &udph, sizeof(udph)) < 0)
6911 +#else
6912 + if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &udph, sizeof(udph)) < 0)
6913 +#endif
6914 + /* No choice either */
6915 + return INVALID_PORT;
6916 +
6917 + return ntohs(flags & IPSET_SRC ?
6918 + udph.source : udph.dest);
6919 + }
6920 + default:
6921 + return INVALID_PORT;
6922 + }
6923 +}
6924 +
6925 +static inline int
6926 +__testport(struct ip_set *set, ip_set_ip_t port, ip_set_ip_t *hash_port)
6927 +{
6928 + struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
6929 +
6930 + if (port < map->first_port || port > map->last_port)
6931 + return -ERANGE;
6932 +
6933 + *hash_port = port;
6934 + DP("set: %s, port:%u, %u", set->name, port, *hash_port);
6935 + return !!test_bit(port - map->first_port, map->members);
6936 +}
6937 +
6938 +static int
6939 +testport(struct ip_set *set, const void *data, size_t size,
6940 + ip_set_ip_t *hash_port)
6941 +{
6942 + struct ip_set_req_portmap *req =
6943 + (struct ip_set_req_portmap *) data;
6944 +
6945 + if (size != sizeof(struct ip_set_req_portmap)) {
6946 + ip_set_printk("data length wrong (want %zu, have %zu)",
6947 + sizeof(struct ip_set_req_portmap),
6948 + size);
6949 + return -EINVAL;
6950 + }
6951 + return __testport(set, req->port, hash_port);
6952 +}
6953 +
6954 +static int
6955 +testport_kernel(struct ip_set *set,
6956 + const struct sk_buff *skb,
6957 + ip_set_ip_t *hash_port,
6958 + const u_int32_t *flags,
6959 + unsigned char index)
6960 +{
6961 + int res;
6962 + ip_set_ip_t port = get_port(skb, flags[index]);
6963 +
6964 + DP("flag %s port %u", flags[index] & IPSET_SRC ? "SRC" : "DST", port);
6965 + if (port == INVALID_PORT)
6966 + return 0;
6967 +
6968 + res = __testport(set, port, hash_port);
6969 +
6970 + return (res < 0 ? 0 : res);
6971 +}
6972 +
6973 +static inline int
6974 +__addport(struct ip_set *set, ip_set_ip_t port, ip_set_ip_t *hash_port)
6975 +{
6976 + struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
6977 +
6978 + if (port < map->first_port || port > map->last_port)
6979 + return -ERANGE;
6980 + if (test_and_set_bit(port - map->first_port, map->members))
6981 + return -EEXIST;
6982 +
6983 + *hash_port = port;
6984 + DP("port %u", port);
6985 + return 0;
6986 +}
6987 +
6988 +static int
6989 +addport(struct ip_set *set, const void *data, size_t size,
6990 + ip_set_ip_t *hash_port)
6991 +{
6992 + struct ip_set_req_portmap *req =
6993 + (struct ip_set_req_portmap *) data;
6994 +
6995 + if (size != sizeof(struct ip_set_req_portmap)) {
6996 + ip_set_printk("data length wrong (want %zu, have %zu)",
6997 + sizeof(struct ip_set_req_portmap),
6998 + size);
6999 + return -EINVAL;
7000 + }
7001 + return __addport(set, req->port, hash_port);
7002 +}
7003 +
7004 +static int
7005 +addport_kernel(struct ip_set *set,
7006 + const struct sk_buff *skb,
7007 + ip_set_ip_t *hash_port,
7008 + const u_int32_t *flags,
7009 + unsigned char index)
7010 +{
7011 + ip_set_ip_t port = get_port(skb, flags[index]);
7012 +
7013 + if (port == INVALID_PORT)
7014 + return -EINVAL;
7015 +
7016 + return __addport(set, port, hash_port);
7017 +}
7018 +
7019 +static inline int
7020 +__delport(struct ip_set *set, ip_set_ip_t port, ip_set_ip_t *hash_port)
7021 +{
7022 + struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
7023 +
7024 + if (port < map->first_port || port > map->last_port)
7025 + return -ERANGE;
7026 + if (!test_and_clear_bit(port - map->first_port, map->members))
7027 + return -EEXIST;
7028 +
7029 + *hash_port = port;
7030 + DP("port %u", port);
7031 + return 0;
7032 +}
7033 +
7034 +static int
7035 +delport(struct ip_set *set, const void *data, size_t size,
7036 + ip_set_ip_t *hash_port)
7037 +{
7038 + struct ip_set_req_portmap *req =
7039 + (struct ip_set_req_portmap *) data;
7040 +
7041 + if (size != sizeof(struct ip_set_req_portmap)) {
7042 + ip_set_printk("data length wrong (want %zu, have %zu)",
7043 + sizeof(struct ip_set_req_portmap),
7044 + size);
7045 + return -EINVAL;
7046 + }
7047 + return __delport(set, req->port, hash_port);
7048 +}
7049 +
7050 +static int
7051 +delport_kernel(struct ip_set *set,
7052 + const struct sk_buff *skb,
7053 + ip_set_ip_t *hash_port,
7054 + const u_int32_t *flags,
7055 + unsigned char index)
7056 +{
7057 + ip_set_ip_t port = get_port(skb, flags[index]);
7058 +
7059 + if (port == INVALID_PORT)
7060 + return -EINVAL;
7061 +
7062 + return __delport(set, port, hash_port);
7063 +}
7064 +
7065 +static int create(struct ip_set *set, const void *data, size_t size)
7066 +{
7067 + int newbytes;
7068 + struct ip_set_req_portmap_create *req =
7069 + (struct ip_set_req_portmap_create *) data;
7070 + struct ip_set_portmap *map;
7071 +
7072 + if (size != sizeof(struct ip_set_req_portmap_create)) {
7073 + ip_set_printk("data length wrong (want %zu, have %zu)",
7074 + sizeof(struct ip_set_req_portmap_create),
7075 + size);
7076 + return -EINVAL;
7077 + }
7078 +
7079 + DP("from %u to %u", req->from, req->to);
7080 +
7081 + if (req->from > req->to) {
7082 + DP("bad port range");
7083 + return -ENOEXEC;
7084 + }
7085 +
7086 + if (req->to - req->from > MAX_RANGE) {
7087 + ip_set_printk("range too big (max %d ports)",
7088 + MAX_RANGE+1);
7089 + return -ENOEXEC;
7090 + }
7091 +
7092 + map = kmalloc(sizeof(struct ip_set_portmap), GFP_KERNEL);
7093 + if (!map) {
7094 + DP("out of memory for %d bytes",
7095 + sizeof(struct ip_set_portmap));
7096 + return -ENOMEM;
7097 + }
7098 + map->first_port = req->from;
7099 + map->last_port = req->to;
7100 + newbytes = bitmap_bytes(req->from, req->to);
7101 + map->members = kmalloc(newbytes, GFP_KERNEL);
7102 + if (!map->members) {
7103 + DP("out of memory for %d bytes", newbytes);
7104 + kfree(map);
7105 + return -ENOMEM;
7106 + }
7107 + memset(map->members, 0, newbytes);
7108 +
7109 + set->data = map;
7110 + return 0;
7111 +}
7112 +
7113 +static void destroy(struct ip_set *set)
7114 +{
7115 + struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
7116 +
7117 + kfree(map->members);
7118 + kfree(map);
7119 +
7120 + set->data = NULL;
7121 +}
7122 +
7123 +static void flush(struct ip_set *set)
7124 +{
7125 + struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
7126 + memset(map->members, 0, bitmap_bytes(map->first_port, map->last_port));
7127 +}
7128 +
7129 +static void list_header(const struct ip_set *set, void *data)
7130 +{
7131 + struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
7132 + struct ip_set_req_portmap_create *header =
7133 + (struct ip_set_req_portmap_create *) data;
7134 +
7135 + DP("list_header %u %u", map->first_port, map->last_port);
7136 +
7137 + header->from = map->first_port;
7138 + header->to = map->last_port;
7139 +}
7140 +
7141 +static int list_members_size(const struct ip_set *set)
7142 +{
7143 + struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
7144 +
7145 + return bitmap_bytes(map->first_port, map->last_port);
7146 +}
7147 +
7148 +static void list_members(const struct ip_set *set, void *data)
7149 +{
7150 + struct ip_set_portmap *map = (struct ip_set_portmap *) set->data;
7151 + int bytes = bitmap_bytes(map->first_port, map->last_port);
7152 +
7153 + memcpy(data, map->members, bytes);
7154 +}
7155 +
7156 +static struct ip_set_type ip_set_portmap = {
7157 + .typename = SETTYPE_NAME,
7158 + .features = IPSET_TYPE_PORT | IPSET_DATA_SINGLE,
7159 + .protocol_version = IP_SET_PROTOCOL_VERSION,
7160 + .create = &create,
7161 + .destroy = &destroy,
7162 + .flush = &flush,
7163 + .reqsize = sizeof(struct ip_set_req_portmap),
7164 + .addip = &addport,
7165 + .addip_kernel = &addport_kernel,
7166 + .delip = &delport,
7167 + .delip_kernel = &delport_kernel,
7168 + .testip = &testport,
7169 + .testip_kernel = &testport_kernel,
7170 + .header_size = sizeof(struct ip_set_req_portmap_create),
7171 + .list_header = &list_header,
7172 + .list_members_size = &list_members_size,
7173 + .list_members = &list_members,
7174 + .me = THIS_MODULE,
7175 +};
7176 +
7177 +MODULE_LICENSE("GPL");
7178 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
7179 +MODULE_DESCRIPTION("portmap type of IP sets");
7180 +
7181 +static int __init ip_set_portmap_init(void)
7182 +{
7183 + return ip_set_register_set_type(&ip_set_portmap);
7184 +}
7185 +
7186 +static void __exit ip_set_portmap_fini(void)
7187 +{
7188 + /* FIXME: possible race with ip_set_create() */
7189 + ip_set_unregister_set_type(&ip_set_portmap);
7190 +}
7191 +
7192 +module_init(ip_set_portmap_init);
7193 +module_exit(ip_set_portmap_fini);
7194 diff -Nru linux-2.6.23/net/ipv4/netfilter/ipt_set.c linux-2.6.23.pom2patch.set/net/ipv4/netfilter/ipt_set.c
7195 --- linux-2.6.23/net/ipv4/netfilter/ipt_set.c 1970-01-01 01:00:00.000000000 +0100
7196 +++ linux-2.6.23.pom2patch.set/net/ipv4/netfilter/ipt_set.c 2007-10-12 11:52:38.000000000 +0200
7197 @@ -0,0 +1,160 @@
7198 +/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
7199 + * Patrick Schaaf <bof@bof.de>
7200 + * Martin Josefsson <gandalf@wlug.westbo.se>
7201 + * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
7202 + *
7203 + * This program is free software; you can redistribute it and/or modify
7204 + * it under the terms of the GNU General Public License version 2 as
7205 + * published by the Free Software Foundation.
7206 + */
7207 +
7208 +/* Kernel module to match an IP set. */
7209 +
7210 +#include <linux/module.h>
7211 +#include <linux/ip.h>
7212 +#include <linux/skbuff.h>
7213 +#include <linux/version.h>
7214 +
7215 +#include <linux/netfilter_ipv4/ip_tables.h>
7216 +#include <linux/netfilter_ipv4/ip_set.h>
7217 +#include <linux/netfilter_ipv4/ipt_set.h>
7218 +
7219 +static inline int
7220 +match_set(const struct ipt_set_info *info,
7221 + const struct sk_buff *skb,
7222 + int inv)
7223 +{
7224 + if (ip_set_testip_kernel(info->index, skb, info->flags))
7225 + inv = !inv;
7226 + return inv;
7227 +}
7228 +
7229 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
7230 +static bool
7231 +#else
7232 +static int
7233 +#endif
7234 +match(const struct sk_buff *skb,
7235 + const struct net_device *in,
7236 + const struct net_device *out,
7237 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
7238 + const struct xt_match *match,
7239 +#endif
7240 + const void *matchinfo,
7241 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
7242 + int offset, unsigned int protoff, bool *hotdrop)
7243 +#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
7244 + int offset, unsigned int protoff, int *hotdrop)
7245 +#else
7246 + int offset, int *hotdrop)
7247 +#endif
7248 +{
7249 + const struct ipt_set_info_match *info = matchinfo;
7250 +
7251 + return match_set(&info->match_set,
7252 + skb,
7253 + info->match_set.flags[0] & IPSET_MATCH_INV);
7254 +}
7255 +
7256 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
7257 +bool
7258 +#else
7259 +static int
7260 +#endif
7261 +checkentry(const char *tablename,
7262 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
7263 + const void *inf,
7264 +#else
7265 + const struct ipt_ip *ip,
7266 +#endif
7267 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
7268 + const struct xt_match *match,
7269 +#endif
7270 + void *matchinfo,
7271 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
7272 + unsigned int matchsize,
7273 +#endif
7274 + unsigned int hook_mask)
7275 +{
7276 + struct ipt_set_info_match *info =
7277 + (struct ipt_set_info_match *) matchinfo;
7278 + ip_set_id_t index;
7279 +
7280 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
7281 + if (matchsize != IPT_ALIGN(sizeof(struct ipt_set_info_match))) {
7282 + ip_set_printk("invalid matchsize %d", matchsize);
7283 + return 0;
7284 + }
7285 +#endif
7286 +
7287 + index = ip_set_get_byindex(info->match_set.index);
7288 +
7289 + if (index == IP_SET_INVALID_ID) {
7290 +		ip_set_printk("Cannot find set identified by id %u to match",
7291 + info->match_set.index);
7292 + return 0; /* error */
7293 + }
7294 + if (info->match_set.flags[IP_SET_MAX_BINDINGS] != 0) {
7295 + ip_set_printk("That's nasty!");
7296 + return 0; /* error */
7297 + }
7298 +
7299 + return 1;
7300 +}
7301 +
7302 +static void destroy(
7303 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
7304 + const struct xt_match *match,
7305 +#endif
7306 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
7307 + void *matchinfo, unsigned int matchsize)
7308 +#else
7309 + void *matchinfo)
7310 +#endif
7311 +{
7312 + struct ipt_set_info_match *info = matchinfo;
7313 +
7314 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
7315 + if (matchsize != IPT_ALIGN(sizeof(struct ipt_set_info_match))) {
7316 + ip_set_printk("invalid matchsize %d", matchsize);
7317 + return;
7318 + }
7319 +#endif
7320 + ip_set_put(info->match_set.index);
7321 +}
7322 +
7323 +static struct ipt_match set_match = {
7324 + .name = "set",
7325 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
7326 + .family = AF_INET,
7327 +#endif
7328 + .match = &match,
7329 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
7330 + .matchsize = sizeof(struct ipt_set_info_match),
7331 +#endif
7332 + .checkentry = &checkentry,
7333 + .destroy = &destroy,
7334 + .me = THIS_MODULE
7335 +};
7336 +
7337 +MODULE_LICENSE("GPL");
7338 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
7339 +MODULE_DESCRIPTION("iptables IP set match module");
7340 +
7341 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
7342 +#define ipt_register_match xt_register_match
7343 +#define ipt_unregister_match xt_unregister_match
7344 +#endif
7345 +
7346 +static int __init ipt_ipset_init(void)
7347 +{
7348 + return ipt_register_match(&set_match);
7349 +}
7350 +
7351 +static void __exit ipt_ipset_fini(void)
7352 +{
7353 + ipt_unregister_match(&set_match);
7354 +}
7355 +
7356 +module_init(ipt_ipset_init);
7357 +module_exit(ipt_ipset_fini);
7358 diff -Nru linux-2.6.23/net/ipv4/netfilter/ipt_SET.c linux-2.6.23.pom2patch.set/net/ipv4/netfilter/ipt_SET.c
7359 --- linux-2.6.23/net/ipv4/netfilter/ipt_SET.c 1970-01-01 01:00:00.000000000 +0100
7360 +++ linux-2.6.23.pom2patch.set/net/ipv4/netfilter/ipt_SET.c 2007-10-12 11:52:37.000000000 +0200
7361 @@ -0,0 +1,172 @@
7362 +/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
7363 + * Patrick Schaaf <bof@bof.de>
7364 + * Martin Josefsson <gandalf@wlug.westbo.se>
7365 + * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
7366 + *
7367 + * This program is free software; you can redistribute it and/or modify
7368 + * it under the terms of the GNU General Public License version 2 as
7369 + * published by the Free Software Foundation.
7370 + */
7371 +
7372 +/* ipt_SET.c - netfilter target to manipulate IP sets */
7373 +
7374 +#include <linux/types.h>
7375 +#include <linux/ip.h>
7376 +#include <linux/timer.h>
7377 +#include <linux/module.h>
7378 +#include <linux/netfilter.h>
7379 +#include <linux/netdevice.h>
7380 +#include <linux/if.h>
7381 +#include <linux/inetdevice.h>
7382 +#include <linux/version.h>
7383 +#include <net/protocol.h>
7384 +#include <net/checksum.h>
7385 +#include <linux/netfilter_ipv4.h>
7386 +#include <linux/netfilter_ipv4/ip_tables.h>
7387 +#include <linux/netfilter_ipv4/ipt_set.h>
7388 +
7389 +static unsigned int
7390 +target(struct sk_buff **pskb,
7391 + const struct net_device *in,
7392 + const struct net_device *out,
7393 + unsigned int hooknum,
7394 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
7395 + const struct xt_target *target,
7396 +#endif
7397 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
7398 + const void *targinfo,
7399 + void *userinfo)
7400 +#else
7401 + const void *targinfo)
7402 +#endif
7403 +{
7404 + const struct ipt_set_info_target *info = targinfo;
7405 +
7406 + if (info->add_set.index != IP_SET_INVALID_ID)
7407 + ip_set_addip_kernel(info->add_set.index,
7408 + *pskb,
7409 + info->add_set.flags);
7410 + if (info->del_set.index != IP_SET_INVALID_ID)
7411 + ip_set_delip_kernel(info->del_set.index,
7412 + *pskb,
7413 + info->del_set.flags);
7414 +
7415 + return IPT_CONTINUE;
7416 +}
7417 +
7418 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
7419 +static bool
7420 +#else
7421 +static int
7422 +#endif
7423 +checkentry(const char *tablename,
7424 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
7425 + const void *e,
7426 +#else
7427 + const struct ipt_entry *e,
7428 +#endif
7429 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
7430 + const struct xt_target *target,
7431 +#endif
7432 + void *targinfo,
7433 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
7434 + unsigned int targinfosize,
7435 +#endif
7436 + unsigned int hook_mask)
7437 +{
7438 + struct ipt_set_info_target *info =
7439 + (struct ipt_set_info_target *) targinfo;
7440 + ip_set_id_t index;
7441 +
7442 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
7443 + if (targinfosize != IPT_ALIGN(sizeof(*info))) {
7444 + DP("bad target info size %u", targinfosize);
7445 + return 0;
7446 + }
7447 +#endif
7448 +
7449 + if (info->add_set.index != IP_SET_INVALID_ID) {
7450 + index = ip_set_get_byindex(info->add_set.index);
7451 + if (index == IP_SET_INVALID_ID) {
7452 + ip_set_printk("cannot find add_set index %u as target",
7453 + info->add_set.index);
7454 + return 0; /* error */
7455 + }
7456 + }
7457 +
7458 + if (info->del_set.index != IP_SET_INVALID_ID) {
7459 + index = ip_set_get_byindex(info->del_set.index);
7460 + if (index == IP_SET_INVALID_ID) {
7461 + ip_set_printk("cannot find del_set index %u as target",
7462 + info->del_set.index);
7463 + return 0; /* error */
7464 + }
7465 + }
7466 + if (info->add_set.flags[IP_SET_MAX_BINDINGS] != 0
7467 + || info->del_set.flags[IP_SET_MAX_BINDINGS] != 0) {
7468 + ip_set_printk("That's nasty!");
7469 + return 0; /* error */
7470 + }
7471 +
7472 + return 1;
7473 +}
7474 +
7475 +static void destroy(
7476 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
7477 + const struct xt_target *target,
7478 +#endif
7479 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
7480 + void *targetinfo, unsigned int targetsize)
7481 +#else
7482 + void *targetinfo)
7483 +#endif
7484 +{
7485 + struct ipt_set_info_target *info = targetinfo;
7486 +
7487 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
7488 + if (targetsize != IPT_ALIGN(sizeof(struct ipt_set_info_target))) {
7489 + ip_set_printk("invalid targetsize %u", targetsize);
7490 + return;
7491 + }
7492 +#endif
7493 + if (info->add_set.index != IP_SET_INVALID_ID)
7494 + ip_set_put(info->add_set.index);
7495 + if (info->del_set.index != IP_SET_INVALID_ID)
7496 + ip_set_put(info->del_set.index);
7497 +}
7498 +
7499 +static struct ipt_target SET_target = {
7500 + .name = "SET",
7501 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
7502 + .family = AF_INET,
7503 +#endif
7504 + .target = target,
7505 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
7506 + .targetsize = sizeof(struct ipt_set_info_target),
7507 +#endif
7508 + .checkentry = checkentry,
7509 + .destroy = destroy,
7510 + .me = THIS_MODULE
7511 +};
7512 +
7513 +MODULE_LICENSE("GPL");
7514 +MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
7515 +MODULE_DESCRIPTION("iptables IP set target module");
7516 +
7517 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
7518 +#define ipt_register_target xt_register_target
7519 +#define ipt_unregister_target xt_unregister_target
7520 +#endif
7521 +
7522 +static int __init ipt_SET_init(void)
7523 +{
7524 + return ipt_register_target(&SET_target);
7525 +}
7526 +
7527 +static void __exit ipt_SET_fini(void)
7528 +{
7529 + ipt_unregister_target(&SET_target);
7530 +}
7531 +
7532 +module_init(ipt_SET_init);
7533 +module_exit(ipt_SET_fini);
7534 diff -Nru linux-2.6.23/net/ipv4/netfilter/Kconfig linux-2.6.23.pom2patch.set/net/ipv4/netfilter/Kconfig
7535 --- linux-2.6.23/net/ipv4/netfilter/Kconfig 2007-10-09 22:31:38.000000000 +0200
7536 +++ linux-2.6.23.pom2patch.set/net/ipv4/netfilter/Kconfig 2007-10-12 11:52:38.000000000 +0200
7537 @@ -402,5 +402,122 @@
7538 Allows altering the ARP packet payload: source and destination
7539 hardware and network addresses.
7540
7541 +config IP_NF_SET
7542 + tristate "IP set support"
7543 + depends on INET && NETFILTER
7544 + help
7545 + This option adds IP set support to the kernel.
7546 + In order to define and use sets, you need the userspace utility
7547 + ipset(8).
7548 +
7549 + To compile it as a module, choose M here. If unsure, say N.
7550 +
7551 +config IP_NF_SET_MAX
7552 + int "Maximum number of IP sets"
7553 + default 256
7554 + range 2 65534
7555 + depends on IP_NF_SET
7556 + help
7557 + You can define here the default value of the maximum number
7558 + of IP sets for the kernel.
7559 +
7560 + The value can be overridden by the 'max_sets' module
7561 + parameter of the 'ip_set' module.
7562 +
7563 +config IP_NF_SET_HASHSIZE
7564 + int "Hash size for bindings of IP sets"
7565 + default 1024
7566 + depends on IP_NF_SET
7567 + help
7568 + You can define here the default value of the hash size for
7569 + bindings of IP sets.
7570 +
7571 + The value can be overridden by the 'hash_size' module
7572 + parameter of the 'ip_set' module.
7573 +
7574 +config IP_NF_SET_IPMAP
7575 + tristate "ipmap set support"
7576 + depends on IP_NF_SET
7577 + help
7578 + This option adds the ipmap set type support.
7579 +
7580 + To compile it as a module, choose M here. If unsure, say N.
7581 +
7582 +config IP_NF_SET_MACIPMAP
7583 + tristate "macipmap set support"
7584 + depends on IP_NF_SET
7585 + help
7586 + This option adds the macipmap set type support.
7587 +
7588 + To compile it as a module, choose M here. If unsure, say N.
7589 +
7590 +config IP_NF_SET_PORTMAP
7591 + tristate "portmap set support"
7592 + depends on IP_NF_SET
7593 + help
7594 + This option adds the portmap set type support.
7595 +
7596 + To compile it as a module, choose M here. If unsure, say N.
7597 +
7598 +config IP_NF_SET_IPHASH
7599 + tristate "iphash set support"
7600 + depends on IP_NF_SET
7601 + help
7602 + This option adds the iphash set type support.
7603 +
7604 + To compile it as a module, choose M here. If unsure, say N.
7605 +
7606 +config IP_NF_SET_NETHASH
7607 + tristate "nethash set support"
7608 + depends on IP_NF_SET
7609 + help
7610 + This option adds the nethash set type support.
7611 +
7612 + To compile it as a module, choose M here. If unsure, say N.
7613 +
7614 +config IP_NF_SET_IPPORTHASH
7615 + tristate "ipporthash set support"
7616 + depends on IP_NF_SET
7617 + help
7618 + This option adds the ipporthash set type support.
7619 +
7620 + To compile it as a module, choose M here. If unsure, say N.
7621 +
7622 +config IP_NF_SET_IPTREE
7623 + tristate "iptree set support"
7624 + depends on IP_NF_SET
7625 + help
7626 + This option adds the iptree set type support.
7627 +
7628 + To compile it as a module, choose M here. If unsure, say N.
7629 +
7630 +config IP_NF_SET_IPTREEMAP
7631 + tristate "iptreemap set support"
7632 + depends on IP_NF_SET
7633 + help
7634 + This option adds the iptreemap set type support.
7635 +
7636 + To compile it as a module, choose M here. If unsure, say N.
7637 +
7638 +config IP_NF_MATCH_SET
7639 + tristate "set match support"
7640 + depends on IP_NF_SET
7641 + help
7642 + Set matching matches against given IP sets.
7643 + You need the ipset utility to create and set up the sets.
7644 +
7645 + To compile it as a module, choose M here. If unsure, say N.
7646 +
7647 +config IP_NF_TARGET_SET
7648 + tristate "SET target support"
7649 + depends on IP_NF_SET
7650 + help
7651 + The SET target makes it possible to add/delete entries
7652 + in IP sets.
7653 + You need the ipset utility to create and set up the sets.
7654 +
7655 + To compile it as a module, choose M here. If unsure, say N.
7656 +
7657 +
7658 endmenu
7659
7660 diff -Nru linux-2.6.23/net/ipv4/netfilter/Makefile linux-2.6.23.pom2patch.set/net/ipv4/netfilter/Makefile
7661 --- linux-2.6.23/net/ipv4/netfilter/Makefile 2007-10-09 22:31:38.000000000 +0200
7662 +++ linux-2.6.23.pom2patch.set/net/ipv4/netfilter/Makefile 2007-10-12 11:52:38.000000000 +0200
7663 @@ -48,6 +48,7 @@
7664 obj-$(CONFIG_IP_NF_MATCH_ECN) += ipt_ecn.o
7665 obj-$(CONFIG_IP_NF_MATCH_AH) += ipt_ah.o
7666 obj-$(CONFIG_IP_NF_MATCH_TTL) += ipt_ttl.o
7667 +obj-$(CONFIG_IP_NF_MATCH_SET) += ipt_set.o
7668 obj-$(CONFIG_IP_NF_MATCH_ADDRTYPE) += ipt_addrtype.o
7669
7670 # targets
7671 @@ -62,6 +63,18 @@
7672 obj-$(CONFIG_IP_NF_TARGET_ULOG) += ipt_ULOG.o
7673 obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o
7674 obj-$(CONFIG_IP_NF_TARGET_TTL) += ipt_TTL.o
7675 +obj-$(CONFIG_IP_NF_TARGET_SET) += ipt_SET.o
7676 +
7677 +# sets
7678 +obj-$(CONFIG_IP_NF_SET) += ip_set.o
7679 +obj-$(CONFIG_IP_NF_SET_IPMAP) += ip_set_ipmap.o
7680 +obj-$(CONFIG_IP_NF_SET_PORTMAP) += ip_set_portmap.o
7681 +obj-$(CONFIG_IP_NF_SET_MACIPMAP) += ip_set_macipmap.o
7682 +obj-$(CONFIG_IP_NF_SET_IPHASH) += ip_set_iphash.o
7683 +obj-$(CONFIG_IP_NF_SET_NETHASH) += ip_set_nethash.o
7684 +obj-$(CONFIG_IP_NF_SET_IPPORTHASH) += ip_set_ipporthash.o
7685 +obj-$(CONFIG_IP_NF_SET_IPTREE) += ip_set_iptree.o
7686 +obj-$(CONFIG_IP_NF_SET_IPTREEMAP) += ip_set_iptreemap.o
7687
7688 # generic ARP tables
7689 obj-$(CONFIG_IP_NF_ARPTABLES) += arp_tables.o