target/linux/generic-2.6/patches-2.6.28/150-netfilter_imq.patch
1 --- /dev/null
2 +++ b/drivers/net/imq.c
3 @@ -0,0 +1,567 @@
4 +/*
5 + * Pseudo-driver for the intermediate queue device.
6 + *
7 + * This program is free software; you can redistribute it and/or
8 + * modify it under the terms of the GNU General Public License
9 + * as published by the Free Software Foundation; either version
10 + * 2 of the License, or (at your option) any later version.
11 + *
12 + * Authors: Patrick McHardy, <kaber@trash.net>
13 + *
14 + * The first version was written by Martin Devera, <devik@cdi.cz>
15 + *
16 + * Credits: Jan Rafaj <imq2t@cedric.vabo.cz>
17 + * - Update patch to 2.4.21
18 + * Sebastian Strollo <sstrollo@nortelnetworks.com>
19 + * - Fix "Dead-loop on netdevice imq"-issue
20 + * Marcel Sebek <sebek64@post.cz>
21 + * - Update to 2.6.2-rc1
22 + *
23 + * After some time of inactivity there is a group taking care
24 + * of IMQ again: http://www.linuximq.net
25 + *
26 + *
27 + * 2004/06/30 - New version of IMQ patch to kernels <=2.6.7
28 + * including the following changes:
29 + *
30 + * - Correction of ipv6 support "+"s issue (Hasso Tepper)
31 + * - Correction of imq_init_devs() issue that resulted in
32 + * kernel OOPS unloading IMQ as module (Norbert Buchmuller)
33 + * - Addition of functionality to choose number of IMQ devices
34 + * during kernel config (Andre Correa)
35 + * - Addition of functionality to choose how IMQ hooks on
36 + * PRE and POSTROUTING (after or before NAT) (Andre Correa)
37 + * - Cosmetic corrections (Norbert Buchmuller) (Andre Correa)
38 + *
39 + *
40 + * 2005/12/16 - IMQ versions between 2.6.7 and 2.6.13 were
41 + * released with almost no problems. 2.6.14-x was released
42 + * with some important changes: nfcache was removed. After
43 + * some weeks of trouble we figured out that some IMQ fields
44 + * in skb were missing in skbuff.c - skb_clone and copy_skb_header.
45 + * These functions are correctly patched by this new patch version.
46 + *
47 + * Thanks for all who helped to figure out all the problems with
48 + * 2.6.14.x: Patrick McHardy, Rune Kock, VeNoMouS, Max CtRiX,
49 + * Kevin Shanahan, Richard Lucassen, Valery Dachev (hopefully
50 + * I didn't forget anybody). I apologize again for my lack of time.
51 + *
52 + *
53 + * 2008/06/17 - 2.6.25 - Changed imq.c to use qdisc_run() instead
54 + * of qdisc_restart() and moved qdisc_run() to tasklet to avoid
55 + * recursive locking. New initialization routines to fix 'rmmod' not
56 + * working anymore. Used code from ifb.c. (Jussi Kivilinna)
57 + *
58 + * 2008/08/06 - 2.6.26 - (JK)
59 + * - Replaced tasklet with 'netif_schedule()'.
60 + * - Cleaned up and added comments for imq_nf_queue().
61 + *
62 + * 2009/04/12
63 + * - Add skb_save_cb/skb_restore_cb helper functions for backing up
64 + * the control buffer. This is needed because the qdisc layer on
65 + * kernels 2.6.27 and newer overwrites the control buffer. (Jussi Kivilinna)
66 + * - Add better locking for IMQ device. Hopefully this will solve
67 + * SMP issues. (Jussi Kivilinna)
68 + * - Port to 2.6.27
69 + * - Port to 2.6.28
70 + *
71 + * 2009/04/20 - (Jussi Kivilinna)
72 + * - Fix rmmod not working
73 + * - Use netdevice feature flags to avoid extra packet handling
74 + * by core networking layer and possibly increase performance.
75 + *
76 + * Also, many thanks to Pablo Sebastian Greco for making the initial
77 + * patch and to those who helped the testing.
78 + *
79 + * More info at: http://www.linuximq.net/ (Andre Correa)
80 + */
81 +
82 +#include <linux/module.h>
83 +#include <linux/kernel.h>
84 +#include <linux/moduleparam.h>
85 +#include <linux/list.h>
86 +#include <linux/skbuff.h>
87 +#include <linux/netdevice.h>
88 +#include <linux/etherdevice.h>
89 +#include <linux/rtnetlink.h>
90 +#include <linux/if_arp.h>
91 +#include <linux/netfilter.h>
92 +#include <linux/netfilter_ipv4.h>
93 +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
94 + #include <linux/netfilter_ipv6.h>
95 +#endif
96 +#include <linux/imq.h>
97 +#include <net/pkt_sched.h>
98 +#include <net/netfilter/nf_queue.h>
99 +
100 +static nf_hookfn imq_nf_hook;
101 +
102 +static struct nf_hook_ops imq_ingress_ipv4 = {
103 + .hook = imq_nf_hook,
104 + .owner = THIS_MODULE,
105 + .pf = PF_INET,
106 + .hooknum = NF_INET_PRE_ROUTING,
107 +#if defined(CONFIG_IMQ_BEHAVIOR_BA) || defined(CONFIG_IMQ_BEHAVIOR_BB)
108 + .priority = NF_IP_PRI_MANGLE + 1
109 +#else
110 + .priority = NF_IP_PRI_NAT_DST + 1
111 +#endif
112 +};
113 +
114 +static struct nf_hook_ops imq_egress_ipv4 = {
115 + .hook = imq_nf_hook,
116 + .owner = THIS_MODULE,
117 + .pf = PF_INET,
118 + .hooknum = NF_INET_POST_ROUTING,
119 +#if defined(CONFIG_IMQ_BEHAVIOR_AA) || defined(CONFIG_IMQ_BEHAVIOR_BA)
120 + .priority = NF_IP_PRI_LAST
121 +#else
122 + .priority = NF_IP_PRI_NAT_SRC - 1
123 +#endif
124 +};
125 +
126 +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
127 +static struct nf_hook_ops imq_ingress_ipv6 = {
128 + .hook = imq_nf_hook,
129 + .owner = THIS_MODULE,
130 + .pf = PF_INET6,
131 + .hooknum = NF_INET_PRE_ROUTING,
132 +#if defined(CONFIG_IMQ_BEHAVIOR_BA) || defined(CONFIG_IMQ_BEHAVIOR_BB)
133 + .priority = NF_IP6_PRI_MANGLE + 1
134 +#else
135 + .priority = NF_IP6_PRI_NAT_DST + 1
136 +#endif
137 +};
138 +
139 +static struct nf_hook_ops imq_egress_ipv6 = {
140 + .hook = imq_nf_hook,
141 + .owner = THIS_MODULE,
142 + .pf = PF_INET6,
143 + .hooknum = NF_INET_POST_ROUTING,
144 +#if defined(CONFIG_IMQ_BEHAVIOR_AA) || defined(CONFIG_IMQ_BEHAVIOR_BA)
145 + .priority = NF_IP6_PRI_LAST
146 +#else
147 + .priority = NF_IP6_PRI_NAT_SRC - 1
148 +#endif
149 +};
150 +#endif
151 +
152 +#if defined(CONFIG_IMQ_NUM_DEVS)
153 +static unsigned int numdevs = CONFIG_IMQ_NUM_DEVS;
154 +#else
155 +static unsigned int numdevs = IMQ_MAX_DEVS;
156 +#endif
157 +
158 +static DEFINE_SPINLOCK(imq_nf_queue_lock);
159 +
160 +static struct net_device *imq_devs_cache[IMQ_MAX_DEVS];
161 +
162 +
163 +static struct net_device_stats *imq_get_stats(struct net_device *dev)
164 +{
165 + return &dev->stats;
166 +}
167 +
168 +/* called for packets kfree'd in qdiscs at places other than enqueue */
169 +static void imq_skb_destructor(struct sk_buff *skb)
170 +{
171 + struct nf_queue_entry *entry = skb->nf_queue_entry;
172 +
173 + if (entry) {
174 + nf_queue_entry_release_refs(entry);
175 + kfree(entry);
176 + }
177 +
178 + skb_restore_cb(skb); /* kfree backup */
179 +}
180 +
181 +static void imq_nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
182 +{
183 + int status;
184 +
185 + if (!entry->next_outfn) {
186 + spin_lock_bh(&imq_nf_queue_lock);
187 + nf_reinject(entry, verdict);
188 + spin_unlock_bh(&imq_nf_queue_lock);
189 + return;
190 + }
191 +
192 + rcu_read_lock();
193 + local_bh_disable();
194 + status = entry->next_outfn(entry, entry->next_queuenum);
195 + local_bh_enable();
196 + if (status < 0) {
197 + nf_queue_entry_release_refs(entry);
198 + kfree_skb(entry->skb);
199 + kfree(entry);
200 + }
201 +
202 + rcu_read_unlock();
203 +}
204 +
205 +static int imq_dev_xmit(struct sk_buff *skb, struct net_device *dev)
206 +{
207 + dev->stats.tx_bytes += skb->len;
208 + dev->stats.tx_packets++;
209 +
210 + skb->imq_flags = 0;
211 + skb->destructor = NULL;
212 +
213 + skb_restore_cb(skb); /* restore skb->cb */
214 +
215 + dev->trans_start = jiffies;
216 + imq_nf_reinject(skb->nf_queue_entry, NF_ACCEPT);
217 + return 0;
218 +}
219 +
220 +static int imq_nf_queue(struct nf_queue_entry *entry, unsigned queue_num)
221 +{
222 + struct net_device *dev;
223 + struct sk_buff *skb_orig, *skb, *skb_shared;
224 + struct Qdisc *q;
225 + struct netdev_queue *txq;
226 + int users, index;
227 + int retval = -EINVAL;
228 +
229 + index = entry->skb->imq_flags & IMQ_F_IFMASK;
230 + if (unlikely(index > numdevs - 1)) {
231 + if (net_ratelimit())
232 + printk(KERN_WARNING
233 + "IMQ: invalid device specified, highest is %u\n",
234 + numdevs - 1);
235 + retval = -EINVAL;
236 + goto out;
237 + }
238 +
239 + /* check for imq device by index from cache */
240 + dev = imq_devs_cache[index];
241 + if (unlikely(!dev)) {
242 + char buf[8];
243 +
244 + /* get device by name and cache result */
245 + snprintf(buf, sizeof(buf), "imq%d", index);
246 + dev = dev_get_by_name(&init_net, buf);
247 + if (!dev) {
248 + /* not found?! */
249 + BUG();
250 + retval = -ENODEV;
251 + goto out;
252 + }
253 +
254 + imq_devs_cache[index] = dev;
255 + dev_put(dev);
256 + }
257 +
258 + if (unlikely(!(dev->flags & IFF_UP))) {
259 + entry->skb->imq_flags = 0;
260 + imq_nf_reinject(entry, NF_ACCEPT);
261 + retval = 0;
262 + goto out;
263 + }
264 + dev->last_rx = jiffies;
265 +
266 + skb = entry->skb;
267 + skb_orig = NULL;
268 +
269 + /* skb has owner? => make clone */
270 + if (unlikely(skb->destructor)) {
271 + skb_orig = skb;
272 + skb = skb_clone(skb, GFP_ATOMIC);
273 + if (!skb) {
274 + retval = -ENOMEM;
275 + goto out;
276 + }
277 + entry->skb = skb;
278 + }
279 +
280 + skb->nf_queue_entry = entry;
281 +
282 + dev->stats.rx_bytes += skb->len;
283 + dev->stats.rx_packets++;
284 +
285 + txq = dev_pick_tx(dev, skb);
286 +
287 + q = rcu_dereference(txq->qdisc);
288 + if (unlikely(!q->enqueue))
289 + goto packet_not_eaten_by_imq_dev;
290 +
291 + spin_lock_bh(qdisc_lock(q));
292 +
293 + users = atomic_read(&skb->users);
294 +
295 + skb_shared = skb_get(skb); /* increase reference count by one */
296 + skb_save_cb(skb_shared); /* backup skb->cb, as qdisc layer will
297 + overwrite it */
298 + qdisc_enqueue_root(skb_shared, q); /* might kfree_skb */
299 +
300 + if (likely(atomic_read(&skb_shared->users) == users + 1)) {
301 + kfree_skb(skb_shared); /* decrease reference count by one */
302 +
303 + skb->destructor = &imq_skb_destructor;
304 +
305 + /* cloned? */
306 + if (skb_orig)
307 + kfree_skb(skb_orig); /* free original */
308 +
309 + spin_unlock_bh(qdisc_lock(q));
310 +
311 + /* schedule qdisc dequeue */
312 + __netif_schedule(q);
313 +
314 + retval = 0;
315 + goto out;
316 + } else {
317 + skb_restore_cb(skb_shared); /* restore skb->cb */
318 + /* qdisc dropped the packet and already decreased the skb
319 + * reference count, so we must not try to free it again as
320 + * that would actually destroy the skb. */
321 + spin_unlock_bh(qdisc_lock(q));
322 + goto packet_not_eaten_by_imq_dev;
323 + }
324 +
325 +packet_not_eaten_by_imq_dev:
326 + /* cloned? restore original */
327 + if (skb_orig) {
328 + kfree_skb(skb);
329 + entry->skb = skb_orig;
330 + }
331 + retval = -1;
332 +out:
333 + return retval;
334 +}
335 +
336 +static struct nf_queue_handler nfqh = {
337 + .name = "imq",
338 + .outfn = imq_nf_queue,
339 +};
340 +
341 +static unsigned int imq_nf_hook(unsigned int hook, struct sk_buff *pskb,
342 + const struct net_device *indev,
343 + const struct net_device *outdev,
344 + int (*okfn)(struct sk_buff *))
345 +{
346 + if (pskb->imq_flags & IMQ_F_ENQUEUE)
347 + return NF_QUEUE;
348 +
349 + return NF_ACCEPT;
350 +}
351 +
352 +static int imq_close(struct net_device *dev)
353 +{
354 + netif_stop_queue(dev);
355 + return 0;
356 +}
357 +
358 +static int imq_open(struct net_device *dev)
359 +{
360 + netif_start_queue(dev);
361 + return 0;
362 +}
363 +
364 +static void imq_setup(struct net_device *dev)
365 +{
366 + dev->hard_start_xmit = imq_dev_xmit;
367 + dev->open = imq_open;
368 + dev->get_stats = imq_get_stats;
369 + dev->stop = imq_close;
370 + dev->type = ARPHRD_VOID;
371 + dev->mtu = 16000;
372 + dev->tx_queue_len = 11000;
373 + dev->flags = IFF_NOARP;
374 + dev->features = NETIF_F_SG | NETIF_F_FRAGLIST |
375 + NETIF_F_GSO | NETIF_F_HW_CSUM |
376 + NETIF_F_HIGHDMA;
377 +}
378 +
379 +static int imq_validate(struct nlattr *tb[], struct nlattr *data[])
380 +{
381 + int ret = 0;
382 +
383 + if (tb[IFLA_ADDRESS]) {
384 + if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
385 + ret = -EINVAL;
386 + goto end;
387 + }
388 + if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
389 + ret = -EADDRNOTAVAIL;
390 + goto end;
391 + }
392 + }
393 + return 0;
394 +end:
395 + printk(KERN_WARNING "IMQ: imq_validate failed (%d)\n", ret);
396 + return ret;
397 +}
398 +
399 +static struct rtnl_link_ops imq_link_ops __read_mostly = {
400 + .kind = "imq",
401 + .priv_size = 0,
402 + .setup = imq_setup,
403 + .validate = imq_validate,
404 +};
405 +
406 +static int __init imq_init_hooks(void)
407 +{
408 + int err;
409 +
410 + nf_register_queue_imq_handler(&nfqh);
411 +
412 + err = nf_register_hook(&imq_ingress_ipv4);
413 + if (err)
414 + goto err1;
415 +
416 + err = nf_register_hook(&imq_egress_ipv4);
417 + if (err)
418 + goto err2;
419 +
420 +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
421 + err = nf_register_hook(&imq_ingress_ipv6);
422 + if (err)
423 + goto err3;
424 +
425 + err = nf_register_hook(&imq_egress_ipv6);
426 + if (err)
427 + goto err4;
428 +#endif
429 +
430 + return 0;
431 +
432 +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
433 +err4:
434 + nf_unregister_hook(&imq_ingress_ipv6);
435 +err3:
436 + nf_unregister_hook(&imq_egress_ipv4);
437 +#endif
438 +err2:
439 + nf_unregister_hook(&imq_ingress_ipv4);
440 +err1:
441 + nf_unregister_queue_imq_handler();
442 + return err;
443 +}
444 +
445 +static int __init imq_init_one(int index)
446 +{
447 + struct net_device *dev;
448 + int ret;
449 +
450 + dev = alloc_netdev(0, "imq%d", imq_setup);
451 + if (!dev)
452 + return -ENOMEM;
453 +
454 + ret = dev_alloc_name(dev, dev->name);
455 + if (ret < 0)
456 + goto fail;
457 +
458 + dev->rtnl_link_ops = &imq_link_ops;
459 + ret = register_netdevice(dev);
460 + if (ret < 0)
461 + goto fail;
462 +
463 + return 0;
464 +fail:
465 + free_netdev(dev);
466 + return ret;
467 +}
468 +
469 +static int __init imq_init_devs(void)
470 +{
471 + int err, i;
472 +
473 + if (numdevs < 1 || numdevs > IMQ_MAX_DEVS) {
474 + printk(KERN_ERR "IMQ: numdevs has to be between 1 and %u\n",
475 + IMQ_MAX_DEVS);
476 + return -EINVAL;
477 + }
478 +
479 + rtnl_lock();
480 + err = __rtnl_link_register(&imq_link_ops);
481 +
482 + for (i = 0; i < numdevs && !err; i++)
483 + err = imq_init_one(i);
484 +
485 + if (err) {
486 + __rtnl_link_unregister(&imq_link_ops);
487 + memset(imq_devs_cache, 0, sizeof(imq_devs_cache));
488 + }
489 + rtnl_unlock();
490 +
491 + return err;
492 +}
493 +
494 +static int __init imq_init_module(void)
495 +{
496 + int err;
497 +
498 +#if defined(CONFIG_IMQ_NUM_DEVS)
499 + BUILD_BUG_ON(CONFIG_IMQ_NUM_DEVS > 16);
500 + BUILD_BUG_ON(CONFIG_IMQ_NUM_DEVS < 2);
501 + BUILD_BUG_ON(CONFIG_IMQ_NUM_DEVS - 1 > IMQ_F_IFMASK);
502 +#endif
503 +
504 + err = imq_init_devs();
505 + if (err) {
506 + printk(KERN_ERR "IMQ: Error trying imq_init_devs(net)\n");
507 + return err;
508 + }
509 +
510 + err = imq_init_hooks();
511 + if (err) {
512 + printk(KERN_ERR "IMQ: Error trying imq_init_hooks()\n");
513 + rtnl_link_unregister(&imq_link_ops);
514 + memset(imq_devs_cache, 0, sizeof(imq_devs_cache));
515 + return err;
516 + }
517 +
518 + printk(KERN_INFO "IMQ driver loaded successfully.\n");
519 +
520 +#if defined(CONFIG_IMQ_BEHAVIOR_BA) || defined(CONFIG_IMQ_BEHAVIOR_BB)
521 + printk(KERN_INFO "\tHooking IMQ before NAT on PREROUTING.\n");
522 +#else
523 + printk(KERN_INFO "\tHooking IMQ after NAT on PREROUTING.\n");
524 +#endif
525 +#if defined(CONFIG_IMQ_BEHAVIOR_AB) || defined(CONFIG_IMQ_BEHAVIOR_BB)
526 + printk(KERN_INFO "\tHooking IMQ before NAT on POSTROUTING.\n");
527 +#else
528 + printk(KERN_INFO "\tHooking IMQ after NAT on POSTROUTING.\n");
529 +#endif
530 +
531 + return 0;
532 +}
533 +
534 +static void __exit imq_unhook(void)
535 +{
536 +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
537 + nf_unregister_hook(&imq_ingress_ipv6);
538 + nf_unregister_hook(&imq_egress_ipv6);
539 +#endif
540 + nf_unregister_hook(&imq_ingress_ipv4);
541 + nf_unregister_hook(&imq_egress_ipv4);
542 +
543 + nf_unregister_queue_imq_handler();
544 +}
545 +
546 +static void __exit imq_cleanup_devs(void)
547 +{
548 + rtnl_link_unregister(&imq_link_ops);
549 + memset(imq_devs_cache, 0, sizeof(imq_devs_cache));
550 +}
551 +
552 +static void __exit imq_exit_module(void)
553 +{
554 + imq_unhook();
555 + imq_cleanup_devs();
556 + printk(KERN_INFO "IMQ driver unloaded successfully.\n");
557 +}
558 +
559 +module_init(imq_init_module);
560 +module_exit(imq_exit_module);
561 +
562 +module_param(numdevs, int, 0);
563 +MODULE_PARM_DESC(numdevs, "number of IMQ devices (how many imq* devices will "
564 + "be created)");
565 +MODULE_AUTHOR("http://www.linuximq.net");
566 +MODULE_DESCRIPTION("Pseudo-driver for the intermediate queue device. See "
567 + "http://www.linuximq.net/ for more information.");
568 +MODULE_LICENSE("GPL");
569 +MODULE_ALIAS_RTNL_LINK("imq");
570 +
571 --- a/drivers/net/Kconfig
572 +++ b/drivers/net/Kconfig
573 @@ -109,6 +109,129 @@ config EQUALIZER
574 To compile this driver as a module, choose M here: the module
575 will be called eql. If unsure, say N.
576
577 +config IMQ
578 + tristate "IMQ (intermediate queueing device) support"
579 + depends on NETDEVICES && NETFILTER
580 + ---help---
581 + The IMQ device(s) serve as placeholders for QoS queueing
582 + disciplines. Every packet entering/leaving the IP stack can be
583 + directed through the IMQ device where it's enqueued/dequeued to the
584 + attached qdisc. This allows you to treat network devices as classes
585 + and distribute bandwidth among them. Iptables is used to specify
586 + through which IMQ device, if any, packets travel.
587 +
588 + More information at: http://www.linuximq.net/
589 +
590 + To compile this driver as a module, choose M here: the module
591 + will be called imq. If unsure, say N.
592 +
593 +choice
594 + prompt "IMQ behavior (PRE/POSTROUTING)"
595 + depends on IMQ
596 + default IMQ_BEHAVIOR_AB
597 + help
598 +
599 + This setting defines how IMQ behaves with respect to its
600 + hooking into PREROUTING and POSTROUTING.
601 +
602 + IMQ can work in any of the following ways:
603 +
604 + PREROUTING | POSTROUTING
605 + -----------------|-------------------
606 + #1 After NAT | After NAT
607 + #2 After NAT | Before NAT
608 + #3 Before NAT | After NAT
609 + #4 Before NAT | Before NAT
610 +
611 + The default behavior (IMQ_BEHAVIOR_AB) is to hook after NAT
612 + on PREROUTING and before NAT on POSTROUTING (#2).
613 +
614 + These settings are especially useful when trying to use IMQ
615 + to shape NATed clients.
616 +
617 + More information can be found at: www.linuximq.net
618 +
619 + If unsure, leave the default settings alone.
620 +
621 +config IMQ_BEHAVIOR_AA
622 + bool "IMQ AA"
623 + help
624 + This setting defines how IMQ behaves with respect to its
625 + hooking into PREROUTING and POSTROUTING.
626 +
627 + Choosing this option will make IMQ hook like this:
628 +
629 + PREROUTING: After NAT
630 + POSTROUTING: After NAT
631 +
632 + More information can be found at: www.linuximq.net
633 +
634 + If unsure, leave the default settings alone.
635 +
636 +config IMQ_BEHAVIOR_AB
637 + bool "IMQ AB"
638 + help
639 + This setting defines how IMQ behaves with respect to its
640 + hooking into PREROUTING and POSTROUTING.
641 +
642 + Choosing this option will make IMQ hook like this:
643 +
644 + PREROUTING: After NAT
645 + POSTROUTING: Before NAT
646 +
647 + More information can be found at: www.linuximq.net
648 +
649 + If unsure, leave the default settings alone.
650 +
651 +config IMQ_BEHAVIOR_BA
652 + bool "IMQ BA"
653 + help
654 + This setting defines how IMQ behaves with respect to its
655 + hooking into PREROUTING and POSTROUTING.
656 +
657 + Choosing this option will make IMQ hook like this:
658 +
659 + PREROUTING: Before NAT
660 + POSTROUTING: After NAT
661 +
662 + More information can be found at: www.linuximq.net
663 +
664 + If unsure, leave the default settings alone.
665 +
666 +config IMQ_BEHAVIOR_BB
667 + bool "IMQ BB"
668 + help
669 + This setting defines how IMQ behaves with respect to its
670 + hooking into PREROUTING and POSTROUTING.
671 +
672 + Choosing this option will make IMQ hook like this:
673 +
674 + PREROUTING: Before NAT
675 + POSTROUTING: Before NAT
676 +
677 + More information can be found at: www.linuximq.net
678 +
679 + If unsure, leave the default settings alone.
680 +
681 +endchoice
682 +
683 +config IMQ_NUM_DEVS
684 +
685 + int "Number of IMQ devices"
686 + range 2 16
687 + depends on IMQ
688 + default "16"
689 + help
690 +
691 + This setting defines how many IMQ devices will be
692 + created.
693 +
694 + The default value is 16.
695 +
696 + More information can be found at: www.linuximq.net
697 +
698 + If unsure, leave the default settings alone.
699 +
700 config TUN
701 tristate "Universal TUN/TAP device driver support"
702 select CRC32
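
CONFIG_IMQ_NUM_DEVS above only sets the compile-time default; when IMQ is
built as a module, the numdevs parameter declared in imq.c overrides it at
load time. A minimal load-time sketch (device count and name are
illustrative):

  # load the patched driver with two IMQ devices instead of the default 16
  modprobe imq numdevs=2
  # IMQ devices come up disabled; bring one up before attaching qdiscs to it
  ip link set imq0 up
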
703 --- a/drivers/net/Makefile
704 +++ b/drivers/net/Makefile
705 @@ -148,6 +148,7 @@ obj-$(CONFIG_SLHC) += slhc.o
706 obj-$(CONFIG_XEN_NETDEV_FRONTEND) += xen-netfront.o
707
708 obj-$(CONFIG_DUMMY) += dummy.o
709 +obj-$(CONFIG_IMQ) += imq.o
710 obj-$(CONFIG_IFB) += ifb.o
711 obj-$(CONFIG_MACVLAN) += macvlan.o
712 obj-$(CONFIG_DE600) += de600.o
713 --- /dev/null
714 +++ b/include/linux/imq.h
715 @@ -0,0 +1,13 @@
716 +#ifndef _IMQ_H
717 +#define _IMQ_H
718 +
719 +/* IFMASK (16 device indexes, 0 to 15) and flag(s) fit in 5 bits */
720 +#define IMQ_F_BITS 5
721 +
722 +#define IMQ_F_IFMASK 0x0f
723 +#define IMQ_F_ENQUEUE 0x10
724 +
725 +#define IMQ_MAX_DEVS (IMQ_F_IFMASK + 1)
726 +
727 +#endif /* _IMQ_H */
728 +
729 --- /dev/null
730 +++ b/include/linux/netfilter_ipv4/ipt_IMQ.h
731 @@ -0,0 +1,10 @@
732 +#ifndef _IPT_IMQ_H
733 +#define _IPT_IMQ_H
734 +
735 +/* Backwards compatibility for old userspace */
736 +#include <linux/netfilter/xt_IMQ.h>
737 +
738 +#define ipt_imq_info xt_imq_info
739 +
740 +#endif /* _IPT_IMQ_H */
741 +
742 --- /dev/null
743 +++ b/include/linux/netfilter_ipv6/ip6t_IMQ.h
744 @@ -0,0 +1,10 @@
745 +#ifndef _IP6T_IMQ_H
746 +#define _IP6T_IMQ_H
747 +
748 +/* Backwards compatibility for old userspace */
749 +#include <linux/netfilter/xt_IMQ.h>
750 +
751 +#define ip6t_imq_info xt_imq_info
752 +
753 +#endif /* _IP6T_IMQ_H */
754 +
755 --- a/include/linux/skbuff.h
756 +++ b/include/linux/skbuff.h
757 @@ -28,6 +28,9 @@
758 #include <linux/rcupdate.h>
759 #include <linux/dmaengine.h>
760 #include <linux/hrtimer.h>
761 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
762 +#include <linux/imq.h>
763 +#endif
764
765 #define HAVE_ALLOC_SKB /* For the drivers to know */
766 #define HAVE_ALIGNABLE_SKB /* Ditto 8) */
767 @@ -278,6 +281,9 @@ struct sk_buff {
768 * first. This is owned by whoever has the skb queued ATM.
769 */
770 char cb[48];
771 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
772 + void *cb_next;
773 +#endif
774
775 unsigned int len,
776 data_len;
777 @@ -308,6 +314,9 @@ struct sk_buff {
778 struct nf_conntrack *nfct;
779 struct sk_buff *nfct_reasm;
780 #endif
781 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
782 + struct nf_queue_entry *nf_queue_entry;
783 +#endif
784 #ifdef CONFIG_BRIDGE_NETFILTER
785 struct nf_bridge_info *nf_bridge;
786 #endif
787 @@ -327,6 +336,9 @@ struct sk_buff {
788 __u8 do_not_encrypt:1;
789 #endif
790 /* 0/13/14 bit hole */
791 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
792 + __u8 imq_flags:IMQ_F_BITS;
793 +#endif
794
795 #ifdef CONFIG_NET_DMA
796 dma_cookie_t dma_cookie;
797 @@ -367,6 +379,12 @@ extern void skb_dma_unmap(struct device
798 enum dma_data_direction dir);
799 #endif
800
801 +
802 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
803 +extern int skb_save_cb(struct sk_buff *skb);
804 +extern int skb_restore_cb(struct sk_buff *skb);
805 +#endif
806 +
807 extern void kfree_skb(struct sk_buff *skb);
808 extern void __kfree_skb(struct sk_buff *skb);
809 extern struct sk_buff *__alloc_skb(unsigned int size,
810 @@ -1804,6 +1822,10 @@ static inline void __nf_copy(struct sk_b
811 dst->nfct_reasm = src->nfct_reasm;
812 nf_conntrack_get_reasm(src->nfct_reasm);
813 #endif
814 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
815 + dst->imq_flags = src->imq_flags;
816 + dst->nf_queue_entry = src->nf_queue_entry;
817 +#endif
818 #ifdef CONFIG_BRIDGE_NETFILTER
819 dst->nf_bridge = src->nf_bridge;
820 nf_bridge_get(src->nf_bridge);
821 --- a/net/core/dev.c
822 +++ b/net/core/dev.c
823 @@ -96,6 +96,9 @@
824 #include <net/net_namespace.h>
825 #include <net/sock.h>
826 #include <linux/rtnetlink.h>
827 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
828 +#include <linux/imq.h>
829 +#endif
830 #include <linux/proc_fs.h>
831 #include <linux/seq_file.h>
832 #include <linux/stat.h>
833 @@ -1655,7 +1658,11 @@ int dev_hard_start_xmit(struct sk_buff *
834 struct netdev_queue *txq)
835 {
836 if (likely(!skb->next)) {
837 - if (!list_empty(&ptype_all))
838 + if (!list_empty(&ptype_all)
839 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
840 + && !(skb->imq_flags & IMQ_F_ENQUEUE)
841 +#endif
842 + )
843 dev_queue_xmit_nit(skb, dev);
844
845 if (netif_needs_gso(dev, skb)) {
846 @@ -1746,8 +1753,7 @@ static u16 simple_tx_hash(struct net_dev
847 return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
848 }
849
850 -static struct netdev_queue *dev_pick_tx(struct net_device *dev,
851 - struct sk_buff *skb)
852 +struct netdev_queue *dev_pick_tx(struct net_device *dev, struct sk_buff *skb)
853 {
854 u16 queue_index = 0;
855
856 @@ -1759,6 +1765,7 @@ static struct netdev_queue *dev_pick_tx(
857 skb_set_queue_mapping(skb, queue_index);
858 return netdev_get_tx_queue(dev, queue_index);
859 }
860 +EXPORT_SYMBOL(dev_pick_tx);
861
862 /**
863 * dev_queue_xmit - transmit a buffer
864 --- a/include/linux/netdevice.h
865 +++ b/include/linux/netdevice.h
866 @@ -950,6 +950,7 @@ extern int dev_alloc_name(struct net_de
867 extern int dev_open(struct net_device *dev);
868 extern int dev_close(struct net_device *dev);
869 extern void dev_disable_lro(struct net_device *dev);
870 +extern struct netdev_queue *dev_pick_tx(struct net_device *dev, struct sk_buff *skb);
871 extern int dev_queue_xmit(struct sk_buff *skb);
872 extern int register_netdevice(struct net_device *dev);
873 extern void unregister_netdevice(struct net_device *dev);
874 --- /dev/null
875 +++ b/include/linux/netfilter/xt_IMQ.h
876 @@ -0,0 +1,9 @@
877 +#ifndef _XT_IMQ_H
878 +#define _XT_IMQ_H
879 +
880 +struct xt_imq_info {
881 + unsigned int todev; /* target imq device */
882 +};
883 +
884 +#endif /* _XT_IMQ_H */
885 +
886 --- a/include/net/netfilter/nf_queue.h
887 +++ b/include/net/netfilter/nf_queue.h
888 @@ -13,6 +13,12 @@ struct nf_queue_entry {
889 struct net_device *indev;
890 struct net_device *outdev;
891 int (*okfn)(struct sk_buff *);
892 +
893 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
894 + int (*next_outfn)(struct nf_queue_entry *entry,
895 + unsigned int queuenum);
896 + unsigned int next_queuenum;
897 +#endif
898 };
899
900 #define nf_queue_entry_reroute(x) ((void *)x + sizeof(struct nf_queue_entry))
901 @@ -30,5 +36,11 @@ extern int nf_unregister_queue_handler(u
902 const struct nf_queue_handler *qh);
903 extern void nf_unregister_queue_handlers(const struct nf_queue_handler *qh);
904 extern void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict);
905 +extern void nf_queue_entry_release_refs(struct nf_queue_entry *entry);
906 +
907 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
908 +extern void nf_register_queue_imq_handler(const struct nf_queue_handler *qh);
909 +extern void nf_unregister_queue_imq_handler(void);
910 +#endif
911
912 #endif /* _NF_QUEUE_H */
913 --- a/net/core/skbuff.c
914 +++ b/net/core/skbuff.c
915 @@ -69,6 +69,9 @@
916
917 static struct kmem_cache *skbuff_head_cache __read_mostly;
918 static struct kmem_cache *skbuff_fclone_cache __read_mostly;
919 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
920 +static struct kmem_cache *skbuff_cb_store_cache __read_mostly;
921 +#endif
922
923 static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
924 struct pipe_buffer *buf)
925 @@ -88,6 +91,80 @@ static int sock_pipe_buf_steal(struct pi
926 return 1;
927 }
928
929 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
930 +/* Control buffer save/restore for IMQ devices */
931 +struct skb_cb_table {
932 + void *cb_next;
933 + atomic_t refcnt;
934 + char cb[48];
935 +};
936 +
937 +static DEFINE_SPINLOCK(skb_cb_store_lock);
938 +
939 +int skb_save_cb(struct sk_buff *skb)
940 +{
941 + struct skb_cb_table *next;
942 +
943 + next = kmem_cache_alloc(skbuff_cb_store_cache, GFP_ATOMIC);
944 + if (!next)
945 + return -ENOMEM;
946 +
947 + BUILD_BUG_ON(sizeof(skb->cb) != sizeof(next->cb));
948 +
949 + memcpy(next->cb, skb->cb, sizeof(skb->cb));
950 + next->cb_next = skb->cb_next;
951 +
952 + atomic_set(&next->refcnt, 1);
953 +
954 + skb->cb_next = next;
955 + return 0;
956 +}
957 +EXPORT_SYMBOL(skb_save_cb);
958 +
959 +int skb_restore_cb(struct sk_buff *skb)
960 +{
961 + struct skb_cb_table *next;
962 +
963 + if (!skb->cb_next)
964 + return 0;
965 +
966 + next = skb->cb_next;
967 +
968 + BUILD_BUG_ON(sizeof(skb->cb) != sizeof(next->cb));
969 +
970 + memcpy(skb->cb, next->cb, sizeof(skb->cb));
971 + skb->cb_next = next->cb_next;
972 +
973 + spin_lock(&skb_cb_store_lock);
974 +
975 + if (atomic_dec_and_test(&next->refcnt)) {
976 + kmem_cache_free(skbuff_cb_store_cache, next);
977 + }
978 +
979 + spin_unlock(&skb_cb_store_lock);
980 +
981 + return 0;
982 +}
983 +EXPORT_SYMBOL(skb_restore_cb);
984 +
985 +static void skb_copy_stored_cb(struct sk_buff *new, struct sk_buff *old)
986 +{
987 + struct skb_cb_table *next;
988 +
989 + if (!old->cb_next) {
990 + new->cb_next = NULL;
991 + return;
992 + }
993 +
994 + spin_lock(&skb_cb_store_lock);
995 +
996 + next = old->cb_next;
997 + atomic_inc(&next->refcnt);
998 + new->cb_next = next;
999 +
1000 + spin_unlock(&skb_cb_store_lock);
1001 +}
1002 +#endif
1003
1004 /* Pipe buffer operations for a socket. */
1005 static struct pipe_buf_operations sock_pipe_buf_ops = {
1006 @@ -381,6 +458,15 @@ static void skb_release_head_state(struc
1007 WARN_ON(in_irq());
1008 skb->destructor(skb);
1009 }
1010 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1011 + /* This should not happen. When it does, avoid memleak by restoring
1012 + the chain of cb-backups. */
1013 + while (skb->cb_next != NULL) {
1014 + printk(KERN_WARNING "kfree_skb: skb->cb_next: %p\n",
1015 + skb->cb_next);
1016 + skb_restore_cb(skb);
1017 + }
1018 +#endif
1019 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
1020 nf_conntrack_put(skb->nfct);
1021 nf_conntrack_put_reasm(skb->nfct_reasm);
1022 @@ -493,6 +579,9 @@ static void __copy_skb_header(struct sk_
1023 new->sp = secpath_get(old->sp);
1024 #endif
1025 memcpy(new->cb, old->cb, sizeof(old->cb));
1026 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1027 + skb_copy_stored_cb(new, old);
1028 +#endif
1029 new->csum_start = old->csum_start;
1030 new->csum_offset = old->csum_offset;
1031 new->local_df = old->local_df;
1032 @@ -2397,6 +2486,13 @@ void __init skb_init(void)
1033 0,
1034 SLAB_HWCACHE_ALIGN|SLAB_PANIC,
1035 NULL);
1036 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1037 + skbuff_cb_store_cache = kmem_cache_create("skbuff_cb_store_cache",
1038 + sizeof(struct skb_cb_table),
1039 + 0,
1040 + SLAB_HWCACHE_ALIGN|SLAB_PANIC,
1041 + NULL);
1042 +#endif
1043 }
1044
1045 /**
1046 --- a/net/netfilter/Kconfig
1047 +++ b/net/netfilter/Kconfig
1048 @@ -357,6 +357,18 @@ config NETFILTER_XT_TARGET_DSCP
1049
1050 To compile it as a module, choose M here. If unsure, say N.
1051
1052 +config NETFILTER_XT_TARGET_IMQ
1053 + tristate '"IMQ" target support'
1054 + depends on NETFILTER_XTABLES
1055 + depends on IP_NF_MANGLE || IP6_NF_MANGLE
1056 + select IMQ
1057 + default m if NETFILTER_ADVANCED=n
1058 + help
1059 + This option adds an `IMQ' target which is used to specify if and
1060 + to which imq device packets should get enqueued/dequeued.
1061 +
1062 + To compile it as a module, choose M here. If unsure, say N.
1063 +
1064 config NETFILTER_XT_TARGET_MARK
1065 tristate '"MARK" target support'
1066 default m if NETFILTER_ADVANCED=n
1067 --- a/net/netfilter/Makefile
1068 +++ b/net/netfilter/Makefile
1069 @@ -45,6 +45,7 @@ obj-$(CONFIG_NETFILTER_XT_TARGET_CLASSIF
1070 obj-$(CONFIG_NETFILTER_XT_TARGET_CONNMARK) += xt_CONNMARK.o
1071 obj-$(CONFIG_NETFILTER_XT_TARGET_CONNSECMARK) += xt_CONNSECMARK.o
1072 obj-$(CONFIG_NETFILTER_XT_TARGET_DSCP) += xt_DSCP.o
1073 +obj-$(CONFIG_NETFILTER_XT_TARGET_IMQ) += xt_IMQ.o
1074 obj-$(CONFIG_NETFILTER_XT_TARGET_MARK) += xt_MARK.o
1075 obj-$(CONFIG_NETFILTER_XT_TARGET_NFLOG) += xt_NFLOG.o
1076 obj-$(CONFIG_NETFILTER_XT_TARGET_NFQUEUE) += xt_NFQUEUE.o
1077 --- a/net/netfilter/nf_queue.c
1078 +++ b/net/netfilter/nf_queue.c
1079 @@ -20,6 +20,26 @@ static const struct nf_queue_handler *qu
1080
1081 static DEFINE_MUTEX(queue_handler_mutex);
1082
1083 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1084 +static const struct nf_queue_handler *queue_imq_handler;
1085 +
1086 +void nf_register_queue_imq_handler(const struct nf_queue_handler *qh)
1087 +{
1088 + mutex_lock(&queue_handler_mutex);
1089 + rcu_assign_pointer(queue_imq_handler, qh);
1090 + mutex_unlock(&queue_handler_mutex);
1091 +}
1092 +EXPORT_SYMBOL(nf_register_queue_imq_handler);
1093 +
1094 +void nf_unregister_queue_imq_handler(void)
1095 +{
1096 + mutex_lock(&queue_handler_mutex);
1097 + rcu_assign_pointer(queue_imq_handler, NULL);
1098 + mutex_unlock(&queue_handler_mutex);
1099 +}
1100 +EXPORT_SYMBOL(nf_unregister_queue_imq_handler);
1101 +#endif
1102 +
1103 /* return EBUSY when somebody else is registered, return EEXIST if the
1104 * same handler is registered, return 0 in case of success. */
1105 int nf_register_queue_handler(u_int8_t pf, const struct nf_queue_handler *qh)
1106 @@ -80,7 +100,7 @@ void nf_unregister_queue_handlers(const
1107 }
1108 EXPORT_SYMBOL_GPL(nf_unregister_queue_handlers);
1109
1110 -static void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
1111 +void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
1112 {
1113 /* Release those devices we held, or Alexey will kill me. */
1114 if (entry->indev)
1115 @@ -100,6 +120,7 @@ static void nf_queue_entry_release_refs(
1116 /* Drop reference to owner of hook which queued us. */
1117 module_put(entry->elem->owner);
1118 }
1119 +EXPORT_SYMBOL_GPL(nf_queue_entry_release_refs);
1120
1121 /*
1122 * Any packet that leaves via this function must come back
1123 @@ -121,12 +142,26 @@ static int __nf_queue(struct sk_buff *sk
1124 #endif
1125 const struct nf_afinfo *afinfo;
1126 const struct nf_queue_handler *qh;
1127 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1128 + const struct nf_queue_handler *qih = NULL;
1129 +#endif
1130
1131 /* QUEUE == DROP if noone is waiting, to be safe. */
1132 rcu_read_lock();
1133
1134 qh = rcu_dereference(queue_handler[pf]);
1135 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1136 +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1137 + if (pf == PF_INET || pf == PF_INET6)
1138 +#else
1139 + if (pf == PF_INET)
1140 +#endif
1141 + qih = rcu_dereference(queue_imq_handler);
1142 +
1143 + if (!qh && !qih)
1144 +#else /* !IMQ */
1145 if (!qh)
1146 +#endif
1147 goto err_unlock;
1148
1149 afinfo = nf_get_afinfo(pf);
1150 @@ -145,6 +180,10 @@ static int __nf_queue(struct sk_buff *sk
1151 .indev = indev,
1152 .outdev = outdev,
1153 .okfn = okfn,
1154 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1155 + .next_outfn = qh ? qh->outfn : NULL,
1156 + .next_queuenum = queuenum,
1157 +#endif
1158 };
1159
1160 /* If it's going away, ignore hook. */
1161 @@ -170,8 +209,19 @@ static int __nf_queue(struct sk_buff *sk
1162 }
1163 #endif
1164 afinfo->saveroute(skb, entry);
1165 +
1166 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1167 + if (qih) {
1168 + status = qih->outfn(entry, queuenum);
1169 + goto imq_skip_queue;
1170 + }
1171 +#endif
1172 +
1173 status = qh->outfn(entry, queuenum);
1174
1175 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1176 +imq_skip_queue:
1177 +#endif
1178 rcu_read_unlock();
1179
1180 if (status < 0) {
1181 --- /dev/null
1182 +++ b/net/netfilter/xt_IMQ.c
1183 @@ -0,0 +1,73 @@
1184 +/*
1185 + * This target marks packets to be enqueued to an imq device
1186 + */
1187 +#include <linux/module.h>
1188 +#include <linux/skbuff.h>
1189 +#include <linux/netfilter/x_tables.h>
1190 +#include <linux/netfilter/xt_IMQ.h>
1191 +#include <linux/imq.h>
1192 +
1193 +static unsigned int imq_target(struct sk_buff *pskb,
1194 + const struct xt_target_param *par)
1195 +{
1196 + const struct xt_imq_info *mr = par->targinfo;
1197 +
1198 + pskb->imq_flags = (mr->todev & IMQ_F_IFMASK) | IMQ_F_ENQUEUE;
1199 +
1200 + return XT_CONTINUE;
1201 +}
1202 +
1203 +static bool imq_checkentry(const struct xt_tgchk_param *par)
1204 +{
1205 + struct xt_imq_info *mr = par->targinfo;
1206 +
1207 + if (mr->todev > IMQ_MAX_DEVS - 1) {
1208 + printk(KERN_WARNING
1209 + "IMQ: invalid device specified, highest is %u\n",
1210 + IMQ_MAX_DEVS - 1);
1211 + return 0;
1212 + }
1213 +
1214 + return 1;
1215 +}
1216 +
1217 +static struct xt_target xt_imq_reg[] __read_mostly = {
1218 + {
1219 + .name = "IMQ",
1220 + .family = AF_INET,
1221 + .checkentry = imq_checkentry,
1222 + .target = imq_target,
1223 + .targetsize = sizeof(struct xt_imq_info),
1224 + .table = "mangle",
1225 + .me = THIS_MODULE
1226 + },
1227 + {
1228 + .name = "IMQ",
1229 + .family = AF_INET6,
1230 + .checkentry = imq_checkentry,
1231 + .target = imq_target,
1232 + .targetsize = sizeof(struct xt_imq_info),
1233 + .table = "mangle",
1234 + .me = THIS_MODULE
1235 + },
1236 +};
1237 +
1238 +static int __init imq_init(void)
1239 +{
1240 + return xt_register_targets(xt_imq_reg, ARRAY_SIZE(xt_imq_reg));
1241 +}
1242 +
1243 +static void __exit imq_fini(void)
1244 +{
1245 + xt_unregister_targets(xt_imq_reg, ARRAY_SIZE(xt_imq_reg));
1246 +}
1247 +
1248 +module_init(imq_init);
1249 +module_exit(imq_fini);
1250 +
1251 +MODULE_AUTHOR("http://www.linuximq.net");
1252 +MODULE_DESCRIPTION("iptables IMQ target for the intermediate queue device. See http://www.linuximq.net/ for more information.");
1253 +MODULE_LICENSE("GPL");
1254 +MODULE_ALIAS("ipt_IMQ");
1255 +MODULE_ALIAS("ip6t_IMQ");
1256 +
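
From userspace, the IMQ target above is driven by the matching iptables
extension (assumed installed alongside this kernel patch), and shaping is
done with a qdisc attached to the imq device. A minimal sketch; interface
names and rates are purely illustrative:

  # direct packets arriving on eth0 through imq0; the target is restricted
  # to the mangle table (see .table = "mangle" above)
  iptables -t mangle -A PREROUTING -i eth0 -j IMQ --todev 0

  # shape the aggregated traffic on imq0 with HTB
  tc qdisc add dev imq0 root handle 1: htb default 20
  tc class add dev imq0 parent 1: classid 1:20 htb rate 512kbit ceil 1mbit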