@@ -1,6 +1,6 @@
--- /dev/null
+++ b/drivers/net/imq.c
-@@ -0,0 +1,474 @@
+@@ -0,0 +1,571 @@
+/*
+ * Pseudo-driver for the intermediate queue device.
+ *
@@ -51,10 +51,28 @@
+ *
+ *
+ * 2008/06/17 - 2.6.25 - Changed imq.c to use qdisc_run() instead
-+ * of qdisc_restart() and moved qdisc_run() to tasklet to avoid
++ * of qdisc_restart() and moved qdisc_run() to tasklet to avoid
+ * recursive locking. New initialization routines to fix 'rmmod' not
+ * working anymore. Used code from ifb.c. (Jussi Kivilinna)
+ *
++ * 2008/08/06 - 2.6.26 - (JK)
++ * - Replaced tasklet with 'netif_schedule()'.
++ * - Cleaned up and added comments for imq_nf_queue().
++ *
++ * 2009/04/12
++ * - Add skb_save_cb/skb_restore_cb helper functions for backing up
++ * the control buffer. This is needed because the qdisc layer on
++ * kernels 2.6.27 and newer overwrites the control buffer. (Jussi Kivilinna)
++ * - Add better locking for IMQ device. Hopefully this will solve
++ * SMP issues. (Jussi Kivilinna)
++ * - Port to 2.6.27
++ * - Port to 2.6.28
++ * - Port to 2.6.29 + fix rmmod not working
++ *
++ * 2009/04/20 - (Jussi Kivilinna)
++ * - Use netdevice feature flags to avoid extra packet handling
++ * by core networking layer and possibly increase performance.
++ *
+ * Also, many thanks to Pablo Sebastian Greco for making the initial
+ * patch and to those who helped the testing.
+ *
@@ -64,8 +82,10 @@
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/moduleparam.h>
++#include <linux/list.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
++#include <linux/etherdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_arp.h>
+#include <linux/netfilter.h>
@@ -77,11 +97,6 @@
+#include <net/pkt_sched.h>
+#include <net/netfilter/nf_queue.h>
+
-+struct imq_private {
-+ struct tasklet_struct tasklet;
-+ unsigned long tasklet_pending;
-+};
-+
+static nf_hookfn imq_nf_hook;
+
+static struct nf_hook_ops imq_ingress_ipv4 = {
@@ -140,8 +155,11 @@
+static unsigned int numdevs = IMQ_MAX_DEVS;
+#endif
+
++static DEFINE_SPINLOCK(imq_nf_queue_lock);
++
+static struct net_device *imq_devs_cache[IMQ_MAX_DEVS];
+
++
+static struct net_device_stats *imq_get_stats(struct net_device *dev)
+{
+ return &dev->stats;
@@ -153,12 +171,35 @@
+ struct nf_queue_entry *entry = skb->nf_queue_entry;
+
+ if (entry) {
-+ if (entry->indev)
-+ dev_put(entry->indev);
-+ if (entry->outdev)
-+ dev_put(entry->outdev);
++ nf_queue_entry_release_refs(entry);
++ kfree(entry);
++ }
++
++ skb_restore_cb(skb); /* kfree backup */
++}
++
++static void imq_nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
++{
++ int status;
++
++ if (!entry->next_outfn) {
++ spin_lock_bh(&imq_nf_queue_lock);
++ nf_reinject(entry, verdict);
++ spin_unlock_bh(&imq_nf_queue_lock);
++ return;
++ }
++
++ rcu_read_lock();
++ local_bh_disable();
++ status = entry->next_outfn(entry, entry->next_queuenum);
++ local_bh_enable();
++ if (status < 0) {
++ nf_queue_entry_release_refs(entry);
++ kfree_skb(entry->skb);
+ kfree(entry);
+ }
++
++ rcu_read_unlock();
+}
+
+static int imq_dev_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -169,26 +210,35 @@
+ skb->imq_flags = 0;
+ skb->destructor = NULL;
+
++ skb_restore_cb(skb); /* restore skb->cb */
++
+ dev->trans_start = jiffies;
-+ nf_reinject(skb->nf_queue_entry, NF_ACCEPT);
++ imq_nf_reinject(skb->nf_queue_entry, NF_ACCEPT);
+ return 0;
+}
+
+static int imq_nf_queue(struct nf_queue_entry *entry, unsigned queue_num)
+{
+ struct net_device *dev;
-+ struct imq_private *priv;
-+ struct sk_buff *skb2 = NULL;
++ struct sk_buff *skb_orig, *skb, *skb_shared;
+ struct Qdisc *q;
-+ unsigned int index = entry->skb->imq_flags & IMQ_F_IFMASK;
-+ int ret = -1;
-+
-+ if (index > numdevs)
-+ return -1;
++ struct netdev_queue *txq;
++ int users, index;
++ int retval = -EINVAL;
++
++ index = entry->skb->imq_flags & IMQ_F_IFMASK;
++ if (unlikely(index > numdevs - 1)) {
++ if (net_ratelimit())
++ printk(KERN_WARNING
++ "IMQ: invalid device specified, highest is %u\n",
++ numdevs - 1);
++ retval = -EINVAL;
++ goto out;
++ }
+
+ /* check for imq device by index from cache */
+ dev = imq_devs_cache[index];
-+ if (!dev) {
++ if (unlikely(!dev)) {
+ char buf[8];
+
+ /* get device by name and cache result */
@@ -197,49 +247,90 @@
+ if (!dev) {
+ /* not found ?!*/
+ BUG();
-+ return -1;
++ retval = -ENODEV;
++ goto out;
+ }
+
+ imq_devs_cache[index] = dev;
++ dev_put(dev);
+ }
+
-+ priv = netdev_priv(dev);
-+ if (!(dev->flags & IFF_UP)) {
++ if (unlikely(!(dev->flags & IFF_UP))) {
+ entry->skb->imq_flags = 0;
-+ nf_reinject(entry, NF_ACCEPT);
-+ return 0;
++ imq_nf_reinject(entry, NF_ACCEPT);
++ retval = 0;
++ goto out;
+ }
+ dev->last_rx = jiffies;
+
-+ if (entry->skb->destructor) {
-+ skb2 = entry->skb;
-+ entry->skb = skb_clone(entry->skb, GFP_ATOMIC);
-+ if (!entry->skb)
-+ return -1;
++ skb = entry->skb;
++ skb_orig = NULL;
++
++ /* skb has owner? => make clone */
++ if (unlikely(skb->destructor)) {
++ skb_orig = skb;
++ skb = skb_clone(skb, GFP_ATOMIC);
++ if (!skb) {
++ retval = -ENOMEM;
++ goto out;
++ }
++ entry->skb = skb;
+ }
-+ entry->skb->nf_queue_entry = entry;
+
-+ dev->stats.rx_bytes += entry->skb->len;
++ skb->nf_queue_entry = entry;
++
++ dev->stats.rx_bytes += skb->len;
+ dev->stats.rx_packets++;
+
-+ spin_lock_bh(&dev->queue_lock);
-+ q = dev->qdisc;
-+ if (q->enqueue) {
-+ q->enqueue(skb_get(entry->skb), q);
-+ if (skb_shared(entry->skb)) {
-+ entry->skb->destructor = imq_skb_destructor;
-+ kfree_skb(entry->skb);
-+ ret = 0;
-+ }
-+ }
-+ if (!test_and_set_bit(1, &priv->tasklet_pending))
-+ tasklet_schedule(&priv->tasklet);
-+ spin_unlock_bh(&dev->queue_lock);
++ txq = dev_pick_tx(dev, skb);
+
-+ if (skb2)
-+ kfree_skb(ret ? entry->skb : skb2);
++ q = rcu_dereference(txq->qdisc);
++ if (unlikely(!q->enqueue))
++ goto packet_not_eaten_by_imq_dev;
+
-+ return ret;
++ spin_lock_bh(qdisc_lock(q));
++
++ users = atomic_read(&skb->users);
++
++ skb_shared = skb_get(skb); /* increase reference count by one */
++ skb_save_cb(skb_shared); /* backup skb->cb, as qdisc layer will
++ overwrite it */
++ qdisc_enqueue_root(skb_shared, q); /* might kfree_skb */
++
++ if (likely(atomic_read(&skb_shared->users) == users + 1)) {
++ kfree_skb(skb_shared); /* decrease reference count by one */
++
++ skb->destructor = &imq_skb_destructor;
++
++ /* cloned? */
++ if (skb_orig)
++ kfree_skb(skb_orig); /* free original */
++
++ spin_unlock_bh(qdisc_lock(q));
++
++ /* schedule qdisc dequeue */
++ __netif_schedule(q);
++
++ retval = 0;
++ goto out;
++ } else {
++ skb_restore_cb(skb_shared); /* restore skb->cb */
++ /* The qdisc dropped the packet and already decreased the skb's
++ * reference count, so we must not try to free it again; that
++ * would actually destroy the skb. */
++ spin_unlock_bh(qdisc_lock(q));
++ goto packet_not_eaten_by_imq_dev;
++ }
++
++packet_not_eaten_by_imq_dev:
++ /* cloned? restore original */
++ if (skb_orig) {
++ kfree_skb(skb);
++ entry->skb = skb_orig;
++ }
++ retval = -1;
++out:
++ return retval;
+}
+
+static struct nf_queue_handler nfqh = {
@@ -247,17 +338,6 @@
+ .outfn = imq_nf_queue,
+};
+
-+static void qdisc_run_tasklet(unsigned long arg)
-+{
-+ struct net_device *dev = (struct net_device *)arg;
-+ struct imq_private *priv = netdev_priv(dev);
-+
-+ spin_lock(&dev->queue_lock);
-+ qdisc_run(dev);
-+ clear_bit(1, &priv->tasklet_pending);
-+ spin_unlock(&dev->queue_lock);
-+}
-+
+static unsigned int imq_nf_hook(unsigned int hook, struct sk_buff *pskb,
+ const struct net_device *indev,
+ const struct net_device *outdev,
@@ -271,87 +351,98 @@
+
+static int imq_close(struct net_device *dev)
+{
-+ struct imq_private *priv = netdev_priv(dev);
-+
-+ tasklet_kill(&priv->tasklet);
+ netif_stop_queue(dev);
-+
+ return 0;
+}
+
+static int imq_open(struct net_device *dev)
+{
-+ struct imq_private *priv = netdev_priv(dev);
-+
-+ tasklet_init(&priv->tasklet, qdisc_run_tasklet, (unsigned long)dev);
+ netif_start_queue(dev);
-+
+ return 0;
+}
+
++static const struct net_device_ops imq_netdev_ops = {
++ .ndo_open = imq_open,
++ .ndo_stop = imq_close,
++ .ndo_start_xmit = imq_dev_xmit,
++ .ndo_get_stats = imq_get_stats,
++};
++
+static void imq_setup(struct net_device *dev)
+{
-+ dev->hard_start_xmit = imq_dev_xmit;
-+ dev->open = imq_open;
-+ dev->get_stats = imq_get_stats;
-+ dev->stop = imq_close;
++ dev->netdev_ops = &imq_netdev_ops;
+ dev->type = ARPHRD_VOID;
+ dev->mtu = 16000;
+ dev->tx_queue_len = 11000;
+ dev->flags = IFF_NOARP;
++ dev->features = NETIF_F_SG | NETIF_F_FRAGLIST |
++ NETIF_F_GSO | NETIF_F_HW_CSUM |
++ NETIF_F_HIGHDMA;
++}
++
++static int imq_validate(struct nlattr *tb[], struct nlattr *data[])
++{
++ int ret = 0;
++
++ if (tb[IFLA_ADDRESS]) {
++ if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
++ ret = -EINVAL;
++ goto end;
++ }
++ if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
++ ret = -EADDRNOTAVAIL;
++ goto end;
++ }
++ }
++ return 0;
++end:
++ printk(KERN_WARNING "IMQ: imq_validate failed (%d)\n", ret);
++ return ret;
+}
+
+static struct rtnl_link_ops imq_link_ops __read_mostly = {
+ .kind = "imq",
-+ .priv_size = sizeof(struct imq_private),
++ .priv_size = 0,
+ .setup = imq_setup,
++ .validate = imq_validate,
+};
+
+static int __init imq_init_hooks(void)
+{
+ int err;
+
-+ err = nf_register_queue_handler(PF_INET, &nfqh);
-+ if (err)
-+ goto err1;
++ nf_register_queue_imq_handler(&nfqh);
+
+ err = nf_register_hook(&imq_ingress_ipv4);
+ if (err)
-+ goto err2;
++ goto err1;
+
+ err = nf_register_hook(&imq_egress_ipv4);
+ if (err)
-+ goto err3;
++ goto err2;
+
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-+ err = nf_register_queue_handler(PF_INET6, &nfqh);
-+ if (err)
-+ goto err4;
-+
+ err = nf_register_hook(&imq_ingress_ipv6);
+ if (err)
-+ goto err5;
++ goto err3;
+
+ err = nf_register_hook(&imq_egress_ipv6);
+ if (err)
-+ goto err6;
++ goto err4;
+#endif
+
+ return 0;
+
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-+err6:
-+ nf_unregister_hook(&imq_ingress_ipv6);
-+err5:
-+ nf_unregister_queue_handler(PF_INET6, &nfqh);
+err4:
++ nf_unregister_hook(&imq_ingress_ipv6);
++err3:
+ nf_unregister_hook(&imq_egress_ipv4);
+#endif
-+err3:
-+ nf_unregister_hook(&imq_ingress_ipv4);
+err2:
-+ nf_unregister_queue_handler(PF_INET, &nfqh);
++ nf_unregister_hook(&imq_ingress_ipv4);
+err1:
++ nf_unregister_queue_imq_handler();
+ return err;
+}
+
@@ -360,7 +451,7 @@
+ struct net_device *dev;
+ int ret;
+
-+ dev = alloc_netdev(sizeof(struct imq_private), "imq%d", imq_setup);
++ dev = alloc_netdev(0, "imq%d", imq_setup);
+ if (!dev)
+ return -ENOMEM;
+
@@ -383,7 +474,7 @@
+{
+ int err, i;
+
-+ if (!numdevs || numdevs > IMQ_MAX_DEVS) {
++ if (numdevs < 1 || numdevs > IMQ_MAX_DEVS) {
+ printk(KERN_ERR "IMQ: numdevs has to be between 1 and %u\n",
+ IMQ_MAX_DEVS);
+ return -EINVAL;
@@ -408,6 +499,12 @@
+{
+ int err;
+
++#if defined(CONFIG_IMQ_NUM_DEVS)
++ BUILD_BUG_ON(CONFIG_IMQ_NUM_DEVS > 16);
++ BUILD_BUG_ON(CONFIG_IMQ_NUM_DEVS < 2);
++ BUILD_BUG_ON(CONFIG_IMQ_NUM_DEVS - 1 > IMQ_F_IFMASK);
++#endif
++
+ err = imq_init_devs();
+ if (err) {
+ printk(KERN_ERR "IMQ: Error trying imq_init_devs(net)\n");
@@ -443,11 +540,11 @@
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ nf_unregister_hook(&imq_ingress_ipv6);
+ nf_unregister_hook(&imq_egress_ipv6);
-+ nf_unregister_queue_handler(PF_INET6, &nfqh);
+#endif
+ nf_unregister_hook(&imq_ingress_ipv4);
+ nf_unregister_hook(&imq_egress_ipv4);
-+ nf_unregister_queue_handler(PF_INET, &nfqh);
++
++ nf_unregister_queue_imq_handler();
+}
+
+static void __exit imq_cleanup_devs(void)
@@ -477,7 +574,7 @@
+
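A note on the enqueue path above: qdisc_enqueue_root() may free a dropped packet internally, so imq_nf_queue() cannot tell from the call alone whether the qdisc kept the skb. Instead it takes one extra reference via skb_get() before enqueueing and compares the reference count afterwards. The following standalone sketch models that trick; it is an illustration only, not part of the patch, and a plain int stands in for the atomic skb->users counter.

/* Build with: cc -o refcount refcount.c */
#include <stdio.h>

struct fake_skb { int users; };

/* stands in for qdisc_enqueue_root(); drops when the queue is full */
static void fake_enqueue(struct fake_skb *skb, int queue_full)
{
	if (queue_full)
		skb->users--;	/* qdisc dropped it: kfree_skb() */
	/* otherwise the qdisc keeps the reference it was handed */
}

static int try_enqueue(struct fake_skb *skb, int queue_full)
{
	int users = skb->users;	/* atomic_read(&skb->users) */

	skb->users++;		/* skb_get(): one extra reference */
	fake_enqueue(skb, queue_full);

	if (skb->users == users + 1) {	/* qdisc kept it */
		skb->users--;	/* kfree_skb(): give back our extra ref */
		return 0;	/* packet eaten by the imq device */
	}
	return -1;		/* dropped; caller still owns the skb */
}

int main(void)
{
	struct fake_skb skb = { .users = 1 };
	int ret;

	ret = try_enqueue(&skb, 0);
	printf("kept: ret=%d users=%d\n", ret, skb.users);
	ret = try_enqueue(&skb, 1);
	printf("dropped: ret=%d users=%d\n", ret, skb.users);
	return 0;
}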
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
-@@ -110,6 +110,129 @@ config EQUALIZER
+@@ -110,6 +110,129 @@
To compile this driver as a module, choose M here: the module
will be called eql. If unsure, say N.

@@ -609,7 +706,7 @@
select CRC32
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
-@@ -150,6 +150,7 @@ obj-$(CONFIG_SLHC) += slhc.o
+@@ -150,6 +150,7 @@
obj-$(CONFIG_XEN_NETDEV_FRONTEND) += xen-netfront.o

obj-$(CONFIG_DUMMY) += dummy.o
@@ -619,52 +716,102 @@
obj-$(CONFIG_DE600) += de600.o
--- /dev/null
+++ b/include/linux/imq.h
-@@ -0,0 +1,9 @@
+@@ -0,0 +1,13 @@
+#ifndef _IMQ_H
+#define _IMQ_H
+
-+#define IMQ_MAX_DEVS 16
++/* IFMASK (16 device indexes, 0 to 15) and flag(s) fit in 5 bits */
++#define IMQ_F_BITS 5
++
++#define IMQ_F_IFMASK 0x0f
++#define IMQ_F_ENQUEUE 0x10
+
-+#define IMQ_F_IFMASK 0x7f
-+#define IMQ_F_ENQUEUE 0x80
++#define IMQ_MAX_DEVS (IMQ_F_IFMASK + 1)
+
+#endif /* _IMQ_H */
++
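The new layout above packs the device index and the enqueue flag into five bits of skb->imq_flags; the old 0x7f mask allowed indexes far beyond the 16 devices the driver actually supports. A small standalone program, illustration only and not part of the patch, showing how the constants compose and decompose (todev is a hypothetical value passed down from the userspace target):

/* Build with: cc -o imqflags imqflags.c */
#include <stdio.h>

#define IMQ_F_BITS    5
#define IMQ_F_IFMASK  0x0f
#define IMQ_F_ENQUEUE 0x10
#define IMQ_MAX_DEVS  (IMQ_F_IFMASK + 1)

int main(void)
{
	unsigned int todev = 3; /* hypothetical --todev value */
	unsigned char imq_flags;

	/* what xt_IMQ.c does in imq_target(): mask the index, set the flag */
	imq_flags = (todev & IMQ_F_IFMASK) | IMQ_F_ENQUEUE;

	/* what imq_nf_queue() does: recover the device index */
	printf("imq_flags=0x%02x -> device imq%u, enqueue=%d, max devs=%d\n",
	       imq_flags, imq_flags & IMQ_F_IFMASK,
	       !!(imq_flags & IMQ_F_ENQUEUE), IMQ_MAX_DEVS);
	return 0;
}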
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ipt_IMQ.h
-@@ -0,0 +1,8 @@
+@@ -0,0 +1,10 @@
+#ifndef _IPT_IMQ_H
+#define _IPT_IMQ_H
+
-+struct ipt_imq_info {
-+ unsigned int todev; /* target imq device */
-+};
++/* Backwards compatibility for old userspace */
++#include <linux/netfilter/xt_IMQ.h>
++
++#define ipt_imq_info xt_imq_info
+
+#endif /* _IPT_IMQ_H */
++
--- /dev/null
+++ b/include/linux/netfilter_ipv6/ip6t_IMQ.h
-@@ -0,0 +1,8 @@
+@@ -0,0 +1,10 @@
+#ifndef _IP6T_IMQ_H
+#define _IP6T_IMQ_H
+
-+struct ip6t_imq_info {
-+ unsigned int todev; /* target imq device */
-+};
++/* Backwards compatibility for old userspace */
++#include <linux/netfilter/xt_IMQ.h>
++
++#define ip6t_imq_info xt_imq_info
+
+#endif /* _IP6T_IMQ_H */
++
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
-@@ -312,6 +312,10 @@ struct sk_buff {
+@@ -28,6 +28,9 @@
+ #include <linux/rcupdate.h>
+ #include <linux/dmaengine.h>
+ #include <linux/hrtimer.h>
++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
++#include <linux/imq.h>
++#endif
+
+ #define HAVE_ALLOC_SKB /* For the drivers to know */
+ #define HAVE_ALIGNABLE_SKB /* Ditto 8) */
+@@ -282,6 +285,9 @@
+ * first. This is owned by whoever has the skb queued ATM.
+ */
+ char cb[48];
++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
++ void *cb_next;
++#endif
+
+ unsigned int len,
+ data_len;
+@@ -312,6 +318,9 @@
struct nf_conntrack *nfct;
struct sk_buff *nfct_reasm;
#endif
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
-+ unsigned char imq_flags;
+ struct nf_queue_entry *nf_queue_entry;
+#endif
#ifdef CONFIG_BRIDGE_NETFILTER
struct nf_bridge_info *nf_bridge;
#endif
-@@ -1844,6 +1848,10 @@ static inline void __nf_copy(struct sk_b
+@@ -332,6 +341,9 @@
+ __u8 requeue:1;
+ #endif
+ /* 0/13/14 bit hole */
++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
++ __u8 imq_flags:IMQ_F_BITS;
++#endif
+
+ #ifdef CONFIG_NET_DMA
+ dma_cookie_t dma_cookie;
+@@ -372,6 +384,12 @@
+ enum dma_data_direction dir);
+ #endif
+
++
++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
++extern int skb_save_cb(struct sk_buff *skb);
++extern int skb_restore_cb(struct sk_buff *skb);
++#endif
++
+ extern void kfree_skb(struct sk_buff *skb);
+ extern void __kfree_skb(struct sk_buff *skb);
+ extern struct sk_buff *__alloc_skb(unsigned int size,
+@@ -1844,6 +1862,10 @@
dst->nfct_reasm = src->nfct_reasm;
nf_conntrack_get_reasm(src->nfct_reasm);
#endif
@@ -687,7 +834,7 @@
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
-@@ -1671,7 +1674,11 @@ int dev_hard_start_xmit(struct sk_buff *
+@@ -1671,7 +1674,11 @@

prefetch(&dev->netdev_ops->ndo_start_xmit);
if (likely(!skb->next)) {
@@ -700,215 +847,414 @@
dev_queue_xmit_nit(skb, dev);

if (netif_needs_gso(dev, skb)) {
+@@ -1762,8 +1769,7 @@
+ return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
+ }
+
+-static struct netdev_queue *dev_pick_tx(struct net_device *dev,
+- struct sk_buff *skb)
++struct netdev_queue *dev_pick_tx(struct net_device *dev, struct sk_buff *skb)
+ {
+ const struct net_device_ops *ops = dev->netdev_ops;
+ u16 queue_index = 0;
+@@ -1776,6 +1782,7 @@
+ skb_set_queue_mapping(skb, queue_index);
+ return netdev_get_tx_queue(dev, queue_index);
+ }
++EXPORT_SYMBOL(dev_pick_tx);
+
+ /**
+ * dev_queue_xmit - transmit a buffer
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -1071,6 +1071,7 @@
+ extern int dev_open(struct net_device *dev);
+ extern int dev_close(struct net_device *dev);
+ extern void dev_disable_lro(struct net_device *dev);
++extern struct netdev_queue *dev_pick_tx(struct net_device *dev, struct sk_buff *skb);
+ extern int dev_queue_xmit(struct sk_buff *skb);
+ extern int register_netdevice(struct net_device *dev);
+ extern void unregister_netdevice(struct net_device *dev);
--- /dev/null
-+++ b/net/ipv4/netfilter/ipt_IMQ.c
-@@ -0,0 +1,69 @@
-+/*
-+ * This target marks packets to be enqueued to an imq device
-+ */
-+#include <linux/module.h>
-+#include <linux/skbuff.h>
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#include <linux/netfilter_ipv4/ipt_IMQ.h>
-+#include <linux/imq.h>
++++ b/include/linux/netfilter/xt_IMQ.h
+@@ -0,0 +1,9 @@
++#ifndef _XT_IMQ_H
++#define _XT_IMQ_H
+
-+static unsigned int imq_target(struct sk_buff *pskb,
-+ const struct net_device *in,
-+ const struct net_device *out,
-+ unsigned int hooknum,
-+ const struct xt_target *target,
-+ const void *targinfo)
++struct xt_imq_info {
++ unsigned int todev; /* target imq device */
++};
++
++#endif /* _XT_IMQ_H */
++
+--- a/include/net/netfilter/nf_queue.h
++++ b/include/net/netfilter/nf_queue.h
+@@ -13,6 +13,12 @@
+ struct net_device *indev;
+ struct net_device *outdev;
+ int (*okfn)(struct sk_buff *);
++
++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
++ int (*next_outfn)(struct nf_queue_entry *entry,
++ unsigned int queuenum);
++ unsigned int next_queuenum;
++#endif
+ };
+
+ #define nf_queue_entry_reroute(x) ((void *)x + sizeof(struct nf_queue_entry))
+@@ -30,5 +36,11 @@
+ const struct nf_queue_handler *qh);
+ extern void nf_unregister_queue_handlers(const struct nf_queue_handler *qh);
+ extern void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict);
++extern void nf_queue_entry_release_refs(struct nf_queue_entry *entry);
++
++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
++extern void nf_register_queue_imq_handler(const struct nf_queue_handler *qh);
++extern void nf_unregister_queue_imq_handler(void);
++#endif
+
+ #endif /* _NF_QUEUE_H */
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -69,6 +69,9 @@
+
+ static struct kmem_cache *skbuff_head_cache __read_mostly;
+ static struct kmem_cache *skbuff_fclone_cache __read_mostly;
++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
++static struct kmem_cache *skbuff_cb_store_cache __read_mostly;
++#endif
+
+ static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
+ struct pipe_buffer *buf)
+@@ -88,6 +91,80 @@
+ return 1;
+ }
+
++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
++/* Control buffer save/restore for IMQ devices */
++struct skb_cb_table {
++ void *cb_next;
++ atomic_t refcnt;
++ char cb[48];
++};
++
++static DEFINE_SPINLOCK(skb_cb_store_lock);
++
++int skb_save_cb(struct sk_buff *skb)
+{
-+ struct ipt_imq_info *mr = (struct ipt_imq_info *)targinfo;
++ struct skb_cb_table *next;
+
-+ pskb->imq_flags = mr->todev | IMQ_F_ENQUEUE;
++ next = kmem_cache_alloc(skbuff_cb_store_cache, GFP_ATOMIC);
++ if (!next)
++ return -ENOMEM;
+
-+ return XT_CONTINUE;
++ BUILD_BUG_ON(sizeof(skb->cb) != sizeof(next->cb));
++
++ memcpy(next->cb, skb->cb, sizeof(skb->cb));
++ next->cb_next = skb->cb_next;
++
++ atomic_set(&next->refcnt, 1);
++
++ skb->cb_next = next;
++ return 0;
+}
++EXPORT_SYMBOL(skb_save_cb);
+
-+static bool imq_checkentry(const char *tablename,
-+ const void *e,
-+ const struct xt_target *target,
-+ void *targinfo,
-+ unsigned int hook_mask)
++int skb_restore_cb(struct sk_buff *skb)
+{
-+ struct ipt_imq_info *mr;
-+
-+ mr = (struct ipt_imq_info *)targinfo;
++ struct skb_cb_table *next;
+
-+ if (mr->todev > IMQ_MAX_DEVS) {
-+ printk(KERN_WARNING
-+ "IMQ: invalid device specified, highest is %u\n",
-+ IMQ_MAX_DEVS);
++ if (!skb->cb_next)
+ return 0;
++
++ next = skb->cb_next;
++
++ BUILD_BUG_ON(sizeof(skb->cb) != sizeof(next->cb));
++
++ memcpy(skb->cb, next->cb, sizeof(skb->cb));
++ skb->cb_next = next->cb_next;
++
++ spin_lock(&skb_cb_store_lock);
++
++ if (atomic_dec_and_test(&next->refcnt)) {
++ kmem_cache_free(skbuff_cb_store_cache, next);
+ }
+
-+ return 1;
++ spin_unlock(&skb_cb_store_lock);
++
++ return 0;
+}
++EXPORT_SYMBOL(skb_restore_cb);
+
-+static struct xt_target ipt_imq_reg = {
-+ .name = "IMQ",
-+ .family = AF_INET,
-+ .target = imq_target,
-+ .targetsize = sizeof(struct ipt_imq_info),
-+ .checkentry = imq_checkentry,
-+ .me = THIS_MODULE,
-+ .table = "mangle"
-+};
++static void skb_copy_stored_cb(struct sk_buff *new, struct sk_buff *old)
++{
++ struct skb_cb_table *next;
++
++ if (!old->cb_next) {
++ new->cb_next = NULL;
++ return;
++ }
++
++ spin_lock(&skb_cb_store_lock);
++
++ next = old->cb_next;
++ atomic_inc(&next->refcnt);
++ new->cb_next = next;
++
++ spin_unlock(&skb_cb_store_lock);
++}
++#endif
+
+ /* Pipe buffer operations for a socket. */
+ static struct pipe_buf_operations sock_pipe_buf_ops = {
+@@ -381,6 +458,15 @@
+ WARN_ON(in_irq());
+ skb->destructor(skb);
+ }
++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
++ /* This should not happen. When it does, avoid a memory leak by
++ restoring the chain of cb backups. */
++ while (skb->cb_next != NULL) {
++ printk(KERN_WARNING "kfree_skb: skb->cb_next: %p\n",
++ skb->cb_next);
++ skb_restore_cb(skb);
++ }
++#endif
+ #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+ nf_conntrack_put(skb->nfct);
+ nf_conntrack_put_reasm(skb->nfct_reasm);
+@@ -493,6 +579,9 @@
+ new->sp = secpath_get(old->sp);
+ #endif
+ memcpy(new->cb, old->cb, sizeof(old->cb));
++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
++ skb_copy_stored_cb(new, old);
++#endif
+ new->csum_start = old->csum_start;
+ new->csum_offset = old->csum_offset;
+ new->local_df = old->local_df;
+@@ -2664,6 +2753,13 @@
+ 0,
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC,
+ NULL);
++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
++ skbuff_cb_store_cache = kmem_cache_create("skbuff_cb_store_cache",
++ sizeof(struct skb_cb_table),
++ 0,
++ SLAB_HWCACHE_ALIGN|SLAB_PANIC,
++ NULL);
++#endif
+ }
+
+ /**
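Taken together, the helpers above give each skb a small stack of control-buffer backups: skb_save_cb() pushes a copy of the 48-byte cb before the qdisc layer overwrites it, and skb_restore_cb() pops it back (in imq_dev_xmit(), or from kfree_skb() as a leak guard). The standalone sketch below models just that push/pop behaviour; it is not from the patch, and it leaves out the refcounting that skb_copy_stored_cb() adds for cloned skbs, the locking, and the kmem_cache allocation.

/* Build with: cc -o cbstack cbstack.c */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct cb_backup {
	struct cb_backup *next;
	char cb[48];
};

struct fake_skb {
	char cb[48];
	struct cb_backup *cb_next;
};

static int save_cb(struct fake_skb *skb)	/* skb_save_cb() */
{
	struct cb_backup *b = malloc(sizeof(*b));

	if (!b)
		return -1;
	memcpy(b->cb, skb->cb, sizeof(skb->cb));
	b->next = skb->cb_next;
	skb->cb_next = b;			/* push */
	return 0;
}

static void restore_cb(struct fake_skb *skb)	/* skb_restore_cb() */
{
	struct cb_backup *b = skb->cb_next;

	if (!b)
		return;
	memcpy(skb->cb, b->cb, sizeof(skb->cb));
	skb->cb_next = b->next;			/* pop */
	free(b);
}

int main(void)
{
	struct fake_skb skb = { .cb = "netfilter state", .cb_next = NULL };

	if (save_cb(&skb) != 0)			/* before qdisc_enqueue_root() */
		return 1;
	strcpy(skb.cb, "qdisc scribble");	/* qdisc layer overwrites cb */
	restore_cb(&skb);			/* in imq_dev_xmit() */
	printf("%s\n", skb.cb);			/* back to "netfilter state" */
	return 0;
}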
+--- a/net/netfilter/Kconfig
++++ b/net/netfilter/Kconfig
+@@ -357,6 +357,18 @@
+
+ To compile it as a module, choose M here. If unsure, say N.
+
++config NETFILTER_XT_TARGET_IMQ
++ tristate '"IMQ" target support'
++ depends on NETFILTER_XTABLES
++ depends on IP_NF_MANGLE || IP6_NF_MANGLE
++ select IMQ
++ default m if NETFILTER_ADVANCED=n
++ help
++ This option adds an `IMQ' target which is used to specify if and
++ to which imq device packets should get enqueued/dequeued.
+
-+static int __init init(void)
++ To compile it as a module, choose M here. If unsure, say N.
++
+ config NETFILTER_XT_TARGET_MARK
+ tristate '"MARK" target support'
+ default m if NETFILTER_ADVANCED=n
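For orientation, and not part of this diff: with this Kconfig entry enabled, the target is driven from userspace in the way documented at linuximq.net, along the lines of 'iptables -t mangle -A PREROUTING -i eth0 -j IMQ --todev 0' to steer ingress traffic to imq0, followed by attaching a qdisc such as 'tc qdisc add dev imq0 root handle 1: htb' to shape it. This assumes an iptables build whose IMQ target patch matches the xt_IMQ module added below.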
+--- a/net/netfilter/Makefile
++++ b/net/netfilter/Makefile
+@@ -45,6 +45,7 @@
+ obj-$(CONFIG_NETFILTER_XT_TARGET_CONNMARK) += xt_CONNMARK.o
+ obj-$(CONFIG_NETFILTER_XT_TARGET_CONNSECMARK) += xt_CONNSECMARK.o
+ obj-$(CONFIG_NETFILTER_XT_TARGET_DSCP) += xt_DSCP.o
++obj-$(CONFIG_NETFILTER_XT_TARGET_IMQ) += xt_IMQ.o
+ obj-$(CONFIG_NETFILTER_XT_TARGET_MARK) += xt_MARK.o
+ obj-$(CONFIG_NETFILTER_XT_TARGET_NFLOG) += xt_NFLOG.o
+ obj-$(CONFIG_NETFILTER_XT_TARGET_NFQUEUE) += xt_NFQUEUE.o
+--- a/net/netfilter/nf_queue.c
++++ b/net/netfilter/nf_queue.c
+@@ -20,6 +20,26 @@
+
+ static DEFINE_MUTEX(queue_handler_mutex);
+
++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
++static const struct nf_queue_handler *queue_imq_handler;
++
++void nf_register_queue_imq_handler(const struct nf_queue_handler *qh)
+{
-+ return xt_register_target(&ipt_imq_reg);
++ mutex_lock(&queue_handler_mutex);
++ rcu_assign_pointer(queue_imq_handler, qh);
++ mutex_unlock(&queue_handler_mutex);
+}
++EXPORT_SYMBOL(nf_register_queue_imq_handler);
+
++void nf_unregister_queue_imq_handler(void)
+{
-+ xt_unregister_target(&ipt_imq_reg);
++ mutex_lock(&queue_handler_mutex);
++ rcu_assign_pointer(queue_imq_handler, NULL);
++ mutex_unlock(&queue_handler_mutex);
+}
++EXPORT_SYMBOL(nf_unregister_queue_imq_handler);
++#endif
+
-+module_init(init);
-+module_exit(fini);
+ /* return EBUSY when somebody else is registered, return EEXIST if the
+ * same handler is registered, return 0 in case of success. */
+ int nf_register_queue_handler(u_int8_t pf, const struct nf_queue_handler *qh)
+@@ -80,7 +100,7 @@
+ }
+ EXPORT_SYMBOL_GPL(nf_unregister_queue_handlers);
+
+-static void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
++void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
+ {
+ /* Release those devices we held, or Alexey will kill me. */
+ if (entry->indev)
+@@ -100,6 +120,7 @@
+ /* Drop reference to owner of hook which queued us. */
+ module_put(entry->elem->owner);
+ }
++EXPORT_SYMBOL_GPL(nf_queue_entry_release_refs);
+
+ /*
+ * Any packet that leaves via this function must come back
+@@ -121,12 +142,26 @@
+ #endif
+ const struct nf_afinfo *afinfo;
+ const struct nf_queue_handler *qh;
++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
++ const struct nf_queue_handler *qih = NULL;
++#endif
+
+ /* QUEUE == DROP if noone is waiting, to be safe. */
+ rcu_read_lock();
+
+ qh = rcu_dereference(queue_handler[pf]);
++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
++#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
++ if (pf == PF_INET || pf == PF_INET6)
++#else
++ if (pf == PF_INET)
++#endif
++ qih = rcu_dereference(queue_imq_handler);
+
-+MODULE_AUTHOR("http://www.linuximq.net");
-+MODULE_DESCRIPTION("Pseudo-driver for the intermediate queue device. See http://www.linuximq.net/ for more information.");
-+MODULE_LICENSE("GPL");
---- a/net/ipv4/netfilter/Kconfig
-+++ b/net/ipv4/netfilter/Kconfig
-@@ -112,6 +112,17 @@ config IP_NF_FILTER
++ if (!qh && !qih)
++#else /* !IMQ */
+ if (!qh)
++#endif
+ goto err_unlock;

- To compile it as a module, choose M here. If unsure, say N.
+ afinfo = nf_get_afinfo(pf);
+@@ -145,6 +180,10 @@
+ .indev = indev,
+ .outdev = outdev,
+ .okfn = okfn,
++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
++ .next_outfn = qh ? qh->outfn : NULL,
++ .next_queuenum = queuenum,
++#endif
+ };
+
+ /* If it's going away, ignore hook. */
+@@ -170,8 +209,19 @@
+ }
+ #endif
+ afinfo->saveroute(skb, entry);
++
++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
++ if (qih) {
++ status = qih->outfn(entry, queuenum);
++ goto imq_skip_queue;
++ }
++#endif
++
+ status = qh->outfn(entry, queuenum);
+
++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
++imq_skip_queue:
++#endif
+ rcu_read_unlock();

-+config IP_NF_TARGET_IMQ
-+ tristate "IMQ target support"
-+ depends on IP_NF_MANGLE && IMQ
-+ help
-+ This option adds a `IMQ' target which is used to specify if and
-+ to which IMQ device packets should get enqueued/dequeued.
-+
-+ For more information visit: http://www.linuximq.net/
-+
-+ To compile it as a module, choose M here. If unsure, say N.
-+
- config IP_NF_TARGET_REJECT
- tristate "REJECT target support"
- depends on IP_NF_FILTER
---- a/net/ipv4/netfilter/Makefile
-+++ b/net/ipv4/netfilter/Makefile
-@@ -58,6 +58,7 @@ obj-$(CONFIG_IP_NF_MATCH_SET) += ipt_set
- obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o
- obj-$(CONFIG_IP_NF_TARGET_ECN) += ipt_ECN.o
- obj-$(CONFIG_IP_NF_TARGET_LOG) += ipt_LOG.o
-+obj-$(CONFIG_IP_NF_TARGET_IMQ) += ipt_IMQ.o
- obj-$(CONFIG_IP_NF_TARGET_MASQUERADE) += ipt_MASQUERADE.o
- obj-$(CONFIG_IP_NF_TARGET_NETMAP) += ipt_NETMAP.o
- obj-$(CONFIG_IP_NF_TARGET_REDIRECT) += ipt_REDIRECT.o
+ if (status < 0) {
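The nf_queue() changes above give the IMQ handler precedence: for PF_INET and PF_INET6 packets the queue verdict goes to imq_nf_queue() first, and the originally targeted handler is remembered in next_outfn so that imq_nf_reinject() can hand the packet on after it has passed through the imq device. A compact standalone model of that dispatch order, illustration only and not from the patch, with plain function pointers standing in for the nf_queue_handler registrations:

/* Build with: cc -o dispatch dispatch.c */
#include <stdio.h>

typedef int (*outfn_t)(const char *pkt, unsigned int queuenum);

static int ip_queue_outfn(const char *pkt, unsigned int q)
{
	printf("ip_queue got %s on queue %u\n", pkt, q);
	return 0;
}

static int imq_outfn(const char *pkt, unsigned int q)
{
	printf("imq shapes %s first (queue %u)\n", pkt, q);
	return 0;	/* later handed on via next_outfn */
}

static int nf_queue_model(const char *pkt, unsigned int queuenum,
			  outfn_t qh, outfn_t qih)
{
	if (!qh && !qih)	/* QUEUE == DROP if noone is waiting */
		return -1;
	if (qih)		/* IMQ intercepts; qh becomes next_outfn */
		return qih(pkt, queuenum);
	return qh(pkt, queuenum);
}

int main(void)
{
	nf_queue_model("skb", 0, ip_queue_outfn, imq_outfn);	/* imq first */
	nf_queue_model("skb", 0, ip_queue_outfn, NULL);		/* normal path */
	return 0;
}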
--- /dev/null
-+++ b/net/ipv6/netfilter/ip6t_IMQ.c
-@@ -0,0 +1,69 @@
++++ b/net/netfilter/xt_IMQ.c
+@@ -0,0 +1,73 @@
+/*
+ * This target marks packets to be enqueued to an imq device
+ */
+#include <linux/module.h>
+#include <linux/skbuff.h>
-+#include <linux/netfilter_ipv6/ip6_tables.h>
-+#include <linux/netfilter_ipv6/ip6t_IMQ.h>
++#include <linux/netfilter/x_tables.h>
++#include <linux/netfilter/xt_IMQ.h>
+#include <linux/imq.h>
+
+static unsigned int imq_target(struct sk_buff *pskb,
-+ const struct net_device *in,
-+ const struct net_device *out,
-+ unsigned int hooknum,
-+ const struct xt_target *target,
-+ const void *targinfo)
++ const struct xt_target_param *par)
+{
-+ struct ip6t_imq_info *mr = (struct ip6t_imq_info *)targinfo;
++ const struct xt_imq_info *mr = par->targinfo;
+
-+ pskb->imq_flags = mr->todev | IMQ_F_ENQUEUE;
++ pskb->imq_flags = (mr->todev & IMQ_F_IFMASK) | IMQ_F_ENQUEUE;
+
+ return XT_CONTINUE;
+}
+
-+static bool imq_checkentry(const char *tablename,
-+ const void *entry,
-+ const struct xt_target *target,
-+ void *targinfo,
-+ unsigned int hook_mask)
++static bool imq_checkentry(const struct xt_tgchk_param *par)
+{
-+ struct ip6t_imq_info *mr;
++ struct xt_imq_info *mr = par->targinfo;
+
-+ mr = (struct ip6t_imq_info *)targinfo;
-+
-+ if (mr->todev > IMQ_MAX_DEVS) {
++ if (mr->todev > IMQ_MAX_DEVS - 1) {
+ printk(KERN_WARNING
+ "IMQ: invalid device specified, highest is %u\n",
-+ IMQ_MAX_DEVS);
++ IMQ_MAX_DEVS - 1);
+ return 0;
+ }
+
+ return 1;
+}
+
-+static struct xt_target ip6t_imq_reg = {
-+ .name = "IMQ",
-+ .family = AF_INET6,
-+ .target = imq_target,
-+ .targetsize = sizeof(struct ip6t_imq_info),
-+ .table = "mangle",
-+ .checkentry = imq_checkentry,
-+ .me = THIS_MODULE
++static struct xt_target xt_imq_reg[] __read_mostly = {
++ {
++ .name = "IMQ",
++ .family = AF_INET,
++ .checkentry = imq_checkentry,
++ .target = imq_target,
++ .targetsize = sizeof(struct xt_imq_info),
++ .table = "mangle",
++ .me = THIS_MODULE
++ },
++ {
++ .name = "IMQ",
++ .family = AF_INET6,
++ .checkentry = imq_checkentry,
++ .target = imq_target,
++ .targetsize = sizeof(struct xt_imq_info),
++ .table = "mangle",
++ .me = THIS_MODULE
++ },
+};
+
-+static int __init init(void)
++static int __init imq_init(void)
+{
-+ return xt_register_target(&ip6t_imq_reg);
++ return xt_register_targets(xt_imq_reg, ARRAY_SIZE(xt_imq_reg));
+}
+
-+static void __exit fini(void)
++static void __exit imq_fini(void)
+{
-+ xt_unregister_target(&ip6t_imq_reg);
++ xt_unregister_targets(xt_imq_reg, ARRAY_SIZE(xt_imq_reg));
+}
+
-+module_init(init);
-+module_exit(fini);
++module_init(imq_init);
++module_exit(imq_fini);
+
+MODULE_AUTHOR("http://www.linuximq.net");
+MODULE_DESCRIPTION("Pseudo-driver for the intermediate queue device. See http://www.linuximq.net/ for more information.");
+MODULE_LICENSE("GPL");
---- a/net/ipv6/netfilter/Kconfig
-+++ b/net/ipv6/netfilter/Kconfig
-@@ -170,6 +170,15 @@ config IP6_NF_MANGLE
-
- To compile it as a module, choose M here. If unsure, say N.
-
-+config IP6_NF_TARGET_IMQ
-+ tristate "IMQ target support"
-+ depends on IP6_NF_MANGLE && IMQ
-+ help
-+ This option adds a `IMQ' target which is used to specify if and
-+ to which imq device packets should get enqueued/dequeued.
-+
-+ To compile it as a module, choose M here. If unsure, say N.
++MODULE_ALIAS("ipt_IMQ");
++MODULE_ALIAS("ip6t_IMQ");
+
- config IP6_NF_TARGET_HL
- tristate 'HL (hoplimit) target support'
- depends on IP6_NF_MANGLE
---- a/net/ipv6/netfilter/Makefile
-+++ b/net/ipv6/netfilter/Makefile
-@@ -6,6 +6,7 @@
- obj-$(CONFIG_IP6_NF_IPTABLES) += ip6_tables.o
- obj-$(CONFIG_IP6_NF_FILTER) += ip6table_filter.o
- obj-$(CONFIG_IP6_NF_MANGLE) += ip6table_mangle.o
-+obj-$(CONFIG_IP6_NF_TARGET_IMQ) += ip6t_IMQ.o
- obj-$(CONFIG_IP6_NF_QUEUE) += ip6_queue.o
- obj-$(CONFIG_IP6_NF_RAW) += ip6table_raw.o
- obj-$(CONFIG_IP6_NF_SECURITY) += ip6table_security.o
---- a/net/sched/sch_generic.c
-+++ b/net/sched/sch_generic.c
-@@ -195,6 +195,7 @@ void __qdisc_run(struct Qdisc *q)
-
- clear_bit(__QDISC_STATE_RUNNING, &q->state);
- }
-+EXPORT_SYMBOL(__qdisc_run);
-
- static void dev_watchdog(unsigned long arg)
- {