Import source code for dummynet in-node emulation.
marta [Wed, 8 Jul 2009 20:40:24 +0000 (20:40 +0000)]
100 files changed:
Makefile [new file with mode: 0644]
README [new file with mode: 0644]
dummynet/Makefile [new file with mode: 0644]
dummynet/bsd_compat.c [new file with mode: 0644]
dummynet/in_cksum.c [new file with mode: 0644]
dummynet/include/net/if.h [new file with mode: 0644]
dummynet/include/net/pfil.h [new file with mode: 0644]
dummynet/include/netgraph/ng_ipfw.h [new file with mode: 0644]
dummynet/include/netinet/ip.h [new file with mode: 0644]
dummynet/include/netinet/ip6.h [new file with mode: 0644]
dummynet/include/netinet/ip_divert.h [new file with mode: 0644]
dummynet/include/netinet/ip_dummynet.h [new file with mode: 0644]
dummynet/include/netinet/ip_fw.h [new file with mode: 0644]
dummynet/include/netinet/ip_icmp.h [new file with mode: 0644]
dummynet/include/netinet/tcp.h [new file with mode: 0644]
dummynet/include/netinet/tcp_var.h [new file with mode: 0644]
dummynet/include/netinet/udp.h [new file with mode: 0644]
dummynet/include/sys/cdefs.h [new file with mode: 0644]
dummynet/include/sys/kernel.h [new file with mode: 0644]
dummynet/include/sys/malloc.h [new file with mode: 0644]
dummynet/include/sys/mbuf.h [new file with mode: 0644]
dummynet/include/sys/module.h [new file with mode: 0644]
dummynet/include/sys/param.h [new file with mode: 0644]
dummynet/include/sys/queue.h [new file with mode: 0644]
dummynet/include/sys/syslog.h [new file with mode: 0644]
dummynet/include/sys/systm.h [new file with mode: 0644]
dummynet/include/sys/taskqueue.h [new file with mode: 0644]
dummynet/ip_dummynet.c [new file with mode: 0644]
dummynet/ip_fw2.c [new file with mode: 0644]
dummynet/ip_fw_pfil.c [new file with mode: 0644]
dummynet/ipfw2_mod.c [new file with mode: 0644]
dummynet/ipfw_mod.c [new file with mode: 0644]
dummynet/missing.h [new file with mode: 0644]
glue.h [new file with mode: 0644]
include_e/altq/if_altq.h [new file with mode: 0644]
include_e/arpa/inet.h [new file with mode: 0644]
include_e/machine/in_cksum.h [new file with mode: 0644]
include_e/net/ethernet.h [new file with mode: 0644]
include_e/net/netisr.h [new file with mode: 0644]
include_e/net/pf_mtag.h [new file with mode: 0644]
include_e/net/radix.h [new file with mode: 0644]
include_e/netinet/ether.h [new file with mode: 0644]
include_e/netinet/icmp6.h [new file with mode: 0644]
include_e/netinet/if_ether.h [new file with mode: 0644]
include_e/netinet/in.h [new file with mode: 0644]
include_e/netinet/in_pcb.h [new file with mode: 0644]
include_e/netinet/in_var.h [new file with mode: 0644]
include_e/netinet/ip_carp.h [new file with mode: 0644]
include_e/netinet/ip_var.h [new file with mode: 0644]
include_e/netinet/pim.h [new file with mode: 0644]
include_e/netinet/sctp.h [new file with mode: 0644]
include_e/netinet/tcp_timer.h [new file with mode: 0644]
include_e/netinet/tcpip.h [new file with mode: 0644]
include_e/netinet/udp_var.h [new file with mode: 0644]
include_e/netinet6/ip6_var.h [new file with mode: 0644]
include_e/opt_inet6.h [new file with mode: 0644]
include_e/opt_ipfw.h [new file with mode: 0644]
include_e/opt_ipsec.h [new file with mode: 0644]
include_e/opt_mac.h [new file with mode: 0644]
include_e/opt_mbuf_stress_test.h [new file with mode: 0644]
include_e/opt_param.h [new file with mode: 0644]
include_e/sys/_lock.h [new file with mode: 0644]
include_e/sys/_mutex.h [new file with mode: 0644]
include_e/sys/jail.h [new file with mode: 0644]
include_e/sys/limits.h [new file with mode: 0644]
include_e/sys/lock.h [new file with mode: 0644]
include_e/sys/mutex.h [new file with mode: 0644]
include_e/sys/priv.h [new file with mode: 0644]
include_e/sys/proc.h [new file with mode: 0644]
include_e/sys/rwlock.h [new file with mode: 0644]
include_e/sys/socket.h [new file with mode: 0644]
include_e/sys/socketvar.h [new file with mode: 0644]
include_e/sys/sysctl.h [new file with mode: 0644]
include_e/sys/time.h [new file with mode: 0644]
include_e/sys/ucred.h [new file with mode: 0644]
ipfw-cleanup [new file with mode: 0755]
ipfw-slice.spec [new file with mode: 0644]
ipfw.cron [new file with mode: 0644]
ipfw.spec [new file with mode: 0644]
ipfw/Makefile [new file with mode: 0644]
ipfw/add_rules [new file with mode: 0755]
ipfw/altq.c [new file with mode: 0644]
ipfw/dummynet.c [new file with mode: 0644]
ipfw/glue.c [new file with mode: 0644]
ipfw/include/alias.h [new file with mode: 0644]
ipfw/include/net/if_dl.h [new file with mode: 0644]
ipfw/include/net/pfvar.h [new file with mode: 0644]
ipfw/include/netinet/ip_dummynet.h [new file with mode: 0644]
ipfw/include/netinet/ip_fw.h [new file with mode: 0644]
ipfw/include/netinet/tcp.h [new file with mode: 0644]
ipfw/include/sys/sockio.h [new file with mode: 0644]
ipfw/include/timeconv.h [new file with mode: 0644]
ipfw/ipfw2.c [new file with mode: 0644]
ipfw/ipfw2.h [new file with mode: 0644]
ipfw/ipv6.c [new file with mode: 0644]
ipfw/main.c [new file with mode: 0644]
ipfw/nat.c [new file with mode: 0644]
ipfw/svn-commit. [new file with mode: 0644]
slice/ipfw.8.gz [new file with mode: 0644]
slice/netconfig [new file with mode: 0755]

diff --git a/Makefile b/Makefile
new file mode 100644 (file)
index 0000000..69b6481
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,13 @@
+# $Id$
+# Top level makefile for building the ipfw kernel module and userland tool.
+# You can run it manually or under the PlanetLab build system.
+# PlanetLab also requires the 'install' target.
+
+_all: all
+
+all clean distclean:
+       echo target is $(@)
+       (cd ipfw && $(MAKE) $(@) )
+       (cd dummynet && $(MAKE) $(@) )
+
+install:
diff --git a/README b/README
new file mode 100644 (file)
index 0000000..fd40bac
--- /dev/null
+++ b/README
@@ -0,0 +1,107 @@
+#
+# $Id$
+#
+
+This directory contains a port of ipfw and dummynet to Linux and OpenWrt
+(a Windows version is in the works but not ready yet).
+Building the code produces:
+
+       a kernel module,        ipfw_mod.ko
+       a userland program,     /sbin/ipfw
+
+The source code here comes straight from FreeBSD (roughly the
+version in RELENG_7 and HEAD as of June 2009), plus some glue code
+and headers written from scratch.
+Unless specified otherwise, all the code here is under a BSD license.
+
+=== To compile for a 2.6 kernel, simply run
+
+       make
+
+    Make sure that kernel headers (or sources) are installed on your
+    system, and that the link "/lib/modules/`uname -r`/build" points
+    to the header/source tree matching your kernel.
+
+    You can override the default kernel tree with
+
+       make KERNELPATH=your_kernel_source_tree
+
+    NOTE: make sure CONFIG_NETFILTER is enabled in the kernel
+    configuration file. You can enable it by doing
+    
+       "(cd ${KERNELPATH}; make menuconfig)"
+
+    and enabling the option listed below:
+
+        Networking --->
+           Networking options  --->
+              [*] Network packet filtering framework (Netfilter)
+
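+    Once the build completes, you can load the module and check that the
+    userland tool talks to it, e.g. (paths may vary on your system):
+
+	insmod dummynet/ipfw_mod.ko
+	/sbin/ipfw show
+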
+
+=== To compile for a 2.4 kernel:
+
+       make VER=2.4 KERNELPATH=...
+
+    Follow the same instructions as for the 2.6 kernel, enabling
+    the kernel option:
+
+    Networking options  --->
+      [*] Network packet filtering (replaces ipchains)
+
+=== To build an OpenWrt package
+
+    (Tested with kamikaze_8.09.1 and Linux 2.4)
+
+    + Download and extract the OpenWrt package, e.g.
+
+       wget http://downloads.openwrt.org/kamikaze/8.09.1/kamikaze_8.09.1_source.tar.bz2
+       tar xvjf kamikaze_8.09.1_source.tar.bz2
+
+    + "cd" to the directory with the OpenWrt sources (the one that
+      contains Config.in, rules.mk ...)
+
+       cd kamikaze_8.09.1
+
+    + Optional: to be sure that the tools are working, make a first
+      compilation as follows:
+
+       - run "make menuconfig" and set the correct target device,
+         drivers, and so on;
+       - run "make" to do the build
+
+    + Add ipfw2 to the OpenWrt package tree, as follows:
+
+      - fetch and extract the code e.g.
+
+       (cd ..; \
+       wget http://info.iet.unipi.it/~luigi/dummynet/ipfw_linux-20090622.tgz;\
+       tar xvzf ipfw_linux-20090622.tgz)
+
+       (but you should have done it already)
+
+      - run the following commands:
+	(mkdir package/ipfw2;
+	cp ../ipfw_mod/Makefile.openwrt package/ipfw2/Makefile)
+
+	to create the package/ipfw2 directory in the OpenWrt source
+	directory, and copy Makefile.openwrt to package/ipfw2/Makefile;
+
+      - if necessary, edit package/ipfw2/Makefile and set IPFW_DIR to point
+	to the directory with the ipfw sources (the directory which
+	contains this README, dummynet/, ipfw/ and so on);
+
+      - run "make menuconfig" and select ipfw2 as a module <M> in
+           Kernel Modules -> Other modules -> ipfw2
+
+      - run "make" to build the package, "make V=99" for verbose build.
+
+    The resulting package is located in bin/packages/mipsel/kmod-ipfw2*.
+    Upload the file to the target system and install it as follows:
+
+    opkg install  kmod-ipfw2_2.4.35.4-brcm-2.4-1_mipsel.ipk #install
+    ls -l /lib/modules/2.4.35.4/ipfw*           # check
+    insmod /lib/modules/2.4.35.4/ipfw_mod.o     # load the module
+    /lib/modules/2.4.35.4/ipfw show             # launch the userspace tool
+    rmmod ipfw_mod.o                            # remove the module
+
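+    As a quick functional test (on any platform, once the module is
+    loaded) the usual ipfw/dummynet commands work unchanged, e.g. with
+    arbitrary rule and pipe numbers:
+
+    ipfw add 100 pipe 1 ip from any to any      # send all traffic to pipe 1
+    ipfw pipe 1 config bw 500Kbit/s delay 50ms  # emulate a slow link
+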
+-----------------------------------------------------------------------------
diff --git a/dummynet/Makefile b/dummynet/Makefile
new file mode 100644 (file)
index 0000000..b361cba
--- /dev/null
@@ -0,0 +1,159 @@
+#
+# $Id$
+#
+# gnu Makefile to build linux module for ipfw+dummynet
+
+# Unless specified otherwise, variable names are arbitrary.
+# Exceptions are the following:
+# 
+#   ccflags-y  additional $(CC) flags
+#   M          used by Kbuild, we must set it to `pwd`
+#   obj-m      list of .o modules to build
+#   $(MOD)-y   for each $MOD in obj-m, the list of objects
+#   obj-y      same as above, for openwrt
+#   O_TARGET   the link target, for openwrt
+#   EXTRA_CFLAGS additional $(CC) flags; used by openwrt, and also
+#              when compiling modules for kernels up to 2.6.22
+#---
+
+$(warning including dummynet/Makefile)
+
+# default to 2.6 for PlanetLab builds
+VER ?= 2.6
+
+ifeq ($(VER),openwrt)
+
+$(warning dummynet/Makefile doing openwrt)
+obj-m := ipfw_mod.o
+obj-y := ipfw2_mod.o bsd_compat.o \
+       in_cksum.o ip_dummynet.o ip_fw2.o ip_fw_pfil.o
+O_TARGET := ipfw_mod.o
+
+xcflags-y += -O1 -DLINUX_24
+xcflags-y += -DIPFIREWALL_DEFAULT_TO_ACCEPT
+xcflags-y += -g
+xcflags-y += -D_BSD_SOURCE     # enable __FAVOR_BSD (udp/tcp bsd struct over posix)
+xcflags-y += -DKERNEL_MODULE   # linux kernel module
+xcflags-y += -I include_e -I include
+xcflags-y += -include ../glue.h        # headers
+
+EXTRA_CFLAGS := $(xcflags-y)
+
+# we should not export anything
+#export-objs := ipfw2_mod.o
+-include $(TOPDIR)/Rules.make
+
+else   # !openwrt
+
+obj-m := ipfw_mod.o
+ifneq ($(shell echo $(VER)|grep '2.4'),)
+  $(warning "---- Building for Version $(VER)")
+  KERNELDIR := -isystem /usr/src/linux-2.4.35.4/include
+  # replace the system include directory
+  WARN += -nostdinc -isystem /usr/lib/gcc/i486-linux-gnu/4.2.4/include
+  #WARN = -Wp,-MD,/home/luigi/ports-luigi/dummynet-branches/ipfw_mod/dummynet/.ipfw2_mod.o.d
+  #WARN += -Iinclude  -include include/linux/autoconf.h
+  WARN += -Wall -Wundef
+  WARN += -Wstrict-prototypes -Wno-trigraphs -fno-strict-aliasing
+  WARN += -fno-common -Werror-implicit-function-declaration
+  # WARN += -O2  -fno-stack-protector -m32 -msoft-float -mregparm=3
+  # -mregparm=3 gives a printk error
+  WARN += -m32 -msoft-float # -mregparm=3
+  #WARN += -freg-struct-return -mpreferred-stack-boundary=2
+  WARN += -Wno-sign-compare
+  WARN += -Wdeclaration-after-statement -Wno-pointer-sign 
+
+  ccflags-y += -O1 -DLINUX_24
+  CFLAGS = -DMODULE -D__KERNEL__ ${KERNELDIR} ${ccflags-y}
+  # The Main target
+all: mod24
+
+else
+  # KERNELDIR is where the kernel headers reside
+  # XXX on PlanetLab, KERNELDIR must be the same as KERNELPATH
+  KERNELDIR ?= $(KERNELPATH)
+  # KERNELDIR := /home/luigi/linux-2.6.25.17/
+  # test on rock
+  #KERNELDIR := /usr/src/linux-2.6.24  # not with v.2237
+  #KERNELDIR := /usr/src/linux-2.6.26
+  #KERNELDIR := /usr/src/linux-2.6.22
+  #KERNELDIR := /usr/src/linux-source-2.6.26
+  #KERNELDIR := /lib/modules/`uname -r`/build
+  $(warning "---- Building Version 2.6 $(VER) in $(KERNELDIR)")
+  WARN := -O1 -Wall -Werror -DDEBUG_SPINLOCK -DDEBUG_MUTEXES
+  # The main target
+
+  # Required by kernels <= 2.6.22; ccflags-y is used on newer versions
+LINUX_VERSION_CODE := $(shell grep LINUX_VERSION_CODE $(KERNELDIR)/include/linux/version.h|cut -d " " -f3)
+ifeq ($(LINUX_VERSION_CODE),132630)
+  EXTRA_CFLAGS += $(ccflags-y)
+endif
+
+all: include_e
+       $(MAKE) -C $(KERNELDIR) V=1 M=`pwd` modules
+endif
+
+# the list of objects used to build the module
+ipfw_mod-y = $(IPFW_SRCS:%.c=%.o)
+
+#	Original ipfw + dummynet + FreeBSD sources
+IPFW_SRCS = ip_fw2.c ip_dummynet.c ip_fw_pfil.c in_cksum.c
+
+#      module glue and functions missing in linux
+IPFW_SRCS += ipfw2_mod.c bsd_compat.c
+
+
+# additional $(CC) flags
+ccflags-y += $(WARN)
+ccflags-y += -DIPFIREWALL_DEFAULT_TO_ACCEPT
+ccflags-y += -g
+ccflags-y += -D_BSD_SOURCE             # enable __FAVOR_BSD (udp/tcp bsd structure over posix)
+ccflags-y += -DKERNEL_MODULE           # linux kernel module
+ccflags-y += -I $(M)/include_e -I $(M)/include
+ccflags-y += -include $(M)/../glue.h   # headers
+
+mod24: include_e $(obj-m)
+
+$(obj-m): $(ipfw_mod-y)
+       $(LD) $(LDFLAGS) -m elf_i386 -r -o $@ $^
+clean:
+       -rm -f *.o *.ko Module.symvers *.mod.c
+
+distclean: clean
+       -rm -f .*cmd modules.order opt_*
+       -rm -rf .tmp_versions include_e
+
+# support to create empty dirs and files in include_e/
+# EDIRS is the list of directories, EFILES is the list of files.
+
+EDIRS= altq arpa machine net netinet netinet6 sys
+
+EFILES += opt_inet6.h opt_ipfw.h opt_ipsec.h opt_mac.h
+EFILES += opt_mbuf_stress_test.h opt_param.h
+
+EFILES += altq/if_altq.h
+EFILES += arpa/inet.h
+EFILES += machine/in_cksum.h
+EFILES += net/ethernet.h net/netisr.h net/pf_mtag.h net/radix.h
+
+EFILES += netinet/ether.h netinet/icmp6.h netinet/if_ether.h
+EFILES += netinet/in.h netinet/in_pcb.h netinet/in_var.h
+EFILES +=  netinet/ip_carp.h netinet/ip_var.h netinet/pim.h
+EFILES += netinet/sctp.h netinet/tcp_timer.h netinet/tcpip.h
+EFILES += netinet/udp_var.h
+
+EFILES += netinet6/ip6_var.h
+
+EFILES += sys/_lock.h sys/_mutex.h sys/jail.h
+EFILES += sys/limits.h sys/lock.h sys/mutex.h sys/priv.h
+EFILES += sys/proc.h sys/rwlock.h sys/socket.h sys/socketvar.h
+EFILES += sys/sysctl.h sys/time.h sys/ucred.h
+
+M ?= $(shell pwd)
+include_e:
+       echo "running in $M"
+       -@rm -rf $(M)/include_e opt_*
+       -@mkdir -p $(M)/include_e
+       -@(cd $(M)/include_e; mkdir -p $(EDIRS); touch $(EFILES) )
+
+endif # !openwrt
diff --git a/dummynet/bsd_compat.c b/dummynet/bsd_compat.c
new file mode 100644 (file)
index 0000000..995d60c
--- /dev/null
@@ -0,0 +1,336 @@
+/*
+ * Copyright (C) 2009 Luigi Rizzo, Marta Carbone, Universita` di Pisa
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ * kernel variables and functions that are not available in linux.
+ */
+
+#include <sys/cdefs.h>
+#include <asm/div64.h> /* do_div on 2.4 */
+#include <linux/random.h>      /* get_random_bytes on 2.4 */
+#include "missing.h"
+
+/*
+ * gettimeofday would be in sys/time.h but it is not
+ * visible if _KERNEL is defined
+ */
+int gettimeofday(struct timeval *, struct timezone *);
+
+int ticks;             /* kernel ticks counter */
+int hz = 1000;         /* default clock time */
+long tick = 1000;      /* XXX is this 100000/hz ? */
+int bootverbose = 0;
+time_t time_uptime = 0;
+struct timeval boottime;
+
+int     ip_defttl;
+int fw_one_pass = 1;
+u_long  in_ifaddrhmask;                         /* mask for hash table */
+struct  in_ifaddrhashhead *in_ifaddrhashtbl;    /* inet addr hash table  */
+
+u_int rt_numfibs = RT_NUMFIBS;
+
+/*
+ * pfil hook support.
+ * We make pfil_head_get return a non-null pointer, which is then ignored
+ * in our 'add-hook' routines.
+ */
+struct pfil_head;
+typedef int (pfil_hook_t)
+       (void *, struct mbuf **, struct ifnet *, int, struct inpcb *);
+
+struct pfil_head *
+pfil_head_get(int proto, u_long flags)
+{
+       static int dummy;
+       return (struct pfil_head *)&dummy;
+}
+int
+pfil_add_hook(pfil_hook_t *func, void *arg, int dir, struct pfil_head *h)
+{
+       return 0;
+}
+
+int
+pfil_remove_hook(pfil_hook_t *func, void *arg, int dir, struct pfil_head *h)
+{
+       return 0;
+}
+
+/* empty stubs for kernel functions we do not need on linux */
+int
+priv_check(struct thread *td, int priv)
+{
+       return 0;
+}
+
+int
+securelevel_ge(struct ucred *cr, int level)
+{
+       return 0;
+}
+
+int
+sysctl_handle_int(SYSCTL_HANDLER_ARGS)
+{
+       return 0;
+}
+
+int
+sysctl_handle_long(SYSCTL_HANDLER_ARGS)
+{
+       return 0;
+}
+
+void
+ether_demux(struct ifnet *ifp, struct mbuf *m)
+{
+       return;
+}
+
+int
+ether_output_frame(struct ifnet *ifp, struct mbuf *m)
+{
+       return 0;
+}
+
+void
+in_rtalloc_ign(struct route *ro, u_long ignflags, u_int fibnum)
+{
+       return;
+}
+
+void
+icmp_error(struct mbuf *n, int type, int code, uint32_t dest, int mtu)
+{
+       return;
+}
+
+u_short
+in_cksum_skip(struct mbuf *m, int len, int skip)
+{
+       return 0;
+}
+
+u_short
+in_cksum_hdr(struct ip *ip)
+{
+       return 0;
+}
+
+struct mbuf *
+ip_reass(struct mbuf *clone)
+{
+       return clone;
+}
+#ifdef INP_LOCK_ASSERT
+#undef INP_LOCK_ASSERT
+#define INP_LOCK_ASSERT(a)
+#endif
+
+int
+jailed(struct ucred *cred)
+{
+       return 0;
+}
+
+/*
+ * Return 1 if an internet address is for a ``local'' host
+ * (one to which we have a connection).  If subnetsarelocal
+ * is true, this includes other subnets of the local net.
+ * Otherwise, it includes only the directly-connected (sub)nets.
+ */
+int
+in_localaddr(struct in_addr in)
+{
+       return 1;
+}
+
+int
+sooptcopyout(struct sockopt *sopt, const void *buf, size_t len)
+{
+       size_t valsize = sopt->sopt_valsize;
+
+       if (len < valsize)
+               sopt->sopt_valsize = valsize = len;
+       bcopy(buf, sopt->sopt_val, valsize);
+       return 0;
+}
+
+/*
+ * copy data from userland to kernel
+ */
+int
+sooptcopyin(struct sockopt *sopt, void *buf, size_t len, size_t minlen)
+{
+       size_t valsize = sopt->sopt_valsize;
+
+       if (valsize < minlen)
+               return EINVAL;
+       if (valsize > len)
+               sopt->sopt_valsize = valsize = len;
+       bcopy(sopt->sopt_val, buf, valsize);
+       return 0;
+}
+
+void
+getmicrouptime(struct timeval *tv)
+{
+#ifdef _WIN32
+#else
+       do_gettimeofday(tv);
+#endif
+}
+
+
+#include <arpa/inet.h>
+
+char *
+inet_ntoa_r(struct in_addr ina, char *buf)
+{
+#ifdef _WIN32
+#else
+       unsigned char *ucp = (unsigned char *)&ina;
+
+       sprintf(buf, "%d.%d.%d.%d",
+       ucp[0] & 0xff,
+       ucp[1] & 0xff,
+       ucp[2] & 0xff,
+       ucp[3] & 0xff);
+#endif
+       return buf;
+}
+
+char *
+inet_ntoa(struct in_addr ina)
+{
+       static char buf[16];
+       return inet_ntoa_r(ina, buf);
+}
+
+int
+random(void)
+{
+#ifdef _WIN32
+       return 0x123456;
+#else
+       int r;
+       get_random_bytes(&r, sizeof(r));
+       return r & 0x7fffffff; 
+#endif
+}
+
+
+/*
+ * do_div really does a u64 / u32 division.
+ * We save the sign and convert to unsigned before calling it.
+ * This is safe only because we always call it with small operands.
+ */
+int64_t
+div64(int64_t a, int64_t b)
+{
+#ifdef _WIN32
+        int a1 = a, b1 = b;
+       return a1/b1;
+#else
+       uint64_t ua, ub;
+       int sign = ((a>0)?1:-1) * ((b>0)?1:-1);
+
+       ua = ((a>0)?a:-a);
+       ub = ((b>0)?b:-b);
+        do_div(ua, ub);
+       return sign*ua;
+#endif
+}
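+
+/*
+ * Example (illustrative): div64(-1000000, 3) returns -333333; the sign
+ * is computed separately and do_div() only ever sees the unsigned
+ * magnitudes.
+ */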
+
+/*
+ * compact version of fnmatch.
+ */
+int
+fnmatch(const char *pattern, const char *string, int flags)
+{
+       char s;
+
+       if (!string || !pattern)
+               return 1;       /* no match */
+       while ( (s = *string++) ) {
+               char p = *pattern++;
+               if (p == '\0')          /* pattern is over, no match */
+                       return 1;
+               if (p == '*')           /* wildcard, match */
+                       return 0;
+               if (p == '.' || p == s) /* char match, continue */
+                       continue;
+               return 1;               /* no match */
+       }
+       /* end of string, make sure the pattern is over too */
+       if (*pattern == '\0' || *pattern == '*')
+               return 0;
+       return 1;       /* no match */
+}
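+
+/*
+ * Illustrative behaviour of the compact matcher above: '.' matches any
+ * single character and '*' matches the rest of the string, so
+ *	fnmatch("ppp*", "ppp0", 0) == 0		(match)
+ *	fnmatch("eth", "eth0", 0) == 1		(no match, pattern too short)
+ */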
+
+#ifdef _WIN32
+/*
+ * as good as anywhere, place here the missing calls
+ */
+
+void *
+my_alloc(int size)
+{
+       void *_ret = ExAllocatePoolWithTag(0, size, 'wfpi');
+       if (_ret)
+               memset(_ret, 0, size);
+       return _ret;
+}
+
+void
+panic(const char *fmt, ...)
+{
+       printf("%s", fmt);
+       for (;;);
+}
+
+#include <stdarg.h>
+
+extern int _vsnprintf(char *buf, int buf_size, char * fmt, va_list ap);
+
+/*
+ * Windows' _snprintf does not zero-terminate the buffer when the
+ * formatted output does not fit in buf_size bytes.
+ */
+int
+snprintf(char *buf, int buf_size, char *fmt, ...)
+{
+    va_list ap;
+    va_start(ap, fmt);
+    if (_vsnprintf(buf, buf_size, fmt, ap) < 0)
+        buf[buf_size - 1] = '\0';
+    va_end(ap);
+
+    return 0;
+}
+#endif
diff --git a/dummynet/in_cksum.c b/dummynet/in_cksum.c
new file mode 100644 (file)
index 0000000..ca56508
--- /dev/null
@@ -0,0 +1,146 @@
+/*-
+ * Copyright (c) 1988, 1992, 1993
+ *     The Regents of the University of California.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *     @(#)in_cksum.c  8.1 (Berkeley) 6/10/93
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: src/sys/netinet/in_cksum.c,v 1.10 2007/10/07 20:44:22 silby Exp $");
+
+#include <sys/param.h>
+#include <sys/mbuf.h>
+
+/*
+ * Checksum routine for Internet Protocol family headers (Portable Version).
+ *
+ * This routine is very heavily used in the network
+ * code and should be modified for each CPU to be as fast as possible.
+ */
+
+#define ADDCARRY(x)  (x > 65535 ? x -= 65535 : x)
+#define REDUCE {l_util.l = sum; sum = l_util.s[0] + l_util.s[1]; ADDCARRY(sum);}
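+/*
+ * Illustrative: on a little-endian machine a sum of 0x12345 is folded
+ * by REDUCE into 0x2345 + 0x0001 = 0x2346, wrapping the carry back
+ * into the low 16 bits (one's complement addition).
+ */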
+
+int
+in_cksum(struct mbuf *m, int len)
+{
+       register u_short *w;
+       register int sum = 0;
+       register int mlen = 0;
+       int byte_swapped = 0;
+
+       union {
+               char    c[2];
+               u_short s;
+       } s_util;
+       union {
+               u_short s[2];
+               long    l;
+       } l_util;
+
+       for (;m && len; m = m->m_next) {
+               if (m->m_len == 0)
+                       continue;
+               w = mtod(m, u_short *);
+               if (mlen == -1) {
+                       /*
+                        * The first byte of this mbuf is the continuation
+                        * of a word spanning between this mbuf and the
+                        * last mbuf.
+                        *
+                        * s_util.c[0] is already saved when scanning previous
+                        * mbuf.
+                        */
+                       s_util.c[1] = *(char *)w;
+                       sum += s_util.s;
+                       w = (u_short *)((char *)w + 1);
+                       mlen = m->m_len - 1;
+                       len--;
+               } else
+                       mlen = m->m_len;
+               if (len < mlen)
+                       mlen = len;
+               len -= mlen;
+               /*
+                * Force to even boundary.
+                */
+               if ((1 & (int) w) && (mlen > 0)) {
+                       REDUCE;
+                       sum <<= 8;
+                       s_util.c[0] = *(u_char *)w;
+                       w = (u_short *)((char *)w + 1);
+                       mlen--;
+                       byte_swapped = 1;
+               }
+               /*
+                * Unroll the loop to make overhead from
+                * branches &c small.
+                */
+               while ((mlen -= 32) >= 0) {
+                       sum += w[0]; sum += w[1]; sum += w[2]; sum += w[3];
+                       sum += w[4]; sum += w[5]; sum += w[6]; sum += w[7];
+                       sum += w[8]; sum += w[9]; sum += w[10]; sum += w[11];
+                       sum += w[12]; sum += w[13]; sum += w[14]; sum += w[15];
+                       w += 16;
+               }
+               mlen += 32;
+               while ((mlen -= 8) >= 0) {
+                       sum += w[0]; sum += w[1]; sum += w[2]; sum += w[3];
+                       w += 4;
+               }
+               mlen += 8;
+               if (mlen == 0 && byte_swapped == 0)
+                       continue;
+               REDUCE;
+               while ((mlen -= 2) >= 0) {
+                       sum += *w++;
+               }
+               if (byte_swapped) {
+                       REDUCE;
+                       sum <<= 8;
+                       byte_swapped = 0;
+                       if (mlen == -1) {
+                               s_util.c[1] = *(char *)w;
+                               sum += s_util.s;
+                               mlen = 0;
+                       } else
+                               mlen = -1;
+               } else if (mlen == -1)
+                       s_util.c[0] = *(char *)w;
+       }
+       if (len)
+               printf("cksum: out of data\n");
+       if (mlen == -1) {
+               /* The last mbuf has odd # of bytes. Follow the
+                  standard (the odd byte may be shifted left by 8 bits
+                  or not as determined by endian-ness of the machine) */
+               s_util.c[1] = 0;
+               sum += s_util.s;
+       }
+       REDUCE;
+       return (~sum & 0xffff);
+}
diff --git a/dummynet/include/net/if.h b/dummynet/include/net/if.h
new file mode 100644 (file)
index 0000000..1aa8e7b
--- /dev/null
@@ -0,0 +1 @@
+#include <linux/if.h>
diff --git a/dummynet/include/net/pfil.h b/dummynet/include/net/pfil.h
new file mode 100644 (file)
index 0000000..19a3d9c
--- /dev/null
@@ -0,0 +1,118 @@
+/*     $FreeBSD: src/sys/net/pfil.h,v 1.16 2007/06/08 12:43:25 gallatin Exp $ */
+/*     $NetBSD: pfil.h,v 1.22 2003/06/23 12:57:08 martin Exp $ */
+
+/*-
+ * Copyright (c) 1996 Matthew R. Green
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _NET_PFIL_H_
+#define _NET_PFIL_H_
+
+#include <sys/systm.h>
+#include <sys/queue.h>
+#include <sys/_lock.h>
+#include <sys/_mutex.h>
+#include <sys/lock.h>
+#include <sys/rwlock.h>
+
+struct mbuf;
+struct ifnet;
+struct inpcb;
+
+/*
+ * The packet filter hooks are designed for anything to call them to
+ * possibly intercept the packet.
+ */
+struct packet_filter_hook {
+        TAILQ_ENTRY(packet_filter_hook) pfil_link;
+       int     (*pfil_func)(void *, struct mbuf **, struct ifnet *, int, struct inpcb *);
+       void    *pfil_arg;
+       int     pfil_flags;
+};
+
+#define PFIL_IN                0x00000001
+#define PFIL_OUT       0x00000002
+#define PFIL_WAITOK    0x00000004
+#define PFIL_ALL       (PFIL_IN|PFIL_OUT)
+
+typedef        TAILQ_HEAD(pfil_list, packet_filter_hook) pfil_list_t;
+
+#define        PFIL_TYPE_AF            1       /* key is AF_* type */
+#define        PFIL_TYPE_IFNET         2       /* key is ifnet pointer */
+
+struct pfil_head {
+       pfil_list_t     ph_in;
+       pfil_list_t     ph_out;
+       int             ph_type;
+       int             ph_nhooks;
+#if defined( __linux__ ) || defined( _WIN32 )
+       rwlock_t        ph_mtx;
+#else
+       struct rwlock   ph_mtx;
+#endif
+       union {
+               u_long          phu_val;
+               void            *phu_ptr;
+       } ph_un;
+#define        ph_af           ph_un.phu_val
+#define        ph_ifnet        ph_un.phu_ptr
+       LIST_ENTRY(pfil_head) ph_list;
+};
+
+int    pfil_run_hooks(struct pfil_head *, struct mbuf **, struct ifnet *,
+           int, struct inpcb *inp);
+
+int    pfil_add_hook(int (*func)(void *, struct mbuf **,
+           struct ifnet *, int, struct inpcb *), void *, int, struct pfil_head *);
+int    pfil_remove_hook(int (*func)(void *, struct mbuf **,
+           struct ifnet *, int, struct inpcb *), void *, int, struct pfil_head *);
+
+int    pfil_head_register(struct pfil_head *);
+int    pfil_head_unregister(struct pfil_head *);
+
+struct pfil_head *pfil_head_get(int, u_long);
+
+#define        PFIL_HOOKED(p) ((p)->ph_nhooks > 0)
+#define PFIL_RLOCK(p) rw_rlock(&(p)->ph_mtx)
+#define PFIL_WLOCK(p) rw_wlock(&(p)->ph_mtx)
+#define PFIL_RUNLOCK(p) rw_runlock(&(p)->ph_mtx)
+#define PFIL_WUNLOCK(p) rw_wunlock(&(p)->ph_mtx)
+#define PFIL_LIST_LOCK() mtx_lock(&pfil_global_lock)
+#define PFIL_LIST_UNLOCK() mtx_unlock(&pfil_global_lock)
+
+static __inline struct packet_filter_hook *
+pfil_hook_get(int dir, struct pfil_head *ph)
+{
+       if (dir == PFIL_IN)
+               return (TAILQ_FIRST(&ph->ph_in));
+       else if (dir == PFIL_OUT)
+               return (TAILQ_FIRST(&ph->ph_out));
+       else
+               return (NULL);
+}
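+
+/*
+ * Usage sketch (hypothetical module code, not part of this header):
+ * a packet filter would register itself roughly as follows:
+ *
+ *	static int
+ *	my_filter(void *arg, struct mbuf **m, struct ifnet *ifp,
+ *	    int dir, struct inpcb *inp)
+ *	{
+ *		return 0;	returning 0 accepts the packet
+ *	}
+ *	...
+ *	struct pfil_head *ph = pfil_head_get(PFIL_TYPE_AF, AF_INET);
+ *	if (ph != NULL)
+ *		pfil_add_hook(my_filter, NULL, PFIL_IN | PFIL_WAITOK, ph);
+ */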
+
+#endif /* _NET_PFIL_H_ */
diff --git a/dummynet/include/netgraph/ng_ipfw.h b/dummynet/include/netgraph/ng_ipfw.h
new file mode 100644 (file)
index 0000000..a6b6ea9
--- /dev/null
@@ -0,0 +1,54 @@
+/*-
+ * Copyright 2005, Gleb Smirnoff <glebius@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: src/sys/netgraph/ng_ipfw.h,v 1.2 2006/02/17 09:42:49 glebius Exp $
+ */
+#ifndef __NG_IPFW_H
+#define __NG_IPFW_H
+
+#define NG_IPFW_NODE_TYPE    "ipfw"
+#define NGM_IPFW_COOKIE      1105988990
+
+#ifdef _KERNEL
+
+struct mbuf;
+struct ip_fw_args;
+typedef int ng_ipfw_input_t(struct mbuf **, int, struct ip_fw_args *, int);
+extern ng_ipfw_input_t *ng_ipfw_input_p;
+#define        NG_IPFW_LOADED  (ng_ipfw_input_p != NULL)
+
+struct ng_ipfw_tag {
+       struct m_tag    mt;             /* tag header */
+       struct ip_fw    *rule;          /* matching rule */
+       struct ifnet    *ifp;           /* interface, for ip_output */
+       int             dir;
+#define        NG_IPFW_OUT     0
+#define        NG_IPFW_IN      1
+};
+
+#define        TAGSIZ  (sizeof(struct ng_ipfw_tag) - sizeof(struct m_tag))
+
+#endif /* _KERNEL */
+#endif /* __NG_IPFW_H */
diff --git a/dummynet/include/netinet/ip.h b/dummynet/include/netinet/ip.h
new file mode 100644 (file)
index 0000000..bdd8cf0
--- /dev/null
@@ -0,0 +1,46 @@
+#ifndef _NETINET_IP_H_
+#define _NETINET_IP_H_
+
+#define LITTLE_ENDIAN   1234
+#define BIG_ENDIAN      4321
+#if defined(__BIG_ENDIAN)
+#error we are in bigendian
+#elif defined(__LITTLE_ENDIAN)
+//#warning we are in littleendian
+#define BYTE_ORDER      LITTLE_ENDIAN
+#else
+#error no platform
+#endif
+
+/* XXX endianness doesn't belong here */
+// #define LITTLE_ENDIAN   1234
+// #define BIG_ENDIAN      4321
+// #define BYTE_ORDER      LITTLE_ENDIAN
+
+/*
+ * Structure of an internet header, naked of options.
+ */
+struct ip {
+#if BYTE_ORDER == LITTLE_ENDIAN
+        u_int   ip_hl:4,                /* header length */
+                ip_v:4;                 /* version */
+#endif
+#if BYTE_ORDER == BIG_ENDIAN
+        u_int   ip_v:4,                 /* version */
+                ip_hl:4;                /* header length */
+#endif
+        u_char  ip_tos;                 /* type of service */
+        u_short ip_len;                 /* total length */
+        u_short ip_id;                  /* identification */
+        u_short ip_off;                 /* fragment offset field */
+#define IP_RF 0x8000                    /* reserved fragment flag */
+#define IP_DF 0x4000                    /* dont fragment flag */
+#define IP_MF 0x2000                    /* more fragments flag */
+#define IP_OFFMASK 0x1fff               /* mask for fragmenting bits */
+        u_char  ip_ttl;                 /* time to live */
+        u_char  ip_p;                   /* protocol */
+        u_short ip_sum;                 /* checksum */
+        struct  in_addr ip_src,ip_dst;  /* source and dest address */
+} __packed __aligned(4);
+
+#endif /* _NETINET_IP_H_ */
diff --git a/dummynet/include/netinet/ip6.h b/dummynet/include/netinet/ip6.h
new file mode 100644 (file)
index 0000000..88b42a4
--- /dev/null
@@ -0,0 +1,58 @@
+#ifndef _NETINET_IP6_H_
+#define _NETINET_IP6_H_
+#define IN6_ARE_ADDR_EQUAL(a, b)                        \
+(memcmp(&(a)->s6_addr[0], &(b)->s6_addr[0], sizeof(struct in6_addr)) == 0)
+
+struct ip6_hdr {
+        union {
+                struct ip6_hdrctl {
+                        u_int32_t ip6_un1_flow; /* 20 bits of flow-ID */  
+                        u_int16_t ip6_un1_plen; /* payload length */
+                        u_int8_t  ip6_un1_nxt;  /* next header */
+                        u_int8_t  ip6_un1_hlim; /* hop limit */
+                } ip6_un1;
+                u_int8_t ip6_un2_vfc;   /* 4 bits version, top 4 bits class */
+        } ip6_ctlun;
+        struct in6_addr ip6_src;        /* source address */
+        struct in6_addr ip6_dst;        /* destination address */
+};
+#define ip6_nxt         ip6_ctlun.ip6_un1.ip6_un1_nxt
+#define ip6_flow        ip6_ctlun.ip6_un1.ip6_un1_flow
+
+
+struct icmp6_hdr {
+        u_int8_t        icmp6_type;     /* type field */
+        u_int8_t        icmp6_code;     /* code field */
+        u_int16_t       icmp6_cksum;    /* checksum field */
+        union {
+                u_int32_t       icmp6_un_data32[1]; /* type-specific field */
+                u_int16_t       icmp6_un_data16[2]; /* type-specific field */
+                u_int8_t        icmp6_un_data8[4];  /* type-specific field */
+        } icmp6_dataun;
+};
+
+struct ip6_hbh {
+        u_int8_t ip6h_nxt;      /* next header */
+        u_int8_t ip6h_len;      /* length in units of 8 octets */
+        /* followed by options */
+}; 
+struct ip6_rthdr {
+        u_int8_t  ip6r_nxt;     /* next header */
+        u_int8_t  ip6r_len;     /* length in units of 8 octets */
+        u_int8_t  ip6r_type;    /* routing type */
+        u_int8_t  ip6r_segleft; /* segments left */
+        /* followed by routing type specific data */
+};
+struct ip6_frag {
+        u_int8_t  ip6f_nxt;             /* next header */
+        u_int8_t  ip6f_reserved;        /* reserved field */
+        u_int16_t ip6f_offlg;           /* offset, reserved, and flag */
+        u_int32_t ip6f_ident;           /* identification */
+};
+#define IP6F_OFF_MASK           0xfff8  /* mask out offset from _offlg */
+#define IP6F_MORE_FRAG          0x0001  /* more-fragments flag */
+struct  ip6_ext {
+        u_int8_t ip6e_nxt;
+        u_int8_t ip6e_len;
+};
+#endif /* _NETINET_IP6_H_ */
diff --git a/dummynet/include/netinet/ip_divert.h b/dummynet/include/netinet/ip_divert.h
new file mode 100644 (file)
index 0000000..4bb6e42
--- /dev/null
@@ -0,0 +1,14 @@
+#ifndef _IP_DIVERT_H
+#define _IP_DIVERT_H
+
+struct mbuf;
+typedef void ip_divert_packet_t(struct mbuf *, int);
+
+extern  ip_divert_packet_t *ip_divert_ptr;
+
+struct divert_tag {
+        u_int32_t       info;           /* port & flags */
+        u_int16_t       cookie;         /* ipfw rule number */
+};
+
+#endif /* !_IP_DIVERT_H */
diff --git a/dummynet/include/netinet/ip_dummynet.h b/dummynet/include/netinet/ip_dummynet.h
new file mode 100644 (file)
index 0000000..c6a6575
--- /dev/null
@@ -0,0 +1,399 @@
+/*-
+ * Copyright (c) 1998-2002 Luigi Rizzo, Universita` di Pisa
+ * Portions Copyright (c) 2000 Akamba Corp.
+ * All rights reserved
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: src/sys/netinet/ip_dummynet.h,v 1.40.2.1 2008/04/25 10:26:30 oleg Exp $
+ */
+
+#ifndef _IP_DUMMYNET_H
+#define _IP_DUMMYNET_H
+
+/*
+ * Definition of dummynet data structures. In the structures, I decided
+ * not to use the macros in <sys/queue.h> in the hope of making the code
+ * easier to port to other architectures. The type of lists and queue we
+ * use here is pretty simple anyways.
+ */
+
+/*
+ * We start with a heap, which is used in the scheduler to decide when
+ * to transmit packets etc.
+ *
+ * The key for the heap is used for two different values:
+ *
+ * 1. timer ticks: max 10K/second, so 32 bits are enough;
+ *
+ * 2. virtual times. These increase in steps of len/x, where len is the
+ *    packet length, and x is either the weight of the flow, or the
+ *    sum of all weights.
+ *    If we limit to max 1000 flows and a max weight of 100, then
+ *    x needs 17 bits. The packet size is 16 bits, so we can easily
+ *    overflow if we do not allow errors.
+ * So we use a key "dn_key" which is 64 bits. Some macros are used to
+ * compare key values and handle wraparounds.
+ * MAX64 returns the largest of two key values.
+ * MY_M is used as a shift count when doing fixed point arithmetic
+ * (a better name would be useful...).
+ */
+typedef u_int64_t dn_key ;      /* sorting key */
+#define DN_KEY_LT(a,b)     ((int64_t)((a)-(b)) < 0)
+#define DN_KEY_LEQ(a,b)    ((int64_t)((a)-(b)) <= 0)
+#define DN_KEY_GT(a,b)     ((int64_t)((a)-(b)) > 0)
+#define DN_KEY_GEQ(a,b)    ((int64_t)((a)-(b)) >= 0)
+#define MAX64(x,y)  (( (int64_t) ( (y)-(x) )) > 0 ) ? (y) : (x)
+#define MY_M   16 /* number of left shifts to obtain a larger precision */
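+
+/*
+ * Example: the comparisons work on the signed difference of the keys,
+ * so they stay correct across wraparound; e.g.
+ * DN_KEY_LT(0xfffffffffffffffeULL, 1ULL) is true because the
+ * difference, seen as int64_t, is negative: a key just before the wrap
+ * point sorts before one just after it.
+ */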
+
+/*
+ * XXX With this scaling, max 1000 flows, max weight 100, 1Gbit/s, the
+ * virtual time wraps every 15 days.
+ */
+
+
+/*
+ * The maximum hash table size for queues.  This value must be a power
+ * of 2.
+ */
+#define DN_MAX_HASH_SIZE 65536
+
+/*
+ * A heap entry is made of a key and a pointer to the actual
+ * object stored in the heap.
+ * The heap is an array of dn_heap_entry entries, dynamically allocated.
+ * Current size is "size", with "elements" actually in use.
+ * The heap normally supports only ordered insert and extract from the top.
+ * If we want to extract an object from the middle of the heap, we
+ * have to know where the object itself is located in the heap (or we
+ * need to scan the whole array). To this purpose, an object has a
+ * field (int) which contains the index of the object itself into the
+ * heap. When the object is moved, the field must also be updated.
+ * The offset of the index in the object is stored in the 'offset'
+ * field in the heap descriptor. The assumption is that this offset
+ * is non-zero if we want to support extract from the middle.
+ */
+struct dn_heap_entry {
+    dn_key key ;       /* sorting key. Topmost element is smallest one */
+    void *object ;     /* object pointer */
+} ;
+
+struct dn_heap {
+    int size ;
+    int elements ;
+    int offset ; /* XXX if > 0 this is the offset of direct ptr to obj */
+    struct dn_heap_entry *p ;  /* really an array of "size" entries */
+} ;
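+
+/*
+ * As an example of the offset convention (a sketch; the actual setup is
+ * done in ip_dummynet.c): dn_flow_queue below stores its own index in
+ * its 'heap_pos' field, so a heap holding such queues would be created
+ * with
+ *	offset = offsetof(struct dn_flow_queue, heap_pos)
+ * to support extract-from-the-middle.
+ */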
+
+#ifdef _KERNEL
+/*
+ * Packets processed by dummynet have an mbuf tag associated with
+ * them that carries their dummynet state.  This is used within
+ * the dummynet code as well as outside when checking for special
+ * processing requirements.
+ */
+struct dn_pkt_tag {
+    struct ip_fw *rule;                /* matching rule */
+    int dn_dir;                        /* action when packet comes out. */
+#define DN_TO_IP_OUT   1
+#define DN_TO_IP_IN    2
+/* Obsolete: #define DN_TO_BDG_FWD     3 */
+#define DN_TO_ETH_DEMUX        4
+#define DN_TO_ETH_OUT  5
+#define DN_TO_IP6_IN   6
+#define DN_TO_IP6_OUT  7
+#define DN_TO_IFB_FWD  8
+
+    dn_key output_time;                /* when the pkt is due for delivery     */
+    struct ifnet *ifp;         /* interface, for ip_output             */
+    struct _ip6dn_args ip6opt; /* XXX ipv6 options                     */
+};
+#endif /* _KERNEL */
+
+/*
+ * Overall structure of dummynet (with WF2Q+):
+
+In dummynet, packets are selected with the firewall rules, and passed
+to two different objects: PIPE or QUEUE.
+
+A QUEUE is just a queue with configurable size and queue management
+policy. It is also associated with a mask (to discriminate among
+different flows), a weight (used to give different shares of the
+bandwidth to different flows) and a "pipe", which essentially
+supplies the transmit clock for all queues associated with that
+pipe.
+
+A PIPE emulates a fixed-bandwidth link, whose bandwidth is
+configurable.  The "clock" for a pipe can come from either an
+internal timer, or from the transmit interrupt of an interface.
+A pipe is also associated with one (or more, if masks are used)
+queue, where all packets for that pipe are stored.
+
+The bandwidth available on the pipe is shared by the queues
+associated with that pipe (only one in case the packet is sent
+to a PIPE) according to the WF2Q+ scheduling algorithm and the
+configured weights.
+
+In general, incoming packets are stored in the appropriate queue,
+which is then placed into one of a few heaps managed by a scheduler
+to decide when the packet should be extracted.
+The scheduler (a function called dummynet()) is run at every timer
+tick, and grabs queues from the head of the heaps when they are
+ready for processing.
+
+There are three data structures defining a pipe and associated queues:
+
+ + dn_pipe, which contains the main configuration parameters related
+   to delay and bandwidth;
+ + dn_flow_set, which contains WF2Q+ configuration, flow
+   masks, plr and RED configuration;
+ + dn_flow_queue, which is the per-flow queue (containing the packets)
+
+Multiple dn_flow_set can be linked to the same pipe, and multiple
+dn_flow_queue can be linked to the same dn_flow_set.
+All data structures are linked in a linear list which is used for
+housekeeping purposes.
+
+During configuration, we create and initialize the dn_flow_set
+and dn_pipe structures (a dn_pipe also contains a dn_flow_set).
+
+At runtime: packets are sent to the appropriate dn_flow_set (either
+WFQ ones, or the one embedded in the dn_pipe for fixed-rate flows),
+which in turn dispatches them to the appropriate dn_flow_queue
+(created dynamically according to the masks).
+
+The transmit clock for fixed rate flows (ready_event()) selects the
+dn_flow_queue to be used to transmit the next packet. For WF2Q,
+wfq_ready_event() extracts a pipe which in turn selects the right
+flow using a number of heaps defined in the pipe itself.
+
+ *
+ */
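+
+/*
+ * Example configuration (userland commands; the numbers are arbitrary):
+ * a 1 Mbit/s pipe whose bandwidth is shared via WF2Q+ by one queue per
+ * source address, with one subnet given ten times the weight:
+ *
+ *	ipfw pipe 1 config bw 1Mbit/s delay 20ms
+ *	ipfw queue 1 config pipe 1 weight 10 mask src-ip 0xffffffff
+ *	ipfw queue 2 config pipe 1 weight 1 mask src-ip 0xffffffff
+ *	ipfw add queue 1 ip from 10.0.0.0/24 to any
+ *	ipfw add queue 2 ip from any to any
+ */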
+
+/*
+ * per flow queue. This contains the flow identifier, the queue
+ * of packets, counters, and parameters used to support both RED and
+ * WF2Q+.
+ *
+ * A dn_flow_queue is created and initialized whenever a packet for
+ * a new flow arrives.
+ */
+struct dn_flow_queue {
+    struct dn_flow_queue *next ;
+    struct ipfw_flow_id id ;
+
+    struct mbuf *head, *tail ; /* queue of packets */
+    u_int len ;
+    u_int len_bytes ;
+
+    /*
+     * When we emulate MAC overheads, or channel unavailability due
+     * to other traffic on a shared medium, we augment the packet at
+     * the head of the queue with an 'extra_bits' field representing
+     * the additional delay the packet will be subject to:
+     *         extra_bits = bw*unavailable_time.
+     * With large bandwidth and large delays, extra_bits (and also numbytes)
+     * can become very large, so we play safe and use 64 bits.
+     */
+    uint64_t numbytes ;                /* credit for transmission (dynamic queues) */
+    int64_t extra_bits;                /* extra bits simulating unavailable channel */
+
+    u_int64_t tot_pkts ;       /* statistics counters  */
+    u_int64_t tot_bytes ;
+    u_int32_t drops ;
+
+    int hash_slot ;            /* debugging/diagnostic */
+
+    /* RED parameters */
+    int avg ;                   /* average queue length est. (scaled) */
+    int count ;                 /* arrivals since last RED drop */
+    int random ;                /* random value (scaled) */
+    dn_key q_time;             /* start of queue idle time */
+
+    /* WF2Q+ support */
+    struct dn_flow_set *fs ;   /* parent flow set */
+    int heap_pos ;             /* position (index) of struct in heap */
+    dn_key sched_time ;                /* current time when queue enters ready_heap */
+
+    dn_key S,F ;               /* start time, finish time */
+    /*
+     * Setting F < S means the timestamp is invalid. We only need
+     * to test this when the queue is empty.
+     */
+} ;
+
+/*
+ * flow_set descriptor. Contains the "template" parameters for the
+ * queue configuration, and pointers to the hash table of dn_flow_queue's.
+ *
+ * The hash table is an array of lists -- we identify the slot by
+ * hashing the flow-id, then scan the list looking for a match.
+ * The size of the hash table (buckets) is configurable on a per-queue
+ * basis.
+ *
+ * A dn_flow_set is created whenever a new queue or pipe is created (in the
+ * latter case, the structure is located inside the struct dn_pipe).
+ */
+struct dn_flow_set {
+    SLIST_ENTRY(dn_flow_set)   next;   /* linked list in a hash slot */
+
+    u_short fs_nr ;             /* flow_set number       */
+    u_short flags_fs;
+#define DN_HAVE_FLOW_MASK      0x0001
+#define DN_IS_RED              0x0002
+#define DN_IS_GENTLE_RED       0x0004
+#define DN_QSIZE_IS_BYTES      0x0008  /* queue size is measured in bytes */
+#define DN_NOERROR             0x0010  /* do not report ENOBUFS on drops  */
+#define        DN_HAS_PROFILE          0x0020  /* the pipe has a delay profile. */
+#define DN_IS_PIPE             0x4000
+#define DN_IS_QUEUE            0x8000
+
+    struct dn_pipe *pipe ;     /* pointer to parent pipe */
+    u_short parent_nr ;                /* parent pipe#, 0 if local to a pipe */
+
+    int weight ;               /* WFQ queue weight */
+    int qsize ;                        /* queue size in slots or bytes */
+    int plr ;                  /* pkt loss rate (2^31-1 means 100%) */
+
+    struct ipfw_flow_id flow_mask ;
+
+    /* hash table of queues onto this flow_set */
+    int rq_size ;              /* number of slots */
+    int rq_elements ;          /* active elements */
+    struct dn_flow_queue **rq; /* array of rq_size entries */
+
+    u_int32_t last_expired ;   /* do not expire too frequently */
+    int backlogged ;           /* #active queues for this flowset */
+
+        /* RED parameters */
+#define SCALE_RED               16
+#define SCALE(x)                ( (x) << SCALE_RED )
+#define SCALE_VAL(x)            ( (x) >> SCALE_RED )
+#define SCALE_MUL(x,y)          ( ( (x) * (y) ) >> SCALE_RED )
+    int w_q ;                  /* queue weight (scaled) */
+    int max_th ;               /* maximum threshold for queue (scaled) */
+    int min_th ;               /* minimum threshold for queue (scaled) */
+    int max_p ;                        /* maximum value for p_b (scaled) */
+    u_int c_1 ;                        /* max_p/(max_th-min_th) (scaled) */
+    u_int c_2 ;                        /* max_p*min_th/(max_th-min_th) (scaled) */
+    u_int c_3 ;                        /* for GRED, (1-max_p)/max_th (scaled) */
+    u_int c_4 ;                        /* for GRED, 1 - 2*max_p (scaled) */
+    u_int * w_q_lookup ;       /* lookup table for computing (1-w_q)^t */
+    u_int lookup_depth ;       /* depth of lookup table */
+    int lookup_step ;          /* granularity inside the lookup table */
+    int lookup_weight ;                /* equal to (1-w_q)^t / (1-w_q)^(t+1) */
+    int avg_pkt_size ;         /* medium packet size */
+    int max_pkt_size ;         /* max packet size */
+};
+SLIST_HEAD(dn_flow_set_head, dn_flow_set);
+
+/*
+ * Pipe descriptor. Contains global parameters, delay-line queue,
+ * and the flow_set used for fixed-rate queues.
+ *
+ * For WF2Q+ support it also has 3 heaps holding dn_flow_queue:
+ *   not_eligible_heap, for queues whose start time is higher
+ *     than the virtual time. Sorted by start time.
+ *   scheduler_heap, for queues eligible for scheduling. Sorted by
+ *     finish time.
+ *   idle_heap, all flows that are idle and can be removed. We
+ *     do that on each tick so we do not slow down too much
+ *     operations during forwarding.
+ *
+ */
+struct dn_pipe {               /* a pipe */
+    SLIST_ENTRY(dn_pipe)       next;   /* linked list in a hash slot */
+
+    int        pipe_nr ;               /* number       */
+    int bandwidth;             /* really, bytes/tick.  */
+    int        delay ;                 /* really, ticks        */
+
+    struct     mbuf *head, *tail ;     /* packets in delay line */
+
+    /* WF2Q+ */
+    struct dn_heap scheduler_heap ; /* top extract - key Finish time*/
+    struct dn_heap not_eligible_heap; /* top extract- key Start time */
+    struct dn_heap idle_heap ; /* random extract - key Start=Finish time */
+
+    dn_key V ;                 /* virtual time */
+    int sum;                   /* sum of weights of all active sessions */
+
+    /* Same as in dn_flow_queue, numbytes can become large */
+    int64_t numbytes;          /* bits I can transmit (more or less). */
+
+    dn_key sched_time ;                /* time pipe was scheduled in ready_heap */
+
+    /*
+     * When the tx clock comes from an interface (if_name[0] != '\0'), its name
+     * is stored below, whereas the ifp is filled when the rule is configured.
+     */
+    char if_name[IFNAMSIZ];
+    struct ifnet *ifp ;
+    int ready ; /* set if ifp != NULL and we got a signal from it */
+
+    struct dn_flow_set fs ; /* used with fixed-rate flows */
+
+    /* fields to simulate a delay profile */
+
+#define ED_MAX_NAME_LEN                32
+    char name[ED_MAX_NAME_LEN];
+    int loss_level;
+    int samples_no;
+    int *samples;
+};
+
+/* dn_pipe_max is used to pass a pipe configuration from userland to
+ * kernel space and back.
+ */
+#define ED_MAX_SAMPLES_NO      1024
+struct dn_pipe_max {
+       struct dn_pipe pipe;
+       int samples[ED_MAX_SAMPLES_NO];
+};
+
+SLIST_HEAD(dn_pipe_head, dn_pipe);
+
+#ifdef _KERNEL
+typedef        int ip_dn_ctl_t(struct sockopt *); /* raw_ip.c */
+typedef        void ip_dn_ruledel_t(void *); /* ip_fw.c */
+typedef        int ip_dn_io_t(struct mbuf **m, int dir, struct ip_fw_args *fwa);
+extern ip_dn_ctl_t *ip_dn_ctl_ptr;
+extern ip_dn_ruledel_t *ip_dn_ruledel_ptr;
+extern ip_dn_io_t *ip_dn_io_ptr;
+#define        DUMMYNET_LOADED (ip_dn_io_ptr != NULL)
+
+/*
+ * Return the IPFW rule associated with the dummynet tag, if any.
+ * Make sure that the dummynet tag is not reused by lower layers.
+ */
+static __inline struct ip_fw *
+ip_dn_claim_rule(struct mbuf *m)
+{
+       struct m_tag *mtag = m_tag_find(m, PACKET_TAG_DUMMYNET, NULL);
+       if (mtag != NULL) {
+               mtag->m_tag_id = PACKET_TAG_NONE;
+               return (((struct dn_pkt_tag *)(mtag+1))->rule);
+       } else
+               return (NULL);
+}
+#endif
+#endif /* _IP_DUMMYNET_H */
diff --git a/dummynet/include/netinet/ip_fw.h b/dummynet/include/netinet/ip_fw.h
new file mode 100644 (file)
index 0000000..bd4d3f9
--- /dev/null
@@ -0,0 +1,680 @@
+/*-
+ * Copyright (c) 2002 Luigi Rizzo, Universita` di Pisa
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: src/sys/netinet/ip_fw.h,v 1.110.2.6 2008/10/14 08:03:58 rwatson Exp $
+ */
+
+#ifndef _IPFW2_H
+#define _IPFW2_H
+
+/*
+ * The default rule number.  By the design of ip_fw, the default rule
+ * is the last one, so its number can also serve as the highest number
+ * allowed for a rule.  The ip_fw code relies on both meanings of this
+ * constant. 
+ */
+#define        IPFW_DEFAULT_RULE       65535
+
+/*
+ * The kernel representation of ipfw rules is made of a list of
+ * 'instructions' (for all practical purposes equivalent to BPF
+ * instructions), which specify which fields of the packet
+ * (or its metadata) should be analysed.
+ *
+ * Each instruction is stored in a structure which begins with
+ * "ipfw_insn", and can contain extra fields depending on the
+ * instruction type (listed below).
+ * Note that the code is written so that individual instructions
+ * have a size which is a multiple of 32 bits. This means that, if
+ * such structures contain pointers or other 64-bit entities
+ * (there is just one instance now), they may end up unaligned on
+ * 64-bit architectures, so they must be handled with care.
+ *
+ * "enum ipfw_opcodes" are the opcodes supported. We can have up
+ * to 256 different opcodes. When adding new opcodes, they should
+ * be appended to the end of the opcode list before O_LAST_OPCODE,
+ * this will prevent the ABI from being broken, otherwise users
+ * will have to recompile ipfw(8) when they update the kernel.
+ */
+
+enum ipfw_opcodes {            /* arguments (4 byte each)      */
+       O_NOP,
+
+       O_IP_SRC,               /* u32 = IP                     */
+       O_IP_SRC_MASK,          /* ip = IP/mask                 */
+       O_IP_SRC_ME,            /* none                         */
+       O_IP_SRC_SET,           /* u32=base, arg1=len, bitmap   */
+
+       O_IP_DST,               /* u32 = IP                     */
+       O_IP_DST_MASK,          /* ip = IP/mask                 */
+       O_IP_DST_ME,            /* none                         */
+       O_IP_DST_SET,           /* u32=base, arg1=len, bitmap   */
+
+       O_IP_SRCPORT,           /* (n)port list:mask 4 byte ea  */
+       O_IP_DSTPORT,           /* (n)port list:mask 4 byte ea  */
+       O_PROTO,                /* arg1=protocol                */
+
+       O_MACADDR2,             /* 2 mac addr:mask              */
+       O_MAC_TYPE,             /* same as srcport              */
+
+       O_LAYER2,               /* none                         */
+       O_IN,                   /* none                         */
+       O_FRAG,                 /* none                         */
+
+       O_RECV,                 /* none                         */
+       O_XMIT,                 /* none                         */
+       O_VIA,                  /* none                         */
+
+       O_IPOPT,                /* arg1 = 2*u8 bitmap           */
+       O_IPLEN,                /* arg1 = len                   */
+       O_IPID,                 /* arg1 = id                    */
+
+       O_IPTOS,                /* arg1 = id                    */
+       O_IPPRECEDENCE,         /* arg1 = precedence << 5       */
+       O_IPTTL,                /* arg1 = TTL                   */
+
+       O_IPVER,                /* arg1 = version               */
+       O_UID,                  /* u32 = id                     */
+       O_GID,                  /* u32 = id                     */
+       O_ESTAB,                /* none (tcp established)       */
+       O_TCPFLAGS,             /* arg1 = 2*u8 bitmap           */
+       O_TCPWIN,               /* arg1 = desired win           */
+       O_TCPSEQ,               /* u32 = desired seq.           */
+       O_TCPACK,               /* u32 = desired seq.           */
+       O_ICMPTYPE,             /* u32 = icmp bitmap            */
+       O_TCPOPTS,              /* arg1 = 2*u8 bitmap           */
+
+       O_VERREVPATH,           /* none                         */
+       O_VERSRCREACH,          /* none                         */
+
+       O_PROBE_STATE,          /* none                         */
+       O_KEEP_STATE,           /* none                         */
+       O_LIMIT,                /* ipfw_insn_limit              */
+       O_LIMIT_PARENT,         /* dyn_type, not an opcode.     */
+
+       /*
+        * These are really 'actions'.
+        */
+
+       O_LOG,                  /* ipfw_insn_log                */
+       O_PROB,                 /* u32 = match probability      */
+
+       O_CHECK_STATE,          /* none                         */
+       O_ACCEPT,               /* none                         */
+       O_DENY,                 /* none                         */
+       O_REJECT,               /* arg1=icmp arg (same as deny) */
+       O_COUNT,                /* none                         */
+       O_SKIPTO,               /* arg1=next rule number        */
+       O_PIPE,                 /* arg1=pipe number             */
+       O_QUEUE,                /* arg1=queue number            */
+       O_DIVERT,               /* arg1=port number             */
+       O_TEE,                  /* arg1=port number             */
+       O_FORWARD_IP,           /* fwd sockaddr                 */
+       O_FORWARD_MAC,          /* fwd mac                      */
+       O_NAT,                  /* nope                         */
+       O_REASS,                /* none                         */
+
+       /*
+        * More opcodes.
+        */
+       O_IPSEC,                /* has ipsec history            */
+       O_IP_SRC_LOOKUP,        /* arg1=table number, u32=value */
+       O_IP_DST_LOOKUP,        /* arg1=table number, u32=value */
+       O_ANTISPOOF,            /* none                         */
+       O_JAIL,                 /* u32 = id                     */
+       O_ALTQ,                 /* u32 = altq classif. qid      */
+       O_DIVERTED,             /* arg1=bitmap (1:loop, 2:out)  */
+       O_TCPDATALEN,           /* arg1 = tcp data len          */
+       O_IP6_SRC,              /* address without mask         */
+       O_IP6_SRC_ME,           /* my addresses                 */
+       O_IP6_SRC_MASK,         /* address with the mask        */
+       O_IP6_DST,
+       O_IP6_DST_ME,
+       O_IP6_DST_MASK,
+       O_FLOW6ID,              /* for flow id tag in the ipv6 pkt */
+       O_ICMP6TYPE,            /* icmp6 packet type filtering  */
+       O_EXT_HDR,              /* filtering for ipv6 extension header */
+       O_IP6,
+
+       /*
+        * actions for ng_ipfw
+        */
+       O_NETGRAPH,             /* send to ng_ipfw              */
+       O_NGTEE,                /* copy to ng_ipfw              */
+
+       O_IP4,
+
+       O_UNREACH6,             /* arg1=icmpv6 code arg (deny)  */
+
+       O_TAG,                  /* arg1=tag number */
+       O_TAGGED,               /* arg1=tag number */
+
+       O_SETFIB,               /* arg1=FIB number */
+       O_FIB,                  /* arg1=FIB desired fib number */
+
+       O_LAST_OPCODE           /* not an opcode!               */
+};
+
+/*
+ * The extension headers are filtered only for presence, using a bit
+ * vector with a flag for each header.
+ */
+#define EXT_FRAGMENT   0x1
+#define EXT_HOPOPTS    0x2
+#define EXT_ROUTING    0x4
+#define EXT_AH         0x8
+#define EXT_ESP                0x10
+#define EXT_DSTOPTS    0x20
+#define EXT_RTHDR0     0x40
+#define EXT_RTHDR2     0x80
+
+/*
+ * Template for instructions.
+ *
+ * ipfw_insn is used for all instructions which require no operands,
+ * a single 16-bit value (arg1), or a couple of 8-bit values.
+ *
+ * For other instructions which require different/larger arguments
+ * we have derived structures, ipfw_insn_*.
+ *
+ * The size of the instruction (in 32-bit words) is in the low
+ * 6 bits of "len". The 2 remaining bits are used to implement
+ * NOT and OR on individual instructions. Given a type, you can
+ * compute the length to be put in "len" using F_INSN_SIZE(t).
+ *
+ * F_NOT       negates the match result of the instruction.
+ *
+ * F_OR                is used to build "or" blocks. By default, instructions
+ *             are evaluated as part of a logical AND. An "or" block
+ *             { X or Y or Z } contains F_OR set in all but the last
+ *             instruction of the block. A match will cause the code
+ *             to skip past the last instruction of the block.
+ *
+ * NOTA BENE: in a couple of places we assume that
+ *     sizeof(ipfw_insn) == sizeof(u_int32_t);
+ * this needs to be fixed.
+ *
+ */
+typedef struct _ipfw_insn {    /* template for instructions */
+       enum ipfw_opcodes       opcode:8;
+       u_int8_t        len;    /* number of 32-bit words */
+#define        F_NOT           0x80
+#define        F_OR            0x40
+#define        F_LEN_MASK      0x3f
+#define        F_LEN(cmd)      ((cmd)->len & F_LEN_MASK)
+
+       u_int16_t       arg1;
+} ipfw_insn;
+
+/*
+ * The F_INSN_SIZE(type) computes the size, in 4-byte words, of
+ * a given type.
+ */
+#define        F_INSN_SIZE(t)  ((sizeof (t))/sizeof(u_int32_t))
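+
+/*
+ * Illustrative sketch (not compiled): filling in match instructions.
+ * A "proto tcp" check fits in a bare ipfw_insn, while a single source
+ * address uses the larger ipfw_insn_u32 (defined below), so "len" must
+ * be set with F_INSN_SIZE() of the actual structure.
+ */
+#if 0
+	ipfw_insn cmd;				/* match "proto 6" (tcp) */
+	cmd.opcode = O_PROTO;
+	cmd.len = F_INSN_SIZE(ipfw_insn);	/* 1 32-bit word */
+	cmd.arg1 = 6;
+
+	ipfw_insn_u32 src;			/* match one source address */
+	src.o.opcode = O_IP_SRC;
+	src.o.len = F_INSN_SIZE(ipfw_insn_u32);	/* 2 32-bit words */
+	src.d[0] = htonl(0xc0a80001);		/* 192.168.0.1 */
+#endif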
+
+#define MTAG_IPFW      1148380143      /* IPFW-tagged cookie */
+
+/*
+ * This is used to store an array of 16-bit entries (ports etc.)
+ */
+typedef struct _ipfw_insn_u16 {
+       ipfw_insn o;
+       u_int16_t ports[2];     /* there may be more */
+} ipfw_insn_u16;
+
+/*
+ * This is used to store an array of 32-bit entries
+ * (uid, single IPv4 addresses etc.)
+ */
+typedef struct _ipfw_insn_u32 {
+       ipfw_insn o;
+       u_int32_t d[1]; /* one or more */
+} ipfw_insn_u32;
+
+/*
+ * This is used to store IP addr-mask pairs.
+ */
+typedef struct _ipfw_insn_ip {
+       ipfw_insn o;
+       struct in_addr  addr;
+       struct in_addr  mask;
+} ipfw_insn_ip;
+
+/*
+ * This is used to forward to a given address (ip).
+ */
+typedef struct  _ipfw_insn_sa {
+       ipfw_insn o;
+       struct sockaddr_in sa;
+} ipfw_insn_sa;
+
+/*
+ * This is used for MAC addr-mask pairs.
+ */
+typedef struct _ipfw_insn_mac {
+       ipfw_insn o;
+       u_char addr[12];        /* dst[6] + src[6] */
+       u_char mask[12];        /* dst[6] + src[6] */
+} ipfw_insn_mac;
+
+/*
+ * This is used for interface match rules (recv xx, xmit xx).
+ */
+typedef struct _ipfw_insn_if {
+       ipfw_insn o;
+       union {
+               struct in_addr ip;
+               int glob;
+       } p;
+       char name[IFNAMSIZ];
+} ipfw_insn_if;
+
+/*
+ * This is used for storing an altq queue id number.
+ */
+typedef struct _ipfw_insn_altq {
+       ipfw_insn       o;
+       u_int32_t       qid;
+} ipfw_insn_altq;
+
+/*
+ * This is used for limit rules.
+ */
+typedef struct _ipfw_insn_limit {
+       ipfw_insn o;
+       u_int8_t _pad;
+       u_int8_t limit_mask;    /* combination of DYN_* below   */
+#define        DYN_SRC_ADDR    0x1
+#define        DYN_SRC_PORT    0x2
+#define        DYN_DST_ADDR    0x4
+#define        DYN_DST_PORT    0x8
+
+       u_int16_t conn_limit;
+} ipfw_insn_limit;
+
+/*
+ * This is used for log instructions.
+ */
+typedef struct  _ipfw_insn_log {
+        ipfw_insn o;
+       u_int32_t max_log;      /* how many do we log -- 0 = all */
+       u_int32_t log_left;     /* how many left to log         */
+} ipfw_insn_log;
+
+/*
+ * Data structures required by both ipfw(8) and ipfw(4) but not part of the
+ * management API are protected by IPFW_INTERNAL.
+ */
+#ifdef IPFW_INTERNAL
+/* Server pool support (LSNAT). */
+struct cfg_spool {
+       LIST_ENTRY(cfg_spool)   _next;          /* chain of spool instances */
+       struct in_addr          addr;
+       u_short                 port;
+};
+#endif
+
+/* Redirect modes id. */
+#define REDIR_ADDR      0x01
+#define REDIR_PORT      0x02
+#define REDIR_PROTO     0x04
+
+#ifdef IPFW_INTERNAL
+/* Nat redirect configuration. */
+struct cfg_redir {
+       LIST_ENTRY(cfg_redir)   _next;          /* chain of redir instances */
+       u_int16_t               mode;           /* type of redirect mode */
+       struct in_addr          laddr;          /* local ip address */
+       struct in_addr          paddr;          /* public ip address */
+       struct in_addr          raddr;          /* remote ip address */
+       u_short                 lport;          /* local port */
+       u_short                 pport;          /* public port */
+       u_short                 rport;          /* remote port  */
+       u_short                 pport_cnt;      /* number of public ports */
+       u_short                 rport_cnt;      /* number of remote ports */
+       int                     proto;          /* protocol: tcp/udp */
+       struct alias_link       **alink;        
+       /* number of entries in spool chain */
+       u_int16_t               spool_cnt;      
+       /* chain of spool instances */
+       LIST_HEAD(spool_chain, cfg_spool) spool_chain;
+};
+#endif
+
+#define NAT_BUF_LEN     1024
+
+#ifdef IPFW_INTERNAL
+/* Nat configuration data struct. */
+struct cfg_nat {
+       /* chain of nat instances */
+       LIST_ENTRY(cfg_nat)     _next;
+       int                     id;                     /* nat id */
+       struct in_addr          ip;                     /* nat ip address */
+       char                    if_name[IF_NAMESIZE];   /* interface name */
+       int                     mode;                   /* aliasing mode */
+       struct libalias         *lib;                   /* libalias instance */
+       /* number of entries in redir chain */
+       int                     redir_cnt;              
+       /* chain of redir instances */
+       LIST_HEAD(redir_chain, cfg_redir) redir_chain;  
+};
+#endif
+
+#define SOF_NAT         sizeof(struct cfg_nat)
+#define SOF_REDIR       sizeof(struct cfg_redir)
+#define SOF_SPOOL       sizeof(struct cfg_spool)
+
+/* Nat command. */
+typedef struct _ipfw_insn_nat {
+       ipfw_insn       o;
+       struct cfg_nat *nat;    
+} ipfw_insn_nat;
+
+/* Apply ipv6 mask on ipv6 addr. Wrapped in do { } while (0) so the
+ * multi-statement expansion is safe inside an unbraced if/else.
+ */
+#define APPLY_MASK(addr,mask)  do {                                    \
+    (addr)->__u6_addr.__u6_addr32[0] &= (mask)->__u6_addr.__u6_addr32[0]; \
+    (addr)->__u6_addr.__u6_addr32[1] &= (mask)->__u6_addr.__u6_addr32[1]; \
+    (addr)->__u6_addr.__u6_addr32[2] &= (mask)->__u6_addr.__u6_addr32[2]; \
+    (addr)->__u6_addr.__u6_addr32[3] &= (mask)->__u6_addr.__u6_addr32[3]; \
+} while (0)
+
+/* Structure for ipv6 */
+typedef struct _ipfw_insn_ip6 {
+       ipfw_insn o;
+       struct in6_addr addr6;
+       struct in6_addr mask6;
+} ipfw_insn_ip6;
+
+/* Used to support icmp6 types */
+typedef struct _ipfw_insn_icmp6 {
+       ipfw_insn o;
+       uint32_t d[7]; /* XXX This number is related to the netinet/icmp6.h
+                       *     define ICMP6_MAXTYPE as follows:
+                       *     n = ICMP6_MAXTYPE/32 + 1.
+                       *     ICMP6_MAXTYPE is currently 203, so n = 7.
+                       */
+} ipfw_insn_icmp6;
+
+/*
+ * Here we have the structure representing an ipfw rule.
+ *
+ * It starts with a general area (with link fields and counters)
+ * followed by an array of one or more instructions, which the code
+ * accesses as an array of 32-bit values.
+ *
+ * Given a rule pointer  r:
+ *
+ *  r->cmd             is the start of the first instruction.
+ *  ACTION_PTR(r)      is the start of the first action (things to do
+ *                     once a rule has matched).
+ *
+ * When assembling instructions, remember the following:
+ *
+ *  + if a rule has a "keep-state" (or "limit") option, then the
+ *     first instruction (at r->cmd) MUST BE an O_PROBE_STATE
+ *  + if a rule has a "log" option, then the first action
+ *     (at ACTION_PTR(r)) MUST be O_LOG
+ *  + if a rule has an "altq" option, it comes after "log"
+ *  + if a rule has an O_TAG option, it comes after "log" and "altq"
+ *
+ * NOTE: we use a simple linked list of rules because we never need
+ *     to delete a rule without scanning the list. We do not use
+ *     queue(3) macros for portability and readability.
+ */
+
+struct ip_fw {
+       struct ip_fw    *next;          /* linked list of rules         */
+       struct ip_fw    *next_rule;     /* ptr to next [skipto] rule    */
+       /* 'next_rule' is used to pass up 'set_disable' status          */
+
+       u_int16_t       act_ofs;        /* offset of action in 32-bit units */
+       u_int16_t       cmd_len;        /* # of 32-bit words in cmd     */
+       u_int16_t       rulenum;        /* rule number                  */
+       u_int8_t        set;            /* rule set (0..31)             */
+#define        RESVD_SET       31      /* set for default and persistent rules */
+       u_int8_t        _pad;           /* padding                      */
+
+       /* These fields are present in all rules.                       */
+       u_int64_t       pcnt;           /* Packet counter               */
+       u_int64_t       bcnt;           /* Byte counter                 */
+       u_int32_t       timestamp;      /* tv_sec of last match         */
+
+       ipfw_insn       cmd[1];         /* storage for commands         */
+};
+
+#define ACTION_PTR(rule)                               \
+       (ipfw_insn *)( (u_int32_t *)((rule)->cmd) + ((rule)->act_ofs) )
+
+#define RULESIZE(rule)  (sizeof(struct ip_fw) + \
+       ((struct ip_fw *)(rule))->cmd_len * 4 - 4)
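+
+/*
+ * Illustrative sketch (not compiled): walking the instructions of a
+ * rule.  Match instructions run from cmd[0] up to ACTION_PTR(rule),
+ * actions from there up to cmd_len; F_LEN() gives the size of each
+ * step in 32-bit words.
+ */
+#if 0
+	ipfw_insn *cmd;
+	int l;
+
+	for (l = rule->cmd_len, cmd = rule->cmd; l > 0;
+	    l -= F_LEN(cmd), cmd += F_LEN(cmd)) {
+		if (cmd == ACTION_PTR(rule))
+			printf("actions start here\n");
+		printf("opcode %d, %d words\n", cmd->opcode, F_LEN(cmd));
+	}
+#endif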
+
+/*
+ * This structure is used as a flow mask and a flow id for various
+ * parts of the code.
+ */
+struct ipfw_flow_id {
+       u_int32_t       dst_ip;
+       u_int32_t       src_ip;
+       u_int16_t       dst_port;
+       u_int16_t       src_port;
+       u_int8_t        fib;
+       u_int8_t        proto;
+       u_int8_t        flags;  /* protocol-specific flags */
+       uint8_t         addr_type; /* 4 = ipv4, 6 = ipv6, 1=ether ? */
+       struct in6_addr dst_ip6;        /* could also store MAC addr! */
+       struct in6_addr src_ip6;
+       u_int32_t       flow_id6;
+       u_int32_t       frag_id6;
+};
+
+#define IS_IP6_FLOW_ID(id)     ((id)->addr_type == 6)
+
+/*
+ * Dynamic ipfw rule.
+ */
+typedef struct _ipfw_dyn_rule ipfw_dyn_rule;
+
+struct _ipfw_dyn_rule {
+       ipfw_dyn_rule   *next;          /* linked list of rules.        */
+       struct ip_fw *rule;             /* pointer to rule              */
+       /* 'rule' is used to pass up the rule number (from the parent)  */
+
+       ipfw_dyn_rule *parent;          /* pointer to parent rule       */
+       u_int64_t       pcnt;           /* packet match counter         */
+       u_int64_t       bcnt;           /* byte match counter           */
+       struct ipfw_flow_id id;         /* (masked) flow id             */
+       u_int32_t       expire;         /* expire time                  */
+       u_int32_t       bucket;         /* which bucket in hash table   */
+       u_int32_t       state;          /* state of this rule (typically a
+                                        * combination of TCP flags)
+                                        */
+       u_int32_t       ack_fwd;        /* most recent ACKs in forward  */
+       u_int32_t       ack_rev;        /* and reverse directions (used */
+                                       /* to generate keepalives)      */
+       u_int16_t       dyn_type;       /* rule type                    */
+       u_int16_t       count;          /* refcount                     */
+};
+
+/*
+ * Definitions for IP option names.
+ */
+#define        IP_FW_IPOPT_LSRR        0x01
+#define        IP_FW_IPOPT_SSRR        0x02
+#define        IP_FW_IPOPT_RR          0x04
+#define        IP_FW_IPOPT_TS          0x08
+
+/*
+ * Definitions for TCP option names.
+ */
+#define        IP_FW_TCPOPT_MSS        0x01
+#define        IP_FW_TCPOPT_WINDOW     0x02
+#define        IP_FW_TCPOPT_SACK       0x04
+#define        IP_FW_TCPOPT_TS         0x08
+#define        IP_FW_TCPOPT_CC         0x10
+
+#define        ICMP_REJECT_RST         0x100   /* fake ICMP code (send a TCP RST) */
+#define        ICMP6_UNREACH_RST       0x100   /* fake ICMPv6 code (send a TCP RST) */
+
+/*
+ * These are used for lookup tables.
+ */
+typedef struct _ipfw_table_entry {
+       in_addr_t       addr;           /* network address              */
+       u_int32_t       value;          /* value                        */
+       u_int16_t       tbl;            /* table number                 */
+       u_int8_t        masklen;        /* mask length                  */
+} ipfw_table_entry;
+
+typedef struct _ipfw_table {
+       u_int32_t       size;           /* size of entries in bytes     */
+       u_int32_t       cnt;            /* # of entries                 */
+       u_int16_t       tbl;            /* table number                 */
+       ipfw_table_entry ent[0];        /* entries                      */
+} ipfw_table;
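+
+/*
+ * Illustrative sketch (not compiled): userland fills one
+ * ipfw_table_entry per prefix and hands it to the kernel via a
+ * socket option (IP_FW_TABLE_ADD in FreeBSD; not declared in this
+ * header).
+ */
+#if 0
+	ipfw_table_entry ent;
+
+	bzero(&ent, sizeof(ent));
+	ent.tbl = 1;				/* table number */
+	ent.addr = inet_addr("10.0.0.0");	/* network address */
+	ent.masklen = 8;			/* 10.0.0.0/8 */
+	ent.value = 42;				/* returned on a match */
+#endif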
+
+#define IP_FW_TABLEARG 65535
+
+/*
+ * Main firewall chains definitions and global var's definitions.
+ */
+#ifdef _KERNEL
+
+/* Return values from ipfw_chk() */
+enum {
+       IP_FW_PASS = 0,
+       IP_FW_DENY,
+       IP_FW_DIVERT,
+       IP_FW_TEE,
+       IP_FW_DUMMYNET,
+       IP_FW_NETGRAPH,
+       IP_FW_NGTEE,
+       IP_FW_NAT,
+       IP_FW_REASS,
+};
+
+/* flags for divert mtag */
+#define        IP_FW_DIVERT_LOOPBACK_FLAG      0x00080000
+#define        IP_FW_DIVERT_OUTPUT_FLAG        0x00100000
+
+/*
+ * Structure for collecting parameters to dummynet for ip6_output forwarding
+ */
+struct _ip6dn_args {
+       struct ip6_pktopts *opt_or;
+       struct route_in6 ro_or;
+       int flags_or;
+       struct ip6_moptions *im6o_or;
+       struct ifnet *origifp_or;
+       struct ifnet *ifp_or;
+       struct sockaddr_in6 dst_or;
+       u_long mtu_or;
+       struct route_in6 ro_pmtu_or;
+};
+
+/*
+ * Arguments for calling ipfw_chk() and dummynet_io(). We put them
+ * all into a structure because this way it is easier and more
+ * efficient to pass variables around and extend the interface.
+ */
+struct ip_fw_args {
+       struct mbuf     *m;             /* the mbuf chain               */
+       struct ifnet    *oif;           /* output interface             */
+       struct sockaddr_in *next_hop;   /* forward address              */
+       struct ip_fw    *rule;          /* matching rule                */
+       struct ether_header *eh;        /* for bridged packets          */
+
+       struct ipfw_flow_id f_id;       /* grabbed from IP header       */
+       u_int32_t       cookie;         /* a cookie depending on rule action */
+       struct inpcb    *inp;
+
+       struct _ip6dn_args      dummypar; /* dummynet->ip6_output */
+       struct sockaddr_in hopstore;    /* store here if we cannot use a pointer */
+};
+
+/*
+ * Function definitions.
+ */
+
+/* Firewall hooks */
+struct sockopt;
+struct dn_flow_set;
+
+int ipfw_check_in(void *, struct mbuf **, struct ifnet *, int, struct inpcb *inp);
+int ipfw_check_out(void *, struct mbuf **, struct ifnet *, int, struct inpcb *inp);
+
+int ipfw_chk(struct ip_fw_args *);
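+
+/*
+ * Illustrative sketch (not compiled): how a pfil hook might dispatch
+ * on the ipfw_chk() return values above.  Argument setup is
+ * abbreviated.
+ */
+#if 0
+	struct ip_fw_args args;
+
+	bzero(&args, sizeof(args));
+	args.m = m;			/* mbuf under inspection */
+	args.oif = NULL;		/* NULL means the input path */
+	switch (ipfw_chk(&args)) {
+	case IP_FW_PASS:
+		break;			/* accept */
+	case IP_FW_DENY:
+		m_freem(args.m);	/* drop */
+		break;
+	case IP_FW_DUMMYNET:
+		/* hand off to dummynet, args.cookie holds the pipe/queue */
+		break;
+	default:
+		break;			/* divert, tee, netgraph, ... */
+	}
+#endif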
+
+int ipfw_init(void);
+void ipfw_destroy(void);
+
+typedef int ip_fw_ctl_t(struct sockopt *);
+extern ip_fw_ctl_t *ip_fw_ctl_ptr;
+extern int fw_one_pass;
+extern int fw_enable;
+#ifdef INET6
+extern int fw6_enable;
+#endif
+
+/* For kernel ipfw_ether and ipfw_bridge. */
+typedef        int ip_fw_chk_t(struct ip_fw_args *args);
+extern ip_fw_chk_t     *ip_fw_chk_ptr;
+#define        IPFW_LOADED     (ip_fw_chk_ptr != NULL)
+
+#ifdef IPFW_INTERNAL
+
+#define        IPFW_TABLES_MAX         128
+struct ip_fw_chain {
+       struct ip_fw    *rules;         /* list of rules */
+       struct ip_fw    *reap;          /* list of rules to reap */
+       LIST_HEAD(, cfg_nat) nat;       /* list of nat entries */
+       struct radix_node_head *tables[IPFW_TABLES_MAX];
+#if defined( __linux__ ) || defined( _WIN32 )
+       spinlock_t rwmtx;
+#else
+       struct rwlock   rwmtx;
+#endif /* !__linux__ */
+};
+#define        IPFW_LOCK_INIT(_chain) \
+       rw_init(&(_chain)->rwmtx, "IPFW static rules")
+#define        IPFW_LOCK_DESTROY(_chain)       rw_destroy(&(_chain)->rwmtx)
+#define        IPFW_WLOCK_ASSERT(_chain)       rw_assert(&(_chain)->rwmtx, RA_WLOCKED)
+
+#define IPFW_RLOCK(p) rw_rlock(&(p)->rwmtx)
+#define IPFW_RUNLOCK(p) rw_runlock(&(p)->rwmtx)
+#define IPFW_WLOCK(p) rw_wlock(&(p)->rwmtx)
+#define IPFW_WUNLOCK(p) rw_wunlock(&(p)->rwmtx)
+
+#define LOOKUP_NAT(l, i, p) do {                                       \
+               LIST_FOREACH((p), &(l.nat), _next) {                    \
+                       if ((p)->id == (i)) {                           \
+                               break;                                  \
+                       }                                               \
+               }                                                       \
+       } while (0)
+
+typedef int ipfw_nat_t(struct ip_fw_args *, struct cfg_nat *, struct mbuf *);
+typedef int ipfw_nat_cfg_t(struct sockopt *);
+#endif
+
+#endif /* _KERNEL */
+#endif /* _IPFW2_H */
diff --git a/dummynet/include/netinet/ip_icmp.h b/dummynet/include/netinet/ip_icmp.h
new file mode 100644 (file)
index 0000000..5c7b851
--- /dev/null
@@ -0,0 +1,17 @@
+/*
+ * additional define not present in linux
+ * should go in glue.h
+ */
+#ifndef _NETINET_IP_ICMP_H_
+#define _NETINET_IP_ICMP_H_
+
+#define ICMP_MAXTYPE            40      /* defined as 18 in compat.h */
+#define ICMP_ROUTERSOLICIT      10      /* router solicitation */
+#define ICMP_TSTAMP             13      /* timestamp request */
+#define ICMP_IREQ               15      /* information request */
+#define ICMP_MASKREQ            17      /* address mask request */
+#define ICMP_UNREACH_HOST       1       /* bad host */
+
+#define ICMP_UNREACH            3       /* dest unreachable, codes: */
+
+#endif /* _NETINET_IP_ICMP_H_ */
diff --git a/dummynet/include/netinet/tcp.h b/dummynet/include/netinet/tcp.h
new file mode 100644 (file)
index 0000000..168d971
--- /dev/null
@@ -0,0 +1,228 @@
+/*-
+ * Copyright (c) 1982, 1986, 1993
+ *     The Regents of the University of California.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *     @(#)tcp.h       8.1 (Berkeley) 6/10/93
+ * $FreeBSD: src/sys/netinet/tcp.h,v 1.40.2.2 2008/07/31 06:10:25 kmacy Exp $
+ */
+
+#ifndef _NETINET_TCP_H_
+#define _NETINET_TCP_H_
+
+#include <sys/cdefs.h>
+
+#define __BSD_VISIBLE 1
+
+#if __BSD_VISIBLE
+
+typedef        u_int32_t tcp_seq;
+
+#define tcp6_seq       tcp_seq /* for KAME src sync over BSD*'s */
+#define tcp6hdr                tcphdr  /* for KAME src sync over BSD*'s */
+
+/*
+ * TCP header.
+ * Per RFC 793, September, 1981.
+ */
+struct tcphdr {
+       u_short th_sport;               /* source port */
+       u_short th_dport;               /* destination port */
+       tcp_seq th_seq;                 /* sequence number */
+       tcp_seq th_ack;                 /* acknowledgement number */
+#if BYTE_ORDER == LITTLE_ENDIAN
+       u_int   th_x2:4,                /* (unused) */
+               th_off:4;               /* data offset */
+#endif
+#if BYTE_ORDER == BIG_ENDIAN
+       u_int   th_off:4,               /* data offset */
+               th_x2:4;                /* (unused) */
+#endif
+       u_char  th_flags;
+#define        TH_FIN  0x01
+#define        TH_SYN  0x02
+#define        TH_RST  0x04
+#define        TH_PUSH 0x08
+#define        TH_ACK  0x10
+#define        TH_URG  0x20
+#define        TH_ECE  0x40
+#define        TH_CWR  0x80
+#define        TH_FLAGS        (TH_FIN|TH_SYN|TH_RST|TH_PUSH|TH_ACK|TH_URG|TH_ECE|TH_CWR)
+#define        PRINT_TH_FLAGS  "\20\1FIN\2SYN\3RST\4PUSH\5ACK\6URG\7ECE\10CWR"
+
+       u_short th_win;                 /* window */
+       u_short th_sum;                 /* checksum */
+       u_short th_urp;                 /* urgent pointer */
+};
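+
+/*
+ * Illustrative sketch (not compiled): th_off counts 32-bit words, so
+ * the header length in bytes (including options) is th_off << 2, and
+ * flags are tested against the TH_* bits above.
+ */
+#if 0
+static __inline int
+tcp_header_len(const struct tcphdr *th)
+{
+	return (th->th_off << 2);	/* 20..60 bytes */
+}
+/* e.g. a pure SYN: (th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN */
+#endif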
+
+#define TCPOPT_EOL              0
+#define   TCPOLEN_EOL           1
+#define TCPOPT_PAD              0       /* padding after EOL */
+#define   TCPOLEN_PAD           1
+#define TCPOPT_NOP              1
+#define   TCPOLEN_NOP           1
+#define TCPOPT_MAXSEG           2
+#define   TCPOLEN_MAXSEG        4
+#define TCPOPT_WINDOW           3
+#define   TCPOLEN_WINDOW        3
+#define TCPOPT_SACK_PERMITTED   4
+#define   TCPOLEN_SACK_PERMITTED 2
+#define TCPOPT_SACK             5
+#define   TCPOLEN_SACKHDR       2
+#define   TCPOLEN_SACK          8       /* 2*sizeof(tcp_seq) */
+#define TCPOPT_TIMESTAMP        8
+#define   TCPOLEN_TIMESTAMP     10
+#define   TCPOLEN_TSTAMP_APPA   (TCPOLEN_TIMESTAMP+2) /* appendix A */
+#define TCPOPT_SIGNATURE        19      /* Keyed MD5: RFC 2385 */
+#define   TCPOLEN_SIGNATURE     18
+
+/* Miscellaneous constants */
+#define        MAX_SACK_BLKS   6       /* Max # SACK blocks stored at receiver side */
+#define        TCP_MAX_SACK    4       /* MAX # SACKs sent in any segment */
+
+
+/*
+ * Default maximum segment size for TCP.
+ * With an IP MTU of 576, this is 536,
+ * but 512 is probably more convenient.
+ * This should be defined as MIN(512, IP_MSS - sizeof (struct tcpiphdr)).
+ */
+#define        TCP_MSS 512
+/*
+ * TCP_MINMSS is defined to be 216 which is fine for the smallest
+ * link MTU (256 bytes, AX.25 packet radio) in the Internet.
+ * However it is very unlikely to come across such low MTU interfaces
+ * these days (anno dato 2003).
+ * See tcp_subr.c tcp_minmss SYSCTL declaration for more comments.
+ * Setting this to "0" disables the minmss check.
+ */
+#define        TCP_MINMSS 216
+
+/*
+ * Default maximum segment size for TCP6.
+ * With an IP6 MSS of 1280, this is 1220,
+ * but 1024 is probably more convenient. (xxx kazu in doubt)
+ * This should be defined as MIN(1024, IP6_MSS - sizeof (struct tcpip6hdr))
+ */
+#define        TCP6_MSS        1024
+
+#define        TCP_MAXWIN      65535   /* largest value for (unscaled) window */
+#define        TTCP_CLIENT_SND_WND     4096    /* dflt send window for T/TCP client */
+
+#define TCP_MAX_WINSHIFT       14      /* maximum window shift */
+
+#define TCP_MAXBURST           4       /* maximum segments in a burst */
+
+#define TCP_MAXHLEN    (0xf<<2)        /* max length of header in bytes */
+#define TCP_MAXOLEN    (TCP_MAXHLEN - sizeof(struct tcphdr))
+                                       /* max space left for options */
+#endif /* __BSD_VISIBLE */
+
+/*
+ * User-settable options (used with setsockopt).
+ */
+#define        TCP_NODELAY     0x01    /* don't delay send to coalesce packets */
+#if __BSD_VISIBLE
+#define        TCP_MAXSEG      0x02    /* set maximum segment size */
+#define TCP_NOPUSH     0x04    /* don't push last block of write */
+#define TCP_NOOPT      0x08    /* don't use TCP options */
+#define TCP_MD5SIG     0x10    /* use MD5 digests (RFC2385) */
+#define        TCP_INFO        0x20    /* retrieve tcp_info structure */
+#define        TCP_CONGESTION  0x40    /* get/set congestion control algorithm */
+
+#define        TCP_CA_NAME_MAX 16      /* max congestion control name length */
+
+#define        TCPI_OPT_TIMESTAMPS     0x01
+#define        TCPI_OPT_SACK           0x02
+#define        TCPI_OPT_WSCALE         0x04
+#define        TCPI_OPT_ECN            0x08
+#define        TCPI_OPT_TOE            0x10
+
+/*
+ * The TCP_INFO socket option comes from the Linux 2.6 TCP API, and permits
+ * the caller to query certain information about the state of a TCP
+ * connection.  We provide an overlapping set of fields with the Linux
+ * implementation, but since this is a fixed size structure, room has been
+ * left for growth.  In order to maximize potential future compatibility with
+ * the Linux API, the same variable names and order have been adopted, and
+ * padding left to make room for omitted fields in case they are added later.
+ *
+ * XXX: This is currently an unstable ABI/API, in that it is expected to
+ * change.
+ */
+struct tcp_info {
+       u_int8_t        tcpi_state;             /* TCP FSM state. */
+       u_int8_t        __tcpi_ca_state;
+       u_int8_t        __tcpi_retransmits;
+       u_int8_t        __tcpi_probes;
+       u_int8_t        __tcpi_backoff;
+       u_int8_t        tcpi_options;           /* Options enabled on conn. */
+       u_int8_t        tcpi_snd_wscale:4,      /* RFC1323 send shift value. */
+                       tcpi_rcv_wscale:4;      /* RFC1323 recv shift value. */
+
+       u_int32_t       __tcpi_rto;
+       u_int32_t       __tcpi_ato;
+       u_int32_t       __tcpi_snd_mss;
+       u_int32_t       __tcpi_rcv_mss;
+
+       u_int32_t       __tcpi_unacked;
+       u_int32_t       __tcpi_sacked;
+       u_int32_t       __tcpi_lost;
+       u_int32_t       __tcpi_retrans;
+       u_int32_t       __tcpi_fackets;
+
+       /* Times; measurements in usecs. */
+       u_int32_t       __tcpi_last_data_sent;
+       u_int32_t       __tcpi_last_ack_sent;   /* Also unimpl. on Linux? */
+       u_int32_t       __tcpi_last_data_recv;
+       u_int32_t       __tcpi_last_ack_recv;
+
+       /* Metrics; variable units. */
+       u_int32_t       __tcpi_pmtu;
+       u_int32_t       __tcpi_rcv_ssthresh;
+       u_int32_t       tcpi_rtt;               /* Smoothed RTT in usecs. */
+       u_int32_t       tcpi_rttvar;            /* RTT variance in usecs. */
+       u_int32_t       tcpi_snd_ssthresh;      /* Slow start threshold. */
+       u_int32_t       tcpi_snd_cwnd;          /* Send congestion window. */
+       u_int32_t       __tcpi_advmss;
+       u_int32_t       __tcpi_reordering;
+
+       u_int32_t       __tcpi_rcv_rtt;
+       u_int32_t       tcpi_rcv_space;         /* Advertised recv window. */
+
+       /* FreeBSD extensions to tcp_info. */
+       u_int32_t       tcpi_snd_wnd;           /* Advertised send window. */
+       u_int32_t       tcpi_snd_bwnd;          /* Bandwidth send window. */
+       u_int32_t       tcpi_snd_nxt;           /* Next egress seqno */
+       u_int32_t       tcpi_rcv_nxt;           /* Next ingress seqno */
+       u_int32_t       tcpi_toe_tid;           /* HWTID for TOE endpoints */
+       
+       /* Padding to grow without breaking ABI. */
+       u_int32_t       __tcpi_pad[29];         /* Padding. */
+};
+#endif
+
+#endif /* !_NETINET_TCP_H_ */
diff --git a/dummynet/include/netinet/tcp_var.h b/dummynet/include/netinet/tcp_var.h
new file mode 100644 (file)
index 0000000..35196a2
--- /dev/null
@@ -0,0 +1,4 @@
+#ifndef _NETINET_TCP_VAR_H_
+#define _NETINET_TCP_VAR_H_
+#include <netinet/tcp.h>
+#endif /* !_NETINET_TCP_VAR_H_ */
diff --git a/dummynet/include/netinet/udp.h b/dummynet/include/netinet/udp.h
new file mode 100644 (file)
index 0000000..aed3099
--- /dev/null
@@ -0,0 +1,48 @@
+/*-
+ * Copyright (c) 1982, 1986, 1993
+ *     The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *     @(#)udp.h       8.1 (Berkeley) 6/10/93
+ * $FreeBSD: src/sys/netinet/udp.h,v 1.10 2007/02/20 10:13:11 rwatson Exp $
+ */
+
+#ifndef _NETINET_UDP_H_
+#define        _NETINET_UDP_H_
+
+/*
+ * UDP protocol header.
+ * Per RFC 768, September, 1981.
+ */
+struct udphdr {
+       u_short uh_sport;               /* source port */
+       u_short uh_dport;               /* destination port */
+       u_short uh_ulen;                /* udp length */
+       u_short uh_sum;                 /* udp checksum */
+};
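+
+/*
+ * Illustrative sketch (not compiled): uh_ulen is in network byte order
+ * and includes the 8-byte header, so the payload length is:
+ *
+ *     int payload = ntohs(uh->uh_ulen) - sizeof(struct udphdr);
+ */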
+
+#endif
diff --git a/dummynet/include/sys/cdefs.h b/dummynet/include/sys/cdefs.h
new file mode 100644 (file)
index 0000000..b95b4b7
--- /dev/null
@@ -0,0 +1,33 @@
+#ifndef _CDEFS_H_
+#define _CDEFS_H_
+
+/*
+ * various compiler macros and common functions
+ */
+
+#ifndef __unused
+#define __unused       __attribute__ ((__unused__))
+#endif
+
+#ifndef __packed
+#define __packed       __attribute__ ((__packed__))
+#endif
+
+#ifndef __aligned
+#define __aligned(x) __attribute__((__aligned__(x)))
+#endif
+
+/* defined as assert */
+void panic(const char *fmt, ...);
+
+#define KASSERT(exp,msg) do {                                           \
+        if (__predict_false(!(exp)))                                    \
+                panic msg;                                              \
+} while (0)
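+
+/*
+ * Note that the macro expands to "panic msg", so the message argument
+ * must carry its own parentheses, e.g. (illustrative):
+ *
+ *     KASSERT(q != NULL, ("%s: NULL queue", __func__));
+ */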
+
+/* don't bother to optimize */
+#ifndef __predict_false
+#define __predict_false(x)   (x)       /* could be __builtin_expect((x), 0) */
+#endif
+
+#endif /* !_CDEFS_H_ */
diff --git a/dummynet/include/sys/kernel.h b/dummynet/include/sys/kernel.h
new file mode 100644 (file)
index 0000000..61b3bec
--- /dev/null
@@ -0,0 +1,20 @@
+/*
+ * from freebsd's kernel.h
+ */
+#ifndef _SYS_KERNEL_H_
+#define _SYS_KERNEL_H_
+
+#define SYSINIT(a, b, c, d, e)  \
+        void *dummy_ ## d = d
+
+/*
+ * Some enumerated orders; "ANY" sorts last.
+ */
+enum sysinit_elem_order {
+        SI_ORDER_FIRST          = 0x0000000,    /* first*/
+        SI_ORDER_SECOND         = 0x0000001,    /* second*/
+        SI_ORDER_THIRD          = 0x0000002,    /* third*/
+        SI_ORDER_MIDDLE         = 0x1000000,    /* somewhere in the middle */
+        SI_ORDER_ANY            = 0xfffffff     /* last*/
+};
+#endif
diff --git a/dummynet/include/sys/malloc.h b/dummynet/include/sys/malloc.h
new file mode 100644 (file)
index 0000000..d103801
--- /dev/null
@@ -0,0 +1,48 @@
+#ifndef _SYS_MALLOC_H_
+#define _SYS_MALLOC_H_
+
+/*
+ * No matter what, try to get clear memory and be non-blocking.
+ * XXX check if 2.4 has a native way to zero memory,
+ * XXX obey the flags (M_NOWAIT <-> GFP_ATOMIC, M_WAIT <-> GFP_KERNEL)
+ */
+#ifndef _WIN32 /* this is the linux version */
+
+#ifndef LINUX_24
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,22)
+#define malloc(_size, type, flags)                     \
+       kmalloc(_size, GFP_ATOMIC | __GFP_ZERO)
+#else /* LINUX < 2.6.22 and LINUX_24 */
+/* kernels up to 2.6.22 do not zero allocated memory, do it by hand */
+#define malloc(_size, type, flags)                     \
+       ({ int _s = _size;                              \
+       void *_ret = kmalloc(_s, GFP_ATOMIC);           \
+       if (_ret) memset(_ret, 0, _s);                  \
+        (_ret);                                                \
+        })
+#endif /* !LINUX_24 */
+#endif /* LINUX < 2.6.22 */
+
+#define calloc(_n, _s) malloc(((_n) * (_s)), NULL, GFP_ATOMIC | __GFP_ZERO)
+#define free(_var, type) kfree(_var)
+
+#else /* _WIN32, the windows version */
+
+/*
+ * ntddk.h uses win_malloc() and MmFreeContiguousMemory().
+ * wipfw uses
+ * ExAllocatePoolWithTag(, pool, len, tag)
+ * ExFreePoolWithTag(ptr, tag)
+ */
+#define malloc(_size, _type, _flags) my_alloc(_size)
+
+void *my_alloc(int _size);
+/* the 'tag' version does not work without -Gz in the linker */
+#define free(_var, type) ExFreePool(_var)
+//#define free(_var, type) ExFreePoolWithTag(_var, 'wfpi')
+
+#endif /* _WIN32 */
+
+#define M_NOWAIT        0x0001          /* do not block */
+#define M_ZERO          0x0100          /* bzero the allocation */
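+
+/*
+ * Illustrative sketch (not compiled): callers keep the FreeBSD
+ * three-argument form and the macros above map it to the native
+ * allocator.  M_IPFW is the malloc type tag used elsewhere in this
+ * code.
+ */
+#if 0
+	struct dn_pipe *p = malloc(sizeof(*p), M_IPFW, M_NOWAIT | M_ZERO);
+	if (p != NULL)
+		free(p, M_IPFW);
+#endif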
+#endif /* _SYS_MALLOC_H_ */
diff --git a/dummynet/include/sys/mbuf.h b/dummynet/include/sys/mbuf.h
new file mode 100644 (file)
index 0000000..e4e7591
--- /dev/null
@@ -0,0 +1,204 @@
+/*
+ * Copyright (C) 2009 Luigi Rizzo, Universita` di Pisa
+ *
+ * BSD copyright.
+ *
+ * A simple compatibility interface to map mbufs onto sk_buff
+ */
+
+#ifndef _SYS_MBUF_H_
+#define        _SYS_MBUF_H_
+
+#include <sys/malloc.h>                /* we use free() */
+/* hopefully queue.h is already included by someone else */
+#include <sys/queue.h>
+#ifdef _KERNEL
+
+/* bzero not present on linux, but this should go in glue.h */
+#define bzero(s, n) memset(s, 0, n)
+
+/*
+ * We implement a very simplified UMA allocator where the backend
+ * is simply malloc, and a uma_zone only stores the size of its objects.
+ */
+typedef int uma_zone_t;                /* the zone size */
+
+#define uma_zcreate(name, len, _3, _4, _5, _6, _7, _8) (len)
+
+
+#define uma_zfree(zone, item)  free(item, M_IPFW)
+#define uma_zalloc(zone, flags) malloc(zone, M_IPFW, flags)
+#define uma_zdestroy(zone)     do {} while (0)
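+
+/*
+ * Illustrative sketch (not compiled): with this shim a "zone" is just
+ * the object size, so the usual FreeBSD calling sequence still works:
+ */
+#if 0
+	uma_zone_t zone;
+	struct dn_pipe *p;
+
+	zone = uma_zcreate("dn_pipe", sizeof(struct dn_pipe),
+	    NULL, NULL, NULL, NULL, NULL, 0);	/* evaluates to the size */
+	p = uma_zalloc(zone, M_NOWAIT);		/* maps to malloc() */
+	uma_zfree(zone, p);			/* maps to free(p, M_IPFW) */
+	uma_zdestroy(zone);			/* no-op */
+#endif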
+
+/*-
+ * Macros for type conversion:
+ * mtod(m, t)  -- Convert mbuf pointer to data pointer of correct type.
+ */
+#define        mtod(m, t)      ((t)((m)->m_data))
+
+#endif /* _KERNEL */
+
+/*
+ * Packet tag structure (see below for details).
+ */
+struct m_tag {
+       SLIST_ENTRY(m_tag)      m_tag_link;     /* List of packet tags */
+       u_int16_t               m_tag_id;       /* Tag ID */
+       u_int16_t               m_tag_len;      /* Length of data */
+       u_int32_t               m_tag_cookie;   /* ABI/Module ID */
+       void                    (*m_tag_free)(struct m_tag *);
+};
+
+#if defined(__linux__) || defined( _WIN32 )
+
+/*
+ * Auxiliary structure to store values from the sk_buff.
+ * Note that we should not alter the sk_buff, and if we do
+ * so make sure to keep the values in sync between the mbuf
+ * and the sk_buff (especially m_len and m_pkthdr.len).
+ */
+
+struct mbuf {
+       struct mbuf *m_next;
+       struct mbuf *m_nextpkt;
+       void *m_data;
+       int m_len;      /* length in this mbuf */
+       int m_flags;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
+       struct nf_info *queue_entry;
+#else
+       struct nf_queue_entry *queue_entry;
+#endif
+       struct sk_buff *m_skb;
+       struct {
+               struct net_device *rcvif;
+               int len;        /* total packet len */
+               SLIST_HEAD (packet_tags, m_tag) tags;
+       } m_pkthdr;
+};
+
+#define M_SKIP_FIREWALL        0x01            /* skip firewall processing */
+#define M_BCAST         0x02 /* send/received as link-level broadcast */
+#define M_MCAST         0x04 /* send/received as link-level multicast */
+
+#define M_DONTWAIT      M_NOWAIT       /* should not be here... */
+
+
+/*
+ * m_dup() is used in the TEE case, which is currently unsupported,
+ * so we just return NULL.
+ */
+static __inline struct mbuf    *m_dup(struct mbuf __unused *m, int __unused n)
+{
+       return NULL;
+};
+
+#define        MTAG_ABI_COMPAT         0               /* compatibility ABI */
+static __inline struct m_tag *
+m_tag_find(struct mbuf __unused *m, int __unused type, struct m_tag __unused *start)
+{
+       return NULL;
+};
+
+
+static __inline void
+m_tag_prepend(struct mbuf *m, struct m_tag *t)
+{
+       SLIST_INSERT_HEAD(&m->m_pkthdr.tags, t, m_tag_link);
+}
+
+/*
+ * Create an mtag of the given type
+ */
+static __inline struct m_tag *
+m_tag_get(int type, int length, int wait)
+{
+       int l = length + sizeof(struct m_tag);
+       struct m_tag *m = malloc(l, 0, M_NOWAIT);
+       if (m) {
+               memset(m, 0, l);
+               m->m_tag_id = type;
+               m->m_tag_len = length;
+       }
+       return m;
+};
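+
+/*
+ * Illustrative sketch (not compiled): attaching a tag with the helpers
+ * above.  struct dn_pkt_tag stands for whatever payload the caller
+ * stores right after the m_tag header.
+ */
+#if 0
+	struct m_tag *mtag;
+
+	mtag = m_tag_get(PACKET_TAG_DUMMYNET,
+	    sizeof(struct dn_pkt_tag), M_NOWAIT);
+	if (mtag != NULL)
+		m_tag_prepend(m, mtag);	/* payload lives at (mtag + 1) */
+#endif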
+
+static __inline struct m_tag *
+m_tag_first(struct mbuf *m)
+{
+       return SLIST_FIRST(&m->m_pkthdr.tags);
+};
+
+static __inline void
+m_tag_delete(struct mbuf *m, struct m_tag *t)
+{
+};
+
+static __inline struct m_tag *
+m_tag_locate(struct mbuf *m, u_int32_t n, int x, struct m_tag *t)
+{
+       return NULL;
+};
+
+static __inline void
+m_freem(struct mbuf *m)
+{
+#if 0
+       struct m_tag *t;
+
+       while ( (t = SLIST_FIRST(&m->m_pkthdr.tags) ) ) {
+               SLIST_REMOVE_HEAD(&m->m_pkthdr.tags, m_tag_link);
+               free(t, 0);
+       }
+#endif
+       free(m, 0);
+};
+
+/* we cannot pullup, just return the mbuf unchanged */
+#define m_pullup(__m, __i)     (__m)
+
+#define M_GETFIB(_m)   0
+
+#endif /* !__linux__ */
+
+/*
+ * Persistent tags stay with an mbuf until the mbuf is reclaimed.  Otherwise
+ * tags are expected to ``vanish'' when they pass through a network
+ * interface.  For most interfaces this happens normally as the tags are
+ * reclaimed when the mbuf is free'd.  However in some special cases
+ * reclaiming must be done manually.  An example is packets that pass through
+ * the loopback interface.  Also, one must be careful to do this when
+ * ``turning around'' packets (e.g., icmp_reflect).
+ *
+ * To mark a tag persistent bit-or this flag in when defining the tag id.
+ * The tag will then be treated as described above.
+ */
+#define        MTAG_PERSISTENT                         0x800
+
+#define        PACKET_TAG_NONE                         0  /* Nadda */
+
+/* Packet tags for use with PACKET_ABI_COMPAT. */
+#define        PACKET_TAG_IPSEC_IN_DONE                1  /* IPsec applied, in */
+#define        PACKET_TAG_IPSEC_OUT_DONE               2  /* IPsec applied, out */
+#define        PACKET_TAG_IPSEC_IN_CRYPTO_DONE         3  /* NIC IPsec crypto done */
+#define        PACKET_TAG_IPSEC_OUT_CRYPTO_NEEDED      4  /* NIC IPsec crypto req'ed */
+#define        PACKET_TAG_IPSEC_IN_COULD_DO_CRYPTO     5  /* NIC notifies IPsec */
+#define        PACKET_TAG_IPSEC_PENDING_TDB            6  /* Reminder to do IPsec */
+#define        PACKET_TAG_BRIDGE                       7  /* Bridge processing done */
+#define        PACKET_TAG_GIF                          8  /* GIF processing done */
+#define        PACKET_TAG_GRE                          9  /* GRE processing done */
+#define        PACKET_TAG_IN_PACKET_CHECKSUM           10 /* NIC checksumming done */
+#define        PACKET_TAG_ENCAP                        11 /* Encap.  processing */
+#define        PACKET_TAG_IPSEC_SOCKET                 12 /* IPSEC socket ref */
+#define        PACKET_TAG_IPSEC_HISTORY                13 /* IPSEC history */
+#define        PACKET_TAG_IPV6_INPUT                   14 /* IPV6 input processing */
+#define        PACKET_TAG_DUMMYNET                     15 /* dummynet info */
+#define        PACKET_TAG_DIVERT                       17 /* divert info */
+#define        PACKET_TAG_IPFORWARD                    18 /* ipforward info */
+#define        PACKET_TAG_MACLABEL     (19 | MTAG_PERSISTENT) /* MAC label */
+#define        PACKET_TAG_PF                           21 /* PF + ALTQ information */
+#define        PACKET_TAG_RTSOCKFAM                    25 /* rtsock sa family */
+#define        PACKET_TAG_IPOPTIONS                    27 /* Saved IP options */
+#define        PACKET_TAG_CARP                         28 /* CARP info */
+
+#endif /* !_SYS_MBUF_H_ */
diff --git a/dummynet/include/sys/module.h b/dummynet/include/sys/module.h
new file mode 100644 (file)
index 0000000..5296517
--- /dev/null
@@ -0,0 +1,42 @@
+/*
+ * trivial module support
+ */
+#ifndef _SYS_MODULE_H_
+#define _SYS_MODULE_H_
+typedef struct module *module_t;
+typedef int (*modeventhand_t)(module_t, int /* modeventtype_t */, void *);
+typedef enum modeventtype {
+        MOD_LOAD,
+        MOD_UNLOAD,
+        MOD_SHUTDOWN,
+        MOD_QUIESCE
+} modeventtype_t;
+typedef struct moduledata {
+        const char      *name;          /* module name */
+        modeventhand_t  evhand;         /* event handler */
+        void            *priv;          /* extra data */
+} moduledata_t;
+
+int my_mod_register(struct moduledata *mod, const char *name, int order);
+/*
+ * Hook the module descriptor, md, into our list of things to do.
+ * We should in principle respect the order of loading.
+ *
+ * XXX use the gcc .init functions
+ */
+#define DECLARE_MODULE(a, md, c,d)                             \
+    moduledata_t *moddesc_##a = &md;
+
+/*
+ * XXX MODULE_VERSION is defined in linux too
+ */
+#define MODULE_DEPEND(a,b,c,d,e)
+#if defined( __linux__ ) || defined( _WIN32 )
+#undef MODULE_VERSION
+#define MODULE_VERSION(a,b)
+#endif
+
+#endif /* _SYS_MODULE_H_ */
+
diff --git a/dummynet/include/sys/param.h b/dummynet/include/sys/param.h
new file mode 100644 (file)
index 0000000..f068998
--- /dev/null
@@ -0,0 +1,11 @@
+#ifndef _SYS_PARAM_H_
+#define _SYS_PARAM_H_
+
+/*
+ * number of additional groups
+ */
+#ifndef LINUX_24
+#define NGROUPS                16
+#endif
+
+#endif /* _SYS_PARAM_H_ */
diff --git a/dummynet/include/sys/queue.h b/dummynet/include/sys/queue.h
new file mode 100644 (file)
index 0000000..8f06f17
--- /dev/null
@@ -0,0 +1,620 @@
+/*-
+ * Copyright (c) 1991, 1993
+ *     The Regents of the University of California.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *     @(#)queue.h     8.5 (Berkeley) 8/20/94
+ * $FreeBSD: src/sys/sys/queue.h,v 1.68 2006/10/24 11:20:29 ru Exp $
+ */
+
+#ifndef _SYS_QUEUE_H_
+#define        _SYS_QUEUE_H_
+
+//#include <sys/cdefs.h>
+
+/*
+ * This file defines four types of data structures: singly-linked lists,
+ * singly-linked tail queues, lists and tail queues.
+ *
+ * A singly-linked list is headed by a single forward pointer. The elements
+ * are singly linked for minimum space and pointer manipulation overhead at
+ * the expense of O(n) removal for arbitrary elements. New elements can be
+ * added to the list after an existing element or at the head of the list.
+ * Elements being removed from the head of the list should use the explicit
+ * macro for this purpose for optimum efficiency. A singly-linked list may
+ * only be traversed in the forward direction.  Singly-linked lists are ideal
+ * for applications with large datasets and few or no removals or for
+ * implementing a LIFO queue.
+ *
+ * A singly-linked tail queue is headed by a pair of pointers, one to the
+ * head of the list and the other to the tail of the list. The elements are
+ * singly linked for minimum space and pointer manipulation overhead at the
+ * expense of O(n) removal for arbitrary elements. New elements can be added
+ * to the list after an existing element, at the head of the list, or at the
+ * end of the list. Elements being removed from the head of the tail queue
+ * should use the explicit macro for this purpose for optimum efficiency.
+ * A singly-linked tail queue may only be traversed in the forward direction.
+ * Singly-linked tail queues are ideal for applications with large datasets
+ * and few or no removals or for implementing a FIFO queue.
+ *
+ * A list is headed by a single forward pointer (or an array of forward
+ * pointers for a hash table header). The elements are doubly linked
+ * so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before
+ * or after an existing element or at the head of the list. A list
+ * may only be traversed in the forward direction.
+ *
+ * A tail queue is headed by a pair of pointers, one to the head of the
+ * list and the other to the tail of the list. The elements are doubly
+ * linked so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before or
+ * after an existing element, at the head of the list, or at the end of
+ * the list. A tail queue may be traversed in either direction.
+ *
+ * For details on the use of these macros, see the queue(3) manual page.
+ *
+ *
+ *                             SLIST   LIST    STAILQ  TAILQ
+ * _HEAD                       +       +       +       +
+ * _HEAD_INITIALIZER           +       +       +       +
+ * _ENTRY                      +       +       +       +
+ * _INIT                       +       +       +       +
+ * _EMPTY                      +       +       +       +
+ * _FIRST                      +       +       +       +
+ * _NEXT                       +       +       +       +
+ * _PREV                       -       -       -       +
+ * _LAST                       -       -       +       +
+ * _FOREACH                    +       +       +       +
+ * _FOREACH_SAFE               +       +       +       +
+ * _FOREACH_REVERSE            -       -       -       +
+ * _FOREACH_REVERSE_SAFE       -       -       -       +
+ * _INSERT_HEAD                        +       +       +       +
+ * _INSERT_BEFORE              -       +       -       +
+ * _INSERT_AFTER               +       +       +       +
+ * _INSERT_TAIL                        -       -       +       +
+ * _CONCAT                     -       -       +       +
+ * _REMOVE_HEAD                        +       -       +       -
+ * _REMOVE                     +       +       +       +
+ *
+ */
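+/*
+ * A minimal usage sketch (the names foo, foolist, entries and f are
+ * illustrative only, not part of this header): declare a tail queue
+ * of struct foo, initialize it, append an element through its link
+ * field, then walk the queue.
+ *
+ *	struct foo {
+ *		int value;
+ *		TAILQ_ENTRY(foo) entries;
+ *	};
+ *	TAILQ_HEAD(foolist, foo) head = TAILQ_HEAD_INITIALIZER(head);
+ *
+ *	struct foo *f = malloc(sizeof(*f));
+ *	f->value = 42;
+ *	TAILQ_INSERT_TAIL(&head, f, entries);
+ *	TAILQ_FOREACH(f, &head, entries)
+ *		printf("%d\n", f->value);
+ */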
+#ifdef QUEUE_MACRO_DEBUG
+/* Store the last 2 places the queue element or head was altered */
+struct qm_trace {
+       char * lastfile;
+       int lastline;
+       char * prevfile;
+       int prevline;
+};
+
+#define        TRACEBUF        struct qm_trace trace;
+#define        TRASHIT(x)      do {(x) = (void *)-1;} while (0)
+
+#define        QMD_TRACE_HEAD(head) do {                                       \
+       (head)->trace.prevline = (head)->trace.lastline;                \
+       (head)->trace.prevfile = (head)->trace.lastfile;                \
+       (head)->trace.lastline = __LINE__;                              \
+       (head)->trace.lastfile = __FILE__;                              \
+} while (0)
+
+#define        QMD_TRACE_ELEM(elem) do {                                       \
+       (elem)->trace.prevline = (elem)->trace.lastline;                \
+       (elem)->trace.prevfile = (elem)->trace.lastfile;                \
+       (elem)->trace.lastline = __LINE__;                              \
+       (elem)->trace.lastfile = __FILE__;                              \
+} while (0)
+
+#else
+#define        QMD_TRACE_ELEM(elem)
+#define        QMD_TRACE_HEAD(head)
+#define        TRACEBUF
+#define        TRASHIT(x)
+#endif /* QUEUE_MACRO_DEBUG */
+
+/*
+ * Singly-linked List declarations.
+ */
+#define        SLIST_HEAD(name, type)                                          \
+struct name {                                                          \
+       struct type *slh_first; /* first element */                     \
+}
+
+#define        SLIST_HEAD_INITIALIZER(head)                                    \
+       { NULL }
+
+#define        SLIST_ENTRY(type)                                               \
+struct {                                                               \
+       struct type *sle_next;  /* next element */                      \
+}
+
+/*
+ * Singly-linked List functions.
+ */
+#define        SLIST_EMPTY(head)       ((head)->slh_first == NULL)
+
+#define        SLIST_FIRST(head)       ((head)->slh_first)
+
+#define        SLIST_FOREACH(var, head, field)                                 \
+       for ((var) = SLIST_FIRST((head));                               \
+           (var);                                                      \
+           (var) = SLIST_NEXT((var), field))
+
+#define        SLIST_FOREACH_SAFE(var, head, field, tvar)                      \
+       for ((var) = SLIST_FIRST((head));                               \
+           (var) && ((tvar) = SLIST_NEXT((var), field), 1);            \
+           (var) = (tvar))
+
+#define        SLIST_FOREACH_PREVPTR(var, varp, head, field)                   \
+       for ((varp) = &SLIST_FIRST((head));                             \
+           ((var) = *(varp)) != NULL;                                  \
+           (varp) = &SLIST_NEXT((var), field))
+
+#define        SLIST_INIT(head) do {                                           \
+       SLIST_FIRST((head)) = NULL;                                     \
+} while (0)
+
+#define        SLIST_INSERT_AFTER(slistelm, elm, field) do {                   \
+       SLIST_NEXT((elm), field) = SLIST_NEXT((slistelm), field);       \
+       SLIST_NEXT((slistelm), field) = (elm);                          \
+} while (0)
+
+#define        SLIST_INSERT_HEAD(head, elm, field) do {                        \
+       SLIST_NEXT((elm), field) = SLIST_FIRST((head));                 \
+       SLIST_FIRST((head)) = (elm);                                    \
+} while (0)
+
+#define        SLIST_NEXT(elm, field)  ((elm)->field.sle_next)
+
+#define        SLIST_REMOVE(head, elm, type, field) do {                       \
+       if (SLIST_FIRST((head)) == (elm)) {                             \
+               SLIST_REMOVE_HEAD((head), field);                       \
+       }                                                               \
+       else {                                                          \
+               struct type *curelm = SLIST_FIRST((head));              \
+               while (SLIST_NEXT(curelm, field) != (elm))              \
+                       curelm = SLIST_NEXT(curelm, field);             \
+               SLIST_NEXT(curelm, field) =                             \
+                   SLIST_NEXT(SLIST_NEXT(curelm, field), field);       \
+       }                                                               \
+       TRASHIT((elm)->field.sle_next);                                 \
+} while (0)
+
+#define        SLIST_REMOVE_HEAD(head, field) do {                             \
+       SLIST_FIRST((head)) = SLIST_NEXT(SLIST_FIRST((head)), field);   \
+} while (0)
+
+/*
+ * Singly-linked Tail queue declarations.
+ */
+#define        STAILQ_HEAD(name, type)                                         \
+struct name {                                                          \
+       struct type *stqh_first;/* first element */                     \
+       struct type **stqh_last;/* addr of last next element */         \
+}
+
+#define        STAILQ_HEAD_INITIALIZER(head)                                   \
+       { NULL, &(head).stqh_first }
+
+#define        STAILQ_ENTRY(type)                                              \
+struct {                                                               \
+       struct type *stqe_next; /* next element */                      \
+}
+
+/*
+ * Singly-linked Tail queue functions.
+ */
+#define        STAILQ_CONCAT(head1, head2) do {                                \
+       if (!STAILQ_EMPTY((head2))) {                                   \
+               *(head1)->stqh_last = (head2)->stqh_first;              \
+               (head1)->stqh_last = (head2)->stqh_last;                \
+               STAILQ_INIT((head2));                                   \
+       }                                                               \
+} while (0)
+
+#define        STAILQ_EMPTY(head)      ((head)->stqh_first == NULL)
+
+#define        STAILQ_FIRST(head)      ((head)->stqh_first)
+
+#define        STAILQ_FOREACH(var, head, field)                                \
+       for((var) = STAILQ_FIRST((head));                               \
+          (var);                                                       \
+          (var) = STAILQ_NEXT((var), field))
+
+
+#define        STAILQ_FOREACH_SAFE(var, head, field, tvar)                     \
+       for ((var) = STAILQ_FIRST((head));                              \
+           (var) && ((tvar) = STAILQ_NEXT((var), field), 1);           \
+           (var) = (tvar))
+
+#define        STAILQ_INIT(head) do {                                          \
+       STAILQ_FIRST((head)) = NULL;                                    \
+       (head)->stqh_last = &STAILQ_FIRST((head));                      \
+} while (0)
+
+#define        STAILQ_INSERT_AFTER(head, tqelm, elm, field) do {               \
+       if ((STAILQ_NEXT((elm), field) = STAILQ_NEXT((tqelm), field)) == NULL)\
+               (head)->stqh_last = &STAILQ_NEXT((elm), field);         \
+       STAILQ_NEXT((tqelm), field) = (elm);                            \
+} while (0)
+
+#define        STAILQ_INSERT_HEAD(head, elm, field) do {                       \
+       if ((STAILQ_NEXT((elm), field) = STAILQ_FIRST((head))) == NULL) \
+               (head)->stqh_last = &STAILQ_NEXT((elm), field);         \
+       STAILQ_FIRST((head)) = (elm);                                   \
+} while (0)
+
+#define        STAILQ_INSERT_TAIL(head, elm, field) do {                       \
+       STAILQ_NEXT((elm), field) = NULL;                               \
+       *(head)->stqh_last = (elm);                                     \
+       (head)->stqh_last = &STAILQ_NEXT((elm), field);                 \
+} while (0)
+
+#define        STAILQ_LAST(head, type, field)                                  \
+       (STAILQ_EMPTY((head)) ?                                         \
+               NULL :                                                  \
+               ((struct type *)(void *)                                \
+               ((char *)((head)->stqh_last) - __offsetof(struct type, field))))
+
+#define        STAILQ_NEXT(elm, field) ((elm)->field.stqe_next)
+
+#define        STAILQ_REMOVE(head, elm, type, field) do {                      \
+       if (STAILQ_FIRST((head)) == (elm)) {                            \
+               STAILQ_REMOVE_HEAD((head), field);                      \
+       }                                                               \
+       else {                                                          \
+               struct type *curelm = STAILQ_FIRST((head));             \
+               while (STAILQ_NEXT(curelm, field) != (elm))             \
+                       curelm = STAILQ_NEXT(curelm, field);            \
+               if ((STAILQ_NEXT(curelm, field) =                       \
+                    STAILQ_NEXT(STAILQ_NEXT(curelm, field), field)) == NULL)\
+                       (head)->stqh_last = &STAILQ_NEXT((curelm), field);\
+       }                                                               \
+       TRASHIT((elm)->field.stqe_next);                                \
+} while (0)
+
+#define        STAILQ_REMOVE_HEAD(head, field) do {                            \
+       if ((STAILQ_FIRST((head)) =                                     \
+            STAILQ_NEXT(STAILQ_FIRST((head)), field)) == NULL)         \
+               (head)->stqh_last = &STAILQ_FIRST((head));              \
+} while (0)
+
+#ifndef LIST_HEAD
+/*
+ * List declarations.
+ */
+#define        LIST_HEAD(name, type)                                           \
+struct name {                                                          \
+       struct type *lh_first;  /* first element */                     \
+}
+
+#define        LIST_HEAD_INITIALIZER(head)                                     \
+       { NULL }
+
+#define        LIST_ENTRY(type)                                                \
+struct {                                                               \
+       struct type *le_next;   /* next element */                      \
+       struct type **le_prev;  /* address of previous next element */  \
+}
+
+/*
+ * List functions.
+ */
+
+#if (defined(_KERNEL) && defined(INVARIANTS))
+#define        QMD_LIST_CHECK_HEAD(head, field) do {                           \
+       if (LIST_FIRST((head)) != NULL &&                               \
+           LIST_FIRST((head))->field.le_prev !=                        \
+            &LIST_FIRST((head)))                                       \
+               panic("Bad list head %p first->prev != head", (head));  \
+} while (0)
+
+#define        QMD_LIST_CHECK_NEXT(elm, field) do {                            \
+       if (LIST_NEXT((elm), field) != NULL &&                          \
+           LIST_NEXT((elm), field)->field.le_prev !=                   \
+            &((elm)->field.le_next))                                   \
+               panic("Bad link elm %p next->prev != elm", (elm));      \
+} while (0)
+
+#define        QMD_LIST_CHECK_PREV(elm, field) do {                            \
+       if (*(elm)->field.le_prev != (elm))                             \
+               panic("Bad link elm %p prev->next != elm", (elm));      \
+} while (0)
+#else
+#define        QMD_LIST_CHECK_HEAD(head, field)
+#define        QMD_LIST_CHECK_NEXT(elm, field)
+#define        QMD_LIST_CHECK_PREV(elm, field)
+#endif /* (_KERNEL && INVARIANTS) */
+
+#define        LIST_EMPTY(head)        ((head)->lh_first == NULL)
+
+#define        LIST_FIRST(head)        ((head)->lh_first)
+
+#define        LIST_FOREACH(var, head, field)                                  \
+       for ((var) = LIST_FIRST((head));                                \
+           (var);                                                      \
+           (var) = LIST_NEXT((var), field))
+
+#define        LIST_FOREACH_SAFE(var, head, field, tvar)                       \
+       for ((var) = LIST_FIRST((head));                                \
+           (var) && ((tvar) = LIST_NEXT((var), field), 1);             \
+           (var) = (tvar))
+
+#define        LIST_INIT(head) do {                                            \
+       LIST_FIRST((head)) = NULL;                                      \
+} while (0)
+
+#define        LIST_INSERT_AFTER(listelm, elm, field) do {                     \
+       QMD_LIST_CHECK_NEXT(listelm, field);                            \
+       if ((LIST_NEXT((elm), field) = LIST_NEXT((listelm), field)) != NULL)\
+               LIST_NEXT((listelm), field)->field.le_prev =            \
+                   &LIST_NEXT((elm), field);                           \
+       LIST_NEXT((listelm), field) = (elm);                            \
+       (elm)->field.le_prev = &LIST_NEXT((listelm), field);            \
+} while (0)
+
+#define        LIST_INSERT_BEFORE(listelm, elm, field) do {                    \
+       QMD_LIST_CHECK_PREV(listelm, field);                            \
+       (elm)->field.le_prev = (listelm)->field.le_prev;                \
+       LIST_NEXT((elm), field) = (listelm);                            \
+       *(listelm)->field.le_prev = (elm);                              \
+       (listelm)->field.le_prev = &LIST_NEXT((elm), field);            \
+} while (0)
+
+#define        LIST_INSERT_HEAD(head, elm, field) do {                         \
+       QMD_LIST_CHECK_HEAD((head), field);                             \
+       if ((LIST_NEXT((elm), field) = LIST_FIRST((head))) != NULL)     \
+               LIST_FIRST((head))->field.le_prev = &LIST_NEXT((elm), field);\
+       LIST_FIRST((head)) = (elm);                                     \
+       (elm)->field.le_prev = &LIST_FIRST((head));                     \
+} while (0)
+
+#define        LIST_NEXT(elm, field)   ((elm)->field.le_next)
+
+#define        LIST_REMOVE(elm, field) do {                                    \
+       QMD_LIST_CHECK_NEXT(elm, field);                                \
+       QMD_LIST_CHECK_PREV(elm, field);                                \
+       if (LIST_NEXT((elm), field) != NULL)                            \
+               LIST_NEXT((elm), field)->field.le_prev =                \
+                   (elm)->field.le_prev;                               \
+       *(elm)->field.le_prev = LIST_NEXT((elm), field);                \
+       TRASHIT((elm)->field.le_next);                                  \
+       TRASHIT((elm)->field.le_prev);                                  \
+} while (0)
+#endif /* LIST_HEAD */
+
+/*
+ * Tail queue declarations.
+ */
+#define        TAILQ_HEAD(name, type)                                          \
+struct name {                                                          \
+       struct type *tqh_first; /* first element */                     \
+       struct type **tqh_last; /* addr of last next element */         \
+       TRACEBUF                                                        \
+}
+
+#define        TAILQ_HEAD_INITIALIZER(head)                                    \
+       { NULL, &(head).tqh_first }
+
+#define        TAILQ_ENTRY(type)                                               \
+struct {                                                               \
+       struct type *tqe_next;  /* next element */                      \
+       struct type **tqe_prev; /* address of previous next element */  \
+       TRACEBUF                                                        \
+}
+
+/*
+ * Tail queue functions.
+ */
+#if (defined(_KERNEL) && defined(INVARIANTS))
+#define        QMD_TAILQ_CHECK_HEAD(head, field) do {                          \
+       if (!TAILQ_EMPTY(head) &&                                       \
+           TAILQ_FIRST((head))->field.tqe_prev !=                      \
+            &TAILQ_FIRST((head)))                                      \
+               panic("Bad tailq head %p first->prev != head", (head)); \
+} while (0)
+
+#define        QMD_TAILQ_CHECK_TAIL(head, field) do {                          \
+       if (*(head)->tqh_last != NULL)                                  \
+               panic("Bad tailq NEXT(%p->tqh_last) != NULL", (head));  \
+} while (0)
+
+#define        QMD_TAILQ_CHECK_NEXT(elm, field) do {                           \
+       if (TAILQ_NEXT((elm), field) != NULL &&                         \
+           TAILQ_NEXT((elm), field)->field.tqe_prev !=                 \
+            &((elm)->field.tqe_next))                                  \
+               panic("Bad link elm %p next->prev != elm", (elm));      \
+} while (0)
+
+#define        QMD_TAILQ_CHECK_PREV(elm, field) do {                           \
+       if (*(elm)->field.tqe_prev != (elm))                            \
+               panic("Bad link elm %p prev->next != elm", (elm));      \
+} while (0)
+#else
+#define        QMD_TAILQ_CHECK_HEAD(head, field)
+#define        QMD_TAILQ_CHECK_TAIL(head, field)
+#define        QMD_TAILQ_CHECK_NEXT(elm, field)
+#define        QMD_TAILQ_CHECK_PREV(elm, field)
+#endif /* (_KERNEL && INVARIANTS) */
+
+#define        TAILQ_CONCAT(head1, head2, field) do {                          \
+       if (!TAILQ_EMPTY(head2)) {                                      \
+               *(head1)->tqh_last = (head2)->tqh_first;                \
+               (head2)->tqh_first->field.tqe_prev = (head1)->tqh_last; \
+               (head1)->tqh_last = (head2)->tqh_last;                  \
+               TAILQ_INIT((head2));                                    \
+               QMD_TRACE_HEAD(head1);                                  \
+               QMD_TRACE_HEAD(head2);                                  \
+       }                                                               \
+} while (0)
+
+#define        TAILQ_EMPTY(head)       ((head)->tqh_first == NULL)
+
+#define        TAILQ_FIRST(head)       ((head)->tqh_first)
+
+#define        TAILQ_FOREACH(var, head, field)                                 \
+       for ((var) = TAILQ_FIRST((head));                               \
+           (var);                                                      \
+           (var) = TAILQ_NEXT((var), field))
+
+#define        TAILQ_FOREACH_SAFE(var, head, field, tvar)                      \
+       for ((var) = TAILQ_FIRST((head));                               \
+           (var) && ((tvar) = TAILQ_NEXT((var), field), 1);            \
+           (var) = (tvar))
+
+#define        TAILQ_FOREACH_REVERSE(var, head, headname, field)               \
+       for ((var) = TAILQ_LAST((head), headname);                      \
+           (var);                                                      \
+           (var) = TAILQ_PREV((var), headname, field))
+
+#define        TAILQ_FOREACH_REVERSE_SAFE(var, head, headname, field, tvar)    \
+       for ((var) = TAILQ_LAST((head), headname);                      \
+           (var) && ((tvar) = TAILQ_PREV((var), headname, field), 1);  \
+           (var) = (tvar))
+
+#define        TAILQ_INIT(head) do {                                           \
+       TAILQ_FIRST((head)) = NULL;                                     \
+       (head)->tqh_last = &TAILQ_FIRST((head));                        \
+       QMD_TRACE_HEAD(head);                                           \
+} while (0)
+
+#define        TAILQ_INSERT_AFTER(head, listelm, elm, field) do {              \
+       QMD_TAILQ_CHECK_NEXT(listelm, field);                           \
+       if ((TAILQ_NEXT((elm), field) = TAILQ_NEXT((listelm), field)) != NULL)\
+               TAILQ_NEXT((elm), field)->field.tqe_prev =              \
+                   &TAILQ_NEXT((elm), field);                          \
+       else {                                                          \
+               (head)->tqh_last = &TAILQ_NEXT((elm), field);           \
+               QMD_TRACE_HEAD(head);                                   \
+       }                                                               \
+       TAILQ_NEXT((listelm), field) = (elm);                           \
+       (elm)->field.tqe_prev = &TAILQ_NEXT((listelm), field);          \
+       QMD_TRACE_ELEM(&(elm)->field);                                  \
+       QMD_TRACE_ELEM(&listelm->field);                                \
+} while (0)
+
+#define        TAILQ_INSERT_BEFORE(listelm, elm, field) do {                   \
+       QMD_TAILQ_CHECK_PREV(listelm, field);                           \
+       (elm)->field.tqe_prev = (listelm)->field.tqe_prev;              \
+       TAILQ_NEXT((elm), field) = (listelm);                           \
+       *(listelm)->field.tqe_prev = (elm);                             \
+       (listelm)->field.tqe_prev = &TAILQ_NEXT((elm), field);          \
+       QMD_TRACE_ELEM(&(elm)->field);                                  \
+       QMD_TRACE_ELEM(&listelm->field);                                \
+} while (0)
+
+#define        TAILQ_INSERT_HEAD(head, elm, field) do {                        \
+       QMD_TAILQ_CHECK_HEAD(head, field);                              \
+       if ((TAILQ_NEXT((elm), field) = TAILQ_FIRST((head))) != NULL)   \
+               TAILQ_FIRST((head))->field.tqe_prev =                   \
+                   &TAILQ_NEXT((elm), field);                          \
+       else                                                            \
+               (head)->tqh_last = &TAILQ_NEXT((elm), field);           \
+       TAILQ_FIRST((head)) = (elm);                                    \
+       (elm)->field.tqe_prev = &TAILQ_FIRST((head));                   \
+       QMD_TRACE_HEAD(head);                                           \
+       QMD_TRACE_ELEM(&(elm)->field);                                  \
+} while (0)
+
+#define        TAILQ_INSERT_TAIL(head, elm, field) do {                        \
+       QMD_TAILQ_CHECK_TAIL(head, field);                              \
+       TAILQ_NEXT((elm), field) = NULL;                                \
+       (elm)->field.tqe_prev = (head)->tqh_last;                       \
+       *(head)->tqh_last = (elm);                                      \
+       (head)->tqh_last = &TAILQ_NEXT((elm), field);                   \
+       QMD_TRACE_HEAD(head);                                           \
+       QMD_TRACE_ELEM(&(elm)->field);                                  \
+} while (0)
+
+#define        TAILQ_LAST(head, headname)                                      \
+       (*(((struct headname *)((head)->tqh_last))->tqh_last))
+
+#define        TAILQ_NEXT(elm, field) ((elm)->field.tqe_next)
+
+#define        TAILQ_PREV(elm, headname, field)                                \
+       (*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))
+
+#define        TAILQ_REMOVE(head, elm, field) do {                             \
+       QMD_TAILQ_CHECK_NEXT(elm, field);                               \
+       QMD_TAILQ_CHECK_PREV(elm, field);                               \
+       if ((TAILQ_NEXT((elm), field)) != NULL)                         \
+               TAILQ_NEXT((elm), field)->field.tqe_prev =              \
+                   (elm)->field.tqe_prev;                              \
+       else {                                                          \
+               (head)->tqh_last = (elm)->field.tqe_prev;               \
+               QMD_TRACE_HEAD(head);                                   \
+       }                                                               \
+       *(elm)->field.tqe_prev = TAILQ_NEXT((elm), field);              \
+       TRASHIT((elm)->field.tqe_next);                                 \
+       TRASHIT((elm)->field.tqe_prev);                                 \
+       QMD_TRACE_ELEM(&(elm)->field);                                  \
+} while (0)
+
+
+#ifdef _KERNEL
+
+/*
+ * XXX insque() and remque() are an old way of handling certain queues.
+ * They bogusly assume that all queue heads look alike.
+ */
+
+struct quehead {
+       struct quehead *qh_link;
+       struct quehead *qh_rlink;
+};
+
+#ifdef __CC_SUPPORTS___INLINE
+
+static __inline void
+insque(void *a, void *b)
+{
+       struct quehead *element = (struct quehead *)a,
+                *head = (struct quehead *)b;
+
+       element->qh_link = head->qh_link;
+       element->qh_rlink = head;
+       head->qh_link = element;
+       element->qh_link->qh_rlink = element;
+}
+
+static __inline void
+remque(void *a)
+{
+       struct quehead *element = (struct quehead *)a;
+
+       element->qh_link->qh_rlink = element->qh_rlink;
+       element->qh_rlink->qh_link = element->qh_link;
+       element->qh_rlink = 0;
+}
+
+#else /* !__CC_SUPPORTS___INLINE */
+
+void   insque(void *a, void *b);
+void   remque(void *a);
+
+#endif /* __CC_SUPPORTS___INLINE */
+
+#endif /* _KERNEL */
+
+#endif /* !_SYS_QUEUE_H_ */
diff --git a/dummynet/include/sys/syslog.h b/dummynet/include/sys/syslog.h
new file mode 100644 (file)
index 0000000..143df1f
--- /dev/null
@@ -0,0 +1,7 @@
+#ifndef _SYS_SYSLOG_H_
+#define _SYS_SYSLOG_H_
+/* XXX find linux equivalent */
+#define LOG_SECURITY 0
+#define LOG_NOTICE 0
+#define LOG_DEBUG 0
+#endif /* _SYS_SYSLOG_H_ */
diff --git a/dummynet/include/sys/systm.h b/dummynet/include/sys/systm.h
new file mode 100644 (file)
index 0000000..238a7d3
--- /dev/null
@@ -0,0 +1,73 @@
+#ifndef _SYS_SYSTM_H_
+#define _SYS_SYSTM_H_
+
+#ifndef _WIN32 /* this is the linux version */
+/* callout support, in <sys/callout.h> on FreeBSD */
+/*
+ * In the Linux module, callout support is implemented with kernel timers.
+ */
+#include <linux/timer.h>
+#ifdef LINUX_24
+#include <linux/sched.h>        /* jiffies definition is here in 2.4 */
+#endif
+#define callout timer_list
+static __inline int
+callout_reset(struct callout *co, int ticks, void (*fn)(void *), void *arg)
+{
+        co->expires = jiffies + ticks;
+        co->function = (void (*)(unsigned long))fn;
+        co->data = (unsigned long)arg;
+        add_timer(co);
+        return 0;
+}
+
+#define callout_init(co, safe)  init_timer(co)
+#define callout_drain(co)       del_timer(co)
+#define callout_stop(co)        del_timer(co)
+
+#define CALLOUT_ACTIVE          0x0002 /* callout is currently active */
+#define CALLOUT_MPSAFE          0x0008 /* callout handler is mp safe */
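+
+/*
+ * A minimal sketch of the remapped API in use (the timer and handler
+ * names below are illustrative only):
+ *
+ *	static struct callout my_timer;         really a timer_list
+ *	static void my_handler(void *arg);
+ *
+ *	callout_init(&my_timer, CALLOUT_MPSAFE);        maps to init_timer()
+ *	callout_reset(&my_timer, 100, my_handler, NULL);
+ *	...
+ *	callout_stop(&my_timer);                maps to del_timer()
+ *
+ * callout_reset() arms the timer so that my_handler(NULL) runs about
+ * 100 ticks from now.
+ */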
+
+#else /* _WIN32 */
+
+/* This is the Windows stub for callout support */
+struct callout {
+       int dummy;
+};
+static __inline int
+callout_reset(struct callout *co, int ticks, void (*fn)(void *), void *arg)
+{
+       return 0;
+}
+
+#define callout_init(co, safe)
+#define callout_drain(co)
+#define callout_stop(co)
+#endif /* !_WIN32 */
+
+
+#if 0
+/* add our timer to the kernel's global timer list */
+NTSTATUS 
+  IoInitializeTimer(
+    IN PDEVICE_OBJECT  DeviceObject,
+    IN PIO_TIMER_ROUTINE  TimerRoutine,
+    IN PVOID  Context
+    );
+
+/* see the differences:
+IoInitializeDpcRequest
+       http://dsrg.mff.cuni.cz/~ceres/sch/osy/text/ch04s01s01.php
+       example http://www.beyondlogic.org/interrupts/winnt_isr_dpc.htm
+KeInitializeDpc  IRQL: Any level
+IoInitializeTimer IRQL: Passive level
+KeInitializeTimer */
+VOID 
+  KeInitializeDpc(
+    IN PRKDPC  Dpc,
+    IN PKDEFERRED_ROUTINE  DeferredRoutine,
+    IN PVOID  DeferredContext
+    );
+#endif /* commented out */ 
+
+#endif /* _SYS_SYSTM_H_ */
diff --git a/dummynet/include/sys/taskqueue.h b/dummynet/include/sys/taskqueue.h
new file mode 100644 (file)
index 0000000..f11d286
--- /dev/null
@@ -0,0 +1,24 @@
+#ifndef _SYS_TASKQUEUE_H_
+#define _SYS_TASKQUEUE_H_
+
+/*
+ * Remap taskqueue to direct calls
+ */
+struct task {
+       void (*func)(void);
+};
+#define taskqueue_enqueue(tq, ta)      (ta)->func()
+#define TASK_INIT(a,b,c,d) do {                                \
+       (a)->func = (void (*)(void))c; } while (0)
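+
+/*
+ * With the remapping above a task is just a stored function pointer,
+ * and enqueueing it runs the handler synchronously. A minimal sketch
+ * (names are illustrative; TASK_INIT casts the FreeBSD-style
+ * (void *, int) handler to void (*)(void)):
+ *
+ *	static void my_task_fn(void *ctx, int pending);
+ *	static struct task my_task;
+ *
+ *	TASK_INIT(&my_task, 0, my_task_fn, NULL);
+ *	taskqueue_enqueue(NULL, &my_task);      expands to a direct call
+ */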
+
+#define taskqueue_create_fast(_a, _b, _c, _d)  NULL
+#define taskqueue_start_threads(_a, _b, _c, _d)
+
+#define        taskqueue_drain(_a, _b) /* XXX to be completed */
+#define        taskqueue_free(_a)      /* XXX to be completed */
+
+#define PRI_MIN                 (0)             /* Highest priority. */
+#define PRI_MIN_ITHD            (PRI_MIN)
+#define PI_NET                  (PRI_MIN_ITHD + 16)
+
+#endif /* !_SYS_TASKQUEUE_H_ */
diff --git a/dummynet/ip_dummynet.c b/dummynet/ip_dummynet.c
new file mode 100644 (file)
index 0000000..bdf0a8e
--- /dev/null
@@ -0,0 +1,2406 @@
+/*-
+ * Copyright (c) 1998-2002 Luigi Rizzo, Universita` di Pisa
+ * Portions Copyright (c) 2000 Akamba Corp.
+ * All rights reserved
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: src/sys/netinet/ip_dummynet.c,v 1.110.2.4 2008/10/31 12:58:12 oleg Exp $");
+
+#define        DUMMYNET_DEBUG
+
+#include "opt_inet6.h"
+
+/*
+ * This module implements IP dummynet, a bandwidth limiter/delay emulator
+ * used in conjunction with the ipfw package.
+ * Description of the data structures used is in ip_dummynet.h
+ * Here you mainly find the following blocks of code:
+ *  + variable declarations;
+ *  + heap management functions;
+ *  + scheduler and dummynet functions;
+ *  + configuration and initialization.
+ *
+ * NOTA BENE: critical sections are protected by the "dummynet lock".
+ *
+ * Most important Changes:
+ *
+ * 011004: KLDable
+ * 010124: Fixed WF2Q behaviour
+ * 010122: Fixed spl protection.
+ * 000601: WF2Q support
+ * 000106: large rewrite, use heaps to handle very many pipes.
+ * 980513:     initial release
+ *
+ * include files marked with XXX are probably not needed
+ */
+
+#include <sys/limits.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <sys/priv.h>
+#include <sys/proc.h>
+#include <sys/socket.h>
+#include <sys/socketvar.h>
+#include <sys/time.h>
+#include <sys/sysctl.h>
+#include <sys/taskqueue.h>
+#include <net/if.h>    /* IFNAMSIZ, struct ifaddr, ifq head */
+#include <net/netisr.h>
+#include <netinet/in.h>
+#include <netinet/ip.h>                /* ip_len, ip_off */
+#include <netinet/ip_fw.h>
+#include <netinet/ip_dummynet.h>
+#include <netinet/ip_var.h>    /* ip_output(), IP_FORWARDING */
+
+#include <netinet/if_ether.h> /* various ether_* routines */
+
+#include <netinet/ip6.h>       /* for ip6_input, ip6_output prototypes */
+#include <netinet6/ip6_var.h>
+
+#include "missing.h"
+/*
+ * We keep a private variable for the simulation time, but we could
+ * probably use an existing one ("softticks" in sys/kern/kern_timeout.c)
+ */
+static dn_key curr_time = 0 ; /* current simulation time */
+
+static int dn_hash_size = 64 ; /* default hash size */
+
+/* statistics on number of queue searches and search steps */
+static long searches, search_steps ;
+static int pipe_expire = 1 ;   /* expire queue if empty */
+static int dn_max_ratio = 16 ; /* max queues/buckets ratio */
+
+static long pipe_slot_limit = 100; /* Foot shooting limit for pipe queues. */
+static long pipe_byte_limit = 1024 * 1024;
+
+static int red_lookup_depth = 256;     /* RED - default lookup table depth */
+static int red_avg_pkt_size = 512;      /* RED - default medium packet size */
+static int red_max_pkt_size = 1500;     /* RED - default max packet size */
+
+static struct timeval prev_t, t;
+static long tick_last;                 /* Last tick duration (usec). */
+static long tick_delta;                        /* Last vs standard tick diff (usec). */
+static long tick_delta_sum;            /* Accumulated tick difference (usec).*/
+static long tick_adjustment;           /* Tick adjustments done. */
+static long tick_lost;                 /* Number of lost (coalesced) ticks. */
+/* Adjusted vs non-adjusted curr_time difference (ticks). */
+static long tick_diff;
+
+static int             io_fast;
+static unsigned long   io_pkt;
+static unsigned long   io_pkt_fast;
+static unsigned long   io_pkt_drop;
+
+/*
+ * Three heaps contain queues and pipes that the scheduler handles:
+ *
+ * ready_heap contains all dn_flow_queue related to fixed-rate pipes.
+ *
+ * wfq_ready_heap contains the pipes associated with WF2Q flows.
+ *
+ * extract_heap contains pipes associated with delay lines.
+ *
+ */
+
+MALLOC_DEFINE(M_DUMMYNET, "dummynet", "dummynet heap");
+
+static struct dn_heap ready_heap, extract_heap, wfq_ready_heap ;
+
+static int     heap_init(struct dn_heap *h, int size);
+static int     heap_insert (struct dn_heap *h, dn_key key1, void *p);
+static void    heap_extract(struct dn_heap *h, void *obj);
+static void    transmit_event(struct dn_pipe *pipe, struct mbuf **head,
+                   struct mbuf **tail);
+static void    ready_event(struct dn_flow_queue *q, struct mbuf **head,
+                   struct mbuf **tail);
+static void    ready_event_wfq(struct dn_pipe *p, struct mbuf **head,
+                   struct mbuf **tail);
+
+#define        HASHSIZE        16
+#define        HASH(num)       ((((num) >> 8) ^ ((num) >> 4) ^ (num)) & 0x0f)
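+/*
+ * For example, HASH(0x123) folds 0x1 ^ 0x12 ^ 0x123 = 0x130, and the
+ * final mask with 0x0f selects bucket 0.
+ */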
+static struct dn_pipe_head     pipehash[HASHSIZE];     /* all pipes */
+static struct dn_flow_set_head flowsethash[HASHSIZE];  /* all flowsets */
+
+static struct callout dn_timeout;
+
+extern void (*bridge_dn_p)(struct mbuf *, struct ifnet *);
+
+#ifdef SYSCTL_NODE
+SYSCTL_DECL(_net_inet);
+SYSCTL_DECL(_net_inet_ip);
+
+SYSCTL_NODE(_net_inet_ip, OID_AUTO, dummynet, CTLFLAG_RW, 0, "Dummynet");
+SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, hash_size,
+    CTLFLAG_RW, &dn_hash_size, 0, "Default hash table size");
+#if 0 /* curr_time is 64 bit */
+SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, curr_time,
+    CTLFLAG_RD, &curr_time, 0, "Current tick");
+#endif
+SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, ready_heap,
+    CTLFLAG_RD, &ready_heap.size, 0, "Size of ready heap");
+SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, extract_heap,
+    CTLFLAG_RD, &extract_heap.size, 0, "Size of extract heap");
+SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, searches,
+    CTLFLAG_RD, &searches, 0, "Number of queue searches");
+SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, search_steps,
+    CTLFLAG_RD, &search_steps, 0, "Number of queue search steps");
+SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, expire,
+    CTLFLAG_RW, &pipe_expire, 0, "Expire queue if empty");
+SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, max_chain_len,
+    CTLFLAG_RW, &dn_max_ratio, 0,
+    "Max ratio between dynamic queues and buckets");
+SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_lookup_depth,
+    CTLFLAG_RD, &red_lookup_depth, 0, "Depth of RED lookup table");
+SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_avg_pkt_size,
+    CTLFLAG_RD, &red_avg_pkt_size, 0, "RED Medium packet size");
+SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_max_pkt_size,
+    CTLFLAG_RD, &red_max_pkt_size, 0, "RED Max packet size");
+SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_delta,
+    CTLFLAG_RD, &tick_delta, 0, "Last vs standard tick difference (usec).");
+SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_delta_sum,
+    CTLFLAG_RD, &tick_delta_sum, 0, "Accumulated tick difference (usec).");
+SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_adjustment,
+    CTLFLAG_RD, &tick_adjustment, 0, "Tick adjustments done.");
+SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_diff,
+    CTLFLAG_RD, &tick_diff, 0,
+    "Adjusted vs non-adjusted curr_time difference (ticks).");
+SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_lost,
+    CTLFLAG_RD, &tick_lost, 0,
+    "Number of ticks coalesced by dummynet taskqueue.");
+SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, io_fast,
+    CTLFLAG_RW, &io_fast, 0, "Enable fast dummynet io.");
+SYSCTL_ULONG(_net_inet_ip_dummynet, OID_AUTO, io_pkt,
+    CTLFLAG_RD, &io_pkt, 0,
+    "Number of packets passed to dummynet.");
+SYSCTL_ULONG(_net_inet_ip_dummynet, OID_AUTO, io_pkt_fast,
+    CTLFLAG_RD, &io_pkt_fast, 0,
+    "Number of packets bypassed dummynet scheduler.");
+SYSCTL_ULONG(_net_inet_ip_dummynet, OID_AUTO, io_pkt_drop,
+    CTLFLAG_RD, &io_pkt_drop, 0,
+    "Number of packets dropped by dummynet.");
+SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, pipe_slot_limit,
+    CTLFLAG_RW, &pipe_slot_limit, 0, "Upper limit in slots for pipe queue.");
+SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, pipe_byte_limit,
+    CTLFLAG_RW, &pipe_byte_limit, 0, "Upper limit in bytes for pipe queue.");
+#endif
+
+#ifdef DUMMYNET_DEBUG
+int    dummynet_debug = 0;
+#ifdef SYSCTL_NODE
+SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, debug, CTLFLAG_RW, &dummynet_debug,
+           0, "control debugging printfs");
+#endif
+#define        DPRINTF(X)      if (dummynet_debug) printf X
+#else
+#define        DPRINTF(X)
+#endif
+
+static struct task     dn_task;
+static struct taskqueue        *dn_tq = NULL;
+static void dummynet_task(void *, int);
+
+#if defined( __linux__ ) || defined( _WIN32 )
+static DEFINE_SPINLOCK(dummynet_mtx);
+#else
+static struct mtx dummynet_mtx;
+#endif
+#define        DUMMYNET_LOCK_INIT() \
+       mtx_init(&dummynet_mtx, "dummynet", NULL, MTX_DEF)
+#define        DUMMYNET_LOCK_DESTROY() mtx_destroy(&dummynet_mtx)
+#define        DUMMYNET_LOCK()         mtx_lock(&dummynet_mtx)
+#define        DUMMYNET_UNLOCK()       mtx_unlock(&dummynet_mtx)
+#define        DUMMYNET_LOCK_ASSERT()  mtx_assert(&dummynet_mtx, MA_OWNED)
+
+static int     config_pipe(struct dn_pipe *p);
+static int     ip_dn_ctl(struct sockopt *sopt);
+
+static void    dummynet(void *);
+static void    dummynet_flush(void);
+static void    dummynet_send(struct mbuf *);
+void           dummynet_drain(void);
+static ip_dn_io_t dummynet_io;
+static void    dn_rule_delete(void *);
+
+/*
+ * Heap management functions.
+ *
+ * In the heap, first node is element 0. Children of i are 2i+1 and 2i+2.
+ * Some macros help finding parent/children so we can optimize them.
+ *
+ * heap_init() is called to expand the heap when needed.
+ * Increment size in blocks of 16 entries.
+ * XXX failure to allocate a new element is a pretty bad failure
+ * as we basically stall a whole queue forever!!
+ * Returns 1 on error, 0 on success
+ */
+#define HEAP_FATHER(x) ( ( (x) - 1 ) / 2 )
+#define HEAP_LEFT(x) ( 2*(x) + 1 )
+#define HEAP_IS_LEFT(x) ( (x) & 1 )
+#define HEAP_RIGHT(x) ( 2*(x) + 2 )
+#define        HEAP_SWAP(a, b, buffer) { buffer = a ; a = b ; b = buffer ; }
+#define HEAP_INCREMENT 15
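+
+/*
+ * For example, with seven elements in p[0..6] the implicit tree is:
+ *
+ *                     p[0]
+ *                   /      \
+ *               p[1]        p[2]
+ *              /    \      /    \
+ *           p[3]   p[4]  p[5]   p[6]
+ *
+ * so HEAP_FATHER(5) == 2, HEAP_LEFT(2) == 5 and HEAP_RIGHT(2) == 6.
+ */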
+
+static int
+heap_init(struct dn_heap *h, int new_size)
+{
+    struct dn_heap_entry *p;
+
+    if (h->size >= new_size ) {
+       printf("dummynet: %s, Bogus call, have %d want %d\n", __func__,
+               h->size, new_size);
+       return 0 ;
+    }
+    new_size = (new_size + HEAP_INCREMENT ) & ~HEAP_INCREMENT ;
+    p = malloc(new_size * sizeof(*p), M_DUMMYNET, M_NOWAIT);
+    if (p == NULL) {
+       printf("dummynet: %s, resize %d failed\n", __func__, new_size );
+       return 1 ; /* error */
+    }
+    if (h->size > 0) {
+       bcopy(h->p, p, h->size * sizeof(*p) );
+       free(h->p, M_DUMMYNET);
+    }
+    h->p = p ;
+    h->size = new_size ;
+    return 0 ;
+}
+
+/*
+ * Insert element in heap. Normally, p != NULL, we insert p in
+ * a new position and bubble up. If p == NULL, then the element is
+ * already in place, and key is the position where to start the
+ * bubble-up.
+ * Returns 1 on failure (cannot allocate new heap entry)
+ *
+ * If offset > 0 the position (index, int) of the element in the heap is
+ * also stored in the element itself at the given offset in bytes.
+ */
+#define SET_OFFSET(heap, node) \
+    if (heap->offset > 0) \
+           *((int *)((char *)(heap->p[node].object) + heap->offset)) = node ;
+/*
+ * RESET_OFFSET is used for sanity checks. It sets offset to an invalid value.
+ */
+#define RESET_OFFSET(heap, node) \
+    if (heap->offset > 0) \
+           *((int *)((char *)(heap->p[node].object) + heap->offset)) = -1 ;
+static int
+heap_insert(struct dn_heap *h, dn_key key1, void *p)
+{
+    int son = h->elements ;
+
+    if (p == NULL)     /* data already there, set starting point */
+       son = key1 ;
+    else {             /* insert new element at the end, possibly resize */
+       son = h->elements ;
+       if (son == h->size) /* need resize... */
+           if (heap_init(h, h->elements+1) )
+               return 1 ; /* failure... */
+       h->p[son].object = p ;
+       h->p[son].key = key1 ;
+       h->elements++ ;
+    }
+    while (son > 0) {                          /* bubble up */
+       int father = HEAP_FATHER(son) ;
+       struct dn_heap_entry tmp  ;
+
+       if (DN_KEY_LT( h->p[father].key, h->p[son].key ) )
+           break ; /* found right position */
+       /* son smaller than father, swap and repeat */
+       HEAP_SWAP(h->p[son], h->p[father], tmp) ;
+       SET_OFFSET(h, son);
+       son = father ;
+    }
+    SET_OFFSET(h, son);
+    return 0 ;
+}
+
+/*
+ * remove top element from heap, or obj if obj != NULL
+ */
+static void
+heap_extract(struct dn_heap *h, void *obj)
+{
+    int child, father, max = h->elements - 1 ;
+
+    if (max < 0) {
+       printf("dummynet: warning, extract from empty heap %p\n", h);
+       return ;
+    }
+    father = 0 ; /* default: move up smallest child */
+    if (obj != NULL) { /* extract specific element, index is at offset */
+       if (h->offset <= 0)
+           panic("dummynet: heap_extract from middle not supported on this heap!!!\n");
+       father = *((int *)((char *)obj + h->offset)) ;
+       if (father < 0 || father >= h->elements) {
+           printf("dummynet: heap_extract, father %d out of bound 0..%d\n",
+               father, h->elements);
+           panic("dummynet: heap_extract");
+       }
+    }
+    RESET_OFFSET(h, father);
+    child = HEAP_LEFT(father) ;                /* left child */
+    while (child <= max) {             /* valid entry */
+       if (child != max && DN_KEY_LT(h->p[child+1].key, h->p[child].key) )
+           child = child+1 ;           /* take right child, otherwise left */
+       h->p[father] = h->p[child] ;
+       SET_OFFSET(h, father);
+       father = child ;
+       child = HEAP_LEFT(child) ;   /* left child for next loop */
+    }
+    h->elements-- ;
+    if (father != max) {
+       /*
+        * Fill hole with last entry and bubble up, reusing the insert code
+        */
+       h->p[father] = h->p[max] ;
+       heap_insert(h, father, NULL); /* this one cannot fail */
+    }
+}
+
+#if 0
+/*
+ * change object position and update references
+ * XXX this one is never used!
+ */
+static void
+heap_move(struct dn_heap *h, dn_key new_key, void *object)
+{
+    int temp;
+    int i ;
+    int max = h->elements-1 ;
+    struct dn_heap_entry buf ;
+
+    if (h->offset <= 0)
+       panic("cannot move items on this heap");
+
+    i = *((int *)((char *)object + h->offset));
+    if (DN_KEY_LT(new_key, h->p[i].key) ) { /* must move up */
+       h->p[i].key = new_key ;
+       for (; i>0 && DN_KEY_LT(new_key, h->p[(temp = HEAP_FATHER(i))].key) ;
+                i = temp ) { /* bubble up */
+           HEAP_SWAP(h->p[i], h->p[temp], buf) ;
+           SET_OFFSET(h, i);
+       }
+    } else {           /* must move down */
+       h->p[i].key = new_key ;
+       while ( (temp = HEAP_LEFT(i)) <= max ) { /* found left child */
+           if ((temp != max) && DN_KEY_GT(h->p[temp].key, h->p[temp+1].key))
+               temp++ ; /* select child with min key */
+           if (DN_KEY_GT(new_key, h->p[temp].key)) { /* go down */
+               HEAP_SWAP(h->p[i], h->p[temp], buf) ;
+               SET_OFFSET(h, i);
+           } else
+               break ;
+           i = temp ;
+       }
+    }
+    SET_OFFSET(h, i);
+}
+#endif /* heap_move, unused */
+
+/*
+ * heapify() will reorganize data inside an array to maintain the
+ * heap property. It is needed when we delete a bunch of entries.
+ */
+static void
+heapify(struct dn_heap *h)
+{
+    int i ;
+
+    for (i = 0 ; i < h->elements ; i++ )
+       heap_insert(h, i , NULL) ;
+}
+
+/*
+ * cleanup the heap and free data structure
+ */
+static void
+heap_free(struct dn_heap *h)
+{
+    if (h->size >0 )
+       free(h->p, M_DUMMYNET);
+    bzero(h, sizeof(*h) );
+}
+
+/*
+ * --- end of heap management functions ---
+ */
+
+/*
+ * Return the mbuf tag holding the dummynet state.  As an optimization
+ * this is assumed to be the first tag on the list.  If this turns out
+ * wrong we'll need to search the list.
+ */
+static struct dn_pkt_tag *
+dn_tag_get(struct mbuf *m)
+{
+    struct m_tag *mtag = m_tag_first(m);
+    KASSERT(mtag != NULL &&
+           mtag->m_tag_cookie == MTAG_ABI_COMPAT &&
+           mtag->m_tag_id == PACKET_TAG_DUMMYNET,
+           ("packet on dummynet queue w/o dummynet tag!"));
+    return (struct dn_pkt_tag *)(mtag+1);
+}
+
+/*
+ * Scheduler functions:
+ *
+ * transmit_event() is called when the delay-line needs to enter
+ * the scheduler, either because of existing pkts getting ready,
+ * or new packets entering the queue. The event handled is the delivery
+ * time of the packet.
+ *
+ * ready_event() does something similar with fixed-rate queues, and the
+ * event handled is the finish time of the head pkt.
+ *
+ * wfq_ready_event() does something similar with WF2Q queues, and the
+ * event handled is the start time of the head pkt.
+ *
+ * In all cases, we make sure that the data structures are consistent
+ * before passing pkts out, because this might trigger recursive
+ * invocations of the procedures.
+ */
+static void
+transmit_event(struct dn_pipe *pipe, struct mbuf **head, struct mbuf **tail)
+{
+       struct mbuf *m;
+       struct dn_pkt_tag *pkt;
+
+       DUMMYNET_LOCK_ASSERT();
+
+       while ((m = pipe->head) != NULL) {
+               pkt = dn_tag_get(m);
+               if (!DN_KEY_LEQ(pkt->output_time, curr_time))
+                       break;
+
+               pipe->head = m->m_nextpkt;
+               if (*tail != NULL)
+                       (*tail)->m_nextpkt = m;
+               else
+                       *head = m;
+               *tail = m;
+       }
+       if (*tail != NULL)
+               (*tail)->m_nextpkt = NULL;
+
+       /* If there are leftover packets, put into the heap for next event. */
+       if ((m = pipe->head) != NULL) {
+               pkt = dn_tag_get(m);
+               /*
+                * XXX Should check errors on heap_insert, by draining the
+                * whole pipe p and hoping in the future we are more successful.
+                */
+               heap_insert(&extract_heap, pkt->output_time, pipe);
+       }
+}
+
+#ifndef __linux__
+#define div64(a, b)    ((int64_t)(a) / (int64_t)(b))
+#endif
+#define DN_TO_DROP     0xffff
+/*
+ * Compute how many ticks we have to wait before being able to send
+ * a packet. This is computed as the "wire time" for the packet
+ * (length + extra bits), minus the credit available, scaled to ticks.
+ * Check that the result is not negative (it could be if we have
+ * too much leftover credit in q->numbytes).
+ */
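+/*
+ * For example, with hz = 1000, p->bandwidth = 1000000 bit/s, no extra
+ * bits and no leftover credit, a 1500-byte packet needs
+ * (1500*8*1000 + 1000000 - 1) / 1000000 = 12 ticks.
+ */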
+static inline dn_key
+set_ticks(struct mbuf *m, struct dn_flow_queue *q, struct dn_pipe *p)
+{
+       int64_t ret;
+
+       ret = div64( (m->m_pkthdr.len * 8 + q->extra_bits) * hz
+               - q->numbytes + p->bandwidth - 1 , p->bandwidth);
+#if 0
+       printf("%s %d extra_bits %d numb %d ret %d\n",
+               __FUNCTION__, __LINE__,
+               (int)(q->extra_bits & 0xffffffff),
+               (int)(q->numbytes & 0xffffffff),
+               (int)(ret & 0xffffffff));
+#endif
+       if (ret < 0)
+               ret = 0;
+       return ret;
+}
+
+/*
+ * Convert the additional MAC overheads/delays into an equivalent
+ * number of bits for the given data rate. The samples are in milliseconds
+ * so we need to divide by 1000.
+ */
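+/*
+ * For example, a 2 ms sample at p->bandwidth = 1000000 bit/s becomes
+ * 2 * 1000000 / 1000 = 2000 bits of simulated overhead.
+ */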
+static dn_key
+compute_extra_bits(struct mbuf *pkt, struct dn_pipe *p)
+{
+       int index;
+       dn_key extra_bits;
+
+       if (!p->samples || p->samples_no == 0)
+               return 0;
+       index  = random() % p->samples_no;
+       extra_bits = div64((dn_key)p->samples[index] * p->bandwidth, 1000);
+       if (index >= p->loss_level) {
+               struct dn_pkt_tag *dt = dn_tag_get(pkt);
+               if (dt)
+                       dt->dn_dir = DN_TO_DROP;
+       }
+       return extra_bits;
+}
+
+static void
+free_pipe(struct dn_pipe *p)
+{
+       if (p->samples)
+               free(p->samples, M_DUMMYNET);
+       free(p, M_DUMMYNET);
+}
+
+/*
+ * extract pkt from queue, compute output time (could be now)
+ * and put into delay line (p_queue)
+ */
+static void
+move_pkt(struct mbuf *pkt, struct dn_flow_queue *q, struct dn_pipe *p,
+    int len)
+{
+    struct dn_pkt_tag *dt = dn_tag_get(pkt);
+
+    q->head = pkt->m_nextpkt ;
+    q->len-- ;
+    q->len_bytes -= len ;
+
+    dt->output_time = curr_time + p->delay ;
+
+    if (p->head == NULL)
+       p->head = pkt;
+    else
+       p->tail->m_nextpkt = pkt;
+    p->tail = pkt;
+    p->tail->m_nextpkt = NULL;
+}
+
+/*
+ * ready_event() is invoked every time the queue must enter the
+ * scheduler, either because the first packet arrives, or because
+ * a previously scheduled event fired.
+ * On invocation, drain as many pkts as possible (could be 0) and then,
+ * if there are leftover packets, reinsert the queue in the scheduler.
+ */
+static void
+ready_event(struct dn_flow_queue *q, struct mbuf **head, struct mbuf **tail)
+{
+       struct mbuf *pkt;
+       struct dn_pipe *p = q->fs->pipe;
+       int p_was_empty;
+
+       DUMMYNET_LOCK_ASSERT();
+
+       if (p == NULL) {
+               printf("dummynet: ready_event- pipe is gone\n");
+               return;
+       }
+       p_was_empty = (p->head == NULL);
+
+       /*
+        * Schedule fixed-rate queues linked to this pipe:
+        * account for the bw accumulated since last scheduling, then
+        * drain as many pkts as allowed by q->numbytes and move to
+        * the delay line (in p) computing output time.
+        * bandwidth==0 (no limit) means we can drain the whole queue,
+        * setting len_scaled = 0 does the job.
+        */
+       q->numbytes += (curr_time - q->sched_time) * p->bandwidth;
+       while ((pkt = q->head) != NULL) {
+               int len = pkt->m_pkthdr.len;
+               dn_key len_scaled = p->bandwidth ? len*8*hz
+                       + q->extra_bits*hz
+                       : 0;
+
+               if (DN_KEY_GT(len_scaled, q->numbytes))
+                       break;
+               q->numbytes -= len_scaled;
+               move_pkt(pkt, q, p, len);
+               if (q->head)
+                       q->extra_bits = compute_extra_bits(q->head, p);
+       }
+       /*
+        * If we have more packets queued, schedule next ready event
+        * (can only occur when bandwidth != 0, otherwise we would have
+        * flushed the whole queue in the previous loop).
+        * To this purpose we record the current time and compute how many
+        * ticks to go for the finish time of the packet.
+        */
+       if ((pkt = q->head) != NULL) {  /* this implies bandwidth != 0 */
+               dn_key t = set_ticks(pkt, q, p); /* ticks we have to wait */
+
+               q->sched_time = curr_time;
+               heap_insert(&ready_heap, curr_time + t, (void *)q);
+               /*
+                * XXX Should check errors on heap_insert, and drain the whole
+                * queue on error hoping next time we are luckier.
+                */
+       } else          /* RED needs to know when the queue becomes empty. */
+               q->q_time = curr_time;
+
+       /*
+        * If the delay line was empty call transmit_event() now.
+        * Otherwise, the scheduler will take care of it.
+        */
+       if (p_was_empty)
+               transmit_event(p, head, tail);
+}
+
+/*
+ * Called when we can transmit packets on WF2Q queues. Take pkts out of
+ * the queues at their start time, and enqueue into the delay line.
+ * Packets are drained until p->numbytes < 0. As long as
+ * len_scaled >= p->numbytes, the packet goes into the delay line
+ * with a deadline p->delay. For the last packet, if p->numbytes < 0,
+ * there is an additional delay.
+ */
+static void
+ready_event_wfq(struct dn_pipe *p, struct mbuf **head, struct mbuf **tail)
+{
+       int p_was_empty = (p->head == NULL);
+       struct dn_heap *sch = &(p->scheduler_heap);
+       struct dn_heap *neh = &(p->not_eligible_heap);
+       int64_t p_numbytes = p->numbytes;
+
+       DUMMYNET_LOCK_ASSERT();
+
+       if (p->if_name[0] == 0)         /* tx clock is simulated */
+               /*
+                * Since result may not fit into p->numbytes (32bit) we
+                * are using 64bit var here.
+                */
+               p_numbytes += (curr_time - p->sched_time) * p->bandwidth;
+       else {  /*
+                * tx clock is for real,
+                * the ifq must be empty or this is a NOP.
+                * XXX not supported in Linux
+                */
+               if (1) // p->ifp && p->ifp->if_snd.ifq_head != NULL)
+                       return;
+               else {
+                       DPRINTF(("dummynet: pipe %d ready from %s --\n",
+                           p->pipe_nr, p->if_name));
+               }
+       }
+
+       /*
+        * While we have backlogged traffic AND credit, we need to do
+        * something on the queue.
+        */
+       while (p_numbytes >= 0 && (sch->elements > 0 || neh->elements > 0)) {
+               if (sch->elements > 0) {
+                       /* Have some eligible pkts to send out. */
+                       struct dn_flow_queue *q = sch->p[0].object;
+                       struct mbuf *pkt = q->head;
+                       struct dn_flow_set *fs = q->fs;
+                       uint64_t len = pkt->m_pkthdr.len;
+                       int len_scaled = p->bandwidth ? len * 8 * hz : 0;
+
+                       heap_extract(sch, NULL); /* Remove queue from heap. */
+                       p_numbytes -= len_scaled;
+                       move_pkt(pkt, q, p, len);
+
+                       p->V += div64((len << MY_M), p->sum);   /* Update V. */
+                       q->S = q->F;                    /* Update start time. */
+                       if (q->len == 0) {
+                               /* Flow not backlogged any more. */
+                               fs->backlogged--;
+                               heap_insert(&(p->idle_heap), q->F, q);
+                       } else {
+                               /* Still backlogged. */
+
+                               /*
+                                * Update F and position in backlogged queue,
+                                * then put flow in not_eligible_heap
+                                * (we will fix this later).
+                                */
+                               len = (q->head)->m_pkthdr.len;
+                               q->F += div64((len << MY_M), fs->weight);
+                               if (DN_KEY_LEQ(q->S, p->V))
+                                       heap_insert(neh, q->S, q);
+                               else
+                                       heap_insert(sch, q->F, q);
+                       }
+               }
+               /*
+                * Now compute V = max(V, min(S_i)). Remember that all elements
+                * in sch have by definition S_i <= V so if sch is not empty,
+                * V is surely the max and we must not update it. Conversely,
+                * if sch is empty we only need to look at neh.
+                */
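+               /*
+                * E.g. (illustrative numbers): if V = 100 and sch is empty
+                * while neh holds flows with S = 120 and S = 150, V becomes
+                * max(100, 120) = 120 and the flow with S = 120 is then
+                * moved to sch by the loop below.
+                */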
+               if (sch->elements == 0 && neh->elements > 0)
+                       p->V = MAX64(p->V, neh->p[0].key);
+               /* Move from neh to sch any packets that have become eligible */
+               while (neh->elements > 0 && DN_KEY_LEQ(neh->p[0].key, p->V)) {
+                       struct dn_flow_queue *q = neh->p[0].object;
+                       heap_extract(neh, NULL);
+                       heap_insert(sch, q->F, q);
+               }
+
+               if (p->if_name[0] != '\0') { /* Tx clock is from a real thing */
+                       p_numbytes = -1;        /* Mark not ready for I/O. */
+                       break;
+               }
+       }
+       if (sch->elements == 0 && neh->elements == 0 && p_numbytes >= 0 &&
+           p->idle_heap.elements > 0) {
+               /*
+                * No traffic and no events scheduled.
+                * We can get rid of idle-heap.
+                */
+               int i;
+
+               for (i = 0; i < p->idle_heap.elements; i++) {
+                       struct dn_flow_queue *q = p->idle_heap.p[i].object;
+
+                       q->F = 0;
+                       q->S = q->F + 1;
+               }
+               p->sum = 0;
+               p->V = 0;
+               p->idle_heap.elements = 0;
+       }
+       /*
+        * If we are getting clocks from dummynet (not a real interface)
+        * and we are under credit, schedule the next ready event.
+        * Also fix the delivery time of the last packet.
+        */
+       if (p->if_name[0] == 0 && p_numbytes < 0) { /* This implies bw > 0. */
+               dn_key t = 0;           /* Number of ticks we have to wait. */
+
+               if (p->bandwidth > 0)
+                       t = div64(p->bandwidth - 1 - p_numbytes, p->bandwidth);
+               dn_tag_get(p->tail)->output_time += t;
+               p->sched_time = curr_time;
+               heap_insert(&wfq_ready_heap, curr_time + t, (void *)p);
+               /*
+                * XXX Should check errors on heap_insert, and drain the whole
+                * queue on error hoping next time we are luckier.
+                */
+       }
+
+       /* Fit (adjust if necessary) 64bit result into 32bit variable. */
+       if (p_numbytes > INT_MAX)
+               p->numbytes = INT_MAX;
+       else if (p_numbytes < INT_MIN)
+               p->numbytes = INT_MIN;
+       else
+               p->numbytes = p_numbytes;
+
+       /*
+        * If the delay line was empty call transmit_event() now.
+        * Otherwise, the scheduler will take care of it.
+        */
+       if (p_was_empty)
+               transmit_event(p, head, tail);
+}
+
+/*
+ * This is called one tick after the previous run, and is used to
+ * schedule the next run.
+ */
+static void
+dummynet(void * __unused unused)
+{
+
+       taskqueue_enqueue(dn_tq, &dn_task);
+}
+
+/*
+ * The main dummynet processing function.
+ */
+static void
+dummynet_task(void *context, int pending)
+{
+       struct mbuf *head = NULL, *tail = NULL;
+       struct dn_pipe *pipe;
+       struct dn_heap *heaps[3];
+       struct dn_heap *h;
+       void *p;        /* generic parameter to handler */
+       int i;
+       struct timeval t;       /* current time, for the tick bookkeeping below */
+
+       DUMMYNET_LOCK();
+
+       heaps[0] = &ready_heap;                 /* fixed-rate queues */
+       heaps[1] = &wfq_ready_heap;             /* wfq queues */
+       heaps[2] = &extract_heap;               /* delay line */
+
+       /* Update the number of lost (coalesced) ticks. */
+       tick_lost += pending - 1;
+       getmicrouptime(&t);
+       /* Last tick duration (usec). */
+       tick_last = (t.tv_sec - prev_t.tv_sec) * 1000000 +
+           (t.tv_usec - prev_t.tv_usec);
+       /* Last tick vs standard tick difference (usec). */
+       tick_delta = (tick_last * hz - 1000000) / hz;
+       /* Accumulated tick difference (usec). */
+       tick_delta_sum += tick_delta;
+       prev_t = t;
+       /*
+        * Adjust curr_time if accumulated tick difference greater than
+        * 'standard' tick. Since curr_time should be monotonically increasing,
+        * we do positive adjustment as required and throttle curr_time in
+        * case of negative adjustment.
+        */
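+       /*
+        * E.g. with hz = 1000 (tick = 1000 usec): if every tick actually
+        * lasts 1010 usec, tick_delta is 10 usec per tick, so after
+        * roughly 100 ticks tick_delta_sum reaches a full tick and
+        * curr_time gets one extra increment below.
+        */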
+       curr_time++;
+       if (tick_delta_sum - tick >= 0) {
+               int diff = tick_delta_sum / tick;
+               curr_time += diff;
+               tick_diff += diff;
+               tick_delta_sum %= tick;
+               tick_adjustment++;
+       } else if (tick_delta_sum + tick <= 0) {
+               curr_time--;
+               tick_diff--;
+               tick_delta_sum += tick;
+               tick_adjustment++;
+       }
+
+       for (i = 0; i < 3; i++) {
+               h = heaps[i];
+               while (h->elements > 0 && DN_KEY_LEQ(h->p[0].key, curr_time)) {
+                       if (h->p[0].key > curr_time)
+                               printf("dummynet: warning, "
+                                   "heap %d is %d ticks late\n",
+                                   i, (int)(curr_time - h->p[0].key));
+                       /* store a copy before heap_extract */
+                       p = h->p[0].object;
+                       /* need to extract before processing */
+                       heap_extract(h, NULL);
+                       if (i == 0)
+                               ready_event(p, &head, &tail);
+                       else if (i == 1) {
+                               struct dn_pipe *pipe = p;
+                               if (pipe->if_name[0] != '\0')
+                                       printf("dummynet: bad ready_event_wfq "
+                                           "for pipe %s\n", pipe->if_name);
+                               else
+                                       ready_event_wfq(p, &head, &tail);
+                       } else
+                               transmit_event(p, &head, &tail);
+               }
+       }
+
+       /* Sweep pipes trying to expire idle flow_queues. */
+       for (i = 0; i < HASHSIZE; i++)
+               SLIST_FOREACH(pipe, &pipehash[i], next)
+                       if (pipe->idle_heap.elements > 0 &&
+                           DN_KEY_LT(pipe->idle_heap.p[0].key, pipe->V)) {
+                               struct dn_flow_queue *q =
+                                   pipe->idle_heap.p[0].object;
+
+                               heap_extract(&(pipe->idle_heap), NULL);
+                               /* Mark timestamp as invalid. */
+                               q->S = q->F + 1;
+                               pipe->sum -= q->fs->weight;
+                       }
+
+       DUMMYNET_UNLOCK();
+
+       if (head != NULL)
+               dummynet_send(head);
+
+       callout_reset(&dn_timeout, 1, dummynet, NULL);
+}
+
+static void
+dummynet_send(struct mbuf *m)
+{
+       struct dn_pkt_tag *pkt;
+       struct mbuf *n;
+       struct ip *ip;
+       int dst;
+
+       for (; m != NULL; m = n) {
+               n = m->m_nextpkt;
+               m->m_nextpkt = NULL;
+               if (m_tag_first(m) == NULL) {
+                       pkt = NULL; /* probably unnecessary */
+                       dst = DN_TO_DROP;
+               } else {
+                       pkt = dn_tag_get(m);
+                       dst = pkt->dn_dir;
+               }
+               switch (dst) {
+               case DN_TO_IP_OUT:
+                       ip_output(m, NULL, NULL, IP_FORWARDING, NULL, NULL);
+                       break ;
+               case DN_TO_IP_IN :
+                       ip = mtod(m, struct ip *);
+#ifndef __linux__      /* restore net format for FreeBSD */
+                       ip->ip_len = htons(ip->ip_len);
+                       ip->ip_off = htons(ip->ip_off);
+#endif
+                       netisr_dispatch(NETISR_IP, m);
+                       break;
+#ifdef INET6
+               case DN_TO_IP6_IN:
+                       netisr_dispatch(NETISR_IPV6, m);
+                       break;
+
+               case DN_TO_IP6_OUT:
+                       ip6_output(m, NULL, NULL, IPV6_FORWARDING, NULL, NULL, NULL);
+                       break;
+#endif
+               case DN_TO_IFB_FWD:
+                       if (bridge_dn_p != NULL)
+                               ((*bridge_dn_p)(m, pkt->ifp));
+                       else
+                               printf("dummynet: if_bridge not loaded\n");
+
+                       break;
+               case DN_TO_ETH_DEMUX:
+                       /*
+                        * The Ethernet code assumes the Ethernet header is
+                        * contiguous in the first mbuf header.
+                        * Ensure this is true.
+                        */
+                       if (m->m_len < ETHER_HDR_LEN &&
+                           (m = m_pullup(m, ETHER_HDR_LEN)) == NULL) {
+                               printf("dummynet/ether: pullup failed, "
+                                   "dropping packet\n");
+                               break;
+                       }
+                       ether_demux(m->m_pkthdr.rcvif, m);
+                       break;
+               case DN_TO_ETH_OUT:
+                       ether_output_frame(pkt->ifp, m);
+                       break;
+
+               case DN_TO_DROP:
+                       /* drop the packet after some time */
+#ifdef __linux__
+                       netisr_dispatch(-1, m); /* -1 drop the packet */
+#else
+                       m_freem(m);
+#endif
+                       printf("need to drop the skbuf\n");
+                       break;
+
+               default:
+                       printf("dummynet: bad switch %d!\n", pkt->dn_dir);
+                       m_freem(m);
+                       break;
+               }
+       }
+}
+
+/*
+ * Unconditionally expire empty queues in case of shortage.
+ * Returns the number of queues freed.
+ */
+static int
+expire_queues(struct dn_flow_set *fs)
+{
+    struct dn_flow_queue *q, *prev ;
+    int i, initial_elements = fs->rq_elements ;
+
+    if (fs->last_expired == time_uptime)
+       return 0 ;
+    fs->last_expired = time_uptime ;
+    for (i = 0 ; i <= fs->rq_size ; i++) /* last one is overflow */
+       for (prev=NULL, q = fs->rq[i] ; q != NULL ; )
+           if (q->head != NULL || q->S != q->F+1) {
+               prev = q ;
+               q = q->next ;
+           } else { /* entry is idle, expire it */
+               struct dn_flow_queue *old_q = q ;
+
+               if (prev != NULL)
+                   prev->next = q = q->next ;
+               else
+                   fs->rq[i] = q = q->next ;
+               fs->rq_elements-- ;
+               free(old_q, M_DUMMYNET);
+           }
+    return initial_elements - fs->rq_elements ;
+}
+
+/*
+ * If room, create a new queue and put at head of slot i;
+ * otherwise, create or use the default queue.
+ */
+static struct dn_flow_queue *
+create_queue(struct dn_flow_set *fs, int i)
+{
+       struct dn_flow_queue *q;
+
+       if (fs->rq_elements > fs->rq_size * dn_max_ratio &&
+           expire_queues(fs) == 0) {
+               /* No way to get room, use or create overflow queue. */
+               i = fs->rq_size;
+               if (fs->rq[i] != NULL)
+                   return fs->rq[i];
+       }
+       q = malloc(sizeof(*q), M_DUMMYNET, M_NOWAIT | M_ZERO);
+       if (q == NULL) {
+               printf("dummynet: sorry, cannot allocate queue for new flow\n");
+               return (NULL);
+       }
+       q->fs = fs;
+       q->hash_slot = i;
+       q->next = fs->rq[i];
+       q->S = q->F + 1;        /* hack - mark timestamp as invalid. */
+       q->numbytes = io_fast ? fs->pipe->bandwidth : 0;
+       fs->rq[i] = q;
+       fs->rq_elements++;
+       return (q);
+}
+
+/*
+ * Given a flow_set and a pkt in last_pkt, find a matching queue
+ * after appropriate masking. The queue is moved to front
+ * so that further searches take less time.
+ */
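+/*
+ * For example (illustrative configuration): with a flow mask of
+ * src-ip 0xffffffff and all other fields zero, each distinct source
+ * address gets its own dn_flow_queue, while ports and protocol are
+ * ignored by both the masking and the hash below.
+ */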
+static struct dn_flow_queue *
+find_queue(struct dn_flow_set *fs, struct ipfw_flow_id *id)
+{
+    int i = 0 ; /* we need i and q for new allocations */
+    struct dn_flow_queue *q, *prev;
+    int is_v6 = IS_IP6_FLOW_ID(id);
+
+    if ( !(fs->flags_fs & DN_HAVE_FLOW_MASK) )
+       q = fs->rq[0] ;
+    else {
+       /* first, do the masking, then hash */
+       id->dst_port &= fs->flow_mask.dst_port ;
+       id->src_port &= fs->flow_mask.src_port ;
+       id->proto &= fs->flow_mask.proto ;
+       id->flags = 0 ; /* we don't care about this one */
+       if (is_v6) {
+           APPLY_MASK(&id->dst_ip6, &fs->flow_mask.dst_ip6);
+           APPLY_MASK(&id->src_ip6, &fs->flow_mask.src_ip6);
+           id->flow_id6 &= fs->flow_mask.flow_id6;
+
+           i = ((id->dst_ip6.__u6_addr.__u6_addr32[0]) & 0xffff)^
+               ((id->dst_ip6.__u6_addr.__u6_addr32[1]) & 0xffff)^
+               ((id->dst_ip6.__u6_addr.__u6_addr32[2]) & 0xffff)^
+               ((id->dst_ip6.__u6_addr.__u6_addr32[3]) & 0xffff)^
+
+               ((id->dst_ip6.__u6_addr.__u6_addr32[0] >> 15) & 0xffff)^
+               ((id->dst_ip6.__u6_addr.__u6_addr32[1] >> 15) & 0xffff)^
+               ((id->dst_ip6.__u6_addr.__u6_addr32[2] >> 15) & 0xffff)^
+               ((id->dst_ip6.__u6_addr.__u6_addr32[3] >> 15) & 0xffff)^
+
+               ((id->src_ip6.__u6_addr.__u6_addr32[0] << 1) & 0xfffff)^
+               ((id->src_ip6.__u6_addr.__u6_addr32[1] << 1) & 0xfffff)^
+               ((id->src_ip6.__u6_addr.__u6_addr32[2] << 1) & 0xfffff)^
+               ((id->src_ip6.__u6_addr.__u6_addr32[3] << 1) & 0xfffff)^
+
+               ((id->src_ip6.__u6_addr.__u6_addr32[0] << 16) & 0xffff)^
+               ((id->src_ip6.__u6_addr.__u6_addr32[1] << 16) & 0xffff)^
+               ((id->src_ip6.__u6_addr.__u6_addr32[2] << 16) & 0xffff)^
+               ((id->src_ip6.__u6_addr.__u6_addr32[3] << 16) & 0xffff)^
+
+               (id->dst_port << 1) ^ (id->src_port) ^
+               (id->proto ) ^
+               (id->flow_id6);
+       } else {
+           id->dst_ip &= fs->flow_mask.dst_ip ;
+           id->src_ip &= fs->flow_mask.src_ip ;
+
+           i = ( (id->dst_ip) & 0xffff ) ^
+               ( (id->dst_ip >> 15) & 0xffff ) ^
+               ( (id->src_ip << 1) & 0xffff ) ^
+               ( (id->src_ip >> 16 ) & 0xffff ) ^
+               (id->dst_port << 1) ^ (id->src_port) ^
+               (id->proto );
+       }
+       i = i % fs->rq_size ;
+       /* finally, scan the current list for a match */
+       searches++ ;
+       for (prev=NULL, q = fs->rq[i] ; q ; ) {
+           search_steps++;
+           if (is_v6 &&
+                   IN6_ARE_ADDR_EQUAL(&id->dst_ip6,&q->id.dst_ip6) &&  
+                   IN6_ARE_ADDR_EQUAL(&id->src_ip6,&q->id.src_ip6) &&  
+                   id->dst_port == q->id.dst_port &&
+                   id->src_port == q->id.src_port &&
+                   id->proto == q->id.proto &&
+                   id->flags == q->id.flags &&
+                   id->flow_id6 == q->id.flow_id6)
+               break ; /* found */
+
+           if (!is_v6 && id->dst_ip == q->id.dst_ip &&
+                   id->src_ip == q->id.src_ip &&
+                   id->dst_port == q->id.dst_port &&
+                   id->src_port == q->id.src_port &&
+                   id->proto == q->id.proto &&
+                   id->flags == q->id.flags)
+               break ; /* found */
+
+           /* No match. Check if we can expire the entry */
+           if (pipe_expire && q->head == NULL && q->S == q->F+1 ) {
+               /* entry is idle and not in any heap, expire it */
+               struct dn_flow_queue *old_q = q ;
+
+               if (prev != NULL)
+                   prev->next = q = q->next ;
+               else
+                   fs->rq[i] = q = q->next ;
+               fs->rq_elements-- ;
+               free(old_q, M_DUMMYNET);
+               continue ;
+           }
+           prev = q ;
+           q = q->next ;
+       }
+       if (q && prev != NULL) { /* found and not in front */
+           prev->next = q->next ;
+           q->next = fs->rq[i] ;
+           fs->rq[i] = q ;
+       }
+    }
+    if (q == NULL) { /* no match, need to allocate a new entry */
+       q = create_queue(fs, i);
+       if (q != NULL)
+           q->id = *id ;
+    }
+    return q ;
+}
+
+static int
+red_drops(struct dn_flow_set *fs, struct dn_flow_queue *q, int len)
+{
+       /*
+        * RED algorithm
+        *
+        * RED calculates the average queue size (avg) using a low-pass filter
+        * with an exponential weighted (w_q) moving average:
+        *      avg  <-  (1-w_q) * avg + w_q * q_size
+        * where q_size is the queue length (measured in bytes or packets).
+        *
+        * If q_size == 0, we compute the idle time for the link, and set
+        *      avg <- avg * (1 - w_q)^(idle/s)
+        * where s is the time needed for transmitting a medium-sized packet.
+        *
+        * Now, if avg < min_th the packet is enqueued.
+        * If avg > max_th the packet is dropped. Otherwise, the packet is
+        * dropped with probability P function of avg.
+        */
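+       /*
+        * E.g. with the classic w_q = 1/512, a queue that jumps from 0
+        * to 100 packets raises avg by only about 0.2 per arrival, so a
+        * short burst barely moves the average while a persistent
+        * backlog eventually does.
+        */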
+
+       int64_t p_b = 0;
+
+       /* Queue in bytes or packets? */
+       u_int q_size = (fs->flags_fs & DN_QSIZE_IS_BYTES) ?
+           q->len_bytes : q->len;
+
+       DPRINTF(("\ndummynet: %d q: %2u ", (int)curr_time, q_size));
+
+       /* Average queue size estimation. */
+       if (q_size != 0) {
+               /* Queue is not empty, avg <- avg + (q_size - avg) * w_q */
+               int diff = SCALE(q_size) - q->avg;
+               int64_t v = SCALE_MUL((int64_t)diff, (int64_t)fs->w_q);
+
+               q->avg += (int)v;
+       } else {
+               /*
+                * Queue is empty; compute for how long the queue has
+                * been empty and use a lookup table to compute
+                * (1 - w_q)^(idle_time/s), where s is the time to send
+                * a (small) packet.
+                * XXX check wraps...
+                */
+               if (q->avg) {
+                       u_int t = div64(curr_time - q->q_time,
+                           fs->lookup_step);
+
+                       q->avg = (t >= 0 && t < fs->lookup_depth) ?
+                           SCALE_MUL(q->avg, fs->w_q_lookup[t]) : 0;
+               }
+       }
+       DPRINTF(("dummynet: avg: %u ", SCALE_VAL(q->avg)));
+
+       /* Should we drop? */
+       if (q->avg < fs->min_th) {
+               q->count = -1;
+               return (0);     /* accept packet */
+       }
+       if (q->avg >= fs->max_th) {     /* average queue >=  max threshold */
+               if (fs->flags_fs & DN_IS_GENTLE_RED) {
+                       /*
+                        * According to Gentle-RED, if avg is greater than
+                        * max_th the packet is dropped with a probability
+                        *       p_b = c_3 * avg - c_4
+                        * where c_3 = (1 - max_p) / max_th
+                        *       c_4 = 1 - 2 * max_p
+                        */
+                       p_b = SCALE_MUL((int64_t)fs->c_3, (int64_t)q->avg) -
+                           fs->c_4;
+               } else {
+                       q->count = -1;
+                       DPRINTF(("dummynet: - drop"));
+                       return (1);
+               }
+       } else if (q->avg > fs->min_th) {
+               /*
+                * We compute p_b using the linear dropping function
+                *       p_b = c_1 * avg - c_2
+                * where c_1 = max_p / (max_th - min_th)
+                *       c_2 = max_p * min_th / (max_th - min_th)
+                */
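+               /*
+                * E.g. (unscaled values) with max_p = 0.1, min_th = 5 and
+                * max_th = 15: c_1 = 0.01 and c_2 = 0.05, so avg = 10
+                * yields p_b = 0.01 * 10 - 0.05 = 0.05.
+                */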
+               p_b = SCALE_MUL((int64_t)fs->c_1, (int64_t)q->avg) - fs->c_2;
+       }
+
+       if (fs->flags_fs & DN_QSIZE_IS_BYTES)
+               p_b = div64(p_b * len, fs->max_pkt_size);
+       if (++q->count == 0)
+               q->random = random() & 0xffff;
+       else {
+               /*
+                * q->count counts packets arrived since last drop, so a greater
+                * value of q->count means a greater packet drop probability.
+                */
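+               /*
+                * E.g. if p_b corresponds to 0.05, the 10th packet since
+                * the last drop is dropped with probability about
+                * 0.05 * 10 = 0.5.
+                */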
+               if (SCALE_MUL(p_b, SCALE((int64_t)q->count)) > q->random) {
+                       q->count = 0;
+                       DPRINTF(("dummynet: - red drop"));
+                       /* After a drop we calculate a new random value. */
+                       q->random = random() & 0xffff;
+                       return (1);     /* drop */
+               }
+       }
+       /* End of RED algorithm. */
+
+       return (0);     /* accept */
+}
+
+static __inline struct dn_flow_set *
+locate_flowset(int fs_nr)
+{
+       struct dn_flow_set *fs;
+
+       SLIST_FOREACH(fs, &flowsethash[HASH(fs_nr)], next)
+               if (fs->fs_nr == fs_nr)
+                       return (fs);
+
+       return (NULL);
+}
+
+static __inline struct dn_pipe *
+locate_pipe(int pipe_nr)
+{
+       struct dn_pipe *pipe;
+
+       SLIST_FOREACH(pipe, &pipehash[HASH(pipe_nr)], next)
+               if (pipe->pipe_nr == pipe_nr)
+                       return (pipe);
+
+       return (NULL);
+}
+
+/*
+ * dummynet hook for packets. Below 'pipe' is a pipe or a queue
+ * depending on whether WF2Q or fixed bw is used.
+ *
+ * pipe_nr     pipe or queue the packet is destined for.
+ * dir         where to send the packet after dummynet processing.
+ * m           the mbuf with the packet.
+ * ifp         the 'ifp' parameter from the caller.
+ *             NULL in ip_input, destination interface in ip_output.
+ * rule                matching rule, in case of multiple passes.
+ */
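+/*
+ * For example (illustrative userland commands), packets reach this hook
+ * after matching a configuration such as:
+ *     ipfw add 100 pipe 10 ip from any to any
+ *     ipfw pipe 10 config bw 1Mbit/s delay 50ms
+ */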
+static int
+dummynet_io(struct mbuf **m0, int dir, struct ip_fw_args *fwa)
+{
+       struct mbuf *m = *m0, *head = NULL, *tail = NULL;
+       struct dn_pkt_tag *pkt;
+       struct m_tag *mtag;
+       struct dn_flow_set *fs = NULL;
+       struct dn_pipe *pipe;
+       uint64_t len = m->m_pkthdr.len;
+       struct dn_flow_queue *q = NULL;
+       int is_pipe;
+       ipfw_insn *cmd = ACTION_PTR(fwa->rule);
+
+       KASSERT(m->m_nextpkt == NULL,
+           ("dummynet_io: mbuf queue passed to dummynet"));
+
+       if (cmd->opcode == O_LOG)
+               cmd += F_LEN(cmd);
+       if (cmd->opcode == O_ALTQ)
+               cmd += F_LEN(cmd);
+       if (cmd->opcode == O_TAG)
+               cmd += F_LEN(cmd);
+       is_pipe = (cmd->opcode == O_PIPE);
+
+       DUMMYNET_LOCK();
+       io_pkt++;
+       /*
+        * This is a dummynet rule, so we expect an O_PIPE or O_QUEUE rule.
+        *
+        * XXXGL: probably the pipe->fs and fs->pipe logic here
+        * below can be simplified.
+        */
+       if (is_pipe) {
+               pipe = locate_pipe(fwa->cookie);
+               if (pipe != NULL)
+                       fs = &(pipe->fs);
+       } else
+               fs = locate_flowset(fwa->cookie);
+
+       if (fs == NULL)
+               goto dropit;    /* This queue/pipe does not exist! */
+       pipe = fs->pipe;
+       if (pipe == NULL) {     /* Must be a queue; try to find a matching pipe. */
+               pipe = locate_pipe(fs->parent_nr);
+               if (pipe != NULL)
+                       fs->pipe = pipe;
+               else {
+                       printf("dummynet: no pipe %d for queue %d, drop pkt\n",
+                           fs->parent_nr, fs->fs_nr);
+                       goto dropit;
+               }
+       }
+       q = find_queue(fs, &(fwa->f_id));
+       if (q == NULL)
+               goto dropit;            /* Cannot allocate queue. */
+
+       /* Update statistics, then check reasons to drop pkt. */
+       q->tot_bytes += len;
+       q->tot_pkts++;
+       if (fs->plr && random() < fs->plr)
+               goto dropit;            /* Random pkt drop. */
+       if (fs->flags_fs & DN_QSIZE_IS_BYTES) {
+               if (q->len_bytes > fs->qsize)
+                       goto dropit;    /* Queue size overflow. */
+       } else {
+               if (q->len >= fs->qsize)
+                       goto dropit;    /* Queue count overflow. */
+       }
+       if (fs->flags_fs & DN_IS_RED && red_drops(fs, q, len))
+               goto dropit;
+
+       /* XXX expensive to zero, see if we can remove it. */
+       mtag = m_tag_get(PACKET_TAG_DUMMYNET,
+           sizeof(struct dn_pkt_tag), M_NOWAIT | M_ZERO);
+       if (mtag == NULL)
+               goto dropit;            /* Cannot allocate packet header. */
+       m_tag_prepend(m, mtag);         /* Attach to mbuf chain. */
+
+       pkt = (struct dn_pkt_tag *)(mtag + 1);
+       /*
+        * OK, we can handle the packet now:
+        * build and enqueue the packet along with its parameters.
+        */
+       pkt->rule = fwa->rule;
+       pkt->dn_dir = dir;
+
+       pkt->ifp = fwa->oif;
+
+       if (q->head == NULL)
+               q->head = m;
+       else
+               q->tail->m_nextpkt = m;
+       q->tail = m;
+       q->len++;
+       q->len_bytes += len;
+
+       if (q->head != m)               /* Flow was not idle, we are done. */
+               goto done;
+
+       if (q->q_time < (uint32_t)curr_time)
+               q->numbytes = io_fast ? fs->pipe->bandwidth : 0;
+       q->q_time = curr_time;
+
+       /*
+        * If we reach this point the flow was previously idle, so we need
+        * to schedule it. This involves different actions for fixed-rate or
+        * WF2Q queues.
+        */
+       if (is_pipe) {
+               /* Fixed-rate queue: just insert into the ready_heap. */
+               dn_key t = 0;
+
+               if (pipe->bandwidth) {
+                       q->extra_bits = compute_extra_bits(m, pipe);
+                       t = set_ticks(m, q, pipe);
+               }
+               q->sched_time = curr_time;
+               if (t == 0)             /* Must process it now. */
+                       ready_event(q, &head, &tail);
+               else
+                       heap_insert(&ready_heap, curr_time + t , q);
+       } else {
+               /*
+                * WF2Q. First, compute start time S: if the flow was
+                * idle (S = F + 1) set S to the virtual time V for the
+                * controlling pipe, and update the sum of weights for the pipe;
+                * otherwise, remove flow from idle_heap and set S to max(F,V).
+                * Second, compute finish time F = S + len / weight.
+                * Third, if pipe was idle, update V = max(S, V).
+                * Fourth, count one more backlogged flow.
+                */
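+               /*
+                * Numeric sketch (illustrative): if the flow was idle and
+                * pipe->V = 1000, then S = 1000; with (len << MY_M) = 8000
+                * and weight = 4, the finish time becomes
+                * F = 1000 + 8000 / 4 = 3000.
+                */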
+               if (DN_KEY_GT(q->S, q->F)) { /* Means timestamps are invalid. */
+                       q->S = pipe->V;
+                       pipe->sum += fs->weight; /* Add weight of new queue. */
+               } else {
+                       heap_extract(&(pipe->idle_heap), q);
+                       q->S = MAX64(q->F, pipe->V);
+               }
+               q->F = q->S + div64(len << MY_M, fs->weight);
+
+               if (pipe->not_eligible_heap.elements == 0 &&
+                   pipe->scheduler_heap.elements == 0)
+                       pipe->V = MAX64(q->S, pipe->V);
+               fs->backlogged++;
+               /*
+                * Look at eligibility. A flow is not eligible if S > V (when
+                * this happens, it means that there is some other flow already
+                * scheduled for the same pipe, so the scheduler_heap cannot be
+                * empty). If the flow is not eligible we just store it in the
+                * not_eligible_heap. Otherwise, we store in the scheduler_heap
+                * and possibly invoke ready_event_wfq() right now if there is
+                * leftover credit.
+                * Note that for all flows in scheduler_heap (SCH), S_i <= V,
+                * and for all flows in not_eligible_heap (NEH), S_i > V.
+                * So when we need to compute max(V, min(S_i)) for all i in
+                * SCH+NEH, we only need to look into NEH.
+                */
+               if (DN_KEY_GT(q->S, pipe->V)) {         /* Not eligible. */
+                       if (pipe->scheduler_heap.elements == 0)
+                               printf("dummynet: ++ ouch! not eligible but empty scheduler!\n");
+                       heap_insert(&(pipe->not_eligible_heap), q->S, q);
+               } else {
+                       heap_insert(&(pipe->scheduler_heap), q->F, q);
+                       if (pipe->numbytes >= 0) {       /* Pipe is idle. */
+                               if (pipe->scheduler_heap.elements != 1)
+                                       printf("dummynet: OUCH! pipe should have been idle!\n");
+                               DPRINTF(("dummynet: waking up pipe %d at %d\n",
+                                   pipe->pipe_nr, (int)(q->F >> MY_M)));
+                               pipe->sched_time = curr_time;
+                               ready_event_wfq(pipe, &head, &tail);
+                       }
+               }
+       }
+done:
+       if (head == m && dir != DN_TO_IFB_FWD && dir != DN_TO_ETH_DEMUX &&
+           dir != DN_TO_ETH_OUT) {     /* Fast io. */
+               io_pkt_fast++;
+               if (m->m_nextpkt != NULL)
+                       printf("dummynet: fast io: pkt chain detected!\n");
+               head = m->m_nextpkt = NULL;
+       } else
+               *m0 = NULL;             /* Normal io. */
+
+       DUMMYNET_UNLOCK();
+       if (head != NULL)
+               dummynet_send(head);
+       return (0);
+
+dropit:
+       io_pkt_drop++;
+       if (q)
+               q->drops++;
+       DUMMYNET_UNLOCK();
+       /*
+        * Mark the packet for dropping via its tag, if one is present:
+        * dn_tag_get() cannot fail (it panics instead), so we must
+        * check for the tag first.
+        */
+       if (m_tag_first(m)) {
+               pkt = dn_tag_get(m);
+               pkt->dn_dir = DN_TO_DROP;
+       }
+       dummynet_send(m);       /* drop the packet */
+       *m0 = NULL;
+       return ((fs && (fs->flags_fs & DN_NOERROR)) ? 0 : ENOBUFS);
+}
+
+/*
+ * Below, the rt_unref is only needed when (pkt->dn_dir == DN_TO_IP_OUT).
+ * Doing this would probably save us the initial bzero of dn_pkt.
+ */
+#define        DN_FREE_PKT(_m) do {                            \
+       m_freem(_m);                                    \
+} while (0)
+
+/*
+ * Dispose of all packets and flow_queues on a flow_set.
+ * If all=1, also remove the RED lookup table and other storage,
+ * including the descriptor itself.
+ * For the flow_set embedded in a dn_pipe, the caller MUST also
+ * clean up ready_heap...
+ */
+static void
+purge_flow_set(struct dn_flow_set *fs, int all)
+{
+       struct dn_flow_queue *q, *qn;
+       int i;
+
+       DUMMYNET_LOCK_ASSERT();
+
+       for (i = 0; i <= fs->rq_size; i++) {
+               for (q = fs->rq[i]; q != NULL; q = qn) {
+                       struct mbuf *m, *mnext;
+
+                       mnext = q->head;
+                       while ((m = mnext) != NULL) {
+                               mnext = m->m_nextpkt;
+                               DN_FREE_PKT(m);
+                       }
+                       qn = q->next;
+                       free(q, M_DUMMYNET);
+               }
+               fs->rq[i] = NULL;
+       }
+
+       fs->rq_elements = 0;
+       if (all) {
+               /* RED - free lookup table. */
+               if (fs->w_q_lookup != NULL)
+                       free(fs->w_q_lookup, M_DUMMYNET);
+               if (fs->rq != NULL)
+                       free(fs->rq, M_DUMMYNET);
+               /* If this fs is not part of a pipe, free it. */
+               if (fs->pipe == NULL || fs != &(fs->pipe->fs))
+                       free(fs, M_DUMMYNET);
+       }
+}
+
+/*
+ * Dispose all packets queued on a pipe (not a flow_set).
+ * Also free all resources associated to a pipe, which is about
+ * to be deleted.
+ */
+static void
+purge_pipe(struct dn_pipe *pipe)
+{
+    struct mbuf *m, *mnext;
+
+    purge_flow_set( &(pipe->fs), 1 );
+
+    mnext = pipe->head;
+    while ((m = mnext) != NULL) {
+       mnext = m->m_nextpkt;
+       DN_FREE_PKT(m);
+    }
+
+    heap_free( &(pipe->scheduler_heap) );
+    heap_free( &(pipe->not_eligible_heap) );
+    heap_free( &(pipe->idle_heap) );
+}
+
+/*
+ * Delete all pipes and heaps returning memory. Must also
+ * remove references from all ipfw rules to all pipes.
+ */
+static void
+dummynet_flush(void)
+{
+       struct dn_pipe *pipe, *pipe1;
+       struct dn_flow_set *fs, *fs1;
+       int i;
+
+       DUMMYNET_LOCK();
+       /* Free heaps so we don't have unwanted events. */
+       heap_free(&ready_heap);
+       heap_free(&wfq_ready_heap);
+       heap_free(&extract_heap);
+
+       /*
+        * Now purge all queued pkts and delete all pipes.
+        *
+        * XXXGL: can we merge the for(;;) cycles into one or not?
+        */
+       for (i = 0; i < HASHSIZE; i++)
+               SLIST_FOREACH_SAFE(fs, &flowsethash[i], next, fs1) {
+                       SLIST_REMOVE(&flowsethash[i], fs, dn_flow_set, next);
+                       purge_flow_set(fs, 1);
+               }
+       for (i = 0; i < HASHSIZE; i++)
+               SLIST_FOREACH_SAFE(pipe, &pipehash[i], next, pipe1) {
+                       SLIST_REMOVE(&pipehash[i], pipe, dn_pipe, next);
+                       purge_pipe(pipe);
+                       free_pipe(pipe);
+               }
+       DUMMYNET_UNLOCK();
+}
+
+extern struct ip_fw *ip_fw_default_rule;
+static void
+dn_rule_delete_fs(struct dn_flow_set *fs, void *r)
+{
+    int i ;
+    struct dn_flow_queue *q ;
+    struct mbuf *m ;
+
+    for (i = 0 ; i <= fs->rq_size ; i++) /* last one is overflow */
+       for (q = fs->rq[i] ; q ; q = q->next )
+           for (m = q->head ; m ; m = m->m_nextpkt ) {
+               struct dn_pkt_tag *pkt = dn_tag_get(m) ;
+               if (pkt->rule == r)
+                   pkt->rule = ip_fw_default_rule ;
+           }
+}
+
+/*
+ * When a firewall rule is deleted, scan all queues and remove the pointer
+ * to the rule from matching packets, making them point to the default rule.
+ * The pointer is used to reinject packets in case one_pass = 0.
+ */
+void
+dn_rule_delete(void *r)
+{
+    struct dn_pipe *pipe;
+    struct dn_flow_set *fs;
+    struct dn_pkt_tag *pkt;
+    struct mbuf *m;
+    int i;
+
+    DUMMYNET_LOCK();
+    /*
+     * If the rule references a queue (dn_flow_set), then scan
+     * the flow set, otherwise scan pipes. It would suffice to do either,
+     * but doing both does no harm.
+     */
+    for (i = 0; i < HASHSIZE; i++)
+       SLIST_FOREACH(fs, &flowsethash[i], next)
+               dn_rule_delete_fs(fs, r);
+
+    for (i = 0; i < HASHSIZE; i++)
+       SLIST_FOREACH(pipe, &pipehash[i], next) {
+               fs = &(pipe->fs);
+               dn_rule_delete_fs(fs, r);
+               for (m = pipe->head ; m ; m = m->m_nextpkt ) {
+                       pkt = dn_tag_get(m);
+                       if (pkt->rule == r)
+                               pkt->rule = ip_fw_default_rule;
+               }
+       }
+    DUMMYNET_UNLOCK();
+}
+
+/*
+ * Set up RED parameters.
+ */
+static int
+config_red(struct dn_flow_set *p, struct dn_flow_set *x)
+{
+       int i;
+
+       x->w_q = p->w_q;
+       x->min_th = SCALE(p->min_th);
+       x->max_th = SCALE(p->max_th);
+       x->max_p = p->max_p;
+
+       x->c_1 = p->max_p / (p->max_th - p->min_th);
+       x->c_2 = SCALE_MUL(x->c_1, SCALE(p->min_th));
+
+       if (x->flags_fs & DN_IS_GENTLE_RED) {
+               x->c_3 = (SCALE(1) - p->max_p) / p->max_th;
+               x->c_4 = SCALE(1) - 2 * p->max_p;
+       }
+
+       /* If the lookup table already exists, free it and create it anew. */
+       if (x->w_q_lookup) {
+               free(x->w_q_lookup, M_DUMMYNET);
+               x->w_q_lookup = NULL;
+       }
+       if (red_lookup_depth == 0) {
+               printf("\ndummynet: net.inet.ip.dummynet.red_lookup_depth"
+                   "must be > 0\n");
+               free(x, M_DUMMYNET);
+               return (EINVAL);
+       }
+       x->lookup_depth = red_lookup_depth;
+       x->w_q_lookup = (u_int *)malloc(x->lookup_depth * sizeof(int),
+           M_DUMMYNET, M_NOWAIT);
+       if (x->w_q_lookup == NULL) {
+               printf("dummynet: sorry, cannot allocate red lookup table\n");
+               free(x, M_DUMMYNET);
+               return(ENOSPC);
+       }
+
+       /* Fill the lookup table with (1 - w_q)^x */
+       x->lookup_step = p->lookup_step;
+       x->lookup_weight = p->lookup_weight;
+       x->w_q_lookup[0] = SCALE(1) - x->w_q;
+
+       for (i = 1; i < x->lookup_depth; i++)
+               x->w_q_lookup[i] =
+                   SCALE_MUL(x->w_q_lookup[i - 1], x->lookup_weight);
+
+       if (red_avg_pkt_size < 1)
+               red_avg_pkt_size = 512;
+       x->avg_pkt_size = red_avg_pkt_size;
+       if (red_max_pkt_size < 1)
+               red_max_pkt_size = 1500;
+       x->max_pkt_size = red_max_pkt_size;
+       return (0);
+}
+
+static int
+alloc_hash(struct dn_flow_set *x, struct dn_flow_set *pfs)
+{
+    if (x->flags_fs & DN_HAVE_FLOW_MASK) {     /* allocate some slots */
+       int l = pfs->rq_size;
+
+       if (l == 0)
+           l = dn_hash_size;
+       if (l < 4)
+           l = 4;
+       else if (l > DN_MAX_HASH_SIZE)
+           l = DN_MAX_HASH_SIZE;
+       x->rq_size = l;
+    } else                  /* one is enough for null mask */
+       x->rq_size = 1;
+    x->rq = malloc((1 + x->rq_size) * sizeof(struct dn_flow_queue *),
+           M_DUMMYNET, M_NOWAIT | M_ZERO);
+    if (x->rq == NULL) {
+       printf("dummynet: sorry, cannot allocate queue\n");
+       return (ENOMEM);
+    }
+    x->rq_elements = 0;
+    return 0 ;
+}
+
+static void
+set_fs_parms(struct dn_flow_set *x, struct dn_flow_set *src)
+{
+       x->flags_fs = src->flags_fs;
+       x->qsize = src->qsize;
+       x->plr = src->plr;
+       x->flow_mask = src->flow_mask;
+       if (x->flags_fs & DN_QSIZE_IS_BYTES) {
+               if (x->qsize > pipe_byte_limit)
+                       x->qsize = 1024 * 1024;
+       } else {
+               if (x->qsize == 0)
+                       x->qsize = 50;
+               if (x->qsize > pipe_slot_limit)
+                       x->qsize = 50;
+       }
+       /* Configuring RED. */
+       if (x->flags_fs & DN_IS_RED)
+               config_red(src, x);     /* XXX should check errors */
+}
+
+/*
+ * Setup pipe or queue parameters.
+ */
+static int
+config_pipe(struct dn_pipe *p)
+{
+       struct dn_flow_set *pfs = &(p->fs);
+       struct dn_flow_queue *q;
+       int i, error;
+
+       /*
+        * The config program passes parameters as follows:
+        * bw = bits/second (0 means no limit),
+        * delay = ms (must be translated into ticks),
+        * qsize = slots or bytes.
+        */
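+       /*
+        * E.g. a userland delay of 50 (ms) with hz = 1000 becomes 50
+        * ticks after the conversion below; with hz = 100 it becomes
+        * 5 ticks.
+        */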
+       p->delay = (p->delay * hz) / 1000;
+       /* We need either a pipe number or a flow_set number. */
+       if (p->pipe_nr == 0 && pfs->fs_nr == 0)
+               return (EINVAL);
+       if (p->pipe_nr != 0 && pfs->fs_nr != 0)
+               return (EINVAL);
+       if (p->pipe_nr != 0) {                  /* this is a pipe */
+               struct dn_pipe *pipe;
+
+               DUMMYNET_LOCK();
+               pipe = locate_pipe(p->pipe_nr); /* locate pipe */
+
+               if (pipe == NULL) {             /* new pipe */
+                       pipe = malloc(sizeof(struct dn_pipe), M_DUMMYNET,
+                           M_NOWAIT | M_ZERO);
+                       if (pipe == NULL) {
+                               DUMMYNET_UNLOCK();
+                               printf("dummynet: no memory for new pipe\n");
+                               return (ENOMEM);
+                       }
+                       pipe->pipe_nr = p->pipe_nr;
+                       pipe->fs.pipe = pipe;
+                       /*
+                        * idle_heap is the only heap from which
+                        * we extract entries from the middle.
+                        */
+                       pipe->idle_heap.size = pipe->idle_heap.elements = 0;
+                       pipe->idle_heap.offset =
+                           offsetof(struct dn_flow_queue, heap_pos);
+               } else
+                       /* Flush accumulated credit for all queues. */
+                       for (i = 0; i <= pipe->fs.rq_size; i++)
+                               for (q = pipe->fs.rq[i]; q; q = q->next)
+                                       q->numbytes = io_fast ? p->bandwidth : 0;
+
+               pipe->bandwidth = p->bandwidth;
+               pipe->numbytes = 0;             /* just in case... */
+               bcopy(p->if_name, pipe->if_name, sizeof(p->if_name));
+               pipe->ifp = NULL;               /* reset interface ptr */
+               pipe->delay = p->delay;
+               set_fs_parms(&(pipe->fs), pfs);
+
+               /* Handle changes in the delay profile. */
+               if (p->samples_no > 0) {
+                       if (pipe->samples_no != p->samples_no) {
+                               if (pipe->samples != NULL)
+                                       free(pipe->samples, M_DUMMYNET);
+                               pipe->samples =
+                                   malloc(p->samples_no*sizeof(dn_key),
+                                       M_DUMMYNET, M_NOWAIT | M_ZERO);
+                               if (pipe->samples == NULL) {
+                                       DUMMYNET_UNLOCK();
+                                       printf("dummynet: no memory "
+                                               "for new samples\n");
+                                       return (ENOMEM);
+                               }
+                               pipe->samples_no = p->samples_no;
+                       }
+
+                       strncpy(pipe->name,p->name,sizeof(pipe->name));
+                       pipe->loss_level = p->loss_level;
+                       for (i = 0; i<pipe->samples_no; ++i)
+                               pipe->samples[i] = p->samples[i];
+               } else if (pipe->samples != NULL) {
+                       free(pipe->samples, M_DUMMYNET);
+                       pipe->samples = NULL;
+                       pipe->samples_no = 0;
+               }
+
+               if (pipe->fs.rq == NULL) {      /* a new pipe */
+                       error = alloc_hash(&(pipe->fs), pfs);
+                       if (error) {
+                               DUMMYNET_UNLOCK();
+                               free_pipe(pipe);
+                               return (error);
+                       }
+                       SLIST_INSERT_HEAD(&pipehash[HASH(pipe->pipe_nr)],
+                           pipe, next);
+               }
+               DUMMYNET_UNLOCK();
+       } else {                                /* config queue */
+               struct dn_flow_set *fs;
+
+               DUMMYNET_LOCK();
+               fs = locate_flowset(pfs->fs_nr); /* locate flow_set */
+
+               if (fs == NULL) {               /* new */
+                       if (pfs->parent_nr == 0) { /* need link to a pipe */
+                               DUMMYNET_UNLOCK();
+                               return (EINVAL);
+                       }
+                       fs = malloc(sizeof(struct dn_flow_set), M_DUMMYNET,
+                           M_NOWAIT | M_ZERO);
+                       if (fs == NULL) {
+                               DUMMYNET_UNLOCK();
+                               printf(
+                                   "dummynet: no memory for new flow_set\n");
+                               return (ENOMEM);
+                       }
+                       fs->fs_nr = pfs->fs_nr;
+                       fs->parent_nr = pfs->parent_nr;
+                       fs->weight = pfs->weight;
+                       if (fs->weight == 0)
+                               fs->weight = 1;
+                       else if (fs->weight > 100)
+                               fs->weight = 100;
+               } else {
+                       /*
+                        * Change parent pipe not allowed;
+                        * must delete and recreate.
+                        */
+                       if (pfs->parent_nr != 0 &&
+                           fs->parent_nr != pfs->parent_nr) {
+                               DUMMYNET_UNLOCK();
+                               return (EINVAL);
+                       }
+               }
+
+               set_fs_parms(fs, pfs);
+
+               if (fs->rq == NULL) {           /* a new flow_set */
+                       error = alloc_hash(fs, pfs);
+                       if (error) {
+                               DUMMYNET_UNLOCK();
+                               free(fs, M_DUMMYNET);
+                               return (error);
+                       }
+                       SLIST_INSERT_HEAD(&flowsethash[HASH(fs->fs_nr)],
+                           fs, next);
+               }
+               DUMMYNET_UNLOCK();
+       }
+       return (0);
+}
+
+/*
+ * Helper function to remove from a heap all the queues linked to
+ * a flow_set about to be deleted.
+ */
+static void
+fs_remove_from_heap(struct dn_heap *h, struct dn_flow_set *fs)
+{
+    int i = 0, found = 0 ;
+    for (; i < h->elements ;)
+       if ( ((struct dn_flow_queue *)h->p[i].object)->fs == fs) {
+           h->elements-- ;
+           h->p[i] = h->p[h->elements] ;
+           found++ ;
+       } else
+           i++ ;
+    if (found)
+       heapify(h);
+}
+
+/*
+ * Helper function to remove a pipe from a heap (it can be there at most once).
+ */
+static void
+pipe_remove_from_heap(struct dn_heap *h, struct dn_pipe *p)
+{
+    if (h->elements > 0) {
+       int i = 0 ;
+       for (i=0; i < h->elements ; i++ ) {
+           if (h->p[i].object == p) { /* found it */
+               h->elements-- ;
+               h->p[i] = h->p[h->elements] ;
+               heapify(h);
+               break ;
+           }
+       }
+    }
+}
+
+/*
+ * Drain all queues. Called in case of severe mbuf shortage.
+ */
+void
+dummynet_drain(void)
+{
+    struct dn_flow_set *fs;
+    struct dn_pipe *pipe;
+    struct mbuf *m, *mnext;
+    int i;
+
+    DUMMYNET_LOCK_ASSERT();
+
+    heap_free(&ready_heap);
+    heap_free(&wfq_ready_heap);
+    heap_free(&extract_heap);
+    /* Purge packets from all the flow_sets. */
+    for (i = 0; i < HASHSIZE; i++)
+       SLIST_FOREACH(fs, &flowsethash[i], next)
+               purge_flow_set(fs, 0);
+
+    for (i = 0; i < HASHSIZE; i++) {
+       SLIST_FOREACH(pipe, &pipehash[i], next) {
+               purge_flow_set(&(pipe->fs), 0);
+
+               mnext = pipe->head;
+               while ((m = mnext) != NULL) {
+                       mnext = m->m_nextpkt;
+                       DN_FREE_PKT(m);
+               }
+               pipe->head = pipe->tail = NULL;
+       }
+    }
+}
+
+/*
+ * Fully delete a pipe or a queue, cleaning up associated info.
+ */
+static int
+delete_pipe(struct dn_pipe *p)
+{
+
+    if (p->pipe_nr == 0 && p->fs.fs_nr == 0)
+       return EINVAL ;
+    if (p->pipe_nr != 0 && p->fs.fs_nr != 0)
+       return EINVAL ;
+    if (p->pipe_nr != 0) { /* this is an old-style pipe */
+       struct dn_pipe *pipe;
+       struct dn_flow_set *fs;
+       int i;
+
+       DUMMYNET_LOCK();
+       pipe = locate_pipe(p->pipe_nr); /* locate pipe */
+
+       if (pipe == NULL) {
+           DUMMYNET_UNLOCK();
+           return (ENOENT);    /* not found */
+       }
+
+       /* Unlink from list of pipes. */
+       SLIST_REMOVE(&pipehash[HASH(pipe->pipe_nr)], pipe, dn_pipe, next);
+
+       /* Remove all references to this pipe from flow_sets. */
+       for (i = 0; i < HASHSIZE; i++)
+           SLIST_FOREACH(fs, &flowsethash[i], next)
+               if (fs->pipe == pipe) {
+                       printf("dummynet: ++ ref to pipe %d from fs %d\n",
+                           p->pipe_nr, fs->fs_nr);
+                       fs->pipe = NULL ;
+                       purge_flow_set(fs, 0);
+               }
+       fs_remove_from_heap(&ready_heap, &(pipe->fs));
+       purge_pipe(pipe); /* remove all data associated with this pipe */
+       /* remove references to this pipe from extract_heap and wfq_ready_heap */
+       pipe_remove_from_heap(&extract_heap, pipe);
+       pipe_remove_from_heap(&wfq_ready_heap, pipe);
+       DUMMYNET_UNLOCK();
+
+       free_pipe(pipe);
+    } else { /* this is a WF2Q queue (dn_flow_set) */
+       struct dn_flow_set *fs;
+
+       DUMMYNET_LOCK();
+       fs = locate_flowset(p->fs.fs_nr); /* locate set */
+
+       if (fs == NULL) {
+           DUMMYNET_UNLOCK();
+           return (ENOENT); /* not found */
+       }
+
+       /* Unlink from list of flowsets. */
+       SLIST_REMOVE( &flowsethash[HASH(fs->fs_nr)], fs, dn_flow_set, next);
+
+       if (fs->pipe != NULL) {
+           /* Update total weight on parent pipe and cleanup parent heaps. */
+           fs->pipe->sum -= fs->weight * fs->backlogged ;
+           fs_remove_from_heap(&(fs->pipe->not_eligible_heap), fs);
+           fs_remove_from_heap(&(fs->pipe->scheduler_heap), fs);
+#if 1  /* XXX should we remove from idle_heap as well? */
+           fs_remove_from_heap(&(fs->pipe->idle_heap), fs);
+#endif
+       }
+       purge_flow_set(fs, 1);
+       DUMMYNET_UNLOCK();
+    }
+    return 0 ;
+}
+
+/*
+ * Helper function used to copy data from the kernel in DUMMYNET_GET.
+ */
+static char *
+dn_copy_set(struct dn_flow_set *set, char *bp)
+{
+    int i, copied = 0 ;
+    struct dn_flow_queue *q, *qp = (struct dn_flow_queue *)bp;
+
+    DUMMYNET_LOCK_ASSERT();
+
+    for (i = 0 ; i <= set->rq_size ; i++)
+       for (q = set->rq[i] ; q ; q = q->next, qp++ ) {
+           if (q->hash_slot != i)
+               printf("dummynet: ++ at %d: wrong slot (have %d, "
+                   "should be %d)\n", copied, q->hash_slot, i);
+           if (q->fs != set)
+               printf("dummynet: ++ at %d: wrong fs ptr (have %p, should be %p)\n",
+                       i, q->fs, set);
+           copied++ ;
+           bcopy(q, qp, sizeof( *q ) );
+           /* cleanup pointers */
+           qp->next = NULL ;
+           qp->head = qp->tail = NULL ;
+           qp->fs = NULL ;
+       }
+    if (copied != set->rq_elements)
+       printf("dummynet: ++ wrong count, have %d should be %d\n",
+           copied, set->rq_elements);
+    return (char *)qp ;
+}
+
+static size_t
+dn_calc_size(void)
+{
+    struct dn_flow_set *fs;
+    struct dn_pipe *pipe;
+    size_t size = 0;
+    int i;
+
+    DUMMYNET_LOCK_ASSERT();
+    /*
+     * Compute size of data structures: list of pipes and flow_sets.
+     */
+    for (i = 0; i < HASHSIZE; i++) {
+       SLIST_FOREACH(pipe, &pipehash[i], next)
+               size += sizeof(*pipe) +
+                   pipe->fs.rq_elements * sizeof(struct dn_flow_queue);
+       SLIST_FOREACH(fs, &flowsethash[i], next)
+               size += sizeof (*fs) +
+                   fs->rq_elements * sizeof(struct dn_flow_queue);
+    }
+    return size;
+}
+
+static int
+dummynet_get(struct sockopt *sopt)
+{
+    char *buf, *bp ; /* bp is the "copy-pointer" */
+    size_t size ;
+    struct dn_flow_set *fs;
+    struct dn_pipe *pipe;
+    int error=0, i ;
+
+    /* XXX lock held too long */
+    DUMMYNET_LOCK();
+    /*
+     * XXX: Ugly, but we need to allocate memory with M_WAITOK flag and we
+     *      cannot use this flag while holding a mutex.
+     */
+    for (i = 0; i < 10; i++) {
+       size = dn_calc_size();
+       DUMMYNET_UNLOCK();
+       buf = malloc(size, M_TEMP, M_WAITOK);
+       DUMMYNET_LOCK();
+       if (size == dn_calc_size())
+               break;
+       free(buf, M_TEMP);
+       buf = NULL;
+    }
+    if (buf == NULL) {
+       DUMMYNET_UNLOCK();
+       return ENOBUFS ;
+    }
+    bp = buf;
+    for (i = 0; i < HASHSIZE; i++) 
+       SLIST_FOREACH(pipe, &pipehash[i], next) {
+               struct dn_pipe *pipe_bp = (struct dn_pipe *)bp;
+
+               /*
+                * Copy pipe descriptor into *bp, convert delay back to ms,
+                * then copy the flow_set descriptor(s) one at a time.
+                * After each flow_set, copy the queue descriptor it owns.
+                */
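+               /*
+                * The resulting buffer layout is therefore:
+                *   [pipe 1][its queues ...][pipe 2][its queues ...] ...
+                * followed, in the second loop below, by
+                *   [flow_set 1][its queues ...] ...
+                */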
+               bcopy(pipe, bp, sizeof(*pipe));
+               pipe_bp->delay = (pipe_bp->delay * 1000) / hz;
+               /*
+                * XXX the following is a hack based on ->next being the
+                * first field in dn_pipe and dn_flow_set. The correct
+                * solution would be to move the dn_flow_set to the beginning
+                * of struct dn_pipe.
+                */
+               pipe_bp->next.sle_next = (struct dn_pipe *)DN_IS_PIPE;
+               /* Clean pointers. */
+               pipe_bp->head = pipe_bp->tail = NULL;
+               pipe_bp->fs.next.sle_next = NULL;
+               pipe_bp->fs.pipe = NULL;
+               pipe_bp->fs.rq = NULL;
+               pipe_bp->samples = NULL;
+
+               bp += sizeof(*pipe) ;
+               bp = dn_copy_set(&(pipe->fs), bp);
+       }
+
+    for (i = 0; i < HASHSIZE; i++) 
+       SLIST_FOREACH(fs, &flowsethash[i], next) {
+               struct dn_flow_set *fs_bp = (struct dn_flow_set *)bp;
+
+               bcopy(fs, bp, sizeof(*fs));
+               /* XXX same hack as above */
+               fs_bp->next.sle_next = (struct dn_flow_set *)DN_IS_QUEUE;
+               fs_bp->pipe = NULL;
+               fs_bp->rq = NULL;
+               bp += sizeof(*fs);
+               bp = dn_copy_set(fs, bp);
+       }
+
+    DUMMYNET_UNLOCK();
+
+    error = sooptcopyout(sopt, buf, size);
+    free(buf, M_TEMP);
+    return error ;
+}
+
+/*
+ * Handler for the various dummynet socket options (get, flush, config, del)
+ */
+static int
+ip_dn_ctl(struct sockopt *sopt)
+{
+    int error;
+    struct dn_pipe *p = NULL;
+
+    error = priv_check(sopt->sopt_td, PRIV_NETINET_DUMMYNET);
+    if (error)
+       return (error);
+
+    /* Disallow sets in really-really secure mode. */
+    if (sopt->sopt_dir == SOPT_SET) {
+#if __FreeBSD_version >= 500034
+       error =  securelevel_ge(sopt->sopt_td->td_ucred, 3);
+       if (error)
+           return (error);
+#else
+       if (securelevel >= 3)
+           return (EPERM);
+#endif
+    }
+
+    switch (sopt->sopt_name) {
+    default :
+       printf("dummynet: -- unknown option %d", sopt->sopt_name);
+        error = EINVAL ;
+       break ;
+
+    case IP_DUMMYNET_GET :
+       error = dummynet_get(sopt);
+       break ;
+
+    case IP_DUMMYNET_FLUSH :
+       dummynet_flush() ;
+       break ;
+
+    case IP_DUMMYNET_CONFIGURE :
+       p = malloc(sizeof(struct dn_pipe_max), M_TEMP, M_WAITOK);
+       error = sooptcopyin(sopt, p, sizeof(struct dn_pipe_max), sizeof *p);
+       if (error)
+           break ;
+       if (p->samples_no > 0)
+           p->samples = &( ((struct dn_pipe_max*) p)->samples[0] );
+
+       error = config_pipe(p);
+       break ;
+
+    case IP_DUMMYNET_DEL :     /* remove a pipe or queue */
+       p = malloc(sizeof(struct dn_pipe_max), M_TEMP, M_WAITOK);
+       error = sooptcopyin(sopt, p, sizeof *p, sizeof *p);
+       if (error)
+           break ;
+
+       error = delete_pipe(p);
+       break ;
+    }
+
+    if (p != NULL)
+       free(p, M_TEMP);
+
+    return error ;
+}
+
+static void
+ip_dn_init(void)
+{
+       int i;
+
+       if (bootverbose)
+               printf("DUMMYNET with IPv6 initialized (040826)\n");
+
+       DUMMYNET_LOCK_INIT();
+
+       for (i = 0; i < HASHSIZE; i++) {
+               SLIST_INIT(&pipehash[i]);
+               SLIST_INIT(&flowsethash[i]);
+       }
+       ready_heap.size = ready_heap.elements = 0;
+       ready_heap.offset = 0;
+
+       wfq_ready_heap.size = wfq_ready_heap.elements = 0;
+       wfq_ready_heap.offset = 0;
+
+       extract_heap.size = extract_heap.elements = 0;
+       extract_heap.offset = 0;
+
+       ip_dn_ctl_ptr = ip_dn_ctl;
+       ip_dn_io_ptr = dummynet_io;
+       ip_dn_ruledel_ptr = dn_rule_delete;
+
+       TASK_INIT(&dn_task, 0, dummynet_task, NULL);
+       dn_tq = taskqueue_create_fast("dummynet", M_NOWAIT,
+           taskqueue_thread_enqueue, &dn_tq);
+       taskqueue_start_threads(&dn_tq, 1, PI_NET, "dummynet");
+
+       callout_init(&dn_timeout, CALLOUT_MPSAFE);
+       callout_reset(&dn_timeout, 1, dummynet, NULL);
+       /* Initialize curr_time adjustment mechanics. */
+       getmicrouptime(&prev_t);
+}
+
+#ifdef KLD_MODULE
+static void
+ip_dn_destroy(void)
+{
+       ip_dn_ctl_ptr = NULL;
+       ip_dn_io_ptr = NULL;
+       ip_dn_ruledel_ptr = NULL;
+
+       DUMMYNET_LOCK();
+       callout_stop(&dn_timeout);
+       DUMMYNET_UNLOCK();
+       taskqueue_drain(dn_tq, &dn_task);
+       taskqueue_free(dn_tq);
+
+       dummynet_flush();
+
+       DUMMYNET_LOCK_DESTROY();
+}
+#endif /* KLD_MODULE */
+
+static int
+dummynet_modevent(module_t mod, int type, void *data)
+{
+
+	switch (type) {
+	case MOD_LOAD:
+		if (ip_dn_io_ptr) {
+			printf("DUMMYNET already loaded\n");
+			return EEXIST;
+		}
+		ip_dn_init();
+		break;
+
+	case MOD_UNLOAD:
+#if !defined(KLD_MODULE)
+		printf("dummynet statically compiled, cannot unload\n");
+		return EINVAL;
+#else
+		ip_dn_destroy();
+#endif
+		break;
+
+	default:
+		return EOPNOTSUPP;
+	}
+	return 0;
+}
+
+static moduledata_t dummynet_mod = {
+       "dummynet",
+       dummynet_modevent,
+       NULL
+};
+DECLARE_MODULE(dummynet, dummynet_mod, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY);
+MODULE_DEPEND(dummynet, ipfw, 2, 2, 2);
+MODULE_VERSION(dummynet, 1);
diff --git a/dummynet/ip_fw2.c b/dummynet/ip_fw2.c
new file mode 100644 (file)
index 0000000..bdcfe11
--- /dev/null
@@ -0,0 +1,4665 @@
+/*-
+ * Copyright (c) 2002 Luigi Rizzo, Universita` di Pisa
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: src/sys/netinet/ip_fw2.c,v 1.175.2.13 2008/10/30 16:29:04 bz Exp $");
+
+#define        DEB(x)
+#define        DDB(x) x
+
+/*
+ * Implement IP packet firewall (new version)
+ */
+
+#if !defined(KLD_MODULE)
+#include "opt_ipfw.h"
+#include "opt_ipdivert.h"
+#include "opt_ipdn.h"
+#include "opt_inet.h"
+#ifndef INET
+#error IPFIREWALL requires INET.
+#endif /* INET */
+#endif
+#include "opt_inet6.h"
+#include "opt_ipsec.h"
+#include "opt_mac.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/jail.h>
+#include <sys/module.h>
+#include <sys/priv.h>
+#include <sys/proc.h>
+#include <sys/socket.h>
+#include <sys/socketvar.h>
+#include <sys/sysctl.h>
+#include <sys/syslog.h>
+#include <sys/ucred.h>
+#include <net/ethernet.h> /* for ETHERTYPE_IP */
+#include <net/if.h>
+#include <net/radix.h>
+#include <net/route.h>
+#include <net/pf_mtag.h>
+
+#define        IPFW_INTERNAL   /* Access to protected data structures in ip_fw.h. */
+
+#include <netinet/in.h>
+#include <netinet/in_var.h>
+#include <netinet/in_pcb.h>
+#include <netinet/ip.h>
+#include <netinet/ip_var.h>
+#include <netinet/ip_icmp.h>
+#include <netinet/ip_fw.h>
+#include <netinet/ip_divert.h>
+#include <netinet/ip_dummynet.h>
+#include <netinet/ip_carp.h>
+#include <netinet/pim.h>
+#include <netinet/tcp_var.h>
+#include <netinet/udp.h>
+#include <netinet/udp_var.h>
+#include <netinet/sctp.h>
+#include <netgraph/ng_ipfw.h>
+
+#include <netinet/ip6.h>
+#include <netinet/icmp6.h>
+#ifdef INET6
+#include <netinet6/scope6_var.h>
+#endif
+
+#include <machine/in_cksum.h>  /* XXX for in_cksum */
+
+#ifdef MAC
+#include <security/mac/mac_framework.h>
+#endif
+
+#include "missing.h"
+
+/*
+ * set_disable contains one bit per set value (0..31).
+ * If the bit is set, all rules with the corresponding set
+ * are disabled. Set RESVD_SET(31) is reserved for the default rule
+ * and rules that are not deleted by the flush command,
+ * and CANNOT be disabled.
+ * Rules in set RESVD_SET can only be deleted explicitly.
+ */
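+/*
+ * Illustrative sketch (not part of the original sources): testing whether
+ * a rule's set is disabled is a single bit operation, roughly
+ *
+ *	if (set_disable & (1 << f->set))
+ *		... skip this rule ...
+ *
+ * RESVD_SET never matches because its bit is never allowed to be set.
+ */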
+static u_int32_t set_disable;
+static int fw_verbose;
+static struct callout ipfw_timeout;
+static int verbose_limit;
+
+static uma_zone_t ipfw_dyn_rule_zone;
+
+/*
+ * Data structure to cache our ucred-related information. This structure
+ * is only used if the user specified UID/GID-based constraints in a
+ * firewall rule.
+ */
+struct ip_fw_ugid {
+       gid_t           fw_groups[NGROUPS];
+       int             fw_ngroups;
+       uid_t           fw_uid;
+       int             fw_prid;
+};
+
+/*
+ * list of rules for layer 3
+ */
+struct ip_fw_chain layer3_chain;
+
+MALLOC_DEFINE(M_IPFW, "IpFw/IpAcct", "IpFw/IpAcct chain's");
+MALLOC_DEFINE(M_IPFW_TBL, "ipfw_tbl", "IpFw tables");
+#define IPFW_NAT_LOADED (ipfw_nat_ptr != NULL)
+ipfw_nat_t *ipfw_nat_ptr = NULL;
+ipfw_nat_cfg_t *ipfw_nat_cfg_ptr;
+ipfw_nat_cfg_t *ipfw_nat_del_ptr;
+ipfw_nat_cfg_t *ipfw_nat_get_cfg_ptr;
+ipfw_nat_cfg_t *ipfw_nat_get_log_ptr;
+
+struct table_entry {
+       struct radix_node       rn[2];
+       struct sockaddr_in      addr, mask;
+       u_int32_t               value;
+};
+
+static int autoinc_step = 100; /* bounded to 1..1000 in add_rule() */
+
+extern int ipfw_chg_hook(SYSCTL_HANDLER_ARGS);
+
+#ifdef SYSCTL_NODE
+SYSCTL_NODE(_net_inet_ip, OID_AUTO, fw, CTLFLAG_RW, 0, "Firewall");
+SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, enable,
+    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_SECURE3, &fw_enable, 0,
+    ipfw_chg_hook, "I", "Enable ipfw");
+SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, autoinc_step, CTLFLAG_RW,
+    &autoinc_step, 0, "Rule number autoincrement step");
+SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, one_pass,
+    CTLFLAG_RW | CTLFLAG_SECURE3,
+    &fw_one_pass, 0,
+    "Only do a single pass through ipfw when using dummynet(4)");
+SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, verbose,
+    CTLFLAG_RW | CTLFLAG_SECURE3,
+    &fw_verbose, 0, "Log matches to ipfw rules");
+SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, verbose_limit, CTLFLAG_RW,
+    &verbose_limit, 0, "Set upper limit of matches of ipfw rules logged");
+SYSCTL_UINT(_net_inet_ip_fw, OID_AUTO, default_rule, CTLFLAG_RD,
+    NULL, IPFW_DEFAULT_RULE, "The default/max possible rule number.");
+SYSCTL_UINT(_net_inet_ip_fw, OID_AUTO, tables_max, CTLFLAG_RD,
+    NULL, IPFW_TABLES_MAX, "The maximum number of tables.");
+#endif /* SYSCTL_NODE */
+
+/*
+ * Description of dynamic rules.
+ *
+ * Dynamic rules are stored in lists accessed through a hash table
+ * (ipfw_dyn_v) whose size is curr_dyn_buckets. This value can
+ * be modified through the sysctl variable dyn_buckets; the change
+ * takes effect when the table becomes empty.
+ *
+ * XXX currently there is only one list, ipfw_dyn.
+ *
+ * When a packet is received, its address fields are first masked
+ * with the mask defined for the rule, then hashed, then matched
+ * against the entries in the corresponding list.
+ * Dynamic rules can be used for different purposes:
+ *  + stateful rules;
+ *  + enforcing limits on the number of sessions;
+ *  + in-kernel NAT (not implemented yet)
+ *
+ * The lifetime of dynamic rules is regulated by dyn_*_lifetime,
+ * measured in seconds and depending on the flags.
+ *
+ * The total number of dynamic rules is stored in dyn_count.
+ * The max number of dynamic rules is dyn_max. When we reach
+ * the maximum number of rules, no more are created. This avoids
+ * consuming too much memory, but also too much time searching on
+ * each packet (ideally, we should instead put a limit on the
+ * length of the list in each bucket...).
+ *
+ * Each dynamic rule holds a pointer to the parent ipfw rule so
+ * we know what action to perform. Dynamic rules are removed when
+ * the parent rule is deleted. XXX we should make them survive.
+ *
+ * There are some limitations with dynamic rules -- we do not
+ * obey the 'randomized match', and we do not do multiple
+ * passes through the firewall. XXX check the latter!!!
+ */
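+/*
+ * Back-of-the-envelope example (not part of the original sources): with
+ * the defaults below, curr_dyn_buckets = 256 and dyn_max = 4096, a full
+ * table averages 4096 / 256 = 16 entries per hash bucket, which bounds
+ * the per-packet search cost mentioned above.
+ */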
+static ipfw_dyn_rule **ipfw_dyn_v = NULL;
+static u_int32_t dyn_buckets = 256; /* must be power of 2 */
+static u_int32_t curr_dyn_buckets = 256; /* must be power of 2 */
+
+#if defined( __linux__ ) || defined( _WIN32 )
+DEFINE_SPINLOCK(ipfw_dyn_mtx);
+#else
+static struct mtx ipfw_dyn_mtx;                /* mutex guarding dynamic rules */
+#endif /* !__linux__ */
+#define        IPFW_DYN_LOCK_INIT() \
+       mtx_init(&ipfw_dyn_mtx, "IPFW dynamic rules", NULL, MTX_DEF)
+#define        IPFW_DYN_LOCK_DESTROY() mtx_destroy(&ipfw_dyn_mtx)
+#define        IPFW_DYN_LOCK()         mtx_lock(&ipfw_dyn_mtx)
+#define        IPFW_DYN_UNLOCK()       mtx_unlock(&ipfw_dyn_mtx)
+#define        IPFW_DYN_LOCK_ASSERT()  mtx_assert(&ipfw_dyn_mtx, MA_OWNED)
+
+/*
+ * Timeouts for various events in handling dynamic rules.
+ */
+static u_int32_t dyn_ack_lifetime = 300;
+static u_int32_t dyn_syn_lifetime = 20;
+static u_int32_t dyn_fin_lifetime = 1;
+static u_int32_t dyn_rst_lifetime = 1;
+static u_int32_t dyn_udp_lifetime = 10;
+static u_int32_t dyn_short_lifetime = 5;
+
+/*
+ * Keepalives are sent if dyn_keepalive is set. They are sent every
+ * dyn_keepalive_period seconds, in the last dyn_keepalive_interval
+ * seconds of lifetime of a rule.
+ * dyn_rst_lifetime and dyn_fin_lifetime should be strictly lower
+ * than dyn_keepalive_period.
+ */
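+/*
+ * Worked example (not part of the original sources): with the defaults
+ * below (interval 20 s, period 5 s), a dynamic rule expiring at time T
+ * receives keepalives roughly at T-20, T-15, T-10 and T-5, i.e. up to
+ * four probes before the rule is allowed to expire.
+ */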
+
+static u_int32_t dyn_keepalive_interval = 20;
+static u_int32_t dyn_keepalive_period = 5;
+static u_int32_t dyn_keepalive = 1;    /* do send keepalives */
+
+static u_int32_t static_count; /* # of static rules */
+static u_int32_t static_len;   /* size in bytes of static rules */
+static u_int32_t dyn_count;            /* # of dynamic rules */
+static u_int32_t dyn_max = 4096;       /* max # of dynamic rules */
+
+#ifdef SYSCTL_NODE
+SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_buckets, CTLFLAG_RW,
+    &dyn_buckets, 0, "Number of dyn. buckets");
+SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, curr_dyn_buckets, CTLFLAG_RD,
+    &curr_dyn_buckets, 0, "Current Number of dyn. buckets");
+SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_count, CTLFLAG_RD,
+    &dyn_count, 0, "Number of dyn. rules");
+SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_max, CTLFLAG_RW,
+    &dyn_max, 0, "Max number of dyn. rules");
+SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, static_count, CTLFLAG_RD,
+    &static_count, 0, "Number of static rules");
+SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_ack_lifetime, CTLFLAG_RW,
+    &dyn_ack_lifetime, 0, "Lifetime of dyn. rules for acks");
+SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_syn_lifetime, CTLFLAG_RW,
+    &dyn_syn_lifetime, 0, "Lifetime of dyn. rules for syn");
+SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_fin_lifetime, CTLFLAG_RW,
+    &dyn_fin_lifetime, 0, "Lifetime of dyn. rules for fin");
+SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_rst_lifetime, CTLFLAG_RW,
+    &dyn_rst_lifetime, 0, "Lifetime of dyn. rules for rst");
+SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_udp_lifetime, CTLFLAG_RW,
+    &dyn_udp_lifetime, 0, "Lifetime of dyn. rules for UDP");
+SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_short_lifetime, CTLFLAG_RW,
+    &dyn_short_lifetime, 0, "Lifetime of dyn. rules for other situations");
+SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_keepalive, CTLFLAG_RW,
+    &dyn_keepalive, 0, "Enable keepalives for dyn. rules");
+#endif /* SYSCTL_NODE */
+
+#ifdef INET6
+/*
+ * IPv6 specific variables
+ */
+#ifdef SYSCTL_NODE
+SYSCTL_DECL(_net_inet6_ip6);
+#endif /* SYSCTL_NODE */
+
+static struct sysctl_ctx_list ip6_fw_sysctl_ctx;
+static struct sysctl_oid *ip6_fw_sysctl_tree;
+#endif /* INET6 */
+
+static int fw_deny_unknown_exthdrs = 1;
+
+
+/*
+ * L3HDR maps an IPv4 header pointer into a layer-3 header pointer of type T.
+ * The other macros just cast void * into the appropriate type.
+ */
+#define        L3HDR(T, ip)    ((T *)((u_int32_t *)(ip) + (ip)->ip_hl))
+#define        TCP(p)          ((struct tcphdr *)(p))
+#define        SCTP(p)         ((struct sctphdr *)(p))
+#define        UDP(p)          ((struct udphdr *)(p))
+#define        ICMP(p)         ((struct icmphdr *)(p))
+#define        ICMP6(p)        ((struct icmp6_hdr *)(p))
+
+static __inline int
+icmptype_match(struct icmphdr *icmp, ipfw_insn_u32 *cmd)
+{
+       int type = icmp->icmp_type;
+
+       return (type <= ICMP_MAXTYPE && (cmd->d[0] & (1<<type)) );
+}
+
+#define TT     ( (1 << ICMP_ECHO) | (1 << ICMP_ROUTERSOLICIT) | \
+    (1 << ICMP_TSTAMP) | (1 << ICMP_IREQ) | (1 << ICMP_MASKREQ) )
+
+static int
+is_icmp_query(struct icmphdr *icmp)
+{
+       int type = icmp->icmp_type;
+
+       return (type <= ICMP_MAXTYPE && (TT & (1<<type)) );
+}
+#undef TT
+
+/*
+ * The following checks use two bitmasks of 8 or 16 bits to store the
+ * bits that we want set or clear, respectively. They are kept in the
+ * low and high half of cmd->arg1 or cmd->d[0].
+ *
+ * We scan options and store the bits we find set. We succeed if
+ *
+ *	(want_set & ~bits) == 0 && (want_clear & ~bits) == want_clear
+ *
+ * The code is sometimes optimized to avoid storing additional variables.
+ */
+
+static int
+flags_match(ipfw_insn *cmd, u_int8_t bits)
+{
+       u_char want_clear;
+       bits = ~bits;
+
+       if ( ((cmd->arg1 & 0xff) & bits) != 0)
+               return 0; /* some bits we want set were clear */
+       want_clear = (cmd->arg1 >> 8) & 0xff;
+       if ( (want_clear & bits) != want_clear)
+               return 0; /* some bits we want clear were set */
+       return 1;
+}
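+
+/*
+ * Illustrative example (not part of the original sources): for a match
+ * such as "tcpflags syn,!ack", arg1 would encode TH_SYN (0x02) in the
+ * low byte (want set) and TH_ACK (0x10) in the high byte (want clear):
+ *
+ *	arg1 = TH_SYN | (TH_ACK << 8);		// 0x1002
+ *	flags_match(cmd, TH_SYN);		// 1: SYN set, ACK clear
+ *	flags_match(cmd, TH_SYN | TH_ACK);	// 0: ACK must be clear
+ */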
+
+static int
+ipopts_match(struct ip *ip, ipfw_insn *cmd)
+{
+       int optlen, bits = 0;
+       u_char *cp = (u_char *)(ip + 1);
+       int x = (ip->ip_hl << 2) - sizeof (struct ip);
+
+       for (; x > 0; x -= optlen, cp += optlen) {
+               int opt = cp[IPOPT_OPTVAL];
+
+               if (opt == IPOPT_EOL)
+                       break;
+               if (opt == IPOPT_NOP)
+                       optlen = 1;
+               else {
+                       optlen = cp[IPOPT_OLEN];
+                       if (optlen <= 0 || optlen > x)
+                               return 0; /* invalid or truncated */
+               }
+               switch (opt) {
+
+               default:
+                       break;
+
+               case IPOPT_LSRR:
+                       bits |= IP_FW_IPOPT_LSRR;
+                       break;
+
+               case IPOPT_SSRR:
+                       bits |= IP_FW_IPOPT_SSRR;
+                       break;
+
+               case IPOPT_RR:
+                       bits |= IP_FW_IPOPT_RR;
+                       break;
+
+               case IPOPT_TS:
+                       bits |= IP_FW_IPOPT_TS;
+                       break;
+               }
+       }
+       return (flags_match(cmd, bits));
+}
+
+static int
+tcpopts_match(struct tcphdr *tcp, ipfw_insn *cmd)
+{
+       int optlen, bits = 0;
+       u_char *cp = (u_char *)(tcp + 1);
+       int x = (tcp->th_off << 2) - sizeof(struct tcphdr);
+
+       for (; x > 0; x -= optlen, cp += optlen) {
+               int opt = cp[0];
+               if (opt == TCPOPT_EOL)
+                       break;
+               if (opt == TCPOPT_NOP)
+                       optlen = 1;
+               else {
+                       optlen = cp[1];
+                       if (optlen <= 0)
+                               break;
+               }
+
+               switch (opt) {
+
+               default:
+                       break;
+
+               case TCPOPT_MAXSEG:
+                       bits |= IP_FW_TCPOPT_MSS;
+                       break;
+
+               case TCPOPT_WINDOW:
+                       bits |= IP_FW_TCPOPT_WINDOW;
+                       break;
+
+               case TCPOPT_SACK_PERMITTED:
+               case TCPOPT_SACK:
+                       bits |= IP_FW_TCPOPT_SACK;
+                       break;
+
+               case TCPOPT_TIMESTAMP:
+                       bits |= IP_FW_TCPOPT_TS;
+                       break;
+
+               }
+       }
+       return (flags_match(cmd, bits));
+}
+
+static int
+iface_match(struct ifnet *ifp, ipfw_insn_if *cmd)
+{
+       if (ifp == NULL)        /* no iface with this packet, match fails */
+               return 0;
+       /* Check by name or by IP address */
+       if (cmd->name[0] != '\0') { /* match by name */
+               /* Check name */
+               if (cmd->p.glob) {
+                       if (fnmatch(cmd->name, ifp->if_xname, 0) == 0)
+                               return(1);
+               } else {
+                       if (strncmp(ifp->if_xname, cmd->name, IFNAMSIZ) == 0)
+                               return(1);
+               }
+       } else {
+#if !defined( __linux__ ) && !defined( _WIN32 )
+               struct ifaddr *ia;
+
+               /* XXX lock? */
+               TAILQ_FOREACH(ia, &ifp->if_addrhead, ifa_link) {
+                       if (ia->ifa_addr->sa_family != AF_INET)
+                               continue;
+                       if (cmd->p.ip.s_addr == ((struct sockaddr_in *)
+                           (ia->ifa_addr))->sin_addr.s_addr)
+                               return(1);      /* match */
+               }
+#endif
+       }
+       return(0);      /* no match, fail ... */
+}
+
+#if !defined( __linux__ ) && !defined( _WIN32 )
+/*
+ * The verify_path function checks if a route to the src exists and
+ * if it is reachable via ifp (when provided).
+ * 
+ * The 'verrevpath' option checks that the interface that an IP packet
+ * arrives on is the same interface that traffic destined for the
+ * packet's source address would be routed out of.  The 'versrcreach'
+ * option just checks that the source address is reachable via any route
+ * (except default) in the routing table.  These two are a measure to block
+ * forged packets.  This is also commonly known as "anti-spoofing" or Unicast
+ * Reverse Path Forwarding (Unicast RPF) in Cisco-ese. The names of the knobs
+ * are purposely reminiscent of the Cisco IOS commands,
+ *
+ *   ip verify unicast reverse-path
+ *   ip verify unicast source reachable-via any
+ *
+ * which implement the same functionality. But note that the syntax is
+ * misleading. The check may be performed on all IP packets, whether unicast,
+ * multicast, or broadcast.
+ */
+static int
+verify_path(struct in_addr src, struct ifnet *ifp, u_int fib)
+{
+       struct route ro;
+       struct sockaddr_in *dst;
+
+       bzero(&ro, sizeof(ro));
+
+       dst = (struct sockaddr_in *)&(ro.ro_dst);
+       dst->sin_family = AF_INET;
+       dst->sin_len = sizeof(*dst);
+       dst->sin_addr = src;
+       in_rtalloc_ign(&ro, RTF_CLONING, fib);
+
+       if (ro.ro_rt == NULL)
+               return 0;
+
+       /*
+        * If ifp is provided, check for equality with rtentry.
+        * We should use rt->rt_ifa->ifa_ifp, instead of rt->rt_ifp,
+        * in order to pass packets injected back by if_simloop():
+	 * if useloopback == 1, a routing entry (via lo0) for our own address
+	 * may exist, so we need to handle routing asymmetry.
+        */
+       if (ifp != NULL && ro.ro_rt->rt_ifa->ifa_ifp != ifp) {
+               RTFREE(ro.ro_rt);
+               return 0;
+       }
+
+       /* if no ifp provided, check if rtentry is not default route */
+       if (ifp == NULL &&
+            satosin(rt_key(ro.ro_rt))->sin_addr.s_addr == INADDR_ANY) {
+               RTFREE(ro.ro_rt);
+               return 0;
+       }
+
+       /* or if this is a blackhole/reject route */
+       if (ifp == NULL && ro.ro_rt->rt_flags & (RTF_REJECT|RTF_BLACKHOLE)) {
+               RTFREE(ro.ro_rt);
+               return 0;
+       }
+
+       /* found valid route */
+       RTFREE(ro.ro_rt);
+       return 1;
+}
+#endif
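+
+/*
+ * Usage sketch (not part of the original sources): these checks are
+ * typically enabled from userland with rules such as
+ *
+ *	ipfw add deny ip from any to any not verrevpath in
+ *	ipfw add deny ip from any to any not versrcreach
+ *
+ * the first drops packets arriving on the "wrong" interface, the second
+ * drops packets whose source address has no (non-default) route.
+ */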
+
+#ifdef INET6
+/*
+ * ipv6 specific rules here...
+ */
+static __inline int
+icmp6type_match (int type, ipfw_insn_u32 *cmd)
+{
+       return (type <= ICMP6_MAXTYPE && (cmd->d[type/32] & (1<<(type%32)) ) );
+}
+
+static int
+flow6id_match( int curr_flow, ipfw_insn_u32 *cmd )
+{
+       int i;
+       for (i=0; i <= cmd->o.arg1; ++i )
+               if (curr_flow == cmd->d[i] )
+                       return 1;
+       return 0;
+}
+
+/* support for IP6_*_ME opcodes */
+static int
+search_ip6_addr_net (struct in6_addr * ip6_addr)
+{
+       struct ifnet *mdc;
+       struct ifaddr *mdc2;
+       struct in6_ifaddr *fdm;
+       struct in6_addr copia;
+
+       TAILQ_FOREACH(mdc, &ifnet, if_link)
+               TAILQ_FOREACH(mdc2, &mdc->if_addrlist, ifa_list) {
+                       if (mdc2->ifa_addr->sa_family == AF_INET6) {
+                               fdm = (struct in6_ifaddr *)mdc2;
+                               copia = fdm->ia_addr.sin6_addr;
+                               /* clear the embedded scope_id from the copy before comparing */
+                               in6_clearscope(&copia);
+                               if (IN6_ARE_ADDR_EQUAL(ip6_addr, &copia))
+                                       return 1;
+                       }
+               }
+       return 0;
+}
+
+static int
+verify_path6(struct in6_addr *src, struct ifnet *ifp)
+{
+       struct route_in6 ro;
+       struct sockaddr_in6 *dst;
+
+       bzero(&ro, sizeof(ro));
+
+	dst = (struct sockaddr_in6 *)&(ro.ro_dst);
+       dst->sin6_family = AF_INET6;
+       dst->sin6_len = sizeof(*dst);
+       dst->sin6_addr = *src;
+       /* XXX MRT 0 for ipv6 at this time */
+       rtalloc_ign((struct route *)&ro, RTF_CLONING);
+
+       if (ro.ro_rt == NULL)
+               return 0;
+
+       /* 
+        * if ifp is provided, check for equality with rtentry
+        * We should use rt->rt_ifa->ifa_ifp, instead of rt->rt_ifp,
+        * to support the case of sending packets to an address of our own.
+        * (where the former interface is the first argument of if_simloop()
+        *  (=ifp), the latter is lo0)
+        */
+       if (ifp != NULL && ro.ro_rt->rt_ifa->ifa_ifp != ifp) {
+               RTFREE(ro.ro_rt);
+               return 0;
+       }
+
+       /* if no ifp provided, check if rtentry is not default route */
+       if (ifp == NULL &&
+           IN6_IS_ADDR_UNSPECIFIED(&satosin6(rt_key(ro.ro_rt))->sin6_addr)) {
+               RTFREE(ro.ro_rt);
+               return 0;
+       }
+
+       /* or if this is a blackhole/reject route */
+       if (ifp == NULL && ro.ro_rt->rt_flags & (RTF_REJECT|RTF_BLACKHOLE)) {
+               RTFREE(ro.ro_rt);
+               return 0;
+       }
+
+       /* found valid route */
+       RTFREE(ro.ro_rt);
+	return 1;
+}
+
+static __inline int
+hash_packet6(struct ipfw_flow_id *id)
+{
+       u_int32_t i;
+       i = (id->dst_ip6.__u6_addr.__u6_addr32[2]) ^
+           (id->dst_ip6.__u6_addr.__u6_addr32[3]) ^
+           (id->src_ip6.__u6_addr.__u6_addr32[2]) ^
+           (id->src_ip6.__u6_addr.__u6_addr32[3]) ^
+           (id->dst_port) ^ (id->src_port);
+       return i;
+}
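+
+/*
+ * Note (not part of the original sources): only __u6_addr32[2] and [3],
+ * i.e. the low 64 bits of each address, feed the hash, so two flows whose
+ * addresses differ only in the top 64 bits land in the same bucket.
+ */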
+
+static int
+is_icmp6_query(int icmp6_type)
+{
+       if ((icmp6_type <= ICMP6_MAXTYPE) &&
+           (icmp6_type == ICMP6_ECHO_REQUEST ||
+           icmp6_type == ICMP6_MEMBERSHIP_QUERY ||
+           icmp6_type == ICMP6_WRUREQUEST ||
+           icmp6_type == ICMP6_FQDN_QUERY ||
+           icmp6_type == ICMP6_NI_QUERY))
+               return (1);
+
+       return (0);
+}
+
+static void
+send_reject6(struct ip_fw_args *args, int code, u_int hlen, struct ip6_hdr *ip6)
+{
+       struct mbuf *m;
+
+       m = args->m;
+       if (code == ICMP6_UNREACH_RST && args->f_id.proto == IPPROTO_TCP) {
+               struct tcphdr *tcp;
+               tcp_seq ack, seq;
+               int flags;
+               struct {
+                       struct ip6_hdr ip6;
+                       struct tcphdr th;
+               } ti;
+               tcp = (struct tcphdr *)((char *)ip6 + hlen);
+
+               if ((tcp->th_flags & TH_RST) != 0) {
+                       m_freem(m);
+                       args->m = NULL;
+                       return;
+               }
+
+               ti.ip6 = *ip6;
+               ti.th = *tcp;
+               ti.th.th_seq = ntohl(ti.th.th_seq);
+               ti.th.th_ack = ntohl(ti.th.th_ack);
+               ti.ip6.ip6_nxt = IPPROTO_TCP;
+
+               if (ti.th.th_flags & TH_ACK) {
+                       ack = 0;
+                       seq = ti.th.th_ack;
+                       flags = TH_RST;
+               } else {
+                       ack = ti.th.th_seq;
+                       if ((m->m_flags & M_PKTHDR) != 0) {
+                               /*
+                                * total new data to ACK is:
+                                * total packet length,
+                                * minus the header length,
+                                * minus the tcp header length.
+                                */
+                               ack += m->m_pkthdr.len - hlen
+                                       - (ti.th.th_off << 2);
+                       } else if (ip6->ip6_plen) {
+                               ack += ntohs(ip6->ip6_plen) + sizeof(*ip6) -
+                                   hlen - (ti.th.th_off << 2);
+                       } else {
+                               m_freem(m);
+                               return;
+                       }
+                       if (tcp->th_flags & TH_SYN)
+                               ack++;
+                       seq = 0;
+                       flags = TH_RST|TH_ACK;
+               }
+               bcopy(&ti, ip6, sizeof(ti));
+               /*
+                * m is only used to recycle the mbuf
+                * The data in it is never read so we don't need
+                * to correct the offsets or anything
+                */
+               tcp_respond(NULL, ip6, tcp, m, ack, seq, flags);
+       } else if (code != ICMP6_UNREACH_RST) { /* Send an ICMPv6 unreach. */
+#if 0
+               /*
+                * Unlike above, the mbufs need to line up with the ip6 hdr,
+                * as the contents are read. We need to m_adj() the
+                * needed amount.
+                * The mbuf will however be thrown away so we can adjust it.
+                * Remember we did an m_pullup on it already so we
+                * can make some assumptions about contiguousness.
+                */
+               if (args->L3offset)
+                       m_adj(m, args->L3offset);
+#endif
+               icmp6_error(m, ICMP6_DST_UNREACH, code, 0);
+       } else
+               m_freem(m);
+
+       args->m = NULL;
+}
+
+#endif /* INET6 */
+
+static u_int64_t norule_counter;       /* counter for ipfw_log(NULL...) */
+
+#define SNPARGS(buf, len) buf + len, sizeof(buf) > len ? sizeof(buf) - len : 0
+#define SNP(buf) buf, sizeof(buf)
+
+/*
+ * We enter here when we have a rule with O_LOG.
+ * XXX this function alone takes about 2Kbytes of code!
+ */
+static void
+ipfw_log(struct ip_fw *f, u_int hlen, struct ip_fw_args *args,
+    struct mbuf *m, struct ifnet *oif, u_short offset, uint32_t tablearg,
+    struct ip *ip)
+{
+       struct ether_header *eh = args->eh;
+       char *action;
+       int limit_reached = 0;
+       char action2[40], proto[128], fragment[32];
+
+       fragment[0] = '\0';
+       proto[0] = '\0';
+
+       if (f == NULL) {        /* bogus pkt */
+               if (verbose_limit != 0 && norule_counter >= verbose_limit)
+                       return;
+               norule_counter++;
+               if (norule_counter == verbose_limit)
+                       limit_reached = verbose_limit;
+               action = "Refuse";
+       } else {        /* O_LOG is the first action, find the real one */
+               ipfw_insn *cmd = ACTION_PTR(f);
+               ipfw_insn_log *l = (ipfw_insn_log *)cmd;
+
+               if (l->max_log != 0 && l->log_left == 0)
+                       return;
+               l->log_left--;
+               if (l->log_left == 0)
+                       limit_reached = l->max_log;
+               cmd += F_LEN(cmd);      /* point to first action */
+               if (cmd->opcode == O_ALTQ) {
+                       ipfw_insn_altq *altq = (ipfw_insn_altq *)cmd;
+
+                       snprintf(SNPARGS(action2, 0), "Altq %d",
+                               altq->qid);
+                       cmd += F_LEN(cmd);
+               }
+               if (cmd->opcode == O_PROB)
+                       cmd += F_LEN(cmd);
+
+               if (cmd->opcode == O_TAG)
+                       cmd += F_LEN(cmd);
+
+               action = action2;
+               switch (cmd->opcode) {
+               case O_DENY:
+                       action = "Deny";
+                       break;
+
+               case O_REJECT:
+                       if (cmd->arg1==ICMP_REJECT_RST)
+                               action = "Reset";
+                       else if (cmd->arg1==ICMP_UNREACH_HOST)
+                               action = "Reject";
+                       else
+                               snprintf(SNPARGS(action2, 0), "Unreach %d",
+                                       cmd->arg1);
+                       break;
+
+               case O_UNREACH6:
+                       if (cmd->arg1==ICMP6_UNREACH_RST)
+                               action = "Reset";
+                       else
+                               snprintf(SNPARGS(action2, 0), "Unreach %d",
+                                       cmd->arg1);
+                       break;
+
+               case O_ACCEPT:
+                       action = "Accept";
+                       break;
+               case O_COUNT:
+                       action = "Count";
+                       break;
+               case O_DIVERT:
+                       snprintf(SNPARGS(action2, 0), "Divert %d",
+                               cmd->arg1);
+                       break;
+               case O_TEE:
+                       snprintf(SNPARGS(action2, 0), "Tee %d",
+                               cmd->arg1);
+                       break;
+               case O_SETFIB:
+                       snprintf(SNPARGS(action2, 0), "SetFib %d",
+                               cmd->arg1);
+                       break;
+               case O_SKIPTO:
+                       snprintf(SNPARGS(action2, 0), "SkipTo %d",
+                               cmd->arg1);
+                       break;
+               case O_PIPE:
+                       snprintf(SNPARGS(action2, 0), "Pipe %d",
+                               cmd->arg1);
+                       break;
+               case O_QUEUE:
+                       snprintf(SNPARGS(action2, 0), "Queue %d",
+                               cmd->arg1);
+                       break;
+               case O_FORWARD_IP: {
+                       ipfw_insn_sa *sa = (ipfw_insn_sa *)cmd;
+                       int len;
+                       struct in_addr dummyaddr;
+                       if (sa->sa.sin_addr.s_addr == INADDR_ANY)
+                               dummyaddr.s_addr = htonl(tablearg);
+                       else
+                               dummyaddr.s_addr = sa->sa.sin_addr.s_addr;
+
+                       len = snprintf(SNPARGS(action2, 0), "Forward to %s",
+                               inet_ntoa(dummyaddr));
+
+                       if (sa->sa.sin_port)
+                               snprintf(SNPARGS(action2, len), ":%d",
+                                   sa->sa.sin_port);
+                       }
+                       break;
+               case O_NETGRAPH:
+                       snprintf(SNPARGS(action2, 0), "Netgraph %d",
+                               cmd->arg1);
+                       break;
+               case O_NGTEE:
+                       snprintf(SNPARGS(action2, 0), "Ngtee %d",
+                               cmd->arg1);
+                       break;
+               case O_NAT:
+                       action = "Nat";
+                       break;
+               default:
+                       action = "UNKNOWN";
+                       break;
+               }
+       }
+
+       if (hlen == 0) {        /* non-ip */
+               snprintf(SNPARGS(proto, 0), "MAC");
+
+       } else {
+               int len;
+               char src[48], dst[48];
+               struct icmphdr *icmp;
+               struct tcphdr *tcp;
+               struct udphdr *udp;
+#ifdef INET6
+               struct ip6_hdr *ip6 = NULL;
+               struct icmp6_hdr *icmp6;
+#endif
+               src[0] = '\0';
+               dst[0] = '\0';
+#ifdef INET6
+               if (IS_IP6_FLOW_ID(&(args->f_id))) {
+                       char ip6buf[INET6_ADDRSTRLEN];
+                       snprintf(src, sizeof(src), "[%s]",
+                           ip6_sprintf(ip6buf, &args->f_id.src_ip6));
+                       snprintf(dst, sizeof(dst), "[%s]",
+                           ip6_sprintf(ip6buf, &args->f_id.dst_ip6));
+
+                       ip6 = (struct ip6_hdr *)ip;
+                       tcp = (struct tcphdr *)(((char *)ip) + hlen);
+                       udp = (struct udphdr *)(((char *)ip) + hlen);
+               } else
+#endif
+               {
+                       tcp = L3HDR(struct tcphdr, ip);
+                       udp = L3HDR(struct udphdr, ip);
+
+                       inet_ntoa_r(ip->ip_src, src);
+                       inet_ntoa_r(ip->ip_dst, dst);
+               }
+
+               switch (args->f_id.proto) {
+               case IPPROTO_TCP:
+                       len = snprintf(SNPARGS(proto, 0), "TCP %s", src);
+                       if (offset == 0)
+                               snprintf(SNPARGS(proto, len), ":%d %s:%d",
+                                   ntohs(tcp->th_sport),
+                                   dst,
+                                   ntohs(tcp->th_dport));
+                       else
+                               snprintf(SNPARGS(proto, len), " %s", dst);
+                       break;
+
+               case IPPROTO_UDP:
+                       len = snprintf(SNPARGS(proto, 0), "UDP %s", src);
+                       if (offset == 0)
+                               snprintf(SNPARGS(proto, len), ":%d %s:%d",
+                                   ntohs(udp->uh_sport),
+                                   dst,
+                                   ntohs(udp->uh_dport));
+                       else
+                               snprintf(SNPARGS(proto, len), " %s", dst);
+                       break;
+
+               case IPPROTO_ICMP:
+                       icmp = L3HDR(struct icmphdr, ip);
+                       if (offset == 0)
+                               len = snprintf(SNPARGS(proto, 0),
+                                   "ICMP:%u.%u ",
+                                   icmp->icmp_type, icmp->icmp_code);
+                       else
+                               len = snprintf(SNPARGS(proto, 0), "ICMP ");
+                       len += snprintf(SNPARGS(proto, len), "%s", src);
+                       snprintf(SNPARGS(proto, len), " %s", dst);
+                       break;
+#ifdef INET6
+               case IPPROTO_ICMPV6:
+                       icmp6 = (struct icmp6_hdr *)(((char *)ip) + hlen);
+                       if (offset == 0)
+                               len = snprintf(SNPARGS(proto, 0),
+                                   "ICMPv6:%u.%u ",
+                                   icmp6->icmp6_type, icmp6->icmp6_code);
+                       else
+                               len = snprintf(SNPARGS(proto, 0), "ICMPv6 ");
+                       len += snprintf(SNPARGS(proto, len), "%s", src);
+                       snprintf(SNPARGS(proto, len), " %s", dst);
+                       break;
+#endif
+               default:
+                       len = snprintf(SNPARGS(proto, 0), "P:%d %s",
+                           args->f_id.proto, src);
+                       snprintf(SNPARGS(proto, len), " %s", dst);
+                       break;
+               }
+
+#ifdef INET6
+               if (IS_IP6_FLOW_ID(&(args->f_id))) {
+                       if (offset & (IP6F_OFF_MASK | IP6F_MORE_FRAG))
+                               snprintf(SNPARGS(fragment, 0),
+                                   " (frag %08x:%d@%d%s)",
+                                   args->f_id.frag_id6,
+                                   ntohs(ip6->ip6_plen) - hlen,
+                                   ntohs(offset & IP6F_OFF_MASK) << 3,
+                                   (offset & IP6F_MORE_FRAG) ? "+" : "");
+               } else
+#endif
+               {
+                       int ip_off, ip_len;
+                       if (1 || eh != NULL) { /* layer 2 packets are as on the wire */
+                               ip_off = ntohs(ip->ip_off);
+                               ip_len = ntohs(ip->ip_len);
+                       } else {
+                               ip_off = ip->ip_off;
+                               ip_len = ip->ip_len;
+                       }
+                       if (ip_off & (IP_MF | IP_OFFMASK))
+                               snprintf(SNPARGS(fragment, 0),
+                                   " (frag %d:%d@%d%s)",
+                                   ntohs(ip->ip_id), ip_len - (ip->ip_hl << 2),
+                                   offset << 3,
+                                   (ip_off & IP_MF) ? "+" : "");
+               }
+       }
+       if (oif || m->m_pkthdr.rcvif)
+               log(LOG_SECURITY | LOG_INFO,
+                   "ipfw: %d %s %s %s via %s%s\n",
+                   f ? f->rulenum : -1,
+                   action, proto, oif ? "out" : "in",
+                   oif ? oif->if_xname : m->m_pkthdr.rcvif->if_xname,
+                   fragment);
+       else
+               log(LOG_SECURITY | LOG_INFO,
+                   "ipfw: %d %s %s [no if info]%s\n",
+                   f ? f->rulenum : -1,
+                   action, proto, fragment);
+       if (limit_reached)
+               log(LOG_SECURITY | LOG_NOTICE,
+                   "ipfw: limit %d reached on entry %d\n",
+                   limit_reached, f ? f->rulenum : -1);
+}
+
+/*
+ * IMPORTANT: the hash function for dynamic rules must be commutative
+ * in source and destination (ip,port), because rules are bidirectional
+ * and we want to find both in the same bucket.
+ */
+static __inline int
+hash_packet(struct ipfw_flow_id *id)
+{
+       u_int32_t i;
+
+#ifdef INET6
+       if (IS_IP6_FLOW_ID(id)) 
+               i = hash_packet6(id);
+       else
+#endif /* INET6 */
+       i = (id->dst_ip) ^ (id->src_ip) ^ (id->dst_port) ^ (id->src_port);
+       i &= (curr_dyn_buckets - 1);
+       return i;
+}
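+
+/*
+ * Illustrative check (not part of the original sources): because XOR is
+ * commutative, the forward and reverse directions of a flow hash to the
+ * same bucket, e.g. for 10.0.0.1:1025 <-> 10.0.0.2:80
+ *
+ *	(a ^ b ^ 1025 ^ 80) == (b ^ a ^ 80 ^ 1025)
+ *
+ * so lookup_dyn_rule_locked() below can find a state entry from either
+ * direction of the same session.
+ */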
+
+/**
+ * unlink a dynamic rule from a chain. prev is a pointer to
+ * the previous one, q is a pointer to the rule to delete,
+ * head is a pointer to the head of the queue.
+ * Modifies q and potentially also head.
+ */
+#define UNLINK_DYN_RULE(prev, head, q) {                               \
+       ipfw_dyn_rule *old_q = q;                                       \
+                                                                       \
+       /* remove a refcount to the parent */                           \
+       if (q->dyn_type == O_LIMIT)                                     \
+               q->parent->count--;                                     \
+       DEB(printf("ipfw: unlink entry 0x%08x %d -> 0x%08x %d, %d left\n",\
+               (q->id.src_ip), (q->id.src_port),                       \
+               (q->id.dst_ip), (q->id.dst_port), dyn_count-1 ); )      \
+       if (prev != NULL)                                               \
+               prev->next = q = q->next;                               \
+       else                                                            \
+               head = q = q->next;                                     \
+       dyn_count--;                                                    \
+       uma_zfree(ipfw_dyn_rule_zone, old_q); }
+
+#define TIME_LEQ(a,b)       ((int)((a)-(b)) <= 0)
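+
+/*
+ * Worked example (not part of the original sources): the cast makes
+ * TIME_LEQ robust against counter wraparound. With 32-bit times,
+ * a = 0xfffffff0 and b = 0x10 give (int)(a - b) = (int)0xffffffe0 < 0,
+ * so TIME_LEQ(a, b) correctly treats the pre-wrap timestamp as not
+ * later than the post-wrap one.
+ */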
+
+/**
+ * Remove dynamic rules pointing to "rule", or all of them if rule == NULL.
+ *
+ * If keep_me == NULL, rules are deleted even if not expired,
+ * otherwise only expired rules are removed.
+ *
+ * The value of the second parameter is also used to identify
+ * a rule we absolutely do not want to remove (e.g. because we are
+ * holding a reference to it -- this is the case with O_LIMIT_PARENT
+ * rules). The pointer is only used for comparison, so any non-null
+ * value will do.
+ */
+static void
+remove_dyn_rule(struct ip_fw *rule, ipfw_dyn_rule *keep_me)
+{
+       static u_int32_t last_remove = 0;
+
+#define FORCE (keep_me == NULL)
+
+       ipfw_dyn_rule *prev, *q;
+       int i, pass = 0, max_pass = 0;
+
+       IPFW_DYN_LOCK_ASSERT();
+
+       if (ipfw_dyn_v == NULL || dyn_count == 0)
+               return;
+	/* do not expire more than once per second; doing so more often is useless */
+       if (!FORCE && last_remove == time_uptime)
+               return;
+       last_remove = time_uptime;
+
+	/*
+	 * Because O_LIMIT rules refer to parent rules, during the first pass
+	 * we only remove child rules and mark any pending O_LIMIT_PARENT
+	 * rules, which are then removed in a second pass.
+	 */
+next_pass:
+       for (i = 0 ; i < curr_dyn_buckets ; i++) {
+               for (prev=NULL, q = ipfw_dyn_v[i] ; q ; ) {
+                       /*
+                        * Logic can become complex here, so we split tests.
+                        */
+                       if (q == keep_me)
+                               goto next;
+                       if (rule != NULL && rule != q->rule)
+                               goto next; /* not the one we are looking for */
+                       if (q->dyn_type == O_LIMIT_PARENT) {
+                               /*
+                                * handle parent in the second pass,
+                                * record we need one.
+                                */
+                               max_pass = 1;
+                               if (pass == 0)
+                                       goto next;
+                               if (FORCE && q->count != 0 ) {
+                                       /* XXX should not happen! */
+                                       printf("ipfw: OUCH! cannot remove rule,"
+                                            " count %d\n", q->count);
+                               }
+                       } else {
+                               if (!FORCE &&
+                                   !TIME_LEQ( q->expire, time_uptime ))
+                                       goto next;
+                       }
+			if (q->dyn_type != O_LIMIT_PARENT || !q->count) {
+				UNLINK_DYN_RULE(prev, ipfw_dyn_v[i], q);
+				continue;
+			}
+next:
+                       prev=q;
+                       q=q->next;
+               }
+       }
+       if (pass++ < max_pass)
+               goto next_pass;
+}
+
+
+/**
+ * lookup a dynamic rule.
+ */
+static ipfw_dyn_rule *
+lookup_dyn_rule_locked(struct ipfw_flow_id *pkt, int *match_direction,
+    struct tcphdr *tcp)
+{
+       /*
+        * stateful ipfw extensions.
+        * Lookup into dynamic session queue
+        */
+#define MATCH_REVERSE  0
+#define MATCH_FORWARD  1
+#define MATCH_NONE     2
+#define MATCH_UNKNOWN  3
+       int i, dir = MATCH_NONE;
+       ipfw_dyn_rule *prev, *q=NULL;
+
+       IPFW_DYN_LOCK_ASSERT();
+
+       if (ipfw_dyn_v == NULL)
+               goto done;      /* not found */
+       i = hash_packet( pkt );
+       for (prev=NULL, q = ipfw_dyn_v[i] ; q != NULL ; ) {
+               if (q->dyn_type == O_LIMIT_PARENT && q->count)
+                       goto next;
+               if (TIME_LEQ( q->expire, time_uptime)) { /* expire entry */
+                       UNLINK_DYN_RULE(prev, ipfw_dyn_v[i], q);
+                       continue;
+               }
+               if (pkt->proto == q->id.proto &&
+                   q->dyn_type != O_LIMIT_PARENT) {
+                       if (IS_IP6_FLOW_ID(pkt)) {
+                           if (IN6_ARE_ADDR_EQUAL(&(pkt->src_ip6),
+                               &(q->id.src_ip6)) &&
+                           IN6_ARE_ADDR_EQUAL(&(pkt->dst_ip6),
+                               &(q->id.dst_ip6)) &&
+                           pkt->src_port == q->id.src_port &&
+                           pkt->dst_port == q->id.dst_port ) {
+                               dir = MATCH_FORWARD;
+                               break;
+                           }
+                           if (IN6_ARE_ADDR_EQUAL(&(pkt->src_ip6),
+                                   &(q->id.dst_ip6)) &&
+                               IN6_ARE_ADDR_EQUAL(&(pkt->dst_ip6),
+                                   &(q->id.src_ip6)) &&
+                               pkt->src_port == q->id.dst_port &&
+                               pkt->dst_port == q->id.src_port ) {
+                                   dir = MATCH_REVERSE;
+                                   break;
+                           }
+                       } else {
+                           if (pkt->src_ip == q->id.src_ip &&
+                               pkt->dst_ip == q->id.dst_ip &&
+                               pkt->src_port == q->id.src_port &&
+                               pkt->dst_port == q->id.dst_port ) {
+                                   dir = MATCH_FORWARD;
+                                   break;
+                           }
+                           if (pkt->src_ip == q->id.dst_ip &&
+                               pkt->dst_ip == q->id.src_ip &&
+                               pkt->src_port == q->id.dst_port &&
+                               pkt->dst_port == q->id.src_port ) {
+                                   dir = MATCH_REVERSE;
+                                   break;
+                           }
+                       }
+               }
+next:
+               prev = q;
+               q = q->next;
+       }
+       if (q == NULL)
+               goto done; /* q = NULL, not found */
+
+       if ( prev != NULL) { /* found and not in front */
+               prev->next = q->next;
+               q->next = ipfw_dyn_v[i];
+               ipfw_dyn_v[i] = q;
+       }
+       if (pkt->proto == IPPROTO_TCP) { /* update state according to flags */
+               u_char flags = pkt->flags & (TH_FIN|TH_SYN|TH_RST);
+
+#define BOTH_SYN       (TH_SYN | (TH_SYN << 8))
+#define BOTH_FIN       (TH_FIN | (TH_FIN << 8))
+               q->state |= (dir == MATCH_FORWARD ) ? flags : (flags << 8);
+               switch (q->state) {
+               case TH_SYN:                            /* opening */
+                       q->expire = time_uptime + dyn_syn_lifetime;
+                       break;
+
+               case BOTH_SYN:                  /* move to established */
+               case BOTH_SYN | TH_FIN :        /* one side tries to close */
+               case BOTH_SYN | (TH_FIN << 8) :
+                       if (tcp) {
+#define _SEQ_GE(a,b) ((int)(a) - (int)(b) >= 0)
+                           u_int32_t ack = ntohl(tcp->th_ack);
+                           if (dir == MATCH_FORWARD) {
+                               if (q->ack_fwd == 0 || _SEQ_GE(ack, q->ack_fwd))
+                                   q->ack_fwd = ack;
+                               else { /* ignore out-of-sequence */
+                                   break;
+                               }
+                           } else {
+                               if (q->ack_rev == 0 || _SEQ_GE(ack, q->ack_rev))
+                                   q->ack_rev = ack;
+                               else { /* ignore out-of-sequence */
+                                   break;
+                               }
+                           }
+                       }
+                       q->expire = time_uptime + dyn_ack_lifetime;
+                       break;
+
+               case BOTH_SYN | BOTH_FIN:       /* both sides closed */
+                       if (dyn_fin_lifetime >= dyn_keepalive_period)
+                               dyn_fin_lifetime = dyn_keepalive_period - 1;
+                       q->expire = time_uptime + dyn_fin_lifetime;
+                       break;
+
+               default:
+#if 0
+                       /*
+                        * reset or some invalid combination, but can also
+                        * occur if we use keep-state the wrong way.
+                        */
+                       if ( (q->state & ((TH_RST << 8)|TH_RST)) == 0)
+                               printf("invalid state: 0x%x\n", q->state);
+#endif
+                       if (dyn_rst_lifetime >= dyn_keepalive_period)
+                               dyn_rst_lifetime = dyn_keepalive_period - 1;
+                       q->expire = time_uptime + dyn_rst_lifetime;
+                       break;
+               }
+       } else if (pkt->proto == IPPROTO_UDP) {
+               q->expire = time_uptime + dyn_udp_lifetime;
+       } else {
+               /* other protocols */
+               q->expire = time_uptime + dyn_short_lifetime;
+       }
+done:
+       if (match_direction)
+               *match_direction = dir;
+       return q;
+}
+
+static ipfw_dyn_rule *
+lookup_dyn_rule(struct ipfw_flow_id *pkt, int *match_direction,
+    struct tcphdr *tcp)
+{
+       ipfw_dyn_rule *q;
+
+       IPFW_DYN_LOCK();
+       q = lookup_dyn_rule_locked(pkt, match_direction, tcp);
+       if (q == NULL)
+               IPFW_DYN_UNLOCK();
+       /* NB: return table locked when q is not NULL */
+       return q;
+}
+
+static void
+realloc_dynamic_table(void)
+{
+       IPFW_DYN_LOCK_ASSERT();
+
+       /*
+        * Try reallocation, make sure we have a power of 2 and do
+        * not allow more than 64k entries. In case of overflow,
+        * default to 1024.
+        */
+
+       if (dyn_buckets > 65536)
+               dyn_buckets = 1024;
+       if ((dyn_buckets & (dyn_buckets-1)) != 0) { /* not a power of 2 */
+               dyn_buckets = curr_dyn_buckets; /* reset */
+               return;
+       }
+       curr_dyn_buckets = dyn_buckets;
+       if (ipfw_dyn_v != NULL)
+               free(ipfw_dyn_v, M_IPFW);
+       for (;;) {
+               ipfw_dyn_v = malloc(curr_dyn_buckets * sizeof(ipfw_dyn_rule *),
+                      M_IPFW, M_NOWAIT | M_ZERO);
+               if (ipfw_dyn_v != NULL || curr_dyn_buckets <= 2)
+                       break;
+               curr_dyn_buckets /= 2;
+       }
+}
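+
+/*
+ * Usage sketch (not part of the original sources): the bucket count is
+ * tuned from userland, e.g.
+ *
+ *	sysctl net.inet.ip.fw.dyn_buckets=1024
+ *
+ * The new value must be a power of 2 and only takes effect the next time
+ * the table is reallocated, i.e. when it has become empty.
+ */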
+
+/**
+ * Install state of type 'type' for a dynamic session.
+ * The hash table contains three types of rules:
+ * - regular rules (O_KEEP_STATE);
+ * - rules for sessions with a limited number of sessions per user
+ *   (O_LIMIT). When one is created, the parent's count is
+ *   increased by 1, and decreased on delete. In this case,
+ *   the third parameter is the parent rule and not the chain;
+ * - "parent" rules for the above (O_LIMIT_PARENT).
+ */
+static ipfw_dyn_rule *
+add_dyn_rule(struct ipfw_flow_id *id, u_int8_t dyn_type, struct ip_fw *rule)
+{
+       ipfw_dyn_rule *r;
+       int i;
+
+       IPFW_DYN_LOCK_ASSERT();
+
+       if (ipfw_dyn_v == NULL ||
+           (dyn_count == 0 && dyn_buckets != curr_dyn_buckets)) {
+               realloc_dynamic_table();
+               if (ipfw_dyn_v == NULL)
+                       return NULL; /* failed ! */
+       }
+       i = hash_packet(id);
+
+       r = uma_zalloc(ipfw_dyn_rule_zone, M_NOWAIT | M_ZERO);
+       if (r == NULL) {
+		printf("ipfw: sorry cannot allocate state\n");
+               return NULL;
+       }
+
+       /* increase refcount on parent, and set pointer */
+       if (dyn_type == O_LIMIT) {
+               ipfw_dyn_rule *parent = (ipfw_dyn_rule *)rule;
+               if ( parent->dyn_type != O_LIMIT_PARENT)
+                       panic("invalid parent");
+               parent->count++;
+               r->parent = parent;
+               rule = parent->rule;
+       }
+
+       r->id = *id;
+       r->expire = time_uptime + dyn_syn_lifetime;
+       r->rule = rule;
+       r->dyn_type = dyn_type;
+       r->pcnt = r->bcnt = 0;
+       r->count = 0;
+
+       r->bucket = i;
+       r->next = ipfw_dyn_v[i];
+       ipfw_dyn_v[i] = r;
+       dyn_count++;
+       DEB(printf("ipfw: add dyn entry ty %d 0x%08x %d -> 0x%08x %d, total %d\n",
+          dyn_type,
+          (r->id.src_ip), (r->id.src_port),
+          (r->id.dst_ip), (r->id.dst_port),
+          dyn_count ); )
+       return r;
+}
+
+/**
+ * lookup dynamic parent rule using pkt and rule as search keys.
+ * If the lookup fails, then install one.
+ */
+static ipfw_dyn_rule *
+lookup_dyn_parent(struct ipfw_flow_id *pkt, struct ip_fw *rule)
+{
+       ipfw_dyn_rule *q;
+       int i;
+
+       IPFW_DYN_LOCK_ASSERT();
+
+       if (ipfw_dyn_v) {
+               int is_v6 = IS_IP6_FLOW_ID(pkt);
+               i = hash_packet( pkt );
+               for (q = ipfw_dyn_v[i] ; q != NULL ; q=q->next)
+                       if (q->dyn_type == O_LIMIT_PARENT &&
+			    rule == q->rule &&
+                           pkt->proto == q->id.proto &&
+                           pkt->src_port == q->id.src_port &&
+                           pkt->dst_port == q->id.dst_port &&
+                           (
+                               (is_v6 &&
+                                IN6_ARE_ADDR_EQUAL(&(pkt->src_ip6),
+                                       &(q->id.src_ip6)) &&
+                                IN6_ARE_ADDR_EQUAL(&(pkt->dst_ip6),
+                                       &(q->id.dst_ip6))) ||
+                               (!is_v6 &&
+                                pkt->src_ip == q->id.src_ip &&
+                                pkt->dst_ip == q->id.dst_ip)
+                           )
+                       ) {
+                               q->expire = time_uptime + dyn_short_lifetime;
+                               DEB(printf("ipfw: lookup_dyn_parent found 0x%p\n",q);)
+                               return q;
+                       }
+       }
+       return add_dyn_rule(pkt, O_LIMIT_PARENT, rule);
+}
+
+/**
+ * Install dynamic state for rule type cmd->o.opcode
+ *
+ * Returns 1 (failure) if state is not installed because of errors or because
+ * session limitations are enforced.
+ */
+static int
+install_state(struct ip_fw *rule, ipfw_insn_limit *cmd,
+    struct ip_fw_args *args, uint32_t tablearg)
+{
+       static int last_log;
+       ipfw_dyn_rule *q;
+       struct in_addr da;
+       char src[48], dst[48];
+
+       src[0] = '\0';
+       dst[0] = '\0';
+
+       DEB(
+       printf("ipfw: %s: type %d 0x%08x %u -> 0x%08x %u\n",
+           __func__, cmd->o.opcode,
+           (args->f_id.src_ip), (args->f_id.src_port),
+           (args->f_id.dst_ip), (args->f_id.dst_port));
+       )
+
+       IPFW_DYN_LOCK();
+
+       q = lookup_dyn_rule_locked(&args->f_id, NULL, NULL);
+
+       if (q != NULL) {        /* should never occur */
+               if (last_log != time_uptime) {
+                       last_log = time_uptime;
+                       printf("ipfw: %s: entry already present, done\n",
+                           __func__);
+               }
+               IPFW_DYN_UNLOCK();
+               return (0);
+       }
+
+       if (dyn_count >= dyn_max)
+               /* Run out of slots, try to remove any expired rule. */
+               remove_dyn_rule(NULL, (ipfw_dyn_rule *)1);
+
+       if (dyn_count >= dyn_max) {
+               if (last_log != time_uptime) {
+                       last_log = time_uptime;
+                       printf("ipfw: %s: Too many dynamic rules\n", __func__);
+               }
+               IPFW_DYN_UNLOCK();
+               return (1);     /* cannot install, notify caller */
+       }
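+
+       /*
+        * Editor's note (not part of the original import): dyn_max caps
+        * the total number of dynamic entries; in stock FreeBSD it is
+        * exposed as the net.inet.ip.fw.dyn_max sysctl, so the "Too many
+        * dynamic rules" message above is usually cured by raising it:
+        *
+        *	sysctl net.inet.ip.fw.dyn_max=16384
+        */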
+
+       switch (cmd->o.opcode) {
+       case O_KEEP_STATE:      /* bidir rule */
+               add_dyn_rule(&args->f_id, O_KEEP_STATE, rule);
+               break;
+
+       case O_LIMIT: {         /* limit number of sessions */
+               struct ipfw_flow_id id;
+               ipfw_dyn_rule *parent;
+               uint32_t conn_limit;
+               uint16_t limit_mask = cmd->limit_mask;
+
+               conn_limit = (cmd->conn_limit == IP_FW_TABLEARG) ?
+                   tablearg : cmd->conn_limit;
+
+               DEB(
+               if (cmd->conn_limit == IP_FW_TABLEARG)
+                       printf("ipfw: %s: O_LIMIT rule, conn_limit: %u "
+                           "(tablearg)\n", __func__, conn_limit);
+               else
+                       printf("ipfw: %s: O_LIMIT rule, conn_limit: %u\n",
+                           __func__, conn_limit);
+               )
+
+               id.dst_ip = id.src_ip = id.dst_port = id.src_port = 0;
+               id.proto = args->f_id.proto;
+               id.addr_type = args->f_id.addr_type;
+               id.fib = M_GETFIB(args->m);
+
+               if (IS_IP6_FLOW_ID(&(args->f_id))) {
+                       if (limit_mask & DYN_SRC_ADDR)
+                               id.src_ip6 = args->f_id.src_ip6;
+                       if (limit_mask & DYN_DST_ADDR)
+                               id.dst_ip6 = args->f_id.dst_ip6;
+               } else {
+                       if (limit_mask & DYN_SRC_ADDR)
+                               id.src_ip = args->f_id.src_ip;
+                       if (limit_mask & DYN_DST_ADDR)
+                               id.dst_ip = args->f_id.dst_ip;
+               }
+               if (limit_mask & DYN_SRC_PORT)
+                       id.src_port = args->f_id.src_port;
+               if (limit_mask & DYN_DST_PORT)
+                       id.dst_port = args->f_id.dst_port;
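+               /*
+                * Editor's note: only the fields selected by limit_mask
+                * survive in the parent id.  With "limit src-addr", for
+                * example, just DYN_SRC_ADDR is set, so every session from
+                * one source address hashes to the same parent and shares
+                * its count.
+                */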
+               if ((parent = lookup_dyn_parent(&id, rule)) == NULL) {
+                       printf("ipfw: %s: add parent failed\n", __func__);
+                       IPFW_DYN_UNLOCK();
+                       return (1);
+               }
+
+               if (parent->count >= conn_limit) {
+                       /* See if we can remove some expired rule. */
+                       remove_dyn_rule(rule, parent);
+                       if (parent->count >= conn_limit) {
+                               if (fw_verbose && last_log != time_uptime) {
+                                       last_log = time_uptime;
+#ifdef INET6
+                                       /*
+                                        * XXX IPv6 flows are not
+                                        * supported yet.
+                                        */
+                                       if (IS_IP6_FLOW_ID(&(args->f_id))) {
+                                               char ip6buf[INET6_ADDRSTRLEN];
+                                               snprintf(src, sizeof(src),
+                                                   "[%s]", ip6_sprintf(ip6buf,
+                                                       &args->f_id.src_ip6));
+                                               snprintf(dst, sizeof(dst),
+                                                   "[%s]", ip6_sprintf(ip6buf,
+                                                       &args->f_id.dst_ip6));
+                                       } else
+#endif
+                                       {
+                                               da.s_addr =
+                                                   htonl(args->f_id.src_ip);
+                                               inet_ntoa_r(da, src);
+                                               da.s_addr =
+                                                   htonl(args->f_id.dst_ip);
+                                               inet_ntoa_r(da, dst);
+                                       }
+                                       log(LOG_SECURITY | LOG_DEBUG,
+                                           "ipfw: %d %s %s:%u -> %s:%u, %s\n",
+                                           parent->rule->rulenum,
+                                           "drop session",
+                                           src, (args->f_id.src_port),
+                                           dst, (args->f_id.dst_port),
+                                           "too many entries");