Subject: lib/16518: some fixes to libpcap
To: None <gnats-bugs@gnats.netbsd.org>
From: None <yamt@mwd.biglobe.ne.jp>
List: netbsd-bugs
Date: 04/27/2002 20:56:57
>Number: 16518
>Category: lib
>Synopsis: some fixes to libpcap
>Confidential: no
>Severity: non-critical
>Priority: low
>Responsible: lib-bug-people
>State: open
>Class: sw-bug
>Submitter-Id: net
>Arrival-Date: Sat Apr 27 04:58:00 PDT 2002
>Closed-Date:
>Last-Modified:
>Originator: YAMAMOTO Takashi
>Release: NetBSD 1.5ZC
>Organization:
>Environment:
System: NetBSD bear.yamanet 1.5ZC NetBSD 1.5ZC (build) #13: Wed Apr 3 03:54:19 JST 2002 takashi@bear.yamanet:/usr/home/takashi/work/kernel/build i386
Architecture: i386
Machine: i386
>Description:
- make tcpdump "1>1" work correctly.
- make arithmetic unsigned, as the in-kernel filter does
  (a short demonstration follows this list).
- fix a bug that prevents optimization (and might generate bad code),
  e.g. "1 & len == 1".
- fix incorrect optimizations, e.g. "0 >> len == 1" and "0 - len == 1".
>How-To-Repeat:
Try the expressions above as tcpdump filters; a sample invocation follows.
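For example, with a tcpdump linked against the unpatched library:

    tcpdump -d '1 > 1'

"-d" dumps the compiled filter program instead of capturing; before the
fix the dumped program accepts every packet, although "1 > 1" is never
true.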
>Fix:
Apply the following patches. I have sent them to patches@tcpdump.org as
well.
Index: gencode.c
===================================================================
RCS file: /cvs/NetBSD/basesrc/lib/libpcap/gencode.c,v
retrieving revision 1.29
diff -u -r1.29 gencode.c
--- gencode.c 2001/06/01 15:49:53 1.29
+++ gencode.c 2002/04/24 18:01:24
@@ -2635,16 +2635,17 @@
s0 = xfer_to_x(a1);
s1 = xfer_to_a(a0);
- s2 = new_stmt(BPF_ALU|BPF_SUB|BPF_X);
- b = new_block(JMP(code));
- if (code == BPF_JGT || code == BPF_JGE) {
- reversed = !reversed;
- b->s.k = 0x80000000;
+ if (code == BPF_JEQ) {
+ s2 = new_stmt(BPF_ALU|BPF_SUB|BPF_X);
+ b = new_block(JMP(code));
+ sappend(s1, s2);
}
+ else {
+ b = new_block(BPF_JMP|code|BPF_X);
+ }
if (reversed)
gen_not(b);
- sappend(s1, s2);
sappend(s0, s1);
sappend(a1->s, s0);
sappend(a0->s, a1->s);
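To see what this gencode.c change amounts to, here is a hand-written
pair of filter programs for "1 > 1" (my sketch, not generated output;
it assumes bpf_filter() as shipped with libpcap).  The old generator
subtracted and then tested the sign bit with a reversed branch, which
among other problems accepts equality: "1 > 1" comes out true.  The
new one emits a native unsigned jump.

    #include <pcap.h>   /* struct bpf_insn, BPF_* macros, bpf_filter() */
    #include <stdio.h>

    /* "1 > 1" as the old generator emitted it: A <- 1 - 1, then a
     * reversed test of the sign bit */
    static struct bpf_insn old_prog[] = {
        BPF_STMT(BPF_LDX|BPF_W|BPF_IMM, 1),         /* X <- 1 */
        BPF_STMT(BPF_LD|BPF_IMM, 1),                /* A <- 1 */
        BPF_STMT(BPF_ALU|BPF_SUB|BPF_X, 0),         /* A <- A - X */
        /* reversed: the "true" branch rejects */
        BPF_JUMP(BPF_JMP|BPF_JGT|BPF_K, 0x80000000, 1, 0),
        BPF_STMT(BPF_RET|BPF_K, (u_int)-1),         /* accept */
        BPF_STMT(BPF_RET|BPF_K, 0),                 /* reject */
    };

    /* the same test with the patched generator: compare A and X
     * directly with an unsigned jump */
    static struct bpf_insn new_prog[] = {
        BPF_STMT(BPF_LDX|BPF_W|BPF_IMM, 1),         /* X <- 1 */
        BPF_STMT(BPF_LD|BPF_IMM, 1),                /* A <- 1 */
        BPF_JUMP(BPF_JMP|BPF_JGT|BPF_X, 0, 0, 1),   /* A > X ? */
        BPF_STMT(BPF_RET|BPF_K, (u_int)-1),         /* accept */
        BPF_STMT(BPF_RET|BPF_K, 0),                 /* reject */
    };

    int
    main(void)
    {
        u_char pkt[1] = { 0 };  /* contents never examined here */

        /* old: nonzero (accepts) although 1 > 1 is false */
        printf("old: %u\n", bpf_filter(old_prog, pkt, 1, 1));
        /* new: 0 (rejects), as it should */
        printf("new: %u\n", bpf_filter(new_prog, pkt, 1, 1));
        return 0;
    }

Build with "cc demo.c -lpcap".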
Index: optimize.c
===================================================================
RCS file: /cvs/NetBSD/basesrc/lib/libpcap/optimize.c,v
retrieving revision 1.13
diff -u -r1.13 optimize.c
--- optimize.c 2001/01/06 02:11:18 1.13
+++ optimize.c 2002/04/24 21:55:22
@@ -585,7 +585,7 @@
struct stmt *s;
int v0, v1;
{
- bpf_int32 a, b;
+ bpf_u_int32 a, b;
a = vmap[v0].const_val;
b = vmap[v1].const_val;
@@ -669,7 +669,7 @@
return;
last = s;
- while (1) {
+ for (;; s = next) {
s = this_op(s);
if (s == 0)
break;
@@ -712,23 +712,23 @@
* any local dependencies.
*/
if (ATOMELEM(b->out_use, X_ATOM))
- break;
+ continue;
if (next->s.code != (BPF_LDX|BPF_MSH|BPF_B))
add = next;
else
add = this_op(next->next);
if (add == 0 || add->s.code != (BPF_ALU|BPF_ADD|BPF_X))
- break;
+ continue;
tax = this_op(add->next);
if (tax == 0 || tax->s.code != (BPF_MISC|BPF_TAX))
- break;
+ continue;
ild = this_op(tax->next);
if (ild == 0 || BPF_CLASS(ild->s.code) != BPF_LD ||
BPF_MODE(ild->s.code) != BPF_IND)
- break;
+ continue;
/*
* XXX We need to check that X is not
* subsequently used. We know we can eliminate the
@@ -758,27 +758,21 @@
tax->s.code = NOP;
done = 0;
}
- s = next;
}
/*
* If we have a subtract to do a comparison, and the X register
* is a known constant, we can merge this value into the
* comparison.
*/
+ if (BPF_OP(b->s.code) == BPF_JEQ) {
+ /* XXX should indent correctly.
+ * just to make diff small :)
+ */
if (last->s.code == (BPF_ALU|BPF_SUB|BPF_X) &&
!ATOMELEM(b->out_use, A_ATOM)) {
val = b->val[X_ATOM];
if (vmap[val].is_const) {
- int op;
-
b->s.k += vmap[val].const_val;
- op = BPF_OP(b->s.code);
- if (op == BPF_JGT || op == BPF_JGE) {
- struct block *t = JT(b);
- JT(b) = JF(b);
- JF(b) = t;
- b->s.k += 0x80000000;
- }
last->s.code = NOP;
done = 0;
} else if (b->s.k == 0) {
@@ -797,19 +791,13 @@
*/
else if (last->s.code == (BPF_ALU|BPF_SUB|BPF_K) &&
!ATOMELEM(b->out_use, A_ATOM)) {
- int op;
- b->s.k += last->s.k;
last->s.code = NOP;
- op = BPF_OP(b->s.code);
- if (op == BPF_JGT || op == BPF_JGE) {
- struct block *t = JT(b);
- JT(b) = JF(b);
- JF(b) = t;
- b->s.k += 0x80000000;
- }
+ b->s.k += last->s.k;
+
done = 0;
}
+ }
/*
* and #k nop
* jeq #0 -> jset #k
@@ -988,18 +976,17 @@
* that is 0, and simplify. This may not seem like
* much of a simplification but it could open up further
* optimizations.
- * XXX We could also check for mul by 1, and -1, etc.
+ * XXX We could also check for mul by 1, etc.
*/
if (alter && vmap[val[A_ATOM]].is_const
&& vmap[val[A_ATOM]].const_val == 0) {
- if (op == BPF_ADD || op == BPF_OR ||
- op == BPF_LSH || op == BPF_RSH || op == BPF_SUB) {
+ if (op == BPF_ADD || op == BPF_OR) {
s->code = BPF_MISC|BPF_TXA;
vstore(s, &val[A_ATOM], val[X_ATOM], alter);
break;
}
else if (op == BPF_MUL || op == BPF_DIV ||
- op == BPF_AND) {
+ op == BPF_AND || op == BPF_RSH || op == BPF_LSH) {
s->code = BPF_LD|BPF_IMM;
s->k = 0;
vstore(s, &val[A_ATOM], K(s->k), alter);
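For reference, the identities behind the last hunk, written out in plain
C (my sketch, not part of the patch).  With the A register known to be
the constant 0:

    #include <assert.h>
    #include <stdint.h>

    int
    main(void)
    {
        uint32_t x = 5;             /* an arbitrary nonzero X value */

        /* still safe to rewrite as TXA (A <- X): */
        assert((0u + x) == x);
        assert((0u | x) == x);

        /* must instead fold to the constant 0: */
        assert((0u * x) == 0);
        assert((0u / x) == 0);
        assert((0u & x) == 0);
        assert((0u >> x) == 0);     /* old code wrongly rewrote as X */
        assert((0u << x) == 0);     /* likewise */

        /* no rewrite is safe here: 0 - x is the negation of x */
        assert((0u - x) != x);

        /* and fold_op must compute unsigned: a signed bpf_int32
         * right shift would sign-extend on most machines */
        assert((0x80000000u >> 4) == 0x08000000u);

        return 0;
    }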
>Release-Note:
>Audit-Trail:
>Unformatted: