Source-Changes-HG archive
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]
[src/trunk]: src/sys/net Don't trigger BJ_ASSERT(false) on invalid BPF_Jxxx o...
details: https://anonhg.NetBSD.org/src/rev/755324108188
branches: trunk
changeset: 346716:755324108188
user: alnsn <alnsn%NetBSD.org@localhost>
date: Fri Jul 29 20:29:38 2016 +0000
description:
Don't trigger BJ_ASSERT(false) on invalid BPF_Jxxx opcode in jmp_to_op().
This change helps survive AFL fuzzing without calling bpf_validate() first.
Also change the alu_to_op() function to have a similar interface.
diffstat:
sys/net/bpfjit.c | 92 ++++++++++++++++++++++++++++++++-----------------------
1 files changed, 53 insertions(+), 39 deletions(-)
diffs (174 lines):
diff -r 904abd9c0b0d -r 755324108188 sys/net/bpfjit.c
--- a/sys/net/bpfjit.c Fri Jul 29 20:27:37 2016 +0000
+++ b/sys/net/bpfjit.c Fri Jul 29 20:29:38 2016 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: bpfjit.c,v 1.45 2016/05/29 17:20:22 alnsn Exp $ */
+/* $NetBSD: bpfjit.c,v 1.46 2016/07/29 20:29:38 alnsn Exp $ */
/*-
* Copyright (c) 2011-2015 Alexander Nasonov.
@@ -31,9 +31,9 @@
#include <sys/cdefs.h>
#ifdef _KERNEL
-__KERNEL_RCSID(0, "$NetBSD: bpfjit.c,v 1.45 2016/05/29 17:20:22 alnsn Exp $");
+__KERNEL_RCSID(0, "$NetBSD: bpfjit.c,v 1.46 2016/07/29 20:29:38 alnsn Exp $");
#else
-__RCSID("$NetBSD: bpfjit.c,v 1.45 2016/05/29 17:20:22 alnsn Exp $");
+__RCSID("$NetBSD: bpfjit.c,v 1.46 2016/07/29 20:29:38 alnsn Exp $");
#endif
#include <sys/types.h>
@@ -1594,10 +1594,9 @@
/*
* Convert BPF_ALU operations except BPF_NEG and BPF_DIV to sljit operation.
*/
-static int
-bpf_alu_to_sljit_op(const struct bpf_insn *pc)
+static bool
+alu_to_op(const struct bpf_insn *pc, int *res)
{
- const int bad = SLJIT_UNUSED;
const uint32_t k = pc->k;
/*
@@ -1605,49 +1604,64 @@
* instruction so SLJIT_I32_OP doesn't have any overhead.
*/
switch (BPF_OP(pc->code)) {
- case BPF_ADD: return SLJIT_ADD;
- case BPF_SUB: return SLJIT_SUB;
- case BPF_MUL: return SLJIT_MUL|SLJIT_I32_OP;
- case BPF_OR: return SLJIT_OR;
- case BPF_XOR: return SLJIT_XOR;
- case BPF_AND: return SLJIT_AND;
- case BPF_LSH: return (k > 31) ? bad : SLJIT_SHL;
- case BPF_RSH: return (k > 31) ? bad : SLJIT_LSHR|SLJIT_I32_OP;
+ case BPF_ADD:
+ *res = SLJIT_ADD;
+ return true;
+ case BPF_SUB:
+ *res = SLJIT_SUB;
+ return true;
+ case BPF_MUL:
+ *res = SLJIT_MUL|SLJIT_I32_OP;
+ return true;
+ case BPF_OR:
+ *res = SLJIT_OR;
+ return true;
+ case BPF_XOR:
+ *res = SLJIT_XOR;
+ return true;
+ case BPF_AND:
+ *res = SLJIT_AND;
+ return true;
+ case BPF_LSH:
+ *res = SLJIT_SHL;
+ return k < 32;
+ case BPF_RSH:
+ *res = SLJIT_LSHR|SLJIT_I32_OP;
+ return k < 32;
default:
- return bad;
+ return false;
}
}
/*
* Convert BPF_JMP operations except BPF_JA to sljit condition.
*/
-static int
-bpf_jmp_to_sljit_cond(const struct bpf_insn *pc, bool negate)
+static bool
+jmp_to_cond(const struct bpf_insn *pc, bool negate, int *res)
{
+
/*
* Note: all supported 64bit arches have 32bit comparison
* instructions so SLJIT_I32_OP doesn't have any overhead.
*/
- int rv = SLJIT_I32_OP;
+ *res = SLJIT_I32_OP;
switch (BPF_OP(pc->code)) {
case BPF_JGT:
- rv |= negate ? SLJIT_LESS_EQUAL : SLJIT_GREATER;
- break;
+ *res |= negate ? SLJIT_LESS_EQUAL : SLJIT_GREATER;
+ return true;
case BPF_JGE:
- rv |= negate ? SLJIT_LESS : SLJIT_GREATER_EQUAL;
- break;
+ *res |= negate ? SLJIT_LESS : SLJIT_GREATER_EQUAL;
+ return true;
case BPF_JEQ:
- rv |= negate ? SLJIT_NOT_EQUAL : SLJIT_EQUAL;
- break;
+ *res |= negate ? SLJIT_NOT_EQUAL : SLJIT_EQUAL;
+ return true;
case BPF_JSET:
- rv |= negate ? SLJIT_EQUAL : SLJIT_NOT_EQUAL;
- break;
+ *res |= negate ? SLJIT_EQUAL : SLJIT_NOT_EQUAL;
+ return true;
default:
- BJ_ASSERT(false);
+ return false;
}
-
- return rv;
}
/*
@@ -1695,9 +1709,9 @@
struct sljit_jump *to_mchain_jump;
size_t i;
- int status;
+ unsigned int rval, mode, src, op;
int branching, negate;
- unsigned int rval, mode, src, op;
+ int status, cond, op2;
uint32_t jt, jf;
bool unconditional_ret;
@@ -1935,10 +1949,9 @@
op = BPF_OP(pc->code);
if (op != BPF_DIV && op != BPF_MOD) {
- const int op2 = bpf_alu_to_sljit_op(pc);
+ if (!alu_to_op(pc, &op2))
+ goto fail;
- if (op2 == SLJIT_UNUSED)
- goto fail;
status = sljit_emit_op2(compiler,
op2, BJ_AREG, 0, BJ_AREG, 0,
kx_to_reg(pc), kx_to_reg_arg(pc));
@@ -2005,9 +2018,10 @@
if (branching) {
if (op != BPF_JSET) {
+ if (!jmp_to_cond(pc, negate, &cond))
+ goto fail;
jump = sljit_emit_cmp(compiler,
- bpf_jmp_to_sljit_cond(pc, negate),
- BJ_AREG, 0,
+ cond, BJ_AREG, 0,
kx_to_reg(pc), kx_to_reg_arg(pc));
} else {
status = sljit_emit_op2(compiler,
@@ -2018,10 +2032,10 @@
if (status != SLJIT_SUCCESS)
goto fail;
+ if (!jmp_to_cond(pc, negate, &cond))
+ goto fail;
jump = sljit_emit_cmp(compiler,
- bpf_jmp_to_sljit_cond(pc, negate),
- BJ_TMP1REG, 0,
- SLJIT_IMM, 0);
+ cond, BJ_TMP1REG, 0, SLJIT_IMM, 0);
}
if (jump == NULL)
Home |
Main Index |
Thread Index |
Old Index