Source-Changes-HG archive
[src/trunk]: src/usr.bin/vndcompress Clarify compile-time and run-time arithmetic safety assertions
details: https://anonhg.NetBSD.org/src/rev/fe287e6f8340
branches: trunk
changeset: 355468:fe287e6f8340
user: riastradh <riastradh%NetBSD.org@localhost>
date: Sat Jul 29 21:04:07 2017 +0000
description:
Clarify compile-time and run-time arithmetic safety assertions.
This is an experiment with a handful of macros for writing the
checks, most of which are compile-time:
MUL_OK(t, a, b)      Does a*b avoid overflow in type t?
ADD_OK(t, a, b)      Does a+b avoid overflow in type t?
TOOMANY(t, x, b, m)  Are there more than m b-element blocks in x in
                     type t?  (I.e., does ceiling(x/b) > m?)
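To make TOOMANY and HOWMANY concrete, an illustrative instance (not
part of the commit; HOWMANY and TOOMANY as defined in common.h in the
diff below, __CTASSERT from <sys/cdefs.h>): 1025 bytes span three
512-byte blocks, so a limit of two blocks is exceeded but a limit of
three is not:

	__CTASSERT(HOWMANY(1025u, 512u) == 3);			/* ceiling(1025/512) */
	__CTASSERT(TOOMANY(uint32_t, 1025u, 512u, 2u));		/* 3 > 2 */
	__CTASSERT(!TOOMANY(uint32_t, 1025u, 512u, 3u));	/* 3 <= 3 */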
Addenda that might make sense but are not needed here:
MUL(t, a, b, &p) Set p = a*b and return 0, or return ERANGE if overflow.
ADD(t, a, b, &s) Set s = a+b and return 0, or return ERANGE if overflow.
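For concreteness, one way the proposed MUL and ADD could be built on
top of MUL_OK/ADD_OK -- a hypothetical sketch only, not part of this
change (ERANGE is from <errno.h>; b must be nonzero because MUL_OK
divides by it, as in the uses below):

	/*
	 * Hypothetical sketches, not in this commit.  T is assumed to
	 * be an unsigned type of at least int rank, and A, B to be
	 * nonnegative, so the cast multiplication/addition cannot
	 * overflow once the _OK check has passed.
	 */
	#define MUL(T, A, B, P) \
		(MUL_OK(T, A, B) ? (*(P) = (T)(A)*(T)(B), 0) : ERANGE)
	#define ADD(T, A, B, S) \
		(ADD_OK(T, A, B) ? (*(S) = (T)(A) + (T)(B), 0) : ERANGE)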
Example:

	uint32_t a = ..., b = ..., y = ..., z = ..., x, w;

	/* input validation */
	error = MUL(size_t, a, b, &x);
	if (error)
		fail;
	if (TOOMANY(uint32_t, x, BLKSIZ, MAX_NBLK))
		fail;
	y = HOWMANY(x, BLKSIZ);
	if (z > Z_MAX)
		fail;

	...

	/* internal computation */
	__CTASSERT(MUL_OK(uint32_t, Z_MAX, MAX_NBLK));
	w = z*y;
Obvious shortcomings:

1. Nothing checks that your ctassert matches your subsequent
   arithmetic.  (Maybe we could have BOUNDED_MUL(t, x, xmax, y, ymax)
   with a ctassert inside; a possible shape is sketched after this
   list.)

2. Nothing flows the bounds needed by the arithmetic you use back
   into candidate definitions of X_MAX/Y_MAX.
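A possible shape for the BOUNDED_MUL mentioned in (1), purely as a
sketch and not part of this change.  Because __CTASSERT expands to a
declaration rather than an expression, this version takes an explicit
destination instead of yielding a value:

	/*
	 * Hypothetical: store X*Y in *(P) in type T, with the bound
	 * XMAX*YMAX checked at compile time and the inputs checked at
	 * run time.  Requires <assert.h>.
	 */
	#define BOUNDED_MUL(T, P, X, XMAX, Y, YMAX) do {	\
		__CTASSERT(MUL_OK(T, XMAX, YMAX));		\
		assert((X) <= (XMAX));				\
		assert((Y) <= (YMAX));				\
		*(P) = (T)(X) * (T)(Y);				\
	} while (0)

With something like this, the internal computation in the example
above could become BOUNDED_MUL(uint32_t, &w, z, Z_MAX, y, MAX_NBLK),
so the asserted bound and the multiplication it justifies cannot
drift apart.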
But at least the reviewer's job is only to make sure that (a) the
MUL_OK matches the *, and (b) the bounds in the assertion match the
bounds on the inputs -- in particular, the reviewer need not derive
the bounds from the surrounding context, only confirm that they are
established on every path leading to the computation.
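For instance (illustrative only, reusing the names from the example
above), rule (a) is what lets a reviewer flag a mismatch like the
following, where the ctassert bounds z*y but the code multiplies by
x, which is only bounded by BLKSIZ*MAX_NBLK:

	__CTASSERT(MUL_OK(uint32_t, Z_MAX, MAX_NBLK));
	w = z*x;	/* mismatch: the assertion covers z*y, not z*x */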
This is not meant to be a general-purpose proof assistant, or even a
special-purpose one like gfverif <http://gfverif.cryptojedi.org/>.
Rather, it is an experiment in adding a modicum of compile-time
verification with a simple C API change.
This also is not intended to serve as trapping arithmetic on
overflow. The goal here is to enable writing the program with
explicit checks on input and compile-time annotations on computation
to gain confidence that overflow won't happen in the computation.
diffstat:
usr.bin/vndcompress/common.h | 12 +++++-
usr.bin/vndcompress/offtab.c | 44 ++++++++++++-----------
usr.bin/vndcompress/vndcompress.c | 70 ++++++++++++++++++++----------------
usr.bin/vndcompress/vnduncompress.c | 14 +++---
4 files changed, 79 insertions(+), 61 deletions(-)
diffs (truncated from 364 to 300 lines):
diff -r fac2fc96b30b -r fe287e6f8340 usr.bin/vndcompress/common.h
--- a/usr.bin/vndcompress/common.h Sat Jul 29 19:39:58 2017 +0000
+++ b/usr.bin/vndcompress/common.h Sat Jul 29 21:04:07 2017 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: common.h,v 1.7 2017/04/16 23:43:57 riastradh Exp $ */
+/* $NetBSD: common.h,v 1.8 2017/07/29 21:04:07 riastradh Exp $ */
/*-
* Copyright (c) 2013 The NetBSD Foundation, Inc.
@@ -83,6 +83,16 @@
#define ISSET(t, f) ((t) & (f))
/*
+ * Bounds checks for arithmetic.
+ */
+#define ADD_OK(T, A, B) ((A) <= __type_max(T) - (B))
+
+#define MUL_OK(T, A, B) ((A) <= __type_max(T)/(B))
+
+#define HOWMANY(X, N) (((X) + ((N) - 1))/(N))
+#define TOOMANY(T, X, N, M) (!ADD_OK(T, X, (N) - 1) || HOWMANY(X, N) > (M))
+
+/*
* We require:
*
* 0 < blocksize (duh)
diff -r fac2fc96b30b -r fe287e6f8340 usr.bin/vndcompress/offtab.c
--- a/usr.bin/vndcompress/offtab.c Sat Jul 29 19:39:58 2017 +0000
+++ b/usr.bin/vndcompress/offtab.c Sat Jul 29 21:04:07 2017 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: offtab.c,v 1.14 2017/04/16 23:50:40 riastradh Exp $ */
+/* $NetBSD: offtab.c,v 1.15 2017/07/29 21:04:07 riastradh Exp $ */
/*-
* Copyright (c) 2014 The NetBSD Foundation, Inc.
@@ -30,7 +30,7 @@
*/
#include <sys/cdefs.h>
-__RCSID("$NetBSD: offtab.c,v 1.14 2017/04/16 23:50:40 riastradh Exp $");
+__RCSID("$NetBSD: offtab.c,v 1.15 2017/07/29 21:04:07 riastradh Exp $");
#include <sys/types.h>
#include <sys/endian.h>
@@ -95,18 +95,18 @@
const uint32_t window_size = offtab_compute_window_size(offtab,
window_start);
- __CTASSERT(MAX_WINDOW_SIZE <= (SIZE_MAX / sizeof(uint64_t)));
+ __CTASSERT(MUL_OK(size_t, MAX_WINDOW_SIZE, sizeof(uint64_t)));
*bytes = (window_size * sizeof(uint64_t));
assert(window_start <= offtab->ot_n_offsets);
- __CTASSERT(MAX_N_OFFSETS <= (OFF_MAX / sizeof(uint64_t)));
+ __CTASSERT(MUL_OK(off_t, MAX_N_OFFSETS, sizeof(uint64_t)));
const off_t window_offset = ((off_t)window_start *
(off_t)sizeof(uint64_t));
assert(offtab->ot_fdpos <= OFFTAB_MAX_FDPOS);
- __CTASSERT(OFFTAB_MAX_FDPOS <=
- (OFF_MAX - (off_t)MAX_N_OFFSETS*sizeof(uint64_t)));
- assert(offtab->ot_fdpos <= (OFF_MAX - window_offset));
+ __CTASSERT(ADD_OK(off_t, OFFTAB_MAX_FDPOS,
+ (off_t)MAX_N_OFFSETS*sizeof(uint64_t)));
+ assert(ADD_OK(off_t, offtab->ot_fdpos, window_offset));
*pos = (offtab->ot_fdpos + window_offset);
}
@@ -220,7 +220,7 @@
offtab->ot_window_size = window_size;
assert(offtab->ot_window_size <= offtab->ot_n_offsets);
offtab->ot_window_start = (uint32_t)-1;
- __CTASSERT(MAX_WINDOW_SIZE <= (SIZE_MAX / sizeof(uint64_t)));
+ __CTASSERT(MUL_OK(size_t, MAX_WINDOW_SIZE, sizeof(uint64_t)));
offtab->ot_window = malloc(offtab->ot_window_size * sizeof(uint64_t));
if (offtab->ot_window == NULL)
err(1, "malloc offset table");
@@ -293,13 +293,13 @@
return false;
if (offtab->ot_window_size < offtab->ot_n_offsets) {
- __CTASSERT(MAX_N_OFFSETS <= (OFF_MAX / sizeof(uint64_t)));
+ __CTASSERT(MUL_OK(off_t, MAX_N_OFFSETS, sizeof(uint64_t)));
const off_t offtab_bytes = ((off_t)offtab->ot_n_offsets *
(off_t)sizeof(uint64_t));
assert(offtab->ot_fdpos <= OFFTAB_MAX_FDPOS);
- __CTASSERT(OFFTAB_MAX_FDPOS <=
- (OFF_MAX - (off_t)MAX_N_OFFSETS*sizeof(uint64_t)));
- assert(offtab->ot_fdpos <= (OFF_MAX - offtab_bytes));
+ __CTASSERT(ADD_OK(off_t, OFFTAB_MAX_FDPOS,
+ (off_t)MAX_N_OFFSETS*sizeof(uint64_t)));
+ assert(ADD_OK(off_t, offtab->ot_fdpos, offtab_bytes));
const off_t first_offset = (offtab->ot_fdpos + offtab_bytes);
if (lseek(offtab->ot_fd, first_offset, SEEK_SET) == -1) {
(*offtab->ot_report)("lseek to first offset 0x%"PRIx64,
@@ -387,21 +387,21 @@
}
/* Compute the number of bytes in the offset table. */
- __CTASSERT(MAX_N_OFFSETS <= OFF_MAX/sizeof(uint64_t));
+ __CTASSERT(MUL_OK(off_t, MAX_N_OFFSETS, sizeof(uint64_t)));
const off_t offtab_bytes = ((off_t)offtab->ot_n_offsets *
sizeof(uint64_t));
/* Compute the offset of the first block. */
assert(offtab->ot_fdpos <= OFFTAB_MAX_FDPOS);
- __CTASSERT(OFFTAB_MAX_FDPOS <=
- (OFF_MAX - (off_t)MAX_N_OFFSETS*sizeof(uint64_t)));
- assert(offtab->ot_fdpos <= (OFF_MAX - offtab_bytes));
+ __CTASSERT(ADD_OK(off_t, OFFTAB_MAX_FDPOS,
+ MAX_N_OFFSETS*sizeof(uint64_t)));
+ assert(ADD_OK(off_t, offtab->ot_fdpos, offtab_bytes));
const off_t first_offset = (offtab->ot_fdpos + offtab_bytes);
/* Assert that it fits in 64 bits. */
- __CTASSERT(MAX_N_OFFSETS <= UINT64_MAX/sizeof(uint64_t));
- __CTASSERT(OFFTAB_MAX_FDPOS <=
- (UINT64_MAX - (uint64_t)MAX_N_OFFSETS*sizeof(uint64_t)));
+ __CTASSERT(MUL_OK(uint64_t, MAX_N_OFFSETS, sizeof(uint64_t)));
+ __CTASSERT(ADD_OK(uint64_t, OFFTAB_MAX_FDPOS,
+ (uint64_t)MAX_N_OFFSETS*sizeof(uint64_t)));
/* Write out the first window with the first offset. */
offtab->ot_window_start = 0;
@@ -439,10 +439,12 @@
offtab_maybe_write_window(offtab, 0, n_offsets);
if (ISSET(flags, OFFTAB_CHECKPOINT_SYNC)) {
- __CTASSERT(MAX_N_OFFSETS <= (OFF_MAX / sizeof(uint64_t)));
+ __CTASSERT(MUL_OK(off_t, MAX_N_OFFSETS, sizeof(uint64_t)));
const off_t sync_bytes = ((off_t)n_offsets *
(off_t)sizeof(uint64_t));
- assert(offtab->ot_fdpos <= (OFF_MAX - sync_bytes));
+ __CTASSERT(ADD_OK(off_t, OFFTAB_MAX_FDPOS,
+ MAX_N_OFFSETS*sizeof(uint64_t)));
+ assert(ADD_OK(off_t, offtab->ot_fdpos, sync_bytes));
if (fsync_range(offtab->ot_fd, (FFILESYNC | FDISKSYNC),
offtab->ot_fdpos, (offtab->ot_fdpos + sync_bytes))
== -1)
diff -r fac2fc96b30b -r fe287e6f8340 usr.bin/vndcompress/vndcompress.c
--- a/usr.bin/vndcompress/vndcompress.c Sat Jul 29 19:39:58 2017 +0000
+++ b/usr.bin/vndcompress/vndcompress.c Sat Jul 29 21:04:07 2017 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: vndcompress.c,v 1.28 2017/04/17 00:02:45 riastradh Exp $ */
+/* $NetBSD: vndcompress.c,v 1.29 2017/07/29 21:04:07 riastradh Exp $ */
/*-
* Copyright (c) 2013 The NetBSD Foundation, Inc.
@@ -30,7 +30,7 @@
*/
#include <sys/cdefs.h>
-__RCSID("$NetBSD: vndcompress.c,v 1.28 2017/04/17 00:02:45 riastradh Exp $");
+__RCSID("$NetBSD: vndcompress.c,v 1.29 2017/07/29 21:04:07 riastradh Exp $");
#include <sys/endian.h>
#include <sys/stat.h>
@@ -148,7 +148,7 @@
err(1, "malloc uncompressed buffer");
/* XXX compression ratio bound */
- __CTASSERT(MAX_BLOCKSIZE <= (SIZE_MAX / 2));
+ __CTASSERT(MUL_OK(size_t, 2, MAX_BLOCKSIZE));
void *const compbuf = malloc(2 * (size_t)S->blocksize);
if (compbuf == NULL)
err(1, "malloc compressed buffer");
@@ -178,10 +178,11 @@
/* Fail noisily if we might be about to overflow. */
/* XXX compression ratio bound */
- __CTASSERT(MAX_BLOCKSIZE <= (UINTMAX_MAX / 2));
+ __CTASSERT(MUL_OK(uint64_t, 2, MAX_BLOCKSIZE));
+ __CTASSERT(MUL_OK(off_t, 2, MAX_BLOCKSIZE));
assert(S->offset <= MIN(UINT64_MAX, OFF_MAX));
- if ((2 * (uintmax_t)readsize) >
- (MIN(UINT64_MAX, OFF_MAX) - S->offset))
+ if (!ADD_OK(uint64_t, S->offset, 2*(uintmax_t)readsize) ||
+ !ADD_OK(off_t, S->offset, 2*(uintmax_t)readsize))
errx(1, "blkno %"PRIu32" may overflow: %ju + 2*%ju",
S->blkno, (uintmax_t)S->offset,
(uintmax_t)readsize);
@@ -197,8 +198,9 @@
* (b) how far we are now in the output file, and
* (c) where the last block ended.
*/
- assert(S->blkno <= (UINT32_MAX - 1));
- assert(complen <= (MIN(UINT64_MAX, OFF_MAX) - S->offset));
+ assert(ADD_OK(uint32_t, S->blkno, 1));
+ assert(ADD_OK(uint64_t, S->offset, complen));
+ assert(ADD_OK(off_t, (off_t)S->offset, (off_t)complen));
assert((S->blkno + 1) < S->n_offsets);
{
sigset_t old_sigmask;
@@ -231,7 +233,8 @@
(size_t)n_written, n_padding);
/* Account for the extra bytes in the output file. */
- assert(n_padding <= (MIN(UINT64_MAX, OFF_MAX) - S->offset));
+ assert(ADD_OK(uint64_t, S->offset, n_padding));
+ assert(ADD_OK(off_t, (off_t)S->offset, (off_t)n_padding));
{
sigset_t old_sigmask;
block_signals(&old_sigmask);
@@ -304,14 +307,13 @@
/* Carefully calculate our I/O position. */
assert(S->blocksize > 0);
- __CTASSERT(MAX_N_BLOCKS <= (UINT64_MAX / MAX_BLOCKSIZE));
+ __CTASSERT(MUL_OK(uint64_t, MAX_N_BLOCKS, MAX_BLOCKSIZE));
const uint64_t nread = ((uint64_t)S->blkno * (uint64_t)S->blocksize);
assert(S->n_blocks > 0);
- __CTASSERT(CLOOP2_OFFSET_TABLE_OFFSET <=
- (UINT64_MAX / sizeof(uint64_t)));
- __CTASSERT(MAX_N_BLOCKS <= ((UINT64_MAX / sizeof(uint64_t)) -
- CLOOP2_OFFSET_TABLE_OFFSET));
+ __CTASSERT(MUL_OK(uint64_t, MAX_N_BLOCKS, sizeof(uint64_t)));
+ __CTASSERT(ADD_OK(uint64_t, CLOOP2_OFFSET_TABLE_OFFSET,
+ MAX_N_BLOCKS*sizeof(uint64_t)));
const uint64_t nwritten = (S->offset <= (CLOOP2_OFFSET_TABLE_OFFSET +
((uint64_t)S->n_blocks * sizeof(uint64_t)))?
0 : S->offset);
@@ -324,7 +326,9 @@
: 0);
/* Format the status. */
- assert(S->n_checkpointed_blocks <= (UINT64_MAX / S->blocksize));
+ assert(S->n_checkpointed_blocks <= MAX_N_BLOCKS);
+ assert(S->blocksize <= MAX_BLOCKSIZE);
+ __CTASSERT(MUL_OK(uint64_t, MAX_N_BLOCKS, MAX_BLOCKSIZE));
const int n = snprintf_ss(buf, sizeof(buf),
"vndcompress: read %"PRIu64" bytes, wrote %"PRIu64" bytes, "
"compression ratio %"PRIu64"%% (checkpointed %"PRIu64" bytes)\n",
@@ -360,8 +364,9 @@
assert(S->cloop2_fd >= 0);
/* Take a checkpoint. */
- assert(S->blocksize > 0);
- assert(S->blkno <= (UINT64_MAX / S->blocksize));
+ assert(S->blkno <= MAX_N_BLOCKS);
+ assert(S->blocksize <= MAX_BLOCKSIZE);
+ __CTASSERT(MUL_OK(uint64_t, MAX_N_BLOCKS, MAX_BLOCKSIZE));
warnx_ss("checkpointing %"PRIu64" bytes",
((uint64_t)S->blkno * (uint64_t)S->blocksize));
compress_checkpoint(S);
@@ -465,15 +470,16 @@
assert(S->size <= OFF_MAX);
/* Find number of full blocks and whether there's a partial block. */
- S->n_full_blocks = (S->size / S->blocksize);
- assert(S->n_full_blocks <=
- (UINT32_MAX - ((S->size % S->blocksize) > 0)));
- S->n_blocks = (S->n_full_blocks + ((S->size % S->blocksize) > 0));
- assert(S->n_full_blocks <= S->n_blocks);
-
- if (S->n_blocks > MAX_N_BLOCKS)
+ __CTASSERT(0 < MIN_BLOCKSIZE);
+ assert(0 < S->blocksize);
+ if (TOOMANY(off_t, (off_t)S->size, (off_t)S->blocksize,
+ (off_t)MAX_N_BLOCKS))
errx(1, "image too large for block size %"PRIu32": %"PRIu64,
S->blocksize, S->size);
+ __CTASSERT(MAX_N_BLOCKS <= UINT32_MAX);
+ S->n_full_blocks = S->size/S->blocksize;
+ S->n_blocks = HOWMANY(S->size, S->blocksize);
+ assert(S->n_full_blocks <= S->n_blocks);
assert(S->n_blocks <= MAX_N_BLOCKS);
/* Choose a window size. */
@@ -481,10 +487,10 @@
DEF_WINDOW_SIZE);
/* Create an offset table for the blocks; one extra for the end. */
- __CTASSERT(MAX_N_BLOCKS <= (UINT32_MAX - 1));
+ __CTASSERT(ADD_OK(uint32_t, MAX_N_BLOCKS, 1));
S->n_offsets = (S->n_blocks + 1);
__CTASSERT(MAX_N_OFFSETS == (MAX_N_BLOCKS + 1));
- __CTASSERT(MAX_N_OFFSETS <= (SIZE_MAX / sizeof(uint64_t)));
+ __CTASSERT(MUL_OK(size_t, MAX_N_OFFSETS, sizeof(uint64_t)));
__CTASSERT(CLOOP2_OFFSET_TABLE_OFFSET <= OFFTAB_MAX_FDPOS);
offtab_init(&S->offtab, S->n_offsets, window_size, S->cloop2_fd,
CLOOP2_OFFSET_TABLE_OFFSET);
@@ -607,10 +613,10 @@
if (!offtab_prepare_get(&S->offtab, 0))
return false;
const uint64_t first_offset = offtab_get(&S->offtab, 0);
- __CTASSERT(MAX_N_OFFSETS <= UINT64_MAX/sizeof(uint64_t));
- __CTASSERT(sizeof(struct cloop2_header) <=
- (UINT64_MAX - MAX_N_OFFSETS*sizeof(uint64_t)));
- const uint64_t expected = sizeof(struct cloop2_header) +
+ __CTASSERT(MUL_OK(uint64_t, MAX_N_OFFSETS, sizeof(uint64_t)));
+ __CTASSERT(ADD_OK(uint64_t, sizeof(struct cloop2_header),
+ MAX_N_OFFSETS*sizeof(uint64_t)));
+ const uint64_t expected = sizeof(struct cloop2_header) +
((uint64_t)S->n_offsets * sizeof(uint64_t));
if (first_offset != expected) {
warnx("first offset is not 0x%"PRIx64": 0x%"PRIx64,
@@ -638,7 +644,7 @@
return false;
}