Re: [patch] fix some /dev/u?random MP issues
Slightly updated patch.  This removes the fstat(1) code that
kmem-grovels /dev/u?random contexts, makes the context structure opaque
by moving it into rndpseudo.c, and shrinks the diff to subr_cprng.c.
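
The interesting part of the rndpseudo.c change is how a per-open
context now creates its CPRNG lazily and without a per-context mutex.
Not part of the patch -- just a minimal userspace C11 analogue, with
hypothetical names (lazy_ctx, make_generator, lazy_get), of the
publish-with-CAS pattern the new rnd_ctx_cprng() uses; the kernel code
does the same dance with atomic_cas_ptr(9) and membar_producer()/
membar_consumer(), and losers of the race destroy their tentative
CPRNG.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct generator {
	unsigned seed;
};

struct lazy_ctx {
	_Atomic(struct generator *) gen;	/* NULL until first use */
};

static struct generator *
make_generator(unsigned seed)
{
	struct generator *g = malloc(sizeof(*g));

	if (g == NULL)
		abort();
	g->seed = seed;
	return g;
}

/*
 * Return ctx's generator, creating and publishing it on first use.
 * The acquire load pairs with the release half of the CAS, so a
 * reader never sees the pointer before the guts it points to.
 */
static struct generator *
lazy_get(struct lazy_ctx *ctx)
{
	struct generator *g = atomic_load_explicit(&ctx->gen,
	    memory_order_acquire);
	if (g != NULL)
		return g;			/* fast path */

	/* Slow path: build a candidate, then try to publish it. */
	struct generator *tmp = make_generator(42);
	struct generator *expected = NULL;
	if (atomic_compare_exchange_strong_explicit(&ctx->gen, &expected,
	    tmp, memory_order_acq_rel, memory_order_acquire))
		return tmp;			/* we published ours */

	free(tmp);			/* someone beat us; discard ours */
	return expected;		/* the winner's generator */
}

int
main(void)
{
	struct lazy_ctx ctx = { .gen = NULL };

	printf("seed = %u\n", lazy_get(&ctx)->seed);
	free(atomic_load(&ctx.gen));
	return 0;
}
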
commit c063ab93511759e6940e56e02a72b3518b1c1561
Author: Taylor R Campbell <riastradh%NetBSD.org@localhost>
Date: Sun Jun 23 15:39:36 2013 +0000
Fix races in /dev/u?random initialization and accounting.
- Push /dev/random `information-theoretic' accounting into cprng(9).
- Use percpu(9) for the per-CPU CPRNGs.
- Use atomics with correct memory barriers for lazy CPRNG creation.
- Remove /dev/random file kmem grovelling from fstat(1).
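
Also not in the patch: a tiny sketch, with hypothetical names
(KEYLEN_BYTES stands in for NIST_BLOCK_KEYLEN_BYTES), of the
`information-theoretic' debit the first item above pushes into
cprng_strong(9).  Each /dev/random handle is budgeted one key-length of
output per seeding; a request past the budget is clamped to what is
left and forces a reseed before the next read, which is why rnd_read()
below may see n_read < n_req only for rc_hard contexts.

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define KEYLEN_BYTES	32	/* stands in for NIST_BLOCK_KEYLEN_BYTES */

struct hard_rng {
	size_t remaining;	/* bytes left on the current seeding */
	bool   ready;		/* false => reseed before generating */
};

/* Debit bytes against the current seeding; return how many may be read. */
static size_t
debit(struct hard_rng *r, size_t bytes)
{

	assert(0 < r->remaining && r->remaining <= KEYLEN_BYTES);
	if (bytes < r->remaining) {
		r->remaining -= bytes;
	} else {
		bytes = r->remaining;		/* clamp to what's left */
		r->remaining = KEYLEN_BYTES;	/* budget for next seeding */
		r->ready = false;	/* real code also schedules rndsink */
	}
	return bytes;
}

int
main(void)
{
	struct hard_rng r = { .remaining = KEYLEN_BYTES, .ready = true };

	size_t got = debit(&r, 20);
	printf("ask 20 -> get %zu, ready=%d\n", got, r.ready);
	got = debit(&r, 20);
	printf("ask 20 -> get %zu, ready=%d\n", got, r.ready);
	return 0;
}
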
diff --git a/sys/dev/rndpseudo.c b/sys/dev/rndpseudo.c
index 3bbb522..dd61fae 100644
--- a/sys/dev/rndpseudo.c
+++ b/sys/dev/rndpseudo.c
@@ -1,11 +1,12 @@
/* $NetBSD: rndpseudo.c,v 1.12 2013/06/13 00:55:01 tls Exp $ */
/*-
- * Copyright (c) 1997-2011 The NetBSD Foundation, Inc.
+ * Copyright (c) 1997-2013 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
- * by Michael Graff <explorer%flame.org@localhost> and Thor Lancelot Simon.
+ * by Michael Graff <explorer%flame.org@localhost>, Thor Lancelot Simon, and
+ * Taylor R. Campbell.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -56,6 +57,7 @@ __KERNEL_RCSID(0, "$NetBSD: rndpseudo.c,v 1.12 2013/06/13 00:55:01 tls Exp $");
#include <sys/cprng.h>
#include <sys/cpu.h>
#include <sys/stat.h>
+#include <sys/percpu.h>
#include <sys/rnd.h>
#ifdef COMPAT_50
@@ -88,18 +90,26 @@ extern int rnd_debug;
#endif
/*
- * The size of a temporary buffer, kmem_alloc()ed when needed, and used for
- * reading and writing data.
+ * The size of a temporary buffer for reading and writing entropy.
*/
#define RND_TEMP_BUFFER_SIZE 512
-static pool_cache_t rp_pc;
-static pool_cache_t rp_cpc;
+static pool_cache_t rnd_temp_buffer_cache;
+
+/*
+ * Per-open state -- a lazily initialized CPRNG.
+ */
+struct rnd_ctx {
+ struct cprng_strong *rc_cprng;
+ bool rc_hard;
+};
+
+static pool_cache_t rnd_ctx_cache;
/*
* The per-CPU RNGs used for short requests
*/
-cprng_strong_t **rp_cpurngs;
+static percpu_t *percpu_urandom_cprng;
/*
* Our random pool. This is defined here rather than using the general
@@ -164,190 +174,223 @@ rndpseudo_counter(void)
}
/*
- * "Attach" the random device. This is an (almost) empty stub, since
- * pseudo-devices don't get attached until after config, after the
- * entropy sources will attach. We just use the timing of this event
- * as another potential source of initial entropy.
+ * `Attach' the random device. We use the timing of this event as
+ * another potential source of initial entropy.
*/
void
rndattach(int num)
{
- u_int32_t c;
+ uint32_t c;
- /* Trap unwary players who don't call rnd_init() early */
+ /* Trap unwary players who don't call rnd_init() early. */
KASSERT(rnd_ready);
- rp_pc = pool_cache_init(RND_TEMP_BUFFER_SIZE, 0, 0, 0,
- "rndtemp", NULL, IPL_NONE,
- NULL, NULL, NULL);
- rp_cpc = pool_cache_init(sizeof(rp_ctx_t), 0, 0, 0,
- "rndctx", NULL, IPL_NONE,
- NULL, NULL, NULL);
+ rnd_temp_buffer_cache = pool_cache_init(RND_TEMP_BUFFER_SIZE, 0, 0, 0,
+ "rndtemp", NULL, IPL_NONE, NULL, NULL, NULL);
+ rnd_ctx_cache = pool_cache_init(sizeof(struct rnd_ctx), 0, 0, 0,
+ "rndctx", NULL, IPL_NONE, NULL, NULL, NULL);
+ percpu_urandom_cprng = percpu_alloc(sizeof(struct cprng_strong *));
- /* mix in another counter */
+ /* Mix in another counter. */
c = rndpseudo_counter();
mutex_spin_enter(&rndpool_mtx);
- rndpool_add_data(&rnd_pool, &c, sizeof(u_int32_t), 1);
+ rndpool_add_data(&rnd_pool, &c, sizeof(c), 1);
mutex_spin_exit(&rndpool_mtx);
-
- rp_cpurngs = kmem_zalloc(maxcpus * sizeof(cprng_strong_t *),
- KM_SLEEP);
}
int
-rndopen(dev_t dev, int flag, int ifmt,
- struct lwp *l)
+rndopen(dev_t dev, int flags, int fmt, struct lwp *l)
{
- rp_ctx_t *ctx;
- file_t *fp;
- int fd, hard, error = 0;
+ bool hard;
+ struct file *fp;
+ int fd;
+ int error;
switch (minor(dev)) {
- case RND_DEV_URANDOM:
- hard = 0;
+ case RND_DEV_URANDOM:
+ hard = false;
break;
- case RND_DEV_RANDOM:
- hard = 1;
+
+ case RND_DEV_RANDOM:
+ hard = true;
break;
- default:
+
+ default:
return ENXIO;
}
- ctx = pool_cache_get(rp_cpc, PR_WAITOK);
- if ((error = fd_allocfile(&fp, &fd)) != 0) {
- pool_cache_put(rp_cpc, ctx);
- return error;
- }
- ctx->cprng = NULL;
- ctx->hard = hard;
- ctx->bytesonkey = 0;
- mutex_init(&ctx->interlock, MUTEX_DEFAULT, IPL_NONE);
- return fd_clone(fp, fd, flag, &rnd_fileops, ctx);
+ error = fd_allocfile(&fp, &fd);
+ if (error)
+ return error;
+
+ /*
+ * Allocate a context, but don't create a CPRNG yet -- do that
+ * lazily because it consumes entropy from the system entropy
+ * pool, which (currently) has the effect of depleting it and
+ * causing readers from /dev/random to block. If this is
+ * /dev/urandom and the process is about to send only short
+ * reads to it, then we will be using a per-CPU CPRNG anyway.
+ */
+ struct rnd_ctx *const ctx = pool_cache_get(rnd_ctx_cache, PR_WAITOK);
+ ctx->rc_cprng = NULL;
+ ctx->rc_hard = hard;
+
+ error = fd_clone(fp, fd, flags, &rnd_fileops, ctx);
+ KASSERT(error == EMOVEFD);
+
+ return error;
}
-static void
-rnd_alloc_cprng(rp_ctx_t *ctx)
+/*
+ * Fetch a /dev/u?random context's CPRNG, or create and save one if
+ * necessary.
+ */
+static struct cprng_strong *
+rnd_ctx_cprng(struct rnd_ctx *ctx)
{
- char personalization_buf[64];
- struct lwp *l = curlwp;
- int cflags = ctx->hard ? CPRNG_USE_CV :
- CPRNG_INIT_ANY|CPRNG_REKEY_ANY;
-
- mutex_enter(&ctx->interlock);
- if (__predict_true(ctx->cprng == NULL)) {
- snprintf(personalization_buf,
- sizeof(personalization_buf),
- "%d%llud%d", l->l_proc->p_pid,
- (unsigned long long int)l->l_ncsw, l->l_cpticks);
- ctx->cprng = cprng_strong_create(personalization_buf,
- IPL_NONE, cflags);
+ struct cprng_strong *cprng, *tmp = NULL;
+
+ /* Fast path: if someone has already allocated a CPRNG, use it. */
+ cprng = ctx->rc_cprng;
+ if (__predict_true(cprng != NULL)) {
+ /* Make sure the CPU hasn't prefetched cprng's guts. */
+ membar_consumer();
+ goto out;
}
- membar_sync();
- mutex_exit(&ctx->interlock);
+
+ /* Slow path: create a CPRNG. Allocate before taking locks. */
+ char name[64];
+ struct lwp *const l = curlwp;
+ (void)snprintf(name, sizeof(name), "%d %"PRIu64" %u",
+ (int)l->l_proc->p_pid, l->l_ncsw, l->l_cpticks);
+ const int flags = (ctx->rc_hard? (CPRNG_USE_CV | CPRNG_HARD) :
+ (CPRNG_INIT_ANY | CPRNG_REKEY_ANY));
+ tmp = cprng_strong_create(name, IPL_NONE, flags);
+
+ /* Publish cprng's guts before the pointer to them. */
+ membar_producer();
+
+ /* Attempt to publish tmp, unless someone beat us. */
+ cprng = atomic_cas_ptr(&ctx->rc_cprng, NULL, tmp);
+ if (__predict_false(cprng != NULL)) {
+ /* Make sure the CPU hasn't prefetched cprng's guts. */
+ membar_consumer();
+ goto out;
+ }
+
+ /* Published. Commit tmp. */
+ cprng = tmp;
+ tmp = NULL;
+
+out: if (tmp != NULL)
+ cprng_strong_destroy(tmp);
+ KASSERT(cprng != NULL);
+ return cprng;
+}
+
+/*
+ * Fetch a per-CPU CPRNG, or create and save one if necessary.
+ */
+static struct cprng_strong *
+rnd_percpu_cprng(void)
+{
+ struct cprng_strong **cprngp, *cprng, *tmp;
+
+ /* Fast path: if there already is a CPRNG for this CPU, use it. */
+ cprngp = percpu_getref(percpu_urandom_cprng);
+ cprng = *cprngp;
+ if (__predict_true(cprng != NULL))
+ goto out;
+ percpu_putref(percpu_urandom_cprng);
+
+ /*
+ * Slow path: create a CPRNG named by this CPU.
+ *
+ * XXX The CPU of the name may be different from the CPU to
+ * which it is assigned, because we need to choose a name and
+ * allocate a cprng while preemption is enabled. This could be
+ * fixed by changing the cprng_strong API (e.g., by adding a
+ * cprng_strong_setname or by separating allocation from
+ * initialization), but it's not clear that's worth the
+ * trouble.
+ */
+ char name[32];
+ (void)snprintf(name, sizeof(name), "urandom%u", cpu_index(curcpu()));
+ tmp = cprng_strong_create(name, IPL_NONE,
+ (CPRNG_INIT_ANY | CPRNG_REKEY_ANY));
+
+ /* Try again, but we may have been preempted and lost a race. */
+ cprngp = percpu_getref(percpu_urandom_cprng);
+ cprng = *cprngp;
+ if (__predict_false(cprng != NULL))
+ goto out;
+
+ /* Commit the CPRNG we just created. */
+ cprng = tmp;
+ tmp = NULL;
+ *cprngp = cprng;
+
+out: percpu_putref(percpu_urandom_cprng);
+ if (tmp != NULL)
+ cprng_strong_destroy(tmp);
+ KASSERT(cprng != NULL);
+ return cprng;
}
static int
-rnd_read(struct file * fp, off_t *offp, struct uio *uio,
- kauth_cred_t cred, int flags)
+rnd_read(struct file *fp, off_t *offp, struct uio *uio, kauth_cred_t cred,
+ int flags)
{
- rp_ctx_t *ctx = fp->f_data;
- cprng_strong_t *cprng;
- u_int8_t *bf;
- int strength, ret;
- struct cpu_info *ci = curcpu();
+ int error;
DPRINTF(RND_DEBUG_READ,
- ("Random: Read of %zu requested, flags 0x%08x\n",
- uio->uio_resid, flags));
+ ("Random: Read of %zu requested, flags 0x%08x\n",
+ uio->uio_resid, flags));
if (uio->uio_resid == 0)
- return (0);
+ return 0;
- if (ctx->hard || uio->uio_resid > NIST_BLOCK_KEYLEN_BYTES) {
- if (ctx->cprng == NULL) {
- rnd_alloc_cprng(ctx);
- }
- cprng = ctx->cprng;
- } else {
- int index = cpu_index(ci);
-
- if (__predict_false(rp_cpurngs[index] == NULL)) {
- char rngname[32];
-
- snprintf(rngname, sizeof(rngname),
- "%s-short", cpu_name(ci));
- rp_cpurngs[index] =
- cprng_strong_create(rngname, IPL_NONE,
- CPRNG_INIT_ANY |
- CPRNG_REKEY_ANY);
- }
- cprng = rp_cpurngs[index];
- }
+ struct rnd_ctx *const ctx = fp->f_data;
+ uint8_t *const buf = pool_cache_get(rnd_temp_buffer_cache, PR_WAITOK);
- if (__predict_false(cprng == NULL)) {
- printf("NULL rng!\n");
- return EIO;
- }
+ /*
+ * Choose a CPRNG to use -- either the per-open CPRNG, if this
+ * is /dev/random or a long read, or the per-CPU one otherwise.
+ *
+ * XXX NIST_BLOCK_KEYLEN_BYTES is a detail of the cprng(9)
+ * implementation and as such should not be mentioned here.
+ */
+ struct cprng_strong *const cprng =
+ ((ctx->rc_hard || (uio->uio_resid > NIST_BLOCK_KEYLEN_BYTES))?
+ rnd_ctx_cprng(ctx) : rnd_percpu_cprng());
- strength = cprng_strong_strength(cprng);
- ret = 0;
- bf = pool_cache_get(rp_pc, PR_WAITOK);
+ /*
+ * Generate the data in RND_TEMP_BUFFER_SIZE chunks.
+ */
while (uio->uio_resid > 0) {
- int n, nread, want;
-
- want = MIN(RND_TEMP_BUFFER_SIZE, uio->uio_resid);
-
- /* XXX is this _really_ what's wanted? */
- if (ctx->hard) {
-#ifdef RND_VERBOSE
- printf("rnd: hard, want = %d, strength = %d, "
- "bytesonkey = %d\n", (int)want, (int)strength,
- (int)ctx->bytesonkey);
-#endif
- n = MIN(want, strength - ctx->bytesonkey);
- if (n < 1) {
-#ifdef RND_VERBOSE
- printf("rnd: BAD BAD BAD: n = %d, want = %d, "
- "strength = %d, bytesonkey = %d\n", n,
- (int)want, (int)strength,
- (int)ctx->bytesonkey);
-#endif
- }
- } else {
- n = want;
- }
+ const size_t n_req = MIN(uio->uio_resid, RND_TEMP_BUFFER_SIZE);
- nread = cprng_strong(cprng, bf, n,
- (fp->f_flag & FNONBLOCK) ? FNONBLOCK : 0);
+ CTASSERT(RND_TEMP_BUFFER_SIZE <= CPRNG_MAX_LEN);
+ const size_t n_read = cprng_strong(cprng, buf, n_req,
+ ((ctx->rc_hard && ISSET(fp->f_flag, FNONBLOCK))?
+ FNONBLOCK : 0));
- if (ctx->hard && nread > 0) {
- if (atomic_add_int_nv(&ctx->bytesonkey, nread) >=
- strength) {
- cprng_strong_deplete(cprng);
- ctx->bytesonkey = 0;
- membar_producer();
- }
-#ifdef RND_VERBOSE
- printf("rnd: new bytesonkey %d\n", ctx->bytesonkey);
-#endif
- }
- if (nread < 1) {
- if (fp->f_flag & FNONBLOCK) {
- ret = EWOULDBLOCK;
- } else {
- ret = EINTR;
- }
- goto out;
- }
+ /*
+ * Equality will hold unless this is /dev/random, in
+ * which case we get only as many bytes as are left
+ * from the CPRNG's `information-theoretic strength'
+ * since the last rekey.
+ */
+ KASSERT(n_read <= n_req);
+ KASSERT(ctx->rc_hard || (n_read == n_req));
- ret = uiomove((void *)bf, nread, uio);
- if (ret != 0 || n < want) {
+ error = uiomove(buf, n_read, uio);
+ if (error)
goto out;
- }
}
-out:
- pool_cache_put(rp_pc, bf);
- return (ret);
+
+out: pool_cache_put(rnd_temp_buffer_cache, buf);
+ return error;
}
static int
@@ -371,7 +414,7 @@ rnd_write(struct file *fp, off_t *offp, struct uio *uio,
if (uio->uio_resid == 0)
return (0);
ret = 0;
- bf = pool_cache_get(rp_pc, PR_WAITOK);
+ bf = pool_cache_get(rnd_temp_buffer_cache, PR_WAITOK);
while (uio->uio_resid > 0) {
/*
* Don't flood the pool.
@@ -418,7 +461,7 @@ rnd_write(struct file *fp, off_t *offp, struct uio *uio,
added += n;
DPRINTF(RND_DEBUG_WRITE, ("Random: Copied in %d bytes\n", n));
}
- pool_cache_put(rp_pc, bf);
+ pool_cache_put(rnd_temp_buffer_cache, bf);
return (ret);
}
@@ -658,8 +701,8 @@ rnd_ioctl(struct file *fp, u_long cmd, void *addr)
static int
rnd_poll(struct file *fp, int events)
{
+ struct rnd_ctx *const ctx = fp->f_data;
int revents;
- rp_ctx_t *ctx = fp->f_data;
/*
* We are always writable.
@@ -670,34 +713,29 @@ rnd_poll(struct file *fp, int events)
* Save some work if not checking for reads.
*/
if ((events & (POLLIN | POLLRDNORM)) == 0)
- return (revents);
+ return revents;
- if (ctx->cprng == NULL) {
- rnd_alloc_cprng(ctx);
- if (__predict_false(ctx->cprng == NULL)) {
- return EIO;
- }
- }
-
- if (ctx->hard) {
- revents |= cprng_strong_poll(ctx->cprng, events);
- } else {
+ /*
+ * For /dev/random, ask the CPRNG, which may require creating
+ * one. For /dev/urandom, we're always readable.
+ */
+ if (ctx->rc_hard)
+ revents |= cprng_strong_poll(rnd_ctx_cprng(ctx), events);
+ else
revents |= (events & (POLLIN | POLLRDNORM));
- }
- return (revents);
+ return revents;
}
static int
rnd_stat(struct file *fp, struct stat *st)
{
- rp_ctx_t *ctx = fp->f_data;
+ struct rnd_ctx *const ctx = fp->f_data;
/* XXX lock, if cprng allocated? why? */
memset(st, 0, sizeof(*st));
st->st_dev = makedev(cdevsw_lookup_major(&rnd_cdevsw),
- ctx->hard ? RND_DEV_RANDOM :
- RND_DEV_URANDOM);
+ (ctx->rc_hard? RND_DEV_RANDOM : RND_DEV_URANDOM));
/* XXX leave atimespect, mtimespec, ctimespec = 0? */
st->st_uid = kauth_cred_geteuid(fp->f_cred);
@@ -709,14 +747,12 @@ rnd_stat(struct file *fp, struct stat *st)
static int
rnd_close(struct file *fp)
{
- rp_ctx_t *ctx = fp->f_data;
+ struct rnd_ctx *const ctx = fp->f_data;
- if (ctx->cprng) {
- cprng_strong_destroy(ctx->cprng);
- }
+ if (ctx->rc_cprng != NULL)
+ cprng_strong_destroy(ctx->rc_cprng);
fp->f_data = NULL;
- mutex_destroy(&ctx->interlock);
- pool_cache_put(rp_cpc, ctx);
+ pool_cache_put(rnd_ctx_cache, ctx);
return 0;
}
@@ -724,14 +760,7 @@ rnd_close(struct file *fp)
static int
rnd_kqfilter(struct file *fp, struct knote *kn)
{
- rp_ctx_t *ctx = fp->f_data;
-
- if (ctx->cprng == NULL) {
- rnd_alloc_cprng(ctx);
- if (__predict_false(ctx->cprng == NULL)) {
- return EIO;
- }
- }
+ struct rnd_ctx *const ctx = fp->f_data;
- return cprng_strong_kqfilter(ctx->cprng, kn);
+ return cprng_strong_kqfilter(rnd_ctx_cprng(ctx), kn);
}
diff --git a/sys/kern/subr_cprng.c b/sys/kern/subr_cprng.c
index 77f6aa9..f38ab01 100644
--- a/sys/kern/subr_cprng.c
+++ b/sys/kern/subr_cprng.c
@@ -98,6 +98,9 @@ struct cprng_strong {
struct rndsink *cs_rndsink;
bool cs_ready;
NIST_CTR_DRBG cs_drbg;
+
+ /* XXX Kludge for /dev/random `information-theoretic' properties. */
+ unsigned int cs_remaining;
};
struct cprng_strong *
@@ -133,6 +136,11 @@ cprng_strong_create(const char *name, int ipl, int flags)
cprng->cs_name);
explicit_bzero(seed, sizeof(seed));
+ if (ISSET(flags, CPRNG_HARD))
+ cprng->cs_remaining = NIST_BLOCK_KEYLEN_BYTES;
+ else
+ cprng->cs_remaining = 0;
+
if (!cprng->cs_ready && !ISSET(flags, CPRNG_INIT_ANY))
printf("cprng %s: creating with partial entropy\n",
cprng->cs_name);
@@ -192,6 +200,27 @@ cprng_strong(struct cprng_strong *cprng, void *buffer, size_t bytes, int flags)
}
}
+ /*
+ * Debit the entropy if requested.
+ *
+ * XXX Kludge for /dev/random `information-theoretic' properties.
+ */
+ if (__predict_false(ISSET(cprng->cs_flags, CPRNG_HARD))) {
+ KASSERT(0 < cprng->cs_remaining);
+ KASSERT(cprng->cs_remaining <= NIST_BLOCK_KEYLEN_BYTES);
+ if (bytes < cprng->cs_remaining) {
+ cprng->cs_remaining -= bytes;
+ } else {
+ bytes = cprng->cs_remaining;
+ cprng->cs_remaining = NIST_BLOCK_KEYLEN_BYTES;
+ cprng->cs_ready = false;
+ rndsink_schedule(cprng->cs_rndsink);
+ }
+ KASSERT(bytes <= NIST_BLOCK_KEYLEN_BYTES);
+ KASSERT(0 < cprng->cs_remaining);
+ KASSERT(cprng->cs_remaining <= NIST_BLOCK_KEYLEN_BYTES);
+ }
+
cprng_strong_generate(cprng, buffer, bytes);
result = bytes;
@@ -279,19 +308,6 @@ cprng_strong_poll(struct cprng_strong *cprng, int events)
}
/*
- * XXX Kludge for the current /dev/random implementation.
- */
-void
-cprng_strong_deplete(struct cprng_strong *cprng)
-{
-
- mutex_enter(&cprng->cs_lock);
- cprng->cs_ready = false;
- rndsink_schedule(cprng->cs_rndsink);
- mutex_exit(&cprng->cs_lock);
-}
-
-/*
* XXX Move nist_ctr_drbg_reseed_advised_p and
* nist_ctr_drbg_reseed_needed_p into the nist_ctr_drbg API and make
* the NIST_CTR_DRBG structure opaque.
@@ -314,8 +330,7 @@ nist_ctr_drbg_reseed_needed_p(NIST_CTR_DRBG *drbg)
* Generate some data from the underlying generator.
*/
static void
-cprng_strong_generate(struct cprng_strong *cprng, void *buffer,
- size_t bytes)
+cprng_strong_generate(struct cprng_strong *cprng, void *buffer, size_t bytes)
{
const uint32_t cc = cprng_counter();
@@ -342,9 +357,6 @@ cprng_strong_generate(struct cprng_strong *cprng, void *buffer,
/*
* If we just exhausted the generator, inform the next user
* that we need a reseed.
- *
- * XXX For /dev/random CPRNGs, the criterion is supposed to be
- * `Has this seeding generated 32 bytes?'.
*/
if (__predict_false(nist_ctr_drbg_reseed_needed_p(&cprng->cs_drbg))) {
cprng->cs_ready = false;
diff --git a/sys/sys/cprng.h b/sys/sys/cprng.h
index 89b3f69..b54a652 100644
--- a/sys/sys/cprng.h
+++ b/sys/sys/cprng.h
@@ -87,10 +87,12 @@ void cprng_init(void);
#define CPRNG_INIT_ANY 0x00000001
#define CPRNG_REKEY_ANY 0x00000002
#define CPRNG_USE_CV 0x00000004
+#define CPRNG_HARD 0x00000008
#define CPRNG_FMT "\177\020\
b\0INIT_ANY\0\
b\1REKEY_ANY\0\
-b\2USE_CV\0"
+b\2USE_CV\0\
+b\3HARD\0"
cprng_strong_t *
cprng_strong_create(const char *, int, int);
@@ -100,7 +102,6 @@ size_t cprng_strong(cprng_strong_t *, void *, size_t, int);
struct knote; /* XXX temp, for /dev/random */
int cprng_strong_kqfilter(cprng_strong_t *, struct knote *); /* XXX " */
int cprng_strong_poll(cprng_strong_t *, int); /* XXX " */
-void cprng_strong_deplete(cprng_strong_t *); /* XXX " */
extern cprng_strong_t *kern_cprng;
diff --git a/sys/sys/rnd.h b/sys/sys/rnd.h
index 6333c84..37f1f82 100644
--- a/sys/sys/rnd.h
+++ b/sys/sys/rnd.h
@@ -41,7 +41,6 @@
#include <sys/sha1.h>
#ifdef _KERNEL
-#include <sys/mutex.h>
#include <sys/queue.h>
#endif
@@ -233,16 +232,4 @@ typedef struct {
#define RNDADDDATA _IOW('R', 105, rnddata_t) /* add data to the pool */
#define RNDGETPOOLSTAT _IOR('R', 106, rndpoolstat_t) /* get statistics */
-#ifdef _KERNEL
-/*
- * A context. cprng plus a smidge.
- */
-typedef struct {
- struct cprng_strong *cprng;
- int hard;
- int bytesonkey;
- kmutex_t interlock;
-} rp_ctx_t;
-#endif
-
#endif /* !_SYS_RND_H_ */
diff --git a/usr.bin/fstat/misc.c b/usr.bin/fstat/misc.c
index 0b67bb2..716ade6 100644
--- a/usr.bin/fstat/misc.c
+++ b/usr.bin/fstat/misc.c
@@ -101,8 +101,6 @@ static struct nlist nl[] = {
{ .n_name = "vnops" },
#define NL_XENEVT 16
{ .n_name = "xenevt_fileops" },
-#define NL_RND 17
- { .n_name = "rnd_fileops" },
#define NL_MAX 18
{ .n_name = NULL }
};
@@ -196,21 +194,6 @@ p_kqueue(struct file *f)
return 0;
}
-static int
-p_rnd(struct file *f)
-{
- rp_ctx_t rp;
-
- if (!KVM_READ(f->f_data, &rp, sizeof(rp))) {
- dprintf("can't read rnd at %p for pid %d", f->f_data, Pid);
- return 0;
- }
- (void)printf("* rnd");
- if (rp.hard)
- printf(" bytesonkey=%d", rp.bytesonkey);
- printf("\n");
- return 0;
-}
int
pmisc(struct file *f, const char *name)
{
@@ -244,8 +227,6 @@ pmisc(struct file *f, const char *name)
return p_kqueue(f);
case NL_SEM:
return p_sem(f);
- case NL_RND:
- return p_rnd(f);
case NL_TAP:
printf("* tap %lu\n", (unsigned long)(intptr_t)f->f_data);
return 0;