Source-Changes-HG archive
[src/trunk]: src/sys/kern entropy: Sample cycle counter or timecounter in hardclock.
details: https://anonhg.NetBSD.org/src/rev/2826828743c3
branches: trunk
changeset: 979881:2826828743c3
user: riastradh <riastradh@NetBSD.org>
date: Sat Jan 16 02:20:00 2021 +0000
description:
entropy: Sample cycle counter or timecounter in hardclock.
Only do so when we're short on entropy, in order to minimize
performance impact.
The sampling should stay close to the time of the actual hardclock
timer interrupt, so that the oscillator driving that interrupt
determines when we sample the cycle counter or timecounter, which we
hope is driven by an independent oscillator.
If we used a callout, there might be many other influences -- such as
spin lock delays possibly synchronized with this core's cycle counter
-- that could get between the timer interrupt and the sample.
In the glorious tickless future, this should instead be wired up to
the timer interrupt handler, however that manifests in the tickless
API.
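Before the patch itself, the idea in a freestanding sketch. This is
only an illustration, not the NetBSD code: read_cycle_counter() and
pool_add_sample() are hypothetical stand-ins for the machine's cycle
counter/timecounter and for rnd_add_uint32(), and the per-package and
primary-CPU details of the real patch are omitted.

#include <inttypes.h>
#include <stdatomic.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define NBBY    8                       /* bits per byte */

static atomic_uint samples_needed;      /* outstanding sample count */

/* Hypothetical stand-in for a free-running CPU cycle counter. */
static uint32_t
read_cycle_counter(void)
{
        static uint32_t fake = 0x12345678;

        return fake += 0x9e3779b9;      /* placeholder for rdtsc & co. */
}

/* Hypothetical stand-in for entering a sample into the pool. */
static void
pool_add_sample(uint32_t sample)
{

        printf("sample: %08" PRIx32 "\n", sample);
}

/* Callback from the pool: it wants nbytes more bytes of entropy. */
static void
entropy_request(size_t nbytes)
{

        /* Arm 2*NBBY samples per byte, as in the patch below. */
        atomic_store_explicit(&samples_needed,
            2*NBBY*(unsigned)nbytes, memory_order_relaxed);
}

/* Called from the periodic timer interrupt, like hardclock(). */
static void
timer_interrupt_sample(void)
{

        /* Fast path: nothing needed right now, so touch nothing. */
        if (atomic_load_explicit(&samples_needed,
            memory_order_relaxed) == 0)
                return;

        pool_add_sample(read_cycle_counter());
        atomic_fetch_sub_explicit(&samples_needed, 1,
            memory_order_relaxed);
}

int
main(void)
{

        entropy_request(1);             /* request one byte */
        for (int i = 0; i < 20; i++)    /* simulate 20 timer ticks */
                timer_interrupt_sample();
        return 0;
}

The structural point is the same as in the patch: the sample is taken
directly in the timer interrupt, so the only thing deciding *when* the
cycle counter is read is the oscillator driving that interrupt.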
diffstat:
sys/kern/kern_clock.c | 75 +++++++++++++++++++++++++++++++++++++++++++++++++-
1 files changed, 73 insertions(+), 2 deletions(-)
diffs (124 lines):
diff -r f532670d8ce6 -r 2826828743c3 sys/kern/kern_clock.c
--- a/sys/kern/kern_clock.c Sat Jan 16 01:47:23 2021 +0000
+++ b/sys/kern/kern_clock.c Sat Jan 16 02:20:00 2021 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: kern_clock.c,v 1.143 2020/12/05 18:17:01 thorpej Exp $ */
+/* $NetBSD: kern_clock.c,v 1.144 2021/01/16 02:20:00 riastradh Exp $ */
/*-
* Copyright (c) 2000, 2004, 2006, 2007, 2008 The NetBSD Foundation, Inc.
@@ -69,7 +69,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_clock.c,v 1.143 2020/12/05 18:17:01 thorpej Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_clock.c,v 1.144 2021/01/16 02:20:00 riastradh Exp $");
#ifdef _KERNEL_OPT
#include "opt_dtrace.h"
@@ -90,6 +90,7 @@
#include <sys/timetc.h>
#include <sys/cpu.h>
#include <sys/atomic.h>
+#include <sys/rndsource.h>
#ifdef GPROF
#include <sys/gmon.h>
@@ -138,6 +139,61 @@
static int psdiv; /* prof => stat divider */
int psratio; /* ratio: prof / stat */
+struct clockrnd {
+ struct krndsource source;
+ unsigned needed;
+};
+
+static struct clockrnd hardclockrnd __aligned(COHERENCY_UNIT);
+static struct clockrnd statclockrnd __aligned(COHERENCY_UNIT);
+
+static void
+clockrnd_get(size_t needed, void *cookie)
+{
+ struct clockrnd *C = cookie;
+
+ /* Start sampling. */
+ atomic_store_relaxed(&C->needed, 2*NBBY*needed);
+}
+
+static void
+clockrnd_sample(struct clockrnd *C)
+{
+ struct cpu_info *ci = curcpu();
+
+ /* If there's nothing needed right now, stop here. */
+ if (__predict_true(C->needed == 0))
+ return;
+
+ /*
+ * If we're not the primary core of a package, we're probably
+ * driven by the same clock as the primary core, so don't
+ * bother.
+ */
+ if (ci != ci->ci_package1st)
+ return;
+
+ /* Take a sample and enter it into the pool. */
+ rnd_add_uint32(&C->source, 0);
+
+ /*
+ * On the primary CPU, count down. Using an atomic decrement
+ * here isn't really necessary -- on every platform we care
+ * about, stores to unsigned int are atomic, and the only other
+ * memory operation that could happen here is for another CPU
+ * to store a higher value for needed. But using an atomic
+ * decrement avoids giving the impression of data races, and is
+ * unlikely to hurt because only one CPU will ever be writing
+ * to the location.
+ */
+ if (CPU_IS_PRIMARY(curcpu())) {
+ unsigned needed __diagused;
+
+ needed = atomic_dec_uint_nv(&C->needed);
+ KASSERT(needed != UINT_MAX);
+ }
+}
+
static u_int get_intr_timecount(struct timecounter *);
static struct timecounter intr_timecounter = {
@@ -224,6 +280,16 @@
SYSCTL_DESCR("Number of hardclock ticks"),
NULL, 0, &hardclock_ticks, sizeof(hardclock_ticks),
CTL_KERN, KERN_HARDCLOCK_TICKS, CTL_EOL);
+
+ rndsource_setcb(&hardclockrnd.source, clockrnd_get, &hardclockrnd);
+ rnd_attach_source(&hardclockrnd.source, "hardclock", RND_TYPE_SKEW,
+ RND_FLAG_COLLECT_TIME|RND_FLAG_HASCB);
+ if (stathz) {
+ rndsource_setcb(&statclockrnd.source, clockrnd_get,
+ &statclockrnd);
+ rnd_attach_source(&statclockrnd.source, "statclock",
+ RND_TYPE_SKEW, RND_FLAG_COLLECT_TIME|RND_FLAG_HASCB);
+ }
}
/*
@@ -235,6 +301,8 @@
struct lwp *l;
struct cpu_info *ci;
+ clockrnd_sample(&hardclockrnd);
+
ci = curcpu();
l = ci->ci_onproc;
@@ -338,6 +406,9 @@
struct proc *p;
struct lwp *l;
+ if (stathz)
+ clockrnd_sample(&statclockrnd);
+
/*
* Notice changes in divisor frequency, and adjust clock
* frequency accordingly.
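One reading of the arming arithmetic in clockrnd_get() above (the
patch itself is authoritative): NBBY is the number of bits per byte,
so a request for `needed' bytes arms the counter for 2*NBBY*needed
samples. For example, a request for 32 bytes (256 bits) arms
2*8*32 = 512 hardclock samples, i.e. two timestamped ticks per bit
requested, presumably on the expectation that each sample yields on
the order of half a bit of entropy at best.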