[src/trunk]: src/sys/dev/ic Fix the TSC timecounter (on the systems I have access to)
details: https://anonhg.NetBSD.org/src/rev/e23bda089971
branches: trunk
changeset: 1009956:e23bda089971
user: ad <ad@NetBSD.org>
date: Fri May 08 22:01:54 2020 +0000
description:
Fix the TSC timecounter (on the systems I have access to):
- Make the early i8254-based calculation of frequency a bit more accurate.
- Keep track of how far the HPET & TSC advance between HPET attach and
secondary CPU boot, and use that to compute an accurate value before
attaching the timecounter. Initial idea from joerg@.
- When determining skew and drift between CPUs, take each measurement 1000
times and pick the lowest observed value (see the sketch after this list).
Increase the error threshold to 1000 clock cycles.
- Use the frequency computed on the boot CPU for secondary CPUs too.
- Remove cpu_counter_serializing().
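Why the minimum rather than an average: every source of measurement noise
(cache misses, bus contention, a stray interrupt) can only widen the observed
gap between the two counter reads, so the smallest-magnitude sample seen over
many rounds is the one closest to the true skew. A minimal standalone sketch
of that selection logic, with a hypothetical read_counter_pair() standing in
for the tsc_sync_bp()/tsc_sync_ap() handshake in the diff below:

#include <stdint.h>

#define SYNC_ROUNDS	1000
#define ABS64(a)	((a) >= 0 ? (a) : -(a))

/* Hypothetical: read the boot CPU's and the other CPU's counters as
 * close together in time as the hardware allows (the role played by
 * the tsc_read_bp()/tsc_post_ap() handshake in the real code). */
extern void read_counter_pair(int64_t *bptsc, int64_t *aptsc);

int64_t
measure_skew(void)
{
	int64_t bptsc, aptsc, diff, val = INT64_MAX;

	for (int i = 0; i < SYNC_ROUNDS; i++) {
		read_counter_pair(&bptsc, &aptsc);
		diff = bptsc - aptsc;
		/* Noise only ever inflates the difference, so keep
		 * the smallest-magnitude sample as the estimate. */
		if (ABS64(diff) < ABS64(val))
			val = diff;
	}
	return val;
}

The previous code averaged eight rounds instead; an average lets a single
noisy outlier bias the result, while the minimum discards outliers by
construction.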
diffstat:
sys/arch/x86/include/cpu_counter.h | 3 +-
sys/arch/x86/x86/cpu.c | 76 +++++++++++++++++++++++++++-----------
sys/arch/x86/x86/tsc.c | 52 +++++++++++--------------
sys/dev/ic/hpet.c | 50 ++++++++++++++++++++++++-
sys/dev/ic/hpetvar.h | 3 +-
5 files changed, 128 insertions(+), 56 deletions(-)
diffs (truncated from 419 to 300 lines):
diff -r 0b133b5c8b27 -r e23bda089971 sys/arch/x86/include/cpu_counter.h
--- a/sys/arch/x86/include/cpu_counter.h Fri May 08 21:58:03 2020 +0000
+++ b/sys/arch/x86/include/cpu_counter.h Fri May 08 22:01:54 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: cpu_counter.h,v 1.5 2011/02/02 12:26:42 bouyer Exp $ */
+/* $NetBSD: cpu_counter.h,v 1.6 2020/05/08 22:01:54 ad Exp $ */
/*-
* Copyright (c) 2000, 2008 The NetBSD Foundation, Inc.
@@ -35,7 +35,6 @@
#ifdef _KERNEL
uint64_t cpu_counter(void);
-uint64_t cpu_counter_serializing(void);
uint32_t cpu_counter32(void);
uint64_t cpu_frequency(struct cpu_info *);
int cpu_hascounter(void);
diff -r 0b133b5c8b27 -r e23bda089971 sys/arch/x86/x86/cpu.c
--- a/sys/arch/x86/x86/cpu.c Fri May 08 21:58:03 2020 +0000
+++ b/sys/arch/x86/x86/cpu.c Fri May 08 22:01:54 2020 +0000
@@ -1,7 +1,7 @@
-/* $NetBSD: cpu.c,v 1.189 2020/05/02 16:44:36 bouyer Exp $ */
+/* $NetBSD: cpu.c,v 1.190 2020/05/08 22:01:55 ad Exp $ */
/*
- * Copyright (c) 2000-2012 NetBSD Foundation, Inc.
+ * Copyright (c) 2000-2020 NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
@@ -62,7 +62,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.189 2020/05/02 16:44:36 bouyer Exp $");
+__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.190 2020/05/08 22:01:55 ad Exp $");
#include "opt_ddb.h"
#include "opt_mpbios.h" /* for MPDEBUG */
@@ -73,6 +73,7 @@
#include "lapic.h"
#include "ioapic.h"
#include "acpica.h"
+#include "hpet.h"
#include <sys/param.h>
#include <sys/proc.h>
@@ -119,6 +120,7 @@
#endif
#include <dev/ic/mc146818reg.h>
+#include <dev/ic/hpetvar.h>
#include <i386/isa/nvram.h>
#include <dev/isa/isareg.h>
@@ -433,8 +435,14 @@
* must be done to allow booting other processors.
*/
if (!again) {
+ /* Make sure DELAY() (likely i8254_delay()) is initialized. */
+ DELAY(1);
+
+ /*
+ * Basic init. Compute an approximate frequency for the TSC
+ * using the i8254. If there's a HPET we'll redo it later.
+ */
atomic_or_32(&ci->ci_flags, CPUF_PRESENT | CPUF_PRIMARY);
- /* Basic init. */
cpu_intr_init(ci);
cpu_get_tsc_freq(ci);
cpu_init(ci);
@@ -451,8 +459,6 @@
lapic_calibrate_timer(ci);
}
#endif
- /* Make sure DELAY() is initialized. */
- DELAY(1);
kcsan_cpu_init(ci);
again = true;
}
@@ -718,7 +724,6 @@
if (ci != &cpu_info_primary) {
/* Synchronize TSC */
- wbinvd();
atomic_or_32(&ci->ci_flags, CPUF_RUNNING);
tsc_sync_ap(ci);
} else {
@@ -734,6 +739,14 @@
kcpuset_t *cpus;
u_long i;
+#if NHPET > 0
+ /* Use HPET delay, and re-calibrate TSC on boot CPU using HPET. */
+ if (hpet_delay_p() && x86_delay == i8254_delay) {
+ delay_func = x86_delay = hpet_delay;
+ cpu_get_tsc_freq(curcpu());
+ }
+#endif
+
/* Now that we know the number of CPUs, patch the text segment. */
x86_patch(false);
@@ -842,7 +855,6 @@
*/
psl = x86_read_psl();
x86_disable_intr();
- wbinvd();
tsc_sync_bp(ci);
x86_write_psl(psl);
}
@@ -873,7 +885,6 @@
drift = ci->ci_data.cpu_cc_skew;
psl = x86_read_psl();
x86_disable_intr();
- wbinvd();
tsc_sync_bp(ci);
x86_write_psl(psl);
drift -= ci->ci_data.cpu_cc_skew;
@@ -919,7 +930,6 @@
* Synchronize the TSC for the first time. Note that interrupts are
* off at this point.
*/
- wbinvd();
atomic_or_32(&ci->ci_flags, CPUF_PRESENT);
tsc_sync_ap(ci);
@@ -1310,21 +1320,43 @@
void
cpu_get_tsc_freq(struct cpu_info *ci)
{
- uint64_t freq = 0, last_tsc;
-
- if (cpu_hascounter())
- freq = cpu_tsc_freq_cpuid(ci);
+ uint64_t freq = 0, t0, t1;
+ int64_t overhead;
- if (freq != 0) {
- /* Use TSC frequency taken from CPUID. */
- ci->ci_data.cpu_cc_freq = freq;
+ if ((ci->ci_flags & CPUF_PRIMARY) != 0 && cpu_hascounter()) {
+ freq = cpu_tsc_freq_cpuid(ci);
+#if NHPET > 0
+ if (freq == 0)
+ freq = hpet_tsc_freq();
+#endif
+ if (freq == 0) {
+ /*
+ * Work out the approximate overhead involved below.
+ * Discard the result of the first go around the
+ * loop.
+ */
+ overhead = 0;
+ for (int i = 0; i <= 8; i++) {
+ t0 = cpu_counter();
+ x86_delay(0);
+ t1 = cpu_counter();
+ if (i > 0) {
+ overhead += (t1 - t0);
+ }
+ }
+ overhead >>= 3;
+
+ /* Now do the calibration. */
+ t0 = cpu_counter();
+ x86_delay(100000);
+ t1 = cpu_counter();
+ freq = (t1 - t0 - overhead) * 10;
+ }
} else {
- /* Calibrate TSC frequency. */
- last_tsc = cpu_counter_serializing();
- delay_func(100000);
- ci->ci_data.cpu_cc_freq =
- (cpu_counter_serializing() - last_tsc) * 10;
+ freq = cpu_info_primary.ci_data.cpu_cc_freq;
}
+
+ ci->ci_data.cpu_cc_freq = freq;
}
void
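To unpack the arithmetic in the new cpu_get_tsc_freq() above: the warm-up
loop times nine zero-length delays, discards the first as cold-cache noise,
and "overhead >>= 3" divides the remaining eight samples' sum by 8; the
final "* 10" converts cycles elapsed over 100,000 us (1/10 s) into cycles
per second. The same computation as a standalone sketch, with a hypothetical
delay_us() in place of x86_delay():

#include <stdint.h>

extern uint64_t cpu_counter(void);	/* read the TSC */
extern void delay_us(unsigned int us);	/* hypothetical busy-wait */

uint64_t
calibrate_tsc_hz(void)
{
	uint64_t t0, t1;
	int64_t overhead = 0;

	/* Time nine zero-length delays; discard the first (cold
	 * caches) and sum the remaining eight. */
	for (int i = 0; i <= 8; i++) {
		t0 = cpu_counter();
		delay_us(0);
		t1 = cpu_counter();
		if (i > 0)
			overhead += (int64_t)(t1 - t0);
	}
	overhead >>= 3;		/* sum of 8 samples -> average */

	/* 100,000 us = 1/10 s, so the net cycle count times 10 is Hz. */
	t0 = cpu_counter();
	delay_us(100000);
	t1 = cpu_counter();
	return ((t1 - t0) - overhead) * 10;
}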
diff -r 0b133b5c8b27 -r e23bda089971 sys/arch/x86/x86/tsc.c
--- a/sys/arch/x86/x86/tsc.c Fri May 08 21:58:03 2020 +0000
+++ b/sys/arch/x86/x86/tsc.c Fri May 08 22:01:54 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: tsc.c,v 1.43 2020/04/25 15:26:18 bouyer Exp $ */
+/* $NetBSD: tsc.c,v 1.44 2020/05/08 22:01:55 ad Exp $ */
/*-
* Copyright (c) 2008, 2020 The NetBSD Foundation, Inc.
@@ -27,7 +27,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: tsc.c,v 1.43 2020/04/25 15:26:18 bouyer Exp $");
+__KERNEL_RCSID(0, "$NetBSD: tsc.c,v 1.44 2020/05/08 22:01:55 ad Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@@ -47,12 +47,14 @@
#include "tsc.h"
+#define TSC_SYNC_ROUNDS 1000
+#define ABS(a) ((a) >= 0 ? (a) : -(a))
+
u_int tsc_get_timecount(struct timecounter *);
uint64_t tsc_freq; /* exported for sysctl */
-static int64_t tsc_drift_max = 250; /* max cycles */
+static int64_t tsc_drift_max = 1000; /* max cycles */
static int64_t tsc_drift_observed;
-static bool tsc_good;
int tsc_user_enabled = 1;
@@ -158,9 +160,6 @@
ci = curcpu();
tsc_freq = ci->ci_data.cpu_cc_freq;
- tsc_good = (cpu_feature[0] & CPUID_MSR) != 0 &&
- (rdmsr(MSR_TSC) != 0 || rdmsr(MSR_TSC) != 0);
-
invariant = tsc_is_invariant();
if (!invariant) {
aprint_debug("TSC not known invariant on this CPU\n");
@@ -206,13 +205,12 @@
/* Flag it and read our TSC. */
atomic_or_uint(&ci->ci_flags, CPUF_SYNCTSC);
- bptsc = (rdtsc() >> 1);
/* Wait for remote to complete, and read ours again. */
while ((ci->ci_flags & CPUF_SYNCTSC) != 0) {
__insn_barrier();
}
- bptsc += (rdtsc() >> 1);
+ bptsc = rdtsc();
/* Wait for the results to come in. */
while (tsc_sync_cpu == ci) {
@@ -229,17 +227,21 @@
void
tsc_sync_bp(struct cpu_info *ci)
{
- int64_t bptsc, aptsc, bsum = 0, asum = 0;
+ int64_t bptsc, aptsc, val, diff;
+
+ if (!cpu_hascounter())
+ return;
- tsc_read_bp(ci, &bptsc, &aptsc); /* discarded - cache effects */
- for (int i = 0; i < 8; i++) {
+ val = INT64_MAX;
+ for (int i = 0; i < TSC_SYNC_ROUNDS; i++) {
tsc_read_bp(ci, &bptsc, &aptsc);
- bsum += bptsc;
- asum += aptsc;
+ diff = bptsc - aptsc;
+ if (ABS(diff) < ABS(val)) {
+ val = diff;
+ }
}
- /* Compute final value to adjust for skew. */
- ci->ci_data.cpu_cc_skew = (bsum - asum) >> 3;
+ ci->ci_data.cpu_cc_skew = val;
}
/*
@@ -255,11 +257,10 @@
while ((ci->ci_flags & CPUF_SYNCTSC) == 0) {
__insn_barrier();
}
- tsc = (rdtsc() >> 1);
/* Instruct primary to read its counter. */
atomic_and_uint(&ci->ci_flags, ~CPUF_SYNCTSC);
- tsc += (rdtsc() >> 1);
+ tsc = rdtsc();
/* Post result. Ensure the whole value goes out atomically. */
(void)atomic_swap_64(&tsc_sync_val, tsc);
@@ -273,8 +274,10 @@
tsc_sync_ap(struct cpu_info *ci)
{
- tsc_post_ap(ci);
- for (int i = 0; i < 8; i++) {
+ if (!cpu_hascounter())
+ return;
+
+ for (int i = 0; i < TSC_SYNC_ROUNDS; i++) {
tsc_post_ap(ci);
}
}
@@ -321,12 +324,3 @@
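The sys/dev/ic/hpet.c half of the change falls past the truncation point, so
the hpet_tsc_freq() called from cpu.c above is not shown. Going only by the
commit message — record how far the HPET and the TSC advance between HPET
attach and secondary CPU boot, then take the ratio — here is a hedged sketch
of the approach; hpet_read_main(), hpet_mark_attach(), and the period
variable are hypothetical illustration names, not the actual NetBSD
interfaces:

#include <stdint.h>

extern uint64_t cpu_counter(void);	/* read the TSC */
extern uint64_t hpet_read_main(void);	/* hypothetical: HPET main counter */

static uint64_t attach_hpet, attach_tsc;
static uint64_t hpet_period_fs;		/* tick period in femtoseconds,
					 * from the HPET capabilities
					 * register */

/* At HPET attach: take a back-to-back HPET/TSC sample pair. */
void
hpet_mark_attach(void)
{
	attach_hpet = hpet_read_main();
	attach_tsc = cpu_counter();
}

/* Later (before the timecounter attaches): both counters have been
 * advancing in lockstep with real time, so the ratio of their
 * advances yields the TSC frequency.  Assumes the interval is short
 * (seconds, as during boot), so the 64-bit products cannot overflow. */
uint64_t
hpet_tsc_freq(void)
{
	uint64_t dhpet = hpet_read_main() - attach_hpet;
	uint64_t dtsc = cpu_counter() - attach_tsc;
	uint64_t ns = dhpet * hpet_period_fs / 1000000;	/* fs -> ns */

	return ns ? dtsc * 1000000000ULL / ns : 0;
}

The longer the interval between the two sample pairs, the smaller the
relative error contributed by the sampling overhead, which is why computing
the value as late as possible (just before secondary CPU boot) pays off.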