Source-Changes-HG archive
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]
[src/trunk]: src/sys Serialize rdtsc using with lfence, mfence or cpuid to re...
details: https://anonhg.NetBSD.org/src/rev/750a94c401fe
branches: trunk
changeset: 1011032:750a94c401fe
user: msaitoh <msaitoh%NetBSD.org@localhost>
date: Mon Jun 15 09:09:23 2020 +0000
description:
Serialize rdtsc using lfence, mfence or cpuid to read TSC more precisely.
x86/x86/tsc.c rev. 1.67 reduced cache problem and got big improvement, but it
still has room. I measured the effect of lfence, mfence, cpuid and rdtscp.
The impact to TSC skew and/or drift is:
AMD: mfence > rdtscp > cpuid > lfence-serialize > lfence = nomodify
Intel: lfence > rdtscp > cpuid > nomodify
So, mfence is the best on AMD and lfence is the best on Intel. If it has no
SSE2, we can use cpuid.
NOTE:
- An AMD document says the DE_CFG_LFENCE_SERIALIZE bit can be used for
serializing, but it's not so good.
- On Intel i386 (not amd64), the improvement seems to be very small.
- The rdtscp instruction can be used as a serializing instruction + rdtsc, but
it's not as good as [lm]fence. Both Intel's and AMD's documents say that
the latency of rdtscp is bigger than rdtsc, so I suspect the difference
of the result comes from it.
diffstat:
sys/arch/amd64/amd64/cpufunc.S | 86 +++++++----
sys/arch/i386/i386/cpufunc.S | 72 +++++++--
sys/arch/x86/include/cpu_counter.h | 19 +-
sys/arch/x86/include/cpufunc.h | 42 ++++-
sys/arch/x86/x86/cpu.c | 5 +-
sys/arch/x86/x86/hyperv.c | 102 +++++--------
sys/arch/x86/x86/tsc.c | 52 ++++++-
sys/arch/x86/x86/tsc.h | 3 +-
sys/rump/librump/rumpkern/arch/x86/rump_x86_cpu_counter.c | 19 +-
9 files changed, 258 insertions(+), 142 deletions(-)
diffs (truncated from 616 to 300 lines):
diff -r 0fb94533ca0e -r 750a94c401fe sys/arch/amd64/amd64/cpufunc.S
--- a/sys/arch/amd64/amd64/cpufunc.S Mon Jun 15 07:55:45 2020 +0000
+++ b/sys/arch/amd64/amd64/cpufunc.S Mon Jun 15 09:09:23 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: cpufunc.S,v 1.60 2020/06/13 23:58:51 ad Exp $ */
+/* $NetBSD: cpufunc.S,v 1.61 2020/06/15 09:09:23 msaitoh Exp $ */
/*
* Copyright (c) 1998, 2007, 2008, 2020 The NetBSD Foundation, Inc.
@@ -207,38 +207,62 @@
#endif /* !XENPV */
/*
- * Could be exact same as cpu_counter, but KMSAN needs to have the correct
- * size of the return value.
+ * cpu_counter and cpu_counter32 could be exact same, but KMSAN needs to have
+ * the correct size of the return value.
*/
-ENTRY(cpu_counter32)
- movq CPUVAR(CURLWP), %rcx
-1:
- movq L_NCSW(%rcx), %rdi
- rdtsc
- addl CPUVAR(CC_SKEW), %eax
- cmpq %rdi, L_NCSW(%rcx)
- jne 2f
- KMSAN_INIT_RET(4)
- ret
-2:
- jmp 1b
-END(cpu_counter32)
+#define SERIALIZE_lfence lfence
+#define SERIALIZE_mfence mfence
+
+#define ADD_counter32 addl CPUVAR(CC_SKEW), %eax
+#define ADD_counter shlq $32, %rdx ;\
+ orq %rdx, %rax ;\
+ addq CPUVAR(CC_SKEW), %rax
+
+#define RSIZE_counter32 4
+#define RSIZE_counter 8
+
+#define CPU_COUNTER_FENCE(counter, fence) \
+ENTRY(cpu_ ## counter ## _ ## fence) ;\
+ movq CPUVAR(CURLWP), %rcx ;\
+1: ;\
+ movq L_NCSW(%rcx), %rdi ;\
+ SERIALIZE_ ## fence ;\
+ rdtsc ;\
+ ADD_ ## counter ;\
+ cmpq %rdi, L_NCSW(%rcx) ;\
+ jne 2f ;\
+ KMSAN_INIT_RET(RSIZE_ ## counter) ;\
+ ret ;\
+2: ;\
+ jmp 1b ;\
+END(cpu_ ## counter ## _ ## fence)
-ENTRY(cpu_counter)
- movq CPUVAR(CURLWP), %rcx
-1:
- movq L_NCSW(%rcx), %rdi
- rdtsc
- shlq $32, %rdx
- orq %rdx, %rax
- addq CPUVAR(CC_SKEW), %rax
- cmpq %rdi, L_NCSW(%rcx)
- jne 2f
- KMSAN_INIT_RET(8)
- ret
-2:
- jmp 1b
-END(cpu_counter)
+CPU_COUNTER_FENCE(counter, lfence)
+CPU_COUNTER_FENCE(counter, mfence)
+CPU_COUNTER_FENCE(counter32, lfence)
+CPU_COUNTER_FENCE(counter32, mfence)
+
+#define CPU_COUNTER_CPUID(counter) \
+ENTRY(cpu_ ## counter ## _cpuid) ;\
+ movq %rbx, %r9 ;\
+ movq CPUVAR(CURLWP), %r8 ;\
+1: ;\
+ movq L_NCSW(%r8), %rdi ;\
+ xor %eax, %eax ;\
+ cpuid ;\
+ rdtsc ;\
+ ADD_ ## counter ;\
+ cmpq %rdi, L_NCSW(%r8) ;\
+ jne 2f ;\
+ movq %r9, %rbx ;\
+ KMSAN_INIT_RET(RSIZE_ ## counter) ;\
+ ret ;\
+2: ;\
+ jmp 1b ;\
+END(cpu_ ## counter ## _cpuid)
+
+CPU_COUNTER_CPUID(counter)
+CPU_COUNTER_CPUID(counter32)
ENTRY(rdmsr_safe)
movq CPUVAR(CURLWP), %r8
diff -r 0fb94533ca0e -r 750a94c401fe sys/arch/i386/i386/cpufunc.S
--- a/sys/arch/i386/i386/cpufunc.S Mon Jun 15 07:55:45 2020 +0000
+++ b/sys/arch/i386/i386/cpufunc.S Mon Jun 15 09:09:23 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: cpufunc.S,v 1.46 2020/06/13 23:58:52 ad Exp $ */
+/* $NetBSD: cpufunc.S,v 1.47 2020/06/15 09:09:23 msaitoh Exp $ */
/*-
* Copyright (c) 1998, 2007, 2020 The NetBSD Foundation, Inc.
@@ -38,7 +38,7 @@
#include <sys/errno.h>
#include <machine/asm.h>
-__KERNEL_RCSID(0, "$NetBSD: cpufunc.S,v 1.46 2020/06/13 23:58:52 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: cpufunc.S,v 1.47 2020/06/15 09:09:23 msaitoh Exp $");
#include "opt_xen.h"
@@ -163,23 +163,59 @@
ret
END(msr_onfault)
-ENTRY(cpu_counter)
- pushl %ebx
- movl CPUVAR(CURLWP), %ecx
-1:
- movl L_NCSW(%ecx), %ebx
- rdtsc
- addl CPUVAR(CC_SKEW), %eax
- adcl CPUVAR(CC_SKEW+4), %edx
- cmpl %ebx, L_NCSW(%ecx)
- jne 2f
- popl %ebx
- ret
-2:
- jmp 1b
-END(cpu_counter)
+#define ADD_counter32 addl CPUVAR(CC_SKEW), %eax
+#define ADD_counter ADD_counter32 ;\
+ adcl CPUVAR(CC_SKEW+4), %edx
+
+#define SERIALIZE_lfence lfence
+#define SERIALIZE_mfence mfence
+
+#define CPU_COUNTER_FENCE(counter, fence) \
+ENTRY(cpu_ ## counter ## _ ## fence) ;\
+ pushl %ebx ;\
+ movl CPUVAR(CURLWP), %ecx ;\
+1: ;\
+ movl L_NCSW(%ecx), %ebx ;\
+ SERIALIZE_ ## fence ;\
+ rdtsc ;\
+ ADD_ ## counter ;\
+ cmpl %ebx, L_NCSW(%ecx) ;\
+ jne 2f ;\
+ popl %ebx ;\
+ ret ;\
+2: ;\
+ jmp 1b ;\
+END(cpu_ ## counter ## _ ## fence)
-STRONG_ALIAS(cpu_counter32, cpu_counter)
+CPU_COUNTER_FENCE(counter, lfence)
+CPU_COUNTER_FENCE(counter, mfence)
+CPU_COUNTER_FENCE(counter32, lfence)
+CPU_COUNTER_FENCE(counter32, mfence)
+
+#define CPU_COUNTER_CPUID(counter) \
+ENTRY(cpu_ ## counter ## _cpuid) ;\
+ pushl %ebx ;\
+ pushl %esi ;\
+ movl CPUVAR(CURLWP), %ecx ;\
+1: ;\
+ movl L_NCSW(%ecx), %esi ;\
+ pushl %ecx ;\
+ xor %eax, %eax ;\
+ cpuid ;\
+ rdtsc ;\
+ ADD_ ## counter ;\
+ popl %ecx ;\
+ cmpl %esi, L_NCSW(%ecx) ;\
+ jne 2f ;\
+ popl %esi ;\
+ popl %ebx ;\
+ ret ;\
+2: ;\
+ jmp 1b ;\
+END(cpu_ ## counter ##_cpuid)
+
+CPU_COUNTER_CPUID(counter)
+CPU_COUNTER_CPUID(counter32)
ENTRY(breakpoint)
pushl %ebp
diff -r 0fb94533ca0e -r 750a94c401fe sys/arch/x86/include/cpu_counter.h
--- a/sys/arch/x86/include/cpu_counter.h Mon Jun 15 07:55:45 2020 +0000
+++ b/sys/arch/x86/include/cpu_counter.h Mon Jun 15 09:09:23 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: cpu_counter.h,v 1.6 2020/05/08 22:01:54 ad Exp $ */
+/* $NetBSD: cpu_counter.h,v 1.7 2020/06/15 09:09:23 msaitoh Exp $ */
/*-
* Copyright (c) 2000, 2008 The NetBSD Foundation, Inc.
@@ -34,10 +34,19 @@
#ifdef _KERNEL
-uint64_t cpu_counter(void);
-uint32_t cpu_counter32(void);
-uint64_t cpu_frequency(struct cpu_info *);
-int cpu_hascounter(void);
+#include <sys/lwp.h>
+
+extern uint64_t cpu_frequency(struct cpu_info *);
+extern int cpu_hascounter(void);
+extern uint64_t (*cpu_counter)(void);
+extern uint32_t (*cpu_counter32)(void);
+
+extern uint64_t cpu_counter_cpuid(void);
+extern uint64_t cpu_counter_lfence(void);
+extern uint64_t cpu_counter_mfence(void);
+extern uint32_t cpu_counter32_cpuid(void);
+extern uint32_t cpu_counter32_lfence(void);
+extern uint32_t cpu_counter32_mfence(void);
#endif /* _KERNEL */
diff -r 0fb94533ca0e -r 750a94c401fe sys/arch/x86/include/cpufunc.h
--- a/sys/arch/x86/include/cpufunc.h Mon Jun 15 07:55:45 2020 +0000
+++ b/sys/arch/x86/include/cpufunc.h Mon Jun 15 09:09:23 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: cpufunc.h,v 1.40 2020/06/14 16:12:05 riastradh Exp $ */
+/* $NetBSD: cpufunc.h,v 1.41 2020/06/15 09:09:23 msaitoh Exp $ */
/*
* Copyright (c) 1998, 2007, 2019 The NetBSD Foundation, Inc.
@@ -89,19 +89,37 @@
);
}
-static inline uint64_t
-rdtsc(void)
-{
- uint32_t low, high;
+extern uint64_t (*rdtsc)(void);
+
+#define _SERIALIZE_lfence __asm volatile ("lfence")
+#define _SERIALIZE_mfence __asm volatile ("mfence")
+#define _SERIALIZE_cpuid __asm volatile ("xor %%eax, %%eax;cpuid" ::: \
+ "eax", "ebx", "ecx", "edx");
- __asm volatile (
- "rdtsc"
- : "=a" (low), "=d" (high)
- :
- );
+#define RDTSCFUNC(fence) \
+static inline uint64_t \
+rdtsc_##fence(void) \
+{ \
+ uint32_t low, high; \
+ \
+ _SERIALIZE_##fence; \
+ __asm volatile ( \
+ "rdtsc" \
+ : "=a" (low), "=d" (high) \
+ : \
+ ); \
+ \
+ return (low | ((uint64_t)high << 32)); \
+}
- return (low | ((uint64_t)high << 32));
-}
+RDTSCFUNC(lfence)
+RDTSCFUNC(mfence)
+RDTSCFUNC(cpuid)
+
+#undef _SERIALIZE_LFENCE
+#undef _SERIALIZE_MFENCE
+#undef _SERIALIZE_CPUID
+
#ifndef XENPV
struct x86_hotpatch_source {
diff -r 0fb94533ca0e -r 750a94c401fe sys/arch/x86/x86/cpu.c
--- a/sys/arch/x86/x86/cpu.c Mon Jun 15 07:55:45 2020 +0000
+++ b/sys/arch/x86/x86/cpu.c Mon Jun 15 09:09:23 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: cpu.c,v 1.193 2020/06/13 20:01:27 ad Exp $ */
+/* $NetBSD: cpu.c,v 1.194 2020/06/15 09:09:24 msaitoh Exp $ */
/*
* Copyright (c) 2000-2020 NetBSD Foundation, Inc.
@@ -62,7 +62,7 @@
*/
#include <sys/cdefs.h>
Home |
Main Index |
Thread Index |
Old Index