Source-Changes-HG archive
[src/trunk]: src/sys/arch Excise XEN specific code out of x86/x86/intr.c into...
details: https://anonhg.NetBSD.org/src/rev/c60f79656b35
branches: trunk
changeset: 446918:c60f79656b35
user: cherry <cherry%NetBSD.org@localhost>
date: Tue Dec 25 06:50:11 2018 +0000
description:
Excise XEN specific code out of x86/x86/intr.c into xen/x86/xen_intr.c
While at it, separate the interrupt source tracking so that the interrupt
paths are truly independent.
Use weak symbol exporting to provide for future PVHVM co-existence
of both files, but with independent paths. Introduce assembler code
such that in a unified scenario, native interrupts get first priority
in spllower(), followed by XEN event callbacks. IPL management and
semantics are unchanged - native handlers and XEN callbacks are
expected to maintain their IPL-related semantics.
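
The following is an editor's C-level sketch of the dispatch order described
above, for illustration only: native pending interrupts are drained before
Xen event callbacks, and the IPL is only lowered once nothing is left. The
field names mirror the ci_ipending/ci_xpending members added in the
genassym.cf hunks below; struct fake_cpu_info, highest_bit(), run_native()
and run_xen() are stand-ins, not kernel code.

#include <stdint.h>
#include <stdio.h>

#define NIPL 8	/* illustrative number of interrupt priority levels */

/* Stand-in for the handful of struct cpu_info fields involved. */
struct fake_cpu_info {
	uint32_t ci_ipending;		/* pending native interrupt slots */
	uint32_t ci_xpending;		/* pending Xen event slots */
	uint32_t ci_iunmask[NIPL];	/* native slots unmasked at each IPL */
	uint32_t ci_xunmask[NIPL];	/* Xen slots unmasked at each IPL */
	int	 ci_ilevel;		/* current IPL */
};

/* Equivalent of the bsrl instruction: index of the highest set bit. */
static int
highest_bit(uint32_t v)
{
	int bit = -1;

	while (v != 0) {
		bit++;
		v >>= 1;
	}
	return bit;
}

static void
run_native(int slot)
{
	printf("dispatch native source %d\n", slot);
}

static void
run_xen(int slot)
{
	printf("dispatch xen event source %d\n", slot);
}

/*
 * The real stubs run with interrupts disabled while checking and jump
 * through IS_RECURSE/IS_RESUME; here each dispatch simply returns and
 * the loop re-checks from the top, as the saved resume address does.
 */
static void
spllower_sketch(struct fake_cpu_info *ci, int nlevel)
{
	uint32_t pend;
	int slot;

	for (;;) {
		/* Native interrupts unmasked at the new level go first. */
		pend = ci->ci_ipending & ci->ci_iunmask[nlevel];
		if (pend != 0) {
			slot = highest_bit(pend);
			ci->ci_ipending &= ~(1U << slot);
			run_native(slot);
			continue;
		}
		/* Only then are pending Xen event callbacks considered. */
		pend = ci->ci_xpending & ci->ci_xunmask[nlevel];
		if (pend != 0) {
			slot = highest_bit(pend);
			ci->ci_xpending &= ~(1U << slot);
			run_xen(slot);
			continue;
		}
		break;
	}
	/* Nothing left to run: record the lowered IPL. */
	ci->ci_ilevel = nlevel;
}

int
main(void)
{
	struct fake_cpu_info ci = {
		.ci_ipending = 0x09,	/* native slots 0 and 3 pending */
		.ci_xpending = 0x02,	/* xen slot 1 pending */
		.ci_ilevel = NIPL - 1,
	};

	for (int i = 0; i < NIPL; i++) {
		ci.ci_iunmask[i] = ~0U;	/* everything unmasked for the demo */
		ci.ci_xunmask[i] = ~0U;
	}
	spllower_sketch(&ci, 0);
	return 0;
}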
In summary, after this commit, native and XEN now have completely
independent interrupt handling mechanisms, including
intr_establish_xname(), the assembler stubs, and interrupt handler
management.
Happy Christmas!
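
As an illustration of the weak-symbol co-existence mentioned above, here is
a minimal userland sketch. The names xen_intr_example and intr_example are
hypothetical, and the GCC __attribute__ form stands in for kernel weak-alias
macros such as NetBSD's __weak_alias(); it is not the code from this commit.

#include <stdio.h>

/* Backend-specific implementation, exported under its own strong name. */
void
xen_intr_example(void)
{
	printf("xen backend\n");
}

/* Publish the common entry point as a weak alias of the Xen version. */
void intr_example(void) __attribute__((weak, alias("xen_intr_example")));

int
main(void)
{
	/*
	 * Resolves to xen_intr_example() unless another object file
	 * links in a strong intr_example(); that is how a future PVHVM
	 * kernel could carry both backends with independent paths.
	 */
	intr_example();
	return 0;
}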
diffstat:
sys/arch/amd64/amd64/genassym.cf | 12 +-
sys/arch/amd64/amd64/lock_stubs.S | 20 ++-
sys/arch/amd64/amd64/spl.S | 32 +++-
sys/arch/amd64/amd64/vector.S | 4 +-
sys/arch/i386/i386/genassym.cf | 10 +-
sys/arch/i386/i386/spl.S | 30 +++-
sys/arch/i386/i386/vector.S | 6 +-
sys/arch/x86/include/cpu.h | 11 +-
sys/arch/x86/isa/isa_machdep.c | 8 +-
sys/arch/x86/x86/i8259.c | 15 +-
sys/arch/x86/x86/intr.c | 57 +-------
sys/arch/xen/conf/files.xen | 3 +-
sys/arch/xen/include/intr.h | 5 +-
sys/arch/xen/x86/hypervisor_machdep.c | 22 +-
sys/arch/xen/x86/xen_intr.c | 254 ++++++++++++++++++++++++++++++++-
sys/arch/xen/xen/clock.c | 6 +-
sys/arch/xen/xen/evtchn.c | 30 ++--
sys/arch/xen/xen/xenevt.c | 6 +-
18 files changed, 407 insertions(+), 124 deletions(-)
diffs (truncated from 1246 to 300 lines):
diff -r b54289847d31 -r c60f79656b35 sys/arch/amd64/amd64/genassym.cf
--- a/sys/arch/amd64/amd64/genassym.cf Tue Dec 25 05:44:13 2018 +0000
+++ b/sys/arch/amd64/amd64/genassym.cf Tue Dec 25 06:50:11 2018 +0000
@@ -1,4 +1,4 @@
-# $NetBSD: genassym.cf,v 1.70 2018/08/12 15:31:01 maxv Exp $
+# $NetBSD: genassym.cf,v 1.71 2018/12/25 06:50:11 cherry Exp $
#
# Copyright (c) 1998, 2006, 2007, 2008 The NetBSD Foundation, Inc.
@@ -248,12 +248,14 @@
define CPU_INFO_FPCURLWP offsetof(struct cpu_info, ci_fpcurlwp)
define CPU_INFO_GDT offsetof(struct cpu_info, ci_gdt)
+define CPU_INFO_ILEVEL offsetof(struct cpu_info, ci_ilevel)
+define CPU_INFO_IDEPTH offsetof(struct cpu_info, ci_idepth)
+if !defined(XEN)
define CPU_INFO_IPENDING offsetof(struct cpu_info, ci_ipending)
define CPU_INFO_IMASK offsetof(struct cpu_info, ci_imask)
define CPU_INFO_IUNMASK offsetof(struct cpu_info, ci_iunmask)
-define CPU_INFO_ILEVEL offsetof(struct cpu_info, ci_ilevel)
-define CPU_INFO_IDEPTH offsetof(struct cpu_info, ci_idepth)
define CPU_INFO_ISOURCES offsetof(struct cpu_info, ci_isources)
+endif
define CPU_INFO_MTX_COUNT offsetof(struct cpu_info, ci_mtx_count)
define CPU_INFO_MTX_OLDSPL offsetof(struct cpu_info, ci_mtx_oldspl)
define CPU_INFO_CPUID offsetof(struct cpu_info, ci_cpuid)
@@ -352,6 +354,10 @@
ifdef XEN
define CPU_INFO_VCPU offsetof(struct cpu_info, ci_vcpu)
+define CPU_INFO_XPENDING offsetof(struct cpu_info, ci_xpending)
+define CPU_INFO_XMASK offsetof(struct cpu_info, ci_xmask)
+define CPU_INFO_XUNMASK offsetof(struct cpu_info, ci_xunmask)
+define CPU_INFO_XSOURCES offsetof(struct cpu_info, ci_xsources)
define EVTCHN_UPCALL_MASK offsetof(struct vcpu_info, evtchn_upcall_mask)
define XEN_PT_BASE offsetof(struct start_info, pt_base)
define XEN_NR_PT_FRAMES offsetof(struct start_info, nr_pt_frames)
diff -r b54289847d31 -r c60f79656b35 sys/arch/amd64/amd64/lock_stubs.S
--- a/sys/arch/amd64/amd64/lock_stubs.S Tue Dec 25 05:44:13 2018 +0000
+++ b/sys/arch/amd64/amd64/lock_stubs.S Tue Dec 25 06:50:11 2018 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: lock_stubs.S,v 1.29 2018/07/14 14:29:40 maxv Exp $ */
+/* $NetBSD: lock_stubs.S,v 1.30 2018/12/25 06:50:11 cherry Exp $ */
/*-
* Copyright (c) 2006, 2007, 2008, 2009 The NetBSD Foundation, Inc.
@@ -136,10 +136,18 @@
jnz 1f
cmpl CPU_INFO_ILEVEL(%r8), %edi
jae 1f
+#if !defined(XEN)
movl CPU_INFO_IUNMASK(%r8,%rdi,4), %esi
CLI(ax)
testl CPU_INFO_IPENDING(%r8), %esi
jnz _C_LABEL(Xspllower)
+#endif
+#if defined(XEN)
+ movl CPU_INFO_XUNMASK(%r8,%rdi,4), %esi
+ CLI(ax)
+ testl CPU_INFO_XPENDING(%r8), %esi
+ jnz _C_LABEL(Xspllower)
+#endif
movl %edi, CPU_INFO_ILEVEL(%r8)
STI(ax)
1: rep /* double byte ret as branch */
@@ -157,12 +165,22 @@
cmpl %edx,%ecx /* new level is lower? */
jae 2f
1:
+#if !defined(XEN)
movl CPU_INFO_IPENDING(%rsi),%eax
testl %eax,CPU_INFO_IUNMASK(%rsi,%rcx,4)/* deferred interrupts? */
jnz 3f
movl %eax,%ebx
cmpxchg8b CPU_INFO_ISTATE(%rsi) /* swap in new ilevel */
jnz 4f
+#endif
+#if defined(XEN)
+ movl CPU_INFO_XPENDING(%rsi),%eax
+ testl %eax,CPU_INFO_XUNMASK(%rsi,%rcx,4)/* deferred interrupts? */
+ jnz 3f
+ movl %edx, %eax
+ cmpxchgl %ecx, CPU_INFO_ILEVEL(%rsi)
+ jnz 4f
+#endif
2:
popq %rbx
ret
diff -r b54289847d31 -r c60f79656b35 sys/arch/amd64/amd64/spl.S
--- a/sys/arch/amd64/amd64/spl.S Tue Dec 25 05:44:13 2018 +0000
+++ b/sys/arch/amd64/amd64/spl.S Tue Dec 25 06:50:11 2018 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: spl.S,v 1.36 2018/08/22 17:04:36 maxv Exp $ */
+/* $NetBSD: spl.S,v 1.37 2018/12/25 06:50:11 cherry Exp $ */
/*
* Copyright (c) 2003 Wasabi Systems, Inc.
@@ -240,7 +240,6 @@
.align 16
END(spllower)
LABEL(spllower_end)
-#endif /* !XEN */
/*
* void cx8_spllower(int s);
@@ -280,6 +279,7 @@
END(cx8_spllower_patch)
END(cx8_spllower)
LABEL(cx8_spllower_end)
+#endif /* !XEN */
/*
* void Xspllower(int s);
@@ -308,6 +308,7 @@
movl %edi,%ebx
leaq 1f(%rip),%r13 /* address to resume loop at */
1: movl %ebx,%eax /* get cpl */
+#if !defined(XEN)
movl CPUVAR(IUNMASK)(,%rax,4),%eax
CLI(si)
andl CPUVAR(IPENDING),%eax /* any non-masked bits left? */
@@ -316,7 +317,19 @@
btrl %eax,CPUVAR(IPENDING)
movq CPUVAR(ISOURCES)(,%rax,8),%rax
jmp *IS_RECURSE(%rax)
+#endif
2:
+#if defined(XEN)
+ movl CPUVAR(XUNMASK)(,%rax,4),%eax
+ CLI(si)
+ andl CPUVAR(XPENDING),%eax /* any non-masked bits left? */
+ jz 3f
+ bsrl %eax,%eax
+ btrl %eax,CPUVAR(XPENDING)
+ movq CPUVAR(XSOURCES)(,%rax,8),%rax
+ jmp *IS_RECURSE(%rax)
+#endif
+3:
movl %ebx,CPUVAR(ILEVEL)
STI(si)
popq %r12
@@ -339,6 +352,7 @@
decl CPUVAR(IDEPTH)
leaq 1f(%rip),%r13
1: movl %ebx,%eax
+#if !defined(XEN)
movl CPUVAR(IUNMASK)(,%rax,4),%eax
CLI(si)
andl CPUVAR(IPENDING),%eax
@@ -347,7 +361,19 @@
btrl %eax,CPUVAR(IPENDING)
movq CPUVAR(ISOURCES)(,%rax,8),%rax
jmp *IS_RESUME(%rax)
-2: /* Check for ASTs on exit to user mode. */
+#endif
+2:
+#if defined(XEN)
+ movl CPUVAR(XUNMASK)(,%rax,4),%eax
+ CLI(si)
+ andl CPUVAR(XPENDING),%eax
+ jz 3f
+ bsrl %eax,%eax /* slow, but not worth optimizing */
+ btrl %eax,CPUVAR(XPENDING)
+ movq CPUVAR(XSOURCES)(,%rax,8),%rax
+ jmp *IS_RESUME(%rax)
+#endif
+3: /* Check for ASTs on exit to user mode. */
movl %ebx,CPUVAR(ILEVEL)
5:
testb $SEL_RPL,TF_CS(%rsp)
diff -r b54289847d31 -r c60f79656b35 sys/arch/amd64/amd64/vector.S
--- a/sys/arch/amd64/amd64/vector.S Tue Dec 25 05:44:13 2018 +0000
+++ b/sys/arch/amd64/amd64/vector.S Tue Dec 25 06:50:11 2018 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: vector.S,v 1.64 2018/07/14 14:29:40 maxv Exp $ */
+/* $NetBSD: vector.S,v 1.65 2018/12/25 06:50:11 cherry Exp $ */
/*
* Copyright (c) 1998, 2007, 2008 The NetBSD Foundation, Inc.
@@ -656,7 +656,7 @@
IDTVEC(resume_ ## name ## num) \
movq $IREENT_MAGIC,TF_ERR(%rsp) ;\
movl %ebx,%r13d ;\
- movq CPUVAR(ISOURCES) + (num) * 8,%r14 ;\
+ movq CPUVAR(XSOURCES) + (num) * 8,%r14 ;\
1: \
pushq %r13 ;\
movl $num,CPUVAR(ILEVEL) ;\
diff -r b54289847d31 -r c60f79656b35 sys/arch/i386/i386/genassym.cf
--- a/sys/arch/i386/i386/genassym.cf Tue Dec 25 05:44:13 2018 +0000
+++ b/sys/arch/i386/i386/genassym.cf Tue Dec 25 06:50:11 2018 +0000
@@ -1,4 +1,4 @@
-# $NetBSD: genassym.cf,v 1.107 2018/01/04 14:02:23 maxv Exp $
+# $NetBSD: genassym.cf,v 1.108 2018/12/25 06:50:11 cherry Exp $
#
# Copyright (c) 1998, 2006, 2007, 2008 The NetBSD Foundation, Inc.
@@ -268,12 +268,14 @@
define CPU_INFO_SIGNATURE offsetof(struct cpu_info, ci_signature)
define CPU_INFO_GDT offsetof(struct cpu_info, ci_gdt)
+if !defined(XEN)
define CPU_INFO_IPENDING offsetof(struct cpu_info, ci_ipending)
define CPU_INFO_IMASK offsetof(struct cpu_info, ci_imask)
+define CPU_INFO_ISOURCES offsetof(struct cpu_info, ci_isources)
define CPU_INFO_IUNMASK offsetof(struct cpu_info, ci_iunmask)
+endif
define CPU_INFO_ILEVEL offsetof(struct cpu_info, ci_ilevel)
define CPU_INFO_IDEPTH offsetof(struct cpu_info, ci_idepth)
-define CPU_INFO_ISOURCES offsetof(struct cpu_info, ci_isources)
define CPU_INFO_MTX_COUNT offsetof(struct cpu_info, ci_mtx_count)
define CPU_INFO_MTX_OLDSPL offsetof(struct cpu_info, ci_mtx_oldspl)
define CPU_INFO_INTRSTACK offsetof(struct cpu_info, ci_intrstack)
@@ -372,6 +374,10 @@
ifdef XEN
define CPU_INFO_VCPU offsetof(struct cpu_info, ci_vcpu)
+define CPU_INFO_XPENDING offsetof(struct cpu_info, ci_xpending)
+define CPU_INFO_XMASK offsetof(struct cpu_info, ci_xmask)
+define CPU_INFO_XUNMASK offsetof(struct cpu_info, ci_xunmask)
+define CPU_INFO_XSOURCES offsetof(struct cpu_info, ci_xsources)
define START_INFO_SHARED_INFO offsetof(struct start_info, shared_info)
define START_INFO_FLAGS offsetof(struct start_info, flags)
define START_INFO_CONSOLE_MFN offsetof(struct start_info, console.domU.mfn)
diff -r b54289847d31 -r c60f79656b35 sys/arch/i386/i386/spl.S
--- a/sys/arch/i386/i386/spl.S Tue Dec 25 05:44:13 2018 +0000
+++ b/sys/arch/i386/i386/spl.S Tue Dec 25 06:50:11 2018 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: spl.S,v 1.43 2018/04/04 22:52:58 christos Exp $ */
+/* $NetBSD: spl.S,v 1.44 2018/12/25 06:50:11 cherry Exp $ */
/*
* Copyright (c) 1998, 2007, 2008 The NetBSD Foundation, Inc.
@@ -30,7 +30,7 @@
*/
#include <machine/asm.h>
-__KERNEL_RCSID(0, "$NetBSD: spl.S,v 1.43 2018/04/04 22:52:58 christos Exp $");
+__KERNEL_RCSID(0, "$NetBSD: spl.S,v 1.44 2018/12/25 06:50:11 cherry Exp $");
#include "opt_ddb.h"
#include "opt_spldebug.h"
@@ -200,6 +200,7 @@
jz .Lspllower_panic
#endif /* XEN */
#endif /* defined(DEBUG) */
+#if !defined(XEN)
movl %ebx,%eax /* get cpl */
movl CPUVAR(IUNMASK)(,%eax,4),%eax
andl CPUVAR(IPENDING),%eax /* any non-masked bits left? */
@@ -208,7 +209,19 @@
btrl %eax,CPUVAR(IPENDING)
movl CPUVAR(ISOURCES)(,%eax,4),%eax
jmp *IS_RECURSE(%eax)
+#endif
2:
+#if defined(XEN)
+ movl %ebx,%eax /* get cpl */
+ movl CPUVAR(XUNMASK)(,%eax,4),%eax
+ andl CPUVAR(XPENDING),%eax /* any non-masked bits left? */
+ jz 3f
+ bsrl %eax,%eax
+ btrl %eax,CPUVAR(XPENDING)
+ movl CPUVAR(XSOURCES)(,%eax,4),%eax
+ jmp *IS_RECURSE(%eax)
+#endif
+3:
movl %ebx,CPUVAR(ILEVEL)
#ifdef XEN
STIC(%eax)
@@ -264,6 +277,7 @@
jz .Ldoreti_panic
#endif /* XEN */
#endif /* defined(DEBUG) */
+#if !defined(XEN)
movl %ebx,%eax
movl CPUVAR(IUNMASK)(,%eax,4),%eax
andl CPUVAR(IPENDING),%eax
@@ -272,7 +286,19 @@
btrl %eax,CPUVAR(IPENDING)
movl CPUVAR(ISOURCES)(,%eax, 4),%eax
jmp *IS_RESUME(%eax)
+#endif
2: /* Check for ASTs on exit to user mode. */
+#if defined(XEN)
+ movl %ebx,%eax
+ movl CPUVAR(IUNMASK)(,%eax,4),%eax
+ andl CPUVAR(IPENDING),%eax
+ jz 3f
+ bsrl %eax,%eax /* slow, but not worth optimizing */
+ btrl %eax,CPUVAR(IPENDING)
+ movl CPUVAR(ISOURCES)(,%eax, 4),%eax
+ jmp *IS_RESUME(%eax)
+#endif
+3:
movl %ebx,CPUVAR(ILEVEL)
5:
testb $CHK_UPL,TF_CS(%esp)
diff -r b54289847d31 -r c60f79656b35 sys/arch/i386/i386/vector.S
--- a/sys/arch/i386/i386/vector.S Tue Dec 25 05:44:13 2018 +0000