Source-Changes-HG archive
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]
[src/bouyer-xenpvh]: src/sys/arch Get rid of xen-specific ci_x* interrupt han...
details: https://anonhg.NetBSD.org/src/rev/52b862f351b7
branches: bouyer-xenpvh
changeset: 982926:52b862f351b7
user: bouyer <bouyer%NetBSD.org@localhost>
date: Sun Apr 12 17:25:52 2020 +0000
description:
Get rid of xen-specific ci_x* interrupt handling:
- use the general SIR mechanism, reserving 3 more slots for IPL_VM, IPL_SCHED
and IPL_HIGH
- remove specific handling from C sources, or change to ipending
- convert IPL number to SIR number in various places
- Remove XUNMASK/XPENDING in assembly or change to IUNMASK/IPENDING
- remove Xen-specific ci_xsources, ci_xmask, ci_xunmask, ci_xpending from
struct cpu_info
- for now remove a KASSERT that there are no pending interrupts in
idle_block(). We can get there with some software interrupts pending
in autoconf. XXX: this needs to be looked at.
diffstat:
sys/arch/amd64/amd64/genassym.cf | 11 ++-
sys/arch/amd64/amd64/lock_stubs.S | 15 +-----
sys/arch/amd64/amd64/spl.S | 33 +-----------
sys/arch/amd64/amd64/vector.S | 95 +++++++---------------------------
sys/arch/i386/i386/genassym.cf | 11 ++-
sys/arch/i386/i386/i386_trap.S | 12 ++--
sys/arch/i386/i386/locore.S | 12 ++--
sys/arch/i386/i386/spl.S | 33 +----------
sys/arch/i386/i386/vector.S | 97 +++++++---------------------------
sys/arch/x86/include/cpu.h | 8 +--
sys/arch/x86/include/intrdefs.h | 7 ++-
sys/arch/x86/isa/isa_machdep.c | 8 +--
sys/arch/x86/x86/i8259.c | 12 +---
sys/arch/x86/x86/intr.c | 11 +---
sys/arch/xen/include/hypervisor.h | 4 +-
sys/arch/xen/include/intr.h | 5 +-
sys/arch/xen/x86/hypervisor_machdep.c | 40 +++++--------
sys/arch/xen/x86/xen_intr.c | 12 +---
sys/arch/xen/xen/clock.c | 5 +-
sys/arch/xen/xen/evtchn.c | 35 +++++++-----
sys/arch/xen/xen/xenevt.c | 6 +-
21 files changed, 132 insertions(+), 340 deletions(-)
diffs (truncated from 1067 to 300 lines):
diff -r f84b053b1b57 -r 52b862f351b7 sys/arch/amd64/amd64/genassym.cf
--- a/sys/arch/amd64/amd64/genassym.cf Sun Apr 12 17:17:38 2020 +0000
+++ b/sys/arch/amd64/amd64/genassym.cf Sun Apr 12 17:25:52 2020 +0000
@@ -1,4 +1,4 @@
-# $NetBSD: genassym.cf,v 1.82.4.2 2020/04/11 10:11:30 bouyer Exp $
+# $NetBSD: genassym.cf,v 1.82.4.3 2020/04/12 17:25:52 bouyer Exp $
#
# Copyright (c) 1998, 2006, 2007, 2008 The NetBSD Foundation, Inc.
@@ -323,6 +323,8 @@
define IPL_PREEMPT IPL_PREEMPT
define IPL_NET IPL_NET
define IPL_CLOCK IPL_CLOCK
+define IPL_VM IPL_VM
+define IPL_SCHED IPL_SCHED
define IPL_HIGH IPL_HIGH
define LIR_IPI LIR_IPI
@@ -362,10 +364,9 @@
ifdef XEN
define CPU_INFO_VCPU offsetof(struct cpu_info, ci_vcpu)
-define CPU_INFO_XPENDING offsetof(struct cpu_info, ci_xpending)
-define CPU_INFO_XMASK offsetof(struct cpu_info, ci_xmask)
-define CPU_INFO_XUNMASK offsetof(struct cpu_info, ci_xunmask)
-define CPU_INFO_XSOURCES offsetof(struct cpu_info, ci_xsources)
+define SIR_XENIPL_VM SIR_XENIPL_VM
+define SIR_XENIPL_SCHED SIR_XENIPL_SCHED
+define SIR_XENIPL_HIGH SIR_XENIPL_HIGH
define EVTCHN_UPCALL_MASK offsetof(struct vcpu_info, evtchn_upcall_mask)
ifdef XENPV
define XEN_PT_BASE offsetof(struct start_info, pt_base)
diff -r f84b053b1b57 -r 52b862f351b7 sys/arch/amd64/amd64/lock_stubs.S
--- a/sys/arch/amd64/amd64/lock_stubs.S Sun Apr 12 17:17:38 2020 +0000
+++ b/sys/arch/amd64/amd64/lock_stubs.S Sun Apr 12 17:25:52 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: lock_stubs.S,v 1.35.6.1 2020/04/11 18:26:06 bouyer Exp $ */
+/* $NetBSD: lock_stubs.S,v 1.35.6.2 2020/04/12 17:25:52 bouyer Exp $ */
/*
* Copyright (c) 2006, 2007, 2008, 2009 The NetBSD Foundation, Inc.
@@ -130,11 +130,6 @@
CLI(ax)
testl CPU_INFO_IPENDING(%r8), %esi
jnz _C_LABEL(Xspllower)
-#if defined(XEN)
- movl CPU_INFO_XUNMASK(%r8,%rdi,4), %esi
- testl CPU_INFO_XPENDING(%r8), %esi
- jnz _C_LABEL(Xspllower)
-#endif
movl %edi, CPU_INFO_ILEVEL(%r8)
STI(ax)
1: rep /* double byte ret as branch */
@@ -158,14 +153,6 @@
movl %eax,%ebx
cmpxchg8b CPU_INFO_ISTATE(%rsi) /* swap in new ilevel */
jnz 4f
-#if defined(XEN)
- movl CPU_INFO_XPENDING(%rsi),%eax
- testl %eax,CPU_INFO_XUNMASK(%rsi,%rcx,4)/* deferred interrupts? */
- jnz 3f
- movl %edx, %eax
- cmpxchgl %ecx, CPU_INFO_ILEVEL(%rsi)
- jnz 4f
-#endif
2:
popq %rbx
ret
diff -r f84b053b1b57 -r 52b862f351b7 sys/arch/amd64/amd64/spl.S
--- a/sys/arch/amd64/amd64/spl.S Sun Apr 12 17:17:38 2020 +0000
+++ b/sys/arch/amd64/amd64/spl.S Sun Apr 12 17:25:52 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: spl.S,v 1.43.4.5 2020/04/11 18:26:06 bouyer Exp $ */
+/* $NetBSD: spl.S,v 1.43.4.6 2020/04/12 17:25:52 bouyer Exp $ */
/*
* Copyright (c) 2003 Wasabi Systems, Inc.
@@ -239,11 +239,6 @@
CLI(ax)
testl CPUVAR(IPENDING),%edx
jnz 2f
-#if defined(XEN)
- movl CPUVAR(XUNMASK)(,%rdi,4),%edx
- testl CPUVAR(XPENDING),%edx
- jnz 2f
-#endif
movl %edi,CPUVAR(ILEVEL)
POPF /* clobbers %rdi */
1:
@@ -344,18 +339,6 @@
movq CPUVAR(ISOURCES)(,%rax,8),%rax
jmp *IS_RECURSE(%rax)
2:
-#if defined(XEN)
- movl %ebx,%eax /* get cpl */
- movl CPUVAR(XUNMASK)(,%rax,4),%eax
- CLI(si)
- andl CPUVAR(XPENDING),%eax /* any non-masked bits left? */
- jz 3f
- bsrl %eax,%eax
- btrl %eax,CPUVAR(XPENDING)
- movq CPUVAR(XSOURCES)(,%rax,8),%rax
- jmp *IS_RECURSE(%rax)
-#endif
-3:
movl %ebx,CPUVAR(ILEVEL)
STI(si)
popq %r12
@@ -387,19 +370,7 @@
btrl %eax,CPUVAR(IPENDING)
movq CPUVAR(ISOURCES)(,%rax,8),%rax
jmp *IS_RESUME(%rax)
-2:
-#if defined(XEN)
- movl %ebx,%eax
- movl CPUVAR(XUNMASK)(,%rax,4),%eax
- CLI(si)
- andl CPUVAR(XPENDING),%eax
- jz 3f
- bsrl %eax,%eax /* slow, but not worth optimizing */
- btrl %eax,CPUVAR(XPENDING)
- movq CPUVAR(XSOURCES)(,%rax,8),%rax
- jmp *IS_RESUME(%rax)
-#endif
-3: /* Check for ASTs on exit to user mode. */
+2: /* Check for ASTs on exit to user mode. */
movl %ebx,CPUVAR(ILEVEL)
5:
testb $SEL_RPL,TF_CS(%rsp)
diff -r f84b053b1b57 -r 52b862f351b7 sys/arch/amd64/amd64/vector.S
--- a/sys/arch/amd64/amd64/vector.S Sun Apr 12 17:17:38 2020 +0000
+++ b/sys/arch/amd64/amd64/vector.S Sun Apr 12 17:25:52 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: vector.S,v 1.73.6.2 2020/04/11 11:56:51 bouyer Exp $ */
+/* $NetBSD: vector.S,v 1.73.6.3 2020/04/12 17:25:52 bouyer Exp $ */
/*
* Copyright (c) 1998, 2007, 2008 The NetBSD Foundation, Inc.
@@ -693,19 +693,19 @@
#if defined(XEN)
/* Resume/recurse procedures for spl() */
-#define XENINTRSTUB(name, num, early_ack, late_ack, mask, unmask, level_mask) \
-IDTVEC(recurse_ ## name ## num) ;\
+#define XENINTRSTUB(name, sir, level, unmask) \
+IDTVEC(recurse_ ## name ## sir) ;\
INTR_RECURSE_HWFRAME ;\
subq $8,%rsp ;\
pushq $T_ASTFLT /* trap # for doing ASTs */ ;\
INTR_RECURSE_ENTRY ;\
-IDTVEC(resume_ ## name ## num) \
+IDTVEC(resume_ ## name ## sir) \
movq $IREENT_MAGIC,TF_ERR(%rsp) ;\
movl %ebx,%r13d ;\
- movq CPUVAR(XSOURCES) + (num) * 8,%r14 ;\
+ movq CPUVAR(ISOURCES) + (sir) * 8,%r14 ;\
1: \
pushq %r13 ;\
- movl $num,CPUVAR(ILEVEL) ;\
+ movl $level,CPUVAR(ILEVEL) ;\
STI(si) ;\
incl CPUVAR(IDEPTH) ;\
movq IS_HANDLERS(%r14),%rbx ;\
@@ -718,48 +718,18 @@
jnz 6b ;\
5: \
CLI(si) ;\
- unmask(num) /* unmask it in hardware */ ;\
- late_ack(num) ;\
+ unmask(sir) /* unmask it in hardware */ ;\
STI(si) ;\
jmp _C_LABEL(Xdoreti) /* lower spl and do ASTs */ ;\
/* The unmask func for Xen events */
-#define hypervisor_asm_unmask(num) \
- movq $num,%rdi ;\
- call _C_LABEL(hypervisor_enable_ipl)
+#define hypervisor_asm_unmask(sir) \
+ movq $sir,%rdi ;\
+ call _C_LABEL(hypervisor_enable_sir)
-XENINTRSTUB(xenev,0,voidop,voidop,voidop,hypervisor_asm_unmask,voidop)
-XENINTRSTUB(xenev,1,voidop,voidop,voidop,hypervisor_asm_unmask,voidop)
-XENINTRSTUB(xenev,2,voidop,voidop,voidop,hypervisor_asm_unmask,voidop)
-XENINTRSTUB(xenev,3,voidop,voidop,voidop,hypervisor_asm_unmask,voidop)
-XENINTRSTUB(xenev,4,voidop,voidop,voidop,hypervisor_asm_unmask,voidop)
-XENINTRSTUB(xenev,5,voidop,voidop,voidop,hypervisor_asm_unmask,voidop)
-XENINTRSTUB(xenev,6,voidop,voidop,voidop,hypervisor_asm_unmask,voidop)
-XENINTRSTUB(xenev,7,voidop,voidop,voidop,hypervisor_asm_unmask,voidop)
-XENINTRSTUB(xenev,8,voidop,voidop,voidop,hypervisor_asm_unmask,voidop)
-XENINTRSTUB(xenev,9,voidop,voidop,voidop,hypervisor_asm_unmask,voidop)
-XENINTRSTUB(xenev,10,voidop,voidop,voidop,hypervisor_asm_unmask,voidop)
-XENINTRSTUB(xenev,11,voidop,voidop,voidop,hypervisor_asm_unmask,voidop)
-XENINTRSTUB(xenev,12,voidop,voidop,voidop,hypervisor_asm_unmask,voidop)
-XENINTRSTUB(xenev,13,voidop,voidop,voidop,hypervisor_asm_unmask,voidop)
-XENINTRSTUB(xenev,14,voidop,voidop,voidop,hypervisor_asm_unmask,voidop)
-XENINTRSTUB(xenev,15,voidop,voidop,voidop,hypervisor_asm_unmask,voidop)
-XENINTRSTUB(xenev,16,voidop,voidop,voidop,hypervisor_asm_unmask,voidop)
-XENINTRSTUB(xenev,17,voidop,voidop,voidop,hypervisor_asm_unmask,voidop)
-XENINTRSTUB(xenev,18,voidop,voidop,voidop,hypervisor_asm_unmask,voidop)
-XENINTRSTUB(xenev,19,voidop,voidop,voidop,hypervisor_asm_unmask,voidop)
-XENINTRSTUB(xenev,20,voidop,voidop,voidop,hypervisor_asm_unmask,voidop)
-XENINTRSTUB(xenev,21,voidop,voidop,voidop,hypervisor_asm_unmask,voidop)
-XENINTRSTUB(xenev,22,voidop,voidop,voidop,hypervisor_asm_unmask,voidop)
-XENINTRSTUB(xenev,23,voidop,voidop,voidop,hypervisor_asm_unmask,voidop)
-XENINTRSTUB(xenev,24,voidop,voidop,voidop,hypervisor_asm_unmask,voidop)
-XENINTRSTUB(xenev,25,voidop,voidop,voidop,hypervisor_asm_unmask,voidop)
-XENINTRSTUB(xenev,26,voidop,voidop,voidop,hypervisor_asm_unmask,voidop)
-XENINTRSTUB(xenev,27,voidop,voidop,voidop,hypervisor_asm_unmask,voidop)
-XENINTRSTUB(xenev,28,voidop,voidop,voidop,hypervisor_asm_unmask,voidop)
-XENINTRSTUB(xenev,29,voidop,voidop,voidop,hypervisor_asm_unmask,voidop)
-XENINTRSTUB(xenev,30,voidop,voidop,voidop,hypervisor_asm_unmask,voidop)
-XENINTRSTUB(xenev,31,voidop,voidop,voidop,hypervisor_asm_unmask,voidop)
+XENINTRSTUB(xenev,SIR_XENIPL_VM,IPL_VM,hypervisor_asm_unmask)
+XENINTRSTUB(xenev,SIR_XENIPL_SCHED,IPL_SCHED,hypervisor_asm_unmask)
+XENINTRSTUB(xenev,SIR_XENIPL_HIGH,IPL_HIGH,hypervisor_asm_unmask)
/* On Xen, the xenev_stubs are purely for spl entry, since there is no
* vector based mechanism. We however provide the entrypoint to ensure
@@ -771,39 +741,14 @@
callq _C_LABEL(panic)
END(entry_xenev)
+#define XENINTRSTUB_ENTRY(name, sir) \
+ .quad entry_xenev , _C_LABEL(Xrecurse_ ## name ## sir); \
+ .quad _C_LABEL(Xresume_ ## name ## sir);
+
LABEL(xenev_stubs)
- .quad entry_xenev, _C_LABEL(Xrecurse_xenev0), _C_LABEL(Xresume_xenev0)
- .quad entry_xenev, _C_LABEL(Xrecurse_xenev1) ,_C_LABEL(Xresume_xenev1)
- .quad entry_xenev, _C_LABEL(Xrecurse_xenev2) ,_C_LABEL(Xresume_xenev2)
- .quad entry_xenev, _C_LABEL(Xrecurse_xenev3) ,_C_LABEL(Xresume_xenev3)
- .quad entry_xenev, _C_LABEL(Xrecurse_xenev4) ,_C_LABEL(Xresume_xenev4)
- .quad entry_xenev, _C_LABEL(Xrecurse_xenev5) ,_C_LABEL(Xresume_xenev5)
- .quad entry_xenev, _C_LABEL(Xrecurse_xenev6) ,_C_LABEL(Xresume_xenev6)
- .quad entry_xenev, _C_LABEL(Xrecurse_xenev7) ,_C_LABEL(Xresume_xenev7)
- .quad entry_xenev, _C_LABEL(Xrecurse_xenev8) ,_C_LABEL(Xresume_xenev8)
- .quad entry_xenev, _C_LABEL(Xrecurse_xenev9) ,_C_LABEL(Xresume_xenev9)
- .quad entry_xenev, _C_LABEL(Xrecurse_xenev10), _C_LABEL(Xresume_xenev10)
- .quad entry_xenev, _C_LABEL(Xrecurse_xenev11), _C_LABEL(Xresume_xenev11)
- .quad entry_xenev, _C_LABEL(Xrecurse_xenev12), _C_LABEL(Xresume_xenev12)
- .quad entry_xenev, _C_LABEL(Xrecurse_xenev13), _C_LABEL(Xresume_xenev13)
- .quad entry_xenev, _C_LABEL(Xrecurse_xenev14), _C_LABEL(Xresume_xenev14)
- .quad entry_xenev, _C_LABEL(Xrecurse_xenev15), _C_LABEL(Xresume_xenev15)
- .quad entry_xenev, _C_LABEL(Xrecurse_xenev16), _C_LABEL(Xresume_xenev16)
- .quad entry_xenev, _C_LABEL(Xrecurse_xenev17), _C_LABEL(Xresume_xenev17)
- .quad entry_xenev, _C_LABEL(Xrecurse_xenev18), _C_LABEL(Xresume_xenev18)
- .quad entry_xenev, _C_LABEL(Xrecurse_xenev19), _C_LABEL(Xresume_xenev19)
- .quad entry_xenev, _C_LABEL(Xrecurse_xenev20), _C_LABEL(Xresume_xenev20)
- .quad entry_xenev, _C_LABEL(Xrecurse_xenev21), _C_LABEL(Xresume_xenev21)
- .quad entry_xenev, _C_LABEL(Xrecurse_xenev22), _C_LABEL(Xresume_xenev22)
- .quad entry_xenev, _C_LABEL(Xrecurse_xenev23), _C_LABEL(Xresume_xenev23)
- .quad entry_xenev, _C_LABEL(Xrecurse_xenev24), _C_LABEL(Xresume_xenev24)
- .quad entry_xenev, _C_LABEL(Xrecurse_xenev25), _C_LABEL(Xresume_xenev25)
- .quad entry_xenev, _C_LABEL(Xrecurse_xenev26), _C_LABEL(Xresume_xenev26)
- .quad entry_xenev, _C_LABEL(Xrecurse_xenev27), _C_LABEL(Xresume_xenev27)
- .quad entry_xenev, _C_LABEL(Xrecurse_xenev28), _C_LABEL(Xresume_xenev28)
- .quad entry_xenev, _C_LABEL(Xrecurse_xenev29), _C_LABEL(Xresume_xenev29)
- .quad entry_xenev, _C_LABEL(Xrecurse_xenev30), _C_LABEL(Xresume_xenev30)
- .quad entry_xenev, _C_LABEL(Xrecurse_xenev31), _C_LABEL(Xresume_xenev31)
+ XENINTRSTUB_ENTRY(xenev, SIR_XENIPL_VM) ;
+ XENINTRSTUB_ENTRY(xenev, SIR_XENIPL_SCHED) ;
+ XENINTRSTUB_ENTRY(xenev, SIR_XENIPL_HIGH) ;
END(xenev_stubs)
/*
diff -r f84b053b1b57 -r 52b862f351b7 sys/arch/i386/i386/genassym.cf
--- a/sys/arch/i386/i386/genassym.cf Sun Apr 12 17:17:38 2020 +0000
+++ b/sys/arch/i386/i386/genassym.cf Sun Apr 12 17:25:52 2020 +0000
@@ -1,4 +1,4 @@
-# $NetBSD: genassym.cf,v 1.119.4.2 2020/04/11 10:11:31 bouyer Exp $
+# $NetBSD: genassym.cf,v 1.119.4.3 2020/04/12 17:25:52 bouyer Exp $
#
# Copyright (c) 1998, 2006, 2007, 2008 The NetBSD Foundation, Inc.
@@ -324,6 +324,8 @@
define IPL_NET IPL_NET
define IPL_SCHED IPL_SCHED
define IPL_CLOCK IPL_CLOCK
+define IPL_VM IPL_VM
+define IPL_SCHED IPL_SCHED
define IPL_HIGH IPL_HIGH
define IPL_SOFTNET IPL_SOFTNET
@@ -376,10 +378,9 @@
ifdef XEN
define CPU_INFO_VCPU offsetof(struct cpu_info, ci_vcpu)
-define CPU_INFO_XPENDING offsetof(struct cpu_info, ci_xpending)
-define CPU_INFO_XMASK offsetof(struct cpu_info, ci_xmask)
-define CPU_INFO_XUNMASK offsetof(struct cpu_info, ci_xunmask)
-define CPU_INFO_XSOURCES offsetof(struct cpu_info, ci_xsources)
+define SIR_XENIPL_VM SIR_XENIPL_VM
+define SIR_XENIPL_SCHED SIR_XENIPL_SCHED
+define SIR_XENIPL_HIGH SIR_XENIPL_HIGH
define START_INFO_SHARED_INFO offsetof(struct start_info, shared_info)
define START_INFO_FLAGS offsetof(struct start_info, flags)
define START_INFO_CONSOLE_MFN offsetof(struct start_info, console.domU.mfn)
diff -r f84b053b1b57 -r 52b862f351b7 sys/arch/i386/i386/i386_trap.S
--- a/sys/arch/i386/i386/i386_trap.S Sun Apr 12 17:17:38 2020 +0000
Home |
Main Index |
Thread Index |
Old Index