Source-Changes-HG archive
[src/trunk]: src/sys/arch/xen Simplify the xen event handler callback by:
details: https://anonhg.NetBSD.org/src/rev/11f9541c2383
branches: trunk
changeset: 783541:11f9541c2383
user: cherry <cherry@NetBSD.org>
date: Fri Dec 28 06:29:56 2012 +0000
description:
Simplify the xen event handler callback by:
- moving the interrupt handler callback traversal into a separate
function.
- using evt_iterate_bits() to scan through the pending bitfield (see
the sketch after the description)
- removing cross-cpu pending actions: events received on the wrong
vcpu are re-routed via hypervisor_send_event().
- simplifying nested while() loops by encapsulating them in
equivalent functions.
Many thanks to bouyer@ and jym@ for multiple reviews.
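
evt_iterate_bits() is the building block here: it walks Xen's two-level
pending-event bitfield and invokes a callback for every set, unmasked
bit. The following is a minimal, self-contained sketch of that scan
pattern, not the NetBSD implementation: demo_iterate_bits(), the DEMO_*
constants, and the sample data are hypothetical names and values for
illustration. Only the callback shape (channel number, level-1 index,
level-2 index, opaque argument) mirrors what the diff below passes to
evt_do_iplcallback().

/*
 * Sketch of a two-level bitfield scan in the style of
 * evt_iterate_bits().  Each set bit in the level-1 word selects a
 * level-2 word; each set (and unmasked) level-2 bit maps to an
 * event-channel number, for which the callback is invoked.  Assumes
 * 64-bit unsigned long and GCC/Clang for __builtin_ctzl().
 */
#include <stdio.h>

#define DEMO_LONG_SHIFT	6	/* log2(bits per unsigned long) */
#define DEMO_NR_WORDS	4

typedef void (*evtcb_t)(unsigned int, unsigned int, unsigned int, void *);

static void
demo_iterate_bits(unsigned long *l1, unsigned long *l2,
    unsigned long *mask, evtcb_t cb, void *args)
{
	unsigned long l1m = *l1;

	while (l1m != 0) {
		unsigned int l1i = __builtin_ctzl(l1m);	/* lowest set bit */
		unsigned long l2m = l2[l1i];

		if (mask != NULL)
			l2m &= ~mask[l1i];	/* skip masked channels */
		l1m &= ~(1UL << l1i);
		while (l2m != 0) {
			unsigned int l2i = __builtin_ctzl(l2m);

			l2m &= ~(1UL << l2i);
			cb((l1i << DEMO_LONG_SHIFT) + l2i, l1i, l2i, args);
		}
	}
}

static void
print_port(unsigned int port, unsigned int l1i, unsigned int l2i, void *args)
{
	printf("pending event channel %u (l1 %u, l2 %u)\n", port, l1i, l2i);
}

int
main(void)
{
	unsigned long l1 = (1UL << 0) | (1UL << 2);	/* words 0 and 2 pending */
	unsigned long l2[DEMO_NR_WORDS] = { 0x9, 0, 0x4, 0 };

	demo_iterate_bits(&l1, l2, NULL, print_port, NULL);
	return 0;
}

Threading an opaque argument through the scan is what lets one helper
serve both callers seen in the diff, the upcall path (evt_set_pending)
and the per-IPL dispatch path (evt_do_iplcallback), collapsing the old
nested while() loops into a single function.
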
diffstat:
sys/arch/xen/include/evtchn.h | 5 +-
sys/arch/xen/include/hypervisor.h | 3 +-
sys/arch/xen/x86/hypervisor_machdep.c | 91 +++++++++++++++++++++--
sys/arch/xen/xen/evtchn.c | 127 ++++++++++++++++++++-------------
4 files changed, 164 insertions(+), 62 deletions(-)
diffs (truncated from 358 to 300 lines):
diff -r 534f4eb554c4 -r 11f9541c2383 sys/arch/xen/include/evtchn.h
--- a/sys/arch/xen/include/evtchn.h Fri Dec 28 05:15:08 2012 +0000
+++ b/sys/arch/xen/include/evtchn.h Fri Dec 28 06:29:56 2012 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: evtchn.h,v 1.20 2011/09/20 00:12:23 jym Exp $ */
+/* $NetBSD: evtchn.h,v 1.21 2012/12/28 06:29:56 cherry Exp $ */
/*
*
@@ -33,6 +33,9 @@
extern struct evtsource *evtsource[];
+#include <sys/mutex.h>
+extern kmutex_t evtlock[];
+
void events_default_setup(void);
void events_init(void);
bool events_suspend(void);
diff -r 534f4eb554c4 -r 11f9541c2383 sys/arch/xen/include/hypervisor.h
--- a/sys/arch/xen/include/hypervisor.h Fri Dec 28 05:15:08 2012 +0000
+++ b/sys/arch/xen/include/hypervisor.h Fri Dec 28 06:29:56 2012 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: hypervisor.h,v 1.39 2012/11/25 08:39:35 cherry Exp $ */
+/* $NetBSD: hypervisor.h,v 1.40 2012/12/28 06:29:56 cherry Exp $ */
/*
* Copyright (c) 2006 Manuel Bouyer.
@@ -142,6 +142,7 @@
void hypervisor_mask_event(unsigned int);
void hypervisor_clear_event(unsigned int);
void hypervisor_enable_ipl(unsigned int);
+void hypervisor_do_iplpending(unsigned int, void *);
void hypervisor_set_ipending(uint32_t, int, int);
void hypervisor_machdep_attach(void);
void hypervisor_machdep_resume(void);
diff -r 534f4eb554c4 -r 11f9541c2383 sys/arch/xen/x86/hypervisor_machdep.c
--- a/sys/arch/xen/x86/hypervisor_machdep.c Fri Dec 28 05:15:08 2012 +0000
+++ b/sys/arch/xen/x86/hypervisor_machdep.c Fri Dec 28 06:29:56 2012 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: hypervisor_machdep.c,v 1.23 2012/11/25 08:39:36 cherry Exp $ */
+/* $NetBSD: hypervisor_machdep.c,v 1.24 2012/12/28 06:29:56 cherry Exp $ */
/*
*
@@ -54,7 +54,7 @@
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: hypervisor_machdep.c,v 1.23 2012/11/25 08:39:36 cherry Exp $");
+__KERNEL_RCSID(0, "$NetBSD: hypervisor_machdep.c,v 1.24 2012/12/28 06:29:56 cherry Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@@ -187,7 +187,7 @@
*/
while (vci->evtchn_upcall_pending) {
- cli();
+ x86_disable_intr();
vci->evtchn_upcall_pending = 0;
@@ -195,7 +195,7 @@
s->evtchn_pending, s->evtchn_mask,
evt_set_pending, &ret);
- sti();
+ x86_enable_intr();
}
#if 0
@@ -284,6 +284,82 @@
#endif
}
+/* Iterate through pending events and call the event handler */
+struct splargs {
+ int ipl;
+ struct intrframe *regs;
+};
+
+static inline void
+evt_do_iplcallback(unsigned int evtch, unsigned int l1i,
+ unsigned int l2i, void *args)
+{
+ KASSERT(args != NULL);
+ struct splargs *sargs = args;
+
+ struct intrhand *ih;
+ int (*ih_fun)(void *, void *);
+
+ int ipl = sargs->ipl;
+ struct cpu_info *ci = curcpu();
+ struct intrframe *regs = sargs->regs;
+
+ KASSERT(evtsource[evtch] != 0);
+
+ KASSERT(ci->ci_ilevel == ipl);
+
+ KASSERT(x86_read_psl() != 0);
+ x86_enable_intr();
+ mutex_spin_enter(&evtlock[evtch]);
+ ih = evtsource[evtch]->ev_handlers;
+ while (ih != NULL) {
+ if (ih->ih_cpu != ci) { /* Skip non-local handlers */
+ ih = ih->ih_evt_next;
+ continue;
+ }
+ if (ih->ih_level == ipl) {
+ KASSERT(x86_read_psl() == 0);
+ x86_disable_intr();
+ ci->ci_ilevel = ih->ih_level;
+ x86_enable_intr();
+ ih_fun = (void *)ih->ih_fun;
+ ih_fun(ih->ih_arg, regs);
+ if (ci->ci_ilevel != ipl) {
+ printf("evtchn_do_event: "
+ "handler %p didn't lower "
+ "ipl %d %d\n",
+ ih_fun, ci->ci_ilevel, ipl);
+ }
+ }
+ ih = ih->ih_evt_next;
+ }
+ mutex_spin_exit(&evtlock[evtch]);
+ hypervisor_enable_event(evtch);
+ x86_disable_intr();
+
+ KASSERT(ci->ci_ilevel == ipl);
+}
+
+/*
+ * Iterate through all pending interrupt handlers at 'ipl', on current
+ * cpu.
+ */
+void
+hypervisor_do_iplpending(unsigned int ipl, void *regs)
+{
+ struct cpu_info *ci;
+ struct splargs splargs = {
+ .ipl = ipl,
+ .regs = regs};
+
+ ci = curcpu();
+
+ evt_iterate_bits(&ci->ci_isources[ipl]->ipl_evt_mask1,
+ ci->ci_isources[ipl]->ipl_evt_mask2, NULL,
+ evt_do_iplcallback, &splargs);
+}
+
+
void
hypervisor_send_event(struct cpu_info *ci, unsigned int ev)
{
@@ -437,13 +513,6 @@
KASSERT(ci->ci_isources[ipl] != NULL);
ci->ci_isources[ipl]->ipl_evt_mask1 |= 1UL << l1;
ci->ci_isources[ipl]->ipl_evt_mask2[l1] |= 1UL << l2;
- if (__predict_false(ci != curcpu())) {
- if (xen_send_ipi(ci, XEN_IPI_HVCB)) {
- panic("hypervisor_set_ipending: "
- "xen_send_ipi(cpu%d, XEN_IPI_HVCB) failed\n",
- (int) ci->ci_cpuid);
- }
- }
}
void
diff -r 534f4eb554c4 -r 11f9541c2383 sys/arch/xen/xen/evtchn.c
--- a/sys/arch/xen/xen/evtchn.c Fri Dec 28 05:15:08 2012 +0000
+++ b/sys/arch/xen/xen/evtchn.c Fri Dec 28 06:29:56 2012 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: evtchn.c,v 1.65 2012/11/25 20:56:33 cherry Exp $ */
+/* $NetBSD: evtchn.c,v 1.66 2012/12/28 06:29:56 cherry Exp $ */
/*
* Copyright (c) 2006 Manuel Bouyer.
@@ -54,7 +54,7 @@
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: evtchn.c,v 1.65 2012/11/25 20:56:33 cherry Exp $");
+__KERNEL_RCSID(0, "$NetBSD: evtchn.c,v 1.66 2012/12/28 06:29:56 cherry Exp $");
#include "opt_xen.h"
#include "isa.h"
@@ -89,7 +89,7 @@
struct evtsource *evtsource[NR_EVENT_CHANNELS];
/* channel locks */
-static kmutex_t evtlock[NR_EVENT_CHANNELS];
+kmutex_t evtlock[NR_EVENT_CHANNELS];
/* Reference counts for bindings to event channels XXX: redo for SMP */
static uint8_t evtch_bindcount[NR_EVENT_CHANNELS];
@@ -228,6 +228,50 @@
}
+static int
+xspllower(int ilevel, void *regs)
+{
+ int i;
+ uint32_t iplbit;
+ uint32_t iplpending;
+
+ struct cpu_info *ci;
+
+ KASSERT(kpreempt_disabled());
+ KASSERT(x86_read_psl() != 0); /* Interrupts must be disabled */
+
+ ci = curcpu();
+ ci->ci_idepth++;
+
+ KASSERT(ilevel < ci->ci_ilevel);
+ /*
+ * C version of spllower(). ASTs will be checked when
+ * hypervisor_callback() exits, so no need to check here.
+ */
+ iplpending = (IUNMASK(ci, ilevel) & ci->ci_ipending);
+ while (iplpending != 0) {
+ iplbit = 1 << (NIPL - 1);
+ i = (NIPL - 1);
+ while (iplpending != 0 && i > ilevel) {
+ while (iplpending & iplbit) {
+ ci->ci_ipending &= ~iplbit;
+ ci->ci_ilevel = i;
+ hypervisor_do_iplpending(i, regs);
+ KASSERT(x86_read_psl() != 0);
+ /* more pending IPLs may have been registered */
+ iplpending =
+ (IUNMASK(ci, ilevel) & ci->ci_ipending);
+ }
+ i--;
+ iplbit >>= 1;
+ }
+ }
+ KASSERT(x86_read_psl() != 0);
+ ci->ci_ilevel = ilevel;
+ ci->ci_idepth--;
+ return 0;
+}
+
unsigned int
evtchn_do_event(int evtch, struct intrframe *regs)
{
@@ -236,8 +280,6 @@
struct intrhand *ih;
int (*ih_fun)(void *, void *);
uint32_t iplmask;
- int i;
- uint32_t iplbit;
#ifdef DIAGNOSTIC
if (evtch >= NR_EVENT_CHANNELS) {
@@ -292,9 +334,12 @@
}
ci->ci_ilevel = evtsource[evtch]->ev_maxlevel;
iplmask = evtsource[evtch]->ev_imask;
- sti();
+
+ KASSERT(x86_read_psl() != 0);
+ x86_enable_intr();
mutex_spin_enter(&evtlock[evtch]);
ih = evtsource[evtch]->ev_handlers;
+
while (ih != NULL) {
if (ih->ih_cpu != ci) {
hypervisor_send_event(ih->ih_cpu, evtch);
@@ -307,60 +352,44 @@
if (evtch == IRQ_DEBUG)
printf("ih->ih_level %d <= ilevel %d\n", ih->ih_level, ilevel);
#endif
- cli();
- hypervisor_set_ipending(iplmask,
- evtch >> LONG_SHIFT, evtch & LONG_MASK);
+ mutex_spin_exit(&evtlock[evtch]);
+ x86_disable_intr();
+ hypervisor_set_ipending(iplmask,
+ evtch >> LONG_SHIFT, evtch & LONG_MASK);
+
/* leave masked */
- mutex_spin_exit(&evtlock[evtch]);
goto splx;
}
iplmask &= ~IUNMASK(ci, ih->ih_level);
- ci->ci_ilevel = ih->ih_level;
+
+ KASSERT(x86_read_psl() == 0);
+ KASSERT(ih->ih_level > ilevel);
+
+ { /* Lower current spl to current handler */
+ x86_disable_intr();
+
+ /* assert for handlers being sorted by spl */
+ KASSERT(ci->ci_ilevel >= ih->ih_level);
+
+ /* Lower it */
+ ci->ci_ilevel = ih->ih_level;