Port-xen archive
Re: xen spl rework
On 7 December 2012 05:58, Cherry G. Mathew
<cherry.g.mathew%gmail.com@localhost> wrote:
> On 6 December 2012 17:09, Manuel Bouyer <bouyer%antioche.eu.org@localhost>
> wrote:
>> On Thu, Dec 06, 2012 at 03:35:02PM -0600, Cherry G. Mathew wrote:
>>> Hi Manuel,
>>>
>>> Thanks for taking the time.
>>>
>>> On 6 December 2012 04:00, Manuel Bouyer <bouyer%antioche.eu.org@localhost>
>>> wrote:
>>> > On Sun, Nov 25, 2012 at 02:29:50PM -0600, Cherry G. Mathew wrote:
>>> >> I've updated the patch with your suggestions.
>>> >> ftp://ftp.netbsd.org/pub/NetBSD/misc/cherry/tmp/port-xen/spl1.c.diff
>>> >
>>> > Cosmetic first:
>>> > move extern variable and function declarations to headers, included by
>>> > all .c files that need them. It's way too easy for things to get out of
>>> > sync otherwise.
>>> >
>>>
>>> Ok, thanks.
>>>
>>> >
>>> > in evtchn_do_event() seems to not be used anymore (it's computed but
>>> > never read).
>>>
>>> Not sure I understand, it's called from hypervisor_machdep.c
>>
>> It should have been:
>> in evtchn_do_event(), iplmask seems to not be used anymore (it's computed but
>> never read).
>>
>>>
>>> > And, more importantly, what is setting bits in ci->ci_ipending now?
>>> > It seems that hypervisor_set_ipending() is never called anymore?
>>>
>>> I think you're referring to this:
>>>
>>> Index: arch/xen/xen/evtchn.c
>>> ===================================================================
>>> RCS file: /cvsroot/src/sys/arch/xen/xen/evtchn.c,v
>>> retrieving revision 1.64
>>> diff -u -r1.64 evtchn.c
>>> --- arch/xen/xen/evtchn.c 25 Nov 2012 08:39:36 -0000 1.64
>>> +++ arch/xen/xen/evtchn.c 25 Nov 2012 20:19:15 -0000
>>> [...]
>>>
>>> ***************
>>> *** 307,366 ****
>>> if (evtch == IRQ_DEBUG)
>>> printf("ih->ih_level %d <= ilevel %d\n",
>>> ih->ih_level, ilevel);
>>> #endif
>>> - cli();
>>> - hypervisor_set_ipending(iplmask,
>>> - evtch >> LONG_SHIFT, evtch & LONG_MASK);
>>>
>>>
>>> It removes a redundant (second) hypervisor_set_ipending() call - jym
>>> pointed this out a bit earlier in this thread - it probably should have
>>> been broken out into a separate patch. The new codepath makes sure
>>> that pending bits are only set on local CPUs and uses IPIs to set
>>> remote pending bits.
>>
>> Right, there's another call to hypervisor_set_ipending() that I missed,
>> but it's for a different case. Here you're skipping calling
>> handlers because the IPL isn't low enough, but as the pending bit will not
>> be set (even on the local CPU), nothing will call this handler when the spl
>> is lowered. What did I miss?
>>
>
> Yes, you're right; I'll have another go at this.
>
> Thanks!
>
> --
> ~Cherry
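
To make the case discussed above concrete: a handler whose IPL is not above the
current level must not run immediately, but a pending bit still has to be
recorded on the local CPU so that lowering the SPL later runs it. Below is a
minimal userland sketch of that idea (hypothetical names; not kernel code and
not the patch further down):

/*
 * Minimal userland model (not kernel code; all names are made up for the
 * illustration) of "skip the handler but mark it pending".
 */
#include <stdio.h>
#include <stdint.h>

#define NIPL 8

static int cur_ipl;                    /* models ci->ci_ilevel   */
static uint32_t ipending;              /* models ci->ci_ipending */
static void (*handler[NIPL])(void);    /* one fake handler per level */

static void
run_handler(int ipl)
{
	int saved = cur_ipl;

	cur_ipl = ipl;                 /* raise to the handler's level */
	handler[ipl]();
	cur_ipl = saved;
}

static void
deliver(int ipl)
{
	if (ipl > cur_ipl)
		run_handler(ipl);      /* high enough: run it now */
	else
		ipending |= 1u << ipl; /* too low: only mark it pending */
}

static void
lower(int newipl)                      /* models spllower()/xspllower() */
{
	for (int i = NIPL - 1; i > newipl; i--) {
		if (ipending & (1u << i)) {
			ipending &= ~(1u << i);
			run_handler(i); /* run what was deferred */
		}
	}
	cur_ipl = newipl;
}

static void
say_hi(void)
{
	printf("handler ran at ipl %d\n", cur_ipl);
}

int
main(void)
{
	for (int i = 0; i < NIPL; i++)
		handler[i] = say_hi;

	cur_ipl = 6;
	deliver(4);	/* deferred: only the pending bit is set */
	deliver(7);	/* runs immediately */
	lower(0);	/* the deferred ipl-4 handler runs here */
	return 0;
}

Without the ipending mark in the deferred branch, lower() would never see the
handler - exactly the problem pointed out above. The real skip path in the
patch below records the bit with hypervisor_set_ipending() and leaves the
event channel masked.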
Pasted below is a version fixed per your comments.
I've tested it by doing a full build-world on a domU.
Cheers,
--
~Cherry
Index: arch/xen/include/evtchn.h
===================================================================
RCS file: /cvsroot/src/sys/arch/xen/include/evtchn.h,v
retrieving revision 1.20
diff -u -r1.20 evtchn.h
--- arch/xen/include/evtchn.h 20 Sep 2011 00:12:23 -0000 1.20
+++ arch/xen/include/evtchn.h 27 Dec 2012 06:57:12 -0000
@@ -33,6 +33,9 @@
extern struct evtsource *evtsource[];
+#include <sys/mutex.h>
+extern kmutex_t evtlock[];
+
void events_default_setup(void);
void events_init(void);
bool events_suspend(void);
Index: arch/xen/include/hypervisor.h
===================================================================
RCS file: /cvsroot/src/sys/arch/xen/include/hypervisor.h,v
retrieving revision 1.39
diff -u -r1.39 hypervisor.h
--- arch/xen/include/hypervisor.h 25 Nov 2012 08:39:35 -0000 1.39
+++ arch/xen/include/hypervisor.h 27 Dec 2012 06:57:12 -0000
@@ -118,7 +118,6 @@
/* For use in guest OSes. */
extern volatile shared_info_t *HYPERVISOR_shared_info;
-
/* Structural guest handles introduced in 0x00030201. */
#if __XEN_INTERFACE_VERSION__ >= 0x00030201
#define xenguest_handle(hnd) (hnd).p
@@ -142,6 +141,7 @@
void hypervisor_mask_event(unsigned int);
void hypervisor_clear_event(unsigned int);
void hypervisor_enable_ipl(unsigned int);
+void hypervisor_do_iplpending(unsigned int, void *);
void hypervisor_set_ipending(uint32_t, int, int);
void hypervisor_machdep_attach(void);
void hypervisor_machdep_resume(void);
Index: arch/xen/x86/hypervisor_machdep.c
===================================================================
RCS file: /cvsroot/src/sys/arch/xen/x86/hypervisor_machdep.c,v
retrieving revision 1.23
diff -u -r1.23 hypervisor_machdep.c
--- arch/xen/x86/hypervisor_machdep.c 25 Nov 2012 08:39:36 -0000 1.23
+++ arch/xen/x86/hypervisor_machdep.c 27 Dec 2012 06:57:12 -0000
@@ -187,7 +187,7 @@
*/
while (vci->evtchn_upcall_pending) {
- cli();
+ x86_disable_intr();
vci->evtchn_upcall_pending = 0;
@@ -195,7 +195,7 @@
s->evtchn_pending, s->evtchn_mask,
evt_set_pending, &ret);
- sti();
+ x86_enable_intr();
}
#if 0
@@ -284,6 +284,82 @@
#endif
}
+/* Iterate through pending events and call the event handler */
+struct splargs {
+ int ipl;
+ struct intrframe *regs;
+};
+
+static inline void
+evt_do_iplcallback(unsigned int evtch, unsigned int l1i,
+ unsigned int l2i, void *args)
+{
+ KASSERT(args != NULL);
+ struct splargs *sargs = args;
+
+ struct intrhand *ih;
+ int (*ih_fun)(void *, void *);
+
+ int ipl = sargs->ipl;
+ struct cpu_info *ci = curcpu();
+ struct intrframe *regs = sargs->regs;
+
+ KASSERT(evtsource[evtch] != 0);
+
+ KASSERT(ci->ci_ilevel == ipl);
+
+ KASSERT(x86_read_psl() != 0);
+ x86_enable_intr();
+ mutex_spin_enter(&evtlock[evtch]);
+ ih = evtsource[evtch]->ev_handlers;
+ while (ih != NULL) {
+ if (ih->ih_cpu != ci) { /* Skip non-local handlers */
+ ih = ih->ih_evt_next;
+ continue;
+ }
+ if (ih->ih_level == ipl) {
+ KASSERT(x86_read_psl() == 0);
+ x86_disable_intr();
+ ci->ci_ilevel = ih->ih_level;
+ x86_enable_intr();
+ ih_fun = (void *)ih->ih_fun;
+ ih_fun(ih->ih_arg, regs);
+ if (ci->ci_ilevel != ipl) {
+ printf("evtchn_do_event: "
+ "handler %p didn't lower "
+ "ipl %d %d\n",
+ ih_fun, ci->ci_ilevel, ipl);
+ }
+ }
+ ih = ih->ih_evt_next;
+ }
+ mutex_spin_exit(&evtlock[evtch]);
+ hypervisor_enable_event(evtch);
+ x86_disable_intr();
+
+ KASSERT(ci->ci_ilevel == ipl);
+}
+
+/*
+ * Iterate through all pending interrupt handlers at 'ipl', on current
+ * cpu.
+ */
+void
+hypervisor_do_iplpending(unsigned int ipl, void *regs)
+{
+ struct cpu_info *ci;
+ struct splargs splargs = {
+ .ipl = ipl,
+ .regs = regs};
+
+ ci = curcpu();
+
+ evt_iterate_bits(&ci->ci_isources[ipl]->ipl_evt_mask1,
+ ci->ci_isources[ipl]->ipl_evt_mask2, NULL,
+ evt_do_iplcallback, &splargs);
+}
+
+
void
hypervisor_send_event(struct cpu_info *ci, unsigned int ev)
{
@@ -437,13 +513,6 @@
KASSERT(ci->ci_isources[ipl] != NULL);
ci->ci_isources[ipl]->ipl_evt_mask1 |= 1UL << l1;
ci->ci_isources[ipl]->ipl_evt_mask2[l1] |= 1UL << l2;
- if (__predict_false(ci != curcpu())) {
- if (xen_send_ipi(ci, XEN_IPI_HVCB)) {
- panic("hypervisor_set_ipending: "
- "xen_send_ipi(cpu%d, XEN_IPI_HVCB) failed\n",
- (int) ci->ci_cpuid);
- }
- }
}
void
Index: arch/xen/x86/xen_ipi.c
===================================================================
RCS file: /cvsroot/src/sys/arch/xen/x86/xen_ipi.c,v
retrieving revision 1.11
diff -u -r1.11 xen_ipi.c
--- arch/xen/x86/xen_ipi.c 27 Dec 2012 06:42:14 -0000 1.11
+++ arch/xen/x86/xen_ipi.c 27 Dec 2012 06:57:12 -0000
@@ -1,4 +1,4 @@
-/* $NetBSD: xen_ipi.c,v 1.11 2012/12/27 06:42:14 cherry Exp $ */
+/* $NetBSD: xen_ipi.c,v 1.10 2012/02/17 18:40:20 bouyer Exp $ */
/*-
* Copyright (c) 2011 The NetBSD Foundation, Inc.
@@ -33,10 +33,10 @@
/*
* Based on: x86/ipi.c
- * __KERNEL_RCSID(0, "$NetBSD: xen_ipi.c,v 1.11 2012/12/27 06:42:14 cherry Exp $");
+ * __KERNEL_RCSID(0, "$NetBSD: xen_ipi.c,v 1.10 2012/02/17 18:40:20 bouyer Exp $");
*/
-__KERNEL_RCSID(0, "$NetBSD: xen_ipi.c,v 1.11 2012/12/27 06:42:14 cherry Exp $");
+__KERNEL_RCSID(0, "$NetBSD: xen_ipi.c,v 1.10 2012/02/17 18:40:20 bouyer Exp $");
#include <sys/types.h>
@@ -56,7 +56,6 @@
#include <machine/frame.h>
#include <machine/segments.h>
-#include <xen/evtchn.h>
#include <xen/intr.h>
#include <xen/intrdefs.h>
#include <xen/hypervisor.h>
Index: arch/xen/xen/evtchn.c
===================================================================
RCS file: /cvsroot/src/sys/arch/xen/xen/evtchn.c,v
retrieving revision 1.65
diff -u -r1.65 evtchn.c
--- arch/xen/xen/evtchn.c 25 Nov 2012 20:56:33 -0000 1.65
+++ arch/xen/xen/evtchn.c 27 Dec 2012 06:57:12 -0000
@@ -89,7 +89,7 @@
struct evtsource *evtsource[NR_EVENT_CHANNELS];
/* channel locks */
-static kmutex_t evtlock[NR_EVENT_CHANNELS];
+kmutex_t evtlock[NR_EVENT_CHANNELS];
/* Reference counts for bindings to event channels XXX: redo for SMP */
static uint8_t evtch_bindcount[NR_EVENT_CHANNELS];
@@ -228,6 +228,50 @@
}
+static int
+xspllower(int ilevel, void *regs)
+{
+ int i;
+ uint32_t iplbit;
+ uint32_t iplpending;
+
+ struct cpu_info *ci;
+
+ KASSERT(kpreempt_disabled());
+ KASSERT(x86_read_psl() != 0); /* Interrupts must be disabled */
+
+ ci = curcpu();
+ ci->ci_idepth++;
+
+ KASSERT(ilevel < ci->ci_ilevel);
+ /*
+ * C version of spllower(). ASTs will be checked when
+ * hypervisor_callback() exits, so no need to check here.
+ */
+ iplpending = (IUNMASK(ci, ilevel) & ci->ci_ipending);
+ while (iplpending != 0) {
+ iplbit = 1 << (NIPL - 1);
+ i = (NIPL - 1);
+ while (iplpending != 0 && i > ilevel) {
+ while (iplpending & iplbit) {
+ ci->ci_ipending &= ~iplbit;
+ ci->ci_ilevel = i;
+ hypervisor_do_iplpending(i, regs);
+ KASSERT(x86_read_psl() != 0);
+ /* more pending IPLs may have been registered */
+ iplpending =
+ (IUNMASK(ci, ilevel) & ci->ci_ipending);
+ }
+ i--;
+ iplbit >>= 1;
+ }
+ }
+ KASSERT(x86_read_psl() != 0);
+ ci->ci_ilevel = ilevel;
+ ci->ci_idepth--;
+ return 0;
+}
+
unsigned int
evtchn_do_event(int evtch, struct intrframe *regs)
{
@@ -236,8 +280,6 @@
struct intrhand *ih;
int (*ih_fun)(void *, void *);
uint32_t iplmask;
- int i;
- uint32_t iplbit;
#ifdef DIAGNOSTIC
if (evtch >= NR_EVENT_CHANNELS) {
@@ -292,7 +334,10 @@
}
ci->ci_ilevel = evtsource[evtch]->ev_maxlevel;
iplmask = evtsource[evtch]->ev_imask;
- sti();
+
+ KASSERT(x86_read_psl() != 0);
+ x86_enable_intr();
+
mutex_spin_enter(&evtlock[evtch]);
ih = evtsource[evtch]->ev_handlers;
while (ih != NULL) {
@@ -307,60 +352,45 @@
if (evtch == IRQ_DEBUG)
printf("ih->ih_level %d <= ilevel %d\n", ih->ih_level,
ilevel);
#endif
- cli();
- hypervisor_set_ipending(iplmask,
- evtch >> LONG_SHIFT, evtch & LONG_MASK);
- /* leave masked */
+
mutex_spin_exit(&evtlock[evtch]);
+ x86_disable_intr();
+ hypervisor_set_ipending(iplmask,
+ evtch >> LONG_SHIFT, evtch & LONG_MASK);
+
+ /* leave masked */
goto splx;
}
iplmask &= ~IUNMASK(ci, ih->ih_level);
- ci->ci_ilevel = ih->ih_level;
+
+ KASSERT(x86_read_psl() == 0);
+ KASSERT(ih->ih_level > ilevel);
+
+ { /* Lower current spl to current handler */
+ x86_disable_intr();
+
+ /* assert for handlers being sorted by spl */
+ KASSERT(ci->ci_ilevel >= ih->ih_level);
+
+ /* Lower it */
+ ci->ci_ilevel = ih->ih_level;
+
+ x86_enable_intr();
+ }
+
+ /* Assert handler doesn't break spl rules */
+ KASSERT(ih->ih_level > ilevel);
+
ih_fun = (void *)ih->ih_fun;
ih_fun(ih->ih_arg, regs);
ih = ih->ih_evt_next;
}
mutex_spin_exit(&evtlock[evtch]);
- cli();
+ x86_disable_intr();
hypervisor_enable_event(evtch);
splx:
- /*
- * C version of spllower(). ASTs will be checked when
- * hypevisor_callback() exits, so no need to check here.
- */
- iplmask = (IUNMASK(ci, ilevel) & ci->ci_ipending);
- while (iplmask != 0) {
- iplbit = 1 << (NIPL - 1);
- i = (NIPL - 1);
- while (iplmask != 0 && i > ilevel) {
- while (iplmask & iplbit) {
- ci->ci_ipending &= ~iplbit;
- ci->ci_ilevel = i;
- for (ih = ci->ci_isources[i]->ipl_handlers;
- ih != NULL; ih = ih->ih_ipl_next) {
- KASSERT(ih->ih_cpu == ci);
- sti();
- ih_fun = (void *)ih->ih_fun;
- ih_fun(ih->ih_arg, regs);
- cli();
- if (ci->ci_ilevel != i) {
- printf("evtchn_do_event: "
- "handler %p didn't lower "
- "ipl %d %d\n",
- ih_fun, ci->ci_ilevel, i);
- ci->ci_ilevel = i;
- }
- }
- hypervisor_enable_ipl(i);
- /* more pending IPLs may have been registered */
- iplmask =
- (IUNMASK(ci, ilevel) & ci->ci_ipending);
- }
- i--;
- iplbit >>= 1;
- }
- }
- ci->ci_ilevel = ilevel;
+ KASSERT(ci->ci_ilevel > ilevel);
+ xspllower(ilevel, regs);
return 0;
}
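
As background on the two-level pending masks the patch walks with
evt_iterate_bits() (ipl_evt_mask1 selects a word, ipl_evt_mask2[] carries one
bit per event channel), here is a simplified, self-contained sketch of that
kind of walk. It is an illustration under assumed names and a 32-bit
second-level word size, not the kernel's actual evt_iterate_bits():

/*
 * Simplified, self-contained model of a two-level pending-bit walk, the
 * pattern behind ipl_evt_mask1/ipl_evt_mask2 and evtchn_pending above.
 * Illustration only: word size and helper names are assumptions here.
 */
#include <stdio.h>

#define LONG_SHIFT 5                   /* 2^5 = 32 channels per 2nd-level word */
#define LONG_MASK  31
#define NR_WORDS   8

static unsigned long mask1;            /* level 1: one bit per 2nd-level word */
static unsigned long mask2[NR_WORDS];  /* level 2: one bit per event channel  */

static void
set_pending(unsigned int evtch)
{
	mask2[evtch >> LONG_SHIFT] |= 1UL << (evtch & LONG_MASK);
	mask1 |= 1UL << (evtch >> LONG_SHIFT);
}

static void
walk_pending(void (*cb)(unsigned int))
{
	while (mask1 != 0) {
		unsigned int l1i = __builtin_ctzl(mask1);      /* lowest set word */

		while (mask2[l1i] != 0) {
			unsigned int l2i = __builtin_ctzl(mask2[l1i]);

			mask2[l1i] &= ~(1UL << l2i);
			cb((l1i << LONG_SHIFT) + l2i);         /* recovered channel */
		}
		mask1 &= ~(1UL << l1i);
	}
}

static void
print_evtch(unsigned int evtch)
{
	printf("pending event channel %u\n", evtch);
}

int
main(void)
{
	set_pending(3);
	set_pending(70);
	walk_pending(print_evtch);	/* prints channels 3 and 70 */
	return 0;
}

hypervisor_set_ipending() in the patch does the equivalent of set_pending()
above (1UL << l1 into ipl_evt_mask1, 1UL << l2 into ipl_evt_mask2[l1]), and
hypervisor_do_iplpending() performs the per-IPL walk, calling
evt_do_iplcallback() for each recovered event channel.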