pkgsrc-Changes-HG archive
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]
[pkgsrc/trunk]: pkgsrc/sysutils/xenkernel411 Add upstream patches for Xen sec...
details: https://anonhg.NetBSD.org/pkgsrc/rev/a78c182da675
branches: trunk
changeset: 443606:a78c182da675
user: bouyer <bouyer%pkgsrc.org@localhost>
date: Thu Dec 17 16:47:30 2020 +0000
description:
Add upstream patches for Xen security advisories 348, 358 and 359.
Bump PKGREVISION
diffstat:
sysutils/xenkernel411/Makefile | 4 +-
sysutils/xenkernel411/distinfo | 5 +-
sysutils/xenkernel411/patches/patch-XSA348 | 166 +++++++++++++++++++++++++++++
sysutils/xenkernel411/patches/patch-XSA358 | 48 ++++++++
sysutils/xenkernel411/patches/patch-XSA359 | 42 +++++++
5 files changed, 262 insertions(+), 3 deletions(-)
diffs (truncated from 302 to 300 lines):
diff -r a49ea1c3e229 -r a78c182da675 sysutils/xenkernel411/Makefile
--- a/sysutils/xenkernel411/Makefile Thu Dec 17 16:17:55 2020 +0000
+++ b/sysutils/xenkernel411/Makefile Thu Dec 17 16:47:30 2020 +0000
@@ -1,8 +1,8 @@
-# $NetBSD: Makefile,v 1.18 2020/11/12 11:29:25 bouyer Exp $
+# $NetBSD: Makefile,v 1.19 2020/12/17 16:47:30 bouyer Exp $
VERSION= 4.11.4
#keep >= 1 if we have security patches
-PKGREVISION= 4
+PKGREVISION= 5
DISTNAME= xen-${VERSION}
PKGNAME= xenkernel411-${VERSION}
CATEGORIES= sysutils
diff -r a49ea1c3e229 -r a78c182da675 sysutils/xenkernel411/distinfo
--- a/sysutils/xenkernel411/distinfo Thu Dec 17 16:17:55 2020 +0000
+++ b/sysutils/xenkernel411/distinfo Thu Dec 17 16:47:30 2020 +0000
@@ -1,4 +1,4 @@
-$NetBSD: distinfo,v 1.16 2020/11/12 11:29:25 bouyer Exp $
+$NetBSD: distinfo,v 1.17 2020/12/17 16:47:30 bouyer Exp $
SHA1 (xen411/xen-4.11.4.tar.gz) = 6c8cdf441621c14dc5345196b48df6982c060c4f
RMD160 (xen411/xen-4.11.4.tar.gz) = 49819fcd1de3985d4dea370be962548c862f2933
@@ -23,7 +23,10 @@
SHA1 (patch-XSA345) = 14ab754703af1045b2d049de1c6ba1c5baca5d81
SHA1 (patch-XSA346) = c1962c037c5ab62c2f7e9a558c4565331c981be0
SHA1 (patch-XSA347) = f3f98a794584d5d4321b95c2b1b9c88821fa567e
+SHA1 (patch-XSA348) = 2a1128e86e31eff4596958324cbaab82830b51a8
SHA1 (patch-XSA351) = fca8d8c5c77ba8d6007d7643330be7f8835bbc5a
+SHA1 (patch-XSA358) = 3831faa429ac35fa993a60c426fca877bea35bbd
+SHA1 (patch-XSA359) = 4b778a86fffbe0e2a364e1589d573bbc7c27ff99
SHA1 (patch-xen_Makefile) = 465388d80de414ca3bb84faefa0f52d817e423a6
SHA1 (patch-xen_Rules.mk) = c743dc63f51fc280d529a7d9e08650292c171dac
SHA1 (patch-xen_arch_x86_Rules.mk) = 0bedfc53a128a87b6a249ae04fbdf6a053bfb70b
diff -r a49ea1c3e229 -r a78c182da675 sysutils/xenkernel411/patches/patch-XSA348
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/sysutils/xenkernel411/patches/patch-XSA348 Thu Dec 17 16:47:30 2020 +0000
@@ -0,0 +1,166 @@
+$NetBSD: patch-XSA348,v 1.1 2020/12/17 16:47:30 bouyer Exp $
+
+From: Jan Beulich <jbeulich%suse.com@localhost>
+Subject: x86: avoid calling {svm,vmx}_do_resume()
+
+These functions follow the following path: hvm_do_resume() ->
+handle_hvm_io_completion() -> hvm_wait_for_io() ->
+wait_on_xen_event_channel() -> do_softirq() -> schedule() ->
+sched_context_switch() -> continue_running() and hence may
+recursively invoke themselves. If this ends up happening a couple of
+times, a stack overflow would result.
+
+Prevent this by also resetting the stack at the
+->arch.ctxt_switch->tail() invocations (in both places for consistency)
+and thus jumping to the functions instead of calling them.
+
+This is XSA-348 / CVE-2020-29566.
+
+Reported-by: Julien Grall <jgrall%amazon.com@localhost>
+Signed-off-by: Jan Beulich <jbeulich%suse.com@localhost>
+Reviewed-by: Juergen Gross <jgross%suse.com@localhost>
+
+--- xen/arch/x86/domain.c.orig 2020-10-15 17:35:17.000000000 +0200
++++ xen/arch/x86/domain.c 2020-11-10 17:56:59.000000000 +0100
+@@ -121,7 +121,7 @@ static void play_dead(void)
+ (*dead_idle)();
+ }
+
+-static void idle_loop(void)
++static void noreturn idle_loop(void)
+ {
+ unsigned int cpu = smp_processor_id();
+
+@@ -161,11 +161,6 @@ void startup_cpu_idle_loop(void)
+ reset_stack_and_jump(idle_loop);
+ }
+
+-static void noreturn continue_idle_domain(struct vcpu *v)
+-{
+- reset_stack_and_jump(idle_loop);
+-}
+-
+ void dump_pageframe_info(struct domain *d)
+ {
+ struct page_info *page;
+@@ -456,7 +451,7 @@ int arch_domain_create(struct domain *d,
+ static const struct arch_csw idle_csw = {
+ .from = paravirt_ctxt_switch_from,
+ .to = paravirt_ctxt_switch_to,
+- .tail = continue_idle_domain,
++ .tail = idle_loop,
+ };
+
+ d->arch.ctxt_switch = &idle_csw;
+@@ -1770,20 +1765,12 @@ void context_switch(struct vcpu *prev, s
+ /* Ensure that the vcpu has an up-to-date time base. */
+ update_vcpu_system_time(next);
+
+- /*
+- * Schedule tail *should* be a terminal function pointer, but leave a
+- * bug frame around just in case it returns, to save going back into the
+- * context switching code and leaving a far more subtle crash to diagnose.
+- */
+- nextd->arch.ctxt_switch->tail(next);
+- BUG();
++ reset_stack_and_jump_ind(nextd->arch.ctxt_switch->tail);
+ }
+
+ void continue_running(struct vcpu *same)
+ {
+- /* See the comment above. */
+- same->domain->arch.ctxt_switch->tail(same);
+- BUG();
++ reset_stack_and_jump_ind(same->domain->arch.ctxt_switch->tail);
+ }
+
+ int __sync_local_execstate(void)
+--- xen/arch/x86/hvm/svm/svm.c.orig 2020-06-18 15:13:13.001760095 +0200
++++ xen/arch/x86/hvm/svm/svm.c 2020-11-10 17:56:59.000000000 +0100
+@@ -1111,8 +1111,9 @@ static void svm_ctxt_switch_to(struct vc
+ wrmsr_tsc_aux(hvm_msr_tsc_aux(v));
+ }
+
+-static void noreturn svm_do_resume(struct vcpu *v)
++static void noreturn svm_do_resume(void)
+ {
++ struct vcpu *v = current;
+ struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+ bool debug_state = (v->domain->debugger_attached ||
+ v->domain->arch.monitor.software_breakpoint_enabled ||
+--- xen/arch/x86/hvm/vmx/vmcs.c.orig 2019-12-03 17:46:26.000000000 +0100
++++ xen/arch/x86/hvm/vmx/vmcs.c 2020-11-10 17:56:59.000000000 +0100
+@@ -1782,8 +1782,9 @@ void vmx_vmentry_failure(void)
+ domain_crash_synchronous();
+ }
+
+-void vmx_do_resume(struct vcpu *v)
++void vmx_do_resume(void)
+ {
++ struct vcpu *v = current;
+ bool_t debug_state;
+ unsigned long host_cr4;
+
+--- xen/arch/x86/pv/domain.c.orig 2019-06-25 23:47:11.000000000 +0200
++++ xen/arch/x86/pv/domain.c 2020-11-10 17:56:59.000000000 +0100
+@@ -58,7 +58,7 @@ static int parse_pcid(const char *s)
+ }
+ custom_runtime_param("pcid", parse_pcid);
+
+-static void noreturn continue_nonidle_domain(struct vcpu *v)
++static void noreturn continue_nonidle_domain(void)
+ {
+ check_wakeup_from_wait();
+ reset_stack_and_jump(ret_from_intr);
+--- xen/include/asm-x86/current.h.orig 2019-06-25 23:47:11.000000000 +0200
++++ xen/include/asm-x86/current.h 2020-11-10 17:56:59.000000000 +0100
+@@ -124,16 +124,23 @@ unsigned long get_stack_dump_bottom (uns
+ # define CHECK_FOR_LIVEPATCH_WORK ""
+ #endif
+
+-#define reset_stack_and_jump(__fn) \
++#define switch_stack_and_jump(fn, instr, constr) \
+ ({ \
+ __asm__ __volatile__ ( \
+ "mov %0,%%"__OP"sp;" \
+- CHECK_FOR_LIVEPATCH_WORK \
+- "jmp %c1" \
+- : : "r" (guest_cpu_user_regs()), "i" (__fn) : "memory" ); \
++ CHECK_FOR_LIVEPATCH_WORK \
++ instr "1" \
++ : : "r" (guest_cpu_user_regs()), constr (fn) : "memory" ); \
+ unreachable(); \
+ })
+
++#define reset_stack_and_jump(fn) \
++ switch_stack_and_jump(fn, "jmp %c", "i")
++
++/* The constraint may only specify non-call-clobbered registers. */
++#define reset_stack_and_jump_ind(fn) \
++ switch_stack_and_jump(fn, "INDIRECT_JMP %", "b")
++
+ /*
+ * Which VCPU's state is currently running on each CPU?
+ * This is not necesasrily the same as 'current' as a CPU may be
+--- xen/include/asm-x86/domain.h.orig 2019-12-03 17:46:26.000000000 +0100
++++ xen/include/asm-x86/domain.h 2020-11-10 17:56:59.000000000 +0100
+@@ -328,7 +328,7 @@ struct arch_domain
+ const struct arch_csw {
+ void (*from)(struct vcpu *);
+ void (*to)(struct vcpu *);
+- void (*tail)(struct vcpu *);
++ void noreturn (*tail)(void);
+ } *ctxt_switch;
+
+ /* nestedhvm: translate l2 guest physical to host physical */
+--- xen/include/asm-x86/hvm/vmx/vmx.h.orig 2019-12-03 17:46:26.000000000 +0100
++++ xen/include/asm-x86/hvm/vmx/vmx.h 2020-11-10 17:56:59.000000000 +0100
+@@ -95,7 +95,7 @@ typedef enum {
+ void vmx_asm_vmexit_handler(struct cpu_user_regs);
+ void vmx_asm_do_vmentry(void);
+ void vmx_intr_assist(void);
+-void noreturn vmx_do_resume(struct vcpu *);
++void noreturn vmx_do_resume(void);
+ void vmx_vlapic_msr_changed(struct vcpu *v);
+ void vmx_realmode_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt);
+ void vmx_realmode(struct cpu_user_regs *regs);
diff -r a49ea1c3e229 -r a78c182da675 sysutils/xenkernel411/patches/patch-XSA358
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/sysutils/xenkernel411/patches/patch-XSA358 Thu Dec 17 16:47:30 2020 +0000
@@ -0,0 +1,48 @@
+$NetBSD: patch-XSA358,v 1.1 2020/12/17 16:47:30 bouyer Exp $
+
+From: Jan Beulich <jbeulich%suse.com@localhost>
+Subject: evtchn/FIFO: re-order and synchronize (with) map_control_block()
+
+For evtchn_fifo_set_pending()'s check of the control block having been
+set to be effective, ordering of respective reads and writes needs to be
+ensured: The control block pointer needs to be recorded strictly after
+the setting of all the queue heads, and it needs checking strictly
+before any uses of them (this latter aspect was already guaranteed).
+
+This is XSA-358 / CVE-2020-29570.
+
+Reported-by: Julien Grall <jgrall%amazon.com@localhost>
+Signed-off-by: Jan Beulich <jbeulich%suse.com@localhost>
+Acked-by: Julien Grall <jgrall%amazon.com@localhost>
+---
+v3: Drop read-side barrier again, leveraging guest_test_and_set_bit().
+v2: Re-base over queue locking re-work.
+
+--- xen/common/event_fifo.c.orig
++++ xen/common/event_fifo.c
+@@ -474,6 +478,7 @@ static int setup_control_block(struct vc
+ static int map_control_block(struct vcpu *v, uint64_t gfn, uint32_t offset)
+ {
+ void *virt;
++ struct evtchn_fifo_control_block *control_block;
+ unsigned int i;
+ int rc;
+
+@@ -484,10 +489,15 @@ static int map_control_block(struct vcpu
+ if ( rc < 0 )
+ return rc;
+
+- v->evtchn_fifo->control_block = virt + offset;
++ control_block = virt + offset;
+
+ for ( i = 0; i <= EVTCHN_FIFO_PRIORITY_MIN; i++ )
+- v->evtchn_fifo->queue[i].head = &v->evtchn_fifo->control_block->head[i];
++ v->evtchn_fifo->queue[i].head = &control_block->head[i];
++
++ /* All queue heads must have been set before setting the control block. */
++ smp_wmb();
++
++ v->evtchn_fifo->control_block = control_block;
+
+ return 0;
+ }
diff -r a49ea1c3e229 -r a78c182da675 sysutils/xenkernel411/patches/patch-XSA359
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/sysutils/xenkernel411/patches/patch-XSA359 Thu Dec 17 16:47:30 2020 +0000
@@ -0,0 +1,42 @@
+$NetBSD: patch-XSA359,v 1.1 2020/12/17 16:47:30 bouyer Exp $
+
+From: Jan Beulich <jbeulich%suse.com@localhost>
+Subject: evtchn/FIFO: add 2nd smp_rmb() to evtchn_fifo_word_from_port()
+
+Besides with add_page_to_event_array() the function also needs to
+synchronize with evtchn_fifo_init_control() setting both d->evtchn_fifo
+and (subsequently) d->evtchn_port_ops.
+
+This is XSA-359 / CVE-2020-29571.
+
+Reported-by: Julien Grall <jgrall%amazon.com@localhost>
+Signed-off-by: Jan Beulich <jbeulich%suse.com@localhost>
+Reviewed-by: Julien Grall <jgrall%amazon.com@localhost>
+
+--- xen/common/event_fifo.c.orig
++++ xen/common/event_fifo.c
+@@ -55,6 +55,13 @@ static inline event_word_t *evtchn_fifo_
+ {
+ unsigned int p, w;
+
++ /*
++ * Callers aren't required to hold d->event_lock, so we need to synchronize
++ * with evtchn_fifo_init_control() setting d->evtchn_port_ops /after/
++ * d->evtchn_fifo.
++ */
++ smp_rmb();
++
+ if ( unlikely(port >= d->evtchn_fifo->num_evtchns) )
+ return NULL;
+
+@@ -606,6 +613,10 @@ int evtchn_fifo_init_control(struct evtc
+ if ( rc < 0 )
+ goto error;
+
++ /*
++ * This call, as a side effect, synchronizes with
++ * evtchn_fifo_word_from_port().
++ */
+ rc = map_control_block(v, gfn, offset);
Home |
Main Index |
Thread Index |
Old Index