pkgsrc-Changes-HG archive
[pkgsrc/trunk]: pkgsrc/sysutils/xenkernel413 Add upstream fixes for
details: https://anonhg.NetBSD.org/pkgsrc/rev/314c027fd25b
branches: trunk
changeset: 440008:314c027fd25b
user: bouyer <bouyer@pkgsrc.org>
date: Thu Oct 01 12:41:18 2020 +0000
description:
Add upstream fixes for
XSA333, XSA334, XSA336, XSA337, XSA338, XSA339, XSA340, XSA342, XSA343, XSA344
bump PKGREVISION
diffstat:
sysutils/xenkernel413/Makefile | 4 +-
sysutils/xenkernel413/distinfo | 12 +-
sysutils/xenkernel413/patches/patch-XSA333 | 41 +
sysutils/xenkernel413/patches/patch-XSA334 | 53 +
sysutils/xenkernel413/patches/patch-XSA336 | 285 +++++++++
sysutils/xenkernel413/patches/patch-XSA337 | 270 ++++++++
sysutils/xenkernel413/patches/patch-XSA338 | 44 +
sysutils/xenkernel413/patches/patch-XSA339 | 78 ++
sysutils/xenkernel413/patches/patch-XSA340 | 67 ++
sysutils/xenkernel413/patches/patch-XSA342 | 147 ++++
sysutils/xenkernel413/patches/patch-XSA343 | 888 +++++++++++++++++++++++++++++
sysutils/xenkernel413/patches/patch-XSA344 | 335 ++++++++++
12 files changed, 2221 insertions(+), 3 deletions(-)
diffs (truncated from 2287 to 300 lines):
diff -r 0cb01dcaea1b -r 314c027fd25b sysutils/xenkernel413/Makefile
--- a/sysutils/xenkernel413/Makefile Thu Oct 01 12:05:30 2020 +0000
+++ b/sysutils/xenkernel413/Makefile Thu Oct 01 12:41:18 2020 +0000
@@ -1,7 +1,7 @@
-# $NetBSD: Makefile,v 1.4 2020/09/20 03:18:33 joerg Exp $
+# $NetBSD: Makefile,v 1.5 2020/10/01 12:41:18 bouyer Exp $
VERSION= 4.13.1
-PKGREVISION= 1
+PKGREVISION= 2
DISTNAME= xen-${VERSION}
PKGNAME= xenkernel413-${VERSION}
CATEGORIES= sysutils
diff -r 0cb01dcaea1b -r 314c027fd25b sysutils/xenkernel413/distinfo
--- a/sysutils/xenkernel413/distinfo Thu Oct 01 12:05:30 2020 +0000
+++ b/sysutils/xenkernel413/distinfo Thu Oct 01 12:41:18 2020 +0000
@@ -1,4 +1,4 @@
-$NetBSD: distinfo,v 1.2 2020/07/16 09:56:47 bouyer Exp $
+$NetBSD: distinfo,v 1.3 2020/10/01 12:41:18 bouyer Exp $
SHA1 (xen413/xen-4.13.1.tar.gz) = 194a314171120dad0b3c5433104c92343ec884ba
RMD160 (xen413/xen-4.13.1.tar.gz) = 29cfb90b9da0ede99c1228b8e5964a99547c205d
@@ -10,6 +10,16 @@
SHA1 (patch-XSA320) = db978d49298660fb750dc6b50c2a1ddd099c8fa0
SHA1 (patch-XSA321) = 257dfc7e15a63b2149a9b9aed4e6e3b10f01f551
SHA1 (patch-XSA328) = eb86e10b7279318006a8593561e3932b76adbc0c
+SHA1 (patch-XSA333) = 47660b70b2c998436587600bb9a25c2f494afa49
+SHA1 (patch-XSA334) = d11e778775314dff561d67da629d71704cc0bd8c
+SHA1 (patch-XSA336) = d3cfcb7a9800475967417e4b5b47698ed6a6b5a5
+SHA1 (patch-XSA337) = 3fcfab120c9b4c5e4dc55a4138f68aceaed9bebf
+SHA1 (patch-XSA338) = 0adcebec2c25a389155a10de84bf999ff2e5425d
+SHA1 (patch-XSA339) = 4f97076bda8150d1b1c68f6000d563f3c3314c02
+SHA1 (patch-XSA340) = 23888acfe25fc82ff085fa9acfbb36c156a15bc3
+SHA1 (patch-XSA342) = a61c4e28a8c8219b88e3bab534a109b2b29e2cc3
+SHA1 (patch-XSA343) = f4656c110229fdc63b57b8af76fc6e60386ef3cd
+SHA1 (patch-XSA344) = 616fb56027ee289bb3b7b061e2f9f6f6d81e358b
SHA1 (patch-xen_Makefile) = 465388d80de414ca3bb84faefa0f52d817e423a6
SHA1 (patch-xen_Rules.mk) = c743dc63f51fc280d529a7d9e08650292c171dac
SHA1 (patch-xen_arch_x86_Rules.mk) = 0bedfc53a128a87b6a249ae04fbdf6a053bfb70b
diff -r 0cb01dcaea1b -r 314c027fd25b sysutils/xenkernel413/patches/patch-XSA333
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/sysutils/xenkernel413/patches/patch-XSA333 Thu Oct 01 12:41:18 2020 +0000
@@ -0,0 +1,41 @@
+$NetBSD: patch-XSA333,v 1.1 2020/10/01 12:41:19 bouyer Exp $
+
+From: Andrew Cooper <andrew.cooper3@citrix.com>
+Subject: x86/pv: Handle the Intel-specific MSR_MISC_ENABLE correctly
+
+This MSR doesn't exist on AMD hardware, and switching away from the safe
+functions in the common MSR path was an erroneous change.
+
+Partially revert the change.
+
+This is XSA-333.
+
+Fixes: 4fdc932b3cc ("x86/Intel: drop another 32-bit leftover")
+Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: Wei Liu <wl@xen.org>
+
+diff --git a/xen/arch/x86/pv/emul-priv-op.c b/xen/arch/x86/pv/emul-priv-op.c
+index efeb2a727e..6332c74b80 100644
+--- xen/arch/x86/pv/emul-priv-op.c.orig
++++ xen/arch/x86/pv/emul-priv-op.c
+@@ -924,7 +924,8 @@ static int read_msr(unsigned int reg, uint64_t *val,
+ return X86EMUL_OKAY;
+
+ case MSR_IA32_MISC_ENABLE:
+- rdmsrl(reg, *val);
++ if ( rdmsr_safe(reg, *val) )
++ break;
+ *val = guest_misc_enable(*val);
+ return X86EMUL_OKAY;
+
+@@ -1059,7 +1060,8 @@ static int write_msr(unsigned int reg, uint64_t val,
+ break;
+
+ case MSR_IA32_MISC_ENABLE:
+- rdmsrl(reg, temp);
++ if ( rdmsr_safe(reg, temp) )
++ break;
+ if ( val != guest_misc_enable(temp) )
+ goto invalid;
+ return X86EMUL_OKAY;
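The XSA-333 commit message above turns on the difference between rdmsrl(), which assumes the MSR exists and would fault inside the hypervisor on AMD hardware, and rdmsr_safe(), which catches the fault and reports failure so the caller can fall back gracefully. Below is a minimal, self-contained model of that pattern, not Xen's actual helpers: the probe logic, the dummy value, and the is_intel flag are all invented for illustration.

/*
 * Model of the guarded-MSR-read pattern; not Xen code.  A "safe" read
 * reports failure for an MSR the hardware lacks, where an unguarded
 * read would raise #GP inside the hypervisor.
 */
#include <stdint.h>
#include <stdio.h>

#define MSR_IA32_MISC_ENABLE 0x1a0   /* Intel-specific in this model */

/* Stand-in for Xen's rdmsr_safe(): nonzero means the MSR is absent. */
static int rdmsr_safe(unsigned int reg, uint64_t *val, int is_intel)
{
    if ( reg == MSR_IA32_MISC_ENABLE && !is_intel )
        return 1;                /* real AMD hardware would fault here */
    *val = 0x800;                /* dummy content for the model */
    return 0;
}

int main(void)
{
    uint64_t val;

    /* Guarded read: on "AMD" the MSR's absence is handled, not fatal. */
    if ( rdmsr_safe(MSR_IA32_MISC_ENABLE, &val, /*is_intel=*/0) )
        puts("MSR absent: fall through to generic handling");
    else
        printf("MSR value: %#jx\n", (uintmax_t)val);
    return 0;
}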
diff -r 0cb01dcaea1b -r 314c027fd25b sysutils/xenkernel413/patches/patch-XSA334
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/sysutils/xenkernel413/patches/patch-XSA334 Thu Oct 01 12:41:18 2020 +0000
@@ -0,0 +1,53 @@
+$NetBSD: patch-XSA334,v 1.1 2020/10/01 12:41:19 bouyer Exp $
+
+From: Andrew Cooper <andrew.cooper3@citrix.com>
+Subject: xen/memory: Don't skip the RCU unlock path in acquire_resource()
+
+In the case that an HVM Stubdomain makes an XENMEM_acquire_resource hypercall,
+the FIXME path will bypass rcu_unlock_domain() on the way out of the function.
+
+Move the check to the start of the function. This does change the behaviour
+of the get-size path for HVM Stubdomains, but that functionality is currently
+broken and unused anyway, as well as being quite useless to entities which
+can't actually map the resource anyway.
+
+This is XSA-334.
+
+Fixes: 83fa6552ce ("common: add a new mappable resource type: XENMEM_resource_grant_table")
+Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+
+diff --git a/xen/common/memory.c b/xen/common/memory.c
+index 1a3c9ffb30..29741d8904 100644
+--- xen/common/memory.c.orig
++++ xen/common/memory.c
+@@ -1058,6 +1058,14 @@ static int acquire_resource(
+ xen_pfn_t mfn_list[32];
+ int rc;
+
++ /*
++ * FIXME: Until foreign pages inserted into the P2M are properly
++ * reference counted, it is unsafe to allow mapping of
++ * resource pages unless the caller is the hardware domain.
++ */
++ if ( paging_mode_translate(currd) && !is_hardware_domain(currd) )
++ return -EACCES;
++
+ if ( copy_from_guest(&xmar, arg, 1) )
+ return -EFAULT;
+
+@@ -1114,14 +1122,6 @@ static int acquire_resource(
+ xen_pfn_t gfn_list[ARRAY_SIZE(mfn_list)];
+ unsigned int i;
+
+- /*
+- * FIXME: Until foreign pages inserted into the P2M are properly
+- * reference counted, it is unsafe to allow mapping of
+- * resource pages unless the caller is the hardware domain.
+- */
+- if ( !is_hardware_domain(currd) )
+- return -EACCES;
+-
+ if ( copy_from_guest(gfn_list, xmar.frame_list, xmar.nr_frames) )
+ rc = -EFAULT;
+
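The XSA-334 fix is an instance of a general bug class: an error check placed inside a region that has already taken a reference or lock creates an exit path that skips the release. Hoisting the check above the acquisition, as the patch does with the hardware-domain test, keeps every exit path balanced. Here is a self-contained illustration, with a pthread mutex standing in for rcu_lock_domain()/rcu_unlock_domain(); this is model code, not Xen's.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t domain_lock = PTHREAD_MUTEX_INITIALIZER;

/* Model of acquire_resource(); the lock stands in for the RCU domain
 * reference that the real function must release on every exit path. */
static int acquire_resource(int caller_is_hwdom)
{
    /* Permission check BEFORE acquisition: nothing to undo on failure. */
    if ( !caller_is_hwdom )
        return -EACCES;

    pthread_mutex_lock(&domain_lock);    /* rcu_lock_domain() stand-in */

    /* ... body: a bare "return" here would leak the lock, so error
     * paths must instead reach the release below ... */

    pthread_mutex_unlock(&domain_lock);  /* rcu_unlock_domain() stand-in */
    return 0;
}

int main(void)
{
    printf("unprivileged caller: %d\n", acquire_resource(0));
    printf("hardware domain:     %d\n", acquire_resource(1));
    return 0;
}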
diff -r 0cb01dcaea1b -r 314c027fd25b sysutils/xenkernel413/patches/patch-XSA336
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/sysutils/xenkernel413/patches/patch-XSA336 Thu Oct 01 12:41:18 2020 +0000
@@ -0,0 +1,285 @@
+$NetBSD: patch-XSA336,v 1.1 2020/10/01 12:41:19 bouyer Exp $
+
+From: Roger Pau Monné <roger.pau@citrix.com>
+Subject: x86/vpt: fix race when migrating timers between vCPUs
+
+The current vPT code will migrate the emulated timers between vCPUs
+(change the pt->vcpu field) while just holding the destination lock,
+either from create_periodic_time or pt_adjust_global_vcpu_target if
+the global target is adjusted. Changing the periodic_timer vCPU field
+in this way creates a race where a third party could grab the lock in
+the unlocked region of pt_adjust_global_vcpu_target (or before
+create_periodic_time performs the vcpu change) and then release the
+lock from a different vCPU, creating a locking imbalance.
+
+Introduce a per-domain rwlock in order to protect periodic_time
+migration between vCPU lists. Taking the lock in read mode prevents
+any timer from being migrated to a different vCPU, while taking it in
+write mode allows performing migration of timers across vCPUs. The
+per-vcpu locks are still used to protect all the other fields from the
+periodic_timer struct.
+
+Note that such migration shouldn't happen frequently, and hence
+there's no performance drop as a result of such locking.
+
+This is XSA-336.
+
+Reported-by: Igor Druzhinin <igor.druzhinin@citrix.com>
+Tested-by: Igor Druzhinin <igor.druzhinin@citrix.com>
+Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+---
+Changes since v2:
+ - Re-order pt_adjust_vcpu to remove one if.
+ - Fix pt_lock to not call pt_vcpu_lock, as we might end up using a
+ stale value of pt->vcpu when taking the per-vcpu lock.
+
+Changes since v1:
+ - Use a per-domain rwlock to protect timer vCPU migration.
+
+--- xen/arch/x86/hvm/hvm.c.orig
++++ xen/arch/x86/hvm/hvm.c
+@@ -658,6 +658,8 @@ int hvm_domain_initialise(struct domain
+ /* need link to containing domain */
+ d->arch.hvm.pl_time->domain = d;
+
++ rwlock_init(&d->arch.hvm.pl_time->pt_migrate);
++
+ /* Set the default IO Bitmap. */
+ if ( is_hardware_domain(d) )
+ {
+--- xen/arch/x86/hvm/vpt.c.orig
++++ xen/arch/x86/hvm/vpt.c
+@@ -153,23 +153,32 @@ static int pt_irq_masked(struct periodic
+ return 1;
+ }
+
+-static void pt_lock(struct periodic_time *pt)
++static void pt_vcpu_lock(struct vcpu *v)
+ {
+- struct vcpu *v;
++ read_lock(&v->domain->arch.hvm.pl_time->pt_migrate);
++ spin_lock(&v->arch.hvm.tm_lock);
++}
+
+- for ( ; ; )
+- {
+- v = pt->vcpu;
+- spin_lock(&v->arch.hvm.tm_lock);
+- if ( likely(pt->vcpu == v) )
+- break;
+- spin_unlock(&v->arch.hvm.tm_lock);
+- }
++static void pt_vcpu_unlock(struct vcpu *v)
++{
++ spin_unlock(&v->arch.hvm.tm_lock);
++ read_unlock(&v->domain->arch.hvm.pl_time->pt_migrate);
++}
++
++static void pt_lock(struct periodic_time *pt)
++{
++ /*
++ * We cannot use pt_vcpu_lock here, because we need to acquire the
++ * per-domain lock first and then (re-)fetch the value of pt->vcpu, or
++ * else we might be using a stale value of pt->vcpu.
++ */
++ read_lock(&pt->vcpu->domain->arch.hvm.pl_time->pt_migrate);
++ spin_lock(&pt->vcpu->arch.hvm.tm_lock);
+ }
+
+ static void pt_unlock(struct periodic_time *pt)
+ {
+- spin_unlock(&pt->vcpu->arch.hvm.tm_lock);
++ pt_vcpu_unlock(pt->vcpu);
+ }
+
+ static void pt_process_missed_ticks(struct periodic_time *pt)
+@@ -219,7 +228,7 @@ void pt_save_timer(struct vcpu *v)
+ if ( v->pause_flags & VPF_blocked )
+ return;
+
+- spin_lock(&v->arch.hvm.tm_lock);
++ pt_vcpu_lock(v);
+
+ list_for_each_entry ( pt, head, list )
+ if ( !pt->do_not_freeze )
+@@ -227,7 +236,7 @@ void pt_save_timer(struct vcpu *v)
+
+ pt_freeze_time(v);
+
+- spin_unlock(&v->arch.hvm.tm_lock);
++ pt_vcpu_unlock(v);
+ }
+
+ void pt_restore_timer(struct vcpu *v)
+@@ -235,7 +244,7 @@ void pt_restore_timer(struct vcpu *v)
+ struct list_head *head = &v->arch.hvm.tm_list;
+ struct periodic_time *pt;
+
+- spin_lock(&v->arch.hvm.tm_lock);
++ pt_vcpu_lock(v);
+
+ list_for_each_entry ( pt, head, list )
+ {
+@@ -248,7 +257,7 @@ void pt_restore_timer(struct vcpu *v)
+
+ pt_thaw_time(v);
+
+- spin_unlock(&v->arch.hvm.tm_lock);
++ pt_vcpu_unlock(v);
+ }
+
+ static void pt_timer_fn(void *data)
+@@ -309,7 +318,7 @@ int pt_update_irq(struct vcpu *v)
+ int irq, pt_vector = -1;
+ bool level;
+
+- spin_lock(&v->arch.hvm.tm_lock);
++ pt_vcpu_lock(v);
+
+ earliest_pt = NULL;
+ max_lag = -1ULL;
+@@ -339,7 +348,7 @@ int pt_update_irq(struct vcpu *v)
+
+ if ( earliest_pt == NULL )
+ {
+- spin_unlock(&v->arch.hvm.tm_lock);
++ pt_vcpu_unlock(v);
+ return -1;
+ }
+
+@@ -347,7 +356,7 @@ int pt_update_irq(struct vcpu *v)
+ irq = earliest_pt->irq;
+ level = earliest_pt->level;
+
+- spin_unlock(&v->arch.hvm.tm_lock);
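The diff is cut off here by the archive's 300-line limit, but the XSA-336 commit message above fully describes the scheme: readers take a per-domain rwlock in read mode plus the per-vCPU spinlock, so a timer's vcpu field cannot change under them, while migration takes the rwlock in write mode and thereby excludes every reader on every vCPU at once. The following compilable sketch models that two-level pattern, with pthread primitives standing in for Xen's rwlock and spinlock; the struct layout and function names are illustrative, not Xen's.

#include <pthread.h>

struct domain;

struct vcpu {
    pthread_mutex_t  tm_lock;    /* per-vCPU timer-list lock */
    struct domain   *domain;
};

struct domain {
    pthread_rwlock_t pt_migrate; /* guards periodic_time vCPU migration */
    struct vcpu      vcpus[2];
};

struct periodic_time {
    struct vcpu *vcpu;           /* may only change under pt_migrate (write) */
};

static void pt_lock(struct periodic_time *pt)
{
    /* pt->vcpu may be stale at this point, but every vCPU belongs to the
     * same domain, so the rwlock reached is the right one; once the read
     * lock is held, pt->vcpu can no longer change, and the old pt_lock()
     * re-check loop is unnecessary. */
    pthread_rwlock_rdlock(&pt->vcpu->domain->pt_migrate);
    pthread_mutex_lock(&pt->vcpu->tm_lock);
}

static void pt_unlock(struct periodic_time *pt)
{
    pthread_mutex_unlock(&pt->vcpu->tm_lock);
    pthread_rwlock_unlock(&pt->vcpu->domain->pt_migrate);
}

static void pt_migrate_timer(struct periodic_time *pt, struct vcpu *target)
{
    struct domain *d = pt->vcpu->domain;

    /* Write mode excludes all pt_lock() holders on every vCPU, so the
     * vcpu field can be switched without a locking imbalance. */
    pthread_rwlock_wrlock(&d->pt_migrate);
    pt->vcpu = target;
    pthread_rwlock_unlock(&d->pt_migrate);
}

int main(void)
{
    static struct domain d;
    struct periodic_time pt = { &d.vcpus[0] };

    pthread_rwlock_init(&d.pt_migrate, NULL);
    for ( int i = 0; i < 2; i++ )
    {
        pthread_mutex_init(&d.vcpus[i].tm_lock, NULL);
        d.vcpus[i].domain = &d;
    }

    pt_lock(&pt);                       /* safe against concurrent migration */
    pt_unlock(&pt);
    pt_migrate_timer(&pt, &d.vcpus[1]);
    return 0;
}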