Source-Changes-HG archive
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]
[src/trunk]: src/sys/arch Mitigation for the SS bug, CVE-2018-8897
details: https://anonhg.NetBSD.org/src/rev/d5d71f3f5dd0
branches: trunk
changeset: 318881:d5d71f3f5dd0
user: maxv <maxv%NetBSD.org@localhost>
date: Tue May 08 17:20:44 2018 +0000
description:
Mitigation for the SS bug, CVE-2018-8897. We disabled dbregs a month ago
in -current and -8 so we are not particularly affected anymore.
The #DB handler runs on ist3, if we decide to process the exception we
copy the iret frame on the correct non-ist stack and continue as usual.
diffstat:
sys/arch/amd64/amd64/amd64_trap.S | 120 +++++++++++++++++++++++++++++++++++++-
sys/arch/amd64/amd64/machdep.c | 15 ++++-
sys/arch/x86/include/pmap.h | 3 +-
3 files changed, 134 insertions(+), 4 deletions(-)
diffs (202 lines):
diff -r 222cfb652f2d -r d5d71f3f5dd0 sys/arch/amd64/amd64/amd64_trap.S
--- a/sys/arch/amd64/amd64/amd64_trap.S Tue May 08 16:47:58 2018 +0000
+++ b/sys/arch/amd64/amd64/amd64_trap.S Tue May 08 17:20:44 2018 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: amd64_trap.S,v 1.40 2018/03/28 16:02:49 maxv Exp $ */
+/* $NetBSD: amd64_trap.S,v 1.41 2018/05/08 17:20:44 maxv Exp $ */
/*
* Copyright (c) 1998, 2007, 2008, 2017 The NetBSD Foundation, Inc.
@@ -106,14 +106,132 @@
.text
+/*
+ * ASM macro, used to leave the IST3 stack and to put ourselves on a non-IST
+ * stack. Only RDX, RCX and RAX are allowed to be used.
+ *
+ * +------------------------------+
+ * The iret frame we copy is: | rip | cs | rflags | rsp | ss |
+ * +------------------------------+
+ */
+.macro IST3_LEAVE is_user
+ .if \is_user
+ movq CPUVAR(CURLWP),%rax
+ movq L_PCB(%rax),%rax
+ movq PCB_RSP0(%rax),%rax
+ .else
+ movq TF_RSP(%rsp),%rax
+ .endif
+
+ subq $(5*8),%rax
+ movq %rax,CPUVAR(SCRATCH)
+
+ /* Copy the iret frame. */
+ movq TF_SS(%rsp),%rcx
+ movq %rcx,(4*8)(%rax)
+ movq TF_RSP(%rsp),%rcx
+ movq %rcx,(3*8)(%rax)
+ movq TF_RFLAGS(%rsp),%rcx
+ movq %rcx,(2*8)(%rax)
+ movq TF_CS(%rsp),%rcx
+ movq %rcx,(1*8)(%rax)
+ movq TF_RIP(%rsp),%rcx
+ movq %rcx,(0*8)(%rax)
+
+ /* Restore. */
+ movq TF_RDX(%rsp),%rdx
+ movq TF_RCX(%rsp),%rcx
+ movq TF_RAX(%rsp),%rax
+
+ /* Zero out the stack we used, RDX+RCX+RAX+IRET. */
+ movq $0,TF_RDX(%rsp)
+ movq $0,TF_RCX(%rsp)
+ movq $0,TF_RAX(%rsp)
+ movq $0,TF_RIP(%rsp)
+ movq $0,TF_CS(%rsp)
+ movq $0,TF_RFLAGS(%rsp)
+ movq $0,TF_RSP(%rsp)
+ movq $0,TF_SS(%rsp)
+
+ movq CPUVAR(SCRATCH),%rsp
+.endm
+
TEXT_USER_BEGIN
IDTVEC(trap00)
ZTRAP(T_DIVIDE)
IDTVEC_END(trap00)
+/*
+ * Handle the SS shadow, CVE-2018-8897.
+ *
+ * We are running on the IST3 stack. If we are under an SS shadow, ignore
+ * the exception and return immediately. Otherwise, copy the iret frame
+ * onto the non-IST stack, and ZTRAP on it as usual.
+ *
+ * IST3 is used temporarily, and is mapped in userland by SVS. It contains
+ * a few secrets, the values of the CPU context. These secrets are zeroed
+ * out when we leave.
+ *
+ * When we ignore an SS shadow, we can't zero out the iret frame. It is
+ * not a problem, because in this particular case, the frame is known not
+ * to contain secrets.
+ */
IDTVEC(trap01)
+#ifndef XEN
+ subq $(TF_REGSIZE+16),%rsp
+
+ /* We clobber only RDX, RCX and RAX. */
+ movq %rdx,TF_RDX(%rsp)
+ movq %rcx,TF_RCX(%rsp)
+ movq %rax,TF_RAX(%rsp)
+
+ testb $SEL_UPL,TF_CS(%rsp)
+ jnz .Luser_dbentry
+
+ movl $MSR_GSBASE,%ecx
+ rdmsr
+ cmpl $VM_MIN_KERNEL_ADDRESS_HIGH32,%edx
+ jae .Lkern_dbentry
+
+ /* SS shadow, ignore the exception. */
+ xorq %rax,%rax
+ movq %rax,%dr6
+
+ /* Restore and zero out. */
+ movq TF_RDX(%rsp),%rdx
+ movq TF_RCX(%rsp),%rcx
+ movq TF_RAX(%rsp),%rax
+ movq $0,TF_RDX(%rsp)
+ movq $0,TF_RCX(%rsp)
+ movq $0,TF_RAX(%rsp)
+
+ addq $(TF_REGSIZE+16),%rsp
+ iretq
+
+.Lkern_dbentry:
+ IST3_LEAVE 0
ZTRAP(T_TRCTRAP)
+
+.Luser_dbentry:
+ swapgs
+ SVS_ENTER_ALTSTACK
+ IST3_LEAVE 1
+ ZTRAP_NJ(T_TRCTRAP)
+ subq $TF_REGSIZE,%rsp
+ INTR_SAVE_GPRS
+ cld
+ SMAP_ENABLE
+ IBRS_ENTER
+ movw %gs,TF_GS(%rsp)
+ movw %fs,TF_FS(%rsp)
+ movw %es,TF_ES(%rsp)
+ movw %ds,TF_DS(%rsp)
+
+ jmp .Lalltraps_noentry
+#else
+ ZTRAP(T_TRCTRAP)
+#endif
IDTVEC_END(trap01)
/*
diff -r 222cfb652f2d -r d5d71f3f5dd0 sys/arch/amd64/amd64/machdep.c
--- a/sys/arch/amd64/amd64/machdep.c Tue May 08 16:47:58 2018 +0000
+++ b/sys/arch/amd64/amd64/machdep.c Tue May 08 17:20:44 2018 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: machdep.c,v 1.303 2018/04/04 12:59:49 maxv Exp $ */
+/* $NetBSD: machdep.c,v 1.304 2018/05/08 17:20:44 maxv Exp $ */
/*
* Copyright (c) 1996, 1997, 1998, 2000, 2006, 2007, 2008, 2011
@@ -110,7 +110,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.303 2018/04/04 12:59:49 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.304 2018/05/08 17:20:44 maxv Exp $");
/* #define XENDEBUG_LOW */
@@ -545,6 +545,14 @@
#endif
cputss->tss.tss_ist[2] = p + PAGE_SIZE - 16;
+ /* DB */
+#ifdef __HAVE_PCPU_AREA
+ p = (vaddr_t)&pcpuarea->ent[cid].ist3;
+#else
+ p = uvm_km_alloc(kernel_map, PAGE_SIZE, 0, UVM_KMF_WIRED|UVM_KMF_ZERO);
+#endif
+ cputss->tss.tss_ist[3] = p + PAGE_SIZE - 16;
+
ci->ci_tss = cputss;
ci->ci_tss_sel = tss_alloc(&cputss->tss);
}
@@ -1773,6 +1781,9 @@
#ifndef XEN
idt_vec_reserve(x);
switch (x) {
+ case 1: /* DB */
+ ist = 4;
+ break;
case 2: /* NMI */
ist = 3;
break;
diff -r 222cfb652f2d -r d5d71f3f5dd0 sys/arch/x86/include/pmap.h
--- a/sys/arch/x86/include/pmap.h Tue May 08 16:47:58 2018 +0000
+++ b/sys/arch/x86/include/pmap.h Tue May 08 17:20:44 2018 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.h,v 1.76 2018/03/04 10:13:08 jdolecek Exp $ */
+/* $NetBSD: pmap.h,v 1.77 2018/05/08 17:20:44 maxv Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -163,6 +163,7 @@
uint8_t ist0[PAGE_SIZE];
uint8_t ist1[PAGE_SIZE];
uint8_t ist2[PAGE_SIZE];
+ uint8_t ist3[PAGE_SIZE];
uint8_t rsp0[2 * PAGE_SIZE];
} __packed;
Home |
Main Index |
Thread Index |
Old Index