Source-Changes-HG archive


[src/trunk]: src/sys/arch/amd64/amd64 Unconditionally save the segment regis...



details:   https://anonhg.NetBSD.org/src/rev/69569ecc6600
branches:  trunk
changeset: 822549:69569ecc6600
user:      maxv <maxv%NetBSD.org@localhost>
date:      Fri Mar 24 18:03:32 2017 +0000

description:
Unconditionally save the segment registers (because we could have a
kernel %gs and a userland %es/%ds), and explain why T_NMI is a special
case.

Note that checking %gs directly is not a good idea: recent CPUs have the
FSGSBASE instructions, which allow userland to modify the %gs base directly
without going through the kernel. If we ever enable them, we will have to
change this function, since we will no longer be able to test %gs against
VM_MIN_KERNEL_ADDRESS.
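
A minimal sketch of that check in C, for illustration only (the constant
value and the helper name below are assumptions, not the patched code
itself): the handler reads MSR_GSBASE and treats the trap as having come
from a userland GS base whenever the upper 32 bits fall below the start of
the kernel virtual address range.

#include <stdint.h>
#include <stdio.h>

/* Assumed upper 32 bits of NetBSD/amd64's VM_MIN_KERNEL_ADDRESS. */
#define VM_MIN_KERNEL_ADDRESS_HIGH32	0xffff8000u

/*
 * Hypothetical helper: given the raw MSR_GSBASE value (which the real
 * code obtains with rdmsr), decide whether the GS base still belongs to
 * userland, i.e. whether swapgs is needed before calling trap().
 */
static int
needs_swapgs(uint64_t gsbase)
{
	return (uint32_t)(gsbase >> 32) < VM_MIN_KERNEL_ADDRESS_HIGH32;
}

int
main(void)
{
	printf("%d\n", needs_swapgs(0x00007f1234560000ULL));	/* userland: 1 */
	printf("%d\n", needs_swapgs(0xffff800012345678ULL));	/* kernel:   0 */
	return 0;
}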

diffstat:

 sys/arch/amd64/amd64/amd64_trap.S |  47 +++++++++++++++++++++++++-------------
 1 files changed, 31 insertions(+), 16 deletions(-)

diffs (87 lines):

diff -r 519edd64ace7 -r 69569ecc6600 sys/arch/amd64/amd64/amd64_trap.S
--- a/sys/arch/amd64/amd64/amd64_trap.S Fri Mar 24 17:40:44 2017 +0000
+++ b/sys/arch/amd64/amd64/amd64_trap.S Fri Mar 24 18:03:32 2017 +0000
@@ -1,4 +1,4 @@
-/*     $NetBSD: amd64_trap.S,v 1.4 2016/08/07 09:04:55 maxv Exp $      */
+/*     $NetBSD: amd64_trap.S,v 1.5 2017/03/24 18:03:32 maxv Exp $      */
 
 /*-
  * Copyright (c) 1998, 2007, 2008 The NetBSD Foundation, Inc.
@@ -66,7 +66,7 @@
 
 #if 0
 #include <machine/asm.h>
-__KERNEL_RCSID(0, "$NetBSD: amd64_trap.S,v 1.4 2016/08/07 09:04:55 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: amd64_trap.S,v 1.5 2017/03/24 18:03:32 maxv Exp $");
 #endif
 
 /*
@@ -103,39 +103,54 @@
        ZTRAP(T_TRCTRAP)
 IDTVEC_END(trap01)
 
+/*
+ * Non Maskable Interrupts are a special case: they can be triggered even
+ * with interrupts disabled, and once triggered they block further NMIs
+ * until an 'iret' instruction is executed.
+ *
+ * Therefore we don't enable interrupts, because the CPU could switch to
+ * another LWP, call 'iret' and unintentionally leave the NMI mode.
+ *
+ * We need to be careful about %gs too, because it is possible that we were
+ * running in kernel mode with a userland %gs.
+ */
 IDTVEC(trap02)
 #if defined(XEN)
        ZTRAP(T_NMI)
-#else /* defined(XEN) */
-       pushq $0
-       pushq $T_NMI
+#else
+       pushq   $0
+       pushq   $T_NMI
        subq    $TF_REGSIZE,%rsp
        INTR_SAVE_GPRS
-       movl    $MSR_GSBASE,%ecx
-       rdmsr
-       cmpl    $VM_MIN_KERNEL_ADDRESS_HIGH32,%edx
-       jae     1f
-       swapgs
        movw    %gs,TF_GS(%rsp)
        movw    %fs,TF_FS(%rsp)
        movw    %es,TF_ES(%rsp)
        movw    %ds,TF_DS(%rsp)
+
+       movl    $MSR_GSBASE,%ecx
+       rdmsr
+       cmpl    $VM_MIN_KERNEL_ADDRESS_HIGH32,%edx
+       jae     noswapgs
+
+       swapgs
        movq    %rsp,%rdi
        incq    CPUVAR(NTRAP)
        call    _C_LABEL(trap)
-       movw    TF_ES(%rsp),%es
-       movw    TF_DS(%rsp),%ds
        swapgs
-       jmp     2f
-1:
+       jmp     nmileave
+
+noswapgs:
        movq    %rsp,%rdi
        incq    CPUVAR(NTRAP)
        call    _C_LABEL(trap)
-2:
+
+nmileave:
+       movw    TF_ES(%rsp),%es
+       movw    TF_DS(%rsp),%ds
        INTR_RESTORE_GPRS
        addq    $TF_REGSIZE+16,%rsp
        iretq
-#endif /* defined(XEN) */
+#endif
 IDTVEC_END(trap02)
 
 IDTVEC(trap03)
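
For readers who prefer C to AT&T assembly, here is a rough, self-contained
model of the reworked trap02 flow (every type, value and function in it is
invented for illustration; the authoritative code is the assembly above):

#include <stdint.h>
#include <stdio.h>

#define VM_MIN_KERNEL_ADDRESS_HIGH32	0xffff8000u	/* assumed value */

/* Toy stand-ins for the trapframe fields and the kernel entry points. */
struct trapframe {
	uint16_t tf_gs, tf_fs, tf_es, tf_ds;
};

static uint64_t fake_gsbase;		/* models the MSR_GSBASE contents */
static int swapgs_count;		/* counts swapgs toggles */

static void swapgs(void) { swapgs_count++; }
static void trap(struct trapframe *tf) { (void)tf; /* real C handler */ }

/*
 * Model of the patched NMI entry: the segment registers are saved
 * unconditionally, swapgs is applied only when the trap arrived with a
 * userland GS base, and %es/%ds are restored on the common exit path.
 */
static void
nmi_entry(struct trapframe *tf)
{
	/* Unconditional segment save (movw %gs,TF_GS(%rsp), etc.);
	 * 0x23 is just a placeholder selector value for this model. */
	tf->tf_gs = tf->tf_fs = tf->tf_es = tf->tf_ds = 0x23;

	if ((uint32_t)(fake_gsbase >> 32) < VM_MIN_KERNEL_ADDRESS_HIGH32) {
		swapgs();	/* userland GS base: switch to the kernel one */
		trap(tf);
		swapgs();	/* switch back before iretq */
	} else {
		trap(tf);	/* noswapgs: already on the kernel GS base */
	}

	/* nmileave: restore %es/%ds and the GPRs, then iretq. */
}

int
main(void)
{
	struct trapframe tf;

	fake_gsbase = 0x00007f0000001000ULL;	/* looks like userland */
	nmi_entry(&tf);
	printf("swapgs toggles: %d\n", swapgs_count);	/* prints 2 */
	return 0;
}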


