Source-Changes-HG archive
[src/netbsd-8]: src/sys/arch Pull up the following revisions (via patch), requested by maxv in ticket #610
details: https://anonhg.NetBSD.org/src/rev/78f4a6d380d6
branches: netbsd-8
changeset: 851450:78f4a6d380d6
user: martin <martin@NetBSD.org>
date: Wed Mar 07 14:50:56 2018 +0000
description:
Pull up the following revisions (via patch), requested by maxv in ticket #610:
sys/arch/amd64/amd64/amd64_trap.S 1.8,1.10,1.12 (partial),1.13-1.15,
1.19 (partial),1.20,1.21,1.22,1.24
(via patch)
sys/arch/amd64/amd64/locore.S 1.129 (partial),1.132 (via patch)
sys/arch/amd64/amd64/trap.c 1.97 (partial),1.111 (via patch)
sys/arch/amd64/amd64/vector.S 1.54,1.55 (via patch)
sys/arch/amd64/include/frameasm.h 1.21,1.23 (via patch)
sys/arch/x86/x86/cpu.c 1.138 (via patch)
sys/arch/xen/conf/Makefile.xen 1.45 (via patch)
Rename and reorder several things in amd64_trap.S.
Compile amd64_trap.S as a standalone source file rather than including it from another assembly file.
Introduce nmitrap and doubletrap.
Have the CPU clear PSL_D automatically in the syscall entry point.
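
The last item means programming the SYSCALL flag-mask MSR so the CPU clears
the direction flag (PSL_D) by itself whenever a syscall enters the kernel,
making an explicit "cld" in the syscall entry stub unnecessary. The sketch
below is illustrative only, not the literal cpu.c 1.138 change: it assumes
NetBSD's wrmsr(), MSR_SFMASK and PSL_* definitions, and the exact mask set
in cpu_init_msrs() may differ.

    #include <sys/types.h>
    #include <machine/cpufunc.h>    /* wrmsr() */
    #include <machine/psl.h>        /* PSL_* flag bits */
    #include <machine/specialreg.h> /* MSR_SFMASK */

    /*
     * Hypothetical helper: every flag set in MSR_SFMASK is cleared by
     * the CPU when "syscall" executes, so adding PSL_D guarantees a
     * known string direction on kernel entry.
     */
    static void
    example_init_sfmask(void)
    {
            wrmsr(MSR_SFMASK, PSL_NT|PSL_T|PSL_I|PSL_C|PSL_D);
    }
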
diffstat:
sys/arch/amd64/amd64/amd64_trap.S | 383 +++++++++++++++++++++----------------
sys/arch/amd64/amd64/locore.S | 16 +-
sys/arch/amd64/amd64/trap.c | 54 +++-
sys/arch/amd64/amd64/vector.S | 8 +-
sys/arch/amd64/conf/files.amd64 | 3 +-
sys/arch/amd64/include/frameasm.h | 23 +-
sys/arch/x86/x86/cpu.c | 6 +-
sys/arch/xen/conf/Makefile.xen | 15 +-
8 files changed, 298 insertions(+), 210 deletions(-)
diffs (truncated from 817 to 300 lines):
diff -r 92469d272d8e -r 78f4a6d380d6 sys/arch/amd64/amd64/amd64_trap.S
--- a/sys/arch/amd64/amd64/amd64_trap.S Wed Mar 07 13:46:41 2018 +0000
+++ b/sys/arch/amd64/amd64/amd64_trap.S Wed Mar 07 14:50:56 2018 +0000
@@ -1,11 +1,11 @@
-/* $NetBSD: amd64_trap.S,v 1.5 2017/03/24 18:03:32 maxv Exp $ */
+/* $NetBSD: amd64_trap.S,v 1.5.6.1 2018/03/07 14:50:56 martin Exp $ */
-/*-
- * Copyright (c) 1998, 2007, 2008 The NetBSD Foundation, Inc.
+/*
+ * Copyright (c) 1998, 2007, 2008, 2017 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
- * by Charles M. Hannum and by Andrew Doran.
+ * by Charles M. Hannum, by Andrew Doran and by Maxime Villard.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -64,10 +64,19 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
-#if 0
#include <machine/asm.h>
-__KERNEL_RCSID(0, "$NetBSD: amd64_trap.S,v 1.5 2017/03/24 18:03:32 maxv Exp $");
-#endif
+
+#include "opt_xen.h"
+#include "opt_dtrace.h"
+
+#define ALIGN_TEXT .align 16,0x90
+
+#include <machine/frameasm.h>
+#include <machine/segments.h>
+#include <machine/trap.h>
+#include <machine/specialreg.h>
+
+#include "assym.h"
/*
* Trap and fault vector routines
@@ -78,12 +87,10 @@
* (possibly the next clock tick). Thus, we disable interrupt before checking,
* and only enable them again on the final `iret' or before calling the AST
* handler.
- */
-
-/*****************************************************************************/
+ */
#ifdef XEN
-#define PRE_TRAP movq (%rsp),%rcx ; movq 8(%rsp),%r11 ; addq $0x10,%rsp
+#define PRE_TRAP movq (%rsp),%rcx ; movq 8(%rsp),%r11 ; addq $0x10,%rsp
#else
#define PRE_TRAP
#endif
@@ -118,10 +125,10 @@
#if defined(XEN)
ZTRAP(T_NMI)
#else
- pushq $0
- pushq $T_NMI
+ ZTRAP_NJ(T_NMI)
subq $TF_REGSIZE,%rsp
INTR_SAVE_GPRS
+ cld
movw %gs,TF_GS(%rsp)
movw %fs,TF_FS(%rsp)
movw %es,TF_ES(%rsp)
@@ -130,23 +137,21 @@
movl $MSR_GSBASE,%ecx
rdmsr
cmpl $VM_MIN_KERNEL_ADDRESS_HIGH32,%edx
- jae noswapgs
+ jae .Lnoswapgs
swapgs
movq %rsp,%rdi
incq CPUVAR(NTRAP)
- call _C_LABEL(trap)
+ call _C_LABEL(nmitrap)
swapgs
- jmp nmileave
+ jmp .Lnmileave
-noswapgs:
+.Lnoswapgs:
movq %rsp,%rdi
incq CPUVAR(NTRAP)
- call _C_LABEL(trap)
+ call _C_LABEL(nmitrap)
-nmileave:
- movw TF_ES(%rsp),%es
- movw TF_DS(%rsp),%ds
+.Lnmileave:
INTR_RESTORE_GPRS
addq $TF_REGSIZE+16,%rsp
iretq
@@ -179,21 +184,6 @@
/* Jump to the code hooked in by DTrace. */
movq dtrace_invop_jump_addr, %rax
jmpq *dtrace_invop_jump_addr
-
- .bss
- .globl dtrace_invop_jump_addr
- .align 8
- .type dtrace_invop_jump_addr, @object
- .size dtrace_invop_jump_addr, 8
-dtrace_invop_jump_addr:
- .zero 8
- .globl dtrace_invop_calltrap_addr
- .align 8
- .type dtrace_invop_calltrap_addr, @object
- .size dtrace_invop_calltrap_addr, 8
-dtrace_invop_calltrap_addr:
- .zero 8
- .text
#endif
IDTVEC_END(trap03)
@@ -214,23 +204,55 @@
INTRENTRY
#ifdef DIAGNOSTIC
movl CPUVAR(ILEVEL),%ebx
-#endif /* DIAGNOSTIC */
+#endif
movq %rsp,%rdi
call _C_LABEL(fpudna)
jmp .Lalltraps_checkusr
IDTVEC_END(trap07)
+/*
+ * Double faults execute on a particular stack, and we must not jump out
+ * of it. So don't enable interrupts.
+ */
IDTVEC(trap08)
+#if defined(XEN)
TRAP(T_DOUBLEFLT)
+#else
+ TRAP_NJ(T_DOUBLEFLT)
+ subq $TF_REGSIZE,%rsp
+ INTR_SAVE_GPRS
+ testb $SEL_UPL,TF_CS(%rsp)
+ jz 1f
+ swapgs
+1:
+ cld
+ movw %gs,TF_GS(%rsp)
+ movw %fs,TF_FS(%rsp)
+ movw %es,TF_ES(%rsp)
+ movw %ds,TF_DS(%rsp)
+
+ movq %rsp,%rdi
+ incq CPUVAR(NTRAP)
+ call _C_LABEL(doubletrap)
+
+ INTR_RESTORE_GPRS
+
+ testb $SEL_UPL,TF_CS(%rsp)
+ jz 1f
+ swapgs
+1:
+ addq $TF_REGSIZE+16,%rsp
+ iretq
+#endif
IDTVEC_END(trap08)
IDTVEC(trap09)
ZTRAP(T_FPOPFLT)
IDTVEC_END(trap09)
-IDTVEC(trap0a)
+IDTVEC(trap10)
TRAP(T_TSSFLT)
-IDTVEC_END(trap0a)
+IDTVEC_END(trap10)
#ifdef XEN
/*
@@ -241,21 +263,115 @@
#define check_swapgs alltraps
#endif
-IDTVEC(trap0b) /* #NP() Segment not present */
+IDTVEC(trap11) /* #NP() Segment not present */
TRAP_NJ(T_SEGNPFLT)
jmp check_swapgs
-IDTVEC_END(trap0b)
+IDTVEC_END(trap11)
-IDTVEC(trap0c) /* #SS() Stack exception */
+IDTVEC(trap12) /* #SS() Stack exception */
TRAP_NJ(T_STKFLT)
jmp check_swapgs
-IDTVEC_END(trap0c)
+IDTVEC_END(trap12)
-IDTVEC(trap0d) /* #GP() General protection */
+IDTVEC(trap13) /* #GP() General protection */
TRAP_NJ(T_PROTFLT)
-#ifdef check_swapgs
jmp check_swapgs
-#else
+IDTVEC_END(trap13)
+
+IDTVEC(trap14)
+ TRAP(T_PAGEFLT)
+IDTVEC_END(trap14)
+
+IDTVEC(trap15)
+ ZTRAP_NJ(T_ASTFLT)
+ INTRENTRY
+#ifdef DIAGNOSTIC
+ movl CPUVAR(ILEVEL),%ebx
+#endif
+ jmp .Lalltraps_checkusr
+IDTVEC_END(trap15)
+
+IDTVEC(trap16)
+ ZTRAP_NJ(T_ARITHTRAP)
+.Ldo_fputrap:
+ INTRENTRY
+#ifdef DIAGNOSTIC
+ movl CPUVAR(ILEVEL),%ebx
+#endif
+ movq %rsp,%rdi
+ call _C_LABEL(fputrap)
+ jmp .Lalltraps_checkusr
+IDTVEC_END(trap16)
+
+IDTVEC(trap17)
+ TRAP(T_ALIGNFLT)
+IDTVEC_END(trap17)
+
+IDTVEC(trap18)
+ ZTRAP(T_MCA)
+IDTVEC_END(trap18)
+
+IDTVEC(trap19)
+ ZTRAP_NJ(T_XMM)
+ jmp .Ldo_fputrap
+IDTVEC_END(trap19)
+
+IDTVEC(trap20)
+IDTVEC(trap21)
+IDTVEC(trap22)
+IDTVEC(trap23)
+IDTVEC(trap24)
+IDTVEC(trap25)
+IDTVEC(trap26)
+IDTVEC(trap27)
+IDTVEC(trap28)
+IDTVEC(trap29)
+IDTVEC(trap30)
+IDTVEC(trap31)
+ /* 20 - 31 reserved for future exp */
+ ZTRAP(T_RESERVED)
+IDTVEC_END(trap20)
+IDTVEC_END(trap21)
+IDTVEC_END(trap22)
+IDTVEC_END(trap23)
+IDTVEC_END(trap24)
+IDTVEC_END(trap25)
+IDTVEC_END(trap26)
+IDTVEC_END(trap27)
+IDTVEC_END(trap28)
+IDTVEC_END(trap29)
+IDTVEC_END(trap30)
+IDTVEC_END(trap31)
+
+IDTVEC(intrspurious)
+ ZTRAP_NJ(T_ASTFLT)
+ INTRENTRY
+#ifdef DIAGNOSTIC
+ movl CPUVAR(ILEVEL),%ebx
+#endif
+ jmp .Lalltraps_checkusr
+IDTVEC_END(intrspurious)
+
+
+/*
+ * trap() calls here when it detects a fault in INTRFASTEXIT (loading the
+ * segment registers or during the iret itself). The address of the (possibly
+ * reconstructed) user trap frame is passed as an argument.
+ *
+ * Typically the code will have raised a SIGSEGV which will be actioned
+ * by the code below.
+ */
+ .type _C_LABEL(trap_return_fault_return), @function
+LABEL(trap_return_fault_return)
+ mov %rdi,%rsp /* frame for user return */
+#ifdef DIAGNOSTIC
+ /* We can't recover the saved %rbx, so suppress warning */
+ movl CPUVAR(ILEVEL),%ebx
+#endif
+ jmp .Lalltraps_checkusr
+END(trap_return_fault_return)
+
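
For context on "Introduce nmitrap and doubletrap": the stubs above load the
trap-frame address into %rdi and call dedicated C handlers instead of the
generic trap(), so NMIs and double faults stay on the stack they arrived on.
A hypothetical outline of the handler shape (not the trap.c 1.111 code):

    #include <sys/systm.h>      /* printf(), panic() */
    #include <machine/frame.h>  /* struct trapframe */

    /*
     * Hypothetical placeholder: note the NMI and return on the same
     * stack it was delivered on.
     */
    void
    nmitrap(struct trapframe *frame)
    {
            printf("NMI at rip %#lx\n", (unsigned long)frame->tf_rip);
    }

    /*
     * A double fault is not recoverable: report the faulting context
     * and stop.
     */
    void
    doubletrap(struct trapframe *frame)
    {
            panic("double fault: rip %#lx rsp %#lx",
                (unsigned long)frame->tf_rip, (unsigned long)frame->tf_rsp);
    }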