Source-Changes-HG archive
[src/trunk]: src/sys/arch Add support for SMAP on amd64.
details: https://anonhg.NetBSD.org/src/rev/588ba1ff0bf0
branches: trunk
changeset: 356850:588ba1ff0bf0
user: maxv <maxv%NetBSD.org@localhost>
date: Tue Oct 17 06:58:15 2017 +0000
description:
Add support for SMAP on amd64.
PSL_AC is cleared from %rflags in each kernel entry point. In the copy
sections, a copy window is opened and the kernel can touch userland
pages. This window is closed when the kernel is done, either at the end
of the copy sections or in the fault-recovery functions.
This implementation is not yet optimized, because INTRENTRY is a macro and
macros cannot be hotpatched.
Sent on tech-kern@ a month or two ago, tested on a Kabylake.
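The cpu.c and patch.c parts of the change are truncated in the listing below.
As a rough illustration of the detection side only, here is a minimal
user-space sketch (not the committed NetBSD kernel code) that checks the same
CPUID feature bit the kernel consults before enabling CR4.SMAP and hotpatching
the smap_enable/smap_disable stubs; the CPUID_SEF_SMAP constant is defined
locally here for illustration.

/*
 * Sketch: detect SMAP support via CPUID leaf 7, sub-leaf 0, EBX bit 20.
 * Compile with GCC or Clang on x86-64 (uses the <cpuid.h> helper).
 */
#include <stdio.h>
#include <cpuid.h>

#define CPUID_SEF_SMAP	(1u << 20)	/* structured extended feature: SMAP */

int
main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid_count(0x7, 0, &eax, &ebx, &ecx, &edx)) {
		printf("CPUID leaf 7 not supported\n");
		return 1;
	}

	if (ebx & CPUID_SEF_SMAP) {
		/*
		 * On a SMAP-capable CPU the kernel would set CR4.SMAP
		 * (bit 21) and hotpatch the stubs, replacing the
		 * ret+int3+int3 filler with CLAC (0F 01 CA) in
		 * smap_enable and STAC (0F 01 CB) in smap_disable.
		 */
		printf("SMAP supported\n");
	} else {
		/* Without SMAP the stubs stay as a plain ret (no-ops). */
		printf("SMAP not supported\n");
	}
	return 0;
}

With SMAP enabled, STAC sets EFLAGS.AC to open the copy window and CLAC
clears it again, which is why the stubs are called in pairs around the
userland accesses in copy.S below.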
diffstat:
sys/arch/amd64/amd64/copy.S | 60 ++++++++++++++++++++++++++++++++++++++-
sys/arch/amd64/amd64/trap.c | 11 +++++-
sys/arch/amd64/include/frameasm.h | 5 +-
sys/arch/x86/x86/cpu.c | 12 +++++-
sys/arch/x86/x86/patch.c | 29 +++++++++++++++++-
5 files changed, 107 insertions(+), 10 deletions(-)
diffs (truncated from 388 to 300 lines):
diff -r d6b6d0224772 -r 588ba1ff0bf0 sys/arch/amd64/amd64/copy.S
--- a/sys/arch/amd64/amd64/copy.S Tue Oct 17 06:50:00 2017 +0000
+++ b/sys/arch/amd64/amd64/copy.S Tue Oct 17 06:58:15 2017 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: copy.S,v 1.24 2017/08/25 11:35:03 maxv Exp $ */
+/* $NetBSD: copy.S,v 1.25 2017/10/17 06:58:15 maxv Exp $ */
/*
* Copyright (c) 2001 Wasabi Systems, Inc.
@@ -107,6 +107,24 @@
ret
/*
+ * SMAP functions. ret+int3+int3 is patched dynamically to STAC/CLAC.
+ */
+
+ENTRY(smap_enable)
+.Lclacpatch:
+ ret
+ int3
+ int3
+ ret
+
+ENTRY(smap_disable)
+.Lstacpatch:
+ ret
+ int3
+ int3
+ ret
+
+/*
* Copy routines from and to userland, plus a few more. See the
* section 9 manpages for info. Some cases can be optimized more.
*
@@ -185,6 +203,7 @@
cmpq %r8,%rdx
ja _C_LABEL(copy_efault) /* jump if end in kernel space */
+ callq smap_disable
.Lcopyout_start:
movq %rax,%rcx /* length */
shrq $3,%rcx /* count of 8-byte words */
@@ -195,6 +214,7 @@
rep
movsb /* copy remaining bytes */
.Lcopyout_end:
+ callq smap_enable
xorl %eax,%eax
ret
@@ -212,6 +232,7 @@
cmpq %r8,%rdx
ja _C_LABEL(copy_efault) /* j if end in kernel space */
+ callq smap_disable
.Lcopyin_start:
3: /* bcopy(%rsi, %rdi, %rax); */
movq %rax,%rcx
@@ -223,6 +244,7 @@
rep
movsb
.Lcopyin_end:
+ callq smap_enable
xorl %eax,%eax
ret
@@ -241,6 +263,7 @@
ret
NENTRY(copy_fault)
+ callq smap_enable
ret
ENTRY(copyoutstr)
@@ -261,6 +284,7 @@
movq %rax,%r8
1: incq %rdx
+ callq smap_disable
.Lcopyoutstr_start:
1: decq %rdx
jz 2f
@@ -269,6 +293,7 @@
testb %al,%al
jnz 1b
.Lcopyoutstr_end:
+ callq smap_enable
/* Success -- 0 byte reached. */
decq %rdx
@@ -276,6 +301,7 @@
jmp copystr_return
2: /* rdx is zero -- return EFAULT or ENAMETOOLONG. */
+ callq smap_enable
movq $VM_MAXUSER_ADDRESS,%r11
cmpq %r11,%rdi
jae _C_LABEL(copystr_efault)
@@ -301,6 +327,7 @@
movq %rax,%r8
1: incq %rdx
+ callq smap_disable
.Lcopyinstr_start:
1: decq %rdx
jz 2f
@@ -309,6 +336,7 @@
testb %al,%al
jnz 1b
.Lcopyinstr_end:
+ callq smap_enable
/* Success -- 0 byte reached. */
decq %rdx
@@ -316,6 +344,7 @@
jmp copystr_return
2: /* edx is zero -- return EFAULT or ENAMETOOLONG. */
+ callq smap_enable
movq $VM_MAXUSER_ADDRESS,%r11
cmpq %r11,%rsi
jae _C_LABEL(copystr_efault)
@@ -327,6 +356,7 @@
movl $EFAULT,%eax
ENTRY(copystr_fault)
+ callq smap_enable
copystr_return:
/* Set *lencopied and return %eax. */
testq %r9,%r9
@@ -376,7 +406,9 @@
leaq _C_LABEL(fusuintrfailure)(%rip),%r11
movq %r11,PCB_ONFAULT(%rcx)
+ callq smap_disable
movzwl (%rdi),%eax
+ callq smap_enable
movq $0,PCB_ONFAULT(%rcx)
ret
@@ -390,7 +422,9 @@
leaq _C_LABEL(fusufailure)(%rip),%r11
movq %r11,PCB_ONFAULT(%rcx)
+ callq smap_disable
movzbl (%rdi),%eax
+ callq smap_enable
movq $0,PCB_ONFAULT(%rcx)
ret
@@ -406,7 +440,9 @@
leaq _C_LABEL(fusuintrfailure)(%rip),%r11
movq %r11,PCB_ONFAULT(%rcx)
+ callq smap_disable
movw %si,(%rdi)
+ callq smap_enable
xorq %rax,%rax
movq %rax,PCB_ONFAULT(%rcx)
@@ -422,7 +458,9 @@
leaq _C_LABEL(fusufailure)(%rip),%r11
movq %r11,PCB_ONFAULT(%rcx)
+ callq smap_disable
movb %sil,(%rdi)
+ callq smap_enable
xorq %rax,%rax
movq %rax,PCB_ONFAULT(%rcx)
@@ -434,11 +472,13 @@
* because trap.c checks for them.
*/
ENTRY(fusuintrfailure)
+ callq smap_enable
movq $0,PCB_ONFAULT(%rcx)
movl $-1,%eax
ret
ENTRY(fusufailure)
+ callq smap_enable
movq $0,PCB_ONFAULT(%rcx)
movl $-1,%eax
ret
@@ -460,11 +500,13 @@
ja _C_LABEL(ucas_efault)
movq %rsi,%rax
+ callq smap_disable
.Lucas64_start:
/* Perform the CAS */
lock
cmpxchgq %rdx,(%rdi)
.Lucas64_end:
+ callq smap_enable
/*
* Note: %rax is "old" value.
@@ -486,11 +528,13 @@
ja _C_LABEL(ucas_efault)
movl %esi,%eax
+ callq smap_disable
.Lucas32_start:
/* Perform the CAS */
lock
cmpxchgl %edx,(%rdi)
.Lucas32_end:
+ callq smap_enable
/*
* Note: %eax is "old" value.
@@ -505,6 +549,7 @@
movq $EFAULT,%rax
NENTRY(ucas_fault)
+ callq smap_enable
ret
/*
@@ -524,6 +569,19 @@
*/
.section ".rodata"
.globl _C_LABEL(onfault_table)
+ .type _C_LABEL(x86_clacpatch),@object
+ .type _C_LABEL(x86_stacpatch),@object
+
+LABEL(x86_clacpatch)
+ .quad .Lclacpatch
+ .quad 0 /* terminate */
+END(x86_clacpatch)
+
+LABEL(x86_stacpatch)
+ .quad .Lstacpatch
+ .quad 0 /* terminate */
+END(x86_stacpatch)
+
_C_LABEL(onfault_table):
.quad .Lcopyin_start
.quad .Lcopyin_end
diff -r d6b6d0224772 -r 588ba1ff0bf0 sys/arch/amd64/amd64/trap.c
--- a/sys/arch/amd64/amd64/trap.c Tue Oct 17 06:50:00 2017 +0000
+++ b/sys/arch/amd64/amd64/trap.c Tue Oct 17 06:58:15 2017 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: trap.c,v 1.101 2017/09/17 09:41:35 maxv Exp $ */
+/* $NetBSD: trap.c,v 1.102 2017/10/17 06:58:15 maxv Exp $ */
/*
* Copyright (c) 1998, 2000, 2017 The NetBSD Foundation, Inc.
@@ -64,7 +64,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: trap.c,v 1.101 2017/09/17 09:41:35 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: trap.c,v 1.102 2017/10/17 06:58:15 maxv Exp $");
#include "opt_ddb.h"
#include "opt_kgdb.h"
@@ -556,6 +556,13 @@
(void *)cr2);
}
+ if (cr2 < VM_MAXUSER_ADDRESS) {
+ /* SMAP might have brought us here */
+ if (onfault_handler(pcb, frame) == NULL)
+ panic("prevented access to %p (SMAP)",
+ (void *)cr2);
+ }
+
goto faultcommon;
case T_PAGEFLT|T_USER: {
diff -r d6b6d0224772 -r 588ba1ff0bf0 sys/arch/amd64/include/frameasm.h
--- a/sys/arch/amd64/include/frameasm.h Tue Oct 17 06:50:00 2017 +0000
+++ b/sys/arch/amd64/include/frameasm.h Tue Oct 17 06:58:15 2017 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: frameasm.h,v 1.21 2017/09/15 17:32:12 maxv Exp $ */
+/* $NetBSD: frameasm.h,v 1.22 2017/10/17 06:58:15 maxv Exp $ */
#ifndef _AMD64_MACHINE_FRAMEASM_H
#define _AMD64_MACHINE_FRAMEASM_H
@@ -56,7 +56,8 @@
movq %rbp,TF_RBP(%rsp) ; \
movq %rbx,TF_RBX(%rsp) ; \
movq %rax,TF_RAX(%rsp) ; \
- cld
+ cld ; \
+ callq smap_enable
#define INTR_RESTORE_GPRS \
movq TF_RDI(%rsp),%rdi ; \
diff -r d6b6d0224772 -r 588ba1ff0bf0 sys/arch/x86/x86/cpu.c
--- a/sys/arch/x86/x86/cpu.c Tue Oct 17 06:50:00 2017 +0000
+++ b/sys/arch/x86/x86/cpu.c Tue Oct 17 06:58:15 2017 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: cpu.c,v 1.136 2017/09/28 17:48:20 maxv Exp $ */
+/* $NetBSD: cpu.c,v 1.137 2017/10/17 06:58:15 maxv Exp $ */
/*
* Copyright (c) 2000-2012 NetBSD Foundation, Inc.