Source-Changes-HG archive
[src/trunk]: src/sys/arch Implement a real hotpatch feature.
details: https://anonhg.NetBSD.org/src/rev/5546e509b73b
branches: trunk
changeset: 828847:5546e509b73b
user: maxv <maxv%NetBSD.org@localhost>
date: Sun Jan 07 12:42:46 2018 +0000
description:
Implement a real hotpatch feature.
Define a HOTPATCH() macro that puts a label and additional information
in the new .rodata.hotpatch kernel section. In patch.c, scan the section
and patch what needs to be patched. It is now possible to hotpatch the
contents of a macro.
SMAP is switched to use this new system; this saves a call+ret in each
kernel entry/exit point.
Many other operating systems do the same.
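
[Editorial note] For context, here is a minimal sketch of how such a mechanism
can be wired together. Everything below is illustrative: the entry layout, the
names (struct hotpatch, __rodata_hotpatch_start/_end, hotpatch_apply) and the
macro body are assumptions made for this example, not the actual definitions
from frameasm.h or patch.c in this commit.

/*
 * Hypothetical sketch only -- names and layout are assumptions, not the
 * real NetBSD definitions.  The HOTPATCH() macro is meant to be used from
 * assembly sources run through cpp: it marks the patchable instructions at
 * the call site and records (name, size, address) in .rodata.hotpatch.
 */
#define HOTPATCH(name, size)				  \
123:							; \
	.pushsection	.rodata.hotpatch, "a"		; \
	.byte		name				; \
	.byte		size				; \
	.quad		123b				; \
	.popsection

/* C side: one entry per HOTPATCH() site, matching the layout above. */
#include <stdint.h>
#include <stddef.h>
#include <string.h>

struct hotpatch {
	uint8_t  name;		/* which patch applies at this site */
	uint8_t  size;		/* bytes reserved at the site */
	uint64_t addr;		/* address of the patchable bytes */
} __attribute__((packed));

/* Section bounds, assumed to be exported by the kernel linker script. */
extern struct hotpatch __rodata_hotpatch_start[];
extern struct hotpatch __rodata_hotpatch_end[];

/*
 * Scan .rodata.hotpatch and overwrite every matching site with the
 * replacement bytes (e.g. the CLAC/STAC opcodes when SMAP is present).
 * Write protection and instruction-cache synchronization are omitted.
 */
static void
hotpatch_apply(uint8_t name, const uint8_t *bytes, size_t size)
{
	struct hotpatch *hp;

	for (hp = __rodata_hotpatch_start; hp < __rodata_hotpatch_end; hp++) {
		if (hp->name == name && size <= hp->size)
			memcpy((void *)(uintptr_t)hp->addr, bytes, size);
	}
}

As the diff below shows, the callq smap_enable/smap_disable calls are replaced
by SMAP_ENABLE/SMAP_DISABLE macros from frameasm.h, so the patched instructions
end up inline at each kernel entry/exit point; that is the call+ret the commit
message refers to.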
diffstat:
sys/arch/amd64/amd64/amd64_trap.S | 6 +-
sys/arch/amd64/amd64/copy.S | 88 ++++++++++----------------------
sys/arch/amd64/conf/kern.ldscript | 10 +++-
sys/arch/amd64/conf/kern.ldscript.Xen | 10 +++-
sys/arch/amd64/conf/kern.ldscript.kaslr | 10 +++-
sys/arch/amd64/include/frameasm.h | 23 +++++++-
sys/arch/i386/conf/kern.ldscript | 10 +++-
sys/arch/i386/conf/kern.ldscript.4MB | 10 +++-
sys/arch/i386/conf/kern.ldscript.Xen | 10 +++-
sys/arch/i386/include/frameasm.h | 10 +++-
sys/arch/x86/x86/patch.c | 49 +++++++++++++-----
11 files changed, 150 insertions(+), 86 deletions(-)
diffs (truncated from 603 to 300 lines):
diff -r 4629f8c3b65e -r 5546e509b73b sys/arch/amd64/amd64/amd64_trap.S
--- a/sys/arch/amd64/amd64/amd64_trap.S Sun Jan 07 12:29:25 2018 +0000
+++ b/sys/arch/amd64/amd64/amd64_trap.S Sun Jan 07 12:42:46 2018 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: amd64_trap.S,v 1.15 2018/01/06 08:44:01 maxv Exp $ */
+/* $NetBSD: amd64_trap.S,v 1.16 2018/01/07 12:42:46 maxv Exp $ */
/*
* Copyright (c) 1998, 2007, 2008, 2017 The NetBSD Foundation, Inc.
@@ -66,7 +66,7 @@
#if 0
#include <machine/asm.h>
-__KERNEL_RCSID(0, "$NetBSD: amd64_trap.S,v 1.15 2018/01/06 08:44:01 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: amd64_trap.S,v 1.16 2018/01/07 12:42:46 maxv Exp $");
#endif
/*
@@ -122,7 +122,7 @@
subq $TF_REGSIZE,%rsp
INTR_SAVE_GPRS
cld
- callq smap_enable
+ SMAP_ENABLE
movw %gs,TF_GS(%rsp)
movw %fs,TF_FS(%rsp)
movw %es,TF_ES(%rsp)
diff -r 4629f8c3b65e -r 5546e509b73b sys/arch/amd64/amd64/copy.S
--- a/sys/arch/amd64/amd64/copy.S Sun Jan 07 12:29:25 2018 +0000
+++ b/sys/arch/amd64/amd64/copy.S Sun Jan 07 12:42:46 2018 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: copy.S,v 1.28 2017/11/01 09:17:28 maxv Exp $ */
+/* $NetBSD: copy.S,v 1.29 2018/01/07 12:42:46 maxv Exp $ */
/*
* Copyright (c) 2001 Wasabi Systems, Inc.
@@ -108,26 +108,6 @@
END(do_pmap_load)
/*
- * SMAP functions. ret+int3+int3 is patched dynamically to STAC/CLAC.
- */
-
-ENTRY(smap_enable)
-.Lclacpatch:
- ret
- int3
- int3
- ret
-END(smap_enable)
-
-ENTRY(smap_disable)
-.Lstacpatch:
- ret
- int3
- int3
- ret
-END(smap_disable)
-
-/*
* Copy routines from and to userland, plus a few more. See the
* section 9 manpages for info. Some cases can be optimized more.
*
@@ -207,7 +187,7 @@
cmpq %r8,%rdx
ja _C_LABEL(copy_efault) /* jump if end in kernel space */
- callq smap_disable
+ SMAP_DISABLE
.Lcopyout_start:
movq %rax,%rcx /* length */
shrq $3,%rcx /* count of 8-byte words */
@@ -218,7 +198,7 @@
rep
movsb /* copy remaining bytes */
.Lcopyout_end:
- callq smap_enable
+ SMAP_ENABLE
xorl %eax,%eax
ret
@@ -237,7 +217,7 @@
cmpq %r8,%rdx
ja _C_LABEL(copy_efault) /* j if end in kernel space */
- callq smap_disable
+ SMAP_DISABLE
.Lcopyin_start:
3: /* bcopy(%rsi, %rdi, %rax); */
movq %rax,%rcx
@@ -249,7 +229,7 @@
rep
movsb
.Lcopyin_end:
- callq smap_enable
+ SMAP_ENABLE
xorl %eax,%eax
ret
@@ -266,7 +246,7 @@
END(kcopy_fault)
NENTRY(copy_fault)
- callq smap_enable
+ SMAP_ENABLE
ret
END(copy_fault)
@@ -288,7 +268,7 @@
movq %rax,%r8
1: incq %rdx
- callq smap_disable
+ SMAP_DISABLE
.Lcopyoutstr_start:
1: decq %rdx
jz 2f
@@ -297,7 +277,7 @@
testb %al,%al
jnz 1b
.Lcopyoutstr_end:
- callq smap_enable
+ SMAP_ENABLE
/* Success -- 0 byte reached. */
decq %rdx
@@ -305,7 +285,7 @@
jmp copystr_return
2: /* rdx is zero -- return EFAULT or ENAMETOOLONG. */
- callq smap_enable
+ SMAP_ENABLE
movq $VM_MAXUSER_ADDRESS,%r11
cmpq %r11,%rdi
jae _C_LABEL(copystr_efault)
@@ -332,7 +312,7 @@
movq %rax,%r8
1: incq %rdx
- callq smap_disable
+ SMAP_DISABLE
.Lcopyinstr_start:
1: decq %rdx
jz 2f
@@ -341,7 +321,7 @@
testb %al,%al
jnz 1b
.Lcopyinstr_end:
- callq smap_enable
+ SMAP_ENABLE
/* Success -- 0 byte reached. */
decq %rdx
@@ -349,7 +329,7 @@
jmp copystr_return
2: /* rdx is zero -- return EFAULT or ENAMETOOLONG. */
- callq smap_enable
+ SMAP_ENABLE
movq $VM_MAXUSER_ADDRESS,%r11
cmpq %r11,%rsi
jae _C_LABEL(copystr_efault)
@@ -364,7 +344,7 @@
END(copystr_efault)
ENTRY(copystr_fault)
- callq smap_enable
+ SMAP_ENABLE
copystr_return:
/* Set *lencopied and return %eax. */
testq %r9,%r9
@@ -414,9 +394,9 @@
leaq _C_LABEL(fusuintrfailure)(%rip),%r11
movq %r11,PCB_ONFAULT(%rcx)
- callq smap_disable
+ SMAP_DISABLE
movzwl (%rdi),%eax
- callq smap_enable
+ SMAP_ENABLE
movq $0,PCB_ONFAULT(%rcx)
ret
@@ -431,9 +411,9 @@
leaq _C_LABEL(fusufailure)(%rip),%r11
movq %r11,PCB_ONFAULT(%rcx)
- callq smap_disable
+ SMAP_DISABLE
movzbl (%rdi),%eax
- callq smap_enable
+ SMAP_ENABLE
movq $0,PCB_ONFAULT(%rcx)
ret
@@ -450,9 +430,9 @@
leaq _C_LABEL(fusuintrfailure)(%rip),%r11
movq %r11,PCB_ONFAULT(%rcx)
- callq smap_disable
+ SMAP_DISABLE
movw %si,(%rdi)
- callq smap_enable
+ SMAP_ENABLE
xorq %rax,%rax
movq %rax,PCB_ONFAULT(%rcx)
@@ -469,9 +449,9 @@
leaq _C_LABEL(fusufailure)(%rip),%r11
movq %r11,PCB_ONFAULT(%rcx)
- callq smap_disable
+ SMAP_DISABLE
movb %sil,(%rdi)
- callq smap_enable
+ SMAP_ENABLE
xorq %rax,%rax
movq %rax,PCB_ONFAULT(%rcx)
@@ -484,14 +464,14 @@
* because trap.c checks for them.
*/
ENTRY(fusuintrfailure)
- callq smap_enable
+ SMAP_ENABLE
movq $0,PCB_ONFAULT(%rcx)
movl $-1,%eax
ret
END(fusuintrfailure)
ENTRY(fusufailure)
- callq smap_enable
+ SMAP_ENABLE
movq $0,PCB_ONFAULT(%rcx)
movl $-1,%eax
ret
@@ -515,13 +495,13 @@
ja _C_LABEL(ucas_efault)
movq %rsi,%rax
- callq smap_disable
+ SMAP_DISABLE
.Lucas64_start:
/* Perform the CAS */
lock
cmpxchgq %rdx,(%rdi)
.Lucas64_end:
- callq smap_enable
+ SMAP_ENABLE
/*
* Note: %rax is "old" value.
@@ -544,13 +524,13 @@
ja _C_LABEL(ucas_efault)
movl %esi,%eax
- callq smap_disable
+ SMAP_DISABLE
.Lucas32_start:
/* Perform the CAS */
lock
cmpxchgl %edx,(%rdi)
.Lucas32_end:
- callq smap_enable
+ SMAP_ENABLE
/*
* Note: %eax is "old" value.
@@ -568,7 +548,7 @@
END(ucas_efault)
NENTRY(ucas_fault)
- callq smap_enable
+ SMAP_ENABLE
ret
END(ucas_fault)
@@ -589,18 +569,6 @@
*/
.section ".rodata"
.globl _C_LABEL(onfault_table)
- .type _C_LABEL(x86_clacpatch),@object
- .type _C_LABEL(x86_stacpatch),@object
-
-LABEL(x86_clacpatch)
- .quad .Lclacpatch
- .quad 0 /* terminate */
-END(x86_clacpatch)
-
-LABEL(x86_stacpatch)
- .quad .Lstacpatch
- .quad 0 /* terminate */
-END(x86_stacpatch)
_C_LABEL(onfault_table):
.quad .Lcopyin_start
diff -r 4629f8c3b65e -r 5546e509b73b sys/arch/amd64/conf/kern.ldscript
--- a/sys/arch/amd64/conf/kern.ldscript Sun Jan 07 12:29:25 2018 +0000
+++ b/sys/arch/amd64/conf/kern.ldscript Sun Jan 07 12:42:46 2018 +0000
@@ -1,4 +1,4 @@