Source-Changes-HG archive
[src/netbsd-8]: src/sys/arch/amd64/amd64 Pull up following revision(s) (requested by maxv in ticket #575)
details: https://anonhg.NetBSD.org/src/rev/199c484a364e
branches: netbsd-8
changeset: 851406:199c484a364e
user: snj <snj%NetBSD.org@localhost>
date: Mon Feb 26 00:49:48 2018 +0000
description:
Pull up following revision(s) (requested by maxv in ticket #575):
sys/arch/amd64/amd64/copy.S: 1.28 via patch
sys/arch/amd64/amd64/cpufunc.S: 1.31
Don't fall through functions, explicitly jump instead.
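In other words, the anti-pattern being removed is code that runs off the end of one function into the next one in the file. A minimal before/after sketch (hypothetical labels foo and bar, not code from this commit):

Before, foo has no terminator and execution falls through into bar, an implicit dependency on the layout of the file:

ENTRY(foo)
	xorl	%eax,%eax
	/* FALLTHROUGH into bar */
ENTRY(bar)
	ret
END(bar)

After, foo is closed with END() and the transfer is an explicit jmp, so every symbol has a defined size and a single way in:

ENTRY(foo)
	xorl	%eax,%eax
	jmp	_C_LABEL(bar)
END(foo)
ENTRY(bar)
	ret
END(bar)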
diffstat:
sys/arch/amd64/amd64/copy.S | 41 ++++++++++++++++++++++++++++++-----------
sys/arch/amd64/amd64/cpufunc.S | 15 ++++++++-------
2 files changed, 38 insertions(+), 18 deletions(-)
diffs (246 lines):
diff -r 6b07ff28484a -r 199c484a364e sys/arch/amd64/amd64/copy.S
--- a/sys/arch/amd64/amd64/copy.S Mon Feb 26 00:43:23 2018 +0000
+++ b/sys/arch/amd64/amd64/copy.S Mon Feb 26 00:49:48 2018 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: copy.S,v 1.20.10.1 2017/09/04 20:41:28 snj Exp $ */
+/* $NetBSD: copy.S,v 1.20.10.2 2018/02/26 00:49:48 snj Exp $ */
/*
* Copyright (c) 2001 Wasabi Systems, Inc.
@@ -105,6 +105,7 @@
popq %rdi
leaveq
ret
+END(do_pmap_load)
/*
* Copy routines from and to userland, plus a few more. See the
@@ -172,6 +173,7 @@
.Lkcopy_end:
xorq %rax,%rax
ret
+END(kcopy)
ENTRY(copyout)
DEFERRED_SWITCH_CHECK
@@ -199,6 +201,7 @@
xorl %eax,%eax
ret
DEFERRED_SWITCH_CALL
+END(copyout)
ENTRY(copyin)
DEFERRED_SWITCH_CHECK
@@ -227,21 +230,20 @@
xorl %eax,%eax
ret
DEFERRED_SWITCH_CALL
+END(copyin)
NENTRY(copy_efault)
movq $EFAULT,%rax
-
-/*
- * kcopy_fault is used by kcopy and copy_fault is used by copyin/out.
- *
- * they're distinguished for lazy pmap switching. see trap().
- */
+ ret
+END(copy_efault)
NENTRY(kcopy_fault)
ret
+END(kcopy_fault)
NENTRY(copy_fault)
ret
+END(copy_fault)
ENTRY(copyoutstr)
DEFERRED_SWITCH_CHECK
@@ -282,6 +284,7 @@
movq $ENAMETOOLONG,%rax
jmp copystr_return
DEFERRED_SWITCH_CALL
+END(copyoutstr)
ENTRY(copyinstr)
DEFERRED_SWITCH_CHECK
@@ -315,16 +318,19 @@
xorq %rax,%rax
jmp copystr_return
-2: /* edx is zero -- return EFAULT or ENAMETOOLONG. */
+2: /* rdx is zero -- return EFAULT or ENAMETOOLONG. */
movq $VM_MAXUSER_ADDRESS,%r11
cmpq %r11,%rsi
jae _C_LABEL(copystr_efault)
movq $ENAMETOOLONG,%rax
jmp copystr_return
DEFERRED_SWITCH_CALL
+END(copyinstr)
ENTRY(copystr_efault)
movl $EFAULT,%eax
+ jmp copystr_return
+END(copystr_efault)
ENTRY(copystr_fault)
copystr_return:
@@ -333,8 +339,8 @@
jz 8f
subq %rdx,%r8
movq %r8,(%r9)
-
8: ret
+END(copystr_fault)
ENTRY(copystr)
xchgq %rdi,%rsi
@@ -354,7 +360,7 @@
xorl %eax,%eax
jmp 6f
-4: /* edx is zero -- return ENAMETOOLONG. */
+4: /* rdx is zero -- return ENAMETOOLONG. */
movl $ENAMETOOLONG,%eax
6: /* Set *lencopied and return %eax. */
@@ -364,7 +370,7 @@
movq %r8,(%rcx)
7: ret
-
+END(copystr)
ENTRY(fuswintr)
cmpl $TLBSTATE_VALID,CPUVAR(TLBSTATE)
@@ -380,6 +386,7 @@
movq $0,PCB_ONFAULT(%rcx)
ret
+END(fuswintr)
ENTRY(fubyte)
DEFERRED_SWITCH_CHECK
@@ -395,6 +402,7 @@
movq $0,PCB_ONFAULT(%rcx)
ret
DEFERRED_SWITCH_CALL
+END(fubyte)
ENTRY(suswintr)
cmpl $TLBSTATE_VALID,CPUVAR(TLBSTATE)
@@ -411,6 +419,7 @@
xorq %rax,%rax
movq %rax,PCB_ONFAULT(%rcx)
ret
+END(suswintr)
ENTRY(subyte)
DEFERRED_SWITCH_CHECK
@@ -428,6 +437,7 @@
movq %rax,PCB_ONFAULT(%rcx)
ret
DEFERRED_SWITCH_CALL
+END(subyte)
/*
* These are the same, but must reside at different addresses,
@@ -437,15 +447,18 @@
movq $0,PCB_ONFAULT(%rcx)
movl $-1,%eax
ret
+END(fusuintrfailure)
ENTRY(fusufailure)
movq $0,PCB_ONFAULT(%rcx)
movl $-1,%eax
ret
+END(fusufailure)
ENTRY(fusuaddrfault)
movl $-1,%eax
ret
+END(fusuaddrfault)
/*
* Compare-and-swap the 64-bit integer in the user-space.
@@ -474,6 +487,7 @@
xorq %rax,%rax
ret
DEFERRED_SWITCH_CALL
+END(ucas_64)
/*
* int ucas_32(volatile int32_t *uptr, int32_t old, int32_t new, int32_t *ret);
@@ -500,12 +514,16 @@
xorq %rax,%rax
ret
DEFERRED_SWITCH_CALL
+END(ucas_32)
ENTRY(ucas_efault)
movq $EFAULT,%rax
+ ret
+END(ucas_efault)
NENTRY(ucas_fault)
ret
+END(ucas_fault)
/*
* int ucas_ptr(volatile void **uptr, void *old, void *new, void **ret);
@@ -524,6 +542,7 @@
*/
.section ".rodata"
.globl _C_LABEL(onfault_table)
+
_C_LABEL(onfault_table):
.quad .Lcopyin_start
.quad .Lcopyin_end
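For reference, ENTRY()/NENTRY() and the END() markers added above come from <machine/asm.h>. END() presumably expands to a .size directive, roughly (a sketch from memory, see sys/arch/x86/include/asm.h for the real definition):

#define END(y)	.size y, . - y

With every function carrying an explicit size, a function that falls through its end, or is entered anywhere but its label, is visible to binutils and binary-analysis tools instead of being hidden by section layout.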
diff -r 6b07ff28484a -r 199c484a364e sys/arch/amd64/amd64/cpufunc.S
--- a/sys/arch/amd64/amd64/cpufunc.S Mon Feb 26 00:43:23 2018 +0000
+++ b/sys/arch/amd64/amd64/cpufunc.S Mon Feb 26 00:49:48 2018 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: cpufunc.S,v 1.27 2016/11/27 14:49:21 kamil Exp $ */
+/* $NetBSD: cpufunc.S,v 1.27.8.1 2018/02/26 00:49:48 snj Exp $ */
/*-
* Copyright (c) 1998, 2007, 2008 The NetBSD Foundation, Inc.
@@ -371,8 +371,7 @@
* Load a new GDT pointer (and do any necessary cleanup).
* XXX It's somewhat questionable whether reloading all the segment registers
* is necessary, since the actual descriptor data is not changed except by
- * process creation and exit, both of which clean up via task switches. OTOH,
- * this only happens at run time when the GDT is resized.
+ * process creation and exit, both of which clean up via task switches.
*/
#ifndef XEN
ENTRY(lgdt)
@@ -382,19 +381,21 @@
/* Flush the prefetch q. */
jmp 1f
nop
-1: /* Reload "stale" selectors. */
-#else /* XEN */
+1: jmp _C_LABEL(lgdt_finish)
+END(lgdt)
+#endif
+
/*
* void lgdt_finish(void);
* Reload segments after a GDT change
*/
ENTRY(lgdt_finish)
-#endif /* XEN */
movl $GSEL(GDATA_SEL, SEL_KPL),%eax
movl %eax,%ds
movl %eax,%es
movl %eax,%ss
- /* FALLTHROUGH */
+ jmp _C_LABEL(x86_flush)
+END(lgdt_finish)
/*
* void x86_flush()
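x86_flush() itself lies beyond the diff context shown here. It exists because a mov to a segment register cannot reload %cs; from memory the amd64 version does it with a far return, roughly (a sketch, not part of this commit):

ENTRY(x86_flush)
	popq	%rax		/* grab the return address */
	pushq	$GSEL(GCODE_SEL, SEL_KPL)	/* new %cs selector */
	pushq	%rax
	lretq			/* far return reloads %cs and resumes */
END(x86_flush)

After this change the GDT reload is an explicit chain, lgdt -> lgdt_finish -> x86_flush, with each step ending in a jmp or ret instead of falling into the next function.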