Source-Changes-HG archive
[src/trunk]: src/sys/arch/amd64/amd64 Don't fall through functions, explicitly jump instead
details: https://anonhg.NetBSD.org/src/rev/1f6a1c493ceb
branches: trunk
changeset: 357230:1f6a1c493ceb
user: maxv <maxv@NetBSD.org>
date: Wed Nov 01 09:17:28 2017 +0000
description:
Don't fall through functions; explicitly jump instead. While here, don't
call smap_enable twice (harmless), and add END() markers.
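For readers unfamiliar with the idiom: before this change, several error
handlers had no terminating instruction and relied on execution falling
off the end of one symbol into the next. A minimal before/after sketch
(illustrative names, not the committed code; assumes <machine/asm.h> for
the NENTRY/END macros and the generated assym.h for EFAULT):

	#include <machine/asm.h>
	#include "assym.h"

	/* Before: foo_efault falls through into foo_fault. */
	NENTRY(foo_efault)
		movq	$EFAULT,%rax
		/* FALLTHROUGH */
	NENTRY(foo_fault)
		callq	smap_enable
		ret

	/*
	 * After: each handler returns explicitly and is closed with
	 * END(), which emits a .size directive so debuggers and ddb
	 * see correct function boundaries. (Only one of the two
	 * variants would exist in a real file.)
	 */
	NENTRY(foo_efault)
		movq	$EFAULT,%rax
		ret
	END(foo_efault)
	NENTRY(foo_fault)
		callq	smap_enable
		ret
	END(foo_fault)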
diffstat:
sys/arch/amd64/amd64/copy.S | 19 +++++++++++--------
sys/arch/amd64/amd64/cpufunc.S | 15 ++++++++-------
2 files changed, 19 insertions(+), 15 deletions(-)
diffs (111 lines):
diff -r 0067216b6bc3 -r 1f6a1c493ceb sys/arch/amd64/amd64/copy.S
--- a/sys/arch/amd64/amd64/copy.S Wed Nov 01 08:44:34 2017 +0000
+++ b/sys/arch/amd64/amd64/copy.S Wed Nov 01 09:17:28 2017 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: copy.S,v 1.27 2017/10/30 17:06:42 maxv Exp $ */
+/* $NetBSD: copy.S,v 1.28 2017/11/01 09:17:28 maxv Exp $ */
/*
* Copyright (c) 2001 Wasabi Systems, Inc.
@@ -258,19 +258,17 @@
NENTRY(copy_efault)
movq $EFAULT,%rax
-
-/*
- * kcopy_fault is used by kcopy and copy_fault is used by copyin/out.
- *
- * they're distinguished for lazy pmap switching. see trap().
- */
+ ret
+END(copy_efault)
NENTRY(kcopy_fault)
ret
+END(kcopy_fault)
NENTRY(copy_fault)
callq smap_enable
ret
+END(copy_fault)
ENTRY(copyoutstr)
DEFERRED_SWITCH_CHECK
@@ -362,6 +360,8 @@
ENTRY(copystr_efault)
movl $EFAULT,%eax
+ jmp copystr_return
+END(copystr_efault)
ENTRY(copystr_fault)
callq smap_enable
@@ -371,8 +371,8 @@
jz 8f
subq %rdx,%r8
movq %r8,(%r9)
-
8: ret
+END(copystr_fault)
ENTRY(copystr)
xchgq %rdi,%rsi
@@ -564,10 +564,13 @@
ENTRY(ucas_efault)
movq $EFAULT,%rax
+ ret
+END(ucas_efault)
NENTRY(ucas_fault)
callq smap_enable
ret
+END(ucas_fault)
/*
* int ucas_ptr(volatile void **uptr, void *old, void *new, void **ret);
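A note on the copystr hunks above: the EFAULT path now jumps over the
smap_enable call into the common tail that stores the copied length. A
reconstructed sketch of the result (the copystr_return label is inferred
from the jump target, and the testq line is assumed context that the
hunk does not show):

	ENTRY(copystr_efault)
		movl	$EFAULT,%eax
		jmp	copystr_return	/* skip copystr_fault's smap_enable */
	END(copystr_efault)

	ENTRY(copystr_fault)
		callq	smap_enable
	copystr_return:			/* label inferred from the jmp above */
		testq	%r9,%r9		/* assumed: lencopied pointer optional */
		jz	8f
		subq	%rdx,%r8	/* %r8 = bytes actually copied */
		movq	%r8,(%r9)
	8:	ret
	END(copystr_fault)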
diff -r 0067216b6bc3 -r 1f6a1c493ceb sys/arch/amd64/amd64/cpufunc.S
--- a/sys/arch/amd64/amd64/cpufunc.S Wed Nov 01 08:44:34 2017 +0000
+++ b/sys/arch/amd64/amd64/cpufunc.S Wed Nov 01 09:17:28 2017 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: cpufunc.S,v 1.30 2017/10/30 17:06:42 maxv Exp $ */
+/* $NetBSD: cpufunc.S,v 1.31 2017/11/01 09:17:28 maxv Exp $ */
/*
* Copyright (c) 1998, 2007, 2008 The NetBSD Foundation, Inc.
@@ -424,8 +424,7 @@
* Load a new GDT pointer (and do any necessary cleanup).
* XXX It's somewhat questionable whether reloading all the segment registers
* is necessary, since the actual descriptor data is not changed except by
- * process creation and exit, both of which clean up via task switches. OTOH,
- * this only happens at run time when the GDT is resized.
+ * process creation and exit, both of which clean up via task switches.
*/
#ifndef XEN
ENTRY(lgdt)
@@ -435,19 +434,21 @@
/* Flush the prefetch q. */
jmp 1f
nop
-1: /* Reload "stale" selectors. */
-#else /* XEN */
+1: jmp _C_LABEL(lgdt_finish)
+END(lgdt)
+#endif
+
/*
* void lgdt_finish(void);
* Reload segments after a GDT change
*/
ENTRY(lgdt_finish)
-#endif /* XEN */
movl $GSEL(GDATA_SEL, SEL_KPL),%eax
movl %eax,%ds
movl %eax,%es
movl %eax,%ss
- /* FALLTHROUGH */
+ jmp _C_LABEL(x86_flush)
+END(lgdt_finish)
/*
* void x86_flush()
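To summarize the cpufunc.S restructuring: lgdt no longer falls across an
#ifdef boundary into lgdt_finish. The native (!XEN) lgdt now ends with an
explicit jump, lgdt_finish becomes an ordinary function shared with the
Xen build, and the chain ends in x86_flush, which reloads %cs. An
illustrative sketch of the resulting flow (the lgdt and x86_flush bodies
are recalled from this file around the same revision, not part of the
hunks above, so treat them as approximate):

	ENTRY(lgdt)			/* native only (#ifndef XEN) */
		lgdt	(%rdi)		/* load the new GDT pointer */
		jmp	1f		/* flush the prefetch queue */
		nop
	1:	jmp	_C_LABEL(lgdt_finish)
	END(lgdt)

	ENTRY(lgdt_finish)		/* shared by native and Xen */
		movl	$GSEL(GDATA_SEL, SEL_KPL),%eax
		movl	%eax,%ds
		movl	%eax,%es
		movl	%eax,%ss
		jmp	_C_LABEL(x86_flush)
	END(lgdt_finish)

	ENTRY(x86_flush)		/* reload %cs via a far return */
		popq	%rax		/* caller's return address */
		pushq	$GSEL(GCODE_SEL, SEL_KPL)
		pushq	%rax
		lretq			/* back to caller with fresh %cs */
	END(x86_flush)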