Source-Changes-HG archive
[src/trunk]: src/sys/arch/x86/include Allow for non-inlined definitions for RUMP
details: https://anonhg.NetBSD.org/src/rev/0441085f3e54
branches: trunk
changeset: 784240:0441085f3e54
user: christos <christos@NetBSD.org>
date: Tue Jan 22 22:09:44 2013 +0000
description:
Allow for non-inlined definitions for RUMP
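For context, a condensed sketch of the macro scheme this change introduces, abbreviated from the diff below to a single function (the real header declares all four __cpu_simple_* operations):

/*
 * Sketch of the post-change structure of lock.h.  In a hard
 * (non-RUMP) kernel, SPINLOCK_INLINE expands to nothing and
 * SPINLOCK_BODY stays undefined, so the header emits plain
 * prototypes and the definitions are supplied out of line.
 * Everywhere else the functions remain static inlines, as before.
 */
#ifdef _HARDKERNEL
#define	SPINLOCK_INLINE				/* prototypes only */
#else
#define	SPINLOCK_BODY				/* emit the bodies below */
#define	SPINLOCK_INLINE	static __inline __unused
#endif

SPINLOCK_INLINE void __cpu_simple_lock(__cpu_simple_lock_t *);

#ifdef SPINLOCK_BODY
SPINLOCK_INLINE void
__cpu_simple_lock(__cpu_simple_lock_t *lockp)
{
	/* spin until the lock byte reads unlocked, then take it */
}
#endif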
diffstat:
sys/arch/x86/include/lock.h | 52 +++++++++++++++++++-------------------------
1 file changed, 22 insertions(+), 30 deletions(-)
diffs (95 lines):
diff -r b927a40d4fa7 -r 0441085f3e54 sys/arch/x86/include/lock.h
--- a/sys/arch/x86/include/lock.h Tue Jan 22 21:59:52 2013 +0000
+++ b/sys/arch/x86/include/lock.h Tue Jan 22 22:09:44 2013 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: lock.h,v 1.26 2012/10/11 11:12:21 apb Exp $ */
+/* $NetBSD: lock.h,v 1.27 2013/01/22 22:09:44 christos Exp $ */
/*-
* Copyright (c) 2000, 2006 The NetBSD Foundation, Inc.
@@ -65,33 +65,25 @@
}
#ifdef _HARDKERNEL
-
-#include <machine/cpufunc.h>
-
-void __cpu_simple_lock_init(__cpu_simple_lock_t *);
-void __cpu_simple_lock(__cpu_simple_lock_t *);
-int __cpu_simple_lock_try(__cpu_simple_lock_t *);
-void __cpu_simple_unlock(__cpu_simple_lock_t *);
-
-#define SPINLOCK_SPIN_HOOK /* nothing */
+# include <machine/cpufunc.h>
+# define SPINLOCK_SPIN_HOOK /* nothing */
+# ifdef SPINLOCK_BACKOFF_HOOK
+# undef SPINLOCK_BACKOFF_HOOK
+# endif
+# define SPINLOCK_BACKOFF_HOOK x86_pause()
+# define SPINLOCK_INLINE
+#else /* !_HARDKERNEL */
+# define SPINLOCK_BODY
+# define SPINLOCK_INLINE static __inline __unused
+#endif /* _HARDKERNEL */
-#ifdef SPINLOCK_BACKOFF_HOOK
-#undef SPINLOCK_BACKOFF_HOOK
-#endif
-#define SPINLOCK_BACKOFF_HOOK x86_pause()
-
-#else
+SPINLOCK_INLINE void __cpu_simple_lock_init(__cpu_simple_lock_t *);
+SPINLOCK_INLINE void __cpu_simple_lock(__cpu_simple_lock_t *);
+SPINLOCK_INLINE int __cpu_simple_lock_try(__cpu_simple_lock_t *);
+SPINLOCK_INLINE void __cpu_simple_unlock(__cpu_simple_lock_t *);
-static __inline void __cpu_simple_lock_init(__cpu_simple_lock_t *)
- __unused;
-static __inline void __cpu_simple_lock(__cpu_simple_lock_t *)
- __unused;
-static __inline int __cpu_simple_lock_try(__cpu_simple_lock_t *)
- __unused;
-static __inline void __cpu_simple_unlock(__cpu_simple_lock_t *)
- __unused;
-
-static __inline void
+#ifdef SPINLOCK_BODY
+SPINLOCK_INLINE void
__cpu_simple_lock_init(__cpu_simple_lock_t *lockp)
{
@@ -99,7 +91,7 @@
__insn_barrier();
}
-static __inline int
+SPINLOCK_INLINE int
__cpu_simple_lock_try(__cpu_simple_lock_t *lockp)
{
uint8_t val;
@@ -112,7 +104,7 @@
return val == __SIMPLELOCK_UNLOCKED;
}
-static __inline void
+SPINLOCK_INLINE void
__cpu_simple_lock(__cpu_simple_lock_t *lockp)
{
@@ -173,7 +165,7 @@
* reordered, however stores act as load fences, meaning that
* loads can not be reordered around stores.
*/
-static __inline void
+SPINLOCK_INLINE void
__cpu_simple_unlock(__cpu_simple_lock_t *lockp)
{
@@ -181,6 +173,6 @@
*lockp = __SIMPLELOCK_UNLOCKED;
}
-#endif /* _HARDKERNEL */
+#endif /* SPINLOCK_BODY */
#endif /* _X86_LOCK_H_ */
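The practical effect is that the inlining decision becomes a compile-time hook rather than being hard-wired into each declaration. A minimal, hypothetical sketch of how that hook could be consumed (the companion change that actually uses it is not part of this diff): a source file built with SPINLOCK_INLINE empty, i.e. the _HARDKERNEL branch above, can emit the lock operations as ordinary external, non-inlined definitions by defining SPINLOCK_BODY before including the header.

/*
 * spinlock.c (hypothetical, for illustration only): with
 * SPINLOCK_INLINE expanding to nothing, pre-defining SPINLOCK_BODY
 * makes the function bodies guarded by #ifdef SPINLOCK_BODY come out
 * as plain external definitions, giving the linker real symbols for
 * __cpu_simple_lock() and friends instead of inlined copies.
 */
#define	SPINLOCK_BODY
#include <machine/lock.h>

Note that the _HARDKERNEL branch deliberately leaves SPINLOCK_BODY undefined, so such a pre-definition does not conflict with the header.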