Source-Changes-HG archive
[src/nathanw_sa]: src/lib/libpthread The beginnings of a scheduler activation...
details: https://anonhg.NetBSD.org/src/rev/4e9259f6fb93
branches: nathanw_sa
changeset: 504564:4e9259f6fb93
user: nathanw <nathanw%NetBSD.org@localhost>
date: Mon Mar 05 23:51:52 2001 +0000
description:
The beginnings of a scheduler activations-based pthread library.
diffstat:
lib/libpthread/TODO | 24 +
lib/libpthread/arch/i386/genassym.cf | 20 +
lib/libpthread/arch/i386/pthread_md.h | 20 +
lib/libpthread/arch/i386/pthread_switch.S | 191 ++++++++++++
lib/libpthread/genassym.sh | 155 +++++++++
lib/libpthread/pthread.c | 472 ++++++++++++++++++++++++++++++
lib/libpthread/pthread.h | 83 +++++
lib/libpthread/pthread_int.h | 195 ++++++++++++
lib/libpthread/pthread_lock.c | 101 ++++++
lib/libpthread/pthread_mutex.c | 159 ++++++++++
lib/libpthread/pthread_mutex.h | 28 +
lib/libpthread/pthread_run.c | 134 ++++++++
lib/libpthread/pthread_sa.c | 415 ++++++++++++++++++++++++++
lib/libpthread/pthread_sig.c | 235 ++++++++++++++
lib/libpthread/pthread_stack.c | 115 +++++++
lib/libpthread/sched.c | 59 +++
lib/libpthread/sched.h | 41 ++
lib/libpthread/shlib_version | 5 +
18 files changed, 2452 insertions(+), 0 deletions(-)
diffs (truncated from 2524 to 300 lines):
diff -r 2b096c07e8e0 -r 4e9259f6fb93 lib/libpthread/TODO
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/lib/libpthread/TODO Mon Mar 05 23:51:52 2001 +0000
@@ -0,0 +1,24 @@
+
+- priority scheduling!
+
+- non-i386 platform support.
+
+- stress-test spinlock-preemption handling.
+
+- Mutexes should have an internal simple_lock, not counted against a
+ per-thread spinlock, so that there can be an optimistic fast-path lock.
+
+- Supporting different mutex types would be nice (normal, debugging,
+ recursive, etc).
+
+- Currently, each thread uses two real pages of memory: one at the top
+ of the stack for actual stack data, and one at the bottom for the
+ pthread_st. If we can get suitable space above the initial stack for
+ main(), we can cut this to one page per thread. Perhaps crt0 should
+ do something different (give us more space) if libpthread is linked
+ in?
+
+- Figure out whether/how to expose the inline version of
+ pthread_self().
+
+- Look at regression tests in Proven's pthreads package.
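The mutex TODO item above amounts to embedding a simple lock word in the
mutex itself, so that the uncontended case is a single atomic operation
and the per-thread spinlock count is never touched on the fast path. A
minimal C11 sketch of that idea (all names and the layout here are
hypothetical; this is not the pthread_mutex.c code from this changeset):

#include <stdatomic.h>

struct pt_mutex {                       /* hypothetical layout */
	atomic_flag	ptm_lock;       /* internal simple lock word */
	/* ... owner, waiter queue, etc. ... */
};

static void
pt_mutex_lock_slow(struct pt_mutex *m)
{
	/* Placeholder slow path: spin until the lock is released.
	 * A real implementation would queue the thread and switch away. */
	while (atomic_flag_test_and_set_explicit(&m->ptm_lock,
	    memory_order_acquire))
		continue;
}

static inline void
pt_mutex_lock(struct pt_mutex *m)
{
	/* Fast path: optimistic uncontended acquire; one atomic op. */
	if (!atomic_flag_test_and_set_explicit(&m->ptm_lock,
	    memory_order_acquire))
		return;
	/* Contended: fall back to the slow path. */
	pt_mutex_lock_slow(m);
}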
diff -r 2b096c07e8e0 -r 4e9259f6fb93 lib/libpthread/arch/i386/genassym.cf
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/lib/libpthread/arch/i386/genassym.cf Mon Mar 05 23:51:52 2001 +0000
@@ -0,0 +1,20 @@
+# $Id: genassym.cf,v 1.1.2.1 2001/03/05 23:52:04 nathanw Exp $
+
+# Copyright
+
+include <ucontext.h>
+include <sys/queue.h>
+include "pthread.h"
+include "pthread_int.h"
+
+define PT_NEXT offsetof(struct pthread_st, pt_next)
+define PT_STATE offsetof(struct pthread_st, pt_state)
+define PT_SWITCHTO offsetof(struct pthread_st, pt_switchto)
+define PT_SWITCHTOUC offsetof(struct pthread_st, pt_switchtouc)
+define PT_SPINLOCKS offsetof(struct pthread_st, pt_spinlocks)
+define PT_HELDLOCK offsetof(struct pthread_st, pt_heldlock)
+define PT_UC offsetof(struct pthread_st, pt_uc)
+define CONTEXTSIZE sizeof(ucontext_t)
+
+define PT_STATE_RECYCLABLE PT_STATE_RECYCLABLE
+
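These definitions are processed by genassym.sh (further down in this
changeset) into an assym.h of cpp constants that pthread_switch.S can
include, since assembly cannot evaluate offsetof() or sizeof() itself. A
minimal sketch of the mechanism (the struct layout below is a
hypothetical stand-in, not the real pthread_st from pthread_int.h):

#include <stddef.h>
#include <stdio.h>
#include <ucontext.h>

struct pthread_st {                     /* hypothetical stand-in layout */
	struct pthread_st	*pt_next;
	int			 pt_state;
	ucontext_t		*pt_uc;
};

int
main(void)
{
	/* Emit each symbol as "#define NAME value", as assym.h would. */
	printf("#define PT_NEXT %zu\n", offsetof(struct pthread_st, pt_next));
	printf("#define PT_STATE %zu\n", offsetof(struct pthread_st, pt_state));
	printf("#define PT_UC %zu\n", offsetof(struct pthread_st, pt_uc));
	printf("#define CONTEXTSIZE %zu\n", sizeof(ucontext_t));
	return 0;
}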
diff -r 2b096c07e8e0 -r 4e9259f6fb93 lib/libpthread/arch/i386/pthread_md.h
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/lib/libpthread/arch/i386/pthread_md.h Mon Mar 05 23:51:52 2001 +0000
@@ -0,0 +1,20 @@
+/* $Id: pthread_md.h,v 1.1.2.1 2001/03/05 23:52:05 nathanw Exp $ */
+
+/* Copyright */
+
+#ifndef _LIB_PTHREAD_I386_MD_H
+#define _LIB_PTHREAD_I386_MD_H
+
+
+static __inline long
+pthread__sp(void)
+{
+ long ret;
+	__asm("movl %%esp, %0" : "=g" (ret));	/* read the current %esp */
+
+ return ret;
+}
+
+#define pthread__uc_sp(ucp) ((ucp)->uc_mcontext.__gregs[_REG_UESP])
+
+#endif /* _LIB_PTHREAD_I386_MD_H */
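pthread__sp() and pthread__uc_sp() are the machine-dependent hooks for
reading a stack pointer. Combined with the layout described in the TODO
(the pthread_st sits at the base of each thread's stack), a single
stack-pointer read is enough to locate the current thread. A hedged
sketch of that lookup (the stack size, mask, and function name are
assumptions, not code from this changeset):

#include <stdint.h>

struct pthread_st;                      /* real layout is in pthread_int.h */

#define PT_STACKSIZE	(1 << 18)       /* assumed: power-of-two stack size */
#define PT_STACKMASK	((long)PT_STACKSIZE - 1)

static inline struct pthread_st *
pthread__id(long sp)
{
	/* Clear the low bits to land on the (aligned) stack base,
	 * where the pthread_st is stored. */
	return (struct pthread_st *)(sp & ~PT_STACKMASK);
}

/* Usage: struct pthread_st *self = pthread__id(pthread__sp()); */

This only works if stacks are power-of-two sized and naturally aligned,
which is presumably what pthread_stack.c arranges.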
diff -r 2b096c07e8e0 -r 4e9259f6fb93 lib/libpthread/arch/i386/pthread_switch.S
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/lib/libpthread/arch/i386/pthread_switch.S Mon Mar 05 23:51:52 2001 +0000
@@ -0,0 +1,191 @@
+/* $Id: pthread_switch.S,v 1.1.2.1 2001/03/05 23:52:06 nathanw Exp $ */
+
+/* Copyright */
+
+#include <machine/asm.h>
+#include "assym.h"
+
+#define STACK_SWITCH \
+ movl PT_UC(%ecx),%esi ; \
+ movl %esi, %esp ; \
+ subl $CONTEXTSIZE, %esp /* XXX dodge kernel-placed ucontext */
+
+#define NOTREACHED \
+ /* force error */ \
+ int3
+
+/* Simple versions that don't handle spin-preempt cases */
+
+/* Plain switch that doesn't do any special checking. */
+ENTRY(pthread__switch)
+ pushl %ebp
+ movl %esp, %ebp
+ PIC_PROLOGUE
+ movl 8(%ebp), %eax /* eax holds the current thread */
+ movl 12(%ebp), %ecx /* ecx holds the thread to switch to */
+pthread__switch_no_save:
+ subl $CONTEXTSIZE, %esp
+ movl %esp, PT_UC(%eax)
+ movl %esp, %edi
+
+ STACK_SWITCH
+
+ subl %edx, PT_SPINLOCKS(%eax)
+ pushl %esi
+ pushl %edi
+ call PIC_PLT(_C_LABEL(_swapcontext_u))
+ popl %edi
+ popl %esi
+ movl %edi, %esp /* Switches back to the old stack! */
+ addl $CONTEXTSIZE, %esp
+ PIC_EPILOGUE
+ movl %ebp, %esp
+ popl %ebp
+ ret
+
+/* Switch away from a thread that is holding a lock on a queue (to
+ * prevent being removed from the queue before being switched away).
+ */
+ENTRY(pthread__locked_switch)
+ pushl %ebp
+ movl %esp, %ebp
+ PIC_PROLOGUE
+ movl 8(%ebp), %eax /* eax holds the current thread */
+ movl 12(%ebp), %ecx /* ecx holds the thread to switch to */
+ movl 16(%ebp), %edx /* edx holds the pointer to the spinlock */
+ incl PT_SPINLOCKS(%ecx) /* Make sure we get continued */
+ subl $CONTEXTSIZE, %esp
+ movl %esp, PT_UC(%eax)
+ movl %esp, %edi
+
+ pushl %eax
+ pushl %ecx
+ pushl %edx
+ pushl %edi
+ call PIC_PLT(_C_LABEL(_getcontext_u))
+ popl %edi
+ popl %edx
+ popl %ecx
+ popl %eax
+ /* Major-league cheating. Edit the context so that it continues
+ * as if returning from the _setcontext_u below.
+ */
+#ifdef PIC
+ movl PIC_GOT(locked_return_point), %esi
+#else
+ leal locked_return_point, %esi
+#endif
+	movl %esi, 92(%edi)	/* 92 = saved %eip slot in the i386 ucontext */
+
+ STACK_SWITCH
+
+ /* Check if the original thread was preempted while holding
+ * its queue lock.
+ */
+ cmpl $0, PT_NEXT(%eax)
+ je locked_no_old_preempt
+
+ /* Yes, it was. Stash the thread we were going to
+ * switch to, the lock the original thread was holding,
+ * and go to the next thread in the chain.
+ * Mark the fact that this was a locked switch, and so the
+ * thread does not need to be put on a run queue.
+ * Don't release the lock. It's possible that if we do so,
+ * PT_SWITCHTO will be stomped by another switch_lock and
+ * preemption.
+ */
+ movl %ecx, PT_SWITCHTO(%eax)
+ movl %esi, PT_SWITCHTOUC(%eax)
+ movl %edx, PT_HELDLOCK(%eax)
+ decl PT_SPINLOCKS(%eax)
+
+ movl PT_NEXT(%eax), %edx
+ movl %ecx, %eax
+ movl %edx, %ecx
+ movl $1, %edx
+ jmp pthread__switch_no_save
+ NOTREACHED
+
+locked_no_old_preempt:
+ /* We've moved to the new stack, and the old context has been
+ * saved. The queue lock can be released. */
+ decl PT_SPINLOCKS(%eax)
+ /* We happen to know that this is the right way to release a lock. */
+ movl $0, 0(%edx)
+
+ decl PT_SPINLOCKS(%ecx)
+ /* Check if we were preempted while holding the fake lock. */
+ cmpl $0, PT_NEXT(%ecx)
+ je locked_no_new_preempt
+ /* Yes, we were. Bummer. Go to the next element in the chain. */
+ movl PT_NEXT(%ecx), %edx
+ movl %ecx, %eax
+ movl %edx, %ecx
+ movl $0, %edx
+ jmp pthread__switch_no_save
+ NOTREACHED
+
+locked_no_new_preempt:
+ pushl %esi
+ call PIC_PLT(_C_LABEL(_setcontext_u))
+locked_return_point:
+ /* We're back on the original stack. */
+ addl $CONTEXTSIZE+16, %esp
+ PIC_EPILOGUE
+ movl %ebp, %esp
+ popl %ebp
+ ret
+
+
+
+
+/* Quit an upcall, recycle it, and jump to the next thing. */
+ENTRY(pthread__upcall_switch)
+ /* Save args into registers so we can stop using the old stack. */
+ pushl %ebp
+ movl %esp, %ebp
+ PIC_PROLOGUE
+ movl 8(%ebp), %eax /* eax holds the upcall thread */
+ movl 12(%ebp), %ecx /* ecx holds the thread to switch to */
+ incl PT_SPINLOCKS(%ecx)
+
+ STACK_SWITCH
+
+ /* Check if the upcall was preempted and continued. */
+ cmpl $0, PT_NEXT(%eax)
+ je upcall_no_old_preempt
+ /* Yes, it was. Stash the thread we were going to
+ * switch to, and go to the next thread in the chain.
+ */
+ movl %ecx, PT_SWITCHTO(%eax)
+ movl %esi, PT_SWITCHTOUC(%eax)
+ movl $PT_STATE_RECYCLABLE, PT_STATE(%eax)
+ movl PT_NEXT(%eax), %edx
+ movl %ecx, %eax
+ movl %edx, %ecx
+ movl $1, %edx
+ jmp pthread__switch_no_save
+ NOTREACHED
+
+upcall_no_old_preempt:
+ pushl %ecx
+ pushl %eax
+ call PIC_PLT(_C_LABEL(pthread__sa_recycle))
+ popl %eax
+ popl %ecx
+ decl PT_SPINLOCKS(%ecx)
+ /* Check if we were preempted while holding the fake lock. */
+ cmpl $0, PT_NEXT(%ecx)
+ je upcall_no_new_preempt
+ /* Yes, we were. Bummer. Go to the next element in the chain. */
+ movl PT_NEXT(%ecx), %edx
+ movl %ecx, %eax
+ movl %edx, %ecx
+ movl $0, %edx
+ jmp pthread__switch_no_save
+ NOTREACHED
+
+upcall_no_new_preempt:
+ pushl %esi
+ call PIC_PLT(_C_LABEL(_setcontext_u))
+ NOTREACHED
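From the way the entry points above consume their arguments (8(%ebp) is
the current thread, 12(%ebp) the thread to switch to, and 16(%ebp) the
spinlock held across a locked switch), their C-visible interface is
presumably along these lines (the types and the lock-word typedef are
assumptions; the real declarations belong in pthread_int.h, which the
truncated diff does not show):

struct pthread_st;
typedef volatile int pt_spin_t;         /* assumed: a lock word released by
					 * storing 0, as the assembly does */

void	pthread__switch(struct pthread_st *self, struct pthread_st *next);
void	pthread__locked_switch(struct pthread_st *self,
	    struct pthread_st *next, pt_spin_t *lock);
void	pthread__upcall_switch(struct pthread_st *upcall,
	    struct pthread_st *next);

The pt_spinlocks bookkeeping in all three paths apparently exists so that
an upcall arriving mid-switch sees a nonzero spinlock count and chains
the preempted thread through PT_NEXT instead of resuming it directly.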
diff -r 2b096c07e8e0 -r 4e9259f6fb93 lib/libpthread/genassym.sh
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/lib/libpthread/genassym.sh Mon Mar 05 23:51:52 2001 +0000
@@ -0,0 +1,155 @@
+#!/bin/sh
+# $NetBSD: genassym.sh,v 1.1.2.1 2001/03/05 23:51:53 nathanw Exp $
+
+#
+# Copyright (c) 1997 Matthias Pfaller.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+# 3. All advertising materials mentioning features or use of this software
+# must display the following acknowledgement:
+# This product includes software developed by Matthias Pfaller.
+# 4. The name of the author may not be used to endorse or promote products
+# derived from this software without specific prior written permission
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,