Source-Changes-HG archive
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]
[src/trunk]: src/sys Remove LW_AFFINITY flag and fix some bugs affinity mask ...
details: https://anonhg.NetBSD.org/src/rev/584f4c01b3a8
branches: trunk
changeset: 768084:584f4c01b3a8
user: rmind <rmind%NetBSD.org@localhost>
date: Sun Aug 07 21:13:05 2011 +0000
description:
Remove LW_AFFINITY flag and fix some bugs in affinity mask handling.
diffstat:
sys/kern/kern_cpu.c | 11 +++++------
sys/kern/kern_lwp.c | 38 +++++++++++++++++++-------------------
sys/kern/kern_runq.c | 12 ++++++------
sys/kern/subr_kcpuset.c | 13 ++++++-------
sys/kern/sys_pset.c | 9 +++++----
sys/kern/sys_sched.c | 32 ++++++++++++++++----------------
sys/sys/lwp.h | 3 +--
7 files changed, 58 insertions(+), 60 deletions(-)
diffs (truncated from 332 to 300 lines):
diff -r fa75d0fa9c78 -r 584f4c01b3a8 sys/kern/kern_cpu.c
--- a/sys/kern/kern_cpu.c Sun Aug 07 20:44:32 2011 +0000
+++ b/sys/kern/kern_cpu.c Sun Aug 07 21:13:05 2011 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: kern_cpu.c,v 1.48 2011/08/07 13:33:01 rmind Exp $ */
+/* $NetBSD: kern_cpu.c,v 1.49 2011/08/07 21:13:05 rmind Exp $ */
/*-
* Copyright (c) 2007, 2008, 2009, 2010 The NetBSD Foundation, Inc.
@@ -56,7 +56,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_cpu.c,v 1.48 2011/08/07 13:33:01 rmind Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_cpu.c,v 1.49 2011/08/07 21:13:05 rmind Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@@ -311,13 +311,12 @@
lwp_unlock(l);
continue;
}
- /* Normal case - no affinity */
- if ((l->l_flag & LW_AFFINITY) == 0) {
+ /* Regular case - no affinity. */
+ if (l->l_affinity == NULL) {
lwp_migrate(l, target_ci);
continue;
}
- /* Affinity is set, find an online CPU in the set */
- KASSERT(l->l_affinity != NULL);
+ /* Affinity is set, find an online CPU in the set. */
for (CPU_INFO_FOREACH(cii, mci)) {
mspc = &mci->ci_schedstate;
if ((mspc->spc_flags & SPCF_OFFLINE) == 0 &&
diff -r fa75d0fa9c78 -r 584f4c01b3a8 sys/kern/kern_lwp.c
--- a/sys/kern/kern_lwp.c Sun Aug 07 20:44:32 2011 +0000
+++ b/sys/kern/kern_lwp.c Sun Aug 07 21:13:05 2011 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: kern_lwp.c,v 1.161 2011/07/30 17:01:04 christos Exp $ */
+/* $NetBSD: kern_lwp.c,v 1.162 2011/08/07 21:13:05 rmind Exp $ */
/*-
* Copyright (c) 2001, 2006, 2007, 2008, 2009 The NetBSD Foundation, Inc.
@@ -211,7 +211,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.161 2011/07/30 17:01:04 christos Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.162 2011/08/07 21:13:05 rmind Exp $");
#include "opt_ddb.h"
#include "opt_lockdebug.h"
@@ -803,18 +803,19 @@
p2->p_nlwps++;
p2->p_nrlwps++;
+ KASSERT(l2->l_affinity == NULL);
+
if ((p2->p_flag & PK_SYSTEM) == 0) {
- /* Inherit an affinity */
- if (l1->l_flag & LW_AFFINITY) {
+ /* Inherit the affinity mask. */
+ if (l1->l_affinity) {
/*
* Note that we hold the state lock while inheriting
* the affinity to avoid race with sched_setaffinity().
*/
lwp_lock(l1);
- if (l1->l_flag & LW_AFFINITY) {
+ if (l1->l_affinity) {
kcpuset_use(l1->l_affinity);
l2->l_affinity = l1->l_affinity;
- l2->l_flag |= LW_AFFINITY;
}
lwp_unlock(l1);
}
@@ -987,12 +988,8 @@
lwp_lock(l);
l->l_stat = LSZOMB;
- if (l->l_name != NULL)
+ if (l->l_name != NULL) {
strcpy(l->l_name, "(zombie)");
- if (l->l_flag & LW_AFFINITY) {
- l->l_flag &= ~LW_AFFINITY;
- } else {
- KASSERT(l->l_affinity == NULL);
}
lwp_unlock(l);
p->p_nrlwps--;
@@ -1001,12 +998,6 @@
l->l_lwpctl->lc_curcpu = LWPCTL_CPU_EXITED;
mutex_exit(p->p_lock);
- /* Safe without lock since LWP is in zombie state */
- if (l->l_affinity) {
- kcpuset_unuse(l->l_affinity, NULL);
- l->l_affinity = NULL;
- }
-
/*
* We can no longer block. At this point, lwp_free() may already
* be gunning for us. On a multi-CPU system, we may be off p_lwps.
@@ -1103,6 +1094,17 @@
cv_destroy(&l->l_sigcv);
/*
+ * Free lwpctl structure and affinity.
+ */
+ if (l->l_lwpctl) {
+ lwp_ctl_free(l);
+ }
+ if (l->l_affinity) {
+ kcpuset_unuse(l->l_affinity, NULL);
+ l->l_affinity = NULL;
+ }
+
+ /*
* Free the LWP's turnstile and the LWP structure itself unless the
* caller wants to recycle them. Also, free the scheduler specific
* data.
@@ -1112,8 +1114,6 @@
*
* We don't recycle the VM resources at this time.
*/
- if (l->l_lwpctl != NULL)
- lwp_ctl_free(l);
if (!recycle && l->l_ts != &turnstile0)
pool_cache_put(turnstile_cache, l->l_ts);
diff -r fa75d0fa9c78 -r 584f4c01b3a8 sys/kern/kern_runq.c
--- a/sys/kern/kern_runq.c Sun Aug 07 20:44:32 2011 +0000
+++ b/sys/kern/kern_runq.c Sun Aug 07 21:13:05 2011 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: kern_runq.c,v 1.31 2011/08/07 13:33:01 rmind Exp $ */
+/* $NetBSD: kern_runq.c,v 1.32 2011/08/07 21:13:05 rmind Exp $ */
/*
* Copyright (c) 2007, 2008 Mindaugas Rasiukevicius <rmind at NetBSD org>
@@ -27,7 +27,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_runq.c,v 1.31 2011/08/07 13:33:01 rmind Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_runq.c,v 1.32 2011/08/07 21:13:05 rmind Exp $");
#include <sys/param.h>
#include <sys/kernel.h>
@@ -346,15 +346,15 @@
const struct schedstate_percpu *spc = &ci->ci_schedstate;
KASSERT(lwp_locked(__UNCONST(l), NULL));
- /* CPU is offline */
+ /* Is CPU offline? */
if (__predict_false(spc->spc_flags & SPCF_OFFLINE))
return false;
- /* Affinity bind */
- if (__predict_false(l->l_flag & LW_AFFINITY))
+ /* Is affinity set? */
+ if (__predict_false(l->l_affinity))
return kcpuset_isset(l->l_affinity, cpu_index(ci));
- /* Processor-set */
+ /* Is there a processor-set? */
return (spc->spc_psid == l->l_psid);
}
diff -r fa75d0fa9c78 -r 584f4c01b3a8 sys/kern/subr_kcpuset.c
--- a/sys/kern/subr_kcpuset.c Sun Aug 07 20:44:32 2011 +0000
+++ b/sys/kern/subr_kcpuset.c Sun Aug 07 21:13:05 2011 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: subr_kcpuset.c,v 1.1 2011/08/07 13:33:01 rmind Exp $ */
+/* $NetBSD: subr_kcpuset.c,v 1.2 2011/08/07 21:13:05 rmind Exp $ */
/*-
* Copyright (c) 2011 The NetBSD Foundation, Inc.
@@ -41,7 +41,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: subr_kcpuset.c,v 1.1 2011/08/07 13:33:01 rmind Exp $");
+__KERNEL_RCSID(0, "$NetBSD: subr_kcpuset.c,v 1.2 2011/08/07 21:13:05 rmind Exp $");
#include <sys/param.h>
#include <sys/types.h>
@@ -216,17 +216,16 @@
void
kcpuset_destroy(kcpuset_t *kcp)
{
- kcpuset_impl_t *kc, *nkc;
+ kcpuset_impl_t *kc;
KASSERT(kc_initialised);
KASSERT(kcp != NULL);
- kc = KC_GETSTRUCT(kcp);
do {
- nkc = KC_GETSTRUCT(kc->kc_next);
+ kc = KC_GETSTRUCT(kcp);
+ kcp = kc->kc_next;
pool_cache_put(kc_cache, kc);
- kc = nkc;
- } while (kc);
+ } while (kcp);
}
/*
diff -r fa75d0fa9c78 -r 584f4c01b3a8 sys/kern/sys_pset.c
--- a/sys/kern/sys_pset.c Sun Aug 07 20:44:32 2011 +0000
+++ b/sys/kern/sys_pset.c Sun Aug 07 21:13:05 2011 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: sys_pset.c,v 1.16 2011/08/07 13:33:01 rmind Exp $ */
+/* $NetBSD: sys_pset.c,v 1.17 2011/08/07 21:13:05 rmind Exp $ */
/*
* Copyright (c) 2008, Mindaugas Rasiukevicius <rmind at NetBSD org>
@@ -36,7 +36,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: sys_pset.c,v 1.16 2011/08/07 13:33:01 rmind Exp $");
+__KERNEL_RCSID(0, "$NetBSD: sys_pset.c,v 1.17 2011/08/07 21:13:05 rmind Exp $");
#include <sys/param.h>
@@ -366,10 +366,11 @@
* with this target CPU in it.
*/
LIST_FOREACH(t, &alllwp, l_list) {
- if ((t->l_flag & LW_AFFINITY) == 0)
+ if (t->l_affinity == NULL) {
continue;
+ }
lwp_lock(t);
- if ((t->l_flag & LW_AFFINITY) == 0) {
+ if (t->l_affinity == NULL) {
lwp_unlock(t);
continue;
}
diff -r fa75d0fa9c78 -r 584f4c01b3a8 sys/kern/sys_sched.c
--- a/sys/kern/sys_sched.c Sun Aug 07 20:44:32 2011 +0000
+++ b/sys/kern/sys_sched.c Sun Aug 07 21:13:05 2011 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: sys_sched.c,v 1.36 2011/08/07 13:33:01 rmind Exp $ */
+/* $NetBSD: sys_sched.c,v 1.37 2011/08/07 21:13:05 rmind Exp $ */
/*
* Copyright (c) 2008, 2011 Mindaugas Rasiukevicius <rmind at NetBSD org>
@@ -42,7 +42,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: sys_sched.c,v 1.36 2011/08/07 13:33:01 rmind Exp $");
+__KERNEL_RCSID(0, "$NetBSD: sys_sched.c,v 1.37 2011/08/07 21:13:05 rmind Exp $");
#include <sys/param.h>
@@ -425,32 +425,33 @@
}
#endif
- /* Find the LWP(s) */
+ /* Iterate through LWP(s). */
lcnt = 0;
lid = SCARG(uap, lid);
LIST_FOREACH(t, &p->p_lwps, l_sibling) {
- if (lid && lid != t->l_lid)
+ if (lid && lid != t->l_lid) {
continue;
+ }
lwp_lock(t);
- /* It is not allowed to set the affinity for zombie LWPs */
+ /* No affinity for zombie LWPs. */
if (t->l_stat == LSZOMB) {
lwp_unlock(t);
continue;
}
+ /* First, release existing affinity, if any. */
+ if (t->l_affinity) {
+ kcpuset_unuse(t->l_affinity, &kcpulst);
+ }
if (kcset) {
- /* Set the affinity flag and new CPU set */
- t->l_flag |= LW_AFFINITY;
+ /*
+ * Hold a reference on affinity mask, assign mask to
+ * LWP and migrate it to another CPU (unlocks LWP).
+ */
kcpuset_use(kcset);
- if (t->l_affinity != NULL)
- kcpuset_unuse(t->l_affinity, &kcpulst);
t->l_affinity = kcset;
- /* Migrate to another CPU, unlocks LWP */
lwp_migrate(t, ci);
} else {
- /* Unset the affinity flag */
- t->l_flag &= ~LW_AFFINITY;
- if (t->l_affinity != NULL)
Home |
Main Index |
Thread Index |
Old Index