Source-Changes-HG archive
[src/trunk]: src/sys Put back the vnode changes I backed out yesterday; they ...
details: https://anonhg.NetBSD.org/src/rev/2432d8ff471a
branches: trunk
changeset: 325045:2432d8ff471a
user: christos <christos%NetBSD.org@localhost>
date: Sun Dec 01 17:29:40 2013 +0000
description:
Put back the vnode changes I backed out yesterday; they were not the problem.
I've tested them with two -j 20 builds on an 8-CPU box; previously it crashed
reliably when the pcu changes were present.
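For orientation, the sketch below is a minimal userland model (not the NetBSD
kernel code; the names obj_get, obj_change_done, OBJ_CHANGING and OBJ_CLEAN are
hypothetical) of the wait-on-VI_CHANGING pattern these changes introduce in
vget(): a thread that wants the vnode waits while another thread is changing
its state, then re-checks that the vnode was not cleaned while it waited.

/*
 * Illustrative model of the VI_CHANGING protocol, built on pthreads
 * instead of the kernel's v_interlock/v_cv.  Assumptions are noted
 * in the comments; this is a sketch, not the actual implementation.
 */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>

#define OBJ_CHANGING 0x01 /* state transition in progress (cf. VI_CHANGING) */
#define OBJ_CLEAN    0x02 /* object has been reclaimed (cf. VI_CLEAN) */

struct obj {
	pthread_mutex_t lock;   /* models vp->v_interlock */
	pthread_cond_t  cv;     /* models vp->v_cv */
	unsigned        flags;
	unsigned        usecount;
};

/* Try to grab a reference; fail rather than return a clean object. */
static int
obj_get(struct obj *op, bool nowait)
{
	pthread_mutex_lock(&op->lock);
	op->usecount++;
	while ((op->flags & OBJ_CHANGING) != 0) {
		if (nowait) {
			op->usecount--;	/* drop the reference we took */
			pthread_mutex_unlock(&op->lock);
			return EBUSY;
		}
		/* Wait for the state change to complete (cf. vwait()). */
		pthread_cond_wait(&op->cv, &op->lock);
	}
	if ((op->flags & OBJ_CLEAN) != 0) {
		/* The object was cleaned while we waited; do not hand it out. */
		op->usecount--;
		pthread_mutex_unlock(&op->lock);
		return ENOENT;
	}
	pthread_mutex_unlock(&op->lock);
	return 0;
}

/* Finish a state change: clear the flag and wake all waiters. */
static void
obj_change_done(struct obj *op)
{
	pthread_mutex_lock(&op->lock);
	op->flags &= ~OBJ_CHANGING;
	pthread_cond_broadcast(&op->cv); /* cf. cv_broadcast(&vp->v_cv) */
	pthread_mutex_unlock(&op->lock);
}

In the actual change, the releasing side (vrelel()) either sets VI_CHANGING
itself or is told via VRELEL_CHANGING_SET that the caller already set it, and
it is responsible for clearing the flag and broadcasting on every exit path.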
diffstat:
sys/kern/vfs_vnode.c | 119 ++++++++++++++++++++++++++------------------------
sys/sys/vnode.h | 8 +--
2 files changed, 65 insertions(+), 62 deletions(-)
diffs (truncated from 336 to 300 lines):
diff -r 1e57c3936acf -r 2432d8ff471a sys/kern/vfs_vnode.c
--- a/sys/kern/vfs_vnode.c Sun Dec 01 09:29:37 2013 +0000
+++ b/sys/kern/vfs_vnode.c Sun Dec 01 17:29:40 2013 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: vfs_vnode.c,v 1.28 2013/12/01 00:59:34 christos Exp $ */
+/* $NetBSD: vfs_vnode.c,v 1.29 2013/12/01 17:29:40 christos Exp $ */
/*-
* Copyright (c) 1997-2011 The NetBSD Foundation, Inc.
@@ -116,7 +116,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: vfs_vnode.c,v 1.28 2013/12/01 00:59:34 christos Exp $");
+__KERNEL_RCSID(0, "$NetBSD: vfs_vnode.c,v 1.29 2013/12/01 17:29:40 christos Exp $");
#define _VFS_VNODE_PRIVATE
@@ -145,6 +145,7 @@
/* Flags to vrelel. */
#define VRELEL_ASYNC_RELE 0x0001 /* Always defer to vrele thread. */
+#define VRELEL_CHANGING_SET 0x0002 /* VI_CHANGING set by caller. */
u_int numvnodes __cacheline_aligned;
@@ -323,8 +324,10 @@
* before doing this.
*/
vp->v_usecount = 1;
+ KASSERT((vp->v_iflag & VI_CHANGING) == 0);
+ vp->v_iflag |= VI_CHANGING;
vclean(vp);
- vrelel(vp, 0);
+ vrelel(vp, VRELEL_CHANGING_SET);
fstrans_done(mp);
return 0;
@@ -476,10 +479,10 @@
*
* => Should be called with v_interlock held.
*
- * If VI_XLOCK is set, the vnode is being eliminated in vgone()/vclean().
+ * If VI_CHANGING is set, the vnode may be eliminated in vgone()/vclean().
* In that case, we cannot grab the vnode, so the process is awakened when
* the transition is completed, and an error returned to indicate that the
- * vnode is no longer usable (e.g. changed to a new file system type).
+ * vnode is no longer usable.
*/
int
vget(vnode_t *vp, int flags)
@@ -502,31 +505,16 @@
}
/*
- * If the vnode is in the process of being cleaned out for
- * another use, we wait for the cleaning to finish and then
- * return failure. Cleaning is determined by checking if
- * the VI_XLOCK flag is set.
+ * If the vnode is in the process of changing state we wait
+ * for the change to complete and take care not to return
+ * a clean vnode.
*/
- if ((vp->v_iflag & VI_XLOCK) != 0) {
+ if ((vp->v_iflag & VI_CHANGING) != 0) {
if ((flags & LK_NOWAIT) != 0) {
vrelel(vp, 0);
return EBUSY;
}
- vwait(vp, VI_XLOCK);
- vrelel(vp, 0);
- return ENOENT;
- }
-
- if ((vp->v_iflag & VI_INACTNOW) != 0) {
- /*
- * if it's being desactived, wait for it to complete.
- * Make sure to not return a clean vnode.
- */
- if ((flags & LK_NOWAIT) != 0) {
- vrelel(vp, 0);
- return EBUSY;
- }
- vwait(vp, VI_INACTNOW);
+ vwait(vp, VI_CHANGING);
if ((vp->v_iflag & VI_CLEAN) != 0) {
vrelel(vp, 0);
return ENOENT;
@@ -605,7 +593,11 @@
* and unlock.
*/
if (vtryrele(vp)) {
- vp->v_iflag |= VI_INACTREDO;
+ if ((flags & VRELEL_CHANGING_SET) != 0) {
+ KASSERT((vp->v_iflag & VI_CHANGING) != 0);
+ vp->v_iflag &= ~VI_CHANGING;
+ cv_broadcast(&vp->v_cv);
+ }
mutex_exit(vp->v_interlock);
return;
}
@@ -626,10 +618,8 @@
* If not clean, deactivate the vnode, but preserve
* our reference across the call to VOP_INACTIVE().
*/
-retry:
if ((vp->v_iflag & VI_CLEAN) == 0) {
recycle = false;
- vp->v_iflag |= VI_INACTNOW;
/*
* XXX This ugly block can be largely eliminated if
@@ -644,11 +634,8 @@
defer = true;
} else if (curlwp == vrele_lwp) {
/*
- * We have to try harder. But we can't sleep
- * with VI_INACTNOW as vget() may be waiting on it.
+ * We have to try harder.
*/
- vp->v_iflag &= ~(VI_INACTREDO|VI_INACTNOW);
- cv_broadcast(&vp->v_cv);
mutex_exit(vp->v_interlock);
error = vn_lock(vp, LK_EXCLUSIVE);
if (error != 0) {
@@ -663,11 +650,14 @@
*/
if (__predict_false(vtryrele(vp))) {
VOP_UNLOCK(vp);
+ if ((flags & VRELEL_CHANGING_SET) != 0) {
+ KASSERT((vp->v_iflag & VI_CHANGING) != 0);
+ vp->v_iflag &= ~VI_CHANGING;
+ cv_broadcast(&vp->v_cv);
+ }
mutex_exit(vp->v_interlock);
return;
}
- vp->v_iflag |= VI_INACTNOW;
- mutex_exit(vp->v_interlock);
defer = false;
} else if ((vp->v_iflag & VI_LAYER) != 0) {
/*
@@ -678,15 +668,14 @@
defer = true;
} else {
/* If we can't acquire the lock, then defer. */
- vp->v_iflag &= ~VI_INACTREDO;
mutex_exit(vp->v_interlock);
error = vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT);
if (error != 0) {
defer = true;
- mutex_enter(vp->v_interlock);
} else {
defer = false;
}
+ mutex_enter(vp->v_interlock);
}
if (defer) {
@@ -695,17 +684,26 @@
* clean it here. We donate it our last reference.
*/
KASSERT(mutex_owned(vp->v_interlock));
- vp->v_iflag &= ~VI_INACTNOW;
+ if ((flags & VRELEL_CHANGING_SET) != 0) {
+ KASSERT((vp->v_iflag & VI_CHANGING) != 0);
+ vp->v_iflag &= ~VI_CHANGING;
+ cv_broadcast(&vp->v_cv);
+ }
mutex_enter(&vrele_lock);
TAILQ_INSERT_TAIL(&vrele_list, vp, v_freelist);
if (++vrele_pending > (desiredvnodes >> 8))
cv_signal(&vrele_cv);
mutex_exit(&vrele_lock);
- cv_broadcast(&vp->v_cv);
mutex_exit(vp->v_interlock);
return;
}
+ if ((flags & VRELEL_CHANGING_SET) == 0) {
+ KASSERT((vp->v_iflag & VI_CHANGING) == 0);
+ vp->v_iflag |= VI_CHANGING;
+ }
+ mutex_exit(vp->v_interlock);
+
/*
* The vnode can gain another reference while being
* deactivated. If VOP_INACTIVE() indicates that
@@ -718,21 +716,14 @@
*/
VOP_INACTIVE(vp, &recycle);
mutex_enter(vp->v_interlock);
- vp->v_iflag &= ~VI_INACTNOW;
- cv_broadcast(&vp->v_cv);
if (!recycle) {
if (vtryrele(vp)) {
+ KASSERT((vp->v_iflag & VI_CHANGING) != 0);
+ vp->v_iflag &= ~VI_CHANGING;
+ cv_broadcast(&vp->v_cv);
mutex_exit(vp->v_interlock);
return;
}
-
- /*
- * If we grew another reference while
- * VOP_INACTIVE() was underway, retry.
- */
- if ((vp->v_iflag & VI_INACTREDO) != 0) {
- goto retry;
- }
}
/* Take care of space accounting. */
@@ -753,10 +744,18 @@
vclean(vp);
}
KASSERT(vp->v_usecount > 0);
+ } else { /* vnode was already clean */
+ if ((flags & VRELEL_CHANGING_SET) == 0) {
+ KASSERT((vp->v_iflag & VI_CHANGING) == 0);
+ vp->v_iflag |= VI_CHANGING;
+ }
}
if (atomic_dec_uint_nv(&vp->v_usecount) != 0) {
/* Gained another reference while being reclaimed. */
+ KASSERT((vp->v_iflag & VI_CHANGING) != 0);
+ vp->v_iflag &= ~VI_CHANGING;
+ cv_broadcast(&vp->v_cv);
mutex_exit(vp->v_interlock);
return;
}
@@ -788,6 +787,9 @@
}
TAILQ_INSERT_TAIL(vp->v_freelisthd, vp, v_freelist);
mutex_exit(&vnode_free_list_lock);
+ KASSERT((vp->v_iflag & VI_CHANGING) != 0);
+ vp->v_iflag &= ~VI_CHANGING;
+ cv_broadcast(&vp->v_cv);
mutex_exit(vp->v_interlock);
}
}
@@ -798,7 +800,7 @@
KASSERT((vp->v_iflag & VI_MARKER) == 0);
- if ((vp->v_iflag & VI_INACTNOW) == 0 && vtryrele(vp)) {
+ if (vtryrele(vp)) {
return;
}
mutex_enter(vp->v_interlock);
@@ -814,7 +816,7 @@
KASSERT((vp->v_iflag & VI_MARKER) == 0);
- if ((vp->v_iflag & VI_INACTNOW) == 0 && vtryrele(vp)) {
+ if (vtryrele(vp)) {
return;
}
mutex_enter(vp->v_interlock);
@@ -1058,8 +1060,10 @@
}
vremfree(vp);
vp->v_usecount = 1;
+ KASSERT((vp->v_iflag & VI_CHANGING) == 0);
+ vp->v_iflag |= VI_CHANGING;
vclean(vp);
- vrelel(vp, 0);
+ vrelel(vp, VRELEL_CHANGING_SET);
return 1;
}
@@ -1082,8 +1086,8 @@
return;
} else if (vp->v_type != VBLK && vp->v_type != VCHR) {
atomic_inc_uint(&vp->v_usecount);
- vclean(vp);
- vrelel(vp, 0);
+ mutex_exit(vp->v_interlock);
+ vgone(vp);
return;
} else {
dev = vp->v_rdev;
@@ -1092,9 +1096,7 @@
}
while (spec_node_lookup_by_dev(type, dev, &vq) == 0) {
- mutex_enter(vq->v_interlock);
- vclean(vq);
- vrelel(vq, 0);
+ vgone(vq);
}
}
@@ -1107,8 +1109,11 @@
{
mutex_enter(vp->v_interlock);
+ if ((vp->v_iflag & VI_CHANGING) != 0)
+ vwait(vp, VI_CHANGING);