Source-Changes-HG archive
[src/trunk]: src/external/cddl/osnet/dist/uts/common/fs/zfs Add workaround ab...
details: https://anonhg.NetBSD.org/src/rev/ac37fc4ab639
branches: trunk
changeset: 748603:ac37fc4ab639
user: haad <haad%NetBSD.org@localhost>
date: Wed Oct 28 23:44:51 2009 +0000
description:
Add a workaround for the ZFS vnode reclaim deadlock by checking whether we
already hold ZFS_OBJ_MUTEX. If OBJ_MUTEX cannot be locked, defer execution of
zfs_zinactive to a taskq. Code was inspired by FreeBSD's zfs_freebsd_reclaim.
XXX: This needs to be fixed once a final solution for the vnode life cycle is found.
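
For readers less familiar with the pattern, below is a minimal, hypothetical userland
analogue of the try-lock-or-defer idea (pthreads stand in for the kernel mutex and
taskq; none of the names are ZFS interfaces, and the real change additionally handles
the case where the current LWP already owns the mutex, see the diff):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t obj_lock = PTHREAD_MUTEX_INITIALIZER;

struct node {
	int id;
};

/*
 * Teardown proper; expects obj_lock to be held on entry and drops it,
 * much as zfs_zinactive drops the ZFS_OBJ_MUTEX in the real code.
 */
static void
node_teardown_locked(struct node *n)
{
	printf("tearing down node %d\n", n->id);
	pthread_mutex_unlock(&obj_lock);
	free(n);
}

/* Deferred path (stands in for the taskq callback): free to block. */
static void *
node_teardown_deferred(void *arg)
{
	pthread_mutex_lock(&obj_lock);
	node_teardown_locked(arg);
	return NULL;
}

/*
 * Reclaim path: must never block on obj_lock.  Returns 0 if teardown was
 * deferred to the worker thread *tp, -1 if it was done inline, or an
 * error number from pthread_create.
 */
static int
node_reclaim(struct node *n, pthread_t *tp)
{
	if (pthread_mutex_trylock(&obj_lock) != 0) {
		/* Lock unavailable: defer instead of risking a deadlock. */
		return pthread_create(tp, NULL, node_teardown_deferred, n);
	}
	node_teardown_locked(n);
	return -1;
}

int
main(void)
{
	struct node *n = malloc(sizeof(*n));
	pthread_t t;

	n->id = 1;
	if (node_reclaim(n, &t) == 0)	/* deferred: wait for the worker */
		pthread_join(t, NULL);
	return 0;
}

The essential property is that the reclaim path itself never blocks on the per-object
lock; any blocking is pushed to a context where it cannot deadlock.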
diffstat:
external/cddl/osnet/dist/uts/common/fs/zfs/zfs_vnops.c | 69 ++++++++++++++---
external/cddl/osnet/dist/uts/common/fs/zfs/zfs_znode.c | 21 ++--
2 files changed, 65 insertions(+), 25 deletions(-)
diffs (174 lines):
diff -r dbbe6334b060 -r ac37fc4ab639 external/cddl/osnet/dist/uts/common/fs/zfs/zfs_vnops.c
--- a/external/cddl/osnet/dist/uts/common/fs/zfs/zfs_vnops.c Wed Oct 28 22:49:38 2009 +0000
+++ b/external/cddl/osnet/dist/uts/common/fs/zfs/zfs_vnops.c Wed Oct 28 23:44:51 2009 +0000
@@ -1950,12 +1950,6 @@
vnevent_rmdir(vp, dvp, name, ct);
/*
- * Grab a lock on the directory to make sure that noone is
- * trying to add (or lookup) entries while we are removing it.
- */
- rw_enter(&zp->z_name_lock, RW_WRITER);
-
- /*
* Grab a lock on the parent pointer to make sure we play well
* with the treewalk and directory rename code.
*/
@@ -4236,13 +4230,36 @@
return (0);
}
+/*
+ * Destroy the znode from a taskq thread; entered without ZFS_OBJ_MUTEX held.
+ */
+static void
+zfs_reclaim_deferred(void *arg, int pending)
+{
+ znode_t *zp = arg;
+ zfsvfs_t *zfsvfs = zp->z_zfsvfs;
+ uint64_t z_id = zp->z_id;
+
+ /*
+ * Don't allow a zfs_zget() while we're trying to release this znode
+ */
+ ZFS_OBJ_HOLD_ENTER(zfsvfs, z_id);
+
+ /* No need to call ZFS_OBJ_HOLD_EXIT; zfs_zinactive did that for us. */
+ zfs_zinactive(zp);
+
+}
+
static int
zfs_netbsd_reclaim(struct vop_reclaim_args *ap)
{
vnode_t *vp = ap->a_vp;
znode_t *zp = VTOZ(vp);
zfsvfs_t *zfsvfs;
-
+ int locked;
+
+ locked = 0;
+
ASSERT(zp != NULL);
KASSERT(!vn_has_cached_data(vp));
@@ -4251,13 +4268,11 @@
mutex_enter(&zp->z_lock);
ASSERT(zp->z_phys);
- dprintf("destroying znode %p -- vnode %p -- zp->z_buf = %p\n", zp, ZTOV(zp), zp->z_dbuf);
- //cpu_Debugger();
-
- rw_enter(&zfsvfs->z_teardown_inactive_lock, RW_READER);
+// dprintf("destroying znode %p -- vnode %p -- zp->z_buf = %p\n", zp, ZTOV(zp), zp->z_dbuf);
+// rw_enter(&zfsvfs->z_teardown_inactive_lock, RW_READER);
genfs_node_destroy(vp);
cache_purge(vp);
-// ZTOV(zp) = NULL;
+
if (zp->z_dbuf == NULL) {
/*
* The fs has been unmounted, or we did a
@@ -4270,8 +4285,34 @@
}
mutex_exit(&zp->z_lock);
- zfs_zinactive(zp);
- rw_exit(&zfsvfs->z_teardown_inactive_lock);
+ mutex_enter(&zp->z_lock);
+ if (!zp->z_unlinked) {
+ /*
+ * XXX Hack: because ZFS_OBJ_MUTEX may be held we can't call zfs_zinactive
+ * now, so defer zfs_zinactive to another thread which doesn't hold this mutex.
+ */
+ locked = MUTEX_HELD(ZFS_OBJ_MUTEX(zfsvfs, zp->z_id)) ? 2 :
+ ZFS_OBJ_HOLD_TRYENTER(zfsvfs, zp->z_id);
+ if (locked == 0) {
+ /*
+ * Lock can't be obtained due to deadlock possibility,
+ * so defer znode destruction.
+ */
+ taskq_dispatch(system_taskq, zfs_reclaim_deferred, zp, 0);
+ } else {
+ zfs_znode_dmu_fini(zp);
+ /* Our LWP may already hold the ZFS_OBJ_MUTEX; if it was locked before
+    reclaim was entered (locked == 2), it is not ours to release here. */
+ if (locked == 1)
+ ZFS_OBJ_HOLD_EXIT(zfsvfs, zp->z_id);
+ zfs_znode_free(zp);
+ }
+ } else
+ mutex_exit(&zp->z_lock);
+
+ ZTOV(zp) = NULL;
+ vp->v_data = NULL; /* v_data must be NULL for a cleaned vnode. */
+
return (0);
}
diff -r dbbe6334b060 -r ac37fc4ab639 external/cddl/osnet/dist/uts/common/fs/zfs/zfs_znode.c
--- a/external/cddl/osnet/dist/uts/common/fs/zfs/zfs_znode.c Wed Oct 28 22:49:38 2009 +0000
+++ b/external/cddl/osnet/dist/uts/common/fs/zfs/zfs_znode.c Wed Oct 28 23:44:51 2009 +0000
@@ -675,7 +675,7 @@
}
dprintf("zfs_znode_alloc znode %p -- vnode %p\n", zp, vp);
- dprintf("zfs_znode_alloc Initializing genfs_node at %p\n", vp);
+ dprintf("zfs_znode_alloc z_id %ld\n", zp->z_id);
//cpu_Debugger();
uvm_vnp_setsize(vp, zp->z_phys->zp_size);
@@ -829,6 +829,9 @@
pzp->zp_mode = MAKEIMODE(vap->va_type, vap->va_mode);
if (!(flag & IS_ROOT_NODE)) {
+ dprintf("zfs_mknode parent vp %p - zp %p\n", ZTOV(dzp), dzp);
+ dprintf("Going to lock %p with %ld\n", ZFS_OBJ_MUTEX(zfsvfs, obj), obj);
+
ZFS_OBJ_HOLD_ENTER(zfsvfs, obj);
*zpp = zfs_znode_alloc(zfsvfs, db, 0);
@@ -952,7 +955,6 @@
err = ENOENT;
} else {
if ((vp = ZTOV(zp)) != NULL) {
-
mutex_enter(&vp->v_interlock);
mutex_exit(&zp->z_lock);
if (vget(vp, LK_INTERLOCK) != 0) {
@@ -1057,19 +1059,19 @@
zfs_znode_free(zp);
}
+/*
+ * zfs_zinactive must be called with the ZFS_OBJ_MUTEX held (taken via
+ * ZFS_OBJ_HOLD_ENTER); the lock is released inside zfs_zinactive.
+ */
void
zfs_zinactive(znode_t *zp)
{
vnode_t *vp = ZTOV(zp);
- zfsvfs_t *zfsvfs = zp->z_zfsvfs;
- uint64_t z_id = zp->z_id;
ASSERT(zp->z_dbuf && zp->z_phys);
- /*
- * Don't allow a zfs_zget() while were trying to release this znode
- */
- ZFS_OBJ_HOLD_ENTER(zfsvfs, z_id);
+ //printf("zfs_zinactive vp %p - zp %p\n", vp, zp);
+ //printf("Going to lock %p with %ld\n", ZFS_OBJ_MUTEX(zfsvfs, z_id), z_id);
mutex_enter(&zp->z_lock);
/*
@@ -1079,9 +1081,6 @@
if (zp->z_unlinked) {
mutex_exit(&zp->z_lock);
ZFS_OBJ_HOLD_EXIT(zfsvfs, z_id);
-#ifndef __NetBSD__
- ASSERT(vp->v_count == 0);
-#endif
zfs_rmnode(zp);
return;
}