NetBSD-Bugs archive
Re: kern/35351: fileassoc lacks locking
The following reply was made to PR kern/35351; it has been noted by GNATS.
From: David Holland <dholland-bugs%netbsd.org@localhost>
To: gnats-bugs%netbsd.org@localhost
Cc:
Subject: Re: kern/35351: fileassoc lacks locking
Date: Sun, 22 Jan 2012 18:26:54 +0000
Resend to the correct address.
------
From: Elad Efrat <elad%NetBSD.org@localhost>
To: netbsd-bugs%netbsd.org@localhost
Subject: Fwd: PR/35351: fileassoc lacks locking
Date: Sun, 22 Jan 2012 11:39:29 -0500
Resend to a different address.
---------- Forwarded message ----------
From: Elad Efrat <elad%netbsd.org@localhost>
Date: Wed, Jan 18, 2012 at 12:48 PM
Subject: PR/35351: fileassoc lacks locking
To: gnats%netbsd.org@localhost
Attached is a diff that adds locking to fileassoc(9). It fixes two
panics that I can easily trigger through Veriexec.
A version of it (earlier? can't remember) was discussed here:
    http://mail-index.netbsd.org/tech-kern/2009/12/26/msg006703.html
I don't find the "it's not perfect" line of reasoning convincing, so
my recommendation is to check it in. It's filed so it doesn't get
lost.
Elad
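[For readers unfamiliar with the pattern the diff applies uniformly to its three object types (fileassoc, fileassoc_file, fileassoc_table): each object carries a mutex, a condvar, and a reference count; users bump the count before touching the object, drop it afterwards, and destroyers drain the count to zero before teardown. Below is a minimal userland analogue of that use/unuse/drain pattern. It is illustrative only -- the names struct refobj, obj_use, obj_unuse and obj_drain are invented, and the kernel code uses kmutex(9)/condvar(9) rather than pthreads.]

/*
 * Userland sketch of the use/unuse/drain reference-count pattern
 * used by the patch.  Pthreads stand in for mutex_enter/mutex_exit,
 * cv_wait and cv_broadcast; all names here are illustrative.
 */
#include <pthread.h>
#include <stdint.h>

struct refobj {
	pthread_mutex_t	ro_mtx;		/* protects ro_refcnt */
	pthread_cond_t	ro_cv;		/* signalled when ro_refcnt hits 0 */
	uint32_t	ro_refcnt;	/* # of active users */
};

/* Take a reference: the object may not be torn down while held. */
static void
obj_use(struct refobj *ro)
{
	pthread_mutex_lock(&ro->ro_mtx);
	ro->ro_refcnt++;
	pthread_mutex_unlock(&ro->ro_mtx);
}

/* Drop a reference; wake any thread waiting to drain the object. */
static void
obj_unuse(struct refobj *ro)
{
	pthread_mutex_lock(&ro->ro_mtx);
	if (--ro->ro_refcnt == 0)
		pthread_cond_broadcast(&ro->ro_cv);
	pthread_mutex_unlock(&ro->ro_mtx);
}

/* Wait until all users are gone; afterwards the caller may free. */
static void
obj_drain(struct refobj *ro)
{
	pthread_mutex_lock(&ro->ro_mtx);
	while (ro->ro_refcnt > 0)
		pthread_cond_wait(&ro->ro_cv, &ro->ro_mtx);
	pthread_mutex_unlock(&ro->ro_mtx);
}

[This is exactly the shape of assoc_use()/assoc_unuse(), file_use()/file_unuse() and table_use()/table_unuse() in the diff that follows, with the drain loop appearing inline in table_dtor(), fileassoc_deregister(), fileassoc_table_resize() and fileassoc_file_delete().]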
Index: kern/kern_fileassoc.c
===================================================================
RCS file: /cvsroot/src/sys/kern/kern_fileassoc.c,v
retrieving revision 1.34
diff -u -p -r1.34 kern_fileassoc.c
--- kern/kern_fileassoc.c 25 Dec 2009 20:07:18 -0000 1.34
+++ kern/kern_fileassoc.c 18 Jan 2012 17:21:55 -0000
@@ -58,9 +58,14 @@ struct fileassoc {
const char *assoc_name; /* Name. */
fileassoc_cleanup_cb_t assoc_cleanup_cb; /* Clear callback. */
specificdata_key_t assoc_key;
+
+ kmutex_t assoc_mtx;
+ kcondvar_t assoc_cv;
+ uint32_t assoc_refcnt;
};
static LIST_HEAD(, fileassoc) fileassoc_list;
+static kmutex_t fileassoc_list_lock;
/* An entry in the per-mount hash table. */
struct fileassoc_file {
@@ -68,6 +73,10 @@ struct fileassoc_file {
specificdata_reference faf_data; /* Assoc data. */
u_int faf_nassocs; /* # of assocs. */
LIST_ENTRY(fileassoc_file) faf_list; /* List pointer. */
+
+ kmutex_t faf_mtx;
+ kcondvar_t faf_cv;
+ uint32_t faf_refcnt;
};
LIST_HEAD(fileassoc_hash_entry, fileassoc_file);
@@ -78,15 +87,76 @@ struct fileassoc_table {
size_t tbl_nslots; /* Number of slots. */
size_t tbl_nused; /* # of used slots. */
specificdata_reference tbl_data;
+
+ kmutex_t tbl_mtx;
+ kcondvar_t tbl_cv;
+ uint32_t tbl_refcnt;
};
/*
* Hashing function: Takes a number modulus the mask to give back an
* index into the hash table.
*/
-#define FILEASSOC_HASH(tbl, handle) \
+#define FILEASSOC_HASH(mask, handle) \
(hash32_buf((handle), FHANDLE_SIZE(handle), HASH32_BUF_INIT) \
- & ((tbl)->tbl_mask))
+ & ((mask)))
+
+static void
+assoc_use(struct fileassoc *assoc)
+{
+
+ mutex_enter(&assoc->assoc_mtx);
+ assoc->assoc_refcnt++;
+ mutex_exit(&assoc->assoc_mtx);
+}
+
+static void
+assoc_unuse(struct fileassoc *assoc)
+{
+
+ mutex_enter(&assoc->assoc_mtx);
+ if (--assoc->assoc_refcnt == 0)
+ cv_broadcast(&assoc->assoc_cv);
+ mutex_exit(&assoc->assoc_mtx);
+}
+
+static void
+file_use(struct fileassoc_file *faf)
+{
+
+ mutex_enter(&faf->faf_mtx);
+ faf->faf_refcnt++;
+ mutex_exit(&faf->faf_mtx);
+}
+
+static void
+file_unuse(struct fileassoc_file *faf)
+{
+
+ mutex_enter(&faf->faf_mtx);
+ if (--faf->faf_refcnt == 0)
+ cv_broadcast(&faf->faf_cv);
+ mutex_exit(&faf->faf_mtx);
+}
+
+static void
+table_use(struct fileassoc_table *tbl)
+{
+
+ mutex_enter(&tbl->tbl_mtx);
+ tbl->tbl_refcnt++;
+ mutex_exit(&tbl->tbl_mtx);
+}
+
+static void
+table_unuse(struct fileassoc_table *tbl)
+{
+
+ mutex_enter(&tbl->tbl_mtx);
+ if (--tbl->tbl_refcnt == 0)
+ cv_broadcast(&tbl->tbl_cv);
+ mutex_exit(&tbl->tbl_mtx);
+}
static void *
file_getdata(struct fileassoc_file *faf, const struct fileassoc *assoc)
@@ -124,34 +194,70 @@ file_free(struct fileassoc_file *faf)
{
struct fileassoc *assoc;
- LIST_REMOVE(faf, faf_list);
+ KASSERT(faf->faf_refcnt == 0);
+ mutex_enter(&fileassoc_list_lock);
LIST_FOREACH(assoc, &fileassoc_list, assoc_list) {
+ assoc_use(assoc);
file_cleanup(faf, assoc);
+ assoc_unuse(assoc);
}
+ mutex_exit(&fileassoc_list_lock);
+
+ mutex_destroy(&faf->faf_mtx);
+ cv_destroy(&faf->faf_cv);
vfs_composefh_free(faf->faf_handle);
specificdata_fini(fileassoc_domain, &faf->faf_data);
+
kmem_free(faf, sizeof(*faf));
}
+/*
+ * Free a table.
+ *
+ * Expects the table to be detached and unreachable by anyone.
+ *
+ * Can be called in two cases:
+ * - fileassoc_table_delete(), called manually to delete a table,
+ * - specificdata(9), when a mount-point disappears.
+ */
static void
table_dtor(void *v)
{
struct fileassoc_table *tbl = v;
u_long i;
+ /* Wait for everyone to finish. */
+ mutex_enter(&tbl->tbl_mtx);
+ while (tbl->tbl_refcnt > 0)
+ cv_wait(&tbl->tbl_cv, &tbl->tbl_mtx);
+ mutex_exit(&tbl->tbl_mtx);
+
/* Remove all entries from the table and lists */
for (i = 0; i < tbl->tbl_nslots; i++) {
struct fileassoc_file *faf;
while ((faf = LIST_FIRST(&tbl->tbl_hash[i])) != NULL) {
+ /* Get exclusivity on this file. */
+ mutex_enter(&faf->faf_mtx);
+ while (faf->faf_refcnt > 0)
+ cv_wait(&faf->faf_cv, &faf->faf_mtx);
+
+ /* Remove it... */
+ LIST_REMOVE(faf, faf_list);
+
+ mutex_exit(&faf->faf_mtx);
+
file_free(faf);
}
}
- /* Remove hash table and sysctl node */
hashdone(tbl->tbl_hash, HASH_LIST, tbl->tbl_mask);
specificdata_fini(fileassoc_domain, &tbl->tbl_data);
+
+ mutex_destroy(&tbl->tbl_mtx);
+ cv_destroy(&tbl->tbl_cv);
+
kmem_free(tbl, sizeof(*tbl));
}
@@ -170,6 +276,8 @@ fileassoc_init(void)
}
fileassoc_domain = specificdata_domain_create();
+ mutex_init(&fileassoc_list_lock, MUTEX_DEFAULT, IPL_NONE);
+
return 0;
}
@@ -196,8 +304,13 @@ fileassoc_register(const char *name, fil
assoc->assoc_name = name;
assoc->assoc_cleanup_cb = cleanup_cb;
assoc->assoc_key = key;
+ mutex_init(&assoc->assoc_mtx, MUTEX_DEFAULT, IPL_NONE);
+ cv_init(&assoc->assoc_cv, "fa_assoc");
+ assoc->assoc_refcnt = 0;
+ mutex_enter(&fileassoc_list_lock);
LIST_INSERT_HEAD(&fileassoc_list, assoc, assoc_list);
+ mutex_exit(&fileassoc_list_lock);
*result = assoc;
@@ -211,8 +324,22 @@ int
fileassoc_deregister(fileassoc_t assoc)
{
+ mutex_enter(&fileassoc_list_lock);
+ mutex_enter(&assoc->assoc_mtx);
LIST_REMOVE(assoc, assoc_list);
+ mutex_exit(&fileassoc_list_lock);
+
+ /* Wait for everyone to finish playing with it... */
+ while (assoc->assoc_refcnt != 0)
+ cv_wait(&assoc->assoc_cv, &assoc->assoc_mtx);
+
specificdata_key_delete(fileassoc_domain, assoc->assoc_key);
+
+ mutex_exit(&assoc->assoc_mtx);
+
+ mutex_destroy(&assoc->assoc_mtx);
+ cv_destroy(&assoc->assoc_cv);
+
kmem_free(assoc, sizeof(*assoc));
return 0;
@@ -220,6 +347,7 @@ fileassoc_deregister(fileassoc_t assoc)
/*
* Get the hash table for the specified device.
+ *
*/
static struct fileassoc_table *
fileassoc_table_lookup(struct mount *mp)
@@ -230,6 +358,7 @@ fileassoc_table_lookup(struct mount *mp)
if (error) {
return NULL;
}
+
return mount_getspecific(mp, fileassoc_mountspecific_key);
}
@@ -253,26 +382,39 @@ fileassoc_file_lookup(struct vnode *vp,
return NULL;
}
+ table_use(tbl);
+
if (hint == NULL) {
error = vfs_composefh_alloc(vp, &th);
- if (error)
+ if (error) {
+ table_unuse(tbl);
return (NULL);
+ }
} else {
th = hint;
}
- indx = FILEASSOC_HASH(tbl, th);
+ indx = FILEASSOC_HASH(tbl->tbl_mask, th);
hash_entry = &(tbl->tbl_hash[indx]);
LIST_FOREACH(faf, hash_entry, faf_list) {
+ file_use(faf);
+
if (((FHANDLE_FILEID(faf->faf_handle)->fid_len ==
FHANDLE_FILEID(th)->fid_len)) &&
(memcmp(FHANDLE_FILEID(faf->faf_handle), FHANDLE_FILEID(th),
(FHANDLE_FILEID(th))->fid_len) == 0)) {
break;
}
+
+ file_unuse(faf);
}
+ if (faf != NULL)
+ file_unuse(faf);
+
+ table_unuse(tbl);
+
if (hint == NULL)
vfs_composefh_free(th);
@@ -286,35 +428,60 @@ void *
fileassoc_lookup(struct vnode *vp, fileassoc_t assoc)
{
struct fileassoc_file *faf;
+ void *data;
faf = fileassoc_file_lookup(vp, NULL);
if (faf == NULL)
return (NULL);
- return file_getdata(faf, assoc);
+ /* Prevent this file entry and assoc from being taken away. */
+ /* file_use(faf);
+ assoc_use(assoc); */
+
+ data = file_getdata(faf, assoc);
+
+ return data;
}
-static struct fileassoc_table *
+void
+fileassoc_release(struct vnode *vp, fileassoc_t assoc)
+{
+ struct fileassoc_file *faf;
+
+ faf = fileassoc_file_lookup(vp, NULL);
+ /*if (faf != NULL)
+ file_unuse(faf);
+
+ assoc_unuse(assoc);*/
+}
+
+/*
+ * Resize a fileassoc table.
+ *
+ * Expects tbl to be held exclusively by the caller.
+ */
+static int
fileassoc_table_resize(struct fileassoc_table *tbl)
{
- struct fileassoc_table *newtbl;
- u_long i;
+ struct fileassoc_hash_entry *new_hash;
+ size_t new_nslots;
+ u_long new_mask, i;
+
+ KASSERT(mutex_owned(&tbl->tbl_mtx));
+ KASSERT(tbl->tbl_refcnt == 1);
/*
- * Allocate a new table. Like the condition in fileassoc_file_add(),
+ * Allocate a new hash table. Like the condition in fileassoc_file_add(),
* this is also temporary -- just double the number of slots.
*/
- newtbl = kmem_zalloc(sizeof(*newtbl), KM_SLEEP);
- newtbl->tbl_nslots = (tbl->tbl_nslots * 2);
- if (newtbl->tbl_nslots < tbl->tbl_nslots)
- newtbl->tbl_nslots = tbl->tbl_nslots;
- newtbl->tbl_hash = hashinit(newtbl->tbl_nslots, HASH_LIST,
- true, &newtbl->tbl_mask);
- newtbl->tbl_nused = 0;
- specificdata_init(fileassoc_domain, &newtbl->tbl_data);
-
- /* XXX we need to make sure nothing uses fileassoc here! */
+ new_nslots = (tbl->tbl_nslots * 2);
+ if (new_nslots < tbl->tbl_nslots)
+ new_nslots = tbl->tbl_nslots;
+ new_hash = hashinit(new_nslots, HASH_LIST, true, &new_mask);
+ /*
+ * Attach all entries to the new hash table.
+ */
for (i = 0; i < tbl->tbl_nslots; i++) {
struct fileassoc_file *faf;
@@ -322,31 +489,35 @@ fileassoc_table_resize(struct fileassoc_
struct fileassoc_hash_entry *hash_entry;
size_t indx;
+ /* Wait for others to finish with each file. */
+ mutex_enter(&faf->faf_mtx);
+ while (faf->faf_refcnt > 0)
+ cv_wait(&faf->faf_cv, &faf->faf_mtx);
+
LIST_REMOVE(faf, faf_list);
- indx = FILEASSOC_HASH(newtbl, faf->faf_handle);
- hash_entry = &(newtbl->tbl_hash[indx]);
+ indx = FILEASSOC_HASH(new_mask, faf->faf_handle);
+ hash_entry = &(new_hash[indx]);
LIST_INSERT_HEAD(hash_entry, faf, faf_list);
- newtbl->tbl_nused++;
+ mutex_exit(&faf->faf_mtx);
}
}
- if (tbl->tbl_nused != newtbl->tbl_nused)
- panic("fileassoc_table_resize: inconsistency detected! "
- "needed %zu entries, got %zu", tbl->tbl_nused,
- newtbl->tbl_nused);
-
+ /* Free the old hash table, and plug in the new one. */
hashdone(tbl->tbl_hash, HASH_LIST, tbl->tbl_mask);
- specificdata_fini(fileassoc_domain, &tbl->tbl_data);
- kmem_free(tbl, sizeof(*tbl));
+ tbl->tbl_hash = new_hash;
+ tbl->tbl_nslots = new_nslots;
+ tbl->tbl_mask = new_mask;
- return (newtbl);
+ return (0);
}
/*
* Create a new fileassoc table.
+ *
+ * The new table is returned with reference count set to 1.
*/
static struct fileassoc_table *
fileassoc_table_add(struct mount *mp)
@@ -365,6 +536,9 @@ fileassoc_table_add(struct mount *mp)
&tbl->tbl_mask);
tbl->tbl_nused = 0;
specificdata_init(fileassoc_domain, &tbl->tbl_data);
+ tbl->tbl_refcnt = 1;
+ mutex_init(&tbl->tbl_mtx, MUTEX_DEFAULT, IPL_NONE);
+ cv_init(&tbl->tbl_cv, "fa_tbl");
mount_setspecific(mp, fileassoc_mountspecific_key, tbl);
@@ -383,7 +557,10 @@ fileassoc_table_delete(struct mount *mp)
if (tbl == NULL)
return (EEXIST);
+ /* Detach it from the mount-point. */
mount_setspecific(mp, fileassoc_mountspecific_key, NULL);
+
+ /* Free it. */
table_dtor(tbl);
return (0);
@@ -403,18 +580,30 @@ fileassoc_table_run(struct mount *mp, fi
if (tbl == NULL)
return (EEXIST);
+ table_use(tbl);
+
+ assoc_use(assoc);
+
for (i = 0; i < tbl->tbl_nslots; i++) {
struct fileassoc_file *faf;
LIST_FOREACH(faf, &tbl->tbl_hash[i], faf_list) {
void *data;
+ file_use(faf);
+
data = file_getdata(faf, assoc);
if (data != NULL)
cb(data, cookie);
+
+ file_unuse(faf);
}
}
+ assoc_unuse(assoc);
+
+ table_unuse(tbl);
+
return (0);
}
@@ -431,20 +620,36 @@ fileassoc_table_clear(struct mount *mp,
if (tbl == NULL)
return (EEXIST);
+ table_use(tbl);
+
+ assoc_use(assoc);
+
for (i = 0; i < tbl->tbl_nslots; i++) {
struct fileassoc_file *faf;
LIST_FOREACH(faf, &tbl->tbl_hash[i], faf_list) {
+ mutex_enter(&faf->faf_mtx);
+ while (faf->faf_refcnt > 0)
+ cv_wait(&faf->faf_cv, &faf->faf_mtx);
+
file_cleanup(faf, assoc);
file_setdata(faf, assoc, NULL);
+
+ mutex_exit(&faf->faf_mtx);
}
}
+ assoc_unuse(assoc);
+
+ table_unuse(tbl);
+
return (0);
}
/*
* Add a file entry to a table.
+ *
+ * XXX: Set reference count to 1 here too?
*/
static struct fileassoc_file *
fileassoc_file_add(struct vnode *vp, fhandle_t *hint)
@@ -473,16 +678,24 @@ fileassoc_file_add(struct vnode *vp, fha
tbl = fileassoc_table_lookup(vp->v_mount);
if (tbl == NULL) {
+ /* This will "keep" a reference for us. */
tbl = fileassoc_table_add(vp->v_mount);
+ } else {
+ /* Keep a reference to the table. */
+ table_use(tbl);
}
- indx = FILEASSOC_HASH(tbl, th);
- hash_entry = &(tbl->tbl_hash[indx]);
-
faf = kmem_zalloc(sizeof(*faf), KM_SLEEP);
faf->faf_handle = th;
specificdata_init(fileassoc_domain, &faf->faf_data);
- LIST_INSERT_HEAD(hash_entry, faf, faf_list);
+ faf->faf_refcnt = 0;
+ mutex_init(&faf->faf_mtx, MUTEX_DEFAULT, IPL_NONE);
+ cv_init(&faf->faf_cv, "fa_faf");
+
+ /* We need the table exclusively. */
+ mutex_enter(&tbl->tbl_mtx);
+ while (tbl->tbl_refcnt > 1)
+ cv_wait(&tbl->tbl_cv, &tbl->tbl_mtx);
/*
* This decides when we need to resize the table. For now,
@@ -490,14 +703,19 @@ fileassoc_file_add(struct vnode *vp, fha
* has. That's not really true unless of course we had zero
* collisions. Think positive! :)
*/
- if (++(tbl->tbl_nused) == tbl->tbl_nslots) {
- struct fileassoc_table *newtbl;
-
- newtbl = fileassoc_table_resize(tbl);
- mount_setspecific(vp->v_mount, fileassoc_mountspecific_key,
- newtbl);
+ if (((tbl->tbl_nused) + 1) == tbl->tbl_nslots) {
+ fileassoc_table_resize(tbl);
}
+ indx = FILEASSOC_HASH(tbl->tbl_mask, th);
+ hash_entry = &(tbl->tbl_hash[indx]);
+ LIST_INSERT_HEAD(hash_entry, faf, faf_list);
+ ++(tbl->tbl_nused);
+
+ mutex_exit(&tbl->tbl_mtx);
+
+ table_unuse(tbl);
+
return (faf);
}
@@ -518,10 +736,23 @@ fileassoc_file_delete(struct vnode *vp)
return (ENOENT);
}
+ /* Wait for everyone to finish... */
+ mutex_enter(&faf->faf_mtx);
+ while (faf->faf_refcnt > 0)
+ cv_wait(&faf->faf_cv, &faf->faf_mtx);
+
+ /* Remove it... */
+ LIST_REMOVE(faf, faf_list);
+
+ mutex_exit(&faf->faf_mtx);
+
file_free(faf);
tbl = fileassoc_table_lookup(vp->v_mount);
+
+ mutex_enter(&tbl->tbl_mtx);
--(tbl->tbl_nused); /* XXX gc? */
+ mutex_exit(&tbl->tbl_mtx);
KERNEL_UNLOCK_ONE(NULL);
@@ -544,14 +775,25 @@ fileassoc_add(struct vnode *vp, fileasso
return (ENOTDIR);
}
+ mutex_enter(&faf->faf_mtx);
+
+ assoc_use(assoc);
+
olddata = file_getdata(faf, assoc);
- if (olddata != NULL)
+ if (olddata != NULL) {
+ assoc_unuse(assoc);
+ mutex_exit(&faf->faf_mtx);
return (EEXIST);
+ }
file_setdata(faf, assoc, data);
faf->faf_nassocs++;
+ assoc_unuse(assoc);
+
+ mutex_exit(&faf->faf_mtx);
+
return (0);
}
@@ -567,10 +809,20 @@ fileassoc_clear(struct vnode *vp, fileas
if (faf == NULL)
return (ENOENT);
+ file_use(faf);
+
+ assoc_use(assoc);
+
file_cleanup(faf, assoc);
file_setdata(faf, assoc, NULL);
+ assoc_unuse(assoc);
+
+ mutex_enter(&faf->faf_mtx);
--(faf->faf_nassocs); /* XXX gc? */
+ mutex_exit(&faf->faf_mtx);
+
+ file_unuse(faf);
return (0);
}
Index: sys/fileassoc.h
===================================================================
RCS file: /cvsroot/src/sys/sys/fileassoc.h,v
retrieving revision 1.11
diff -u -p -r1.11 fileassoc.h
--- sys/fileassoc.h 15 May 2007 19:47:44 -0000 1.11
+++ sys/fileassoc.h 18 Jan 2012 17:21:55 -0000
@@ -45,6 +45,7 @@ int fileassoc_table_clear(struct mount *
int fileassoc_file_delete(struct vnode *);
int fileassoc_add(struct vnode *, fileassoc_t, void *);
int fileassoc_clear(struct vnode *, fileassoc_t);
+void fileassoc_release(struct vnode *, fileassoc_t);
int fileassoc_table_run(struct mount *, fileassoc_t, fileassoc_cb_t, void *);
#endif /* !_SYS_FILEASSOC_H_ */
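[The header change above exports fileassoc_release() as a new public entry point. Under the patched API a consumer would presumably pair each fileassoc_lookup() with a matching fileassoc_release() once it is done with the returned data. The sketch below is a hedged illustration only: example_check, the assoc handle and struct my_data are invented, and note that the reference-taking calls inside fileassoc_lookup()/fileassoc_release() are still commented out in this version of the diff, so the exact protocol is not settled.]

/*
 * Hypothetical consumer of the patched API (kernel context, not
 * standalone): look up the per-file data for an assoc, use it, then
 * drop the reference with the new fileassoc_release().
 */
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/vnode.h>
#include <sys/fileassoc.h>

struct my_data;				/* illustrative per-file payload */

static int
example_check(struct vnode *vp, fileassoc_t myassoc)
{
	struct my_data *md;

	md = fileassoc_lookup(vp, myassoc);
	if (md == NULL)
		return ENOENT;

	/* ... examine the data while the entry is held ... */

	fileassoc_release(vp, myassoc);
	return 0;
}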