Source-Changes-HG archive
[src/rmind-uvmplock]: src/sys Reorganise UVM locking to protect P->V state and serialise pmap(9) operations
details: https://anonhg.NetBSD.org/src/rev/d540665d9a3b
branches: rmind-uvmplock
changeset: 753043:d540665d9a3b
user: rmind <rmind@NetBSD.org>
date: Wed Mar 17 06:03:16 2010 +0000
description:
Reorganise UVM locking to protect P->V state and serialise pmap(9)
operations on the same page(s) by always locking their owner. Hence
lock order: "vmpage"-lock -> pmap-lock.
Patch, proposed on tech-kern@, from Andrew Doran.
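
To make the new rule concrete, here is a minimal userland sketch of the lock
order described above (illustrative only, not part of the commit: struct owner
and struct pmap are hypothetical stand-ins for a page's owner, i.e. an anon or
uvm_object, and for a pmap with its internal lock, and pthread mutexes stand
in for the kernel's kmutex_t):

#include <pthread.h>

/* Hypothetical stand-in for a page's owner (an anon or uvm_object). */
struct owner {
        pthread_mutex_t o_lock;         /* the "vmpage"-lock */
};

/* Hypothetical stand-in for a pmap and its internal lock. */
struct pmap {
        pthread_mutex_t pm_lock;        /* the pmap-lock, taken second */
};

/*
 * Any operation that changes P->V state for a page first locks the
 * page's owner, then the pmap: "vmpage"-lock -> pmap-lock, never the
 * reverse.  Two threads operating on the same page(s) therefore
 * serialise on the owner's lock before either reaches the pmap.
 */
static void
page_op(struct owner *o, struct pmap *pm)
{
        pthread_mutex_lock(&o->o_lock);         /* 1. owner ("vmpage") lock */
        pthread_mutex_lock(&pm->pm_lock);       /* 2. pmap lock */
        /* ... modify P->V state / mappings here ... */
        pthread_mutex_unlock(&pm->pm_lock);
        pthread_mutex_unlock(&o->o_lock);
}

int
main(void)
{
        struct owner o;
        struct pmap pm;

        pthread_mutex_init(&o.o_lock, NULL);
        pthread_mutex_init(&pm.pm_lock, NULL);
        page_op(&o, &pm);
        return 0;
}

The genfs_io.c hunk below is this rule in action: vp->v_interlock, the lock
of the pages' owner, is taken around the pmap_extract()/pmap_enter() loop and
around pmap_remove().
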
diffstat:
sys/miscfs/genfs/genfs_io.c | 8 +-
sys/uvm/uvm_amap.c | 129 +++++++++++++++---------
sys/uvm/uvm_amap.h | 15 +-
sys/uvm/uvm_anon.c | 63 ++++++-----
sys/uvm/uvm_anon.h | 23 ++-
sys/uvm/uvm_bio.c | 25 ++--
sys/uvm/uvm_device.c | 12 +-
sys/uvm/uvm_fault.c | 220 +++++++++++++++++++++---------------------
sys/uvm/uvm_fault_i.h | 8 +-
sys/uvm/uvm_km.c | 15 ++-
sys/uvm/uvm_loan.c | 68 ++++++------
sys/uvm/uvm_map.c | 59 ++++++++--
sys/uvm/uvm_map.h | 5 +-
sys/uvm/uvm_page.c | 32 +++++-
sys/uvm/uvm_page.h | 3 +-
sys/uvm/uvm_pager.c | 6 +-
sys/uvm/uvm_pdaemon.c | 8 +-
sys/uvm/uvm_pdpolicy_clock.c | 31 ++++-
sys/uvm/uvm_vnode.c | 6 +-
19 files changed, 430 insertions(+), 306 deletions(-)
diffs (truncated from 2435 to 300 lines):
diff -r 429f05fb0ad6 -r d540665d9a3b sys/miscfs/genfs/genfs_io.c
--- a/sys/miscfs/genfs/genfs_io.c Tue Mar 16 15:38:02 2010 +0000
+++ b/sys/miscfs/genfs/genfs_io.c Wed Mar 17 06:03:16 2010 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: genfs_io.c,v 1.36.4.1 2010/03/16 15:38:11 rmind Exp $ */
+/* $NetBSD: genfs_io.c,v 1.36.4.2 2010/03/17 06:03:16 rmind Exp $ */
/*
* Copyright (c) 1982, 1986, 1989, 1993
@@ -31,7 +31,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: genfs_io.c,v 1.36.4.1 2010/03/16 15:38:11 rmind Exp $");
+__KERNEL_RCSID(0, "$NetBSD: genfs_io.c,v 1.36.4.2 2010/03/17 06:03:16 rmind Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@@ -1757,11 +1757,13 @@
kva = uvm_km_alloc(kernel_map, klen, 0,
UVM_KMF_VAONLY | UVM_KMF_WAITVA);
puva = trunc_page(uva);
+ mutex_enter(vp->v_interlock);
for (poff = 0; poff < klen; poff += PAGE_SIZE) {
rv = pmap_extract(upm, puva + poff, &pa);
KASSERT(rv);
pmap_enter(kpm, kva + poff, pa, prot, prot | PMAP_WIRED);
}
+ mutex_exit(vp->v_interlock);
pmap_update(kpm);
/*
@@ -1776,7 +1778,9 @@
* Tear down the kernel mapping.
*/
+ mutex_enter(vp->v_interlock);
pmap_remove(kpm, kva, kva + klen);
+ mutex_exit(vp->v_interlock);
pmap_update(kpm);
uvm_km_free(kernel_map, kva, klen, UVM_KMF_VAONLY);
diff -r 429f05fb0ad6 -r d540665d9a3b sys/uvm/uvm_amap.c
--- a/sys/uvm/uvm_amap.c Tue Mar 16 15:38:02 2010 +0000
+++ b/sys/uvm/uvm_amap.c Wed Mar 17 06:03:16 2010 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_amap.c,v 1.88 2009/10/21 21:12:07 rmind Exp $ */
+/* $NetBSD: uvm_amap.c,v 1.88.4.1 2010/03/17 06:03:16 rmind Exp $ */
/*
*
@@ -42,7 +42,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_amap.c,v 1.88 2009/10/21 21:12:07 rmind Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_amap.c,v 1.88.4.1 2010/03/17 06:03:16 rmind Exp $");
#include "opt_uvmhist.h"
@@ -171,7 +171,7 @@
* amap_alloc1: internal function that allocates an amap, but does not
* init the overlay.
*
- * => lock on returned amap is init'd
+ * => lock is not initialized
*/
static inline struct vm_amap *
amap_alloc1(int slots, int padslots, int waitf)
@@ -187,7 +187,7 @@
kmflags = ((waitf & UVM_FLAG_NOWAIT) != 0) ? KM_NOSLEEP : KM_SLEEP;
totalslots = amap_roundup_slots(slots + padslots);
- mutex_init(&amap->am_l, MUTEX_DEFAULT, IPL_NONE);
+ amap->am_lock = NULL;
amap->am_ref = 1;
amap->am_flags = 0;
#ifdef UVM_AMAP_PPREF
@@ -217,7 +217,6 @@
fail2:
kmem_free(amap->am_slots, totalslots * sizeof(int));
fail1:
- mutex_destroy(&amap->am_l);
pool_cache_put(&uvm_amap_cache, amap);
/*
@@ -255,6 +254,7 @@
if (amap) {
memset(amap->am_anon, 0,
amap->am_maxslot * sizeof(struct vm_anon *));
+ amap->am_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
amap_list_insert(amap);
}
@@ -290,7 +290,10 @@
KASSERT(amap->am_ref == 0 && amap->am_nused == 0);
KASSERT((amap->am_flags & AMAP_SWAPOFF) == 0);
- KASSERT(!mutex_owned(&amap->am_l));
+ if (amap->am_lock != NULL) {
+ KASSERT(!mutex_owned(amap->am_lock));
+ mutex_obj_free(amap->am_lock);
+ }
slots = amap->am_maxslot;
kmem_free(amap->am_slots, slots * sizeof(*amap->am_slots));
kmem_free(amap->am_bckptr, slots * sizeof(*amap->am_bckptr));
@@ -299,7 +302,6 @@
if (amap->am_ppref && amap->am_ppref != PPREF_NONE)
kmem_free(amap->am_ppref, slots * sizeof(*amap->am_ppref));
#endif
- mutex_destroy(&amap->am_l);
pool_cache_put(&uvm_amap_cache, amap);
UVMHIST_LOG(maphist,"<- done, freed amap = 0x%x", amap, 0, 0, 0);
}
@@ -325,7 +327,7 @@
int *newppref, *oldppref;
#endif
int i, *newsl, *newbck, *oldsl, *oldbck;
- struct vm_anon **newover, **oldover;
+ struct vm_anon **newover, **oldover, *tofree;
const km_flag_t kmflags =
(flags & AMAP_EXTEND_NOWAIT) ? KM_NOSLEEP : KM_SLEEP;
@@ -354,6 +356,7 @@
slotadj = slotadd - slotoff;
slotspace = amap->am_maxslot - slotmapped;
}
+ tofree = NULL;
/*
* case 1: we already have enough slots in the map and thus
@@ -366,8 +369,9 @@
#ifdef UVM_AMAP_PPREF
if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
amap_pp_adjref(amap, slotoff + slotmapped,
- slotadd, 1);
+ slotadd, 1, &tofree);
}
+ uvm_anfree(tofree);
#endif
amap_unlock(amap);
UVMHIST_LOG(maphist,
@@ -381,8 +385,10 @@
entry->aref.ar_pageoff = slotoff;
#ifdef UVM_AMAP_PPREF
if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
- amap_pp_adjref(amap, slotoff, slotadd, 1);
+ amap_pp_adjref(amap, slotoff, slotadd, 1,
+ &tofree);
}
+ uvm_anfree(tofree);
#endif
amap_unlock(amap);
UVMHIST_LOG(maphist,
@@ -405,12 +411,14 @@
amap_pp_adjref(amap,
slotoff + slotmapped,
(amap->am_nslot -
- (slotoff + slotmapped)), 1);
+ (slotoff + slotmapped)), 1,
+ &tofree);
pp_setreflen(amap->am_ppref, amap->am_nslot, 1,
slotneed - amap->am_nslot);
}
#endif
amap->am_nslot = slotneed;
+ uvm_anfree(tofree);
amap_unlock(amap);
/*
@@ -583,7 +591,8 @@
if ((flags & AMAP_EXTEND_FORWARDS) &&
(slotoff + slotmapped) < amap->am_nslot)
amap_pp_adjref(amap, slotoff + slotmapped,
- (amap->am_nslot - (slotoff + slotmapped)), 1);
+ (amap->am_nslot - (slotoff + slotmapped)), 1,
+ &tofree);
if (flags & AMAP_EXTEND_FORWARDS)
pp_setreflen(newppref, amap->am_nslot, 1,
slotneed - amap->am_nslot);
@@ -609,6 +618,7 @@
oldnslots = amap->am_maxslot;
amap->am_maxslot = slotalloc;
+ uvm_anfree(tofree);
amap_unlock(amap);
kmem_free(oldsl, oldnslots * sizeof(*oldsl));
kmem_free(oldbck, oldnslots * sizeof(*oldbck));
@@ -641,7 +651,7 @@
struct vm_amap *amap = entry->aref.ar_amap;
int slots, lcv, slot, stop;
- KASSERT(mutex_owned(&amap->am_l));
+ KASSERT(mutex_owned(amap->am_lock));
AMAP_B2SLOT(slots, (entry->end - entry->start));
stop = entry->aref.ar_pageoff + slots;
@@ -694,7 +704,6 @@
return;
}
amap_list_remove(amap);
- amap_unlock(amap);
for (lcv = 0 ; lcv < amap->am_nused ; lcv++) {
int refs;
@@ -703,11 +712,10 @@
anon = amap->am_anon[slot];
KASSERT(anon != NULL && anon->an_ref != 0);
- mutex_enter(&anon->an_lock);
+ KASSERT(anon->an_lock == amap->am_lock);
UVMHIST_LOG(maphist," processing anon 0x%x, ref=%d", anon,
anon->an_ref, 0, 0);
refs = --anon->an_ref;
- mutex_exit(&anon->an_lock);
if (refs == 0) {
/*
@@ -726,6 +734,7 @@
*/
amap->am_nused = 0;
+ amap_unlock(amap);
amap_free(amap); /* will unlock and free amap */
UVMHIST_LOG(maphist,"<- done!", 0,0,0,0);
}
@@ -749,10 +758,12 @@
vaddr_t startva, vaddr_t endva)
{
struct vm_amap *amap, *srcamap;
+ struct vm_anon *tofree;
int slots, lcv;
vaddr_t chunksize;
const int waitf = (flags & AMAP_COPY_NOWAIT) ? UVM_FLAG_NOWAIT : 0;
const bool canchunk = (flags & AMAP_COPY_NOCHUNK) == 0;
+ kmutex_t *lock;
UVMHIST_FUNC("amap_copy"); UVMHIST_CALLED(maphist);
UVMHIST_LOG(maphist, " (map=%p, entry=%p, flags=%d)",
map, entry, flags, 0);
@@ -859,9 +870,8 @@
srcamap->am_anon[entry->aref.ar_pageoff + lcv];
if (amap->am_anon[lcv] == NULL)
continue;
- mutex_enter(&amap->am_anon[lcv]->an_lock);
+ KASSERT(amap->am_anon[lcv]->an_lock == srcamap->am_lock);
amap->am_anon[lcv]->an_ref++;
- mutex_exit(&amap->am_anon[lcv]->an_lock);
amap->am_bckptr[lcv] = amap->am_nused;
amap->am_slots[amap->am_nused] = lcv;
amap->am_nused++;
@@ -879,15 +889,28 @@
srcamap->am_ref--;
if (srcamap->am_ref == 1 && (srcamap->am_flags & AMAP_SHARED) != 0)
srcamap->am_flags &= ~AMAP_SHARED; /* clear shared flag */
+ tofree = NULL;
#ifdef UVM_AMAP_PPREF
if (srcamap->am_ppref && srcamap->am_ppref != PPREF_NONE) {
amap_pp_adjref(srcamap, entry->aref.ar_pageoff,
- (entry->end - entry->start) >> PAGE_SHIFT, -1);
+ (entry->end - entry->start) >> PAGE_SHIFT, -1, &tofree);
}
#endif
-
+ uvm_anfree(tofree);
amap_unlock(srcamap);
+ /*
+ * if we referenced any anons then share the source amap's lock.
+ * otherwise we have nothing in common, so allocate a new one.
+ */
+
+ if (amap->am_nused != 0) {
+ lock = srcamap->am_lock;
+ mutex_obj_hold(lock);
+ } else {
+ lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
+ }
+ amap->am_lock = lock;
amap_list_insert(amap);
/*
@@ -946,7 +969,7 @@
slot = amap->am_slots[lcv];
anon = amap->am_anon[slot];
- mutex_enter(&anon->an_lock);
+ KASSERT(anon->an_lock == amap->am_lock);
/*
* If the anon has only one ref, we must have already copied it.
@@ -957,7 +980,6 @@
if (anon->an_ref == 1) {
KASSERT(anon->an_page != NULL || anon->an_swslot != 0);
- mutex_exit(&anon->an_lock);
continue;
}
@@ -975,7 +997,6 @@
*/
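
A note on the amap hunks above: the embedded per-amap mutex (am_l) becomes a
pointer, am_lock, to a reference-counted lock object that amap_copy() shares
with the source amap whenever the new amap references any of the source's
anons.  Below is a minimal userland sketch of that sharing pattern
(illustrative only, not part of the commit; the lockobj_* helpers are
hypothetical stand-ins for the kernel's mutex_obj_alloc()/mutex_obj_hold()/
mutex_obj_free()):

#include <pthread.h>
#include <stdlib.h>

struct lockobj {
        pthread_mutex_t lo_mutex;       /* the shared lock itself */
        unsigned        lo_refcnt;      /* how many structures share it */
};

static pthread_mutex_t lockobj_refs = PTHREAD_MUTEX_INITIALIZER;

static struct lockobj *
lockobj_alloc(void)                     /* cf. mutex_obj_alloc() */
{
        struct lockobj *lo;

        if ((lo = malloc(sizeof(*lo))) == NULL)
                abort();
        pthread_mutex_init(&lo->lo_mutex, NULL);
        lo->lo_refcnt = 1;
        return lo;
}

static void
lockobj_hold(struct lockobj *lo)        /* cf. mutex_obj_hold() */
{
        pthread_mutex_lock(&lockobj_refs);
        lo->lo_refcnt++;
        pthread_mutex_unlock(&lockobj_refs);
}

static void
lockobj_free(struct lockobj *lo)        /* cf. mutex_obj_free() */
{
        unsigned refs;

        pthread_mutex_lock(&lockobj_refs);
        refs = --lo->lo_refcnt;
        pthread_mutex_unlock(&lockobj_refs);
        if (refs == 0) {
                pthread_mutex_destroy(&lo->lo_mutex);
                free(lo);
        }
}

/*
 * Mirrors the amap_copy() hunk: if the new amap took references on any
 * of the source's anons, it must share the source's lock; otherwise the
 * two have nothing in common and a fresh lock is allocated.
 */
static struct lockobj *
choose_amap_lock(struct lockobj *srclock, int nused)
{
        if (nused != 0) {
                lockobj_hold(srclock);
                return srclock;
        }
        return lockobj_alloc();
}

int
main(void)
{
        struct lockobj *src = lockobj_alloc();
        struct lockobj *shared = choose_amap_lock(src, 1); /* shares src */
        struct lockobj *fresh = choose_amap_lock(src, 0);  /* new lock */

        lockobj_free(fresh);
        lockobj_free(shared);   /* drops one of src's two references */
        lockobj_free(src);      /* last reference: lock is destroyed */
        return 0;
}

Sharing one lock object this way is what makes the new assertions in the
diff, e.g. KASSERT(anon->an_lock == amap->am_lock), hold: taking the amap's
lock now also serialises its anons, which is why the per-anon
mutex_enter()/mutex_exit() pairs could be dropped.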