Source-Changes-HG archive
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]
[src/yamt-pagecache]: src/sys - use O->A loan to serve read(2). based on a p...
details: https://anonhg.NetBSD.org/src/rev/64954e289ee2
branches: yamt-pagecache
changeset: 770848:64954e289ee2
user: yamt <yamt%NetBSD.org@localhost>
date: Mon Dec 26 16:03:10 2011 +0000
description:
- use O->A loan to serve read(2). based on a patch from Chuck Silvers
- associated O->A loan fixes.
diffstat:
sys/kern/kern_mutex_obj.c | 40 ++-
sys/sys/mutex.h | 3 +-
sys/uvm/uvm.h | 29 +-
sys/uvm/uvm_amap.c | 72 +++++-
sys/uvm/uvm_amap.h | 8 +-
sys/uvm/uvm_anon.c | 78 +----
sys/uvm/uvm_extern.h | 29 +-
sys/uvm/uvm_fault.c | 74 +++-
sys/uvm/uvm_loan.c | 604 +++++++++++++++++++++++++++++++++++++++++----
sys/uvm/uvm_loan.h | 6 +-
sys/uvm/uvm_map.c | 20 +-
sys/uvm/uvm_meter.c | 19 +-
sys/uvm/uvm_page.c | 16 +-
sys/uvm/uvm_pdaemon.c | 20 +-
14 files changed, 817 insertions(+), 201 deletions(-)
diffs (truncated from 1676 to 300 lines):
diff -r 2e2ba3fff55a -r 64954e289ee2 sys/kern/kern_mutex_obj.c
--- a/sys/kern/kern_mutex_obj.c Tue Dec 20 13:46:17 2011 +0000
+++ b/sys/kern/kern_mutex_obj.c Mon Dec 26 16:03:10 2011 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: kern_mutex_obj.c,v 1.5.2.1 2011/11/18 00:57:33 yamt Exp $ */
+/* $NetBSD: kern_mutex_obj.c,v 1.5.2.2 2011/12/26 16:03:10 yamt Exp $ */
/*-
* Copyright (c) 2008 The NetBSD Foundation, Inc.
@@ -30,7 +30,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_mutex_obj.c,v 1.5.2.1 2011/11/18 00:57:33 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_mutex_obj.c,v 1.5.2.2 2011/12/26 16:03:10 yamt Exp $");
#include <sys/param.h>
#include <sys/atomic.h>
@@ -147,6 +147,38 @@
}
/*
+ * mutex_obj_free_if_last:
+ *
+ * Drop a reference from a lock object if it's the last reference.
+ * If the last reference is being dropped, free the object and return
+ * true. Otherwise, return false.
+ */
+bool
+mutex_obj_free_if_last(kmutex_t *lock)
+{
+ struct kmutexobj *mo = (struct kmutexobj *)lock;
+ bool ret;
+
+ KASSERTMSG(mo->mo_magic == MUTEX_OBJ_MAGIC,
+ "%s: lock %p: mo->mo_magic (%#x) != MUTEX_OBJ_MAGIC (%#x)",
+ __func__, mo, mo->mo_magic, MUTEX_OBJ_MAGIC);
+ KASSERTMSG(mo->mo_refcnt > 0,
+ "%s: lock %p: mo->mo_refcnt (%#x) == 0",
+ __func__, mo, mo->mo_refcnt);
+
+ /*
+ * if mo_refcnt is 1, no one except us has a reference to it and
+ * thus it's stable.
+ */
+ if (mo->mo_refcnt != 1) {
+ return false;
+ }
+ ret = mutex_obj_free(lock);
+ KASSERT(ret);
+ return true;
+}
+
+/*
* mutex_obj_pause:
*
* Pause until lock1 is available.
@@ -162,6 +194,10 @@
KASSERT(mutex_owned(lock2));
mutex_obj_hold(lock1);
mutex_exit(lock2);
+ /*
+ * acquire and release lock1.
+ * this can involve priority lending.
+ */
mutex_enter(lock1);
mutex_exit(lock1);
mutex_obj_free(lock1);
diff -r 2e2ba3fff55a -r 64954e289ee2 sys/sys/mutex.h
--- a/sys/sys/mutex.h Tue Dec 20 13:46:17 2011 +0000
+++ b/sys/sys/mutex.h Mon Dec 26 16:03:10 2011 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: mutex.h,v 1.20.10.1 2011/11/18 00:57:33 yamt Exp $ */
+/* $NetBSD: mutex.h,v 1.20.10.2 2011/12/26 16:03:10 yamt Exp $ */
/*-
* Copyright (c) 2002, 2006, 2007, 2008, 2009 The NetBSD Foundation, Inc.
@@ -210,6 +210,7 @@
kmutex_t *mutex_obj_alloc(kmutex_type_t, int);
void mutex_obj_hold(kmutex_t *);
bool mutex_obj_free(kmutex_t *);
+bool mutex_obj_free_if_last(kmutex_t *);
void mutex_obj_pause(kmutex_t *, kmutex_t *);
kmutex_t *mutex_obj_alloc_kernel_obj_lock(kmutex_type_t, int);
diff -r 2e2ba3fff55a -r 64954e289ee2 sys/uvm/uvm.h
--- a/sys/uvm/uvm.h Tue Dec 20 13:46:17 2011 +0000
+++ b/sys/uvm/uvm.h Mon Dec 26 16:03:10 2011 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm.h,v 1.62.4.3 2011/11/20 10:52:33 yamt Exp $ */
+/* $NetBSD: uvm.h,v 1.62.4.4 2011/12/26 16:03:10 yamt Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -91,16 +91,35 @@
int64_t loan_obj; /* O->K loan */
int64_t unloan_obj; /* O->K unloan */
- int64_t loanbreak_obj; /* O->K loan resolved on write */
- int64_t loanfree_obj; /* O->K loan resolved on free */
+ int64_t loanbreak_obj; /* O->K loan resolved on write to O */
+ int64_t loanfree_obj; /* O->K loan resolved on free of O */
int64_t loan_anon; /* A->K loan */
int64_t unloan_anon; /* A->K unloan */
- int64_t loanbreak_anon; /* A->K loan resolved on write */
- int64_t loanfree_anon; /* A->K loan resolved on free */
+ int64_t loanbreak_anon; /* A->K loan resolved on write to A */
+ int64_t loanfree_anon; /* A->K loan resolved on free of A */
+
+ int64_t loan_oa; /* O->A->K loan */
+ int64_t unloan_oa; /* O->A->K unloan */
int64_t loan_zero; /* O->K loan (zero) */
int64_t unloan_zero; /* O->K unloan (zero) */
+
+ int64_t loanbreak_orphaned; /* O->A->K loan turned into A->K loan due to
+ write to O */
+ int64_t loanfree_orphaned; /* O->A->K loan turned into A->K loan due to
+ free of O */
+ int64_t loanbreak_orphaned_anon; /* O->A->K loan turned into O->K loan
+ due to write to A */
+ int64_t loanfree_orphaned_anon; /* O->A->K loan turned into O->K loan
+ due to free of A */
+
+ int64_t loanbreak_oa_obj; /* O->A loan resolved on write to O */
+ int64_t loanfree_oa_obj; /* O->A loan resolved on free of O */
+ int64_t loanbreak_oa_anon; /* O->A loan resolved on write to A */
+ int64_t loanfree_oa_anon; /* O->A loan resolved on free of A */
+ int64_t loan_resolve_orphan; /* O->A loaned page taken over by anon */
+ int64_t loan_obj_read; /* O->A loan for read(2) */
};
/*
diff -r 2e2ba3fff55a -r 64954e289ee2 sys/uvm/uvm_amap.c
--- a/sys/uvm/uvm_amap.c Tue Dec 20 13:46:17 2011 +0000
+++ b/sys/uvm/uvm_amap.c Mon Dec 26 16:03:10 2011 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_amap.c,v 1.104 2011/10/11 23:57:50 yamt Exp $ */
+/* $NetBSD: uvm_amap.c,v 1.104.2.1 2011/12/26 16:03:10 yamt Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -35,7 +35,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_amap.c,v 1.104 2011/10/11 23:57:50 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_amap.c,v 1.104.2.1 2011/12/26 16:03:10 yamt Exp $");
#include "opt_uvmhist.h"
@@ -178,6 +178,7 @@
}
totalslots = amap_roundup_slots(slots + padslots);
amap->am_lock = NULL;
+ amap->am_obj_lock = NULL;
amap->am_ref = 1;
amap->am_flags = 0;
#ifdef UVM_AMAP_PPREF
@@ -288,6 +289,9 @@
KASSERT(!mutex_owned(amap->am_lock));
mutex_obj_free(amap->am_lock);
}
+ if (amap->am_obj_lock != NULL) {
+ mutex_obj_free(amap->am_obj_lock);
+ }
slots = amap->am_maxslot;
kmem_free(amap->am_slots, slots * sizeof(*amap->am_slots));
kmem_free(amap->am_bckptr, slots * sizeof(*amap->am_bckptr));
@@ -767,6 +771,7 @@
struct vm_anon *tofree;
u_int slots, lcv;
vsize_t len;
+ bool have_obj_page;
UVMHIST_FUNC("amap_copy"); UVMHIST_CALLED(maphist);
UVMHIST_LOG(maphist, " (map=%p, entry=%p, flags=%d)",
@@ -881,13 +886,22 @@
*/
UVMHIST_LOG(maphist, " copying amap now",0, 0, 0, 0);
+ have_obj_page = false;
for (lcv = 0 ; lcv < slots; lcv++) {
- amap->am_anon[lcv] =
+ struct vm_anon * const anon =
srcamap->am_anon[entry->aref.ar_pageoff + lcv];
- if (amap->am_anon[lcv] == NULL)
+
+ amap->am_anon[lcv] = anon;
+ if (anon == NULL)
continue;
- KASSERT(amap->am_anon[lcv]->an_lock == srcamap->am_lock);
- KASSERT(amap->am_anon[lcv]->an_ref > 0);
+ if (anon->an_page != NULL && anon->an_page->uobject != NULL) {
+ KASSERT(anon->an_page->loan_count > 0);
+ KASSERT(srcamap->am_obj_lock ==
+ anon->an_page->uobject->vmobjlock);
+ have_obj_page = true;
+ }
+ KASSERT(anon->an_lock == srcamap->am_lock);
+ KASSERT(anon->an_ref > 0);
amap->am_anon[lcv]->an_ref++;
amap->am_bckptr[lcv] = amap->am_nused;
amap->am_slots[amap->am_nused] = lcv;
@@ -925,6 +939,10 @@
if (amap->am_nused != 0) {
amap->am_lock = srcamap->am_lock;
mutex_obj_hold(amap->am_lock);
+ if (have_obj_page) {
+ amap->am_obj_lock = srcamap->am_obj_lock;
+ mutex_obj_hold(amap->am_obj_lock);
+ }
}
uvm_anon_freelst(srcamap, tofree);
@@ -1618,3 +1636,45 @@
UVMHIST_LOG(maphist,"<- done!", 0, 0, 0, 0);
}
+
+void
+amap_lock(struct vm_amap *amap)
+{
+
+ mutex_enter(amap->am_lock);
+ if (amap->am_obj_lock != NULL) {
+ if (mutex_obj_free_if_last(amap->am_obj_lock)) {
+ amap->am_obj_lock = NULL;
+ } else {
+ mutex_enter(amap->am_obj_lock);
+ }
+ }
+}
+
+int
+amap_lock_try(struct vm_amap *amap)
+{
+
+ if (!mutex_tryenter(amap->am_lock)) {
+ return 0;
+ }
+ if (amap->am_obj_lock != NULL) {
+ if (mutex_obj_free_if_last(amap->am_obj_lock)) {
+ amap->am_obj_lock = NULL;
+ } else if (!mutex_tryenter(amap->am_obj_lock)) {
+ mutex_exit(amap->am_lock);
+ return 0;
+ }
+ }
+ return 1;
+}
+
+void
+amap_unlock(struct vm_amap *amap)
+{
+
+ if (amap->am_obj_lock != NULL) {
+ mutex_exit(amap->am_obj_lock);
+ }
+ mutex_exit(amap->am_lock);
+}
diff -r 2e2ba3fff55a -r 64954e289ee2 sys/uvm/uvm_amap.h
--- a/sys/uvm/uvm_amap.h Tue Dec 20 13:46:17 2011 +0000
+++ b/sys/uvm/uvm_amap.h Mon Dec 26 16:03:10 2011 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_amap.h,v 1.37 2011/06/12 03:36:02 rmind Exp $ */
+/* $NetBSD: uvm_amap.h,v 1.37.2.1 2011/12/26 16:03:10 yamt Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -82,6 +82,8 @@
(struct vm_amap *);
void amap_lock /* lock amap */
(struct vm_amap *);
+int amap_lock_try /* trylock amap */
+ (struct vm_amap *);
struct vm_anon *amap_lookup /* lookup an anon @ offset in amap */
(struct vm_aref *, vaddr_t);
void amap_lookups /* lookup multiple anons */
@@ -152,6 +154,7 @@
struct vm_amap {
kmutex_t *am_lock; /* lock [locks all vm_amap fields] */
+ kmutex_t *am_obj_lock; /* lock of the uobj which might lend us pages */
int am_ref; /* reference count */
int am_flags; /* flags */
int am_maxslot; /* max # of slots allocated */
@@ -251,10 +254,7 @@
*/
#define amap_flags(AMAP) ((AMAP)->am_flags)
-#define amap_lock(AMAP) mutex_enter((AMAP)->am_lock)
-#define amap_lock_try(AMAP) mutex_tryenter((AMAP)->am_lock)
#define amap_refs(AMAP) ((AMAP)->am_ref)
-#define amap_unlock(AMAP) mutex_exit((AMAP)->am_lock)
/*
* if we enable PPREF, then we have a couple of extra functions that
diff -r 2e2ba3fff55a -r 64954e289ee2 sys/uvm/uvm_anon.c
Home |
Main Index |
Thread Index |
Old Index