Source-Changes-HG archive
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]
[src/yamt-pagecache]: src/sys/uvm redo the page clean/dirty/unknown accounting separately for file and anonymous pages
details: https://anonhg.NetBSD.org/src/rev/b2f662120b62
branches: yamt-pagecache
changeset: 770825:b2f662120b62
user: yamt <yamt%NetBSD.org@localhost>
date: Sat Nov 12 02:54:04 2011 +0000
description:
redo the page clean/dirty/unknown accounting separately for file and
anonymous pages
diffstat:
sys/uvm/uvm.h | 9 ++-
sys/uvm/uvm_extern.h | 5 +-
sys/uvm/uvm_meter.c | 15 ++++--
sys/uvm/uvm_page.c | 112 +++++++++++++++++++++++++++------------------
sys/uvm/uvm_page_status.c | 22 ++++++--
5 files changed, 103 insertions(+), 60 deletions(-)
diffs (truncated from 392 to 300 lines):
diff -r cdbc9097c8e1 -r b2f662120b62 sys/uvm/uvm.h
--- a/sys/uvm/uvm.h Fri Nov 11 10:34:24 2011 +0000
+++ b/sys/uvm/uvm.h Sat Nov 12 02:54:04 2011 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm.h,v 1.62.4.1 2011/11/11 10:34:24 yamt Exp $ */
+/* $NetBSD: uvm.h,v 1.62.4.2 2011/11/12 02:54:04 yamt Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -82,7 +82,12 @@
int pages[PGFL_NQUEUES]; /* total of pages in page_free */
u_int emap_gen; /* emap generation number */
- int64_t pagestate[UVM_PAGE_NUM_STATUS];
+ /*
+ * pagestate
+ * [0] non-anonymous
+ * [1] anonymous (PQ_SWAPBACKED)
+ */
+ int64_t pagestate[2][UVM_PAGE_NUM_STATUS];
};
/*
diff -r cdbc9097c8e1 -r b2f662120b62 sys/uvm/uvm_extern.h
--- a/sys/uvm/uvm_extern.h Fri Nov 11 10:34:24 2011 +0000
+++ b/sys/uvm/uvm_extern.h Sat Nov 12 02:54:04 2011 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_extern.h,v 1.176.2.1 2011/11/11 10:34:24 yamt Exp $ */
+/* $NetBSD: uvm_extern.h,v 1.176.2.2 2011/11/12 02:54:04 yamt Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -459,6 +459,9 @@
int64_t mightdirtypages;
int64_t cleanpages;
int64_t dirtypages;
+ int64_t mightdirtyanonpages;
+ int64_t cleananonpages;
+ int64_t dirtyanonpages;
};
#ifdef _KERNEL
diff -r cdbc9097c8e1 -r b2f662120b62 sys/uvm/uvm_meter.c
--- a/sys/uvm/uvm_meter.c Fri Nov 11 10:34:24 2011 +0000
+++ b/sys/uvm/uvm_meter.c Sat Nov 12 02:54:04 2011 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_meter.c,v 1.56.4.1 2011/11/11 10:34:24 yamt Exp $ */
+/* $NetBSD: uvm_meter.c,v 1.56.4.2 2011/11/12 02:54:04 yamt Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -36,7 +36,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_meter.c,v 1.56.4.1 2011/11/11 10:34:24 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_meter.c,v 1.56.4.2 2011/11/12 02:54:04 yamt Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@@ -179,9 +179,14 @@
for (CPU_INFO_FOREACH(cii, ci)) {
struct uvm_cpu *ucpu = ci->ci_data.cpu_uvm;
- u.mightdirtypages += ucpu->pagestate[UVM_PAGE_STATUS_UNKNOWN];
- u.cleanpages += ucpu->pagestate[UVM_PAGE_STATUS_CLEAN];
- u.dirtypages += ucpu->pagestate[UVM_PAGE_STATUS_DIRTY];
+ u.mightdirtypages +=
+ ucpu->pagestate[0][UVM_PAGE_STATUS_UNKNOWN];
+ u.cleanpages += ucpu->pagestate[0][UVM_PAGE_STATUS_CLEAN];
+ u.dirtypages += ucpu->pagestate[0][UVM_PAGE_STATUS_DIRTY];
+ u.mightdirtyanonpages +=
+ ucpu->pagestate[1][UVM_PAGE_STATUS_UNKNOWN];
+ u.cleananonpages += ucpu->pagestate[1][UVM_PAGE_STATUS_CLEAN];
+ u.dirtyanonpages += ucpu->pagestate[1][UVM_PAGE_STATUS_DIRTY];
}
node = *rnode;
node.sysctl_data = &u;
diff -r cdbc9097c8e1 -r b2f662120b62 sys/uvm/uvm_page.c
--- a/sys/uvm/uvm_page.c Fri Nov 11 10:34:24 2011 +0000
+++ b/sys/uvm/uvm_page.c Sat Nov 12 02:54:04 2011 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_page.c,v 1.178.2.3 2011/11/11 10:34:24 yamt Exp $ */
+/* $NetBSD: uvm_page.c,v 1.178.2.4 2011/11/12 02:54:04 yamt Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -66,7 +66,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.178.2.3 2011/11/11 10:34:24 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.178.2.4 2011/11/12 02:54:04 yamt Exp $");
#include "opt_ddb.h"
#include "opt_uvmhist.h"
@@ -162,15 +162,15 @@
* uvm_pageinsert: insert a page in the object.
*
* => caller must lock object
- * => caller must lock page queues
* => call should have already set pg's object and offset pointers
- * and bumped the version counter
*/
static inline void
uvm_pageinsert_list(struct uvm_object *uobj, struct vm_page *pg,
struct vm_page *where)
{
+ const bool isvnode = UVM_OBJ_IS_VNODE(uobj);
+ const bool isaobj = UVM_OBJ_IS_AOBJ(uobj);
KASSERT(uobj == pg->uobject);
KASSERT(mutex_owned(uobj->vmobjlock));
@@ -178,32 +178,39 @@
KASSERT(where == NULL || (where->flags & PG_TABLED));
KASSERT(where == NULL || (where->uobject == uobj));
- if (UVM_OBJ_IS_VNODE(uobj)) {
- if (uobj->uo_npages == 0) {
- struct vnode *vp = (struct vnode *)uobj;
+ if (isvnode || isaobj) {
+ struct uvm_cpu *ucpu;
+ const unsigned int status = uvm_pagegetdirty(pg);
- vholdl(vp);
+ kpreempt_disable();
+ ucpu = curcpu()->ci_data.cpu_uvm;
+ ucpu->pagestate[isaobj][status]++;
+ kpreempt_enable();
+ if (isvnode) {
+ if (uobj->uo_npages == 0) {
+ struct vnode *vp = (struct vnode *)uobj;
+
+ vholdl(vp);
+ }
+ if (UVM_OBJ_IS_VTEXT(uobj)) {
+ atomic_inc_uint(&uvmexp.execpages);
+ } else {
+ atomic_inc_uint(&uvmexp.filepages);
+ }
+ } else {
+ atomic_inc_uint(&uvmexp.anonpages);
}
- if (UVM_OBJ_IS_VTEXT(uobj)) {
- atomic_inc_uint(&uvmexp.execpages);
- } else {
- atomic_inc_uint(&uvmexp.filepages);
- }
- } else if (UVM_OBJ_IS_AOBJ(uobj)) {
- atomic_inc_uint(&uvmexp.anonpages);
}
pg->flags |= PG_TABLED;
uobj->uo_npages++;
}
-
static inline int
uvm_pageinsert_tree(struct uvm_object *uobj, struct vm_page *pg)
{
const uint64_t idx = pg->offset >> PAGE_SHIFT;
int error;
- KASSERT(uobj == pg->uobject);
error = radix_tree_insert_node(&uobj->uo_pages, idx, pg);
if (error != 0) {
return error;
@@ -222,6 +229,7 @@
int error;
KDASSERT(uobj != NULL);
+ KASSERT(uobj == pg->uobject);
error = uvm_pageinsert_tree(uobj, pg);
if (error != 0) {
KASSERT(error == ENOMEM);
@@ -235,33 +243,41 @@
* uvm_page_remove: remove page from object.
*
* => caller must lock object
- * => caller must lock page queues
*/
static inline void
uvm_pageremove_list(struct uvm_object *uobj, struct vm_page *pg)
{
+ const bool isvnode = UVM_OBJ_IS_VNODE(uobj);
+ const bool isaobj = UVM_OBJ_IS_AOBJ(uobj);
KASSERT(uobj == pg->uobject);
KASSERT(mutex_owned(uobj->vmobjlock));
KASSERT(pg->flags & PG_TABLED);
- if (UVM_OBJ_IS_VNODE(uobj)) {
- if (uobj->uo_npages == 1) {
- struct vnode *vp = (struct vnode *)uobj;
+ if (isvnode || isaobj) {
+ struct uvm_cpu *ucpu;
+ const unsigned int status = uvm_pagegetdirty(pg);
- holdrelel(vp);
+ kpreempt_disable();
+ ucpu = curcpu()->ci_data.cpu_uvm;
+ ucpu->pagestate[isaobj][status]--;
+ kpreempt_enable();
+ if (isvnode) {
+ if (uobj->uo_npages == 1) {
+ struct vnode *vp = (struct vnode *)uobj;
+
+ holdrelel(vp);
+ }
+ if (UVM_OBJ_IS_VTEXT(uobj)) {
+ atomic_dec_uint(&uvmexp.execpages);
+ } else {
+ atomic_dec_uint(&uvmexp.filepages);
+ }
+ } else {
+ atomic_dec_uint(&uvmexp.anonpages);
}
- if (UVM_OBJ_IS_VTEXT(uobj)) {
- atomic_dec_uint(&uvmexp.execpages);
- } else {
- atomic_dec_uint(&uvmexp.filepages);
- }
- } else if (UVM_OBJ_IS_AOBJ(uobj)) {
- atomic_dec_uint(&uvmexp.anonpages);
}
-
- /* object should be locked */
uobj->uo_npages--;
pg->flags &= ~PG_TABLED;
pg->uobject = NULL;
@@ -272,7 +288,6 @@
{
struct vm_page *opg;
- KASSERT(uobj == pg->uobject);
opg = radix_tree_remove_node(&uobj->uo_pages, pg->offset >> PAGE_SHIFT);
KASSERT(pg == opg);
}
@@ -282,8 +297,9 @@
{
KDASSERT(uobj != NULL);
+ KASSERT(uobj == pg->uobject);
+ uvm_pageremove_list(uobj, pg);
uvm_pageremove_tree(uobj, pg);
- uvm_pageremove_list(uobj, pg);
}
static void
@@ -423,15 +439,7 @@
if (atop(paddr) >= seg->avail_start &&
atop(paddr) < seg->avail_end) {
uvmexp.npages++;
- /*
- * add page to free pool
- *
- * adjust pagestate[] so that it won't go
- * negative.
- */
- KASSERT(uvm_pagegetdirty(&seg->pgs[i])
- == UVM_PAGE_STATUS_UNKNOWN);
- boot_cpu.pagestate[UVM_PAGE_STATUS_UNKNOWN]++;
+ /* add page to free pool */
uvm_pagefree(&seg->pgs[i]);
}
}
@@ -1316,7 +1324,9 @@
* otherwise we race with uvm_pglistalloc.
*/
pg->pqflags = 0;
- ucpu->pagestate[UVM_PAGE_STATUS_CLEAN]++;
+ if (anon) {
+ ucpu->pagestate[1][UVM_PAGE_STATUS_CLEAN]++;
+ }
mutex_spin_exit(&uvm_fpageqlock);
if (anon) {
anon->an_page = pg;
@@ -1348,7 +1358,9 @@
* A zero'd page is not clean. If we got a page not already
* zero'd, then we have to zero it ourselves.
*/
- uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
+ if (obj != NULL || anon != NULL) {
+ uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
+ }
if (zeroit)
pmap_zero_page(VM_PAGE_TO_PHYS(pg));
}
@@ -1523,6 +1535,11 @@
} else {
pg->pqflags &= ~PQ_ANON;
atomic_dec_uint(&uvmexp.anonpages);
+ status = uvm_pagegetdirty(pg);
+ kpreempt_disable();
+ ucpu = curcpu()->ci_data.cpu_uvm;
+ ucpu->pagestate[1][status]--;
+ kpreempt_enable();
}
pg->uanon->an_page = NULL;
Home |
Main Index |
Thread Index |
Old Index