Source-Changes-HG archive
[src/trunk]: src/sys Detangle the pagedaemon from uvm_fpageqlock:
details: https://anonhg.NetBSD.org/src/rev/2f62f03b1078
branches: trunk
changeset: 967767:2f62f03b1078
user: ad <ad%NetBSD.org@localhost>
date: Sat Dec 21 14:50:34 2019 +0000
description:
Detangle the pagedaemon from uvm_fpageqlock:
- Have a single lock (uvmpd_lock) to protect pagedaemon state that was
previously covered by uvmpd_pool_drain_lock plus uvm_fpageqlock.
- Don't require any locks be held when calling uvm_kick_pdaemon().
- Use uvm_free().
diffstat:
sys/kern/subr_vmem.c | 6 +--
sys/uvm/uvm_page.c | 7 +--
sys/uvm/uvm_pdaemon.c | 83 +++++++++++++++++++++++---------------------------
sys/uvm/uvm_pglist.c | 8 ++--
4 files changed, 48 insertions(+), 56 deletions(-)
diffs (truncated from 362 to 300 lines):
diff -r baa4cd8b0536 -r 2f62f03b1078 sys/kern/subr_vmem.c
--- a/sys/kern/subr_vmem.c Sat Dec 21 14:41:44 2019 +0000
+++ b/sys/kern/subr_vmem.c Sat Dec 21 14:50:34 2019 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: subr_vmem.c,v 1.99 2019/10/16 18:29:49 christos Exp $ */
+/* $NetBSD: subr_vmem.c,v 1.100 2019/12/21 14:50:34 ad Exp $ */
/*-
* Copyright (c)2006,2007,2008,2009 YAMAMOTO Takashi,
@@ -46,7 +46,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: subr_vmem.c,v 1.99 2019/10/16 18:29:49 christos Exp $");
+__KERNEL_RCSID(0, "$NetBSD: subr_vmem.c,v 1.100 2019/12/21 14:50:34 ad Exp $");
#if defined(_KERNEL) && defined(_KERNEL_OPT)
#include "opt_ddb.h"
@@ -198,9 +198,7 @@
vmem_kick_pdaemon(void)
{
#if defined(_KERNEL)
- mutex_spin_enter(&uvm_fpageqlock);
uvm_kick_pdaemon();
- mutex_spin_exit(&uvm_fpageqlock);
#endif
}
diff -r baa4cd8b0536 -r 2f62f03b1078 sys/uvm/uvm_page.c
--- a/sys/uvm/uvm_page.c Sat Dec 21 14:41:44 2019 +0000
+++ b/sys/uvm/uvm_page.c Sat Dec 21 14:50:34 2019 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_page.c,v 1.209 2019/12/21 14:41:44 ad Exp $ */
+/* $NetBSD: uvm_page.c,v 1.210 2019/12/21 14:50:34 ad Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -66,7 +66,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.209 2019/12/21 14:41:44 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.210 2019/12/21 14:50:34 ad Exp $");
#include "opt_ddb.h"
#include "opt_uvm.h"
@@ -878,8 +878,6 @@
KASSERT(anon == NULL || anon->an_lock == NULL ||
mutex_owned(anon->an_lock));
- mutex_spin_enter(&uvm_fpageqlock);
-
/*
* This implements a global round-robin page coloring
* algorithm.
@@ -909,6 +907,7 @@
* we make kernel reserve pages available if called by a
* kernel thread or a realtime thread.
*/
+ mutex_spin_enter(&uvm_fpageqlock);
l = curlwp;
if (__predict_true(l != NULL) && lwp_eprio(l) >= PRI_KTHREAD) {
flags |= UVM_PGA_USERESERVE;
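The uvm_page.c hunk narrows the critical section: the page-colour computation now runs before uvm_fpageqlock is taken, and the lock is entered only just ahead of the reserve check. A minimal user-space sketch of the same pattern, using pthreads rather than the kernel mutex API (all names here are hypothetical, not the UVM ones):

	#include <pthread.h>

	#define NCOLOR 8

	static pthread_mutex_t freeq_lock = PTHREAD_MUTEX_INITIALIZER;
	static unsigned next_color;	/* round-robin hint; racy by design */
	static int free_count;		/* protected by freeq_lock */

	int
	take_page(void)
	{
		unsigned color;
		int got;

		/*
		 * Hint computation moved out of the critical section: a
		 * lost update merely repeats a colour, which is harmless
		 * for a hint.
		 */
		color = next_color++ % NCOLOR;

		/* Only the shared free count still needs the lock. */
		pthread_mutex_lock(&freeq_lock);
		got = (free_count > 0) ? free_count-- : 0;
		pthread_mutex_unlock(&freeq_lock);

		return got ? (int)color : -1;
	}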
diff -r baa4cd8b0536 -r 2f62f03b1078 sys/uvm/uvm_pdaemon.c
--- a/sys/uvm/uvm_pdaemon.c Sat Dec 21 14:41:44 2019 +0000
+++ b/sys/uvm/uvm_pdaemon.c Sat Dec 21 14:50:34 2019 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_pdaemon.c,v 1.116 2019/12/21 11:41:18 ad Exp $ */
+/* $NetBSD: uvm_pdaemon.c,v 1.117 2019/12/21 14:50:34 ad Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -66,7 +66,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_pdaemon.c,v 1.116 2019/12/21 11:41:18 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_pdaemon.c,v 1.117 2019/12/21 14:50:34 ad Exp $");
#include "opt_uvmhist.h"
#include "opt_readahead.h"
@@ -112,7 +112,7 @@
static unsigned int uvm_pagedaemon_waiters;
/* State for the pool drainer thread */
-static kmutex_t uvmpd_pool_drain_lock __cacheline_aligned;
+static kmutex_t uvmpd_lock __cacheline_aligned;
static kcondvar_t uvmpd_pool_drain_cv;
static bool uvmpd_pool_drain_run = false;
@@ -136,7 +136,7 @@
if (uvm.pagedaemon_lwp == NULL)
panic("out of memory before the pagedaemon thread exists");
- mutex_spin_enter(&uvm_fpageqlock);
+ mutex_spin_enter(&uvmpd_lock);
/*
* check for page daemon going to sleep (waiting for itself)
@@ -170,27 +170,26 @@
uvm_pagedaemon_waiters++;
wakeup(&uvm.pagedaemon); /* wake the daemon! */
- UVM_UNLOCK_AND_WAIT(&uvmexp.free, &uvm_fpageqlock, false, wmsg, timo);
+ UVM_UNLOCK_AND_WAIT(&uvmexp.free, &uvmpd_lock, false, wmsg, timo);
}
/*
* uvm_kick_pdaemon: perform checks to determine if we need to
* give the pagedaemon a nudge, and do so if necessary.
- *
- * => called with uvm_fpageqlock held.
*/
void
uvm_kick_pdaemon(void)
{
-
- KASSERT(mutex_owned(&uvm_fpageqlock));
+ int fpages = uvm_free();
- if (uvmexp.free + uvmexp.paging < uvmexp.freemin ||
- (uvmexp.free + uvmexp.paging < uvmexp.freetarg &&
+ if (fpages + uvmexp.paging < uvmexp.freemin ||
+ (fpages + uvmexp.paging < uvmexp.freetarg &&
uvmpdpol_needsscan_p()) ||
uvm_km_va_starved_p()) {
+ mutex_spin_enter(&uvmpd_lock);
wakeup(&uvm.pagedaemon);
+ mutex_spin_exit(&uvmpd_lock);
}
}
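With this shape, uvm_kick_pdaemon() tests an unlocked uvm_free() snapshot and takes uvmpd_lock only around the wakeup, which is why callers such as vmem_kick_pdaemon() above no longer need to hold any lock. The check may race with concurrent frees, but the worst case is one spurious or one deferred kick, and the daemon re-evaluates the condition itself when it runs. A user-space pthreads sketch of the check-then-wake pattern (hypothetical names):

	#include <pthread.h>
	#include <stdatomic.h>

	static pthread_mutex_t pd_lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t pd_cv = PTHREAD_COND_INITIALIZER;
	static atomic_int free_pages;	/* maintained elsewhere */
	static int free_min = 64;

	void
	kick_daemon(void)
	{
		/* Unlocked snapshot, like uvm_free() in the diff. */
		int fpages = atomic_load(&free_pages);

		if (fpages < free_min) {
			/* Lock only to publish the wakeup. */
			pthread_mutex_lock(&pd_lock);
			pthread_cond_signal(&pd_cv);
			pthread_mutex_unlock(&pd_lock);
		}
	}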
@@ -241,12 +240,14 @@
{
int npages = 0;
int extrapages = 0;
+ int fpages;
UVMHIST_FUNC("uvm_pageout"); UVMHIST_CALLED(pdhist);
UVMHIST_LOG(pdhist,"<starting uvm pagedaemon>", 0, 0, 0, 0);
- mutex_init(&uvmpd_pool_drain_lock, MUTEX_DEFAULT, IPL_VM);
+ mutex_init(&uvmpd_lock, MUTEX_DEFAULT, IPL_VM);
cv_init(&uvmpd_pool_drain_cv, "pooldrain");
/* Create the pool drainer kernel thread. */
@@ -271,16 +272,16 @@
kmem_va_starved = uvm_km_va_starved_p();
- mutex_spin_enter(&uvm_fpageqlock);
+ mutex_spin_enter(&uvmpd_lock);
if ((uvm_pagedaemon_waiters == 0 || uvmexp.paging > 0) &&
!kmem_va_starved) {
UVMHIST_LOG(pdhist," <<SLEEPING>>",0,0,0,0);
UVM_UNLOCK_AND_WAIT(&uvm.pagedaemon,
- &uvm_fpageqlock, false, "pgdaemon", 0);
+ &uvmpd_lock, false, "pgdaemon", 0);
uvmexp.pdwoke++;
UVMHIST_LOG(pdhist," <<WOKE UP>>",0,0,0,0);
} else {
- mutex_spin_exit(&uvm_fpageqlock);
+ mutex_spin_exit(&uvmpd_lock);
}
/*
@@ -290,9 +291,7 @@
if (npages != uvmexp.npages || extrapages != uvm_extrapages) {
npages = uvmexp.npages;
extrapages = uvm_extrapages;
- mutex_spin_enter(&uvm_fpageqlock);
uvmpd_tune();
- mutex_spin_exit(&uvm_fpageqlock);
}
uvmpdpol_tune();
@@ -301,33 +300,30 @@
* Estimate a hint. Note that bufmem are returned to
* system only when entire pool page is empty.
*/
- mutex_spin_enter(&uvm_fpageqlock);
-
+ fpages = uvm_free();
UVMHIST_LOG(pdhist," free/ftarg=%jd/%jd",
- uvmexp.free, uvmexp.freetarg, 0,0);
+ fpages, uvmexp.freetarg, 0,0);
- needsfree = uvmexp.free + uvmexp.paging < uvmexp.freetarg;
+ needsfree = fpages + uvmexp.paging < uvmexp.freetarg;
needsscan = needsfree || uvmpdpol_needsscan_p();
/*
* scan if needed
*/
if (needsscan) {
- mutex_spin_exit(&uvm_fpageqlock);
uvmpd_scan();
- mutex_spin_enter(&uvm_fpageqlock);
}
/*
* if there's any free memory to be had,
* wake up any waiters.
*/
- if (uvmexp.free > uvmexp.reserve_kernel ||
- uvmexp.paging == 0) {
+ if (uvm_free() > uvmexp.reserve_kernel || uvmexp.paging == 0) {
+ mutex_spin_enter(&uvmpd_lock);
wakeup(&uvmexp.free);
uvm_pagedaemon_waiters = 0;
+ mutex_spin_exit(&uvmpd_lock);
}
- mutex_spin_exit(&uvm_fpageqlock);
/*
* scan done. if we don't need free memory, we're done.
@@ -382,14 +378,14 @@
* wake up either of pagedaemon or LWPs waiting for it.
*/
- mutex_spin_enter(&uvm_fpageqlock);
- if (uvmexp.free <= uvmexp.reserve_kernel) {
+ mutex_spin_enter(&uvmpd_lock);
+ if (uvm_free() <= uvmexp.reserve_kernel) {
wakeup(&uvm.pagedaemon);
- } else {
+ } else if (uvm_pagedaemon_waiters != 0) {
wakeup(&uvmexp.free);
uvm_pagedaemon_waiters = 0;
}
- mutex_spin_exit(&uvm_fpageqlock);
+ mutex_spin_exit(&uvmpd_lock);
}
/*
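Note the refinement in the pageout-completion path: the wakeup on &uvmexp.free is now issued only when uvm_pagedaemon_waiters is non-zero, so an empty sleep queue is no longer scanned on every completed pageout. Roughly, in user-space terms (hypothetical names):

	#include <pthread.h>

	static pthread_mutex_t pd_lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t daemon_cv = PTHREAD_COND_INITIALIZER;
	static pthread_cond_t free_cv = PTHREAD_COND_INITIALIZER;
	static int free_pages, reserve, waiters;

	void
	pageout_done_wakeup(void)
	{
		pthread_mutex_lock(&pd_lock);
		if (free_pages <= reserve) {
			/* Still short: keep the daemon going. */
			pthread_cond_signal(&daemon_cv);
		} else if (waiters != 0) {
			/* Skip the broadcast when nobody can hear it. */
			pthread_cond_broadcast(&free_cv);
			waiters = 0;
		}
		pthread_mutex_unlock(&pd_lock);
	}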
@@ -714,7 +710,7 @@
* see if we've met the free target.
*/
- if (uvmexp.free + uvmexp.paging
+ if (uvm_free() + uvmexp.paging
#if defined(VMSWAP)
+ swapcluster_nused(&swc)
#endif /* defined(VMSWAP) */
@@ -843,7 +839,7 @@
* free target when all the current pageouts complete.
*/
- if (uvmexp.free + uvmexp.paging > uvmexp.freetarg << 2) {
+ if (uvm_free() + uvmexp.paging > uvmexp.freetarg << 2) {
mutex_exit(slock);
continue;
}
@@ -926,7 +922,7 @@
static void
uvmpd_scan(void)
{
- int swap_shortage, pages_freed;
+ int swap_shortage, pages_freed, fpages;
UVMHIST_FUNC("uvmpd_scan"); UVMHIST_CALLED(pdhist);
uvmexp.pdrevs++;
@@ -950,11 +946,12 @@
*/
swap_shortage = 0;
- if (uvmexp.free < uvmexp.freetarg &&
+ fpages = uvm_free();
+ if (fpages < uvmexp.freetarg &&
uvmexp.swpginuse >= uvmexp.swpgavail &&
!uvm_swapisfull() &&
pages_freed == 0) {
- swap_shortage = uvmexp.freetarg - uvmexp.free;
+ swap_shortage = uvmexp.freetarg - fpages;
}
uvmpdpol_balancequeue(swap_shortage);
@@ -964,7 +961,7 @@
* modules.
*/
- if (uvmexp.free < uvmexp.freemin) {
+ if (uvm_free() < uvmexp.freemin) {
module_thread_kick();
}
}
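uvmpd_scan() now samples uvm_free() once into fpages and derives both the threshold test and swap_shortage from that single read, so the two uses cannot disagree while other CPUs free pages concurrently. Reduced to a user-space sketch (hypothetical names):

	#include <stdatomic.h>

	static atomic_int free_pages;	/* updated concurrently elsewhere */

	int
	compute_shortage(int free_target)
	{
		/* One snapshot: the test and the subtraction agree. */
		int fpages = atomic_load(&free_pages);

		return (fpages < free_target) ? free_target - fpages : 0;
	}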
@@ -1038,19 +1035,17 @@
int bufcnt;
for (;;) {
- mutex_enter(&uvmpd_pool_drain_lock);
+ mutex_enter(&uvmpd_lock);
if (!uvmpd_pool_drain_run) {
- cv_wait(&uvmpd_pool_drain_cv, &uvmpd_pool_drain_lock);
+ cv_wait(&uvmpd_pool_drain_cv, &uvmpd_lock);
}
uvmpd_pool_drain_run = false;
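Finally, the pool drainer thread now blocks on the same uvmpd_lock as the rest of the pagedaemon state, rather than on a private uvmpd_pool_drain_lock. A portable pthreads rendering of the loop (hypothetical names; the while guard matters in POSIX, where condition waits may wake spuriously, while in the kernel an extra drain pass is merely harmless work):

	#include <pthread.h>
	#include <stdbool.h>

	static pthread_mutex_t pd_lock = PTHREAD_MUTEX_INITIALIZER;	/* one lock */
	static pthread_cond_t drain_cv = PTHREAD_COND_INITIALIZER;
	static bool drain_run;

	void *
	pool_drainer(void *arg)
	{
		(void)arg;
		for (;;) {
			pthread_mutex_lock(&pd_lock);
			while (!drain_run)
				pthread_cond_wait(&drain_cv, &pd_lock);
			drain_run = false;
			pthread_mutex_unlock(&pd_lock);

			/* Drain caches here, without holding pd_lock. */
		}
		return NULL;	/* not reached */
	}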