Source-Changes-HG archive
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]
[src/trunk]: src/sys - Merge sched_pstats() and uvm_meter()/uvm_loadav(). Av...
details: https://anonhg.NetBSD.org/src/rev/6ef8a40558cf
branches: trunk
changeset: 754008:6ef8a40558cf
user: rmind <rmind%NetBSD.org@localhost>
date: Fri Apr 16 03:21:49 2010 +0000
description:
- Merge sched_pstats() and uvm_meter()/uvm_loadav(). Avoids double loop
through all LWPs and duplicate locking overhead.
- Move sched_pstats() from soft-interrupt context to process 0 main loop.
Avoids blocking effect on real-time threads. Mostly fixes PR/38792.
Note: it might be worth moving the loop above PRI_PGDAEMON. Also,
sched_pstats() might be cleaned-up slightly.
diffstat:
sys/kern/kern_synch.c | 81 +++++++++++++++++++++++++++++++++++++-------------
sys/sys/sched.h | 4 +-
sys/uvm/uvm_extern.h | 3 +-
sys/uvm/uvm_glue.c | 13 +++++--
sys/uvm/uvm_meter.c | 68 +-----------------------------------------
5 files changed, 73 insertions(+), 96 deletions(-)
diffs (truncated from 327 to 300 lines):
diff -r cd4e80d0ac56 -r 6ef8a40558cf sys/kern/kern_synch.c
--- a/sys/kern/kern_synch.c Fri Apr 16 03:13:03 2010 +0000
+++ b/sys/kern/kern_synch.c Fri Apr 16 03:21:49 2010 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: kern_synch.c,v 1.280 2010/03/03 00:47:31 yamt Exp $ */
+/* $NetBSD: kern_synch.c,v 1.281 2010/04/16 03:21:49 rmind Exp $ */
/*-
* Copyright (c) 1999, 2000, 2004, 2006, 2007, 2008, 2009
@@ -69,7 +69,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.280 2010/03/03 00:47:31 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.281 2010/04/16 03:21:49 rmind Exp $");
#include "opt_kstack.h"
#include "opt_perfctrs.h"
@@ -128,7 +128,6 @@
syncobj_noowner,
};
-callout_t sched_pstats_ch;
unsigned sched_pstats_ticks;
kcondvar_t lbolt; /* once a second sleep address */
@@ -152,8 +151,6 @@
{
cv_init(&lbolt, "lbolt");
- callout_init(&sched_pstats_ch, CALLOUT_MPSAFE);
- callout_setfunc(&sched_pstats_ch, sched_pstats, NULL);
evcnt_attach_dynamic(&kpreempt_ev_crit, EVCNT_TYPE_MISC, NULL,
"kpreempt", "defer: critical section");
@@ -161,8 +158,6 @@
"kpreempt", "defer: kernel_lock");
evcnt_attach_dynamic(&kpreempt_ev_immed, EVCNT_TYPE_MISC, NULL,
"kpreempt", "immediate");
-
- sched_pstats(NULL);
}
/*
@@ -1148,36 +1143,55 @@
}
/* Decay 95% of proc::p_pctcpu in 60 seconds, ccpu = exp(-1/20) */
-const fixpt_t ccpu = 0.95122942450071400909 * FSCALE;
+const fixpt_t ccpu = 0.95122942450071400909 * FSCALE;
+
+/*
+ * Constants for averages over 1, 5 and 15 minutes when sampling at
+ * 5 second intervals.
+ */
+static const fixpt_t cexp[ ] = {
+ 0.9200444146293232 * FSCALE, /* exp(-1/12) */
+ 0.9834714538216174 * FSCALE, /* exp(-1/60) */
+ 0.9944598480048967 * FSCALE, /* exp(-1/180) */
+};
/*
* sched_pstats:
*
- * Update process statistics and check CPU resource allocation.
- * Call scheduler-specific hook to eventually adjust process/LWP
- * priorities.
+ * => Update process statistics and check CPU resource allocation.
+ * => Call scheduler-specific hook to eventually adjust LWP priorities.
+ * => Compute load average of a quantity on 1, 5 and 15 minute intervals.
*/
void
-sched_pstats(void *arg)
+sched_pstats(void)
{
+ extern struct loadavg averunnable;
+ struct loadavg *avg = &averunnable;
const int clkhz = (stathz != 0 ? stathz : hz);
- static bool backwards;
- struct rlimit *rlim;
- struct lwp *l;
+ static bool backwards = false;
+ static u_int lavg_count = 0;
struct proc *p;
- long runtm;
- fixpt_t lpctcpu;
- u_int lcpticks;
- int sig;
+ int nrun;
sched_pstats_ticks++;
-
+ if (++lavg_count >= 5) {
+ lavg_count = 0;
+ nrun = 0;
+ }
mutex_enter(proc_lock);
PROCLIST_FOREACH(p, &allproc) {
+ struct lwp *l;
+ struct rlimit *rlim;
+ long runtm;
+ int sig;
+
/* Increment sleep time (if sleeping), ignore overflow. */
mutex_enter(p->p_lock);
runtm = p->p_rtime.sec;
LIST_FOREACH(l, &p->p_lwps, l_sibling) {
+ fixpt_t lpctcpu;
+ u_int lcpticks;
+
if (__predict_false((l->l_flag & LW_IDLE) != 0))
continue;
lwp_lock(l);
@@ -1195,6 +1209,20 @@
lpctcpu += ((FSCALE - ccpu) *
(lcpticks * FSCALE / clkhz)) >> FSHIFT;
l->l_pctcpu = lpctcpu;
+
+ /* For load average calculation. */
+ if (__predict_false(lavg_count == 0)) {
+ switch (l->l_stat) {
+ case LSSLEEP:
+ if (l->l_slptime > 1) {
+ break;
+ }
+ case LSRUN:
+ case LSONPROC:
+ case LSIDL:
+ nrun++;
+ }
+ }
}
/* Calculating p_pctcpu only for ps(1) */
p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
@@ -1227,7 +1255,16 @@
}
}
mutex_exit(proc_lock);
- uvm_meter();
+
+ /* Load average calculation. */
+ if (__predict_false(lavg_count == 0)) {
+ int i;
+ for (i = 0; i < __arraycount(cexp); i++) {
+ avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
+ nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;
+ }
+ }
+
+ /* Lightning bolt. */
cv_broadcast(&lbolt);
- callout_schedule(&sched_pstats_ch, hz);
}
diff -r cd4e80d0ac56 -r 6ef8a40558cf sys/sys/sched.h
--- a/sys/sys/sched.h Fri Apr 16 03:13:03 2010 +0000
+++ b/sys/sys/sched.h Fri Apr 16 03:21:49 2010 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: sched.h,v 1.71 2009/10/03 22:32:56 elad Exp $ */
+/* $NetBSD: sched.h,v 1.72 2010/04/16 03:21:49 rmind Exp $ */
/*-
* Copyright (c) 1999, 2000, 2001, 2002, 2007, 2008 The NetBSD Foundation, Inc.
@@ -237,7 +237,7 @@
void sched_tick(struct cpu_info *);
void schedclock(struct lwp *);
void sched_schedclock(struct lwp *);
-void sched_pstats(void *);
+void sched_pstats(void);
void sched_lwp_stats(struct lwp *);
void sched_pstats_hook(struct lwp *, int);
diff -r cd4e80d0ac56 -r 6ef8a40558cf sys/uvm/uvm_extern.h
--- a/sys/uvm/uvm_extern.h Fri Apr 16 03:13:03 2010 +0000
+++ b/sys/uvm/uvm_extern.h Fri Apr 16 03:21:49 2010 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_extern.h,v 1.162 2010/02/08 19:02:33 joerg Exp $ */
+/* $NetBSD: uvm_extern.h,v 1.163 2010/04/16 03:21:49 rmind Exp $ */
/*
*
@@ -681,7 +681,6 @@
void uvm_whatis(uintptr_t, void (*)(const char *, ...));
/* uvm_meter.c */
-void uvm_meter(void);
int uvm_sysctl(int *, u_int, void *, size_t *,
void *, size_t, struct proc *);
int uvm_pctparam_check(struct uvm_pctparam *, int);
diff -r cd4e80d0ac56 -r 6ef8a40558cf sys/uvm/uvm_glue.c
--- a/sys/uvm/uvm_glue.c Fri Apr 16 03:13:03 2010 +0000
+++ b/sys/uvm/uvm_glue.c Fri Apr 16 03:21:49 2010 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_glue.c,v 1.144 2010/02/25 23:10:49 jym Exp $ */
+/* $NetBSD: uvm_glue.c,v 1.145 2010/04/16 03:21:49 rmind Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -67,7 +67,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_glue.c,v 1.144 2010/02/25 23:10:49 jym Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_glue.c,v 1.145 2010/04/16 03:21:49 rmind Exp $");
#include "opt_kgdb.h"
#include "opt_kstack.h"
@@ -78,6 +78,8 @@
*/
#include <sys/param.h>
+#include <sys/kernel.h>
+
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
@@ -414,6 +416,9 @@
/*
* uvm_scheduler: process zero main loop.
*/
+
+extern struct loadavg averunnable;
+
void
uvm_scheduler(void)
{
@@ -425,7 +430,7 @@
lwp_unlock(l);
for (;;) {
- /* XXX/TODO: move some workload to this LWP? */
- (void)kpause("uvm", false, 0, NULL);
+ sched_pstats();
+ (void)kpause("uvm", false, hz, NULL);
}
}
diff -r cd4e80d0ac56 -r 6ef8a40558cf sys/uvm/uvm_meter.c
--- a/sys/uvm/uvm_meter.c Fri Apr 16 03:13:03 2010 +0000
+++ b/sys/uvm/uvm_meter.c Fri Apr 16 03:21:49 2010 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_meter.c,v 1.51 2010/04/11 01:53:03 mrg Exp $ */
+/* $NetBSD: uvm_meter.c,v 1.52 2010/04/16 03:21:49 rmind Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -41,7 +41,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_meter.c,v 1.51 2010/04/11 01:53:03 mrg Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_meter.c,v 1.52 2010/04/16 03:21:49 rmind Exp $");
#include <sys/param.h>
#include <sys/proc.h>
@@ -59,73 +59,9 @@
int maxslp = MAXSLP; /* patchable ... */
struct loadavg averunnable;
-/*
- * constants for averages over 1, 5, and 15 minutes when sampling at
- * 5 second intervals.
- */
-
-static const fixpt_t cexp[3] = {
- 0.9200444146293232 * FSCALE, /* exp(-1/12) */
- 0.9834714538216174 * FSCALE, /* exp(-1/60) */
- 0.9944598480048967 * FSCALE, /* exp(-1/180) */
-};
-
-/*
- * prototypes
- */
-
-static void uvm_loadav(struct loadavg *);
static void uvm_total(struct vmtotal *);
/*
- * uvm_meter: calculate load average.
- */
-void
-uvm_meter(void)
-{
- static int count;
-
- if (++count >= 5) {
- count = 0;
- uvm_loadav(&averunnable);
- }
-}
-
-/*
- * uvm_loadav: compute a tenex style load average of a quantity on
- * 1, 5, and 15 minute intervals.
- */
-static void
-uvm_loadav(struct loadavg *avg)
-{
- int i, nrun;
- struct lwp *l;
-
- nrun = 0;
Home |
Main Index |
Thread Index |
Old Index