Source-Changes-HG archive
[src/trunk]: src/sys - Extend the per-CPU counters matt@ did to include all o...
details: https://anonhg.NetBSD.org/src/rev/7132c305b1da
branches: trunk
changeset: 967651:7132c305b1da
user: ad <ad%NetBSD.org@localhost>
date: Mon Dec 16 22:47:54 2019 +0000
description:
- Extend the per-CPU counters matt@ did to include all of the hot counters
in UVM, excluding uvmexp.free, which needs special treatment and will be
done with a separate commit. Cuts system time for a build by 20-25% on
a 48 CPU machine w/DIAGNOSTIC.
- Avoid 64-bit integer divide on every fault (for rnd_add_uint32).
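The core idea of the change: globally shared counters such as uvmexp.forks, previously bumped with atomic_add_int() or unlocked increments, become per-CPU slots (ci_counts[]) that hot paths update locally via cpu_count()/CPU_COUNT(), and that cpu_count_sync()/cpu_count_sync_all() fold into a single total only when a reader asks. Below is a minimal userland sketch of that general pattern, assuming nothing about the kernel; every name in it (NCPU_SKETCH, count_add(), count_sum()) is invented for illustration and is not the NetBSD API.

/*
 * Illustrative sketch only, not NetBSD kernel code: each CPU owns a
 * private slot, hot paths increment their own slot without atomics,
 * and readers sum all slots on demand.
 */
#include <stdint.h>
#include <stdio.h>

#define NCPU_SKETCH	4		/* pretend we have 4 CPUs */

enum count_idx { COUNT_FORKS, COUNT_FAULTS, COUNT_MAX };

/* One cache line per CPU in a real kernel; a plain 2-D array here. */
static int64_t counts[NCPU_SKETCH][COUNT_MAX];

/* Hot path: bump the calling CPU's private slot (no atomics needed). */
static void
count_add(int cpu, enum count_idx idx, int64_t delta)
{
	counts[cpu][idx] += delta;
}

/* Cold path: fold all per-CPU slots into one total when asked. */
static int64_t
count_sum(enum count_idx idx)
{
	int64_t sum = 0;

	for (int cpu = 0; cpu < NCPU_SKETCH; cpu++)
		sum += counts[cpu][idx];
	return sum;
}

int
main(void)
{
	count_add(0, COUNT_FORKS, 1);
	count_add(2, COUNT_FORKS, 1);
	count_add(3, COUNT_FAULTS, 5);
	printf("forks=%lld faults=%lld\n",
	    (long long)count_sum(COUNT_FORKS),
	    (long long)count_sum(COUNT_FAULTS));
	return 0;
}

Writers never touch another CPU's slot, so the hot path needs no atomics and avoids cache-line ping-pong; reading a total is the expensive side, which is why the new cpu_count_sync*() routines in the diff below are commented "Expensive - don't call often."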
diffstat:
sys/kern/kern_cpu.c | 92 +++++++++++++++++-
sys/kern/kern_fork.c | 14 +-
sys/kern/kern_softint.c | 10 +-
sys/kern/vfs_vnode.c | 14 +-
sys/kern/vfs_vnops.c | 12 +-
sys/miscfs/procfs/procfs_linux.c | 41 ++++---
sys/rump/librump/rumpkern/MAINBUS.ioconf | 3 +
sys/rump/librump/rumpkern/Makefile.rumpkern | 3 +-
sys/rump/librump/rumpkern/emul.c | 12 +-
sys/rump/librump/rumpkern/intr.c | 11 +-
sys/rump/librump/rumpkern/scheduler.c | 14 +-
sys/sys/cpu_data.h | 100 +++++++++++++++++-
sys/uvm/uvm_extern.h | 19 ++-
sys/uvm/uvm_fault.c | 93 +++++++-----------
sys/uvm/uvm_fault_i.h | 6 +-
sys/uvm/uvm_glue.c | 6 +-
sys/uvm/uvm_meter.c | 144 +++++++++++++++++++--------
sys/uvm/uvm_page.c | 46 ++++----
sys/uvm/uvm_pdpolicy_clock.c | 25 +++-
sys/uvm/uvm_pglist.c | 8 +-
sys/uvm/uvm_stat.c | 76 ++++++++-----
21 files changed, 480 insertions(+), 269 deletions(-)
diffs (truncated from 1609 to 300 lines):
diff -r 66c1d950cac1 -r 7132c305b1da sys/kern/kern_cpu.c
--- a/sys/kern/kern_cpu.c Mon Dec 16 22:22:11 2019 +0000
+++ b/sys/kern/kern_cpu.c Mon Dec 16 22:47:54 2019 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: kern_cpu.c,v 1.81 2019/12/04 09:34:13 wiz Exp $ */
+/* $NetBSD: kern_cpu.c,v 1.82 2019/12/16 22:47:54 ad Exp $ */
/*-
* Copyright (c) 2007, 2008, 2009, 2010, 2012, 2019 The NetBSD Foundation, Inc.
@@ -56,9 +56,11 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_cpu.c,v 1.81 2019/12/04 09:34:13 wiz Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_cpu.c,v 1.82 2019/12/16 22:47:54 ad Exp $");
+#ifdef _KERNEL_OPT
#include "opt_cpu_ucode.h"
+#endif
#include <sys/param.h>
#include <sys/systm.h>
@@ -120,6 +122,7 @@
int ncpuonline __read_mostly;
bool mp_online __read_mostly;
static bool cpu_topology_present __read_mostly;
+int64_t cpu_counts[CPU_COUNT_MAX];
/* An array of CPUs. There are ncpu entries. */
struct cpu_info **cpu_infos __read_mostly;
@@ -305,6 +308,7 @@
return error;
}
+#ifndef _RUMPKERNEL
struct cpu_info *
cpu_lookup(u_int idx)
{
@@ -327,6 +331,7 @@
return ci;
}
+#endif
static void
cpu_xc_offline(struct cpu_info *ci, void *unused)
@@ -830,3 +835,86 @@
return error;
}
#endif
+
+/*
+ * Adjust one count, for a counter that's NOT updated from interrupt
+ * context. Hardly worth making an inline due to preemption stuff.
+ */
+void
+cpu_count(enum cpu_count idx, int64_t delta)
+{
+ lwp_t *l = curlwp;
+ KPREEMPT_DISABLE(l);
+ l->l_cpu->ci_counts[idx] += delta;
+ KPREEMPT_ENABLE(l);
+}
+
+/*
+ * Fetch fresh sum total for all counts. Expensive - don't call often.
+ */
+void
+cpu_count_sync_all(void)
+{
+ CPU_INFO_ITERATOR cii;
+ struct cpu_info *ci;
+ int64_t sum[CPU_COUNT_MAX], *ptr;
+ enum cpu_count i;
+ int s;
+
+ KASSERT(sizeof(ci->ci_counts) == sizeof(cpu_counts));
+
+ if (__predict_true(mp_online)) {
+ memset(sum, 0, sizeof(sum));
+ /*
+ * We want this to be reasonably quick, so any value we get
+ * isn't totally out of whack, so don't let the current LWP
+ * get preempted.
+ */
+ s = splvm();
+ curcpu()->ci_counts[CPU_COUNT_SYNC_ALL]++;
+ for (CPU_INFO_FOREACH(cii, ci)) {
+ ptr = ci->ci_counts;
+ for (i = 0; i < CPU_COUNT_MAX; i += 8) {
+ sum[i+0] += ptr[i+0];
+ sum[i+1] += ptr[i+1];
+ sum[i+2] += ptr[i+2];
+ sum[i+3] += ptr[i+3];
+ sum[i+4] += ptr[i+4];
+ sum[i+5] += ptr[i+5];
+ sum[i+6] += ptr[i+6];
+ sum[i+7] += ptr[i+7];
+ }
+ KASSERT(i == CPU_COUNT_MAX);
+ }
+ memcpy(cpu_counts, sum, sizeof(cpu_counts));
+ splx(s);
+ } else {
+ memcpy(cpu_counts, curcpu()->ci_counts, sizeof(cpu_counts));
+ }
+}
+
+/*
+ * Fetch a fresh sum total for one single count. Expensive - don't call often.
+ */
+int64_t
+cpu_count_sync(enum cpu_count count)
+{
+ CPU_INFO_ITERATOR cii;
+ struct cpu_info *ci;
+ int64_t sum;
+ int s;
+
+ if (__predict_true(mp_online)) {
+ s = splvm();
+ curcpu()->ci_counts[CPU_COUNT_SYNC_ONE]++;
+ sum = 0;
+ for (CPU_INFO_FOREACH(cii, ci)) {
+ sum += ci->ci_counts[count];
+ }
+ splx(s);
+ } else {
+ /* XXX Early boot, iterator might not be available. */
+ sum = curcpu()->ci_counts[count];
+ }
+ return cpu_counts[count] = sum;
+}
diff -r 66c1d950cac1 -r 7132c305b1da sys/kern/kern_fork.c
--- a/sys/kern/kern_fork.c Mon Dec 16 22:22:11 2019 +0000
+++ b/sys/kern/kern_fork.c Mon Dec 16 22:47:54 2019 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: kern_fork.c,v 1.216 2019/11/23 19:42:52 ad Exp $ */
+/* $NetBSD: kern_fork.c,v 1.217 2019/12/16 22:47:54 ad Exp $ */
/*-
* Copyright (c) 1999, 2001, 2004, 2006, 2007, 2008, 2019
@@ -68,7 +68,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_fork.c,v 1.216 2019/11/23 19:42:52 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_fork.c,v 1.217 2019/12/16 22:47:54 ad Exp $");
#include "opt_ktrace.h"
#include "opt_dtrace.h"
@@ -96,8 +96,6 @@
#include <sys/sdt.h>
#include <sys/ptrace.h>
-#include <uvm/uvm_extern.h>
-
/*
* DTrace SDT provider definitions
*/
@@ -525,11 +523,13 @@
/*
* Update stats now that we know the fork was successful.
*/
- uvmexp.forks++;
+ KPREEMPT_DISABLE(l1);
+ CPU_COUNT(CPU_COUNT_FORKS, 1);
if (flags & FORK_PPWAIT)
- uvmexp.forks_ppwait++;
+ CPU_COUNT(CPU_COUNT_FORKS_PPWAIT, 1);
if (flags & FORK_SHAREVM)
- uvmexp.forks_sharevm++;
+ CPU_COUNT(CPU_COUNT_FORKS_SHAREVM, 1);
+ KPREEMPT_ENABLE(l1);
if (ktrpoint(KTR_EMUL))
p2->p_traceflag |= KTRFAC_TRC_EMUL;
diff -r 66c1d950cac1 -r 7132c305b1da sys/kern/kern_softint.c
--- a/sys/kern/kern_softint.c Mon Dec 16 22:22:11 2019 +0000
+++ b/sys/kern/kern_softint.c Mon Dec 16 22:47:54 2019 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: kern_softint.c,v 1.55 2019/12/06 21:36:10 ad Exp $ */
+/* $NetBSD: kern_softint.c,v 1.56 2019/12/16 22:47:54 ad Exp $ */
/*-
* Copyright (c) 2007, 2008, 2019 The NetBSD Foundation, Inc.
@@ -170,7 +170,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_softint.c,v 1.55 2019/12/06 21:36:10 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_softint.c,v 1.56 2019/12/16 22:47:54 ad Exp $");
#include <sys/param.h>
#include <sys/proc.h>
@@ -607,11 +607,7 @@
KERNEL_UNLOCK_ONE(l);
}
- /*
- * Unlocked, but only for statistics.
- * Should be per-CPU to prevent cache ping-pong.
- */
- curcpu()->ci_data.cpu_nsoft++;
+ CPU_COUNT(CPU_COUNT_NSOFT, 1);
KASSERT(si->si_cpu == curcpu());
KASSERT(si->si_lwp->l_wchan == NULL);
diff -r 66c1d950cac1 -r 7132c305b1da sys/kern/vfs_vnode.c
--- a/sys/kern/vfs_vnode.c Mon Dec 16 22:22:11 2019 +0000
+++ b/sys/kern/vfs_vnode.c Mon Dec 16 22:47:54 2019 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: vfs_vnode.c,v 1.104 2019/12/01 13:56:29 ad Exp $ */
+/* $NetBSD: vfs_vnode.c,v 1.105 2019/12/16 22:47:54 ad Exp $ */
/*-
* Copyright (c) 1997-2011, 2019 The NetBSD Foundation, Inc.
@@ -146,7 +146,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: vfs_vnode.c,v 1.104 2019/12/01 13:56:29 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: vfs_vnode.c,v 1.105 2019/12/16 22:47:54 ad Exp $");
#include <sys/param.h>
#include <sys/kernel.h>
@@ -792,10 +792,8 @@
/* Take care of space accounting. */
if ((vp->v_iflag & VI_EXECMAP) != 0 &&
vp->v_uobj.uo_npages != 0) {
- atomic_add_int(&uvmexp.execpages,
- -vp->v_uobj.uo_npages);
- atomic_add_int(&uvmexp.filepages,
- vp->v_uobj.uo_npages);
+ cpu_count(CPU_COUNT_EXECPAGES, -vp->v_uobj.uo_npages);
+ cpu_count(CPU_COUNT_FILEPAGES, vp->v_uobj.uo_npages);
}
vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP|VI_WRMAP);
vp->v_vflag &= ~VV_MAPPED;
@@ -1565,8 +1563,8 @@
*/
VSTATE_CHANGE(vp, VS_LOADED, VS_RECLAIMING);
if ((vp->v_iflag & VI_EXECMAP) != 0 && vp->v_uobj.uo_npages != 0) {
- atomic_add_int(&uvmexp.execpages, -vp->v_uobj.uo_npages);
- atomic_add_int(&uvmexp.filepages, vp->v_uobj.uo_npages);
+ cpu_count(CPU_COUNT_EXECPAGES, -vp->v_uobj.uo_npages);
+ cpu_count(CPU_COUNT_FILEPAGES, vp->v_uobj.uo_npages);
}
vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP);
mutex_exit(vp->v_interlock);
diff -r 66c1d950cac1 -r 7132c305b1da sys/kern/vfs_vnops.c
--- a/sys/kern/vfs_vnops.c Mon Dec 16 22:22:11 2019 +0000
+++ b/sys/kern/vfs_vnops.c Mon Dec 16 22:47:54 2019 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: vfs_vnops.c,v 1.203 2019/12/01 13:56:29 ad Exp $ */
+/* $NetBSD: vfs_vnops.c,v 1.204 2019/12/16 22:47:54 ad Exp $ */
/*-
* Copyright (c) 2009 The NetBSD Foundation, Inc.
@@ -66,7 +66,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: vfs_vnops.c,v 1.203 2019/12/01 13:56:29 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: vfs_vnops.c,v 1.204 2019/12/16 22:47:54 ad Exp $");
#include "veriexec.h"
@@ -341,8 +341,8 @@
mutex_enter(vp->v_interlock);
if ((vp->v_iflag & VI_EXECMAP) == 0) {
- atomic_add_int(&uvmexp.filepages, -vp->v_uobj.uo_npages);
- atomic_add_int(&uvmexp.execpages, vp->v_uobj.uo_npages);
+ cpu_count(CPU_COUNT_FILEPAGES, -vp->v_uobj.uo_npages);
+ cpu_count(CPU_COUNT_EXECPAGES, vp->v_uobj.uo_npages);
vp->v_iflag |= VI_EXECMAP;
}
mutex_exit(vp->v_interlock);
@@ -368,8 +368,8 @@
return (ETXTBSY);
}
if ((vp->v_iflag & VI_EXECMAP) == 0) {
- atomic_add_int(&uvmexp.filepages, -vp->v_uobj.uo_npages);
- atomic_add_int(&uvmexp.execpages, vp->v_uobj.uo_npages);
+ cpu_count(CPU_COUNT_FILEPAGES, -vp->v_uobj.uo_npages);
+ cpu_count(CPU_COUNT_EXECPAGES, vp->v_uobj.uo_npages);
}
vp->v_iflag |= (VI_TEXT | VI_EXECMAP);
mutex_exit(vp->v_interlock);
diff -r 66c1d950cac1 -r 7132c305b1da sys/miscfs/procfs/procfs_linux.c
--- a/sys/miscfs/procfs/procfs_linux.c Mon Dec 16 22:22:11 2019 +0000
+++ b/sys/miscfs/procfs/procfs_linux.c Mon Dec 16 22:47:54 2019 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: procfs_linux.c,v 1.76 2019/09/07 19:08:28 chs Exp $ */
+/* $NetBSD: procfs_linux.c,v 1.77 2019/12/16 22:47:55 ad Exp $ */
/*
* Copyright (c) 2001 Wasabi Systems, Inc.
@@ -36,7 +36,7 @@