Source-Changes-HG archive
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]
[src/trunk]: src/usr.bin/vmstat Remove PG_ZERO. It worked brilliantly on x86...
details: https://anonhg.NetBSD.org/src/rev/ea7d1f33691c
branches: trunk
changeset: 972968:ea7d1f33691c
user: ad <ad@NetBSD.org>
date: Sun Jun 14 21:41:42 2020 +0000
description:
Remove PG_ZERO. It worked brilliantly on x86 machines from the mid-90s, but
having spent an age experimenting with it over the last 6 months on various
machines and with different use cases, it's always either break-even or a
slight net loss for me.
diffstat:
sys/arch/x86/x86/pmap.c | 7 +---
sys/sys/cpu_data.h | 8 ++--
sys/uvm/uvm_glue.c | 7 +---
sys/uvm/uvm_meter.c | 16 +---------
sys/uvm/uvm_page.c | 72 ++++++++----------------------------------------
sys/uvm/uvm_page.h | 15 +---------
sys/uvm/uvm_pgflcache.c | 10 +++---
sys/uvm/uvm_pglist.c | 6 +--
sys/uvm/uvm_stat.c | 9 ++---
usr.bin/vmstat/vmstat.c | 15 +--------
10 files changed, 37 insertions(+), 128 deletions(-)
diffs (truncated from 533 to 300 lines):
diff -r f6a49bb85868 -r ea7d1f33691c sys/arch/x86/x86/pmap.c
--- a/sys/arch/x86/x86/pmap.c Sun Jun 14 21:34:25 2020 +0000
+++ b/sys/arch/x86/x86/pmap.c Sun Jun 14 21:41:42 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.398 2020/06/03 00:27:46 ad Exp $ */
+/* $NetBSD: pmap.c,v 1.399 2020/06/14 21:41:42 ad Exp $ */
/*
* Copyright (c) 2008, 2010, 2016, 2017, 2019, 2020 The NetBSD Foundation, Inc.
@@ -130,7 +130,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.398 2020/06/03 00:27:46 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.399 2020/06/14 21:41:42 ad Exp $");
#include "opt_user_ldt.h"
#include "opt_lockdebug.h"
@@ -3066,7 +3066,6 @@
if (!pmap_valid_entry(opte)) {
continue;
}
- pmap_pte_set(pte, 0);
/*
* Count the PTE. If it's not for a managed mapping
@@ -5741,8 +5740,6 @@
PMAP_DUMMY_LOCK(pmap);
uvm_pagerealloc(ptp, NULL, 0);
PMAP_DUMMY_UNLOCK(pmap);
-
- ptp->flags |= PG_ZERO;
uvm_pagefree(ptp);
}
mutex_exit(&pmap->pm_lock);
diff -r f6a49bb85868 -r ea7d1f33691c sys/sys/cpu_data.h
--- a/sys/sys/cpu_data.h Sun Jun 14 21:34:25 2020 +0000
+++ b/sys/sys/cpu_data.h Sun Jun 14 21:41:42 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: cpu_data.h,v 1.51 2020/06/11 22:21:05 ad Exp $ */
+/* $NetBSD: cpu_data.h,v 1.52 2020/06/14 21:41:42 ad Exp $ */
/*-
* Copyright (c) 2004, 2006, 2007, 2008, 2019, 2020 The NetBSD Foundation, Inc.
@@ -57,12 +57,12 @@
CPU_COUNT_FORKS_SHAREVM,
CPU_COUNT_COLORHIT, /* 8 */
CPU_COUNT_COLORMISS,
- CPU_COUNT_PGA_ZEROHIT,
- CPU_COUNT_PGA_ZEROMISS,
+ CPU_COUNT__UNUSED3,
+ CPU_COUNT__UNUSED4,
CPU_COUNT_CPUHIT,
CPU_COUNT_CPUMISS,
CPU_COUNT_FREEPAGES,
- CPU_COUNT_ZEROPAGES,
+ CPU_COUNT__UNUSED5,
CPU_COUNT_PAGEINS, /* 16 */
CPU_COUNT_FLTUP,
CPU_COUNT_FLTNOUP,
diff -r f6a49bb85868 -r ea7d1f33691c sys/uvm/uvm_glue.c
--- a/sys/uvm/uvm_glue.c Sun Jun 14 21:34:25 2020 +0000
+++ b/sys/uvm/uvm_glue.c Sun Jun 14 21:41:42 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_glue.c,v 1.180 2020/06/11 19:20:47 ad Exp $ */
+/* $NetBSD: uvm_glue.c,v 1.181 2020/06/14 21:41:42 ad Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -62,7 +62,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_glue.c,v 1.180 2020/06/11 19:20:47 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_glue.c,v 1.181 2020/06/14 21:41:42 ad Exp $");
#include "opt_kgdb.h"
#include "opt_kstack.h"
@@ -531,7 +531,4 @@
if (!ci->ci_want_resched)
uvmpdpol_idle(ucpu);
- if (!ci->ci_want_resched)
- uvm_pageidlezero();
-
}
diff -r f6a49bb85868 -r ea7d1f33691c sys/uvm/uvm_meter.c
--- a/sys/uvm/uvm_meter.c Sun Jun 14 21:34:25 2020 +0000
+++ b/sys/uvm/uvm_meter.c Sun Jun 14 21:41:42 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_meter.c,v 1.79 2020/06/11 22:21:05 ad Exp $ */
+/* $NetBSD: uvm_meter.c,v 1.80 2020/06/14 21:41:42 ad Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -36,7 +36,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_meter.c,v 1.79 2020/06/11 22:21:05 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_meter.c,v 1.80 2020/06/14 21:41:42 ad Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@@ -111,7 +111,6 @@
u.inactive = inactive;
u.paging = uvmexp.paging;
u.wired = uvmexp.wired;
- u.zeropages = cpu_count_get(CPU_COUNT_ZEROPAGES);
u.reserve_pagedaemon = uvmexp.reserve_pagedaemon;
u.reserve_kernel = uvmexp.reserve_kernel;
u.freemin = uvmexp.freemin;
@@ -137,8 +136,6 @@
u.forks = cpu_count_get(CPU_COUNT_FORKS);
u.forks_ppwait = cpu_count_get(CPU_COUNT_FORKS_PPWAIT);
u.forks_sharevm = cpu_count_get(CPU_COUNT_FORKS_SHAREVM);
- u.pga_zerohit = cpu_count_get(CPU_COUNT_PGA_ZEROHIT);
- u.pga_zeromiss = cpu_count_get(CPU_COUNT_PGA_ZEROMISS);
u.zeroaborts = uvmexp.zeroaborts;
u.fltnoram = cpu_count_get(CPU_COUNT_FLTNORAM);
u.fltnoanon = cpu_count_get(CPU_COUNT_FLTNOANON);
@@ -273,12 +270,6 @@
NULL, USPACE, NULL, 0,
CTL_VM, VM_USPACE, CTL_EOL);
sysctl_createv(clog, 0, NULL, NULL,
- CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
- CTLTYPE_BOOL, "idlezero",
- SYSCTL_DESCR("Whether try to zero pages in idle loop"),
- NULL, 0, &vm_page_zero_enable, 0,
- CTL_VM, CTL_CREATE, CTL_EOL);
- sysctl_createv(clog, 0, NULL, NULL,
CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE,
CTLTYPE_LONG, "minaddress",
SYSCTL_DESCR("Minimum user address"),
@@ -455,7 +446,6 @@
/* uvm_availmem() will sync the counters if old. */
uvmexp.free = (int)uvm_availmem(true);
- uvmexp.zeropages = (int)cpu_count_get(CPU_COUNT_ZEROPAGES);
uvmexp.cpuhit = (int)cpu_count_get(CPU_COUNT_CPUHIT);
uvmexp.cpumiss = (int)cpu_count_get(CPU_COUNT_CPUMISS);
uvmexp.faults = (int)cpu_count_get(CPU_COUNT_NFAULT);
@@ -468,8 +458,6 @@
uvmexp.forks = (int)cpu_count_get(CPU_COUNT_FORKS);
uvmexp.forks_ppwait = (int)cpu_count_get(CPU_COUNT_FORKS_PPWAIT);
uvmexp.forks_sharevm = (int)cpu_count_get(CPU_COUNT_FORKS_SHAREVM);
- uvmexp.pga_zerohit = (int)cpu_count_get(CPU_COUNT_PGA_ZEROHIT);
- uvmexp.pga_zeromiss = (int)cpu_count_get(CPU_COUNT_PGA_ZEROMISS);
uvmexp.fltnoram = (int)cpu_count_get(CPU_COUNT_FLTNORAM);
uvmexp.fltnoanon = (int)cpu_count_get(CPU_COUNT_FLTNOANON);
uvmexp.fltpgwait = (int)cpu_count_get(CPU_COUNT_FLTPGWAIT);
diff -r f6a49bb85868 -r ea7d1f33691c sys/uvm/uvm_page.c
--- a/sys/uvm/uvm_page.c Sun Jun 14 21:34:25 2020 +0000
+++ b/sys/uvm/uvm_page.c Sun Jun 14 21:41:42 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_page.c,v 1.241 2020/06/13 19:55:39 ad Exp $ */
+/* $NetBSD: uvm_page.c,v 1.242 2020/06/14 21:41:42 ad Exp $ */
/*-
* Copyright (c) 2019, 2020 The NetBSD Foundation, Inc.
@@ -95,7 +95,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.241 2020/06/13 19:55:39 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.242 2020/06/14 21:41:42 ad Exp $");
#include "opt_ddb.h"
#include "opt_uvm.h"
@@ -119,13 +119,6 @@
#include <uvm/uvm_pgflcache.h>
/*
- * Some supported CPUs in a given architecture don't support all
- * of the things necessary to do idle page zero'ing efficiently.
- * We therefore provide a way to enable it from machdep code here.
- */
-bool vm_page_zero_enable = false;
-
-/*
* number of pages per-CPU to reserve for the kernel.
*/
#ifndef UVM_RESERVED_PAGES_PER_CPU
@@ -1078,8 +1071,8 @@
* lock because of uvm_pglistalloc().
*/
LIST_REMOVE(pg, pageq.list);
- KASSERT(pg->flags & PG_FREE);
- pg->flags &= PG_ZERO;
+ KASSERT(pg->flags == PG_FREE);
+ pg->flags = PG_BUSY | PG_CLEAN | PG_FAKE;
pgb->pgb_nfree--;
/*
@@ -1154,8 +1147,8 @@
* => caller must activate/deactivate page if it is not wired.
* => free_list is ignored if strat == UVM_PGA_STRAT_NORMAL.
* => policy decision: it is more important to pull a page off of the
- * appropriate priority free list than it is to get a zero'd or
- * unknown contents page. This is because we live with the
+ * appropriate priority free list than it is to get a page from the
+ * correct bucket or color bin. This is because we live with the
* consequences of a bad free list decision for the entire
* lifetime of the page, e.g. if the page comes from memory that
* is slower to access.
@@ -1165,8 +1158,7 @@
uvm_pagealloc_strat(struct uvm_object *obj, voff_t off, struct vm_anon *anon,
int flags, int strat, int free_list)
{
- int zeroit = 0, color;
- int lcv, error, s;
+ int color, lcv, error, s;
struct uvm_cpu *ucpu;
struct vm_page *pg;
lwp_t *l;
@@ -1280,28 +1272,15 @@
ucpu->pgflcolor = (color + 1) & uvmexp.colormask;
/*
- * while still at IPL_VM, update allocation statistics and remember
- * if we have to zero the page
+ * while still at IPL_VM, update allocation statistics.
*/
CPU_COUNT(CPU_COUNT_FREEPAGES, -1);
- if (flags & UVM_PGA_ZERO) {
- if (pg->flags & PG_ZERO) {
- CPU_COUNT(CPU_COUNT_PGA_ZEROHIT, 1);
- zeroit = 0;
- } else {
- CPU_COUNT(CPU_COUNT_PGA_ZEROMISS, 1);
- zeroit = 1;
- }
- }
- if (pg->flags & PG_ZERO) {
- CPU_COUNT(CPU_COUNT_ZEROPAGES, -1);
- }
if (anon) {
CPU_COUNT(CPU_COUNT_ANONCLEAN, 1);
}
splx(s);
- KASSERT((pg->flags & ~(PG_ZERO|PG_FREE)) == 0);
+ KASSERT(pg->flags == (PG_BUSY|PG_CLEAN|PG_FAKE));
/*
* assign the page to the object. as the page was free, we know
@@ -1315,7 +1294,6 @@
pg->uobject = obj;
pg->uanon = anon;
KASSERT(uvm_page_owner_locked_p(pg, true));
- pg->flags = PG_BUSY|PG_CLEAN|PG_FAKE;
if (anon) {
anon->an_page = pg;
pg->flags |= PG_ANON;
@@ -1347,16 +1325,11 @@
UVM_PAGE_OWN(pg, "new alloc");
if (flags & UVM_PGA_ZERO) {
- /*
- * A zero'd page is not clean. If we got a page not already
- * zero'd, then we have to zero it ourselves.
- */
+ /* A zero'd page is not clean. */
if (obj != NULL || anon != NULL) {
uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
}
- if (zeroit) {
- pmap_zero_page(VM_PAGE_TO_PHYS(pg));
- }
+ pmap_zero_page(VM_PAGE_TO_PHYS(pg));
}
return(pg);
@@ -1477,7 +1450,7 @@
pmap_update(pmap_kernel());
while (p < ep) {
if (*p != 0)
- panic("PG_ZERO page isn't zero-filled");
+ panic("zero page isn't zero-filled");
p++;
}
pmap_kremove(uvm_zerocheckkva, PAGE_SIZE);
@@ -1633,16 +1606,11 @@
#ifdef DEBUG
pg->uobject = (void *)0xdeadbeef;
pg->uanon = (void *)0xdeadbeef;
- if (pg->flags & PG_ZERO)
- uvm_pagezerocheck(pg);
#endif /* DEBUG */
/* Try to send the page to the per-CPU cache. */
s = splvm();
CPU_COUNT(CPU_COUNT_FREEPAGES, 1);
- if (pg->flags & PG_ZERO) {
- CPU_COUNT(CPU_COUNT_ZEROPAGES, 1);
- }
ucpu = curcpu()->ci_data.cpu_uvm;
bucket = uvm_page_get_bucket(pg);
if (bucket == ucpu->pgflbucket && uvm_pgflcache_free(ucpu, pg)) {
@@ -1657,7 +1625,7 @@
Home |
Main Index |
Thread Index |
Old Index