Source-Changes-HG archive
[src/trunk]: src/sys/uvm G/C uvm_pagezerocheck
details: https://anonhg.NetBSD.org/src/rev/6289de24bf1a
branches: trunk
changeset: 938922:6289de24bf1a
user: skrll <skrll%NetBSD.org@localhost>
date: Sun Sep 20 10:30:05 2020 +0000
description:
G/C uvm_pagezerocheck
diffstat:
sys/uvm/uvm_page.c | 48 ++----------------------------------------------
sys/uvm/uvm_page.h | 7 ++-----
2 files changed, 4 insertions(+), 51 deletions(-)
diffs (104 lines):
diff -r f61f7058e026 -r 6289de24bf1a sys/uvm/uvm_page.c
--- a/sys/uvm/uvm_page.c Sun Sep 20 10:29:05 2020 +0000
+++ b/sys/uvm/uvm_page.c Sun Sep 20 10:30:05 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_page.c,v 1.246 2020/08/15 01:27:22 tnn Exp $ */
+/* $NetBSD: uvm_page.c,v 1.247 2020/09/20 10:30:05 skrll Exp $ */
/*-
* Copyright (c) 2019, 2020 The NetBSD Foundation, Inc.
@@ -95,7 +95,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.246 2020/08/15 01:27:22 tnn Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.247 2020/09/20 10:30:05 skrll Exp $");
#include "opt_ddb.h"
#include "opt_uvm.h"
@@ -447,14 +447,6 @@
*kvm_startp = round_page(virtual_space_start);
*kvm_endp = trunc_page(virtual_space_end);
-#ifdef DEBUG
- /*
- * steal kva for uvm_pagezerocheck().
- */
- uvm_zerocheckkva = *kvm_startp;
- *kvm_startp += PAGE_SIZE;
- mutex_init(&uvm_zerochecklock, MUTEX_DEFAULT, IPL_VM);
-#endif /* DEBUG */
/*
* init various thresholds.
@@ -1427,42 +1419,6 @@
return error;
}
-#ifdef DEBUG
-/*
- * check if page is zero-filled
- */
-void
-uvm_pagezerocheck(struct vm_page *pg)
-{
- int *p, *ep;
-
- KASSERT(uvm_zerocheckkva != 0);
-
- /*
- * XXX assuming pmap_kenter_pa and pmap_kremove never call
- * uvm page allocator.
- *
- * it might be better to have "CPU-local temporary map" pmap interface.
- */
- mutex_spin_enter(&uvm_zerochecklock);
- pmap_kenter_pa(uvm_zerocheckkva, VM_PAGE_TO_PHYS(pg), VM_PROT_READ, 0);
- p = (int *)uvm_zerocheckkva;
- ep = (int *)((char *)p + PAGE_SIZE);
- pmap_update(pmap_kernel());
- while (p < ep) {
- if (*p != 0)
- panic("zero page isn't zero-filled");
- p++;
- }
- pmap_kremove(uvm_zerocheckkva, PAGE_SIZE);
- mutex_spin_exit(&uvm_zerochecklock);
- /*
- * pmap_update() is not necessary here because no one except us
- * uses this VA.
- */
-}
-#endif /* DEBUG */
-
/*
* uvm_pagefree: free page
*
diff -r f61f7058e026 -r 6289de24bf1a sys/uvm/uvm_page.h
--- a/sys/uvm/uvm_page.h Sun Sep 20 10:29:05 2020 +0000
+++ b/sys/uvm/uvm_page.h Sun Sep 20 10:30:05 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_page.h,v 1.105 2020/06/14 21:41:42 ad Exp $ */
+/* $NetBSD: uvm_page.h,v 1.106 2020/09/20 10:30:05 skrll Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -404,6 +404,7 @@
#ifdef __HAVE_VM_PAGE_MD
#define VM_PAGE_TO_MD(pg) (&(pg)->mdpage)
+#define VM_MD_TO_PAGE(md) (container_of((md), struct vm_page, mdpage))
#endif
/*
@@ -470,10 +471,6 @@
pg->phys_addr |= __SHIFTIN(b, UVM_PHYSADDR_BUCKET);
}
-#ifdef DEBUG
-void uvm_pagezerocheck(struct vm_page *);
-#endif /* DEBUG */
-
#endif /* _KERNEL */
#endif /* _UVM_UVM_PAGE_H_ */
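
The header change above adds VM_MD_TO_PAGE(), which maps a pointer to the embedded machine-dependent mdpage member back to its enclosing struct vm_page via container_of(). The following is an illustrative sketch only, not part of the commit: it uses simplified, hypothetical struct layouts and a local offsetof()-based macro to show the pattern the new macro relies on.

/*
 * Sketch of the container_of() idiom behind VM_MD_TO_PAGE().
 * The structures here are simplified stand-ins, not the real UVM types.
 */
#include <stddef.h>
#include <stdio.h>

struct vm_page_md {
	int md_flags;
};

struct vm_page {
	unsigned long phys_addr;
	struct vm_page_md mdpage;	/* embedded machine-dependent data */
};

/* same idea as the kernel's container_of(): member pointer -> enclosing struct */
#define my_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define MY_VM_MD_TO_PAGE(md)	my_container_of((md), struct vm_page, mdpage)

int
main(void)
{
	struct vm_page pg = { .phys_addr = 0x1000 };
	struct vm_page_md *md = &pg.mdpage;

	/* recover the page from its mdpage member */
	struct vm_page *back = MY_VM_MD_TO_PAGE(md);
	printf("recovered page phys_addr: %#lx\n", back->phys_addr);
	return 0;
}

Because the macro is pure pointer arithmetic on an embedded member, it costs nothing at run time and needs no back-pointer stored in the machine-dependent structure.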