Source-Changes-HG archive
[src/trunk]: src/sys/arch/x86/x86 Add a bunch of assertions.
details: https://anonhg.NetBSD.org/src/rev/66722ee091cd
branches: trunk
changeset: 970240:66722ee091cd
user: ad <ad@NetBSD.org>
date: Tue Mar 17 13:34:50 2020 +0000
description:
Add a bunch of assertions.
diffstat:
sys/arch/x86/x86/pmap.c | 110 ++++++++++++++++++++++++++++++++++++++++++-----
1 file changed, 97 insertions(+), 13 deletions(-)
diffs (truncated from 354 to 300 lines):
diff -r b1040781ffbe -r 66722ee091cd sys/arch/x86/x86/pmap.c
--- a/sys/arch/x86/x86/pmap.c Tue Mar 17 10:50:59 2020 +0000
+++ b/sys/arch/x86/x86/pmap.c Tue Mar 17 13:34:50 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.370 2020/03/15 19:41:04 ad Exp $ */
+/* $NetBSD: pmap.c,v 1.371 2020/03/17 13:34:50 ad Exp $ */
/*
* Copyright (c) 2008, 2010, 2016, 2017, 2019, 2020 The NetBSD Foundation, Inc.
@@ -130,7 +130,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.370 2020/03/15 19:41:04 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.371 2020/03/17 13:34:50 ad Exp $");
#include "opt_user_ldt.h"
#include "opt_lockdebug.h"
@@ -139,6 +139,8 @@
#include "opt_svs.h"
#include "opt_kaslr.h"
+#define __MUTEX_PRIVATE /* for assertions */
+
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
@@ -333,6 +335,8 @@
#endif
#define VM_PAGE_TO_PP(pg) (&(pg)->mdpage.mp_pp)
+#define PMAP_CHECK_PP(pp) \
+ KASSERTMSG((pp)->pp_lock.mtx_ipl._ipl == IPL_VM, "bad pmap_page %p", pp)
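The macro relies on __MUTEX_PRIVATE (defined earlier in this diff) to peek
inside the spin lock: every pmap_page has its pp_lock initialised at IPL_VM,
so a structure that fails this test was never initialised, or has since been
freed or overwritten. A minimal userland sketch of the same "known post-init
value" probe, with placeholder types and an arbitrary IPL_VM value rather
than the kernel's:

	#include <assert.h>
	#include <stdio.h>

	#define IPL_VM	5		/* placeholder, not the kernel's value */

	struct mtx { int ipl; };	/* stand-in for the private mutex layout */
	struct pmap_page { struct mtx pp_lock; };

	/* Same idea as PMAP_CHECK_PP(): a cheap sanity probe on a field
	 * whose value is known once the structure has been initialised. */
	#define CHECK_PP(pp)	assert((pp)->pp_lock.ipl == IPL_VM)

	int
	main(void)
	{
		struct pmap_page pp = { .pp_lock = { .ipl = IPL_VM } };

		CHECK_PP(&pp);	/* passes: lock carries the expected IPL */
		printf("pmap_page sanity check passed\n");
		return 0;
	}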
/*
* Other data structures
@@ -644,6 +648,7 @@
*minidx = UINT16_MAX;
*maxidx = 0;
rb_tree_init(&VM_PAGE_TO_PP(ptp)->pp_rb, &pmap_rbtree_ops);
+ PMAP_CHECK_PP(VM_PAGE_TO_PP(ptp));
}
/*
@@ -654,6 +659,7 @@
{
KASSERT(RB_TREE_MIN(&VM_PAGE_TO_PP(ptp)->pp_rb) == NULL);
+ PMAP_CHECK_PP(VM_PAGE_TO_PP(ptp));
ptp->uanon = NULL;
}
@@ -1982,6 +1988,34 @@
}
/*
+ * pmap_check_pv: verify {VA, PTP} pair is either tracked/untracked by page
+ */
+static void
+pmap_check_pv(struct pmap *pmap, struct vm_page *ptp, struct pmap_page *pp,
+ vaddr_t va, bool tracked)
+{
+#ifdef DIAGNOSTIC /* XXX too slow make this DEBUG before April 2020 */
+ struct pv_pte *pvpte;
+
+ PMAP_CHECK_PP(pp);
+
+ mutex_spin_enter(&pp->pp_lock);
+ for (pvpte = pv_pte_first(pp); pvpte; pvpte = pv_pte_next(pp, pvpte)) {
+ if (pvpte->pte_ptp == ptp && pvpte->pte_va == va) {
+ break;
+ }
+ }
+ mutex_spin_exit(&pp->pp_lock);
+
+ if (pvpte && !tracked) {
+ panic("pmap_check_pv: %p/%lx found on pp %p", ptp, va, pp);
+ } else if (!pvpte && tracked) {
+ panic("pmap_check_pv: %p/%lx missing on pp %p", ptp, va, pp);
+ }
+#endif
+}
+
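pmap_check_pv() above is the workhorse of this change: under pp_lock it scans
the page's PV list for the {PTP, VA} pair and panics when the pair's presence
disagrees with what the caller expects. Reduced to a self-contained sketch
(the list layout and types here are illustrative, not the kernel's):

	#include <assert.h>
	#include <stdbool.h>
	#include <stddef.h>

	struct pv_pte {
		void		*pte_ptp;	/* page table page holding the PTE */
		unsigned long	 pte_va;	/* virtual address mapped */
		struct pv_pte	*pte_next;
	};

	/* Scan for {ptp, va} and demand that presence match the caller's
	 * expectation, as pmap_check_pv() does for a pmap_page. */
	static void
	check_tracked(const struct pv_pte *head, void *ptp, unsigned long va,
	    bool tracked)
	{
		const struct pv_pte *p;

		for (p = head; p != NULL; p = p->pte_next)
			if (p->pte_ptp == ptp && p->pte_va == va)
				break;
		assert((p != NULL) == tracked);
	}

	int
	main(void)
	{
		struct pv_pte e = { (void *)0x1, 0x1000, NULL };

		check_tracked(&e, (void *)0x1, 0x1000, true);	/* present */
		check_tracked(&e, (void *)0x2, 0x2000, false);	/* absent */
		return 0;
	}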
+/*
* pmap_treelookup_pv: search the PV tree for a dynamic entry
*
* => pmap must be locked
@@ -2010,6 +2044,7 @@
node = node->rb_nodes[pve->pve_pte.pte_va < va];
}
}
+
/*
* pmap_lookup_pv: look up a non-embedded pv entry for the given pmap
*
@@ -2083,6 +2118,7 @@
KASSERT(ptp_to_pmap(ptp) == pmap);
KASSERT(ptp == NULL || ptp->uobject != NULL);
KASSERT(ptp == NULL || ptp_va2o(va, 1) == ptp->offset);
+ PMAP_CHECK_PP(pp);
/*
* If entering the same page and it's already tracked with an
@@ -2093,6 +2129,7 @@
if (atomic_load_relaxed(&pp->pp_pte.pte_ptp) == ptp &&
atomic_load_relaxed(&pp->pp_pte.pte_va) == va) {
*samepage = true;
+ pmap_check_pv(pmap, ptp, pp, va, true);
return 0;
}
@@ -2104,6 +2141,7 @@
*old_pve = pmap_treelookup_pv(pmap, ptp, tree, va);
if (*old_pve != NULL && (*old_pve)->pve_pp == pp) {
*samepage = true;
+ pmap_check_pv(pmap, ptp, pp, va, true);
return 0;
}
@@ -2116,6 +2154,7 @@
}
error = 0;
+ pmap_check_pv(pmap, ptp, pp, va, false);
mutex_spin_enter(&pp->pp_lock);
if (!pv_pte_embedded(pp)) {
/*
@@ -2142,6 +2181,7 @@
LIST_INSERT_HEAD(&pp->pp_pvlist, pve, pve_list);
}
mutex_spin_exit(&pp->pp_lock);
+ pmap_check_pv(pmap, ptp, pp, va, true);
return error;
}
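The PV insertion path above tries the cheap cases first: the tracker embedded
in pmap_page itself (pp_pte, read with atomic_load_relaxed), then the per-PTP
red-black tree, and only then takes pp_lock to attach a new entry; the new
pmap_check_pv() calls bracket that work (false before the insert, true
after). A sketch of the two-tier scheme, with simplified stand-in types
rather than the kernel's:

	#include <assert.h>
	#include <stdbool.h>
	#include <stddef.h>

	struct pte_id { void *ptp; unsigned long va; };
	struct pv_entry { struct pte_id id; struct pv_entry *next; };

	struct page {
		bool		 emb_used;	/* embedded slot taken? */
		struct pte_id	 emb;		/* first tracker lives in the page */
		struct pv_entry	*list;		/* later trackers are allocated */
	};

	/* The common single-mapping case costs no allocation; additional
	 * mappings of the same page spill to a dynamically allocated entry. */
	static void
	track(struct page *pg, struct pv_entry *pve, void *ptp, unsigned long va)
	{
		if (!pg->emb_used) {
			pg->emb.ptp = ptp;
			pg->emb.va = va;
			pg->emb_used = true;
		} else {
			pve->id.ptp = ptp;
			pve->id.va = va;
			pve->next = pg->list;
			pg->list = pve;
		}
	}

	int
	main(void)
	{
		struct page pg = { false, { NULL, 0 }, NULL };
		struct pv_entry spare;

		track(&pg, NULL, (void *)0x1, 0x1000);	/* embedded slot */
		track(&pg, &spare, (void *)0x2, 0x2000);	/* spills to list */
		assert(pg.emb_used && pg.list == &spare);
		return 0;
	}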
@@ -2157,7 +2197,8 @@
pmap_remove_pv(struct pmap *pmap, struct pmap_page *pp, struct vm_page *ptp,
vaddr_t va, struct pv_entry *pve, uint8_t oattrs)
{
- rb_tree_t *tree;
+ rb_tree_t *tree = (ptp != NULL ?
+ &VM_PAGE_TO_PP(ptp)->pp_rb : &pmap_kernel_rb);
KASSERT(mutex_owned(&pmap->pm_lock));
KASSERT(ptp_to_pmap(ptp) == pmap);
@@ -2165,6 +2206,8 @@
KASSERT(ptp == NULL || ptp_va2o(va, 1) == ptp->offset);
KASSERT(ptp != NULL || pmap == pmap_kernel());
+ pmap_check_pv(pmap, ptp, pp, va, true);
+
mutex_spin_enter(&pp->pp_lock);
pp->pp_attrs |= oattrs;
if (pve == NULL) {
@@ -2174,16 +2217,23 @@
pp->pp_pte.pte_va = 0;
mutex_spin_exit(&pp->pp_lock);
} else {
- KASSERT(pve == pmap_lookup_pv(pmap, ptp, pp, va));
+ KASSERT(pp->pp_pte.pte_ptp != ptp ||
+ pp->pp_pte.pte_va != va);
KASSERT(pve->pve_pte.pte_ptp == ptp);
KASSERT(pve->pve_pte.pte_va == va);
+ KASSERT(pve->pve_pp == pp);
LIST_REMOVE(pve, pve_list);
mutex_spin_exit(&pp->pp_lock);
- tree = (ptp != NULL ? &VM_PAGE_TO_PP(ptp)->pp_rb :
- &pmap_kernel_rb);
+ KASSERT(pmap_treelookup_pv(pmap, ptp, tree, va) == pve);
rb_tree_remove_node(tree, pve);
- }
+#ifdef DIAGNOSTIC
+ memset(pve, 0, sizeof(*pve));
+#endif
+ }
+
+ KASSERT(pmap_treelookup_pv(pmap, ptp, tree, va) == NULL);
+ pmap_check_pv(pmap, ptp, pp, va, false);
}
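Besides the extra KASSERTs, pmap_remove_pv() now poisons the pv_entry with
memset() under DIAGNOSTIC once it has been unlinked, so a later use of a
stale pointer trips over zeroed fields instead of silently corrupting state.
The idiom in miniature (names here are illustrative):

	#include <assert.h>
	#include <stddef.h>
	#include <string.h>

	struct pv_entry { void *pp; struct pv_entry *next; };

	static void
	unlink_and_poison(struct pv_entry **headp, struct pv_entry *pve)
	{
		/* Unlink from a singly linked list. */
		while (*headp != pve)
			headp = &(*headp)->next;
		*headp = pve->next;

		/* Poison: stale users now see NULLs, not plausible garbage. */
		memset(pve, 0, sizeof(*pve));
	}

	int
	main(void)
	{
		struct pv_entry a, b, *head;

		b.pp = &b; b.next = NULL;
		a.pp = &a; a.next = &b;
		head = &a;

		unlink_and_poison(&head, &a);
		assert(head == &b);
		assert(a.pp == NULL && a.next == NULL);	/* poisoned */
		return 0;
	}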
/*
@@ -2201,7 +2251,9 @@
if (pmap->pm_ptphint[lidx] && off == pmap->pm_ptphint[lidx]->offset) {
KASSERT(pmap->pm_ptphint[lidx]->wire_count > 0);
- return pmap->pm_ptphint[lidx];
+ pg = pmap->pm_ptphint[lidx];
+ PMAP_CHECK_PP(VM_PAGE_TO_PP(pg));
+ return pg;
}
PMAP_DUMMY_LOCK(pmap);
pg = uvm_pagelookup(&pmap->pm_obj[lidx], off);
@@ -2210,6 +2262,9 @@
/* This page is queued to be freed - ignore. */
pg = NULL;
}
+ if (pg != NULL) {
+ PMAP_CHECK_PP(VM_PAGE_TO_PP(pg));
+ }
pmap->pm_ptphint[lidx] = pg;
return pg;
}
@@ -2439,6 +2494,7 @@
continue;
}
KASSERT(pt->pg[i]->wire_count == 0);
+ PMAP_CHECK_PP(VM_PAGE_TO_PP(pt->pg[i]));
/* pmap zeros all pages before freeing. */
pt->pg[i]->flags |= PG_ZERO;
pmap_ptp_fini(pt->pg[i]);
@@ -3863,6 +3919,8 @@
KASSERTMSG((pmap_pv_tracked(pmap_pte2pa(opte)) == NULL),
"pv-tracked page without PTE_PVLIST for %#"PRIxVADDR, va);
#endif
+ KASSERT(pmap_treelookup_pv(pmap, ptp, (ptp != NULL ?
+ &VM_PAGE_TO_PP(ptp)->pp_rb : &pmap_kernel_rb), va) == NULL);
return true;
}
@@ -4313,6 +4371,7 @@
error = pmap_sync_pv(pvpte, pa, clearbits, &oattrs, NULL);
if (error == EAGAIN) {
int hold_count;
+ /* XXXAD wait for the pmap's lock instead. */
mutex_spin_exit(&pp->pp_lock);
KERNEL_UNLOCK_ALL(curlwp, &hold_count);
SPINLOCK_BACKOFF(count);
@@ -4429,7 +4488,8 @@
/*
* Acquire pmap. No need to lock the kernel pmap as we won't
- * be touching the pvmap nor the stats.
+ * be touching PV entries nor stats and kernel PDEs aren't
+ * freed.
*/
if (pmap != pmap_kernel()) {
mutex_enter(&pmap->pm_lock);
@@ -4611,13 +4671,16 @@
else
#endif
new_pg = PHYS_TO_VM_PAGE(pa);
+
if (new_pg != NULL) {
/* This is a managed page */
npte |= PTE_PVLIST;
new_pp = VM_PAGE_TO_PP(new_pg);
+ PMAP_CHECK_PP(new_pp);
} else if ((new_pp = pmap_pv_tracked(pa)) != NULL) {
/* This is an unmanaged pv-tracked page */
npte |= PTE_PVLIST;
+ PMAP_CHECK_PP(new_pp);
} else {
new_pp = NULL;
}
@@ -4771,7 +4834,13 @@
*/
if (((opte ^ npte) & (PTE_FRAME | PTE_P)) == 0) {
KASSERT(((opte ^ npte) & PTE_PVLIST) == 0);
+ if ((npte & PTE_PVLIST) != 0) {
+ KASSERT(samepage);
+ pmap_check_pv(pmap, ptp, new_pp, va, true);
+ }
goto same_pa;
+ } else if ((npte & PTE_PVLIST) != 0) {
+ KASSERT(!samepage);
}
/*
@@ -4790,7 +4859,6 @@
pmap_remove_pv(pmap, old_pp, ptp, va, old_pve,
pmap_pte_to_pp_attrs(opte));
if (old_pve != NULL) {
- KASSERT(old_pve->pve_pp == old_pp);
if (pmap->pm_pve == NULL) {
pmap->pm_pve = old_pve;
} else {
@@ -4799,13 +4867,17 @@
}
} else {
KASSERT(old_pve == NULL);
+ KASSERT(pmap_treelookup_pv(pmap, ptp, tree, va) == NULL);
}
/*
* If new page is dynamically PV tracked, insert to tree.
*/
if (new_pve != NULL) {
- rb_tree_insert_node(tree, new_pve);
+ KASSERT(pmap_treelookup_pv(pmap, ptp, tree, va) == NULL);
+ old_pve = rb_tree_insert_node(tree, new_pve);
+ KASSERT(old_pve == new_pve);
+ pmap_check_pv(pmap, ptp, new_pp, va, true);
}
same_pa:
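The insert path above exploits the rbtree(9) convention that
rb_tree_insert_node() returns the already-present node on a duplicate key and
the new node otherwise, so KASSERT(old_pve == new_pve) proves the slot really
was empty. The same convention in a toy insert routine (a sorted singly
linked list standing in for the tree):

	#include <assert.h>
	#include <stddef.h>

	struct node { unsigned long key; struct node *next; };

	/* Insert keeping the list sorted; on a key collision, return the
	 * node already present instead of inserting, mirroring the return
	 * contract of rb_tree_insert_node(). */
	static struct node *
	insert(struct node **headp, struct node *n)
	{
		while (*headp != NULL && (*headp)->key < n->key)
			headp = &(*headp)->next;
		if (*headp != NULL && (*headp)->key == n->key)
			return *headp;		/* collision: existing node */
		n->next = *headp;
		*headp = n;
		return n;			/* success: the new node */
	}

	int
	main(void)
	{
		struct node a = { 1, NULL }, dup = { 1, NULL }, *head = NULL;

		assert(insert(&head, &a) == &a);	/* empty slot: inserted */
		assert(insert(&head, &dup) == &a);	/* duplicate: existing */
		return 0;
	}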
@@ -5159,6 +5231,7 @@
pp->pp_attrs = 0;
pp->pp_pte.pte_ptp = NULL;
pp->pp_pte.pte_va = 0;