Source-Changes-HG archive
[src/trunk]: src/sys/uvm/pmap fix the start index generation in pmap_segtab_r...
details: https://anonhg.NetBSD.org/src/rev/6d4c88d83b5b
branches: trunk
changeset: 349081:6d4c88d83b5b
user: mrg <mrg%NetBSD.org@localhost>
date: Wed Nov 23 03:30:53 2016 +0000
description:
fix the start index generation in pmap_segtab_release() to
ensure it fits within the actual array; this stops N64 binaries
from triggering a later panic. move the panic check itself into
a common function that is now also called from a couple of new places.
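
The heart of the change is the loop start index in pmap_segtab_release(): for an
N64 process the virtual address handed in can be large enough that va / vinc lands
past the end of seg_tab[], so the new code masks the quotient back into the table.
Below is a minimal user-space sketch of the before/after computation, with
hypothetical constants, assuming PMAP_SEGTABSIZE is a power of two (which is what
makes the (PMAP_SEGTABSIZE - 1) mask valid):

/*
 * Illustrative sketch only, not the kernel code: PMAP_SEGTABSIZE and
 * NBSEG below are hypothetical stand-ins chosen for the demo.
 */
#include <stdio.h>
#include <stdint.h>

#define PMAP_SEGTABSIZE	512		/* hypothetical table size */
#define NBSEG		0x400000ULL	/* hypothetical bytes per segment */

int
main(void)
{
	/* a large N64-style user virtual address */
	uint64_t va = 0x0000000120400000ULL;

	/* old computation: can index far past the end of seg_tab[] */
	size_t old_i = (size_t)(va / NBSEG);

	/* new computation: wrapped into the table, as in the commit */
	size_t new_i = (size_t)(va / NBSEG) & (PMAP_SEGTABSIZE - 1);

	printf("old index %zu (past the %d-entry table), new index %zu\n",
	    old_i, PMAP_SEGTABSIZE, new_i);
	return 0;
}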
diffstat:
sys/uvm/pmap/pmap_segtab.c | 40 +++++++++++++++++++++++++++++++---------
1 files changed, 31 insertions(+), 9 deletions(-)
diffs (103 lines):
diff -r 6e8adda0026f -r 6d4c88d83b5b sys/uvm/pmap/pmap_segtab.c
--- a/sys/uvm/pmap/pmap_segtab.c Wed Nov 23 03:02:56 2016 +0000
+++ b/sys/uvm/pmap/pmap_segtab.c Wed Nov 23 03:30:53 2016 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap_segtab.c,v 1.3 2016/07/11 16:06:09 matt Exp $ */
+/* $NetBSD: pmap_segtab.c,v 1.4 2016/11/23 03:30:53 mrg Exp $ */
/*-
* Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
@@ -67,7 +67,7 @@
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap_segtab.c,v 1.3 2016/07/11 16:06:09 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap_segtab.c,v 1.4 2016/11/23 03:30:53 mrg Exp $");
/*
* Manages physical address maps.
@@ -130,6 +130,24 @@
kmutex_t pmap_segtab_lock __cacheline_aligned;
+static void
+pmap_check_stp(pmap_segtab_t *stp, const char *caller, const char *why)
+{
+#ifdef DEBUG
+ for (size_t i = 0; i < PMAP_SEGTABSIZE; i++) {
+ if (stp->seg_tab[i] != 0) {
+#ifdef DEBUG_NOISY
+ for (size_t j = i; j < PMAP_SEGTABSIZE; j++)
+ printf("%s: pm_segtab.seg_tab[%zu] = 0x%p\n",
+ caller, j, stp->seg_tab[j]);
+#endif
+ panic("%s: pm_segtab.seg_tab[%zu] != 0 (0x%p): %s",
+ caller, i, stp->seg_tab[i], why);
+ }
+ }
+#endif
+}
+
static inline struct vm_page *
pmap_pte_pagealloc(void)
{
@@ -190,7 +208,9 @@
{
pmap_segtab_t *stp = *stp_p;
- for (size_t i = va / vinc; i < PMAP_SEGTABSIZE; i++, va += vinc) {
+ for (size_t i = (va / vinc) & (PMAP_SEGTABSIZE - 1);
+ i < PMAP_SEGTABSIZE;
+ i++, va += vinc) {
#ifdef _LP64
if (vinc > NBSEG) {
if (stp->seg_seg[i] != NULL) {
@@ -236,6 +256,8 @@
}
if (free_stp) {
+ pmap_check_stp(stp, __func__,
+ vinc == NBSEG ? "release seg" : "release xseg");
pmap_segtab_free(stp);
*stp_p = NULL;
}
@@ -257,6 +279,7 @@
pmap_segtab_alloc(void)
{
pmap_segtab_t *stp;
+ bool found_on_freelist = false;
again:
mutex_spin_enter(&pmap_segtab_lock);
@@ -264,6 +287,7 @@
pmap_segtab_info.free_segtab = stp->seg_seg[0];
stp->seg_seg[0] = NULL;
SEGTAB_ADD(nget, 1);
+ found_on_freelist = true;
}
mutex_spin_exit(&pmap_segtab_lock);
@@ -300,12 +324,9 @@
}
}
-#ifdef DEBUG
- for (size_t i = 0; i < PMAP_SEGTABSIZE; i++) {
- if (stp->seg_tab[i] != 0)
- panic("%s: pm_segtab.seg_tab[%zu] != 0", __func__, i);
- }
-#endif
+ pmap_check_stp(stp, __func__,
+ found_on_freelist ? "from free list" : "allocated");
+
return stp;
}
@@ -420,6 +441,7 @@
#ifdef MULTIPROCESSOR
pmap_segtab_t *ostp = atomic_cas_ptr(stp_p, NULL, nstp);
if (__predict_false(ostp != NULL)) {
+ pmap_check_stp(nstp, __func__, "reserve");
pmap_segtab_free(nstp);
nstp = ostp;
}
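
For reference, the consolidated pmap_check_stp() check simply verifies that every
entry of a segment table is empty before the table is freed or handed back out,
and panics under a DEBUG kernel if one is not. A standalone user-space sketch of
the same pattern, with simplified stand-in types (the real pmap_segtab_t, panic(),
and DEBUG_NOISY plumbing are kernel-internal):

/*
 * Standalone sketch of the "table must be empty" check the commit
 * factors out into pmap_check_stp().  Types and sizes are stand-ins,
 * not the kernel's definitions.
 */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define SEGTABSIZE 8			/* hypothetical, small for the demo */

struct segtab {
	void *seg_tab[SEGTABSIZE];
};

static void
check_stp(const struct segtab *stp, const char *caller, const char *why)
{
#ifdef DEBUG
	for (size_t i = 0; i < SEGTABSIZE; i++) {
		if (stp->seg_tab[i] != NULL) {
			/* the kernel panics here; abort in the demo */
			fprintf(stderr, "%s: seg_tab[%zu] != NULL (%p): %s\n",
			    caller, i, stp->seg_tab[i], why);
			abort();
		}
	}
#else
	(void)stp; (void)caller; (void)why;
#endif
}

int
main(void)
{
	struct segtab st = { .seg_tab = { NULL } };

	/* same idea as calling pmap_check_stp() before freeing a table */
	check_stp(&st, __func__, "release");
	printf("table verified empty\n");
	return 0;
}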