Source-Changes-HG archive
[src/trunk]: src/sys/arch/alpha/alpha - Undo part of rev 1.264; go back to no...
details: https://anonhg.NetBSD.org/src/rev/e7d8db7613d5
branches: trunk
changeset: 942709:e7d8db7613d5
user: thorpej <thorpej%NetBSD.org@localhost>
date: Sun Aug 16 20:04:36 2020 +0000
description:
- Undo part of rev 1.264; go back to not acquiring the pmap lock in
pmap_activate(). As of rev 1.211, the pmap::pm_lev1map field is
stable across the life of the pmap, and so the condition that
the change in 1.264 was intended to avoid would not have happened
anyway.
- Explicitly use __cacheline_aligned / COHERENCY_UNIT rather than 64
in a couple of places (see the lock-array sketch after this list).
- Update comments around the lev1map lifecycle, and add some assertions
to enforce the assumptions being described.
- Remove some dubious DEBUG tests that are not MP-safe.
- Change some long-form #ifdef DIAGNOSTIC checks / panics to KASSERTs.
- Remove the PMAP_ACTIVATE() macro because it's no longer used anywhere
except for pmap_activate(). Just open-code the equivalent there.
- In pmap_activate(), only perform the SWPCTX if either the PTBR or the
ASN are different than what the PCB already has (see the pmap_activate() sketch after this list). Also assert that
preemption is disabled and that the specified lwp is curlwp.
- In pmap_deactivate(), add similar assertions, and add a comment explaining
why a SWPCTX to get off of the deactivated lev1map is not necessary.
- Refactor some duplicated code in pmap_growkernel() into a new
pmap_kptpage_alloc() function.
- In pmap_growkernel(), assert that any user pmap published on the all-pmaps
list does not reference the kernel_lev1map.
- In pmap_asn_alloc(), get out early if we're called with the kernel pmap,
since all kernel mappings are ASM. Remove bogus assertions around the
value of pmap::pm_lev1map and the current ASN, and simply assert that
pmap::pm_lev1map is never kernel_lev1map. Also assert that preemption
is disabled, since we're manipulating per-cpu data structures.
- Convert the "too much uptime" panic to a simple KASSERT, and update the
comment to reflect that we're only subject to the longer 75 billion year
ASN generation overflow (because CPUs that don't implement ASNs never go
through this code path).
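
The __cacheline_aligned / COHERENCY_UNIT change and the new PVH_LOCK_HASH() macro in the diff below combine two common idioms: pad each lock out to a full cacheline so CPUs contending on different locks never share a line, and pick a lock by hashing the object's address. The stand-alone sketch below restates the idiom with generic stand-ins (stub_lock, obj_locks, OBJ_LOCK_HASH, obj_lock are hypothetical names, not the kernel's kmutex_t / struct vm_page), and assumes a 64-byte coherency unit as on alpha:

/*
 * Illustrative sketch only -- generic stand-ins, not the committed code.
 * Each lock occupies a full cacheline-sized slot so that two CPUs
 * spinning on different locks never ping-pong the same cacheline.
 */
#include <stdint.h>
#include <stdio.h>

#define COHERENCY_UNIT	64		/* assumed cacheline size */

struct stub_lock {			/* stand-in for kmutex_t */
	volatile int held;
};

static union {
	struct stub_lock lock;
	uint8_t		 pad[COHERENCY_UNIT];
} obj_locks[64] __attribute__((aligned(COHERENCY_UNIT)));

/* Hash an object's address to one of the 64 locks (address bits 6..11). */
#define	OBJ_LOCK_HASH(obj)	((((uintptr_t)(obj)) >> 6) & 63)

static inline struct stub_lock *
obj_lock(const void *obj)
{
	return &obj_locks[OBJ_LOCK_HASH(obj)].lock;
}

int
main(void)
{
	static struct { uint8_t bytes[64]; } pages[4];

	/* Objects 64 bytes apart hash to consecutive buckets (mod 64). */
	for (int i = 0; i < 4; i++)
		printf("pages[%d] -> bucket %lu\n", i,
		    (unsigned long)OBJ_LOCK_HASH(&pages[i]));

	struct stub_lock *lk = obj_lock(&pages[0]);
	lk->held = 1;			/* "acquire" the stand-in lock */
	return 0;
}

Hash collisions only mean two objects share a lock, which is harmless for correctness; the per-cacheline padding is purely a performance measure.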
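
The pmap_activate() item above describes skipping the SWPCTX PALcode call when neither the PTBR nor the ASN recorded in the hardware PCB image would change. The committed version is in the pmap_activate() hunk of the diff below; this separate sketch restates the pattern with generic stand-ins (hw_pcb, swap_context, activate, swpctx_calls are hypothetical names, not the alpha structures or the PALcode call):

/*
 * Illustrative sketch only -- generic stand-ins, not the committed code.
 * The expensive context-switch call is issued only when the page table
 * base or the ASN stored in the software copy of the PCB has changed.
 */
#include <stdint.h>
#include <stdio.h>

struct hw_pcb {				/* stand-in for the hardware PCB image */
	uint64_t ptbr;			/* page table base */
	uint32_t asn;			/* address space number */
};

static unsigned long swpctx_calls;	/* counts the expensive calls */

static void
swap_context(struct hw_pcb *pcb)
{
	/* Stand-in for alpha_pal_swpctx(); assumed to be costly. */
	(void)pcb;
	swpctx_calls++;
}

static void
activate(struct hw_pcb *pcb, uint64_t new_ptbr, uint32_t new_asn)
{
	uint64_t const old_ptbr = pcb->ptbr;
	uint32_t const old_asn = pcb->asn;

	pcb->ptbr = new_ptbr;
	pcb->asn = new_asn;

	/* Test the ASN first: it is the value most likely to change. */
	if (old_asn != pcb->asn || old_ptbr != pcb->ptbr)
		swap_context(pcb);
}

int
main(void)
{
	struct hw_pcb pcb = { .ptbr = 0x1000, .asn = 7 };

	activate(&pcb, 0x1000, 7);	/* nothing changed: no SWPCTX */
	activate(&pcb, 0x1000, 8);	/* new ASN: one SWPCTX */
	printf("context switches issued: %lu\n", swpctx_calls);
	return 0;
}

The comparison order matches the comment in the diff: the ASN is checked first because it is the value most likely to differ.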
diffstat:
sys/arch/alpha/alpha/pmap.c | 296 ++++++++++++++-----------------------------
1 files changed, 98 insertions(+), 198 deletions(-)
diffs (truncated from 462 to 300 lines):
diff -r 69382c451e53 -r e7d8db7613d5 sys/arch/alpha/alpha/pmap.c
--- a/sys/arch/alpha/alpha/pmap.c Sun Aug 16 20:03:52 2020 +0000
+++ b/sys/arch/alpha/alpha/pmap.c Sun Aug 16 20:04:36 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.266 2020/01/17 22:03:56 skrll Exp $ */
+/* $NetBSD: pmap.c,v 1.267 2020/08/16 20:04:36 thorpej Exp $ */
/*-
* Copyright (c) 1998, 1999, 2000, 2001, 2007, 2008 The NetBSD Foundation, Inc.
@@ -140,7 +140,7 @@
#include <sys/cdefs.h> /* RCS ID & Copyright macro defns */
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.266 2020/01/17 22:03:56 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.267 2020/08/16 20:04:36 thorpej Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@@ -219,7 +219,7 @@
static struct {
struct pmap k_pmap;
struct pmap_asn_info k_asni[ALPHA_MAXPROCS];
-} kernel_pmap_store;
+} kernel_pmap_store __cacheline_aligned;
struct pmap *const kernel_pmap_ptr = &kernel_pmap_store.k_pmap;
@@ -384,17 +384,18 @@
#define PMAP_HEAD_TO_MAP_LOCK() rw_enter(&pmap_main_lock, RW_WRITER)
#define PMAP_HEAD_TO_MAP_UNLOCK() rw_exit(&pmap_main_lock)
-struct {
- kmutex_t lock;
-} __aligned(64) static pmap_pvh_locks[64] __aligned(64);
+static union {
+ kmutex_t lock;
+ uint8_t pad[COHERENCY_UNIT];
+} pmap_pvh_locks[64] __cacheline_aligned;
+
+#define PVH_LOCK_HASH(pg) \
+ ((((uintptr_t)(pg)) >> 6) & 63)
static inline kmutex_t *
pmap_pvh_lock(struct vm_page *pg)
{
-
- /* Cut bits 11-6 out of page address and use directly as offset. */
- return (kmutex_t *)((uintptr_t)&pmap_pvh_locks +
- ((uintptr_t)pg & (63 << 6)));
+ return &pmap_pvh_locks[PVH_LOCK_HASH(pg)].lock;
}
#if defined(MULTIPROCESSOR)
@@ -498,32 +499,13 @@
static int pmap_physpage_delref(void *);
/*
- * PMAP_ISACTIVE{,_TEST}:
+ * PMAP_ISACTIVE:
*
* Check to see if a pmap is active on the current processor.
*/
-#define PMAP_ISACTIVE_TEST(pm, cpu_id) \
+#define PMAP_ISACTIVE(pm, cpu_id) \
(((pm)->pm_cpus & (1UL << (cpu_id))) != 0)
-#if defined(DEBUG) && !defined(MULTIPROCESSOR)
-#define PMAP_ISACTIVE(pm, cpu_id) \
-({ \
- /* \
- * XXX This test is not MP-safe. \
- */ \
- int isactive_ = PMAP_ISACTIVE_TEST(pm, cpu_id); \
- \
- if ((curlwp->l_flag & LW_IDLE) != 0 && \
- curproc->p_vmspace != NULL && \
- ((curproc->p_sflag & PS_WEXIT) == 0) && \
- (isactive_ ^ ((pm) == curproc->p_vmspace->vm_map.pmap))) \
- panic("PMAP_ISACTIVE"); \
- (isactive_); \
-})
-#else
-#define PMAP_ISACTIVE(pm, cpu_id) PMAP_ISACTIVE_TEST(pm, cpu_id)
-#endif /* DEBUG && !MULTIPROCESSOR */
-
/*
* PMAP_ACTIVATE_ASN_SANITY:
*
@@ -542,32 +524,10 @@
* ASN to prevent the PALcode from servicing a TLB \
* miss with the wrong PTE. \
*/ \
- if (__pma->pma_asn != PMAP_ASN_RESERVED) { \
- printf("kernel_lev1map with non-reserved ASN " \
- "(line %d)\n", __LINE__); \
- panic("PMAP_ACTIVATE_ASN_SANITY"); \
- } \
+ KASSERT(__pma->pma_asn == PMAP_ASN_RESERVED); \
} else { \
- if (__pma->pma_asngen != __cpma->pma_asngen) { \
- /* \
- * ASN generation number isn't valid! \
- */ \
- printf("pmap asngen %lu, current %lu " \
- "(line %d)\n", \
- __pma->pma_asngen, \
- __cpma->pma_asngen, \
- __LINE__); \
- panic("PMAP_ACTIVATE_ASN_SANITY"); \
- } \
- if (__pma->pma_asn == PMAP_ASN_RESERVED) { \
- /* \
- * DANGER WILL ROBINSON! We're going to \
- * pollute the VPT TLB entries! \
- */ \
- printf("Using reserved ASN! (line %d)\n", \
- __LINE__); \
- panic("PMAP_ACTIVATE_ASN_SANITY"); \
- } \
+ KASSERT(__pma->pma_asngen == __cpma->pma_asngen); \
+ KASSERT(__pma->pma_asn != PMAP_ASN_RESERVED); \
} \
} while (/*CONSTCOND*/0)
#else
@@ -575,34 +535,6 @@
#endif
/*
- * PMAP_ACTIVATE:
- *
- * This is essentially the guts of pmap_activate(), without
- * ASN allocation. This is used by pmap_activate(),
- * pmap_lev1map_create(), and pmap_lev1map_destroy().
- *
- * This is called only when it is known that a pmap is "active"
- * on the current processor; the ASN must already be valid.
- */
-#define PMAP_ACTIVATE(pmap, l, cpu_id) \
-do { \
- struct pcb *pcb = lwp_getpcb(l); \
- PMAP_ACTIVATE_ASN_SANITY(pmap, cpu_id); \
- \
- pcb->pcb_hw.apcb_ptbr = \
- ALPHA_K0SEG_TO_PHYS((vaddr_t)(pmap)->pm_lev1map) >> PGSHIFT; \
- pcb->pcb_hw.apcb_asn = (pmap)->pm_asni[(cpu_id)].pma_asn; \
- \
- if ((l) == curlwp) { \
- /* \
- * Page table base register has changed; switch to \
- * our own context again so that it will take effect. \
- */ \
- (void) alpha_pal_swpctx((u_long)l->l_md.md_pcbpaddr); \
- } \
-} while (/*CONSTCOND*/0)
-
-/*
* PMAP_SET_NEEDISYNC:
*
* Mark that a user pmap needs an I-stream synch on its
@@ -1133,11 +1065,7 @@
pmap = pool_cache_get(&pmap_pmap_cache, PR_WAITOK);
memset(pmap, 0, sizeof(*pmap));
- /*
- * Defer allocation of a new level 1 page table until
- * the first new mapping is entered; just take a reference
- * to the kernel kernel_lev1map.
- */
+ /* Reference the kernel_lev1map until we allocate our own below. */
pmap->pm_lev1map = kernel_lev1map;
pmap->pm_count = 1;
@@ -1197,13 +1125,6 @@
rw_exit(&pmap_growkernel_lock);
- /*
- * Since the pmap is supposed to contain no valid
- * mappings at this point, we should always see
- * kernel_lev1map here.
- */
- KASSERT(pmap->pm_lev1map == kernel_lev1map);
-
mutex_destroy(&pmap->pm_lock);
pool_cache_put(&pmap_pmap_cache, pmap);
}
@@ -1292,12 +1213,7 @@
PMAP_MAP_TO_HEAD_LOCK();
PMAP_LOCK(pmap);
- /*
- * If we're already referencing the kernel_lev1map, there
- * is no work for us to do.
- */
- if (pmap->pm_lev1map == kernel_lev1map)
- goto out;
+ KASSERT(pmap->pm_lev1map != kernel_lev1map);
saved_l1pte = l1pte = pmap_l1pte(pmap, sva);
@@ -1377,7 +1293,6 @@
if (needisync)
PMAP_SYNC_ISTREAM_USER(pmap);
- out:
PMAP_UNLOCK(pmap);
PMAP_MAP_TO_HEAD_UNLOCK();
}
@@ -2114,13 +2029,14 @@
* pmap_activate: [ INTERFACE ]
*
* Activate the pmap used by the specified process. This includes
- * reloading the MMU context if the current process, and marking
+ * reloading the MMU context of the current process, and marking
* the pmap in use by the processor.
*/
void
pmap_activate(struct lwp *l)
{
struct pmap *pmap = l->l_proc->p_vmspace->vm_map.pmap;
+ struct pcb *pcb = lwp_getpcb(l);
long cpu_id = cpu_number();
#ifdef DEBUG
@@ -2128,23 +2044,34 @@
printf("pmap_activate(%p)\n", l);
#endif
- /*
- * Lock the pmap across the work we do here; although the
- * in-use mask is manipulated with an atomic op and the
- * ASN info is per-cpu, the lev1map pointer needs to remain
- * consistent across the entire call.
- */
- PMAP_LOCK(pmap);
+ KASSERT(kpreempt_disabled());
+ KASSERT(l == curlwp);
/* Mark the pmap in use by this processor. */
atomic_or_ulong(&pmap->pm_cpus, (1UL << cpu_id));
/* Allocate an ASN. */
pmap_asn_alloc(pmap, cpu_id);
-
- PMAP_ACTIVATE(pmap, l, cpu_id);
-
- PMAP_UNLOCK(pmap);
+ PMAP_ACTIVATE_ASN_SANITY(pmap, cpu_id);
+
+ u_long const old_ptbr = pcb->pcb_hw.apcb_ptbr;
+ u_int const old_asn = pcb->pcb_hw.apcb_asn;
+
+ pcb->pcb_hw.apcb_ptbr =
+ ALPHA_K0SEG_TO_PHYS((vaddr_t)pmap->pm_lev1map) >> PGSHIFT;
+ pcb->pcb_hw.apcb_asn = (pmap)->pm_asni[cpu_id].pma_asn;
+
+ /*
+ * Check to see if the ASN or page table base has changed; if
+ * so, switch to our own context again so that it will take
+ * effect.
+ *
+ * We test ASN first because it's the most likely value to change.
+ */
+ if (old_asn != pcb->pcb_hw.apcb_asn ||
+ old_ptbr != pcb->pcb_hw.apcb_ptbr) {
+ (void) alpha_pal_swpctx((u_long)l->l_md.md_pcbpaddr);
+ }
}
/*
@@ -2163,11 +2090,17 @@
printf("pmap_deactivate(%p)\n", l);
#endif
+ KASSERT(kpreempt_disabled());
+ KASSERT(l == curlwp);
+
+ atomic_and_ulong(&pmap->pm_cpus, ~(1UL << cpu_number()));
+
/*
- * Mark the pmap no longer in use by this processor. Because
- * this is all we're doing, no need to take the pmap lock.
+ * There is no need to switch to a different PTBR here,
+ * because a pmap_activate() or SWPCTX is guaranteed
+ * before whatever lev1map we're on now is invalidated
+ * or before user space is accessed again.
*/
- atomic_and_ulong(&pmap->pm_cpus, ~(1UL << cpu_number()));
}
/*
@@ -3006,6 +2939,23 @@
/******************** page table page management ********************/
+static bool
+pmap_kptpage_alloc(paddr_t *pap)
+{
+ if (uvm.page_init_done == false) {
+ /*