Source-Changes-HG archive
[src/trunk]: src/sys/arch/alpha pmap_tlb_shootdown_all_user() can be called i...
details: https://anonhg.NetBSD.org/src/rev/a53b9082d3ea
branches: trunk
changeset: 379257:a53b9082d3ea
user: thorpej <thorpej%NetBSD.org@localhost>
date: Mon May 24 03:43:24 2021 +0000
description:
pmap_tlb_shootdown_all_user() can be called in the PV scenario as well
as the forward scenario, for example if a pmap_page_protect() to remove
all mappings results in the freeing of a PT page. It therefore needs
to do the same reference counting dance as pmap_tlb_shootdown_pv().

Also fix a use-after-free error in pmap_page_protect().

Add / tweak some assertions, and shrink the pmap::pm_count field from
long to unsigned int (which gave me a spare unsigned int field for
debugging purposes).

PR port-alpha/56201.
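
For orientation, here is a minimal sketch of the "reference counting
dance" mentioned above, paraphrased from the
pmap_tlb_shootdown_all_user() / pmap_tlb_shootdown_pv() hunks in the
diff below. The helper name tlbctx_note_pmap() is illustrative only
and does not exist in the tree:

/*
 * Sketch only, not the committed code.  In the PV scenario the TLB
 * context can outlive the caller's interest in the pmap, so the first
 * pmap recorded in the context takes an extra reference (released once
 * the shootdown has been processed); encountering a second, different
 * pmap simply degrades the context to the "multi" state.
 */
static void
tlbctx_note_pmap(struct pmap_tlb_context * const tlbctx, pmap_t const pmap)
{
	if (TLB_CTX_FLAGS(tlbctx) & TLB_CTX_F_PV) {
		if (tlbctx->t_pmap == NULL) {
			pmap_reference(pmap);	/* keep the pmap alive */
			tlbctx->t_pmap = pmap;
		} else if (tlbctx->t_pmap != pmap) {
			TLB_CTX_SET_FLAG(tlbctx, TLB_CTX_F_MULTI);
		}
	} else {
		/* Non-PV callers already hold a reference to the pmap. */
		KASSERT(tlbctx->t_pmap == NULL || tlbctx->t_pmap == pmap);
		tlbctx->t_pmap = pmap;
	}
}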
diffstat:
sys/arch/alpha/alpha/pmap.c | 108 ++++++++++++++++++++++++++---------------
sys/arch/alpha/include/pmap.h | 26 +++++----
2 files changed, 82 insertions(+), 52 deletions(-)
diffs (truncated from 393 to 300 lines):
diff -r e147b6aa30a6 -r a53b9082d3ea sys/arch/alpha/alpha/pmap.c
--- a/sys/arch/alpha/alpha/pmap.c Sun May 23 23:24:45 2021 +0000
+++ b/sys/arch/alpha/alpha/pmap.c Mon May 24 03:43:24 2021 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.277 2021/05/23 19:13:27 thorpej Exp $ */
+/* $NetBSD: pmap.c,v 1.278 2021/05/24 03:43:24 thorpej Exp $ */
/*-
* Copyright (c) 1998, 1999, 2000, 2001, 2007, 2008, 2020
@@ -135,7 +135,7 @@
#include <sys/cdefs.h> /* RCS ID & Copyright macro defns */
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.277 2021/05/23 19:13:27 thorpej Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.278 2021/05/24 03:43:24 thorpej Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@@ -667,17 +667,17 @@ pmap_tlb_init(void)
}
static inline void
-pmap_tlb_context_init(struct pmap_tlb_context * const tlbctx)
+pmap_tlb_context_init(struct pmap_tlb_context * const tlbctx, uintptr_t flags)
{
/* Initialize the minimum number of fields. */
tlbctx->t_addrdata[0] = 0;
- tlbctx->t_addrdata[1] = 0;
+ tlbctx->t_addrdata[1] = flags;
tlbctx->t_pmap = NULL;
LIST_INIT(&tlbctx->t_freeptq);
}
static void
-pmap_tlb_shootdown(pmap_t const pmap, vaddr_t const va,
+pmap_tlb_shootdown_internal(pmap_t const pmap, vaddr_t const va,
pt_entry_t const pte_bits, struct pmap_tlb_context * const tlbctx)
{
KASSERT(pmap != NULL);
@@ -730,6 +730,14 @@ pmap_tlb_shootdown(pmap_t const pmap, va
}
static void
+pmap_tlb_shootdown(pmap_t const pmap, vaddr_t const va,
+ pt_entry_t const pte_bits, struct pmap_tlb_context * const tlbctx)
+{
+ KASSERT((TLB_CTX_FLAGS(tlbctx) & TLB_CTX_F_PV) == 0);
+ pmap_tlb_shootdown_internal(pmap, va, pte_bits, tlbctx);
+}
+
+static void
pmap_tlb_shootdown_all_user(pmap_t const pmap, pt_entry_t const pte_bits,
struct pmap_tlb_context * const tlbctx)
{
@@ -743,29 +751,42 @@ pmap_tlb_shootdown_all_user(pmap_t const
TLB_CTX_SET_FLAG(tlbctx, TLB_CTX_F_IMB);
}
- KASSERT(tlbctx->t_pmap == NULL || tlbctx->t_pmap == pmap);
- tlbctx->t_pmap = pmap;
+ if (TLB_CTX_FLAGS(tlbctx) & TLB_CTX_F_PV) {
+ if (tlbctx->t_pmap == NULL || tlbctx->t_pmap == pmap) {
+ if (tlbctx->t_pmap == NULL) {
+ pmap_reference(pmap);
+ tlbctx->t_pmap = pmap;
+ }
+ } else {
+ TLB_CTX_SET_FLAG(tlbctx, TLB_CTX_F_MULTI);
+ }
+ } else {
+ KASSERT(tlbctx->t_pmap == NULL || tlbctx->t_pmap == pmap);
+ tlbctx->t_pmap = pmap;
+ }
TLB_CTX_SET_ALLVA(tlbctx);
}
static void
-pmap_tlb_shootdown_pv(const pv_entry_t pv, pt_entry_t const pte_bits,
- struct pmap_tlb_context * const tlbctx)
+pmap_tlb_shootdown_pv(pmap_t const pmap, vaddr_t const va,
+ pt_entry_t const pte_bits, struct pmap_tlb_context * const tlbctx)
{
- uintptr_t flags = TLB_CTX_F_PV;
+
+ KASSERT(TLB_CTX_FLAGS(tlbctx) & TLB_CTX_F_PV);
TLB_COUNT(shootdown_pv);
- if (tlbctx->t_pmap == NULL || tlbctx->t_pmap == pv->pv_pmap) {
+ if (tlbctx->t_pmap == NULL || tlbctx->t_pmap == pmap) {
if (tlbctx->t_pmap == NULL) {
- pmap_reference(pv->pv_pmap);
+ pmap_reference(pmap);
+ tlbctx->t_pmap = pmap;
}
- pmap_tlb_shootdown(pv->pv_pmap, pv->pv_va, pte_bits, tlbctx);
+ pmap_tlb_shootdown_internal(pmap, va, pte_bits, tlbctx);
} else {
TLB_COUNT(shootdown_pv_multi);
- flags |= TLB_CTX_F_MULTI;
- if (pv->pv_pmap == pmap_kernel()) {
+ uintptr_t flags = TLB_CTX_F_MULTI;
+ if (pmap == pmap_kernel()) {
KASSERT(pte_bits & PG_ASM);
flags |= TLB_CTX_F_ASM;
} else {
@@ -780,8 +801,8 @@ pmap_tlb_shootdown_pv(const pv_entry_t p
flags |= TLB_CTX_F_IMB;
}
TLB_CTX_SET_ALLVA(tlbctx);
+ TLB_CTX_SET_FLAG(tlbctx, flags);
}
- TLB_CTX_SET_FLAG(tlbctx, flags);
}
static void
@@ -1034,6 +1055,7 @@ pmap_tlb_shootnow(const struct pmap_tlb_
#if defined(MULTIPROCESSOR)
void
pmap_tlb_shootdown_ipi(struct cpu_info * const ci,
+
struct trapframe * const tf __unused)
{
KASSERT(tlb_context != NULL);
@@ -1367,7 +1389,7 @@ pmap_bootstrap(paddr_t ptaddr, u_int max
*/
memset(pmap_kernel(), 0, sizeof(struct pmap));
pmap_kernel()->pm_lev1map = kernel_lev1map;
- pmap_kernel()->pm_count = 1;
+ atomic_store_relaxed(&pmap_kernel()->pm_count, 1);
/* Kernel pmap does not have ASN info. */
TAILQ_INSERT_TAIL(&pmap_all_pmaps, pmap_kernel(), pm_list);
@@ -1550,7 +1572,7 @@ pmap_create(void)
pmap = pool_cache_get(&pmap_pmap_cache, PR_WAITOK);
memset(pmap, 0, sizeof(*pmap));
- pmap->pm_count = 1;
+ atomic_store_relaxed(&pmap->pm_count, 1);
/*
* There are only kernel mappings at this point; give the pmap
@@ -1597,7 +1619,8 @@ pmap_destroy(pmap_t pmap)
#endif
PMAP_MP(membar_exit());
- if (atomic_dec_ulong_nv(&pmap->pm_count) > 0)
+ KASSERT(atomic_load_relaxed(&pmap->pm_count) > 0);
+ if (atomic_dec_uint_nv(&pmap->pm_count) > 0)
return;
rw_enter(&pmap_growkernel_lock, RW_READER);
@@ -1610,7 +1633,7 @@ pmap_destroy(pmap_t pmap)
mutex_exit(&pmap_all_pmaps_lock);
pool_cache_put(&pmap_l1pt_cache, pmap->pm_lev1map);
- pmap->pm_lev1map = NULL;
+ pmap->pm_lev1map = (pt_entry_t *)0xdeadbeefUL;
rw_exit(&pmap_growkernel_lock);
@@ -1625,13 +1648,15 @@ pmap_destroy(pmap_t pmap)
void
pmap_reference(pmap_t pmap)
{
+ unsigned int newcount __diagused;
#ifdef DEBUG
if (pmapdebug & PDB_FOLLOW)
printf("pmap_reference(%p)\n", pmap);
#endif
- atomic_inc_ulong(&pmap->pm_count);
+ newcount = atomic_inc_uint_nv(&pmap->pm_count);
+ KASSERT(newcount != 0);
PMAP_MP(membar_enter());
}
@@ -1773,7 +1798,7 @@ pmap_remove(pmap_t pmap, vaddr_t sva, va
{
struct pmap_tlb_context tlbctx;
- pmap_tlb_context_init(&tlbctx);
+ pmap_tlb_context_init(&tlbctx, 0);
pmap_remove_internal(pmap, sva, eva, &tlbctx);
}
@@ -1801,7 +1826,7 @@ pmap_page_protect(struct vm_page *pg, vm
printf("pmap_page_protect(%p, %x)\n", pg, prot);
#endif
- pmap_tlb_context_init(&tlbctx);
+ pmap_tlb_context_init(&tlbctx, TLB_CTX_F_PV);
switch (prot) {
case VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE:
@@ -1820,7 +1845,8 @@ pmap_page_protect(struct vm_page *pg, vm
if (opte & (PG_KWE | PG_UWE)) {
atomic_store_relaxed(pv->pv_pte,
opte & ~(PG_KWE | PG_UWE));
- pmap_tlb_shootdown_pv(pv, opte, &tlbctx);
+ pmap_tlb_shootdown_pv(pv->pv_pmap, pv->pv_va,
+ opte, &tlbctx);
}
PMAP_UNLOCK(pv->pv_pmap);
}
@@ -1840,13 +1866,17 @@ pmap_page_protect(struct vm_page *pg, vm
mutex_enter(lock);
for (pv = md->pvh_list; pv != NULL; pv = nextpv) {
pt_entry_t pte_bits;
+ pmap_t pmap;
+ vaddr_t va;
nextpv = pv->pv_next;
PMAP_LOCK(pv->pv_pmap);
- pte_bits = pmap_remove_mapping(pv->pv_pmap, pv->pv_va,
- pv->pv_pte, false, NULL, &tlbctx);
- pmap_tlb_shootdown_pv(pv, pte_bits, &tlbctx);
+ pmap = pv->pv_pmap;
+ va = pv->pv_va;
+ pte_bits = pmap_remove_mapping(pmap, va, pv->pv_pte,
+ false, NULL, &tlbctx);
+ pmap_tlb_shootdown_pv(pmap, va, pte_bits, &tlbctx);
PMAP_UNLOCK(pv->pv_pmap);
}
mutex_exit(lock);
@@ -1875,7 +1905,7 @@ pmap_protect(pmap_t pmap, vaddr_t sva, v
pmap, sva, eva, prot);
#endif
- pmap_tlb_context_init(&tlbctx);
+ pmap_tlb_context_init(&tlbctx, 0);
if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
pmap_remove_internal(pmap, sva, eva, &tlbctx);
@@ -1931,7 +1961,7 @@ pmap_enter_tlb_shootdown(pmap_t const pm
{
struct pmap_tlb_context tlbctx;
- pmap_tlb_context_init(&tlbctx);
+ pmap_tlb_context_init(&tlbctx, 0);
pmap_tlb_shootdown(pmap, va, pte_bits, &tlbctx);
if (locked) {
PMAP_UNLOCK(pmap);
@@ -1959,7 +1989,7 @@ pmap_enter_l2pt_delref(pmap_t const pmap
* for this VPT index.
*/
- pmap_tlb_context_init(&tlbctx);
+ pmap_tlb_context_init(&tlbctx, 0);
pmap_l2pt_delref(pmap, l1pte, l2pte, &tlbctx);
PMAP_UNLOCK(pmap);
pmap_tlb_shootnow(&tlbctx);
@@ -1987,7 +2017,7 @@ pmap_enter_l3pt_delref(pmap_t const pmap
* for this VPT index.
*/
- pmap_tlb_context_init(&tlbctx);
+ pmap_tlb_context_init(&tlbctx, 0);
pmap_l3pt_delref(pmap, va, pte, &tlbctx);
PMAP_UNLOCK(pmap);
pmap_tlb_shootnow(&tlbctx);
@@ -2332,7 +2362,7 @@ pmap_kremove(vaddr_t va, vsize_t size)
va, size);
#endif
- pmap_tlb_context_init(&tlbctx);
+ pmap_tlb_context_init(&tlbctx, 0);
KASSERT(va >= VM_MIN_KERNEL_ADDRESS);
@@ -2728,7 +2758,7 @@ pmap_clear_modify(struct vm_page *pg)
printf("pmap_clear_modify(%p)\n", pg);
#endif
- pmap_tlb_context_init(&tlbctx);
+ pmap_tlb_context_init(&tlbctx, TLB_CTX_F_PV);
PMAP_HEAD_TO_MAP_LOCK();
lock = pmap_pvh_lock(pg);
@@ -2767,7 +2797,7 @@ pmap_clear_reference(struct vm_page *pg)
printf("pmap_clear_reference(%p)\n", pg);
#endif
- pmap_tlb_context_init(&tlbctx);
+ pmap_tlb_context_init(&tlbctx, TLB_CTX_F_PV);
PMAP_HEAD_TO_MAP_LOCK();
lock = pmap_pvh_lock(pg);
@@ -2983,7 +3013,8 @@ pmap_changebit(struct vm_page *pg, pt_en
npte = (opte | set) & mask;
if (npte != opte) {
atomic_store_relaxed(pte, npte);
- pmap_tlb_shootdown_pv(pv, opte, tlbctx);
+ pmap_tlb_shootdown_pv(pv->pv_pmap, pv->pv_va,
+ opte, tlbctx);
}
PMAP_UNLOCK(pv->pv_pmap);
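
Finally, the use-after-free fix called out in the description
corresponds to the pmap_page_protect() hunk above: the pmap and VA are
now read out of the pv entry before pmap_remove_mapping() is called,
and the subsequent shootdown uses only those cached copies instead of
dereferencing the pv entry again. A condensed, comment-annotated
paraphrase of that loop (not the literal source):

	for (pv = md->pvh_list; pv != NULL; pv = nextpv) {
		pt_entry_t pte_bits;
		pmap_t pmap;
		vaddr_t va;

		/* Capture everything needed from the pv entry up front. */
		nextpv = pv->pv_next;
		PMAP_LOCK(pv->pv_pmap);
		pmap = pv->pv_pmap;
		va = pv->pv_va;
		pte_bits = pmap_remove_mapping(pmap, va, pv->pv_pte,
		    false, NULL, &tlbctx);
		/* The shootdown uses the cached pmap/va, not the pv entry. */
		pmap_tlb_shootdown_pv(pmap, va, pte_bits, &tlbctx);
		PMAP_UNLOCK(pv->pv_pmap);
	}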