Source-Changes-HG archive
[src/trunk]: src/sys/arch/aarch64 - Fix a lock order reversal in pmap_page_pr...
details: https://anonhg.NetBSD.org/src/rev/5ea269fba960
branches: trunk
changeset: 972969:5ea269fba960
user: ad <ad%NetBSD.org@localhost>
date: Sun Jun 14 21:47:14 2020 +0000
description:
- Fix a lock order reversal in pmap_page_protect().
- Make sure the pmap is always locked when updating stats; atomics are no
longer needed for that.
- Remove unneeded traversal of pv list in pmap_enter_pv().
- Shrink struct vm_page from 136 to 128 bytes (cache line sized) and struct
pv_entry from 48 to 32 bytes (power of 2 sized).
- Embed a pv_entry in each vm_page. This means PV entries don't need to
be allocated for private anonymous memory / COW pages / most UBC mappings.
Dynamic PV entries are then used only for stuff like shared libraries and
shared memory.
Proposed on port-arm@.
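
For reference, the layout these numbers imply: four pointer-sized fields make
the new pv_entry 32 bytes on AArch64, and one such entry is embedded in every
vm_page via struct pmap_page. The declarations below are a sketch
reconstructed from the field names used in the pmap.c hunks; the
authoritative definitions are in sys/arch/aarch64/include/pmap.h, whose hunks
are truncated from this mail.

    /*
     * Sketch only -- reconstructed from field uses in the diff below;
     * see sys/arch/aarch64/include/pmap.h for the real declarations.
     */
    struct pv_entry {
        struct pv_entry *pv_next;   /* singly linked; replaces LIST_ENTRY */
        struct pmap     *pv_pmap;   /* NULL in the embedded entry == free */
        vaddr_t          pv_va;     /* mapped VA; low bits carry flags */
        pt_entry_t      *pv_ptep;   /* for fast pte lookup */
    };                              /* 4 x 8 = 32 bytes, power of 2 */

    struct pmap_page {
        kmutex_t         pp_pvlock; /* spin mutex (see pmap_pv_lock below) */
        struct pv_entry  pp_pv;     /* embedded entry for the first mapping */
    };

With this layout, a private anonymous page, a COW page, or a typical UBC
mapping has exactly one mapping, fills pp_pv, and never touches the pv pool;
only a second simultaneous mapping of the same page costs an allocation.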
diffstat:
sys/arch/aarch64/aarch64/pmap.c | 398 ++++++++++++++++++++++++---------------
sys/arch/aarch64/include/pmap.h | 28 +-
2 files changed, 259 insertions(+), 167 deletions(-)
diffs (truncated from 828 to 300 lines):
diff -r ea7d1f33691c -r 5ea269fba960 sys/arch/aarch64/aarch64/pmap.c
--- a/sys/arch/aarch64/aarch64/pmap.c Sun Jun 14 21:41:42 2020 +0000
+++ b/sys/arch/aarch64/aarch64/pmap.c Sun Jun 14 21:47:14 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.77 2020/06/10 22:24:22 ad Exp $ */
+/* $NetBSD: pmap.c,v 1.78 2020/06/14 21:47:14 ad Exp $ */
/*
* Copyright (c) 2017 Ryo Shimizu <ryo%nerv.org@localhost>
@@ -27,7 +27,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.77 2020/06/10 22:24:22 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.78 2020/06/14 21:47:14 ad Exp $");
#include "opt_arm_debug.h"
#include "opt_ddb.h"
@@ -102,8 +102,9 @@
PMAP_COUNTER(pdp_alloc, "page table page allocate (uvm_pagealloc)");
PMAP_COUNTER(pdp_free, "page table page free (uvm_pagefree)");
-PMAP_COUNTER(pv_enter, "pv_entry allocate and link");
-PMAP_COUNTER(pv_remove, "pv_entry free and unlink");
+PMAP_COUNTER(pv_enter, "pv_entry fill");
+PMAP_COUNTER(pv_remove_dyn, "pv_entry free and unlink dynamic");
+PMAP_COUNTER(pv_remove_emb, "pv_entry clear embedded");
PMAP_COUNTER(pv_remove_nopv, "no pv_entry found when removing pv");
PMAP_COUNTER(activate, "pmap_activate call");
@@ -184,15 +185,6 @@
#define VM_PAGE_TO_PP(pg) (&(pg)->mdpage.mdpg_pp)
-struct pv_entry {
- LIST_ENTRY(pv_entry) pv_link;
- struct pmap *pv_pmap;
- vaddr_t pv_va;
- paddr_t pv_pa; /* debug */
- pt_entry_t *pv_ptep; /* for fast pte lookup */
-};
-#define pv_next pv_link.le_next
-
#define L3INDEXMASK (L3_SIZE * Ln_ENTRIES - 1)
#define PDPSWEEP_TRIGGER 512
@@ -204,7 +196,7 @@
struct pv_entry **);
static int _pmap_enter(struct pmap *, vaddr_t, paddr_t, vm_prot_t, u_int, bool);
-static struct pmap kernel_pmap;
+static struct pmap kernel_pmap __cacheline_aligned;
struct pmap * const kernel_pmap_ptr = &kernel_pmap;
static vaddr_t pmap_maxkvaddr;
@@ -223,27 +215,48 @@
pmap_pv_lock(struct pmap_page *pp)
{
- mutex_enter(&pp->pp_pvlock);
+ mutex_spin_enter(&pp->pp_pvlock);
}
static inline void
pmap_pv_unlock(struct pmap_page *pp)
{
- mutex_exit(&pp->pp_pvlock);
+ mutex_spin_exit(&pp->pp_pvlock);
}
static inline void
pm_lock(struct pmap *pm)
{
- mutex_enter(&pm->pm_lock);
+ mutex_spin_enter(&pm->pm_lock);
}
static inline void
pm_unlock(struct pmap *pm)
{
- mutex_exit(&pm->pm_lock);
+ mutex_spin_exit(&pm->pm_lock);
+}
+
+static bool
+pm_reverse_lock(struct pmap *pm, struct pmap_page *pp)
+{
+
+ KASSERT(mutex_owned(&pp->pp_pvlock));
+
+ if (__predict_true(mutex_tryenter(&pm->pm_lock)))
+ return true;
+
+ if (pm != pmap_kernel())
+ pmap_reference(pm);
+ mutex_spin_exit(&pp->pp_pvlock);
+ mutex_spin_enter(&pm->pm_lock);
+ /* nothing, just wait for lock */
+ mutex_spin_exit(&pm->pm_lock);
+ if (pm != pmap_kernel())
+ pmap_destroy(pm);
+ mutex_spin_enter(&pp->pp_pvlock);
+ return false;
}
static inline struct pmap_page *
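
pm_reverse_lock() above is the fix for the lock order reversal named in the
description. The normal order is pmap lock before pv lock, but
pmap_page_protect() starts from the page, so it already holds pp_pvlock when
it needs a pmap lock. The helper try-locks the pmap; on failure it drops the
pv lock, waits for (and immediately releases) the pmap lock while holding a
reference so the pmap cannot be destroyed meanwhile, retakes the pv lock, and
returns false so the caller restarts its traversal, since the pv list may
have changed while unlocked. A hypothetical caller shape -- the actual
pmap_page_protect() hunks are truncated from this mail:

    /* Hypothetical caller sketch; not the literal committed code. */
    pmap_pv_lock(pp);
 restart:
    for (pv = &pp->pp_pv; pv != NULL; pv = pv->pv_next) {
        struct pmap * const pm = pv->pv_pmap;

        if (pm == NULL)
            continue;           /* embedded slot currently free */
        if (!pm_reverse_lock(pm, pp))
            goto restart;       /* list may have changed; rescan */
        /* ... modify the PTE with both locks held ... */
        pm_unlock(pm);
    }
    pmap_pv_unlock(pp);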
@@ -466,14 +479,22 @@
CTASSERT(sizeof(kpm->pm_stats.wired_count) == sizeof(long));
CTASSERT(sizeof(kpm->pm_stats.resident_count) == sizeof(long));
-#define PMSTAT_INC_WIRED_COUNT(pm) \
- atomic_inc_ulong(&(pm)->pm_stats.wired_count)
-#define PMSTAT_DEC_WIRED_COUNT(pm) \
- atomic_dec_ulong(&(pm)->pm_stats.wired_count)
-#define PMSTAT_INC_RESIDENT_COUNT(pm) \
- atomic_inc_ulong(&(pm)->pm_stats.resident_count)
-#define PMSTAT_DEC_RESIDENT_COUNT(pm) \
- atomic_dec_ulong(&(pm)->pm_stats.resident_count)
+#define PMSTAT_INC_WIRED_COUNT(pm) do { \
+ KASSERT(mutex_owned(&(pm)->pm_lock)); \
+ (pm)->pm_stats.wired_count++; \
+} while (/* CONSTCOND */ 0);
+#define PMSTAT_DEC_WIRED_COUNT(pm) do{ \
+ KASSERT(mutex_owned(&(pm)->pm_lock)); \
+ (pm)->pm_stats.wired_count--; \
+} while (/* CONSTCOND */ 0);
+#define PMSTAT_INC_RESIDENT_COUNT(pm) do { \
+ KASSERT(mutex_owned(&(pm)->pm_lock)); \
+ (pm)->pm_stats.resident_count++; \
+} while (/* CONSTCOND */ 0);
+#define PMSTAT_DEC_RESIDENT_COUNT(pm) do { \
+ KASSERT(mutex_owned(&(pm)->pm_lock)); \
+ (pm)->pm_stats.resident_count--; \
+} while (/* CONSTCOND */ 0);
}
inline static int
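
Because every caller must now hold pm_lock, the counters can be plain
increments on a long; the KASSERT documents the new rule and enforces it
under DIAGNOSTIC, replacing the atomic_inc/dec pairs. A hypothetical
call-site shape (the _pmap_enter() hunks are truncated from this mail):

    /* Hypothetical call-site sketch. */
    pm_lock(pm);
    /* ... install the new L3 PTE ... */
    PMSTAT_INC_RESIDENT_COUNT(pm);
    if (flags & PMAP_WIRED)
        PMSTAT_INC_WIRED_COUNT(pm);
    pm_unlock(pm);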
@@ -501,10 +522,12 @@
{
pool_cache_bootstrap(&_pmap_cache, sizeof(struct pmap),
- 0, 0, 0, "pmappl", NULL, IPL_NONE, _pmap_pmap_ctor, NULL, NULL);
+ coherency_unit, 0, 0, "pmappl", NULL, IPL_NONE, _pmap_pmap_ctor,
+ NULL, NULL);
+
pool_cache_bootstrap(&_pmap_pv_pool, sizeof(struct pv_entry),
- 0, 0, 0, "pvpl", NULL, IPL_VM, _pmap_pv_ctor, NULL, NULL);
-
+ 32, 0, PR_LARGECACHE, "pvpl", NULL, IPL_NONE, _pmap_pv_ctor,
+ NULL, NULL);
}
void
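
Two changes here: both caches gain an explicit alignment (coherency_unit for
pmaps, 32 bytes for the now power-of-2-sized pv_entry), and the pv cache
moves from IPL_VM to IPL_NONE with PR_LARGECACHE. The IPL change fits the
switch to spin mutexes above: an IPL_NONE pool cannot be used while pp_pvlock
or pm_lock is held, so a pv_entry has to be allocated up front. A sketch of
the assumed ordering (the relevant _pmap_enter() hunks are truncated from
this mail):

    /*
     * Sketch: allocate a possibly-needed pv_entry before taking
     * any spin mutex; free it afterwards if it went unused.
     */
    spv = pool_cache_get(&_pmap_pv_pool, PR_NOWAIT);
    pm_lock(pm);
    /* ... _pmap_enter_pv() may consume spv ... */
    pm_unlock(pm);
    if (spv != NULL)
        pool_cache_put(&_pmap_pv_pool, spv);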
@@ -584,17 +607,12 @@
return POOL_PADDR_INVALID;
}
- LIST_INSERT_HEAD(&pm->pm_vmlist, pg, mdpage.mdpg_vmlist);
+ LIST_INSERT_HEAD(&pm->pm_vmlist, pg, pageq.list);
pg->flags &= ~PG_BUSY; /* never busy */
pg->wire_count = 1; /* max = 1 + Ln_ENTRIES = 513 */
pa = VM_PAGE_TO_PHYS(pg);
PMAP_COUNT(pdp_alloc);
-
- VM_PAGE_TO_MD(pg)->mdpg_ptep_parent = NULL;
-
- struct pmap_page *pp = VM_PAGE_TO_PP(pg);
- pp->pp_flags = 0;
-
+ PMAP_PAGE_INIT(VM_PAGE_TO_PP(pg));
} else {
/* uvm_pageboot_alloc() returns AARCH64 KSEG address */
pg = NULL;
@@ -614,13 +632,13 @@
static void
pmap_free_pdp(struct pmap *pm, struct vm_page *pg)
{
- LIST_REMOVE(pg, mdpage.mdpg_vmlist);
- pg->flags |= PG_BUSY;
+
+ KASSERT(pm != pmap_kernel());
+ KASSERT(VM_PAGE_TO_PP(pg)->pp_pv.pv_pmap == NULL);
+ KASSERT(VM_PAGE_TO_PP(pg)->pp_pv.pv_next == NULL);
+
+ LIST_REMOVE(pg, pageq.list);
pg->wire_count = 0;
-
- struct pmap_page *pp __diagused = VM_PAGE_TO_PP(pg);
- KASSERT(LIST_EMPTY(&pp->pp_pvhead));
-
uvm_pagefree(pg);
PMAP_COUNT(pdp_free);
}
@@ -635,8 +653,10 @@
int nsweep;
uint16_t wirecount __diagused;
+ KASSERT(mutex_owned(&pm->pm_lock) || pm->pm_refcnt == 0);
+
nsweep = 0;
- LIST_FOREACH_SAFE(pg, &pm->pm_vmlist, mdpage.mdpg_vmlist, tmp) {
+ LIST_FOREACH_SAFE(pg, &pm->pm_vmlist, pageq.list, tmp) {
if (pg->wire_count != 1)
continue;
@@ -655,7 +675,7 @@
/* unlink from parent */
opte = atomic_swap_64(ptep_in_parent, 0);
KASSERT(lxpde_valid(opte));
- wirecount = atomic_add_32_nv(&pg->wire_count, -1); /* 1 -> 0 */
+ wirecount = --pg->wire_count; /* 1 -> 0 */
KASSERT(wirecount == 0);
pmap_free_pdp(pm, pg);
nsweep++;
@@ -670,12 +690,12 @@
KASSERTMSG(pg->wire_count >= 1,
"wire_count=%d", pg->wire_count);
/* decrement wire_count of parent */
- wirecount = atomic_add_32_nv(&pg->wire_count, -1);
+ wirecount = --pg->wire_count;
KASSERTMSG(pg->wire_count <= (Ln_ENTRIES + 1),
"pm=%p[%d], pg=%p, wire_count=%d",
pm, pm->pm_asid, pg, pg->wire_count);
}
- atomic_swap_uint(&pm->pm_idlepdp, 0);
+ pm->pm_idlepdp = 0;
return nsweep;
}
@@ -683,9 +703,9 @@
static void
_pmap_free_pdp_all(struct pmap *pm)
{
- struct vm_page *pg, *tmp;
-
- LIST_FOREACH_SAFE(pg, &pm->pm_vmlist, mdpage.mdpg_vmlist, tmp) {
+ struct vm_page *pg;
+
+ while ((pg = LIST_FIRST(&pm->pm_vmlist)) != NULL) {
pmap_free_pdp(pm, pg);
}
}
@@ -1015,9 +1035,10 @@
}
static struct pv_entry *
-_pmap_remove_pv(struct pmap_page *pp, struct pmap *pm, vaddr_t va, pt_entry_t pte)
+_pmap_remove_pv(struct pmap_page *pp, struct pmap *pm, vaddr_t va,
+ pt_entry_t pte)
{
- struct pv_entry *pv;
+ struct pv_entry *pv, *ppv;
UVMHIST_FUNC(__func__);
UVMHIST_CALLED(pmaphist);
@@ -1025,18 +1046,26 @@
UVMHIST_LOG(pmaphist, "pp=%p, pm=%p, va=%llx, pte=%llx",
pp, pm, va, pte);
- LIST_FOREACH(pv, &pp->pp_pvhead, pv_link) {
- if ((pm == pv->pv_pmap) && (va == pv->pv_va)) {
- LIST_REMOVE(pv, pv_link);
- PMAP_COUNT(pv_remove);
+ KASSERT(mutex_owned(&pp->pp_pvlock));
+
+ for (ppv = NULL, pv = &pp->pp_pv; pv != NULL; pv = pv->pv_next) {
+ if (pv->pv_pmap == pm && trunc_page(pv->pv_va) == va) {
break;
}
+ ppv = pv;
}
-#ifdef PMAPCOUNTERS
- if (pv == NULL) {
+ if (ppv == NULL) {
+ /* embedded in pmap_page */
+ pv->pv_pmap = NULL;
+ pv = NULL;
+ PMAP_COUNT(pv_remove_emb);
+ } else if (pv != NULL) {
+ /* dynamically allocated */
+ ppv->pv_next = pv->pv_next;
+ PMAP_COUNT(pv_remove_dyn);
+ } else {
PMAP_COUNT(pv_remove_nopv);
}
-#endif
return pv;
}
@@ -1082,23 +1111,25 @@
pv_dump(struct pmap_page *pp, void (*pr)(const char *, ...) __printflike(1, 2))
{
struct pv_entry *pv;
- int i;
+ int i, flags;
i = 0;
+ flags = pp->pp_pv.pv_va & (PAGE_SIZE - 1);