Source-Changes-HG archive
[src/trunk]: src/sys/arch/aarch64 implement pmap_remove_all().
details: https://anonhg.NetBSD.org/src/rev/b77501d04a1a
branches: trunk
changeset: 950776:b77501d04a1a
user: ryo <ryo%NetBSD.org@localhost>
date: Sun Jan 31 04:51:29 2021 +0000
description:
implement pmap_remove_all().
The size of struct pv_entry has increased, but the speed of a kernel build has improved by about 1%.
exec and exit should also have become faster.
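The core of the change is a new per-pmap list of pv entries (pm_pvlist, threaded through the new pv_proc link), which lets pmap_remove_all() tear down every user mapping in time proportional to the number of mappings rather than by walking the page tables. The following is a minimal userland sketch of that bookkeeping, assuming a BSD <sys/queue.h>; the names mirror the diff, but the types are simplified stand-ins, not the kernel code.

/*
 * Minimal userland sketch (simplified stand-in types, not the kernel code).
 * Each pmap keeps a per-process list of its pv_entries, threaded through
 * pv_proc, so teardown can walk that list instead of the address space.
 */
#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

struct pv_entry {
	unsigned long pv_va;			/* mapped virtual address */
	LIST_ENTRY(pv_entry) pv_proc;		/* belonging to the process */
};

struct pmap {
	LIST_HEAD(, pv_entry) pm_pvlist;	/* all pv of this process */
};

static void
pmap_enter_sketch(struct pmap *pm, unsigned long va)
{
	struct pv_entry *pv = malloc(sizeof(*pv));

	pv->pv_va = va;
	LIST_INSERT_HEAD(&pm->pm_pvlist, pv, pv_proc);
}

/* Analogue of pmap_remove_all(): O(number of mappings), not O(VA space). */
static void
pmap_remove_all_sketch(struct pmap *pm)
{
	struct pv_entry *pv, *pvtmp;

	/* _SAFE variant because entries are unlinked while iterating. */
	LIST_FOREACH_SAFE(pv, &pm->pm_pvlist, pv_proc, pvtmp) {
		printf("removing mapping at va=%#lx\n", pv->pv_va);
		LIST_REMOVE(pv, pv_proc);
		free(pv);
	}
}

int
main(void)
{
	struct pmap pm;

	LIST_INIT(&pm.pm_pvlist);
	pmap_enter_sketch(&pm, 0x1000);
	pmap_enter_sketch(&pm, 0x2000);
	pmap_remove_all_sketch(&pm);
	return 0;
}

The trade-off is the one noted in the pmap.h comment below: each pv_entry grows by one list linkage, pushing the structure past its former 32-byte target, in exchange for process teardown that no longer has to scan the page tables.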
diffstat:
sys/arch/aarch64/aarch64/pmap.c | 95 ++++++++++++++++++++++++++++++++++++----
sys/arch/aarch64/include/pmap.h | 9 +++-
2 files changed, 91 insertions(+), 13 deletions(-)
diffs (201 lines):
diff -r 8f4723a3aa02 -r b77501d04a1a sys/arch/aarch64/aarch64/pmap.c
--- a/sys/arch/aarch64/aarch64/pmap.c Sat Jan 30 23:15:32 2021 +0000
+++ b/sys/arch/aarch64/aarch64/pmap.c Sun Jan 31 04:51:29 2021 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.99 2020/12/20 08:26:32 skrll Exp $ */
+/* $NetBSD: pmap.c,v 1.100 2021/01/31 04:51:29 ryo Exp $ */
/*
* Copyright (c) 2017 Ryo Shimizu <ryo%nerv.org@localhost>
@@ -27,7 +27,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.99 2020/12/20 08:26:32 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.100 2021/01/31 04:51:29 ryo Exp $");
#include "opt_arm_debug.h"
#include "opt_ddb.h"
@@ -485,6 +485,7 @@
kpm->pm_l0table_pa = l0pa;
kpm->pm_activated = true;
LIST_INIT(&kpm->pm_vmlist);
+ LIST_INIT(&kpm->pm_pvlist); /* not used for kernel pmap */
mutex_init(&kpm->pm_lock, MUTEX_DEFAULT, IPL_NONE);
CTASSERT(sizeof(kpm->pm_stats.wired_count) == sizeof(long));
@@ -719,11 +720,14 @@
}
static void
-_pmap_free_pdp_all(struct pmap *pm)
+_pmap_free_pdp_all(struct pmap *pm, bool free_l0)
{
- struct vm_page *pg;
-
- while ((pg = LIST_FIRST(&pm->pm_vmlist)) != NULL) {
+ struct vm_page *pg, *pgtmp, *pg_reserve;
+
+ pg_reserve = free_l0 ? NULL : PHYS_TO_VM_PAGE(pm->pm_l0table_pa);
+ LIST_FOREACH_SAFE(pg, &pm->pm_vmlist, pageq.list, pgtmp) {
+ if (pg == pg_reserve)
+ continue;
pmap_free_pdp(pm, pg);
}
}
@@ -1101,6 +1105,7 @@
UVMHIST_LOG(pmaphist, "pp=%p, pm=%p, va=%llx, pte=%llx",
pp, pm, va, pte);
+ KASSERT(mutex_owned(&pm->pm_lock)); /* for pv_proc */
KASSERT(mutex_owned(&pp->pp_pvlock));
for (ppv = NULL, pv = &pp->pp_pv; pv != NULL; pv = pv->pv_next) {
@@ -1109,6 +1114,10 @@
}
ppv = pv;
}
+
+ if (pm != pmap_kernel() && pv != NULL)
+ LIST_REMOVE(pv, pv_proc);
+
if (ppv == NULL) {
/* embedded in pmap_page */
pv->pv_pmap = NULL;
@@ -1234,6 +1243,9 @@
pv->pv_ptep = ptep;
PMAP_COUNT(pv_enter);
+ if (pm != pmap_kernel())
+ LIST_INSERT_HEAD(&pm->pm_pvlist, pv, pv_proc);
+
#ifdef PMAP_PV_DEBUG
printf("pv %p alias added va=%016lx -> pa=%016lx\n", pv, va, pa);
pv_dump(pp, printf);
@@ -1499,6 +1511,7 @@
pm->pm_idlepdp = 0;
pm->pm_asid = -1;
LIST_INIT(&pm->pm_vmlist);
+ LIST_INIT(&pm->pm_pvlist);
mutex_init(&pm->pm_lock, MUTEX_DEFAULT, IPL_NONE);
pm->pm_l0table_pa = pmap_alloc_pdp(pm, NULL, 0, true);
@@ -1535,9 +1548,13 @@
if (refcnt > 0)
return;
- aarch64_tlbi_by_asid(pm->pm_asid);
-
- _pmap_free_pdp_all(pm);
+ KASSERT(LIST_EMPTY(&pm->pm_pvlist));
+
+ /*
+ * no need to call aarch64_tlbi_by_asid(pm->pm_asid).
+ * TLB should already be invalidated in pmap_remove_all()
+ */
+ _pmap_free_pdp_all(pm, true);
mutex_destroy(&pm->pm_lock);
pool_cache_put(&_pmap_cache, pm);
@@ -2034,8 +2051,64 @@
bool
pmap_remove_all(struct pmap *pm)
{
- /* nothing to do */
- return false;
+ struct pmap_page *pp;
+ struct pv_entry *pv, *pvtmp, *opv, *pvtofree = NULL;
+ pt_entry_t pte, *ptep;
+ paddr_t pa;
+
+ UVMHIST_FUNC(__func__);
+ UVMHIST_CALLED(pmaphist);
+
+ UVMHIST_LOG(pmaphist, "pm=%p", pm, 0, 0, 0);
+
+ if (pm == pmap_kernel())
+ return false;
+
+ pm_lock(pm);
+
+ LIST_FOREACH_SAFE(pv, &pm->pm_pvlist, pv_proc, pvtmp) {
+ ptep = pv->pv_ptep;
+ pte = *ptep;
+
+ KASSERTMSG(lxpde_valid(pte),
+ "pte is not valid: pmap=%p, asid=%d, va=%016lx\n",
+ pm, pm->pm_asid, pv->pv_va);
+
+ pa = lxpde_pa(pte);
+ pp = phys_to_pp(pa);
+
+ KASSERTMSG(pp != NULL,
+ "no pmap_page of physical address:%016lx, "
+ "pmap=%p, asid=%d, va=%016lx\n",
+ pa, pm, pm->pm_asid, pv->pv_va);
+
+ pmap_pv_lock(pp);
+ opv = _pmap_remove_pv(pp, pm, trunc_page(pv->pv_va), pte);
+ pmap_pv_unlock(pp);
+ if (opv != NULL) {
+ opv->pv_next = pvtofree;
+ pvtofree = opv;
+ }
+ }
+ /* all PTE should now be cleared */
+ pm->pm_stats.wired_count = 0;
+ pm->pm_stats.resident_count = 0;
+
+ /* clear L0 page table page */
+ pmap_zero_page(pm->pm_l0table_pa);
+ aarch64_tlbi_by_asid(pm->pm_asid);
+
+ /* free L1-L3 page table pages, but not L0 */
+ _pmap_free_pdp_all(pm, false);
+
+ pm_unlock(pm);
+
+ for (pv = pvtofree; pv != NULL; pv = pvtmp) {
+ pvtmp = pv->pv_next;
+ pool_cache_put(&_pmap_pv_pool, pv);
+ }
+
+ return true;
}
static void
diff -r 8f4723a3aa02 -r b77501d04a1a sys/arch/aarch64/include/pmap.h
--- a/sys/arch/aarch64/include/pmap.h Sat Jan 30 23:15:32 2021 +0000
+++ b/sys/arch/aarch64/include/pmap.h Sun Jan 31 04:51:29 2021 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.h,v 1.43 2020/09/19 13:33:08 skrll Exp $ */
+/* $NetBSD: pmap.h,v 1.44 2021/01/31 04:51:29 ryo Exp $ */
/*-
* Copyright (c) 2014 The NetBSD Foundation, Inc.
@@ -76,6 +76,7 @@
paddr_t pm_l0table_pa;
LIST_HEAD(, vm_page) pm_vmlist; /* for L[0123] tables */
+ LIST_HEAD(, pv_entry) pm_pvlist; /* all pv of this process */
struct pmap_statistics pm_stats;
unsigned int pm_refcnt;
@@ -84,12 +85,16 @@
bool pm_activated;
};
-/* sized to reduce memory consumption & cache misses (32 bytes) */
+/*
+ * should be kept <=32 bytes sized to reduce memory consumption & cache misses,
+ * but it doesn't...
+ */
struct pv_entry {
struct pv_entry *pv_next;
struct pmap *pv_pmap;
vaddr_t pv_va; /* for embedded entry (pp_pv) also includes flags */
void *pv_ptep; /* pointer for fast pte lookup */
+ LIST_ENTRY(pv_entry) pv_proc; /* belonging to the process */
};
struct pmap_page {