Source-Changes-HG archive
[src/trunk]: src/sys/arch/aarch64 improve pmap_remove
details: https://anonhg.NetBSD.org/src/rev/1c8ee4308716
branches: trunk
changeset: 448622:1c8ee4308716
user: ryo <ryo%NetBSD.org@localhost>
date: Wed Feb 06 05:33:41 2019 +0000
description:
improve pmap_remove
- don't lock/unlock per page in pmap_remove()
- speed up the pte lookup for contiguous addresses (see the sketch after the diff)
- move pool_cache_put(&_pmap_pv_pool, pv) out of the locked section (sketched just below)
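The last point shows up in the diff as the new _pmap_remove() signature: callers pass a struct pv_entry ** that collects the pv entries unlinked while the pmap lock is held, and return them to the pool only after the lock is dropped. A minimal sketch of that caller-side pattern, inferred from the visible hunks (the actual pmap_remove() body falls in the truncated part of the diff, so the details here are an assumption):

/*
 * Sketch of the caller-side pattern implied by the new
 * _pmap_remove() signature; inferred from the hunks below,
 * not copied from the (truncated) real pmap_remove() body.
 */
void
pmap_remove(struct pmap *pm, vaddr_t sva, vaddr_t eva)
{
        struct pv_entry *pvtofree = NULL, *pv, *pvtmp;

        pm_lock(pm);            /* take the pmap lock once for the range */
        _pmap_remove(pm, sva, eva, false, &pvtofree);
        pm_unlock(pm);

        /* return pv entries to the pool outside the locked section */
        for (pv = pvtofree; pv != NULL; pv = pvtmp) {
                pvtmp = pv->pv_next;    /* pv_link.tqe_next, per the new #define */
                pool_cache_put(&_pmap_pv_pool, pv);
        }
}

As the pmap_kremove() hunk below shows, the kernel-removal path passes NULL for pvtofree, since mappings entered with pmap_kenter_pa() are not tracked with pv entries.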
diffstat:
sys/arch/aarch64/aarch64/pmap.c | 195 +++++++++++++++++++++------------------
sys/arch/aarch64/include/pmap.h | 11 +-
2 files changed, 110 insertions(+), 96 deletions(-)
diffs (truncated from 461 to 300 lines):
diff -r 52111e5797c4 -r 1c8ee4308716 sys/arch/aarch64/aarch64/pmap.c
--- a/sys/arch/aarch64/aarch64/pmap.c Wed Feb 06 05:33:14 2019 +0000
+++ b/sys/arch/aarch64/aarch64/pmap.c Wed Feb 06 05:33:41 2019 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.34 2018/12/21 08:01:01 ryo Exp $ */
+/* $NetBSD: pmap.c,v 1.35 2019/02/06 05:33:41 ryo Exp $ */
/*
* Copyright (c) 2017 Ryo Shimizu <ryo%nerv.org@localhost>
@@ -27,7 +27,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.34 2018/12/21 08:01:01 ryo Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.35 2019/02/06 05:33:41 ryo Exp $");
#include "opt_arm_debug.h"
#include "opt_ddb.h"
@@ -187,12 +187,15 @@
paddr_t pv_pa; /* debug */
pt_entry_t *pv_ptep; /* for fast pte lookup */
};
+#define pv_next pv_link.tqe_next
+
+#define L3INDEXMASK (L3_SIZE * Ln_ENTRIES - 1)
static pt_entry_t *_pmap_pte_lookup_l3(struct pmap *, vaddr_t);
static pt_entry_t *_pmap_pte_lookup_bs(struct pmap *, vaddr_t, vsize_t *);
static pt_entry_t _pmap_pte_adjust_prot(pt_entry_t, vm_prot_t, vm_prot_t, bool);
static pt_entry_t _pmap_pte_adjust_cacheflags(pt_entry_t, u_int);
-static void _pmap_remove(struct pmap *, vaddr_t, bool);
+static void _pmap_remove(struct pmap *, vaddr_t, vaddr_t, bool, struct pv_entry **);
static int _pmap_enter(struct pmap *, vaddr_t, paddr_t, vm_prot_t, u_int, bool);
static struct pmap kernel_pmap;
@@ -614,7 +617,7 @@
bool
pmap_extract(struct pmap *pm, vaddr_t va, paddr_t *pap)
{
- static pt_entry_t *ptep;
+ static pt_entry_t *ptep, pte;
paddr_t pa;
vsize_t blocksize = 0;
extern char __kernel_text[];
@@ -630,7 +633,10 @@
ptep = _pmap_pte_lookup_bs(pm, va, &blocksize);
if (ptep == NULL)
return false;
- pa = lxpde_pa(*ptep) + (va & (blocksize - 1));
+ pte = *ptep;
+ if (!lxpde_valid(pte))
+ return false;
+ pa = lxpde_pa(pte) + (va & (blocksize - 1));
}
if (pap != NULL)
@@ -676,46 +682,30 @@
blocksize = L0_SIZE;
l0 = pm->pm_l0table;
idx = l0pde_index(va);
- pde = l0[idx];
- if (!l0pde_valid(pde)) {
- ptep = NULL;
+ ptep = &l0[idx];
+ pde = *ptep;
+ if (!l0pde_valid(pde))
goto done;
- }
blocksize = L1_SIZE;
l1 = (pd_entry_t *)AARCH64_PA_TO_KVA(l0pde_pa(pde));
idx = l1pde_index(va);
- pde = l1[idx];
- if (!l1pde_valid(pde)) {
- ptep = NULL;
+ ptep = &l1[idx];
+ pde = *ptep;
+ if (!l1pde_valid(pde) || l1pde_is_block(pde))
goto done;
- }
- if (l1pde_is_block(pde)) {
- ptep = &l1[idx];
- goto done;
- }
blocksize = L2_SIZE;
l2 = (pd_entry_t *)AARCH64_PA_TO_KVA(l1pde_pa(pde));
idx = l2pde_index(va);
- pde = l2[idx];
- if (!l2pde_valid(pde)) {
- ptep = NULL;
+ ptep = &l2[idx];
+ pde = *ptep;
+ if (!l2pde_valid(pde) || l2pde_is_block(pde))
goto done;
- }
- if (l2pde_is_block(pde)) {
- ptep = &l2[idx];
- goto done;
- }
blocksize = L3_SIZE;
l3 = (pd_entry_t *)AARCH64_PA_TO_KVA(l2pde_pa(pde));
idx = l3pte_index(va);
- pde = l3[idx];
- if (!l3pte_valid(pde)) {
- ptep = NULL;
- goto done;
- }
ptep = &l3[idx];
done:
@@ -740,21 +730,27 @@
void
pmap_icache_sync_range(pmap_t pm, vaddr_t sva, vaddr_t eva)
{
- pt_entry_t *ptep, pte;
+ pt_entry_t *ptep = NULL, pte;
vaddr_t va;
vsize_t blocksize = 0;
pm_lock(pm);
- for (va = sva; va < eva; va += blocksize) {
- ptep = _pmap_pte_lookup_bs(pm, va, &blocksize);
- if (blocksize == 0)
- break;
- if (ptep != NULL) {
+ for (va = sva; va < eva; va = (va + blocksize) & ~(blocksize - 1)) {
+ /* does va belong to the same L3 table as before? */
+ if ((blocksize == L3_SIZE) && ((va & L3INDEXMASK) != 0)) {
+ ptep++;
+ } else {
+ ptep = _pmap_pte_lookup_bs(pm, va, &blocksize);
+ if (ptep == NULL)
+ break;
+ }
+
+ pte = *ptep;
+ if (lxpde_valid(pte)) {
vaddr_t eob = (va + blocksize) & ~(blocksize - 1);
vsize_t len = ulmin(eva, eob - va);
- pte = *ptep;
if (l3pte_writable(pte)) {
cpu_icache_sync_range(va, len);
} else {
@@ -771,7 +767,6 @@
atomic_swap_64(ptep, opte);
AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va, true);
}
- va &= ~(blocksize - 1);
}
}
@@ -826,7 +821,7 @@
/* and either to executable */
xn = user ? LX_BLKPAG_UXN : LX_BLKPAG_PXN;
if (prot & VM_PROT_EXECUTE)
- pte &= ~xn;
+ pte &= ~xn;
return pte;
}
@@ -942,20 +937,22 @@
TAILQ_FOREACH(pv, &md->mdpg_pvhead, pv_link) {
pr(" pv[%d] pv=%p\n",
i, pv);
- pr(" pv[%d].pv_pmap =%p (asid=%d)\n",
+ pr(" pv[%d].pv_pmap = %p (asid=%d)\n",
i, pv->pv_pmap, pv->pv_pmap->pm_asid);
- pr(" pv[%d].pv_va =%016lx (color=%d)\n",
+ pr(" pv[%d].pv_va = %016lx (color=%d)\n",
i, pv->pv_va, _pmap_color(pv->pv_va));
- pr(" pv[%d].pv_pa =%016lx (color=%d)\n",
+ pr(" pv[%d].pv_pa = %016lx (color=%d)\n",
i, pv->pv_pa, _pmap_color(pv->pv_pa));
+ pr(" pv[%d].pv_ptep = %p\n",
+ i, pv->pv_ptep);
i++;
}
}
#endif /* PMAP_PV_DEBUG & DDB */
static int
-_pmap_enter_pv(struct vm_page *pg, struct pmap *pm, struct pv_entry **pvp, vaddr_t va,
- pt_entry_t *ptep, paddr_t pa, u_int flags)
+_pmap_enter_pv(struct vm_page *pg, struct pmap *pm, struct pv_entry **pvp,
+ vaddr_t va, pt_entry_t *ptep, paddr_t pa, u_int flags)
{
struct vm_page_md *md;
struct pv_entry *pv;
@@ -1006,6 +1003,7 @@
}
#endif
}
+
pmap_pv_unlock(md);
return 0;
}
@@ -1024,7 +1022,6 @@
pmap_kremove(vaddr_t va, vsize_t size)
{
struct pmap *kpm = pmap_kernel();
- vaddr_t eva;
int s;
UVMHIST_FUNC(__func__);
@@ -1036,14 +1033,12 @@
KDASSERT((size & PGOFSET) == 0);
KDASSERT(!IN_KSEG_ADDR(va));
-
- eva = va + size;
KDASSERT(IN_RANGE(va, VM_MIN_KERNEL_ADDRESS, VM_MAX_KERNEL_ADDRESS));
s = splvm();
- for (; va < eva; va += PAGE_SIZE) {
- _pmap_remove(kpm, va, true);
- }
+ pm_lock(kpm);
+ _pmap_remove(kpm, va, va + size, true, NULL);
+ pm_unlock(kpm);
splx(s);
}
@@ -1089,7 +1084,9 @@
void
pmap_protect(struct pmap *pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
{
+ pt_entry_t *ptep = NULL, pte;
vaddr_t va;
+ vsize_t blocksize = 0;
const bool user = (pm != pmap_kernel());
KASSERT((prot & VM_PROT_READ) || !(prot & VM_PROT_WRITE));
@@ -1115,8 +1112,7 @@
pm_lock(pm);
- for (va = sva; va < eva; va += PAGE_SIZE) {
- pt_entry_t *ptep, pte;
+ for (va = sva; va < eva; va = (va + blocksize) & ~(blocksize - 1)) {
#ifdef UVMHIST
pt_entry_t opte;
#endif
@@ -1125,20 +1121,19 @@
uint32_t mdattr;
bool executable;
- ptep = _pmap_pte_lookup_l3(pm, va);
- if (ptep == NULL) {
+ /* does va belong to the same L3 table as before? */
+ if ((blocksize == L3_SIZE) && ((va & L3INDEXMASK) != 0))
+ ptep++;
+ else
+ ptep = _pmap_pte_lookup_bs(pm, va, &blocksize);
+
+ pte = *ptep;
+ if (!lxpde_valid(pte)) {
PMAP_COUNT(protect_none);
continue;
}
- pte = *ptep;
-
- if (!l3pte_valid(pte)) {
- PMAP_COUNT(protect_none);
- continue;
- }
-
- pa = l3pte_pa(pte);
+ pa = lxpde_pa(pte);
pg = PHYS_TO_VM_PAGE(pa);
if (pg != NULL) {
@@ -1152,7 +1147,6 @@
PMAP_COUNT(protect_unmanaged);
}
- pte = *ptep;
#ifdef UVMHIST
opte = pte;
#endif
@@ -1282,6 +1276,7 @@
_pmap_free_pdp_all(pm);
mutex_destroy(&pm->pm_lock);
+
pool_cache_put(&_pmap_cache, pm);
PMAP_COUNT(destroy);
@@ -1555,37 +1550,46 @@
}
static void
-_pmap_remove(struct pmap *pm, vaddr_t va, bool kremove)
+_pmap_remove(struct pmap *pm, vaddr_t sva, vaddr_t eva, bool kremove,
+ struct pv_entry **pvtofree)
{
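
The diff stops here, just inside the new _pmap_remove(). For reference, the pte-lookup fast path that pmap_icache_sync_range() and pmap_protect() now share, and that the "contiguous addresses" bullet refers to, condenses to the loop shape below. This is a re-arrangement of the hunks above rather than new code; error handling and the per-page work are elided:

        pt_entry_t *ptep = NULL;
        vsize_t blocksize = 0;
        vaddr_t va;

        for (va = sva; va < eva; va = (va + blocksize) & ~(blocksize - 1)) {
                /* does va fall into the same L3 table as the previous page? */
                if ((blocksize == L3_SIZE) && ((va & L3INDEXMASK) != 0)) {
                        ptep++;         /* adjacent entry in the same table */
                } else {
                        /* crossed a table boundary: redo the full walk */
                        ptep = _pmap_pte_lookup_bs(pm, va, &blocksize);
                        if (ptep == NULL)
                                break;
                }
                /* ... operate on *ptep for this page or block ... */
        }

The loop step rounds va up to the next blocksize boundary, so an unaligned sva still advances correctly after the first iteration, and the full table walk is redone only at L3-table boundaries (once per Ln_ENTRIES pages) instead of once per page.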