Source-Changes-HG archive
[src/trunk]: src/sys/arch/aarch64 separate struct vm_page_md into vm_page_md ...
details: https://anonhg.NetBSD.org/src/rev/3fc9efd44989
branches: trunk
changeset: 744489:3fc9efd44989
user: ryo <ryo%NetBSD.org@localhost>
date: Mon Feb 03 13:35:44 2020 +0000
description:
separate struct vm_page_md into vm_page_md and pmap_page
in preparation for pmap_pv(9)
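(The pmap.h hunk does not survive the truncation below, so the following is only a rough sketch of the split, inferred from the pmap.c changes visible in the diff: the pp_pvlock/pp_pvhead/pp_flags members and the mdpg_pp embedding used by VM_PAGE_TO_PP(). Member names, types and ordering here are assumptions, not the committed definitions.)

#include <sys/types.h>
#include <sys/queue.h>          /* TAILQ_HEAD */
#include <sys/mutex.h>          /* kmutex_t */

/* Per-page pmap state, split out of struct vm_page_md (illustrative only). */
struct pmap_page {
        kmutex_t pp_pvlock;                     /* serializes the pv list */
        TAILQ_HEAD(, pv_entry) pp_pvhead;       /* mappings of this page */
        uint32_t pp_flags;                      /* referenced/modified bits */
};

/* The machine-dependent page metadata now just embeds the pmap state. */
struct vm_page_md {
        struct pmap_page mdpg_pp;
};

/* pmap.c reaches the pmap state through the page's md area. */
#define VM_PAGE_TO_PP(pg)       (&(pg)->mdpage.mdpg_pp)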
diffstat:
sys/arch/aarch64/aarch64/pmap.c | 301 +++++++++++++++++++++------------------
sys/arch/aarch64/include/pmap.h | 29 ++-
2 files changed, 180 insertions(+), 150 deletions(-)
diffs (truncated from 744 to 300 lines):
diff -r ff3e8b34775a -r 3fc9efd44989 sys/arch/aarch64/aarch64/pmap.c
--- a/sys/arch/aarch64/aarch64/pmap.c Mon Feb 03 13:28:11 2020 +0000
+++ b/sys/arch/aarch64/aarch64/pmap.c Mon Feb 03 13:35:44 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.61 2020/01/09 01:38:34 ryo Exp $ */
+/* $NetBSD: pmap.c,v 1.62 2020/02/03 13:35:44 ryo Exp $ */
/*
* Copyright (c) 2017 Ryo Shimizu <ryo%nerv.org@localhost>
@@ -27,7 +27,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.61 2020/01/09 01:38:34 ryo Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.62 2020/02/03 13:35:44 ryo Exp $");
#include "opt_arm_debug.h"
#include "opt_ddb.h"
@@ -183,6 +183,8 @@
cpu_icache_sync_range((va), PAGE_SIZE); \
} while (0/*CONSTCOND*/)
+#define VM_PAGE_TO_PP(pg) (&(pg)->mdpage.mdpg_pp)
+
struct pv_entry {
TAILQ_ENTRY(pv_entry) pv_link;
struct pmap *pv_pmap;
@@ -217,17 +219,17 @@
static inline void
-pmap_pv_lock(struct vm_page_md *md)
+pmap_pv_lock(struct pmap_page *pp)
{
- mutex_enter(&md->mdpg_pvlock);
+ mutex_enter(&pp->pp_pvlock);
}
static inline void
-pmap_pv_unlock(struct vm_page_md *md)
+pmap_pv_unlock(struct pmap_page *pp)
{
- mutex_exit(&md->mdpg_pvlock);
+ mutex_exit(&pp->pp_pvlock);
}
@@ -243,6 +245,18 @@
mutex_exit(&pm->pm_lock);
}
+static inline struct pmap_page *
+phys_to_pp(paddr_t pa)
+{
+ struct vm_page *pg;
+
+ pg = PHYS_TO_VM_PAGE(pa);
+ if (pg != NULL)
+ return VM_PAGE_TO_PP(pg);
+
+ return NULL;
+}
+
#define IN_RANGE(va,sta,end) (((sta) <= (va)) && ((va) < (end)))
#define IN_KSEG_ADDR(va) \
@@ -512,7 +526,7 @@
0, 0, 0, "pvpl", NULL, IPL_VM, _pmap_pv_ctor, NULL, NULL);
/*
- * initialize vm_page_md:mdpg_pvlock at this time.
+ * initialize mutex in vm_page_md at this time.
* When LOCKDEBUG, mutex_init() calls km_alloc,
* but VM_MDPAGE_INIT() is called before initialized kmem_vm_arena.
*/
@@ -524,7 +538,7 @@
pfn++) {
pg = PHYS_TO_VM_PAGE(ptoa(pfn));
md = VM_PAGE_TO_MD(pg);
- mutex_init(&md->mdpg_pvlock, MUTEX_SPIN, IPL_VM);
+ mutex_init(&md->mdpg_pp.pp_pvlock, MUTEX_SPIN, IPL_VM);
}
}
}
@@ -994,22 +1008,19 @@
}
static struct pv_entry *
-_pmap_remove_pv(struct vm_page *pg, struct pmap *pm, vaddr_t va, pt_entry_t pte)
+_pmap_remove_pv(struct pmap_page *pp, struct pmap *pm, vaddr_t va, pt_entry_t pte)
{
- struct vm_page_md *md;
struct pv_entry *pv;
UVMHIST_FUNC(__func__);
UVMHIST_CALLED(pmaphist);
- UVMHIST_LOG(pmaphist, "pg=%p, pm=%p, va=%llx, pte=%llx",
- pg, pm, va, pte);
-
- md = VM_PAGE_TO_MD(pg);
-
- TAILQ_FOREACH(pv, &md->mdpg_pvhead, pv_link) {
+ UVMHIST_LOG(pmaphist, "pp=%p, pm=%p, va=%llx, pte=%llx",
+ pp, pm, va, pte);
+
+ TAILQ_FOREACH(pv, &pp->pp_pvhead, pv_link) {
if ((pm == pv->pv_pmap) && (va == pv->pv_va)) {
- TAILQ_REMOVE(&md->mdpg_pvhead, pv, pv_link);
+ TAILQ_REMOVE(&pp->pp_pvhead, pv, pv_link);
PMAP_COUNT(pv_remove);
break;
}
@@ -1061,18 +1072,18 @@
}
static void
-pv_dump(struct vm_page_md *md, void (*pr)(const char *, ...) __printflike(1, 2))
+pv_dump(struct pmap_page *pp, void (*pr)(const char *, ...) __printflike(1, 2))
{
struct pv_entry *pv;
int i;
i = 0;
- pr("md=%p\n", md);
- pr(" md->mdpg_flags=%08x %s\n", md->mdpg_flags,
- str_vmflags(md->mdpg_flags));
-
- TAILQ_FOREACH(pv, &md->mdpg_pvhead, pv_link) {
+ pr("pp=%p\n", pp);
+ pr(" pp->pp_flags=%08x %s\n", pp->pp_flags,
+ str_vmflags(pp->pp_flags));
+
+ TAILQ_FOREACH(pv, &pp->pp_pvhead, pv_link) {
pr(" pv[%d] pv=%p\n",
i, pv);
pr(" pv[%d].pv_pmap = %p (asid=%d)\n",
@@ -1089,22 +1100,19 @@
#endif /* PMAP_PV_DEBUG & DDB */
static int
-_pmap_enter_pv(struct vm_page *pg, struct pmap *pm, struct pv_entry **pvp,
+_pmap_enter_pv(struct pmap_page *pp, struct pmap *pm, struct pv_entry **pvp,
vaddr_t va, pt_entry_t *ptep, paddr_t pa, u_int flags)
{
- struct vm_page_md *md;
struct pv_entry *pv;
UVMHIST_FUNC(__func__);
UVMHIST_CALLED(pmaphist);
- UVMHIST_LOG(pmaphist, "pg=%p, pm=%p, va=%llx, pa=%llx", pg, pm, va, pa);
+ UVMHIST_LOG(pmaphist, "pp=%p, pm=%p, va=%llx, pa=%llx", pp, pm, va, pa);
UVMHIST_LOG(pmaphist, "ptep=%p, flags=%08x", ptep, flags, 0, 0);
- md = VM_PAGE_TO_MD(pg);
-
/* pv is already registered? */
- TAILQ_FOREACH(pv, &md->mdpg_pvhead, pv_link) {
+ TAILQ_FOREACH(pv, &pp->pp_pvhead, pv_link) {
if ((pm == pv->pv_pmap) && (va == pv->pv_va)) {
break;
}
@@ -1125,14 +1133,14 @@
pv->pv_pa = pa;
pv->pv_ptep = ptep;
- TAILQ_INSERT_HEAD(&md->mdpg_pvhead, pv, pv_link);
+ TAILQ_INSERT_HEAD(&pp->pp_pvhead, pv, pv_link);
PMAP_COUNT(pv_enter);
#ifdef PMAP_PV_DEBUG
- if (!TAILQ_EMPTY(&md->mdpg_pvhead)){
+ if (!TAILQ_EMPTY(&pp->pp_pvhead)){
printf("pv %p alias added va=%016lx -> pa=%016lx\n",
pv, va, pa);
- pv_dump(md, printf);
+ pv_dump(pp, printf);
}
#endif
}
@@ -1175,7 +1183,7 @@
}
static void
-_pmap_protect_pv(struct vm_page *pg, struct pv_entry *pv, vm_prot_t prot)
+_pmap_protect_pv(struct pmap_page *pp, struct pv_entry *pv, vm_prot_t prot)
{
pt_entry_t *ptep, pte;
vm_prot_t pteprot;
@@ -1185,10 +1193,10 @@
UVMHIST_FUNC(__func__);
UVMHIST_CALLED(pmaphist);
- UVMHIST_LOG(pmaphist, "pg=%p, pv=%p, prot=%08x", pg, pv, prot, 0);
+ UVMHIST_LOG(pmaphist, "pp=%p, pv=%p, prot=%08x", pp, pv, prot, 0);
/* get prot mask from referenced/modified */
- mdattr = VM_PAGE_TO_MD(pg)->mdpg_flags &
+ mdattr = pp->pp_flags &
(VM_PROT_READ | VM_PROT_WRITE);
pm_lock(pv->pv_pmap);
@@ -1253,6 +1261,7 @@
pt_entry_t opte;
#endif
struct vm_page *pg;
+ struct pmap_page *pp;
paddr_t pa;
uint32_t mdattr;
bool executable;
@@ -1271,16 +1280,20 @@
pa = lxpde_pa(pte);
pg = PHYS_TO_VM_PAGE(pa);
-
if (pg != NULL) {
+ pp = VM_PAGE_TO_PP(pg);
+ PMAP_COUNT(protect_managed);
+ } else {
+ pp = NULL;
+ PMAP_COUNT(protect_unmanaged);
+ }
+
+ if (pp != NULL) {
/* get prot mask from referenced/modified */
- mdattr = VM_PAGE_TO_MD(pg)->mdpg_flags &
- (VM_PROT_READ | VM_PROT_WRITE);
- PMAP_COUNT(protect_managed);
+ mdattr = pp->pp_flags & (VM_PROT_READ | VM_PROT_WRITE);
} else {
/* unmanaged page */
mdattr = VM_PROT_ALL;
- PMAP_COUNT(protect_unmanaged);
}
#ifdef UVMHIST
@@ -1553,7 +1566,8 @@
_pmap_enter(struct pmap *pm, vaddr_t va, paddr_t pa, vm_prot_t prot,
u_int flags, bool kenter)
{
- struct vm_page *pg, *pgs[2], *pdppg, *pdppg0;
+ struct vm_page *pdppg, *pdppg0;
+ struct pmap_page *pp, *opp, *pps[2];
struct pv_entry *spv, *opv = NULL;
pd_entry_t pde;
pt_entry_t attr, pte, opte, *ptep;
@@ -1599,13 +1613,20 @@
}
#endif
- if (kenter)
- pg = NULL;
- else
- pg = PHYS_TO_VM_PAGE(pa);
-
- if (pg != NULL) {
- PMAP_COUNT(managed_mappings);
+ if (kenter) {
+ pp = NULL;
+ } else {
+ struct vm_page *pg = PHYS_TO_VM_PAGE(pa);
+ if (pg != NULL) {
+ pp = VM_PAGE_TO_PP(pg);
+ PMAP_COUNT(managed_mappings);
+ } else {
+ pp = NULL;
+ PMAP_COUNT(unmanaged_mappings);
+ }
+ }
+
+ if (pp != NULL) {
/*
* allocate pv in advance of pm_lock() to avoid locking myself.
* pool_cache_get() may call pmap_kenter() internally.
@@ -1613,7 +1634,6 @@
spv = pool_cache_get(&_pmap_pv_pool, PR_NOWAIT);
need_update_pv = true;
} else {
- PMAP_COUNT(unmanaged_mappings);
spv = NULL;
need_update_pv = false;
}
@@ -1708,13 +1728,12 @@
opte = atomic_swap_64(ptep, 0);
need_sync_icache = (prot & VM_PROT_EXECUTE);
- /* for lock ordering for pg and opg */
- pgs[0] = pg;
- pgs[1] = NULL;
+ /* for lock ordering for old page and new page */
+ pps[0] = pp;
+ pps[1] = NULL;
/* remap? */
if (l3pte_valid(opte)) {
- struct vm_page *opg;
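(In outline, the conversion applied throughout pmap.c follows the pattern below; this is a minimal fragment assuming the helpers shown in the hunks above, with locking order and error handling elided.)

/* before: per-page pv state was reached via struct vm_page_md */
struct vm_page *pg = PHYS_TO_VM_PAGE(pa);
struct vm_page_md *md = (pg != NULL) ? VM_PAGE_TO_MD(pg) : NULL;

/* after: the same state lives in struct pmap_page */
struct pmap_page *pp = phys_to_pp(pa);  /* NULL for unmanaged pages */
if (pp != NULL) {
        pmap_pv_lock(pp);
        /* walk or modify pp->pp_pvhead, consult pp->pp_flags ... */
        pmap_pv_unlock(pp);
}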