Source-Changes-HG archive
[src/netbsd-9]: src/sys/arch/arm/arm32 Pull up following revision(s) (requested by skrll in ticket #691)
details: https://anonhg.NetBSD.org/src/rev/980ba9a219d2
branches: netbsd-9
changeset: 744696:980ba9a219d2
user: martin <martin%NetBSD.org@localhost>
date: Mon Feb 10 19:20:01 2020 +0000
description:
Pull up following revision(s) (requested by skrll in ticket #691):
sys/arch/arm/arm32/pmap.c: revision 1.383
sys/arch/arm/arm32/pmap.c: revision 1.385
sys/arch/arm/arm32/pmap.c: revision 1.386
sys/arch/arm/arm32/pmap.c: revision 1.387
sys/arch/arm/arm32/pmap.c: revision 1.374
sys/arch/arm/arm32/pmap.c: revision 1.375
sys/arch/arm/arm32/pmap.c: revision 1.376
sys/arch/arm/arm32/pmap.c: revision 1.377
sys/arch/arm/arm32/pmap.c: revision 1.378
sys/arch/arm/arm32/pmap.c: revision 1.379
Convert a __CTASSERT into a KASSERT, as L1_S_CACHE_MASK may not be a
compile-time constant if ARM_NMMUS > 1 (a sketch of the pattern follows
this list of changes)
Improve a comment
Update PMAP_STEAL_MEMORY code to uvm_hotplug
Typo in comment
Fix a bug introduced in 1.271 where pmap_grow_map would no longer map
the allocated page for the uvm.page_init_done == false case when
PMAP_STEAL_MEMORY is not defined.
Trailing whitespace
Fix comment
Always pmap_kenter_pa the page in pmap_grow_map regardless of how we got
it.
Always call pmap_grow_map with a page-aligned new VA. KASSERT that this
happens (see the sketch after the diff).
More KNF
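For the first item above: __CTASSERT() requires an integer constant
expression, and with ARM_NMMUS > 1 the L1_S_CACHE_MASK definition expands
to a variable selected at boot for the CPU's MMU class, so the check has
to move to run time. Below is a minimal sketch of the pattern, with
hypothetical names (CACHE_MASK, SUPERSECTION_BIT, check_cache_mask)
standing in for the kernel's; _Static_assert is the standard-C11 analogue
of NetBSD's __CTASSERT, and assert() stands in for KASSERT():

#include <assert.h>

#define SUPERSECTION_BIT        0x00040000u     /* hypothetical bit */

#ifdef SINGLE_MMU_CLASS
/*
 * One MMU class: the mask is a constant, so it can be checked at
 * build time.
 */
#define CACHE_MASK      0x0000700cu
_Static_assert((CACHE_MASK & SUPERSECTION_BIT) == 0,
    "cache mask overlaps the supersection bit");
#else
/*
 * Several MMU classes: the mask is a variable filled in at boot,
 * so only a run-time check (KASSERT in the kernel) is possible.
 */
unsigned int cache_mask;

void
check_cache_mask(void)
{
        assert((cache_mask & SUPERSECTION_BIT) == 0);
}
#endif

The pulled-up change makes exactly this conversion for the
(L1_S_CACHE_MASK & L1_S_V6_SUPER) check; see the @@ -6406,7 +6409,7 @@
hunk in the diff below.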
diffstat:
sys/arch/arm/arm32/pmap.c | 144 +++++++++++++++++++++++-----------------------
1 files changed, 72 insertions(+), 72 deletions(-)
diffs (truncated from 460 to 300 lines):
diff -r 01d14928c8cd -r 980ba9a219d2 sys/arch/arm/arm32/pmap.c
--- a/sys/arch/arm/arm32/pmap.c Mon Feb 10 19:14:03 2020 +0000
+++ b/sys/arch/arm/arm32/pmap.c Mon Feb 10 19:20:01 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.373 2019/04/23 11:21:21 bouyer Exp $ */
+/* $NetBSD: pmap.c,v 1.373.2.1 2020/02/10 19:20:01 martin Exp $ */
/*
* Copyright 2003 Wasabi Systems, Inc.
@@ -221,7 +221,7 @@
#include <arm/db_machdep.h>
#endif
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.373 2019/04/23 11:21:21 bouyer Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.373.2.1 2020/02/10 19:20:01 martin Exp $");
//#define PMAP_DEBUG
#ifdef PMAP_DEBUG
@@ -547,7 +547,7 @@
if (__predict_false(db_onproc != NULL))
return;
#endif
-
+
mutex_enter(pm->pm_lock);
}
@@ -614,7 +614,7 @@
/*
* L1 Page Tables are tracked using a Least Recently Used list.
* - New L1s are allocated from the HEAD.
- * - Freed L1s are added to the TAIl.
+ * - Freed L1s are added to the TAIL.
* - Recently accessed L1s (where an 'access' is some change to one of
* the userland pmaps which owns this L1) are moved to the TAIL.
*/
@@ -1096,7 +1096,7 @@
break;
}
- return (pv);
+ return pv;
}
/*
@@ -1213,7 +1213,7 @@
KASSERT(!PV_IS_KENTRY_P(set_mask));
if ((npv = pmap_find_pv(md, pm, va)) == NULL)
- return (0);
+ return 0;
NPDEBUG(PDB_PVDUMP,
printf("pmap_modify_pv: pm %p, md %p, clr 0x%x, set 0x%x, flags 0x%x\n", pm, md, clr_mask, set_mask, npv->pv_flags));
@@ -1287,7 +1287,7 @@
PMAPCOUNT(remappings);
- return (oflags);
+ return oflags;
}
/*
@@ -1495,9 +1495,9 @@
if ((l2 = pm->pm_l2[L2_IDX(l1slot)]) == NULL ||
(l2b = &l2->l2_bucket[L2_BUCKET(l1slot)])->l2b_kva == NULL)
- return (NULL);
-
- return (l2b);
+ return NULL;
+
+ return l2b;
}
/*
@@ -1525,7 +1525,7 @@
* Need to allocate a new l2_dtable.
*/
if ((l2 = pmap_alloc_l2_dtable()) == NULL)
- return (NULL);
+ return NULL;
/*
* Link it into the parent pmap
@@ -1555,7 +1555,7 @@
pm->pm_l2[L2_IDX(l1slot)] = NULL;
pmap_free_l2_dtable(l2);
}
- return (NULL);
+ return NULL;
}
l2->l2_occupancy++;
@@ -1576,7 +1576,7 @@
#endif
}
- return (l2b);
+ return l2b;
}
/*
@@ -1712,7 +1712,7 @@
memset(v, 0, L2_TABLE_SIZE_REAL);
PTE_SYNC_RANGE(v, L2_TABLE_SIZE_REAL / sizeof(pt_entry_t));
- return (0);
+ return 0;
}
static int
@@ -1720,7 +1720,7 @@
{
memset(v, 0, sizeof(struct l2_dtable));
- return (0);
+ return 0;
}
static int
@@ -1728,7 +1728,7 @@
{
memset(v, 0, sizeof(struct pmap));
- return (0);
+ return 0;
}
static void
@@ -1807,7 +1807,7 @@
if (md->urw_mappings)
uidx |= 2;
- return (pmap_vac_flags[uidx][kidx]);
+ return pmap_vac_flags[uidx][kidx];
}
static inline void
@@ -3021,7 +3021,7 @@
pmap_pinit(pm);
- return (pm);
+ return pm;
}
u_int
@@ -3251,7 +3251,7 @@
pmap_free_l2_bucket(pm, l2b, 0);
UVMHIST_LOG(maphist, " <-- done (ENOMEM)",
0, 0, 0, 0);
- return (ENOMEM);
+ return ENOMEM;
}
}
@@ -3406,7 +3406,7 @@
free_pv:
if (new_pv)
pool_put(&pmap_pv_pool, new_pv);
- return (error);
+ return error;
}
/*
@@ -3633,8 +3633,8 @@
KASSERT(PV_IS_KENTRY_P(pv->pv_flags));
/*
- * If we are removing a writeable mapping to a cached exec page,
- * if it's the last mapping then clear it execness other sync
+ * We are removing a writeable mapping to a cached exec page, if
+ * it's the last mapping then clear its execness otherwise sync
* the page to the icache.
*/
if ((md->pvh_attrs & (PVF_NC|PVF_EXEC)) == PVF_EXEC
@@ -4756,7 +4756,7 @@
out:
pmap_release_pmap_lock(pm);
- return (rv);
+ return rv;
}
/*
@@ -5772,6 +5772,8 @@
{
paddr_t pa;
+ KASSERT((va & PGOFSET) == 0);
+
if (uvm.page_init_done == false) {
#ifdef PMAP_STEAL_MEMORY
pv_addr_t pv;
@@ -5786,13 +5788,13 @@
pa = pv.pv_pa;
#else
if (uvm_page_physget(&pa) == false)
- return (1);
+ return 1;
#endif /* PMAP_STEAL_MEMORY */
} else {
struct vm_page *pg;
pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
if (pg == NULL)
- return (1);
+ return 1;
pa = VM_PAGE_TO_PHYS(pg);
/*
* This new page must not have any mappings. Enter it via
@@ -5800,9 +5802,10 @@
*/
struct vm_page_md *md __diagused = VM_PAGE_TO_MD(pg);
KASSERT(SLIST_EMPTY(&md->pvh_list));
- pmap_kenter_pa(va, pa,
- VM_PROT_READ|VM_PROT_WRITE, PMAP_KMPAGE|PMAP_PTE);
- }
+ }
+
+ pmap_kenter_pa(va, pa,
+ VM_PROT_READ|VM_PROT_WRITE, PMAP_KMPAGE|PMAP_PTE);
if (pap)
*pap = pa;
@@ -5817,7 +5820,7 @@
KDASSERT((opte & L2_S_CACHE_MASK) == pte_l2_s_cache_mode_pt);
#endif
memset((void *)va, 0, PAGE_SIZE);
- return (0);
+ return 0;
}
/*
@@ -5846,7 +5849,7 @@
* Need to allocate a backing page
*/
if (pmap_grow_map(nva, NULL))
- return (NULL);
+ return NULL;
}
l2 = (struct l2_dtable *)nva;
@@ -5857,8 +5860,8 @@
* The new l2_dtable straddles a page boundary.
* Map in another page to cover it.
*/
- if (pmap_grow_map(nva, NULL))
- return (NULL);
+ if (pmap_grow_map(nva & ~PGOFSET, NULL))
+ return NULL;
}
pmap_kernel_l2dtable_kva = nva;
@@ -5888,7 +5891,7 @@
* Need to allocate a backing page
*/
if (pmap_grow_map(nva, &pmap_kernel_l2ptp_phys))
- return (NULL);
+ return NULL;
PTE_SYNC_RANGE(ptep, PAGE_SIZE / sizeof(pt_entry_t));
}
@@ -5901,7 +5904,7 @@
pmap_kernel_l2ptp_phys += L2_TABLE_SIZE_REAL;
}
- return (l2b);
+ return l2b;
}
vaddr_t
@@ -5975,7 +5978,7 @@
splx(s);
out:
- return (pmap_curmaxkvaddr);
+ return pmap_curmaxkvaddr;
}
/************************ Utility routines ****************************/
@@ -6406,7 +6409,7 @@
pd_entry_t pde = *pdep;
if (l1pte_section_p(pde)) {
- __CTASSERT((L1_S_CACHE_MASK & L1_S_V6_SUPER) == 0);
+ KASSERT((L1_S_CACHE_MASK & L1_S_V6_SUPER) == 0);
if ((pde & L1_S_CACHE_MASK) != pte_l1_s_cache_mode_pt) {
*pdep = (pde & ~L1_S_CACHE_MASK) |
pte_l1_s_cache_mode_pt;
@@ -6435,7 +6438,7 @@
va += PAGE_SIZE;
}
- return (rv);
+ return rv;
#endif
}
@@ -6498,12 +6501,12 @@
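Taken together, the pmap_grow_map() hunks above change two things:
callers must now pass a page-aligned VA (enforced by the new
KASSERT((va & PGOFSET) == 0), with the straddling l2_dtable caller
rounding down via nva & ~PGOFSET), and the pmap_kenter_pa() call moves
out of the uvm_pagealloc() branch so the early-boot
uvm.page_init_done == false path maps the page as well, fixing the
regression from revision 1.271. Here is a compilable control-flow sketch
under those assumptions; the helpers (physget, pagealloc, kenter) are
simplified stand-ins rather than the real uvm/pmap interfaces, and the
PMAP_STEAL_MEMORY variant is elided:

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define PAGE_SIZE       4096u
#define PGOFSET         (PAGE_SIZE - 1)  /* page-offset mask, as on NetBSD */

typedef uintptr_t vaddr_t;
typedef uint64_t paddr_t;
struct vm_page { paddr_t phys; };

static bool page_init_done;     /* stands in for uvm.page_init_done */

/*
 * Simplified stand-ins for uvm_page_physget(), uvm_pagealloc() and
 * pmap_kenter_pa().
 */
static bool physget(paddr_t *pap) { *pap = 0x100000; return true; }
static struct vm_page *pagealloc(void)
{
        static struct vm_page pg = { 0x200000 };
        return &pg;
}
static void kenter(vaddr_t va, paddr_t pa) { (void)va; (void)pa; }

static int
grow_map(vaddr_t va, paddr_t *pap)
{
        paddr_t pa;

        /* New invariant: callers must pass a page-aligned VA. */
        assert((va & PGOFSET) == 0);

        if (!page_init_done) {
                /* Early boot: take a page straight from the free lists. */
                if (!physget(&pa))
                        return 1;
        } else {
                struct vm_page *pg = pagealloc();
                if (pg == NULL)
                        return 1;
                pa = pg->phys;
        }

        /*
         * The mapping call now sits after the branch, so both
         * allocation paths map the page; before the fix only the
         * pagealloc() path did.
         */
        kenter(va, pa);

        if (pap != NULL)
                *pap = pa;
        /* ... zero the freshly mapped page, as the real code does ... */
        return 0;
}

/*
 * A caller whose VA straddles a page boundary rounds it down first,
 * mirroring the nva & ~PGOFSET change in the diff.
 */
int
main(void)
{
        vaddr_t nva = 0x12345;
        return grow_map(nva & ~(vaddr_t)PGOFSET, NULL);
}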