Source-Changes-HG archive
[src/trunk]: src/sys/uvm/pmap Changes so that MIPS can use the common pmap.
details: https://anonhg.NetBSD.org/src/rev/74133c964304
branches: trunk
changeset: 346388:74133c964304
user: matt <matt%NetBSD.org@localhost>
date: Mon Jul 11 16:06:09 2016 +0000
description:
Changes so that MIPS can use the common pmap.
Change/augment the virtual cache alias callbacks.
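A note on the cache-alias machinery being changed here: on a CPU with a
virtually indexed cache, two virtual mappings of the same physical page
that differ in the cache-index bits (their "color") can land in different
cache lines, so a store through one mapping may not be visible through
the other.  The common pmap tests for this with PMAP_PAGE_COLOROK_P()
(visible in the pmap.c diff below).  The following sketch restates that
test outside the kernel; page_colormask and color_ok() are hypothetical
stand-ins, not code from this commit:

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical stand-in for pmap_page_colormask: the cache-index
     * bits above the page offset. */
    static uintptr_t page_colormask;

    /* True if va1 and va2 index the same cache lines, i.e. mappings at
     * these addresses cannot alias each other in the cache. */
    static bool
    color_ok(uintptr_t va1, uintptr_t va2)
    {
        return ((va1 ^ va2) & page_colormask) == 0;
    }

Mappings that fail this test are the case the virtual cache alias
callbacks deal with, by flushing or by entering the page uncached (cf.
the PMAP_NO_PV_UNCACHED check added below).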
diffstat:
 sys/uvm/pmap/pmap.c        |  993 ++++++++++++++++++++++++++++----------------
 sys/uvm/pmap/pmap.h        |   28 +-
 sys/uvm/pmap/pmap_segtab.c |   13 +-
 sys/uvm/pmap/pmap_synci.c  |   43 +-
 sys/uvm/pmap/pmap_synci.h  |   41 +
 sys/uvm/pmap/pmap_tlb.c    |  190 +++++---
 sys/uvm/pmap/pmap_tlb.h    |    4 +-
 sys/uvm/pmap/tlb.h         |   13 +-
 sys/uvm/pmap/vmpagemd.h    |   89 +++-
9 files changed, 915 insertions(+), 499 deletions(-)
diffs (truncated from 2864 to 300 lines):
diff -r f2795a88a9d4 -r 74133c964304 sys/uvm/pmap/pmap.c
--- a/sys/uvm/pmap/pmap.c Mon Jul 11 15:51:01 2016 +0000
+++ b/sys/uvm/pmap/pmap.c Mon Jul 11 16:06:09 2016 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.14 2016/07/07 06:55:44 msaitoh Exp $ */
+/* $NetBSD: pmap.c,v 1.15 2016/07/11 16:06:09 matt Exp $ */
/*-
* Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
@@ -67,7 +67,7 @@
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.14 2016/07/07 06:55:44 msaitoh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.15 2016/07/11 16:06:09 matt Exp $");
/*
* Manages physical address maps.
@@ -102,22 +102,22 @@
#define __PMAP_PRIVATE
#include <sys/param.h>
-#include <sys/systm.h>
-#include <sys/proc.h>
+#include <sys/atomic.h>
#include <sys/buf.h>
+#include <sys/cpu.h>
+#include <sys/mutex.h>
#include <sys/pool.h>
-#include <sys/atomic.h>
-#include <sys/mutex.h>
-#include <sys/atomic.h>
-#include <sys/socketvar.h> /* XXX: for sock_loan_thresh */
#include <uvm/uvm.h>
-#define PMAP_COUNT(name) (pmap_evcnt_##name.ev_count++ + 0)
-#define PMAP_COUNTER(name, desc) \
-static struct evcnt pmap_evcnt_##name = \
- EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap", desc); \
-EVCNT_ATTACH_STATIC(pmap_evcnt_##name)
+#if defined(MULTIPROCESSOR) && defined(PMAP_VIRTUAL_CACHE_ALIASES) \
+ && !defined(PMAP_NO_PV_UNCACHED)
+#error PMAP_VIRTUAL_CACHE_ALIASES with MULTIPROCESSOR requires \
+ PMAP_NO_PV_UNCACHED to be defined
+#endif
PMAP_COUNTER(remove_kernel_calls, "remove kernel calls");
PMAP_COUNTER(remove_kernel_pages, "kernel pages unmapped");
@@ -132,8 +132,6 @@
PMAP_COUNTER(prefer_adjustments, "prefer adjustments");
PMAP_COUNTER(idlezeroed_pages, "pages idle zeroed");
-PMAP_COUNTER(zeroed_pages, "pages zeroed");
-PMAP_COUNTER(copied_pages, "pages copied");
PMAP_COUNTER(kenter_pa, "kernel fast mapped pages");
PMAP_COUNTER(kenter_pa_bad, "kernel fast mapped pages (bad color)");
@@ -190,20 +188,22 @@
#define PMAP_ASID_RESERVED 0
CTASSERT(PMAP_ASID_RESERVED == 0);
-/*
- * Initialize the kernel pmap.
- */
-#ifdef MULTIPROCESSOR
-#define PMAP_SIZE offsetof(struct pmap, pm_pai[PMAP_TLB_MAX])
-#else
-#define PMAP_SIZE sizeof(struct pmap)
-kmutex_t pmap_pvlist_mutex __aligned(COHERENCY_UNIT);
+#ifndef PMAP_SEGTAB_ALIGN
+#define PMAP_SEGTAB_ALIGN /* nothing */
+#endif
+#ifdef _LP64
+pmap_segtab_t pmap_kstart_segtab PMAP_SEGTAB_ALIGN; /* first mid-level segtab for kernel */
#endif
+pmap_segtab_t pmap_kern_segtab PMAP_SEGTAB_ALIGN = { /* top level segtab for kernel */
+#ifdef _LP64
+ .seg_seg[(VM_MIN_KERNEL_ADDRESS & XSEGOFSET) >> SEGSHIFT] = &pmap_kstart_segtab,
+#endif
+};
struct pmap_kernel kernel_pmap_store = {
.kernel_pmap = {
.pm_count = 1,
- .pm_segtab = PMAP_INVALID_SEGTAB_ADDRESS,
+ .pm_segtab = &pmap_kern_segtab,
.pm_minaddr = VM_MIN_KERNEL_ADDRESS,
.pm_maxaddr = VM_MAX_KERNEL_ADDRESS,
},
@@ -211,7 +211,7 @@
struct pmap * const kernel_pmap_ptr = &kernel_pmap_store.kernel_pmap;
-struct pmap_limits pmap_limits = {
+struct pmap_limits pmap_limits = { /* VA and PA limits */
.virtual_start = VM_MIN_KERNEL_ADDRESS,
};
@@ -231,23 +231,24 @@
#ifndef PMAP_PV_LOWAT
#define PMAP_PV_LOWAT 16
#endif
-int pmap_pv_lowat = PMAP_PV_LOWAT;
+int pmap_pv_lowat = PMAP_PV_LOWAT;
-bool pmap_initialized = false;
+bool pmap_initialized = false;
#define PMAP_PAGE_COLOROK_P(a, b) \
((((int)(a) ^ (int)(b)) & pmap_page_colormask) == 0)
-u_int pmap_page_colormask;
+u_int pmap_page_colormask;
-#define PAGE_IS_MANAGED(pa) \
- (pmap_initialized == true && vm_physseg_find(atop(pa), NULL) != -1)
+#define PAGE_IS_MANAGED(pa) (pmap_initialized && uvm_pageismanaged(pa))
#define PMAP_IS_ACTIVE(pm) \
((pm) == pmap_kernel() || \
(pm) == curlwp->l_proc->p_vmspace->vm_map.pmap)
/* Forward function declarations */
+void pmap_page_remove(struct vm_page *);
+static void pmap_pvlist_check(struct vm_page_md *);
void pmap_remove_pv(pmap_t, vaddr_t, struct vm_page *, bool);
-void pmap_enter_pv(pmap_t, vaddr_t, struct vm_page *, u_int *);
+void pmap_enter_pv(pmap_t, vaddr_t, struct vm_page *, pt_entry_t *, u_int);
/*
* PV table management functions.
@@ -265,7 +266,50 @@
#if !defined(MULTIPROCESSOR) || !defined(PMAP_MD_NEED_TLB_MISS_LOCK)
#define pmap_md_tlb_miss_lock_enter() do { } while(/*CONSTCOND*/0)
#define pmap_md_tlb_miss_lock_exit() do { } while(/*CONSTCOND*/0)
-#endif /* !MULTIPROCESSOR || !PMAP_MD_NEED_TLB_MISS_LOCK */
+#endif /* !MULTIPROCESSOR || !PMAP_MD_NEED_TLB_MISS_LOCK */
+
+#ifndef MULTIPROCESSOR
+kmutex_t pmap_pvlist_mutex __cacheline_aligned;
+#endif
+
+/*
+ * Debug functions.
+ */
+
+static inline void
+pmap_asid_check(pmap_t pm, const char *func)
+{
+#ifdef DEBUG
+ if (!PMAP_IS_ACTIVE(pm))
+ return;
+
+ struct pmap_asid_info * const pai = PMAP_PAI(pm, cpu_tlb_info(curcpu()));
+ tlb_asid_t asid = tlb_get_asid();
+ if (asid != pai->pai_asid)
+ panic("%s: inconsistency for active TLB update: %u <-> %u",
+ func, asid, pai->pai_asid);
+#endif
+}
+
+static void
+pmap_addr_range_check(pmap_t pmap, vaddr_t sva, vaddr_t eva, const char *func)
+{
+#ifdef DEBUG
+ if (pmap == pmap_kernel()) {
+ if (sva < VM_MIN_KERNEL_ADDRESS)
+ panic("%s: kva %#"PRIxVADDR" not in range",
+ func, sva);
+ if (eva >= pmap_limits.virtual_end)
+ panic("%s: kva %#"PRIxVADDR" not in range",
+ func, eva);
+ } else {
+ if (eva > VM_MAXUSER_ADDRESS)
+ panic("%s: uva %#"PRIxVADDR" not in range",
+ func, eva);
+ pmap_asid_check(pmap, func);
+ }
+#endif
+}
/*
* Misc. functions.
@@ -274,18 +318,18 @@
bool
pmap_page_clear_attributes(struct vm_page_md *mdpg, u_int clear_attributes)
{
- volatile u_int * const attrp = &mdpg->mdpg_attrs;
+ volatile unsigned long * const attrp = &mdpg->mdpg_attrs;
#ifdef MULTIPROCESSOR
for (;;) {
u_int old_attr = *attrp;
if ((old_attr & clear_attributes) == 0)
return false;
u_int new_attr = old_attr & ~clear_attributes;
- if (old_attr == atomic_cas_uint(attrp, old_attr, new_attr))
+ if (old_attr == atomic_cas_ulong(attrp, old_attr, new_attr))
return true;
}
#else
- u_int old_attr = *attrp;
+ unsigned long old_attr = *attrp;
if ((old_attr & clear_attributes) == 0)
return false;
*attrp &= ~clear_attributes;
@@ -297,7 +341,7 @@
pmap_page_set_attributes(struct vm_page_md *mdpg, u_int set_attributes)
{
#ifdef MULTIPROCESSOR
- atomic_or_uint(&mdpg->mdpg_attrs, set_attributes);
+ atomic_or_ulong(&mdpg->mdpg_attrs, set_attributes);
#else
mdpg->mdpg_attrs |= set_attributes;
#endif
@@ -307,17 +351,19 @@
pmap_page_syncicache(struct vm_page *pg)
{
#ifndef MULTIPROCESSOR
- struct pmap * const curpmap = curcpu()->ci_curpm;
+ struct pmap * const curpmap = curlwp->l_proc->p_vmspace->vm_map.pmap;
#endif
struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
pv_entry_t pv = &mdpg->mdpg_first;
kcpuset_t *onproc;
#ifdef MULTIPROCESSOR
kcpuset_create(&onproc, true);
+ KASSERT(onproc != NULL);
#else
onproc = NULL;
#endif
- (void)VM_PAGEMD_PVLIST_LOCK(mdpg, false);
+ VM_PAGEMD_PVLIST_READLOCK(mdpg);
+ pmap_pvlist_check(mdpg);
if (pv->pv_pmap != NULL) {
for (; pv != NULL; pv = pv->pv_next) {
@@ -334,13 +380,14 @@
#endif
}
}
+ pmap_pvlist_check(mdpg);
VM_PAGEMD_PVLIST_UNLOCK(mdpg);
kpreempt_disable();
pmap_md_page_syncicache(pg, onproc);
+ kpreempt_enable();
#ifdef MULTIPROCESSOR
kcpuset_destroy(onproc);
#endif
- kpreempt_enable();
}
/*
@@ -402,24 +449,58 @@
vaddr_t
pmap_steal_memory(vsize_t size, vaddr_t *vstartp, vaddr_t *vendp)
{
- u_int npgs;
+ size_t npgs;
paddr_t pa;
vaddr_t va;
+ struct vm_physseg *maybe_seg = NULL;
+ u_int maybe_bank = vm_nphysseg;
size = round_page(size);
npgs = atop(size);
+ aprint_debug("%s: need %zu pages\n", __func__, npgs);
+
for (u_int bank = 0; bank < vm_nphysseg; bank++) {
struct vm_physseg * const seg = VM_PHYSMEM_PTR(bank);
if (uvm.page_init_done == true)
panic("pmap_steal_memory: called _after_ bootstrap");
- if (seg->avail_start != seg->start ||
- seg->avail_start >= seg->avail_end)
+ aprint_debug("%s: seg %u: %#"PRIxPADDR" %#"PRIxPADDR" %#"PRIxPADDR" %#"PRIxPADDR"\n",
+ __func__, bank,
+ seg->avail_start, seg->start,
+ seg->avail_end, seg->end);
+
+ if (seg->avail_start != seg->start
+ || seg->avail_start >= seg->avail_end) {
+ aprint_debug("%s: seg %u: bad start\n", __func__, bank);
+ continue;
+ }
+
+ if (seg->avail_end - seg->avail_start < npgs) {
+ aprint_debug("%s: seg %u: too small for %zu pages\n",
+ __func__, bank, npgs);
continue;
+ }
- if ((seg->avail_end - seg->avail_start) < npgs)
+ if (!pmap_md_ok_to_steal_p(seg, npgs)) {
continue;
+ }
+
+ /*
+ * Always try to allocate from the segment with the least
+ * amount of space left.
+ */
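For reference, the MULTIPROCESSOR branch of pmap_page_clear_attributes()
in the first pmap.c hunk above is a compare-and-swap retry loop, widened
by this commit from atomic_cas_uint() to atomic_cas_ulong() to match the
now-wider mdpg_attrs field.  A standalone restatement of the same
pattern, using C11 atomics rather than the kernel's atomic_cas_ulong()
so it compiles in userland (illustrative only, not the committed code):

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Atomically clear clear_bits in *attrp.  Returns true iff this
     * caller cleared at least one bit that was still set. */
    static bool
    clear_attributes(_Atomic unsigned long *attrp, unsigned long clear_bits)
    {
        unsigned long old_attr = atomic_load(attrp);

        for (;;) {
            if ((old_attr & clear_bits) == 0)
                return false;   /* nothing left to clear */
            /* On failure old_attr is refreshed with the current value
             * and the loop retries, exactly like the atomic_cas_ulong()
             * comparison in the hunk above. */
            if (atomic_compare_exchange_weak(attrp, &old_attr,
                old_attr & ~clear_bits))
                return true;
        }
    }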