Source-Changes-HG archive
[src/trunk]: src/sys/uvm/pmap move from common/pmap/tlb -> uvm/pmap
details: https://anonhg.NetBSD.org/src/rev/77f816e1965b
branches: trunk
changeset: 781821:77f816e1965b
user: christos <christos@NetBSD.org>
date: Wed Oct 03 00:51:45 2012 +0000
description:
move from common/pmap/tlb -> uvm/pmap
diffstat:
sys/uvm/pmap/pmap.c | 1874 ++++++++++++++++++++++++++++++++++++++++++++
sys/uvm/pmap/pmap.h | 271 ++++++
sys/uvm/pmap/pmap_segtab.c | 486 +++++++++++
sys/uvm/pmap/pmap_synci.c | 199 ++++
sys/uvm/pmap/pmap_tlb.c | 891 ++++++++++++++++++++
sys/uvm/pmap/tlb.h | 71 +
sys/uvm/pmap/vmpagemd.h | 112 ++
7 files changed, 3904 insertions(+), 0 deletions(-)
diffs (truncated from 3932 to 300 lines):
diff -r 1023a25cb536 -r 77f816e1965b sys/uvm/pmap/pmap.c
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/sys/uvm/pmap/pmap.c Wed Oct 03 00:51:45 2012 +0000
@@ -0,0 +1,1874 @@
+/* $NetBSD: pmap.c,v 1.1 2012/10/03 00:51:45 christos Exp $ */
+
+/*-
+ * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
+ * NASA Ames Research Center and by Chris G. Demetriou.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department and Ralph Campbell.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)pmap.c 8.4 (Berkeley) 1/26/94
+ */
+
+#include <sys/cdefs.h>
+
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.1 2012/10/03 00:51:45 christos Exp $");
+
+/*
+ * Manages physical address maps.
+ *
+ * In addition to hardware address maps, this
+ * module is called upon to provide software-use-only
+ * maps which may or may not be stored in the same
+ * form as hardware maps. These pseudo-maps are
+ * used to store intermediate results from copy
+ * operations to and from address spaces.
+ *
+ * Since the information managed by this module is
+ * also stored by the logical address mapping module,
+ * this module may throw away valid virtual-to-physical
+ * mappings at almost any time. However, invalidations
+ * of virtual-to-physical mappings must be done as
+ * requested.
+ *
+ * In order to cope with hardware architectures which
+ * make virtual-to-physical map invalidates expensive,
+ * this module may delay invalidate or reduced protection
+ * operations until such time as they are actually
+ * necessary. This module is given full information as
+ * to which processors are currently using which maps,
+ * and to when physical maps must be made correct.
+ */
+
+#include "opt_modular.h"
+#include "opt_multiprocessor.h"
+#include "opt_sysv.h"
+
+#define __PMAP_PRIVATE
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/buf.h>
+#include <sys/pool.h>
+#include <sys/atomic.h>
+#include <sys/mutex.h>
+#ifdef SYSVSHM
+#include <sys/shm.h>
+#endif
+#include <sys/socketvar.h> /* XXX: for sock_loan_thresh */
+
+#include <uvm/uvm.h>
+
+#define PMAP_COUNT(name) (pmap_evcnt_##name.ev_count++ + 0)
+#define PMAP_COUNTER(name, desc) \
+static struct evcnt pmap_evcnt_##name = \
+ EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap", desc); \
+EVCNT_ATTACH_STATIC(pmap_evcnt_##name)
+
+PMAP_COUNTER(remove_kernel_calls, "remove kernel calls");
+PMAP_COUNTER(remove_kernel_pages, "kernel pages unmapped");
+PMAP_COUNTER(remove_user_calls, "remove user calls");
+PMAP_COUNTER(remove_user_pages, "user pages unmapped");
+PMAP_COUNTER(remove_flushes, "remove cache flushes");
+PMAP_COUNTER(remove_tlb_ops, "remove tlb ops");
+PMAP_COUNTER(remove_pvfirst, "remove pv first");
+PMAP_COUNTER(remove_pvsearch, "remove pv search");
+
+PMAP_COUNTER(prefer_requests, "prefer requests");
+PMAP_COUNTER(prefer_adjustments, "prefer adjustments");
+
+PMAP_COUNTER(idlezeroed_pages, "pages idle zeroed");
+PMAP_COUNTER(zeroed_pages, "pages zeroed");
+PMAP_COUNTER(copied_pages, "pages copied");
+
+PMAP_COUNTER(kenter_pa, "kernel fast mapped pages");
+PMAP_COUNTER(kenter_pa_bad, "kernel fast mapped pages (bad color)");
+PMAP_COUNTER(kenter_pa_unmanaged, "kernel fast mapped unmanaged pages");
+PMAP_COUNTER(kremove_pages, "kernel fast unmapped pages");
+
+PMAP_COUNTER(page_cache_evictions, "pages changed to uncacheable");
+PMAP_COUNTER(page_cache_restorations, "pages changed to cacheable");
+
+PMAP_COUNTER(kernel_mappings_bad, "kernel pages mapped (bad color)");
+PMAP_COUNTER(user_mappings_bad, "user pages mapped (bad color)");
+PMAP_COUNTER(kernel_mappings, "kernel pages mapped");
+PMAP_COUNTER(user_mappings, "user pages mapped");
+PMAP_COUNTER(user_mappings_changed, "user mapping changed");
+PMAP_COUNTER(kernel_mappings_changed, "kernel mapping changed");
+PMAP_COUNTER(uncached_mappings, "uncached pages mapped");
+PMAP_COUNTER(unmanaged_mappings, "unmanaged pages mapped");
+PMAP_COUNTER(managed_mappings, "managed pages mapped");
+PMAP_COUNTER(mappings, "pages mapped");
+PMAP_COUNTER(remappings, "pages remapped");
+PMAP_COUNTER(unmappings, "pages unmapped");
+PMAP_COUNTER(primary_mappings, "page initial mappings");
+PMAP_COUNTER(primary_unmappings, "page final unmappings");
+PMAP_COUNTER(tlb_hit, "page mapping");
+
+PMAP_COUNTER(exec_mappings, "exec pages mapped");
+PMAP_COUNTER(exec_synced_mappings, "exec pages synced");
+PMAP_COUNTER(exec_synced_remove, "exec pages synced (PR)");
+PMAP_COUNTER(exec_synced_clear_modify, "exec pages synced (CM)");
+PMAP_COUNTER(exec_synced_page_protect, "exec pages synced (PP)");
+PMAP_COUNTER(exec_synced_protect, "exec pages synced (P)");
+PMAP_COUNTER(exec_uncached_page_protect, "exec pages uncached (PP)");
+PMAP_COUNTER(exec_uncached_clear_modify, "exec pages uncached (CM)");
+PMAP_COUNTER(exec_uncached_zero_page, "exec pages uncached (ZP)");
+PMAP_COUNTER(exec_uncached_copy_page, "exec pages uncached (CP)");
+PMAP_COUNTER(exec_uncached_remove, "exec pages uncached (PR)");
+
+PMAP_COUNTER(create, "creates");
+PMAP_COUNTER(reference, "references");
+PMAP_COUNTER(dereference, "dereferences");
+PMAP_COUNTER(destroy, "destroyed");
+PMAP_COUNTER(activate, "activations");
+PMAP_COUNTER(deactivate, "deactivations");
+PMAP_COUNTER(update, "updates");
+#ifdef MULTIPROCESSOR
+PMAP_COUNTER(shootdown_ipis, "shootdown IPIs");
+#endif
+PMAP_COUNTER(unwire, "unwires");
+PMAP_COUNTER(copy, "copies");
+PMAP_COUNTER(clear_modify, "clear_modifies");
+PMAP_COUNTER(protect, "protects");
+PMAP_COUNTER(page_protect, "page_protects");
+
+#define PMAP_ASID_RESERVED 0
+CTASSERT(PMAP_ASID_RESERVED == 0);
+
+/*
+ * Initialize the kernel pmap.
+ */
+#ifdef MULTIPROCESSOR
+#define PMAP_SIZE offsetof(struct pmap, pm_pai[MAXCPUS])
+#else
+#define PMAP_SIZE sizeof(struct pmap)
+kmutex_t pmap_pvlist_mutex __aligned(COHERENCY_UNIT);
+#endif
+
+struct pmap_kernel kernel_pmap_store = {
+ .kernel_pmap = {
+ .pm_count = 1,
+ .pm_segtab = PMAP_INVALID_SEGTAB_ADDRESS,
+ .pm_minaddr = VM_MIN_KERNEL_ADDRESS,
+ .pm_maxaddr = VM_MAX_KERNEL_ADDRESS,
+#ifdef MULTIPROCESSOR
+ .pm_active = 1,
+ .pm_onproc = 1,
+#endif
+ },
+};
+
+struct pmap * const kernel_pmap_ptr = &kernel_pmap_store.kernel_pmap;
+
+struct pmap_limits pmap_limits;
+
+#ifdef UVMHIST
+static struct kern_history_ent pmapexechistbuf[10000];
+static struct kern_history_ent pmaphistbuf[10000];
+#endif
+
+/*
+ * The pools from which pmap structures and sub-structures are allocated.
+ */
+struct pool pmap_pmap_pool;
+struct pool pmap_pv_pool;
+
+#ifndef PMAP_PV_LOWAT
+#define PMAP_PV_LOWAT 16
+#endif
+int pmap_pv_lowat = PMAP_PV_LOWAT;
+
+bool pmap_initialized = false;
+#define PMAP_PAGE_COLOROK_P(a, b) \
+ ((((int)(a) ^ (int)(b)) & pmap_page_colormask) == 0)
+u_int pmap_page_colormask;
+
+#define PAGE_IS_MANAGED(pa) \
+ (pmap_initialized == true && vm_physseg_find(atop(pa), NULL) != -1)
+
+#define PMAP_IS_ACTIVE(pm) \
+ ((pm) == pmap_kernel() || \
+ (pm) == curlwp->l_proc->p_vmspace->vm_map.pmap)
+
+/* Forward function declarations */
+void pmap_remove_pv(pmap_t, vaddr_t, struct vm_page *, bool);
+void pmap_enter_pv(pmap_t, vaddr_t, struct vm_page *, u_int *);
+
+/*
+ * PV table management functions.
+ */
+void *pmap_pv_page_alloc(struct pool *, int);
+void pmap_pv_page_free(struct pool *, void *);
+
+struct pool_allocator pmap_pv_page_allocator = {
+ pmap_pv_page_alloc, pmap_pv_page_free, 0,
+};
+
+#define pmap_pv_alloc() pool_get(&pmap_pv_pool, PR_NOWAIT)
+#define pmap_pv_free(pv) pool_put(&pmap_pv_pool, (pv))
+
+/*
+ * Misc. functions.
+ */
+
+bool
+pmap_page_clear_attributes(struct vm_page_md *mdpg, u_int clear_attributes)
+{
+ volatile u_int * const attrp = &mdpg->mdpg_attrs;
+#ifdef MULTIPROCESSOR
+ for (;;) {
+ u_int old_attr = *attrp;
+ if ((old_attr & clear_attributes) == 0)
+ return false;
+ u_int new_attr = old_attr & ~clear_attributes;
+ if (old_attr == atomic_cas_uint(attrp, old_attr, new_attr))
+ return true;
+ }
+#else
+ u_int old_attr = *attrp;
+ if ((old_attr & clear_attributes) == 0)
+ return false;
+ *attrp &= ~clear_attributes;
+ return true;
+#endif
+}
+
+void
+pmap_page_set_attributes(struct vm_page_md *mdpg, u_int set_attributes)
+{
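
The PMAP_COUNTER()/PMAP_COUNT() pair defined near the top of the new pmap.c is the file's statistics mechanism: each counter is a static evcnt(9) structure, attached automatically at boot via EVCNT_ATTACH_STATIC() and bumped inline where the event occurs. A minimal sketch of how the idiom is used follows; the counter name example_calls and the function pmap_example() are hypothetical and only illustrate the pattern, they are not part of this commit.

/*
 * Illustrative sketch only: the PMAP_COUNTER()/PMAP_COUNT() evcnt(9)
 * idiom from pmap.c.  "example_calls" and pmap_example() are
 * hypothetical names.
 */
#include <sys/types.h>
#include <sys/evcnt.h>

#define PMAP_COUNT(name)	(pmap_evcnt_##name.ev_count++ + 0)
#define PMAP_COUNTER(name, desc) \
static struct evcnt pmap_evcnt_##name = \
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap", desc); \
EVCNT_ATTACH_STATIC(pmap_evcnt_##name)

/* Define and attach one counter at file scope... */
PMAP_COUNTER(example_calls, "example calls");

/* ...then bump it wherever the counted event happens. */
static void
pmap_example(void)
{
	PMAP_COUNT(example_calls);
}

Counters attached this way should show up in the output of vmstat -e, grouped under the "pmap" name passed to EVCNT_INITIALIZER().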
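
pmap_page_clear_attributes() above clears page attribute bits without taking a lock on MULTIPROCESSOR kernels: it re-reads the attribute word and retries atomic_cas_uint(9) until the compare-and-swap lands. A standalone sketch of that retry idiom, assuming kernel context, is below; the names clear_bits_atomically() and word are hypothetical.

/*
 * Sketch of the lock-free clear loop used by pmap_page_clear_attributes():
 * re-read the word and retry the CAS until no other CPU has raced with us.
 * clear_bits_atomically() and its arguments are hypothetical names.
 */
#include <sys/types.h>
#include <sys/atomic.h>

static bool
clear_bits_atomically(volatile u_int *word, u_int bits)
{
	for (;;) {
		const u_int old = *word;
		if ((old & bits) == 0)
			return false;		/* nothing to clear */
		if (atomic_cas_uint(word, old, old & ~bits) == old)
			return true;		/* cleared at least one bit */
		/* lost the race: reload and try again */
	}
}

The uniprocessor branch of the real function performs the same update as a plain read-modify-write, as the #else arm above shows, since no other CPU can race with it there.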