Source-Changes-HG archive
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]
[src/trunk]: src/sys/arch Provide pmap_enter_ma(), pmap_extract_ma(), pmap_ke...
details: https://anonhg.NetBSD.org/src/rev/485eb65232f2
branches: trunk
changeset: 754733:485eb65232f2
user: dyoung <dyoung%NetBSD.org@localhost>
date: Mon May 10 18:46:58 2010 +0000
description:
Provide pmap_enter_ma(), pmap_extract_ma(), pmap_kenter_ma() in all x86
kernels, and use them in the bus_space(9) implementation instead of ugly
Xen #ifdef-age. In a non-Xen kernel, the _ma() functions either call or
alias the equivalent _pa() functions.
Reviewed on port-xen%netbsd.org@localhost and port-i386%netbsd.org@localhost. Passes
rmind@'s and bouyer@'s inspection. Tested on i386 and on Xen DOMU /
DOM0.
diffstat:
sys/arch/x86/include/pmap.h | 24 +++-
sys/arch/x86/x86/bus_space.c | 15 +-
sys/arch/x86/x86/pmap.c | 130 +++-------------------
sys/arch/xen/conf/files.xen | 3 +-
sys/arch/xen/x86/xen_pmap.c | 245 +++++++++++++++++++++++++++++++++++++++++++
5 files changed, 288 insertions(+), 129 deletions(-)
diffs (truncated from 616 to 300 lines):
diff -r d651b9fbd5fc -r 485eb65232f2 sys/arch/x86/include/pmap.h
--- a/sys/arch/x86/include/pmap.h Mon May 10 16:33:45 2010 +0000
+++ b/sys/arch/x86/include/pmap.h Mon May 10 18:46:58 2010 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.h,v 1.29 2010/02/09 22:51:13 jym Exp $ */
+/* $NetBSD: pmap.h,v 1.30 2010/05/10 18:46:58 dyoung Exp $ */
/*
*
@@ -228,6 +228,12 @@
void pmap_emap_remove(vaddr_t, vsize_t);
void pmap_emap_sync(bool);
+void pmap_map_ptes(struct pmap *, struct pmap **, pd_entry_t **,
+ pd_entry_t * const **);
+void pmap_unmap_ptes(struct pmap *, struct pmap *);
+
+int pmap_pdes_invalid(vaddr_t, pd_entry_t * const *, pd_entry_t *);
+
vaddr_t reserve_dumppages(vaddr_t); /* XXX: not a pmap fn */
void pmap_tlb_shootdown(pmap_t, vaddr_t, vaddr_t, pt_entry_t);
@@ -248,6 +254,12 @@
* inline functions
*/
+__inline static bool __unused
+pmap_pdes_valid(vaddr_t va, pd_entry_t * const *pdes, pd_entry_t *lastpde)
+{
+ return pmap_pdes_invalid(va, pdes, lastpde) == 0;
+}
+
/*
* pmap_update_pg: flush one page from the TLB (or flush the whole thing
* if hardware doesn't support one-page flushing)
@@ -401,17 +413,17 @@
/* Xen helpers to change bits of a pte */
#define XPMAP_UPDATE_DIRECT 1 /* Update direct map entry flags too */
+paddr_t vtomach(vaddr_t);
+#define vtomfn(va) (vtomach(va) >> PAGE_SHIFT)
+
+#endif /* XEN */
+
/* pmap functions with machine addresses */
void pmap_kenter_ma(vaddr_t, paddr_t, vm_prot_t, u_int);
int pmap_enter_ma(struct pmap *, vaddr_t, paddr_t, paddr_t,
vm_prot_t, u_int, int);
bool pmap_extract_ma(pmap_t, vaddr_t, paddr_t *);
-paddr_t vtomach(vaddr_t);
-#define vtomfn(va) (vtomach(va) >> PAGE_SHIFT)
-
-#endif /* XEN */
-
/*
* Hooks for the pool allocator.
*/
diff -r d651b9fbd5fc -r 485eb65232f2 sys/arch/x86/x86/bus_space.c
--- a/sys/arch/x86/x86/bus_space.c Mon May 10 16:33:45 2010 +0000
+++ b/sys/arch/x86/x86/bus_space.c Mon May 10 18:46:58 2010 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: bus_space.c,v 1.28 2010/04/28 20:27:36 dyoung Exp $ */
+/* $NetBSD: bus_space.c,v 1.29 2010/05/10 18:46:58 dyoung Exp $ */
/*-
* Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
@@ -31,7 +31,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: bus_space.c,v 1.28 2010/04/28 20:27:36 dyoung Exp $");
+__KERNEL_RCSID(0, "$NetBSD: bus_space.c,v 1.29 2010/05/10 18:46:58 dyoung Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@@ -48,9 +48,6 @@
#ifdef XEN
#include <xen/hypervisor.h>
-#include <xen/xenpmap.h>
-
-#define pmap_extract(a, b, c) pmap_extract_ma(a, b, c)
#endif
/*
@@ -339,11 +336,7 @@
*bshp = (bus_space_handle_t)(sva + (bpa & PGOFSET));
for (va = sva; pa != endpa; pa += PAGE_SIZE, va += PAGE_SIZE) {
-#ifdef XEN
pmap_kenter_ma(va, pa, VM_PROT_READ | VM_PROT_WRITE, pmapflags);
-#else
- pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE, pmapflags);
-#endif /* XEN */
}
pmap_update(pmap_kernel());
@@ -395,7 +388,7 @@
}
#endif
- if (pmap_extract(pmap_kernel(), va, &bpa) == FALSE) {
+ if (pmap_extract_ma(pmap_kernel(), va, &bpa) == FALSE) {
panic("_x86_memio_unmap:"
" wrong virtual address");
}
@@ -447,7 +440,7 @@
panic("x86_memio_unmap: overflow");
#endif
- (void) pmap_extract(pmap_kernel(), va, &bpa);
+ (void) pmap_extract_ma(pmap_kernel(), va, &bpa);
bpa += (bsh & PGOFSET);
pmap_kremove(va, endva - va);
diff -r d651b9fbd5fc -r 485eb65232f2 sys/arch/x86/x86/pmap.c
--- a/sys/arch/x86/x86/pmap.c Mon May 10 16:33:45 2010 +0000
+++ b/sys/arch/x86/x86/pmap.c Mon May 10 18:46:58 2010 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.108 2010/05/04 23:27:14 jym Exp $ */
+/* $NetBSD: pmap.c,v 1.109 2010/05/10 18:46:58 dyoung Exp $ */
/*
* Copyright (c) 2007 Manuel Bouyer.
@@ -149,7 +149,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.108 2010/05/04 23:27:14 jym Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.109 2010/05/10 18:46:58 dyoung Exp $");
#include "opt_user_ldt.h"
#include "opt_lockdebug.h"
@@ -531,6 +531,8 @@
static pt_entry_t *csrc_pte, *cdst_pte, *zero_pte, *ptp_pte, *early_zero_pte;
static char *csrcp, *cdstp, *zerop, *ptpp, *early_zerop;
+int pmap_enter_default(pmap_t, vaddr_t, paddr_t, vm_prot_t, u_int);
+
/*
* pool and cache that PDPs are allocated from
*/
@@ -580,8 +582,6 @@
pd_entry_t * const *);
static bool pmap_is_curpmap(struct pmap *);
static bool pmap_is_active(struct pmap *, struct cpu_info *, bool);
-static void pmap_map_ptes(struct pmap *, struct pmap **,
- pt_entry_t **, pd_entry_t * const **);
static bool pmap_remove_pte(struct pmap *, struct vm_page *,
pt_entry_t *, vaddr_t,
struct pv_entry **);
@@ -589,13 +589,8 @@
vaddr_t, vaddr_t, vaddr_t,
struct pv_entry **);
-static void pmap_unmap_ptes(struct pmap *, struct pmap *);
static void pmap_unmap_apdp(void);
static bool pmap_get_physpage(vaddr_t, int, paddr_t *);
-static int pmap_pdes_invalid(vaddr_t, pd_entry_t * const *,
- pd_entry_t *);
-#define pmap_pdes_valid(va, pdes, lastpde) \
- (pmap_pdes_invalid((va), (pdes), (lastpde)) == 0)
static void pmap_alloc_level(pd_entry_t * const *, vaddr_t, int,
long *);
@@ -781,7 +776,7 @@
* => must be undone with pmap_unmap_ptes before returning
*/
-static void
+void
pmap_map_ptes(struct pmap *pmap, struct pmap **pmap2,
pd_entry_t **ptepp, pd_entry_t * const **pdeppp)
{
@@ -914,7 +909,7 @@
* pmap_unmap_ptes: unlock the PTE mapping of "pmap"
*/
-static void
+void
pmap_unmap_ptes(struct pmap *pmap, struct pmap *pmap2)
{
@@ -1122,47 +1117,7 @@
}
}
-#ifdef XEN
-/*
- * pmap_kenter_ma: enter a kernel mapping without R/M (pv_entry) tracking
- *
- * => no need to lock anything, assume va is already allocated
- * => should be faster than normal pmap enter function
- * => we expect a MACHINE address
- */
-
-void
-pmap_kenter_ma(vaddr_t va, paddr_t ma, vm_prot_t prot, u_int flags)
-{
- pt_entry_t *pte, opte, npte;
-
- if (va < VM_MIN_KERNEL_ADDRESS)
- pte = vtopte(va);
- else
- pte = kvtopte(va);
-
- npte = ma | ((prot & VM_PROT_WRITE) ? PG_RW : PG_RO) |
- PG_V | PG_k;
- if (flags & PMAP_NOCACHE)
- npte |= PG_N;
-
- if ((cpu_feature[2] & CPUID_NOX) && !(prot & VM_PROT_EXECUTE))
- npte |= PG_NX;
-
- opte = pmap_pte_testset (pte, npte); /* zap! */
-
- if (pmap_valid_entry(opte)) {
-#if defined(MULTIPROCESSOR)
- kpreempt_disable();
- pmap_tlb_shootdown(pmap_kernel(), va, 0, opte);
- kpreempt_enable();
-#else
- /* Don't bother deferring in the single CPU case. */
- pmap_update_pg(va);
-#endif
- }
-}
-#endif /* XEN */
+__weak_alias(pmap_kenter_ma, pmap_kenter_pa);
#if defined(__x86_64__)
/*
@@ -2883,7 +2838,7 @@
* some misc. functions
*/
-static int
+int
pmap_pdes_invalid(vaddr_t va, pd_entry_t * const *pdes, pd_entry_t *lastpde)
{
int i;
@@ -2977,39 +2932,9 @@
return (0);
}
+__weak_alias(pmap_extract_ma, pmap_extract);
+
#ifdef XEN
-/*
- * pmap_extract_ma: extract a MA for the given VA
- */
-
-bool
-pmap_extract_ma(struct pmap *pmap, vaddr_t va, paddr_t *pap)
-{
- pt_entry_t *ptes, pte;
- pd_entry_t pde;
- pd_entry_t * const *pdes;
- struct pmap *pmap2;
-
- kpreempt_disable();
- pmap_map_ptes(pmap, &pmap2, &ptes, &pdes);
- if (!pmap_pdes_valid(va, pdes, &pde)) {
- pmap_unmap_ptes(pmap, pmap2);
- kpreempt_enable();
- return false;
- }
-
- pte = ptes[pl1_i(va)];
- pmap_unmap_ptes(pmap, pmap2);
- kpreempt_enable();
-
- if (__predict_true((pte & PG_V) != 0)) {
- if (pap != NULL)
- *pap = (pte & PG_FRAME) | (va & (NBPD_L1 - 1));
- return true;
- }
-
- return false;
-}
/*
* vtomach: virtual address to machine address. For use by
@@ -3028,8 +2953,6 @@
#endif /* XEN */
-
-
/*
* pmap_virtual_space: used during bootup [pmap_steal_memory] to
* determine the bounds of the kernel virtual address space.
@@ -3985,24 +3908,25 @@
* defined as macro in pmap.h
*/
+__weak_alias(pmap_enter, pmap_enter_default);
+
+int
Home |
Main Index |
Thread Index |
Old Index