Source-Changes-HG archive
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]
[src/netbsd-9]: src/sys Pull up following revision(s) (requested by bouyer in...
details: https://anonhg.NetBSD.org/src/rev/5ec9d26e99b0
branches: netbsd-9
changeset: 963918:5ec9d26e99b0
user: martin <martin%NetBSD.org@localhost>
date: Sun May 31 10:39:34 2020 +0000
description:
Pull up following revision(s) (requested by bouyer in ticket #935):
sys/arch/xen/x86/x86_xpmap.c: revision 1.89
sys/arch/x86/include/pmap.h: revision 1.121
sys/arch/xen/xen/privcmd.c: revision 1.58
sys/external/mit/xen-include-public/dist/xen/include/public/memory.h: revision 1.2
sys/arch/xen/include/xenpmap.h: revision 1.44
sys/arch/xen/include/xenio.h: revision 1.12
sys/arch/x86/x86/pmap.c: revision 1.394
(all via patch)
Adjust pmap_enter_ma() for upcoming new Xen privcmd ioctl:
pass flags to xpq_update_foreign()
Introduce a pmap MD flag: PMAP_MD_XEN_NOTR, which cause xpq_update_foreign()
to use the MMU_PT_UPDATE_NO_TRANSLATE flag.
make xpq_update_foreign() return the raw Xen error. This will cause
pmap_enter_ma() to return a negative error number in this case, but the
only user of this code path is privcmd.c and it can deal with it.
Add pmap_enter_gnt(), which maps a set of Xen grant entries at the
specified va in the specified pmap. Use the hooks implemented for EPT to
keep track of mapped grant entries in the pmap, and unmap them
when pmap_remove() is called. This requires pmap_remove() to be split
into a pmap_remove_locked(), to be called from pmap_remove_gnt().
Implement new ioctl, needed by Xen 4.13:
IOCTL_PRIVCMD_MMAPBATCH_V2
IOCTL_PRIVCMD_MMAP_RESOURCE
IOCTL_GNTDEV_MMAP_GRANT_REF
IOCTL_GNTDEV_ALLOC_GRANT_REF
Always enable declarations needed by privcmd.c
diffstat:
sys/arch/x86/include/pmap.h | 4 +-
sys/arch/x86/x86/pmap.c | 360 ++++-
sys/arch/xen/include/xenio.h | 85 +-
sys/arch/xen/include/xenpmap.h | 11 +-
sys/arch/xen/x86/x86_xpmap.c | 19 +-
sys/arch/xen/xen/privcmd.c | 731 +++++++--
sys/external/mit/xen-include-public/dist/xen/include/public/memory.h | 2 +-
7 files changed, 1031 insertions(+), 181 deletions(-)
diffs (truncated from 1500 to 300 lines):
diff -r 355708f349a3 -r 5ec9d26e99b0 sys/arch/x86/include/pmap.h
--- a/sys/arch/x86/include/pmap.h Sun May 31 10:25:58 2020 +0000
+++ b/sys/arch/x86/include/pmap.h Sun May 31 10:39:34 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.h,v 1.101 2019/05/29 16:54:41 maxv Exp $ */
+/* $NetBSD: pmap.h,v 1.101.2.1 2020/05/31 10:39:34 martin Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -272,7 +272,7 @@
uint64_t pm_ncsw; /* for assertions */
struct vm_page *pm_gc_ptp; /* pages from pmap g/c */
- /* Used by NVMM. */
+ /* Used by NVMM and Xen */
int (*pm_enter)(struct pmap *, vaddr_t, paddr_t, vm_prot_t, u_int);
bool (*pm_extract)(struct pmap *, vaddr_t, paddr_t *);
void (*pm_remove)(struct pmap *, vaddr_t, vaddr_t);
diff -r 355708f349a3 -r 5ec9d26e99b0 sys/arch/x86/x86/pmap.c
--- a/sys/arch/x86/x86/pmap.c Sun May 31 10:25:58 2020 +0000
+++ b/sys/arch/x86/x86/pmap.c Sun May 31 10:39:34 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.334.2.1 2020/04/29 13:39:23 martin Exp $ */
+/* $NetBSD: pmap.c,v 1.334.2.2 2020/05/31 10:39:35 martin Exp $ */
/*
* Copyright (c) 2008, 2010, 2016, 2017 The NetBSD Foundation, Inc.
@@ -130,7 +130,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.334.2.1 2020/04/29 13:39:23 martin Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.334.2.2 2020/05/31 10:39:35 martin Exp $");
#include "opt_user_ldt.h"
#include "opt_lockdebug.h"
@@ -150,6 +150,7 @@
#include <sys/intr.h>
#include <sys/xcall.h>
#include <sys/kcore.h>
+#include <sys/kmem.h>
#include <sys/asan.h>
#include <uvm/uvm.h>
@@ -2393,7 +2394,7 @@
pmap->pm_flags = 0;
pmap->pm_gc_ptp = NULL;
- /* Used by NVMM. */
+ /* Used by NVMM and Xen */
pmap->pm_enter = NULL;
pmap->pm_extract = NULL;
pmap->pm_remove = NULL;
@@ -3527,32 +3528,19 @@
return true;
}
-/*
- * pmap_remove: mapping removal function.
- *
- * => caller should not be holding any pmap locks
- */
-void
-pmap_remove(struct pmap *pmap, vaddr_t sva, vaddr_t eva)
+static void
+pmap_remove_locked(struct pmap *pmap, vaddr_t sva, vaddr_t eva,
+ pt_entry_t *ptes, pd_entry_t * const *pdes)
{
- pt_entry_t *ptes;
pd_entry_t pde;
- pd_entry_t * const *pdes;
struct pv_entry *pv_tofree = NULL;
bool result;
paddr_t ptppa;
vaddr_t blkendva, va = sva;
struct vm_page *ptp;
- struct pmap *pmap2;
int lvl;
- if (__predict_false(pmap->pm_remove != NULL)) {
- (*pmap->pm_remove)(pmap, sva, eva);
- return;
- }
-
- kpreempt_disable();
- pmap_map_ptes(pmap, &pmap2, &ptes, &pdes); /* locks pmap */
+ KASSERT(kpreempt_disabled());
/*
* removing one page? take shortcut function.
@@ -3620,14 +3608,31 @@
pmap_free_ptp(pmap, ptp, va, ptes, pdes);
}
}
- pmap_unmap_ptes(pmap, pmap2); /* unlock pmap */
- kpreempt_enable();
/* Now we free unused PVs */
if (pv_tofree)
pmap_free_pvs(pv_tofree);
}
+void
+pmap_remove(struct pmap *pmap, vaddr_t sva, vaddr_t eva)
+{
+ struct pmap *pmap2;
+ pt_entry_t *ptes;
+ pd_entry_t * const *pdes;
+
+ if (__predict_false(pmap->pm_remove != NULL)) {
+ (*pmap->pm_remove)(pmap, sva, eva);
+ return;
+ }
+
+ kpreempt_disable();
+ pmap_map_ptes(pmap, &pmap2, &ptes, &pdes); /* locks pmap */
+ pmap_remove_locked(pmap, sva, eva, ptes, pdes);
+ pmap_unmap_ptes(pmap, pmap2); /* unlock pmap */
+ kpreempt_enable();
+}
+
/*
* pmap_sync_pv: clear pte bits and return the old value of the pp_attrs.
*
@@ -4296,7 +4301,7 @@
continue;
}
error = xpq_update_foreign(
- vtomach((vaddr_t)ptep), npte, domid);
+ vtomach((vaddr_t)ptep), npte, domid, flags);
splx(s);
if (error) {
if (ptp != NULL && ptp->wire_count <= 1) {
@@ -4380,6 +4385,315 @@
return error;
}
+#if defined(XEN) && defined(DOM0OPS)
+
+struct pmap_data_gnt {
+ SLIST_ENTRY(pmap_data_gnt) pd_gnt_list;
+ vaddr_t pd_gnt_sva;
+ vaddr_t pd_gnt_eva; /* range covered by this gnt */
+ int pd_gnt_refs; /* ref counter */
+ struct gnttab_map_grant_ref pd_gnt_ops[1]; /* variable length */
+};
+SLIST_HEAD(pmap_data_gnt_head, pmap_data_gnt);
+
+static void pmap_remove_gnt(struct pmap *, vaddr_t, vaddr_t);
+
+static struct pmap_data_gnt *
+pmap_find_gnt(struct pmap *pmap, vaddr_t sva, vaddr_t eva)
+{
+ struct pmap_data_gnt_head *headp;
+ struct pmap_data_gnt *pgnt;
+
+ KASSERT(mutex_owned(pmap->pm_lock));
+ headp = pmap->pm_data;
+ KASSERT(headp != NULL);
+ SLIST_FOREACH(pgnt, headp, pd_gnt_list) {
+ if (pgnt->pd_gnt_sva >= sva && pgnt->pd_gnt_sva <= eva)
+ return pgnt;
+ /* check that we're not overlapping part of a region */
+ KASSERT(pgnt->pd_gnt_sva >= eva || pgnt->pd_gnt_eva <= sva);
+ }
+ return NULL;
+}
+
+static void
+pmap_alloc_gnt(struct pmap *pmap, vaddr_t sva, int nentries,
+ const struct gnttab_map_grant_ref *ops)
+{
+ struct pmap_data_gnt_head *headp;
+ struct pmap_data_gnt *pgnt;
+ vaddr_t eva = sva + nentries * PAGE_SIZE;
+ KASSERT(mutex_owned(pmap->pm_lock));
+ KASSERT(nentries >= 1);
+ if (pmap->pm_remove == NULL) {
+ pmap->pm_remove = pmap_remove_gnt;
+ KASSERT(pmap->pm_data == NULL);
+ headp = kmem_alloc(sizeof(*headp), KM_SLEEP);
+ SLIST_INIT(headp);
+ pmap->pm_data = headp;
+ } else {
+ KASSERT(pmap->pm_remove == pmap_remove_gnt);
+ KASSERT(pmap->pm_data != NULL);
+ headp = pmap->pm_data;
+ }
+
+ pgnt = pmap_find_gnt(pmap, sva, eva);
+ if (pgnt != NULL) {
+ KASSERT(pgnt->pd_gnt_sva == sva);
+ KASSERT(pgnt->pd_gnt_eva == eva);
+ return;
+ }
+
+ /* new entry */
+ pgnt = kmem_alloc(sizeof(*pgnt) +
+ (nentries - 1) * sizeof(struct gnttab_map_grant_ref), KM_SLEEP);
+ pgnt->pd_gnt_sva = sva;
+ pgnt->pd_gnt_eva = eva;
+ pgnt->pd_gnt_refs = 0;
+ memcpy(pgnt->pd_gnt_ops, ops,
+ sizeof(struct gnttab_map_grant_ref) * nentries);
+ SLIST_INSERT_HEAD(headp, pgnt, pd_gnt_list);
+}
+
+static void
+pmap_free_gnt(struct pmap *pmap, struct pmap_data_gnt *pgnt)
+{
+ struct pmap_data_gnt_head *headp = pmap->pm_data;
+ int nentries = (pgnt->pd_gnt_eva - pgnt->pd_gnt_sva) / PAGE_SIZE;
+ KASSERT(nentries >= 1);
+ KASSERT(mutex_owned(&pmap->pm_lock));
+ KASSERT(pgnt->pd_gnt_refs == 0);
+ SLIST_REMOVE(headp, pgnt, pmap_data_gnt, pd_gnt_list);
+ kmem_free(pgnt, sizeof(*pgnt) +
+ (nentries - 1) * sizeof(struct gnttab_map_grant_ref));
+ if (SLIST_EMPTY(headp)) {
+ kmem_free(headp, sizeof(*headp));
+ pmap->pm_data = NULL;
+ pmap->pm_remove = NULL;
+ }
+}
+
+/*
+ * pmap_enter_gnt: enter a grant entry into a pmap
+ *
+ * => must be done "now" ... no lazy-evaluation
+ */
+int
+pmap_enter_gnt(struct pmap *pmap, vaddr_t va, vaddr_t sva, int nentries,
+ const struct gnttab_map_grant_ref *oops)
+{
+ struct pmap_data_gnt *pgnt;
+ pt_entry_t *ptes, opte;
+ pt_entry_t *ptep;
+ pd_entry_t * const *pdes;
+ struct vm_page *ptp;
+ struct vm_page *old_pg;
+ struct pmap_page *old_pp;
+ struct pv_entry *old_pve = NULL;
+ int error;
+ struct pmap *pmap2;
+ struct gnttab_map_grant_ref *op;
+ int ret;
+ int idx;
+
+ KASSERT(pmap_initialized);
+ KASSERT(curlwp->l_md.md_gc_pmap != pmap);
+ KASSERT(va < VM_MAX_KERNEL_ADDRESS);
+ KASSERTMSG(va != (vaddr_t)PDP_BASE, "%s: trying to map va=%#"
+ PRIxVADDR " over PDP!", __func__, va);
+ KASSERTMSG(va < VM_MIN_KERNEL_ADDRESS ||
+ pmap_valid_entry(pmap->pm_pdir[pl_i(va, PTP_LEVELS)]),
+ "%s: missing kernel PTP for va=%#" PRIxVADDR, __func__, va);
+
+ kpreempt_disable();
+ pmap_map_ptes(pmap, &pmap2, &ptes, &pdes); /* locks pmap */
+
+ pmap_alloc_gnt(pmap, sva, nentries, oops);
+
+ pgnt = pmap_find_gnt(pmap, va, va + PAGE_SIZE);
+ KASSERT(pgnt != NULL);
+
+ ptp = pmap_get_ptp(pmap, va, pdes, PMAP_CANFAIL);
+ if (ptp == NULL) {
+ pmap_unmap_ptes(pmap, pmap2);
+ error = ENOMEM;
+ goto out;
+ }
+
+ /*
+ * Check if there is an existing mapping. If we are now sure that
+ * we need pves and we failed to allocate them earlier, handle that.
+ * Caching the value of oldpa here is safe because only the mod/ref
+ * bits can change while the pmap is locked.
+ */
+ ptep = &ptes[pl1_i(va)];
+ opte = *ptep;
+ bool have_oldpa = pmap_valid_entry(opte);
+ paddr_t oldpa = pmap_pte2pa(opte);
+
+ /*
+ * Update the pte.
+ */
+
+ idx = (va - pgnt->pd_gnt_sva) / PAGE_SIZE;
+ op = &pgnt->pd_gnt_ops[idx];
+
+ op->host_addr = xpmap_ptetomach(ptep);
+ op->dev_bus_addr = 0;
+ op->status = GNTST_general_error;
+ ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, op, 1);
+ if (__predict_false(ret)) {
+ printf("%s: GNTTABOP_map_grant_ref failed: %d\n",
+ __func__, ret);
+ op->status = GNTST_general_error;
+ }
+ for (int d = 0; d < 256 && op->status == GNTST_eagain; d++) {
Home |
Main Index |
Thread Index |
Old Index