Source-Changes-HG archive
[src/trunk]: src/sys/arch/xen/x86 style, localify global variables, etc, no real functional change
details: https://anonhg.NetBSD.org/src/rev/5987ec95a531
branches: trunk
changeset: 363435:5987ec95a531
user: maxv <maxv%NetBSD.org@localhost>
date: Fri Jul 27 09:22:40 2018 +0000
description:
style, localify global variables, etc, no real functional change
diffstat:
sys/arch/xen/x86/xen_shm_machdep.c | 88 +++++++++++++++++++------------------
1 files changed, 45 insertions(+), 43 deletions(-)
diffs (238 lines):
diff -r 623aae208b64 -r 5987ec95a531 sys/arch/xen/x86/xen_shm_machdep.c
--- a/sys/arch/xen/x86/xen_shm_machdep.c Fri Jul 27 07:35:09 2018 +0000
+++ b/sys/arch/xen/x86/xen_shm_machdep.c Fri Jul 27 09:22:40 2018 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: xen_shm_machdep.c,v 1.11 2018/06/24 20:28:57 jdolecek Exp $ */
+/* $NetBSD: xen_shm_machdep.c,v 1.12 2018/07/27 09:22:40 maxv Exp $ */
/*
* Copyright (c) 2006 Manuel Bouyer.
@@ -22,12 +22,10 @@
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: xen_shm_machdep.c,v 1.11 2018/06/24 20:28:57 jdolecek Exp $");
-
+__KERNEL_RCSID(0, "$NetBSD: xen_shm_machdep.c,v 1.12 2018/07/27 09:22:40 maxv Exp $");
#include <sys/types.h>
#include <sys/param.h>
@@ -44,42 +42,37 @@
#include <xen/xen_shm.h>
/*
- * Helper routines for the backend drivers. This implement the necessary
- * functions to map a bunch of pages from foreign domains in our kernel VM
+ * Helper routines for the backend drivers. This implements the necessary
+ * functions to map a bunch of pages from foreign domains into our kernel VM
* space, do I/O to it, and unmap it.
*
* At boot time, we grab some kernel VM space that we'll use to map the foreign
- * pages. We also maintain a virtual to machine mapping table to give back
+ * pages. We also maintain a virtual-to-machine mapping table to give back
* the appropriate address to bus_dma if requested.
+ *
* If no more VM space is available, we return an error. The caller can then
* register a callback which will be called when the required VM space is
* available.
*/
-/* pointers to our VM space */
-static vaddr_t xen_shm_base_address;
-static u_long xen_shm_base_address_pg;
-static vaddr_t xen_shm_end_address;
-
/* Grab enough VM space to map an entire vbd ring. */
/* Xen3 linux guests seems to eat more pages, gives enough for 10 vbd rings */
#define BLKIF_RING_SIZE __RING_SIZE((blkif_sring_t *)0, PAGE_SIZE)
#define XENSHM_NPAGES (BLKIF_RING_SIZE * (BLKIF_MAX_SEGMENTS_PER_REQUEST + 1) * 10)
-static vsize_t xen_shm_size = (XENSHM_NPAGES * PAGE_SIZE);
-
/* vm space management */
-static vmem_t *xen_shm_arena;
+static vmem_t *xen_shm_arena __read_mostly;
/* callbacks are registered in a FIFO list. */
-
static SIMPLEQ_HEAD(xen_shm_callback_head, xen_shm_callback_entry)
xen_shm_callbacks;
+
struct xen_shm_callback_entry {
SIMPLEQ_ENTRY(xen_shm_callback_entry) xshmc_entries;
int (*xshmc_callback)(void *); /* our callback */
void *xshmc_arg; /* cookie passed to the callback */
};
+
/* a pool of struct xen_shm_callback_entry */
static struct pool xen_shm_callback_pool;
@@ -91,6 +84,11 @@
void
xen_shm_init(void)
{
+ vaddr_t xen_shm_base_address;
+ vaddr_t xen_shm_end_address;
+ u_long xen_shm_base_address_pg;
+ vsize_t xen_shm_size;
+
SIMPLEQ_INIT(&xen_shm_callbacks);
pool_init(&xen_shm_callback_pool, sizeof(struct xen_shm_callback_entry),
0, 0, 0, "xshmc", NULL, IPL_VM);
@@ -100,6 +98,8 @@
panic("xen_shm_init can't prime pool");
}
+ xen_shm_size = (XENSHM_NPAGES * PAGE_SIZE);
+
xen_shm_base_address = uvm_km_alloc(kernel_map, xen_shm_size, 0,
UVM_KMF_VAONLY);
xen_shm_end_address = xen_shm_base_address + xen_shm_size;
@@ -107,8 +107,7 @@
if (xen_shm_base_address == 0) {
panic("xen_shm_init no VM space");
}
- xen_shm_arena = vmem_create("xen_shm",
- xen_shm_base_address_pg,
+ xen_shm_arena = vmem_create("xen_shm", xen_shm_base_address_pg,
(xen_shm_end_address >> PAGE_SHIFT) - 1 - xen_shm_base_address_pg,
1, NULL, NULL, NULL, 1, VM_NOSLEEP, IPL_VM);
if (xen_shm_arena == NULL) {
@@ -120,45 +119,42 @@
xen_shm_map(int nentries, int domid, grant_ref_t *grefp, vaddr_t *vap,
grant_handle_t *handlep, int flags)
{
- int s, i;
- vaddr_t new_va;
+ gnttab_map_grant_ref_t op[XENSHM_MAX_PAGES_PER_REQUEST];
vmem_addr_t new_va_pg;
- int err;
- gnttab_map_grant_ref_t op[XENSHM_MAX_PAGES_PER_REQUEST];
+ vaddr_t new_va;
+ int ret, i, s;
#ifdef DIAGNOSTIC
if (nentries > XENSHM_MAX_PAGES_PER_REQUEST) {
- printf("xen_shm_map: %d entries\n", nentries);
- panic("xen_shm_map");
+ panic("xen_shm_map: %d entries", nentries);
}
#endif
+
/* XXXSMP */
s = splvm(); /* splvm is the lowest level blocking disk and net IRQ */
+
/*
- * if a driver is waiting for ressources, don't try to allocate
+ * If a driver is waiting for resources, don't try to allocate
* yet. This is to avoid a flood of small requests stalling large
* ones.
*/
if (__predict_false(SIMPLEQ_FIRST(&xen_shm_callbacks) != NULL) &&
(flags & XSHM_CALLBACK) == 0) {
+ splx(s);
#ifdef DEBUG
static struct timeval lasttime;
-#endif
- splx(s);
-#ifdef DEBUG
if (ratecheck(&lasttime, &xen_shm_errintvl))
printf("xen_shm_map: ENOMEM1\n");
#endif
return ENOMEM;
}
- /* allocate the needed virtual space */
+
+ /* Allocate the needed virtual space. */
if (vmem_alloc(xen_shm_arena, nentries,
VM_INSTANTFIT | VM_NOSLEEP, &new_va_pg) != 0) {
+ splx(s);
#ifdef DEBUG
static struct timeval lasttime;
-#endif
- splx(s);
-#ifdef DEBUG
if (ratecheck(&lasttime, &xen_shm_errintvl))
printf("xen_shm_map: ENOMEM\n");
#endif
@@ -174,14 +170,18 @@
op[i].flags = GNTMAP_host_map |
((flags & XSHM_RO) ? GNTMAP_readonly : 0);
}
- err = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, op, nentries);
- if (__predict_false(err))
+
+ ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, op, nentries);
+ if (__predict_false(ret)) {
panic("xen_shm_map: HYPERVISOR_grant_table_op failed");
+ }
+
for (i = 0; i < nentries; i++) {
if (__predict_false(op[i].status))
return op[i].status;
handlep[i] = op[i].handle;
}
+
*vap = new_va;
return 0;
}
@@ -190,15 +190,12 @@
xen_shm_unmap(vaddr_t va, int nentries, grant_handle_t *handlep)
{
gnttab_unmap_grant_ref_t op[XENSHM_MAX_PAGES_PER_REQUEST];
- int ret;
- int i;
- int s;
struct xen_shm_callback_entry *xshmc;
+ int ret, i, s;
#ifdef DIAGNOSTIC
if (nentries > XENSHM_MAX_PAGES_PER_REQUEST) {
- printf("xen_shm_unmap: %d entries\n", nentries);
- panic("xen_shm_unmap");
+ panic("xen_shm_unmap: %d entries", nentries);
}
#endif
@@ -207,13 +204,18 @@
op[i].dev_bus_addr = 0;
op[i].handle = handlep[i];
}
+
ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
op, nentries);
- if (__predict_false(ret))
+ if (__predict_false(ret)) {
panic("xen_shm_unmap: unmap failed");
+ }
+
va = va >> PAGE_SHIFT;
+
/* XXXSMP */
s = splvm(); /* splvm is the lowest level blocking disk and net IRQ */
+
vmem_free(xen_shm_arena, va, nentries);
while (__predict_false((xshmc = SIMPLEQ_FIRST(&xen_shm_callbacks))
!= NULL)) {
@@ -224,14 +226,14 @@
s = splvm(); /* XXXSMP */
pool_put(&xen_shm_callback_pool, xshmc);
} else {
- /* callback failed, probably out of ressources */
+ /* callback failed, probably out of resources */
s = splvm(); /* XXXSMP */
SIMPLEQ_INSERT_TAIL(&xen_shm_callbacks, xshmc,
- xshmc_entries);
-
+ xshmc_entries);
break;
}
}
+
splx(s);
}
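
The theory-of-operation comment in the patch describes the expected usage: a backend driver maps a batch of foreign pages into kernel VM, does its I/O, unmaps them, and registers a callback if the shared arena is temporarily exhausted. Below is a minimal sketch of that pattern, not part of the commit: my_backend_io() and my_backend_retry() are hypothetical driver routines, and xen_shm_callback() is assumed to be the registration helper (declared alongside xen_shm_map()/xen_shm_unmap() in xen/xen_shm.h) that the comment's "register a callback" refers to.

/*
 * Sketch only, not from the commit.  my_backend_io() and
 * my_backend_retry() are hypothetical; the xen_shm_* calls follow the
 * interfaces visible in the diff above.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>

#include <xen/xen_shm.h>

static int my_backend_retry(void *);

static int
my_backend_io(int domid, grant_ref_t *grefs, int ngrefs)
{
	grant_handle_t handles[XENSHM_MAX_PAGES_PER_REQUEST];
	vaddr_t va;
	int error;

	/* Map the foreign pages into our kernel VM space. */
	error = xen_shm_map(ngrefs, domid, grefs, &va, handles, 0);
	if (error == ENOMEM) {
		/*
		 * The xen_shm arena is full.  Ask to be called back once
		 * xen_shm_unmap() frees space; the retry should then pass
		 * XSHM_CALLBACK so it is not refused while callbacks are
		 * still pending.
		 */
		xen_shm_callback(my_backend_retry, NULL /* cookie */);
		return error;
	}
	if (error != 0) {
		/* Grant mapping error reported for one of the entries. */
		return error;
	}

	/* ... perform I/O on the ngrefs pages starting at va ... */

	/* Tear the mapping down once the I/O has completed. */
	xen_shm_unmap(va, ngrefs, handles);
	return 0;
}

static int
my_backend_retry(void *arg)
{
	/* Re-issue the deferred request, mapping with XSHM_CALLBACK set. */
	return 0;
}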