Source-Changes-HG archive
[src/trunk]: src Three changes in libnvmm:
details: https://anonhg.NetBSD.org/src/rev/5d907011ffd4
branches: trunk
changeset: 845914:5d907011ffd4
user: maxv <maxv%NetBSD.org@localhost>
date: Wed Oct 23 12:02:55 2019 +0000
description:
Three changes in libnvmm:
- Add 'mach' and 'vcpu' backpointers to the nvmm_io and nvmm_mem
structures.
- Rename 'nvmm_callbacks' to 'nvmm_assist_callbacks'.
- Rename and migrate NVMM_MACH_CONF_CALLBACKS to NVMM_VCPU_CONF_CALLBACKS;
the callback configuration is now per-VCPU.
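
For illustration, a minimal sketch of how a libnvmm consumer adapts to this
change: the assist callbacks are registered per-VCPU via nvmm_vcpu_configure()
with the new NVMM_VCPU_CONF_CALLBACKS op, and a callback can reach its machine
and VCPU through the new backpointers. The callback bodies and the exact
prototypes are assumptions inferred from the hunks below, not part of this
commit.

    #include <stdio.h>
    #include <string.h>
    #include <nvmm.h>

    /*
     * MMIO assist callback: the new 'mach' and 'vcpu' backpointers let the
     * handler reach its machine and VCPU without relying on global state.
     */
    static void
    my_mem_callback(struct nvmm_mem *mem)
    {
            printf("MMIO %s on vcpu %u: gpa=0x%llx size=%zu\n",
                mem->write ? "write" : "read",
                (unsigned)mem->vcpu->cpuid,
                (unsigned long long)mem->gpa, mem->size);
            if (!mem->write)
                    memset(mem->data, 0xFF, mem->size); /* fake device read */
    }

    /* Port I/O assist callback. */
    static void
    my_io_callback(struct nvmm_io *io)
    {
            if (io->in)
                    memset(io->data, 0x00, io->size);   /* fake port read */
    }

    /*
     * The callbacks were previously installed machine-wide with
     * NVMM_MACH_CONF_CALLBACKS; after this change they are configured on
     * each VCPU with NVMM_VCPU_CONF_CALLBACKS.
     */
    static int
    install_assist_callbacks(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu)
    {
            struct nvmm_assist_callbacks cbs = {
                    .io = my_io_callback,
                    .mem = my_mem_callback,
            };

            return nvmm_vcpu_configure(mach, vcpu,
                NVMM_VCPU_CONF_CALLBACKS, &cbs);
    }
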
diffstat:
lib/libnvmm/libnvmm.c | 14 +-
lib/libnvmm/libnvmm_x86.c | 153 +++++++++++++++++++++-----------------
lib/libnvmm/nvmm.h | 41 ++++++----
tests/lib/libnvmm/h_io_assist.c | 6 +-
tests/lib/libnvmm/h_mem_assist.c | 8 +-
5 files changed, 122 insertions(+), 100 deletions(-)
diffs (truncated from 718 to 300 lines):
diff -r 84fa63f1831b -r 5d907011ffd4 lib/libnvmm/libnvmm.c
--- a/lib/libnvmm/libnvmm.c Wed Oct 23 11:27:08 2019 +0000
+++ b/lib/libnvmm/libnvmm.c Wed Oct 23 12:02:55 2019 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: libnvmm.c,v 1.15 2019/10/23 07:01:11 maxv Exp $ */
+/* $NetBSD: libnvmm.c,v 1.16 2019/10/23 12:02:55 maxv Exp $ */
/*
* Copyright (c) 2018-2019 The NetBSD Foundation, Inc.
@@ -260,12 +260,6 @@
struct nvmm_ioc_machine_configure args;
int ret;
- switch (op) {
- case NVMM_MACH_CONF_CALLBACKS:
- memcpy(&mach->cbs, conf, sizeof(mach->cbs));
- return 0;
- }
-
args.machid = mach->machid;
args.op = op;
args.conf = conf;
@@ -335,6 +329,12 @@
struct nvmm_ioc_vcpu_configure args;
int ret;
+ switch (op) {
+ case NVMM_VCPU_CONF_CALLBACKS:
+ memcpy(&vcpu->cbs, conf, sizeof(vcpu->cbs));
+ return 0;
+ }
+
args.machid = mach->machid;
args.cpuid = vcpu->cpuid;
args.op = op;
diff -r 84fa63f1831b -r 5d907011ffd4 lib/libnvmm/libnvmm_x86.c
--- a/lib/libnvmm/libnvmm_x86.c Wed Oct 23 11:27:08 2019 +0000
+++ b/lib/libnvmm/libnvmm_x86.c Wed Oct 23 12:02:55 2019 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: libnvmm_x86.c,v 1.36 2019/10/23 07:01:11 maxv Exp $ */
+/* $NetBSD: libnvmm_x86.c,v 1.37 2019/10/23 12:02:55 maxv Exp $ */
/*
* Copyright (c) 2018-2019 The NetBSD Foundation, Inc.
@@ -555,9 +555,10 @@
}
static int
-read_guest_memory(struct nvmm_machine *mach, struct nvmm_x64_state *state,
+read_guest_memory(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu,
gvaddr_t gva, uint8_t *data, size_t size)
{
+ struct nvmm_x64_state *state = vcpu->state;
struct nvmm_mem mem;
nvmm_prot_t prot;
gpaddr_t gpa;
@@ -585,11 +586,13 @@
is_mmio = (ret == -1);
if (is_mmio) {
+ mem.mach = mach;
+ mem.vcpu = vcpu;
mem.data = data;
mem.gpa = gpa;
mem.write = false;
mem.size = size;
- (*mach->cbs.mem)(&mem);
+ (*vcpu->cbs.mem)(&mem);
} else {
if (__predict_false(!(prot & NVMM_PROT_READ))) {
errno = EFAULT;
@@ -599,7 +602,7 @@
}
if (remain > 0) {
- ret = read_guest_memory(mach, state, gva + size,
+ ret = read_guest_memory(mach, vcpu, gva + size,
data + size, remain);
} else {
ret = 0;
@@ -609,9 +612,10 @@
}
static int
-write_guest_memory(struct nvmm_machine *mach, struct nvmm_x64_state *state,
+write_guest_memory(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu,
gvaddr_t gva, uint8_t *data, size_t size)
{
+ struct nvmm_x64_state *state = vcpu->state;
struct nvmm_mem mem;
nvmm_prot_t prot;
gpaddr_t gpa;
@@ -639,11 +643,13 @@
is_mmio = (ret == -1);
if (is_mmio) {
+ mem.mach = mach;
+ mem.vcpu = vcpu;
mem.data = data;
mem.gpa = gpa;
mem.write = true;
mem.size = size;
- (*mach->cbs.mem)(&mem);
+ (*vcpu->cbs.mem)(&mem);
} else {
if (__predict_false(!(prot & NVMM_PROT_WRITE))) {
errno = EFAULT;
@@ -653,7 +659,7 @@
}
if (remain > 0) {
- ret = write_guest_memory(mach, state, gva + size,
+ ret = write_guest_memory(mach, vcpu, gva + size,
data + size, remain);
} else {
ret = 0;
@@ -664,12 +670,12 @@
/* -------------------------------------------------------------------------- */
-static int fetch_segment(struct nvmm_machine *, struct nvmm_x64_state *);
+static int fetch_segment(struct nvmm_machine *, struct nvmm_vcpu *);
#define NVMM_IO_BATCH_SIZE 32
static int
-assist_io_batch(struct nvmm_machine *mach, struct nvmm_x64_state *state,
+assist_io_batch(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu,
struct nvmm_io *io, gvaddr_t gva, uint64_t cnt)
{
uint8_t iobuf[NVMM_IO_BATCH_SIZE];
@@ -683,18 +689,18 @@
io->data = iobuf;
if (!io->in) {
- ret = read_guest_memory(mach, state, gva, iobuf, iosize);
+ ret = read_guest_memory(mach, vcpu, gva, iobuf, iosize);
if (ret == -1)
return -1;
}
for (i = 0; i < iocnt; i++) {
- (*mach->cbs.io)(io);
+ (*vcpu->cbs.io)(io);
io->data += io->size;
}
if (io->in) {
- ret = write_guest_memory(mach, state, gva, iobuf, iosize);
+ ret = write_guest_memory(mach, vcpu, gva, iobuf, iosize);
if (ret == -1)
return -1;
}
@@ -721,6 +727,8 @@
return -1;
}
+ io.mach = mach;
+ io.vcpu = vcpu;
io.port = exit->u.io.port;
io.in = exit->u.io.in;
io.size = exit->u.io.operand_size;
@@ -763,7 +771,7 @@
if (io.in) {
seg = NVMM_X64_SEG_ES;
} else {
- seg = fetch_segment(mach, state);
+ seg = fetch_segment(mach, vcpu);
if (seg == -1)
return -1;
}
@@ -781,7 +789,7 @@
}
if (exit->u.io.rep && !psld) {
- iocnt = assist_io_batch(mach, state, &io, gva, cnt);
+ iocnt = assist_io_batch(mach, vcpu, &io, gva, cnt);
if (iocnt == -1)
return -1;
goto done;
@@ -792,14 +800,14 @@
if (!exit->u.io.str) {
memcpy(io.data, &state->gprs[NVMM_X64_GPR_RAX], io.size);
} else {
- ret = read_guest_memory(mach, state, gva, io.data,
+ ret = read_guest_memory(mach, vcpu, gva, io.data,
io.size);
if (ret == -1)
return -1;
}
}
- (*mach->cbs.io)(&io);
+ (*vcpu->cbs.io)(&io);
if (io.in) {
if (!exit->u.io.str) {
@@ -809,7 +817,7 @@
state->gprs[NVMM_X64_GPR_RAX] &= size_to_mask(4);
}
} else {
- ret = write_guest_memory(mach, state, gva, io.data,
+ ret = write_guest_memory(mach, vcpu, gva, io.data,
io.size);
if (ret == -1)
return -1;
@@ -849,20 +857,20 @@
bool readreg;
bool backprop;
bool notouch;
- void (*func)(struct nvmm_machine *, struct nvmm_mem *, uint64_t *);
+ void (*func)(struct nvmm_vcpu *, struct nvmm_mem *, uint64_t *);
};
-static void x86_func_or(struct nvmm_machine *, struct nvmm_mem *, uint64_t *);
-static void x86_func_and(struct nvmm_machine *, struct nvmm_mem *, uint64_t *);
-static void x86_func_xchg(struct nvmm_machine *, struct nvmm_mem *, uint64_t *);
-static void x86_func_sub(struct nvmm_machine *, struct nvmm_mem *, uint64_t *);
-static void x86_func_xor(struct nvmm_machine *, struct nvmm_mem *, uint64_t *);
-static void x86_func_cmp(struct nvmm_machine *, struct nvmm_mem *, uint64_t *);
-static void x86_func_test(struct nvmm_machine *, struct nvmm_mem *, uint64_t *);
-static void x86_func_mov(struct nvmm_machine *, struct nvmm_mem *, uint64_t *);
-static void x86_func_stos(struct nvmm_machine *, struct nvmm_mem *, uint64_t *);
-static void x86_func_lods(struct nvmm_machine *, struct nvmm_mem *, uint64_t *);
-static void x86_func_movs(struct nvmm_machine *, struct nvmm_mem *, uint64_t *);
+static void x86_func_or(struct nvmm_vcpu *, struct nvmm_mem *, uint64_t *);
+static void x86_func_and(struct nvmm_vcpu *, struct nvmm_mem *, uint64_t *);
+static void x86_func_xchg(struct nvmm_vcpu *, struct nvmm_mem *, uint64_t *);
+static void x86_func_sub(struct nvmm_vcpu *, struct nvmm_mem *, uint64_t *);
+static void x86_func_xor(struct nvmm_vcpu *, struct nvmm_mem *, uint64_t *);
+static void x86_func_cmp(struct nvmm_vcpu *, struct nvmm_mem *, uint64_t *);
+static void x86_func_test(struct nvmm_vcpu *, struct nvmm_mem *, uint64_t *);
+static void x86_func_mov(struct nvmm_vcpu *, struct nvmm_mem *, uint64_t *);
+static void x86_func_stos(struct nvmm_vcpu *, struct nvmm_mem *, uint64_t *);
+static void x86_func_lods(struct nvmm_vcpu *, struct nvmm_mem *, uint64_t *);
+static void x86_func_movs(struct nvmm_vcpu *, struct nvmm_mem *, uint64_t *);
static const struct x86_emul x86_emul_or = {
.readreg = true,
@@ -2721,7 +2729,7 @@
*/
static void
-x86_func_or(struct nvmm_machine *mach, struct nvmm_mem *mem, uint64_t *gprs)
+x86_func_or(struct nvmm_vcpu *vcpu, struct nvmm_mem *mem, uint64_t *gprs)
{
uint64_t *retval = (uint64_t *)mem->data;
const bool write = mem->write;
@@ -2733,7 +2741,7 @@
/* Fetch the value to be OR'ed (op2). */
mem->data = (uint8_t *)&op2;
mem->write = false;
- (*mach->cbs.mem)(mem);
+ (*vcpu->cbs.mem)(mem);
/* Perform the OR. */
ret = exec_or(*op1, op2, &fl, mem->size);
@@ -2742,7 +2750,7 @@
/* Write back the result. */
mem->data = (uint8_t *)&ret;
mem->write = true;
- (*mach->cbs.mem)(mem);
+ (*vcpu->cbs.mem)(mem);
} else {
/* Return data to the caller. */
*retval = ret;
@@ -2753,7 +2761,7 @@
}
static void
-x86_func_and(struct nvmm_machine *mach, struct nvmm_mem *mem, uint64_t *gprs)
+x86_func_and(struct nvmm_vcpu *vcpu, struct nvmm_mem *mem, uint64_t *gprs)
{
uint64_t *retval = (uint64_t *)mem->data;
const bool write = mem->write;
@@ -2765,7 +2773,7 @@
/* Fetch the value to be AND'ed (op2). */
mem->data = (uint8_t *)&op2;
mem->write = false;
- (*mach->cbs.mem)(mem);
+ (*vcpu->cbs.mem)(mem);
/* Perform the AND. */
ret = exec_and(*op1, op2, &fl, mem->size);
@@ -2774,7 +2782,7 @@
/* Write back the result. */
mem->data = (uint8_t *)&ret;
mem->write = true;
- (*mach->cbs.mem)(mem);
+ (*vcpu->cbs.mem)(mem);
} else {
/* Return data to the caller. */
*retval = ret;
@@ -2785,7 +2793,7 @@
}
static void
-x86_func_xchg(struct nvmm_machine *mach, struct nvmm_mem *mem, uint64_t *gprs)
+x86_func_xchg(struct nvmm_vcpu *vcpu, struct nvmm_mem *mem, uint64_t *gprs)
{