Source-Changes-HG archive
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]
[src/trunk]: src Improvements:
details: https://anonhg.NetBSD.org/src/rev/1ba31104bd3a
branches: trunk
changeset: 448704:1ba31104bd3a
user: maxv <maxv%NetBSD.org@localhost>
date: Thu Feb 07 10:58:45 2019 +0000
description:
Improvements:
- Emulate the instructions by executing them directly on the host CPU.
This is easier and probably faster than doing it in software
manually.
- Decode SUB from Primary, CMP from Group1, TEST from Group3, and add
associated tests.
- Correctly handle the cases where an instruction that always implicitly
reads the register operand is executed with the mem operand as source
(e.g. "orq (%rbx),%rax").
- Fix the MMU handling of 32bit-PAE. Under PAE, CR3 is not page-aligned,
so extra low-order bits of it are valid and must be preserved.
With these changes in place I can boot Windows XP on Qemu+NVMM.
diffstat:
lib/libnvmm/libnvmm_x86.c | 579 ++++++++++++++++++++++++++--------
tests/lib/libnvmm/h_mem_assist.c | 8 +-
tests/lib/libnvmm/h_mem_assist_asm.S | 89 +++++
3 files changed, 528 insertions(+), 148 deletions(-)
diffs (truncated from 1120 to 300 lines):
diff -r 399bbc3f1a57 -r 1ba31104bd3a lib/libnvmm/libnvmm_x86.c
--- a/lib/libnvmm/libnvmm_x86.c Thu Feb 07 10:46:32 2019 +0000
+++ b/lib/libnvmm/libnvmm_x86.c Thu Feb 07 10:58:45 2019 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: libnvmm_x86.c,v 1.18 2019/02/01 06:49:58 maxv Exp $ */
+/* $NetBSD: libnvmm_x86.c,v 1.19 2019/02/07 10:58:45 maxv Exp $ */
/*
* Copyright (c) 2018 The NetBSD Foundation, Inc.
@@ -111,6 +111,8 @@
#define pte32_l1idx(va) (((va) & PTE32_L1_MASK) >> PTE32_L1_SHIFT)
#define pte32_l2idx(va) (((va) & PTE32_L2_MASK) >> PTE32_L2_SHIFT)
+#define CR3_FRAME_32BIT PG_FRAME
+
typedef uint32_t pte_32bit_t;
static int
@@ -125,7 +127,7 @@
*prot = NVMM_PROT_ALL;
/* Parse L2. */
- L2gpa = (cr3 & PG_FRAME);
+ L2gpa = (cr3 & CR3_FRAME_32BIT);
if (nvmm_gpa_to_hva(mach, L2gpa, &L2hva) == -1)
return -1;
pdir = (pte_32bit_t *)L2hva;
@@ -181,6 +183,8 @@
#define pte32_pae_l2idx(va) (((va) & PTE32_PAE_L2_MASK) >> PTE32_PAE_L2_SHIFT)
#define pte32_pae_l3idx(va) (((va) & PTE32_PAE_L3_MASK) >> PTE32_PAE_L3_SHIFT)
+#define CR3_FRAME_32BIT_PAE __BITS(31, 5)
+
typedef uint64_t pte_32bit_pae_t;
static int
@@ -195,7 +199,7 @@
*prot = NVMM_PROT_ALL;
/* Parse L3. */
- L3gpa = (cr3 & PG_FRAME);
+ L3gpa = (cr3 & CR3_FRAME_32BIT_PAE);
if (nvmm_gpa_to_hva(mach, L3gpa, &L3hva) == -1)
return -1;
pdir = (pte_32bit_pae_t *)L3hva;
@@ -272,6 +276,8 @@
#define pte64_l3idx(va) (((va) & PTE64_L3_MASK) >> PTE64_L3_SHIFT)
#define pte64_l4idx(va) (((va) & PTE64_L4_MASK) >> PTE64_L4_SHIFT)
+#define CR3_FRAME_64BIT PG_FRAME
+
typedef uint64_t pte_64bit_t;
static inline bool
@@ -297,7 +303,7 @@
return -1;
/* Parse L4. */
- L4gpa = (cr3 & PG_FRAME);
+ L4gpa = (cr3 & CR3_FRAME_64BIT);
if (nvmm_gpa_to_hva(mach, L4gpa, &L4hva) == -1)
return -1;
pdir = (pte_64bit_t *)L4hva;
@@ -820,13 +826,68 @@
/* -------------------------------------------------------------------------- */
-static void x86_emul_or(struct nvmm_mem *, void (*)(struct nvmm_mem *), uint64_t *);
-static void x86_emul_and(struct nvmm_mem *, void (*)(struct nvmm_mem *), uint64_t *);
-static void x86_emul_xor(struct nvmm_mem *, void (*)(struct nvmm_mem *), uint64_t *);
-static void x86_emul_mov(struct nvmm_mem *, void (*)(struct nvmm_mem *), uint64_t *);
-static void x86_emul_stos(struct nvmm_mem *, void (*)(struct nvmm_mem *), uint64_t *);
-static void x86_emul_lods(struct nvmm_mem *, void (*)(struct nvmm_mem *), uint64_t *);
-static void x86_emul_movs(struct nvmm_mem *, void (*)(struct nvmm_mem *), uint64_t *);
+struct x86_emul {
+ bool read;
+ bool notouch;
+ void (*func)(struct nvmm_mem *, uint64_t *);
+};
+
+static void x86_func_or(struct nvmm_mem *, uint64_t *);
+static void x86_func_and(struct nvmm_mem *, uint64_t *);
+static void x86_func_sub(struct nvmm_mem *, uint64_t *);
+static void x86_func_xor(struct nvmm_mem *, uint64_t *);
+static void x86_func_cmp(struct nvmm_mem *, uint64_t *);
+static void x86_func_test(struct nvmm_mem *, uint64_t *);
+static void x86_func_mov(struct nvmm_mem *, uint64_t *);
+static void x86_func_stos(struct nvmm_mem *, uint64_t *);
+static void x86_func_lods(struct nvmm_mem *, uint64_t *);
+static void x86_func_movs(struct nvmm_mem *, uint64_t *);
+
+static const struct x86_emul x86_emul_or = {
+ .read = true,
+ .func = x86_func_or
+};
+
+static const struct x86_emul x86_emul_and = {
+ .read = true,
+ .func = x86_func_and
+};
+
+static const struct x86_emul x86_emul_sub = {
+ .read = true,
+ .func = x86_func_sub
+};
+
+static const struct x86_emul x86_emul_xor = {
+ .read = true,
+ .func = x86_func_xor
+};
+
+static const struct x86_emul x86_emul_cmp = {
+ .notouch = true,
+ .func = x86_func_cmp
+};
+
+static const struct x86_emul x86_emul_test = {
+ .notouch = true,
+ .func = x86_func_test
+};
+
+static const struct x86_emul x86_emul_mov = {
+ .func = x86_func_mov
+};
+
+static const struct x86_emul x86_emul_stos = {
+ .func = x86_func_stos
+};
+
+static const struct x86_emul x86_emul_lods = {
+ .func = x86_func_lods
+};
+
+static const struct x86_emul x86_emul_movs = {
+ .func = x86_func_movs
+};
/* Legacy prefixes. */
#define LEG_LOCK 0xF0
@@ -954,10 +1015,9 @@
struct x86_store src;
struct x86_store dst;
-
struct x86_store *strm;
- void (*emul)(struct nvmm_mem *, void (*)(struct nvmm_mem *), uint64_t *);
+ const struct x86_emul *emul;
};
struct x86_decode_fsm {
@@ -985,14 +1045,15 @@
int defsize;
int allsize;
bool group1;
+ bool group3;
bool group11;
bool immediate;
int flags;
- void (*emul)(struct nvmm_mem *, void (*)(struct nvmm_mem *), uint64_t *);
+ const struct x86_emul *emul;
};
struct x86_group_entry {
- void (*emul)(struct nvmm_mem *, void (*)(struct nvmm_mem *), uint64_t *);
+ const struct x86_emul *emul;
};
#define OPSIZE_BYTE 0x01
@@ -1005,13 +1066,19 @@
#define FLAG_ze 0x04
static const struct x86_group_entry group1[8] = {
- [1] = { .emul = x86_emul_or },
- [4] = { .emul = x86_emul_and },
- [6] = { .emul = x86_emul_xor }
+ [1] = { .emul = &x86_emul_or },
+ [4] = { .emul = &x86_emul_and },
+ [6] = { .emul = &x86_emul_xor },
+ [7] = { .emul = &x86_emul_cmp }
+};
+
+static const struct x86_group_entry group3[8] = {
+ [0] = { .emul = &x86_emul_test },
+ [1] = { .emul = &x86_emul_test }
};
static const struct x86_group_entry group11[8] = {
- [0] = { .emul = x86_emul_mov }
+ [0] = { .emul = &x86_emul_mov }
};
static const struct x86_opcode primary_opcode_table[] = {
@@ -1019,6 +1086,18 @@
* Group1
*/
{
+ /* Eb, Ib */
+ .byte = 0x80,
+ .regmodrm = true,
+ .regtorm = true,
+ .szoverride = false,
+ .defsize = OPSIZE_BYTE,
+ .allsize = -1,
+ .group1 = true,
+ .immediate = true,
+ .emul = NULL /* group1 */
+ },
+ {
/* Ev, Iz */
.byte = 0x81,
.regmodrm = true,
@@ -1046,6 +1125,35 @@
},
/*
+ * Group3
+ */
+ {
+ /* Eb, Ib */
+ .byte = 0xF6,
+ .regmodrm = true,
+ .regtorm = true,
+ .szoverride = false,
+ .defsize = OPSIZE_BYTE,
+ .allsize = -1,
+ .group3 = true,
+ .immediate = true,
+ .emul = NULL /* group3 */
+ },
+ {
+ /* Ev, Iz */
+ .byte = 0xF7,
+ .regmodrm = true,
+ .regtorm = true,
+ .szoverride = true,
+ .defsize = -1,
+ .allsize = OPSIZE_WORD|OPSIZE_DOUB|OPSIZE_QUAD,
+ .group3 = true,
+ .immediate = true,
+ .flags = FLAG_immz,
+ .emul = NULL /* group3 */
+ },
+
+ /*
* Group11
*/
{
@@ -1085,7 +1193,7 @@
.szoverride = false,
.defsize = OPSIZE_BYTE,
.allsize = -1,
- .emul = x86_emul_or
+ .emul = &x86_emul_or
},
{
/* Ev, Gv */
@@ -1095,7 +1203,7 @@
.szoverride = true,
.defsize = -1,
.allsize = OPSIZE_WORD|OPSIZE_DOUB|OPSIZE_QUAD,
- .emul = x86_emul_or
+ .emul = &x86_emul_or
},
{
/* Gb, Eb */
@@ -1105,7 +1213,7 @@
.szoverride = false,
.defsize = OPSIZE_BYTE,
.allsize = -1,
- .emul = x86_emul_or
+ .emul = &x86_emul_or
},
{
/* Gv, Ev */
@@ -1115,7 +1223,7 @@
.szoverride = true,
.defsize = -1,
.allsize = OPSIZE_WORD|OPSIZE_DOUB|OPSIZE_QUAD,
- .emul = x86_emul_or
+ .emul = &x86_emul_or
},
/*
@@ -1129,7 +1237,7 @@
.szoverride = false,
.defsize = OPSIZE_BYTE,
.allsize = -1,
- .emul = x86_emul_and
+ .emul = &x86_emul_and
},
{
/* Ev, Gv */
@@ -1139,7 +1247,7 @@
.szoverride = true,
.defsize = -1,
.allsize = OPSIZE_WORD|OPSIZE_DOUB|OPSIZE_QUAD,
- .emul = x86_emul_and
+ .emul = &x86_emul_and
},
Home |
Main Index |
Thread Index |
Old Index