pkgsrc-WIP-changes archive
haxm: Add patches with NetBSD support
Module Name: pkgsrc-wip
Committed By: Kamil Rytarowski <n54%gmx.com@localhost>
Pushed By: kamil
Date: Sat Nov 24 23:50:51 2018 +0100
Changeset: 3da20bf5f8fb49433bc0943397e4ab171dcf2770
Modified Files:
haxm/Makefile
haxm/distinfo
Added Files:
haxm/patches/patch-include_hax.h
haxm/patches/patch-include_hax__interface.h
haxm/patches/patch-include_hax__types.h
haxm/patches/patch-include_netbsd_hax__interface__netbsd.h
haxm/patches/patch-include_netbsd_hax__netbsd.h
haxm/patches/patch-include_netbsd_hax__types__netbsd.h
haxm/patches/patch-platforms_netbsd_.gitignore
haxm/patches/patch-platforms_netbsd_Makefile
haxm/patches/patch-platforms_netbsd_components.c
haxm/patches/patch-platforms_netbsd_hax__entry.c
haxm/patches/patch-platforms_netbsd_hax__event.c
haxm/patches/patch-platforms_netbsd_hax__host__mem.c
haxm/patches/patch-platforms_netbsd_hax__mem__alloc.c
haxm/patches/patch-platforms_netbsd_hax__mm.c
haxm/patches/patch-platforms_netbsd_hax__wrapper.c
Log Message:
haxm: Add patches with NetBSD support
It's good enough to get into SeaBIOS, but not good enough to boot Debian.
To see a diff of this commit:
https://wip.pkgsrc.org/cgi-bin/gitweb.cgi?p=pkgsrc-wip.git;a=commitdiff;h=3da20bf5f8fb49433bc0943397e4ab171dcf2770
Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.
diffstat:
haxm/Makefile | 9 +
haxm/distinfo | 15 +
haxm/patches/patch-include_hax.h | 27 +
haxm/patches/patch-include_hax__interface.h | 14 +
haxm/patches/patch-include_hax__types.h | 15 +
.../patch-include_netbsd_hax__interface__netbsd.h | 87 +++
haxm/patches/patch-include_netbsd_hax__netbsd.h | 129 ++++
.../patch-include_netbsd_hax__types__netbsd.h | 124 ++++
haxm/patches/patch-platforms_netbsd_.gitignore | 13 +
haxm/patches/patch-platforms_netbsd_Makefile | 58 ++
haxm/patches/patch-platforms_netbsd_components.c | 690 +++++++++++++++++++++
haxm/patches/patch-platforms_netbsd_hax__entry.c | 348 +++++++++++
haxm/patches/patch-platforms_netbsd_hax__event.c | 42 ++
.../patch-platforms_netbsd_hax__host__mem.c | 260 ++++++++
.../patch-platforms_netbsd_hax__mem__alloc.c | 247 ++++++++
haxm/patches/patch-platforms_netbsd_hax__mm.c | 150 +++++
haxm/patches/patch-platforms_netbsd_hax__wrapper.c | 336 ++++++++++
17 files changed, 2564 insertions(+)
diffs:
diff --git a/haxm/Makefile b/haxm/Makefile
index ca7188e3e6..5efbeb9ad1 100644
--- a/haxm/Makefile
+++ b/haxm/Makefile
@@ -13,4 +13,13 @@ LICENSE= modified-bsd
WRKSRC= ${WRKDIR}/haxm-${GITHUB_TAG}
+PKG_FAIL_REASON= "Build manually and install manually:"
+PKG_FAIL_REASON+= "cd platforms/netbsd && make"
+PKG_FAIL_REASON+= "modload ./intel-hax"
+PKG_FAIL_REASON+= "cd /dev && mknod HAX c 220 0"
+PKG_FAIL_REASON+= "cd /dev && mkdir hax_vm hax_vm00"
+PKG_FAIL_REASON+= "cd /dev/hax_vm && mknod vm00 c 222 0"
+PKG_FAIL_REASON+= "cd /dev/hax_vm00 && mknod vcpu00 c 221 0"
+PKG_FAIL_REASON+= "qemu-system-x86_64 --enable-hax"
+
.include "../../mk/bsd.pkg.mk"
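For reference, the manual procedure from PKG_FAIL_REASON above, collected into one shell session. This is a rough sketch only: the built module file is assumed to be intel-haxm.kmod (per KMOD= in the NetBSD Makefile patch further down), the character-device majors 220, 222 and 221 match the defaults in hax_entry.c and components.c, and disk.img is just a placeholder guest image.

    # build the kernel module from the HAXM source tree
    cd platforms/netbsd && make
    # load the module and create the device nodes the driver expects
    modload ./intel-haxm.kmod
    cd /dev
    mknod HAX c 220 0
    mkdir hax_vm hax_vm00
    mknod hax_vm/vm00 c 222 0
    mknod hax_vm00/vcpu00 c 221 0
    # smoke test with QEMU (disk.img is a placeholder)
    qemu-system-x86_64 --enable-hax -m 1024 -hda disk.img
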
diff --git a/haxm/distinfo b/haxm/distinfo
index 74bc840b9b..fceb112a67 100644
--- a/haxm/distinfo
+++ b/haxm/distinfo
@@ -4,3 +4,18 @@ SHA1 (haxm-0.226b5a8ef3b79ca3d235a629a83a4a2168c11e36-226b5a8ef3b79ca3d235a629a8
RMD160 (haxm-0.226b5a8ef3b79ca3d235a629a83a4a2168c11e36-226b5a8ef3b79ca3d235a629a83a4a2168c11e36.tar.gz) = 050b8801d2de265d53b4d05bb515259406129e57
SHA512 (haxm-0.226b5a8ef3b79ca3d235a629a83a4a2168c11e36-226b5a8ef3b79ca3d235a629a83a4a2168c11e36.tar.gz) = ddcd7bfdfb8aed40382cae4d4e5b4206c957bf2ca72d0f7cad698faf5cc10dd520dc37a892e331f4cc6f10c491b9c3ce0066f3eed2f0ae0958b662083e634adb
Size (haxm-0.226b5a8ef3b79ca3d235a629a83a4a2168c11e36-226b5a8ef3b79ca3d235a629a83a4a2168c11e36.tar.gz) = 218100 bytes
+SHA1 (patch-include_hax.h) = 52073f9f66321d87b78974d7f36921f31f8bf80e
+SHA1 (patch-include_hax__interface.h) = 8b55e3e91ea310dbf3ff308192acbac7e7839380
+SHA1 (patch-include_hax__types.h) = bf2ad44a63e4aa54848477b403f806d7d4dc35a2
+SHA1 (patch-include_netbsd_hax__interface__netbsd.h) = 384363e2053e39ba7dcb58e3c16846349b06a2fe
+SHA1 (patch-include_netbsd_hax__netbsd.h) = feed33e50c0c808d8ac9b00a7f3d847ad324e0a9
+SHA1 (patch-include_netbsd_hax__types__netbsd.h) = f4590a0e5a2d48779b00f519805bae6baa990509
+SHA1 (patch-platforms_netbsd_.gitignore) = baaa7368280e3cded6c08a28165299126fd944b1
+SHA1 (patch-platforms_netbsd_Makefile) = bed934dbb84924c19e3fb4372940710ca978a452
+SHA1 (patch-platforms_netbsd_components.c) = 84807e4ca85af669de361fee779fd671d0fb9d64
+SHA1 (patch-platforms_netbsd_hax__entry.c) = 40f8be710e174cb4993308070b8d833f901ec9d9
+SHA1 (patch-platforms_netbsd_hax__event.c) = aa24332d850a962166c280229d9877b26985aa0f
+SHA1 (patch-platforms_netbsd_hax__host__mem.c) = e24bae656743f64e2234480816a67c482c497a00
+SHA1 (patch-platforms_netbsd_hax__mem__alloc.c) = 06a631fc85080ec11fa9db0cdf58e2c230d35cdc
+SHA1 (patch-platforms_netbsd_hax__mm.c) = ca9b16de2253180f1e9b3c8e228ce23ab57fb6e6
+SHA1 (patch-platforms_netbsd_hax__wrapper.c) = ce3b5ea649ebf90e848109afa75d20bda56d7160
diff --git a/haxm/patches/patch-include_hax.h b/haxm/patches/patch-include_hax.h
new file mode 100644
index 0000000000..110b76d92a
--- /dev/null
+++ b/haxm/patches/patch-include_hax.h
@@ -0,0 +1,27 @@
+$NetBSD$
+
+--- include/hax.h.orig 2018-11-21 06:34:18.000000000 +0000
++++ include/hax.h
+@@ -119,9 +119,9 @@ void hax_vfree_aligned(void *va, uint32_
+ uint32_t alignment);
+
+ struct hax_vcpu_mem {
+- uint32_t size;
+- uint64_t uva;
+- void *kva;
++ vsize_t size;
++ vaddr_t uva;
++ vaddr_t kva;
+ void *hinfo;
+ };
+
+@@ -269,6 +269,9 @@ int hax_em64t_enabled(void);
+ #ifdef HAX_PLATFORM_LINUX
+ #include "linux/hax_linux.h"
+ #endif
++#ifdef HAX_PLATFORM_NETBSD
++#include "netbsd/hax_netbsd.h"
++#endif
+ #ifdef HAX_PLATFORM_WINDOWS
+ #include "windows/hax_windows.h"
+ #endif
diff --git a/haxm/patches/patch-include_hax__interface.h b/haxm/patches/patch-include_hax__interface.h
new file mode 100644
index 0000000000..ce5a41359a
--- /dev/null
+++ b/haxm/patches/patch-include_hax__interface.h
@@ -0,0 +1,14 @@
+$NetBSD$
+
+--- include/hax_interface.h.orig 2018-11-21 06:34:18.000000000 +0000
++++ include/hax_interface.h
+@@ -45,6 +45,9 @@
+ #ifdef HAX_PLATFORM_LINUX
+ #include "linux/hax_interface_linux.h"
+ #endif
++#ifdef HAX_PLATFORM_NETBSD
++#include "netbsd/hax_interface_netbsd.h"
++#endif
+ #ifdef HAX_PLATFORM_WINDOWS
+ #include "windows/hax_interface_windows.h"
+ #endif
diff --git a/haxm/patches/patch-include_hax__types.h b/haxm/patches/patch-include_hax__types.h
new file mode 100644
index 0000000000..54a4f803a9
--- /dev/null
+++ b/haxm/patches/patch-include_hax__types.h
@@ -0,0 +1,15 @@
+$NetBSD$
+
+--- include/hax_types.h.orig 2018-11-21 06:34:18.000000000 +0000
++++ include/hax_types.h
+@@ -78,6 +78,10 @@
+ #elif defined(__linux__)
+ #define HAX_PLATFORM_LINUX
+ #include "linux/hax_types_linux.h"
++// NetBSD
++#elif defined(__NetBSD__)
++#define HAX_PLATFORM_NETBSD
++#include "netbsd/hax_types_netbsd.h"
+ // Windows
+ #elif defined(_WIN32)
+ #define HAX_PLATFORM_WINDOWS
diff --git a/haxm/patches/patch-include_netbsd_hax__interface__netbsd.h b/haxm/patches/patch-include_netbsd_hax__interface__netbsd.h
new file mode 100644
index 0000000000..55e4ccf35e
--- /dev/null
+++ b/haxm/patches/patch-include_netbsd_hax__interface__netbsd.h
@@ -0,0 +1,87 @@
+$NetBSD$
+
+--- include/netbsd/hax_interface_netbsd.h.orig 2018-11-24 22:22:37.786333630 +0000
++++ include/netbsd/hax_interface_netbsd.h
+@@ -0,0 +1,82 @@
++/*
++ * Copyright (c) 2011 Intel Corporation
++ * Copyright (c) 2018 Kamil Rytarowski
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ *
++ * 1. Redistributions of source code must retain the above copyright notice,
++ * this list of conditions and the following disclaimer.
++ *
++ * 2. Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ *
++ * 3. Neither the name of the copyright holder nor the names of its
++ * contributors may be used to endorse or promote products derived from
++ * this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef HAX_NETBSD_HAX_INTERFACE_NETBSD_H_
++#define HAX_NETBSD_HAX_INTERFACE_NETBSD_H_
++
++#include <sys/param.h>
++#include <sys/types.h>
++#include <sys/ioctl.h>
++#include <sys/ioccom.h>
++
++/* The mac specific interface to qemu because of mac's
++ * special handling like hax tunnel allocation etc */
++/* HAX model level ioctl */
++#define HAX_IOCTL_VERSION _IOWR(0, 0x20, struct hax_module_version)
++#define HAX_IOCTL_CREATE_VM _IOWR(0, 0x21, uint32_t)
++#define HAX_IOCTL_DESTROY_VM _IOW(0, 0x22, uint32_t)
++#define HAX_IOCTL_CAPABILITY _IOR(0, 0x23, struct hax_capabilityinfo)
++#define HAX_IOCTL_SET_MEMLIMIT _IOWR(0, 0x24, struct hax_set_memlimit)
++
++// Only for backward compatibility with old Qemu.
++#define HAX_VM_IOCTL_VCPU_CREATE_ORIG _IOR(0, 0x80, int)
++
++#define HAX_VM_IOCTL_VCPU_CREATE _IOWR(0, 0x80, uint32_t)
++#define HAX_VM_IOCTL_ALLOC_RAM _IOWR(0, 0x81, struct hax_alloc_ram_info)
++#define HAX_VM_IOCTL_SET_RAM _IOWR(0, 0x82, struct hax_set_ram_info)
++#define HAX_VM_IOCTL_VCPU_DESTROY _IOR(0, 0x83, uint32_t)
++#define HAX_VM_IOCTL_ADD_RAMBLOCK _IOW(0, 0x85, struct hax_ramblock_info)
++#define HAX_VM_IOCTL_SET_RAM2 _IOWR(0, 0x86, struct hax_set_ram_info2)
++#define HAX_VM_IOCTL_PROTECT_RAM _IOWR(0, 0x87, struct hax_protect_ram_info)
++
++#define HAX_VCPU_IOCTL_RUN _IO(0, 0xc0)
++#define HAX_VCPU_IOCTL_SET_MSRS _IOWR(0, 0xc1, struct hax_msr_data)
++#define HAX_VCPU_IOCTL_GET_MSRS _IOWR(0, 0xc2, struct hax_msr_data)
++
++#define HAX_VCPU_IOCTL_SET_FPU _IOW(0, 0xc3, struct fx_layout)
++#define HAX_VCPU_IOCTL_GET_FPU _IOR(0, 0xc4, struct fx_layout)
++
++#define HAX_VCPU_IOCTL_SETUP_TUNNEL _IOWR(0, 0xc5, struct hax_tunnel_info)
++#define HAX_VCPU_IOCTL_INTERRUPT _IOWR(0, 0xc6, uint32_t)
++#define HAX_VCPU_SET_REGS _IOWR(0, 0xc7, struct vcpu_state_t)
++#define HAX_VCPU_GET_REGS _IOWR(0, 0xc8, struct vcpu_state_t)
++
++/* API 2.0 */
++#define HAX_VM_IOCTL_NOTIFY_QEMU_VERSION _IOW(0, 0x84, struct hax_qemu_version)
++
++#define HAX_IOCTL_VCPU_DEBUG _IOW(0, 0xc9, struct hax_debug_t)
++
++#define HAX_KERNEL64_CS 0x80
++#define HAX_KERNEL32_CS 0x08
++
++#define is_compatible() 0
++
++#endif // HAX_NETBSD_HAX_INTERFACE_NETBSD_H_
diff --git a/haxm/patches/patch-include_netbsd_hax__netbsd.h b/haxm/patches/patch-include_netbsd_hax__netbsd.h
new file mode 100644
index 0000000000..3829a05843
--- /dev/null
+++ b/haxm/patches/patch-include_netbsd_hax__netbsd.h
@@ -0,0 +1,129 @@
+$NetBSD$
+
+--- include/netbsd/hax_netbsd.h.orig 2018-11-24 22:22:37.786457524 +0000
++++ include/netbsd/hax_netbsd.h
+@@ -0,0 +1,124 @@
++/*
++ * Copyright (c) 2011 Intel Corporation
++ * Copyright (c) 2018 Kamil Rytarowski
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ *
++ * 1. Redistributions of source code must retain the above copyright notice,
++ * this list of conditions and the following disclaimer.
++ *
++ * 2. Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ *
++ * 3. Neither the name of the copyright holder nor the names of its
++ * contributors may be used to endorse or promote products derived from
++ * this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef HAX_NETBSD_HAX_NETBSD_H_
++#define HAX_NETBSD_HAX_NETBSD_H_
++
++#define HAX_RAM_ENTRY_SIZE 0x4000000
++
++hax_spinlock *hax_spinlock_alloc_init(void);
++void hax_spinlock_free(hax_spinlock *lock);
++void hax_spin_lock(hax_spinlock *lock);
++void hax_spin_unlock(hax_spinlock *lock);
++
++hax_mutex hax_mutex_alloc_init(void);
++void hax_mutex_lock(hax_mutex lock);
++void hax_mutex_unlock(hax_mutex lock);
++void hax_mutex_free(hax_mutex lock);
++
++/* Return true if the bit is set already */
++int hax_test_and_set_bit(int bit, uint64_t *memory);
++
++/* Return true if the bit is cleared already */
++int hax_test_and_clear_bit(int bit, uint64_t *memory);
++
++/* Don't care for the big endian situation */
++static inline bool hax_test_bit(int bit, uint64_t *memory)
++{
++ int byte = bit / 8;
++ unsigned char *p;
++ int offset = bit % 8;
++
++ p = (unsigned char *)memory + byte;
++ return !!(*p & (1 << offset));
++}
++
++// memcpy_s() is part of the optional Bounds Checking Interfaces specified in
++// Annex K of the C11 standard:
++// http://en.cppreference.com/w/c/string/byte/memcpy
++// However, it is not implemented by Clang:
++// https://stackoverflow.com/questions/40829032/how-to-install-c11-compiler-on-mac-os-with-optional-string-functions-included
++// Provide a simplified implementation here so memcpy_s() can be used instead of
++// memcpy() everywhere else, which helps reduce the number of Klocwork warnings.
++static inline int memcpy_s(void *dest, size_t destsz, const void *src,
++ size_t count)
++{
++ char *dest_start = (char *)dest;
++ char *dest_end = (char *)dest + destsz;
++ char *src_start = (char *)src;
++ char *src_end = (char *)src + count;
++ bool overlap;
++
++ if (count == 0)
++ return 0;
++
++ if (!dest || destsz == 0)
++ return -EINVAL;
++
++ overlap = src_start < dest_start
++ ? dest_start < src_end : src_start < dest_end;
++ if (!src || count > destsz || overlap) {
++ memset(dest, 0, destsz);
++ return -EINVAL;
++ }
++
++ memcpy(dest, src, count);
++ return 0;
++}
++
++bool hax_cmpxchg32(uint32_t old_val, uint32_t new_val, volatile uint32_t *addr);
++bool hax_cmpxchg64(uint64_t old_val, uint64_t new_val, volatile uint64_t *addr);
++
++static inline bool cpu_is_online(int cpu)
++{
++ if (cpu < 0 || cpu >= max_cpus)
++ return 0;
++ return !!(((mword)1 << cpu) & cpu_online_map);
++}
++
++int hax_notify_host_event(enum hax_notify_event event, uint32_t *param,
++ uint32_t size);
++
++extern int default_hax_log_level;
++
++void hax_error(char *fmt, ...);
++void hax_warning(char *fmt, ...);
++void hax_info(char *fmt, ...);
++void hax_debug(char *fmt, ...);
++void hax_log(char *fmt, ...);
++
++#define hax_log hax_info
++
++#define hax_panic panic
++
++#define hax_assert(condition) KASSERT(condition)
++
++#endif // HAX_NETBSD_HAX_NETBSD_H_
diff --git a/haxm/patches/patch-include_netbsd_hax__types__netbsd.h b/haxm/patches/patch-include_netbsd_hax__types__netbsd.h
new file mode 100644
index 0000000000..d31891ea87
--- /dev/null
+++ b/haxm/patches/patch-include_netbsd_hax__types__netbsd.h
@@ -0,0 +1,124 @@
+$NetBSD$
+
+--- include/netbsd/hax_types_netbsd.h.orig 2018-11-24 22:22:37.786579658 +0000
++++ include/netbsd/hax_types_netbsd.h
+@@ -0,0 +1,119 @@
++/*
++ * Copyright (c) 2011 Intel Corporation
++ * Copyright (c) 2018 Kamil Rytarowski
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ *
++ * 1. Redistributions of source code must retain the above copyright notice,
++ * this list of conditions and the following disclaimer.
++ *
++ * 2. Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ *
++ * 3. Neither the name of the copyright holder nor the names of its
++ * contributors may be used to endorse or promote products derived from
++ * this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef HAX_NETBSD_HAX_TYPES_NETBSD_H_
++#define HAX_NETBSD_HAX_TYPES_NETBSD_H_
++
++#include <sys/param.h>
++#include <sys/types.h>
++
++// Signed Types
++typedef int8_t int8;
++typedef int16_t int16;
++typedef int32_t int32;
++typedef int64_t int64;
++
++// Unsigned Types
++typedef uint8_t uint8;
++typedef uint16_t uint16;
++typedef uint32_t uint32;
++typedef uint64_t uint64;
++
++typedef unsigned int uint;
++typedef unsigned long ulong;
++typedef unsigned long ulong_t;
++
++#if defined(__i386__)
++typedef uint32_t mword;
++#endif
++#if defined (__x86_64__)
++typedef uint64_t mword;
++#endif
++typedef mword HAX_VADDR_T;
++
++#include "../hax_list.h"
++struct hax_page {
++ void *kva;
++ struct vm_page *page;
++ struct pglist *pglist;
++ uint64_t pa;
++ uint32_t order;
++ uint32_t flags;
++ struct hax_link_list list;
++ size_t size;
++};
++
++typedef struct hax_memdesc_user {
++ vaddr_t uva;
++ vsize_t size;
++} hax_memdesc_user;
++
++typedef struct hax_kmap_user {
++ vaddr_t kva;
++ vsize_t size;
++} hax_kmap_user;
++
++typedef struct hax_memdesc_phys {
++ struct vm_page *page;
++} hax_memdesc_phys;
++
++typedef struct hax_kmap_phys {
++ vaddr_t kva;
++} hax_kmap_phys;
++
++typedef struct hax_spinlock hax_spinlock;
++
++typedef int hax_cpumap_t;
++
++static inline hax_cpumap_t cpu2cpumap(int cpu)
++{
++ return (0x1 << cpu);
++}
++
++/* Remove this later */
++#define is_leaf(x) 1
++
++typedef mword preempt_flag;
++typedef kmutex_t *hax_mutex;
++typedef uint32_t hax_atomic_t;
++
++/* Return the value before add */
++hax_atomic_t hax_atomic_add(volatile hax_atomic_t *atom, uint32_t value);
++
++/* Return the value before the increment */
++hax_atomic_t hax_atomic_inc(volatile hax_atomic_t *atom);
++
++/* Return the value before the decrement */
++hax_atomic_t hax_atomic_dec(volatile hax_atomic_t *atom);
++
++void hax_smp_mb(void);
++
++#endif // HAX_NETBSD_HAX_TYPES_NETBSD_H_
diff --git a/haxm/patches/patch-platforms_netbsd_.gitignore b/haxm/patches/patch-platforms_netbsd_.gitignore
new file mode 100644
index 0000000000..e8616aa23c
--- /dev/null
+++ b/haxm/patches/patch-platforms_netbsd_.gitignore
@@ -0,0 +1,13 @@
+$NetBSD$
+
+--- platforms/netbsd/.gitignore.orig 2018-11-24 22:22:37.786680836 +0000
++++ platforms/netbsd/.gitignore
+@@ -0,0 +1,8 @@
++# Build
++*.kmod.map
++*.kmod
++*~
++amd64
++i386
++machine
++x86
diff --git a/haxm/patches/patch-platforms_netbsd_Makefile b/haxm/patches/patch-platforms_netbsd_Makefile
new file mode 100644
index 0000000000..a81b58d76e
--- /dev/null
+++ b/haxm/patches/patch-platforms_netbsd_Makefile
@@ -0,0 +1,58 @@
+$NetBSD$
+
+--- platforms/netbsd/Makefile.orig 2018-11-24 22:22:37.786772493 +0000
++++ platforms/netbsd/Makefile
+@@ -0,0 +1,53 @@
++S?= /usr/src/sys
++
++KMOD= intel-haxm
++
++# toplevel
++SRCS+= components.c
++SRCS+= hax_entry.c
++SRCS+= hax_event.c
++SRCS+= hax_host_mem.c
++SRCS+= hax_mem_alloc.c
++SRCS+= hax_mm.c
++SRCS+= hax_wrapper.c
++
++# core
++.PATH: ../../core
++SRCS+= chunk.c
++SRCS+= cpu.c
++SRCS+= cpuid.c
++SRCS+= dump.c
++SRCS+= emulate.c
++SRCS+= ept.c
++SRCS+= ept2.c
++SRCS+= ept_tree.c
++SRCS+= gpa_space.c
++SRCS+= hax.c
++SRCS+= ia32.c
++SRCS+= intr_exc.c
++SRCS+= memory.c
++SRCS+= memslot.c
++SRCS+= name.c
++SRCS+= page_walker.c
++SRCS+= ramblock.c
++SRCS+= vcpu.c
++SRCS+= vm.c
++SRCS+= vmx.c
++SRCS+= vtlb.c
++
++.if ${MACHINE} == "amd64"
++TARGET_ELF= elf64
++.else
++.error Not supported
++.endif
++
++.SUFFIXES: .asm .o
++.PATH.asm: ../../core
++.asm.o:
++ nasm -f ${TARGET_ELF} -o ${.TARGET} ${.IMPSRC}
++
++SRCS+= emulate_ops.asm
++SRCS+= ia32_ops.asm
++SRCS+= vmx_ops.asm
++
++.include <bsd.kmodule.mk>
diff --git a/haxm/patches/patch-platforms_netbsd_components.c b/haxm/patches/patch-platforms_netbsd_components.c
new file mode 100644
index 0000000000..e082b0ef0d
--- /dev/null
+++ b/haxm/patches/patch-platforms_netbsd_components.c
@@ -0,0 +1,690 @@
+$NetBSD$
+
+--- platforms/netbsd/components.c.orig 2018-11-24 22:22:37.786983117 +0000
++++ platforms/netbsd/components.c
+@@ -0,0 +1,685 @@
++/*
++ * Copyright (c) 2018 Kamil Rytarowski
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ *
++ * 1. Redistributions of source code must retain the above copyright notice,
++ * this list of conditions and the following disclaimer.
++ *
++ * 2. Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ *
++ * 3. Neither the name of the copyright holder nor the names of its
++ * contributors may be used to endorse or promote products derived from
++ * this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include <sys/param.h>
++#include <sys/conf.h>
++#include <sys/device.h>
++#include <sys/kernel.h>
++#include <sys/kmem.h>
++#include <sys/lwp.h>
++#include <sys/proc.h>
++#include <sys/module.h>
++
++#include "../../core/include/hax_core_interface.h"
++
++static int hax_vcpu_cmajor = 221, hax_vcpu_bmajor = -1;
++static int hax_vm_cmajor = 222, hax_vm_bmajor = -1;
++
++#define HAX_VM_DEVFS_FMT "hax_vm/vm%02d"
++#define HAX_VCPU_DEVFS_FMT "hax_vm%02d/vcpu%02d"
++
++typedef struct hax_vm_netbsd_t {
++ struct vm_t *cvm;
++ int id;
++ struct cdevsw dev;
++ char *devname;
++} hax_vm_netbsd_t;
++
++typedef struct hax_vcpu_netbsd_t {
++ struct vcpu_t *cvcpu;
++ struct hax_vm_netbsd_t *vm;
++ int id;
++ struct cdevsw dev;
++ char *devname;
++} hax_vcpu_netbsd_t;
++
++
++struct hax_vm_softc {
++ device_t sc_dev;
++ struct hax_vm_netbsd_t *vm;
++};
++
++static device_t hax_vm_sc_self;
++
++extern struct cfdriver hax_vm_cd;
++
++static int hax_vm_match(device_t, cfdata_t, void *);
++static void hax_vm_attach(device_t, device_t, void *);
++static int hax_vm_detach(device_t, int);
++
++CFATTACH_DECL_NEW(hax_vm, sizeof(struct hax_vm_softc),
++ hax_vm_match, hax_vm_attach, hax_vm_detach, NULL);
++
++struct hax_vcpu_softc {
++ device_t sc_dev;
++ struct hax_vcpu_netbsd_t *vcpu;
++};
++
++static device_t hax_vcpu_sc_self;
++
++extern struct cfdriver hax_vcpu_cd;
++
++static int hax_vcpu_match(device_t, cfdata_t, void *);
++static void hax_vcpu_attach(device_t, device_t, void *);
++static int hax_vcpu_detach(device_t, int);
++
++CFATTACH_DECL_NEW(hax_vcpu, sizeof(struct hax_vcpu_softc),
++ hax_vcpu_match, hax_vcpu_attach, hax_vcpu_detach, NULL);
++
++dev_type_open(hax_vm_open);
++dev_type_close(hax_vm_close);
++dev_type_ioctl(hax_vm_ioctl);
++
++dev_type_open(hax_vcpu_open);
++dev_type_close(hax_vcpu_close);
++dev_type_ioctl(hax_vcpu_ioctl);
++
++/* Component management */
++
++static hax_vcpu_netbsd_t* hax_vcpu_create_netbsd(struct vcpu_t *cvcpu,
++ hax_vm_netbsd_t *vm,
++ int vcpu_id)
++{
++ hax_vcpu_netbsd_t *vcpu;
++
++ if (!cvcpu || !vm)
++ return NULL;
++
++ vcpu = kmem_zalloc(sizeof(hax_vcpu_netbsd_t), KM_SLEEP);
++ vcpu->cvcpu = cvcpu;
++ vcpu->id = vcpu_id;
++ vcpu->vm = vm;
++ set_vcpu_host(cvcpu, vcpu);
++ return vcpu;
++}
++
++static void hax_vcpu_destroy_netbsd(hax_vcpu_netbsd_t *vcpu)
++{
++ struct vcpu_t *cvcpu;
++
++ if (!vcpu)
++ return;
++
++ cvcpu = vcpu->cvcpu;
++ hax_vcpu_destroy_hax_tunnel(cvcpu);
++ set_vcpu_host(cvcpu, NULL);
++ vcpu->cvcpu = NULL;
++ kmem_free(vcpu, sizeof(hax_vcpu_netbsd_t));
++}
++
++int hax_vcpu_create_host(struct vcpu_t *cvcpu, void *vm_host, int vm_id,
++ int vcpu_id)
++{
++ int err;
++ hax_vcpu_netbsd_t *vcpu;
++ hax_vm_netbsd_t *vm;
++
++ vm = (hax_vm_netbsd_t *)vm_host;
++ vcpu = hax_vcpu_create_netbsd(cvcpu, vm, vcpu_id);
++ if (!vcpu)
++ return -1;
++
++ vcpu->devname = kmem_asprintf(HAX_VCPU_DEVFS_FMT, vm_id, vcpu_id);
++ vcpu->dev.d_open = hax_vcpu_open;
++ vcpu->dev.d_close = hax_vcpu_close;
++ vcpu->dev.d_read = noread;
++ vcpu->dev.d_write = nowrite;
++ vcpu->dev.d_ioctl = hax_vcpu_ioctl;
++ vcpu->dev.d_stop = nostop;
++ vcpu->dev.d_tty = notty;
++ vcpu->dev.d_poll = nopoll;
++ vcpu->dev.d_mmap = nommap;
++ vcpu->dev.d_kqfilter = nokqfilter;
++ vcpu->dev.d_discard = nodiscard;
++ vcpu->dev.d_flag = D_OTHER;
++
++ err = devsw_attach(vcpu->devname, NULL, &hax_vcpu_bmajor, &vcpu->dev, &hax_vcpu_cmajor);
++ if (err) {
++ hax_error("Failed to register HAXM-VCPU device\n");
++ hax_vcpu_destroy_netbsd(vcpu);
++ return -1;
++ }
++ ((struct hax_vcpu_softc *)device_private(hax_vcpu_sc_self))->vcpu = vcpu;
++ hax_info("Created HAXM-VCPU device '%s'\n", vcpu->devname);
++ return 0;
++}
++
++int hax_vcpu_destroy_host(struct vcpu_t *cvcpu, void *vcpu_host)
++{
++ hax_vcpu_netbsd_t *vcpu;
++
++ vcpu = (hax_vcpu_netbsd_t *)vcpu_host;
++ devsw_detach(NULL, &vcpu->dev);
++ kmem_free(vcpu->devname, strlen(vcpu->devname) + 1);
++
++ hax_vcpu_destroy_netbsd(vcpu);
++ return 0;
++}
++
++static hax_vm_netbsd_t *hax_vm_create_netbsd(struct vm_t *cvm, int vm_id)
++{
++ hax_vm_netbsd_t *vm;
++
++ if (!cvm)
++ return NULL;
++
++ vm = kmem_zalloc(sizeof(hax_vm_netbsd_t), KM_SLEEP);
++ vm->cvm = cvm;
++ vm->id = vm_id;
++ set_vm_host(cvm, vm);
++ return vm;
++}
++
++static void hax_vm_destroy_netbsd(hax_vm_netbsd_t *vm)
++{
++ struct vm_t *cvm;
++
++ if (!vm)
++ return;
++
++ cvm = vm->cvm;
++ set_vm_host(cvm, NULL);
++ vm->cvm = NULL;
++ hax_vm_free_all_ram(cvm);
++ kmem_free(vm, sizeof(hax_vm_netbsd_t));
++}
++
++int hax_vm_create_host(struct vm_t *cvm, int vm_id)
++{
++ int err;
++ hax_vm_netbsd_t *vm;
++
++ vm = hax_vm_create_netbsd(cvm, vm_id);
++ if (!vm)
++ return -1;
++
++ vm->devname = kmem_asprintf(HAX_VM_DEVFS_FMT, vm_id);
++ vm->dev.d_open = hax_vm_open;
++ vm->dev.d_close = hax_vm_close;
++ vm->dev.d_read = noread;
++ vm->dev.d_write = nowrite;
++ vm->dev.d_ioctl = hax_vm_ioctl;
++ vm->dev.d_stop = nostop;
++ vm->dev.d_tty = notty;
++ vm->dev.d_poll = nopoll;
++ vm->dev.d_mmap = nommap;
++ vm->dev.d_kqfilter = nokqfilter;
++ vm->dev.d_discard = nodiscard;
++ vm->dev.d_flag = D_OTHER;
++
++ err = devsw_attach(vm->devname, NULL, &hax_vm_bmajor, &vm->dev, &hax_vm_cmajor);
++ if (err) {
++ hax_error("Failed to register HAXM-VM device\n");
++ hax_vm_destroy_netbsd(vm);
++ return -1;
++ }
++ ((struct hax_vm_softc *)device_private(hax_vm_sc_self))->vm = vm;
++ hax_info("Created HAXM-VM device '%s'\n", vm->devname);
++ return 0;
++}
++
++/* When coming here, all vcpus should have been destroyed already. */
++int hax_vm_destroy_host(struct vm_t *cvm, void *vm_host)
++{
++ hax_vm_netbsd_t *vm;
++
++ vm = (hax_vm_netbsd_t *)vm_host;
++ devsw_detach(NULL, &vm->dev);
++ kmem_free(vm->devname, strlen(vm->devname) + 1);
++
++ hax_vm_destroy_netbsd(vm);
++ return 0;
++}
++
++/* No corresponding function in netbsd side, it can be cleaned later. */
++int hax_destroy_host_interface(void)
++{
++ return 0;
++}
++
++/* VCPU operations */
++
++int hax_vcpu_open(dev_t self, int flag __unused, int mode __unused,
++ struct lwp *l __unused)
++{
++ struct hax_vcpu_softc *sc;
++ struct vcpu_t *cvcpu;
++ struct hax_vcpu_netbsd_t *vcpu;
++ int ret;
++
++ sc = device_lookup_private(&hax_vcpu_cd, minor(self));
++ if (sc == NULL) {
++ hax_error("device_lookup_private() for hax_vcpu failed\n");
++ return ENODEV;
++ }
++ vcpu = sc->vcpu;
++ cvcpu = hax_get_vcpu(vcpu->vm->id, vcpu->id, 1);
++
++ hax_log_level(HAX_LOGD, "HAX vcpu open called\n");
++ if (!cvcpu)
++ return ENODEV;
++
++ ret = hax_vcpu_core_open(cvcpu);
++ if (ret)
++ hax_error("Failed to open core vcpu\n");
++ hax_put_vcpu(cvcpu);
++ return ret;
++}
++
++int hax_vcpu_close(dev_t self, int flag __unused, int mode __unused,
++ struct lwp *l __unused)
++{
++ int ret;
++ struct hax_vcpu_softc *sc;
++ struct vcpu_t *cvcpu;
++ struct hax_vcpu_netbsd_t *vcpu;
++
++ sc = device_lookup_private(&hax_vcpu_cd, minor(self));
++ if (sc == NULL) {
++ hax_error("device_lookup_private() for hax_vcpu failed\n");
++ return ENODEV;
++ }
++ vcpu = sc->vcpu;
++ cvcpu = hax_get_vcpu(vcpu->vm->id, vcpu->id, 1);
++
++ hax_log_level(HAX_LOGD, "HAX vcpu close called\n");
++ if (!cvcpu) {
++ hax_error("Failed to find the vcpu, is it closed already?\n");
++ return 0;
++ }
++
++ /* put the one for vcpu create */
++ hax_put_vcpu(cvcpu);
++ /* put the one just held */
++ hax_put_vcpu(cvcpu);
++
++ return 0;
++}
++
++int hax_vcpu_ioctl(dev_t self, u_long cmd, void *data, int flag,
++ struct lwp *l __unused)
++{
++ int ret = 0;
++ struct hax_vcpu_softc *sc;
++ struct vcpu_t *cvcpu;
++ struct hax_vcpu_netbsd_t *vcpu;
++
++ sc = device_lookup_private(&hax_vcpu_cd, minor(self));
++ if (sc == NULL) {
++ hax_error("device_lookup_private() for hax_vcpu failed\n");
++ return ENODEV;
++ }
++ vcpu = sc->vcpu;
++ cvcpu = hax_get_vcpu(vcpu->vm->id, vcpu->id, 1);
++
++ if (!cvcpu)
++ return ENODEV;
++
++ switch (cmd) {
++ case HAX_VCPU_IOCTL_RUN:
++ ret = vcpu_execute(cvcpu);
++ break;
++ case HAX_VCPU_IOCTL_SETUP_TUNNEL: {
++ struct hax_tunnel_info *info;
++ info = (struct hax_tunnel_info *)data;
++ ret = hax_vcpu_setup_hax_tunnel(cvcpu, info);
++ break;
++ }
++ case HAX_VCPU_IOCTL_SET_MSRS: {
++ struct hax_msr_data *msrs;
++ msrs = (struct hax_msr_data *)data;
++ struct vmx_msr *msr;
++ int i, fail;
++
++ msr = msrs->entries;
++ /* nr_msr needs to be verified */
++ if (msrs->nr_msr >= 0x20) {
++ hax_error("MSRS invalid!\n");
++ ret = EFAULT;
++ break;
++ }
++ for (i = 0; i < msrs->nr_msr; i++, msr++) {
++ fail = vcpu_set_msr(cvcpu, msr->entry, msr->value);
++ if (fail) {
++ break;
++ }
++ }
++ msrs->done = i;
++ break;
++ }
++ case HAX_VCPU_IOCTL_GET_MSRS: {
++ struct hax_msr_data *msrs;
++ msrs = (struct hax_msr_data *)data;
++ struct vmx_msr *msr;
++ int i, fail;
++
++ msr = msrs->entries;
++ if(msrs->nr_msr >= 0x20) {
++ hax_error("MSRS invalid!\n");
++ ret = EFAULT;
++ break;
++ }
++ for (i = 0; i < msrs->nr_msr; i++, msr++) {
++ fail = vcpu_get_msr(cvcpu, msr->entry, &msr->value);
++ if (fail) {
++ break;
++ }
++ }
++ msrs->done = i;
++ break;
++ }
++ case HAX_VCPU_IOCTL_SET_FPU: {
++ struct fx_layout *fl;
++ fl = (struct fx_layout *)data;
++ ret = vcpu_put_fpu(cvcpu, fl);
++ break;
++ }
++ case HAX_VCPU_IOCTL_GET_FPU: {
++ struct fx_layout *fl;
++ fl = (struct fx_layout *)data;
++ ret = vcpu_get_fpu(cvcpu, fl);
++ break;
++ }
++ case HAX_VCPU_SET_REGS: {
++ struct vcpu_state_t *vc_state;
++ vc_state = (struct vcpu_state_t *)data;
++ ret = vcpu_set_regs(cvcpu, vc_state);
++ break;
++ }
++ case HAX_VCPU_GET_REGS: {
++ struct vcpu_state_t *vc_state;
++ vc_state = (struct vcpu_state_t *)data;
++ ret = vcpu_get_regs(cvcpu, vc_state);
++ break;
++ }
++ case HAX_VCPU_IOCTL_INTERRUPT: {
++ uint8_t *vector;
++ vector = (uint8_t *)data;
++ vcpu_interrupt(cvcpu, *vector);
++ break;
++ }
++ case HAX_IOCTL_VCPU_DEBUG: {
++ struct hax_debug_t *hax_debug;
++ hax_debug = (struct hax_debug_t *)data;
++ vcpu_debug(cvcpu, hax_debug);
++ break;
++ }
++ default:
++ // TODO: Print information about the process that sent the ioctl.
++ hax_error("Unknown VCPU IOCTL %#lx, pid=%d ('%s')\n", cmd,
++ l->l_proc->p_pid, l->l_proc->p_comm);
++ ret = ENOSYS;
++ break;
++ }
++ hax_put_vcpu(cvcpu);
++ return ret;
++}
++
++/* VM operations */
++
++int hax_vm_open(dev_t self, int flag __unused, int mode __unused,
++ struct lwp *l __unused)
++{
++ struct hax_vm_softc *sc;
++ struct vm_t *cvm;
++ struct hax_vm_netbsd_t *vm;
++ int ret;
++
++ sc = device_lookup_private(&hax_vm_cd, minor(self));
++ if (sc == NULL) {
++ hax_error("device_lookup_private() for hax_vm failed\n");
++ return ENODEV;
++ }
++
++ vm = sc->vm;
++ cvm = hax_get_vm(vm->id, 1);
++ if (!cvm)
++ return ENODEV;
++
++ ret = hax_vm_core_open(cvm);
++ hax_put_vm(cvm);
++ hax_log_level(HAX_LOGI, "Open VM\n");
++ return ret;
++}
++
++int hax_vm_close(dev_t self __unused, int flag __unused, int mode __unused,
++ struct lwp *l __unused)
++{
++ struct hax_vm_softc *sc;
++ struct vm_t *cvm;
++ struct hax_vm_netbsd_t *vm;
++ int ret;
++
++ sc = device_lookup_private(&hax_vm_cd, minor(self));
++ if (sc == NULL) {
++ hax_error("device_lookup_private() for hax_vm failed\n");
++ return ENODEV;
++ }
++
++ vm = sc->vm;
++ cvm = hax_get_vm(vm->id, 1);
++
++ hax_log_level(HAX_LOGI, "Close VM\n");
++ if (cvm) {
++ /* put the ref get just now */
++ hax_put_vm(cvm);
++ hax_put_vm(cvm);
++ }
++ return 0;
++}
++
++int hax_vm_ioctl(dev_t self __unused, u_long cmd, void *data, int flag,
++ struct lwp *l __unused)
++{
++ int ret = 0;
++ struct vm_t *cvm;
++ struct hax_vm_netbsd_t *vm;
++ struct hax_vm_softc *sc;
++
++ sc = device_lookup_private(&hax_vm_cd, minor(self));
++ if (sc == NULL) {
++ hax_error("device_lookup_private() for hax_vm failed\n");
++ return ENODEV;
++ }
++ vm = sc->vm;
++ cvm = hax_get_vm(vm->id, 1);
++ if (!cvm)
++ return ENODEV;
++
++ switch (cmd) {
++ case HAX_VM_IOCTL_VCPU_CREATE:
++ case HAX_VM_IOCTL_VCPU_CREATE_ORIG: {
++ uint32_t *vcpu_id, vm_id;
++ vcpu_id = (uint32_t *)data;
++ struct vcpu_t *cvcpu;
++
++ vm_id = vm->id;
++ cvcpu = vcpu_create(cvm, vm, *vcpu_id);
++ if (!cvcpu) {
++ hax_error("Failed to create vcpu %x on vm %x\n", *vcpu_id, vm_id);
++ ret = -EINVAL;
++ break;
++ }
++ break;
++ }
++ case HAX_VM_IOCTL_ALLOC_RAM: {
++ struct hax_alloc_ram_info *info;
++ info = (struct hax_alloc_ram_info *)data;
++ hax_info("IOCTL_ALLOC_RAM: vm_id=%d, va=0x%llx, size=0x%x, pad=0x%x\n",
++ vm->id, info->va, info->size, info->pad);
++ ret = hax_vm_add_ramblock(cvm, info->va, info->size);
++ break;
++ }
++ case HAX_VM_IOCTL_ADD_RAMBLOCK: {
++ struct hax_ramblock_info *info;
++ info = (struct hax_ramblock_info *)data;
++ if (info->reserved) {
++ hax_error("IOCTL_ADD_RAMBLOCK: vm_id=%d, reserved=0x%llx\n",
++ vm->id, info->reserved);
++ ret = EINVAL;
++ break;
++ }
++ hax_info("IOCTL_ADD_RAMBLOCK: vm_id=%d, start_va=0x%llx, size=0x%llx\n",
++ vm->id, info->start_va, info->size);
++ ret = hax_vm_add_ramblock(cvm, info->start_va, info->size);
++ break;
++ }
++ case HAX_VM_IOCTL_SET_RAM: {
++ struct hax_set_ram_info *info;
++ info = (struct hax_set_ram_info *)data;
++ ret = hax_vm_set_ram(cvm, info);
++ break;
++ }
++#ifdef CONFIG_HAX_EPT2
++ case HAX_VM_IOCTL_SET_RAM2: {
++ struct hax_set_ram_info2 *info;
++ info = (struct hax_set_ram_info2 *)data;
++ if (info->reserved1 || info->reserved2) {
++ hax_error("IOCTL_SET_RAM2: vm_id=%d, reserved1=0x%x reserved2=0x%llx\n",
++ vm->id, info->reserved1, info->reserved2);
++ ret = EINVAL;
++ break;
++ }
++ ret = hax_vm_set_ram2(cvm, info);
++ break;
++ }
++ case HAX_VM_IOCTL_PROTECT_RAM: {
++ struct hax_protect_ram_info *info;
++ info = (struct hax_protect_ram_info *)data;
++ if (info->reserved) {
++ hax_error("IOCTL_PROTECT_RAM: vm_id=%d, reserved=0x%x\n",
++ vm->id, info->reserved);
++ ret = EINVAL;
++ break;
++ }
++ ret = hax_vm_protect_ram(cvm, info);
++ break;
++ }
++#endif
++ case HAX_VM_IOCTL_NOTIFY_QEMU_VERSION: {
++ struct hax_qemu_version *info;
++ info = (struct hax_qemu_version *)data;
++ // TODO: Print information about the process that sent the ioctl.
++ ret = hax_vm_set_qemuversion(cvm, info);
++ break;
++ }
++ default:
++ // TODO: Print information about the process that sent the ioctl.
++ hax_error("Unknown VM IOCTL %#lx, pid=%d ('%s')\n", cmd,
++ l->l_proc->p_pid, l->l_proc->p_comm);
++ break;
++ }
++ hax_put_vm(cvm);
++ return ret;
++}
++
++static int
++hax_vm_match(device_t parent, cfdata_t match, void *aux)
++{
++ return 1;
++}
++
++static void
++hax_vm_attach(device_t parent, device_t self, void *aux)
++{
++ struct hax_vm_softc *sc;
++
++ if (hax_vm_sc_self)
++ return;
++
++ sc = device_private(self);
++ if (sc == NULL) {
++ hax_error("device_private() for hax_vm failed\n");
++ return;
++ }
++ sc->sc_dev = self;
++ hax_vm_sc_self = self;
++
++ if (!pmf_device_register(self, NULL, NULL))
++ aprint_error_dev(self, "couldn't establish power handler\n");
++}
++
++static int
++hax_vm_detach(device_t self, int flags)
++{
++ struct hax_vm_softc *sc;
++
++ sc = device_private(self);
++ if (sc == NULL) {
++ hax_error("device_private() for hax_vm failed\n");
++ return ENODEV;
++ }
++ pmf_device_deregister(self);
++
++ hax_vm_sc_self = NULL;
++ return 0;
++}
++
++static int
++hax_vcpu_match(device_t parent, cfdata_t match, void *aux)
++{
++ return 1;
++}
++
++static void
++hax_vcpu_attach(device_t parent, device_t self, void *aux)
++{
++ struct hax_vcpu_softc *sc;
++
++ if (hax_vcpu_sc_self)
++ return;
++
++ sc = device_private(self);
++ if (sc == NULL) {
++ hax_error("device_private() for hax_vcpu failed\n");
++ return;
++ }
++ sc->sc_dev = self;
++ hax_vcpu_sc_self = self;
++
++ if (!pmf_device_register(self, NULL, NULL))
++ aprint_error_dev(self, "couldn't establish power handler\n");
++}
++
++static int
++hax_vcpu_detach(device_t self, int flags)
++{
++ struct hax_vcpu_softc *sc;
++
++ sc = device_private(self);
++ if (sc == NULL) {
++ hax_error("device_private() for hax_vm failed\n");
++ return ENODEV;
++ }
++ pmf_device_deregister(self);
++
++ hax_vcpu_sc_self = NULL;
++ return 0;
++}
diff --git a/haxm/patches/patch-platforms_netbsd_hax__entry.c b/haxm/patches/patch-platforms_netbsd_hax__entry.c
new file mode 100644
index 0000000000..a3f22a30d4
--- /dev/null
+++ b/haxm/patches/patch-platforms_netbsd_hax__entry.c
@@ -0,0 +1,348 @@
+$NetBSD$
+
+--- platforms/netbsd/hax_entry.c.orig 2018-11-24 22:22:37.787129043 +0000
++++ platforms/netbsd/hax_entry.c
+@@ -0,0 +1,343 @@
++/*
++ * Copyright (c) 2018 Kamil Rytarowski
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ *
++ * 1. Redistributions of source code must retain the above copyright notice,
++ * this list of conditions and the following disclaimer.
++ *
++ * 2. Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ *
++ * 3. Neither the name of the copyright holder nor the names of its
++ * contributors may be used to endorse or promote products derived from
++ * this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include <sys/param.h>
++#include <sys/conf.h>
++#include <sys/device.h>
++#include <sys/kernel.h>
++#include <sys/lwp.h>
++#include <sys/proc.h>
++#include <sys/module.h>
++#include <machine/specialreg.h>
++#include <machine/cpufunc.h>
++
++#include "../../include/hax.h"
++#include "../../include/hax_interface.h"
++#include "../../include/hax_release_ver.h"
++#include "../../core/include/hax_core_interface.h"
++
++#define HAX_DEVICE_NAME "HAX"
++
++static int hax_cmajor = 220, hax_bmajor = -1;
++
++extern struct cfdriver hax_vm_cd;
++extern struct cfdriver hax_vcpu_cd;
++extern struct cfattach hax_vm_ca;
++extern struct cfattach hax_vcpu_ca;
++
++static int hax_driver_init(void);
++static int hax_driver_exit(void);
++
++dev_type_open(hax_dev_open);
++dev_type_close(hax_dev_close);
++dev_type_ioctl(hax_dev_ioctl);
++
++static struct cdevsw hax_dev_cdevsw = {
++ .d_open = hax_dev_open,
++ .d_close = hax_dev_close,
++ .d_read = noread,
++ .d_write = nowrite,
++ .d_ioctl = hax_dev_ioctl,
++ .d_stop = nostop,
++ .d_tty = notty,
++ .d_poll = nopoll,
++ .d_mmap = nommap,
++ .d_kqfilter = nokqfilter,
++ .d_discard = nodiscard,
++ .d_flag = D_OTHER
++};
++
++int hax_dev_open(dev_t dev __unused, int flags __unused, int mode __unused,
++ struct lwp *l __unused)
++{
++ hax_log_level(HAX_LOGI, "HAX module opened\n");
++ return 0;
++}
++
++int hax_dev_close(dev_t self __unused, int flag __unused, int mode __unused,
++ struct lwp *l __unused)
++{
++ hax_log_level(HAX_LOGI, "hax_close\n");
++ return 0;
++}
++
++int hax_dev_ioctl(dev_t self __unused, u_long cmd, void *data, int flag,
++ struct lwp *l)
++{
++ int ret = 0;
++
++ switch (cmd) {
++ case HAX_IOCTL_VERSION: {
++ struct hax_module_version *version;
++ version = (struct hax_module_version *)data;
++ version->cur_version = HAX_CUR_VERSION;
++ version->compat_version = HAX_COMPAT_VERSION;
++ break;
++ }
++ case HAX_IOCTL_CAPABILITY: {
++ struct hax_capabilityinfo *capab;
++ capab = (struct hax_capabilityinfo *)data;
++ hax_get_capability(capab, sizeof(*capab), NULL);
++ break;
++ }
++ case HAX_IOCTL_SET_MEMLIMIT: {
++ struct hax_set_memlimit *memlimit;
++ memlimit = (struct hax_set_memlimit *)data;
++ ret = hax_set_memlimit(memlimit, sizeof(*memlimit), NULL);
++ break;
++ }
++ case HAX_IOCTL_CREATE_VM: {
++ int vm_id;
++ struct vm_t *cvm;
++
++ cvm = hax_create_vm(&vm_id);
++ if (!cvm) {
++ hax_log_level(HAX_LOGE, "Failed to create the HAX VM\n");
++ ret = ENOMEM;
++ break;
++ }
++
++ *((uint32_t *)data) = vm_id;
++ break;
++ }
++ default:
++ hax_error("Unknown ioctl %#lx, pid=%d ('%s')\n", cmd,
++ l->l_proc->p_pid, l->l_proc->p_comm);
++ ret = ENOSYS;
++ break;
++ }
++ return ret;
++}
++
++MODULE(MODULE_CLASS_MISC, hax_driver, NULL);
++
++static const struct cfiattrdata haxbus_iattrdata = {
++ "haxbus", 0, { { NULL, NULL, 0 },}
++};
++
++static const struct cfiattrdata *const hax_vm_attrs[] = {
++ &haxbus_iattrdata, NULL
++};
++
++CFDRIVER_DECL(hax_vm, DV_DULL, hax_vm_attrs);
++extern struct cfattach hax_vm_ca;
++static int hax_vmloc[] = {
++ -1,
++ -1,
++ -1
++};
++
++static struct cfdata hax_vm_cfdata[] = {
++ {
++ .cf_name = "hax_vm",
++ .cf_atname = "hax_vm",
++ .cf_unit = 0,
++ .cf_fstate = FSTATE_STAR,
++ .cf_loc = hax_vmloc,
++ .cf_flags = 0,
++ .cf_pspec = NULL,
++ },
++ { NULL, NULL, 0, FSTATE_NOTFOUND, NULL, 0, NULL }
++};
++
++static const struct cfiattrdata *const hax_vcpu_attrs[] = {
++ &haxbus_iattrdata, NULL
++};
++
++CFDRIVER_DECL(hax_vcpu, DV_DULL, hax_vcpu_attrs);
++extern struct cfattach hax_vcpu_ca;
++static int hax_vcpuloc[] = {
++ -1,
++ -1,
++ -1
++};
++
++static struct cfdata hax_vcpu_cfdata[] = {
++ {
++ .cf_name = "hax_vcpu",
++ .cf_atname = "hax_vcpu",
++ .cf_unit = 0,
++ .cf_fstate = FSTATE_STAR,
++ .cf_loc = hax_vcpuloc,
++ .cf_flags = 0,
++ .cf_pspec = NULL,
++ },
++ { NULL, NULL, 0, FSTATE_NOTFOUND, NULL, 0, NULL }
++};
++
++static int
++hax_driver_modcmd(modcmd_t cmd, void *arg __unused)
++{
++ switch (cmd) {
++ case MODULE_CMD_INIT:
++ return hax_driver_init();
++ case MODULE_CMD_FINI:
++ return hax_driver_exit();
++ default:
++ return ENOTTY;
++ }
++}
++
++static int hax_driver_init(void)
++{
++ struct cpu_info *ci;
++ CPU_INFO_ITERATOR cii;
++ struct schedstate_percpu *spc;
++ int i, err;
++ vaddr_t cr4;
++
++ // Initialization
++ max_cpus = 0;
++
++ ci = NULL;
++
++ for (CPU_INFO_FOREACH(cii, ci)) {
++ ++max_cpus;
++ if (!ISSET(ci->ci_schedstate.spc_flags, SPCF_OFFLINE)) {
++ cpu_online_map |= __BIT(ci->ci_cpuid);
++ }
++ }
++
++#if 0
++ cr4 = rcr4();
++ cr4 |= CR4_VMXE;
++ lcr4(cr4);
++#endif
++
++ // Register hax_vm
++ err = config_cfdriver_attach(&hax_vm_cd);
++ if (err) {
++ hax_error("Unable to register cfdriver hax_vm\n");
++ return err;
++ }
++
++ err = config_cfattach_attach(hax_vm_cd.cd_name, &hax_vm_ca);
++ if (err) {
++ hax_error("Unable to register cfattch hax_vm\n");
++ config_cfdriver_detach(&hax_vm_cd);
++ return err;
++ }
++
++ err = config_cfdata_attach(hax_vm_cfdata, 1);
++ if (err) {
++ hax_error("Unable to register cfdata hax_vm\n");
++ config_cfattach_detach(hax_vm_cd.cd_name, &hax_vm_ca);
++ config_cfdriver_detach(&hax_vm_cd);
++ return err;
++ }
++
++ // Register hax_vcpu
++ err = config_cfdriver_attach(&hax_vcpu_cd);
++ if (err) {
++ hax_error("Unable to register cfdriver hax_vcpu\n");
++ config_cfattach_detach(hax_vm_cd.cd_name, &hax_vm_ca);
++ config_cfdriver_detach(&hax_vm_cd);
++ return err;
++ }
++
++ err = config_cfattach_attach(hax_vcpu_cd.cd_name, &hax_vcpu_ca);
++ if (err) {
++ hax_error("Unable to register cfattch hax_vcpu\n");
++ config_cfdriver_detach(&hax_vcpu_cd);
++ config_cfattach_detach(hax_vm_cd.cd_name, &hax_vm_ca);
++ config_cfdriver_detach(&hax_vm_cd);
++ return err;
++ }
++
++ err = config_cfdata_attach(hax_vcpu_cfdata, 1);
++ if (err) {
++ hax_error("Unable to register cfdata hax_vcpu\n");
++ config_cfattach_detach(hax_vcpu_cd.cd_name, &hax_vcpu_ca);
++ config_cfdriver_detach(&hax_vcpu_cd);
++ config_cfattach_detach(hax_vm_cd.cd_name, &hax_vm_ca);
++ config_cfdriver_detach(&hax_vm_cd);
++ return err;
++ }
++
++ // Register HAXM
++ err = devsw_attach(HAX_DEVICE_NAME, NULL, &hax_bmajor, &hax_dev_cdevsw,
++ &hax_cmajor);
++ if (err) {
++ hax_error("Failed to register HAXM device\n");
++ config_cfattach_detach(hax_vm_cd.cd_name, &hax_vm_ca);
++ config_cfdriver_detach(&hax_vm_cd);
++ config_cfattach_detach(hax_vm_cd.cd_name, &hax_vm_ca);
++ config_cfdriver_detach(&hax_vm_cd);
++ return ENXIO;
++ }
++
++ config_attach_pseudo(hax_vm_cfdata);
++
++ config_attach_pseudo(hax_vcpu_cfdata);
++
++ // Initialize HAXM
++
++ if (hax_module_init() < 0) {
++ hax_error("Failed to initialize HAXM module\n");
++ return ENXIO;
++ }
++
++ hax_info("Created HAXM device\n");
++ return 0;
++}
++
++static int hax_driver_exit(void)
++{
++ int err;
++
++ if (hax_module_exit() < 0) {
++ hax_error("Failed to finalize HAXM module\n");
++ }
++
++ // hax_vcpu
++ err = config_cfdata_detach(hax_vcpu_cfdata);
++ if (err) {
++ hax_error("Unable to deregister cfattch hax_vcpu\n");
++ return err;
++ }
++ config_cfattach_detach(hax_vcpu_cd.cd_name, &hax_vcpu_ca);
++ config_cfdriver_detach(&hax_vcpu_cd);
++
++ // hax_vm
++ err = config_cfdata_detach(hax_vm_cfdata);
++ if (err) {
++ hax_error("Unable to deregister cfattch hax_vm\n");
++ return err;
++ }
++ config_cfattach_detach(hax_vm_cd.cd_name, &hax_vm_ca);
++ config_cfdriver_detach(&hax_vm_cd);
++
++ // HAX
++
++ devsw_detach(NULL, &hax_dev_cdevsw);
++ hax_info("Removed HAXM device\n");
++
++ return 0;
++}
diff --git a/haxm/patches/patch-platforms_netbsd_hax__event.c b/haxm/patches/patch-platforms_netbsd_hax__event.c
new file mode 100644
index 0000000000..167fee3d23
--- /dev/null
+++ b/haxm/patches/patch-platforms_netbsd_hax__event.c
@@ -0,0 +1,42 @@
+$NetBSD$
+
+--- platforms/netbsd/hax_event.c.orig 2018-11-24 22:22:37.787221630 +0000
++++ platforms/netbsd/hax_event.c
+@@ -0,0 +1,37 @@
++/*
++ * Copyright (c) 2018 Kamil Rytarowski
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ *
++ * 1. Redistributions of source code must retain the above copyright notice,
++ * this list of conditions and the following disclaimer.
++ *
++ * 2. Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ *
++ * 3. Neither the name of the copyright holder nor the names of its
++ * contributors may be used to endorse or promote products derived from
++ * this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include "../../include/hax.h"
++
++int hax_notify_host_event(enum hax_notify_event event, uint32_t *param,
++ uint32_t size)
++{
++ return 0;
++}
diff --git a/haxm/patches/patch-platforms_netbsd_hax__host__mem.c b/haxm/patches/patch-platforms_netbsd_hax__host__mem.c
new file mode 100644
index 0000000000..0dd9660650
--- /dev/null
+++ b/haxm/patches/patch-platforms_netbsd_hax__host__mem.c
@@ -0,0 +1,260 @@
+$NetBSD$
+
+--- platforms/netbsd/hax_host_mem.c.orig 2018-11-24 22:22:37.787364506 +0000
++++ platforms/netbsd/hax_host_mem.c
+@@ -0,0 +1,255 @@
++/*
++ * Copyright (c) 2018 Kamil Rytarowski
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ *
++ * 1. Redistributions of source code must retain the above copyright notice,
++ * this list of conditions and the following disclaimer.
++ *
++ * 2. Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ *
++ * 3. Neither the name of the copyright holder nor the names of its
++ * contributors may be used to endorse or promote products derived from
++ * this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#define __HAVE_DIRECT_MAP
++
++#include <sys/param.h>
++#include <sys/types.h>
++#include <sys/kmem.h>
++#include <uvm/uvm.h>
++#include <machine/pmap.h>
++
++#include "../../include/hax_host_mem.h"
++#include "../../core/include/paging.h"
++
++int hax_pin_user_pages(uint64_t start_uva, uint64_t size, hax_memdesc_user *memdesc)
++{
++ if (start_uva & PAGE_MASK) {
++ hax_error("Failed 'start_uva & ~PAGE_MASK', start_uva=%llx\n", start_uva);
++ return -EINVAL;
++ }
++ if (!size) {
++ hax_error("Failed '!size'\n");
++ return -EINVAL;
++ }
++
++ uvm_vslock(curproc->p_vmspace, (void *)start_uva, size, VM_PROT_READ | VM_PROT_WRITE);
++
++ memdesc->uva = start_uva;
++ memdesc->size = size;
++ return 0;
++}
++
++int hax_unpin_user_pages(hax_memdesc_user *memdesc)
++{
++ vsize_t size;
++ vaddr_t uva;
++
++ if (!memdesc)
++ return -EINVAL;
++ if (!memdesc->size)
++ return -EINVAL;
++ if (!memdesc->uva)
++ return -EINVAL;
++
++ size = memdesc->size;
++ uva = memdesc->uva;
++
++ uvm_vsunlock(curproc->p_vmspace, (void *)uva, size);
++
++ return 0;
++}
++
++uint64_t hax_get_pfn_user(hax_memdesc_user *memdesc, uint64_t uva_offset)
++{
++ struct vm_map *map;
++ vsize_t size;
++ vaddr_t uva;
++ paddr_t pa;
++
++ if (!memdesc)
++ return -EINVAL;
++ if (!memdesc->size)
++ return -EINVAL;
++ if (!memdesc->uva)
++ return -EINVAL;
++
++ size = memdesc->size;
++ uva = memdesc->uva;
++
++ if (uva_offset > size)
++ return -EINVAL;
++
++ map = &curproc->p_vmspace->vm_map;
++
++ if (!pmap_extract(map->pmap, uva + uva_offset, &pa))
++ return -EINVAL;
++
++ return (pa >> PAGE_SHIFT);
++}
++
++void * hax_map_user_pages(hax_memdesc_user *memdesc, uint64_t uva_offset,
++ uint64_t size, hax_kmap_user *kmap)
++{
++ struct vm_map *map;
++ struct vm_page *page;
++ vaddr_t uva, va, va2, end_va;
++ vaddr_t kva;
++ paddr_t pa;
++ int err;
++
++ if (!memdesc)
++ return NULL;
++ if (!memdesc->size)
++ return NULL;
++ if (!memdesc->uva)
++ return NULL;
++ if (!kmap)
++ return NULL;
++ if (!size)
++ return NULL;
++ if (size + uva_offset > memdesc->size)
++ return NULL;
++
++ uva = trunc_page(memdesc->uva + uva_offset);
++ size = round_page(size);
++
++ map = &curproc->p_vmspace->vm_map;
++
++ kva = uvm_km_alloc(kernel_map, size, PAGE_SIZE, UVM_KMF_VAONLY|UVM_KMF_WAITVA);
++
++ for (va = uva, end_va = uva + size, va2 = kva; va < end_va; va += PAGE_SIZE, va2 += PAGE_SIZE) {
++ if (!pmap_extract(map->pmap, va, &pa))
++ break;
++ pmap_kenter_pa(va2, pa, VM_PROT_READ | VM_PROT_WRITE, PMAP_WIRED);
++ }
++ pmap_update(pmap_kernel());
++
++ kmap->kva = kva;
++ kmap->size = size;
++ return (void *)kva;
++}
++
++int hax_unmap_user_pages(hax_kmap_user *kmap)
++{
++ vaddr_t kva;
++ vsize_t size;
++
++ if (!kmap)
++ return -EINVAL;
++ if (!kmap->kva)
++ return -EINVAL;
++ if (!kmap->size)
++ return -EINVAL;
++
++ kva = kmap->kva;
++ size = kmap->size;
++
++ pmap_kremove(kva, size);
++ pmap_update(pmap_kernel());
++
++ uvm_km_free(kernel_map, kva, size, UVM_KMF_VAONLY);
++
++ return 0;
++}
++
++int hax_alloc_page_frame(uint8_t flags, hax_memdesc_phys *memdesc)
++{
++ if (!memdesc)
++ return -EINVAL;
++
++ // TODO: Support HAX_PAGE_ALLOC_BELOW_4G
++ if (flags & HAX_PAGE_ALLOC_BELOW_4G) {
++ hax_warning("%s: HAX_PAGE_ALLOC_BELOW_4G is ignored\n", __func__);
++ }
++
++ memdesc->page = uvm_pagealloc(NULL, 0, NULL, ISSET(flags, HAX_PAGE_ALLOC_ZEROED) ? UVM_PGA_ZERO : 0);
++ if (!memdesc->page)
++ return -ENOMEM;
++
++ return 0;
++}
++
++int hax_free_page_frame(hax_memdesc_phys *memdesc)
++{
++ if (!memdesc)
++ return -EINVAL;
++ if (!memdesc->page)
++ return -EINVAL;
++
++ uvm_pagefree(memdesc->page);
++
++ memdesc->page = NULL;
++
++ return 0;
++}
++
++uint64_t hax_get_pfn_phys(hax_memdesc_phys *memdesc)
++{
++ if (!memdesc)
++ return INVALID_PFN;
++ if (!memdesc->page)
++ return INVALID_PFN;
++
++ return VM_PAGE_TO_PHYS(memdesc->page) >> PAGE_SHIFT;
++}
++
++void * hax_get_kva_phys(hax_memdesc_phys *memdesc)
++{
++ if (!memdesc)
++ return NULL;
++ if (!memdesc->page)
++ return NULL;
++
++ return (void *)(PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(memdesc->page)));
++}
++
++void * hax_map_page_frame(uint64_t pfn, hax_kmap_phys *kmap)
++{
++ vaddr_t kva;
++ paddr_t pa;
++
++ if (!kmap)
++ return NULL;
++
++ kva = uvm_km_alloc(kernel_map, PAGE_SIZE, PAGE_SIZE, UVM_KMF_VAONLY|UVM_KMF_WAITVA);
++
++ pa = pfn << PAGE_SHIFT;
++
++ pmap_kenter_pa(kva, pa, VM_PROT_READ | VM_PROT_WRITE, PMAP_WIRED);
++ pmap_update(pmap_kernel());
++
++ kmap->kva = kva;
++ return (void *)kva;
++}
++
++int hax_unmap_page_frame(hax_kmap_phys *kmap)
++{
++ if (!kmap)
++ return -EINVAL;
++ if (!kmap->kva)
++ return -EINVAL;
++
++ pmap_kremove(kmap->kva, PAGE_SIZE);
++ pmap_update(pmap_kernel());
++
++ uvm_km_free(kernel_map, kmap->kva, PAGE_SIZE, UVM_KMF_VAONLY);
++
++ return 0;
++}
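
Side note, not part of the patch set: the page-frame-number arithmetic used above by hax_get_pfn_user() (physical address shifted right by PAGE_SHIFT) and hax_map_page_frame() (shifted back left) can be sanity-checked in plain userland C. A minimal sketch, assuming 4 KiB pages; the SKETCH_* names are made up for illustration:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define SKETCH_PAGE_SHIFT 12                       /* assume 4 KiB pages */
#define SKETCH_PAGE_SIZE  (1UL << SKETCH_PAGE_SHIFT)
#define SKETCH_PAGE_MASK  (SKETCH_PAGE_SIZE - 1)

int main(void)
{
    uint64_t pa   = 0x12345678ULL;                 /* some physical address */
    uint64_t pfn  = pa >> SKETCH_PAGE_SHIFT;       /* as in hax_get_pfn_user() */
    uint64_t base = pfn << SKETCH_PAGE_SHIFT;      /* as in hax_map_page_frame() */

    /* shifting down and back up recovers the page base, not the offset */
    assert(base == (pa & ~SKETCH_PAGE_MASK));
    printf("pa=%#llx pfn=%#llx base=%#llx\n",
           (unsigned long long)pa, (unsigned long long)pfn,
           (unsigned long long)base);
    return 0;
}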
diff --git a/haxm/patches/patch-platforms_netbsd_hax__mem__alloc.c b/haxm/patches/patch-platforms_netbsd_hax__mem__alloc.c
new file mode 100644
index 0000000000..eb7d964e6d
--- /dev/null
+++ b/haxm/patches/patch-platforms_netbsd_hax__mem__alloc.c
@@ -0,0 +1,247 @@
+$NetBSD$
+
+--- platforms/netbsd/hax_mem_alloc.c.orig 2018-11-24 22:22:37.787488265 +0000
++++ platforms/netbsd/hax_mem_alloc.c
+@@ -0,0 +1,242 @@
++/*
++ * Copyright (c) 2018 Kamil Rytarowski
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ *
++ * 1. Redistributions of source code must retain the above copyright notice,
++ * this list of conditions and the following disclaimer.
++ *
++ * 2. Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ *
++ * 3. Neither the name of the copyright holder nor the names of its
++ * contributors may be used to endorse or promote products derived from
++ * this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include <sys/param.h>
++#include <sys/types.h>
++#include <sys/kmem.h>
++#include <sys/vmem.h>
++#include <uvm/uvm.h>
++
++#include "../../include/hax.h"
++
++void * hax_vmalloc(uint32_t size, uint32_t flags)
++{
++ vaddr_t kva;
++ uvm_flag_t flag;
++
++ if (size == 0)
++ return NULL;
++
++#if 0
++ if (flags & HAX_MEM_PAGABLE)
++ flag = UVM_KMF_PAGEABLE;
++ else if (flags & HAX_MEM_NONPAGE)
++#endif
++ flag = UVM_KMF_WIRED | UVM_KMF_ZERO;
++
++ flag |= UVM_KMF_WAITVA;
++
++ kva = uvm_km_alloc(kernel_map, size, PAGE_SIZE, flag);
++
++ return (void *)kva;
++}
++
++void hax_vfree_flags(void *va, uint32_t size, uint32_t flags)
++{
++ uvm_flag_t flag;
++
++#if 0
++ if (flags & HAX_MEM_PAGABLE)
++ flag = UVM_KMF_PAGEABLE;
++ else if (flags & HAX_MEM_NONPAGE)
++#endif
++ flag = UVM_KMF_WIRED;
++
++ uvm_km_free(kernel_map, (vaddr_t)va, size, flag);
++}
++
++void hax_vfree(void *va, uint32_t size)
++{
++ uint32_t flags = HAX_MEM_NONPAGE;
++
++ hax_vfree_flags(va, size, flags);
++}
++
++void hax_vfree_aligned(void *va, uint32_t size, uint32_t alignment,
++ uint32_t flags)
++{
++ hax_vfree_flags(va, size, flags);
++}
++
++void * hax_vmap(hax_pa_t pa, uint32_t size)
++{
++ vaddr_t kva;
++ vaddr_t va, end_va;
++ unsigned long offset;
++
++ offset = pa & PAGE_MASK;
++ pa = trunc_page(pa);
++ size = round_page(size + offset);
++
++ kva = uvm_km_alloc(kernel_map, size, PAGE_SIZE, UVM_KMF_VAONLY|UVM_KMF_WAITVA);
++
++ for (va = kva, end_va = kva + size; va < end_va; va += PAGE_SIZE, pa += PAGE_SIZE) {
++ pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE, PMAP_WIRED);
++ }
++ pmap_update(pmap_kernel());
++
++ return (void *)(kva + offset);
++}
++
++void hax_vunmap(void *addr, uint32_t size)
++{
++ unsigned long offset;
++ vaddr_t kva = (vaddr_t)addr;
++
++ offset = kva & PAGE_MASK;
++ size = round_page(size + offset);
++ kva = trunc_page(kva);
++
++ pmap_kremove(kva, size);
++ pmap_update(pmap_kernel());
++
++ uvm_km_free(kernel_map, kva, size, UVM_KMF_VAONLY);
++}
++
++hax_pa_t hax_pa(void *va)
++{
++ bool success;
++ paddr_t pa;
++
++ success = pmap_extract(pmap_kernel(), (vaddr_t)va, &pa);
++
++ KASSERT(success);
++
++ return pa;
++}
++
++struct hax_page * hax_alloc_pages(int order, uint32_t flags, bool vmap)
++{
++ struct hax_page *ppage;
++ struct vm_page *page;
++ paddr_t pa;
++ vaddr_t kva, va;
++ size_t size;
++ int rv;
++
++ ppage = kmem_zalloc(sizeof(struct hax_page), KM_SLEEP);
++
++ // TODO: Support HAX_MEM_LOW_4G
++ if (flags & HAX_MEM_LOW_4G) {
++ hax_warning("%s: HAX_MEM_LOW_4G is ignored\n", __func__);
++ }
++
++ ppage->pglist = kmem_zalloc(sizeof(struct pglist), KM_SLEEP);
++
++ size = PAGE_SIZE << order;
++
++ rv = uvm_pglistalloc(size, 0, ~0UL, PAGE_SIZE, 0, ppage->pglist, 1, 1);
++ if (rv) {
++ kmem_free(ppage->pglist, sizeof(struct pglist));
++ kmem_free(ppage, sizeof(struct hax_page));
++ return NULL;
++ }
++
++ kva = uvm_km_alloc(kernel_map, size, PAGE_SIZE, UVM_KMF_VAONLY);
++ if (kva == 0) {
++ uvm_pglistfree(ppage->pglist);
++ kmem_free(ppage->pglist, sizeof(struct pglist));
++ kmem_free(ppage, sizeof(struct hax_page));
++ return NULL;
++ }
++
++ va = kva;
++ TAILQ_FOREACH(page, ppage->pglist, pageq.queue) {
++ pa = VM_PAGE_TO_PHYS(page);
++ pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE, PMAP_WRITE_BACK);
++ va += PAGE_SIZE;
++ }
++ pmap_update(pmap_kernel());
++
++ ppage->page = TAILQ_FIRST(ppage->pglist);
++ ppage->pa = VM_PAGE_TO_PHYS(ppage->page);
++ ppage->kva = (void *)kva;
++ ppage->flags = flags;
++ ppage->order = order;
++ return ppage;
++}
++
++void hax_free_pages(struct hax_page *pages)
++{
++ size_t size;
++
++ if (!pages)
++ return;
++
++ size = PAGE_SIZE << pages->order;
++
++ pmap_kremove((vaddr_t)pages->kva, size);
++ pmap_update(pmap_kernel());
++ uvm_km_free(kernel_map, (vaddr_t)pages->kva, size, UVM_KMF_VAONLY);
++ uvm_pglistfree(pages->pglist);
++ kmem_free(pages->pglist, sizeof(struct pglist));
++ kmem_free(pages, sizeof(struct hax_page));
++}
++
++void * hax_map_page(struct hax_page *page)
++{
++ if (!page)
++ return NULL;
++
++ return page->kva;
++}
++
++void hax_unmap_page(struct hax_page *page)
++{
++ return;
++}
++
++hax_pfn_t hax_page2pfn(struct hax_page *page)
++{
++ if (!page)
++ return 0;
++
++ return page->pa >> PAGE_SHIFT;
++}
++
++void hax_clear_page(struct hax_page *page)
++{
++ memset((void *)page->kva, 0, PAGE_SIZE);
++}
++
++void hax_set_page(struct hax_page *page)
++{
++ memset((void *)page->kva, 0xFF, PAGE_SIZE);
++}
++
++/* Initialize memory allocation related structures */
++int hax_malloc_init(void)
++{
++ return 0;
++}
++
++void hax_malloc_exit(void)
++{
++}
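
Side note, not part of the patch set: hax_vmap() above keeps the sub-page offset, truncates the physical address to a page boundary, and rounds the length up so only whole pages are entered into the kernel pmap; hax_vunmap() redoes the same arithmetic before tearing the mapping down. A minimal userland sketch of that alignment arithmetic, assuming a 4096-byte page and hypothetical sketch_* macros:

#include <stdint.h>
#include <stdio.h>

#define SKETCH_PAGE_SIZE 4096UL                        /* assumed page size */
#define SKETCH_PAGE_MASK (SKETCH_PAGE_SIZE - 1)
#define sketch_trunc_page(x) ((x) & ~SKETCH_PAGE_MASK)
#define sketch_round_page(x) (((x) + SKETCH_PAGE_MASK) & ~SKETCH_PAGE_MASK)

int main(void)
{
    uint64_t pa   = 0xfee003c0ULL;                     /* unaligned physical address */
    uint32_t size = 8;                                 /* bytes the caller asked for */

    uint64_t offset = pa & SKETCH_PAGE_MASK;           /* sub-page offset, re-added later */
    uint64_t map_pa = sketch_trunc_page(pa);           /* map from the page boundary ... */
    uint64_t map_sz = sketch_round_page(size + offset);/* ... covering whole pages */

    printf("map %#llx..%#llx, hand back base+%#llx\n",
           (unsigned long long)map_pa,
           (unsigned long long)(map_pa + map_sz),
           (unsigned long long)offset);
    return 0;
}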
diff --git a/haxm/patches/patch-platforms_netbsd_hax__mm.c b/haxm/patches/patch-platforms_netbsd_hax__mm.c
new file mode 100644
index 0000000000..af0592a5f9
--- /dev/null
+++ b/haxm/patches/patch-platforms_netbsd_hax__mm.c
@@ -0,0 +1,150 @@
+$NetBSD$
+
+--- platforms/netbsd/hax_mm.c.orig 2018-11-24 22:22:37.787599548 +0000
++++ platforms/netbsd/hax_mm.c
+@@ -0,0 +1,145 @@
++/*
++ * Copyright (c) 2018 Kamil Rytarowski
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ *
++ * 1. Redistributions of source code must retain the above copyright notice,
++ * this list of conditions and the following disclaimer.
++ *
++ * 2. Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ *
++ * 3. Neither the name of the copyright holder nor the names of its
++ * contributors may be used to endorse or promote products derived from
++ * this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include <sys/param.h>
++#include <sys/types.h>
++#include <sys/kmem.h>
++#include <uvm/uvm.h>
++
++#include "../../include/hax.h"
++
++struct hax_vcpu_mem_hinfo_t {
++ struct uvm_object *uao;
++ int flags;
++};
++
++int hax_clear_vcpumem(struct hax_vcpu_mem *mem)
++{
++ struct hax_vcpu_mem_hinfo_t *hinfo;
++ struct vm_map *map;
++ vaddr_t uva, kva;
++ vsize_t size;
++
++ if (!mem)
++ return -EINVAL;
++
++ hinfo = mem->hinfo;
++
++ uva = mem->uva;
++ kva = mem->kva;
++ size = mem->size;
++
++ if (!ISSET(hinfo->flags, HAX_VCPUMEM_VALIDVA)) {
++ map = &curproc->p_vmspace->vm_map;
++ uvm_unmap(map, uva, uva + size);
++ }
++
++ uvm_unmap(kernel_map, kva, kva + size);
++
++ if (!ISSET(hinfo->flags, HAX_VCPUMEM_VALIDVA)) {
++ uao_detach(hinfo->uao);
++ }
++
++ kmem_free(hinfo, sizeof(struct hax_vcpu_mem_hinfo_t));
++
++ return 0;
++}
++
++int hax_setup_vcpumem(struct hax_vcpu_mem *mem, uint64_t uva, uint32_t size,
++ int flags)
++{
++ struct proc *p;
++ struct uvm_object *uao;
++ struct vm_map *map;
++ int err;
++ struct hax_vcpu_mem_hinfo_t *hinfo = NULL;
++ vaddr_t kva, kva2;
++ vaddr_t va, end_va;
++ paddr_t pa;
++ unsigned offset;
++
++ if (!mem || !size)
++ return -EINVAL;
++
++ offset = uva & PAGE_MASK;
++ size = round_page(size + offset);
++ uva = trunc_page(uva);
++
++ hinfo = kmem_zalloc(sizeof(struct hax_vcpu_mem_hinfo_t), KM_SLEEP);
++ hinfo->flags = flags;
++
++ p = curproc;
++ map = &p->p_vmspace->vm_map;
++
++ if (!ISSET(flags, HAX_VCPUMEM_VALIDVA)) {
++ // Map to user
++ uao = uao_create(size, 0);
++ uao_reference(uao);
++ va = p->p_emul->e_vm_default_addr(p, (vaddr_t)p->p_vmspace->vm_daddr, size, map->flags & VM_MAP_TOPDOWN);
++ err = uvm_map(map, &va, size, uao, 0, 0,
++ UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW, UVM_INH_NONE,
++ UVM_ADV_RANDOM, 0));
++ uao_reference(uao);
++ if (err) {
++ hax_error("Failed to map into user\n");
++ uao_detach(uao);
++ kmem_free(hinfo, sizeof(struct hax_vcpu_mem_hinfo_t));
++ return -ENOMEM;
++ }
++ hinfo->uao = uao;
++ uva = va;
++ }
++
++ err = uvm_map_extract(map, uva, size, kernel_map, &kva, UVM_EXTRACT_QREF | UVM_EXTRACT_CONTIG | UVM_EXTRACT_FIXPROT);
++ if (err) {
++ hax_error("Failed to map into kernel\n");
++ if (!ISSET(flags, HAX_VCPUMEM_VALIDVA)) {
++ uvm_unmap(map, uva, uva + size);
++ uao_detach(uao);
++ }
++ kmem_free(hinfo, sizeof(struct hax_vcpu_mem_hinfo_t));
++ return -ENOMEM;
++ }
++
++ mem->uva = uva;
++ mem->kva = kva;
++ mem->hinfo = hinfo;
++ mem->size = size;
++ return 0;
++}
++
++uint64_t hax_get_memory_threshold(void)
++{
++#ifdef CONFIG_HAX_EPT2
++ // Since there is no memory cap, just return a sufficiently large value
++ return 1ULL << 48; // PHYSADDR_MAX + 1
++#else // !CONFIG_HAX_EPT2
++ return 0;
++#endif // CONFIG_HAX_EPT2
++}
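
Side note, not part of the patch set: hax_setup_vcpumem() above gives one anonymous UVM object two views, one mapped into the calling process and one extracted into kernel_map, so stores through either address land on the same pages. A rough userland analogy, assuming POSIX shared memory and an arbitrary object name, is mapping the same shm object twice:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    /* one backing object, two independent views of it */
    int fd = shm_open("/haxm_sketch", O_RDWR | O_CREAT, 0600);
    if (fd == -1 || ftruncate(fd, 4096) == -1)
        return 1;

    char *view_a = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    char *view_b = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    if (view_a == MAP_FAILED || view_b == MAP_FAILED)
        return 1;

    strcpy(view_a, "written through the first view");
    printf("second view sees: %s\n", view_b);          /* same backing pages */

    shm_unlink("/haxm_sketch");
    return 0;
}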
diff --git a/haxm/patches/patch-platforms_netbsd_hax__wrapper.c b/haxm/patches/patch-platforms_netbsd_hax__wrapper.c
new file mode 100644
index 0000000000..d701facadf
--- /dev/null
+++ b/haxm/patches/patch-platforms_netbsd_hax__wrapper.c
@@ -0,0 +1,336 @@
+$NetBSD$
+
+--- platforms/netbsd/hax_wrapper.c.orig 2018-11-24 22:22:37.787733087 +0000
++++ platforms/netbsd/hax_wrapper.c
+@@ -0,0 +1,331 @@
++/*
++ * Copyright (c) 2018 Kamil Rytarowski
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ *
++ * 1. Redistributions of source code must retain the above copyright notice,
++ * this list of conditions and the following disclaimer.
++ *
++ * 2. Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ *
++ * 3. Neither the name of the copyright holder nor the names of its
++ * contributors may be used to endorse or promote products derived from
++ * this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include <sys/param.h>
++#include <sys/types.h>
++#include <sys/atomic.h>
++#include <sys/mutex.h>
++#include <sys/systm.h>
++#include <sys/xcall.h>
++#include <sys/kmem.h>
++#include <machine/cpu.h>
++#include <machine/cpufunc.h>
++
++
++#include "../../include/hax.h"
++#include "../../core/include/hax_core_interface.h"
++#include "../../core/include/ia32.h"
++
++int default_hax_log_level = 3;
++int max_cpus;
++hax_cpumap_t cpu_online_map;
++
++int hax_log_level(int level, const char *fmt, ...)
++{
++ va_list args;
++ va_start(args, fmt);
++ if (level >= default_hax_log_level) {
++ printf("haxm: ");
++ vprintf(fmt, args);
++ }
++ va_end(args);
++ return 0;
++}
++
++uint32_t hax_cpuid(void)
++{
++ return curcpu()->ci_cpuid;
++}
++
++typedef struct smp_call_parameter {
++ void (*func)(void *);
++ void *param;
++ hax_cpumap_t *cpus;
++} smp_call_parameter;
++
++static void smp_cfunction(void *a1, void *a2 __unused)
++{
++ struct smp_call_parameter *info = a1;
++ hax_cpumap_t *cpus;
++ uint32_t cpuid;
++
++ cpus = info->cpus;
++ cpuid = hax_cpuid();
++ if (*cpus & (1ULL << cpuid))
++ info->func(info->param);
++}
++
++int hax_smp_call_function(hax_cpumap_t *cpus, void (*scfunc)(void *),
++ void *param)
++{
++ smp_call_parameter info;
++ uint64_t xc;
++
++ info.func = scfunc;
++ info.param = param;
++ info.cpus = cpus;
++ xc = xc_broadcast(XC_HIGHPRI, smp_cfunction, &info, NULL);
++ xc_wait(xc);
++ return 0;
++}
++
++/* XXX */
++int proc_event_pending(struct vcpu_t *vcpu)
++{
++ return vcpu_event_pending(vcpu);
++}
++
++void hax_disable_preemption(preempt_flag *eflags)
++{
++ kpreempt_disable();
++}
++
++void hax_enable_preemption(preempt_flag *eflags)
++{
++ kpreempt_enable();
++}
++
++void hax_enable_irq(void)
++{
++ x86_enable_intr();
++}
++
++void hax_disable_irq(void)
++{
++ x86_disable_intr();
++}
++
++void hax_error(char *fmt, ...)
++{
++ va_list args;
++
++ if (HAX_LOGE < default_hax_log_level)
++ return;
++
++ va_start(args, fmt);
++ printf("haxm_error: ");
++ vprintf(fmt, args);
++ va_end(args);
++}
++
++void hax_warning(char *fmt, ...)
++{
++ va_list args;
++
++ if (HAX_LOGW < default_hax_log_level)
++ return;
++
++ va_start(args, fmt);
++ printf("haxm_warning: ");
++ vprintf(fmt, args);
++ va_end(args);
++}
++
++void hax_info(char *fmt, ...)
++{
++ va_list args;
++
++ if (HAX_LOGI < default_hax_log_level)
++ return;
++
++ va_start(args, fmt);
++ printf("haxm_info: ");
++ vprintf(fmt, args);
++ va_end(args);
++}
++
++void hax_debug(char *fmt, ...)
++{
++ va_list args;
++
++ if (HAX_LOGD < default_hax_log_level)
++ return;
++
++ va_start(args, fmt);
++ printf("haxm_debug: ");
++ vprintf(fmt, args);
++ va_end(args);
++}
++
++void hax_panic_vcpu(struct vcpu_t *v, char *fmt, ...)
++{
++ va_list args;
++
++ va_start(args, fmt);
++ printf("haxm_panic: ");
++ vprintf(fmt, args);
++ va_end(args);
++
++ vcpu_set_panic(v);
++}
++
++/* Misc */
++void hax_smp_mb(void)
++{
++ membar_sync();
++}
++
++/* Compare-Exchange */
++bool hax_cmpxchg32(uint32_t old_val, uint32_t new_val, volatile uint32_t *addr)
++{
++ return atomic_cas_32(addr, old_val, new_val) == old_val;
++}
++
++bool hax_cmpxchg64(uint64_t old_val, uint64_t new_val, volatile uint64_t *addr)
++{
++ return atomic_cas_64(addr, old_val, new_val) == old_val;
++}
++
++/* Atomics */
++hax_atomic_t hax_atomic_add(volatile hax_atomic_t *atom, uint32_t value)
++{
++ return atomic_add_32_nv(atom, value) - value;
++}
++
++hax_atomic_t hax_atomic_inc(volatile hax_atomic_t *atom)
++{
++ return atomic_inc_32_nv(atom) - 1;
++}
++
++hax_atomic_t hax_atomic_dec(volatile hax_atomic_t *atom)
++{
++ return atomic_dec_32_nv(atom) + 1;
++}
++
++int hax_test_and_set_bit(int bit, uint64_t *memory)
++{
++ volatile uint64_t *val;
++ uint64_t mask, old;
++
++ val = (volatile uint64_t *)memory;
++ mask = 1ULL << bit;
++
++ do {
++ old = *val;
++ if ((old & mask) != 0)
++ break;
++ } while (atomic_cas_64(val, old, old | mask) != old);
++
++ return !!(old & mask);
++}
++
++int hax_test_and_clear_bit(int bit, uint64_t *memory)
++{
++ volatile uint64_t *val;
++ uint64_t mask, old;
++
++ val = (volatile uint64_t *)memory;
++ mask = 1ULL << bit;
++
++ do {
++ old = *val;
++ if ((old & mask) == 0)
++ break;
++ } while (atomic_cas_64(val, old, old & ~mask) != old);
++
++ return !!(old & mask);
++}
++
++/* Spinlock */
++struct hax_spinlock {
++ kmutex_t lock;
++};
++
++hax_spinlock *hax_spinlock_alloc_init(void)
++{
++ struct hax_spinlock *lock;
++
++ lock = kmem_alloc(sizeof(struct hax_spinlock), KM_SLEEP);
++ if (!lock) {
++ hax_error("Could not allocate spinlock\n");
++ return NULL;
++ }
++ mutex_init(&lock->lock, MUTEX_DEFAULT, IPL_VM);
++
++ return lock;
++}
++
++void hax_spinlock_free(hax_spinlock *lock)
++{
++ if (!lock)
++ return;
++
++ mutex_destroy(&lock->lock);
++ kmem_free(lock, sizeof(struct hax_spinlock));
++}
++
++void hax_spin_lock(hax_spinlock *lock)
++{
++ mutex_spin_enter(&lock->lock);
++}
++
++void hax_spin_unlock(hax_spinlock *lock)
++{
++ mutex_spin_exit(&lock->lock);
++}
++
++/* Mutex */
++hax_mutex hax_mutex_alloc_init(void)
++{
++ kmutex_t *lock;
++
++ lock = kmem_alloc(sizeof(kmutex_t), KM_SLEEP);
++ if (!lock) {
++ hax_error("Could not allocate mutex\n");
++ return NULL;
++ }
++ mutex_init(lock, MUTEX_DEFAULT, IPL_NONE);
++ return lock;
++}
++
++void hax_mutex_lock(hax_mutex lock)
++{
++ if (!lock)
++ return;
++
++ mutex_enter(lock);
++}
++
++void hax_mutex_unlock(hax_mutex lock)
++{
++ if (!lock)
++ return;
++
++ mutex_exit(lock);
++}
++
++void hax_mutex_free(hax_mutex lock)
++{
++ if (!lock)
++ return;
++
++ mutex_destroy(lock);
++ kmem_free(lock, sizeof(kmutex_t));
++}
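
Side note, not part of the patch set: hax_test_and_set_bit() above is a classic compare-and-swap retry loop on a 64-bit word. The same technique written with C11 <stdatomic.h> instead of the kernel's atomic_cas_64() looks like this minimal sketch (the sketch_* names are made up):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* CAS-retry loop: set the bit, return its previous value. */
static int sketch_test_and_set_bit(int bit, _Atomic uint64_t *word)
{
    uint64_t mask = 1ULL << bit;
    uint64_t old = atomic_load(word);

    while (!(old & mask)) {
        /* on failure, 'old' is reloaded with the current value and we retry */
        if (atomic_compare_exchange_weak(word, &old, old | mask))
            break;
    }
    return !!(old & mask);
}

int main(void)
{
    _Atomic uint64_t word = 0;

    printf("%d\n", sketch_test_and_set_bit(40, &word));   /* 0: bit was clear */
    printf("%d\n", sketch_test_and_set_bit(40, &word));   /* 1: bit already set */
    return 0;
}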