pkgsrc-WIP-changes archive
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]
wip/valgrind-netbsd: import valgrind-3.14.0
Module Name: pkgsrc-wip
Committed By: Kamil Rytarowski <n54%gmx.com@localhost>
Pushed By: kamil
Date: Mon Apr 1 18:29:26 2019 +0200
Changeset: 185a2113c3e86c790cded233bf65d26ed9cc349f
Added Files:
valgrind-netbsd/DESCR
valgrind-netbsd/Makefile
valgrind-netbsd/PLIST
valgrind-netbsd/PLIST.Darwin
valgrind-netbsd/distinfo
valgrind-netbsd/patches/patch-Makefile.am
valgrind-netbsd/patches/patch-Makefile.tool.am
valgrind-netbsd/patches/patch-configure.ac
valgrind-netbsd/patches/patch-coregrind_Makefile.am
valgrind-netbsd/patches/patch-coregrind_launcher-linux.c
valgrind-netbsd/patches/patch-coregrind_link__tool__exe__netbsd.in
valgrind-netbsd/patches/patch-coregrind_m__addrinfo.c
valgrind-netbsd/patches/patch-coregrind_m__aspacemgr_aspacemgr-common.c
valgrind-netbsd/patches/patch-coregrind_m__aspacemgr_aspacemgr-linux.c
valgrind-netbsd/patches/patch-coregrind_m__coredump_coredump-elf.c
valgrind-netbsd/patches/patch-coregrind_m__debuginfo_d3basics.c
valgrind-netbsd/patches/patch-coregrind_m__debuginfo_debuginfo.c
valgrind-netbsd/patches/patch-coregrind_m__debuginfo_priv__readpdb.h
valgrind-netbsd/patches/patch-coregrind_m__debuginfo_readdwarf.c
valgrind-netbsd/patches/patch-coregrind_m__debuginfo_readdwarf3.c
valgrind-netbsd/patches/patch-coregrind_m__debuginfo_readelf.c
valgrind-netbsd/patches/patch-coregrind_m__debuginfo_readpdb.c
valgrind-netbsd/patches/patch-coregrind_m__debuginfo_storage.c
valgrind-netbsd/patches/patch-coregrind_m__debuglog.c
valgrind-netbsd/patches/patch-coregrind_m__dispatch_dispatch-amd64-netbsd.S
valgrind-netbsd/patches/patch-coregrind_m__errormgr.c
valgrind-netbsd/patches/patch-coregrind_m__initimg_initimg-netbsd.c
valgrind-netbsd/patches/patch-coregrind_m__libcassert.c
valgrind-netbsd/patches/patch-coregrind_m__libcfile.c
valgrind-netbsd/patches/patch-coregrind_m__libcprint.c
valgrind-netbsd/patches/patch-coregrind_m__libcproc.c
valgrind-netbsd/patches/patch-coregrind_m__libcsetjmp.c
valgrind-netbsd/patches/patch-coregrind_m__libcsignal.c
valgrind-netbsd/patches/patch-coregrind_m__machine.c
valgrind-netbsd/patches/patch-coregrind_m__main.c
valgrind-netbsd/patches/patch-coregrind_m__redir.c
valgrind-netbsd/patches/patch-coregrind_m__replacemalloc_vg__replace__malloc.c
valgrind-netbsd/patches/patch-coregrind_m__scheduler_scheduler.c
valgrind-netbsd/patches/patch-coregrind_m__sigframe_sigframe-amd64-netbsd.c
valgrind-netbsd/patches/patch-coregrind_m__sigframe_sigframe-common.c
valgrind-netbsd/patches/patch-coregrind_m__signals.c
valgrind-netbsd/patches/patch-coregrind_m__stacktrace.c
valgrind-netbsd/patches/patch-coregrind_m__syscall.c
valgrind-netbsd/patches/patch-coregrind_m__syswrap_priv__syswrap-netbsd.h
valgrind-netbsd/patches/patch-coregrind_m__syswrap_priv__types__n__macros.h
valgrind-netbsd/patches/patch-coregrind_m__syswrap_syscall-amd64-netbsd.S
valgrind-netbsd/patches/patch-coregrind_m__syswrap_syswrap-amd64-netbsd.c
valgrind-netbsd/patches/patch-coregrind_m__syswrap_syswrap-generic.c
valgrind-netbsd/patches/patch-coregrind_m__syswrap_syswrap-main.c
valgrind-netbsd/patches/patch-coregrind_m__syswrap_syswrap-netbsd-variants.c
valgrind-netbsd/patches/patch-coregrind_m__syswrap_syswrap-netbsd.c
valgrind-netbsd/patches/patch-coregrind_m__trampoline.S
valgrind-netbsd/patches/patch-coregrind_m__translate.c
valgrind-netbsd/patches/patch-coregrind_m__ume_elf.c
valgrind-netbsd/patches/patch-coregrind_m__ume_main.c
valgrind-netbsd/patches/patch-coregrind_m__ume_priv__ume.h
valgrind-netbsd/patches/patch-coregrind_m__vki.c
valgrind-netbsd/patches/patch-coregrind_m__vkiscnums.c
valgrind-netbsd/patches/patch-coregrind_pub__core__debuginfo.h
valgrind-netbsd/patches/patch-coregrind_pub__core__initimg.h
valgrind-netbsd/patches/patch-coregrind_pub__core__machine.h
valgrind-netbsd/patches/patch-coregrind_pub__core__mallocfree.h
valgrind-netbsd/patches/patch-coregrind_pub__core__sigframe.h
valgrind-netbsd/patches/patch-coregrind_pub__core__syscall.h
valgrind-netbsd/patches/patch-coregrind_pub__core__trampoline.h
valgrind-netbsd/patches/patch-coregrind_vg__preloaded.c
valgrind-netbsd/patches/patch-drd_Makefile.am
valgrind-netbsd/patches/patch-drd_drd__main.c
valgrind-netbsd/patches/patch-helgrind_Makefile.am
valgrind-netbsd/patches/patch-helgrind_hg__intercepts.c
valgrind-netbsd/patches/patch-include_Makefile.am
valgrind-netbsd/patches/patch-include_pub__tool__basics.h
valgrind-netbsd/patches/patch-include_pub__tool__basics__asm.h
valgrind-netbsd/patches/patch-include_pub__tool__machine.h
valgrind-netbsd/patches/patch-include_pub__tool__redir.h
valgrind-netbsd/patches/patch-include_pub__tool__vki.h
valgrind-netbsd/patches/patch-include_pub__tool__vkiscnums.h
valgrind-netbsd/patches/patch-include_pub__tool__vkiscnums__asm.h
valgrind-netbsd/patches/patch-include_valgrind.h
valgrind-netbsd/patches/patch-include_vki_vki-amd64-netbsd.h
valgrind-netbsd/patches/patch-include_vki_vki-machine-types-amd64-netbsd.h
valgrind-netbsd/patches/patch-include_vki_vki-netbsd.h
valgrind-netbsd/patches/patch-include_vki_vki-scnums-netbsd.h
valgrind-netbsd/patches/patch-memcheck_Makefile.am
valgrind-netbsd/patches/patch-memcheck_mc__errors.c
valgrind-netbsd/patches/patch-memcheck_mc__leakcheck.c
valgrind-netbsd/patches/patch-memcheck_mc__machine.c
valgrind-netbsd/patches/patch-memcheck_mc__main.c
valgrind-netbsd/patches/patch-memcheck_mc__malloc__wrappers.c
valgrind-netbsd/patches/patch-memcheck_mc__translate.c
valgrind-netbsd/patches/patch-memcheck_tests_vbit-test_util.c
valgrind-netbsd/patches/patch-memcheck_tests_vbit-test_vbits.c
valgrind-netbsd/patches/patch-netbsd.supp
valgrind-netbsd/patches/patch-shared_vg__replace__strmem.c
Log Message:
wip/valgrind-netbsd: import valgrind-3.14.0
This is an initial support patchset for NetBSD/amd64. This repository
contains a lot of draft and debug code that will go away once everything
becomes functional.
More information:
https://mail-index.netbsd.org/tech-toolchain/2019/04/01/msg003429.html
I plan to sync my local branch with pkgsrc-wip from time to time.
To see a diff of this commit:
https://wip.pkgsrc.org/cgi-bin/gitweb.cgi?p=pkgsrc-wip.git;a=commitdiff;h=185a2113c3e86c790cded233bf65d26ed9cc349f
Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.
diffstat:
valgrind-netbsd/DESCR | 15 +
valgrind-netbsd/Makefile | 66 +
valgrind-netbsd/PLIST | 311 ++
valgrind-netbsd/PLIST.Darwin | 27 +
valgrind-netbsd/distinfo | 95 +
valgrind-netbsd/patches/patch-Makefile.am | 17 +
valgrind-netbsd/patches/patch-Makefile.tool.am | 51 +
valgrind-netbsd/patches/patch-configure.ac | 176 +
.../patches/patch-coregrind_Makefile.am | 101 +
.../patches/patch-coregrind_launcher-linux.c | 83 +
.../patch-coregrind_link__tool__exe__netbsd.in | 89 +
.../patches/patch-coregrind_m__addrinfo.c | 23 +
...patch-coregrind_m__aspacemgr_aspacemgr-common.c | 110 +
.../patch-coregrind_m__aspacemgr_aspacemgr-linux.c | 170 +
.../patch-coregrind_m__coredump_coredump-elf.c | 39 +
.../patch-coregrind_m__debuginfo_d3basics.c | 13 +
.../patch-coregrind_m__debuginfo_debuginfo.c | 58 +
.../patch-coregrind_m__debuginfo_priv__readpdb.h | 22 +
.../patch-coregrind_m__debuginfo_readdwarf.c | 31 +
.../patch-coregrind_m__debuginfo_readdwarf3.c | 22 +
.../patches/patch-coregrind_m__debuginfo_readelf.c | 41 +
.../patches/patch-coregrind_m__debuginfo_readpdb.c | 22 +
.../patches/patch-coregrind_m__debuginfo_storage.c | 13 +
.../patches/patch-coregrind_m__debuglog.c | 54 +
...h-coregrind_m__dispatch_dispatch-amd64-netbsd.S | 261 ++
.../patches/patch-coregrind_m__errormgr.c | 69 +
.../patch-coregrind_m__initimg_initimg-netbsd.c | 898 ++++
.../patches/patch-coregrind_m__libcassert.c | 22 +
.../patches/patch-coregrind_m__libcfile.c | 355 ++
.../patches/patch-coregrind_m__libcprint.c | 13 +
.../patches/patch-coregrind_m__libcproc.c | 146 +
.../patches/patch-coregrind_m__libcsetjmp.c | 38 +
.../patches/patch-coregrind_m__libcsignal.c | 77 +
.../patches/patch-coregrind_m__machine.c | 13 +
valgrind-netbsd/patches/patch-coregrind_m__main.c | 52 +
valgrind-netbsd/patches/patch-coregrind_m__redir.c | 23 +
...oregrind_m__replacemalloc_vg__replace__malloc.c | 216 +
.../patch-coregrind_m__scheduler_scheduler.c | 22 +
...h-coregrind_m__sigframe_sigframe-amd64-netbsd.c | 630 +++
.../patch-coregrind_m__sigframe_sigframe-common.c | 13 +
.../patches/patch-coregrind_m__signals.c | 202 +
.../patches/patch-coregrind_m__stacktrace.c | 48 +
.../patches/patch-coregrind_m__syscall.c | 109 +
...tch-coregrind_m__syswrap_priv__syswrap-netbsd.h | 286 ++
...h-coregrind_m__syswrap_priv__types__n__macros.h | 91 +
...tch-coregrind_m__syswrap_syscall-amd64-netbsd.S | 208 +
...tch-coregrind_m__syswrap_syswrap-amd64-netbsd.c | 725 ++++
.../patch-coregrind_m__syswrap_syswrap-generic.c | 151 +
.../patch-coregrind_m__syswrap_syswrap-main.c | 323 ++
...-coregrind_m__syswrap_syswrap-netbsd-variants.c | 100 +
.../patch-coregrind_m__syswrap_syswrap-netbsd.c | 4448 ++++++++++++++++++++
.../patches/patch-coregrind_m__trampoline.S | 58 +
.../patches/patch-coregrind_m__translate.c | 15 +
.../patches/patch-coregrind_m__ume_elf.c | 34 +
.../patches/patch-coregrind_m__ume_main.c | 55 +
.../patches/patch-coregrind_m__ume_priv__ume.h | 33 +
valgrind-netbsd/patches/patch-coregrind_m__vki.c | 36 +
.../patches/patch-coregrind_m__vkiscnums.c | 23 +
.../patches/patch-coregrind_pub__core__debuginfo.h | 13 +
.../patches/patch-coregrind_pub__core__initimg.h | 24 +
.../patches/patch-coregrind_pub__core__machine.h | 13 +
.../patch-coregrind_pub__core__mallocfree.h | 12 +
.../patches/patch-coregrind_pub__core__sigframe.h | 18 +
.../patches/patch-coregrind_pub__core__syscall.h | 12 +
.../patch-coregrind_pub__core__trampoline.h | 15 +
.../patches/patch-coregrind_vg__preloaded.c | 13 +
valgrind-netbsd/patches/patch-drd_Makefile.am | 24 +
valgrind-netbsd/patches/patch-drd_drd__main.c | 13 +
valgrind-netbsd/patches/patch-helgrind_Makefile.am | 24 +
.../patches/patch-helgrind_hg__intercepts.c | 329 ++
valgrind-netbsd/patches/patch-include_Makefile.am | 22 +
.../patches/patch-include_pub__tool__basics.h | 38 +
.../patches/patch-include_pub__tool__basics__asm.h | 13 +
.../patches/patch-include_pub__tool__machine.h | 13 +
.../patches/patch-include_pub__tool__redir.h | 41 +
.../patches/patch-include_pub__tool__vki.h | 13 +
.../patches/patch-include_pub__tool__vkiscnums.h | 16 +
.../patch-include_pub__tool__vkiscnums__asm.h | 14 +
valgrind-netbsd/patches/patch-include_valgrind.h | 49 +
.../patches/patch-include_vki_vki-amd64-netbsd.h | 42 +
...ch-include_vki_vki-machine-types-amd64-netbsd.h | 227 +
.../patches/patch-include_vki_vki-netbsd.h | 3302 +++++++++++++++
.../patches/patch-include_vki_vki-scnums-netbsd.h | 471 +++
valgrind-netbsd/patches/patch-memcheck_Makefile.am | 21 +
.../patches/patch-memcheck_mc__errors.c | 376 ++
.../patches/patch-memcheck_mc__leakcheck.c | 303 ++
.../patches/patch-memcheck_mc__machine.c | 43 +
valgrind-netbsd/patches/patch-memcheck_mc__main.c | 1687 ++++++++
.../patches/patch-memcheck_mc__malloc__wrappers.c | 336 ++
.../patches/patch-memcheck_mc__translate.c | 1296 ++++++
.../patches/patch-memcheck_tests_vbit-test_util.c | 16 +
.../patches/patch-memcheck_tests_vbit-test_vbits.c | 16 +
valgrind-netbsd/patches/patch-netbsd.supp | 6 +
.../patches/patch-shared_vg__replace__strmem.c | 17 +
94 files changed, 20330 insertions(+)
diffs:
diff --git a/valgrind-netbsd/DESCR b/valgrind-netbsd/DESCR
new file mode 100644
index 0000000000..d0717c71fe
--- /dev/null
+++ b/valgrind-netbsd/DESCR
@@ -0,0 +1,15 @@
+Valgrind is an instrumentation framework for building dynamic analysis
+tools. There are Valgrind tools that can automatically detect many memory
+management and threading bugs, and profile your programs in detail. You
+can also use Valgrind to build new tools.
+
+The Valgrind distribution currently includes six production-quality
+tools: a memory error detector, two thread error detectors, a cache
+and branch-prediction profiler, a call-graph generating cache and
+branch-prediction profiler, and a heap profiler. It also includes three
+experimental tools: a heap/stack/global array overrun detector, a second
+heap profiler that examines how heap blocks are used, and a SimPoint basic
+block vector generator. It runs on the following platforms: X86/Linux,
+AMD64/Linux, ARM/Linux, PPC32/Linux, PPC64/Linux, S390X/Linux, MIPS/Linux,
+ARM/Android (2.3.x and later), X86/Android (4.0 and later), X86/Darwin
+and AMD64/Darwin (Mac OS X 10.6 and 10.7, with limited support for 10.8).
diff --git a/valgrind-netbsd/Makefile b/valgrind-netbsd/Makefile
new file mode 100644
index 0000000000..9a6648aead
--- /dev/null
+++ b/valgrind-netbsd/Makefile
@@ -0,0 +1,66 @@
+# $NetBSD: Makefile,v 1.22 2016/01/16 20:30:11 ryoon Exp $
+#
+
+PKGSRC_USE_SSP= no
+
+DISTNAME= valgrind-3.14.0
+CATEGORIES= devel
+MASTER_SITES= http://valgrind.org/downloads/
+EXTRACT_SUFX= .tar.bz2
+
+MAINTAINER= alnsn%NetBSD.org@localhost
+HOMEPAGE= http://valgrind.org/
+COMMENT= Debugging and profiling tools
+LICENSE= gnu-gpl-v2
+
+USE_PKGLOCALEDIR= yes
+PKGCONFIG_OVERRIDE+= ${WRKSRC}/valgrind.pc.in
+GNU_CONFIGURE= yes
+
+ONLY_FOR_PLATFORM= Darwin-*-i386 Darwin-*-x86_64 \
+ Linux-*-x86_64 \
+ Linux-*-i386 \
+ Linux-*-arm* \
+ SunOS-*-i386 SunOS-*-x86_64 \
+ NetBSD-*-x86_64
+
+USE_TOOLS+= gmake perl aclocal autoheader automake autoconf
+USE_LANGUAGES= c c++
+TEST_TARGET= regtest
+
+REPLACE_INTERPRETER+= perl
+REPLACE.perl.old= .*/bin/perl.*
+REPLACE.perl.new= ${PREFIX}/bin/perl
+REPLACE_FILES.perl+= callgrind/callgrind_annotate.in
+REPLACE_FILES.perl+= callgrind/callgrind_control.in
+
+.include "../../mk/bsd.prefs.mk"
+
+.if ${MACHINE_ARCH:C/arm.*/arm/} == "arm"
+PLIST_SUBST+= VGCONF_ARCH_PRI=arm
+.endif
+
+.if ${MACHINE_ARCH} == "i386"
+PLIST_SUBST+= VGCONF_ARCH_PRI=x86
+.endif
+
+.if ${MACHINE_ARCH} == "x86_64"
+PLIST_SUBST+= VGCONF_ARCH_PRI=amd64
+#PLIST_SUBST+= VGCONF_ARCH_SEC=x86
+CONFIGURE_ARGS+= --enable-only64bit
+.endif
+
+#CFLAGS+= -fsanitize-coverage=trace-pc
+
+CFLAGS+= -O0 -g -ggdb
+CXXFLAGS+= -O0 -g -ggdb
+INSTALL_UNSTRIPPED= yes
+BUILDLINK_TRANSFORM+= rm:-s
+
+#CFLAGS+= -fsanitize=address
+#LDFLAGS+= -fsanitize=address
+
+pre-configure:
+ ${RUN} cd ${WRKSRC} && ${SH} ./autogen.sh
+
+.include "../../mk/bsd.pkg.mk"
diff --git a/valgrind-netbsd/PLIST b/valgrind-netbsd/PLIST
new file mode 100644
index 0000000000..78630cb2eb
--- /dev/null
+++ b/valgrind-netbsd/PLIST
@@ -0,0 +1,311 @@
+@comment $NetBSD$
+bin/callgrind_annotate
+bin/callgrind_control
+bin/cg_annotate
+bin/cg_diff
+bin/cg_merge
+bin/ms_print
+bin/valgrind
+bin/valgrind-di-server
+bin/valgrind-listener
+bin/vgdb
+include/valgrind/callgrind.h
+include/valgrind/config.h
+include/valgrind/drd.h
+include/valgrind/helgrind.h
+include/valgrind/libvex.h
+include/valgrind/libvex_basictypes.h
+include/valgrind/libvex_emnote.h
+include/valgrind/libvex_guest_amd64.h
+include/valgrind/libvex_guest_arm.h
+include/valgrind/libvex_guest_arm64.h
+include/valgrind/libvex_guest_mips32.h
+include/valgrind/libvex_guest_mips64.h
+include/valgrind/libvex_guest_offsets.h
+include/valgrind/libvex_guest_ppc32.h
+include/valgrind/libvex_guest_ppc64.h
+include/valgrind/libvex_guest_s390x.h
+include/valgrind/libvex_guest_x86.h
+include/valgrind/libvex_inner.h
+include/valgrind/libvex_ir.h
+include/valgrind/libvex_s390x_common.h
+include/valgrind/libvex_trc_values.h
+include/valgrind/memcheck.h
+include/valgrind/pub_tool_addrinfo.h
+include/valgrind/pub_tool_aspacehl.h
+include/valgrind/pub_tool_aspacemgr.h
+include/valgrind/pub_tool_basics.h
+include/valgrind/pub_tool_basics_asm.h
+include/valgrind/pub_tool_clientstate.h
+include/valgrind/pub_tool_clreq.h
+include/valgrind/pub_tool_debuginfo.h
+include/valgrind/pub_tool_deduppoolalloc.h
+include/valgrind/pub_tool_errormgr.h
+include/valgrind/pub_tool_execontext.h
+include/valgrind/pub_tool_gdbserver.h
+include/valgrind/pub_tool_guest.h
+include/valgrind/pub_tool_hashtable.h
+include/valgrind/pub_tool_libcassert.h
+include/valgrind/pub_tool_libcbase.h
+include/valgrind/pub_tool_libcfile.h
+include/valgrind/pub_tool_libcprint.h
+include/valgrind/pub_tool_libcproc.h
+include/valgrind/pub_tool_libcsetjmp.h
+include/valgrind/pub_tool_libcsignal.h
+include/valgrind/pub_tool_machine.h
+include/valgrind/pub_tool_mallocfree.h
+include/valgrind/pub_tool_options.h
+include/valgrind/pub_tool_oset.h
+include/valgrind/pub_tool_poolalloc.h
+include/valgrind/pub_tool_rangemap.h
+include/valgrind/pub_tool_redir.h
+include/valgrind/pub_tool_replacemalloc.h
+include/valgrind/pub_tool_seqmatch.h
+include/valgrind/pub_tool_signals.h
+include/valgrind/pub_tool_sparsewa.h
+include/valgrind/pub_tool_stacktrace.h
+include/valgrind/pub_tool_threadstate.h
+include/valgrind/pub_tool_tooliface.h
+include/valgrind/pub_tool_transtab.h
+include/valgrind/pub_tool_vki.h
+include/valgrind/pub_tool_vkiscnums.h
+include/valgrind/pub_tool_vkiscnums_asm.h
+include/valgrind/pub_tool_wordfm.h
+include/valgrind/pub_tool_xarray.h
+include/valgrind/pub_tool_xtmemory.h
+include/valgrind/pub_tool_xtree.h
+include/valgrind/valgrind.h
+include/valgrind/vki/vki-amd64-linux.h
+include/valgrind/vki/vki-amd64-netbsd.h
+include/valgrind/vki/vki-arm-linux.h
+include/valgrind/vki/vki-arm64-linux.h
+include/valgrind/vki/vki-darwin.h
+include/valgrind/vki/vki-linux-drm.h
+include/valgrind/vki/vki-linux.h
+include/valgrind/vki/vki-machine-types-amd64-netbsd.h
+include/valgrind/vki/vki-mips32-linux.h
+include/valgrind/vki/vki-mips64-linux.h
+include/valgrind/vki/vki-netbsd.h
+include/valgrind/vki/vki-posixtypes-amd64-linux.h
+include/valgrind/vki/vki-posixtypes-arm-linux.h
+include/valgrind/vki/vki-posixtypes-arm64-linux.h
+include/valgrind/vki/vki-posixtypes-mips32-linux.h
+include/valgrind/vki/vki-posixtypes-mips64-linux.h
+include/valgrind/vki/vki-posixtypes-ppc32-linux.h
+include/valgrind/vki/vki-posixtypes-ppc64-linux.h
+include/valgrind/vki/vki-posixtypes-s390x-linux.h
+include/valgrind/vki/vki-posixtypes-x86-linux.h
+include/valgrind/vki/vki-ppc32-linux.h
+include/valgrind/vki/vki-ppc64-linux.h
+include/valgrind/vki/vki-s390x-linux.h
+include/valgrind/vki/vki-scnums-amd64-linux.h
+include/valgrind/vki/vki-scnums-arm-linux.h
+include/valgrind/vki/vki-scnums-arm64-linux.h
+include/valgrind/vki/vki-scnums-darwin.h
+include/valgrind/vki/vki-scnums-mips32-linux.h
+include/valgrind/vki/vki-scnums-mips64-linux.h
+include/valgrind/vki/vki-scnums-netbsd.h
+include/valgrind/vki/vki-scnums-ppc32-linux.h
+include/valgrind/vki/vki-scnums-ppc64-linux.h
+include/valgrind/vki/vki-scnums-s390x-linux.h
+include/valgrind/vki/vki-scnums-solaris.h
+include/valgrind/vki/vki-scnums-x86-linux.h
+include/valgrind/vki/vki-solaris-repcache.h
+include/valgrind/vki/vki-solaris.h
+include/valgrind/vki/vki-x86-linux.h
+include/valgrind/vki/vki-xen-domctl.h
+include/valgrind/vki/vki-xen-evtchn.h
+include/valgrind/vki/vki-xen-gnttab.h
+include/valgrind/vki/vki-xen-hvm.h
+include/valgrind/vki/vki-xen-memory.h
+include/valgrind/vki/vki-xen-mmuext.h
+include/valgrind/vki/vki-xen-physdev.h
+include/valgrind/vki/vki-xen-schedop.h
+include/valgrind/vki/vki-xen-sysctl.h
+include/valgrind/vki/vki-xen-tmem.h
+include/valgrind/vki/vki-xen-version.h
+include/valgrind/vki/vki-xen-x86.h
+include/valgrind/vki/vki-xen-xsm.h
+include/valgrind/vki/vki-xen.h
+lib/pkgconfig/valgrind.pc
+lib/valgrind/32bit-core-valgrind-s1.xml
+lib/valgrind/32bit-core-valgrind-s2.xml
+lib/valgrind/32bit-core.xml
+lib/valgrind/32bit-linux-valgrind-s1.xml
+lib/valgrind/32bit-linux-valgrind-s2.xml
+lib/valgrind/32bit-linux.xml
+lib/valgrind/32bit-sse-valgrind-s1.xml
+lib/valgrind/32bit-sse-valgrind-s2.xml
+lib/valgrind/32bit-sse.xml
+lib/valgrind/64bit-avx-valgrind-s1.xml
+lib/valgrind/64bit-avx-valgrind-s2.xml
+lib/valgrind/64bit-avx.xml
+lib/valgrind/64bit-core-valgrind-s1.xml
+lib/valgrind/64bit-core-valgrind-s2.xml
+lib/valgrind/64bit-core.xml
+lib/valgrind/64bit-linux-valgrind-s1.xml
+lib/valgrind/64bit-linux-valgrind-s2.xml
+lib/valgrind/64bit-linux.xml
+lib/valgrind/64bit-sse-valgrind-s1.xml
+lib/valgrind/64bit-sse-valgrind-s2.xml
+lib/valgrind/64bit-sse.xml
+lib/valgrind/amd64-avx-coresse-valgrind.xml
+lib/valgrind/amd64-avx-coresse.xml
+lib/valgrind/amd64-avx-linux-valgrind.xml
+lib/valgrind/amd64-avx-linux.xml
+lib/valgrind/amd64-coresse-valgrind.xml
+lib/valgrind/amd64-linux-valgrind.xml
+lib/valgrind/arm-core-valgrind-s1.xml
+lib/valgrind/arm-core-valgrind-s2.xml
+lib/valgrind/arm-core.xml
+lib/valgrind/arm-vfpv3-valgrind-s1.xml
+lib/valgrind/arm-vfpv3-valgrind-s2.xml
+lib/valgrind/arm-vfpv3.xml
+lib/valgrind/arm-with-vfpv3-valgrind.xml
+lib/valgrind/arm-with-vfpv3.xml
+lib/valgrind/cachegrind-amd64-netbsd
+lib/valgrind/callgrind-amd64-netbsd
+lib/valgrind/default.supp
+lib/valgrind/drd-amd64-netbsd
+lib/valgrind/exp-bbv-amd64-netbsd
+lib/valgrind/exp-dhat-amd64-netbsd
+lib/valgrind/exp-sgcheck-amd64-netbsd
+lib/valgrind/getoff-amd64-netbsd
+lib/valgrind/helgrind-amd64-netbsd
+lib/valgrind/i386-coresse-valgrind.xml
+lib/valgrind/i386-linux-valgrind.xml
+lib/valgrind/lackey-amd64-netbsd
+lib/valgrind/libcoregrind-amd64-netbsd.a
+lib/valgrind/libreplacemalloc_toolpreload-amd64-netbsd.a
+lib/valgrind/libvex-amd64-netbsd.a
+lib/valgrind/libvexmultiarch-amd64-netbsd.a
+lib/valgrind/massif-amd64-netbsd
+lib/valgrind/memcheck-amd64-netbsd
+lib/valgrind/mips-cp0-valgrind-s1.xml
+lib/valgrind/mips-cp0-valgrind-s2.xml
+lib/valgrind/mips-cp0.xml
+lib/valgrind/mips-cpu-valgrind-s1.xml
+lib/valgrind/mips-cpu-valgrind-s2.xml
+lib/valgrind/mips-cpu.xml
+lib/valgrind/mips-fpu-valgrind-s1.xml
+lib/valgrind/mips-fpu-valgrind-s2.xml
+lib/valgrind/mips-fpu.xml
+lib/valgrind/mips-linux-valgrind.xml
+lib/valgrind/mips-linux.xml
+lib/valgrind/mips64-cp0-valgrind-s1.xml
+lib/valgrind/mips64-cp0-valgrind-s2.xml
+lib/valgrind/mips64-cp0.xml
+lib/valgrind/mips64-cpu-valgrind-s1.xml
+lib/valgrind/mips64-cpu-valgrind-s2.xml
+lib/valgrind/mips64-cpu.xml
+lib/valgrind/mips64-fpu-valgrind-s1.xml
+lib/valgrind/mips64-fpu-valgrind-s2.xml
+lib/valgrind/mips64-fpu.xml
+lib/valgrind/mips64-linux-valgrind.xml
+lib/valgrind/mips64-linux.xml
+lib/valgrind/none-amd64-netbsd
+lib/valgrind/power-altivec-valgrind-s1.xml
+lib/valgrind/power-altivec-valgrind-s2.xml
+lib/valgrind/power-altivec.xml
+lib/valgrind/power-core-valgrind-s1.xml
+lib/valgrind/power-core-valgrind-s2.xml
+lib/valgrind/power-core.xml
+lib/valgrind/power-fpu-valgrind-s1.xml
+lib/valgrind/power-fpu-valgrind-s2.xml
+lib/valgrind/power-fpu.xml
+lib/valgrind/power-linux-valgrind-s1.xml
+lib/valgrind/power-linux-valgrind-s2.xml
+lib/valgrind/power-linux.xml
+lib/valgrind/power-vsx-valgrind-s1.xml
+lib/valgrind/power-vsx-valgrind-s2.xml
+lib/valgrind/power-vsx.xml
+lib/valgrind/power64-core-valgrind-s1.xml
+lib/valgrind/power64-core-valgrind-s2.xml
+lib/valgrind/power64-core.xml
+lib/valgrind/power64-core2-valgrind-s1.xml
+lib/valgrind/power64-core2-valgrind-s2.xml
+lib/valgrind/power64-linux-valgrind-s1.xml
+lib/valgrind/power64-linux-valgrind-s2.xml
+lib/valgrind/power64-linux.xml
+lib/valgrind/powerpc-altivec32l-valgrind.xml
+lib/valgrind/powerpc-altivec32l.xml
+lib/valgrind/powerpc-altivec64l-valgrind.xml
+lib/valgrind/powerpc-altivec64l.xml
+lib/valgrind/s390-acr-valgrind-s1.xml
+lib/valgrind/s390-acr-valgrind-s2.xml
+lib/valgrind/s390-acr.xml
+lib/valgrind/s390-fpr-valgrind-s1.xml
+lib/valgrind/s390-fpr-valgrind-s2.xml
+lib/valgrind/s390-fpr.xml
+lib/valgrind/s390x-core64-valgrind-s1.xml
+lib/valgrind/s390x-core64-valgrind-s2.xml
+lib/valgrind/s390x-core64.xml
+lib/valgrind/s390x-generic-valgrind.xml
+lib/valgrind/s390x-generic.xml
+lib/valgrind/s390x-linux64-valgrind-s1.xml
+lib/valgrind/s390x-linux64-valgrind-s2.xml
+lib/valgrind/s390x-linux64.xml
+lib/valgrind/vgpreload_core-amd64-netbsd.so
+lib/valgrind/vgpreload_drd-amd64-netbsd.so
+lib/valgrind/vgpreload_exp-dhat-amd64-netbsd.so
+lib/valgrind/vgpreload_exp-sgcheck-amd64-netbsd.so
+lib/valgrind/vgpreload_helgrind-amd64-netbsd.so
+lib/valgrind/vgpreload_massif-amd64-netbsd.so
+lib/valgrind/vgpreload_memcheck-amd64-netbsd.so
+man/man1/callgrind_annotate.1
+man/man1/callgrind_control.1
+man/man1/cg_annotate.1
+man/man1/cg_diff.1
+man/man1/cg_merge.1
+man/man1/ms_print.1
+man/man1/valgrind-listener.1
+man/man1/valgrind.1
+man/man1/vgdb.1
+share/doc/valgrind/html/FAQ.html
+share/doc/valgrind/html/QuickStart.html
+share/doc/valgrind/html/bbv-manual.html
+share/doc/valgrind/html/cg-manual.html
+share/doc/valgrind/html/cl-format.html
+share/doc/valgrind/html/cl-manual.html
+share/doc/valgrind/html/design-impl.html
+share/doc/valgrind/html/dh-manual.html
+share/doc/valgrind/html/dist.authors.html
+share/doc/valgrind/html/dist.html
+share/doc/valgrind/html/dist.news.html
+share/doc/valgrind/html/dist.news.old.html
+share/doc/valgrind/html/dist.readme-android.html
+share/doc/valgrind/html/dist.readme-android_emulator.html
+share/doc/valgrind/html/dist.readme-developers.html
+share/doc/valgrind/html/dist.readme-mips.html
+share/doc/valgrind/html/dist.readme-missing.html
+share/doc/valgrind/html/dist.readme-packagers.html
+share/doc/valgrind/html/dist.readme-s390.html
+share/doc/valgrind/html/dist.readme-solaris.html
+share/doc/valgrind/html/dist.readme.html
+share/doc/valgrind/html/drd-manual.html
+share/doc/valgrind/html/faq.html
+share/doc/valgrind/html/hg-manual.html
+share/doc/valgrind/html/images/home.png
+share/doc/valgrind/html/images/kcachegrind_xtree.png
+share/doc/valgrind/html/images/next.png
+share/doc/valgrind/html/images/prev.png
+share/doc/valgrind/html/images/up.png
+share/doc/valgrind/html/index.html
+share/doc/valgrind/html/license.gfdl.html
+share/doc/valgrind/html/license.gpl.html
+share/doc/valgrind/html/licenses.html
+share/doc/valgrind/html/lk-manual.html
+share/doc/valgrind/html/manual-core-adv.html
+share/doc/valgrind/html/manual-core.html
+share/doc/valgrind/html/manual-intro.html
+share/doc/valgrind/html/manual-writing-tools.html
+share/doc/valgrind/html/manual.html
+share/doc/valgrind/html/mc-manual.html
+share/doc/valgrind/html/ms-manual.html
+share/doc/valgrind/html/nl-manual.html
+share/doc/valgrind/html/quick-start.html
+share/doc/valgrind/html/sg-manual.html
+share/doc/valgrind/html/tech-docs.html
+share/doc/valgrind/html/vg_basic.css
+share/doc/valgrind/valgrind_manual.pdf
+share/doc/valgrind/valgrind_manual.ps
diff --git a/valgrind-netbsd/PLIST.Darwin b/valgrind-netbsd/PLIST.Darwin
new file mode 100644
index 0000000000..4afcb51eac
--- /dev/null
+++ b/valgrind-netbsd/PLIST.Darwin
@@ -0,0 +1,27 @@
+@comment $NetBSD: PLIST.Darwin,v 1.1 2012/11/04 05:12:56 minskim Exp $
+lib/valgrind/drd-amd64-darwin.dSYM/Contents/Info.plist
+lib/valgrind/drd-amd64-darwin.dSYM/Contents/Resources/DWARF/drd-amd64-darwin
+lib/valgrind/exp-dhat-amd64-darwin.dSYM/Contents/Info.plist
+lib/valgrind/exp-dhat-amd64-darwin.dSYM/Contents/Resources/DWARF/exp-dhat-amd64-darwin
+lib/valgrind/exp-sgcheck-amd64-darwin.dSYM/Contents/Info.plist
+lib/valgrind/exp-sgcheck-amd64-darwin.dSYM/Contents/Resources/DWARF/exp-sgcheck-amd64-darwin
+lib/valgrind/helgrind-amd64-darwin.dSYM/Contents/Info.plist
+lib/valgrind/helgrind-amd64-darwin.dSYM/Contents/Resources/DWARF/helgrind-amd64-darwin
+lib/valgrind/massif-amd64-darwin.dSYM/Contents/Info.plist
+lib/valgrind/massif-amd64-darwin.dSYM/Contents/Resources/DWARF/massif-amd64-darwin
+lib/valgrind/memcheck-amd64-darwin.dSYM/Contents/Info.plist
+lib/valgrind/memcheck-amd64-darwin.dSYM/Contents/Resources/DWARF/memcheck-amd64-darwin
+lib/valgrind/vgpreload_core-amd64-darwin.so.dSYM/Contents/Info.plist
+lib/valgrind/vgpreload_core-amd64-darwin.so.dSYM/Contents/Resources/DWARF/vgpreload_core-amd64-darwin.so
+lib/valgrind/vgpreload_drd-amd64-darwin.so.dSYM/Contents/Info.plist
+lib/valgrind/vgpreload_drd-amd64-darwin.so.dSYM/Contents/Resources/DWARF/vgpreload_drd-amd64-darwin.so
+lib/valgrind/vgpreload_exp-dhat-amd64-darwin.so.dSYM/Contents/Info.plist
+lib/valgrind/vgpreload_exp-dhat-amd64-darwin.so.dSYM/Contents/Resources/DWARF/vgpreload_exp-dhat-amd64-darwin.so
+lib/valgrind/vgpreload_exp-sgcheck-amd64-darwin.so.dSYM/Contents/Info.plist
+lib/valgrind/vgpreload_exp-sgcheck-amd64-darwin.so.dSYM/Contents/Resources/DWARF/vgpreload_exp-sgcheck-amd64-darwin.so
+lib/valgrind/vgpreload_helgrind-amd64-darwin.so.dSYM/Contents/Info.plist
+lib/valgrind/vgpreload_helgrind-amd64-darwin.so.dSYM/Contents/Resources/DWARF/vgpreload_helgrind-amd64-darwin.so
+lib/valgrind/vgpreload_massif-amd64-darwin.so.dSYM/Contents/Info.plist
+lib/valgrind/vgpreload_massif-amd64-darwin.so.dSYM/Contents/Resources/DWARF/vgpreload_massif-amd64-darwin.so
+lib/valgrind/vgpreload_memcheck-amd64-darwin.so.dSYM/Contents/Info.plist
+lib/valgrind/vgpreload_memcheck-amd64-darwin.so.dSYM/Contents/Resources/DWARF/vgpreload_memcheck-amd64-darwin.so
diff --git a/valgrind-netbsd/distinfo b/valgrind-netbsd/distinfo
new file mode 100644
index 0000000000..4af62fd8b8
--- /dev/null
+++ b/valgrind-netbsd/distinfo
@@ -0,0 +1,95 @@
+$NetBSD$
+
+SHA1 (valgrind-3.14.0.tar.bz2) = 182afd405b92ddb6f52c6729e848eacf4b1daf46
+RMD160 (valgrind-3.14.0.tar.bz2) = 562359c6222acd8546eedf6f0b6db964e91bd434
+SHA512 (valgrind-3.14.0.tar.bz2) = 68e548c42df31dc2b883a403e0faff7480c49b3054841870f5d2f742141ba199eca5d83c96bbf283115f0633f2bdb0860161d422f98e3ec720ec65760d250f97
+Size (valgrind-3.14.0.tar.bz2) = 16602858 bytes
+SHA1 (patch-Makefile.am) = 8175ceaa034f49d26801989fa6ad0e77872b7d0d
+SHA1 (patch-Makefile.tool.am) = b490afe260508064338ed52b56d14613f191ae0c
+SHA1 (patch-configure.ac) = 4c0fe4d6724cbd4b3c29f54c5bdbbfebcc37680e
+SHA1 (patch-coregrind_Makefile.am) = b5ea9d34f9135a39b8ed7d971823bbfa8b7a03ae
+SHA1 (patch-coregrind_launcher-linux.c) = 54a084da0ff8be05d2b1489247d81bad860fb70e
+SHA1 (patch-coregrind_link__tool__exe__netbsd.in) = ebf427931d24239fb04880a01b5ad9f1c86b5f62
+SHA1 (patch-coregrind_m__addrinfo.c) = a5eb2adb37f8409dcf143c924df87d9015571097
+SHA1 (patch-coregrind_m__aspacemgr_aspacemgr-common.c) = d1e09ff5ef979179fb4b8b13502992bb5e428e57
+SHA1 (patch-coregrind_m__aspacemgr_aspacemgr-linux.c) = 4143c822bba9f3ae98f8772f81b3aa7f6acdce35
+SHA1 (patch-coregrind_m__coredump_coredump-elf.c) = e24ce0c52673dc488206687aad9efcda8019a2b9
+SHA1 (patch-coregrind_m__debuginfo_d3basics.c) = c3a435a1c1e075c139f5963d27f4ac24996c9ca7
+SHA1 (patch-coregrind_m__debuginfo_debuginfo.c) = 243a586975067a38e5a9ab822d7f064d8b708118
+SHA1 (patch-coregrind_m__debuginfo_priv__readpdb.h) = c444f25cbe6d624c40c2154fd3646a8151cf8f8c
+SHA1 (patch-coregrind_m__debuginfo_readdwarf.c) = 60ab2ccd3a73cb7364d28e2a17b43bd4b9e571ef
+SHA1 (patch-coregrind_m__debuginfo_readdwarf3.c) = 13084532608812e217f15b64d2986b8b6fb124e6
+SHA1 (patch-coregrind_m__debuginfo_readelf.c) = 8e2c5b6a156fa819d478df4cffd44a08b5629e82
+SHA1 (patch-coregrind_m__debuginfo_readpdb.c) = f8c97fe9fd9fcf43c5ec5fd41477478c6d70114c
+SHA1 (patch-coregrind_m__debuginfo_storage.c) = 3a4a600613d64799a8229cf9d5d4fb9952eb3a2d
+SHA1 (patch-coregrind_m__debuglog.c) = 63fd7f562bf112a7cb2cf3c13655445a0b319f30
+SHA1 (patch-coregrind_m__dispatch_dispatch-amd64-netbsd.S) = c25b1170383f01ef1a6d16168e054310f76eb9e5
+SHA1 (patch-coregrind_m__errormgr.c) = 4e3b935bbd488e3d138618eb4ab214c512888327
+SHA1 (patch-coregrind_m__initimg_initimg-netbsd.c) = 810f31f2eae31f98d55ac555b0d1e32cb08cdb70
+SHA1 (patch-coregrind_m__libcassert.c) = faab08db23f58c71c2859deb0fc3649c577bfa6f
+SHA1 (patch-coregrind_m__libcfile.c) = e1d8b6ffd674fc289d454664a729710d3f645e84
+SHA1 (patch-coregrind_m__libcprint.c) = 11be5b36f9bdd61989bf3d93a77e03327beee269
+SHA1 (patch-coregrind_m__libcproc.c) = fd0993c0a3b322b4eba7bc58ff918e737d8d764d
+SHA1 (patch-coregrind_m__libcsetjmp.c) = 6256a64975d71afb6490dc60914cb4f6c0f58489
+SHA1 (patch-coregrind_m__libcsignal.c) = d4ad3a8c5c9dd63cb92cb241203321594149554f
+SHA1 (patch-coregrind_m__machine.c) = 661fb8baf42142bc0570f4db982d45cb5b21b4b7
+SHA1 (patch-coregrind_m__main.c) = 1d0a907acabd33170fa03d3a82f484e4a368e4eb
+SHA1 (patch-coregrind_m__redir.c) = 6515b4d388305797c0bb332cbbb940e000db2ba7
+SHA1 (patch-coregrind_m__replacemalloc_vg__replace__malloc.c) = 3116f6803756e8999fe863da3e0445bcddab3ca8
+SHA1 (patch-coregrind_m__scheduler_scheduler.c) = 8e54203a5bb92e542f14c9fb6d17130265db9ffe
+SHA1 (patch-coregrind_m__sigframe_sigframe-amd64-netbsd.c) = d4139d11814510bed1d7b6b28b88ad0f6566208f
+SHA1 (patch-coregrind_m__sigframe_sigframe-common.c) = e70746f02ca2f6de086f17617b6e1cbb5caae79c
+SHA1 (patch-coregrind_m__signals.c) = ed59d1b7053a69e170021273c0c009009c3d3026
+SHA1 (patch-coregrind_m__stacktrace.c) = 276357409a4c4ecac736690426838e901a704aea
+SHA1 (patch-coregrind_m__syscall.c) = e7302c1df64eaf003765e4c99431096d90014c3e
+SHA1 (patch-coregrind_m__syswrap_priv__syswrap-netbsd.h) = e9dfe84a3cb89eb1c606bd31bd0faaa076f5d28d
+SHA1 (patch-coregrind_m__syswrap_priv__types__n__macros.h) = feaf58693f4b8ee89c69ddf0beb9f39e7f42111c
+SHA1 (patch-coregrind_m__syswrap_syscall-amd64-netbsd.S) = 591bce0814d10fce6dced6de9bf7e07e0b10d13c
+SHA1 (patch-coregrind_m__syswrap_syswrap-amd64-netbsd.c) = c280ad2f4a569e57dcda8e34acf7994a61e9ce30
+SHA1 (patch-coregrind_m__syswrap_syswrap-generic.c) = 27d98d5081b4be7368bbad613e52ed17a20d8fcc
+SHA1 (patch-coregrind_m__syswrap_syswrap-main.c) = 506019e3351c78c8227794bc6fab3f4b9fdf366a
+SHA1 (patch-coregrind_m__syswrap_syswrap-netbsd-variants.c) = 03f651e330e32fd703e37191eb774152caadac6e
+SHA1 (patch-coregrind_m__syswrap_syswrap-netbsd.c) = 2f2ddcfcc96dd9733c65b84a2f38dc3ecdb68c2f
+SHA1 (patch-coregrind_m__trampoline.S) = d70748fa8ad836ba49d49fa600cdcae8722f795c
+SHA1 (patch-coregrind_m__translate.c) = 44a5182378ba6ff9f68d87f4de8195430fe6e32c
+SHA1 (patch-coregrind_m__ume_elf.c) = 387231663c23a9c215fe238e98181de7c8dfaa94
+SHA1 (patch-coregrind_m__ume_main.c) = 33affe1d0b8318908faf9385195b16a599b18b8d
+SHA1 (patch-coregrind_m__ume_priv__ume.h) = 628853a79d330ced54953cee8d756b4b6c46a8ca
+SHA1 (patch-coregrind_m__vki.c) = 946677ff8e2f604f2479e9e089940b881eb43105
+SHA1 (patch-coregrind_m__vkiscnums.c) = c8a68d4b6f38ece5da416401201ccdd404e071b8
+SHA1 (patch-coregrind_pub__core__debuginfo.h) = 5b43e2f1f57cbabab3424aa387b9ef9529c46e7f
+SHA1 (patch-coregrind_pub__core__initimg.h) = abac1cf83cda86270ebbe9a01118688475b4a069
+SHA1 (patch-coregrind_pub__core__machine.h) = 12214e212a56d5d8b029adf80e3a4e0c8c826d98
+SHA1 (patch-coregrind_pub__core__mallocfree.h) = ba38926737f61ba64b7424de45ab91822264e65f
+SHA1 (patch-coregrind_pub__core__sigframe.h) = ecfcc823fbcc2865b161279cc66947e1ec314b99
+SHA1 (patch-coregrind_pub__core__syscall.h) = 5fe6785b2c6f87cd698f6ab5e1f5b8716fbb5c57
+SHA1 (patch-coregrind_pub__core__trampoline.h) = b0aae68b6d529babff539dd38cc65011ab0a476f
+SHA1 (patch-coregrind_vg__preloaded.c) = 426f5bc8f124bfc931d595d113c90a36ba320f61
+SHA1 (patch-drd_Makefile.am) = 3d43d5b173ecb496292736c4f74122683fcfcceb
+SHA1 (patch-drd_drd__main.c) = ad45d1ea9cf8e7d0326fd2fd304f71f1700b1da4
+SHA1 (patch-helgrind_Makefile.am) = 36988c66b1ad7cb2f3ccd7431b0c1f049acc5da9
+SHA1 (patch-helgrind_hg__intercepts.c) = e7d3f32cc20e54047754a3bb04b277baf2a364ba
+SHA1 (patch-include_Makefile.am) = 0e6b6666c13d06f240fb75ba93a2bfe1a6cd7245
+SHA1 (patch-include_pub__tool__basics.h) = ab23d24e2a5e74b4b5093abc6c9f9ca98de0dc05
+SHA1 (patch-include_pub__tool__basics__asm.h) = d244496cef9ef5913f8995eaf2949f6a4c28d40f
+SHA1 (patch-include_pub__tool__machine.h) = 91efde63fc24d3c367ee560955661956e3aed842
+SHA1 (patch-include_pub__tool__redir.h) = 9d1b029c7ce27188b6722ef6eced603387ebf608
+SHA1 (patch-include_pub__tool__vki.h) = e59b9a1e67ed5038c92f3e038149408b30c38b01
+SHA1 (patch-include_pub__tool__vkiscnums.h) = ff122044a2f9831d9a6e8dd3e6828d15f301e1a3
+SHA1 (patch-include_pub__tool__vkiscnums__asm.h) = a62771d625824eab0f5225dfaf474c4187e90d75
+SHA1 (patch-include_valgrind.h) = bea2a4f4141873aba01674bf54b5532598b570a7
+SHA1 (patch-include_vki_vki-amd64-netbsd.h) = fe861ae4a91ca16a9c46f5e7411b9218c7eac1d9
+SHA1 (patch-include_vki_vki-machine-types-amd64-netbsd.h) = 92e08cd63a406182bbd25799d8a568b6fc29993f
+SHA1 (patch-include_vki_vki-netbsd.h) = bd8b67009fe7006ca765a725fa108ed2a4d1c230
+SHA1 (patch-include_vki_vki-scnums-netbsd.h) = 187f1a4189d1febce0f9e84f870e0019aa108aff
+SHA1 (patch-memcheck_Makefile.am) = b0f3a8749fe1d8b71372fb7de23c862a45c9ddcd
+SHA1 (patch-memcheck_mc__errors.c) = 92315e7065cbf28d9e318a6b0b8e0760eaeac73b
+SHA1 (patch-memcheck_mc__leakcheck.c) = 7b7e5e1a650a57e5434f40d59045837456954aa8
+SHA1 (patch-memcheck_mc__machine.c) = ff9166efda00c72d3c312d08a2089d9f8715bbde
+SHA1 (patch-memcheck_mc__main.c) = 7e68ec5f6e0de3944ea4c2a28045efcf8ef316ff
+SHA1 (patch-memcheck_mc__malloc__wrappers.c) = 19970a8062899bfe4b309b41463dd90858756622
+SHA1 (patch-memcheck_mc__translate.c) = 80966ae0e837b70537e57218b482846981370e4e
+SHA1 (patch-memcheck_tests_vbit-test_util.c) = cc99ebcae194e60dd195422377fb01c4820e7926
+SHA1 (patch-memcheck_tests_vbit-test_vbits.c) = bb75ebb16baa30ee2418a27ac1da4b8e3ba018a8
+SHA1 (patch-netbsd.supp) = 8e8d0c59c59b6d47ca75aa99d330043ebbd15215
+SHA1 (patch-shared_vg__replace__strmem.c) = 3cf2ef83b7cd9f6131bc4009f0ae89aa58226eac
diff --git a/valgrind-netbsd/patches/patch-Makefile.am b/valgrind-netbsd/patches/patch-Makefile.am
new file mode 100644
index 0000000000..e265d879e8
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-Makefile.am
@@ -0,0 +1,17 @@
+$NetBSD$
+
+--- Makefile.am.orig 2018-10-09 07:18:28.000000000 +0000
++++ Makefile.am
+@@ -49,6 +49,7 @@ SUPP_FILES = \
+ darwin11.supp darwin12.supp darwin13.supp darwin14.supp darwin15.supp \
+ darwin16.supp darwin17.supp \
+ bionic.supp \
++ netbsd.supp \
+ solaris11.supp solaris12.supp
+ DEFAULT_SUPP_FILES = @DEFAULT_SUPP@
+
+@@ -138,4 +139,3 @@ include/vgversion.h:
+ $(top_srcdir)/auxprogs/make_or_upd_vgversion_h $(top_srcdir)
+
+ .PHONY: include/vgversion.h
+-
diff --git a/valgrind-netbsd/patches/patch-Makefile.tool.am b/valgrind-netbsd/patches/patch-Makefile.tool.am
new file mode 100644
index 0000000000..6f59aa7a8d
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-Makefile.tool.am
@@ -0,0 +1,51 @@
+$NetBSD$
+
+--- Makefile.tool.am.orig 2018-05-05 07:42:22.000000000 +0000
++++ Makefile.tool.am
+@@ -39,6 +39,8 @@ TOOL_LDFLAGS_COMMON_DARWIN = \
+ TOOL_LDFLAGS_COMMON_SOLARIS = \
+ -static -nodefaultlibs -nostartfiles -u _start \
+ -Wl,-M,/usr/lib/ld/map.noexstk
++TOOL_LDFLAGS_COMMON_NETBSD = -static \
++ -nostdlib -u _start @FLAG_NO_BUILD_ID@
+
+ TOOL_LDFLAGS_X86_LINUX = \
+ $(TOOL_LDFLAGS_COMMON_LINUX) @FLAG_M32@
+@@ -70,6 +72,9 @@ TOOL_LDFLAGS_X86_DARWIN = \
+ TOOL_LDFLAGS_AMD64_DARWIN = \
+ $(TOOL_LDFLAGS_COMMON_DARWIN) -arch x86_64
+
++TOOL_LDFLAGS_AMD64_NETBSD = \
++ $(TOOL_LDFLAGS_COMMON_NETBSD) @FLAG_M64@
++
+ # MIPS Linux default start symbol is __start, not _start like on x86 or amd
+ TOOL_LDFLAGS_MIPS32_LINUX = \
+ -static -nodefaultlibs -nostartfiles -u __start @FLAG_NO_BUILD_ID@ \
+@@ -126,6 +131,9 @@ LIBREPLACEMALLOC_ARM_LINUX = \
+ LIBREPLACEMALLOC_ARM64_LINUX = \
+ $(top_builddir)/coregrind/libreplacemalloc_toolpreload-arm64-linux.a
+
++LIBREPLACEMALLOC_AMD64_NETBSD = \
++ $(top_builddir)/coregrind/libreplacemalloc_toolpreload-amd64-netbsd.a
++
+ LIBREPLACEMALLOC_X86_DARWIN = \
+ $(top_builddir)/coregrind/libreplacemalloc_toolpreload-x86-darwin.a
+
+@@ -182,6 +190,12 @@ LIBREPLACEMALLOC_LDFLAGS_ARM64_LINUX = \
+ $(LIBREPLACEMALLOC_ARM64_LINUX) \
+ -Wl,--no-whole-archive
+
++LIBREPLACEMALLOC_LDFLAGS_AMD64_NETBSD = \
++ -Wl,--whole-archive \
++ $(LIBREPLACEMALLOC_AMD64_NETBSD) \
++ -Wl,--no-whole-archive \
++ -shared -fPIC
++
+ LIBREPLACEMALLOC_LDFLAGS_X86_DARWIN = \
+ $(LIBREPLACEMALLOC_X86_DARWIN)
+
+@@ -224,4 +238,3 @@ clean-local: clean-noinst_DSYMS
+ install-exec-local: install-noinst_PROGRAMS install-noinst_DSYMS
+
+ uninstall-local: uninstall-noinst_PROGRAMS uninstall-noinst_DSYMS
+-
diff --git a/valgrind-netbsd/patches/patch-configure.ac b/valgrind-netbsd/patches/patch-configure.ac
new file mode 100644
index 0000000000..62cabf858d
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-configure.ac
@@ -0,0 +1,176 @@
+$NetBSD$
+
+--- configure.ac.orig 2018-10-09 07:26:33.000000000 +0000
++++ configure.ac
+@@ -115,7 +115,7 @@ rm $tmpfile
+
+ # Make sure we can compile in C99 mode.
+ AC_PROG_CC_C99
+-if test "$ac_cv_prog_cc_c99" == "no"; then
++if test "$ac_cv_prog_cc_c99" = "no"; then
+ AC_MSG_ERROR([Valgrind relies on a C compiler supporting C99])
+ fi
+
+@@ -450,6 +450,12 @@ case "${host_os}" in
+ esac
+ ;;
+
++ *netbsd*)
++ AC_MSG_RESULT([ok (${host_os})])
++ VGCONF_OS="netbsd"
++ DEFAULT_SUPP="netbsd.supp ${DEFAULT_SUPP}"
++ ;;
++
+ solaris2.11*)
+ AC_MSG_RESULT([ok (${host_os})])
+ VGCONF_OS="solaris"
+@@ -484,7 +490,7 @@ esac
+ # does not support building 32 bit programs
+
+ case "$ARCH_MAX-$VGCONF_OS" in
+- amd64-linux|ppc64be-linux|arm64-linux|amd64-solaris)
++ amd64-linux|ppc64be-linux|arm64-linux|amd64-solaris|amd64-netbsd)
+ AC_MSG_CHECKING([for 32 bit build support])
+ safe_CFLAGS=$CFLAGS
+ CFLAGS="-m32"
+@@ -784,6 +790,17 @@ case "$ARCH_MAX-$VGCONF_OS" in
+ fi
+ AC_MSG_RESULT([ok (${ARCH_MAX}-${VGCONF_OS})])
+ ;;
++ amd64-netbsd)
++ VGCONF_ARCH_PRI="amd64"
++ VGCONF_ARCH_SEC=""
++ VGCONF_PLATFORM_PRI_CAPS="AMD64_NETBSD"
++ VGCONF_PLATFORM_SEC_CAPS=""
++ valt_load_address_pri_norml="0x38000000"
++ valt_load_address_pri_inner="0x28000000"
++ valt_load_address_sec_norml="0xUNSET"
++ valt_load_address_sec_inner="0xUNSET"
++ AC_MSG_RESULT([ok (${ARCH_MAX}-${VGCONF_OS})])
++ ;;
+ x86-solaris)
+ VGCONF_ARCH_PRI="x86"
+ VGCONF_ARCH_SEC=""
+@@ -852,6 +869,7 @@ AM_CONDITIONAL(VGCONF_ARCHS_INCLUDE_X86,
+ AM_CONDITIONAL(VGCONF_ARCHS_INCLUDE_AMD64,
+ test x$VGCONF_PLATFORM_PRI_CAPS = xAMD64_LINUX \
+ -o x$VGCONF_PLATFORM_PRI_CAPS = xAMD64_DARWIN \
++ -o x$VGCONF_PLATFORM_PRI_CAPS = xAMD64_NETBSD \
+ -o x$VGCONF_PLATFORM_PRI_CAPS = xAMD64_SOLARIS )
+ AM_CONDITIONAL(VGCONF_ARCHS_INCLUDE_PPC32,
+ test x$VGCONF_PLATFORM_PRI_CAPS = xPPC32_LINUX \
+@@ -909,6 +927,8 @@ AM_CONDITIONAL(VGCONF_PLATFORMS_INCLUDE_
+ -o x$VGCONF_PLATFORM_SEC_CAPS = xX86_SOLARIS)
+ AM_CONDITIONAL(VGCONF_PLATFORMS_INCLUDE_AMD64_SOLARIS,
+ test x$VGCONF_PLATFORM_PRI_CAPS = xAMD64_SOLARIS)
++AM_CONDITIONAL(VGCONF_PLATFORMS_INCLUDE_AMD64_NETBSD,
++ test x$VGCONF_PLATFORM_PRI_CAPS = xAMD64_NETBSD)
+
+
+ # Similarly, set up VGCONF_OS_IS_<os>. Exactly one of these becomes defined.
+@@ -931,6 +951,8 @@ AM_CONDITIONAL(VGCONF_OS_IS_DARWIN,
+ AM_CONDITIONAL(VGCONF_OS_IS_SOLARIS,
+ test x$VGCONF_PLATFORM_PRI_CAPS = xX86_SOLARIS \
+ -o x$VGCONF_PLATFORM_PRI_CAPS = xAMD64_SOLARIS)
++AM_CONDITIONAL(VGCONF_OS_IS_NETBSD,
++ test x$VGCONF_PLATFORM_PRI_CAPS = xAMD64_NETBSD )
+
+
+ # Sometimes, in the Makefile.am files, it's useful to know whether or not
+@@ -1025,6 +1047,15 @@ AC_EGREP_CPP([DARWIN_LIBC], [
+ GLIBC_VERSION="darwin")
+
+ # not really a version check
++AC_EGREP_CPP([NETBSD_LIBC], [
++#include <sys/cdefs.h>
++#if defined(__NetBSD__)
++ NETBSD_LIBC
++#endif
++],
++GLIBC_VERSION="netbsd")
++
++# not really a version check
+ AC_EGREP_CPP([BIONIC_LIBC], [
+ #if defined(__ANDROID__)
+ BIONIC_LIBC
+@@ -1095,6 +1126,11 @@ case "${GLIBC_VERSION}" in
+ AC_DEFINE([BIONIC_LIBC], 1, [Define to 1 if you're using Bionic])
+ DEFAULT_SUPP="bionic.supp ${DEFAULT_SUPP}"
+ ;;
++ netbsd)
++ AC_MSG_RESULT(NetBSD)
++ # DEFAULT_SUPP set in host_os switch-case above.
++ # No other suppression file is used.
++ ;;
+ solaris)
+ AC_MSG_RESULT(Solaris)
+ # DEFAULT_SUPP set in host_os switch-case above.
+@@ -3013,6 +3049,41 @@ if test "$vg_cv_tls" = yes -a $is_clang
+ AC_DEFINE([HAVE_TLS], 1, [can use __thread to define thread-local variables])
+ fi
+
++#----------------------------------------------------------------------------
++# NetBSD-specific checks.
++#----------------------------------------------------------------------------
++
++if test "$VGCONF_OS" = "netbsd" ; then
++# NetBSD-specific check determining default platform for the Valgrind launcher.
++# Used in case the launcher cannot select platform by looking at the client
++# image (for example because the executable is a shell script).
++#
++# C-level symbol: NETBSD_LAUNCHER_DEFAULT_PLATFORM
++# Automake-level symbol: none
++#
++AC_MSG_CHECKING([for default platform of Valgrind launcher (NetBSD-specific)])
++# Get the ELF class of /bin/sh first.
++if ! test -f /bin/sh; then
++ AC_MSG_ERROR([Shell interpreter `/bin/sh' not found.])
++fi
++elf_class=$( /usr/bin/file /bin/sh | sed -n 's/.*ELF \(..\)-bit.*/\1/p' )
++case "$elf_class" in
++ 64)
++ default_arch="$VGCONF_ARCH_PRI";
++ ;;
++ 32)
++ AC_MSG_ERROR([Shell interpreter `/bin/sh' 32-bit not supported])
++ ;;
++ *)
++ AC_MSG_ERROR([Cannot determine ELF class of `/bin/sh'.])
++ ;;
++esac
++default_platform="$default_arch-$VGCONF_OS"
++AC_MSG_RESULT([$default_platform])
++AC_DEFINE_UNQUOTED([NETBSD_LAUNCHER_DEFAULT_PLATFORM], ["$default_platform"],
++ [Default platform for Valgrind launcher.])
++fi # test "$VGCONF_OS" = "netbsd"
++
+
+ #----------------------------------------------------------------------------
+ # Solaris-specific checks.
+@@ -4148,6 +4219,7 @@ if test x$VGCONF_PLATFORM_PRI_CAPS = xX8
+ -o x$VGCONF_PLATFORM_PRI_CAPS = xX86_SOLARIS ; then
+ mflag_primary=$FLAG_M32
+ elif test x$VGCONF_PLATFORM_PRI_CAPS = xAMD64_LINUX \
++ -o x$VGCONF_PLATFORM_PRI_CAPS = xAMD64_NETBSD \
+ -o x$VGCONF_PLATFORM_PRI_CAPS = xPPC64_LINUX \
+ -o x$VGCONF_PLATFORM_PRI_CAPS = xARM64_LINUX \
+ -o x$VGCONF_PLATFORM_PRI_CAPS = xMIPS64_LINUX \
+@@ -4187,6 +4259,9 @@ AM_COND_IF([VGCONF_OS_IS_LINUX],
+ AM_COND_IF([VGCONF_OS_IS_DARWIN],
+ [CFLAGS_MPI="-g -O -fno-omit-frame-pointer -Wall -dynamic"
+ LDFLAGS_MPI="-dynamic -dynamiclib -all_load"])
++AM_COND_IF([VGCONF_OS_IS_NETBSD],
++ [CFLAGS_MPI="-g -O -fno-omit-frame-pointer -Wall -fPIC"
++ LDFLAGS_MPI="-fPIC -shared"])
+ AM_COND_IF([VGCONF_OS_IS_SOLARIS],
+ [CFLAGS_MPI="-g -O -fno-omit-frame-pointer -Wall -fpic"
+ LDFLAGS_MPI="-fpic -shared"])
+@@ -4680,6 +4755,8 @@ AC_CONFIG_FILES([
+ ])
+ AC_CONFIG_FILES([coregrind/link_tool_exe_linux],
+ [chmod +x coregrind/link_tool_exe_linux])
++AC_CONFIG_FILES([coregrind/link_tool_exe_netbsd],
++ [chmod +x coregrind/link_tool_exe_netbsd])
+ AC_CONFIG_FILES([coregrind/link_tool_exe_darwin],
+ [chmod +x coregrind/link_tool_exe_darwin])
+ AC_CONFIG_FILES([coregrind/link_tool_exe_solaris],
diff --git a/valgrind-netbsd/patches/patch-coregrind_Makefile.am b/valgrind-netbsd/patches/patch-coregrind_Makefile.am
new file mode 100644
index 0000000000..d01b2293af
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-coregrind_Makefile.am
@@ -0,0 +1,101 @@
+$NetBSD$
+
+--- coregrind/Makefile.am.orig 2018-08-20 09:34:59.000000000 +0000
++++ coregrind/Makefile.am
+@@ -44,6 +44,11 @@ valgrind_SOURCES = \
+ launcher-darwin.c \
+ m_debuglog.c
+ endif
++if VGCONF_OS_IS_NETBSD
++valgrind_SOURCES = \
++ launcher-linux.c \
++ m_debuglog.c
++endif
+ if VGCONF_OS_IS_SOLARIS
+ valgrind_SOURCES = \
+ launcher-linux.c \
+@@ -89,6 +94,9 @@ if VGCONF_OS_IS_DARWIN
+ # having access to Darwin, 'none' implementation is used.
+ vgdb_SOURCES += vgdb-invoker-none.c
+ endif
++if VGCONF_OS_IS_NETBSD
++vgdb_SOURCES += vgdb-invoker-none.c ## XXX
++endif
+ if VGCONF_OS_IS_SOLARIS
+ vgdb_SOURCES += vgdb-invoker-solaris.c
+ endif
+@@ -276,6 +284,7 @@ noinst_HEADERS = \
+ m_syswrap/priv_syswrap-generic.h \
+ m_syswrap/priv_syswrap-linux.h \
+ m_syswrap/priv_syswrap-linux-variants.h \
++ m_syswrap/priv_syswrap-netbsd.h \
+ m_syswrap/priv_syswrap-darwin.h \
+ m_syswrap/priv_syswrap-solaris.h \
+ m_syswrap/priv_syswrap-main.h \
+@@ -376,6 +385,7 @@ COREGRIND_SOURCES_COMMON = \
+ m_dispatch/dispatch-s390x-linux.S \
+ m_dispatch/dispatch-mips32-linux.S \
+ m_dispatch/dispatch-mips64-linux.S \
++ m_dispatch/dispatch-amd64-netbsd.S \
+ m_dispatch/dispatch-x86-darwin.S \
+ m_dispatch/dispatch-amd64-darwin.S \
+ m_dispatch/dispatch-x86-solaris.S \
+@@ -399,6 +409,7 @@ COREGRIND_SOURCES_COMMON = \
+ m_gdbserver/valgrind-low-mips64.c \
+ m_gdbserver/version.c \
+ m_initimg/initimg-linux.c \
++ m_initimg/initimg-netbsd.c \
+ m_initimg/initimg-darwin.c \
+ m_initimg/initimg-solaris.c \
+ m_initimg/initimg-pathscan.c \
+@@ -423,6 +434,7 @@ COREGRIND_SOURCES_COMMON = \
+ m_sigframe/sigframe-mips64-linux.c \
+ m_sigframe/sigframe-x86-darwin.c \
+ m_sigframe/sigframe-amd64-darwin.c \
++ m_sigframe/sigframe-amd64-netbsd.c \
+ m_sigframe/sigframe-solaris.c \
+ m_syswrap/syscall-x86-linux.S \
+ m_syswrap/syscall-amd64-linux.S \
+@@ -436,16 +448,20 @@ COREGRIND_SOURCES_COMMON = \
+ m_syswrap/syscall-mips64-linux.S \
+ m_syswrap/syscall-x86-darwin.S \
+ m_syswrap/syscall-amd64-darwin.S \
++ m_syswrap/syscall-amd64-netbsd.S \
+ m_syswrap/syscall-x86-solaris.S \
+ m_syswrap/syscall-amd64-solaris.S \
+ m_syswrap/syswrap-main.c \
+ m_syswrap/syswrap-generic.c \
+ m_syswrap/syswrap-linux.c \
+ m_syswrap/syswrap-linux-variants.c \
++ m_syswrap/syswrap-netbsd.c \
++ m_syswrap/syswrap-netbsd-variants.c \
+ m_syswrap/syswrap-darwin.c \
+ m_syswrap/syswrap-solaris.c \
+ m_syswrap/syswrap-x86-linux.c \
+ m_syswrap/syswrap-amd64-linux.c \
++ m_syswrap/syswrap-amd64-netbsd.c \
+ m_syswrap/syswrap-ppc32-linux.c \
+ m_syswrap/syswrap-ppc64-linux.c \
+ m_syswrap/syswrap-arm-linux.c \
+@@ -547,7 +563,8 @@ libreplacemalloc_toolpreload_@VGCONF_ARC
+ libreplacemalloc_toolpreload_@VGCONF_ARCH_PRI@_@VGCONF_OS@_a_CPPFLAGS = \
+ $(AM_CPPFLAGS_@VGCONF_PLATFORM_PRI_CAPS@)
+ libreplacemalloc_toolpreload_@VGCONF_ARCH_PRI@_@VGCONF_OS@_a_CFLAGS = \
+- $(AM_CFLAGS_PSO_@VGCONF_PLATFORM_PRI_CAPS@)
++ $(AM_CFLAGS_PSO_@VGCONF_PLATFORM_PRI_CAPS@) -fPIC
++
+ if VGCONF_HAVE_PLATFORM_SEC
+ libreplacemalloc_toolpreload_@VGCONF_ARCH_SEC@_@VGCONF_OS@_a_SOURCES = \
+ m_replacemalloc/vg_replace_malloc.c
+@@ -587,6 +604,11 @@ vgpreload_core_@VGCONF_ARCH_SEC@_@VGCONF
+ $(PRELOAD_LDFLAGS_@VGCONF_PLATFORM_SEC_CAPS@)
+ endif
+
++if VGCONF_OS_IS_NETBSD
++vgpreload_core_@VGCONF_ARCH_PRI@_@VGCONF_OS@_so_LDFLAGS += \
++ -shared
++endif
++
+ if VGCONF_OS_IS_SOLARIS
+ # Give the vgpreload_core library a proper soname so it can be easily
+ # recognized during reading of debug information.
diff --git a/valgrind-netbsd/patches/patch-coregrind_launcher-linux.c b/valgrind-netbsd/patches/patch-coregrind_launcher-linux.c
new file mode 100644
index 0000000000..875436e893
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-coregrind_launcher-linux.c
@@ -0,0 +1,83 @@
+$NetBSD$
+
+--- coregrind/launcher-linux.c.orig 2018-07-13 08:52:05.000000000 +0000
++++ coregrind/launcher-linux.c
+@@ -53,6 +53,16 @@
+ #include <string.h>
+ #include <unistd.h>
+
++#if defined(VGO_netbsd)
++#include <sys/param.h>
++#include <sys/types.h>
++#include <sys/sysctl.h>
++#endif
++
++#ifndef VKI_BINPRM_BUF_SIZE
++#define VKI_BINPRM_BUF_SIZE VKI_PATH_MAX
++#endif
++
+ #ifndef EM_X86_64
+ #define EM_X86_64 62 // elf.h doesn't define this on some older systems
+ #endif
+@@ -276,6 +286,13 @@ static const char *select_platform(const
+ platform = "amd64-solaris";
+ }
+ else
++# elif defined(VGO_netbsd)
++ if (header.ehdr64.e_machine == EM_X86_64 &&
++ (header.ehdr64.e_ident[EI_OSABI] == ELFOSABI_SYSV ||
++ header.ehdr64.e_ident[EI_OSABI] == ELFOSABI_NETBSD)) {
++ platform = "amd64-netbsd";
++ }
++ else
+ # endif
+ if (header.ehdr64.e_machine == EM_X86_64 &&
+ (header.ehdr64.e_ident[EI_OSABI] == ELFOSABI_SYSV ||
+@@ -338,7 +355,9 @@ int main(int argc, char** argv, char** e
+ const char *cp;
+ const char *linkname;
+ char *toolfile;
++#if !defined(VGO_netbsd)
+ const char *launcher_name;
++#endif
+ char* new_line;
+ char** new_env;
+
+@@ -397,6 +416,9 @@ int main(int argc, char** argv, char** e
+ if ((0==strcmp(VG_PLATFORM,"x86-solaris")) ||
+ (0==strcmp(VG_PLATFORM,"amd64-solaris")))
+ default_platform = SOLARIS_LAUNCHER_DEFAULT_PLATFORM;
++# elif defined(VGO_netbsd)
++ if ((0==strcmp(VG_PLATFORM,"amd64-netbsd")))
++ default_platform = NETBSD_LAUNCHER_DEFAULT_PLATFORM;
+ # else
+ # error Unknown OS
+ # endif
+@@ -422,6 +444,18 @@ int main(int argc, char** argv, char** e
+ /* Figure out the name of this executable (viz, the launcher), so
+ we can tell stage2. stage2 will use the name for recursive
+ invocations of valgrind on child processes. */
++#if defined(VGO_netbsd)
++ static const int name[] = {
++ CTL_KERN, KERN_PROC_ARGS, -1, KERN_PROC_PATHNAME,
++ };
++ char launcher_name[MAXPATHLEN];
++ size_t len;
++
++ len = sizeof(launcher_name);
++ if (sysctl(name, __arraycount(name), launcher_name, &len, NULL, 0) == -1) {
++ abort();
++ }
++#else
+ # if defined(VGO_linux)
+ linkname = "/proc/self/exe";
+ # elif defined(VGO_solaris)
+@@ -458,7 +492,7 @@ int main(int argc, char** argv, char** e
+ launcher_name = buf;
+ break;
+ }
+-
++#endif
+ /* tediously augment the env: VALGRIND_LAUNCHER=launcher_name */
+ new_line = malloc(strlen(VALGRIND_LAUNCHER) + 1
+ + strlen(launcher_name) + 1);
diff --git a/valgrind-netbsd/patches/patch-coregrind_link__tool__exe__netbsd.in b/valgrind-netbsd/patches/patch-coregrind_link__tool__exe__netbsd.in
new file mode 100644
index 0000000000..7dce34b380
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-coregrind_link__tool__exe__netbsd.in
@@ -0,0 +1,89 @@
+$NetBSD$
+
+--- coregrind/link_tool_exe_netbsd.in.orig 2019-03-26 07:40:15.574675987 +0000
++++ coregrind/link_tool_exe_netbsd.in
+@@ -0,0 +1,84 @@
++#! @PERL@
++
++# This script handles linking the tool executables on NetBSD,
++# statically and at an alternative load address.
++#
++# Linking statically sidesteps all sorts of complications to do with
++# having two copies of the dynamic linker (valgrind's and the
++# client's) coexisting in the same process. The alternative load
++# address is needed because Valgrind itself will load the client at
++# whatever address it specifies, which is almost invariably the
++# default load address. Hence we can't allow Valgrind itself (viz,
++# the tool executable) to be loaded at that address.
++#
++# Unfortunately there's no standard way to do 'static link at
++# alternative address', so these link_tool_exe_*.in scripts handle
++# the per-platform hoop-jumping.
++#
++# What we get passed here is:
++# first arg
++# the alternative load address
++# all the rest of the args
++# the gcc invocation to do the final link, that
++# the build system would have done, left to itself
++#
++# We just let the script 'die' if something is wrong, rather than do
++# proper error reporting. We don't expect the users to run this
++# directly. It is only run as part of the build process, with
++# carefully constrained inputs.
++#
++# Linux specific complications:
++#
++# - need to support both old GNU ld and gold: use -Ttext= to
++# set the text segment address if that is all we have. We really
++# need -Ttext-segment. Otherwise with GNU ld sections or notes
++# (like the build-id) don't get at the desired address. But older
++# linkers only know about -Ttext, not -Ttext-segment. So configure
++# checks for us and sets FLAG_T_TEXT.
++#
++# - If all we have is -Ttext, then we need to pass --build-id=none
++# (that is, -Wl,--build-id=none to gcc) if it accepts it, to ensure
++# the linker doesn't add a notes section which ends up at the default
++# load address and so defeats our attempts to keep that address clear
++# for the client. However, older linkers don't support this flag,
++# so it is tested for by configure.in and is shipped to us as part of
++# argv[2 ..].
++#
++# So: what we actually do:
++#
++# pass the specified command to the linker as-is, except, add
++# "-static" and "-Ttext[-segment]=<argv[1]>" to it.
++# Previously we did this by adding these options after the first
++# word of the rest of the arguments, which works in the common case
++# when it's something like "gcc". But the linker invocation itself
++# might be multiple words, say if it's "ccache gcc". So we now put
++# the new options at the end instead.
++#
++
++use warnings;
++use strict;
++
++# expect at least: alt-load-address gcc -o foo bar.o
++die "Not enough arguments"
++ if (($#ARGV + 1) < 5);
++
++my $ala = $ARGV[0];
++shift; # Remove $ala from @ARGV
++
++# check for plausible-ish alt load address
++die "Bogus alt-load address"
++ if (length($ala) < 3 || index($ala, "0x") != 0);
++
++my $cmd = join(" ", @ARGV, "-static -Wl,@FLAG_T_TEXT@=$ala");
++
++#print "link_tool_exe_netbsd: $cmd\n";
++
++
++# Execute the command:
++my $r = system($cmd);
++
++if ($r == 0) {
++ exit 0;
++} else {
++ exit 1;
++}
diff --git a/valgrind-netbsd/patches/patch-coregrind_m__addrinfo.c b/valgrind-netbsd/patches/patch-coregrind_m__addrinfo.c
new file mode 100644
index 0000000000..0a9cd8ae7e
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-coregrind_m__addrinfo.c
@@ -0,0 +1,23 @@
+$NetBSD$
+
+--- coregrind/m_addrinfo.c.orig 2018-05-05 07:42:22.000000000 +0000
++++ coregrind/m_addrinfo.c
+@@ -650,6 +650,8 @@ static void pp_addrinfo_WRK ( Addr a, co
+ default:
+ VG_(core_panic)("mc_pp_AddrInfo");
+ }
++
++ VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ }
+
+ void VG_(pp_addrinfo) ( Addr a, const AddrInfo* ai )
+@@ -659,7 +661,9 @@ void VG_(pp_addrinfo) ( Addr a, const Ad
+
+ void VG_(pp_addrinfo_mc) ( Addr a, const AddrInfo* ai, Bool maybe_gcc )
+ {
++ VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ pp_addrinfo_WRK (a, ai, True /*mc*/, maybe_gcc);
++ VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ }
+
+
diff --git a/valgrind-netbsd/patches/patch-coregrind_m__aspacemgr_aspacemgr-common.c b/valgrind-netbsd/patches/patch-coregrind_m__aspacemgr_aspacemgr-common.c
new file mode 100644
index 0000000000..24215b86b9
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-coregrind_m__aspacemgr_aspacemgr-common.c
@@ -0,0 +1,110 @@
+$NetBSD$
+
+--- coregrind/m_aspacemgr/aspacemgr-common.c.orig 2018-08-06 07:22:24.000000000 +0000
++++ coregrind/m_aspacemgr/aspacemgr-common.c
+@@ -146,6 +146,9 @@ SysRes VG_(am_do_mmap_NO_NOTIFY)( Addr s
+ SysRes res;
+ aspacem_assert(VG_IS_PAGE_ALIGNED(offset));
+
++ if (start == 0 && flags & VKI_MAP_FIXED)
++ __builtin_trap();
++
+ # if defined(VGP_arm64_linux)
+ res = VG_(do_syscall6)(__NR3264_mmap, (UWord)start, length,
+ prot, flags, fd, offset);
+@@ -161,6 +164,11 @@ SysRes VG_(am_do_mmap_NO_NOTIFY)( Addr s
+ || defined(VGP_mips64_linux) || defined(VGP_arm64_linux)
+ res = VG_(do_syscall6)(__NR_mmap, (UWord)start, length,
+ prot, flags, fd, offset);
++# elif defined(VGP_amd64_netbsd)
++ if (flags & VKI_MAP_ANONYMOUS && fd == 0)
++ fd = -1;
++ res = VG_(do_syscall7)(__NR_mmap, (UWord)start, length,
++ prot, flags, fd, 0, offset);
+ # elif defined(VGP_x86_darwin)
+ if (fd == 0 && (flags & VKI_MAP_ANONYMOUS)) {
+ fd = -1; // MAP_ANON with fd==0 is EINVAL
+@@ -221,6 +229,12 @@ SysRes ML_(am_do_extend_mapping_NO_NOTIF
+ 0/*flags, meaning: must be at old_addr, else FAIL */,
+ 0/*new_addr, is ignored*/
+ );
++# elif defined(VGO_netbsd)
++ return VG_(do_syscall5)(
++ __NR_mremap,
++ old_addr, old_len, 0/*newp*/, new_len,
++ VKI_MAP_FIXED/*flags, meaning: must be at old_addr, else FAIL */
++ );
+ # else
+ # error Unknown OS
+ # endif
+@@ -242,6 +256,12 @@ SysRes ML_(am_do_relocate_nooverlap_mapp
+ VKI_MREMAP_MAYMOVE|VKI_MREMAP_FIXED/*move-or-fail*/,
+ new_addr
+ );
++# elif defined(VGO_netbsd)
++ return VG_(do_syscall5)(
++ __NR_mremap,
++ old_addr, old_len, new_addr, new_len,
++ VKI_MAP_FIXED/*move-or-fail*/
++ );
+ # else
+ # error Unknown OS
+ # endif
+@@ -257,7 +277,7 @@ SysRes ML_(am_open) ( const HChar* pathn
+ /* ARM64 wants to use __NR_openat rather than __NR_open. */
+ SysRes res = VG_(do_syscall4)(__NR_openat,
+ VKI_AT_FDCWD, (UWord)pathname, flags, mode);
+-# elif defined(VGO_linux) || defined(VGO_darwin)
++# elif defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_netbsd)
+ SysRes res = VG_(do_syscall3)(__NR_open, (UWord)pathname, flags, mode);
+ # elif defined(VGO_solaris)
+ SysRes res = VG_(do_syscall4)(__NR_openat, VKI_AT_FDCWD, (UWord)pathname,
+@@ -285,7 +305,7 @@ Int ML_(am_readlink)(const HChar* path,
+ # if defined(VGP_arm64_linux)
+ res = VG_(do_syscall4)(__NR_readlinkat, VKI_AT_FDCWD,
+ (UWord)path, (UWord)buf, bufsiz);
+-# elif defined(VGO_linux) || defined(VGO_darwin)
++# elif defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_netbsd)
+ res = VG_(do_syscall3)(__NR_readlink, (UWord)path, (UWord)buf, bufsiz);
+ # elif defined(VGO_solaris)
+ res = VG_(do_syscall4)(__NR_readlinkat, VKI_AT_FDCWD, (UWord)path,
+@@ -298,7 +318,7 @@ Int ML_(am_readlink)(const HChar* path,
+
+ Int ML_(am_fcntl) ( Int fd, Int cmd, Addr arg )
+ {
+-# if defined(VGO_linux) || defined(VGO_solaris)
++# if defined(VGO_linux) || defined(VGO_solaris) || defined(VGO_netbsd)
+ SysRes res = VG_(do_syscall3)(__NR_fcntl, fd, cmd, arg);
+ # elif defined(VGO_darwin)
+ SysRes res = VG_(do_syscall3)(__NR_fcntl_nocancel, fd, cmd, arg);
+@@ -314,7 +334,7 @@ Bool ML_(am_get_fd_d_i_m)( Int fd,
+ /*OUT*/ULong* dev,
+ /*OUT*/ULong* ino, /*OUT*/UInt* mode )
+ {
+-# if defined(VGO_linux) || defined(VGO_darwin)
++# if defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_netbsd)
+ SysRes res;
+ struct vki_stat buf;
+ # if defined(VGO_linux) && defined(__NR_fstat64)
+@@ -330,7 +350,11 @@ Bool ML_(am_get_fd_d_i_m)( Int fd,
+ return True;
+ }
+ # endif
++# if defined(VGO_netbsd)
++ res = VG_(do_syscall2)(__NR___fstat50, fd, (UWord)&buf);
++#else
+ res = VG_(do_syscall2)(__NR_fstat, fd, (UWord)&buf);
++#endif
+ if (!sr_isError(res)) {
+ *dev = (ULong)buf.st_dev;
+ *ino = (ULong)buf.st_ino;
+@@ -393,6 +417,9 @@ Bool ML_(am_resolve_filename) ( Int fd,
+ else
+ return False;
+
++#elif defined(VGO_netbsd)
++ return False;
++
+ # else
+ # error Unknown OS
+ # endif
diff --git a/valgrind-netbsd/patches/patch-coregrind_m__aspacemgr_aspacemgr-linux.c b/valgrind-netbsd/patches/patch-coregrind_m__aspacemgr_aspacemgr-linux.c
new file mode 100644
index 0000000000..3a78da6a3d
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-coregrind_m__aspacemgr_aspacemgr-linux.c
@@ -0,0 +1,170 @@
+$NetBSD$
+
+--- coregrind/m_aspacemgr/aspacemgr-linux.c.orig 2018-07-13 08:52:05.000000000 +0000
++++ coregrind/m_aspacemgr/aspacemgr-linux.c
+@@ -32,7 +32,7 @@
+ The GNU General Public License is contained in the file COPYING.
+ */
+
+-#if defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_solaris)
++#if defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_solaris) || defined(VGO_netbsd)
+
+ /* *************************************************************
+ DO NOT INCLUDE ANY OTHER FILES HERE.
+@@ -306,7 +306,7 @@ static Int nsegments_used = 0;
+
+
+ Addr VG_(clo_aspacem_minAddr)
+-#if defined(VGO_linux)
++#if defined(VGO_linux) || defined(VGO_netbsd)
+ = (Addr) 0x04000000; // 64M
+ #elif defined(VGO_darwin)
+ # if VG_WORDSIZE == 4
+@@ -872,7 +872,7 @@ static void sync_check_mapping_callback
+ cmp_devino = False;
+ #endif
+
+-#if defined(VGO_darwin)
++#if defined(VGO_darwin) || defined(VGO_netbsd)
+ // GrP fixme kernel info doesn't have dev/inode
+ cmp_devino = False;
+
+@@ -1638,6 +1638,32 @@ Addr VG_(am_startup) ( Addr sp_at_startu
+
+ suggested_clstack_end = -1; // ignored; Mach-O specifies its stack
+
++ // --- NetBSD -------------------------------------------
++
++#elif defined(VGO_netbsd)
++
++# if VG_WORDSIZE == 4
++ aspacem_maxAddr = VG_PGROUNDDN( sp_at_startup ) - 1;
++# else
++ aspacem_maxAddr = (Addr) (Addr)0x800000000 - 1; // 32G
++# ifdef ENABLE_INNER
++ { Addr cse = VG_PGROUNDDN( sp_at_startup ) - 1;
++ if (aspacem_maxAddr > cse)
++ aspacem_maxAddr = cse;
++ }
++# endif
++# endif
++
++ aspacem_cStart = aspacem_minAddr;
++ aspacem_vStart = VG_PGROUNDUP((aspacem_minAddr + aspacem_maxAddr + 1) / 2);
++
++# ifdef ENABLE_INNER
++ aspacem_vStart -= 0x10000000; // 256M
++# endif
++
++ suggested_clstack_end = aspacem_maxAddr - 16*1024*1024ULL
++ + VKI_PAGE_SIZE;
++
+ // --- Solaris ------------------------------------------
+ #elif defined(VGO_solaris)
+ # if VG_WORDSIZE == 4
+@@ -3797,13 +3823,93 @@ Bool VG_(get_changed_segments)(
+ return !css_overflowed;
+ }
+
+-#endif // defined(VGO_darwin)
+-
+ /*------END-procmaps-parser-for-Darwin---------------------------*/
+
++/*------BEGIN-procmaps-parser-for-NetBSD------------------------*/
++
++#elif defined(VGO_netbsd)
++
++ /* Size of a smallish buffer used to read the kernel vm map entries via sysctl. */
++ #define M_PROCMAP_BUF 10485760 /* 10M */
++
++ /* static ... to keep it out of the stack frame. */
++ static char procmap_buf[M_PROCMAP_BUF];
++static void parse_procselfmaps (
++ void (*record_mapping)( Addr addr, SizeT len, UInt prot,
++ ULong dev, ULong ino, Off64T offset,
++ const HChar* filename ),
++ void (*record_gap)( Addr addr, SizeT len )
++ )
++{
++ Int i;
++ Addr start, endPlusOne, gapStart;
++ char* filename;
++ char *p;
++ UInt prot;
++ ULong foffset, dev, ino;
++ struct vki_kinfo_vmentry *kve;
++ vki_size_t len;
++ Int oid[5];
++ SysRes sres;
++
++ foffset = ino = 0; /* keep gcc-4.1.0 happy */
++
++ oid[0] = VKI_CTL_VM;
++ oid[1] = VKI_VM_PROC;
++ oid[2] = VKI_VM_PROC_MAP;
++ oid[3] = sr_Res(VG_(do_syscall0)(__NR_getpid));
++ oid[4] = sizeof(struct vki_kinfo_vmentry);
++
++ len = sizeof(procmap_buf);
++
++ sres = VG_(do_syscall6)(__NR___sysctl, (UWord)oid, 5, (UWord)procmap_buf,
++ (UWord)&len, 0, 0);
++ if (sr_isError(sres)) {
++ VG_(debugLog)(0, "procselfmaps", "sysctl %ld\n", sr_Err(sres));
++ ML_(am_exit)(1);
++ }
++
++ gapStart = Addr_MIN;
++ i = 0;
++ p = procmap_buf;
++ while (p < (char *)procmap_buf + len) {
++ kve = (struct vki_kinfo_vmentry *)p;
++ start = (UWord)kve->kve_start;
++ endPlusOne = (UWord)kve->kve_end;
++ foffset = kve->kve_offset;
++ filename = kve->kve_path;
++ dev = kve->kve_vn_fsid;
++ ino = kve->kve_vn_fileid;
++ if (filename[0] != '/') {
++ filename = NULL;
++ foffset = 0;
++ }
++
++ prot = 0;
++ if (kve->kve_protection & VKI_KVME_PROT_READ) prot |= VKI_PROT_READ;
++ if (kve->kve_protection & VKI_KVME_PROT_WRITE) prot |= VKI_PROT_WRITE;
++ if (kve->kve_protection & VKI_KVME_PROT_EXEC) prot |= VKI_PROT_EXEC;
++
++ if (record_gap && gapStart < start)
++ (*record_gap) ( gapStart, start-gapStart );
++
++ if (record_mapping && start < endPlusOne)
++ (*record_mapping) ( start, endPlusOne-start,
++ prot, dev, ino,
++ foffset, filename );
++ gapStart = endPlusOne;
++ p += sizeof(struct vki_kinfo_vmentry);
++ }
++
++ if (record_gap && gapStart < Addr_MAX)
++ (*record_gap) ( gapStart, Addr_MAX - gapStart + 1 );
++}
++
++/*------END-procmaps-parser-for-NetBSD--------------------------*/
++
+ /*------BEGIN-procmaps-parser-for-Solaris------------------------*/
+
+-#if defined(VGO_solaris)
++#elif defined(VGO_solaris)
+
+ /* Note: /proc/self/xmap contains extended information about already
+ materialized mappings whereas /proc/self/rmap contains information about
+@@ -4113,7 +4219,7 @@ Bool VG_(am_search_for_new_segment)(Addr
+
+ /*------END-procmaps-parser-for-Solaris--------------------------*/
+
+-#endif // defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_solaris)
++#endif // defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_solaris) || defined(VGO_netbsd)
+
+ /*--------------------------------------------------------------------*/
+ /*--- end ---*/
diff --git a/valgrind-netbsd/patches/patch-coregrind_m__coredump_coredump-elf.c b/valgrind-netbsd/patches/patch-coregrind_m__coredump_coredump-elf.c
new file mode 100644
index 0000000000..d6de713887
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-coregrind_m__coredump_coredump-elf.c
@@ -0,0 +1,39 @@
+$NetBSD$
+
+--- coregrind/m_coredump/coredump-elf.c.orig 2018-08-14 07:56:38.000000000 +0000
++++ coregrind/m_coredump/coredump-elf.c
+@@ -28,7 +28,7 @@
+ The GNU General Public License is contained in the file COPYING.
+ */
+
+-#if defined(VGO_linux)
++#if defined(VGO_linux) || defined(VGO_netbsd)
+
+ #include "pub_core_basics.h"
+ #include "pub_core_vki.h"
+@@ -66,6 +66,7 @@
+ #error VG_WORDSIZE needs to ==4 or ==8
+ #endif
+
++#if 0
+ /* If true, then this Segment may be mentioned in the core */
+ static Bool may_dump(const NSegment *seg)
+ {
+@@ -740,13 +741,15 @@ void make_elf_coredump(ThreadId tid, con
+ VG_(free)(seg_starts);
+ VG_(free)(phdrs);
+ }
+-
++#endif
+ void VG_(make_coredump)(ThreadId tid, const vki_siginfo_t *si, ULong max_size)
+ {
++#if 0
+ make_elf_coredump(tid, si, max_size);
++#endif
+ }
+
+-#endif // defined(VGO_linux)
++#endif // defined(VGO_linux) || defined(VGO_netbsd)
+
+ /*--------------------------------------------------------------------*/
+ /*--- end ---*/
diff --git a/valgrind-netbsd/patches/patch-coregrind_m__debuginfo_d3basics.c b/valgrind-netbsd/patches/patch-coregrind_m__debuginfo_d3basics.c
new file mode 100644
index 0000000000..c8917e52dd
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-coregrind_m__debuginfo_d3basics.c
@@ -0,0 +1,13 @@
+$NetBSD$
+
+--- coregrind/m_debuginfo/d3basics.c.orig 2018-05-05 07:42:22.000000000 +0000
++++ coregrind/m_debuginfo/d3basics.c
+@@ -405,7 +405,7 @@ static Bool get_Dwarf_Reg( /*OUT*/Addr*
+ if (regno == 5/*EBP*/) { *a = regs->fp; return True; }
+ if (regno == 4/*ESP*/) { *a = regs->sp; return True; }
+ # elif defined(VGP_amd64_linux) || defined(VGP_amd64_darwin) \
+- || defined(VGP_amd64_solaris)
++ || defined(VGP_amd64_solaris) || defined(VGP_amd64_netbsd)
+ if (regno == 6/*RBP*/) { *a = regs->fp; return True; }
+ if (regno == 7/*RSP*/) { *a = regs->sp; return True; }
+ # elif defined(VGP_ppc32_linux)
diff --git a/valgrind-netbsd/patches/patch-coregrind_m__debuginfo_debuginfo.c b/valgrind-netbsd/patches/patch-coregrind_m__debuginfo_debuginfo.c
new file mode 100644
index 0000000000..50e4e34732
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-coregrind_m__debuginfo_debuginfo.c
@@ -0,0 +1,58 @@
+$NetBSD$
+
+--- coregrind/m_debuginfo/debuginfo.c.orig 2018-09-30 04:41:00.000000000 +0000
++++ coregrind/m_debuginfo/debuginfo.c
+@@ -59,7 +59,7 @@
+ #include "priv_tytypes.h"
+ #include "priv_storage.h"
+ #include "priv_readdwarf.h"
+-#if defined(VGO_linux) || defined(VGO_solaris)
++#if defined(VGO_linux) || defined(VGO_solaris) || defined(VGO_netbsd)
+ # include "priv_readelf.h"
+ # include "priv_readdwarf3.h"
+ # include "priv_readpdb.h"
+@@ -816,7 +816,7 @@ void VG_(di_initialise) ( void )
+ /*--- ---*/
+ /*--------------------------------------------------------------*/
+
+-#if defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_solaris)
++#if defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_solaris) || defined(VGO_netbsd)
+
+ /* Helper (indirect) for di_notify_ACHIEVE_ACCEPT_STATE */
+ static Bool overlaps_DebugInfoMappings ( const DebugInfoMapping* map1,
+@@ -967,7 +967,7 @@ static ULong di_notify_ACHIEVE_ACCEPT_ST
+ truncate_DebugInfoMapping_overlaps( di, di->fsm.maps );
+
+ /* And acquire new info. */
+-# if defined(VGO_linux) || defined(VGO_solaris)
++# if defined(VGO_linux) || defined(VGO_solaris) || defined(VGO_netbsd)
+ ok = ML_(read_elf_debug_info)( di );
+ # elif defined(VGO_darwin)
+ ok = ML_(read_macho_debug_info)( di );
+@@ -1249,7 +1249,7 @@ ULong VG_(di_notify_mmap)( Addr a, Bool
+ vg_assert(sr_Res(preadres) > 0 && sr_Res(preadres) <= sizeof(buf1k) );
+
+ /* We're only interested in mappings of object files. */
+-# if defined(VGO_linux) || defined(VGO_solaris)
++# if defined(VGO_linux) || defined(VGO_solaris) || defined(VGO_netbsd)
+ if (!ML_(is_elf_object_file)( buf1k, (SizeT)sr_Res(preadres), False ))
+ return 0;
+ # elif defined(VGO_darwin)
+@@ -1698,7 +1698,7 @@ void VG_(di_notify_pdb_debuginfo)( Int f
+ if (pdbname) ML_(dinfo_free)(pdbname);
+ }
+
+-#endif /* defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_solaris) */
++#endif /* defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_solaris) || defined(VGO_netbsd) */
+
+
+ /*------------------------------------------------------------*/
+@@ -2295,6 +2295,8 @@ Vg_FnNameKind VG_(get_fnname_kind) ( con
+ # elif defined(VGO_darwin)
+ // See readmacho.c for an explanation of this.
+ VG_STREQ("start_according_to_valgrind", name) || // Darwin, darling
++# elif defined(VGO_netbsd)
++ VG_STREQ("___start", name) || // main() is called directly from ___start
+ # elif defined(VGO_solaris)
+ VG_STREQ("_start", name) || // main() is called directly from _start
+ # else
diff --git a/valgrind-netbsd/patches/patch-coregrind_m__debuginfo_priv__readpdb.h b/valgrind-netbsd/patches/patch-coregrind_m__debuginfo_priv__readpdb.h
new file mode 100644
index 0000000000..fc450a8323
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-coregrind_m__debuginfo_priv__readpdb.h
@@ -0,0 +1,22 @@
+$NetBSD$
+
+--- coregrind/m_debuginfo/priv_readpdb.h.orig 2018-05-05 07:42:22.000000000 +0000
++++ coregrind/m_debuginfo/priv_readpdb.h
+@@ -32,7 +32,7 @@
+ The GNU General Public License is contained in the file COPYING.
+ */
+
+-#if defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_solaris)
++#if defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_solaris) || defined(VGO_netbsd)
+
+ #ifndef __PRIV_READPDB_H
+ #define __PRIV_READPDB_H
+@@ -59,7 +59,7 @@ HChar* ML_(find_name_of_pdb_file)( const
+
+ #endif /* ndef __PRIV_READPDB_H */
+
+-#endif // defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_solaris)
++#endif // defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_solaris) || defined(VGO_netbsd)
+
+ /*--------------------------------------------------------------------*/
+ /*--- end ---*/
diff --git a/valgrind-netbsd/patches/patch-coregrind_m__debuginfo_readdwarf.c b/valgrind-netbsd/patches/patch-coregrind_m__debuginfo_readdwarf.c
new file mode 100644
index 0000000000..0acf3604e1
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-coregrind_m__debuginfo_readdwarf.c
@@ -0,0 +1,31 @@
+$NetBSD$
+
+--- coregrind/m_debuginfo/readdwarf.c.orig 2018-05-05 07:42:22.000000000 +0000
++++ coregrind/m_debuginfo/readdwarf.c
+@@ -29,7 +29,7 @@
+ The GNU General Public License is contained in the file COPYING.
+ */
+
+-#if defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_solaris)
++#if defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_solaris) || defined(VGO_netbsd)
+
+ #include "pub_core_basics.h"
+ #include "pub_core_debuginfo.h"
+@@ -1692,7 +1692,7 @@ void ML_(read_debuginfo_dwarf1) (
+ # define FP_REG 5
+ # define SP_REG 4
+ # define RA_REG_DEFAULT 8
+-#elif defined(VGP_amd64_linux) || defined(VGP_amd64_solaris)
++#elif defined(VGP_amd64_linux) || defined(VGP_amd64_solaris) || defined(VGP_amd64_netbsd)
+ # define FP_REG 6
+ # define SP_REG 7
+ # define RA_REG_DEFAULT 16
+@@ -4145,7 +4145,7 @@ void ML_(read_callframe_info_dwarf3)
+ return;
+ }
+
+-#endif // defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_solaris)
++#endif // defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_solaris) || defined(VGO_netbsd)
+
+ /*--------------------------------------------------------------------*/
+ /*--- end ---*/
diff --git a/valgrind-netbsd/patches/patch-coregrind_m__debuginfo_readdwarf3.c b/valgrind-netbsd/patches/patch-coregrind_m__debuginfo_readdwarf3.c
new file mode 100644
index 0000000000..a224b327f3
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-coregrind_m__debuginfo_readdwarf3.c
@@ -0,0 +1,22 @@
+$NetBSD$
+
+--- coregrind/m_debuginfo/readdwarf3.c.orig 2018-05-05 07:42:22.000000000 +0000
++++ coregrind/m_debuginfo/readdwarf3.c
+@@ -35,7 +35,7 @@
+ without prior written permission.
+ */
+
+-#if defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_solaris)
++#if defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_solaris) || defined(VGO_netbsd)
+
+ /* REFERENCE (without which this code will not make much sense):
+
+@@ -5303,7 +5303,7 @@ ML_(new_dwarf3_reader) (
+ TRACE_SYMTAB("\n");
+ #endif
+
+-#endif // defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_solaris)
++#endif // defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_solaris) || defined(VGO_netbsd)
+
+ /*--------------------------------------------------------------------*/
+ /*--- end ---*/
diff --git a/valgrind-netbsd/patches/patch-coregrind_m__debuginfo_readelf.c b/valgrind-netbsd/patches/patch-coregrind_m__debuginfo_readelf.c
new file mode 100644
index 0000000000..db691b5c74
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-coregrind_m__debuginfo_readelf.c
@@ -0,0 +1,41 @@
+$NetBSD$
+
+--- coregrind/m_debuginfo/readelf.c.orig 2018-07-20 10:43:38.000000000 +0000
++++ coregrind/m_debuginfo/readelf.c
+@@ -29,7 +29,7 @@
+ The GNU General Public License is contained in the file COPYING.
+ */
+
+-#if defined(VGO_linux) || defined(VGO_solaris)
++#if defined(VGO_linux) || defined(VGO_solaris) || defined(VGO_netbsd)
+
+ #include "pub_core_basics.h"
+ #include "pub_core_vki.h"
+@@ -1554,7 +1554,7 @@ static HChar* readlink_path (const HChar
+ #if defined(VGP_arm64_linux)
+ res = VG_(do_syscall4)(__NR_readlinkat, VKI_AT_FDCWD,
+ (UWord)path, (UWord)buf, bufsiz);
+-#elif defined(VGO_linux) || defined(VGO_darwin)
++#elif defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_netbsd)
+ res = VG_(do_syscall3)(__NR_readlink, (UWord)path, (UWord)buf, bufsiz);
+ #elif defined(VGO_solaris)
+ res = VG_(do_syscall4)(__NR_readlinkat, VKI_AT_FDCWD, (UWord)path,
+@@ -2393,7 +2393,8 @@ Bool ML_(read_elf_debug_info) ( struct _
+ || defined(VGP_arm_linux) || defined (VGP_s390x_linux) \
+ || defined(VGP_mips32_linux) || defined(VGP_mips64_linux) \
+ || defined(VGP_arm64_linux) \
+- || defined(VGP_x86_solaris) || defined(VGP_amd64_solaris)
++ || defined(VGP_x86_solaris) || defined(VGP_amd64_solaris) \
++ || defined(VGP_amd64_netbsd)
+ /* Accept .plt where mapped as rx (code) */
+ if (0 == VG_(strcmp)(name, ".plt")) {
+ if (inrx && !di->plt_present) {
+@@ -3333,7 +3334,7 @@ Bool ML_(read_elf_debug_info) ( struct _
+ /* NOTREACHED */
+ }
+
+-#endif // defined(VGO_linux) || defined(VGO_solaris)
++#endif // defined(VGO_linux) || defined(VGO_solaris) || defined(VGO_netbsd)
+
+ /*--------------------------------------------------------------------*/
+ /*--- end ---*/
diff --git a/valgrind-netbsd/patches/patch-coregrind_m__debuginfo_readpdb.c b/valgrind-netbsd/patches/patch-coregrind_m__debuginfo_readpdb.c
new file mode 100644
index 0000000000..331f41b53d
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-coregrind_m__debuginfo_readpdb.c
@@ -0,0 +1,22 @@
+$NetBSD$
+
+--- coregrind/m_debuginfo/readpdb.c.orig 2018-05-05 07:42:22.000000000 +0000
++++ coregrind/m_debuginfo/readpdb.c
+@@ -35,7 +35,7 @@
+ The GNU General Public License is contained in the file COPYING.
+ */
+
+-#if defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_solaris)
++#if defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_solaris) || defined(VGO_netbsd)
+
+ #include "pub_core_basics.h"
+ #include "pub_core_debuginfo.h"
+@@ -2606,7 +2606,7 @@ HChar* ML_(find_name_of_pdb_file)( const
+ return res;
+ }
+
+-#endif // defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_solaris)
++#endif // defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_solaris) || defined(VGO_netbsd)
+
+ /*--------------------------------------------------------------------*/
+ /*--- end ---*/
diff --git a/valgrind-netbsd/patches/patch-coregrind_m__debuginfo_storage.c b/valgrind-netbsd/patches/patch-coregrind_m__debuginfo_storage.c
new file mode 100644
index 0000000000..a369ccb7fc
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-coregrind_m__debuginfo_storage.c
@@ -0,0 +1,13 @@
+$NetBSD$
+
+--- coregrind/m_debuginfo/storage.c.orig 2018-05-05 07:42:22.000000000 +0000
++++ coregrind/m_debuginfo/storage.c
+@@ -1469,7 +1469,7 @@ Bool preferName ( const DebugInfo* di,
+ vlena = VG_(strlen)(a_name);
+ vlenb = VG_(strlen)(b_name);
+
+-# if defined(VGO_linux) || defined(VGO_solaris)
++# if defined(VGO_linux) || defined(VGO_solaris) || defined(VGO_netbsd)
+ # define VERSION_CHAR '@'
+ # elif defined(VGO_darwin)
+ # define VERSION_CHAR '$'
diff --git a/valgrind-netbsd/patches/patch-coregrind_m__debuglog.c b/valgrind-netbsd/patches/patch-coregrind_m__debuglog.c
new file mode 100644
index 0000000000..133c4ad7b4
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-coregrind_m__debuglog.c
@@ -0,0 +1,54 @@
+$NetBSD$
+
+--- coregrind/m_debuglog.c.orig 2018-09-03 05:28:32.000000000 +0000
++++ coregrind/m_debuglog.c
+@@ -566,6 +566,49 @@ static UInt local_sys_getpid ( void )
+ return res;
+ }
+
++#elif defined(VGP_amd64_netbsd)
++__attribute__((noinline))
++static UInt local_sys_write_stderr ( HChar* buf, Int n )
++{
++ volatile Long block[2];
++ block[0] = (Long)buf;
++ block[1] = n;
++ __asm__ volatile (
++ "subq $256, %%rsp\n" /* don't trash the stack redzone */
++ "pushq %%r15\n" /* r15 is callee-save */
++ "movq %0, %%r15\n" /* r15 = &block */
++ "pushq %%r15\n" /* save &block */
++ "movq $"VG_STRINGIFY(__NR_write)", %%rax\n" /* rax = __NR_write */
++ "movq $2, %%rdi\n" /* rdi = stderr */
++ "movq 0(%%r15), %%rsi\n" /* rsi = buf */
++ "movq 8(%%r15), %%rdx\n" /* rdx = n */
++ "syscall\n" /* write(stderr, buf, n) */
++ "popq %%r15\n" /* reestablish &block */
++ "movq %%rax, 0(%%r15)\n" /* block[0] = result */
++ "popq %%r15\n" /* restore r15 */
++ "addq $256, %%rsp\n" /* restore stack ptr */
++ : /*wr*/
++ : /*rd*/ "g" (block)
++ : /*trash*/ "rax", "rdi", "rsi", "rdx", "memory", "cc"
++ );
++ if (block[0] < 0)
++ block[0] = -1;
++ return (UInt)block[0];
++}
++
++static UInt local_sys_getpid ( void )
++{
++ UInt __res;
++ __asm__ volatile (
++ "movq $20, %%rax\n" /* set %rax = __NR_getpid */
++ "syscall\n" /* getpid() */
++ "movl %%eax, %0\n" /* set __res = %eax */
++ : "=mr" (__res)
++ :
++ : "rax" );
++ return __res;
++}
++
+ #else
+ # error Unknown platform
+ #endif
diff --git a/valgrind-netbsd/patches/patch-coregrind_m__dispatch_dispatch-amd64-netbsd.S b/valgrind-netbsd/patches/patch-coregrind_m__dispatch_dispatch-amd64-netbsd.S
new file mode 100644
index 0000000000..201ffcb494
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-coregrind_m__dispatch_dispatch-amd64-netbsd.S
@@ -0,0 +1,261 @@
+$NetBSD$
+
+--- coregrind/m_dispatch/dispatch-amd64-netbsd.S.orig 2019-03-30 13:41:46.939848076 +0000
++++ coregrind/m_dispatch/dispatch-amd64-netbsd.S
+@@ -0,0 +1,256 @@
++
++/*--------------------------------------------------------------------*/
++/*--- The core dispatch loop, for jumping to a code address. ---*/
++/*--- dispatch-amd64-netbsd.S ---*/
++/*--------------------------------------------------------------------*/
++
++/*
++ This file is part of Valgrind, a dynamic binary instrumentation
++ framework.
++
++ Copyright (C) 2000-2012 Julian Seward
++ jseward%acm.org@localhost
++
++ This program is free software; you can redistribute it and/or
++ modify it under the terms of the GNU General Public License as
++ published by the Free Software Foundation; either version 2 of the
++ License, or (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful, but
++ WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
++ 02111-1307, USA.
++
++ The GNU General Public License is contained in the file COPYING.
++*/
++
++#if defined(VGP_amd64_netbsd)
++
++#include "pub_core_basics_asm.h"
++#include "pub_core_dispatch_asm.h"
++#include "pub_core_transtab_asm.h"
++#include "libvex_guest_offsets.h" /* for OFFSET_amd64_RIP */
++
++
++/*------------------------------------------------------------*/
++/*--- ---*/
++/*--- The dispatch loop. VG_(disp_run_translations) is ---*/
++/*--- used to run all translations, ---*/
++/*--- including no-redir ones. ---*/
++/*--- ---*/
++/*------------------------------------------------------------*/
++
++/*----------------------------------------------------*/
++/*--- Entry and preamble (set everything up) ---*/
++/*----------------------------------------------------*/
++
++/* signature:
++void VG_(disp_run_translations)( UWord* two_words,
++ void* guest_state,
++ Addr host_addr );
++*/
++.text
++.globl VG_(disp_run_translations)
++.type VG_(disp_run_translations), @function
++VG_(disp_run_translations):
++ /* %rdi holds two_words */
++ /* %rsi holds guest_state */
++ /* %rdx holds host_addr */
++
++ /* The preamble */
++
++ /* Save integer registers, since this is a pseudo-function. */
++ pushq %rax
++ pushq %rbx
++ pushq %rcx
++ pushq %rdx
++ pushq %rsi
++ pushq %rbp
++ pushq %r8
++ pushq %r9
++ pushq %r10
++ pushq %r11
++ pushq %r12
++ pushq %r13
++ pushq %r14
++ pushq %r15
++ /* %rdi must be saved last */
++ pushq %rdi
++
++ /* Get the host CPU in the state expected by generated code. */
++
++ /* set host FPU control word to the default mode expected
++ by VEX-generated code. See comments in libvex.h for
++ more info. */
++ finit
++ pushq $0x027F
++ fldcw (%rsp)
++ addq $8, %rsp
++
++ /* set host SSE control word to the default mode expected
++ by VEX-generated code. */
++ pushq $0x1F80
++ ldmxcsr (%rsp)
++ addq $8, %rsp
++
++ /* set dir flag to known value */
++ cld
++
++ /* Set up the guest state pointer */
++ movq %rsi, %rbp
++
++ /* and jump into the code cache. Chained translations in
++ the code cache run, until for whatever reason, they can't
++ continue. When that happens, the translation in question
++ will jump (or call) to one of the continuation points
++ VG_(cp_...) below. */
++ jmpq *%rdx
++ /*NOTREACHED*/
++
++/*----------------------------------------------------*/
++/*--- Postamble and exit. ---*/
++/*----------------------------------------------------*/
++
++postamble:
++ /* At this point, %rax and %rdx contain two
++ words to be returned to the caller. %rax
++ holds a TRC value, and %rdx optionally may
++ hold another word (for CHAIN_ME exits, the
++ address of the place to patch.) */
++
++ /* We're leaving. Check that nobody messed with %mxcsr
++ or %fpucw. We can't mess with %rax or %rdx here as they
++ hold the tentative return values, but any others are OK. */
++#if !defined(ENABLE_INNER)
++ /* This check fails for self-hosting, so skip in that case */
++ pushq $0
++ fstcw (%rsp)
++ cmpl $0x027F, (%rsp)
++ popq %r15 /* get rid of the word without trashing %rflags */
++ jnz invariant_violation
++#endif
++ pushq $0
++ stmxcsr (%rsp)
++ andl $0xFFFFFFC0, (%rsp) /* mask out status flags */
++ cmpl $0x1F80, (%rsp)
++ popq %r15
++ jnz invariant_violation
++ /* otherwise we're OK */
++ jmp remove_frame
++invariant_violation:
++ movq $VG_TRC_INVARIANT_FAILED, %rax
++ movq $0, %rdx
++
++remove_frame:
++ /* Pop %rdi, stash return values */
++ popq %rdi
++ movq %rax, 0(%rdi)
++ movq %rdx, 8(%rdi)
++ /* Now pop everything else */
++ popq %r15
++ popq %r14
++ popq %r13
++ popq %r12
++ popq %r11
++ popq %r10
++ popq %r9
++ popq %r8
++ popq %rbp
++ popq %rsi
++ popq %rdx
++ popq %rcx
++ popq %rbx
++ popq %rax
++ ret
++
++/*----------------------------------------------------*/
++/*--- Continuation points ---*/
++/*----------------------------------------------------*/
++
++/* ------ Chain me to slow entry point ------ */
++.global VG_(disp_cp_chain_me_to_slowEP)
++VG_(disp_cp_chain_me_to_slowEP):
++ /* We got called. The return address indicates
++ where the patching needs to happen. Collect
++ the return address and, exit back to C land,
++ handing the caller the pair (Chain_me_S, RA) */
++ movq $VG_TRC_CHAIN_ME_TO_SLOW_EP, %rax
++ popq %rdx
++ /* 10 = movabsq $VG_(disp_chain_me_to_slowEP), %r11;
++ 3 = call *%r11 */
++ subq $10+3, %rdx
++ jmp postamble
++
++/* ------ Chain me to fast entry point ------ */
++.global VG_(disp_cp_chain_me_to_fastEP)
++VG_(disp_cp_chain_me_to_fastEP):
++ /* We got called. The return address indicates
++ where the patching needs to happen. Collect
++ the return address and, exit back to C land,
++ handing the caller the pair (Chain_me_F, RA) */
++ movq $VG_TRC_CHAIN_ME_TO_FAST_EP, %rax
++ popq %rdx
++ /* 10 = movabsq $VG_(disp_chain_me_to_fastEP), %r11;
++ 3 = call *%r11 */
++ subq $10+3, %rdx
++ jmp postamble
++
++/* ------ Indirect but boring jump ------ */
++.global VG_(disp_cp_xindir)
++VG_(disp_cp_xindir):
++ /* Where are we going? */
++ movq OFFSET_amd64_RIP(%rbp), %rax
++
++ /* stats only */
++// addl $1, VG_(stats__n_xindirs_32)
++
++ /* try a fast lookup in the translation cache */
++ movabsq $VG_(tt_fast), %rcx
++ movq %rax, %rbx /* next guest addr */
++ andq $VG_TT_FAST_MASK, %rbx /* entry# */
++ shlq $4, %rbx /* entry# * sizeof(FastCacheEntry) */
++ movq 0(%rcx,%rbx,1), %r10 /* .guest */
++ movq 8(%rcx,%rbx,1), %r11 /* .host */
++ cmpq %rax, %r10
++ jnz fast_lookup_failed
++
++ /* Found a match. Jump to .host. */
++ jmp *%r11
++ ud2 /* persuade insn decoders not to speculate past here */
++
++fast_lookup_failed:
++ /* stats only */
++// addl $1, VG_(stats__n_xindir_misses_32)
++
++ movq $VG_TRC_INNER_FASTMISS, %rax
++ movq $0, %rdx
++ jmp postamble
++
++/* ------ Assisted jump ------ */
++.global VG_(disp_cp_xassisted)
++VG_(disp_cp_xassisted):
++ /* %rbp contains the TRC */
++ movq %rbp, %rax
++ movq $0, %rdx
++ jmp postamble
++
++/* ------ Event check failed ------ */
++.global VG_(disp_cp_evcheck_fail)
++VG_(disp_cp_evcheck_fail):
++ movq $VG_TRC_INNER_COUNTERZERO, %rax
++ movq $0, %rdx
++ jmp postamble
++
++
++.size VG_(disp_run_translations), .-VG_(disp_run_translations)
++
++#endif // defined(VGP_amd64_netbsd)
++
++/*--------------------------------------------------------------------*/
++/*--- end ---*/
++/*--------------------------------------------------------------------*/
diff --git a/valgrind-netbsd/patches/patch-coregrind_m__errormgr.c b/valgrind-netbsd/patches/patch-coregrind_m__errormgr.c
new file mode 100644
index 0000000000..391522c5f2
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-coregrind_m__errormgr.c
@@ -0,0 +1,69 @@
+$NetBSD$
+
+--- coregrind/m_errormgr.c.orig 2018-08-17 15:58:00.000000000 +0000
++++ coregrind/m_errormgr.c
+@@ -584,6 +584,7 @@ static void pp_Error ( const Error* err,
+ {
+ /* If this fails, you probably specified your tool's method
+ dictionary incorrectly. */
++ VG_(debugLog)(2, "initimg", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ vg_assert(VG_(needs).tool_errors);
+
+ if (xml) {
+@@ -642,6 +643,7 @@ static void pp_Error ( const Error* err,
+ }
+
+ do_actions_on_error(err, allow_db_attach);
++ VG_(debugLog)(2, "initimg", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ }
+
+
+@@ -685,6 +687,7 @@ void VG_(maybe_record_error) ( ThreadId
+ ErrorKind ekind, Addr a,
+ const HChar* s, void* extra )
+ {
++VG_(debugLog)(2, "initimg", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ Error err;
+ Error* p;
+ Error* p_prev;
+@@ -729,13 +732,16 @@ void VG_(maybe_record_error) ( ThreadId
+ VG_(umsg)("\n");
+ stopping_message = True;
+ }
++VG_(debugLog)(2, "initimg", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ return;
+ }
+
+ /* Ignore it if error acquisition is disabled for this thread. */
+ { ThreadState* tst = VG_(get_ThreadState)(tid);
+- if (tst->err_disablement_level > 0)
++ if (tst->err_disablement_level > 0) {
++VG_(debugLog)(2, "initimg", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ return;
++ }
+ }
+
+ /* After M_COLLECT_ERRORS_SLOWLY_AFTER different errors have
+@@ -842,18 +848,22 @@ void VG_(maybe_record_error) ( ThreadId
+ p->next = errors;
+ p->supp = is_suppressible_error(&err);
+ errors = p;
++VG_(debugLog)(2, "initimg", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ if (p->supp == NULL) {
+ /* update stats */
+ n_err_contexts++;
+ n_errs_found++;
+ n_errs_shown++;
+ /* Actually show the error; more complex than you might think. */
++VG_(debugLog)(2, "initimg", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ pp_Error( p, /*allow_db_attach*/True, VG_(clo_xml) );
++VG_(debugLog)(2, "initimg", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ } else {
+ n_supp_contexts++;
+ n_errs_suppressed++;
+ p->supp->count++;
+ }
++VG_(debugLog)(2, "initimg", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ }
+
+ /* Second top-level entry point to the error management subsystem, for
diff --git a/valgrind-netbsd/patches/patch-coregrind_m__initimg_initimg-netbsd.c b/valgrind-netbsd/patches/patch-coregrind_m__initimg_initimg-netbsd.c
new file mode 100644
index 0000000000..83cc0bc3fb
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-coregrind_m__initimg_initimg-netbsd.c
@@ -0,0 +1,898 @@
+$NetBSD$
+
+--- coregrind/m_initimg/initimg-netbsd.c.orig 2019-04-01 10:05:56.420527388 +0000
++++ coregrind/m_initimg/initimg-netbsd.c
+@@ -0,0 +1,893 @@
++
++/*--------------------------------------------------------------------*/
++/*--- Startup: create initial process image on NetBSD ---*/
++/*--- initimg-netbsd.c ---*/
++/*--------------------------------------------------------------------*/
++
++/*
++ This file is part of Valgrind, a dynamic binary instrumentation
++ framework.
++
++ Copyright (C) 2000-2009 Julian Seward
++ jseward%acm.org@localhost
++
++ This program is free software; you can redistribute it and/or
++ modify it under the terms of the GNU General Public License as
++ published by the Free Software Foundation; either version 2 of the
++ License, or (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful, but
++ WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
++ 02111-1307, USA.
++
++ The GNU General Public License is contained in the file COPYING.
++*/
++
++#if defined(VGO_netbsd)
++
++#include "pub_core_basics.h"
++#include "pub_core_vki.h"
++#include "pub_core_debuglog.h"
++#include "pub_core_libcbase.h"
++#include "pub_core_libcassert.h"
++#include "pub_core_libcfile.h"
++#include "pub_core_libcproc.h"
++#include "pub_core_libcprint.h"
++#include "pub_core_xarray.h"
++#include "pub_core_clientstate.h"
++#include "pub_core_aspacemgr.h"
++#include "pub_core_mallocfree.h"
++#include "pub_core_machine.h"
++#include "pub_core_ume.h"
++#include "pub_core_options.h"
++#include "pub_core_syscall.h"
++#include "pub_core_tooliface.h" /* VG_TRACK */
++#include "pub_core_libcsetjmp.h" // to keep _threadstate.h happy
++#include "pub_core_threadstate.h" /* ThreadArchState */
++#include "priv_initimg_pathscan.h"
++#include "pub_core_initimg.h" /* self */
++
++/* --- !!! --- EXTERNAL HEADERS start --- !!! --- */
++#define _GNU_SOURCE
++#define _FILE_OFFSET_BITS 64
++/* This is for ELF types etc, and also the AT_ constants. */
++#include <elf.h>
++/* --- !!! --- EXTERNAL HEADERS end --- !!! --- */
++
++
++/*====================================================================*/
++/*=== Loading the client ===*/
++/*====================================================================*/
++
++/* Load the client whose name is VG_(argv_the_exename). */
++
++static void load_client ( /*OUT*/ExeInfo* info,
++ /*OUT*/Addr* client_ip,
++ /*OUT*/Addr* client_toc)
++{
++ const HChar* exe_name;
++ Int ret;
++ SysRes res;
++
++ vg_assert( VG_(args_the_exename) != NULL);
++ exe_name = ML_(find_executable)( VG_(args_the_exename) );
++
++ if (!exe_name) {
++ VG_(printf)("valgrind: %s: command not found\n", VG_(args_the_exename));
++ VG_(exit)(127); // 127 is Posix NOTFOUND
++ }
++
++ VG_(memset)(info, 0, sizeof(*info));
++ ret = VG_(do_exec)(exe_name, info);
++ if (ret < 0) {
++ VG_(printf)("valgrind: could not execute '%s'\n", exe_name);
++ VG_(exit)(1);
++ }
++
++ // The client was successfully loaded! Continue.
++
++ /* Get hold of a file descriptor which refers to the client
++ executable. This is needed for attaching to GDB. */
++ res = VG_(open)(exe_name, VKI_O_RDONLY, VKI_S_IRUSR);
++ if (!sr_isError(res))
++ VG_(cl_exec_fd) = sr_Res(res);
++
++ /* Copy necessary bits of 'info' that were filled in */
++ *client_ip = info->init_ip;
++ *client_toc = info->init_toc;
++ VG_(brk_base) = VG_(brk_limit) = VG_PGROUNDUP(info->brkbase);
++}
++
++
++/*====================================================================*/
++/*=== Setting up the client's environment ===*/
++/*====================================================================*/
++
++/* Prepare the client's environment. This is basically a copy of our
++ environment, except:
++
++ LD_PRELOAD=$VALGRIND_LIB/vgpreload_core-PLATFORM.so:
++ ($VALGRIND_LIB/vgpreload_TOOL-PLATFORM.so:)?
++ $LD_PRELOAD
++
++ If this is missing, then it is added.
++
++ Also, remove any binding for VALGRIND_LAUNCHER=. The client should
++ not be able to see this.
++
++ If this needs to handle any more variables it should be hacked
++ into something table driven. The copy is VG_(malloc)'d space.
++*/
++static HChar** setup_client_env ( HChar** origenv, const HChar* toolname)
++{
++ const HChar* preload_core = "vgpreload_core";
++ const HChar* ld_preload = "LD_PRELOAD=";
++ const HChar* v_launcher = VALGRIND_LAUNCHER "=";
++ Int ld_preload_len = VG_(strlen)( ld_preload );
++ Int v_launcher_len = VG_(strlen)( v_launcher );
++ Bool ld_preload_done = False;
++#if defined(VGP_x86_netbsd)
++ HChar* ld_32_preload = "LD_32_PRELOAD=";
++ Int ld_32_preload_len = VG_(strlen)( ld_32_preload );
++ Bool ld_32_preload_done = False;
++#endif
++ Int vglib_len = VG_(strlen)(VG_(libdir));
++
++ HChar** cpp;
++ HChar** ret;
++ HChar* preload_tool_path;
++ Int envc, i;
++
++ /* Alloc space for the vgpreload_core.so path and vgpreload_<tool>.so
++ paths. We might not need the space for vgpreload_<tool>.so, but it
++ doesn't hurt to over-allocate briefly. The 16s are just cautious
++ slop. */
++ Int preload_core_path_len = vglib_len + sizeof(preload_core)
++ + sizeof(VG_PLATFORM) + 16;
++ Int preload_tool_path_len = vglib_len + VG_(strlen)(toolname)
++ + sizeof(VG_PLATFORM) + 16;
++ Int preload_string_len = preload_core_path_len + preload_tool_path_len;
++ HChar* preload_string = VG_(malloc)("initimg-netbsd.sce.1",
++ preload_string_len);
++ vg_assert(origenv);
++ vg_assert(toolname);
++ vg_assert(preload_string);
++
++ /* Determine if there's a vgpreload_<tool>_<platform>.so file, and setup
++ preload_string. */
++ preload_tool_path = VG_(malloc)("initimg-netbsd.sce.2", preload_tool_path_len);
++ vg_assert(preload_tool_path);
++ VG_(snprintf)(preload_tool_path, preload_tool_path_len,
++ "%s/vgpreload_%s-%s.so", VG_(libdir), toolname, VG_PLATFORM);
++ if (VG_(access)(preload_tool_path, True/*r*/, False/*w*/, False/*x*/) == 0) {
++ VG_(snprintf)(preload_string, preload_string_len, "%s/%s-%s.so:%s",
++ VG_(libdir), preload_core, VG_PLATFORM, preload_tool_path);
++ } else {
++ VG_(snprintf)(preload_string, preload_string_len, "%s/%s-%s.so",
++ VG_(libdir), preload_core, VG_PLATFORM);
++ }
++ VG_(free)(preload_tool_path);
++
++ VG_(debugLog)(2, "initimg", "preload_string:\n");
++ VG_(debugLog)(2, "initimg", " \"%s\"\n", preload_string);
++
++ /* Count the original size of the env */
++ envc = 0;
++ for (cpp = origenv; cpp && *cpp; cpp++)
++ envc++;
++
++ /* Allocate a new space */
++ ret = VG_(malloc) ("initimg-netbsd.sce.3",
++ sizeof(HChar *) * (envc+2+1)); /* 2 new entry + NULL */
++ vg_assert(ret);
++
++ /* copy it over */
++ for (cpp = ret; *origenv; )
++ *cpp++ = *origenv++;
++ *cpp = NULL;
++ *(cpp + 1) = NULL;
++
++ vg_assert(envc == (cpp - ret));
++
++ /* Walk over the new environment, mashing as we go */
++ for (cpp = ret; cpp && *cpp; cpp++) {
++ if (VG_(memcmp)(*cpp, ld_preload, ld_preload_len) == 0) {
++ Int len = VG_(strlen)(*cpp) + preload_string_len;
++ HChar *cp = VG_(malloc)("initimg-netbsd.sce.4", len);
++ vg_assert(cp);
++
++ VG_(snprintf)(cp, len, "%s%s:%s",
++ ld_preload, preload_string, (*cpp)+ld_preload_len);
++
++ *cpp = cp;
++
++ ld_preload_done = True;
++ }
++ }
++
++ /* Add the missing bits */
++ if (!ld_preload_done) {
++ Int len = ld_preload_len + preload_string_len;
++ HChar *cp = VG_(malloc) ("initimg-netbsd.sce.5", len);
++ vg_assert(cp);
++
++ VG_(snprintf)(cp, len, "%s%s", ld_preload, preload_string);
++
++ ret[envc++] = cp;
++ }
++
++#if defined(VGP_x86_netbsd)
++ /* If we're running a 32 bit binary, ld-elf32.so.1 may be looking for
++ * a different variable name. Or it might be a 32 bit ld-elf.so.1 in a
++ * chroot. Cover both cases. */
++ if (VG_(is32on64)()) {
++ for (cpp = ret; cpp && *cpp; cpp++) {
++ if (VG_(memcmp)(*cpp, ld_32_preload, ld_32_preload_len) == 0) {
++ Int len = VG_(strlen)(*cpp) + preload_string_len;
++ HChar *cp = VG_(malloc)("initimg-netbsd.sce.4a", len);
++ vg_assert(cp);
++
++ VG_(snprintf)(cp, len, "%s%s:%s",
++ ld_32_preload, preload_string, (*cpp)+ld_32_preload_len);
++
++ *cpp = cp;
++
++ ld_32_preload_done = True;
++ }
++ }
++ if (!ld_32_preload_done) {
++ Int len = ld_32_preload_len + preload_string_len;
++ HChar *cp = VG_(malloc) ("initimg-netbsd.sce.5a", len);
++ vg_assert(cp);
++
++ VG_(snprintf)(cp, len, "%s%s", ld_32_preload, preload_string);
++
++ ret[envc++] = cp;
++ }
++ }
++#endif
++
++ /* ret[0 .. envc-1] is live now. */
++ /* Find and remove a binding for VALGRIND_LAUNCHER. */
++ for (i = 0; i < envc; i++)
++ if (0 == VG_(memcmp)(ret[i], v_launcher, v_launcher_len))
++ break;
++
++ if (i < envc) {
++ for (; i < envc-1; i++)
++ ret[i] = ret[i+1];
++ envc--;
++ }
++
++ VG_(free)(preload_string);
++ ret[envc] = NULL;
++
++ return ret;
++}
++
++
++/*====================================================================*/
++/*=== Setting up the client's stack ===*/
++/*====================================================================*/
++
++/* Add a string onto the string table, and return its address */
++static char *copy_str(char **tab, const char *str)
++{
++ char *cp = *tab;
++ char *orig = cp;
++
++ while(*str)
++ *cp++ = *str++;
++ *cp++ = '\0';
++
++ if (0)
++ VG_(printf)("copied %p \"%s\" len %lld\n", orig, orig, (Long)(cp-orig));
++
++ *tab = cp;
++
++ return orig;
++}
++
++
++/* ----------------------------------------------------------------
++
++ This sets up the client's initial stack, containing the args,
++ environment and aux vector.
++
++ The format of the stack is:
++
++ higher address +-----------------+ <- clstack_end
++ | |
++ : string table :
++ | |
++ +-----------------+
++ | AT_NULL |
++ - -
++ | auxv |
++ +-----------------+
++ | NULL |
++ - -
++ | envp |
++ +-----------------+
++ | NULL |
++ - -
++ | argv |
++ +-----------------+
++ | argc |
++ lower address +-----------------+ <- sp
++ | undefined |
++ : :
++
++ Allocate and create the initial client stack. It is allocated down
++ from clstack_end, which was previously determined by the address
++ space manager. The returned value is the SP value for the client.
++
++ The client's auxv is created by copying and modifying our own one.
++ As a side effect of scanning our own auxv, some important bits of
++ info are collected:
++
++ VG_(cache_line_size_ppc32) // ppc32 only -- cache line size
++ VG_(have_altivec_ppc32) // ppc32 only -- is Altivec supported?
++
++ ---------------------------------------------------------------- */
++
++struct auxv
++{
++ Word a_type;
++ union {
++ void *a_ptr;
++ Word a_val;
++ } u;
++};
++
++static
++struct auxv *find_auxv(UWord* sp)
++{
++ sp++; // skip argc (Nb: is word-sized, not int-sized!)
++
++ while (*sp != 0) // skip argv
++ sp++;
++ sp++;
++
++ while (*sp != 0) // skip env
++ sp++;
++ sp++;
++
++#if defined(VGA_ppc32) || defined(VGA_ppc64)
++# if defined AT_IGNOREPPC
++ while (*sp == AT_IGNOREPPC) // skip AT_IGNOREPPC entries
++ sp += 2;
++# endif
++#endif
++
++ return (struct auxv *)sp;
++}
++
++static
++Addr setup_client_stack( void* init_sp,
++ char** orig_envp,
++ const ExeInfo* info,
++ UInt** client_auxv,
++ Addr clstack_end,
++ SizeT clstack_max_size )
++{
++ SysRes res;
++ char **cpp;
++ char *strtab; /* string table */
++ char *stringbase;
++ Addr *ptr;
++ struct auxv *auxv;
++ const struct auxv *orig_auxv;
++ const struct auxv *cauxv;
++ unsigned stringsize; /* total size of strings in bytes */
++ unsigned auxsize; /* total size of auxv in bytes */
++ Int argc; /* total argc */
++ Int envc; /* total number of env vars */
++ unsigned stacksize; /* total client stack size */
++ Addr client_SP; /* client stack base (initial SP) */
++ Addr clstack_start;
++ Int i;
++ Bool have_exename;
++
++ vg_assert(VG_IS_PAGE_ALIGNED(clstack_end+1));
++ vg_assert( VG_(args_for_client) );
++
++ /* use our own auxv as a prototype */
++ orig_auxv = find_auxv(init_sp);
++
++ /* ==================== compute sizes ==================== */
++
++ /* first of all, work out how big the client stack will be */
++ stringsize = 0;
++ have_exename = VG_(args_the_exename) != NULL;
++
++ /* paste on the extra args if the loader needs them (ie, the #!
++ interpreter and its argument) */
++ argc = 0;
++ if (info->interp_name != NULL) {
++ argc++;
++ stringsize += VG_(strlen)(info->interp_name) + 1;
++ }
++ if (info->interp_args != NULL) {
++ argc++;
++ stringsize += VG_(strlen)(info->interp_args) + 1;
++ }
++
++ /* now scan the args we're given... */
++ if (have_exename)
++ stringsize += VG_(strlen)( VG_(args_the_exename) ) + 1;
++
++ for (i = 0; i < VG_(sizeXA)( VG_(args_for_client) ); i++) {
++ argc++;
++ stringsize += VG_(strlen)( * (HChar**)
++ VG_(indexXA)( VG_(args_for_client), i ))
++ + 1;
++ }
++
++ /* ...and the environment */
++ envc = 0;
++ for (cpp = orig_envp; cpp && *cpp; cpp++) {
++ envc++;
++ stringsize += VG_(strlen)(*cpp) + 1;
++ }
++
++ /* now, how big is the auxv? */
++ auxsize = sizeof(*auxv); /* there's always at least one entry: AT_NULL */
++ for (cauxv = orig_auxv; cauxv->a_type != AT_NULL; cauxv++) {
++ auxsize += sizeof(*cauxv);
++ }
++
++ /* OK, now we know how big the client stack is */
++ stacksize =
++ sizeof(Word) + /* argc */
++ (have_exename ? sizeof(char **) : 0) + /* argc[0] == exename */
++ sizeof(char **)*argc + /* argv */
++ sizeof(char **) + /* terminal NULL */
++ sizeof(char **)*envc + /* envp */
++ sizeof(char **) + /* terminal NULL */
++ auxsize + /* auxv */
++ VG_ROUNDUP(stringsize, sizeof(Word)); /* strings (aligned) */
++
++ if (0) VG_(printf)("stacksize = %d\n", stacksize);
++
++ /* client_SP is the client's stack pointer */
++ client_SP = clstack_end - stacksize;
++ client_SP = VG_ROUNDDN(client_SP, 16); /* make stack 16 byte aligned */
++
++ /* base of the string table (aligned) */
++ stringbase = strtab = (char *)clstack_end
++ - VG_ROUNDUP(stringsize, sizeof(int));
++
++ clstack_start = VG_PGROUNDDN(client_SP);
++
++ /* The max stack size */
++ clstack_max_size = VG_PGROUNDUP(clstack_max_size);
++
++ /* Record stack extent -- needed for stack-change code. */
++ VG_(clstk_start_base) = clstack_start;
++ VG_(clstk_end) = clstack_end;
++
++ if (0)
++ VG_(printf)("stringsize=%d auxsize=%d stacksize=%d maxsize=0x%x\n"
++ "clstack_start %p\n"
++ "clstack_end %p\n",
++ stringsize, auxsize, stacksize, (Int)clstack_max_size,
++ (void*)clstack_start, (void*)clstack_end);
++
++ /* ==================== allocate space ==================== */
++
++ { SizeT anon_size = clstack_end - clstack_start + 1;
++ SizeT resvn_size = clstack_max_size - anon_size;
++ Addr anon_start = clstack_start;
++ Addr resvn_start = anon_start - resvn_size;
++ SizeT inner_HACK = 0;
++ Bool ok;
++
++ /* So far we've only accounted for space requirements down to the
++ stack pointer. If this target's ABI requires a redzone below
++ the stack pointer, we need to allocate an extra page, to
++ handle the worst case in which the stack pointer is almost at
++ the bottom of a page, and so there is insufficient room left
++ over to put the redzone in. In this case the simple thing to
++ do is allocate an extra page, by shrinking the reservation by
++ one page and growing the anonymous area by a corresponding
++ page. */
++ vg_assert(VG_STACK_REDZONE_SZB >= 0);
++ vg_assert(VG_STACK_REDZONE_SZB < VKI_PAGE_SIZE);
++ if (VG_STACK_REDZONE_SZB > 0) {
++ vg_assert(resvn_size > VKI_PAGE_SIZE);
++ resvn_size -= VKI_PAGE_SIZE;
++ anon_start -= VKI_PAGE_SIZE;
++ anon_size += VKI_PAGE_SIZE;
++ }
++
++ vg_assert(VG_IS_PAGE_ALIGNED(anon_size));
++ vg_assert(VG_IS_PAGE_ALIGNED(resvn_size));
++ vg_assert(VG_IS_PAGE_ALIGNED(anon_start));
++ vg_assert(VG_IS_PAGE_ALIGNED(resvn_start));
++ vg_assert(resvn_start == clstack_end + 1 - clstack_max_size);
++
++# ifdef ENABLE_INNER
++ inner_HACK = 1024*1024; // create 1M non-fault-extending stack
++# endif
++
++ if (0)
++ VG_(printf)("%#lx 0x%lx %#lx 0x%lx\n",
++ resvn_start, resvn_size, anon_start, anon_size);
++
++ /* Create a shrinkable reservation followed by an anonymous
++ segment. Together these constitute a growdown stack. */
++ res = VG_(mk_SysRes_Error)(0);
++ ok = VG_(am_create_reservation)(
++ resvn_start,
++ resvn_size -inner_HACK,
++ SmUpper,
++ anon_size +inner_HACK
++ );
++ if (ok) {
++ /* allocate a stack - mmap enough space for the stack */
++ res = VG_(am_mmap_anon_fixed_client)(
++ anon_start -inner_HACK,
++ anon_size +inner_HACK,
++ VKI_PROT_READ|VKI_PROT_WRITE|VKI_PROT_EXEC
++ );
++ }
++ if ((!ok) || sr_isError(res)) {
++ /* Allocation of the stack failed. We have to stop. */
++ VG_(printf)("valgrind: "
++ "I failed to allocate space for the application's stack.\n");
++ VG_(printf)("valgrind: "
++ "This may be the result of a very large --main-stacksize=\n");
++ VG_(printf)("valgrind: setting. Cannot continue. Sorry.\n\n");
++ VG_(exit)(0);
++ }
++
++ vg_assert(ok);
++ vg_assert(!sr_isError(res));
++ }
++
++ /* ==================== create client stack ==================== */
++
++ ptr = (Addr*)client_SP;
++
++ /* --- client argc --- */
++ *ptr++ = argc + (have_exename ? 1 : 0);
++
++ /* --- client argv --- */
++ if (info->interp_name) {
++ *ptr++ = (Addr)copy_str(&strtab, info->interp_name);
++ VG_(free)(info->interp_name);
++ }
++ if (info->interp_args) {
++ *ptr++ = (Addr)copy_str(&strtab, info->interp_args);
++ VG_(free)(info->interp_args);
++ }
++
++ if (have_exename)
++ *ptr++ = (Addr)copy_str(&strtab, VG_(args_the_exename));
++
++ for (i = 0; i < VG_(sizeXA)( VG_(args_for_client) ); i++) {
++ *ptr++ = (Addr)copy_str(
++ &strtab,
++ * (HChar**) VG_(indexXA)( VG_(args_for_client), i )
++ );
++ }
++ *ptr++ = 0;
++
++ /* --- envp --- */
++ VG_(client_envp) = (HChar **)ptr;
++ for (cpp = orig_envp; cpp && *cpp; ptr++, cpp++)
++ *ptr = (Addr)copy_str(&strtab, *cpp);
++ *ptr++ = 0;
++
++ /* --- auxv --- */
++ auxv = (struct auxv *)ptr;
++ *client_auxv = (UInt *)auxv;
++
++ for (; orig_auxv->a_type != AT_NULL; auxv++, orig_auxv++) {
++
++ /* copy the entry... */
++ *auxv = *orig_auxv;
++
++ /* ...and fix up / examine the copy */
++ switch(auxv->a_type) {
++
++ case VKI_AT_IGNORE:
++ case VKI_AT_PHENT:
++ case VKI_AT_PAGESZ:
++ case VKI_AT_FLAGS:
++ case VKI_AT_EUID:
++ case VKI_AT_EGID:
++ /* All these are pointerless, so we don't need to do
++ anything about them. */
++ break;
++
++ case VKI_AT_PHDR:
++ if (info->phdr == 0)
++ auxv->a_type = VKI_AT_IGNORE;
++ else
++ auxv->u.a_val = info->phdr;
++ break;
++
++ case VKI_AT_PHNUM:
++ if (info->phdr == 0)
++ auxv->a_type = VKI_AT_IGNORE;
++ else
++ auxv->u.a_val = info->phnum;
++ break;
++
++ case VKI_AT_BASE:
++ auxv->u.a_val = info->interp_offset;
++ break;
++
++ case VKI_AT_ENTRY:
++ auxv->u.a_val = info->entry;
++ break;
++
++ default:
++ /* stomp out anything we don't know about */
++ VG_(debugLog)(2, "initimg",
++ "stomping auxv entry %lld\n",
++ (ULong)auxv->a_type);
++ auxv->a_type = AT_IGNORE;
++ break;
++ }
++ }
++ *auxv = *orig_auxv;
++ vg_assert(auxv->a_type == AT_NULL);
++
++ vg_assert((strtab-stringbase) == stringsize);
++
++ /* client_SP is pointing at client's argc/argv */
++
++ if (0) VG_(printf)("startup SP = %#lx\n", client_SP);
++ return client_SP;
++}
++
++
++/* Allocate the client data segment. It is an expandable anonymous
++ mapping abutting a shrinkable reservation of size max_dseg_size.
++ The data segment starts at VG_(brk_base), which is page-aligned,
++ and runs up to VG_(brk_limit), which isn't. */
++
++static void setup_client_dataseg ( SizeT max_size )
++{
++ Bool ok;
++ SysRes sres;
++ Addr anon_start = VG_(brk_base);
++ SizeT anon_size = VKI_PAGE_SIZE;
++ Addr resvn_start = anon_start + anon_size;
++ SizeT resvn_size = max_size - anon_size;
++
++ vg_assert(VG_IS_PAGE_ALIGNED(anon_size));
++ vg_assert(VG_IS_PAGE_ALIGNED(resvn_size));
++ vg_assert(VG_IS_PAGE_ALIGNED(anon_start));
++ vg_assert(VG_IS_PAGE_ALIGNED(resvn_start));
++
++ /* Because there's been no brk activity yet: */
++ vg_assert(VG_(brk_base) == VG_(brk_limit));
++
++ /* Try to create the data seg and associated reservation where
++ VG_(brk_base) says. */
++ ok = VG_(am_create_reservation)(
++ resvn_start,
++ resvn_size,
++ SmLower,
++ anon_size
++ );
++
++ if (!ok) {
++ /* Hmm, that didn't work. Well, let aspacem suggest an address
++ it likes better, and try again with that. */
++ anon_start = VG_(am_get_advisory_client_simple)
++ ( 0/*floating*/, anon_size+resvn_size, &ok );
++ if (ok) {
++ resvn_start = anon_start + anon_size;
++ ok = VG_(am_create_reservation)(
++ resvn_start,
++ resvn_size,
++ SmLower,
++ anon_size
++ );
++ if (ok)
++ VG_(brk_base) = VG_(brk_limit) = anon_start;
++ }
++ /* that too might have failed, but if it has, we're hosed: there
++ is no Plan C. */
++ }
++ vg_assert(ok);
++
++ /* We make the data segment (heap) executable because LinuxThreads on
++ ppc32 creates trampolines in this area. Also, on x86/Linux the data
++ segment is RWX natively, at least according to /proc/self/maps.
++ Also, having a non-executable data seg would kill any program which
++ tried to create code in the data seg and then run it. */
++ sres = VG_(am_mmap_anon_fixed_client)(
++ anon_start,
++ anon_size,
++ VKI_PROT_READ|VKI_PROT_WRITE|VKI_PROT_EXEC
++ );
++ vg_assert(!sr_isError(sres));
++ vg_assert(sr_Res(sres) == anon_start);
++}
++
++
++/*====================================================================*/
++/*=== TOP-LEVEL: VG_(setup_client_initial_image) ===*/
++/*====================================================================*/
++
++/* Create the client's initial memory image. */
++IIFinaliseImageInfo VG_(ii_create_image)( IICreateImageInfo iicii,
++ const VexArchInfo* vex_archinfo )
++{
++ ExeInfo info;
++ HChar** env = NULL;
++
++ IIFinaliseImageInfo iifii;
++ VG_(memset)( &iifii, 0, sizeof(iifii) );
++
++ //--------------------------------------------------------------
++ // Load client executable, finding in $PATH if necessary
++ // p: get_helprequest_and_toolname() [for 'exec', 'need_help']
++ // p: layout_remaining_space [so there's space]
++ //--------------------------------------------------------------
++ VG_(debugLog)(1, "initimg", "Loading client\n");
++
++ if (VG_(args_the_exename) == NULL)
++ VG_(err_missing_prog)();
++
++ load_client(&info, &iifii.initial_client_IP, &iifii.initial_client_TOC);
++
++ //--------------------------------------------------------------
++ // Set up client's environment
++ // p: set-libdir [for VG_(libdir)]
++ // p: get_helprequest_and_toolname [for toolname]
++ //--------------------------------------------------------------
++ VG_(debugLog)(1, "initimg", "Setup client env\n");
++ env = setup_client_env(iicii.envp, iicii.toolname);
++
++ //--------------------------------------------------------------
++ // Setup client stack, eip, and VG_(client_arg[cv])
++ // p: load_client() [for 'info']
++ // p: fix_environment() [for 'env']
++ //--------------------------------------------------------------
++ {
++ /* When allocating space for the client stack on Linux, take
++ notice of the --main-stacksize value. This makes it possible
++ to run programs with very large (primary) stack requirements
++ simply by specifying --main-stacksize. */
++ /* Logic is as follows:
++ - by default, use the client's current stack rlimit
++ - if that exceeds 16M, clamp to 16M
++ - if a larger --main-stacksize value is specified, use that instead
++ - in all situations, the minimum allowed stack size is 1M
++ */
++ void* init_sp = iicii.argv - 1;
++ SizeT m1 = 1024 * 1024;
++ SizeT m16 = 16 * m1;
++ SizeT szB = (SizeT)VG_(client_rlimit_stack).rlim_cur;
++ if (szB < m1) szB = m1;
++ if (szB > m16) szB = m16;
++ if (VG_(clo_main_stacksize) > 0) szB = VG_(clo_main_stacksize);
++ if (szB < m1) szB = m1;
++ szB = VG_PGROUNDUP(szB);
++ VG_(debugLog)(1, "initimg",
++ "Setup client stack: size will be %ld\n", szB);
++
++ iifii.clstack_max_size = szB;
++
++ iifii.initial_client_SP
++ = setup_client_stack( init_sp, env,
++ &info, &iifii.client_auxv,
++ iicii.clstack_end, iifii.clstack_max_size );
++
++ VG_(free)(env);
++
++ VG_(debugLog)(2, "initimg",
++ "Client info: "
++ "initial_IP=%p initial_TOC=%p brk_base=%p\n",
++ (void*)(iifii.initial_client_IP),
++ (void*)(iifii.initial_client_TOC),
++ (void*)VG_(brk_base) );
++ VG_(debugLog)(2, "initimg",
++ "Client info: "
++ "initial_SP=%p max_stack_size=%ld\n",
++ (void*)(iifii.initial_client_SP),
++ (SizeT)iifii.clstack_max_size );
++ }
++
++ //--------------------------------------------------------------
++ // Setup client data (brk) segment. Initially a 1-page segment
++ // which abuts a shrinkable reservation.
++ // p: load_client() [for 'info' and hence VG_(brk_base)]
++ //--------------------------------------------------------------
++ {
++ SizeT m1 = 1024 * 1024;
++ SizeT m8 = 8 * m1;
++ SizeT dseg_max_size = (SizeT)VG_(client_rlimit_data).rlim_cur;
++ VG_(debugLog)(1, "initimg", "Setup client data (brk) segment\n");
++ if (dseg_max_size < m1) dseg_max_size = m1;
++ if (dseg_max_size > m8) dseg_max_size = m8;
++ dseg_max_size = VG_PGROUNDUP(dseg_max_size);
++
++ setup_client_dataseg( dseg_max_size );
++ }
++
++ return iifii;
++}
++
++
++/*====================================================================*/
++/*=== TOP-LEVEL: VG_(finalise_thread1state) ===*/
++/*====================================================================*/
++
++/* Just before starting the client, we may need to make final
++ adjustments to its initial image. Also we need to set up the VEX
++ guest state for thread 1 (the root thread) and copy in essential
++ starting values. This is handed the IIFinaliseImageInfo created by
++ VG_(ii_create_image).
++*/
++void VG_(ii_finalise_image)( IIFinaliseImageInfo iifii )
++{
++ ThreadArchState* arch = &VG_(threads)[1].arch;
++
++ /* On Linux we get client_{ip/sp/toc}, and start the client with
++ all other registers zeroed. */
++
++# if defined(VGP_x86_netbsd)
++ vg_assert(0 == sizeof(VexGuestX86State) % 16);
++
++ /* Zero out the initial state, and set up the simulated FPU in a
++ sane way. */
++ LibVEX_GuestX86_initialise(&arch->vex);
++
++ /* Zero out the shadow areas. */
++ VG_(memset)(&arch->vex_shadow1, 0, sizeof(VexGuestX86State));
++ VG_(memset)(&arch->vex_shadow2, 0, sizeof(VexGuestX86State));
++
++ /* Put essential stuff into the new state. */
++ arch->vex.guest_ESP = iifii.initial_client_SP;
++ arch->vex.guest_EIP = iifii.initial_client_IP;
++
++ /* initialise %cs, %ds and %ss to point at the operating systems
++ default code, data and stack segments */
++ asm volatile("movw %%cs, %0" : : "m" (arch->vex.guest_CS));
++ asm volatile("movw %%ds, %0" : : "m" (arch->vex.guest_DS));
++ asm volatile("movw %%ss, %0" : : "m" (arch->vex.guest_SS));
++
++# elif defined(VGP_amd64_netbsd)
++ vg_assert(0 == sizeof(VexGuestAMD64State) % 16);
++
++ /* Zero out the initial state, and set up the simulated FPU in a
++ sane way. */
++ LibVEX_GuestAMD64_initialise(&arch->vex);
++
++ /* Zero out the shadow areas. */
++ VG_(memset)(&arch->vex_shadow1, 0, sizeof(VexGuestAMD64State));
++ VG_(memset)(&arch->vex_shadow2, 0, sizeof(VexGuestAMD64State));
++
++ /* Put essential stuff into the new state. */
++ arch->vex.guest_RSP = ((iifii.initial_client_SP - 8) & ~0xFul) + 8;
++ arch->vex.guest_RDI = iifii.initial_client_SP;
++ arch->vex.guest_RIP = iifii.initial_client_IP;
++
++# else
++# error Unknown platform
++# endif
++
++ /* Tell the tool that we just wrote to the registers. */
++ VG_TRACK( post_reg_write, Vg_CoreStartup, /*tid*/1, /*offset*/0,
++ sizeof(VexGuestArchState));
++}
++
++#endif // defined(VGO_netbsd)
++
++/*--------------------------------------------------------------------*/
++/*--- ---*/
++/*--------------------------------------------------------------------*/
diff --git a/valgrind-netbsd/patches/patch-coregrind_m__libcassert.c b/valgrind-netbsd/patches/patch-coregrind_m__libcassert.c
new file mode 100644
index 0000000000..7fd4046561
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-coregrind_m__libcassert.c
@@ -0,0 +1,22 @@
+$NetBSD$
+
+--- coregrind/m_libcassert.c.orig 2018-08-06 07:22:24.000000000 +0000
++++ coregrind/m_libcassert.c
+@@ -68,7 +68,7 @@
+ (srP)->misc.X86.r_ebp = ebp; \
+ }
+ #elif defined(VGP_amd64_linux) || defined(VGP_amd64_darwin) \
+- || defined(VGP_amd64_solaris)
++ || defined(VGP_amd64_solaris) || defined(VGP_amd64_netbsd)
+ # define GET_STARTREGS(srP) \
+ { ULong rip, rsp, rbp; \
+ __asm__ __volatile__( \
+@@ -272,7 +272,7 @@ void VG_(exit_now)( Int status )
+ {
+ #if defined(VGO_linux)
+ (void)VG_(do_syscall1)(__NR_exit_group, status );
+-#elif defined(VGO_darwin) || defined(VGO_solaris)
++#elif defined(VGO_darwin) || defined(VGO_solaris) || defined(VGO_netbsd)
+ (void)VG_(do_syscall1)(__NR_exit, status );
+ #else
+ # error Unknown OS
diff --git a/valgrind-netbsd/patches/patch-coregrind_m__libcfile.c b/valgrind-netbsd/patches/patch-coregrind_m__libcfile.c
new file mode 100644
index 0000000000..c0430ab631
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-coregrind_m__libcfile.c
@@ -0,0 +1,355 @@
+$NetBSD$
+
+--- coregrind/m_libcfile.c.orig 2018-05-05 07:42:22.000000000 +0000
++++ coregrind/m_libcfile.c
+@@ -131,6 +131,11 @@ Bool VG_(resolve_filename) ( Int fd, con
+ *result = NULL;
+ return False;
+
++# elif defined(VGO_netbsd)
++ // Not supported
++ *result = NULL;
++ return False;
++
+ # else
+ # error Unknown OS
+ # endif
+@@ -148,6 +153,8 @@ SysRes VG_(mknod) ( const HChar* pathnam
+ # elif defined(VGO_solaris)
+ SysRes res = VG_(do_syscall4)(__NR_mknodat,
+ VKI_AT_FDCWD, (UWord)pathname, mode, dev);
++# elif defined(VGO_netbsd)
++ SysRes res = VG_(do_syscall2)( __NR_mkfifo, (UWord)pathname, mode );
+ # else
+ # error Unknown OS
+ # endif
+@@ -160,7 +167,7 @@ SysRes VG_(open) ( const HChar* pathname
+ /* ARM64 wants to use __NR_openat rather than __NR_open. */
+ SysRes res = VG_(do_syscall4)(__NR_openat,
+ VKI_AT_FDCWD, (UWord)pathname, flags, mode);
+-# elif defined(VGO_linux)
++# elif defined(VGO_linux) || defined(VGO_netbsd)
+ SysRes res = VG_(do_syscall3)(__NR_open,
+ (UWord)pathname, flags, mode);
+ # elif defined(VGO_darwin)
+@@ -188,7 +195,7 @@ Int VG_(fd_open) (const HChar* pathname,
+ void VG_(close) ( Int fd )
+ {
+ /* Hmm. Return value is not checked. That's uncool. */
+-# if defined(VGO_linux) || defined(VGO_solaris)
++# if defined(VGO_linux) || defined(VGO_solaris) || defined(VGO_netbsd)
+ (void)VG_(do_syscall1)(__NR_close, fd);
+ # elif defined(VGO_darwin)
+ (void)VG_(do_syscall1)(__NR_close_nocancel, fd);
+@@ -200,7 +207,7 @@ void VG_(close) ( Int fd )
+ Int VG_(read) ( Int fd, void* buf, Int count)
+ {
+ Int ret;
+-# if defined(VGO_linux) || defined(VGO_solaris)
++# if defined(VGO_linux) || defined(VGO_solaris) || defined(VGO_netbsd)
+ SysRes res = VG_(do_syscall3)(__NR_read, fd, (UWord)buf, count);
+ # elif defined(VGO_darwin)
+ SysRes res = VG_(do_syscall3)(__NR_read_nocancel, fd, (UWord)buf, count);
+@@ -220,7 +227,7 @@ Int VG_(read) ( Int fd, void* buf, Int c
+ Int VG_(write) ( Int fd, const void* buf, Int count)
+ {
+ Int ret;
+-# if defined(VGO_linux) || defined(VGO_solaris)
++# if defined(VGO_linux) || defined(VGO_solaris) || defined(VGO_netbsd)
+ SysRes res = VG_(do_syscall3)(__NR_write, fd, (UWord)buf, count);
+ # elif defined(VGO_darwin)
+ SysRes res = VG_(do_syscall3)(__NR_write_nocancel, fd, (UWord)buf, count);
+@@ -256,7 +263,7 @@ Int VG_(pipe) ( Int fd[2] )
+ # elif defined(VGO_linux)
+ SysRes res = VG_(do_syscall1)(__NR_pipe, (UWord)fd);
+ return sr_isError(res) ? -1 : 0;
+-# elif defined(VGO_darwin)
++# elif defined(VGO_darwin) || defined(VGO_netbsd)
+ /* __NR_pipe is UX64, so produces a double-word result */
+ SysRes res = VG_(do_syscall0)(__NR_pipe);
+ if (!sr_isError(res)) {
+@@ -283,7 +290,7 @@ Int VG_(pipe) ( Int fd[2] )
+
+ Off64T VG_(lseek) ( Int fd, Off64T offset, Int whence )
+ {
+-# if defined(VGO_linux) || defined(VGP_amd64_darwin)
++# if defined(VGO_linux) || defined(VGP_amd64_darwin) || defined(VGP_amd64_netbsd)
+ # if defined(__NR__llseek)
+ Off64T result;
+ SysRes res = VG_(do_syscall5)(__NR__llseek, fd,
+@@ -344,7 +351,13 @@ SysRes VG_(stat) ( const HChar* file_nam
+ SysRes res;
+ VG_(memset)(vgbuf, 0, sizeof(*vgbuf));
+
+-# if defined(VGO_linux) || defined(VGO_darwin)
++# if defined(VGO_netbsd)
++ struct vki_stat buf;
++ res = VG_(do_syscall2)(__NR___stat50, (UWord)file_name, (UWord)&buf);
++ if (!sr_isError(res))
++ TRANSLATE_TO_vg_stat(vgbuf, &buf);
++ return res;
++# elif defined(VGO_linux) || defined(VGO_darwin)
+ /* First try with stat64. If that doesn't work out, fall back to
+ the vanilla version. */
+ # if defined(__NR_stat64)
+@@ -397,7 +410,13 @@ Int VG_(fstat) ( Int fd, struct vg_stat*
+ SysRes res;
+ VG_(memset)(vgbuf, 0, sizeof(*vgbuf));
+
+-# if defined(VGO_linux) || defined(VGO_darwin)
++# if defined(VGO_netbsd)
++ struct vki_stat buf;
++ res = VG_(do_syscall2)(__NR___fstat50, (UWord)fd, (UWord)&buf);
++ if (!sr_isError(res))
++ TRANSLATE_TO_vg_stat(vgbuf, &buf);
++ return sr_isError(res) ? (-1) : 0;
++# elif defined(VGO_linux) || defined(VGO_darwin)
+ /* First try with fstat64. If that doesn't work out, fall back to
+ the vanilla version. */
+ # if defined(__NR_fstat64)
+@@ -450,7 +469,7 @@ Long VG_(fsize) ( Int fd )
+ SysRes VG_(getxattr) ( const HChar* file_name, const HChar* attr_name, Addr attr_value, SizeT attr_value_len )
+ {
+ SysRes res;
+-#if defined(VGO_linux)
++#if defined(VGO_linux) || defined(VGO_netbsd)
+ res = VG_(do_syscall4)(__NR_getxattr, (UWord)file_name, (UWord)attr_name,
+ attr_value, attr_value_len);
+ #else
+@@ -469,7 +488,7 @@ Bool VG_(is_dir) ( const HChar* f )
+
+ SysRes VG_(dup) ( Int oldfd )
+ {
+-# if defined(VGO_linux) || defined(VGO_darwin)
++# if defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_netbsd)
+ return VG_(do_syscall1)(__NR_dup, oldfd);
+ # elif defined(VGO_solaris)
+ return VG_(do_syscall3)(__NR_fcntl, oldfd, F_DUPFD, 0);
+@@ -491,7 +510,7 @@ SysRes VG_(dup2) ( Int oldfd, Int newfd
+ return VG_(mk_SysRes_Success)(newfd);
+ }
+ return VG_(do_syscall3)(__NR_dup3, oldfd, newfd, 0);
+-# elif defined(VGO_linux) || defined(VGO_darwin)
++# elif defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_netbsd)
+ return VG_(do_syscall2)(__NR_dup2, oldfd, newfd);
+ # elif defined(VGO_solaris)
+ return VG_(do_syscall3)(__NR_fcntl, oldfd, F_DUP2FD, newfd);
+@@ -503,7 +522,7 @@ SysRes VG_(dup2) ( Int oldfd, Int newfd
+ /* Returns -1 on error. */
+ Int VG_(fcntl) ( Int fd, Int cmd, Addr arg )
+ {
+-# if defined(VGO_linux) || defined(VGO_solaris)
++# if defined(VGO_linux) || defined(VGO_solaris) || defined(VGO_netbsd)
+ SysRes res = VG_(do_syscall3)(__NR_fcntl, fd, cmd, arg);
+ # elif defined(VGO_darwin)
+ SysRes res = VG_(do_syscall3)(__NR_fcntl_nocancel, fd, cmd, arg);
+@@ -518,7 +537,7 @@ Int VG_(rename) ( const HChar* old_name,
+ # if defined(VGO_solaris) || defined(VGP_arm64_linux)
+ SysRes res = VG_(do_syscall4)(__NR_renameat, VKI_AT_FDCWD, (UWord)old_name,
+ VKI_AT_FDCWD, (UWord)new_name);
+-# elif defined(VGO_linux) || defined(VGO_darwin)
++# elif defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_netbsd)
+ SysRes res = VG_(do_syscall2)(__NR_rename, (UWord)old_name, (UWord)new_name);
+ # else
+ # error "Unknown OS"
+@@ -531,7 +550,7 @@ Int VG_(unlink) ( const HChar* file_name
+ # if defined(VGP_arm64_linux)
+ SysRes res = VG_(do_syscall2)(__NR_unlinkat, VKI_AT_FDCWD,
+ (UWord)file_name);
+-# elif defined(VGO_linux) || defined(VGO_darwin)
++# elif defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_netbsd)
+ SysRes res = VG_(do_syscall1)(__NR_unlink, (UWord)file_name);
+ # elif defined(VGO_solaris)
+ SysRes res = VG_(do_syscall3)(__NR_unlinkat, VKI_AT_FDCWD,
+@@ -553,7 +572,7 @@ static HChar *startup_wd;
+ changes. */
+ void VG_(record_startup_wd) ( void )
+ {
+-# if defined(VGO_linux) || defined(VGO_solaris)
++# if defined(VGO_linux) || defined(VGO_solaris) || defined(VGO_netbsd)
+ /* Simple: just ask the kernel */
+ SysRes res;
+ SizeT szB = 0;
+@@ -561,7 +580,11 @@ void VG_(record_startup_wd) ( void )
+ szB += 500;
+ startup_wd = VG_(realloc)("startup_wd", startup_wd, szB);
+ VG_(memset)(startup_wd, 0, szB);
++# if defined(VGO_netbsd)
++ res = VG_(do_syscall2)(__NR___getcwd, (UWord)startup_wd, szB-1);
++# else
+ res = VG_(do_syscall2)(__NR_getcwd, (UWord)startup_wd, szB-1);
++# endif
+ } while (sr_isError(res) && sr_Err(res) == VKI_ERANGE);
+
+ if (sr_isError(res)) {
+@@ -614,7 +637,7 @@ SysRes VG_(poll) (struct vki_pollfd *fds
+ (UWord)fds, nfds,
+ (UWord)(timeout >= 0 ? &timeout_ts : NULL),
+ (UWord)NULL);
+-# elif defined(VGO_linux)
++# elif defined(VGO_linux) || defined(VGO_netbsd)
+ res = VG_(do_syscall3)(__NR_poll, (UWord)fds, nfds, timeout);
+ # elif defined(VGO_darwin)
+ res = VG_(do_syscall3)(__NR_poll_nocancel, (UWord)fds, nfds, timeout);
+@@ -649,7 +672,7 @@ SSizeT VG_(readlink) (const HChar* path,
+ # if defined(VGP_arm64_linux)
+ res = VG_(do_syscall4)(__NR_readlinkat, VKI_AT_FDCWD,
+ (UWord)path, (UWord)buf, bufsiz);
+-# elif defined(VGO_linux) || defined(VGO_darwin)
++# elif defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_netbsd)
+ res = VG_(do_syscall3)(__NR_readlink, (UWord)path, (UWord)buf, bufsiz);
+ # elif defined(VGO_solaris)
+ res = VG_(do_syscall4)(__NR_readlinkat, VKI_AT_FDCWD, (UWord)path,
+@@ -727,7 +750,7 @@ Int VG_(access) ( const HChar* path, Boo
+ | (ixusr ? VKI_X_OK : 0);
+ # if defined(VGP_arm64_linux)
+ SysRes res = VG_(do_syscall3)(__NR_faccessat, VKI_AT_FDCWD, (UWord)path, w);
+-# elif defined(VGO_linux) || defined(VGO_darwin)
++# elif defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_netbsd)
+ SysRes res = VG_(do_syscall2)(__NR_access, (UWord)path, w);
+ # elif defined(VGO_solaris)
+ SysRes res = VG_(do_syscall4)(__NR_faccessat, VKI_AT_FDCWD, (UWord)path,
+@@ -770,8 +793,9 @@ Int VG_(check_executable)(/*OUT*/Bool* i
+ struct vg_stat st;
+ SysRes res = VG_(stat)(f, &st);
+
+- if (is_setuid)
++ if (is_setuid) {
+ *is_setuid = False;
++ }
+
+ if (sr_isError(res)) {
+ return sr_Err(res);
+@@ -782,27 +806,30 @@ Int VG_(check_executable)(/*OUT*/Bool* i
+ }
+
+ if ( (st.mode & (VKI_S_ISUID | VKI_S_ISGID)) && !allow_setuid ) {
+- if (is_setuid)
++ if (is_setuid) {
+ *is_setuid = True;
++ }
+ return VKI_EACCES;
+ }
+
+ res = VG_(getxattr)(f, "security.capability", (Addr)0, 0);
+ if (!sr_isError(res) && !allow_setuid) {
+- if (is_setuid)
++ if (is_setuid) {
+ *is_setuid = True;
++ }
+ return VKI_EACCES;
+ }
+
+ if (VG_(geteuid)() == st.uid) {
+- if (!(st.mode & VKI_S_IXUSR))
++ if (!(st.mode & VKI_S_IXUSR)) {
+ return VKI_EACCES;
++ }
+ } else {
+ Int grpmatch = 0;
+
+- if (VG_(getegid)() == st.gid)
++ if (VG_(getegid)() == st.gid) {
+ grpmatch = 1;
+- else {
++ } else {
+ UInt *groups = NULL;
+ Int ngrp;
+
+@@ -872,6 +899,10 @@ SysRes VG_(pread) ( Int fd, void* buf, I
+ || defined(VGP_mips64_linux) || defined(VGP_arm64_linux)
+ res = VG_(do_syscall4)(__NR_pread64, fd, (UWord)buf, count, offset);
+ return res;
++# elif defined(VGO_netbsd)
++ vg_assert(sizeof(OffT) == 8);
++ res = VG_(do_syscall5)(__NR_pread, fd, (UWord)buf, count, 0, offset);
++ return res;
+ # elif defined(VGP_amd64_darwin)
+ vg_assert(sizeof(OffT) == 8);
+ res = VG_(do_syscall4)(__NR_pread_nocancel, fd, (UWord)buf, count, offset);
+@@ -1023,7 +1054,7 @@ UShort VG_(ntohs) ( UShort x )
+ */
+ Int VG_(connect_via_socket)( const HChar* str )
+ {
+-# if defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_solaris)
++# if defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_solaris) || defined(VGO_netbsd)
+ Int sd, res;
+ struct vki_sockaddr_in servAddr;
+ UInt ip = 0;
+@@ -1124,9 +1155,13 @@ Int VG_(socket) ( Int domain, Int type,
+
+ # elif defined(VGP_amd64_linux) || defined(VGP_arm_linux) \
+ || defined(VGP_mips32_linux) || defined(VGP_mips64_linux) \
+- || defined(VGP_arm64_linux)
++ || defined(VGP_arm64_linux) || defined(VGO_netbsd)
+ SysRes res;
++# if defined(VGO_netbsd)
++ res = VG_(do_syscall3)(__NR___socket30, domain, type, protocol );
++# else
+ res = VG_(do_syscall3)(__NR_socket, domain, type, protocol );
++# endif
+ return sr_isError(res) ? -1 : sr_Res(res);
+
+ # elif defined(VGO_darwin)
+@@ -1179,7 +1214,7 @@ Int my_connect ( Int sockfd, struct vki_
+
+ # elif defined(VGP_amd64_linux) || defined(VGP_arm_linux) \
+ || defined(VGP_mips32_linux) || defined(VGP_mips64_linux) \
+- || defined(VGP_arm64_linux)
++ || defined(VGP_arm64_linux) || defined(VGO_netbsd)
+ SysRes res;
+ res = VG_(do_syscall3)(__NR_connect, sockfd, (UWord)serv_addr, addrlen);
+ return sr_isError(res) ? -1 : sr_Res(res);
+@@ -1226,7 +1261,7 @@ Int VG_(write_socket)( Int sd, const voi
+
+ # elif defined(VGP_amd64_linux) || defined(VGP_arm_linux) \
+ || defined(VGP_mips32_linux) || defined(VGP_mips64_linux) \
+- || defined(VGP_arm64_linux)
++ || defined(VGP_arm64_linux) || defined(VGO_netbsd)
+ SysRes res;
+ res = VG_(do_syscall6)(__NR_sendto, sd, (UWord)msg,
+ count, VKI_MSG_NOSIGNAL, 0,0);
+@@ -1262,7 +1297,7 @@ Int VG_(getsockname) ( Int sd, struct vk
+ return sr_isError(res) ? -1 : sr_Res(res);
+
+ # elif defined(VGP_amd64_linux) || defined(VGP_arm_linux) \
+- || defined(VGP_mips64_linux) || defined(VGP_arm64_linux)
++ || defined(VGP_mips64_linux) || defined(VGP_arm64_linux) || defined(VGO_netbsd)
+ SysRes res;
+ res = VG_(do_syscall3)( __NR_getsockname,
+ (UWord)sd, (UWord)name, (UWord)namelen );
+@@ -1300,7 +1335,7 @@ Int VG_(getpeername) ( Int sd, struct vk
+ return sr_isError(res) ? -1 : sr_Res(res);
+
+ # elif defined(VGP_amd64_linux) || defined(VGP_arm_linux) \
+- || defined(VGP_mips64_linux) || defined(VGP_arm64_linux)
++ || defined(VGP_mips64_linux) || defined(VGP_arm64_linux) || defined(VGO_netbsd)
+ SysRes res;
+ res = VG_(do_syscall3)( __NR_getpeername,
+ (UWord)sd, (UWord)name, (UWord)namelen );
+@@ -1341,14 +1376,14 @@ Int VG_(getsockopt) ( Int sd, Int level,
+
+ # elif defined(VGP_amd64_linux) || defined(VGP_arm_linux) \
+ || defined(VGP_mips32_linux) || defined(VGP_mips64_linux) \
+- || defined(VGP_arm64_linux)
++ || defined(VGP_arm64_linux) || defined(VGO_netbsd)
+ SysRes res;
+ res = VG_(do_syscall5)( __NR_getsockopt,
+ (UWord)sd, (UWord)level, (UWord)optname,
+ (UWord)optval, (UWord)optlen );
+ return sr_isError(res) ? -1 : sr_Res(res);
+
+-# elif defined(VGO_darwin)
++# elif defined(VGO_darwin) || defined(VGO_netbsd)
+ SysRes res;
+ res = VG_(do_syscall5)( __NR_getsockopt,
+ (UWord)sd, (UWord)level, (UWord)optname,
+@@ -1392,7 +1427,7 @@ Int VG_(setsockopt) ( Int sd, Int level,
+ (UWord)optval, (UWord)optlen );
+ return sr_isError(res) ? -1 : sr_Res(res);
+
+-# elif defined(VGO_darwin)
++# elif defined(VGO_darwin) || defined(VGO_netbsd)
+ SysRes res;
+ res = VG_(do_syscall5)( __NR_setsockopt,
+ (UWord)sd, (UWord)level, (UWord)optname,
diff --git a/valgrind-netbsd/patches/patch-coregrind_m__libcprint.c b/valgrind-netbsd/patches/patch-coregrind_m__libcprint.c
new file mode 100644
index 0000000000..7f3d06fe78
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-coregrind_m__libcprint.c
@@ -0,0 +1,13 @@
+$NetBSD$
+
+--- coregrind/m_libcprint.c.orig 2018-07-13 08:52:05.000000000 +0000
++++ coregrind/m_libcprint.c
+@@ -1188,7 +1188,7 @@ const HChar *VG_(sr_as_string) ( SysRes
+ return buf;
+ }
+
+-#elif defined(VGO_darwin) || defined(VGO_solaris)
++#elif defined(VGO_darwin) || defined(VGO_solaris) || defined(VGO_netbsd)
+
+ const HChar *VG_(sr_as_string) ( SysRes sr )
+ {
diff --git a/valgrind-netbsd/patches/patch-coregrind_m__libcproc.c b/valgrind-netbsd/patches/patch-coregrind_m__libcproc.c
new file mode 100644
index 0000000000..252815a82b
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-coregrind_m__libcproc.c
@@ -0,0 +1,146 @@
+$NetBSD$
+
+--- coregrind/m_libcproc.c.orig 2018-05-05 07:42:22.000000000 +0000
++++ coregrind/m_libcproc.c
+@@ -67,7 +67,7 @@ HChar** VG_(client_envp) = NULL;
+ const HChar *VG_(libdir) = VG_LIBDIR;
+
+ const HChar *VG_(LD_PRELOAD_var_name) =
+-#if defined(VGO_linux) || defined(VGO_solaris)
++#if defined(VGO_linux) || defined(VGO_solaris) || defined(VGO_netbsd)
+ "LD_PRELOAD";
+ #elif defined(VGO_darwin)
+ "DYLD_INSERT_LIBRARIES";
+@@ -348,8 +348,12 @@ void VG_(client_cmd_and_args)(HChar *buf
+
+ Int VG_(waitpid)(Int pid, Int *status, Int options)
+ {
+-# if defined(VGO_linux)
++# if defined(VGO_linux) || defined(VGO_netbsd)
++# if defined(VGO_netbsd)
++ SysRes res = VG_(do_syscall4)(__NR___wait450,
++# else
+ SysRes res = VG_(do_syscall4)(__NR_wait4,
++# endif
+ pid, (UWord)status, options, 0);
+ return sr_isError(res) ? -1 : sr_Res(res);
+ # elif defined(VGO_darwin)
+@@ -586,7 +590,7 @@ Int VG_(system) ( const HChar* cmd )
+ Int VG_(sysctl)(Int *name, UInt namelen, void *oldp, SizeT *oldlenp, void *newp, SizeT newlen)
+ {
+ SysRes res;
+-# if defined(VGO_darwin)
++# if defined(VGO_darwin) || defined(VGO_netbsd)
+ res = VG_(do_syscall6)(__NR___sysctl,
+ (UWord)name, namelen, (UWord)oldp, (UWord)oldlenp, (UWord)newp, newlen);
+ # else
+@@ -689,6 +693,10 @@ Int VG_(gettid)(void)
+ // Use Mach thread ports for lwpid instead.
+ return mach_thread_self();
+
++# elif defined(VGO_netbsd)
++ SysRes res = VG_(do_syscall0)(__NR__lwp_self);
++ return sr_Res(res);
++
+ # elif defined(VGO_solaris)
+ SysRes res = VG_(do_syscall0)(__NR_lwp_self);
+ return sr_Res(res);
+@@ -710,7 +718,7 @@ Int VG_(getpgrp) ( void )
+ /* ASSUMES SYSCALL ALWAYS SUCCEEDS */
+ # if defined(VGP_arm64_linux)
+ return sr_Res( VG_(do_syscall1)(__NR_getpgid, 0) );
+-# elif defined(VGO_linux) || defined(VGO_darwin)
++# elif defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_netbsd)
+ return sr_Res( VG_(do_syscall0)(__NR_getpgrp) );
+ # elif defined(VGO_solaris)
+ /* Uses the shared pgrpsys syscall, 0 for the getpgrp variant. */
+@@ -723,7 +731,7 @@ Int VG_(getpgrp) ( void )
+ Int VG_(getppid) ( void )
+ {
+ /* ASSUMES SYSCALL ALWAYS SUCCEEDS */
+-# if defined(VGO_linux) || defined(VGO_darwin)
++# if defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_netbsd)
+ return sr_Res( VG_(do_syscall0)(__NR_getppid) );
+ # elif defined(VGO_solaris)
+ /* Uses the shared getpid/getppid syscall, val2 contains a parent pid. */
+@@ -736,7 +744,7 @@ Int VG_(getppid) ( void )
+ Int VG_(geteuid) ( void )
+ {
+ /* ASSUMES SYSCALL ALWAYS SUCCEEDS */
+-# if defined(VGO_linux) || defined(VGO_darwin)
++# if defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_netbsd)
+ {
+ # if defined(__NR_geteuid32)
+ // We use the 32-bit version if it's supported. Otherwise, IDs greater
+@@ -757,7 +765,7 @@ Int VG_(geteuid) ( void )
+
+ Int VG_(getegid) ( void )
+ {
+-# if defined(VGO_linux) || defined(VGO_darwin)
++# if defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_netbsd)
+ /* ASSUMES SYSCALL ALWAYS SUCCEEDS */
+ # if defined(__NR_getegid32)
+ // We use the 32-bit version if it's supported. Otherwise, IDs greater
+@@ -804,7 +812,7 @@ Int VG_(getgroups)( Int size, UInt* list
+ || defined(VGP_ppc64be_linux) || defined(VGP_ppc64le_linux) \
+ || defined(VGO_darwin) || defined(VGP_s390x_linux) \
+ || defined(VGP_mips32_linux) || defined(VGP_arm64_linux) \
+- || defined(VGO_solaris)
++ || defined(VGO_solaris) || defined(VGO_netbsd)
+ SysRes sres;
+ sres = VG_(do_syscall2)(__NR_getgroups, size, (Addr)list);
+ if (sr_isError(sres))
+@@ -823,7 +831,7 @@ Int VG_(getgroups)( Int size, UInt* list
+ Int VG_(ptrace) ( Int request, Int pid, void *addr, void *data )
+ {
+ SysRes res;
+-# if defined(VGO_linux) || defined(VGO_darwin)
++# if defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_netbsd)
+ res = VG_(do_syscall4)(__NR_ptrace, request, pid, (UWord)addr, (UWord)data);
+ # elif defined(VGO_solaris)
+ /* There is no ptrace syscall on Solaris. Such requests has to be
+@@ -860,7 +868,7 @@ Int VG_(fork) ( void )
+ return -1;
+ return sr_Res(res);
+
+-# elif defined(VGO_darwin)
++# elif defined(VGO_darwin) || defined(VGO_netbsd)
+ SysRes res;
+ res = VG_(do_syscall0)(__NR_fork); /* __NR_fork is UX64 */
+ if (sr_isError(res))
+@@ -921,6 +929,14 @@ UInt VG_(read_millisecond_timer) ( void
+ }
+ }
+
++# elif defined(VGO_netbsd)
++ { SysRes res;
++ struct vki_timeval tv_now;
++ res = VG_(do_syscall2)(__NR___gettimeofday50, (UWord)&tv_now, (UWord)NULL);
++ vg_assert(! sr_isError(res));
++ now = tv_now.tv_sec * 1000000ULL + tv_now.tv_usec;
++ }
++
+ # elif defined(VGO_darwin)
+ // Weird: it seems that gettimeofday() doesn't fill in the timeval, but
+ // rather returns the tv_sec as the low 32 bits of the result and the
+@@ -947,7 +963,11 @@ UInt VG_(read_millisecond_timer) ( void
+ Int VG_(gettimeofday)(struct vki_timeval *tv, struct vki_timezone *tz)
+ {
+ SysRes res;
++# if defined(VGO_netbsd)
++ res = VG_(do_syscall2)(__NR___gettimeofday50, (UWord)tv, (UWord)tz);
++# else
+ res = VG_(do_syscall2)(__NR_gettimeofday, (UWord)tv, (UWord)tz);
++# endif
+
+ if (! sr_isError(res)) return 0;
+
+@@ -982,7 +1002,7 @@ UInt VG_(get_user_milliseconds)(void)
+ }
+ }
+
+-# elif defined(VGO_darwin)
++# elif defined(VGO_darwin) || defined(VGO_netbsd)
+ res = 0;
+
+ # else
diff --git a/valgrind-netbsd/patches/patch-coregrind_m__libcsetjmp.c b/valgrind-netbsd/patches/patch-coregrind_m__libcsetjmp.c
new file mode 100644
index 0000000000..c8fe4111a6
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-coregrind_m__libcsetjmp.c
@@ -0,0 +1,38 @@
+$NetBSD$
+
+--- coregrind/m_libcsetjmp.c.orig 2018-05-05 07:42:22.000000000 +0000
++++ coregrind/m_libcsetjmp.c
+@@ -382,13 +382,13 @@ __asm__(
+ /* -------- amd64-{linux,darwin,solaris} -------- */
+
+ #if defined(VGP_amd64_linux) || defined(VGP_amd64_darwin) || \
+- defined(VGP_amd64_solaris)
++ defined(VGP_amd64_solaris) || defined(VGP_amd64_netbsd)
+
+ __asm__(
+ ".text" "\n"
+ "" "\n"
+
+-#if defined(VGP_amd64_linux) || defined(VGP_amd64_solaris)
++#if defined(VGP_amd64_linux) || defined(VGP_amd64_solaris) || defined(VGP_amd64_netbsd)
+ ".global VG_MINIMAL_SETJMP" "\n" // rdi = jmp_buf
+ "VG_MINIMAL_SETJMP:" "\n"
+
+@@ -425,7 +425,7 @@ __asm__(
+ "" "\n"
+
+
+-#if defined(VGP_amd64_linux) || defined(VGP_amd64_solaris)
++#if defined(VGP_amd64_linux) || defined(VGP_amd64_solaris) || defined(VGP_amd64_netbsd)
+ ".global VG_MINIMAL_LONGJMP" "\n"
+ "VG_MINIMAL_LONGJMP:" "\n" // rdi = jmp_buf
+
+@@ -473,7 +473,7 @@ __asm__(
+ #endif
+ );
+
+-#endif /* VGP_amd64_linux || VGP_amd64_darwin || VGP_amd64_solaris */
++#endif /* VGP_amd64_linux || VGP_amd64_darwin || VGP_amd64_solaris || VGP_amd64_netbsd */
+
+
+ /* -------- x86-{linux,darwin,solaris} -------- */
diff --git a/valgrind-netbsd/patches/patch-coregrind_m__libcsignal.c b/valgrind-netbsd/patches/patch-coregrind_m__libcsignal.c
new file mode 100644
index 0000000000..648cd32ca6
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-coregrind_m__libcsignal.c
@@ -0,0 +1,77 @@
+$NetBSD$
+
+--- coregrind/m_libcsignal.c.orig 2018-05-05 07:42:22.000000000 +0000
++++ coregrind/m_libcsignal.c
+@@ -216,7 +216,10 @@ void VG_(sigcomplementset)( vki_sigset_t
+ */
+ Int VG_(sigprocmask)( Int how, const vki_sigset_t* set, vki_sigset_t* oldset)
+ {
+-# if defined(VGO_linux) || defined(VGO_solaris)
++# if defined(VGO_netbsd)
++ SysRes res = VG_(do_syscall3)(__NR___sigprocmask14,
++ how, (UWord)set, (UWord)oldset);
++# elif defined(VGO_linux) || defined(VGO_solaris)
+ # if defined(__NR_rt_sigprocmask)
+ SysRes res = VG_(do_syscall4)(__NR_rt_sigprocmask,
+ how, (UWord)set, (UWord)oldset,
+@@ -320,6 +323,11 @@ Int VG_(sigaction) ( Int signum,
+ signum, (UWord)act, (UWord)oldact);
+ return sr_isError(res) ? -1 : 0;
+
++# elif defined(VGO_netbsd)
++ SysRes res = VG_(do_syscall3)(__NR___sigaction_sigtramp,
++ signum, (UWord)act, (UWord)oldact);
++ return sr_isError(res) ? -1 : 0;
++
+ # else
+ # error "Unsupported OS"
+ # endif
+@@ -331,7 +339,7 @@ void
+ VG_(convert_sigaction_fromK_to_toK)( const vki_sigaction_fromK_t* fromK,
+ /*OUT*/vki_sigaction_toK_t* toK )
+ {
+-# if defined(VGO_linux) || defined(VGO_solaris)
++# if defined(VGO_linux) || defined(VGO_solaris) || defined(VGO_netbsd)
+ *toK = *fromK;
+ # elif defined(VGO_darwin)
+ toK->ksa_handler = fromK->ksa_handler;
+@@ -346,7 +354,7 @@ VG_(convert_sigaction_fromK_to_toK)( con
+
+ Int VG_(kill)( Int pid, Int signo )
+ {
+-# if defined(VGO_linux) || defined(VGO_solaris)
++# if defined(VGO_linux) || defined(VGO_solaris) || defined(VGO_netbsd)
+ SysRes res = VG_(do_syscall2)(__NR_kill, pid, signo);
+ # elif defined(VGO_darwin)
+ SysRes res = VG_(do_syscall3)(__NR_kill,
+@@ -372,6 +380,11 @@ Int VG_(tkill)( Int lwpid, Int signo )
+ res = VG_(do_syscall2)(__NR___pthread_kill, lwpid, signo);
+ return sr_isError(res) ? -1 : 0;
+
++# elif defined(VGO_netbsd)
++ SysRes res;
++ res = VG_(do_syscall2)(__NR__lwp_kill, lwpid, signo);
++ return sr_isError(res) ? -1 : 0;
++
+ # elif defined(VGO_solaris)
+ SysRes res;
+ # if defined(SOLARIS_LWP_SIGQUEUE_SYSCALL)
+@@ -422,6 +435,18 @@ Int VG_(sigtimedwait_zero)( const vki_si
+ return sr_isError(res) ? -1 : sr_Res(res);
+ }
+
++/* ---------- sigtimedwait_zero: NetBSD ----------- */
++
++#elif defined(VGO_netbsd)
++Int VG_(sigtimedwait_zero)( const vki_sigset_t *set,
++ vki_siginfo_t *info )
++{
++ static const struct vki_timespec zero = { 0, 0 };
++ SysRes res = VG_(do_syscall3)(__NR_____sigtimedwait50, (UWord)set, (UWord)info,
++ (UWord)&zero);
++ return sr_isError(res) ? -1 : sr_Res(res);
++}
++
+ /* ---------- sigtimedwait_zero: Darwin ----------- */
+
+ #elif defined(VGO_darwin)
diff --git a/valgrind-netbsd/patches/patch-coregrind_m__machine.c b/valgrind-netbsd/patches/patch-coregrind_m__machine.c
new file mode 100644
index 0000000000..45b5a82dc5
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-coregrind_m__machine.c
@@ -0,0 +1,13 @@
+$NetBSD$
+
+--- coregrind/m_machine.c.orig 2018-07-24 08:23:41.000000000 +0000
++++ coregrind/m_machine.c
+@@ -2035,7 +2035,7 @@ void* VG_(fnptr_to_fnentry)( void* f )
+ || defined(VGP_ppc32_linux) || defined(VGP_ppc64le_linux) \
+ || defined(VGP_s390x_linux) || defined(VGP_mips32_linux) \
+ || defined(VGP_mips64_linux) || defined(VGP_arm64_linux) \
+- || defined(VGP_x86_solaris) || defined(VGP_amd64_solaris)
++ || defined(VGP_x86_solaris) || defined(VGP_amd64_solaris) || defined(VGO_netbsd)
+ return f;
+ # elif defined(VGP_ppc64be_linux)
+ /* ppc64-linux uses the AIX scheme, in which f is a pointer to a
diff --git a/valgrind-netbsd/patches/patch-coregrind_m__main.c b/valgrind-netbsd/patches/patch-coregrind_m__main.c
new file mode 100644
index 0000000000..21a8c6e2b6
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-coregrind_m__main.c
@@ -0,0 +1,52 @@
+$NetBSD$
+
+--- coregrind/m_main.c.orig 2018-05-05 07:42:22.000000000 +0000
++++ coregrind/m_main.c
+@@ -1470,7 +1470,7 @@ Int valgrind_main ( Int argc, HChar **ar
+ if (!need_help) {
+ VG_(debugLog)(1, "main", "Create initial image\n");
+
+-# if defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_solaris)
++# if defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_solaris) || defined(VGO_netbsd)
+ the_iicii.argv = argv;
+ the_iicii.envp = envp;
+ the_iicii.toolname = VG_(clo_toolname);
+@@ -1735,7 +1735,7 @@ Int valgrind_main ( Int argc, HChar **ar
+ addr2dihandle = VG_(newXA)( VG_(malloc), "main.vm.2",
+ VG_(free), sizeof(Addr_n_ULong) );
+
+-# if defined(VGO_linux) || defined(VGO_solaris)
++# if defined(VGO_linux) || defined(VGO_solaris) || defined(VGO_netbsd)
+ { Addr* seg_starts;
+ Int n_seg_starts;
+ Addr_n_ULong anu;
+@@ -2401,7 +2401,7 @@ static void final_tidyup(ThreadId tid)
+ /*=== Getting to main() alive: LINUX ===*/
+ /*====================================================================*/
+
+-#if defined(VGO_linux)
++#if defined(VGO_linux) || defined(VGO_netbsd)
+
+ /* If linking of the final executables is done with glibc present,
+ then Valgrind starts at main() above as usual, and all of the
+@@ -2566,8 +2566,19 @@ asm("\n"
+ "\thlt\n"
+ ".previous\n"
+ );
+-#elif defined(VGP_amd64_linux)
++#elif defined(VGP_amd64_linux) || defined(VGP_amd64_netbsd)
+ asm("\n"
++# if defined(VGP_amd64_netbsd)
++ ".section \".note.netbsd.ident\", \"a\", @note\n"
++ ".long 2f-1f\n"
++ ".long 4f-3f\n"
++ ".long 1\n"
++ "1: .asciz \"NetBSD\"\n"
++ "2: .p2align 2\n"
++ "3: .long 800000001\n" // __NetBSD_Version__
++ "4: .p2align 2\n"
++ "\n"
++# endif
+ ".text\n"
+ "\t.globl _start\n"
+ "\t.type _start,@function\n"
diff --git a/valgrind-netbsd/patches/patch-coregrind_m__redir.c b/valgrind-netbsd/patches/patch-coregrind_m__redir.c
new file mode 100644
index 0000000000..3fa4b7a58a
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-coregrind_m__redir.c
@@ -0,0 +1,23 @@
+$NetBSD$
+
+--- coregrind/m_redir.c.orig 2018-09-18 06:43:50.000000000 +0000
++++ coregrind/m_redir.c
+@@ -1213,6 +1213,9 @@ Bool VG_(is_soname_ld_so) (const HChar *
+ if (VG_STREQ(soname, VG_U_LD_SO_1)) return True;
+ if (VG_STREQ(soname, VG_U_LD_LINUX_AARCH64_SO_1)) return True;
+ if (VG_STREQ(soname, VG_U_LD_LINUX_ARMHF_SO_3)) return True;
++# elif defined(VGO_netbsd)
++ if (VG_STREQ(soname, VG_U_LD_ELF_SO_1)) return True;
++ if (VG_STREQ(soname, VG_U_LD_ELF32_SO_1)) return True;
+ # elif defined(VGO_darwin)
+ if (VG_STREQ(soname, VG_U_DYLD)) return True;
+ # elif defined(VGO_solaris)
+@@ -1528,6 +1531,8 @@ void VG_(redir_initialise) ( void )
+ # endif
+ }
+
++# elif defined(VGO_netbsd)
++/* XXX do something real if needed */
+ # elif defined(VGP_x86_darwin)
+ /* If we're using memcheck, use these intercepts right from
+ the start, otherwise dyld makes a lot of noise. */
diff --git a/valgrind-netbsd/patches/patch-coregrind_m__replacemalloc_vg__replace__malloc.c b/valgrind-netbsd/patches/patch-coregrind_m__replacemalloc_vg__replace__malloc.c
new file mode 100644
index 0000000000..561a1871e9
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-coregrind_m__replacemalloc_vg__replace__malloc.c
@@ -0,0 +1,216 @@
+$NetBSD$
+
+--- coregrind/m_replacemalloc/vg_replace_malloc.c.orig 2018-05-31 14:59:48.000000000 +0000
++++ coregrind/m_replacemalloc/vg_replace_malloc.c
+@@ -298,6 +298,10 @@ static void init(void);
+ ALLOC_or_NULL(VG_Z_LIBC_SONAME, malloc, malloc);
+ ALLOC_or_NULL(SO_SYN_MALLOC, malloc, malloc);
+
++#elif defined(VGO_netbsd)
++ ALLOC_or_NULL(VG_Z_LIBC_SONAME, malloc, malloc);
++ ALLOC_or_NULL(SO_SYN_MALLOC, malloc, malloc);
++
+ #elif defined(VGO_darwin)
+ ALLOC_or_NULL(VG_Z_LIBC_SONAME, malloc, malloc);
+ ALLOC_or_NULL(SO_SYN_MALLOC, malloc, malloc);
+@@ -334,6 +338,20 @@ static void init(void);
+ ALLOC_or_BOMB(SO_SYN_MALLOC, _Znwm, __builtin_new);
+ #endif
+
++#elif defined(VGO_netbsd)
++ // operator new(unsigned int), GNU mangling
++ #if VG_WORDSIZE == 4
++ ALLOC_or_BOMB(VG_Z_LIBSTDCXX_SONAME, _Znwj, __builtin_new);
++ ALLOC_or_BOMB(VG_Z_LIBSUPCXX_SONAME, _Znwj, __builtin_new);
++ ALLOC_or_BOMB(SO_SYN_MALLOC, _Znwj, __builtin_new);
++ #endif
++ // operator new(unsigned long), GNU mangling
++ #if VG_WORDSIZE == 8
++ ALLOC_or_BOMB(VG_Z_LIBSTDCXX_SONAME, _Znwm, __builtin_new);
++ ALLOC_or_BOMB(VG_Z_LIBSUPCXX_SONAME, _Znwm, __builtin_new);
++ ALLOC_or_BOMB(SO_SYN_MALLOC, _Znwm, __builtin_new);
++ #endif
++
+ #elif defined(VGO_darwin)
+ // operator new(unsigned int), GNU mangling
+ #if VG_WORDSIZE == 4
+@@ -377,6 +395,20 @@ static void init(void);
+ ALLOC_or_NULL(SO_SYN_MALLOC, _ZnwmRKSt9nothrow_t, __builtin_new);
+ #endif
+
++#elif defined(VGO_netbsd)
++ // operator new(unsigned, std::nothrow_t const&), GNU mangling
++ #if VG_WORDSIZE == 4
++ ALLOC_or_NULL(VG_Z_LIBSTDCXX_SONAME, _ZnwjRKSt9nothrow_t, __builtin_new);
++ ALLOC_or_NULL(VG_Z_LIBSUPCXX_SONAME, _ZnwjRKSt9nothrow_t, __builtin_new);
++ ALLOC_or_NULL(SO_SYN_MALLOC, _ZnwjRKSt9nothrow_t, __builtin_new);
++ #endif
++ // operator new(unsigned long, std::nothrow_t const&), GNU mangling
++ #if VG_WORDSIZE == 8
++ ALLOC_or_NULL(VG_Z_LIBSTDCXX_SONAME, _ZnwmRKSt9nothrow_t, __builtin_new);
++ ALLOC_or_NULL(VG_Z_LIBSUPCXX_SONAME, _ZnwmRKSt9nothrow_t, __builtin_new);
++ ALLOC_or_NULL(SO_SYN_MALLOC, _ZnwmRKSt9nothrow_t, __builtin_new);
++ #endif
++
+ #elif defined(VGO_darwin)
+ // operator new(unsigned, std::nothrow_t const&), GNU mangling
+ #if VG_WORDSIZE == 4
+@@ -423,6 +455,20 @@ static void init(void);
+ ALLOC_or_BOMB(SO_SYN_MALLOC, _Znam, __builtin_vec_new );
+ #endif
+
++#elif defined(VGO_netbsd)
++ // operator new[](unsigned int), GNU mangling
++ #if VG_WORDSIZE == 4
++ ALLOC_or_BOMB(VG_Z_LIBSTDCXX_SONAME, _Znaj, __builtin_vec_new );
++ ALLOC_or_BOMB(VG_Z_LIBSUPCXX_SONAME, _Znaj, __builtin_vec_new );
++ ALLOC_or_BOMB(SO_SYN_MALLOC, _Znaj, __builtin_vec_new );
++ #endif
++ // operator new[](unsigned long), GNU mangling
++ #if VG_WORDSIZE == 8
++ ALLOC_or_BOMB(VG_Z_LIBSTDCXX_SONAME, _Znam, __builtin_vec_new );
++ ALLOC_or_BOMB(VG_Z_LIBSUPCXX_SONAME, _Znam, __builtin_vec_new );
++ ALLOC_or_BOMB(SO_SYN_MALLOC, _Znam, __builtin_vec_new );
++ #endif
++
+ #elif defined(VGO_darwin)
+ // operator new[](unsigned int), GNU mangling
+ #if VG_WORDSIZE == 4
+@@ -466,6 +512,20 @@ static void init(void);
+ ALLOC_or_NULL(SO_SYN_MALLOC, _ZnamRKSt9nothrow_t, __builtin_vec_new );
+ #endif
+
++#elif defined(VGO_netbsd)
++ // operator new[](unsigned, std::nothrow_t const&), GNU mangling
++ #if VG_WORDSIZE == 4
++ ALLOC_or_NULL(VG_Z_LIBSTDCXX_SONAME, _ZnajRKSt9nothrow_t, __builtin_vec_new );
++ ALLOC_or_NULL(VG_Z_LIBSUPCXX_SONAME, _ZnajRKSt9nothrow_t, __builtin_vec_new );
++ ALLOC_or_NULL(SO_SYN_MALLOC, _ZnajRKSt9nothrow_t, __builtin_vec_new );
++ #endif
++ // operator new[](unsigned long, std::nothrow_t const&), GNU mangling
++ #if VG_WORDSIZE == 8
++ ALLOC_or_NULL(VG_Z_LIBSTDCXX_SONAME, _ZnamRKSt9nothrow_t, __builtin_vec_new );
++ ALLOC_or_NULL(VG_Z_LIBSUPCXX_SONAME, _ZnamRKSt9nothrow_t, __builtin_vec_new );
++ ALLOC_or_NULL(SO_SYN_MALLOC, _ZnamRKSt9nothrow_t, __builtin_vec_new );
++ #endif
++
+ #elif defined(VGO_darwin)
+ // operator new[](unsigned, std::nothrow_t const&), GNU mangling
+ #if VG_WORDSIZE == 4
+@@ -529,6 +589,10 @@ static void init(void);
+ FREE(VG_Z_LIBC_SONAME, free, free );
+ FREE(SO_SYN_MALLOC, free, free );
+
++#elif defined(VGO_netbsd)
++ FREE(VG_Z_LIBC_SONAME, free, free );
++ FREE(SO_SYN_MALLOC, free, free );
++
+ #elif defined(VGO_darwin)
+ FREE(VG_Z_LIBC_SONAME, free, free );
+ FREE(SO_SYN_MALLOC, free, free );
+@@ -585,6 +649,10 @@ static void init(void);
+ FREE(SO_SYN_MALLOC, _ZdlPvm, __builtin_delete );
+ #endif
+
++#elif defined(VGO_netbsd)
++ FREE(VG_Z_LIBSTDCXX_SONAME, _ZdlPv, __builtin_delete );
++ FREE(VG_Z_LIBSUPCXX_SONAME, _ZdlPv, __builtin_delete );
++ FREE(SO_SYN_MALLOC, _ZdlPv, __builtin_delete );
+
+ #elif defined(VGO_darwin)
+ // operator delete(void*), GNU mangling
+@@ -616,6 +684,12 @@ static void init(void);
+ FREE(VG_Z_LIBC_SONAME, _ZdlPvRKSt9nothrow_t, __builtin_delete );
+ FREE(SO_SYN_MALLOC, _ZdlPvRKSt9nothrow_t, __builtin_delete );
+
++#elif defined(VGO_netbsd)
++ // operator delete(void*, std::nothrow_t const&), GNU mangling
++ FREE(VG_Z_LIBSTDCXX_SONAME, _ZdlPvRKSt9nothrow_t, __builtin_delete );
++ FREE(VG_Z_LIBSUPCXX_SONAME, _ZdlPvRKSt9nothrow_t, __builtin_delete );
++ FREE(SO_SYN_MALLOC, _ZdlPvRKSt9nothrow_t, __builtin_delete );
++
+ #elif defined(VGO_darwin)
+ // operator delete(void*, std::nothrow_t const&), GNU mangling
+ //FREE(VG_Z_LIBSTDCXX_SONAME, _ZdlPvRKSt9nothrow_t, __builtin_delete );
+@@ -652,6 +726,12 @@ static void init(void);
+ FREE(SO_SYN_MALLOC, _ZdaPvm, __builtin_vec_delete );
+ #endif
+
++#elif defined(VGO_netbsd)
++ // operator delete[](void*), GNU mangling
++ FREE(VG_Z_LIBSTDCXX_SONAME, _ZdaPv, __builtin_vec_delete );
++ FREE(VG_Z_LIBSUPCXX_SONAME, _ZdaPv, __builtin_vec_delete );
++ FREE(SO_SYN_MALLOC, _ZdaPv, __builtin_vec_delete );
++
+ #elif defined(VGO_darwin)
+ // operator delete[](void*), not mangled (for gcc 2.96)
+ //FREE(VG_Z_LIBSTDCXX_SONAME, __builtin_vec_delete, __builtin_vec_delete );
+@@ -685,6 +765,12 @@ static void init(void);
+ FREE(VG_Z_LIBC_SONAME, _ZdaPvRKSt9nothrow_t, __builtin_vec_delete );
+ FREE(SO_SYN_MALLOC, _ZdaPvRKSt9nothrow_t, __builtin_vec_delete );
+
++#elif defined(VGO_netbsd)
++ // operator delete[](void*, std::nothrow_t const&), GNU mangling
++ FREE(VG_Z_LIBSTDCXX_SONAME, _ZdaPvRKSt9nothrow_t, __builtin_vec_delete );
++ FREE(VG_Z_LIBSUPCXX_SONAME, _ZdaPvRKSt9nothrow_t, __builtin_vec_delete );
++ FREE(SO_SYN_MALLOC, _ZdaPvRKSt9nothrow_t, __builtin_vec_delete );
++
+ #elif defined(VGO_darwin)
+ // operator delete[](void*, std::nothrow_t const&), GNU mangling
+ //FREE(VG_Z_LIBSTDCXX_SONAME, _ZdaPvRKSt9nothrow_t, __builtin_vec_delete );
+@@ -751,6 +837,10 @@ static void init(void);
+ CALLOC(VG_Z_LIBC_SONAME, calloc);
+ CALLOC(SO_SYN_MALLOC, calloc);
+
++#elif defined(VGO_netbsd)
++ CALLOC(VG_Z_LIBC_SONAME, calloc);
++ CALLOC(SO_SYN_MALLOC, calloc);
++
+ #elif defined(VGO_darwin)
+ CALLOC(VG_Z_LIBC_SONAME, calloc);
+ CALLOC(SO_SYN_MALLOC, calloc);
+@@ -825,6 +915,11 @@ static void init(void);
+ REALLOC(VG_Z_LIBC_SONAME, realloc);
+ REALLOC(SO_SYN_MALLOC, realloc);
+
++#elif defined(VGO_netbsd)
++ REALLOC(VG_Z_LIBC_SONAME, realloc);
++ REALLOC(SO_SYN_MALLOC, realloc);
++ /* no reallocf on NetBSD (FreeBSD libc extension) */
++
+ #elif defined(VGO_darwin)
+ REALLOC(VG_Z_LIBC_SONAME, realloc);
+ REALLOC(SO_SYN_MALLOC, realloc);
+@@ -898,6 +993,10 @@ static void init(void);
+ MEMALIGN(VG_Z_LIBC_SONAME, memalign);
+ MEMALIGN(SO_SYN_MALLOC, memalign);
+
++#elif defined(VGO_netbsd)
++ MEMALIGN(VG_Z_LIBC_SONAME, memalign);
++ MEMALIGN(SO_SYN_MALLOC, memalign);
++
+ #elif defined(VGO_darwin)
+ MEMALIGN(VG_Z_LIBC_SONAME, memalign);
+ MEMALIGN(SO_SYN_MALLOC, memalign);
+@@ -945,6 +1044,10 @@ static void init(void);
+ VALLOC(VG_Z_LIBC_SONAME, valloc);
+ VALLOC(SO_SYN_MALLOC, valloc);
+
++#elif defined(VGO_netbsd)
++ VALLOC(VG_Z_LIBC_SONAME, valloc);
++ VALLOC(SO_SYN_MALLOC, valloc);
++
+ #elif defined(VGO_darwin)
+ VALLOC(VG_Z_LIBC_SONAME, valloc);
+ VALLOC(SO_SYN_MALLOC, valloc);
+@@ -1062,6 +1165,10 @@ static void init(void);
+ POSIX_MEMALIGN(VG_Z_LIBC_SONAME, posix_memalign);
+ POSIX_MEMALIGN(SO_SYN_MALLOC, posix_memalign);
+
++#elif defined(VGO_netbsd)
++ POSIX_MEMALIGN(VG_Z_LIBC_SONAME, posix_memalign);
++ POSIX_MEMALIGN(SO_SYN_MALLOC, posix_memalign);
++
+ #elif defined(VGO_darwin)
+ //POSIX_MEMALIGN(VG_Z_LIBC_SONAME, posix_memalign);
+
diff --git a/valgrind-netbsd/patches/patch-coregrind_m__scheduler_scheduler.c b/valgrind-netbsd/patches/patch-coregrind_m__scheduler_scheduler.c
new file mode 100644
index 0000000000..e629a944c3
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-coregrind_m__scheduler_scheduler.c
@@ -0,0 +1,22 @@
+$NetBSD$
+
+--- coregrind/m_scheduler/scheduler.c.orig 2018-09-30 04:41:00.000000000 +0000
++++ coregrind/m_scheduler/scheduler.c
+@@ -500,7 +500,7 @@ void VG_(vg_yield)(void)
+ /*
+ Tell the kernel we're yielding.
+ */
+-# if defined(VGO_linux) || defined(VGO_darwin)
++# if defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_netbsd)
+ VG_(do_syscall0)(__NR_sched_yield);
+ # elif defined(VGO_solaris)
+ VG_(do_syscall0)(__NR_yield);
+@@ -539,7 +539,7 @@ static void os_state_clear(ThreadState *
+ tst->os_state.lwpid = 0;
+ tst->os_state.threadgroup = 0;
+ tst->os_state.stk_id = NULL_STK_ID;
+-# if defined(VGO_linux)
++# if defined(VGO_linux) || defined(VGO_netbsd)
+ /* no other fields to clear */
+ # elif defined(VGO_darwin)
+ tst->os_state.post_mach_trap_fn = NULL;
diff --git a/valgrind-netbsd/patches/patch-coregrind_m__sigframe_sigframe-amd64-netbsd.c b/valgrind-netbsd/patches/patch-coregrind_m__sigframe_sigframe-amd64-netbsd.c
new file mode 100644
index 0000000000..97d87ab84a
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-coregrind_m__sigframe_sigframe-amd64-netbsd.c
@@ -0,0 +1,630 @@
+$NetBSD$
+
+--- coregrind/m_sigframe/sigframe-amd64-netbsd.c.orig 2019-03-30 07:21:10.160474699 +0000
++++ coregrind/m_sigframe/sigframe-amd64-netbsd.c
+@@ -0,0 +1,625 @@
++
++/*--------------------------------------------------------------------*/
++/*--- Create/destroy signal delivery frames. ---*/
++/*--- sigframe-amd64-netbsd.c ---*/
++/*--------------------------------------------------------------------*/
++
++/*
++ This file is part of Valgrind, a dynamic binary instrumentation
++ framework.
++
++ Copyright (C) 2000-2005 Nicholas Nethercote
++ njn%valgrind.org@localhost
++
++ This program is free software; you can redistribute it and/or
++ modify it under the terms of the GNU General Public License as
++ published by the Free Software Foundation; either version 2 of the
++ License, or (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful, but
++ WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
++ 02111-1307, USA.
++
++ The GNU General Public License is contained in the file COPYING.
++*/
++
++#if defined(VGP_amd64_netbsd)
++
++#include "pub_core_basics.h"
++#include "pub_core_vki.h"
++#include "pub_core_libcsetjmp.h" // to keep _threadstate.h happy
++#include "pub_core_threadstate.h"
++#include "pub_core_aspacemgr.h" /* find_segment */
++#include "pub_core_libcbase.h"
++#include "pub_core_libcassert.h"
++#include "pub_core_libcprint.h"
++#include "pub_core_machine.h"
++#include "pub_core_options.h"
++#include "pub_core_sigframe.h"
++#include "pub_core_signals.h"
++#include "pub_core_tooliface.h"
++#include "pub_core_trampoline.h"
++
++
++/* This module creates and removes signal frames for signal deliveries
++ on x86-linux.
++
++ Note, this file contains kernel-specific knowledge in the form of
++ 'struct sigframe' and 'struct rt_sigframe'. How does that relate
++ to the vki kernel interface stuff?
++
++ Either a 'struct sigframe' or a 'struct rtsigframe' is pushed
++ onto the client's stack. This contains a subsidiary
++ vki_ucontext. That holds the vcpu's state across the signal,
++ so that the sighandler can mess with the vcpu state if it
++ really wants.
++
++ FIXME: sigcontexting is basically broken for the moment. When
++ delivering a signal, the integer registers and %eflags are
++ correctly written into the sigcontext, however the FP and SSE state
++ is not. When returning from a signal, only the integer registers
++ are restored from the sigcontext; the rest of the CPU state is
++ restored to what it was before the signal.
++
++ This will be fixed.
++*/
++
++
++/*------------------------------------------------------------*/
++/*--- Signal frame layouts ---*/
++/*------------------------------------------------------------*/
++
++// A structure in which to save the application's registers
++// during the execution of signal handlers.
++
++// Linux has 2 signal frame structures: one for normal signal
++// deliveries, and one for SA_SIGINFO deliveries (also known as RT
++// signals).
++//
++// In theory, so long as we get the arguments to the handler function
++// right, it doesn't matter what the exact layout of the rest of the
++// frame is. Unfortunately, things like gcc's exception unwinding
++// make assumptions about the locations of various parts of the frame,
++// so we need to duplicate it exactly.
++
++/* Valgrind-specific parts of the signal frame */
++struct vg_sigframe
++{
++ /* Sanity check word. */
++ UInt magicPI;
++
++ UInt handlerflags; /* flags for signal handler */
++
++
++ /* Safely-saved version of sigNo, as described above. */
++ Int sigNo_private;
++
++ /* XXX This is wrong. Surely we should store the shadow values
++ into the shadow memory behind the actual values? */
++ VexGuestAMD64State vex_shadow1;
++ VexGuestAMD64State vex_shadow2;
++
++ /* HACK ALERT */
++ VexGuestAMD64State vex;
++ /* end HACK ALERT */
++
++ /* saved signal mask to be restored when handler returns */
++ vki_sigset_t mask;
++
++ /* Sanity check word. Is the highest-addressed word; do not
++ move!*/
++ UInt magicE;
++};
++
++struct sigframe
++{
++ /* Sig handler's return address */
++ Addr retaddr;
++ Int sigNo;
++
++/* struct vki_sigcontext sigContext; */
++ struct vki_mcontext sigContext; /* XXX - netbsd */
++// struct _vki_fpstate fpstate;
++
++ struct vg_sigframe vg;
++};
++
++
++//:: /*------------------------------------------------------------*/
++//:: /*--- Signal operations ---*/
++//:: /*------------------------------------------------------------*/
++//::
++//:: /*
++//:: Great gobs of FP state conversion taken wholesale from
++//:: linux/arch/i386/kernel/i387.c
++//:: */
++//::
++//:: /*
++//:: * FXSR floating point environment conversions.
++//:: */
++//:: #define X86_FXSR_MAGIC 0x0000
++//::
++//:: /*
++//:: * FPU tag word conversions.
++//:: */
++//::
++//:: static inline unsigned short twd_i387_to_fxsr( unsigned short twd )
++//:: {
++//:: unsigned int tmp; /* to avoid 16 bit prefixes in the code */
++//::
++//:: /* Transform each pair of bits into 01 (valid) or 00 (empty) */
++//:: tmp = ~twd;
++//:: tmp = (tmp | (tmp>>1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */
++//:: /* and move the valid bits to the lower byte. */
++//:: tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */
++//:: tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */
++//:: tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */
++//:: return tmp;
++//:: }
++//::
++//:: static unsigned long twd_fxsr_to_i387( const struct i387_fxsave_struct *fxsave )
++//:: {
++//:: struct _vki_fpxreg *st = NULL;
++//:: unsigned long twd = (unsigned long) fxsave->twd;
++//:: unsigned long tag;
++//:: unsigned long ret = 0xffff0000u;
++//:: int i;
++//::
++//:: #define FPREG_ADDR(f, n) ((char *)&(f)->st_space + (n) * 16);
++//::
++//:: for ( i = 0 ; i < 8 ; i++ ) {
++//:: if ( twd & 0x1 ) {
++//:: st = (struct _vki_fpxreg *) FPREG_ADDR( fxsave, i );
++//::
++//:: switch ( st->exponent & 0x7fff ) {
++//:: case 0x7fff:
++//:: tag = 2; /* Special */
++//:: break;
++//:: case 0x0000:
++//:: if ( !st->significand[0] &&
++//:: !st->significand[1] &&
++//:: !st->significand[2] &&
++//:: !st->significand[3] ) {
++//:: tag = 1; /* Zero */
++//:: } else {
++//:: tag = 2; /* Special */
++//:: }
++//:: break;
++//:: default:
++//:: if ( st->significand[3] & 0x8000 ) {
++//:: tag = 0; /* Valid */
++//:: } else {
++//:: tag = 2; /* Special */
++//:: }
++//:: break;
++//:: }
++//:: } else {
++//:: tag = 3; /* Empty */
++//:: }
++//:: ret |= (tag << (2 * i));
++//:: twd = twd >> 1;
++//:: }
++//:: return ret;
++//:: }
++//::
++//:: static void convert_fxsr_to_user( struct _vki_fpstate *buf,
++//:: const struct i387_fxsave_struct *fxsave )
++//:: {
++//:: unsigned long env[7];
++//:: struct _vki_fpreg *to;
++//:: struct _vki_fpxreg *from;
++//:: int i;
++//::
++//:: env[0] = (unsigned long)fxsave->cwd | 0xffff0000ul;
++//:: env[1] = (unsigned long)fxsave->swd | 0xffff0000ul;
++//:: env[2] = twd_fxsr_to_i387(fxsave);
++//:: env[3] = fxsave->fip;
++//:: env[4] = fxsave->fcs | ((unsigned long)fxsave->fop << 16);
++//:: env[5] = fxsave->foo;
++//:: env[6] = fxsave->fos;
++//::
++//:: VG_(memcpy)(buf, env, 7 * sizeof(unsigned long));
++//::
++//:: to = &buf->_st[0];
++//:: from = (struct _vki_fpxreg *) &fxsave->st_space[0];
++//:: for ( i = 0 ; i < 8 ; i++, to++, from++ ) {
++//:: unsigned long __user *t = (unsigned long __user *)to;
++//:: unsigned long *f = (unsigned long *)from;
++//::
++//:: t[0] = f[0];
++//:: t[1] = f[1];
++//:: to->exponent = from->exponent;
++//:: }
++//:: }
++//::
++//:: static void convert_fxsr_from_user( struct i387_fxsave_struct *fxsave,
++//:: const struct _vki_fpstate *buf )
++//:: {
++//:: unsigned long env[7];
++//:: struct _vki_fpxreg *to;
++//:: const struct _vki_fpreg *from;
++//:: int i;
++//::
++//:: VG_(memcpy)(env, buf, 7 * sizeof(long));
++//::
++//:: fxsave->cwd = (unsigned short)(env[0] & 0xffff);
++//:: fxsave->swd = (unsigned short)(env[1] & 0xffff);
++//:: fxsave->twd = twd_i387_to_fxsr((unsigned short)(env[2] & 0xffff));
++//:: fxsave->fip = env[3];
++//:: fxsave->fop = (unsigned short)((env[4] & 0xffff0000ul) >> 16);
++//:: fxsave->fcs = (env[4] & 0xffff);
++//:: fxsave->foo = env[5];
++//:: fxsave->fos = env[6];
++//::
++//:: to = (struct _vki_fpxreg *) &fxsave->st_space[0];
++//:: from = &buf->_st[0];
++//:: for ( i = 0 ; i < 8 ; i++, to++, from++ ) {
++//:: unsigned long *t = (unsigned long *)to;
++//:: unsigned long __user *f = (unsigned long __user *)from;
++//::
++//:: t[0] = f[0];
++//:: t[1] = f[1];
++//:: to->exponent = from->exponent;
++//:: }
++//:: }
++//::
++//:: static inline void save_i387_fsave( arch_thread_t *regs, struct _vki_fpstate *buf )
++//:: {
++//:: struct i387_fsave_struct *fs = ®s->m_sse.fsave;
++//::
++//:: fs->status = fs->swd;
++//:: VG_(memcpy)(buf, fs, sizeof(*fs));
++//:: }
++//::
++//:: static void save_i387_fxsave( arch_thread_t *regs, struct _vki_fpstate *buf )
++//:: {
++//:: const struct i387_fxsave_struct *fx = ®s->m_sse.fxsave;
++//:: convert_fxsr_to_user( buf, fx );
++//::
++//:: buf->status = fx->swd;
++//:: buf->magic = X86_FXSR_MAGIC;
++//:: VG_(memcpy)(buf->_fxsr_env, fx, sizeof(struct i387_fxsave_struct));
++//:: }
++//::
++//:: static void save_i387( arch_thread_t *regs, struct _vki_fpstate *buf )
++//:: {
++//:: if ( VG_(have_ssestate) )
++//:: save_i387_fxsave( regs, buf );
++//:: else
++//:: save_i387_fsave( regs, buf );
++//:: }
++//::
++//:: static inline void restore_i387_fsave( arch_thread_t *regs, const struct _vki_fpstate __user *buf )
++//:: {
++//:: VG_(memcpy)( ®s->m_sse.fsave, buf, sizeof(struct i387_fsave_struct) );
++//:: }
++//::
++//:: static void restore_i387_fxsave( arch_thread_t *regs, const struct _vki_fpstate __user *buf )
++//:: {
++//:: VG_(memcpy)(®s->m_sse.fxsave, &buf->_fxsr_env[0],
++//:: sizeof(struct i387_fxsave_struct) );
++//:: /* mxcsr reserved bits must be masked to zero for security reasons */
++//:: regs->m_sse.fxsave.mxcsr &= 0xffbf;
++//:: convert_fxsr_from_user( ®s->m_sse.fxsave, buf );
++//:: }
++//::
++//:: static void restore_i387( arch_thread_t *regs, const struct _vki_fpstate __user *buf )
++//:: {
++//:: if ( VG_(have_ssestate) ) {
++//:: restore_i387_fxsave( regs, buf );
++//:: } else {
++//:: restore_i387_fsave( regs, buf );
++//:: }
++//:: }
++
++
++/*------------------------------------------------------------*/
++/*--- Creating signal frames ---*/
++/*------------------------------------------------------------*/
++
++/* Create a plausible-looking sigcontext from the thread's
++ Vex guest state. NOTE: does not fill in the FP or SSE
++ bits of sigcontext at the moment.
++*/
++/* static */
++/* void synth_ucontext(ThreadId tid, const vki_siginfo_t *si, */
++/* const vki_sigset_t *set, */
++/* struct vki_ucontext *uc, struct _vki_fpstate *fpstate) */
++/* { */
++/* ThreadState *tst = VG_(get_ThreadState)(tid); */
++/* struct vki_sigcontext *sc = &uc->uc_mcontext; */
++
++/* VG_(memset)(uc, 0, sizeof(*uc)); */
++
++/* uc->uc_flags = 0; */
++/* uc->uc_link = 0; */
++/* uc->uc_sigmask = *set; */
++/* uc->uc_stack = tst->altstack; */
++/* sc->fpstate = fpstate; */
++
++/* // FIXME: save_i387(&tst->arch, fpstate); */
++
++/* # define SC2(reg,REG) sc->reg = tst->arch.vex.guest_##REG */
++/* SC2(gs,GS); */
++/* SC2(fs,FS); */
++/* SC2(es,ES); */
++/* SC2(ds,DS); */
++
++/* SC2(edi,EDI); */
++/* SC2(esi,ESI); */
++/* SC2(ebp,EBP); */
++/* SC2(esp,ESP); */
++/* SC2(ebx,EBX); */
++/* SC2(edx,EDX); */
++/* SC2(ecx,ECX); */
++/* SC2(eax,EAX); */
++
++/* SC2(eip,EIP); */
++/* SC2(cs,CS); */
++/* sc->eflags = LibVEX_GuestX86_get_eflags(&tst->arch.vex); */
++/* SC2(ss,SS); */
++/* /\* XXX esp_at_signal *\/ */
++/* /\* XXX trapno *\/ */
++/* /\* XXX err *\/ */
++/* # undef SC2 */
++
++/* sc->cr2 = (UInt)si->_sifields._sigfault._addr; */
++
++/* } */
++
++
++/* Extend the stack segment downwards if needed so as to ensure the
++ new signal frames are mapped to something. Return a Bool
++ indicating whether or not the operation was successful.
++*/
++/* static Bool extend ( ThreadState *tst, Addr addr, SizeT size ) */
++/* { */
++/* ThreadId tid = tst->tid; */
++/* NSegment *stackseg = NULL; */
++
++/* if (VG_(extend_stack)(addr, tst->client_stack_szB)) { */
++/* stackseg = VG_(am_find_nsegment)(addr); */
++/* if (0 && stackseg) */
++/* VG_(printf)("frame=%p seg=%p-%p\n", */
++/* addr, stackseg->start, stackseg->end); */
++/* } */
++
++/* if (stackseg == NULL || !stackseg->hasR || !stackseg->hasW) { */
++/* VG_(message)( */
++/* Vg_UserMsg, */
++/* "Can't extend stack to %p during signal delivery for thread %d:", */
++/* addr, tid); */
++/* if (stackseg == NULL) */
++/* VG_(message)(Vg_UserMsg, " no stack segment"); */
++/* else */
++/* VG_(message)(Vg_UserMsg, " too small or bad protection modes"); */
++
++/* /\* set SIGSEGV to default handler *\/ */
++/* VG_(set_default_handler)(VKI_SIGSEGV); */
++/* VG_(synth_fault_mapping)(tid, addr); */
++
++/* /\* The whole process should be about to die, since the default */
++/* action of SIGSEGV to kill the whole process. *\/ */
++/* return False; */
++/* } */
++
++/* /\* For tracking memory events, indicate the entire frame has been */
++/* allocated. *\/ */
++/* VG_TRACK( new_mem_stack_signal, addr - VG_STACK_REDZONE_SZB, */
++/* size + VG_STACK_REDZONE_SZB ); */
++
++/* return True; */
++/* } */
++
++
++/* Build the Valgrind-specific part of a signal frame. */
++
++/* static void build_vg_sigframe(struct vg_sigframe *frame, */
++/* ThreadState *tst, */
++/* const vki_sigset_t *mask, */
++/* UInt flags, */
++/* Int sigNo) */
++/* { */
++/* frame->sigNo_private = sigNo; */
++/* frame->magicPI = 0x31415927; */
++/* frame->vex_shadow = tst->arch.vex_shadow; */
++/* /\* HACK ALERT *\/ */
++/* frame->vex = tst->arch.vex; */
++/* /\* end HACK ALERT *\/ */
++/* frame->mask = tst->sig_mask; */
++/* frame->handlerflags = flags; */
++/* frame->magicE = 0x27182818; */
++/* } */
++
++
++static Addr build_sigframe(ThreadState *tst,
++ Addr esp_top_of_frame,
++ const vki_siginfo_t *siginfo,
++ void *handler, UInt flags,
++ const vki_sigset_t *mask,
++ void *restorer)
++{
++/* struct sigframe *frame; */
++/* Addr esp = esp_top_of_frame; */
++/* Int sigNo = siginfo->si_signo; */
++/* struct vki_ucontext uc; */
++
++/* vg_assert((flags & VKI_SA_SIGINFO) == 0); */
++
++/* esp -= sizeof(*frame); */
++/* esp = VG_ROUNDDN(esp, 16); */
++/* frame = (struct sigframe *)esp; */
++
++/* if (!extend(tst, esp, sizeof(*frame))) */
++/* return esp_top_of_frame; */
++
++/* /\* retaddr, sigNo, siguContext fields are to be written *\/ */
++/* VG_TRACK( pre_mem_write, Vg_CoreSignal, tst->tid, "signal handler frame", */
++/* esp, offsetof(struct sigframe, vg) ); */
++
++/* frame->sigNo = sigNo; */
++
++/* if (flags & VKI_SA_RESTORER) */
++/* frame->retaddr = (Addr)restorer; */
++/* else */
++/* frame->retaddr = (Addr)&VG_(x86_netbsdelf2_SUBST_FOR_sigreturn); */
++
++/* synth_ucontext(tst->tid, siginfo, mask, &uc, &frame->fpstate); */
++
++/* VG_(memcpy)(&frame->sigContext, &uc.uc_mcontext, */
++/* sizeof(struct vki_sigcontext)); */
++/* frame->sigContext.oldmask = mask->sig[0]; */
++
++/* VG_TRACK( post_mem_write, Vg_CoreSignal, tst->tid, */
++/* esp, offsetof(struct sigframe, vg) ); */
++
++/* build_vg_sigframe(&frame->vg, tst, mask, flags, sigNo); */
++
++/* return esp; */
++ I_die_here;
++}
++
++/* EXPORTED */
++void VG_(sigframe_create)( ThreadId tid,
++ Bool on_altstack,
++ Addr rsp_top_of_frame,
++ const vki_siginfo_t *siginfo,
++ const struct vki_ucontext *siguc,
++ void *handler,
++ UInt flags,
++ const vki_sigset_t *mask,
++ void *restorer )
++{
++ Addr esp;
++ ThreadState* tst = VG_(get_ThreadState)(tid);
++
++ esp = build_sigframe(tst, rsp_top_of_frame,
++ siginfo, handler, flags, mask, restorer);
++
++ /* Set the thread so it will next run the handler. */
++ /* tst->m_esp = esp; also notify the tool we've updated ESP */
++ VG_(set_SP)(tid, esp);
++ VG_TRACK( post_reg_write, Vg_CoreSignal, tid, VG_O_STACK_PTR, sizeof(Addr));
++
++ //VG_(printf)("handler = %p\n", handler);
++ tst->arch.vex.guest_RIP = (Addr) handler;
++ /* This thread needs to be marked runnable, but we leave that the
++ caller to do. */
++
++ if (0)
++ VG_(printf)("pushed signal frame; %%RSP now = %p, "
++ "next %%RIP = %p, status=%d\n",
++ esp, tst->arch.vex.guest_RIP, tst->status);
++}
++
++
++/*------------------------------------------------------------*/
++/*--- Destroying signal frames ---*/
++/*------------------------------------------------------------*/
++
++/* Return False and don't do anything, just set the client to take a
++ segfault, if it looks like the frame is corrupted. */
++static
++Bool restore_vg_sigframe ( ThreadState *tst,
++ struct vg_sigframe *frame, Int *sigNo )
++{
++ if (frame->magicPI != 0x31415927 ||
++ frame->magicE != 0x27182818) {
++ VG_(message)(Vg_UserMsg, "Thread %d return signal frame "
++ "corrupted. Killing process.",
++ tst->tid);
++ VG_(set_default_handler)(VKI_SIGSEGV);
++ VG_(synth_fault)(tst->tid);
++ *sigNo = VKI_SIGSEGV;
++ return False;
++ }
++ tst->sig_mask = frame->mask;
++ tst->tmp_sig_mask = frame->mask;
++// tst->arch.vex_shadow = frame->vex_shadow;
++ /* HACK ALERT */
++ tst->arch.vex = frame->vex;
++ /* end HACK ALERT */
++ *sigNo = frame->sigNo_private;
++ return True;
++}
++
++static
++void restore_sigcontext( ThreadState *tst,
++ struct vki_mcontext *sc, /* was sigcontext */
++ struct _vki_fpstate *fpstate )
++{
++/* tst->arch.vex.guest_EAX = sc->eax; */
++/* tst->arch.vex.guest_ECX = sc->ecx; */
++/* tst->arch.vex.guest_EDX = sc->edx; */
++/* tst->arch.vex.guest_EBX = sc->ebx; */
++/* tst->arch.vex.guest_EBP = sc->ebp; */
++/* tst->arch.vex.guest_ESP = sc->esp; */
++/* tst->arch.vex.guest_ESI = sc->esi; */
++/* tst->arch.vex.guest_EDI = sc->edi; */
++/* //:: tst->arch.vex.guest_eflags = sc->eflags; */
++/* //:: tst->arch.vex.guest_RIP = sc->eip; */
++
++/* tst->arch.vex.guest_CS = sc->cs; */
++/* tst->arch.vex.guest_SS = sc->ss; */
++/* tst->arch.vex.guest_DS = sc->ds; */
++/* tst->arch.vex.guest_ES = sc->es; */
++/* tst->arch.vex.guest_FS = sc->fs; */
++/* tst->arch.vex.guest_GS = sc->gs; */
++
++/* //:: restore_i387(&tst->arch, fpstate); */
++ I_die_here;
++}
++
++
++static
++SizeT restore_sigframe ( ThreadState *tst,
++ struct sigframe *frame, Int *sigNo )
++{
++   (void) restore_vg_sigframe(tst, &frame->vg, sigNo); /* sets *sigNo */
++   /* XXX restore_sigcontext(tst, &frame->sigContext, ...) still unimplemented */
++
++ return sizeof(*frame);
++}
++
++
++/* EXPORTED */
++void VG_(sigframe_destroy)( ThreadId tid )
++{
++ Bool isRT = False;
++ /* XXX isRT is never set on NetBSD; kept only for the trace message below */
++ Addr rsp;
++ ThreadState* tst;
++ SizeT size;
++ Int sigNo;
++
++ tst = VG_(get_ThreadState)(tid);
++
++ /* Correctly reestablish the frame base address. */
++ rsp = tst->arch.vex.guest_RSP;
++
++ size = restore_sigframe(tst, (struct sigframe *)rsp, &sigNo);
++
++ VG_TRACK( die_mem_stack_signal, rsp - VG_STACK_REDZONE_SZB,
++ size + VG_STACK_REDZONE_SZB );
++
++ if (VG_(clo_trace_signals))
++ VG_(message)(
++ Vg_DebugMsg,
++ "VG_(signal_return) (thread %d): isRT=%d valid magic; RIP=%p",
++ tid, isRT, tst->arch.vex.guest_RIP);
++
++ /* tell the tools */
++ VG_TRACK( post_deliver_signal, tid, sigNo );
++}
++
++#endif
++
++/*--------------------------------------------------------------------*/
++/*--- end sigframe-amd64-netbsd.c ---*/
++/*--------------------------------------------------------------------*/
diff --git a/valgrind-netbsd/patches/patch-coregrind_m__sigframe_sigframe-common.c b/valgrind-netbsd/patches/patch-coregrind_m__sigframe_sigframe-common.c
new file mode 100644
index 0000000000..4f6fc267c5
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-coregrind_m__sigframe_sigframe-common.c
@@ -0,0 +1,13 @@
+$NetBSD$
+
+--- coregrind/m_sigframe/sigframe-common.c.orig 2018-05-05 07:42:22.000000000 +0000
++++ coregrind/m_sigframe/sigframe-common.c
+@@ -49,7 +49,7 @@ static void track_frame_memory ( Addr ad
+ VG_TRACK( new_mem_stack_signal, addr - VG_STACK_REDZONE_SZB, size, tid );
+ }
+
+-#if defined(VGO_linux) || defined(VGO_solaris)
++#if defined(VGO_linux) || defined(VGO_solaris) || defined(VGO_netbsd)
+
+ /* Extend the stack segment downwards if needed so as to ensure the
+ new signal frames are mapped to something. Return a Bool
diff --git a/valgrind-netbsd/patches/patch-coregrind_m__signals.c b/valgrind-netbsd/patches/patch-coregrind_m__signals.c
new file mode 100644
index 0000000000..12fa4edf40
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-coregrind_m__signals.c
@@ -0,0 +1,202 @@
+$NetBSD$
+
+--- coregrind/m_signals.c.orig 2018-09-30 04:41:00.000000000 +0000
++++ coregrind/m_signals.c
+@@ -512,6 +512,22 @@ typedef struct SigQueue {
+ srP->misc.AMD64.r_rbp = (ULong)(ss->__rbp);
+ }
+
++#elif defined(VGP_amd64_netbsd)
++# define VG_UCONTEXT_INSTR_PTR(uc) VKI__UC_MACHINE_PC(uc)
++# define VG_UCONTEXT_STACK_PTR(uc) VKI__UC_MACHINE_SP(uc)
++# define VG_UCONTEXT_FRAME_PTR(uc) VKI__UC_MACHINE_FP(uc)
++# define VG_UCONTEXT_SYSCALL_NUM(uc) ((uc)->uc_mcontext.__gregs[VKI__REG_RAX])
++# define VG_UCONTEXT_SYSCALL_SYSRES(uc) \
++ /* Convert the value in uc_mcontext.rax into a SysRes. */ \
++ VG_(mk_SysRes_amd64_netbsd)( ((uc)->uc_mcontext.__gregs[VKI__REG_RAX]), \
++ ((uc)->uc_mcontext.__gregs[VKI__REG_RDX]), ((uc)->uc_mcontext.__gregs[VKI__REG_RFLAGS]) != 0 ? True : False )
++# define VG_UCONTEXT_LINK_REG(uc) 0 /* No LR on amd64 either */
++# define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc) \
++ { (srP)->r_pc = VKI__UC_MACHINE_PC(uc); \
++ (srP)->r_sp = VKI__UC_MACHINE_SP(uc); \
++ (srP)->misc.AMD64.r_rbp = VKI__UC_MACHINE_FP(uc); \
++ }
++
+ #elif defined(VGP_s390x_linux)
+
+ # define VG_UCONTEXT_INSTR_PTR(uc) ((uc)->uc_mcontext.regs.psw.addr)
+@@ -605,7 +621,7 @@ typedef struct SigQueue {
+ #if defined(VGO_linux)
+ # define VKI_SIGINFO_si_addr _sifields._sigfault._addr
+ # define VKI_SIGINFO_si_pid _sifields._kill._pid
+-#elif defined(VGO_darwin) || defined(VGO_solaris)
++#elif defined(VGO_darwin) || defined(VGO_solaris) || defined(VGO_netbsd)
+ # define VKI_SIGINFO_si_addr si_addr
+ # define VKI_SIGINFO_si_pid si_pid
+ #else
+@@ -979,7 +995,7 @@ extern void my_sigreturn(void);
+ " syscall\n" \
+ ".previous\n"
+
+-#elif defined(VGP_x86_solaris) || defined(VGP_amd64_solaris)
++#elif defined(VGP_x86_solaris) || defined(VGP_amd64_solaris) || defined(VGO_netbsd)
+ /* Not used on Solaris. */
+ # define _MY_SIGRETURN(name) \
+ ".text\n" \
+@@ -1031,7 +1047,7 @@ static void handle_SCSS_change ( Bool fo
+ ksa.sa_flags = skss.skss_per_sig[sig].skss_flags;
+ # if !defined(VGP_ppc32_linux) && \
+ !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin) && \
+- !defined(VGP_mips32_linux) && !defined(VGO_solaris)
++ !defined(VGP_mips32_linux) && !defined(VGO_solaris) && !defined(VGO_netbsd)
+ ksa.sa_restorer = my_sigreturn;
+ # endif
+ /* Re above ifdef (also the assertion below), PaulM says:
+@@ -1078,7 +1094,7 @@ static void handle_SCSS_change ( Bool fo
+ # if !defined(VGP_ppc32_linux) && \
+ !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin) && \
+ !defined(VGP_mips32_linux) && !defined(VGP_mips64_linux) && \
+- !defined(VGO_solaris)
++ !defined(VGO_solaris) && !defined(VGO_netbsd)
+ vg_assert(ksa_old.sa_restorer == my_sigreturn);
+ # endif
+ VG_(sigaddset)( &ksa_old.sa_mask, VKI_SIGKILL );
+@@ -1199,7 +1215,7 @@ SysRes VG_(do_sys_sigaction) ( Int signo
+ old_act->sa_flags = scss.scss_per_sig[signo].scss_flags;
+ old_act->sa_mask = scss.scss_per_sig[signo].scss_mask;
+ # if !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin) && \
+- !defined(VGO_solaris)
++ !defined(VGO_solaris) && !defined(VGO_netbsd)
+ old_act->sa_restorer = scss.scss_per_sig[signo].scss_restorer;
+ # endif
+ }
+@@ -1212,7 +1228,7 @@ SysRes VG_(do_sys_sigaction) ( Int signo
+
+ scss.scss_per_sig[signo].scss_restorer = NULL;
+ # if !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin) && \
+- !defined(VGO_solaris)
++ !defined(VGO_solaris) && !defined(VGO_netbsd)
+ scss.scss_per_sig[signo].scss_restorer = new_act->sa_restorer;
+ # endif
+
+@@ -1558,7 +1574,7 @@ void VG_(kill_self)(Int sigNo)
+ sa.ksa_handler = VKI_SIG_DFL;
+ sa.sa_flags = 0;
+ # if !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin) && \
+- !defined(VGO_solaris)
++ !defined(VGO_solaris) && !defined(VGO_netbsd)
+ sa.sa_restorer = 0;
+ # endif
+ VG_(sigemptyset)(&sa.sa_mask);
+@@ -1586,14 +1602,14 @@ void VG_(kill_self)(Int sigNo)
+ // request (SI_ASYNCIO). There's lots of implementation-defined leeway in
+ // POSIX, but the user vs. kernal distinction is what we want here. We also
+ // pass in some other details that can help when si_code is unreliable.
+-static Bool is_signal_from_kernel(ThreadId tid, int signum, int si_code)
++static Bool is_signal_from_kernel(ThreadId tid, int signum, int my_si_code)
+ {
+-# if defined(VGO_linux) || defined(VGO_solaris)
++# if defined(VGO_linux) || defined(VGO_solaris) || defined(VGO_netbsd)
+ // On Linux, SI_USER is zero, negative values are from the user, positive
+ // values are from the kernel. There are SI_FROMUSER and SI_FROMKERNEL
+ // macros but we don't use them here because other platforms don't have
+ // them.
+- return ( si_code > VKI_SI_USER ? True : False );
++ return ( my_si_code > VKI_SI_USER ? True : False );
+
+ # elif defined(VGO_darwin)
+ // On Darwin 9.6.0, the si_code is completely unreliable. It should be the
+@@ -1615,7 +1631,7 @@ static Bool is_signal_from_kernel(Thread
+
+ // If it's a SIGSEGV, use the proper condition, since it's fairly reliable.
+ } else if (SIGSEGV == signum) {
+- return ( si_code > 0 ? True : False );
++ return ( my_si_code > 0 ? True : False );
+
+ // If it's anything else, assume it's kernel-generated. Reason being that
+ // kernel-generated sync signals are more common, and it's probable that
+@@ -2025,7 +2041,7 @@ static void resume_scheduler(ThreadId ti
+ }
+ }
+
+-static void synth_fault_common(ThreadId tid, Addr addr, Int si_code)
++static void synth_fault_common(ThreadId tid, Addr addr, Int my_si_code)
+ {
+ vki_siginfo_t info;
+
+@@ -2033,7 +2049,7 @@ static void synth_fault_common(ThreadId
+
+ VG_(memset)(&info, 0, sizeof(info));
+ info.si_signo = VKI_SIGSEGV;
+- info.si_code = si_code;
++ info.si_code = my_si_code;
+ info.VKI_SIGINFO_si_addr = (void*)addr;
+
+ /* Even if gdbserver indicates to ignore the signal, we must deliver it.
+@@ -2266,7 +2282,7 @@ static vki_siginfo_t *next_queued(Thread
+ return ret;
+ }
+
+-static int sanitize_si_code(int si_code)
++static int sanitize_si_code(int my_si_code)
+ {
+ #if defined(VGO_linux)
+ /* The linux kernel uses the top 16 bits of si_code for it's own
+@@ -2277,9 +2293,9 @@ static int sanitize_si_code(int si_code)
+ The kernel treats the bottom 16 bits as signed and (when it does
+ mask them off) sign extends them when exporting to user space so
+ we do the same thing here. */
+- return (Short)si_code;
+-#elif defined(VGO_darwin) || defined(VGO_solaris)
+- return si_code;
++ return (Short)my_si_code;
++#elif defined(VGO_darwin) || defined(VGO_solaris) || defined(VGO_netbsd)
++ return my_si_code;
+ #else
+ # error Unknown OS
+ #endif
+@@ -2463,7 +2479,7 @@ void async_signalhandler ( Int sigNo,
+ /* (1) */
+ VG_(fixup_guest_state_after_syscall_interrupted)(
+ tid,
+- VG_UCONTEXT_INSTR_PTR(uc),
++ VG_UCONTEXT_INSTR_PTR(uc),
+ sres,
+ !!(scss.scss_per_sig[sigNo].scss_flags & VKI_SA_RESTART),
+ uc
+@@ -2904,7 +2920,7 @@ void pp_ksigaction ( vki_sigaction_toK_t
+ sa->ksa_handler,
+ (UInt)sa->sa_flags,
+ # if !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin) && \
+- !defined(VGO_solaris)
++ !defined(VGO_solaris) && !defined(VGO_netbsd)
+ sa->sa_restorer
+ # else
+ (void*)0
+@@ -2927,7 +2943,7 @@ void VG_(set_default_handler)(Int signo)
+ sa.ksa_handler = VKI_SIG_DFL;
+ sa.sa_flags = 0;
+ # if !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin) && \
+- !defined(VGO_solaris)
++ !defined(VGO_solaris) && !defined(VGO_netbsd)
+ sa.sa_restorer = 0;
+ # endif
+ VG_(sigemptyset)(&sa.sa_mask);
+@@ -3047,7 +3063,7 @@ void VG_(sigstartup_actions) ( void )
+ tsa.ksa_handler = (void *)sync_signalhandler;
+ tsa.sa_flags = VKI_SA_SIGINFO;
+ # if !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin) && \
+- !defined(VGO_solaris)
++ !defined(VGO_solaris) && !defined(VGO_netbsd)
+ tsa.sa_restorer = 0;
+ # endif
+ VG_(sigfillset)(&tsa.sa_mask);
+@@ -3075,7 +3091,7 @@ void VG_(sigstartup_actions) ( void )
+
+ scss.scss_per_sig[i].scss_restorer = NULL;
+ # if !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin) && \
+- !defined(VGO_solaris)
++ !defined(VGO_solaris) && !defined(VGO_netbsd)
+ scss.scss_per_sig[i].scss_restorer = sa.sa_restorer;
+ # endif
+
diff --git a/valgrind-netbsd/patches/patch-coregrind_m__stacktrace.c b/valgrind-netbsd/patches/patch-coregrind_m__stacktrace.c
new file mode 100644
index 0000000000..7225fba364
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-coregrind_m__stacktrace.c
@@ -0,0 +1,48 @@
+$NetBSD$
+
+--- coregrind/m_stacktrace.c.orig 2018-05-05 07:42:22.000000000 +0000
++++ coregrind/m_stacktrace.c
+@@ -500,7 +500,7 @@ UInt VG_(get_StackTrace_wrk) ( ThreadId
+ /* ----------------------- amd64 ------------------------ */
+
+ #if defined(VGP_amd64_linux) || defined(VGP_amd64_darwin) \
+- || defined(VGP_amd64_solaris)
++ || defined(VGP_amd64_solaris) || defined(VGP_amd64_netbsd)
+
+ UInt VG_(get_StackTrace_wrk) ( ThreadId tid_if_known,
+ /*OUT*/Addr* ips, UInt max_n_ips,
+@@ -638,6 +638,34 @@ UInt VG_(get_StackTrace_wrk) ( ThreadId
+ continue;
+ }
+
++#if defined(VGO_netbsd)
++ const NSegment *seg;
++ const HChar *filename = NULL;
++ int match = 0;
++
++ seg = VG_(am_find_nsegment)(uregs.xip);
++ if (seg != NULL) {
++ filename = VG_(am_get_filename)(seg);
++ }
++ if (filename != NULL && VG_(strstr)(filename, "/libc.so")) {
++ match = 1;
++ }
++ if (match == 1 && fp_min <= uregs.xsp &&
++ uregs.xsp <= fp_max - 1 * sizeof(UWord)) {
++ /* fp looks sane, so use it. */
++ uregs.xip = (((UWord*)uregs.xsp)[0]);
++ if (0 == uregs.xip || 1 == uregs.xip) break;
++ if (fps) fps[i] = uregs.xsp;
++ uregs.xsp = uregs.xsp + sizeof(Addr) /*ra*/;
++ if (sps) sps[i] = uregs.xsp;
++ ips[i++] = uregs.xip - 1; /* -1: refer to calling insn, not the RA */
++ if (debug)
++ VG_(printf)(" ipsFF[%d]=%#08lx\n", i-1, ips[i-1]);
++ uregs.xip = uregs.xip - 1; /* as per comment at the head of this loop */
++ continue;
++ }
++#endif
++
+ /* If VG_(use_CF_info) fails, it won't modify ip/sp/fp, so
+ we can safely try the old-fashioned method. */
+ /* This bit is supposed to deal with frames resulting from
diff --git a/valgrind-netbsd/patches/patch-coregrind_m__syscall.c b/valgrind-netbsd/patches/patch-coregrind_m__syscall.c
new file mode 100644
index 0000000000..60eb76991e
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-coregrind_m__syscall.c
@@ -0,0 +1,109 @@
+$NetBSD$
+
+--- coregrind/m_syscall.c.orig 2018-07-13 08:52:05.000000000 +0000
++++ coregrind/m_syscall.c
+@@ -331,6 +331,33 @@ SysRes VG_(mk_SysRes_amd64_solaris) ( Bo
+ return res;
+ }
+
++#elif defined(VGO_netbsd)
++
++SysRes VG_(mk_SysRes_amd64_netbsd) ( ULong val, ULong val2, Bool err ) {
++ SysRes r;
++ r._isError = err;
++ r._val = val;
++ r._val2 = val2;
++ return r;
++}
++
++/* Generic constructors. */
++SysRes VG_(mk_SysRes_Error) ( UWord err ) {
++ SysRes r;
++ r._val = err;
++ r._val2 = 0;
++ r._isError = True;
++ return r;
++}
++
++SysRes VG_(mk_SysRes_Success) ( UWord res ) {
++ SysRes r;
++ r._val = res;
++ r._val2 = 0;
++ r._isError = False;
++ return r;
++}
++
+ #else
+ # error "Unknown OS"
+ #endif
+@@ -676,6 +703,55 @@ asm(
+ );
+
+
++#elif defined(VGP_amd64_netbsd)
++extern UWord do_syscall_WRK (
++ UWord syscall_no, /* %rdi */
++ UWord a1, /* %rsi */
++ UWord a2, /* %rdx */
++ UWord a3, /* %rcx */
++ UWord a4, /* %r8 */
++ UWord a5, /* %r9 */
++ UWord a6, /* 8(%rsp) */
++ UWord a7, /* 16(%rsp) */
++ UWord a8, /* 24(%rsp) */
++ UInt *flags, /* 32(%rsp) */
++ UWord *rv2 /* 40(%rsp) */
++ );
++asm(
++".text\n"
++"do_syscall_WRK:\n"
++ /* Convert function calling convention --> syscall calling
++ convention */
++" pushq %rbp\n"
++" movq %rsp, %rbp\n"
++" movq %rdi, %rax\n" /* syscall_no */
++" movq %rsi, %rdi\n" /* a1 */
++" movq %rdx, %rsi\n" /* a2 */
++" movq %rcx, %rdx\n" /* a3 */
++" movq %r8, %r10\n" /* a4 */
++" movq %r9, %r8\n" /* a5 */
++" movq 16(%rbp), %r9\n" /* a6 last arg from stack, account for %rbp */
++" movq 24(%rbp), %r11\n" /* a7 from stack */
++" pushq %r11\n"
++" movq 32(%rbp), %r11\n" /* a8 from stack */
++" pushq %r11\n"
++" subq $8,%rsp\n" /* fake return addr */
++" syscall\n"
++" jb 1f\n"
++" movq 48(%rbp),%rsi\n"
++" movq %rdx, (%rsi)\n"
++" movq %rbp, %rsp\n"
++" popq %rbp\n"
++" ret\n"
++"1:\n"
++" movq 40(%rbp), %rsi\n"
++" movl $1,(%rsi)\n"
++" movq %rbp, %rsp\n"
++" popq %rbp\n"
++" ret\n"
++".previous\n"
++);
++
+ #elif defined(VGP_amd64_darwin)
+
+ /* Incoming args (syscall number + up to 8 args) come in registers and stack
+@@ -928,6 +1004,14 @@ SysRes VG_(do_syscall) ( UWord sysno, Re
+ UWord val = do_syscall_WRK(sysno,a1,a2,a3,a4,a5,a6);
+ return VG_(mk_SysRes_x86_linux)( val );
+
++# elif defined(VGP_amd64_netbsd)
++ UWord val;
++ UWord val2 = 0;
++ UInt err = 0;
++ val = do_syscall_WRK(sysno, a1, a2, a3, a4, a5,
++ a6, a7, a8, &err, &val2);
++ return VG_(mk_SysRes_amd64_netbsd)( val, val2, (err & 1) != 0 ? True : False);
++
+ # elif defined(VGP_amd64_linux)
+ UWord val = do_syscall_WRK(sysno,a1,a2,a3,a4,a5,a6);
+ return VG_(mk_SysRes_amd64_linux)( val );
diff --git a/valgrind-netbsd/patches/patch-coregrind_m__syswrap_priv__syswrap-netbsd.h b/valgrind-netbsd/patches/patch-coregrind_m__syswrap_priv__syswrap-netbsd.h
new file mode 100644
index 0000000000..cc91bffe62
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-coregrind_m__syswrap_priv__syswrap-netbsd.h
@@ -0,0 +1,286 @@
+$NetBSD$
+
+--- coregrind/m_syswrap/priv_syswrap-netbsd.h.orig 2019-03-29 12:02:52.642924072 +0000
++++ coregrind/m_syswrap/priv_syswrap-netbsd.h
+@@ -0,0 +1,281 @@
++
++/*--------------------------------------------------------------------*/
++/*--- netbsd-specific syscalls stuff. priv_syswrap-netbsd.h ---*/
++/*--------------------------------------------------------------------*/
++
++/*
++ This file is part of Valgrind, a dynamic binary instrumentation
++ framework.
++
++ Copyright (C) 2000-2008 Nicholas Nethercote
++ njn%valgrind.org@localhost
++
++ This program is free software; you can redistribute it and/or
++ modify it under the terms of the GNU General Public License as
++ published by the Free Software Foundation; either version 2 of the
++ License, or (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful, but
++ WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
++ 02111-1307, USA.
++
++ The GNU General Public License is contained in the file COPYING.
++*/
++
++#ifndef __PRIV_SYSWRAP_NETBSD_H
++#define __PRIV_SYSWRAP_NETBSD_H
++
++/* requires #include "priv_types_n_macros.h" */
++
++// Clone-related functions
++extern Word ML_(start_thread_NORETURN) ( void* arg );
++extern Addr ML_(allocstack) ( ThreadId tid );
++extern void ML_(call_on_new_stack_0_1) ( Addr stack, Addr retaddr,
++ void (*f)(Word), Word arg1 );
++extern SysRes ML_(do_fork) ( ThreadId tid );
++extern SysRes ML_(do_vfork) ( ThreadId tid );
++extern SysRes ML_(do_rfork) ( ThreadId tid, Int flags );
++
++#if 0
++DECL_TEMPLATE(netbsd, sys_syscall);
++DECL_TEMPLATE(netbsd, sys_exit);
++DECL_TEMPLATE(netbsd, sys_getfsstat4);
++DECL_TEMPLATE(netbsd, sys_getfsstat);
++DECL_TEMPLATE(netbsd, sys_mount);
++DECL_TEMPLATE(netbsd, sys_unmount);
++DECL_TEMPLATE(netbsd, sys_ptrace);
++DECL_TEMPLATE(netbsd, sys_recvmsg);
++DECL_TEMPLATE(netbsd, sys_sendmsg);
++DECL_TEMPLATE(netbsd, sys_recvfrom);
++DECL_TEMPLATE(netbsd, sys_accept);
++DECL_TEMPLATE(netbsd, sys_getpeername);
++DECL_TEMPLATE(netbsd, sys_getsockname);
++DECL_TEMPLATE(netbsd, sys_chflags);
++DECL_TEMPLATE(netbsd, sys_fchflags);
++DECL_TEMPLATE(netbsd, sys_pipe);
++DECL_TEMPLATE(netbsd, sys_pipe2);
++DECL_TEMPLATE(netbsd, sys_ktrace);
++DECL_TEMPLATE(netbsd, sys_getlogin);
++DECL_TEMPLATE(netbsd, sys_setlogin);
++DECL_TEMPLATE(netbsd, sys_reboot);
++DECL_TEMPLATE(netbsd, sys_revoke);
++DECL_TEMPLATE(netbsd, sys_sbrk);
++DECL_TEMPLATE(netbsd, sys_sstk);
++DECL_TEMPLATE(netbsd, sys_swapon);
++DECL_TEMPLATE(netbsd, sys_getdtablesize);
++DECL_TEMPLATE(netbsd, sys_socket);
++DECL_TEMPLATE(netbsd, sys_connect);
++DECL_TEMPLATE(netbsd, sys_bind);
++DECL_TEMPLATE(netbsd, sys_setsockopt);
++DECL_TEMPLATE(netbsd, sys_listen);
++DECL_TEMPLATE(netbsd, sys_getsockopt);
++DECL_TEMPLATE(netbsd, sys_mkfifo);
++DECL_TEMPLATE(netbsd, sys_sendto);
++DECL_TEMPLATE(netbsd, sys_shutdown);
++DECL_TEMPLATE(netbsd, sys_socketpair);
++DECL_TEMPLATE(netbsd, sys_adjtime);
++DECL_TEMPLATE(netbsd, sys_quotactl);
++DECL_TEMPLATE(netbsd, sys_nfssvc);
++DECL_TEMPLATE(netbsd, sys_getfh);
++DECL_TEMPLATE(netbsd, sys_getdomainname);
++DECL_TEMPLATE(netbsd, sys_setdomainname);
++DECL_TEMPLATE(netbsd, sys_uname);
++DECL_TEMPLATE(netbsd, sys_sysarch);
++DECL_TEMPLATE(netbsd, sys_rtprio);
++DECL_TEMPLATE(netbsd, sys_semsys);
++DECL_TEMPLATE(netbsd, sys_msgsys);
++DECL_TEMPLATE(netbsd, sys_shmsys);
++DECL_TEMPLATE(netbsd, sys_pread);
++DECL_TEMPLATE(netbsd, sys_pwrite);
++DECL_TEMPLATE(netbsd, sys_ntp_adjtime);
++DECL_TEMPLATE(netbsd, sys_setegid);
++DECL_TEMPLATE(netbsd, sys_seteuid);
++DECL_TEMPLATE(netbsd, sys_stat);
++DECL_TEMPLATE(netbsd, sys_fstat);
++DECL_TEMPLATE(netbsd, sys_lstat);
++DECL_TEMPLATE(netbsd, sys_pathconf);
++DECL_TEMPLATE(netbsd, sys_fpathconf);
++DECL_TEMPLATE(netbsd, sys_getdirentries);
++DECL_TEMPLATE(netbsd, sys_mmap);
++DECL_TEMPLATE(netbsd, sys___syscall);
++DECL_TEMPLATE(netbsd, sys_lseek);
++DECL_TEMPLATE(netbsd, sys_truncate);
++DECL_TEMPLATE(netbsd, sys_ftruncate);
++DECL_TEMPLATE(netbsd, sys___sysctl);
++DECL_TEMPLATE(netbsd, sys_undelete);
++DECL_TEMPLATE(netbsd, sys_futimes);
++DECL_TEMPLATE(netbsd, sys_nfs_fhopen);
++DECL_TEMPLATE(netbsd, sys___semctl7);
++DECL_TEMPLATE(netbsd, sys___semctl);
++DECL_TEMPLATE(netbsd, sys_semget);
++DECL_TEMPLATE(netbsd, sys_semop);
++DECL_TEMPLATE(netbsd, sys_msgctl);
++DECL_TEMPLATE(netbsd, sys_msgget);
++DECL_TEMPLATE(netbsd, sys_msgsnd);
++DECL_TEMPLATE(netbsd, sys_msgrcv);
++DECL_TEMPLATE(netbsd, sys_shmat);
++DECL_TEMPLATE(netbsd, sys_shmctl);
++DECL_TEMPLATE(netbsd, sys_shmctl7);
++DECL_TEMPLATE(netbsd, sys_shmdt);
++DECL_TEMPLATE(netbsd, sys_shmget);
++DECL_TEMPLATE(netbsd, sys_clock_gettime);
++DECL_TEMPLATE(netbsd, sys_clock_settime);
++DECL_TEMPLATE(netbsd, sys_clock_getres);
++DECL_TEMPLATE(netbsd, sys_minherit);
++DECL_TEMPLATE(netbsd, sys_rfork);
++DECL_TEMPLATE(netbsd, sys_issetugid);
++DECL_TEMPLATE(netbsd, sys_lchmod);
++DECL_TEMPLATE(netbsd, sys_lutimes);
++DECL_TEMPLATE(netbsd, sys_netbsd_msync);
++DECL_TEMPLATE(netbsd, sys_nstat);
++DECL_TEMPLATE(netbsd, sys_nfstat);
++DECL_TEMPLATE(netbsd, sys_nlstat);
++DECL_TEMPLATE(netbsd, sys_fhstatfs);
++DECL_TEMPLATE(netbsd, sys_fhopen);
++DECL_TEMPLATE(netbsd, sys_fhstat);
++DECL_TEMPLATE(netbsd, sys_modnext);
++DECL_TEMPLATE(netbsd, sys_modstat);
++DECL_TEMPLATE(netbsd, sys_modfnext);
++DECL_TEMPLATE(netbsd, sys_modfind);
++DECL_TEMPLATE(netbsd, sys_kldload);
++DECL_TEMPLATE(netbsd, sys_kldunload);
++DECL_TEMPLATE(netbsd, sys_kldfind);
++DECL_TEMPLATE(netbsd, sys_kldnext);
++DECL_TEMPLATE(netbsd, sys_kldstat);
++DECL_TEMPLATE(netbsd, sys_kldfirstmod);
++DECL_TEMPLATE(netbsd, sys_setresuid);
++DECL_TEMPLATE(netbsd, sys_setresgid);
++DECL_TEMPLATE(netbsd, sys_aio_return);
++DECL_TEMPLATE(netbsd, sys_aio_suspend);
++DECL_TEMPLATE(netbsd, sys_aio_cancel);
++DECL_TEMPLATE(netbsd, sys_aio_error);
++DECL_TEMPLATE(netbsd, sys_aio_read);
++DECL_TEMPLATE(netbsd, sys_aio_write);
++DECL_TEMPLATE(netbsd, sys_lio_listio);
++DECL_TEMPLATE(netbsd, sys_yield);
++DECL_TEMPLATE(netbsd, sys_thr_sleep);
++DECL_TEMPLATE(netbsd, sys_thr_wakeup);
++DECL_TEMPLATE(netbsd, sys_munlockall);
++DECL_TEMPLATE(netbsd, sys___getcwd);
++DECL_TEMPLATE(netbsd, sys_sched_setparam);
++DECL_TEMPLATE(netbsd, sys_sched_getparam);
++DECL_TEMPLATE(netbsd, sys_sched_setscheduler);
++DECL_TEMPLATE(netbsd, sys_sched_getscheduler);
++DECL_TEMPLATE(netbsd, sys_sched_yield);
++DECL_TEMPLATE(netbsd, sys_sched_get_priority_max);
++DECL_TEMPLATE(netbsd, sys_sched_get_priority_min);
++DECL_TEMPLATE(netbsd, sys_sched_rr_get_interval);
++DECL_TEMPLATE(netbsd, sys_utrace);
++DECL_TEMPLATE(netbsd, sys_kldsym);
++DECL_TEMPLATE(netbsd, sys_jail);
++DECL_TEMPLATE(netbsd, sys_sigprocmask);
++DECL_TEMPLATE(netbsd, sys_sigsuspend);
++DECL_TEMPLATE(netbsd, sys_sigaction);
++DECL_TEMPLATE(netbsd, sys_sigpending);
++DECL_TEMPLATE(netbsd, sys_sigreturn);
++DECL_TEMPLATE(netbsd, sys_fake_sigreturn);
++DECL_TEMPLATE(netbsd, sys_sigtimedwait);
++DECL_TEMPLATE(netbsd, sys_sigwaitinfo);
++DECL_TEMPLATE(netbsd, sys_getcontext);
++DECL_TEMPLATE(netbsd, sys_setcontext);
++DECL_TEMPLATE(netbsd, sys_swapcontext);
++DECL_TEMPLATE(netbsd, sys___acl_get_file);
++DECL_TEMPLATE(netbsd, sys___acl_set_file);
++DECL_TEMPLATE(netbsd, sys___acl_get_fd);
++DECL_TEMPLATE(netbsd, sys___acl_set_fd);
++DECL_TEMPLATE(netbsd, sys___acl_delete_file);
++DECL_TEMPLATE(netbsd, sys___acl_delete_fd);
++DECL_TEMPLATE(netbsd, sys___acl_aclcheck_file);
++DECL_TEMPLATE(netbsd, sys___acl_aclcheck_fd);
++DECL_TEMPLATE(netbsd, sys___acl_get_link);
++DECL_TEMPLATE(netbsd, sys___acl_set_link);
++DECL_TEMPLATE(netbsd, sys___acl_delete_link);
++DECL_TEMPLATE(netbsd, sys___acl_aclcheck_link);
++DECL_TEMPLATE(netbsd, sys_extattrctl);
++DECL_TEMPLATE(netbsd, sys_extattr_set_file);
++DECL_TEMPLATE(netbsd, sys_extattr_get_file);
++DECL_TEMPLATE(netbsd, sys_extattr_delete_file);
++DECL_TEMPLATE(netbsd, sys_aio_waitcomplete);
++DECL_TEMPLATE(netbsd, sys_getresuid);
++DECL_TEMPLATE(netbsd, sys_getresgid);
++DECL_TEMPLATE(netbsd, sys_kqueue);
++DECL_TEMPLATE(netbsd, sys_kevent);
++DECL_TEMPLATE(netbsd, sys_sendfile);
++DECL_TEMPLATE(netbsd, sys_statfs6);
++DECL_TEMPLATE(netbsd, sys_fstatfs6);
++DECL_TEMPLATE(netbsd, sys_fhstatfs6);
++DECL_TEMPLATE(netbsd, sys_thr_exit);
++DECL_TEMPLATE(netbsd, sys_thr_self);
++DECL_TEMPLATE(netbsd, sys_thr_set_name);
++DECL_TEMPLATE(netbsd, sys_rtprio_thread);
++DECL_TEMPLATE(netbsd, sys_fork);
++DECL_TEMPLATE(netbsd, sys_vfork);
++DECL_TEMPLATE(netbsd, sys_modfind);
++DECL_TEMPLATE(netbsd, sys_modstat);
++DECL_TEMPLATE(netbsd, sys_lkmnosys0);
++DECL_TEMPLATE(netbsd, sys_lkmnosys1);
++DECL_TEMPLATE(netbsd, sys_lkmnosys2);
++DECL_TEMPLATE(netbsd, sys_lkmnosys3);
++DECL_TEMPLATE(netbsd, sys_lkmnosys4);
++DECL_TEMPLATE(netbsd, sys_lkmnosys5);
++DECL_TEMPLATE(netbsd, sys_lkmnosys6);
++DECL_TEMPLATE(netbsd, sys_lkmnosys7);
++DECL_TEMPLATE(netbsd, sys_lkmnosys8);
++DECL_TEMPLATE(netbsd, sys_sigaction4);
++DECL_TEMPLATE(netbsd, sys_mmap7);
++DECL_TEMPLATE(netbsd, sys_lseek7);
++DECL_TEMPLATE(netbsd, sys_truncate7);
++DECL_TEMPLATE(netbsd, sys_ftruncate7);
++DECL_TEMPLATE(netbsd, sys_pread7);
++DECL_TEMPLATE(netbsd, sys_pwrite7);
++DECL_TEMPLATE(netbsd, sys__umtx_op);
++DECL_TEMPLATE(netbsd, sys__umtx_lock);
++DECL_TEMPLATE(netbsd, sys__umtx_unlock);
++DECL_TEMPLATE(netbsd, sys_thr_kill2);
++DECL_TEMPLATE(netbsd, sys_thr_wake);
++DECL_TEMPLATE(netbsd, sys_shm_open);
++DECL_TEMPLATE(netbsd, sys_shm_unlink);
++DECL_TEMPLATE(netbsd, sys_eaccess);
++DECL_TEMPLATE(netbsd, sys_cpuset);
++DECL_TEMPLATE(netbsd, sys_cpuset_setid);
++DECL_TEMPLATE(netbsd, sys_cpuset_getid);
++DECL_TEMPLATE(netbsd, sys_cpuset_getaffinity);
++DECL_TEMPLATE(netbsd, sys_cpuset_setaffinity);
++DECL_TEMPLATE(netbsd, sys_faccessat);
++DECL_TEMPLATE(netbsd, sys_fchmodat);
++DECL_TEMPLATE(netbsd, sys_fchownat);
++DECL_TEMPLATE(netbsd, sys_fexecve);
++DECL_TEMPLATE(netbsd, sys_fstatat);
++DECL_TEMPLATE(netbsd, sys_futimesat);
++DECL_TEMPLATE(netbsd, sys_linkat);
++DECL_TEMPLATE(netbsd, sys_mkdirat);
++DECL_TEMPLATE(netbsd, sys_mkfifoat);
++DECL_TEMPLATE(netbsd, sys_mknodat);
++DECL_TEMPLATE(netbsd, sys_openat);
++DECL_TEMPLATE(netbsd, sys_readlinkat);
++DECL_TEMPLATE(netbsd, sys_renameat);
++DECL_TEMPLATE(netbsd, sys_symlinkat);
++DECL_TEMPLATE(netbsd, sys_unlinkat);
++DECL_TEMPLATE(netbsd, sys_posix_openpt);
++DECL_TEMPLATE(netbsd, sys_kenv);
++DECL_TEMPLATE(netbsd, sys_uuidgen);
++DECL_TEMPLATE(netbsd, sys_thr_new);
++DECL_TEMPLATE(netbsd, sys_thr_kill);
++DECL_TEMPLATE(netbsd, sys_thr_kill2);
++DECL_TEMPLATE(netbsd, sys_fcntl);
++DECL_TEMPLATE(netbsd, sys_ioctl);
++DECL_TEMPLATE(netbsd, sys_mq_open);
++DECL_TEMPLATE(netbsd, sys_mq_unlink);
++#endif
++
++#endif // __PRIV_SYSWRAP_NETBSD_H
++
++/*--------------------------------------------------------------------*/
++/*--- end ---*/
++/*--------------------------------------------------------------------*/
diff --git a/valgrind-netbsd/patches/patch-coregrind_m__syswrap_priv__types__n__macros.h b/valgrind-netbsd/patches/patch-coregrind_m__syswrap_priv__types__n__macros.h
new file mode 100644
index 0000000000..f9e1efc9ff
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-coregrind_m__syswrap_priv__types__n__macros.h
@@ -0,0 +1,91 @@
+$NetBSD$
+
+--- coregrind/m_syswrap/priv_types_n_macros.h.orig 2018-05-05 07:42:22.000000000 +0000
++++ coregrind/m_syswrap/priv_types_n_macros.h
+@@ -52,6 +52,9 @@
+ /* Arguments for a syscall. */
+ typedef
+ struct SyscallArgs {
++#if defined(VGO_netbsd)
++ Word klass;
++#endif
+ Word sysno;
+ RegWord arg1;
+ RegWord arg2;
+@@ -102,6 +105,15 @@ typedef
+ Int o_arg6;
+ Int uu_arg7;
+ Int uu_arg8;
++# elif defined(VGP_amd64_netbsd)
++ Int o_arg1;
++ Int o_arg2;
++ Int o_arg3;
++ Int o_arg4;
++ Int o_arg5;
++ Int o_arg6;
++ Int s_arg7;
++ Int s_arg8;
+ # elif defined(VGP_mips32_linux)
+ Int o_arg1;
+ Int o_arg2;
+@@ -185,7 +197,7 @@ typedef
+ extern
+ SyscallTableEntry* ML_(get_linux_syscall_entry)( UInt sysno );
+
+-#elif defined(VGO_darwin)
++#elif defined(VGO_darwin) || defined(VGO_netbsd)
+ /* XXX: Darwin still uses the old scheme of exposing the table
+ array(s) and size(s) directly to syswrap-main.c. This should be
+ fixed. */
+@@ -275,7 +287,7 @@ SyscallTableEntry* ML_(get_solaris_sysca
+ vgSysWrap_##auxstr##_##name##_after
+
+ /* Add a generic wrapper to a syscall table. */
+-#if defined(VGO_linux) || defined(VGO_solaris)
++#if defined(VGO_linux) || defined(VGO_solaris) || defined(VGO_netbsd)
+ # define GENX_(sysno, name) WRAPPER_ENTRY_X_(generic, sysno, name)
+ # define GENXY(sysno, name) WRAPPER_ENTRY_XY(generic, sysno, name)
+ #elif defined(VGO_darwin)
+@@ -290,6 +302,10 @@ SyscallTableEntry* ML_(get_solaris_sysca
+ #define LINX_(sysno, name) WRAPPER_ENTRY_X_(linux, sysno, name)
+ #define LINXY(sysno, name) WRAPPER_ENTRY_XY(linux, sysno, name)
+
++/* Add a NetBSD-specific, arch-independent wrapper to a syscall
++ table. */
++#define BSDX_(sysno, name) WRAPPER_ENTRY_X_(netbsd, sysno, name)
++#define BSDXY(sysno, name) WRAPPER_ENTRY_XY(netbsd, sysno, name)
+
+ /* ---------------------------------------------------------------------
+ Macros useful for writing wrappers concisely. These refer to the
+@@ -335,7 +351,7 @@ static inline UWord getRES ( SyscallStat
+ return sr_Res(st->sres);
+ }
+
+-#if defined(VGO_darwin) || defined(VGO_solaris)
++#if defined(VGO_darwin) || defined(VGO_solaris) || defined(VGO_netbsd)
+ static inline UWord getRESHI ( SyscallStatus* st ) {
+ vg_assert(st->what == SsComplete);
+ vg_assert(!sr_isError(st->sres));
+@@ -356,6 +372,13 @@ static inline UWord getERR ( SyscallStat
+ status->sres = VG_(mk_SysRes_Success)(zzz); \
+ } while (0)
+
++#ifdef VGO_netbsd
++#define SET_STATUS_Success2(zzz, zzz2) \
++ do { status->what = SsComplete; \
++ status->sres = VG_(mk_SysRes_amd64_netbsd)(zzz, zzz2, False); \
++ } while (0)
++#endif
++
+ #define SET_STATUS_Failure(zzz) \
+ do { Word wzz = (Word)(zzz); \
+ /* Catch out wildly bogus error values. */ \
+@@ -419,7 +442,7 @@ static inline UWord getERR ( SyscallStat
+ # define PRA7(s,t,a) PSRAn(7,s,t,a)
+ # define PRA8(s,t,a) PSRAn(8,s,t,a)
+
+-#elif defined(VGP_amd64_darwin) || defined(VGP_amd64_solaris)
++#elif defined(VGP_amd64_darwin) || defined(VGP_amd64_solaris) || defined(VGP_amd64_netbsd)
+ /* Up to 8 parameters, 6 in registers, 2 on the stack. */
+ # define PRA1(s,t,a) PRRAn(1,s,t,a)
+ # define PRA2(s,t,a) PRRAn(2,s,t,a)
diff --git a/valgrind-netbsd/patches/patch-coregrind_m__syswrap_syscall-amd64-netbsd.S b/valgrind-netbsd/patches/patch-coregrind_m__syswrap_syscall-amd64-netbsd.S
new file mode 100644
index 0000000000..62311163b7
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-coregrind_m__syswrap_syscall-amd64-netbsd.S
@@ -0,0 +1,208 @@
+$NetBSD$
+
+--- coregrind/m_syswrap/syscall-amd64-netbsd.S.orig 2019-03-29 10:55:22.205669362 +0000
++++ coregrind/m_syswrap/syscall-amd64-netbsd.S
+@@ -0,0 +1,203 @@
++
++/*--------------------------------------------------------------------*/
++/*--- Support for doing system calls. syscall-amd64-netbsd.S ---*/
++/*--------------------------------------------------------------------*/
++
++/*
++ This file is part of Valgrind, a dynamic binary instrumentation
++ framework.
++
++ Copyright (C) 2000-2008 Julian Seward
++ jseward%acm.org@localhost
++
++ This program is free software; you can redistribute it and/or
++ modify it under the terms of the GNU General Public License as
++ published by the Free Software Foundation; either version 2 of the
++ License, or (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful, but
++ WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
++ 02111-1307, USA.
++
++ The GNU General Public License is contained in the file COPYING.
++*/
++
++#if defined(VGP_amd64_netbsd)
++
++#include "pub_core_basics_asm.h"
++#include "pub_core_vkiscnums_asm.h"
++#include "libvex_guest_offsets.h"
++
++
++/*----------------------------------------------------------------*/
++/*
++ Perform a syscall for the client. This will run a syscall
++ with the client's specific per-thread signal mask.
++
++ The structure of this function is such that, if the syscall is
++ interrupted by a signal, we can determine exactly what
++ execution state we were in with respect to the execution of
++ the syscall by examining the value of %eip in the signal
++ handler. This means that we can always do the appropriate
++ thing to precisely emulate the kernel's signal/syscall
++ interactions.
++
++ The syscall number is taken from the argument, even though it
++ should also be in guest_state->guest_RAX. The syscall result
++ is written back to guest_state->guest_RAX on completion.
++
++ Returns 0 if the syscall was successfully called (even if the
++ syscall itself failed), or a -ve error code if one of the
++ sigprocmasks failed (there's no way to determine which one
++ failed).
++
++ VG_(fixup_guest_state_after_syscall_interrupted) does the
++ thread state fixup in the case where we were interrupted by a
++ signal.
++
++ Prototype:
++
++ Int ML_(do_syscall_for_client_WRK(
++ Int syscallno, // rdi
++ void* guest_state, // rsi
++ const vki_sigset_t *sysmask, // rdx
++ const vki_sigset_t *postmask, // rcx
++ Int sigsetSzB) // r8
++*/
++
++/* from vki_arch.h */
++#define VKI_SIG_SETMASK 3
++
++.globl ML_(do_syscall_for_client_WRK)
++ML_(do_syscall_for_client_WRK):
++ /* save callee-saved regs */
++ pushq %rbp
++ movq %rsp, %rbp
++ pushq %rdi // -8(%rbp) syscallno
++ pushq %rsi // -16(%rbp) guest_state
++ pushq %rdx // -24(%rbp) sysmask
++ pushq %rcx // -32(%rbp) postmask
++ pushq %r8 // -40(%rbp) sigsetSzB
++
++1: /* Even though we can't take a signal until the sigprocmask completes,
++ start the range early.
++ If eip is in the range [1,2), the syscall hasn't been started yet */
++
++ /* Set the signal mask which should be current during the syscall. */
++ /* Save and restore all 5 arg regs round the call. This is easier
++ than figuring out the minimal set to save/restore. */
++
++ movq $__NR___sigprocmask14, %rax // syscall #
++ movq $VKI_SIG_SETMASK, %rdi // how
++ movq %rdx, %rsi // sysmask
++ movq %rcx, %rdx // postmask
++ syscall
++
++ jb 7f /* sigprocmask failed */
++
++ /* OK, that worked. Now do the syscall proper. */
++
++ /* 6 register parameters */
++ movq -16(%rbp), %r11 /* r11 = VexGuestAMD64State * */
++ movq OFFSET_amd64_RDI(%r11), %rdi
++ movq OFFSET_amd64_RSI(%r11), %rsi
++ movq OFFSET_amd64_RDX(%r11), %rdx
++ movq OFFSET_amd64_R10(%r11), %r10
++ movq OFFSET_amd64_R8(%r11), %r8
++ movq OFFSET_amd64_R9(%r11), %r9
++ /* 2 stack parameters plus return address (ignored by syscall) */
++ movq OFFSET_amd64_RSP(%r11), %r11 /* r11 = simulated RSP */
++ movq 16(%r11), %rax
++ pushq %rax
++ movq 8(%r11), %rax
++ pushq %rax
++ /* (fake) return address. */
++ movq 0(%r11), %rax
++ pushq %rax
++ /* syscallno */
++ movq -8(%rbp), %rax
++
++ /* If rip==2, then the syscall was either just about
++ to start, or was interrupted and the kernel was
++ restarting it. */
++2: syscall
++3: /* In the range [3, 4), the syscall result is in %rax,
++ but hasn't been committed to RAX. */
++
++ /* stack contents: 3 words for syscall above, plus our prologue */
++ setc 0(%rsp) /* stash returned carry flag */
++
++ movq -16(%rbp), %r11 /* r11 = VexGuestAMD64State * */
++ movq %rax, OFFSET_amd64_RAX(%r11) /* save back to RAX */
++ movq %rdx, OFFSET_amd64_RDX(%r11) /* save back to RDX */
++
++ /* save carry flag to VEX */
++ xorq %rax, %rax
++ movb 0(%rsp), %al
++ movq %rax, %rdi /* arg1 = new flag */
++ movq %r11, %rsi /* arg2 = vex state */
++ addq $24, %rsp /* remove syscall parameters */
++ call LibVEX_GuestAMD64_put_rflag_c
++
++4: /* Re-block signals. If eip is in [4,5), then the syscall
++ is complete and we needn't worry about it. */
++
++ movq $__NR___sigprocmask14, %rax // syscall #
++ movq $VKI_SIG_SETMASK, %rdi // how
++ movq -32(%rbp), %rsi // postmask
++ xorq %rdx, %rdx // NULL
++ syscall
++
++ jb 7f /* sigprocmask failed */
++
++5: /* now safe from signals */
++
++ xorq %rax,%rax
++ movq -8(%rbp), %rdi
++ movq -16(%rbp), %rsi
++ movq -24(%rbp), %rdx
++ movq -32(%rbp), %rcx
++ movq -40(%rbp), %r8
++ movq %rbp, %rsp
++ popq %rbp
++ ret
++
++7: /* failure: return 0x8000 | error code */
++ orq $0x8000, %rax
++ movq -8(%rbp), %rdi
++ movq -16(%rbp), %rsi
++ movq -24(%rbp), %rdx
++ movq -32(%rbp), %rcx
++ movq -40(%rbp), %r8
++ movq %rbp, %rsp
++ popq %rbp
++ ret
++
++.section .rodata
++/* export the ranges so that
++ VG_(fixup_guest_state_after_syscall_interrupted) can do the
++ right thing */
++
++.globl ML_(blksys_setup)
++.globl ML_(blksys_restart)
++.globl ML_(blksys_complete)
++.globl ML_(blksys_committed)
++.globl ML_(blksys_finished)
++ML_(blksys_setup): .quad 1b
++ML_(blksys_restart): .quad 2b
++ML_(blksys_complete): .quad 3b
++ML_(blksys_committed): .quad 4b
++ML_(blksys_finished): .quad 5b
++.previous
++
++#endif /* defined(VGP_amd64_netbsd) */
++
++/*--------------------------------------------------------------------*/
++/*--- end ---*/
++/*--------------------------------------------------------------------*/
diff --git a/valgrind-netbsd/patches/patch-coregrind_m__syswrap_syswrap-amd64-netbsd.c b/valgrind-netbsd/patches/patch-coregrind_m__syswrap_syswrap-amd64-netbsd.c
new file mode 100644
index 0000000000..9968f27919
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-coregrind_m__syswrap_syswrap-amd64-netbsd.c
@@ -0,0 +1,725 @@
+$NetBSD$
+
+--- coregrind/m_syswrap/syswrap-amd64-netbsd.c.orig 2019-03-29 10:08:50.727690024 +0000
++++ coregrind/m_syswrap/syswrap-amd64-netbsd.c
+@@ -0,0 +1,720 @@
++
++/*--------------------------------------------------------------------*/
++/*--- Platform-specific syscalls stuff. syswrap-amd64-netbsd.c ---*/
++/*--------------------------------------------------------------------*/
++
++/*
++ This file is part of Valgrind, a dynamic binary instrumentation
++ framework.
++
++ Copyright (C) 2000-2005 Nicholas Nethercote
++ njn%valgrind.org@localhost
++
++ This program is free software; you can redistribute it and/or
++ modify it under the terms of the GNU General Public License as
++ published by the Free Software Foundation; either version 2 of the
++ License, or (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful, but
++ WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
++ 02111-1307, USA.
++
++ The GNU General Public License is contained in the file COPYING.
++*/
++
++#if defined(VGP_amd64_netbsd)
++
++#include "pub_core_basics.h"
++#include "pub_core_vki.h"
++#include "pub_core_vkiscnums.h"
++#include "pub_core_libcsetjmp.h" // to keep _threadstate.h happy
++#include "pub_core_threadstate.h"
++#include "pub_core_aspacemgr.h"
++#include "pub_core_debuglog.h"
++#include "pub_core_libcbase.h"
++#include "pub_core_libcassert.h"
++#include "pub_core_libcprint.h"
++#include "pub_core_libcproc.h"
++#include "pub_core_libcsignal.h"
++#include "pub_core_machine.h"
++#include "pub_core_options.h"
++#include "pub_core_scheduler.h"
++#include "pub_core_sigframe.h"
++#include "pub_core_signals.h"
++#include "pub_core_syscall.h"
++#include "pub_core_syswrap.h"
++#include "pub_core_tooliface.h"
++#include "pub_core_stacks.h" // VG_(register_stack)
++
++#include "priv_types_n_macros.h"
++#include "priv_syswrap-generic.h" /* for decls of generic wrappers */
++#include "priv_syswrap-netbsd.h" /* for decls of netbsd-ish wrappers */
++#include "priv_syswrap-main.h"
++
++/* ---------------------------------------------------------------------
++ clone() handling
++ ------------------------------------------------------------------ */
++
++/* Call f(arg1), but first switch stacks, using 'stack' as the new
++ stack, and use 'retaddr' as f's return-to address. Also, clear all
++ the integer registers before entering f. */
++__attribute__((noreturn))
++void ML_(call_on_new_stack_0_1) ( Addr stack,
++ Addr retaddr,
++ void (*f)(Word),
++ Word arg1 );
++// %rdi == stack
++// %rsi == retaddr
++// %rdx == f
++// %rcx == arg1
++asm(
++".text\n"
++".globl vgModuleLocal_call_on_new_stack_0_1\n"
++"vgModuleLocal_call_on_new_stack_0_1:\n"
++" movq %rdi, %rsp\n" // set stack
++" pushq %rsi\n" // retaddr to stack
++" pushq %rdx\n" // f to stack
++" pushq %rcx\n" // arg1 to stack
++" movq $0, %rax\n" // zero all GP regs
++" movq $0, %rbx\n"
++" movq $0, %rcx\n"
++" movq $0, %rdx\n"
++" movq $0, %rsi\n"
++" movq $0, %rdi\n"
++" movq $0, %rbp\n"
++" movq $0, %r8\n"
++" movq $0, %r9\n"
++" movq $0, %r10\n"
++" movq $0, %r11\n"
++" movq $0, %r12\n"
++" movq $0, %r13\n"
++" movq $0, %r14\n"
++" movq $0, %r15\n"
++" popq %rdi\n" // arg1 to correct arg reg
++" ret\n" // jump to f
++" ud2\n" // should never get here
++".previous\n"
++);
++
++
++/* ---------------------------------------------------------------------
++ More thread stuff
++ ------------------------------------------------------------------ */
++
++void VG_(cleanup_thread) ( ThreadArchState *arch )
++{
++}
++
++/* ---------------------------------------------------------------------
++ PRE/POST wrappers for amd64/netbsd-specific syscalls
++ ------------------------------------------------------------------ */
++
++#define PRE(name) DEFN_PRE_TEMPLATE(netbsd, name)
++#define POST(name) DEFN_POST_TEMPLATE(netbsd, name)
++
++#if 0
++PRE(sys_thr_new)
++{
++ static const Bool debug = False;
++
++ ThreadId ctid = VG_(alloc_ThreadState)();
++ ThreadState* ptst = VG_(get_ThreadState)(tid);
++ ThreadState* ctst = VG_(get_ThreadState)(ctid);
++ SysRes res;
++ vki_sigset_t blockall, savedmask;
++ struct vki_thr_param tp;
++ Addr stk;
++
++ PRINT("thr_new ( %#lx, %ld )",ARG1,ARG2);
++ PRE_REG_READ2(int, "thr_new",
++ struct thr_param *, param,
++ int, param_size);
++
++ PRE_MEM_READ( "thr_new(param)", ARG1, offsetof(struct vki_thr_param, spare));
++ if (!ML_(safe_to_deref)( (void*)ARG1, offsetof(struct vki_thr_param, spare))) {
++ SET_STATUS_Failure( VKI_EFAULT );
++ return;
++ }
++ VG_(memset)(&tp, 0, sizeof(tp));
++ VG_(memcpy)(&tp, (void *)ARG1, offsetof(struct vki_thr_param, spare));
++ PRE_MEM_WRITE("thr_new(parent_tidptr)", (Addr)tp.parent_tid, sizeof(long));
++ PRE_MEM_WRITE("thr_new(child_tidptr)", (Addr)tp.child_tid, sizeof(long));
++
++ VG_(sigfillset)(&blockall);
++
++ vg_assert(VG_(is_running_thread)(tid));
++ vg_assert(VG_(is_valid_tid)(ctid));
++
++ /* Copy register state
++
++ On linux, both parent and child return to the same place, and the code
++ following the clone syscall works out which is which, so we
++ don't need to worry about it.
++ On netbsd, thr_new arranges a direct call. We don't actually need any
++ of this gunk.
++
++ The parent gets the child's new tid returned from clone, but the
++ child gets 0.
++
++ If the clone call specifies a NULL rsp for the new thread, then
++ it actually gets a copy of the parent's rsp.
++ */
++ /* We inherit our parent's guest state. */
++ ctst->arch.vex = ptst->arch.vex;
++ ctst->arch.vex_shadow1 = ptst->arch.vex_shadow1;
++ ctst->arch.vex_shadow2 = ptst->arch.vex_shadow2;
++
++ /* Make thr_new appear to have returned Success(0) in the
++ child. */
++ ctst->arch.vex.guest_RAX = 0;
++ ctst->arch.vex.guest_RDX = 0;
++ LibVEX_GuestAMD64_put_rflag_c(0, &ctst->arch.vex);
++
++ ctst->os_state.parent = tid;
++
++ /* inherit signal mask */
++ ctst->sig_mask = ptst->sig_mask;
++ ctst->tmp_sig_mask = ptst->sig_mask;
++
++ /* Linux has to guess, we don't */
++ ctst->client_stack_highest_byte = (Addr)tp.stack_base + tp.stack_size;
++ ctst->client_stack_szB = tp.stack_size;
++ VG_(register_stack)((Addr)tp.stack_base, (Addr)tp.stack_base + tp.stack_size);
++
++ /* Assume the thr_new will succeed, and tell any tool that wants to
++ know that this thread has come into existence. If the thr_new
++ fails, we'll send out a ll_exit notification for it at the out:
++ label below, to clean up. */
++ VG_TRACK ( pre_thread_ll_create, tid, ctid );
++
++ if (debug)
++ VG_(printf)("clone child has SETTLS: tls at %#lx\n", (Addr)tp.tls_base);
++ ctst->arch.vex.guest_FS_CONST = (UWord)tp.tls_base;
++ tp.tls_base = 0; /* Don't have the kernel do it too */
++
++ /* start the thread with everything blocked */
++ VG_(sigprocmask)(VKI_SIG_SETMASK, &blockall, &savedmask);
++
++ /* Set the client state for scheduler to run libthr's trampoline */
++ ctst->arch.vex.guest_RDI = (Addr)tp.arg;
++ /* XXX: align on 16-byte boundary? */
++ ctst->arch.vex.guest_RSP = (Addr)tp.stack_base + tp.stack_size - 8;
++ ctst->arch.vex.guest_RIP = (Addr)tp.start_func;
++
++ /* But this is for thr_new() to run valgrind's trampoline */
++ tp.start_func = (void *)ML_(start_thread_NORETURN);
++ tp.arg = &VG_(threads)[ctid];
++
++ /* And valgrind's trampoline on its own stack */
++ stk = ML_(allocstack)(ctid);
++ if (stk == (Addr)NULL) {
++ res = VG_(mk_SysRes_Error)( VKI_ENOMEM );
++ goto fail;
++ }
++ tp.stack_base = (void *)ctst->os_state.valgrind_stack_base;
++ tp.stack_size = (Addr)stk - (Addr)tp.stack_base;
++
++ /* Create the new thread */
++ res = VG_(do_syscall2)(__NR_thr_new, (UWord)&tp, sizeof(tp));
++
++ VG_(sigprocmask)(VKI_SIG_SETMASK, &savedmask, NULL);
++
++fail:
++ if (sr_isError(res)) {
++ /* thr_new failed */
++ VG_(cleanup_thread)(&ctst->arch);
++ ctst->status = VgTs_Empty;
++ /* oops. Better tell the tool the thread exited in a hurry :-) */
++ VG_TRACK( pre_thread_ll_exit, ctid );
++ } else {
++
++ POST_MEM_WRITE((Addr)tp.parent_tid, sizeof(long));
++ POST_MEM_WRITE((Addr)tp.child_tid, sizeof(long));
++
++ /* Thread creation was successful; let the child have the chance
++ to run */
++ *flags |= SfYieldAfter;
++ }
++
++ /* "Complete" the syscall so that the wrapper doesn't call the kernel again. */
++ SET_STATUS_from_SysRes(res);
++}
++
++PRE(sys_rfork)
++{
++ PRINT("sys_rfork ( %#lx )", ARG1 );
++ PRE_REG_READ1(long, "rfork", int, flags);
++
++ VG_(message)(Vg_UserMsg, "rfork() not implemented");
++ VG_(unimplemented)("Valgrind does not support rfork().");
++
++ SET_STATUS_Failure(VKI_ENOSYS);
++}
++
++PRE(sys_sigreturn)
++{
++ PRINT("sys_sigreturn ( %#lx )", ARG1);
++ PRE_REG_READ1(long, "sigreturn",
++ struct vki_ucontext *, ucp);
++
++ PRE_MEM_READ( "sigreturn(ucp)", ARG1, sizeof(struct vki_ucontext) );
++ PRE_MEM_WRITE( "sigreturn(ucp)", ARG1, sizeof(struct vki_ucontext) );
++}
++
++PRE(sys_fake_sigreturn)
++{
++ ThreadState* tst;
++ struct vki_ucontext *uc;
++ int rflags;
++
++ PRINT("sys_sigreturn ( %#lx )", ARG1);
++ PRE_REG_READ1(long, "sigreturn",
++ struct vki_ucontext *, ucp);
++
++ PRE_MEM_READ( "sigreturn(ucp)", ARG1, sizeof(struct vki_ucontext) );
++ PRE_MEM_WRITE( "sigreturn(ucp)", ARG1, sizeof(struct vki_ucontext) );
++
++ vg_assert(VG_(is_valid_tid)(tid));
++ vg_assert(tid >= 1 && tid < VG_N_THREADS);
++ vg_assert(VG_(is_running_thread)(tid));
++
++ /* Adjust rsp to point to start of frame; skip back up over handler
++ ret addr */
++ tst = VG_(get_ThreadState)(tid);
++ tst->arch.vex.guest_RSP -= sizeof(Addr);
++
++ uc = (struct vki_ucontext *)ARG1;
++ if (uc == NULL || uc->uc_mcontext.len != sizeof(uc->uc_mcontext)) {
++ SET_STATUS_Failure(VKI_EINVAL);
++ return;
++ }
++
++ /* This is only so that the RIP is (might be) useful to report if
++ something goes wrong in the sigreturn */
++ ML_(fixup_guest_state_to_restart_syscall)(&tst->arch);
++
++ VG_(sigframe_destroy)(tid);
++
++ /* For unclear reasons, it appears we need the syscall to return
++ without changing %RAX. Since %RAX is the return value, and can
++ denote either success or failure, we must set up so that the
++ driver logic copies it back unchanged. Also, note %RAX is one of
++ the guest registers written by VG_(sigframe_destroy). */
++ rflags = LibVEX_GuestAMD64_get_rflags(&tst->arch.vex);
++ SET_STATUS_from_SysRes( VG_(mk_SysRes_amd64_netbsd)( tst->arch.vex.guest_RAX,
++ tst->arch.vex.guest_RDX, (rflags & 1) != 0 ? True : False) );
++
++ /*
++ * Signal handler might have changed the signal mask. Respect that.
++ */
++ tst->sig_mask = uc->uc_sigmask;
++ tst->tmp_sig_mask = uc->uc_sigmask;
++
++ /* Tell the driver not to update the guest state with the "result",
++ and set a bogus result to keep it happy. */
++ *flags |= SfNoWriteResult;
++ SET_STATUS_Success(0);
++
++ /* Check to see if any signals arose as a result of this. */
++ *flags |= SfPollAfter;
++}
++
++static void restore_mcontext(ThreadState *tst, struct vki_mcontext *sc)
++{
++ tst->arch.vex.guest_RAX = sc->rax;
++ tst->arch.vex.guest_RCX = sc->rcx;
++ tst->arch.vex.guest_RDX = sc->rdx;
++ tst->arch.vex.guest_RBX = sc->rbx;
++ tst->arch.vex.guest_RBP = sc->rbp;
++ tst->arch.vex.guest_RSP = sc->rsp;
++ tst->arch.vex.guest_RSI = sc->rsi;
++ tst->arch.vex.guest_RDI = sc->rdi;
++ tst->arch.vex.guest_R8 = sc->r8;
++ tst->arch.vex.guest_R9 = sc->r9;
++ tst->arch.vex.guest_R10 = sc->r10;
++ tst->arch.vex.guest_R11 = sc->r11;
++ tst->arch.vex.guest_R12 = sc->r12;
++ tst->arch.vex.guest_R13 = sc->r13;
++ tst->arch.vex.guest_R14 = sc->r14;
++ tst->arch.vex.guest_R15 = sc->r15;
++ tst->arch.vex.guest_RIP = sc->rip;
++ /*
++ * XXX: missing support for other flags.
++ */
++ if (sc->rflags & 0x0001)
++ LibVEX_GuestAMD64_put_rflag_c(1, &tst->arch.vex);
++ else
++ LibVEX_GuestAMD64_put_rflag_c(0, &tst->arch.vex);
++}
++
++static void fill_mcontext(ThreadState *tst, struct vki_mcontext *sc)
++{
++ sc->rax = tst->arch.vex.guest_RAX;
++ sc->rcx = tst->arch.vex.guest_RCX;
++ sc->rdx = tst->arch.vex.guest_RDX;
++ sc->rbx = tst->arch.vex.guest_RBX;
++ sc->rbp = tst->arch.vex.guest_RBP;
++ sc->rsp = tst->arch.vex.guest_RSP;
++ sc->rsi = tst->arch.vex.guest_RSI;
++ sc->rdi = tst->arch.vex.guest_RDI;
++ sc->r8 = tst->arch.vex.guest_R8;
++ sc->r9 = tst->arch.vex.guest_R9;
++ sc->r10 = tst->arch.vex.guest_R10;
++ sc->r11 = tst->arch.vex.guest_R11;
++ sc->r12 = tst->arch.vex.guest_R12;
++ sc->r13 = tst->arch.vex.guest_R13;
++ sc->r14 = tst->arch.vex.guest_R14;
++ sc->r15 = tst->arch.vex.guest_R15;
++ sc->rip = tst->arch.vex.guest_RIP;
++/*
++ Not supported by VEX.
++ sc->cs = tst->arch.vex.guest_CS;
++ sc->ss = tst->arch.vex.guest_SS;
++ sc->ds = tst->arch.vex.guest_DS;
++ sc->es = tst->arch.vex.guest_ES;
++ sc->fs = tst->arch.vex.guest_FS;
++ sc->gs = tst->arch.vex.guest_GS;
++*/
++ sc->rflags = LibVEX_GuestAMD64_get_rflags(&tst->arch.vex);
++/*
++ not yet.
++ VG_(memcpy)(&sc->fpstate, fpstate, sizeof(*fpstate));
++*/
++ sc->fpformat = VKI_FPFMT_NODEV;
++ sc->ownedfp = VKI_FPOWNED_NONE;
++ sc->len = sizeof(*sc);
++ VG_(memset)(sc->spare2, 0, sizeof(sc->spare2));
++}
++
++PRE(sys_getcontext)
++{
++ ThreadState* tst;
++ struct vki_ucontext *uc;
++
++ PRINT("sys_getcontext ( %#lx )", ARG1);
++ PRE_REG_READ1(long, "getcontext",
++ struct vki_ucontext *, ucp);
++ PRE_MEM_WRITE( "getcontext(ucp)", ARG1, sizeof(struct vki_ucontext) );
++ uc = (struct vki_ucontext *)ARG1;
++ if (uc == NULL) {
++ SET_STATUS_Failure(VKI_EINVAL);
++ return;
++ }
++ tst = VG_(get_ThreadState)(tid);
++ fill_mcontext(tst, &uc->uc_mcontext);
++ uc->uc_mcontext.rax = 0;
++ uc->uc_mcontext.rdx = 0;
++ uc->uc_mcontext.rflags &= ~0x0001; /* PSL_C */
++ uc->uc_sigmask = tst->sig_mask;
++ VG_(memset)(uc->__spare__, 0, sizeof(uc->__spare__));
++ SET_STATUS_Success(0);
++}
++
++PRE(sys_setcontext)
++{
++ ThreadState* tst;
++ struct vki_ucontext *uc;
++
++ PRINT("sys_setcontext ( %#lx )", ARG1);
++ PRE_REG_READ1(long, "setcontext",
++ struct vki_ucontext *, ucp);
++
++ PRE_MEM_READ( "setcontext(ucp)", ARG1, sizeof(struct vki_ucontext) );
++ PRE_MEM_WRITE( "setcontext(ucp)", ARG1, sizeof(struct vki_ucontext) );
++
++ vg_assert(VG_(is_valid_tid)(tid));
++ vg_assert(tid >= 1 && tid < VG_N_THREADS);
++ vg_assert(VG_(is_running_thread)(tid));
++
++ tst = VG_(get_ThreadState)(tid);
++ uc = (struct vki_ucontext *)ARG1;
++ if (uc == NULL || uc->uc_mcontext.len != sizeof(uc->uc_mcontext)) {
++ SET_STATUS_Failure(VKI_EINVAL);
++ return;
++ }
++
++ restore_mcontext(tst, &uc->uc_mcontext);
++ tst->sig_mask = uc->uc_sigmask;
++
++ /* Tell the driver not to update the guest state with the "result",
++ and set a bogus result to keep it happy. */
++ *flags |= SfNoWriteResult;
++ SET_STATUS_Success(0);
++
++ /* Check to see if any signals arose as a result of this. */
++ *flags |= SfPollAfter;
++}
++
++PRE(sys_swapcontext)
++{
++ struct vki_ucontext *ucp, *oucp;
++ ThreadState* tst;
++
++ PRINT("sys_swapcontext ( %#lx, %#lx )", ARG1, ARG2);
++ PRE_REG_READ2(long, "swapcontext",
++ struct vki_ucontext *, oucp, struct vki_ucontext *, ucp);
++
++ PRE_MEM_READ( "swapcontext(ucp)", ARG2, sizeof(struct vki_ucontext) );
++ PRE_MEM_WRITE( "swapcontext(oucp)", ARG1, sizeof(struct vki_ucontext) );
++
++ oucp = (struct vki_ucontext *)ARG1;
++ ucp = (struct vki_ucontext *)ARG2;
++ if (oucp == NULL || ucp == NULL || ucp->uc_mcontext.len != sizeof(ucp->uc_mcontext)) {
++ SET_STATUS_Failure(VKI_EINVAL);
++ return;
++ }
++ tst = VG_(get_ThreadState)(tid);
++
++ /*
++ * Save the context.
++ */
++ fill_mcontext(tst, &oucp->uc_mcontext);
++ oucp->uc_mcontext.rax = 0;
++ oucp->uc_mcontext.rdx = 0;
++ oucp->uc_mcontext.rflags &= ~0x0001; /* PSL_C */
++ oucp->uc_sigmask = tst->sig_mask;
++ VG_(memset)(oucp->__spare__, 0, sizeof(oucp->__spare__));
++
++ /*
++ * Switch to new one.
++ */
++ restore_mcontext(tst, &ucp->uc_mcontext);
++ tst->sig_mask = ucp->uc_sigmask;
++
++ /* Tell the driver not to update the guest state with the "result",
++ and set a bogus result to keep it happy. */
++ *flags |= SfNoWriteResult;
++ SET_STATUS_Success(0);
++
++ /* Check to see if any signals arose as a result of this. */
++ *flags |= SfPollAfter;
++}
++
++
++/* This is here because on x86 the off_t is passed in 2 regs. Don't ask about pad. */
++
++/* caddr_t mmap(caddr_t addr, size_t len, int prot, int flags, int fd, int pad, off_t pos); */
++/* ARG1 ARG2 ARG3 ARG4 ARG5 ARG6 ARG7 */
++
++PRE(sys_mmap)
++{
++ SysRes r;
++
++ PRINT("sys_mmap ( %#lx, %lu, %ld, %ld, %ld, pad%ld, 0x%lx)",
++ ARG1, (UWord)ARG2, ARG3, ARG4, ARG5, ARG6, ARG7 );
++ PRE_REG_READ7(long, "mmap",
++ char *, addr, unsigned long, len, int, prot, int, flags,
++ int, fd, int, pad, unsigned long, pos);
++
++ r = ML_(generic_PRE_sys_mmap)( tid, ARG1, ARG2, ARG3, ARG4, ARG5, ARG7 );
++ SET_STATUS_from_SysRes(r);
++}
++
++/* netbsd-7 introduces a "regular" version of mmap etc. */
++PRE(sys_mmap7)
++{
++ SysRes r;
++
++ PRINT("sys_mmap ( %#lx, %lu, %ld, %ld, %ld, 0x%lx)",
++ ARG1, (UWord)ARG2, ARG3, ARG4, ARG5, ARG6 );
++ PRE_REG_READ6(long, "mmap",
++ char *, addr, unsigned long, len, int, prot, int, flags,
++ int, fd, unsigned long, pos);
++
++ r = ML_(generic_PRE_sys_mmap)( tid, ARG1, ARG2, ARG3, ARG4, ARG5, ARG6 );
++ SET_STATUS_from_SysRes(r);
++}
++
++PRE(sys_lseek)
++{
++ PRINT("sys_lseek ( %ld, 0x%lx, %#lx, %ld )", ARG1,ARG2,ARG3,ARG4);
++ PRE_REG_READ4(long, "lseek",
++ unsigned int, fd, int, pad, unsigned long, offset,
++ unsigned int, whence);
++}
++
++PRE(sys_lseek7)
++{
++ PRINT("sys_lseek ( %ld, 0x%lx, %ld )", ARG1,ARG2,ARG3);
++ PRE_REG_READ3(long, "lseek",
++ unsigned int, fd, unsigned long, offset,
++ unsigned int, whence);
++}
++
++PRE(sys_pread)
++{
++ *flags |= SfMayBlock;
++ PRINT("sys_read ( %ld, %#lx, %lu, %lu, %lu )", ARG1, ARG2, ARG3, ARG4, ARG5);
++ PRE_REG_READ5(ssize_t, "read",
++ unsigned int, fd, char *, buf, vki_size_t, count,
++ int, pad, unsigned long, off);
++
++ if (!ML_(fd_allowed)(ARG1, "read", tid, False))
++ SET_STATUS_Failure( VKI_EBADF );
++ else
++ PRE_MEM_WRITE( "read(buf)", ARG2, ARG3 );
++}
++
++POST(sys_pread)
++{
++ vg_assert(SUCCESS);
++ POST_MEM_WRITE( ARG2, RES );
++}
++
++PRE(sys_pread7)
++{
++ *flags |= SfMayBlock;
++ PRINT("sys_read ( %ld, %#lx, %lu, %lu )", ARG1, ARG2, ARG3, ARG4);
++ PRE_REG_READ4(ssize_t, "read",
++ unsigned int, fd, char *, buf, vki_size_t, count,
++ unsigned long, off);
++
++ if (!ML_(fd_allowed)(ARG1, "read", tid, False))
++ SET_STATUS_Failure( VKI_EBADF );
++ else
++ PRE_MEM_WRITE( "read(buf)", ARG2, ARG3 );
++}
++
++POST(sys_pread7)
++{
++ vg_assert(SUCCESS);
++ POST_MEM_WRITE( ARG2, RES );
++}
++
++PRE(sys_pwrite)
++{
++ Bool ok;
++ *flags |= SfMayBlock;
++ PRINT("sys_write ( %ld, %#lx, %lu, %lu, %lu )", ARG1, ARG2, ARG3, ARG4, ARG5);
++ PRE_REG_READ5(ssize_t, "write",
++ unsigned int, fd, const char *, buf, vki_size_t, count,
++ int, pad, unsigned long, off);
++ /* check to see if it is allowed. If not, try for an exemption from
++ --sim-hints=enable-outer (used for self hosting). */
++ ok = ML_(fd_allowed)(ARG1, "write", tid, False);
++ if (!ok && ARG1 == 2/*stderr*/
++ && SimHintiS(SimHint_enable_outer, VG_(clo_sim_hints)))
++ ok = True;
++ if (!ok)
++ SET_STATUS_Failure( VKI_EBADF );
++ else
++ PRE_MEM_READ( "write(buf)", ARG2, ARG3 );
++}
++
++PRE(sys_pwrite7)
++{
++ Bool ok;
++ *flags |= SfMayBlock;
++ PRINT("sys_write ( %ld, %#lx, %lu, %lu )", ARG1, ARG2, ARG3, ARG4);
++ PRE_REG_READ4(ssize_t, "write",
++ unsigned int, fd, const char *, buf, vki_size_t, count,
++ unsigned long, off);
++ /* check to see if it is allowed. If not, try for an exemption from
++ --sim-hints=enable-outer (used for self hosting). */
++ ok = ML_(fd_allowed)(ARG1, "write", tid, False);
++ if (!ok && ARG1 == 2/*stderr*/
++ && SimHintiS(SimHint_enable_outer, VG_(clo_sim_hints)))
++ ok = True;
++ if (!ok)
++ SET_STATUS_Failure( VKI_EBADF );
++ else
++ PRE_MEM_READ( "write(buf)", ARG2, ARG3 );
++}
++
++PRE(sys_ftruncate)
++{
++ *flags |= SfMayBlock;
++ PRINT("sys_ftruncate ( %ld, %lu )", ARG1,ARG3);
++ PRE_REG_READ3(long, "ftruncate", unsigned int, fd, int, pad,
++ unsigned int, length);
++}
++
++PRE(sys_ftruncate7)
++{
++ *flags |= SfMayBlock;
++ PRINT("sys_ftruncate ( %ld, %lu )", ARG1,ARG2);
++ PRE_REG_READ2(long, "ftruncate", unsigned int, fd,
++ unsigned long, length);
++}
++
++PRE(sys_truncate)
++{
++ *flags |= SfMayBlock;
++ PRINT("sys_truncate ( %#lx(%s), %lu )", ARG1,(char *)ARG1,ARG3);
++ PRE_REG_READ3(long, "truncate",
++ const char *, path, int, pad, unsigned int, length);
++ PRE_MEM_RASCIIZ( "truncate(path)", ARG1 );
++}
++
++PRE(sys_truncate7)
++{
++ *flags |= SfMayBlock;
++ PRINT("sys_truncate ( %#lx(%s), %lu )", ARG1,(char *)ARG1,ARG2);
++ PRE_REG_READ2(long, "truncate",
++ const char *, path, unsigned long, length);
++ PRE_MEM_RASCIIZ( "truncate(path)", ARG1 );
++}
++
++PRE(sys_sysarch)
++{
++ ThreadState *tst;
++ void **p;
++
++ PRINT("sys_sysarch ( %ld, %#lx )", ARG1, ARG2);
++ PRE_REG_READ2(int, "sysarch",
++ int, number, void *, args);
++ switch (ARG1) {
++ case VKI_AMD64_SET_FSBASE:
++ PRINT("sys_amd64_set_fsbase ( %#lx )", ARG2);
++ PRE_REG_READ1(long, "amd64_set_fsbase", void *, base)
++
++ /* On netbsd, the syscall loads the %fs selector for us, so do it now. */
++ tst = VG_(get_ThreadState)(tid);
++ p = (void**)ARG2;
++ tst->arch.vex.guest_FS_CONST = (UWord)*p;
++ /* "do" the syscall ourselves; the kernel never sees it */
++ SET_STATUS_Success2((ULong)*p, tst->arch.vex.guest_RDX );
++
++ break;
++ case VKI_AMD64_GET_FSBASE:
++ PRINT("sys_amd64_get_fsbase ( %#lx )", ARG2);
++ PRE_REG_READ1(int, "amd64_get_fsbase", void *, basep)
++ PRE_MEM_WRITE( "amd64_get_fsbase(basep)", ARG2, sizeof(void *) );
++
++ /* "do" the syscall ourselves; the kernel never sees it */
++ tst = VG_(get_ThreadState)(tid);
++ SET_STATUS_Success2( tst->arch.vex.guest_FS_CONST, tst->arch.vex.guest_RDX );
++ POST_MEM_WRITE( ARG2, sizeof(void *) );
++ break;
++ case VKI_AMD64_GET_XFPUSTATE:
++ PRINT("sys_amd64_get_xfpustate ( %#lx )", ARG2);
++ PRE_REG_READ1(int, "amd64_get_xfpustate", void *, basep)
++ PRE_MEM_WRITE( "amd64_get_xfpustate(basep)", ARG2, sizeof(void *) );
++
++ /* "do" the syscall ourselves; the kernel never sees it */
++ tst = VG_(get_ThreadState)(tid);
++ SET_STATUS_Success2( tst->arch.vex.guest_FPTAG[0], tst->arch.vex.guest_FPTAG[0] );
++ POST_MEM_WRITE( ARG2, sizeof(void *) );
++ break;
++ default:
++ VG_(message) (Vg_UserMsg, "unhandled sysarch cmd %ld", ARG1);
++ VG_(unimplemented) ("unhandled sysarch cmd");
++ break;
++ }
++}
++#endif
++
++#undef PRE
++#undef POST
++
++#endif /* defined(VGP_amd64_netbsd) */
++
++/*--------------------------------------------------------------------*/
++/*--- end ---*/
++/*--------------------------------------------------------------------*/
diff --git a/valgrind-netbsd/patches/patch-coregrind_m__syswrap_syswrap-generic.c b/valgrind-netbsd/patches/patch-coregrind_m__syswrap_syswrap-generic.c
new file mode 100644
index 0000000000..087bfd5265
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-coregrind_m__syswrap_syswrap-generic.c
@@ -0,0 +1,151 @@
+$NetBSD$
+
+--- coregrind/m_syswrap/syswrap-generic.c.orig 2018-05-05 07:42:22.000000000 +0000
++++ coregrind/m_syswrap/syswrap-generic.c
+@@ -30,7 +30,7 @@
+ The GNU General Public License is contained in the file COPYING.
+ */
+
+-#if defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_solaris)
++#if defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_solaris) || defined(VGO_netbsd)
+
+ #include "pub_core_basics.h"
+ #include "pub_core_vki.h"
+@@ -67,6 +67,9 @@
+
+ #include "config.h"
+
++#ifndef __NR_ipc
++#define __NR_ipc -1
++#endif
+
+ void ML_(guess_and_register_stack) (Addr sp, ThreadState* tst)
+ {
+@@ -270,7 +273,7 @@ ML_(notify_core_and_tool_of_mprotect) (
+
+
+
+-#if HAVE_MREMAP
++#if HAVE_MREMAP && 0
+ /* Expand (or shrink) an existing mapping, potentially moving it at
+ the same time (controlled by the MREMAP_MAYMOVE flag). Nightmare.
+ */
+@@ -926,7 +929,7 @@ void VG_(init_preopened_fds)(void)
+ out:
+ VG_(close)(sr_Res(f));
+
+-#elif defined(VGO_darwin)
++#elif defined(VGO_darwin) || defined(VGO_netbsd)
+ init_preopened_fds_without_proc_self_fd();
+
+ #elif defined(VGO_solaris)
+@@ -1131,6 +1134,11 @@ void pre_mem_read_sockaddr ( ThreadId ti
+ VG_(sprintf) ( outmsg, description, "sa_family" );
+ PRE_MEM_READ( outmsg, (Addr) &sa->sa_family, sizeof(vki_sa_family_t));
+
++#if defined(VGO_netbsd)
++ VG_(sprintf) ( outmsg, description, ".sa_len" );
++ PRE_MEM_READ( outmsg, (Addr) &sa->sa_len, sizeof(char));
++#endif
++
+ /* Don't do any extra checking if we cannot determine the sa_family. */
+ if (! ML_(safe_to_deref) (&sa->sa_family, sizeof(vki_sa_family_t)))
+ return;
+@@ -1818,6 +1826,9 @@ UInt get_sem_count( Int semid )
+
+ return buf.sem_nsems;
+
++# elif defined(VGO_netbsd)
++ return 0;
++
+ # else
+ struct vki_semid_ds buf;
+ arg.buf = &buf;
+@@ -1844,8 +1855,10 @@ ML_(generic_PRE_sys_semctl) ( ThreadId t
+ case VKI_SEM_INFO:
+ case VKI_IPC_INFO|VKI_IPC_64:
+ case VKI_SEM_INFO|VKI_IPC_64:
++#if !defined(VGO_netbsd)
+ PRE_MEM_WRITE( "semctl(IPC_INFO, arg.buf)",
+ (Addr)arg.buf, sizeof(struct vki_seminfo) );
++#endif
+ break;
+ #endif
+
+@@ -1923,7 +1936,9 @@ ML_(generic_POST_sys_semctl) ( ThreadId
+ case VKI_SEM_INFO:
+ case VKI_IPC_INFO|VKI_IPC_64:
+ case VKI_SEM_INFO|VKI_IPC_64:
++#if !defined(VGO_netbsd)
+ POST_MEM_WRITE( (Addr)arg.buf, sizeof(struct vki_seminfo) );
++#endif
+ break;
+ #endif
+
+@@ -2641,7 +2656,7 @@ PRE(sys_madvise)
+ unsigned long, start, vki_size_t, length, int, advice);
+ }
+
+-#if HAVE_MREMAP
++#if HAVE_MREMAP && 0
+ PRE(sys_mremap)
+ {
+ // Nb: this is different to the glibc version described in the man pages,
+@@ -2735,6 +2750,7 @@ PRE(sys_sync)
+ PRE_REG_READ0(long, "sync");
+ }
+
++#if 0
+ PRE(sys_fstatfs)
+ {
+ FUSE_COMPATIBLE_MAY_BLOCK();
+@@ -2762,6 +2778,8 @@ POST(sys_fstatfs64)
+ {
+ POST_MEM_WRITE( ARG3, ARG2 );
+ }
++#endif
++
+
+ PRE(sys_getsid)
+ {
+@@ -3339,7 +3357,7 @@ PRE(sys_fork)
+
+ if (!SUCCESS) return;
+
+-#if defined(VGO_linux)
++#if defined(VGO_linux) || defined(VGO_netbsd)
+ // RES is 0 for child, non-0 (the child's PID) for parent.
+ is_child = ( RES == 0 ? True : False );
+ child_pid = ( is_child ? -1 : RES );
+@@ -4429,6 +4447,7 @@ POST(sys_newstat)
+ POST_MEM_WRITE( ARG2, sizeof(struct vki_stat) );
+ }
+
++#if 0
+ PRE(sys_statfs)
+ {
+ FUSE_COMPATIBLE_MAY_BLOCK();
+@@ -4456,6 +4475,7 @@ POST(sys_statfs64)
+ {
+ POST_MEM_WRITE( ARG3, ARG2 );
+ }
++#endif
+
+ PRE(sys_symlink)
+ {
+@@ -4514,6 +4534,7 @@ PRE(sys_unlink)
+ PRE_MEM_RASCIIZ( "unlink(pathname)", ARG1 );
+ }
+
++#if 0
+ PRE(sys_newuname)
+ {
+ PRINT("sys_newuname ( %#" FMT_REGWORD "x )", ARG1);
+@@ -4527,6 +4548,7 @@ POST(sys_newuname)
+ POST_MEM_WRITE( ARG1, sizeof(struct vki_new_utsname) );
+ }
+ }
++#endif
+
+ PRE(sys_waitpid)
+ {
diff --git a/valgrind-netbsd/patches/patch-coregrind_m__syswrap_syswrap-main.c b/valgrind-netbsd/patches/patch-coregrind_m__syswrap_syswrap-main.c
new file mode 100644
index 0000000000..8efa32f1e8
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-coregrind_m__syswrap_syswrap-main.c
@@ -0,0 +1,323 @@
+$NetBSD$
+
+--- coregrind/m_syswrap/syswrap-main.c.orig 2018-08-14 06:09:34.000000000 +0000
++++ coregrind/m_syswrap/syswrap-main.c
+@@ -81,6 +81,9 @@
+ fills in the immediate field.
+ s390x r1/SVC r2 r3 r4 r5 r6 r7 n/a n/a r2 (== ARG1)
+
+ NetBSD:
+ amd64 rax rdi rsi rdx r10 r8 r9 +8 +16 rdx:rax, rflags.c
++
+ NUM ARG1 ARG2 ARG3 ARG4 ARG5 ARG6 ARG7 ARG8 RESULT
+ DARWIN:
+ x86 eax +4 +8 +12 +16 +20 +24 +28 +32 edx:eax, eflags.c
+@@ -292,6 +295,14 @@ UWord ML_(do_syscall_for_client_WRK)( Wo
+ const vki_sigset_t *syscall_mask,
+ const vki_sigset_t *restore_mask,
+ Word sigsetSzB );
++#elif defined(VGO_netbsd)
++extern
++UWord ML_(do_syscall_for_client_WRK)( Word syscallno,
++ void* guest_state,
++ const vki_sigset_t *syscall_mask,
++ const vki_sigset_t *restore_mask,
++ Word sigsetSzB );
++
+ #elif defined(VGO_darwin)
+ extern
+ UWord ML_(do_syscall_for_client_unix_WRK)( Word syscallno,
+@@ -335,11 +346,25 @@ void do_syscall_for_client ( Int syscall
+ {
+ vki_sigset_t saved;
+ UWord err;
++# if defined(VGO_netbsd)
++ Int real_syscallno;
++# endif
+ # if defined(VGO_linux)
+ err = ML_(do_syscall_for_client_WRK)(
+ syscallno, &tst->arch.vex,
+ syscall_mask, &saved, sizeof(vki_sigset_t)
+ );
++# elif defined(VGO_netbsd)
++ if (tst->arch.vex.guest_SC_CLASS == VG_NETBSD_SYSCALL0)
++ real_syscallno = __NR_syscall;
++ else if (tst->arch.vex.guest_SC_CLASS == VG_NETBSD_SYSCALL198)
++ real_syscallno = __NR___syscall;
++ else
++ real_syscallno = syscallno;
++ err = ML_(do_syscall_for_client_WRK)(
++ real_syscallno, &tst->arch.vex,
++ syscall_mask, &saved, sizeof(vki_sigset_t)
++ );
+ # elif defined(VGO_darwin)
+ switch (VG_DARWIN_SYSNO_CLASS(syscallno)) {
+ case VG_DARWIN_SYSCALL_CLASS_UNIX:
+@@ -532,6 +557,47 @@ void getSyscallArgsFromGuestState ( /*OU
+ canonical->arg6 = gst->guest_X5;
+ canonical->arg7 = 0;
+ canonical->arg8 = 0;
++#elif defined(VGP_amd64_netbsd)
++ VexGuestAMD64State* gst = (VexGuestAMD64State*)gst_vanilla;
++ UWord *stack = (UWord *)gst->guest_RSP;
++
++ // NetBSD supports different calling conventions
++ switch (gst->guest_RAX) {
++ case __NR_syscall:
++ canonical->klass = VG_NETBSD_SYSCALL0;
++ canonical->sysno = gst->guest_RDI;
++ break;
++ case __NR___syscall:
++ canonical->klass = VG_NETBSD_SYSCALL198;
++ canonical->sysno = gst->guest_RDI;
++ break;
++ default:
++ canonical->klass = 0;
++ canonical->sysno = gst->guest_RAX;
++ break;
++ }
++ // stack[0] is a (fake) return address
++ if (canonical->klass != VG_NETBSD_SYSCALL0 && canonical->klass != VG_NETBSD_SYSCALL198) {
++ // stack[0] is return address
++ canonical->arg1 = gst->guest_RDI;
++ canonical->arg2 = gst->guest_RSI;
++ canonical->arg3 = gst->guest_RDX;
++ canonical->arg4 = gst->guest_R10;
++ canonical->arg5 = gst->guest_R8;
++ canonical->arg6 = gst->guest_R9;
++ canonical->arg7 = stack[1];
++ canonical->arg8 = stack[2];
++ } else {
++ // stack[0] is return address
++ canonical->arg1 = gst->guest_RSI;
++ canonical->arg2 = gst->guest_RDX;
++ canonical->arg3 = gst->guest_R10;
++ canonical->arg4 = gst->guest_R8;
++ canonical->arg5 = gst->guest_R9;
++ canonical->arg6 = stack[1];
++ canonical->arg7 = stack[2];
++ canonical->arg8 = stack[3];
++ }
+
+ #elif defined(VGP_mips32_linux)
+ VexGuestMIPS32State* gst = (VexGuestMIPS32State*)gst_vanilla;
+@@ -950,6 +1016,49 @@ void putSyscallArgsIntoGuestState ( /*IN
+ stack[1] = canonical->arg7;
+ stack[2] = canonical->arg8;
+
++#elif defined(VGP_amd64_netbsd)
++ VexGuestAMD64State* gst = (VexGuestAMD64State*)gst_vanilla;
++ UWord *stack = (UWord *)gst->guest_RSP;
++
++ // stack[0] is a (fake) return address
++ switch (canonical->klass) {
++ case VG_NETBSD_SYSCALL0:
++ gst->guest_RAX = __NR_syscall;
++ gst->guest_RDI = canonical->sysno;
++ gst->guest_RSI = canonical->arg1;
++ gst->guest_RDX = canonical->arg2;
++ gst->guest_R10 = canonical->arg3;
++ gst->guest_R8 = canonical->arg4;
++ gst->guest_R9 = canonical->arg5;
++ stack[1] = canonical->arg6;
++ stack[2] = canonical->arg7;
++ stack[3] = canonical->arg8;
++ break;
++ case VG_NETBSD_SYSCALL198:
++ gst->guest_RAX = __NR___syscall;
++ gst->guest_RDI = canonical->sysno;
++ gst->guest_RSI = canonical->arg1;
++ gst->guest_RDX = canonical->arg2;
++ gst->guest_R10 = canonical->arg3;
++ gst->guest_R8 = canonical->arg4;
++ gst->guest_R9 = canonical->arg5;
++ stack[1] = canonical->arg6;
++ stack[2] = canonical->arg7;
++ stack[3] = canonical->arg8;
++ break;
++ default:
++ gst->guest_RAX = canonical->sysno;
++ gst->guest_RDI = canonical->arg1;
++ gst->guest_RSI = canonical->arg2;
++ gst->guest_RDX = canonical->arg3;
++ gst->guest_R10 = canonical->arg4;
++ gst->guest_R8 = canonical->arg5;
++ gst->guest_R9 = canonical->arg6;
++ stack[1] = canonical->arg7;
++ stack[2] = canonical->arg8;
++ break;
++ }
++
+ #else
+ # error "putSyscallArgsIntoGuestState: unknown arch"
+ #endif
+@@ -983,6 +1092,7 @@ void getSyscallStatusFromGuestState ( /*
+ canonical->sres = VG_(mk_SysRes_ppc64_linux)( gst->guest_GPR3, cr0so );
+ canonical->what = SsComplete;
+
++
+ # elif defined(VGP_arm_linux)
+ VexGuestARMState* gst = (VexGuestARMState*)gst_vanilla;
+ canonical->sres = VG_(mk_SysRes_arm_linux)( gst->guest_R0 );
+@@ -1041,6 +1151,14 @@ void getSyscallStatusFromGuestState ( /*
+ );
+ canonical->what = SsComplete;
+
++# elif defined(VGP_amd64_netbsd)
++ /* duplicates logic in m_signals.VG_UCONTEXT_SYSCALL_SYSRES */
++ VexGuestAMD64State* gst = (VexGuestAMD64State*)gst_vanilla;
++ ULong flags = LibVEX_GuestAMD64_get_rflags(gst);
++ canonical->sres = VG_(mk_SysRes_amd64_netbsd)(gst->guest_RAX, gst->guest_RDX,
++ (flags & 1) != 0 ? True : False);
++ canonical->what = SsComplete;
++
+ # elif defined(VGP_amd64_darwin)
+ /* duplicates logic in m_signals.VG_UCONTEXT_SYSCALL_SYSRES */
+ VexGuestAMD64State* gst = (VexGuestAMD64State*)gst_vanilla;
+@@ -1134,6 +1252,26 @@ void putSyscallStatusIntoGuestState ( /*
+ VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
+ OFFSET_amd64_RAX, sizeof(UWord) );
+
++#elif defined(VGP_amd64_freebsd)
++ VexGuestAMD64State* gst = (VexGuestAMD64State*)gst_vanilla;
++ vg_assert(canonical->what == SsComplete);
++ if (sr_isError(canonical->sres)) {
++ gst->guest_RAX = sr_Err(canonical->sres);
++ LibVEX_GuestAMD64_put_rflag_c(1, gst);
++ } else {
++ gst->guest_RAX = sr_Res(canonical->sres);
++ gst->guest_RDX = sr_ResHI(canonical->sres);
++ LibVEX_GuestAMD64_put_rflag_c(0, gst);
++ }
++ VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
++ OFFSET_amd64_RAX, sizeof(ULong) );
++ VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
++ OFFSET_amd64_RDX, sizeof(ULong) );
++ // GrP fixme sets defined for entire eflags, not just bit c
++ // DDD: this breaks exp-ptrcheck.
++ VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
++ offsetof(VexGuestAMD64State, guest_CC_DEP1), sizeof(ULong) );
++
+ # elif defined(VGP_ppc32_linux)
+ VexGuestPPC32State* gst = (VexGuestPPC32State*)gst_vanilla;
+ UInt old_cr = LibVEX_GuestPPC32_get_CR(gst);
+@@ -1363,6 +1501,26 @@ void putSyscallStatusIntoGuestState ( /*
+ VG_TRACK(post_reg_write, Vg_CoreSysCall, tid, offsetof(VexGuestAMD64State,
+ guest_CC_DEP2), sizeof(ULong));
+
++#elif defined(VGP_amd64_netbsd)
++ VexGuestAMD64State* gst = (VexGuestAMD64State*)gst_vanilla;
++ vg_assert(canonical->what == SsComplete);
++ if (sr_isError(canonical->sres)) {
++ gst->guest_RAX = sr_Err(canonical->sres);
++ LibVEX_GuestAMD64_put_rflag_c(1, gst);
++ } else {
++ gst->guest_RAX = sr_Res(canonical->sres);
++ gst->guest_RDX = sr_ResHI(canonical->sres);
++ LibVEX_GuestAMD64_put_rflag_c(0, gst);
++ }
++ VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
++ OFFSET_amd64_RAX, sizeof(ULong) );
++ VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
++ OFFSET_amd64_RDX, sizeof(ULong) );
++ // GrP fixme sets defined for entire eflags, not just bit c
++ // DDD: this breaks exp-ptrcheck.
++ VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
++ offsetof(VexGuestAMD64State, guest_CC_DEP1), sizeof(ULong) );
++
+ # else
+ # error "putSyscallStatusIntoGuestState: unknown arch"
+ # endif
+@@ -1478,6 +1636,17 @@ void getSyscallArgLayout ( /*OUT*/Syscal
+ layout->s_arg7 = sizeof(UWord) * 7;
+ layout->s_arg8 = sizeof(UWord) * 8;
+
++#elif defined(VGP_amd64_netbsd)
++ layout->o_sysno = OFFSET_amd64_RAX;
++ layout->o_arg1 = OFFSET_amd64_RDI;
++ layout->o_arg2 = OFFSET_amd64_RSI;
++ layout->o_arg3 = OFFSET_amd64_RDX;
++ layout->o_arg4 = OFFSET_amd64_R10;
++ layout->o_arg5 = OFFSET_amd64_R8;
++ layout->o_arg6 = OFFSET_amd64_R9;
++ layout->s_arg7 = sizeof(UWord) * 1;
++ layout->s_arg8 = sizeof(UWord) * 2;
++
+ #elif defined(VGP_amd64_darwin)
+ layout->o_sysno = OFFSET_amd64_RAX;
+ layout->o_arg1 = OFFSET_amd64_RDI;
+@@ -1543,6 +1712,7 @@ void bad_before ( ThreadId
+ /*OUT*/SyscallStatus* status,
+ /*OUT*/UWord* flags )
+ {
++ __builtin_trap();
+ VG_(dmsg)("WARNING: unhandled %s syscall: %s\n",
+ VG_PLATFORM, VG_SYSNUM_STRING(args->sysno));
+ if (VG_(clo_verbosity) > 1) {
+@@ -1570,6 +1740,11 @@ static const SyscallTableEntry* get_sysc
+ # if defined(VGO_linux)
+ sys = ML_(get_linux_syscall_entry)( syscallno );
+
++# elif defined(VGO_netbsd)
++ if (syscallno >= 0 && syscallno < ML_(syscall_table_size) &&
++ ML_(syscall_table)[syscallno].before != NULL)
++ sys = &ML_(syscall_table)[syscallno];
++
+ # elif defined(VGO_darwin)
+ Int idx = VG_DARWIN_SYSNO_INDEX(syscallno);
+
+@@ -1786,6 +1961,9 @@ void VG_(client_syscall) ( ThreadId tid,
+ is interrupted by a signal. */
+ sysno = sci->orig_args.sysno;
+
+# if defined(VGO_netbsd)
+ tst->arch.vex.guest_SC_CLASS = sci->orig_args.klass;
+# endif
+ /* It's sometimes useful, as a crude debugging hack, to get a
+ stack trace at each (or selected) syscalls. */
+ if (0 && sysno == __NR_ioctl) {
+@@ -2186,7 +2364,7 @@ void VG_(post_syscall) (ThreadId tid)
+ /* These are addresses within ML_(do_syscall_for_client_WRK). See
+ syscall-$PLAT.S for details.
+ */
+-#if defined(VGO_linux)
++#if defined(VGO_linux) || defined(VGO_netbsd)
+ extern const Addr ML_(blksys_setup);
+ extern const Addr ML_(blksys_restart);
+ extern const Addr ML_(blksys_complete);
+@@ -2246,6 +2424,26 @@ void ML_(fixup_guest_state_to_restart_sy
+ vg_assert(p[0] == 0xcd && p[1] == 0x80);
+ }
+
++#elif defined(VGP_amd64_netbsd)
++ /* XXX: we support different syscall methods. */
++ arch->vex.guest_RIP -= 2; // sizeof(syscall)
++
++ /* Make sure our caller is actually sane, and we're really backing
++ back over a syscall.
++
++ syscall == 0F 05
++ */
++ {
++ UChar *p = (UChar *)arch->vex.guest_RIP;
++
++ if (p[0] != 0x0F || p[1] != 0x05)
++ VG_(message)(Vg_DebugMsg,
++ "?! restarting over syscall at %#llx %02x %02x\n",
++ arch->vex.guest_RIP, p[0], p[1]);
++
++ vg_assert(p[0] == 0x0F && p[1] == 0x05);
++ }
++
+ #elif defined(VGP_amd64_linux)
+ arch->vex.guest_RIP -= 2; // sizeof(syscall)
+
+@@ -2592,7 +2790,7 @@ VG_(fixup_guest_state_after_syscall_inte
+ th_regs = &tst->arch;
+ sci = & syscallInfo[tid];
+
+-# if defined(VGO_linux)
++# if defined(VGO_linux) || defined(VGO_netbsd)
+ outside_range
+ = ip < ML_(blksys_setup) || ip >= ML_(blksys_finished);
+ in_setup_to_restart
diff --git a/valgrind-netbsd/patches/patch-coregrind_m__syswrap_syswrap-netbsd-variants.c b/valgrind-netbsd/patches/patch-coregrind_m__syswrap_syswrap-netbsd-variants.c
new file mode 100644
index 0000000000..e375d8b5c7
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-coregrind_m__syswrap_syswrap-netbsd-variants.c
@@ -0,0 +1,100 @@
+$NetBSD$
+
+--- coregrind/m_syswrap/syswrap-netbsd-variants.c.orig 2019-03-29 12:09:49.239759844 +0000
++++ coregrind/m_syswrap/syswrap-netbsd-variants.c
+@@ -0,0 +1,95 @@
++
++/*--------------------------------------------------------------------*/
++/*--- Handlers for syscalls on minor variants of NetBSD kernels. ---*/
++/*--- syswrap-netbsd-variants.c ---*/
++/*--------------------------------------------------------------------*/
++
++/*
++ This file is part of Valgrind, a dynamic binary instrumentation
++ framework.
++
++ Copyright (C) 2000-2008 Julian Seward
++ jseward%acm.org@localhost
++
++ This program is free software; you can redistribute it and/or
++ modify it under the terms of the GNU General Public License as
++ published by the Free Software Foundation; either version 2 of the
++ License, or (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful, but
++ WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
++ 02111-1307, USA.
++
++ The GNU General Public License is contained in the file COPYING.
++*/
++
++/* The files syswrap-generic.c, syswrap-linux.c, syswrap-x86-linux.c,
++ syswrap-amd64-linux.c and syswrap-ppc32-linux.c, and associated
++ vki*.h header files, constitute Valgrind's model of how a vanilla
++ Linux kernel behaves with respect to syscalls.
++
++ On a few occasions, it is useful to run with a kernel that has some
++ (minor) extensions to the vanilla model, either due to running on a
++ hacked kernel, or using a vanilla kernel which has incorporated a
++ custom kernel module. Rather than clutter the standard model, all
++ such variant handlers are placed in here.
++
++ Unlike the C files for the standard model, this file should also
++ contain all constants/types needed for said wrappers. The vki*.h
++ headers should not be polluted with non-vanilla info. */
++
++
++#include "pub_core_basics.h"
++#include "pub_core_vki.h"
++#include "pub_core_libcsetjmp.h" // to keep _threadstate.h happy
++#include "pub_core_threadstate.h"
++#include "pub_core_aspacemgr.h"
++#include "pub_core_debuginfo.h" // VG_(di_notify_*)
++#include "pub_core_transtab.h" // VG_(discard_translations)
++#include "pub_core_debuglog.h"
++#include "pub_core_libcbase.h"
++#include "pub_core_libcassert.h"
++#include "pub_core_libcfile.h"
++#include "pub_core_libcprint.h"
++#include "pub_core_libcproc.h"
++#include "pub_core_mallocfree.h"
++#include "pub_core_tooliface.h"
++#include "pub_core_options.h"
++#include "pub_core_scheduler.h"
++#include "pub_core_signals.h"
++#include "pub_core_syscall.h"
++
++#include "priv_types_n_macros.h"
++#include "priv_syswrap-netbsd.h"
++
++
++#if 0 /* think about ylock syscall etc */
++/* ---------------------------------------------------------------
++ BProc wrappers
++ ------------------------------------------------------------ */
++
++/* Return 0 means hand to kernel, non-0 means fail w/ that value. */
++Int ML_(linux_variant_PRE_sys_bproc)( UWord arg1, UWord arg2,
++ UWord arg3, UWord arg4,
++ UWord arg5, UWord arg6 )
++{
++ return 0;
++}
++
++void ML_(linux_variant_POST_sys_bproc)( UWord arg1, UWord arg2,
++ UWord arg3, UWord arg4,
++ UWord arg5, UWord arg6 )
++{
++}
++#endif
++
++
++/*--------------------------------------------------------------------*/
++/*--- end syswrap-netbsd-variants.c ---*/
++/*--------------------------------------------------------------------*/
diff --git a/valgrind-netbsd/patches/patch-coregrind_m__syswrap_syswrap-netbsd.c b/valgrind-netbsd/patches/patch-coregrind_m__syswrap_syswrap-netbsd.c
new file mode 100644
index 0000000000..080cd32372
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-coregrind_m__syswrap_syswrap-netbsd.c
@@ -0,0 +1,4448 @@
+$NetBSD$
+
+--- coregrind/m_syswrap/syswrap-netbsd.c.orig 2019-03-31 21:41:10.598938025 +0000
++++ coregrind/m_syswrap/syswrap-netbsd.c
+@@ -0,0 +1,4443 @@
++
++/*--------------------------------------------------------------------*/
++/*--- netbsd-specific syscalls, etc. syswrap-netbsd.c ---*/
++/*--------------------------------------------------------------------*/
++
++/*
++ This file is part of Valgrind, a dynamic binary instrumentation
++ framework.
++
++ Copyright (C) 2000-2008 Nicholas Nethercote
++ njn%valgrind.org@localhost
++
++ This program is free software; you can redistribute it and/or
++ modify it under the terms of the GNU General Public License as
++ published by the Free Software Foundation; either version 2 of the
++ License, or (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful, but
++ WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
++ 02111-1307, USA.
++
++ The GNU General Public License is contained in the file COPYING.
++*/
++
++#include "pub_core_basics.h"
++#include "pub_core_vki.h"
++#include "pub_core_vkiscnums.h"
++#include "pub_core_libcsetjmp.h" // to keep _threadstate.h happy
++#include "pub_core_threadstate.h"
++#include "pub_core_aspacemgr.h"
++#include "pub_core_debuginfo.h" // VG_(di_notify_*)
++#include "pub_core_transtab.h" // VG_(discard_translations)
++#include "pub_core_xarray.h"
++#include "pub_core_clientstate.h"
++#include "pub_core_debuglog.h"
++#include "pub_core_libcbase.h"
++#include "pub_core_libcassert.h"
++#include "pub_core_libcfile.h"
++#include "pub_core_libcprint.h"
++#include "pub_core_libcproc.h"
++#include "pub_core_libcsignal.h"
++#include "pub_core_machine.h"
++#include "pub_core_mallocfree.h"
++#include "pub_core_tooliface.h"
++#include "pub_core_options.h"
++#include "pub_core_scheduler.h"
++#include "pub_core_signals.h"
++#include "pub_core_syscall.h"
++#include "pub_core_syswrap.h"
++
++#include "priv_types_n_macros.h"
++#include "priv_syswrap-generic.h"
++#include "priv_syswrap-netbsd.h"
++
++
++// Run a thread from beginning to end and return the thread's
++// scheduler-return-code.
++static VgSchedReturnCode thread_wrapper(Word /*ThreadId*/ tidW)
++{
++ VgSchedReturnCode ret;
++ ThreadId tid = (ThreadId)tidW;
++ ThreadState* tst = VG_(get_ThreadState)(tid);
++
++ VG_(debugLog)(1, "syswrap-netbsd",
++ "thread_wrapper(tid=%lld): entry\n",
++ (ULong)tidW);
++
++ vg_assert(tst->status == VgTs_Init);
++
++ /* make sure we get the CPU lock before doing anything significant */
++ VG_(acquire_BigLock)(tid, "thread_wrapper(starting new thread)");
++
++ if (0)
++ VG_(printf)("thread tid %d started: stack = %p\n",
++ tid, &tid);
++
++ /* Make sure error reporting is enabled in the new thread. */
++ tst->err_disablement_level = 0;
++
++ VG_TRACK(pre_thread_first_insn, tid);
++
++ tst->os_state.lwpid = VG_(gettid)();
++ tst->os_state.threadgroup = VG_(getpid)();
++
++ /* Thread created with all signals blocked; scheduler will set the
++ appropriate mask */
++
++ ret = VG_(scheduler)(tid);
++
++ vg_assert(VG_(is_exiting)(tid));
++
++ vg_assert(tst->status == VgTs_Runnable);
++ vg_assert(VG_(is_running_thread)(tid));
++
++ VG_(debugLog)(1, "syswrap-netbsd",
++ "thread_wrapper(tid=%lld): exit\n",
++ (ULong)tidW);
++
++ /* Return to caller, still holding the lock. */
++ return ret;
++}
++
++
++/* ---------------------------------------------------------------------
++ clone-related stuff
++ ------------------------------------------------------------------ */
++
++/* Run a thread all the way to the end, then do appropriate exit actions
++ (this is the last-one-out-turn-off-the-lights bit). */
++static void run_a_thread_NORETURN ( Word tidW )
++{
++ ThreadId tid = (ThreadId)tidW;
++ VgSchedReturnCode src;
++ Int c;
++ ThreadState* tst;
++
++ VG_(debugLog)(1, "syswrap-netbsd",
++ "run_a_thread_NORETURN(tid=%lld): pre-thread_wrapper\n",
++ (ULong)tidW);
++
++ tst = VG_(get_ThreadState)(tid);
++ vg_assert(tst);
++
++ /* Run the thread all the way through. */
++ src = thread_wrapper(tid);
++
++ VG_(debugLog)(1, "syswrap-netbsd",
++ "run_a_thread_NORETURN(tid=%lld): post-thread_wrapper\n",
++ (ULong)tidW);
++
++ c = VG_(count_living_threads)();
++ vg_assert(c >= 1); /* stay sane */
++
++ // Tell the tool this thread is exiting
++ VG_TRACK( pre_thread_ll_exit, tid );
++
++ /* If the thread is exiting with errors disabled, complain loudly;
++ doing so is bad (does the user know this has happened?) Also,
++ in all cases, be paranoid and clear the flag anyway so that the
++ thread slot is safe in this respect if later reallocated. This
++ should be unnecessary since the flag should be cleared when the
++ slot is reallocated, in thread_wrapper(). */
++ if (tst->err_disablement_level > 0) {
++ VG_(umsg)(
++ "WARNING: exiting thread has error reporting disabled.\n"
++ "WARNING: possibly as a result of some mistake in the use\n"
++ "WARNING: of the VALGRIND_DISABLE_ERROR_REPORTING macros.\n"
++ );
++ VG_(debugLog)(
++ 1, "syswrap-netbsd",
++ "run_a_thread_NORETURN(tid=%lld): "
++ "WARNING: exiting thread has err_disablement_level = %u\n",
++ (ULong)tidW, tst->err_disablement_level
++ );
++ }
++ tst->err_disablement_level = 0;
++
++ if (c == 1) {
++
++ VG_(debugLog)(1, "syswrap-netbsd",
++ "run_a_thread_NORETURN(tid=%lld): "
++ "last one standing\n",
++ (ULong)tidW);
++
++ /* We are the last one standing. Keep hold of the lock and
++ carry on to show final tool results, then exit the entire system.
++ Use the continuation pointer set at startup in m_main. */
++ ( * VG_(address_of_m_main_shutdown_actions_NORETURN) ) (tid, src);
++
++ } else {
++
++ VG_(debugLog)(1, "syswrap-netbsd",
++ "run_a_thread_NORETURN(tid=%lld): "
++ "not last one standing\n",
++ (ULong)tidW);
++
++ /* OK, thread is dead, but others still exist. Just exit. */
++
++ /* This releases the run lock */
++ VG_(exit_thread)(tid);
++ vg_assert(tst->status == VgTs_Zombie);
++
++ /* We have to use this sequence to terminate the thread to
++ prevent a subtle race. If VG_(exit_thread)() had left the
++ ThreadState as Empty, then it could have been reallocated,
++ reusing the stack while we're doing these last cleanups.
++ Instead, VG_(exit_thread) leaves it as Zombie to prevent
++ reallocation. We need to make sure we don't touch the stack
++ between marking it Empty and exiting. Hence the
++ assembler. */
++#if defined(VGP_x86_netbsd) /* netbsd has args on the stack */
++ asm volatile (
++ "movl %1, %0\n" /* set tst->status = VgTs_Empty */
++ "movl %2, %%eax\n" /* set %eax = __NR_thr_exit */
++ "movl %3, %%ebx\n" /* set %ebx = tst->os_state.exitcode */
++ "pushl %%ebx\n" /* arg on stack */
++ "pushl %%ebx\n" /* fake return address */
++ "int $0x80\n" /* thr_exit(tst->os_state.exitcode) */
++ "popl %%ebx\n" /* fake return address */
++ "popl %%ebx\n" /* arg off stack */
++ : "=m" (tst->status)
++ : "n" (VgTs_Empty), "n" (__NR_thr_exit), "m" (tst->os_state.exitcode)
++ : "eax", "ebx"
++ );
++#elif defined(VGP_amd64_netbsd)
++ asm volatile (
++ "movl %1, %0\n" /* set tst->status = VgTs_Empty */
++ "movq %2, %%rax\n" /* set %rax = __NR__lwp_exit */
++ "movq %3, %%rdi\n" /* set %rdi = tst->os_state.exitcode */
++ "pushq %%rdi\n" /* fake return address */
++ "syscall\n" /* _lwp_exit(tst->os_state.exitcode) */
++ "popq %%rdi\n" /* fake return address */
++ : "=m" (tst->status)
++ : "n" (VgTs_Empty), "n" (__NR__lwp_exit), "m" (tst->os_state.exitcode)
++ : "rax", "rdi"
++ );
++#else
++# error Unknown platform
++#endif
++
++ VG_(core_panic)("Thread exit failed?\n");
++ }
++
++ /*NOTREACHED*/
++ vg_assert(0);
++}
++
++Word ML_(start_thread_NORETURN) ( void* arg )
++{
++ ThreadState* tst = (ThreadState*)arg;
++ ThreadId tid = tst->tid;
++
++ run_a_thread_NORETURN ( (Word)tid );
++ /*NOTREACHED*/
++ vg_assert(0);
++}
++
++/* Allocate a stack for this thread, if it doesn't already have one.
++ They're allocated lazily, and never freed. Returns the initial stack
++ pointer value to use, or 0 if allocation failed. */
++Addr ML_(allocstack)(ThreadId tid)
++{
++ ThreadState* tst = VG_(get_ThreadState)(tid);
++ VgStack* stack;
++ Addr initial_SP;
++
++ /* Either the stack_base and stack_init_SP are both zero (in which
++ case a stack hasn't been allocated) or they are both non-zero,
++ in which case it has. */
++
++ if (tst->os_state.valgrind_stack_base == 0)
++ vg_assert(tst->os_state.valgrind_stack_init_SP == 0);
++
++ if (tst->os_state.valgrind_stack_base != 0)
++ vg_assert(tst->os_state.valgrind_stack_init_SP != 0);
++
++ /* If no stack is present, allocate one. */
++
++ if (tst->os_state.valgrind_stack_base == 0) {
++ stack = VG_(am_alloc_VgStack)( &initial_SP );
++ if (stack) {
++ tst->os_state.valgrind_stack_base = (Addr)stack;
++ tst->os_state.valgrind_stack_init_SP = initial_SP;
++ }
++ }
++
++ if (0)
++ VG_(printf)( "stack for tid %d at %p; init_SP=%p\n",
++ tid,
++ (void*)tst->os_state.valgrind_stack_base,
++ (void*)tst->os_state.valgrind_stack_init_SP );
++
++ return tst->os_state.valgrind_stack_init_SP;
++}
++
++/* Allocate a stack for the main thread, and run it all the way to the
++ end. Although we already have a working VgStack
++ (VG_(interim_stack)) it's better to allocate a new one, so that
++ overflow detection works uniformly for all threads.
++*/
++void VG_(main_thread_wrapper_NORETURN)(ThreadId tid)
++{
++ Addr sp;
++ VG_(debugLog)(1, "syswrap-netbsd",
++ "entering VG_(main_thread_wrapper_NORETURN)\n");
++
++ sp = ML_(allocstack)(tid);
++
++/* QQQ keep for amd64 redzone stuff */
++#if defined(VGP_ppc32_linux)
++ /* make a stack frame */
++ sp -= 16;
++ sp &= ~0xF;
++ *(UWord *)sp = 0;
++#elif defined(VGP_ppc64_linux)
++ /* make a stack frame */
++ sp -= 112;
++ sp &= ~((Addr)0xF);
++ *(UWord *)sp = 0;
++#endif
++
++ /* If we can't even allocate the first thread's stack, we're hosed.
++ Give up. */
++ vg_assert2(sp != 0, "Cannot allocate main thread's stack.");
++
++ /* shouldn't be any other threads around yet */
++ vg_assert( VG_(count_living_threads)() == 1 );
++
++ ML_(call_on_new_stack_0_1)(
++ (Addr)sp, /* stack */
++ 0, /* bogus return address */
++ run_a_thread_NORETURN, /* fn to call */
++ (Word)tid /* arg to give it */
++ );
++
++ /*NOTREACHED*/
++ vg_assert(0);
++}
++
++
++/* Do a fork() */
++SysRes ML_(do_fork) ( ThreadId tid )
++{
++ vki_sigset_t fork_saved_mask;
++ vki_sigset_t mask;
++ SysRes res;
++
++ /* Block all signals during fork, so that we can fix things up in
++ the child without being interrupted. */
++ VG_(sigfillset)(&mask);
++ VG_(sigprocmask)(VKI_SIG_SETMASK, &mask, &fork_saved_mask);
++
++ VG_(do_atfork_pre)(tid);
++
++ res = VG_(do_syscall0)( __NR_fork );
++
++ if (!sr_isError(res)) {
++ if (sr_Res(res) == 0) {
++ /* child */
++ VG_(do_atfork_child)(tid);
++
++ /* restore signal mask */
++ VG_(sigprocmask)(VKI_SIG_SETMASK, &fork_saved_mask, NULL);
++
++ }
++ else {
++ /* parent */
++ VG_(do_atfork_parent)(tid);
++
++ if (VG_(clo_trace_syscalls))
++ VG_(printf)(" clone(fork): process %d created child %ld\n",
++ VG_(getpid)(), sr_Res(res));
++
++ /* restore signal mask */
++ VG_(sigprocmask)(VKI_SIG_SETMASK, &fork_saved_mask, NULL);
++ }
++ }
++
++ return res;
++}
++
++
++/* ---------------------------------------------------------------------
++ PRE/POST wrappers for arch-generic, NetBSD-specific syscalls
++ ------------------------------------------------------------------ */
++
++// Nb: See the comment above the generic PRE/POST wrappers in
++// m_syswrap/syswrap-generic.c for notes about how they work.
++
++#define PRE(name) DEFN_PRE_TEMPLATE(netbsd, name)
++#define POST(name) DEFN_POST_TEMPLATE(netbsd, name)
++
++// Combine two 32-bit values into a 64-bit value
++#define LOHI64(lo,hi) ( (lo) | ((ULong)(hi) << 32) )
++
++PRE(sys_fork)
++{
++ PRINT("sys_fork ()");
++ PRE_REG_READ0(int, "fork");
++
++ SET_STATUS_from_SysRes( ML_(do_fork)(tid) );
++ if (SUCCESS) {
++ /* Thread creation was successful; let the child have the chance
++ to run */
++ *flags |= SfYieldAfter;
++ }
++}
++
++PRE(sys_vfork)
++{
++ PRINT("sys_vfork ()");
++ PRE_REG_READ0(int, "vfork");
++
++ /* Pretend vfork == fork. Not true, but will have to do. */
++ SET_STATUS_from_SysRes( ML_(do_fork)(tid) );
++ if (SUCCESS) {
++ /* Thread creation was successful; let the child have the chance
++ to run */
++ *flags |= SfYieldAfter;
++ }
++}
++
++#if 0
++PRE(sys_socket)
++{
++ PRINT("sys_socket ( %ld, %ld, %ld )",ARG1,ARG2,ARG3);
++ PRE_REG_READ3(long, "socket", int, domain, int, type, int, protocol);
++}
++POST(sys_socket)
++{
++ SysRes r;
++ vg_assert(SUCCESS);
++ r = ML_(generic_POST_sys_socket)(tid, VG_(mk_SysRes_Success)(RES));
++ SET_STATUS_from_SysRes(r);
++}
++
++PRE(sys_setsockopt)
++{
++ PRINT("sys_setsockopt ( %ld, %ld, %ld, %#lx, %ld )",ARG1,ARG2,ARG3,ARG4,ARG5);
++ PRE_REG_READ5(long, "setsockopt",
++ int, s, int, level, int, optname,
++ const void *, optval, int, optlen);
++ ML_(generic_PRE_sys_setsockopt)(tid, ARG1,ARG2,ARG3,ARG4,ARG5);
++}
++
++PRE(sys_getsockopt)
++{
++ Addr optval_p = ARG4;
++ Addr optlen_p = ARG5;
++ PRINT("sys_getsockopt ( %ld, %ld, %ld, %#lx, %#lx )",ARG1,ARG2,ARG3,ARG4,ARG5);
++ PRE_REG_READ5(long, "getsockopt",
++ int, s, int, level, int, optname,
++ void *, optval, int, *optlen);
++ if (optval_p != (Addr)NULL) {
++ ML_(buf_and_len_pre_check) ( tid, optval_p, optlen_p,
++ "getsockopt(optval)",
++ "getsockopt(optlen)" );
++ }
++}
++POST(sys_getsockopt)
++{
++ Addr optval_p = ARG4;
++ Addr optlen_p = ARG5;
++ vg_assert(SUCCESS);
++ if (optval_p != (Addr)NULL) {
++ ML_(buf_and_len_post_check) ( tid, VG_(mk_SysRes_Success)(RES),
++ optval_p, optlen_p,
++ "getsockopt(optlen_out)" );
++ }
++}
++
++PRE(sys_connect)
++{
++ *flags |= SfMayBlock;
++ PRINT("sys_connect ( %ld, %#lx, %ld )",ARG1,ARG2,ARG3);
++ PRE_REG_READ3(long, "connect",
++ int, sockfd, struct sockaddr *, serv_addr, int, addrlen);
++ ML_(generic_PRE_sys_connect)(tid, ARG1,ARG2,ARG3);
++}
++
++PRE(sys_accept)
++{
++ *flags |= SfMayBlock;
++ PRINT("sys_accept ( %ld, %#lx, %ld )",ARG1,ARG2,ARG3);
++ PRE_REG_READ3(long, "accept",
++ int, s, struct sockaddr *, addr, int, *addrlen);
++ ML_(generic_PRE_sys_accept)(tid, ARG1,ARG2,ARG3);
++}
++POST(sys_accept)
++{
++ SysRes r;
++ vg_assert(SUCCESS);
++ r = ML_(generic_POST_sys_accept)(tid, VG_(mk_SysRes_Success)(RES),
++ ARG1,ARG2,ARG3);
++ SET_STATUS_from_SysRes(r);
++}
++
++PRE(sys_sendto)
++{
++ *flags |= SfMayBlock;
++ PRINT("sys_sendto ( %ld, %#lx, %ld, %lu, %#lx, %ld )",ARG1,ARG2,ARG3,ARG4,ARG5,ARG6);
++ PRE_REG_READ6(long, "sendto",
++ int, s, const void *, msg, int, len,
++ unsigned int, flags,
++ const struct sockaddr *, to, int, tolen);
++ ML_(generic_PRE_sys_sendto)(tid, ARG1,ARG2,ARG3,ARG4,ARG5,ARG6);
++}
++
++PRE(sys_recvfrom)
++{
++ *flags |= SfMayBlock;
++ PRINT("sys_recvfrom ( %ld, %#lx, %ld, %lu, %#lx, %#lx )",ARG1,ARG2,ARG3,ARG4,ARG5,ARG6);
++ PRE_REG_READ6(long, "recvfrom",
++ int, s, void *, buf, int, len, unsigned int, flags,
++ struct sockaddr *, from, int *, fromlen);
++ ML_(generic_PRE_sys_recvfrom)(tid, ARG1,ARG2,ARG3,ARG4,ARG5,ARG6);
++}
++POST(sys_recvfrom)
++{
++ vg_assert(SUCCESS);
++ ML_(generic_POST_sys_recvfrom)(tid, VG_(mk_SysRes_Success)(RES),
++ ARG1,ARG2,ARG3,ARG4,ARG5,ARG6);
++}
++
++PRE(sys_sendmsg)
++{
++ *flags |= SfMayBlock;
++ PRINT("sys_sendmsg ( %ld, %#lx, %ld )",ARG1,ARG2,ARG3);
++ PRE_REG_READ3(long, "sendmsg",
++ int, s, const struct msghdr *, msg, int, flags);
++ ML_(generic_PRE_sys_sendmsg)(tid, "sendmsg", (struct vki_msghdr *)ARG2);
++}
++
++PRE(sys_recvmsg)
++{
++ *flags |= SfMayBlock;
++ PRINT("sys_recvmsg ( %ld, %#lx, %ld )",ARG1,ARG2,ARG3);
++ PRE_REG_READ3(long, "recvmsg", int, s, struct msghdr *, msg, int, flags);
++ ML_(generic_PRE_sys_recvmsg)(tid, "recvmsg", (struct vki_msghdr *)ARG2);
++}
++POST(sys_recvmsg)
++{
++
++ ML_(generic_POST_sys_recvmsg)(tid, "recvmsg", (struct vki_msghdr *)ARG2, RES);
++}
++
++PRE(sys_shutdown)
++{
++ *flags |= SfMayBlock;
++ PRINT("sys_shutdown ( %ld, %ld )",ARG1,ARG2);
++ PRE_REG_READ2(int, "shutdown", int, s, int, how);
++}
++
++PRE(sys_bind)
++{
++ PRINT("sys_bind ( %ld, %#lx, %ld )",ARG1,ARG2,ARG3);
++ PRE_REG_READ3(long, "bind",
++ int, sockfd, struct sockaddr *, my_addr, int, addrlen);
++ ML_(generic_PRE_sys_bind)(tid, ARG1,ARG2,ARG3);
++}
++
++PRE(sys_listen)
++{
++ PRINT("sys_listen ( %ld, %ld )",ARG1,ARG2);
++ PRE_REG_READ2(long, "listen", int, s, int, backlog);
++}
++
++PRE(sys_getsockname)
++{
++ PRINT("sys_getsockname ( %ld, %#lx, %#lx )",ARG1,ARG2,ARG3);
++ PRE_REG_READ3(long, "getsockname",
++ int, s, struct sockaddr *, name, int *, namelen);
++ ML_(generic_PRE_sys_getsockname)(tid, ARG1,ARG2,ARG3);
++}
++POST(sys_getsockname)
++{
++ vg_assert(SUCCESS);
++ ML_(generic_POST_sys_getsockname)(tid, VG_(mk_SysRes_Success)(RES),
++ ARG1,ARG2,ARG3);
++}
++
++PRE(sys_getpeername)
++{
++ PRINT("sys_getpeername ( %ld, %#lx, %#lx )",ARG1,ARG2,ARG3);
++ PRE_REG_READ3(long, "getpeername",
++ int, s, struct sockaddr *, name, int *, namelen);
++ ML_(generic_PRE_sys_getpeername)(tid, ARG1,ARG2,ARG3);
++}
++POST(sys_getpeername)
++{
++ vg_assert(SUCCESS);
++ ML_(generic_POST_sys_getpeername)(tid, VG_(mk_SysRes_Success)(RES),
++ ARG1,ARG2,ARG3);
++}
++
++PRE(sys_socketpair)
++{
++ PRINT("sys_socketpair ( %ld, %ld, %ld, %#lx )",ARG1,ARG2,ARG3,ARG4);
++ PRE_REG_READ4(long, "socketpair",
++ int, d, int, type, int, protocol, int *, sv);
++ ML_(generic_PRE_sys_socketpair)(tid, ARG1,ARG2,ARG3,ARG4);
++}
++POST(sys_socketpair)
++{
++ vg_assert(SUCCESS);
++ ML_(generic_POST_sys_socketpair)(tid, VG_(mk_SysRes_Success)(RES),
++ ARG1,ARG2,ARG3,ARG4);
++}
++
++/* ---------------------------------------------------------------------
++   mount wrappers
++ ------------------------------------------------------------------ */
++
++PRE(sys_mount)
++{
++ // Nb: depending on 'flags', the 'type' and 'data' args may be ignored.
++ // We are conservative and check everything, except the memory pointed to
++ // by 'data'.
++ *flags |= SfMayBlock;
++ PRINT( "sys_mount( %#lx, %#lx, %ld, %#lx )" ,ARG1,ARG2,ARG3,ARG4);
++ PRE_REG_READ4(long, "mount",
++ char *, type, char *, path, int, flags,
++ void *, data);
++ PRE_MEM_RASCIIZ( "mount(type)", ARG1);
++ PRE_MEM_RASCIIZ( "mount(path)", ARG2);
++}
++
++PRE(sys_unmount)
++{
++ PRINT("sys_umount( %#lx, %ld )", ARG1, ARG2);
++ PRE_REG_READ2(long, "unmount", char *, path, int, flags);
++ PRE_MEM_RASCIIZ( "unmount(path)", ARG1);
++}
++
++/* ---------------------------------------------------------------------
++ 16- and 32-bit uid/gid wrappers
++ ------------------------------------------------------------------ */
++
++#if 0
++PRE(sys_setfsuid)
++{
++ PRINT("sys_setfsuid ( %ld )", ARG1);
++ PRE_REG_READ1(long, "setfsuid", vki_uid_t, uid);
++}
++
++PRE(sys_setfsgid)
++{
++ PRINT("sys_setfsgid ( %ld )", ARG1);
++ PRE_REG_READ1(long, "setfsgid", vki_gid_t, gid);
++}
++#endif
++
++PRE(sys_setresuid)
++{
++ PRINT("sys_setresuid ( %ld, %ld, %ld )", ARG1, ARG2, ARG3);
++ PRE_REG_READ3(long, "setresuid",
++ vki_uid_t, ruid, vki_uid_t, euid, vki_uid_t, suid);
++}
++
++PRE(sys_getresuid)
++{
++ PRINT("sys_getresuid ( %#lx, %#lx, %#lx )", ARG1,ARG2,ARG3);
++ PRE_REG_READ3(long, "getresuid",
++ vki_uid_t *, ruid, vki_uid_t *, euid, vki_uid_t *, suid);
++ PRE_MEM_WRITE( "getresuid(ruid)", ARG1, sizeof(vki_uid_t) );
++ PRE_MEM_WRITE( "getresuid(euid)", ARG2, sizeof(vki_uid_t) );
++ PRE_MEM_WRITE( "getresuid(suid)", ARG3, sizeof(vki_uid_t) );
++}
++
++POST(sys_getresuid)
++{
++ vg_assert(SUCCESS);
++ if (RES == 0) {
++ POST_MEM_WRITE( ARG1, sizeof(vki_uid_t) );
++ POST_MEM_WRITE( ARG2, sizeof(vki_uid_t) );
++ POST_MEM_WRITE( ARG3, sizeof(vki_uid_t) );
++ }
++}
++
++PRE(sys_setresgid)
++{
++ PRINT("sys_setresgid ( %ld, %ld, %ld )", ARG1, ARG2, ARG3);
++ PRE_REG_READ3(long, "setresgid",
++ vki_gid_t, rgid, vki_gid_t, egid, vki_gid_t, sgid);
++}
++
++PRE(sys_getresgid)
++{
++ PRINT("sys_getresgid ( %#lx, %#lx, %#lx )", ARG1,ARG2,ARG3);
++ PRE_REG_READ3(long, "getresgid",
++ vki_gid_t *, rgid, vki_gid_t *, egid, vki_gid_t *, sgid);
++ PRE_MEM_WRITE( "getresgid(rgid)", ARG1, sizeof(vki_gid_t) );
++ PRE_MEM_WRITE( "getresgid(egid)", ARG2, sizeof(vki_gid_t) );
++ PRE_MEM_WRITE( "getresgid(sgid)", ARG3, sizeof(vki_gid_t) );
++}
++
++POST(sys_getresgid)
++{
++ vg_assert(SUCCESS);
++ if (RES == 0) {
++ POST_MEM_WRITE( ARG1, sizeof(vki_gid_t) );
++ POST_MEM_WRITE( ARG2, sizeof(vki_gid_t) );
++ POST_MEM_WRITE( ARG3, sizeof(vki_gid_t) );
++ }
++}
++
++/* ---------------------------------------------------------------------
++ miscellaneous wrappers
++ ------------------------------------------------------------------ */
++
++#if 0
++PRE(sys_exit_group)
++{
++ ThreadId t;
++ ThreadState* tst;
++
++ PRINT("exit_group( %ld )", ARG1);
++ PRE_REG_READ1(void, "exit_group", int, exit_code);
++
++ tst = VG_(get_ThreadState)(tid);
++
++ /* A little complex; find all the threads with the same threadgroup
++ as this one (including this one), and mark them to exit */
++ for (t = 1; t < VG_N_THREADS; t++) {
++ if ( /* not alive */
++ VG_(threads)[t].status == VgTs_Empty
++ ||
++ /* not our group */
++ VG_(threads)[t].os_state.threadgroup != tst->os_state.threadgroup
++ )
++ continue;
++
++ VG_(threads)[t].exitreason = VgSrc_ExitSyscall;
++ VG_(threads)[t].os_state.exitcode = ARG1;
++
++ if (t != tid)
++ VG_(get_thread_out_of_syscall)(t); /* unblock it, if blocked */
++ }
++
++ /* We have to claim the syscall already succeeded. */
++ SET_STATUS_Success(0);
++}
++#endif
++
++#endif
++/* On netbsd, if any thread calls exit(2), then they are all shut down, pretty
++ * much like linux's exit_group().
++ */
++PRE(sys_exit)
++{
++ ThreadId t;
++
++ PRINT("exit( %ld )", ARG1);
++ PRE_REG_READ1(void, "exit", int, status);
++
++ /* Mark all threads (including this one) to exit. */
++ for (t = 1; t < VG_N_THREADS; t++) {
++ if ( /* not alive */ VG_(threads)[t].status == VgTs_Empty )
++ continue;
++
++ VG_(threads)[t].exitreason = VgSrc_ExitThread;
++ VG_(threads)[t].os_state.exitcode = ARG1;
++
++ if (t != tid)
++ VG_(get_thread_out_of_syscall)(t); /* unblock it, if blocked */
++ }
++
++ /* We have to claim the syscall already succeeded. */
++ SET_STATUS_Success(0);
++}
++
++#if 0
++PRE(sys_getlogin)
++{
++ PRINT("sys_getlogin ( %#lx, %ld )",ARG1,ARG2);
++ PRE_REG_READ2(long, "getlogin",
++ char *, buf, int, len);
++ PRE_MEM_WRITE( "getlogin(buf, len)", ARG1, ARG2 );
++}
++POST(sys_getlogin)
++{
++ if (ARG1 != 0) {
++ POST_MEM_WRITE( ARG1, ARG2 );
++ }
++}
++PRE(sys_setlogin)
++{
++ PRINT("sys_setlogin ( %#lx )",ARG1);
++ PRE_REG_READ1(long, "setlogin", char *, buf);
++ PRE_MEM_RASCIIZ( "setlogin(buf)", ARG1 );
++}
++PRE(sys_mkfifo)
++{
++ PRINT("sys_mkfifo ( %#lx(%s), 0x%lx, 0x%lx )", ARG1, (char *)ARG1, ARG2, ARG3 );
++ PRE_REG_READ2(long, "mkfifo", const char *, pathname, int, mode);
++ PRE_MEM_RASCIIZ( "mkfifo(pathname)", ARG1 );
++}
++
++/* int quotactl(const char *path, int cmd, int id, void *addr); */
++
++PRE(sys_quotactl)
++{
++ PRINT("sys_quotactl (%#lx, %ld, %ld, %#lx )", ARG1,ARG2,ARG3, ARG4);
++ PRE_REG_READ4(long, "quotactl",
++ const char *, path, int, cmd, int, id,
++ void *, addr);
++ PRE_MEM_RASCIIZ( "quotactl(path)", ARG1 );
++}
++
++/* int getdomainname(char *domainname, int len); */
++PRE(sys_getdomainname)
++{
++ PRINT("sys_getdomainname ( %#lx, %ld )",ARG1,ARG2);
++ PRE_REG_READ2(long, "getdomainname",
++ char *, buf, int, len);
++ PRE_MEM_WRITE( "getdomainname(buf, len)", ARG1, ARG2 );
++}
++POST(sys_getdomainname)
++{
++ if (ARG1 != 0) {
++ POST_MEM_WRITE( ARG1, ARG2 );
++ }
++}
++/* int setdomainname(char *domainname, int len); */
++PRE(sys_setdomainname)
++{
++ PRINT("sys_setdomainname ( %#lx )",ARG1);
++ PRE_REG_READ1(long, "setdomainname", char *, buf);
++ PRE_MEM_RASCIIZ( "setdomainname(buf)", ARG1 );
++}
++
++PRE(sys_uname)
++{
++ PRINT("sys_uname ( %#lx )", ARG1);
++// PRE_REG_READ1(long, "uname", struct utsname *, buf);
++// PRE_MEM_WRITE( "uname(buf)", ARG1, sizeof(struct vki_utsname) );
++}
++
++POST(sys_uname)
++{
++ if (ARG1 != 0) {
++ POST_MEM_WRITE( ARG1, sizeof(struct vki_utsname) );
++ }
++}
++
++PRE(sys_lstat)
++{
++ PRINT("sys_lstat ( %#lx(%s), %#lx )",ARG1,(char *)ARG1,ARG2);
++ PRE_REG_READ2(long, "lstat", char *, file_name, struct stat *, buf);
++ PRE_MEM_RASCIIZ( "lstat(file_name)", ARG1 );
++ PRE_MEM_WRITE( "lstat(buf)", ARG2, sizeof(struct vki_stat) );
++}
++
++POST(sys_lstat)
++{
++ vg_assert(SUCCESS);
++ if (RES == 0) {
++ POST_MEM_WRITE( ARG2, sizeof(struct vki_stat) );
++ }
++}
++
++PRE(sys_stat)
++{
++ PRINT("sys_stat ( %#lx(%s), %#lx )",ARG1,(char *)ARG1,ARG2);
++ PRE_REG_READ2(long, "stat", char *, file_name, struct stat *, buf);
++ PRE_MEM_RASCIIZ( "stat(file_name)", ARG1 );
++ PRE_MEM_WRITE( "stat(buf)", ARG2, sizeof(struct vki_stat) );
++}
++
++POST(sys_stat)
++{
++ POST_MEM_WRITE( ARG2, sizeof(struct vki_stat) );
++}
++
++PRE(sys_fstat)
++{
++ PRINT("sys_fstat ( %ld, %#lx )",ARG1,ARG2);
++ PRE_REG_READ2(long, "fstat", unsigned long, fd, struct stat *, buf);
++ PRE_MEM_WRITE( "fstat(buf)", ARG2, sizeof(struct vki_stat) );
++}
++
++POST(sys_fstat)
++{
++ POST_MEM_WRITE( ARG2, sizeof(struct vki_stat) );
++}
++
++PRE(sys_pathconf)
++{
++ PRINT("sys_pathconf ( %#lx(%s), %ld )",ARG1,(char *)ARG1,ARG2);
++ PRE_REG_READ2(long, "pathconf", char *, file_name, int, name);
++ PRE_MEM_RASCIIZ( "pathconf(file_name)", ARG1 );
++}
++
++PRE(sys_fpathconf)
++{
++ PRINT("sys_fpathconf ( %ld, %ld )",ARG1,ARG2);
++ PRE_REG_READ2(long, "fpathconf", int, fd, int, name);
++}
++
++PRE(sys_lchmod)
++{
++ PRINT("sys_lchmod ( %#lx(%s), %ld )", ARG1,(char *)ARG1,ARG2);
++ PRE_REG_READ2(long, "lchmod", const char *, path, vki_mode_t, mode);
++ PRE_MEM_RASCIIZ( "lchmod(path)", ARG1 );
++}
++
++PRE(sys_issetugid)
++{
++ PRINT("sys_issetugid ()");
++ PRE_REG_READ0(long, "issetugid");
++}
++
++PRE(sys_revoke)
++{
++   /* revoke(2) takes a pathname; the old body was a vhangup copy-paste. */
++   PRINT("sys_revoke ( %#lx(%s) )", ARG1, (char *)ARG1);
++   PRE_REG_READ1(long, "revoke", const char *, path);
++   PRE_MEM_RASCIIZ( "revoke(path)", ARG1 );
++}
++PRE(sys_undelete)
++{
++ *flags |= SfMayBlock;
++ PRINT("sys_undelete ( %#lx(%s) )", ARG1,(char *)ARG1);
++ PRE_REG_READ1(long, "undelete", const char *, pathname);
++ PRE_MEM_RASCIIZ( "undelete(pathname)", ARG1 );
++}
++PRE(sys_yield)
++{
++ *flags |= SfMayBlock;
++ PRINT("yield()");
++ PRE_REG_READ0(long, "yield");
++}
++
++PRE(sys_sched_yield)
++{
++   *flags |= SfMayBlock;
++   PRINT("sched_yield()");
++   /* declare zero register args, same as sys_yield above */
++   PRE_REG_READ0(long, "sched_yield");
++}
++
++#if 0
++PRE(sys_sysinfo)
++{
++ PRINT("sys_sysinfo ( %#lx )",ARG1);
++ PRE_REG_READ1(long, "sysinfo", struct sysinfo *, info);
++ PRE_MEM_WRITE( "sysinfo(info)", ARG1, sizeof(struct vki_sysinfo) );
++}
++
++POST(sys_sysinfo)
++{
++ POST_MEM_WRITE( ARG1, sizeof(struct vki_sysinfo) );
++}
++#endif
++
++/* int __sysctl(int *name, u_int namelen, void *old, size_t *oldlenp, void *new, size_t newlen); */
++/* ARG1 ARG2 ARG3 ARG4 ARG5 ARG6 */
++
++PRE(sys___sysctl)
++{
++ PRINT("sys_sysctl ( %#lx, %ld, %#lx, %#lx, %#lx, %ld )", ARG1,ARG2,ARG3,ARG4,ARG5,ARG6 );
++ PRE_REG_READ6(long, "__sysctl", int *, name, unsigned int, namelen, void *, old,
++ vki_size_t *, oldlenp, void *, new, vki_size_t, newlen);
++ PRE_MEM_READ("sysctl(name)", ARG1, ARG2 * sizeof(int));
++ if (ARG5 != (UWord)NULL)
++ PRE_MEM_READ("sysctl(new)", (Addr)ARG5, ARG6);
++ if (ARG4 != (UWord)NULL) {
++ if (ARG3 != (UWord)NULL) {
++ PRE_MEM_READ("sysctl(oldlenp)", (Addr)ARG4, sizeof(vki_size_t));
++ PRE_MEM_WRITE("sysctl(oldval)", (Addr)ARG3, *(vki_size_t *)ARG4);
++ }
++ PRE_MEM_WRITE("sysctl(oldlenp)", (Addr)ARG4, sizeof(vki_size_t));
++ }
++}
++POST(sys___sysctl)
++{
++ if (ARG4 != (UWord)NULL) {
++ POST_MEM_WRITE((Addr)ARG4, sizeof(vki_size_t));
++ if (ARG3 != (UWord)NULL)
++ POST_MEM_WRITE((Addr)ARG3, *(vki_size_t *)ARG4);
++ }
++}
++
++PRE(sys_sendfile)
++{
++   *flags |= SfMayBlock;
++#if defined(VGP_x86_netbsd)
++   PRINT("sys_sendfile ( %ld, %ld, %llu, %ld, %#lx, %#lx, %lu )", ARG1,ARG2,LOHI64(ARG3,ARG4),ARG5,ARG6,ARG7,ARG8);
++   PRE_REG_READ8(int, "sendfile",
++      int, fd, int, s, unsigned int, offset_low,
++      unsigned int, offset_high, size_t, nbytes,
++      void *, hdtr, vki_off_t *, sbytes, int, flags);
++#  define SF_ARG_SBYTES ARG7
++#elif defined(VGP_amd64_netbsd)
++   PRINT("sys_sendfile ( %ld, %ld, %lu, %ld, %#lx, %#lx, %lu )", ARG1,ARG2,ARG3,ARG4,ARG5,ARG6,ARG7);
++   PRE_REG_READ7(int, "sendfile",
++      int, fd, int, s, vki_off_t, offset, size_t, nbytes,
++      void *, hdtr, vki_off_t *, sbytes, int, flags);
++#  define SF_ARG_SBYTES ARG6
++#else
++#  error Unknown platform
++#endif
++   /* SF_ARG_SBYTES is the sbytes out-pointer, not the offset */
++   if (SF_ARG_SBYTES != 0)
++      PRE_MEM_WRITE( "sendfile(sbytes)", SF_ARG_SBYTES, sizeof(vki_off_t) );
++}
++POST(sys_sendfile)
++{
++ if (SF_ARG_SBYTES != 0 ) {
++ POST_MEM_WRITE( SF_ARG_SBYTES, sizeof( vki_off_t ) );
++ }
++}
++#undef SF_ARG_SBYTES
++
++/* int getdirentries(int fd, char *buf, u_int count, long *basep); */
++PRE(sys_getdirentries)
++{
++   *flags |= SfMayBlock;
++   /* basep (ARG4) is written back by the kernel; see POST below */
++   PRINT("sys_getdirentries ( %ld, %#lx, %ld, %#lx )", ARG1,ARG2,ARG3,ARG4);
++   PRE_REG_READ4(long, "getdirentries",
++                 unsigned int, fd, struct dirent *, dirp,
++                 unsigned int, count, long *, basep);
++   PRE_MEM_WRITE( "getdirentries(dirp)", ARG2, ARG3 );
++}
++
++POST(sys_getdirentries)
++{
++ vg_assert(SUCCESS);
++ if (RES > 0) {
++ POST_MEM_WRITE( ARG2, RES );
++ if ( ARG4 != 0 )
++ POST_MEM_WRITE( ARG4, sizeof (long));
++ }
++}
++
++PRE(sys_seteuid)
++{
++ PRINT("sys_seteuid ( %ld )", ARG1);
++ PRE_REG_READ1(long, "seteuid", vki_uid_t, uid);
++}
++
++PRE(sys_setegid)
++{
++ PRINT("sys_setegid ( %ld )", ARG1);
++ PRE_REG_READ1(long, "setegid", vki_gid_t, gid);
++}
++
++PRE(sys_lutimes)
++{
++ PRINT("sys_lutimes ( %#lx(%s), %#lx )", ARG1,(char *)ARG1,ARG2);
++ PRE_REG_READ2(long, "lutimes", char *, filename, struct timeval *, tvp);
++ PRE_MEM_RASCIIZ( "lutimes(filename)", ARG1 );
++ if (ARG2 != 0)
++ PRE_MEM_READ( "lutimes(tvp)", ARG2, sizeof(struct vki_timeval) );
++}
++
++PRE(sys_futimes)
++{
++   PRINT("sys_futimes ( %ld, %#lx )", ARG1,ARG2);
++   PRE_REG_READ2(long, "futimes", int, fd, struct timeval *, tvp);
++   if (ARG2 != 0)
++      PRE_MEM_READ( "futimes(tvp)", ARG2, sizeof(struct vki_timeval) );
++}
++
++PRE(sys_utrace)
++{
++   PRINT("sys_utrace ( %#lx, %lu )", ARG1, ARG2);
++   PRE_REG_READ2(long, "utrace", const void *, buf, vki_size_t, len);
++   /* buf is ARG1 and len is ARG2, matching the declaration above */
++   PRE_MEM_READ( "utrace(buf,len)", ARG1, ARG2 );
++}
++
++PRE(sys_getdtablesize)
++{
++ PRINT("sys_getdtablesize ( )");
++ PRE_REG_READ0(long, "getdtablesize");
++}
++
++PRE(sys_kqueue)
++{
++ PRINT("sys_kqueue ()");
++}
++POST(sys_kqueue)
++{
++ if (!ML_(fd_allowed)(RES, "kqueue", tid, True)) {
++ VG_(close)(RES);
++ SET_STATUS_Failure( VKI_EMFILE );
++ } else {
++ if (VG_(clo_track_fds)) {
++ ML_(record_fd_open_nameless)(tid, RES);
++ }
++ }
++}
++
++PRE(sys_kevent)
++{
++ /* struct kevent {
++ uintptr_t ident; -- identifier for this event
++ short filter; -- filter for event
++ u_short flags; -- action flags for kqueue
++ u_int fflags; -- filter flag value
++ intptr_t data; -- filter data value
++ void *udata; -- opaque user data identifier
++ };
++ int kevent(int kq, const struct kevent *changelist, int nchanges,
++ struct kevent *eventlist, int nevents,
++ const struct timespec *timeout);
++ */
++ *flags |= SfMayBlock;
++ PRINT("sys_kevent ( %ld, %#lx, %ld, %#lx, %ld, %#lx )\n", ARG1,ARG2,ARG3,ARG4,ARG5,ARG6);
++ PRE_REG_READ6(long, "kevent",
++ int, fd, struct vki_kevent *, newev, int, num_newev,
++ struct vki_kevent *, ret_ev, int, num_retev,
++ struct timespec *, timeout);
++ if (ARG2 != 0 && ARG3 != 0)
++ PRE_MEM_READ( "kevent(changeevent)", ARG2, sizeof(struct vki_kevent)*ARG3 );
++ if (ARG4 != 0 && ARG5 != 0)
++ PRE_MEM_WRITE( "kevent(events)", ARG4, sizeof(struct vki_kevent)*ARG5);
++ if (ARG6 != 0)
++ PRE_MEM_READ( "kevent(timeout)",
++ ARG6, sizeof(struct vki_timespec));
++}
++
++POST(sys_kevent)
++{
++ vg_assert(SUCCESS);
++ if (RES > 0) {
++ if (ARG4 != 0)
++ POST_MEM_WRITE( ARG4, sizeof(struct vki_kevent)*RES) ;
++ }
++}
++
++PRE(sys___getcwd)
++{
++ PRINT("sys___getcwd ( %#lx, %lu )", ARG1,ARG2);
++ PRE_REG_READ2(long, "__getcwd", char *, buf, unsigned int, size);
++ PRE_MEM_WRITE( "__getcwd(buf)", ARG1, ARG2 );
++}
++
++POST(sys___getcwd)
++{
++ vg_assert(SUCCESS);
++ if (RES == 0) {
++ // QQQ it is unclear if this is legal or not, but the
++ // QQQ kernel just wrote it there...
++ // QQQ Why oh why didn't phk return the length from __getcwd()?
++ UInt len = VG_(strlen) ( (char *)ARG1 ) + 1;
++ POST_MEM_WRITE( ARG1, len );
++ }
++}
++
++// getfsstat() takes a length in bytes, but returns the number of structures
++// returned, not a length.
++PRE(sys_getfsstat4)
++{
++ PRINT("sys_getfsstat ( %#lx, %ld, %ld )",ARG1,ARG2,ARG3);
++ PRE_REG_READ3(long, "getfsstat", struct vki_statfs4 *, buf, long, len, int, flags);
++ PRE_MEM_WRITE( "getfsstat(buf)", ARG1, ARG2 );
++}
++POST(sys_getfsstat4)
++{
++ vg_assert(SUCCESS);
++ if (RES > 0) {
++ POST_MEM_WRITE( ARG1, RES * sizeof(struct vki_statfs4) );
++ }
++}
++
++PRE(sys_getfsstat)
++{
++ PRINT("sys_getfsstat ( %#lx, %ld, %ld )",ARG1,ARG2,ARG3);
++ PRE_REG_READ3(long, "getfsstat", struct vki_statfs *, buf, long, len, int, flags);
++ PRE_MEM_WRITE( "getfsstat(buf)", ARG1, ARG2 );
++}
++POST(sys_getfsstat)
++{
++ vg_assert(SUCCESS);
++ if (RES > 0) {
++ POST_MEM_WRITE( ARG1, RES * sizeof(struct vki_statfs) );
++ }
++}
++
++PRE(sys_fhopen)
++{
++   PRINT("sys_fhopen ( %#lx, %ld )",ARG1,ARG2);
++   PRE_REG_READ2(long, "fhopen",
++                 struct fhandle *, fhp, int, flags);
++   PRE_MEM_READ( "fhopen(fhp)", ARG1, sizeof(struct vki_fhandle) );
++
++   /* Otherwise handle normally */
++   *flags |= SfMayBlock;
++}
++
++POST(sys_fhopen)
++{
++ vg_assert(SUCCESS);
++ if (!ML_(fd_allowed)(RES, "fhopen", tid, True)) {
++ VG_(close)(RES);
++ SET_STATUS_Failure( VKI_EMFILE );
++ } else {
++ if (VG_(clo_track_fds))
++ ML_(record_fd_open_nameless)(tid, RES);
++ }
++}
++
++PRE(sys_fhstat)
++{
++ PRINT("sys_fhstat ( %#lx, %#lx )",ARG1,ARG2);
++ PRE_REG_READ2(long, "fhstat", struct fhandle *, fhp, struct stat *, buf);
++ PRE_MEM_READ( "fhstat(fhp)", ARG1, sizeof(struct vki_fhandle) );
++ PRE_MEM_WRITE( "fhstat(buf)", ARG2, sizeof(struct vki_stat) );
++}
++
++POST(sys_fhstat)
++{
++ POST_MEM_WRITE( ARG2, sizeof(struct vki_stat) );
++}
++
++PRE(sys_fhstatfs)
++{
++   PRINT("sys_fhstatfs ( %#lx, %#lx )",ARG1,ARG2);
++   PRE_REG_READ2(long, "fhstatfs",
++                 struct fhandle *, fhp, struct statfs *, buf);
++   PRE_MEM_READ( "fhstatfs(fhp)", ARG1, sizeof(struct vki_fhandle) );
++   PRE_MEM_WRITE( "fhstatfs(buf)", ARG2, sizeof(struct vki_statfs) );
++}
++
++POST(sys_fhstatfs)
++{
++ POST_MEM_WRITE( ARG2, sizeof(struct vki_statfs) );
++}
++
++PRE(sys_fhstatfs6)
++{
++   PRINT("sys_fhstatfs6 ( %#lx, %#lx )",ARG1,ARG2);
++   PRE_REG_READ2(long, "fhstatfs6",
++                 struct fhandle *, fhp, struct statfs *, buf);
++   PRE_MEM_READ( "fhstatfs6(fhp)", ARG1, sizeof(struct vki_fhandle) );
++   PRE_MEM_WRITE( "fhstatfs6(buf)", ARG2, sizeof(struct vki_statfs6) );
++}
++
++POST(sys_fhstatfs6)
++{
++ POST_MEM_WRITE( ARG2, sizeof(struct vki_statfs6) );
++}
++
++PRE(sys_fstatfs6)
++{
++ PRINT("sys_fstatfs6 ( %ld, %#lx )",ARG1,ARG2);
++ PRE_REG_READ2(long, "fstatfs6",
++ unsigned int, fd, struct statfs *, buf);
++ PRE_MEM_WRITE( "fstatfs6(buf)", ARG2, sizeof(struct vki_statfs6) );
++}
++
++POST(sys_fstatfs6)
++{
++ POST_MEM_WRITE( ARG2, sizeof(struct vki_statfs6) );
++}
++
++PRE(sys_statfs6)
++{
++   PRINT("sys_statfs6 ( %#lx(%s), %#lx )",ARG1,(char *)ARG1,ARG2);
++   PRE_REG_READ2(long, "statfs6", const char *, path, struct statfs *, buf);
++   PRE_MEM_RASCIIZ( "statfs6(path)", ARG1 );
++   PRE_MEM_WRITE( "statfs6(buf)", ARG2, sizeof(struct vki_statfs6) );
++}
++POST(sys_statfs6)
++{
++ POST_MEM_WRITE( ARG2, sizeof(struct vki_statfs6) );
++}
++
++/* ---------------------------------------------------------------------
++ kld* wrappers
++ ------------------------------------------------------------------ */
++
++PRE(sys_kldload)
++{
++ PRINT("sys_kldload ( %#lx(%s) )", ARG1, (char *)ARG1);
++ PRE_REG_READ1(int, "kldload", const char *, "file");
++
++ PRE_MEM_RASCIIZ( "kldload(file)", ARG1 );
++}
++
++PRE(sys_kldunload)
++{
++ PRINT("sys_kldunload ( %ld )", ARG1);
++ PRE_REG_READ1(int, "kldunload", int, "fileid");
++}
++
++PRE(sys_kldfind)
++{
++ PRINT("sys_kldfind ( %#lx(%s) )", ARG1, (char *)ARG1);
++ PRE_REG_READ1(int, "kldfind", const char *, "file");
++
++ PRE_MEM_RASCIIZ( "kldfind(file)", ARG1 );
++}
++
++PRE(sys_kldnext)
++{
++ PRINT("sys_kldnext ( %ld )", ARG1);
++ PRE_REG_READ1(int, "kldnext", int, "fileid");
++}
++
++PRE(sys_kldsym)
++{
++ PRINT("sys_kldsym ( %ld, %ld, %#lx )", ARG1,ARG2,ARG3 );
++ PRE_REG_READ3(int, "kldsym", int, "fileid", int, "command", void*, "data");
++ PRE_MEM_READ( "kldsym(data)", ARG3, sizeof(struct vki_kld_sym_lookup) );
++ struct vki_kld_sym_lookup *kslp = (struct vki_kld_sym_lookup *)ARG3;
++ PRE_MEM_RASCIIZ( "kldsym(data.symname)", (Addr)kslp->symname );
++}
++POST(sys_kldsym)
++{
++ struct vki_kld_sym_lookup *kslp = (struct vki_kld_sym_lookup *)ARG3;
++ POST_MEM_WRITE( (Addr)&kslp->symvalue, sizeof(kslp->symvalue) );
++ POST_MEM_WRITE( (Addr)&kslp->symsize, sizeof(kslp->symsize) );
++}
++
++#if 0
++/* ---------------------------------------------------------------------
++ aio_* wrappers
++ ------------------------------------------------------------------ */
++
++// Nb: this wrapper has to pad/unpad memory around the syscall itself,
++// and this allows us to control exactly the code that gets run while
++// the padding is in place.
++
++PRE(sys_io_setup)
++{
++ PRINT("sys_io_setup ( %lu, %#lx )", ARG1,ARG2);
++ PRE_REG_READ2(long, "io_setup",
++ unsigned, nr_events, vki_aio_context_t *, ctxp);
++ PRE_MEM_WRITE( "io_setup(ctxp)", ARG2, sizeof(vki_aio_context_t) );
++}
++
++POST(sys_io_setup)
++{
++ SizeT size;
++ struct vki_aio_ring *r;
++
++ size = VG_PGROUNDUP(sizeof(struct vki_aio_ring) +
++ ARG1*sizeof(struct vki_io_event));
++ r = *(struct vki_aio_ring **)ARG2;
++ vg_assert(ML_(valid_client_addr)((Addr)r, size, tid, "io_setup"));
++
++ ML_(notify_aspacem_and_tool_of_mmap)( (Addr)r, size,
++ VKI_PROT_READ | VKI_PROT_WRITE,
++ VKI_MAP_ANONYMOUS, -1, 0 );
++
++ POST_MEM_WRITE( ARG2, sizeof(vki_aio_context_t) );
++}
++
++// Nb: This wrapper is "Special" because we need 'size' to do the unmap
++// after the syscall. We must get 'size' from the aio_ring structure,
++// before the syscall, while the aio_ring structure still exists. (And we
++// know that we must look at the aio_ring structure because Tom inspected the
++// kernel and glibc sources to see what they do, yuk.)
++//
++// XXX This segment can be implicitly unmapped when aio
++// file-descriptors are closed...
++PRE(sys_io_destroy)
++{
++ struct vki_aio_ring *r;
++ SizeT size;
++
++ PRINT("sys_io_destroy ( %llu )", (ULong)ARG1);
++ PRE_REG_READ1(long, "io_destroy", vki_aio_context_t, ctx);
++
++ // If we are going to seg fault (due to a bogus ARG1) do it as late as
++ // possible...
++ r = (struct vki_aio_ring *)ARG1;
++ size = VG_PGROUNDUP(sizeof(struct vki_aio_ring) +
++ r->nr*sizeof(struct vki_io_event));
++
++ SET_STATUS_from_SysRes( VG_(do_syscall1)(SYSNO, ARG1) );
++
++ if (SUCCESS && RES == 0) {
++ Bool d = VG_(am_notify_munmap)( ARG1, size );
++ VG_TRACK( die_mem_munmap, ARG1, size );
++ if (d)
++ VG_(discard_translations)( (Addr64)ARG1, (ULong)size,
++ "PRE(sys_io_destroy)" );
++ }
++}
++
++PRE(sys_io_getevents)
++{
++ *flags |= SfMayBlock;
++ PRINT("sys_io_getevents ( %llu, %lld, %lld, %#lx, %#lx )",
++ (ULong)ARG1,(Long)ARG2,(Long)ARG3,ARG4,ARG5);
++ PRE_REG_READ5(long, "io_getevents",
++ vki_aio_context_t, ctx_id, long, min_nr, long, nr,
++ struct io_event *, events,
++ struct timespec *, timeout);
++ if (ARG3 > 0)
++ PRE_MEM_WRITE( "io_getevents(events)",
++ ARG4, sizeof(struct vki_io_event)*ARG3 );
++ if (ARG5 != 0)
++ PRE_MEM_READ( "io_getevents(timeout)",
++ ARG5, sizeof(struct vki_timespec));
++}
++POST(sys_io_getevents)
++{
++ Int i;
++ vg_assert(SUCCESS);
++ if (RES > 0) {
++ POST_MEM_WRITE( ARG4, sizeof(struct vki_io_event)*RES );
++ for (i = 0; i < RES; i++) {
++ const struct vki_io_event *vev = ((struct vki_io_event *)ARG4) + i;
++ const struct vki_iocb *cb = (struct vki_iocb *)(Addr)vev->obj;
++
++ switch (cb->aio_lio_opcode) {
++ case VKI_IOCB_CMD_PREAD:
++ if (vev->result > 0)
++ POST_MEM_WRITE( cb->aio_buf, vev->result );
++ break;
++
++ case VKI_IOCB_CMD_PWRITE:
++ break;
++
++ default:
++ VG_(message)(Vg_DebugMsg,
++ "Warning: unhandled io_getevents opcode: %u\n",
++ cb->aio_lio_opcode);
++ break;
++ }
++ }
++ }
++}
++
++PRE(sys_io_submit)
++{
++ Int i;
++
++ PRINT("sys_io_submit ( %llu, %ld, %#lx )", (ULong)ARG1,ARG2,ARG3);
++ PRE_REG_READ3(long, "io_submit",
++ vki_aio_context_t, ctx_id, long, nr,
++ struct iocb **, iocbpp);
++ PRE_MEM_READ( "io_submit(iocbpp)", ARG3, ARG2*sizeof(struct vki_iocb *) );
++ if (ARG3 != 0) {
++ for (i = 0; i < ARG2; i++) {
++ struct vki_iocb *cb = ((struct vki_iocb **)ARG3)[i];
++ PRE_MEM_READ( "io_submit(iocb)", (Addr)cb, sizeof(struct vki_iocb) );
++ switch (cb->aio_lio_opcode) {
++ case VKI_IOCB_CMD_PREAD:
++ PRE_MEM_WRITE( "io_submit(PREAD)", cb->aio_buf, cb->aio_nbytes );
++ break;
++
++ case VKI_IOCB_CMD_PWRITE:
++ PRE_MEM_READ( "io_submit(PWRITE)", cb->aio_buf, cb->aio_nbytes );
++ break;
++
++ default:
++ VG_(message)(Vg_DebugMsg,"Warning: unhandled io_submit opcode: %u\n",
++ cb->aio_lio_opcode);
++ break;
++ }
++ }
++ }
++}
++
++PRE(sys_io_cancel)
++{
++ PRINT("sys_io_cancel ( %llu, %#lx, %#lx )", (ULong)ARG1,ARG2,ARG3);
++ PRE_REG_READ3(long, "io_cancel",
++ vki_aio_context_t, ctx_id, struct iocb *, iocb,
++ struct io_event *, result);
++ PRE_MEM_READ( "io_cancel(iocb)", ARG2, sizeof(struct vki_iocb) );
++ PRE_MEM_WRITE( "io_cancel(result)", ARG3, sizeof(struct vki_io_event) );
++}
++POST(sys_io_cancel)
++{
++ POST_MEM_WRITE( ARG3, sizeof(struct vki_io_event) );
++}
++
++/* ---------------------------------------------------------------------
++ inotify_* wrappers
++ ------------------------------------------------------------------ */
++
++PRE(sys_inotify_init)
++{
++ PRINT("sys_inotify_init ( )");
++ PRE_REG_READ0(long, "inotify_init");
++}
++POST(sys_inotify_init)
++{
++ vg_assert(SUCCESS);
++ if (!ML_(fd_allowed)(RES, "inotify_init", tid, True)) {
++ VG_(close)(RES);
++ SET_STATUS_Failure( VKI_EMFILE );
++ } else {
++ if (VG_(clo_track_fds))
++ ML_(record_fd_open_nameless) (tid, RES);
++ }
++}
++
++PRE(sys_inotify_add_watch)
++{
++ PRINT( "sys_inotify_add_watch ( %ld, %#lx, %lx )", ARG1,ARG2,ARG3);
++ PRE_REG_READ3(long, "inotify_add_watch", int, fd, char *, path, int, mask);
++ PRE_MEM_RASCIIZ( "inotify_add_watch(path)", ARG2 );
++}
++
++PRE(sys_inotify_rm_watch)
++{
++ PRINT( "sys_inotify_rm_watch ( %ld, %lx )", ARG1,ARG2);
++ PRE_REG_READ2(long, "inotify_rm_watch", int, fd, int, wd);
++}
++#endif
++
++/* ---------------------------------------------------------------------
++ mq_* wrappers
++ ------------------------------------------------------------------ */
++
++PRE(sys_mq_open)
++{
++ PRINT("sys_mq_open( %#lx(%s), %ld, %lld, %#lx )",
++ ARG1,(char *)ARG1,ARG2,(ULong)ARG3,ARG4);
++ PRE_REG_READ4(long, "mq_open",
++ const char *, name, int, oflag, vki_mode_t, mode,
++ struct mq_attr *, attr);
++ PRE_MEM_RASCIIZ( "mq_open(name)", ARG1 );
++ if ((ARG2 & VKI_O_CREAT) != 0 && ARG4 != 0) {
++ const struct vki_mq_attr *attr = (struct vki_mq_attr *)ARG4;
++ PRE_MEM_READ( "mq_open(attr->mq_maxmsg)",
++ (Addr)&attr->mq_maxmsg, sizeof(attr->mq_maxmsg) );
++ PRE_MEM_READ( "mq_open(attr->mq_msgsize)",
++ (Addr)&attr->mq_msgsize, sizeof(attr->mq_msgsize) );
++ }
++}
++POST(sys_mq_open)
++{
++ vg_assert(SUCCESS);
++ if (!ML_(fd_allowed)(RES, "mq_open", tid, True)) {
++ VG_(close)(RES);
++ SET_STATUS_Failure( VKI_EMFILE );
++ } else {
++ if (VG_(clo_track_fds))
++ ML_(record_fd_open_with_given_name)(tid, RES, (Char*)ARG1);
++ }
++}
++
++PRE(sys_mq_unlink)
++{
++ PRINT("sys_mq_unlink ( %#lx(%s) )", ARG1,(char *)ARG1);
++ PRE_REG_READ1(long, "mq_unlink", const char *, name);
++ PRE_MEM_RASCIIZ( "mq_unlink(name)", ARG1 );
++}
++
++#if 0
++PRE(sys_mq_timedsend)
++{
++ *flags |= SfMayBlock;
++ PRINT("sys_mq_timedsend ( %ld, %#lx, %llu, %ld, %#lx )",
++ ARG1,ARG2,(ULong)ARG3,ARG4,ARG5);
++ PRE_REG_READ5(long, "mq_timedsend",
++ vki_mqd_t, mqdes, const char *, msg_ptr, vki_size_t, msg_len,
++ unsigned int, msg_prio, const struct timespec *, abs_timeout);
++ if (!ML_(fd_allowed)(ARG1, "mq_timedsend", tid, False)) {
++ SET_STATUS_Failure( VKI_EBADF );
++ } else {
++ PRE_MEM_READ( "mq_timedsend(msg_ptr)", ARG2, ARG3 );
++ if (ARG5 != 0)
++ PRE_MEM_READ( "mq_timedsend(abs_timeout)", ARG5,
++ sizeof(struct vki_timespec) );
++ }
++}
++
++PRE(sys_mq_timedreceive)
++{
++ *flags |= SfMayBlock;
++ PRINT("sys_mq_timedreceive( %ld, %#lx, %llu, %#lx, %#lx )",
++ ARG1,ARG2,(ULong)ARG3,ARG4,ARG5);
++ PRE_REG_READ5(ssize_t, "mq_timedreceive",
++ vki_mqd_t, mqdes, char *, msg_ptr, vki_size_t, msg_len,
++ unsigned int *, msg_prio,
++ const struct timespec *, abs_timeout);
++ if (!ML_(fd_allowed)(ARG1, "mq_timedreceive", tid, False)) {
++ SET_STATUS_Failure( VKI_EBADF );
++ } else {
++ PRE_MEM_WRITE( "mq_timedreceive(msg_ptr)", ARG2, ARG3 );
++ if (ARG4 != 0)
++ PRE_MEM_WRITE( "mq_timedreceive(msg_prio)",
++ ARG4, sizeof(unsigned int) );
++ if (ARG5 != 0)
++ PRE_MEM_READ( "mq_timedreceive(abs_timeout)",
++ ARG5, sizeof(struct vki_timespec) );
++ }
++}
++POST(sys_mq_timedreceive)
++{
++ POST_MEM_WRITE( ARG2, ARG3 );
++ if (ARG4 != 0)
++ POST_MEM_WRITE( ARG4, sizeof(unsigned int) );
++}
++
++PRE(sys_mq_notify)
++{
++ PRINT("sys_mq_notify( %ld, %#lx )", ARG1,ARG2 );
++ PRE_REG_READ2(long, "mq_notify",
++ vki_mqd_t, mqdes, const struct sigevent *, notification);
++ if (!ML_(fd_allowed)(ARG1, "mq_notify", tid, False))
++ SET_STATUS_Failure( VKI_EBADF );
++ else if (ARG2 != 0)
++ PRE_MEM_READ( "mq_notify(notification)",
++ ARG2, sizeof(struct vki_sigevent) );
++}
++
++PRE(sys_mq_getsetattr)
++{
++ PRINT("sys_mq_getsetattr( %ld, %#lx, %#lx )", ARG1,ARG2,ARG3 );
++ PRE_REG_READ3(long, "mq_getsetattr",
++ vki_mqd_t, mqdes, const struct mq_attr *, mqstat,
++ struct mq_attr *, omqstat);
++ if (!ML_(fd_allowed)(ARG1, "mq_getsetattr", tid, False)) {
++ SET_STATUS_Failure( VKI_EBADF );
++ } else {
++ if (ARG2 != 0) {
++ const struct vki_mq_attr *attr = (struct vki_mq_attr *)ARG2;
++ PRE_MEM_READ( "mq_getsetattr(mqstat->mq_flags)",
++ (Addr)&attr->mq_flags, sizeof(attr->mq_flags) );
++ }
++ if (ARG3 != 0)
++ PRE_MEM_WRITE( "mq_getsetattr(omqstat)", ARG3,
++ sizeof(struct vki_mq_attr) );
++ }
++}
++POST(sys_mq_getsetattr)
++{
++ if (ARG3 != 0)
++ POST_MEM_WRITE( ARG3, sizeof(struct vki_mq_attr) );
++}
++
++#endif
++
++/* ---------------------------------------------------------------------
++ clock_* wrappers
++ ------------------------------------------------------------------ */
++
++PRE(sys_clock_settime)
++{
++ PRINT("sys_clock_settime( %ld, %#lx )", ARG1,ARG2);
++ PRE_REG_READ2(long, "clock_settime",
++ vki_clockid_t, clk_id, const struct timespec *, tp);
++ PRE_MEM_READ( "clock_settime(tp)", ARG2, sizeof(struct vki_timespec) );
++}
++
++PRE(sys_clock_gettime)
++{
++ PRINT("sys_clock_gettime( %ld, %#lx )" , ARG1,ARG2);
++ PRE_REG_READ2(long, "clock_gettime",
++ vki_clockid_t, clk_id, struct timespec *, tp);
++ PRE_MEM_WRITE( "clock_gettime(tp)", ARG2, sizeof(struct vki_timespec) );
++}
++POST(sys_clock_gettime)
++{
++ POST_MEM_WRITE( ARG2, sizeof(struct vki_timespec) );
++}
++
++PRE(sys_clock_getres)
++{
++ PRINT("sys_clock_getres( %ld, %#lx )" , ARG1,ARG2);
++ // Nb: we can't use "RES" as the param name because that's a macro
++ // defined above!
++ PRE_REG_READ2(long, "clock_getres",
++ vki_clockid_t, clk_id, struct timespec *, res);
++ if (ARG2 != 0)
++ PRE_MEM_WRITE( "clock_getres(res)", ARG2, sizeof(struct vki_timespec) );
++}
++POST(sys_clock_getres)
++{
++ if (ARG2 != 0)
++ POST_MEM_WRITE( ARG2, sizeof(struct vki_timespec) );
++}
++
++/* Disabled block: clock_nanosleep, timer_* and sched_* wrappers carried
++ over from another port but not yet wired up for NetBSD. */
++#if 0
++PRE(sys_clock_nanosleep)
++{
++ *flags |= SfMayBlock|SfPostOnFail;
++ PRINT("sys_clock_nanosleep( %ld, %ld, %#lx, %#lx )", ARG1,ARG2,ARG3,ARG4);
++ PRE_REG_READ4(int32_t, "clock_nanosleep",
++ vki_clockid_t, clkid, int, flags,
++ const struct timespec *, rqtp, struct timespec *, rmtp);
++ PRE_MEM_READ( "clock_nanosleep(rqtp)", ARG3, sizeof(struct vki_timespec) );
++ if (ARG4 != 0)
++ PRE_MEM_WRITE( "clock_nanosleep(rmtp)", ARG4, sizeof(struct vki_timespec) );
++}
++POST(sys_clock_nanosleep)
++{
++ /* rmtp is only meaningful when the sleep was interrupted (EINTR). */
++ if (ARG4 != 0 && FAILURE && RES_unchecked == VKI_EINTR)
++ POST_MEM_WRITE( ARG4, sizeof(struct vki_timespec) );
++}
++
++/* ---------------------------------------------------------------------
++ timer_* wrappers
++ ------------------------------------------------------------------ */
++
++PRE(sys_timer_create)
++{
++ PRINT("sys_timer_create( %ld, %#lx, %#lx )", ARG1,ARG2,ARG3);
++ PRE_REG_READ3(long, "timer_create",
++ vki_clockid_t, clockid, struct sigevent *, evp,
++ vki_timer_t *, timerid);
++ if (ARG2 != 0)
++ PRE_MEM_READ( "timer_create(evp)", ARG2, sizeof(struct vki_sigevent) );
++ PRE_MEM_WRITE( "timer_create(timerid)", ARG3, sizeof(vki_timer_t) );
++}
++POST(sys_timer_create)
++{
++ POST_MEM_WRITE( ARG3, sizeof(vki_timer_t) );
++}
++
++PRE(sys_timer_settime)
++{
++ PRINT("sys_timer_settime( %lld, %ld, %#lx, %#lx )", (ULong)ARG1,ARG2,ARG3,ARG4);
++ PRE_REG_READ4(long, "timer_settime",
++ vki_timer_t, timerid, int, flags,
++ const struct itimerspec *, value,
++ struct itimerspec *, ovalue);
++ PRE_MEM_READ( "timer_settime(value)", ARG3,
++ sizeof(struct vki_itimerspec) );
++ if (ARG4 != 0)
++ PRE_MEM_WRITE( "timer_settime(ovalue)", ARG4,
++ sizeof(struct vki_itimerspec) );
++}
++POST(sys_timer_settime)
++{
++ if (ARG4 != 0)
++ POST_MEM_WRITE( ARG4, sizeof(struct vki_itimerspec) );
++}
++
++PRE(sys_timer_gettime)
++{
++ PRINT("sys_timer_gettime( %lld, %#lx )", (ULong)ARG1,ARG2);
++ PRE_REG_READ2(long, "timer_gettime",
++ vki_timer_t, timerid, struct itimerspec *, value);
++ PRE_MEM_WRITE( "timer_gettime(value)", ARG2,
++ sizeof(struct vki_itimerspec));
++}
++POST(sys_timer_gettime)
++{
++ POST_MEM_WRITE( ARG2, sizeof(struct vki_itimerspec) );
++}
++
++PRE(sys_timer_getoverrun)
++{
++ PRINT("sys_timer_getoverrun( %#lx )", ARG1);
++ PRE_REG_READ1(long, "timer_getoverrun", vki_timer_t, timerid);
++}
++
++PRE(sys_timer_delete)
++{
++ PRINT("sys_timer_delete( %#lx )", ARG1);
++ PRE_REG_READ1(long, "timer_delete", vki_timer_t, timerid);
++}
++
++/* ---------------------------------------------------------------------
++ sched_* wrappers
++ ------------------------------------------------------------------ */
++
++PRE(sys_sched_setparam)
++{
++ PRINT("sched_setparam ( %ld, %#lx )", ARG1, ARG2 );
++ PRE_REG_READ2(long, "sched_setparam",
++ vki_pid_t, pid, struct sched_param *, p);
++ PRE_MEM_READ( "sched_setparam(p)", ARG2, sizeof(struct vki_sched_param) );
++}
++POST(sys_sched_setparam)
++{
++ /* NOTE(review): sched_setparam only reads *p; this POST_MEM_WRITE looks
++ like a copy-paste from sched_getparam — verify before enabling. */
++ POST_MEM_WRITE( ARG2, sizeof(struct vki_sched_param) );
++}
++
++PRE(sys_sched_getparam)
++{
++ PRINT("sched_getparam ( %ld, %#lx )", ARG1, ARG2 );
++ PRE_REG_READ2(long, "sched_getparam",
++ vki_pid_t, pid, struct sched_param *, p);
++ PRE_MEM_WRITE( "sched_getparam(p)", ARG2, sizeof(struct vki_sched_param) );
++}
++POST(sys_sched_getparam)
++{
++ POST_MEM_WRITE( ARG2, sizeof(struct vki_sched_param) );
++}
++
++PRE(sys_sched_getscheduler)
++{
++ PRINT("sys_sched_getscheduler ( %ld )", ARG1);
++ PRE_REG_READ1(long, "sched_getscheduler", vki_pid_t, pid);
++}
++
++PRE(sys_sched_setscheduler)
++{
++ PRINT("sys_sched_setscheduler ( %ld, %ld, %#lx )", ARG1,ARG2,ARG3);
++ PRE_REG_READ3(long, "sched_setscheduler",
++ vki_pid_t, pid, int, policy, struct sched_param *, p);
++ if (ARG3 != 0)
++ PRE_MEM_READ( "sched_setscheduler(p)",
++ ARG3, sizeof(struct vki_sched_param));
++}
++
++PRE(sys_sched_yield)
++{
++ *flags |= SfMayBlock;
++ PRINT("sched_yield()");
++ PRE_REG_READ0(long, "sched_yield");
++}
++#endif
++
++/* sched_get_priority_max/min(2): no memory arguments, register-only. */
++PRE(sys_sched_get_priority_max)
++{
++ PRINT("sched_get_priority_max ( %ld )", ARG1);
++ PRE_REG_READ1(long, "sched_get_priority_max", int, policy);
++}
++
++PRE(sys_sched_get_priority_min)
++{
++ PRINT("sched_get_priority_min ( %ld )", ARG1);
++ PRE_REG_READ1(long, "sched_get_priority_min", int, policy);
++}
++
++/* Disabled block: CPU-affinity wrappers, not yet hooked up for NetBSD.
++ ARG2 is the mask length in bytes, used directly as the access size. */
++#if 0
++PRE(sys_sched_setaffinity)
++{
++ PRINT("sched_setaffinity ( %ld, %ld, %#lx )", ARG1, ARG2, ARG3);
++ PRE_REG_READ3(long, "sched_setaffinity",
++ vki_pid_t, pid, unsigned int, len, unsigned long *, mask);
++ PRE_MEM_READ( "sched_setaffinity(mask)", ARG3, ARG2);
++}
++
++PRE(sys_sched_getaffinity)
++{
++ PRINT("sched_getaffinity ( %ld, %ld, %#lx )", ARG1, ARG2, ARG3);
++ PRE_REG_READ3(long, "sched_getaffinity",
++ vki_pid_t, pid, unsigned int, len, unsigned long *, mask);
++ PRE_MEM_WRITE( "sched_getaffinity(mask)", ARG3, ARG2);
++}
++POST(sys_sched_getaffinity)
++{
++ POST_MEM_WRITE(ARG3, ARG2);
++}
++
++#endif
++
++/* ---------------------------------------------------------------------
++ miscellaneous wrappers
++ ------------------------------------------------------------------ */
++
++/* munlockall(2): argument-free; may block while unwiring pages. */
++PRE(sys_munlockall)
++{
++ *flags |= SfMayBlock;
++ PRINT("sys_munlockall ( )");
++ PRE_REG_READ0(long, "munlockall");
++}
++
++// Pipe on netbsd doesn't have args, and uses dual returns!
++// The two descriptors come back in RES and RESHI; if the client is not
++// allowed either fd, close both and fail with EMFILE, otherwise record
++// them for fd tracking.
++PRE(sys_pipe)
++{
++ PRINT("sys_pipe ()");
++}
++POST(sys_pipe)
++{
++ if (!ML_(fd_allowed)(RES, "pipe", tid, True) ||
++ !ML_(fd_allowed)(RESHI, "pipe", tid, True)) {
++ VG_(close)(RES);
++ VG_(close)(RESHI);
++ SET_STATUS_Failure( VKI_EMFILE );
++ } else {
++ if (VG_(clo_track_fds)) {
++ ML_(record_fd_open_nameless)(tid, RES);
++ ML_(record_fd_open_nameless)(tid, RESHI);
++ }
++ }
++}
++
++/* pipe2(2): unlike pipe, returns the two fds through the fildes array,
++ so the POST reads them back from client memory after marking it
++ defined. */
++PRE(sys_pipe2)
++{
++ PRINT("sys_pipe2 ( %#lx, %ld )", ARG1, ARG2);
++ PRE_REG_READ2(long, "pipe2",
++ int *, fildes, int, flags);
++ PRE_MEM_WRITE("pipe2(fildes)", ARG1, 2 * sizeof(int));
++
++}
++POST(sys_pipe2)
++{
++ int *fildes;
++
++ if (RES != 0)
++ return;
++
++ POST_MEM_WRITE(ARG1, 2 * sizeof(int));
++ fildes = (int *)ARG1;
++
++ if (!ML_(fd_allowed)(fildes[0], "pipe2", tid, True) ||
++ !ML_(fd_allowed)(fildes[1], "pipe2", tid, True)) {
++ VG_(close)(fildes[0]);
++ VG_(close)(fildes[1]);
++ SET_STATUS_Failure( VKI_EMFILE );
++ } else if (VG_(clo_track_fds)) {
++ ML_(record_fd_open_nameless)(tid, fildes[0]);
++ ML_(record_fd_open_nameless)(tid, fildes[1]);
++ }
++}
++
++/* Disabled block: quotactl/waitid/utime wrappers, not yet enabled for
++ NetBSD. */
++#if 0
++PRE(sys_quotactl)
++{
++ PRINT("sys_quotactl (0x%lx, %#lx, 0x%lx, 0x%lx )", ARG1,ARG2,ARG3, ARG4);
++ PRE_REG_READ4(long, "quotactl",
++ unsigned int, cmd, const char *, special, vki_qid_t, id,
++ void *, addr);
++ PRE_MEM_RASCIIZ( "quotactl(special)", ARG2 );
++}
++
++PRE(sys_waitid)
++{
++ *flags |= SfMayBlock;
++ PRINT("sys_waitid( %ld, %ld, %#lx, %ld, %#lx )", ARG1,ARG2,ARG3,ARG4,ARG5);
++ PRE_REG_READ5(int32_t, "sys_waitid",
++ int, which, vki_pid_t, pid, struct vki_siginfo *, infop,
++ int, options, struct vki_rusage *, ru);
++ PRE_MEM_WRITE( "waitid(infop)", ARG3, sizeof(struct vki_siginfo) );
++ if (ARG5 != 0)
++ PRE_MEM_WRITE( "waitid(ru)", ARG5, sizeof(struct vki_rusage) );
++}
++POST(sys_waitid)
++{
++ POST_MEM_WRITE( ARG3, sizeof(struct vki_siginfo) );
++ if (ARG5 != 0)
++ POST_MEM_WRITE( ARG5, sizeof(struct vki_rusage) );
++}
++
++/* ---------------------------------------------------------------------
++ utime wrapper
++ ------------------------------------------------------------------ */
++
++PRE(sys_utime)
++{
++ *flags |= SfMayBlock;
++ PRINT("sys_utime ( %#lx, %#lx )", ARG1,ARG2);
++ PRE_REG_READ2(long, "utime", char *, filename, struct utimbuf *, buf);
++ PRE_MEM_RASCIIZ( "utime(filename)", ARG1 );
++ if (ARG2 != 0)
++ PRE_MEM_READ( "utime(buf)", ARG2, sizeof(struct vki_utimbuf) );
++}
++
++#endif
++
++/* ---------------------------------------------------------------------
++ thr* wrappers
++ ------------------------------------------------------------------ */
++
++/* thr_self: writes the caller's thread id into the long pointed at by
++ ARG1. */
++PRE(sys_thr_self)
++{
++ PRINT( "sys_thr_self ( %#lx )", ARG1 );
++ PRE_REG_READ1(long, "thr_self", long *, "id");
++ PRE_MEM_WRITE( "thr_self()", ARG1, sizeof(long));
++}
++POST(sys_thr_self)
++{
++ POST_MEM_WRITE( ARG1, sizeof(long));
++}
++
++/* thr_exit: mark this Valgrind thread as exiting and report success —
++ the real syscall is not passed through here. */
++PRE(sys_thr_exit)
++{
++ ThreadState *tst;
++
++ PRINT( "sys_thr_exit ( %#lx )", ARG1 );
++ PRE_REG_READ1(long, "thr_exit", long *, "status");
++
++ if (ARG1)
++ PRE_MEM_WRITE( "thr_exit(status)", ARG1, sizeof(long) );
++ tst = VG_(get_ThreadState)(tid);
++ tst->exitreason = VgSrc_ExitThread;
++ tst->os_state.exitcode = ARG1;
++ SET_STATUS_Success(0);
++}
++
++/* thr_set_name: name must be a readable NUL-terminated string. */
++PRE(sys_thr_set_name)
++{
++ PRINT( "sys_thr_set_name ( %ld, %#lx )", ARG1, ARG2 );
++ PRE_REG_READ2(long, "thr_set_name", long, "id", const char *, "name");
++ PRE_MEM_RASCIIZ( "sys_thr_set_name(threadname)", ARG2);
++}
++
++/* thr_kill: validate the signal, special-case SIGKILL aimed at one of
++ our own threads, and force the slow syscall path so an immediately
++ delivered async signal finds the thread in VgTs_WaitSys. */
++PRE(sys_thr_kill)
++{
++ PRINT("sys_thr_kill ( %ld, %ld )", ARG1,ARG2);
++ PRE_REG_READ2(long, "thr_kill", long, id, int, sig);
++ if (!ML_(client_signal_OK)(ARG2)) {
++ SET_STATUS_Failure( VKI_EINVAL );
++ return;
++ }
++
++ /* Check to see if this kill gave us a pending signal */
++ *flags |= SfPollAfter;
++
++ if (VG_(clo_trace_signals))
++ VG_(message)(Vg_DebugMsg, "thr_kill: sending signal %ld to tid %ld\n",
++ ARG2, ARG1);
++
++ /* If we're sending SIGKILL, check to see if the target is one of
++ our threads and handle it specially. */
++ if (ARG2 == VKI_SIGKILL && ML_(do_sigkill)(ARG1, -1)) {
++ SET_STATUS_Success(0);
++ return;
++ }
++
++ /* Ask to handle this syscall via the slow route, since that's the
++ only one that sets tst->status to VgTs_WaitSys. If the result
++ of doing the syscall is an immediate run of
++ async_signalhandler() in m_signals, then we need the thread to
++ be properly tidied away. I have the impression the previous
++ version of this wrapper worked on x86/amd64 only because the
++ kernel did not immediately deliver the async signal to this
++ thread (on ppc it did, which broke the assertion re tst->status
++ at the top of async_signalhandler()). */
++ *flags |= SfMayBlock;
++}
++POST(sys_thr_kill)
++{
++ if (VG_(clo_trace_signals))
++ VG_(message)(Vg_DebugMsg, "thr_kill: sent signal %ld to tid %ld\n",
++ ARG2, ARG1);
++}
++/* thr_kill2: like thr_kill but also takes the target pid. Same SIGKILL
++ special-casing and slow-path handling as sys_thr_kill above. */
++PRE(sys_thr_kill2)
++{
++ PRINT("sys_thr_kill2 ( %ld, %ld, %ld )", ARG1,ARG2,ARG3);
++ /* Fixed: syscall name in error messages was "tgkill", a Linux
++ leftover; this wrapper is for thr_kill2. */
++ PRE_REG_READ3(long, "thr_kill2", int, pid, long, tid, int, sig);
++ if (!ML_(client_signal_OK)(ARG3)) {
++ SET_STATUS_Failure( VKI_EINVAL );
++ return;
++ }
++
++ /* Check to see if this kill gave us a pending signal */
++ *flags |= SfPollAfter;
++
++ if (VG_(clo_trace_signals))
++ VG_(message)(Vg_DebugMsg, "thr_kill2: sending signal %ld to pid %ld/%ld\n",
++ ARG3, ARG1, ARG2);
++
++ /* If we're sending SIGKILL, check to see if the target is one of
++ our threads and handle it specially. */
++ if (ARG3 == VKI_SIGKILL && ML_(do_sigkill)(ARG2, ARG1)) {
++ SET_STATUS_Success(0);
++ return;
++ }
++
++ /* Ask to handle this syscall via the slow route, since that's the
++ only one that sets tst->status to VgTs_WaitSys. If the result
++ of doing the syscall is an immediate run of
++ async_signalhandler() in m_signals, then we need the thread to
++ be properly tidied away. I have the impression the previous
++ version of this wrapper worked on x86/amd64 only because the
++ kernel did not immediately deliver the async signal to this
++ thread (on ppc it did, which broke the assertion re tst->status
++ at the top of async_signalhandler()). */
++ *flags |= SfMayBlock;
++}
++/* thr_kill2 POST: only emits the trace message. */
++POST(sys_thr_kill2)
++{
++ if (VG_(clo_trace_signals))
++ VG_(message)(Vg_DebugMsg, "thr_kill2: sent signal %ld to pid %ld/%ld\n",
++ ARG3, ARG1, ARG2);
++}
++
++/* thr_wake: register-only argument, nothing to check in memory. */
++PRE(sys_thr_wake)
++{
++ PRINT("sys_thr_wake ( %ld )", ARG1);
++ PRE_REG_READ1(long, "thr_wake", long, id);
++}
++
++/* ---------------------------------------------------------------------
++ umtx* wrappers
++ ------------------------------------------------------------------ */
++
++/* _umtx_op: multiplexed userland-synchronisation syscall. ARG2 selects
++ the operation; ARG1 points at the object (umtx/umutex/ucond/urwlock/
++ usem) and ARG4/ARG5 are op-specific pointers. Each case declares
++ exactly the reads/writes that op performs. */
++PRE(sys__umtx_op)
++{
++// ThreadState *tst;
++
++ /* 5 args are always passed through. The last two can vary, but
++ they're always pointers. They may not be used though. */
++ switch(ARG2) {
++ case VKI_UMTX_OP_LOCK:
++ PRINT( "sys__umtx_op ( %#lx, LOCK, %ld, %#lx, %#lx)", ARG1, ARG3, ARG4, ARG5);
++ PRE_REG_READ5(long, "_umtx_op_lock",
++ struct umtx *, obj, int, op, unsigned long, id,
++ void *, zero, struct vki_timespec *, timeout);
++ PRE_MEM_READ( "_umtx_op_lock(mtx)", ARG1, sizeof(struct vki_umtx) );
++ if (ARG5)
++ PRE_MEM_READ( "_umtx_op_lock(timespec)", ARG5, sizeof(struct vki_timespec) );
++ PRE_MEM_WRITE( "_umtx_op_lock(mtx)", ARG1, sizeof(struct vki_umtx) );
++ *flags |= SfMayBlock;
++ break;
++ case VKI_UMTX_OP_UNLOCK:
++ PRINT( "sys__umtx_op ( %#lx, UNLOCK, %ld)", ARG1, ARG3);
++ PRE_REG_READ3(long, "_umtx_op_unlock",
++ struct umtx *, obj, int, op, unsigned long, id);
++ PRE_MEM_READ( "_umtx_op_unlock(mtx)", ARG1, sizeof(struct vki_umtx) );
++ PRE_MEM_WRITE( "_umtx_op_unlock(mtx)", ARG1, sizeof(struct vki_umtx) );
++ break;
++ case VKI_UMTX_OP_WAIT:
++ PRINT( "sys__umtx_op ( %#lx, WAIT, %ld, %#lx, %#lx)", ARG1, ARG3, ARG4, ARG5);
++ PRE_REG_READ5(long, "_umtx_op_wait",
++ struct umtx *, obj, int, op, unsigned long, id,
++ void *, zero, struct vki_timespec *, timeout);
++ PRE_MEM_READ( "_umtx_op_wait(mtx)", ARG1, sizeof(struct vki_umtx) );
++ if (ARG5)
++ PRE_MEM_READ( "_umtx_op_wait(timespec)", ARG5, sizeof(struct vki_timespec) );
++ *flags |= SfMayBlock;
++ break;
++ case VKI_UMTX_OP_WAKE:
++ PRINT( "sys__umtx_op ( %#lx, WAKE, %ld)", ARG1, ARG3);
++ PRE_REG_READ3(long, "_umtx_op_wake",
++ struct umtx *, obj, int, op, unsigned long, id);
++ PRE_MEM_READ( "_umtx_op_wake(mtx)", ARG1, sizeof(struct vki_umtx) );
++ break;
++ case VKI_UMTX_OP_MUTEX_TRYLOCK:
++ PRINT( "sys__umtx_op ( %#lx, MUTEX_TRYLOCK, %ld, %#lx, %#lx)", ARG1, ARG3, ARG4, ARG5);
++ PRE_REG_READ5(long, "_umtx_op_mutex_trylock",
++ struct umutex *, obj, int, op, unsigned long, noid,
++ void *, zero, struct vki_timespec *, timeout);
++ PRE_MEM_READ( "_umtx_op_mutex_trylock(mutex)", ARG1, sizeof(struct vki_umutex) );
++ if (ARG5)
++ PRE_MEM_READ( "_umtx_op_mutex_trylock(timespec)", ARG5, sizeof(struct vki_timespec) );
++ PRE_MEM_WRITE( "_umtx_op_mutex_trylock(mutex)", ARG1, sizeof(struct vki_umutex) );
++ break;
++ case VKI_UMTX_OP_MUTEX_LOCK:
++ PRINT( "sys__umtx_op ( %#lx, MUTEX_LOCK, %ld, %#lx, %#lx)", ARG1, ARG3, ARG4, ARG5);
++ PRE_REG_READ5(long, "_umtx_op_mutex_lock",
++ struct umutex *, obj, int, op, unsigned long, noid,
++ void *, zero, struct vki_timespec *, timeout);
++ PRE_MEM_READ( "_umtx_op_mutex_lock(mutex)", ARG1, sizeof(struct vki_umutex) );
++ if (ARG5)
++ PRE_MEM_READ( "_umtx_op_mutex_lock(timespec)", ARG5, sizeof(struct vki_timespec) );
++ PRE_MEM_WRITE( "_umtx_op_mutex_lock(mutex)", ARG1, sizeof(struct vki_umutex) );
++ *flags |= SfMayBlock;
++ break;
++ case VKI_UMTX_OP_MUTEX_UNLOCK:
++ PRINT( "sys__umtx_op ( %#lx, MUTEX_UNLOCK)", ARG1);
++ PRE_REG_READ2(long, "_umtx_op_mutex_unlock",
++ struct umutex *, obj, int, op);
++ PRE_MEM_READ( "_umtx_op_mutex_unlock(mutex)", ARG1, sizeof(struct vki_umutex) );
++ PRE_MEM_WRITE( "_umtx_op_mutex_unlock(mutex)", ARG1, sizeof(struct vki_umutex) );
++ break;
++ case VKI_UMTX_OP_SET_CEILING:
++ PRINT( "sys__umtx_op ( %#lx, SET_CEILING, %ld, %#lx)", ARG1, ARG3, ARG4);
++ PRE_REG_READ4(long, "_umtx_op_set_ceiling",
++ struct umutex *, obj, int, op, unsigned int, ceiling,
++ unsigned int *, old_ceiling);
++ PRE_MEM_READ( "_umtx_op_set_ceiling(mutex)", ARG1, sizeof(struct vki_umutex) );
++ PRE_MEM_WRITE( "_umtx_op_set_ceiling(mutex)", ARG1, sizeof(struct vki_umutex) );
++ if (ARG4)
++ PRE_MEM_WRITE( "_umtx_op_set_ceiling(old_ceiling)", ARG4, sizeof(vki_uint32_t) );
++ break;
++ case VKI_UMTX_OP_CV_WAIT:
++ PRINT( "sys__umtx_op ( %#lx, CV_WAIT, %ld, %#lx, %#lx)", ARG1, ARG3, ARG4, ARG5);
++ PRE_REG_READ5(long, "_umtx_op_cv_wait",
++ struct ucond *, obj, int, op, unsigned long, wflags,
++ struct umutex *, umtx, struct vki_timespec *, timeout);
++ PRE_MEM_READ( "_umtx_op_cv_wait(cond)", ARG1, sizeof(struct vki_ucond) );
++ PRE_MEM_WRITE( "_umtx_op_cv_wait(cond)", ARG1, sizeof(struct vki_ucond) );
++ PRE_MEM_READ( "_umtx_op_cv_wait(mutex)", ARG4, sizeof(struct vki_umutex) );
++ PRE_MEM_WRITE( "_umtx_op_cv_wait(mutex)", ARG4, sizeof(struct vki_umutex) );
++ if (ARG5)
++ PRE_MEM_READ( "_umtx_op_cv_wait(timespec)", ARG5, sizeof(struct vki_timespec) );
++ *flags |= SfMayBlock;
++ break;
++ case VKI_UMTX_OP_CV_SIGNAL:
++ PRINT( "sys__umtx_op ( %#lx, CV_SIGNAL)", ARG1);
++ PRE_REG_READ2(long, "_umtx_op_cv_signal",
++ struct ucond *, obj, int, op);
++ PRE_MEM_READ( "_umtx_op_cv_signal(cond)", ARG1, sizeof(struct vki_ucond) );
++ PRE_MEM_WRITE( "_umtx_op_cv_signal(cond)", ARG1, sizeof(struct vki_ucond) );
++ break;
++ case VKI_UMTX_OP_CV_BROADCAST:
++ PRINT( "sys__umtx_op ( %#lx, CV_BROADCAST, %ld, %#lx, %#lx)", ARG1, ARG3, ARG4, ARG5);
++ PRE_REG_READ2(long, "_umtx_op_cv_broadcast",
++ struct ucond *, obj, int, op);
++ PRE_MEM_READ( "_umtx_op_cv_broadcast(cond)", ARG1, sizeof(struct vki_ucond) );
++ PRE_MEM_WRITE( "_umtx_op_cv_broadcast(cond)", ARG1, sizeof(struct vki_ucond) );
++ break;
++ case VKI_UMTX_OP_WAIT_UINT:
++ PRINT( "sys__umtx_op ( %#lx, CV_WAIT_UINT, %ld, %#lx, %#lx)", ARG1, ARG3, ARG4, ARG5);
++ PRE_REG_READ5(long, "_umtx_op_wait_uint",
++ int *, obj, int, op, unsigned long, id,
++ void *, zero, struct vki_timespec *, timeout);
++ PRE_MEM_READ( "_umtx_op_wait(uint)", ARG1, sizeof(int) );
++ if (ARG5)
++ PRE_MEM_READ( "_umtx_op_wait(timespec)", ARG5, sizeof(struct vki_timespec) );
++ *flags |= SfMayBlock;
++ break;
++ case VKI_UMTX_OP_RW_RDLOCK:
++ PRINT( "sys__umtx_op ( %#lx, RW_RDLOCK, %ld, %#lx, %#lx)", ARG1, ARG3, ARG4, ARG5);
++ PRE_REG_READ5(long, "_umtx_op_rw_rdlock",
++ struct urwlock *, obj, int, op, unsigned long, noid,
++ void *, zero, struct vki_timespec *, timeout);
++ PRE_MEM_READ( "_umtx_op_rw_rdlock(rw)", ARG1, sizeof(struct vki_urwlock) );
++ PRE_MEM_WRITE( "_umtx_op_rw_rdlock(rw)", ARG1, sizeof(struct vki_urwlock) );
++ *flags |= SfMayBlock;
++ break;
++ case VKI_UMTX_OP_RW_WRLOCK:
++ PRINT( "sys__umtx_op ( %#lx, RW_WRLOCK, %ld, %#lx, %#lx)", ARG1, ARG3, ARG4, ARG5);
++ PRE_REG_READ5(long, "_umtx_op_rw_wrlock",
++ struct urwlock *, obj, int, op, unsigned long, noid,
++ void *, zero, struct vki_timespec *, timeout);
++ PRE_MEM_READ( "_umtx_op_rw_wrlock(rw)", ARG1, sizeof(struct vki_urwlock) );
++ PRE_MEM_WRITE( "_umtx_op_rw_wrlock(rw)", ARG1, sizeof(struct vki_urwlock) );
++ *flags |= SfMayBlock;
++ break;
++ case VKI_UMTX_OP_RW_UNLOCK:
++ PRINT( "sys__umtx_op ( %#lx, RW_UNLOCK, %ld, %#lx, %#lx)", ARG1, ARG3, ARG4, ARG5);
++ PRE_REG_READ2(long, "_umtx_op_rw_unlock",
++ struct urwlock *, obj, int, op);
++ PRE_MEM_READ( "_umtx_op_rw_unlock(rw)", ARG1, sizeof(struct vki_urwlock) );
++ PRE_MEM_WRITE( "_umtx_op_rw_unlock(rw)", ARG1, sizeof(struct vki_urwlock) );
++ break;
++ case VKI_UMTX_OP_WAIT_UINT_PRIVATE:
++ PRINT( "sys__umtx_op ( %#lx, CV_WAIT_UINT_PRIVATE, %ld, %#lx, %#lx)", ARG1, ARG3, ARG4, ARG5);
++ PRE_REG_READ5(long, "_umtx_op_wait_uint_private",
++ int *, obj, int, op, unsigned long, id,
++ void *, zero, struct vki_timespec *, timeout);
++ PRE_MEM_READ( "_umtx_op_wait_private(uint)", ARG1, sizeof(int) );
++ if (ARG5)
++ PRE_MEM_READ( "_umtx_op_wait_private(umtx_time)", ARG5, sizeof(struct vki_umtx_time) );
++ *flags |= SfMayBlock;
++ break;
++ case VKI_UMTX_OP_WAKE_PRIVATE:
++ PRINT( "sys__umtx_op ( %#lx, CV_WAKE_PRIVATE, %ld)", ARG1, ARG3);
++ PRE_REG_READ3(long, "_umtx_op_wake_private",
++ struct umtx *, obj, int, op, unsigned long, id);
++ PRE_MEM_READ( "_umtx_op_wake_private(mtx)", ARG1, sizeof(struct vki_umtx) );
++ break;
++ case VKI_UMTX_OP_MUTEX_WAIT:
++ PRINT( "sys__umtx_op ( %#lx, MUTEX_WAIT, %ld, %#lx, %#lx)", ARG1, ARG3, ARG4, ARG5);
++ PRE_REG_READ2(long, "_umtx_op_mutex_wait",
++ struct umutex *, obj, int, op);
++ PRE_MEM_READ( "_umtx_op_mutex_wait(mutex)", ARG1, sizeof(struct vki_umutex) );
++ PRE_MEM_WRITE( "_umtx_op_mutex_wait(mutex)", ARG1, sizeof(struct vki_umutex) );
++ *flags |= SfMayBlock;
++ break;
++ case VKI_UMTX_OP_MUTEX_WAKE:
++ PRINT( "sys__umtx_op ( %#lx, MUTEX_WAKE, %ld, %#lx, %#lx)", ARG1, ARG3, ARG4, ARG5);
++ PRE_REG_READ2(long, "_umtx_op_mutex_wake",
++ struct umutex *, obj, int, op);
++ PRE_MEM_READ( "_umtx_op_mutex_wake(mutex)", ARG1, sizeof(struct vki_umutex) );
++ PRE_MEM_WRITE( "_umtx_op_mutex_wake(mutex)", ARG1, sizeof(struct vki_umutex) );
++ break;
++ case VKI_UMTX_OP_SEM_WAIT:
++ PRINT( "sys__umtx_op ( %#lx, SEM_WAIT, %ld, %#lx, %#lx)", ARG1, ARG3, ARG4, ARG5);
++ PRE_REG_READ5(long, "_umtx_op_sem_wait",
++ struct usem *, obj, int, op, unsigned long, id,
++ void *, zero, struct vki_timespec *, timeout);
++ PRE_MEM_READ( "_umtx_op_sem_wait(usem)", ARG1, sizeof(struct vki_usem) );
++ PRE_MEM_WRITE( "_umtx_op_sem_wait(usem)", ARG1, sizeof(struct vki_usem) );
++ if (ARG5)
++ PRE_MEM_READ( "_umtx_op_sem_wait(umtx_time)", ARG5, sizeof(struct vki_umtx_time) );
++ *flags |= SfMayBlock;
++ break;
++ case VKI_UMTX_OP_SEM_WAKE:
++ PRINT( "sys__umtx_op ( %#lx, SEM_WAKE, %ld, %#lx, %#lx)", ARG1, ARG3, ARG4, ARG5);
++ PRE_REG_READ2(long, "_umtx_op_sem_wake",
++ struct umutex *, obj, int, op);
++ PRE_MEM_READ( "_umtx_op_sem_wake(mutex)", ARG1, sizeof(struct vki_usem) );
++ PRE_MEM_WRITE( "_umtx_op_sem_wake(mutex)", ARG1, sizeof(struct vki_usem) );
++ break;
++ case VKI_UMTX_OP_NWAKE_PRIVATE:
++ PRINT( "sys__umtx_op ( %#lx, NWAKE_PRIVATE, %ld, %#lx, %#lx)", ARG1, ARG3, ARG4, ARG5);
++ PRE_REG_READ3(long, "_umtx_op_nwake_private",
++ struct umutex *, obj, int, op, int, count);
++ PRE_MEM_READ( "_umtx_op_nwake_private(mtxs)", ARG1, ARG3 * sizeof(void *) );
++ /* Fixed: error label previously said "_umtx_op_mutex_wake(mtxs)",
++ a copy-paste from the MUTEX_WAKE case.
++ NOTE(review): ARG1 is an array of ARG3 pointers that is only
++ read by this op; the write annotation below (sized as a single
++ umutex) looks dubious — confirm against the kernel. */
++ PRE_MEM_WRITE( "_umtx_op_nwake_private(mtxs)", ARG1, sizeof(struct vki_umutex) );
++ break;
++ case VKI_UMTX_OP_MUTEX_WAKE2:
++ PRINT( "sys__umtx_op ( %#lx, MUTEX_WAKE2, %ld, %#lx, %#lx)", ARG1, ARG3, ARG4, ARG5);
++ PRE_REG_READ3(long, "_umtx_op_mutex_wake2",
++ struct umutex *, obj, int, op, unsigned long, flags);
++ PRE_MEM_READ( "_umtx_op_mutex_wake(mutex)", ARG1, sizeof(struct vki_umutex) );
++ PRE_MEM_WRITE( "_umtx_op_mutex_wake(mutex)", ARG1, sizeof(struct vki_umutex) );
++ break;
++ default:
++ PRINT( "sys__umtx_op ( %#lx, %ld(UNKNOWN), %ld, %#lx, %#lx )", ARG1, ARG2, ARG3, ARG4, ARG5);
++ break;
++ }
++// tst = VG_(get_ThreadState)(tid);
++//PRINT(" [[ UMTX_OP %d: me=%d arg1 %#lx = %#lx (%ld) ]]", ARG2, tst->os_state.lwpid, ARG1, *(UWord *)ARG1, *(UWord *)ARG1 & 0x7fffffff);
++}
++
++/* _umtx_op POST: mark the op's object defined, but only when the
++ syscall actually succeeded. Wait/wake ops write nothing back. */
++POST(sys__umtx_op)
++{
++//PRINT("[[ POST_UMTX_OP %d: arg1 %#lx = %#lx (%ld) ]]\n", ARG2, ARG1, *(UWord *)ARG1, *(UWord *)ARG1 & 0x7fffffff);
++ switch(ARG2) {
++ case VKI_UMTX_OP_LOCK:
++ if (SUCCESS)
++ POST_MEM_WRITE( ARG1, sizeof(struct vki_umtx) );
++ break;
++ case VKI_UMTX_OP_UNLOCK:
++ if (SUCCESS)
++ POST_MEM_WRITE( ARG1, sizeof(struct vki_umtx) );
++ break;
++ case VKI_UMTX_OP_WAIT:
++ case VKI_UMTX_OP_WAKE:
++ case VKI_UMTX_OP_WAIT_UINT:
++ case VKI_UMTX_OP_WAIT_UINT_PRIVATE:
++ case VKI_UMTX_OP_WAKE_PRIVATE:
++ break;
++ case VKI_UMTX_OP_MUTEX_TRYLOCK:
++ case VKI_UMTX_OP_MUTEX_LOCK:
++ case VKI_UMTX_OP_MUTEX_UNLOCK:
++ case VKI_UMTX_OP_MUTEX_WAIT: /* Sets/clears contested bits */
++ case VKI_UMTX_OP_MUTEX_WAKE: /* Sets/clears contested bits */
++ if (SUCCESS)
++ POST_MEM_WRITE( ARG1, sizeof(struct vki_umutex) );
++ break;
++ case VKI_UMTX_OP_SET_CEILING:
++ if (SUCCESS) {
++ POST_MEM_WRITE( ARG1, sizeof(struct vki_umutex) );
++ if (ARG4)
++ POST_MEM_WRITE( ARG4, sizeof(vki_uint32_t) );
++ }
++ break;
++ case VKI_UMTX_OP_CV_WAIT:
++ if (SUCCESS) {
++ POST_MEM_WRITE( ARG1, sizeof(struct vki_ucond) );
++ POST_MEM_WRITE( ARG4, sizeof(struct vki_umutex) );
++ }
++ break;
++ case VKI_UMTX_OP_CV_SIGNAL:
++ if (SUCCESS) {
++ POST_MEM_WRITE( ARG1, sizeof(struct vki_ucond) );
++ }
++ break;
++ case VKI_UMTX_OP_CV_BROADCAST:
++ if (SUCCESS) {
++ POST_MEM_WRITE( ARG1, sizeof(struct vki_ucond) );
++ }
++ break;
++ case VKI_UMTX_OP_RW_RDLOCK:
++ case VKI_UMTX_OP_RW_WRLOCK:
++ case VKI_UMTX_OP_RW_UNLOCK:
++ if (SUCCESS) {
++ POST_MEM_WRITE( ARG1, sizeof(struct vki_urwlock) );
++ }
++ break;
++ default:
++ break;
++ }
++}
++
++/* _umtx_lock: the umtx word is both read and updated by the kernel. */
++PRE(sys__umtx_lock)
++{
++ PRINT( "sys__umtx_lock ( %#lx )", ARG1);
++ PRE_REG_READ1(long, "_umtx_lock", struct vki_umtx *, umtx);
++ PRE_MEM_READ( "_umtx_lock(mtx)", ARG1, sizeof(struct vki_umtx) );
++ PRE_MEM_WRITE( "_umtx_lock(mtx)", ARG1, sizeof(struct vki_umtx) );
++}
++
++POST(sys__umtx_lock)
++{
++ if (SUCCESS)
++ POST_MEM_WRITE(ARG1, sizeof(struct vki_umtx));
++}
++
++/* _umtx_unlock: mirror of _umtx_lock; umtx word read and updated. */
++PRE(sys__umtx_unlock)
++{
++ PRINT( "sys__umtx_unlock ( %#lx )", ARG1);
++ PRE_REG_READ1(long, "_umtx_unlock", struct vki_umtx *, umtx);
++ PRE_MEM_READ( "_umtx_unlock(mtx)", ARG1, sizeof(struct vki_umtx) );
++ PRE_MEM_WRITE( "_umtx_unlock(mtx)", ARG1, sizeof(struct vki_umtx) );
++}
++
++POST(sys__umtx_unlock)
++{
++ if (SUCCESS)
++ POST_MEM_WRITE(ARG1, sizeof(struct vki_umtx));
++}
++
++/* rtprio_thread: RTP_SET reads *rtp, RTP_LOOKUP writes it; any other
++ function code touches no memory here. */
++PRE(sys_rtprio_thread)
++{
++ PRINT( "sys_rtprio_thread ( %ld, %ld, %#lx )", ARG1, ARG2, ARG3 );
++ PRE_REG_READ3(long, "rtprio_thread",
++ int, "function", __vki_lwpid_t, "lwpid", struct vki_rtprio *, "rtp");
++ if (ARG1 == VKI_RTP_SET) {
++ PRE_MEM_READ( "rtprio_thread(set)", ARG3, sizeof(struct vki_rtprio));
++ } else if (ARG1 == VKI_RTP_LOOKUP) {
++ PRE_MEM_WRITE( "rtprio_thread(lookup)", ARG3, sizeof(struct vki_rtprio));
++ } else {
++ /* PHK ?? */
++ }
++}
++POST(sys_rtprio_thread)
++{
++ if (ARG1 == VKI_RTP_LOOKUP && RES == 0)
++ POST_MEM_WRITE( ARG3, sizeof(struct vki_rtprio));
++}
++
++/* ---------------------------------------------------------------------
++ sig* wrappers
++ ------------------------------------------------------------------ */
++
++/* sigpending(2): kernel fills the caller-supplied signal set. */
++PRE(sys_sigpending)
++{
++ PRINT( "sys_sigpending ( %#lx )", ARG1 );
++ PRE_REG_READ1(long, "sigpending", vki_sigset_t *, set);
++ PRE_MEM_WRITE( "sigpending(set)", ARG1, sizeof(vki_sigset_t));
++}
++POST(sys_sigpending)
++{
++ POST_MEM_WRITE( ARG1, sizeof(vki_sigset_t) ) ;
++}
++
++/* ---------------------------------------------------------------------
++ rt_sig* wrappers
++ ------------------------------------------------------------------ */
++
++/* Per-signal record of which sigaction variant (and hence sigframe
++ layout) the handler was registered through: 4 or 6. */
++static int sigformat[_VKI_NSIG];
++
++/* sigaction (old 4.x variant): validated field-by-field, then handled
++ entirely inside Valgrind via VG_(do_sys_sigaction) — the kernel never
++ sees the client's handler. */
++PRE(sys_sigaction4)
++{
++ PRINT("sys_sigaction ( %ld, %#lx, %#lx )", ARG1,ARG2,ARG3);
++ PRE_REG_READ3(long, "sigaction",
++ int, signum, const struct sigaction *, act,
++ struct sigaction *, oldact);
++
++ if (ARG2 != 0) {
++ struct vki_sigaction *sa = (struct vki_sigaction *)ARG2;
++ PRE_MEM_READ( "sigaction(act->sa_handler)", (Addr)&sa->ksa_handler, sizeof(sa->ksa_handler));
++ PRE_MEM_READ( "sigaction(act->sa_mask)", (Addr)&sa->sa_mask, sizeof(sa->sa_mask));
++ PRE_MEM_READ( "sigaction(act->sa_flags)", (Addr)&sa->sa_flags, sizeof(sa->sa_flags));
++ if (ARG1 < _VKI_NSIG)
++ sigformat[ARG1] = 4;
++ }
++ if (ARG3 != 0)
++ PRE_MEM_WRITE( "sigaction(oldact)", ARG3, sizeof(struct vki_sigaction));
++
++ /* process the signal immediately. */
++ SET_STATUS_from_SysRes(
++ VG_(do_sys_sigaction)(ARG1, (const vki_sigaction_toK_t *)ARG2,
++ (vki_sigaction_fromK_t *)ARG3)
++ );
++}
++POST(sys_sigaction4)
++{
++ vg_assert(SUCCESS);
++ if (RES == 0 && ARG3 != 0)
++ POST_MEM_WRITE( ARG3, sizeof(struct vki_sigaction));
++}
++
++/* Identical, but warns the signal handler to expect the different sigframe */
++/* NOTE(review): the PRINT/label strings say "sigaction6" while the
++ wrapper name is sys_sigaction; presumably this is the 6-style frame
++ variant recorded via sigformat[...] = 6 — confirm naming intent. */
++PRE(sys_sigaction)
++{
++ PRINT("sys_sigaction6 ( %ld, %#lx, %#lx )", ARG1,ARG2,ARG3);
++ PRE_REG_READ3(long, "sigaction6",
++ int, signum, const struct sigaction *, act,
++ struct sigaction *, oldact);
++
++ if (ARG2 != 0) {
++ struct vki_sigaction *sa = (struct vki_sigaction *)ARG2;
++ PRE_MEM_READ( "sigaction6(act->sa_handler)", (Addr)&sa->ksa_handler, sizeof(sa->ksa_handler));
++ PRE_MEM_READ( "sigaction6(act->sa_mask)", (Addr)&sa->sa_mask, sizeof(sa->sa_mask));
++ PRE_MEM_READ( "sigaction6(act->sa_flags)", (Addr)&sa->sa_flags, sizeof(sa->sa_flags));
++ if (ARG1 < _VKI_NSIG)
++ sigformat[ARG1] = 6;
++ }
++ if (ARG3 != 0)
++ PRE_MEM_WRITE( "sigaction6(oldact)", ARG3, sizeof(struct vki_sigaction));
++
++ SET_STATUS_from_SysRes(
++ VG_(do_sys_sigaction)(ARG1, (const vki_sigaction_toK_t *)ARG2,
++ (vki_sigaction_fromK_t *)ARG3)
++ );
++}
++POST(sys_sigaction)
++{
++ vg_assert(SUCCESS);
++ if (RES == 0 && ARG3 != 0)
++ POST_MEM_WRITE( ARG3, sizeof(struct vki_sigaction));
++}
++
++
++/* sigprocmask(2): handled inside Valgrind via VG_(do_sys_sigprocmask);
++ on success, poll for signals newly unblocked by the mask change. */
++PRE(sys_sigprocmask)
++{
++ PRINT("sys_sigprocmask ( %ld, %#lx, %#lx )",ARG1,ARG2,ARG3);
++ PRE_REG_READ3(long, "sigprocmask",
++ int, how, vki_sigset_t *, set, vki_sigset_t *, oldset);
++ if (ARG2 != 0)
++ PRE_MEM_READ( "sigprocmask(set)", ARG2, sizeof(vki_sigset_t));
++ if (ARG3 != 0)
++ PRE_MEM_WRITE( "sigprocmask(oldset)", ARG3, sizeof(vki_sigset_t));
++
++ SET_STATUS_from_SysRes(
++ VG_(do_sys_sigprocmask) ( tid, ARG1 /*how*/,
++ (vki_sigset_t*) ARG2,
++ (vki_sigset_t*) ARG3 )
++ );
++
++ if (SUCCESS)
++ *flags |= SfPollAfter;
++}
++POST(sys_sigprocmask)
++{
++ vg_assert(SUCCESS);
++ if (RES == 0 && ARG3 != 0)
++ POST_MEM_WRITE( ARG3, sizeof(vki_sigset_t));
++}
++
++/* Not in 4.x */
++/* sigtimedwait(2): set/timeout are read, info is written if supplied. */
++PRE(sys_sigtimedwait)
++{
++ *flags |= SfMayBlock;
++ PRINT("sys_sigtimedwait ( %#lx, %#lx, %#lx )",
++ ARG1,ARG2,ARG3);
++ PRE_REG_READ3(long, "sigtimedwait",
++ const vki_sigset_t *, set, vki_siginfo_t *, info,
++ const struct timespec *, timeout);
++ if (ARG1 != 0)
++ PRE_MEM_READ( "sigtimedwait(set)", ARG1, sizeof(vki_sigset_t));
++ if (ARG2 != 0)
++ PRE_MEM_WRITE( "sigtimedwait(info)", ARG2, sizeof(vki_siginfo_t) );
++ if (ARG3 != 0)
++ PRE_MEM_READ( "sigtimedwait(timeout)",
++ ARG3, sizeof(struct vki_timespec) );
++}
++POST(sys_sigtimedwait)
++{
++ if (ARG2 != 0)
++ POST_MEM_WRITE( ARG2, sizeof(vki_siginfo_t) );
++}
++
++/* Not in 4.x */
++/* sigwaitinfo(2): like sigtimedwait without the timeout argument. */
++PRE(sys_sigwaitinfo)
++{
++ *flags |= SfMayBlock;
++ PRINT("sys_sigwaitinfo ( %#lx, %#lx )",
++ ARG1,ARG2);
++ PRE_REG_READ2(long, "sigwaitinfo",
++ const vki_sigset_t *, set, vki_siginfo_t *, info);
++ if (ARG1 != 0)
++ PRE_MEM_READ( "sigwaitinfo(set)", ARG1, sizeof(vki_sigset_t));
++ if (ARG2 != 0)
++ PRE_MEM_WRITE( "sigwaitinfo(info)", ARG2, sizeof(vki_siginfo_t) );
++}
++POST(sys_sigwaitinfo)
++{
++ if (ARG2 != 0)
++ POST_MEM_WRITE( ARG2, sizeof(vki_siginfo_t) );
++}
++
++#if 0 /* not on netbsd 4.x */
++/* NOTE(review): two latent bugs if this is ever enabled: the guard
++ tests ARG2 (sig) where the pointer being read is ARG3 (uinfo), and
++ the client_signal_OK check belongs in the PRE, not the POST. */
++PRE(sys_rt_sigqueueinfo)
++{
++ PRINT("sys_rt_sigqueueinfo(%ld, %ld, %#lx)", ARG1, ARG2, ARG3);
++ PRE_REG_READ3(long, "rt_sigqueueinfo",
++ int, pid, int, sig, vki_siginfo_t *, uinfo);
++ if (ARG2 != 0)
++ PRE_MEM_READ( "rt_sigqueueinfo(uinfo)", ARG3, sizeof(vki_siginfo_t) );
++}
++POST(sys_rt_sigqueueinfo)
++{
++ if (!ML_(client_signal_OK)(ARG2))
++ SET_STATUS_Failure( VKI_EINVAL );
++}
++#endif
++
++/* sigsuspend(2): blocks until a signal arrives; mask is read if given. */
++PRE(sys_sigsuspend)
++{
++ *flags |= SfMayBlock;
++ PRINT("sys_sigsuspend ( %#lx )", ARG1 );
++ PRE_REG_READ1(int, "rt_sigsuspend", vki_sigset_t *, mask)
++ if (ARG1 != (Addr)NULL) {
++ PRE_MEM_READ( "rt_sigsuspend(mask)", ARG1, sizeof(vki_sigset_t) );
++ }
++}
++
++/* Disabled block: SysV msg* checking helpers inherited from the Linux
++ port; kept for reference, not compiled. */
++#if 0
++/* ---------------------------------------------------------------------
++ linux msg* wrapper helpers
++ ------------------------------------------------------------------ */
++
++void
++ML_(linux_PRE_sys_msgsnd) ( ThreadId tid,
++ UWord arg0, UWord arg1, UWord arg2, UWord arg3 )
++{
++ /* int msgsnd(int msqid, struct msgbuf *msgp, size_t msgsz, int msgflg); */
++ struct vki_msgbuf *msgp = (struct vki_msgbuf *)arg1;
++ PRE_MEM_READ( "msgsnd(msgp->mtype)", (Addr)&msgp->mtype, sizeof(msgp->mtype) );
++ PRE_MEM_READ( "msgsnd(msgp->mtext)", (Addr)&msgp->mtext, arg2 );
++}
++
++void
++ML_(linux_PRE_sys_msgrcv) ( ThreadId tid,
++ UWord arg0, UWord arg1, UWord arg2,
++ UWord arg3, UWord arg4 )
++{
++ /* ssize_t msgrcv(int msqid, struct msgbuf *msgp, size_t msgsz,
++ long msgtyp, int msgflg); */
++ struct vki_msgbuf *msgp = (struct vki_msgbuf *)arg1;
++ PRE_MEM_WRITE( "msgrcv(msgp->mtype)", (Addr)&msgp->mtype, sizeof(msgp->mtype) );
++ PRE_MEM_WRITE( "msgrcv(msgp->mtext)", (Addr)&msgp->mtext, arg2 );
++}
++void
++ML_(linux_POST_sys_msgrcv) ( ThreadId tid,
++ UWord res,
++ UWord arg0, UWord arg1, UWord arg2,
++ UWord arg3, UWord arg4 )
++{
++ struct vki_msgbuf *msgp = (struct vki_msgbuf *)arg1;
++ POST_MEM_WRITE( (Addr)&msgp->mtype, sizeof(msgp->mtype) );
++ POST_MEM_WRITE( (Addr)&msgp->mtext, res );
++}
++
++void
++ML_(linux_PRE_sys_msgctl) ( ThreadId tid,
++ UWord arg0, UWord arg1, UWord arg2 )
++{
++ /* int msgctl(int msqid, int cmd, struct msqid_ds *buf); */
++ switch (arg1 /* cmd */) {
++ case VKI_IPC_INFO:
++ case VKI_MSG_INFO:
++ case VKI_IPC_INFO|VKI_IPC_64:
++ case VKI_MSG_INFO|VKI_IPC_64:
++ PRE_MEM_WRITE( "msgctl(IPC_INFO, buf)",
++ arg2, sizeof(struct vki_msginfo) );
++ break;
++ case VKI_IPC_STAT:
++ case VKI_MSG_STAT:
++ PRE_MEM_WRITE( "msgctl(IPC_STAT, buf)",
++ arg2, sizeof(struct vki_msqid_ds) );
++ break;
++ case VKI_IPC_STAT|VKI_IPC_64:
++ case VKI_MSG_STAT|VKI_IPC_64:
++ PRE_MEM_WRITE( "msgctl(IPC_STAT, arg.buf)",
++ arg2, sizeof(struct vki_msqid64_ds) );
++ break;
++ case VKI_IPC_SET:
++ PRE_MEM_READ( "msgctl(IPC_SET, arg.buf)",
++ arg2, sizeof(struct vki_msqid_ds) );
++ break;
++ case VKI_IPC_SET|VKI_IPC_64:
++ PRE_MEM_READ( "msgctl(IPC_SET, arg.buf)",
++ arg2, sizeof(struct vki_msqid64_ds) );
++ break;
++ }
++}
++void
++ML_(linux_POST_sys_msgctl) ( ThreadId tid,
++ UWord res,
++ UWord arg0, UWord arg1, UWord arg2 )
++{
++ switch (arg1 /* cmd */) {
++ case VKI_IPC_INFO:
++ case VKI_MSG_INFO:
++ case VKI_IPC_INFO|VKI_IPC_64:
++ case VKI_MSG_INFO|VKI_IPC_64:
++ POST_MEM_WRITE( arg2, sizeof(struct vki_msginfo) );
++ break;
++ case VKI_IPC_STAT:
++ case VKI_MSG_STAT:
++ POST_MEM_WRITE( arg2, sizeof(struct vki_msqid_ds) );
++ break;
++ case VKI_IPC_STAT|VKI_IPC_64:
++ case VKI_MSG_STAT|VKI_IPC_64:
++ POST_MEM_WRITE( arg2, sizeof(struct vki_msqid64_ds) );
++ break;
++ }
++}
++
++#endif
++
++PRE(sys_chflags)
++{
++ PRINT("sys_chflags ( %#lx(%s), 0x%lx )", ARG1,(char *)ARG1,ARG2);
++ PRE_REG_READ2(long, "chflags", /* was "chown": wrong syscall name in error reports */
++ const char *, path, vki_int32_t, flags);
++ PRE_MEM_RASCIIZ( "chflags(path)", ARG1 );
++}
++
++PRE(sys_fchflags)
++{
++ PRINT("sys_fchflags ( %ld, %ld )", ARG1,ARG2);
++ PRE_REG_READ2(long, "fchflags", unsigned int, fildes, vki_int32_t, flags);
++}
++
++
++PRE(sys_modfind)
++{
++ PRINT("sys_modfind ( %#lx )",ARG1);
++ PRE_REG_READ1(long, "modfind", char *, modname);
++ PRE_MEM_RASCIIZ( "modfind(modname)", ARG1 );
++}
++
++PRE(sys_modstat)
++{
++ PRINT("sys_modstat ( %ld, %#lx )",ARG1,ARG2);
++ PRE_REG_READ2(long, "modstat", int, modid, struct module_stat *, buf);
++ PRE_MEM_WRITE( "modstat(buf)", ARG2, sizeof(struct vki_module_stat) );
++}
++
++POST(sys_modstat)
++{
++ POST_MEM_WRITE( ARG2, sizeof(struct vki_module_stat) );
++}
++
++PRE(sys_lkmnosys0)
++{
++ PRINT("sys_lkmnosys0 ()");
++ PRE_REG_READ0(long, "lkmnosys0");
++}
++
++PRE(sys_lkmnosys1)
++{
++ PRINT("sys_lkmnosys1 ()");
++ PRE_REG_READ0(long, "lkmnosys1");
++}
++
++PRE(sys_lkmnosys2)
++{
++ PRINT("sys_lkmnosys2 ()");
++ PRE_REG_READ0(long, "lkmnosys2");
++}
++
++PRE(sys_lkmnosys3)
++{
++ PRINT("sys_lkmnosys3 ()");
++ PRE_REG_READ0(long, "lkmnosys3");
++}
++
++PRE(sys_lkmnosys4)
++{
++ PRINT("sys_lkmnosys4 ()");
++ PRE_REG_READ0(long, "lkmnosys4");
++}
++
++PRE(sys_lkmnosys5)
++{
++ PRINT("sys_lkmnosys5 ()");
++ PRE_REG_READ0(long, "lkmnosys5");
++}
++
++PRE(sys_lkmnosys6)
++{
++ PRINT("sys_lkmnosys6 ()");
++ PRE_REG_READ0(long, "lkmnosys6");
++}
++
++PRE(sys_lkmnosys7)
++{
++ PRINT("sys_lkmnosys7 ()");
++ PRE_REG_READ0(long, "lkmnosys7");
++}
++
++PRE(sys_lkmnosys8)
++{
++ PRINT("sys_lkmnosys8 ()");
++ PRE_REG_READ0(long, "lkmnosys8");
++}
++
++PRE(sys_kenv)
++{
++ PRINT("sys_kenv ( %ld, %#lx, %#lx, %ld )", ARG1,ARG2,ARG3,ARG4);
++ PRE_REG_READ4(long, "kenv",
++ int, action, const char *, name, char *, value, int, len);
++ switch (ARG1) {
++ case VKI_KENV_GET:
++ case VKI_KENV_SET:
++ case VKI_KENV_UNSET:
++ PRE_MEM_RASCIIZ("kenv(name)", ARG2);
++ /* FALLTHROUGH */
++ case VKI_KENV_DUMP:
++ break;
++ default:
++ I_die_here;
++ }
++}
++
++POST(sys_kenv)
++{
++ if (SUCCESS) {
++ switch (ARG1) {
++ case VKI_KENV_GET:
++ POST_MEM_WRITE(ARG3, ARG4);
++ break;
++ case VKI_KENV_DUMP:
++ if (ARG3 != (Addr)NULL)
++ POST_MEM_WRITE(ARG3, ARG4);
++ break;
++ }
++ }
++}
++
++PRE(sys_uuidgen)
++{
++ PRINT("sys_uuidgen ( %#lx, %ld )", ARG1,ARG2);
++ PRE_REG_READ2(long, "uuidgen",
++ struct vki_uuid *, store, int, count);
++ PRE_MEM_WRITE( "uuidgen(store)", ARG1, ARG2 * sizeof(struct vki_uuid));
++}
++
++POST(sys_uuidgen)
++{
++ if (SUCCESS)
++ POST_MEM_WRITE( ARG1, ARG2 * sizeof(struct vki_uuid) );
++}
++
++
++PRE(sys_shmget)
++{
++ PRINT("sys_shmget ( %ld, %ld, %ld )",ARG1,ARG2,ARG3);
++ PRE_REG_READ3(long, "shmget", vki_key_t, key, vki_size_t, size, int, shmflg);
++}
++
++PRE(sys_shmat)
++{
++ UWord arg2tmp;
++ PRINT("sys_shmat ( %ld, %#lx, %ld )",ARG1,ARG2,ARG3);
++ PRE_REG_READ3(long, "shmat",
++ int, shmid, const void *, shmaddr, int, shmflg);
++ arg2tmp = ML_(generic_PRE_sys_shmat)(tid, ARG1,ARG2,ARG3);
++ if (arg2tmp == 0)
++ SET_STATUS_Failure( VKI_EINVAL );
++ else
++ ARG2 = arg2tmp;
++}
++
++POST(sys_shmat)
++{
++ ML_(generic_POST_sys_shmat)(tid, RES,ARG1,ARG2,ARG3);
++}
++
++PRE(sys_shmdt)
++{
++ PRINT("sys_shmdt ( %#lx )",ARG1);
++ PRE_REG_READ1(long, "shmdt", const void *, shmaddr);
++ if (!ML_(generic_PRE_sys_shmdt)(tid, ARG1))
++ SET_STATUS_Failure( VKI_EINVAL );
++}
++
++POST(sys_shmdt)
++{
++ ML_(generic_POST_sys_shmdt)(tid, RES,ARG1);
++}
++
++PRE(sys_shmctl)
++{
++ PRINT("sys_shmctl ( %ld, %ld, %#lx )",ARG1,ARG2,ARG3);
++ PRE_REG_READ3(long, "shmctl",
++ int, shmid, int, cmd, struct vki_shmid_ds *, buf);
++ switch (ARG2 /* cmd */) {
++ case VKI_IPC_STAT:
++ PRE_MEM_WRITE( "shmctl(IPC_STAT, buf)",
++ ARG3, sizeof(struct vki_shmid_ds) );
++ break;
++ case VKI_IPC_SET:
++ PRE_MEM_READ( "shmctl(IPC_SET, buf)",
++ ARG3, sizeof(struct vki_shmid_ds) );
++ break;
++ }
++}
++
++PRE(sys_shmctl7)
++{
++ PRINT("sys_shmctl7 ( %ld, %ld, %#lx )",ARG1,ARG2,ARG3);
++ PRE_REG_READ3(long, "shmctl",
++ int, shmid, int, cmd, struct vki_shmid_ds7 *, buf);
++ switch (ARG2 /* cmd */) {
++ case VKI_IPC_STAT:
++ PRE_MEM_WRITE( "shmctl7(IPC_STAT, buf)",
++ ARG3, sizeof(struct vki_shmid_ds7) );
++ break;
++ case VKI_IPC_SET:
++ PRE_MEM_READ( "shmctl7(IPC_SET, buf)",
++ ARG3, sizeof(struct vki_shmid_ds7) );
++ break;
++ }
++}
++
++POST(sys_shmctl)
++{
++ if (ARG2 == VKI_IPC_STAT) {
++ POST_MEM_WRITE( ARG3, sizeof(struct vki_shmid_ds) );
++ }
++}
++
++POST(sys_shmctl7)
++{
++ if (ARG2 == VKI_IPC_STAT) {
++ POST_MEM_WRITE( ARG3, sizeof(struct vki_shmid_ds7) );
++ }
++}
++
++PRE(sys_semget)
++{
++ PRINT("sys_semget ( %ld, %ld, %ld )",ARG1,ARG2,ARG3);
++ PRE_REG_READ3(long, "semget", vki_key_t, key, int, nsems, int, semflg);
++}
++
++PRE(sys_shm_open)
++{
++ PRE_REG_READ3(long, "shm_open",
++ const char *, "name", int, "flags", vki_mode_t, "mode");
++ if (ARG1 == VKI_SHM_ANON) {
++ PRINT("sys_shm_open(%#lx(SHM_ANON), %ld, %ld)", ARG1, ARG2, ARG3);
++ } else {
++ PRINT("sys_shm_open(%#lx(%s), %ld, %ld)", ARG1, (char *)ARG1, ARG2, ARG3);
++ PRE_MEM_RASCIIZ( "shm_open(filename)", ARG1 );
++ }
++ *flags |= SfMayBlock;
++}
++
++POST(sys_shm_open)
++{
++ vg_assert(SUCCESS);
++ if (!ML_(fd_allowed)(RES, "shm_open", tid, True)) {
++ VG_(close)(RES);
++ SET_STATUS_Failure( VKI_EMFILE );
++ } else {
++ if (VG_(clo_track_fds))
++ ML_(record_fd_open_with_given_name)(tid, RES, (HChar*)ARG1);
++ }
++}
++
++PRE(sys_shm_unlink)
++{
++ PRINT("sys_shm_unlink(%#lx(%s))", ARG1, (char *)ARG1);
++ PRE_REG_READ1(long, "shm_unlink",
++ const char *, "name");
++
++ PRE_MEM_RASCIIZ( "shm_unlink(filename)", ARG1 );
++
++ *flags |= SfMayBlock;
++}
++
++PRE(sys_semop)
++{
++ *flags |= SfMayBlock;
++ PRINT("sys_semop ( %ld, %#lx, %lu )",ARG1,ARG2,ARG3);
++ PRE_REG_READ3(long, "semop",
++ int, semid, struct sembuf *, sops, unsigned, nsops); /* was "nsoops": typo; semop(2) calls it nsops */
++ ML_(generic_PRE_sys_semop)(tid, ARG1,ARG2,ARG3);
++}
++
++struct ipc_perm7 {
++ unsigned short cuid; /* creator user id */
++ unsigned short cgid; /* creator group id */
++ unsigned short uid; /* user id */
++ unsigned short gid; /* group id */
++ unsigned short mode; /* r/w permission */
++ unsigned short seq; /* sequence # (to generate unique ipcid) */
++ vki_key_t key; /* user specified msg/sem/shm key */
++};
++
++struct semid_ds7 {
++ struct ipc_perm7 sem_perm; /* operation permission struct */
++ struct sem *sem_base; /* pointer to first semaphore in set */
++ unsigned short sem_nsems; /* number of sems in set */
++ vki_time_t sem_otime; /* last operation time */
++ long sem_pad1; /* SVABI/386 says I need this here */
++ vki_time_t sem_ctime; /* last change time */
++ /* Times measured in secs since */
++ /* 00:00:00 GMT, Jan. 1, 1970 */
++ long sem_pad2; /* SVABI/386 says I need this here */
++ long sem_pad3[4]; /* SVABI/386 says I need this here */
++};
++
++PRE(sys___semctl7)
++{
++ switch (ARG3) {
++ case VKI_IPC_INFO:
++ case VKI_SEM_INFO:
++ PRINT("sys_semctl ( %ld, %ld, %ld, %#lx )",ARG1,ARG2,ARG3,ARG4);
++ PRE_REG_READ4(long, "semctl",
++ int, semid, int, semnum, int, cmd, struct seminfo *, arg);
++ break;
++ case VKI_IPC_STAT:
++ case VKI_SEM_STAT:
++ case VKI_IPC_SET:
++ PRINT("sys_semctl ( %ld, %ld, %ld, %#lx )",ARG1,ARG2,ARG3,ARG4);
++ PRE_REG_READ4(long, "semctl",
++ int, semid, int, semnum, int, cmd, struct semid_ds7 *, arg);
++ break;
++ case VKI_GETALL:
++ case VKI_SETALL:
++ PRINT("sys_semctl ( %ld, %ld, %ld, %#lx )",ARG1,ARG2,ARG3,ARG4);
++ PRE_REG_READ4(long, "semctl",
++ int, semid, int, semnum, int, cmd, unsigned short *, arg);
++ break;
++ default:
++ PRINT("sys_semctl ( %ld, %ld, %ld )",ARG1,ARG2,ARG3);
++ PRE_REG_READ3(long, "semctl",
++ int, semid, int, semnum, int, cmd);
++ break;
++ }
++ ML_(generic_PRE_sys_semctl)(tid, ARG1,ARG2,ARG3,ARG4);
++}
++
++POST(sys___semctl7)
++{
++ ML_(generic_POST_sys_semctl)(tid, RES,ARG1,ARG2,ARG3,ARG4);
++}
++
++PRE(sys___semctl)
++{
++ switch (ARG3) {
++ case VKI_IPC_INFO:
++ case VKI_SEM_INFO:
++ PRINT("sys_semctl ( %ld, %ld, %ld, %#lx )",ARG1,ARG2,ARG3,ARG4);
++ PRE_REG_READ4(long, "semctl",
++ int, semid, int, semnum, int, cmd, struct seminfo *, arg);
++ break;
++ case VKI_IPC_STAT:
++ case VKI_SEM_STAT:
++ case VKI_IPC_SET:
++ PRINT("sys_semctl ( %ld, %ld, %ld, %#lx )",ARG1,ARG2,ARG3,ARG4);
++ PRE_REG_READ4(long, "semctl",
++ int, semid, int, semnum, int, cmd, struct semid_ds *, arg);
++ break;
++ case VKI_GETALL:
++ case VKI_SETALL:
++ PRINT("sys_semctl ( %ld, %ld, %ld, %#lx )",ARG1,ARG2,ARG3,ARG4);
++ PRE_REG_READ4(long, "semctl",
++ int, semid, int, semnum, int, cmd, unsigned short *, arg);
++ break;
++ default:
++ PRINT("sys_semctl ( %ld, %ld, %ld )",ARG1,ARG2,ARG3);
++ PRE_REG_READ3(long, "semctl",
++ int, semid, int, semnum, int, cmd);
++ break;
++ }
++ ML_(generic_PRE_sys_semctl)(tid, ARG1,ARG2,ARG3,ARG4);
++}
++
++POST(sys___semctl)
++{
++ ML_(generic_POST_sys_semctl)(tid, RES,ARG1,ARG2,ARG3,ARG4);
++}
++
++PRE(sys_eaccess)
++{
++ PRINT("sys_eaccess ( %#lx(%s), %ld )", ARG1,(char*)ARG1,ARG2);
++ PRE_REG_READ2(long, "eaccess", const char *, pathname, int, mode);
++ PRE_MEM_RASCIIZ( "eaccess(pathname)", ARG1 );
++}
++
++
++/* ---------------------------------------------------------------------
++ *at wrappers
++ ------------------------------------------------------------------ */
++
++PRE(sys_openat)
++{
++
++ if (ARG3 & VKI_O_CREAT) {
++ // 4-arg version
++ PRINT("sys_openat ( %ld, %#lx(%s), %ld, %ld )",ARG1,ARG2,(char*)ARG2,ARG3,ARG4);
++ PRE_REG_READ4(int, "openat",
++ int, dfd, const char *, filename, int, flags, vki_mode_t, mode);
++ } else {
++ // 3-arg version
++ PRINT("sys_openat ( %ld, %#lx(%s), %ld )",ARG1,ARG2,(char*)ARG2,ARG3);
++ PRE_REG_READ3(int, "openat",
++ int, dfd, const char *, filename, int, flags);
++ }
++
++ if (ARG1 != (unsigned)VKI_AT_FDCWD && !ML_(fd_allowed)(ARG1, "openat", tid, False))
++ SET_STATUS_Failure( VKI_EBADF );
++ else
++ PRE_MEM_RASCIIZ( "openat(filename)", ARG2 );
++
++ /* Otherwise handle normally */
++ *flags |= SfMayBlock;
++}
++
++POST(sys_openat)
++{
++ vg_assert(SUCCESS);
++ if (!ML_(fd_allowed)(RES, "openat", tid, True)) {
++ VG_(close)(RES);
++ SET_STATUS_Failure( VKI_EMFILE );
++ } else {
++ if (VG_(clo_track_fds))
++ ML_(record_fd_open_with_given_name)(tid, RES, (HChar*)ARG2);
++ }
++}
++
++PRE(sys_mkdirat)
++{
++ *flags |= SfMayBlock;
++ PRINT("sys_mkdirat ( %ld, %#lx(%s), %ld )", ARG1,ARG2,(char*)ARG2,ARG3);
++ PRE_REG_READ3(long, "mkdirat",
++ int, dfd, const char *, pathname, int, mode);
++ PRE_MEM_RASCIIZ( "mkdirat(pathname)", ARG2 );
++}
++
++PRE(sys_mkfifoat)
++{
++ PRINT("sys_mkfifoat ( %ld, %#lx(%s), 0x%lx )", ARG1,ARG2,(char*)ARG2,ARG3 );
++ PRE_REG_READ3(long, "mkfifoat",
++ int, dfd, const char *, pathname, int, mode);
++ PRE_MEM_RASCIIZ( "mkfifoat(pathname)", ARG2 );
++}
++
++PRE(sys_mknodat)
++{
++ PRINT("sys_mknodat ( %ld, %#lx(%s), 0x%lx, 0x%lx )", ARG1,ARG2,(char*)ARG2,ARG3,ARG4 );
++ PRE_REG_READ4(long, "mknodat",
++ int, dfd, const char *, pathname, int, mode, unsigned, dev);
++ PRE_MEM_RASCIIZ( "mknodat(pathname)", ARG2 );
++}
++
++PRE(sys_fchownat)
++{
++ PRINT("sys_fchownat ( %ld, %#lx(%s), 0x%lx, 0x%lx )", ARG1,ARG2,(char*)ARG2,ARG3,ARG4);
++ PRE_REG_READ4(long, "fchownat",
++ int, dfd, const char *, path,
++ vki_uid_t, owner, vki_gid_t, group);
++ PRE_MEM_RASCIIZ( "fchownat(path)", ARG2 );
++}
++
++PRE(sys_futimesat)
++{
++ PRINT("sys_futimesat ( %ld, %#lx(%s), %#lx )", ARG1,ARG2,(char*)ARG2,ARG3);
++ PRE_REG_READ3(long, "futimesat",
++ int, dfd, char *, filename, struct timeval *, tvp);
++ if (ARG2 != 0)
++ PRE_MEM_RASCIIZ( "futimesat(filename)", ARG2 );
++ if (ARG3 != 0)
++ PRE_MEM_READ( "futimesat(tvp)", ARG3, 2 * sizeof(struct vki_timeval) );
++}
++
++PRE(sys_fstatat)
++{
++ PRINT("sys_fstatat ( %ld, %#lx(%s), %#lx )", ARG1,ARG2,(char*)ARG2,ARG3);
++ PRE_REG_READ3(long, "fstatat",
++ int, dfd, char *, file_name, struct stat *, buf);
++ PRE_MEM_RASCIIZ( "fstatat(file_name)", ARG2 );
++ PRE_MEM_WRITE( "fstatat(buf)", ARG3, sizeof(struct vki_stat) );
++}
++
++POST(sys_fstatat)
++{
++ POST_MEM_WRITE( ARG3, sizeof(struct vki_stat) );
++}
++
++PRE(sys_unlinkat)
++{
++ *flags |= SfMayBlock;
++ PRINT("sys_unlinkat ( %ld, %#lx(%s) )", ARG1,ARG2,(char*)ARG2);
++ PRE_REG_READ2(long, "unlinkat", int, dfd, const char *, pathname);
++ PRE_MEM_RASCIIZ( "unlinkat(pathname)", ARG2 );
++}
++
++PRE(sys_renameat)
++{
++ PRINT("sys_renameat ( %ld, %#lx(%s), %ld, %#lx(%s) )", ARG1,ARG2,(char*)ARG2,ARG3,ARG4,(char*)ARG4);
++ PRE_REG_READ4(long, "renameat",
++ int, olddfd, const char *, oldpath,
++ int, newdfd, const char *, newpath);
++ PRE_MEM_RASCIIZ( "renameat(oldpath)", ARG2 );
++ PRE_MEM_RASCIIZ( "renameat(newpath)", ARG4 );
++}
++
++PRE(sys_linkat)
++{
++ *flags |= SfMayBlock;
++ PRINT("sys_linkat ( %ld, %#lx(%s), %ld, %#lx(%s), %ld )",ARG1,ARG2,(char*)ARG2,ARG3,ARG4,(char*)ARG4,ARG5);
++ PRE_REG_READ5(long, "linkat",
++ int, olddfd, const char *, oldpath,
++ int, newdfd, const char *, newpath,
++ int, flags);
++ PRE_MEM_RASCIIZ( "linkat(oldpath)", ARG2);
++ PRE_MEM_RASCIIZ( "linkat(newpath)", ARG4);
++}
++
++PRE(sys_symlinkat)
++{
++ *flags |= SfMayBlock;
++ PRINT("sys_symlinkat ( %#lx(%s), %ld, %#lx(%s) )",ARG1,(char*)ARG1,ARG2,ARG3,(char*)ARG3);
++ PRE_REG_READ3(long, "symlinkat",
++ const char *, oldpath, int, newdfd, const char *, newpath);
++ PRE_MEM_RASCIIZ( "symlinkat(oldpath)", ARG1 );
++ PRE_MEM_RASCIIZ( "symlinkat(newpath)", ARG3 );
++}
++
++PRE(sys_readlinkat)
++{
++ HChar name[25];
++ Word saved = SYSNO;
++
++ PRINT("sys_readlinkat ( %ld, %#lx(%s), %#lx, %llu )", ARG1,ARG2,(char*)ARG2,ARG3,(ULong)ARG4);
++ PRE_REG_READ4(long, "readlinkat",
++ int, dfd, const char *, path, char *, buf, int, bufsiz);
++ PRE_MEM_RASCIIZ( "readlinkat(path)", ARG2 );
++ PRE_MEM_WRITE( "readlinkat(buf)", ARG3,ARG4 );
++
++ /*
++ * Handle the case where readlinkat is looking at /proc/curproc/file or
++ * /proc/<pid>/file.
++ */
++ VG_(sprintf)(name, "/proc/%d/file", VG_(getpid)());
++ if (ML_(safe_to_deref)((void*)ARG2, 1)
++ && (VG_(strcmp)((HChar *)ARG2, name) == 0
++ || VG_(strcmp)((HChar *)ARG2, "/proc/curproc/file") == 0)) {
++ VG_(sprintf)(name, "/proc/self/fd/%d", VG_(cl_exec_fd));
++ SET_STATUS_from_SysRes( VG_(do_syscall4)(saved, ARG1, (UWord)name,
++ ARG3, ARG4));
++ } else {
++ /* Normal case */
++ SET_STATUS_from_SysRes( VG_(do_syscall4)(saved, ARG1, ARG2, ARG3, ARG4));
++ }
++
++ if (SUCCESS && RES > 0)
++ POST_MEM_WRITE( ARG3, RES );
++}
++
++PRE(sys_fchmodat)
++{
++ PRINT("sys_fchmodat ( %ld, %#lx(%s), %ld )", ARG1,ARG2,(char*)ARG2,ARG3);
++ PRE_REG_READ3(long, "fchmodat",
++ int, dfd, const char *, path, vki_mode_t, mode);
++ PRE_MEM_RASCIIZ( "fchmodat(path)", ARG2 );
++}
++
++PRE(sys_faccessat)
++{
++ PRINT("sys_faccessat ( %ld, %#lx(%s), %ld )", ARG1,ARG2,(char*)ARG2,ARG3);
++ PRE_REG_READ3(long, "faccessat",
++ int, dfd, const char *, pathname, int, mode);
++ PRE_MEM_RASCIIZ( "faccessat(pathname)", ARG2 );
++}
++
++/* ---------------------------------------------------------------------
++ __acl* wrappers
++ ------------------------------------------------------------------ */
++
++PRE(sys___acl_get_file)
++{
++ PRINT("sys___acl_get_file ( %#lx(%s), %ld, %#lx )", ARG1,(char *)ARG1,ARG2,ARG3);
++ PRE_REG_READ3(long, "__acl_get_file",
++ const char *, path, int, acltype, struct vki_acl *, aclp);
++ PRE_MEM_WRITE( "__acl_get_file(aclp)", ARG3, sizeof(struct vki_acl) );
++}
++
++POST(sys___acl_get_file)
++{
++ vg_assert(SUCCESS);
++ if (RES == 0) {
++ POST_MEM_WRITE( ARG3, sizeof(struct vki_acl) );
++ }
++}
++
++PRE(sys___acl_set_file)
++{
++ PRINT("sys___acl_set_file ( %#lx(%s), %ld, %#lx )", ARG1,(char *)ARG1,ARG2,ARG3);
++ PRE_REG_READ3(long, "__acl_set_file",
++ const char *, path, int, acltype, struct vki_acl *, aclp);
++ PRE_MEM_READ( "__acl_set_file(aclp)", ARG3, sizeof(struct vki_acl) );
++}
++
++PRE(sys___acl_get_fd)
++{
++ PRINT("sys___acl_get_fd ( %ld, %ld, %#lx )", ARG1,ARG2,ARG3);
++ PRE_REG_READ3(long, "__acl_get_fd",
++ int, fd, int, acltype, struct vki_acl *, aclp);
++ PRE_MEM_WRITE( "__acl_get_fd(aclp)", ARG3, sizeof(struct vki_acl) ); /* was "__acl_get_file(aclp)": copy-paste */
++}
++
++POST(sys___acl_get_fd)
++{
++ vg_assert(SUCCESS);
++ if (RES == 0) {
++ POST_MEM_WRITE( ARG3, sizeof(struct vki_acl) );
++ }
++}
++
++PRE(sys___acl_set_fd)
++{
++ PRINT("sys___acl_set_fd ( %ld, %ld, %#lx )", ARG1,ARG2,ARG3);
++ PRE_REG_READ3(long, "__acl_set_fd",
++ int, fd, int, acltype, struct vki_acl *, aclp);
++ PRE_MEM_READ( "__acl_set_fd(aclp)", ARG3, sizeof(struct vki_acl) ); /* was "__acl_get_file(aclp)": copy-paste */
++}
++
++PRE(sys___acl_delete_file)
++{
++ PRINT("sys___acl_delete_file ( %#lx(%s), %ld )", ARG1,(char *)ARG1,ARG2);
++ PRE_REG_READ2(long, "__acl_delete_file",
++ const char *, path, int, acltype);
++}
++
++PRE(sys___acl_delete_fd)
++{
++ PRINT("sys___acl_delete_fd ( %ld, %ld )", ARG1,ARG2);
++ PRE_REG_READ2(long, "__acl_delete_fd",
++ int, fd, int, acltype);
++}
++
++PRE(sys___acl_aclcheck_file)
++{
++ PRINT("sys___acl_aclcheck_file ( %#lx(%s), %ld, %#lx )", ARG1,(char *)ARG1,ARG2,ARG3);
++ PRE_REG_READ3(long, "__acl_aclcheck_file",
++ const char *, path, int, acltype, struct vki_acl *, aclp);
++ PRE_MEM_READ( "__acl_aclcheck_file(aclp)", ARG3, sizeof(struct vki_acl) );
++}
++
++PRE(sys___acl_aclcheck_fd)
++{
++ PRINT("sys___acl_aclcheck_fd ( %ld, %ld, %#lx )", ARG1,ARG2,ARG3);
++ PRE_REG_READ3(long, "__acl_aclcheck_fd",
++ int, fd, int, acltype, struct vki_acl *, aclp);
++ PRE_MEM_READ( "__acl_aclcheck_fd(aclp)", ARG3, sizeof(struct vki_acl) );
++}
++
++PRE(sys___acl_get_link)
++{
++ PRINT("sys___acl_get_link ( %#lx(%s), %ld, %#lx )", ARG1,(char *)ARG1,ARG2,ARG3);
++ PRE_REG_READ3(long, "__acl_get_link",
++ const char *, path, int, acltype, struct vki_acl *, aclp);
++ PRE_MEM_WRITE( "__acl_get_link(aclp)", ARG3, sizeof(struct vki_acl) );
++}
++
++POST(sys___acl_get_link)
++{
++ vg_assert(SUCCESS);
++ if (RES == 0) {
++ POST_MEM_WRITE( ARG3, sizeof(struct vki_acl) );
++ }
++}
++
++PRE(sys___acl_set_link)
++{
++ PRINT("sys___acl_set_link ( %#lx(%s), %ld, %#lx )", ARG1,(char *)ARG1,ARG2,ARG3);
++ PRE_REG_READ3(long, "__acl_set_link",
++ const char *, path, int, acltype, struct vki_acl *, aclp);
++ PRE_MEM_READ( "__acl_set_link(aclp)", ARG3, sizeof(struct vki_acl) );
++}
++
++PRE(sys___acl_delete_link)
++{
++ PRINT("sys___acl_delete_link ( %#lx(%s), %ld )", ARG1,(char *)ARG1,ARG2);
++ PRE_REG_READ2(long, "__acl_delete_link",
++ const char *, path, int, acltype);
++}
++
++PRE(sys___acl_aclcheck_link)
++{
++ PRINT("sys___acl_aclcheck_link ( %#lx(%s), %ld, %#lx )", ARG1,(char *)ARG1,ARG2,ARG3);
++ PRE_REG_READ3(long, "__acl_aclcheck_link",
++ const char *, path, int, acltype, struct vki_acl *, aclp);
++ PRE_MEM_READ( "__acl_aclcheck_link(aclp)", ARG3, sizeof(struct vki_acl) );
++}
++
++POST(sys_getcontext)
++{
++ POST_MEM_WRITE( ARG1, sizeof(struct vki_ucontext) );
++}
++
++POST(sys_swapcontext)
++{
++ if (SUCCESS)
++ POST_MEM_WRITE( ARG1, sizeof(struct vki_ucontext) );
++}
++
++PRE(sys_fcntl)
++{
++ switch (ARG2) {
++ // These ones ignore ARG3.
++ case VKI_F_GETFD:
++ case VKI_F_GETFL:
++ case VKI_F_GETOWN:
++ PRINT("sys_fcntl ( %ld, %ld )", ARG1,ARG2);
++ PRE_REG_READ2(long, "fcntl", unsigned int, fd, unsigned int, cmd);
++ break;
++
++ // These ones use ARG3 as "arg".
++ case VKI_F_DUPFD:
++ case VKI_F_DUPFD_CLOEXEC:
++ case VKI_F_SETFD:
++ case VKI_F_SETFL:
++ case VKI_F_SETOWN:
++ PRINT("sys_fcntl[ARG3=='arg'] ( %ld, %ld, %ld )", ARG1,ARG2,ARG3);
++ PRE_REG_READ3(long, "fcntl",
++ unsigned int, fd, unsigned int, cmd, unsigned long, arg);
++ break;
++
++ // These ones use ARG3 as "lock" - obsolete.
++ case VKI_F_OSETLKW:
++ *flags |= SfMayBlock;
++ /* FALLTHROUGH */
++ case VKI_F_OGETLK:
++ case VKI_F_OSETLK:
++ PRINT("sys_fcntl[ARG3=='lock'] ( %ld, %ld, %#lx )", ARG1,ARG2,ARG3);
++ PRE_REG_READ3(long, "fcntl",
++ unsigned int, fd, unsigned int, cmd,
++ struct oflock *, lock);
++ break;
++
++ // This one uses ARG3 as "oldd" and ARG4 as "newd".
++ case VKI_F_DUP2FD:
++ PRINT("sys_fcntl[ARG3=='oldd', ARG4=='newd'] ( %ld, %ld, %ld, %ld )",
++ ARG1,ARG2,ARG3,ARG4);
++ PRE_REG_READ4(long, "fcntl",
++ unsigned int, fd, unsigned int, cmd,
++ unsigned long, oldd, unsigned long, newd);
++ break;
++
++ // These ones use ARG3 as "lock".
++ case VKI_F_SETLKW:
++ *flags |= SfMayBlock;
++ /* FALLTHROUGH */
++ case VKI_F_GETLK:
++ case VKI_F_SETLK:
++ case VKI_F_SETLK_REMOTE:
++ PRINT("sys_fcntl[ARG3=='lock'] ( %ld, %ld, %#lx )", ARG1,ARG2,ARG3);
++ PRE_REG_READ3(long, "fcntl",
++ unsigned int, fd, unsigned int, cmd,
++ struct flock *, lock);
++ break;
++
++ default:
++ PRINT("sys_fcntl[UNKNOWN] ( %ld, %ld, %ld )", ARG1,ARG2,ARG3);
++ I_die_here;
++ break;
++ }
++}
++
++POST(sys_fcntl)
++{
++ vg_assert(SUCCESS);
++ if (ARG2 == VKI_F_DUPFD) {
++ if (!ML_(fd_allowed)(RES, "fcntl(DUPFD)", tid, True)) {
++ VG_(close)(RES);
++ SET_STATUS_Failure( VKI_EMFILE );
++ } else {
++ if (VG_(clo_track_fds))
++ ML_(record_fd_open_named)(tid, RES);
++ }
++ }
++}
++
++PRE(sys_ioctl)
++{
++ UInt dir = _VKI_IOC_DIR(ARG2);
++ UInt size = _VKI_IOC_SIZE(ARG2);
++ *flags |= SfMayBlock;
++ PRINT("sys_ioctl ( %ld, 0x%lx, %#lx )",ARG1,ARG2,ARG3);
++ PRE_REG_READ3(long, "ioctl",
++ unsigned int, fd, unsigned int, request, unsigned long, arg);
++
++/* On netbsd, ALL ioctl's are IOR/IOW encoded. Just use the default decoder */
++ if (VG_(strstr)(VG_(clo_sim_hints), "lax-ioctls") != NULL) {
++ /*
++ * Be very lax about ioctl handling; the only
++ * assumption is that the size is correct. Doesn't
++ * require the full buffer to be initialized when
++ * writing. Without this, using some device
++ * drivers with a large number of strange ioctl
++ * commands becomes very tiresome.
++ */
++ } else if (/* size == 0 || */ dir == _VKI_IOC_NONE) {
++ static Int moans = 3;
++ if (moans > 0 && !VG_(clo_xml)) {
++ moans--;
++ VG_(message)(Vg_UserMsg,
++ "Warning: noted but unhandled ioctl 0x%lx"
++ " with no size/direction hints\n",
++ ARG2);
++ VG_(message)(Vg_UserMsg,
++ " This could cause spurious value errors"
++ " to appear.\n");
++ VG_(message)(Vg_UserMsg,
++ " See README_MISSING_SYSCALL_OR_IOCTL for "
++ "guidance on writing a proper wrapper.\n" );
++ }
++ } else {
++ if ((dir & _VKI_IOC_WRITE) && size > 0)
++ PRE_MEM_READ( "ioctl(generic)", ARG3, size);
++ if ((dir & _VKI_IOC_READ) && size > 0)
++ PRE_MEM_WRITE( "ioctl(generic)", ARG3, size);
++ }
++}
++
++POST(sys_ioctl)
++{
++ UInt dir = _VKI_IOC_DIR(ARG2);
++ UInt size = _VKI_IOC_SIZE(ARG2);
++ vg_assert(SUCCESS);
++ if (size > 0 && (dir & _VKI_IOC_READ)
++ && RES == 0 && ARG3 != (Addr)NULL)
++ POST_MEM_WRITE(ARG3, size);
++}
++
++PRE(sys_ptrace)
++{
++ struct vki_ptrace_io_desc *io_desc;
++ PRINT("sys_ptrace ( %ld, %ld, 0x%lx, %ld)", ARG1, ARG2, ARG3, ARG4);
++
++ PRE_REG_READ4(int, "ptrace", int, request, int, pid, char *, addr, int, data);
++
++ switch (ARG1) {
++ case VKI_PTRACE_TRACEME:
++ break;
++ case VKI_PTRACE_READ_I:
++ case VKI_PTRACE_READ_D:
++ break;
++
++ case VKI_PTRACE_WRITE_I:
++ case VKI_PTRACE_WRITE_D:
++ break;
++
++ case VKI_PTRACE_IO:
++ PRE_MEM_READ("ptrace", ARG3, sizeof(struct vki_ptrace_io_desc));
++ io_desc = (struct vki_ptrace_io_desc *)ARG3;
++ switch (io_desc->piod_op) {
++ case VKI_PIOD_READ_D:
++ case VKI_PIOD_READ_I:
++ PRE_MEM_WRITE( "ptrace", (UWord)io_desc->piod_addr, io_desc->piod_len);
++ break;
++ case VKI_PIOD_WRITE_D:
++ case VKI_PIOD_WRITE_I:
++ PRE_MEM_READ( "ptrace", (UWord)io_desc->piod_addr, io_desc->piod_len);
++ break;
++ }
++ break;
++
++ case VKI_PTRACE_CONTINUE:
++ break;
++
++ case VKI_PTRACE_STEP:
++ break;
++
++ case VKI_PTRACE_KILL:
++ break;
++
++ case VKI_PTRACE_ATTACH:
++ break;
++
++ case VKI_PTRACE_DETACH:
++ break;
++
++ case VKI_PTRACE_GETREGS:
++ PRE_MEM_WRITE( "ptrace", ARG3, sizeof(struct vki_reg_struct));
++ break;
++
++ case VKI_PTRACE_SETREGS:
++ PRE_MEM_READ( "ptrace", ARG3, sizeof(struct vki_reg_struct));
++ break;
++
++ case VKI_PTRACE_GETFPREGS:
++ PRE_MEM_WRITE( "ptrace", ARG3, sizeof(struct vki_fpreg));
++ break;
++
++ case VKI_PTRACE_SETFPREGS:
++ PRE_MEM_READ( "ptrace", ARG3, sizeof(struct vki_fpreg));
++ break;
++
++ case VKI_PTRACE_GETDBREGS:
++ PRE_MEM_WRITE( "ptrace", ARG3, sizeof(struct vki_dbreg));
++ break;
++
++ case VKI_PTRACE_SETDBREGS:
++ PRE_MEM_READ( "ptrace", ARG3, sizeof(struct vki_dbreg));
++ break;
++
++ case VKI_PTRACE_LWPINFO:
++ PRE_MEM_WRITE( "ptrace", ARG3, sizeof(struct vki_ptrace_lwpinfo));
++ break;
++
++ case VKI_PTRACE_GETNUMLWPS:
++ break;
++
++ case VKI_PTRACE_GETLWPLIST:
++ PRE_MEM_WRITE( "ptrace", ARG3, sizeof(vki_lwpid_t) * ARG4);
++ break;
++
++ case VKI_PTRACE_SETSTEP:
++ break;
++
++ case VKI_PTRACE_CLEARSTEP:
++ break;
++
++ case VKI_PTRACE_SUSPEND:
++ break;
++
++ case VKI_PTRACE_RESUME:
++ break;
++
++ case VKI_PTRACE_TO_SCE:
++ break;
++
++ case VKI_PTRACE_TO_SCX:
++ break;
++
++ case VKI_PTRACE_SYSCALL:
++ break;
++
++ case VKI_PTRACE_VM_TIMESTAMP:
++ break;
++
++ case VKI_PTRACE_VM_ENTRY:
++ PRE_MEM_WRITE( "ptrace", ARG3, sizeof(struct vki_ptrace_vm_entry));
++ break;
++ }
++}
++
++POST(sys_ptrace)
++{
++ struct vki_ptrace_io_desc *io_desc;
++
++ switch (ARG1) {
++ case VKI_PTRACE_TRACEME:
++ break;
++ case VKI_PTRACE_READ_I:
++ case VKI_PTRACE_READ_D:
++ break;
++
++ case VKI_PTRACE_WRITE_I:
++ case VKI_PTRACE_WRITE_D:
++ break;
++
++ case VKI_PTRACE_IO:
++ io_desc = (struct vki_ptrace_io_desc *)ARG3;
++ switch (io_desc->piod_op) {
++ case VKI_PIOD_READ_D:
++ case VKI_PIOD_READ_I:
++ if (RES != -1)
++ POST_MEM_WRITE((UWord)io_desc->piod_addr, io_desc->piod_len);
++ break;
++ case VKI_PIOD_WRITE_D:
++ case VKI_PIOD_WRITE_I:
++ break;
++ }
++ break;
++
++ case VKI_PTRACE_CONTINUE:
++ break;
++
++ case VKI_PTRACE_STEP:
++ break;
++
++ case VKI_PTRACE_KILL:
++ break;
++
++ case VKI_PTRACE_ATTACH:
++ break;
++
++ case VKI_PTRACE_DETACH:
++ break;
++
++ case VKI_PTRACE_GETREGS:
++ if (RES != -1)
++ POST_MEM_WRITE(ARG3, sizeof(struct vki_reg_struct));
++ break;
++
++ case VKI_PTRACE_SETREGS:
++ break;
++
++ case VKI_PTRACE_GETFPREGS:
++ if (RES != -1)
++ POST_MEM_WRITE(ARG3, sizeof(struct vki_fpreg));
++ break;
++
++ case VKI_PTRACE_SETFPREGS:
++ break;
++
++ case VKI_PTRACE_GETDBREGS:
++ if (RES != -1)
++ POST_MEM_WRITE(ARG3, sizeof(struct vki_dbreg));
++ break;
++
++ case VKI_PTRACE_SETDBREGS:
++ break;
++
++ case VKI_PTRACE_LWPINFO:
++ if (RES != -1)
++ POST_MEM_WRITE(ARG3, sizeof(struct vki_ptrace_lwpinfo));
++ break;
++
++ case VKI_PTRACE_GETNUMLWPS:
++ break;
++
++ case VKI_PTRACE_GETLWPLIST:
++ if (RES != -1)
++ POST_MEM_WRITE(ARG3, sizeof(vki_lwpid_t) * RES);
++ break;
++
++ case VKI_PTRACE_SETSTEP:
++ break;
++
++ case VKI_PTRACE_CLEARSTEP:
++ break;
++
++ case VKI_PTRACE_SUSPEND:
++ break;
++
++ case VKI_PTRACE_RESUME:
++ break;
++
++ case VKI_PTRACE_TO_SCE:
++ break;
++
++ case VKI_PTRACE_TO_SCX:
++ break;
++
++ case VKI_PTRACE_SYSCALL:
++ break;
++
++ case VKI_PTRACE_VM_TIMESTAMP:
++ break;
++
++ case VKI_PTRACE_VM_ENTRY:
++ if (RES != -1)
++ POST_MEM_WRITE(ARG3, sizeof(struct vki_ptrace_vm_entry));
++ break;
++ }
++}
++
++PRE(sys_cpuset_setaffinity)
++{
++
++ PRINT("sys_cpuset_setaffinity ( %ld, %ld, %lld, %llu, %#lx )", ARG1, ARG2,
++ ARG3, ARG4, ARG5);
++ PRE_REG_READ5(int, "cpuset_setaffinity",
++ int, level, int, which, long, id,
++ size_t, setsize, void *, mask);
++ PRE_MEM_READ("cpuset_setaffinity", ARG5, ARG4);
++}
++
++PRE(sys_cpuset_getaffinity)
++{
++
++ PRINT("sys_cpuset_getaffinity ( %ld, %ld, %lld, %llu, %#lx )", ARG1, ARG2,
++ ARG3, ARG4, ARG5);
++ PRE_REG_READ5(int, "cpuset_getaffinity",
++ int, level, int, which, long, id,
++ size_t, setsize, void *, mask);
++ PRE_MEM_WRITE("cpuset_getaffinity", ARG5, ARG4);
++}
++
++POST(sys_cpuset_getaffinity)
++{
++ vg_assert(SUCCESS);
++ if (RES == 0)
++ POST_MEM_WRITE( ARG5, ARG4 );
++}
++
++struct pselect_sized_sigset {
++ const vki_sigset_t *ss;
++ vki_size_t ss_len;
++};
++struct pselect_adjusted_sigset {
++ struct pselect_sized_sigset ss; /* The actual syscall arg */
++ vki_sigset_t adjusted_ss;
++};
++
++PRE(sys_pselect)
++{
++ *flags |= SfMayBlock | SfPostOnFail;
++ PRINT("sys_pselect ( %ld, %#" FMT_REGWORD "x, %#" FMT_REGWORD "x, %#"
++ FMT_REGWORD "x, %#" FMT_REGWORD "x, %#" FMT_REGWORD "x )",
++ SARG1, ARG2, ARG3, ARG4, ARG5, ARG6);
++ PRE_REG_READ6(long, "pselect",
++ int, n, vki_fd_set *, readfds, vki_fd_set *, writefds,
++ vki_fd_set *, exceptfds, struct vki_timeval *, timeout,
++ void *, sig);
++ // XXX: this possibly understates how much memory is read.
++ if (ARG2 != 0)
++ PRE_MEM_READ( "pselect(readfds)",
++ ARG2, ARG1/8 /* __FD_SETSIZE/8 */ );
++ if (ARG3 != 0)
++ PRE_MEM_READ( "pselect(writefds)",
++ ARG3, ARG1/8 /* __FD_SETSIZE/8 */ );
++ if (ARG4 != 0)
++ PRE_MEM_READ( "pselect(exceptfds)",
++ ARG4, ARG1/8 /* __FD_SETSIZE/8 */ );
++ if (ARG5 != 0)
++ PRE_MEM_READ( "pselect(timeout)", ARG5, sizeof(struct vki_timeval) );
++ if (ARG6 != 0) {
++ const struct pselect_sized_sigset *pss =
++ (struct pselect_sized_sigset *)(Addr)ARG6;
++ PRE_MEM_READ( "pselect(sig)", ARG6, sizeof(*pss) );
++ if (!ML_(safe_to_deref)(pss, sizeof(*pss))) {
++ ARG6 = 1; /* Something recognisable to POST() hook. */
++ } else {
++ struct pselect_adjusted_sigset *pas;
++ pas = VG_(malloc)("syswrap.pselect.1", sizeof(*pas));
++ ARG6 = (Addr)pas;
++ pas->ss.ss = (void *)1;
++ pas->ss.ss_len = pss->ss_len;
++ if (pss->ss_len == sizeof(*pss->ss)) {
++ if (pss->ss == NULL) {
++ pas->ss.ss = NULL;
++ } else {
++ PRE_MEM_READ("pselect(sig->ss)", (Addr)pss->ss, pss->ss_len);
++ if (ML_(safe_to_deref)(pss->ss, sizeof(*pss->ss))) {
++ pas->adjusted_ss = *pss->ss;
++ pas->ss.ss = &pas->adjusted_ss;
++ VG_(sanitize_client_sigmask)(&pas->adjusted_ss);
++ }
++ }
++ }
++ }
++ }
++}
++POST(sys_pselect)
++{
++ if (ARG6 != 0 && ARG6 != 1) {
++ VG_(free)((struct pselect_adjusted_sigset *)(Addr)ARG6);
++ }
++}
++#endif
++
++#undef PRE
++#undef POST
++
++const SyscallTableEntry ML_(syscall_table)[] = {
++ // syscall (handled specially) // 0
++ BSDX_(__NR_exit, sys_exit), // 1
++
++ BSDX_(__NR_fork, sys_fork), // 2
++
++ GENXY(__NR_read, sys_read), // 3
++
++ GENX_(__NR_write, sys_write), // 4
++
++#if 0
++ GENXY(__NR_open, sys_open), // 5
++ GENXY(__NR_close, sys_close), // 6
++ GENXY(__NR_wait4, sys_wait4), // 7
++
++ // 4.3 creat 8
++ GENX_(__NR_link, sys_link), // 9
++ GENX_(__NR_unlink, sys_unlink), // 10
++ // obsol execv 11
++
++ GENX_(__NR_chdir, sys_chdir), // 12
++ GENX_(__NR_fchdir, sys_fchdir), // 13
++ GENX_(__NR_mknod, sys_mknod), // 14
++ GENX_(__NR_chmod, sys_chmod), // 15
++
++ GENX_(__NR_chown, sys_chown), // 16
++ GENX_(__NR_break, sys_brk), // 17
++ BSDXY(__NR_getfsstat4, sys_getfsstat4), // 18
++ // 4.3 lseek 19
++
++ GENX_(__NR_getpid, sys_getpid), // 20
++ BSDX_(__NR_mount, sys_mount), // 21
++ BSDX_(__NR_unmount, sys_unmount), // 22
++ GENX_(__NR_setuid, sys_setuid), // 23
++
++ GENX_(__NR_getuid, sys_getuid), // 24
++ GENX_(__NR_geteuid, sys_geteuid), // 25
++ BSDXY(__NR_ptrace, sys_ptrace), // 26
++ BSDXY(__NR_recvmsg, sys_recvmsg), // 27
++
++ BSDX_(__NR_sendmsg, sys_sendmsg), // 28
++ BSDXY(__NR_recvfrom, sys_recvfrom), // 29
++ BSDXY(__NR_accept, sys_accept), // 30
++ BSDXY(__NR_getpeername, sys_getpeername), // 31
++
++ BSDXY(__NR_getsockname, sys_getsockname), // 32
++ GENX_(__NR_access, sys_access), // 33
++ BSDX_(__NR_chflags, sys_chflags), // 34
++ BSDX_(__NR_fchflags, sys_fchflags), // 35
++
++ GENX_(__NR_sync, sys_sync), // 36
++ GENX_(__NR_kill, sys_kill), // 37
++ // 4.3 stat 38
++ GENX_(__NR_getppid, sys_getppid), // 39
++
++ // 4.3 lstat 40
++ GENXY(__NR_dup, sys_dup), // 41
++ BSDXY(__NR_pipe, sys_pipe), // 42
++ GENX_(__NR_getegid, sys_getegid), // 43
++
++ // GENX_(__NR_profil, sys_profil), // 44
++// BSDX_(__NR_ktrace, sys_ktrace), // 45
++ // 4.3 sigaction 46
++ GENX_(__NR_getgid, sys_getgid), // 47
++
++ // 4.3 sigaction (int sigset) 48
++ BSDXY(__NR_getlogin, sys_getlogin), // 49
++ BSDX_(__NR_setlogin, sys_setlogin), // 50
++ GENX_(__NR_acct, sys_acct), // 51
++
++ // 4.3 sigpending 52
++ GENXY(__NR_sigaltstack, sys_sigaltstack), // 53
++ BSDXY(__NR_ioctl, sys_ioctl), // 54
++// BSDX_(__NR_reboot, sys_reboot), // 55
++
++ BSDX_(__NR_revoke, sys_revoke), // 56
++ GENX_(__NR_symlink, sys_symlink), // 57
++ GENX_(__NR_readlink, sys_readlink), // 58
++ GENX_(__NR_execve, sys_execve), // 59
++
++ GENX_(__NR_umask, sys_umask), // 60
++ GENX_(__NR_chroot, sys_chroot), // 61
++ // 4.3 fstat 62
++ // 4.3 getkerninfo 63
++
++ // 4.3 getpagesize 64
++ GENX_(__NR_msync, sys_msync), // 65
++ BSDX_(__NR_vfork, sys_fork), // 66
++ // obsol vread 67
++
++ // obsol vwrite 68
++ // BSDX_(__NR_sbrk, sys_sbrk), // 69
++ // BSDX_(__NR_sstk, sys_sstk), // 70
++ // 4.3 mmap 71
++
++ // 4.2 vadvise 72
++ GENXY(__NR_munmap, sys_munmap), // 73
++ GENXY(__NR_mprotect, sys_mprotect), // 74
++ GENX_(__NR_madvise, sys_madvise), // 75
++
++ // obsol vhangup 76
++ // obsol vlimit 77
++ GENXY(__NR_mincore, sys_mincore), // 78
++ GENXY(__NR_getgroups, sys_getgroups), // 79
++
++ GENX_(__NR_setgroups, sys_setgroups), // 80
++ GENX_(__NR_getpgrp, sys_getpgrp), // 81
++ GENX_(__NR_setpgid, sys_setpgid), // 82
++ GENXY(__NR_setitimer, sys_setitimer), // 83
++
++ // 4.3 wait 84
++// BSDX_(__NR_swapon, sys_swapon), // 85
++ GENXY(__NR_getitimer, sys_getitimer), // 86
++ // 4.3 gethostname 87
++
++ // 4.3 sethostname 88
++ BSDX_(__NR_getdtablesize, sys_getdtablesize), // 89
++ GENXY(__NR_dup2, sys_dup2), // 90
++ // unimpl getdopt 91
++
++ BSDXY(__NR_fcntl, sys_fcntl), // 92
++ GENX_(__NR_select, sys_select), // 93
++ // unimpl setdopt 94
++ GENX_(__NR_fsync, sys_fsync), // 95
++
++ GENX_(__NR_setpriority, sys_setpriority), // 96
++ BSDXY(__NR_socket, sys_socket), // 97
++ BSDX_(__NR_connect, sys_connect), // 98
++ // 4.3 accept 99
++
++ GENX_(__NR_getpriority, sys_getpriority), // 100
++ // 4.3 send 101
++ // 4.3 recv 102
++ // 4.3 sigreturn 103
++
++ BSDX_(__NR_bind, sys_bind), // 104
++ BSDX_(__NR_setsockopt, sys_setsockopt), // 105
++ BSDX_(__NR_listen, sys_listen), // 106
++ // obsol vtimes 107
++
++ // 4.3 sigvec 108
++ // 4.3 sigblock 109
++ // 4.3 sigsetmask 110
++ // 4.3 sigsuspend 111
++
++ // 4.3 sigstack 112
++ // 4.3 recvmsg 113
++ // 4.3 sendmsg 114
++ // 4.3 vtrace 115
++
++ GENXY(__NR_gettimeofday, sys_gettimeofday), // 116
++ GENXY(__NR_getrusage, sys_getrusage), // 117
++ BSDXY(__NR_getsockopt, sys_getsockopt), // 118
++ // unimpl resuba 119
++
++ GENXY(__NR_readv, sys_readv), // 120
++ GENX_(__NR_writev, sys_writev), // 121
++ GENX_(__NR_settimeofday, sys_settimeofday), // 122
++ GENX_(__NR_fchown, sys_fchown), // 123
++
++ GENX_(__NR_fchmod, sys_fchmod), // 124
++ // 4.3 recvfrom 125
++ GENX_(__NR_setreuid, sys_setreuid), // 126
++ GENX_(__NR_setregid, sys_setregid), // 127
++
++ GENX_(__NR_rename, sys_rename), // 128
++ // 4.3 truncate 129
++ // 4.3 ftruncate 130
++ GENX_(__NR_flock, sys_flock), // 131
++
++ BSDX_(__NR_mkfifo, sys_mkfifo), // 132
++ BSDX_(__NR_sendto, sys_sendto), // 133
++ BSDX_(__NR_shutdown, sys_shutdown), // 134
++ BSDXY(__NR_socketpair, sys_socketpair), // 135
++
++ GENX_(__NR_mkdir, sys_mkdir), // 136
++ GENX_(__NR_rmdir, sys_rmdir), // 137
++ GENX_(__NR_utimes, sys_utimes), // 138
++ // 4.2 sigreturn 139
++
++// BSDXY(__NR_adjtime, sys_adjtime), // 140
++ // 4.3 getpeername 141
++ // 4.3 gethostid 142
++ // 4.3 sethostid 143
++
++ // 4.3 getrlimit 144
++ // 4.3 setrlimit 145
++ // 4.3 killpg 146
++ GENX_(__NR_setsid, sys_setsid), // 147
++
++ BSDX_(__NR_quotactl, sys_quotactl), // 148
++ // 4.3 quota 149
++ // 4.3 getsockname 150
++ // bsd/os sem_lock 151
++
++ // bsd/os sem_wakeup 152
++ // bsd/os asyncdaemon 153
++ // nosys 154
++ // BSDXY(__NR_nfssvc, sys_nfssvc), // 155
++
++ // 4.3 getdirentries 156
++ GENXY(__NR_statfs, sys_statfs), // 157
++ GENXY(__NR_fstatfs, sys_fstatfs), // 158
++ // nosys 159
++
++// BSDXY(__NR_lgetfh, sys_lgetfh), // 160
++// BSDXY(__NR_getfh, sys_getfh), // 161
++ BSDXY(__NR_getdomainname, sys_getdomainname), // 162
++ BSDX_(__NR_setdomainname, sys_setdomainname), // 163
++
++ BSDXY(__NR_uname, sys_uname), // 164
++ BSDX_(__NR_sysarch, sys_sysarch), // 165
++// BSDXY(__NR_rtprio, sys_rtprio), // 166
++ // nosys 167
++
++ // nosys 168
++// BSDXY(__NR_semsys, sys_semsys), // 169
++// BSDXY(__NR_msgsys, sys_msgsys), // 170
++// BSDXY(__NR_shmsys, sys_shmsys), // 171
++
++ // nosys 172
++ BSDXY(__NR_pread6, sys_pread), // 173
++ BSDX_(__NR_pwrite6, sys_pwrite), // 174
++ // nosys 175
++
++// BSDXY(__NR_ntp_adjtime, sys_ntp_adjtime), // 176
++ // bsd/os sfork 177
++ // bsd/os getdescriptor 178
++ // bsd/os setdescriptor 179
++
++ // nosys 180
++ GENX_(__NR_setgid, sys_setgid), // 181
++ BSDX_(__NR_setegid, sys_setegid), // 182
++ BSDX_(__NR_seteuid, sys_seteuid), // 183
++
++ // unimpl lfs_bmapv 184
++ // unimpl lfs_markv 185
++ // unimpl lfs_segclean 186
++ // unimpl lfs_segwait 187
++
++ BSDXY(__NR_stat, sys_stat), // 188
++ BSDXY(__NR_fstat, sys_fstat), // 189
++ BSDXY(__NR_lstat, sys_lstat), // 190
++ BSDX_(__NR_pathconf, sys_pathconf), // 191
++
++ BSDX_(__NR_fpathconf, sys_fpathconf), // 192
++ // nosys 193
++ GENXY(__NR_getrlimit, sys_getrlimit), // 194
++ GENX_(__NR_setrlimit, sys_setrlimit), // 195
++
++ BSDXY(__NR_getdirentries, sys_getdirentries), // 196
++ BSDX_(__NR_mmap6, sys_mmap7), // 197
++ // __syscall (handled specially) // 198
++ BSDX_(__NR_lseek6, sys_lseek), // 199
++
++ BSDX_(__NR_truncate, sys_truncate), // 200
++ BSDX_(__NR_ftruncate, sys_ftruncate), // 201
++ BSDXY(__NR___sysctl, sys___sysctl), // 202
++ GENX_(__NR_mlock, sys_mlock), // 203
++
++ GENX_(__NR_munlock, sys_munlock), // 204
++ BSDX_(__NR_undelete, sys_undelete), // 205
++ BSDX_(__NR_futimes, sys_futimes), // 206
++ GENX_(__NR_getpgid, sys_getpgid), // 207
++
++ // netbsd newreboot 208
++ GENXY(__NR_poll, sys_poll), // 209
++ BSDX_(__NR_lkmnosys0, sys_lkmnosys0), // 210
++ BSDX_(__NR_lkmnosys1, sys_lkmnosys1), // 211
++
++ BSDX_(__NR_lkmnosys2, sys_lkmnosys2), // 212
++ BSDX_(__NR_lkmnosys3, sys_lkmnosys3), // 213
++ BSDX_(__NR_lkmnosys4, sys_lkmnosys4), // 214
++ BSDX_(__NR_lkmnosys5, sys_lkmnosys5), // 215
++
++ BSDX_(__NR_lkmnosys6, sys_lkmnosys6), // 216
++ BSDX_(__NR_lkmnosys7, sys_lkmnosys7), // 217
++ BSDX_(__NR_lkmnosys8, sys_lkmnosys8), // 218
++// BSDXY(__NR_nfs_fhopen, sys_nfs_fhopen), // 219
++
++ BSDXY(__NR___semctl7, sys___semctl7), // 220
++ BSDX_(__NR_semget, sys_semget), // 221
++ BSDX_(__NR_semop, sys_semop), // 222
++ // unimpl semconfig 223
++
++// BSDXY(__NR_msgctl, sys_msgctl), // 224
++// BSDX_(__NR_msgget, sys_msgget), // 225
++// BSDX_(__NR_msgsnd, sys_msgsnd), // 226
++// BSDXY(__NR_msgrcv, sys_msgrcv), // 227
++
++ BSDXY(__NR_shmat, sys_shmat), // 228
++ BSDXY(__NR_shmctl7, sys_shmctl7), // 229
++ BSDXY(__NR_shmdt, sys_shmdt), // 230
++ BSDX_(__NR_shmget, sys_shmget), // 231
++
++ BSDXY(__NR_clock_gettime, sys_clock_gettime), // 232
++ BSDX_(__NR_clock_settime, sys_clock_settime), // 233
++ BSDXY(__NR_clock_getres, sys_clock_getres), // 234
++ // unimpl timer_create 235
++
++ // unimpl timer_delete 236
++ // unimpl timer_settime 237
++ // unimpl timer_gettime 238
++ // unimpl timer_getoverrun 239
++
++ GENXY(__NR_nanosleep, sys_nanosleep), // 240
++ // nosys 241
++ // nosys 242
++ // nosys 243
++
++ // nosys 244
++ // nosys 245
++ // nosys 246
++ // nosys 247
++
++// BSDXY(__NR_ntp_gettime, sys_ntp_gettime), // 248
++ // nosys 249
++// BSDXY(__NR_minherit, sys_minherit), // 250
++ BSDX_(__NR_rfork, sys_rfork), // 251
++
++ GENXY(__NR_openbsd_poll, sys_poll), // 252
++ BSDX_(__NR_issetugid, sys_issetugid), // 253
++ GENX_(__NR_lchown, sys_lchown), // 254
++ // nosys 255
++
++ // nosys 256
++ // nosys 257
++ // nosys 258
++ // nosys 259
++
++ // nosys 260
++ // nosys 261
++ // nosys 262
++ // nosys 263
++
++ // nosys 264
++ // nosys 265
++ // nosys 266
++ // nosys 267
++
++ // nosys 268
++ // nosys 269
++ // nosys 270
++ // nosys 271
++
++ GENXY(__NR_getdents, sys_getdents), // 272
++ // nosys 273
++ BSDX_(__NR_lchmod, sys_lchmod), // 274
++ GENX_(__NR_netbsd_lchown, sys_lchown), // 275
++
++ BSDX_(__NR_lutimes, sys_lutimes), // 276
++ // netbsd msync 277
++ // netbsd stat 278
++ // netbsd fstat 279
++
++ // netbsd lstat 280
++ // nosys 281
++ // nosys 282
++ // nosys 283
++
++ // nosys 284
++ // nosys 285
++ // nosys 286
++ // nosys 287
++
++ // nosys 288
++ // nosys 289
++ // nosys 290
++ // nosys 291
++
++ // nosys 292
++ // nosys 293
++ // nosys 294
++ // nosys 295
++
++ // nosys 296
++ BSDXY(__NR_fhstatfs, sys_fhstatfs), // 297
++ BSDXY(__NR_fhopen, sys_fhopen), // 298
++ BSDXY(__NR_fhstat, sys_fhstat), // 299
++
++// BSDX_(__NR_modnext, sys_modnext), // 300
++ BSDXY(__NR_modstat, sys_modstat), // 301
++// BSDX_(__NR_modfnext, sys_modfnext), // 302
++ BSDX_(__NR_modfind, sys_modfind), // 303
++
++ BSDX_(__NR_kldload, sys_kldload), // 304
++ BSDX_(__NR_kldunload, sys_kldunload), // 305
++ BSDX_(__NR_kldfind, sys_kldfind), // 306
++ BSDX_(__NR_kldnext, sys_kldnext), // 307
++
++// BSDXY(__NR_kldstat, sys_kldstat), // 308
++// BSDX_(__NR_kldfirstmod, sys_kldfirstmod), // 309
++ GENX_(__NR_getsid, sys_getsid), // 310
++ BSDX_(__NR_setresuid, sys_setresuid), // 311
++
++ BSDX_(__NR_setresgid, sys_setresgid), // 312
++ // obsol signanosleep 313
++ // BSDXY(__NR_aio_return, sys_aio_return), // 314
++ // BSDXY(__NR_aio_suspend, sys_aio_suspend), // 315
++
++ // BSDXY(__NR_aio_cancel, sys_aio_cancel), // 316
++ // BSDXY(__NR_aio_error, sys_aio_error), // 317
++ // BSDXY(__NR_aio_read, sys_aio_read), // 318
++ // BSDXY(__NR_aio_write, sys_aio_write), // 319
++
++ // BSDXY(__NR_lio_listio, sys_lio_listio), // 320
++ BSDX_(__NR_yield, sys_yield), // 321
++ // nosys 322
++ // nosys 323
++
++ GENX_(__NR_mlockall, sys_mlockall), // 324
++ BSDX_(__NR_munlockall, sys_munlockall), // 325
++ BSDXY(__NR___getcwd, sys___getcwd), // 326
++// BSDXY(__NR_sched_setparam, sys_sched_setparam), // 327
++
++// BSDXY(__NR_sched_getparam, sys_sched_getparam), // 328
++// BSDX_(__NR_sched_setscheduler, sys_sched_setscheduler), // 329
++// BSDX_(__NR_sched_getscheduler, sys_sched_getscheduler), // 330
++ BSDX_(__NR_sched_yield, sys_sched_yield), // 331
++
++ BSDX_(__NR_sched_get_priority_max, sys_sched_get_priority_max), // 332
++ BSDX_(__NR_sched_get_priority_min, sys_sched_get_priority_min), // 333
++// BSDXY(__NR_sched_rr_get_interval, sys_sched_rr_get_interval), // 334
++ BSDX_(__NR_utrace, sys_utrace), // 335
++
++ // compat3 sendfile 336
++ BSDXY(__NR_kldsym, sys_kldsym), // 337
++// BSDX_(__NR_jail, sys_jail), // 338
++ // unimpl pioctl 339
++
++ BSDXY(__NR_sigprocmask, sys_sigprocmask), // 340
++ BSDX_(__NR_sigsuspend, sys_sigsuspend), // 341
++ BSDXY(__NR_sigaction4, sys_sigaction4), // 342
++ BSDXY(__NR_sigpending, sys_sigpending), // 343
++
++// BSDX_(__NR_sigreturn4, sys_sigreturn4), // 344
++ BSDXY(__NR_sigtimedwait, sys_sigtimedwait), // 345
++ BSDXY(__NR_sigwaitinfo, sys_sigwaitinfo), // 346
++ BSDXY(__NR___acl_get_file, sys___acl_get_file), // 347
++
++ BSDX_(__NR___acl_set_file, sys___acl_set_file), // 348
++ BSDXY(__NR___acl_get_fd, sys___acl_get_fd), // 349
++ BSDX_(__NR___acl_set_fd, sys___acl_set_fd), // 350
++ BSDX_(__NR___acl_delete_file, sys___acl_delete_file), // 351
++
++ BSDX_(__NR___acl_delete_fd, sys___acl_delete_fd), // 352
++ BSDX_(__NR___acl_aclcheck_file, sys___acl_aclcheck_file), // 353
++ BSDX_(__NR___acl_aclcheck_fd, sys___acl_aclcheck_fd), // 354
++ // BSDXY(__NR_extattrctl, sys_extattrctl), // 355
++
++ // BSDXY(__NR_extattr_set_file, sys_extattr_set_file), // 356
++ // BSDXY(__NR_extattr_get_file, sys_extattr_get_file), // 357
++ // BSDXY(__NR_extattr_delete_file, sys_extattr_delete_file), // 358
++ // BSDXY(__NR_aio_waitcomplete, sys_aio_waitcomplete), // 359
++
++ BSDXY(__NR_getresuid, sys_getresuid), // 360
++ BSDXY(__NR_getresgid, sys_getresgid), // 361
++ BSDX_(__NR_kqueue, sys_kqueue), // 362
++ BSDXY(__NR_kevent, sys_kevent), // 363
++
++ // nosys 364
++ // nosys 365
++ // nosys 366
++ // nosys 367
++
++ // nosys 368
++ // nosys 369
++ // lkmressys 370
++ // extattr_set_fd 371
++
++ // extattr_get_fd 372
++ // extattr_delete_fd 373
++ // __setugid 374
++ // nfsclnt 375
++
++ BSDX_(__NR_eaccess, sys_eaccess), // 376
++ // afs_syscall 377
++ // nmount 378
++ // kse_exit 379
++
++ // kse_wakeup 380
++ // kse_create 381
++ // kse_thr_interrupt 382
++ // kse_release 383
++
++ // __mac_get_proc 384
++ // __mac_set_proc 385
++ // __mac_get_fd 386
++ // __mac_get_file 387
++
++ // __mac_set_fd 388
++ // __mac_set_file 389
++ BSDXY(__NR_kenv, sys_kenv), // 390
++ // lchflags 391
++
++ BSDXY(__NR_uuidgen, sys_uuidgen), // 392
++ BSDXY(__NR_sendfile, sys_sendfile), // 393
++ // mac_syscall 394
++ BSDXY(__NR_getfsstat, sys_getfsstat), // 395
++
++ BSDXY(__NR_statfs6, sys_statfs6), // 396
++ BSDXY(__NR_fstatfs6, sys_fstatfs6), // 397
++ BSDXY(__NR_fhstatfs6, sys_fhstatfs6), // 398
++ // nosys 399
++
++ // ksem_close 400
++ // ksem_post 401
++ // ksem_wait 402
++ // ksem_trywait 403
++
++ // ksem_init 404
++ // ksem_open 405
++ // ksem_unlink 406
++ // ksem_getvalue 407
++
++ // ksem_destroy 408
++ // __mac_get_pid 409
++ // __mac_get_link 410
++ // __mac_set_link 411
++
++ // extattr_set_link 412
++ // extattr_get_link 413
++ // extattr_delete_link 414
++ // __mac_execve 415
++
++ BSDXY(__NR_sigaction, sys_sigaction), // 416
++ BSDX_(__NR_sigreturn, sys_sigreturn), // 417
++ // __xstat 418
++ // __xfstat 419
++
++ // __xlstat 420
++ BSDXY(__NR_getcontext, sys_getcontext), // 421
++ BSDX_(__NR_setcontext, sys_setcontext), // 422
++ BSDXY(__NR_swapcontext, sys_swapcontext), // 423
++
++ // swapoff 424
++ BSDXY(__NR___acl_get_link, sys___acl_get_link), // 425
++ BSDX_(__NR___acl_set_link, sys___acl_set_link), // 426
++ BSDX_(__NR___acl_delete_link, sys___acl_delete_link), // 427
++
++ BSDX_(__NR___acl_aclcheck_link, sys___acl_aclcheck_link), // 428
++ //!sigwait 429
++ // thr_create 430
++ BSDX_(__NR_thr_exit, sys_thr_exit), // 431
++
++ BSDXY(__NR_thr_self, sys_thr_self), // 432
++ BSDXY(__NR_thr_kill, sys_thr_kill), // 433
++ BSDXY(__NR__umtx_lock, sys__umtx_lock), // 434
++ BSDXY(__NR__umtx_unlock, sys__umtx_unlock), // 435
++
++ // jail_attach 436
++ // extattr_list_fd 437
++ // extattr_list_file 438
++ // extattr_list_link 439
++
++ // kse_switchin 440
++ // ksem_timedwait 441
++ // thr_suspend 442
++ BSDX_(__NR_thr_wake, sys_thr_wake), // 443
++ // kldunloadf 444
++ // audit 445
++ // auditon 446
++ // getauid 447
++
++ // setauid 448
++ // getaudit 449
++ // setaudit 450
++ // getaudit_addr 451
++
++ // setaudit_addr 452
++ // auditctl 453
++ BSDXY(__NR__umtx_op, sys__umtx_op), // 454
++ BSDX_(__NR_thr_new, sys_thr_new), // 455
++
++ // sigqueue 456
++ BSDXY(__NR_kmq_open, sys_mq_open), // 457
++ // kmq_setattr 458
++ // kmq_timedreceive 459
++
++ // kmq_timedsend 460
++ // kmq_notify 461
++ BSDX_(__NR_kmq_unlink, sys_mq_unlink), // 462
++ // abort2 463
++
++ BSDX_(__NR_thr_set_name, sys_thr_set_name), // 464
++ // aio_fsync 465
++ BSDXY(__NR_rtprio_thread, sys_rtprio_thread), // 466
++ // nosys 467
++
++ // nosys 468
++ // __getpath_fromfd 469
++ // __getpath_fromaddr 470
++ // sctp_peeloff 471
++
++ // sctp_generic_sendmsg 472
++ // sctp_generic_sendmsg_iov 473
++ // sctp_generic_recvmsg 474
++ BSDXY(__NR_pread, sys_pread7), // 475
++
++ BSDX_(__NR_pwrite, sys_pwrite7), // 476
++ BSDX_(__NR_mmap, sys_mmap7), // 477
++ BSDX_(__NR_lseek, sys_lseek7), // 478
++ BSDX_(__NR_truncate7, sys_truncate7), // 479
++
++ BSDX_(__NR_ftruncate7, sys_ftruncate7), // 480
++ BSDXY(__NR_thr_kill2, sys_thr_kill2), // 481
++ BSDXY(__NR_shm_open, sys_shm_open), // 482
++ BSDX_(__NR_shm_unlink, sys_shm_unlink), // 483
++
++ // cpuset 484
++ // cpuset_setid 485
++ // cpuset_getid 486
++
++ BSDXY(__NR_cpuset_getaffinity, sys_cpuset_getaffinity), // 487
++ BSDX_(__NR_cpuset_setaffinity, sys_cpuset_setaffinity), // 488
++ BSDX_(__NR_faccessat, sys_faccessat), // 489
++ BSDX_(__NR_fchmodat, sys_fchmodat), // 490
++ BSDX_(__NR_fchownat, sys_fchownat), // 491
++
++ // fexecve 492
++ BSDXY(__NR_fstatat, sys_fstatat), // 493
++ BSDX_(__NR_futimesat, sys_futimesat), // 494
++ BSDX_(__NR_linkat, sys_linkat), // 495
++
++ BSDX_(__NR_mkdirat, sys_mkdirat), // 496
++ BSDX_(__NR_mkfifoat, sys_mkfifoat), // 497
++ BSDX_(__NR_mknodat, sys_mknodat), // 498
++ BSDXY(__NR_openat, sys_openat), // 499
++
++ BSDX_(__NR_readlinkat, sys_readlinkat), // 500
++ BSDX_(__NR_renameat, sys_renameat), // 501
++ BSDX_(__NR_symlinkat, sys_symlinkat), // 502
++ BSDX_(__NR_unlinkat, sys_unlinkat), // 503
++
++ // posix_openpt 504
++
++ BSDXY(__NR___semctl, sys___semctl), // 510
++ BSDXY(__NR_shmctl, sys_shmctl), // 512
++ BSDXY(__NR_pselect, sys_pselect), // 522
++ BSDXY(__NR_pipe2, sys_pipe2), // 542
++
++ BSDX_(__NR_fake_sigreturn, sys_fake_sigreturn), // 1000, fake sigreturn
++#endif
++};
++
++const UInt ML_(syscall_table_size) =
++ sizeof(ML_(syscall_table)) / sizeof(ML_(syscall_table)[0]);
++
++/*--------------------------------------------------------------------*/
++/*--- end ---*/
++/*--------------------------------------------------------------------*/
diff --git a/valgrind-netbsd/patches/patch-coregrind_m__trampoline.S b/valgrind-netbsd/patches/patch-coregrind_m__trampoline.S
new file mode 100644
index 0000000000..34e7c27480
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-coregrind_m__trampoline.S
@@ -0,0 +1,58 @@
+$NetBSD$
+
+--- coregrind/m_trampoline.S.orig 2018-05-05 07:42:22.000000000 +0000
++++ coregrind/m_trampoline.S
+@@ -852,7 +852,45 @@ VG_(trampoline_stuff_end):
+ # undef UD2_256
+ # undef UD2_1024
+ # undef UD2_PAGE
++
++/*---------------- amd64-netbsd ----------------*/
++
++#else
++#if defined(VGP_amd64_netbsd)
++
++# define UD2_16 ud2 ; ud2 ; ud2 ; ud2 ; ud2 ; ud2 ; ud2 ; ud2
++# define UD2_64 UD2_16 ; UD2_16 ; UD2_16 ; UD2_16
++# define UD2_256 UD2_64 ; UD2_64 ; UD2_64 ; UD2_64
++# define UD2_1024 UD2_256 ; UD2_256 ; UD2_256 ; UD2_256
++# define UD2_PAGE UD2_1024 ; UD2_1024 ; UD2_1024 ; UD2_1024
++
++ /* a leading page of unexecutable code */
++ UD2_PAGE
++
++.global VG_(trampoline_stuff_start)
++VG_(trampoline_stuff_start):
++
++.global VG_(amd64_netbsd_SUBST_FOR_sigreturn)
++VG_(amd64_netbsd_SUBST_FOR_sigreturn):
++ /* This is a very specific sequence which GDB uses to
++ recognize signal handler frames. */
++ movq $__NR_fake_sigreturn, %rax
++ movq %rsp, %rdi
++ addq $40,%rdi
++ syscall
++ ud2
++.global VG_(trampoline_stuff_end)
++VG_(trampoline_stuff_end):
++
++ /* and a trailing page of unexecutable code */
++ UD2_PAGE
+
++# undef UD2_16
++# undef UD2_64
++# undef UD2_256
++# undef UD2_1024
++# undef UD2_PAGE
++
+ /*---------------- x86-darwin ----------------*/
+ #else
+ #if defined(VGP_x86_darwin)
+@@ -1581,6 +1619,7 @@ VG_(trampoline_stuff_end):
+ #endif
+ #endif
+ #endif
++#endif
+
+ /* Let the linker know we don't need an executable stack */
+ MARK_STACK_NO_EXEC
diff --git a/valgrind-netbsd/patches/patch-coregrind_m__translate.c b/valgrind-netbsd/patches/patch-coregrind_m__translate.c
new file mode 100644
index 0000000000..d49103d399
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-coregrind_m__translate.c
@@ -0,0 +1,15 @@
+$NetBSD$
+
+--- coregrind/m_translate.c.orig 2018-05-05 07:42:22.000000000 +0000
++++ coregrind/m_translate.c
+@@ -1679,6 +1679,10 @@ Bool VG_(translate) ( ThreadId tid,
+ vex_abiinfo.guest_amd64_assume_gs_is_const = True;
+ # endif
+
++# if defined(VGP_amd64_netbsd)
++ vex_abiinfo.guest_amd64_assume_fs_is_const = True;
++# endif
++
+ # if defined(VGP_amd64_darwin)
+ vex_abiinfo.guest_amd64_assume_gs_is_const = True;
+ # endif
diff --git a/valgrind-netbsd/patches/patch-coregrind_m__ume_elf.c b/valgrind-netbsd/patches/patch-coregrind_m__ume_elf.c
new file mode 100644
index 0000000000..5a715df9ab
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-coregrind_m__ume_elf.c
@@ -0,0 +1,34 @@
+$NetBSD$
+
+--- coregrind/m_ume/elf.c.orig 2018-05-05 07:42:22.000000000 +0000
++++ coregrind/m_ume/elf.c
+@@ -28,7 +28,7 @@
+ The GNU General Public License is contained in the file COPYING.
+ */
+
+-#if defined(VGO_linux) || defined(VGO_solaris)
++#if defined(VGO_linux) || defined(VGO_solaris) || defined(VGO_netbsd)
+
+ #include "pub_core_basics.h"
+ #include "pub_core_vki.h"
+@@ -600,7 +600,11 @@ Int VG_(load_ELF)(Int fd, const HChar* n
+ info->phnum = e->e.e_phnum;
+ info->entry = e->e.e_entry + ebase;
+ info->phdr = 0;
++#if defined(VGO_netbsd)
++ info->stack_prot = VKI_PROT_READ|VKI_PROT_WRITE;
++#else
+ info->stack_prot = VKI_PROT_READ|VKI_PROT_WRITE|VKI_PROT_EXEC;
++#endif
+
+ for (i = 0; i < e->e.e_phnum; i++) {
+ ESZ(Phdr) *ph = &e->p[i];
+@@ -870,7 +874,7 @@ Int VG_(load_ELF)(Int fd, const HChar* n
+ return 0;
+ }
+
+-#endif // defined(VGO_linux) || defined(VGO_solaris)
++#endif // defined(VGO_linux) || defined(VGO_solaris) || defined(VGO_netbsd)
+
+ /*--------------------------------------------------------------------*/
+ /*--- end ---*/
diff --git a/valgrind-netbsd/patches/patch-coregrind_m__ume_main.c b/valgrind-netbsd/patches/patch-coregrind_m__ume_main.c
new file mode 100644
index 0000000000..738639344a
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-coregrind_m__ume_main.c
@@ -0,0 +1,55 @@
+$NetBSD$
+
+--- coregrind/m_ume/main.c.orig 2018-05-05 07:42:22.000000000 +0000
++++ coregrind/m_ume/main.c
+@@ -51,7 +51,7 @@ typedef struct {
+ } ExeHandler;
+
+ static ExeHandler exe_handlers[] = {
+-# if defined(VGO_linux) || defined(VGO_solaris)
++# if defined(VGO_linux) || defined(VGO_solaris) || defined(VGO_netbsd)
+ { VG_(match_ELF), VG_(load_ELF) },
+ # elif defined(VGO_darwin)
+ { VG_(match_macho), VG_(load_macho) },
+@@ -97,8 +97,9 @@ VG_(pre_exec_check)(const HChar* exe_nam
+ }
+
+ fsz = (SizeT)VG_(fsize)(fd);
+- if (fsz < bufsz)
++ if (fsz < bufsz) {
+ bufsz = fsz;
++ }
+
+ res = VG_(pread)(fd, buf, bufsz, 0);
+ if (sr_isError(res) || sr_Res(res) != bufsz) {
+@@ -141,8 +142,9 @@ Int VG_(do_exec_inner)(const HChar* exe,
+ Int ret;
+
+ res = VG_(pre_exec_check)(exe, &fd, False/*allow_setuid*/);
+- if (sr_isError(res))
++ if (sr_isError(res)) {
+ return sr_Err(res);
++ }
+
+ vg_assert2(sr_Res(res) >= 0 && sr_Res(res) < EXE_HANDLER_COUNT,
+ "invalid VG_(pre_exec_check) result");
+@@ -249,7 +251,6 @@ static Int do_exec_shell_followup(Int re
+ if (sr_isError(res) && sr_Err(res) == VKI_ENOENT) {
+ VG_(fmsg)("%s: %s\n", exe_name, VG_(strerror)(ret));
+ exit_code = 127; // 127 == NOTFOUND (bash)
+-
+ // Was it a directory?
+ } else if (!sr_isError(res) && VKI_S_ISDIR(st.mode)) {
+ VG_(fmsg)("%s: is a directory\n", exe_name);
+@@ -258,11 +259,9 @@ static Int do_exec_shell_followup(Int re
+ } else if (0 != VG_(check_executable)(NULL, exe_name,
+ False/*allow_setuid*/)) {
+ VG_(fmsg)("%s: %s\n", exe_name, VG_(strerror)(ret));
+-
+ // Did it start with "#!"? If so, it must have been a bad interpreter.
+ } else if (is_hash_bang_file(exe_name)) {
+ VG_(fmsg)("%s: bad interpreter: %s\n", exe_name, VG_(strerror)(ret));
+-
+ // Otherwise it was something else.
+ } else {
+ VG_(fmsg)("%s: %s\n", exe_name, VG_(strerror)(ret));
diff --git a/valgrind-netbsd/patches/patch-coregrind_m__ume_priv__ume.h b/valgrind-netbsd/patches/patch-coregrind_m__ume_priv__ume.h
new file mode 100644
index 0000000000..a874f353bb
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-coregrind_m__ume_priv__ume.h
@@ -0,0 +1,33 @@
+$NetBSD$
+
+--- coregrind/m_ume/priv_ume.h.orig 2018-05-05 07:42:22.000000000 +0000
++++ coregrind/m_ume/priv_ume.h
+@@ -27,7 +27,7 @@
+ The GNU General Public License is contained in the file COPYING.
+ */
+
+-#if defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_solaris)
++#if defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_solaris) || defined(VGO_netbsd)
+
+ #ifndef __PRIV_UME_H
+ #define __PRIV_UME_H
+@@ -36,7 +36,7 @@
+
+ extern Int VG_(do_exec_inner)(const HChar *exe, ExeInfo *info);
+
+-#if defined(VGO_linux) || defined(VGO_solaris)
++#if defined(VGO_linux) || defined(VGO_solaris) || defined(VGO_netbsd)
+ extern Bool VG_(match_ELF) ( const void *hdr, SizeT len );
+ extern Int VG_(load_ELF) ( Int fd, const HChar *name, ExeInfo *info );
+ #elif defined(VGO_darwin)
+@@ -52,9 +52,8 @@ extern Int VG_(load_script) ( Int fd,
+
+ #endif // __PRIV_UME_H
+
+-#endif // defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_solaris)
++#endif // defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_solaris) || defined(VGO_netbsd)
+
+ /*--------------------------------------------------------------------*/
+ /*--- end ---*/
+ /*--------------------------------------------------------------------*/
+-
diff --git a/valgrind-netbsd/patches/patch-coregrind_m__vki.c b/valgrind-netbsd/patches/patch-coregrind_m__vki.c
new file mode 100644
index 0000000000..d8d28283cd
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-coregrind_m__vki.c
@@ -0,0 +1,36 @@
+$NetBSD$
+
+--- coregrind/m_vki.c.orig 2018-05-05 07:42:22.000000000 +0000
++++ coregrind/m_vki.c
+@@ -68,16 +68,21 @@ void VG_(vki_do_initial_consistency_chec
+ /* --- Platform-independent checks on signal sets --- */
+
+ vki_sigset_t set;
++
++#if !defined(VGO_netbsd)
++ // NetBSD uses 128 bits for sigset, but defines only 64
++
+ // Set's size must agree with _VKI_NSIG
+ vg_assert( 8 * sizeof(set) == _VKI_NSIG );
+ // Set's word size must agree with _VKI_NSIG_BPW
+ vg_assert( 8 * sizeof(set.sig[0]) == _VKI_NSIG_BPW );
+ // The set elements are 32- or 64-bit
+ vg_assert( _VKI_NSIG_BPW == 32 || _VKI_NSIG_BPW == 64 );
++#endif
+
+ /* --- Platform-specific checks on signal sets --- */
+
+-# if defined(VGO_linux) || defined(VGO_solaris)
++# if defined(VGO_linux) || defined(VGO_solaris) || defined(VGO_netbsd)
+ /* nothing to check */
+ # elif defined(VGP_x86_darwin) || defined(VGP_amd64_darwin)
+ vg_assert(_VKI_NSIG == NSIG);
+@@ -91,7 +96,7 @@ void VG_(vki_do_initial_consistency_chec
+
+ /* --- Platform-specific checks on sigactions --- */
+
+-# if defined(VGO_linux)
++# if defined(VGO_linux) || defined(VGO_netbsd)
+ /* the toK- and fromK- forms are identical */
+ vg_assert( sizeof(vki_sigaction_toK_t)
+ == sizeof(vki_sigaction_fromK_t) );
diff --git a/valgrind-netbsd/patches/patch-coregrind_m__vkiscnums.c b/valgrind-netbsd/patches/patch-coregrind_m__vkiscnums.c
new file mode 100644
index 0000000000..47fe704dd8
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-coregrind_m__vkiscnums.c
@@ -0,0 +1,23 @@
+$NetBSD$
+
+--- coregrind/m_vkiscnums.c.orig 2018-07-13 08:52:05.000000000 +0000
++++ coregrind/m_vkiscnums.c
+@@ -69,6 +69,18 @@ STATIC_ASSERT(__NR_pipe2 == 5287);
+ #endif
+
+ //---------------------------------------------------------------------------
++#elif defined(VGO_netbsd)
++//---------------------------------------------------------------------------
++
++const HChar* VG_(sysnum_string)(Word sysnum)
++{
++ static HChar buf[20+1]; // large enough
++
++ VG_(snprintf)(buf, sizeof(buf), "%3ld", sysnum);
++ return buf;
++}
++
++//---------------------------------------------------------------------------
+ #elif defined(VGO_darwin)
+ //---------------------------------------------------------------------------
+
diff --git a/valgrind-netbsd/patches/patch-coregrind_pub__core__debuginfo.h b/valgrind-netbsd/patches/patch-coregrind_pub__core__debuginfo.h
new file mode 100644
index 0000000000..fcb95233f3
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-coregrind_pub__core__debuginfo.h
@@ -0,0 +1,13 @@
+$NetBSD$
+
+--- coregrind/pub_core_debuginfo.h.orig 2018-05-05 07:42:22.000000000 +0000
++++ coregrind/pub_core_debuginfo.h
+@@ -62,7 +62,7 @@ extern void VG_(di_initialise) ( void );
+ released by simply re-opening and closing the same file (even via
+ different fd!).
+ */
+-#if defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_solaris)
++#if defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_solaris) || defined(VGO_netbsd)
+ extern ULong VG_(di_notify_mmap)( Addr a, Bool allow_SkFileV, Int use_fd );
+
+ extern void VG_(di_notify_munmap)( Addr a, SizeT len );
diff --git a/valgrind-netbsd/patches/patch-coregrind_pub__core__initimg.h b/valgrind-netbsd/patches/patch-coregrind_pub__core__initimg.h
new file mode 100644
index 0000000000..1684abd1b4
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-coregrind_pub__core__initimg.h
@@ -0,0 +1,24 @@
+$NetBSD$
+
+--- coregrind/pub_core_initimg.h.orig 2018-05-05 07:42:22.000000000 +0000
++++ coregrind/pub_core_initimg.h
+@@ -68,7 +68,7 @@ void VG_(ii_finalise_image)( IIFinaliseI
+
+ /* ------------------------- Linux ------------------------- */
+
+-#if defined(VGO_linux)
++#if defined(VGO_linux) || defined(VGO_netbsd)
+
+ struct _IICreateImageInfo {
+ /* ------ Mandatory fields ------ */
+@@ -88,8 +88,10 @@ struct _IIFinaliseImageInfo {
+ Addr initial_client_IP;
+ Addr initial_client_TOC;
+ UInt* client_auxv;
++#if !defined(VGO_netbsd)
+ /* ------ Arch-specific ELF loading state ------ */
+ struct vki_arch_elf_state arch_elf_state;
++#endif
+ };
+
+ /* ------------------------- Darwin ------------------------- */
diff --git a/valgrind-netbsd/patches/patch-coregrind_pub__core__machine.h b/valgrind-netbsd/patches/patch-coregrind_pub__core__machine.h
new file mode 100644
index 0000000000..b71fe23948
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-coregrind_pub__core__machine.h
@@ -0,0 +1,13 @@
+$NetBSD$
+
+--- coregrind/pub_core_machine.h.orig 2018-07-13 08:52:05.000000000 +0000
++++ coregrind/pub_core_machine.h
+@@ -46,7 +46,7 @@
+ # define VG_ELF_MACHINE EM_386
+ # define VG_ELF_CLASS ELFCLASS32
+ # undef VG_PLAT_USES_PPCTOC
+-#elif defined(VGP_amd64_linux) || defined(VGP_amd64_solaris)
++#elif defined(VGP_amd64_linux) || defined(VGP_amd64_solaris) || defined(VGP_amd64_netbsd)
+ # define VG_ELF_DATA2XXX ELFDATA2LSB
+ # define VG_ELF_MACHINE EM_X86_64
+ # define VG_ELF_CLASS ELFCLASS64
diff --git a/valgrind-netbsd/patches/patch-coregrind_pub__core__mallocfree.h b/valgrind-netbsd/patches/patch-coregrind_pub__core__mallocfree.h
new file mode 100644
index 0000000000..4b57159c4c
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-coregrind_pub__core__mallocfree.h
@@ -0,0 +1,12 @@
+$NetBSD$
+
+--- coregrind/pub_core_mallocfree.h.orig 2018-07-13 08:52:05.000000000 +0000
++++ coregrind/pub_core_mallocfree.h
+@@ -82,6 +82,7 @@ typedef Int ArenaId;
+ defined(VGP_x86_darwin) || \
+ defined(VGP_amd64_darwin) || \
+ defined(VGP_arm64_linux) || \
++ defined(VGP_amd64_netbsd) || \
+ defined(VGP_amd64_solaris)
+ # define VG_MIN_MALLOC_SZB 16
+ #else
diff --git a/valgrind-netbsd/patches/patch-coregrind_pub__core__sigframe.h b/valgrind-netbsd/patches/patch-coregrind_pub__core__sigframe.h
new file mode 100644
index 0000000000..9e8c77b815
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-coregrind_pub__core__sigframe.h
@@ -0,0 +1,18 @@
+$NetBSD$
+
+--- coregrind/pub_core_sigframe.h.orig 2018-05-05 07:42:22.000000000 +0000
++++ coregrind/pub_core_sigframe.h
+@@ -60,8 +60,13 @@ void VG_(sigframe_create) ( ThreadId tid
+
+ /* Remove a signal frame from thread 'tid's stack, and
+ restore the CPU state from it. */
++#if defined(VGO_netbsd)
++extern
++void VG_(sigframe_destroy)( ThreadId tid );
++#else
+ extern
+ void VG_(sigframe_destroy)( ThreadId tid, Bool isRT );
++#endif
+
+ #if defined(VGO_solaris)
+ extern
diff --git a/valgrind-netbsd/patches/patch-coregrind_pub__core__syscall.h b/valgrind-netbsd/patches/patch-coregrind_pub__core__syscall.h
new file mode 100644
index 0000000000..daf03837fc
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-coregrind_pub__core__syscall.h
@@ -0,0 +1,12 @@
+$NetBSD$
+
+--- coregrind/pub_core_syscall.h.orig 2018-05-05 07:42:22.000000000 +0000
++++ coregrind/pub_core_syscall.h
+@@ -75,6 +75,7 @@ extern SysRes VG_(mk_SysRes_x86_linux)
+ extern SysRes VG_(mk_SysRes_amd64_linux) ( Long val );
+ extern SysRes VG_(mk_SysRes_ppc32_linux) ( UInt val, UInt cr0so );
+ extern SysRes VG_(mk_SysRes_ppc64_linux) ( ULong val, ULong cr0so );
++extern SysRes VG_(mk_SysRes_amd64_netbsd)( ULong val, ULong val2, Bool err );
+ extern SysRes VG_(mk_SysRes_arm_linux) ( Int val );
+ extern SysRes VG_(mk_SysRes_arm64_linux) ( Long val );
+ extern SysRes VG_(mk_SysRes_x86_darwin) ( UChar scclass, Bool isErr,
diff --git a/valgrind-netbsd/patches/patch-coregrind_pub__core__trampoline.h b/valgrind-netbsd/patches/patch-coregrind_pub__core__trampoline.h
new file mode 100644
index 0000000000..edb07fe32d
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-coregrind_pub__core__trampoline.h
@@ -0,0 +1,15 @@
+$NetBSD$
+
+--- coregrind/pub_core_trampoline.h.orig 2018-05-05 07:42:22.000000000 +0000
++++ coregrind/pub_core_trampoline.h
+@@ -58,6 +58,10 @@
+ extern Addr VG_(trampoline_stuff_start);
+ extern Addr VG_(trampoline_stuff_end);
+
++#if defined(VGP_amd64_netbsd)
++extern void VG_(amd64_netbsd_SUBST_FOR_sigreturn);
++#endif
++
+ #if defined(VGP_x86_linux)
+ extern Addr VG_(x86_linux_SUBST_FOR_sigreturn);
+ extern Addr VG_(x86_linux_SUBST_FOR_rt_sigreturn);
diff --git a/valgrind-netbsd/patches/patch-coregrind_vg__preloaded.c b/valgrind-netbsd/patches/patch-coregrind_vg__preloaded.c
new file mode 100644
index 0000000000..29374b7ed7
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-coregrind_vg__preloaded.c
@@ -0,0 +1,13 @@
+$NetBSD$
+
+--- coregrind/vg_preloaded.c.orig 2018-05-05 07:42:22.000000000 +0000
++++ coregrind/vg_preloaded.c
+@@ -210,6 +210,8 @@ void VG_REPLACE_FUNCTION_ZU(libSystemZdZ
+ // but don't care if it's initialized
+ }
+
++#elif defined(VGO_netbsd)
++#warning not implemented
+ #elif defined(VGO_solaris)
+
+ /* Declare the errno and environ symbols weakly in case the client is not
diff --git a/valgrind-netbsd/patches/patch-drd_Makefile.am b/valgrind-netbsd/patches/patch-drd_Makefile.am
new file mode 100644
index 0000000000..e90ce68199
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-drd_Makefile.am
@@ -0,0 +1,24 @@
+$NetBSD$
+
+--- drd/Makefile.am.orig 2018-05-05 07:42:22.000000000 +0000
++++ drd/Makefile.am
+@@ -137,12 +137,12 @@ vgpreload_drd_@VGCONF_ARCH_PRI@_@VGCONF_
+ vgpreload_drd_@VGCONF_ARCH_PRI@_@VGCONF_OS@_so_CPPFLAGS = \
+ $(AM_CPPFLAGS_@VGCONF_PLATFORM_PRI_CAPS@)
+ vgpreload_drd_@VGCONF_ARCH_PRI@_@VGCONF_OS@_so_CFLAGS = \
+- $(AM_CFLAGS_PSO_@VGCONF_PLATFORM_PRI_CAPS@) $(DRD_CFLAGS)
++ $(AM_CFLAGS_PSO_@VGCONF_PLATFORM_PRI_CAPS@) $(DRD_CFLAGS) -fPIC
+ vgpreload_drd_@VGCONF_ARCH_PRI@_@VGCONF_OS@_so_DEPENDENCIES = \
+ $(LIBREPLACEMALLOC_@VGCONF_PLATFORM_PRI_CAPS@)
+ vgpreload_drd_@VGCONF_ARCH_PRI@_@VGCONF_OS@_so_LDFLAGS = \
+ $(PRELOAD_LDFLAGS_@VGCONF_PLATFORM_PRI_CAPS@) \
+- $(LIBREPLACEMALLOC_LDFLAGS_@VGCONF_PLATFORM_PRI_CAPS@)
++ $(LIBREPLACEMALLOC_LDFLAGS_@VGCONF_PLATFORM_PRI_CAPS@) -fPIC
+
+ if VGCONF_HAVE_PLATFORM_SEC
+ vgpreload_drd_@VGCONF_ARCH_SEC@_@VGCONF_OS@_so_SOURCES = \
+@@ -157,4 +157,3 @@ vgpreload_drd_@VGCONF_ARCH_SEC@_@VGCONF_
+ $(PRELOAD_LDFLAGS_@VGCONF_PLATFORM_SEC_CAPS@) \
+ $(LIBREPLACEMALLOC_LDFLAGS_@VGCONF_PLATFORM_SEC_CAPS@)
+ endif
+-
diff --git a/valgrind-netbsd/patches/patch-drd_drd__main.c b/valgrind-netbsd/patches/patch-drd_drd__main.c
new file mode 100644
index 0000000000..72097257b2
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-drd_drd__main.c
@@ -0,0 +1,13 @@
+$NetBSD$
+
+--- drd/drd_main.c.orig 2018-05-05 07:42:22.000000000 +0000
++++ drd/drd_main.c
+@@ -740,7 +740,7 @@ void drd__atfork_child(ThreadId tid)
+
+ static void DRD_(post_clo_init)(void)
+ {
+-#if defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_solaris)
++#if defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_solaris) || defined(VGO_netbsd)
+ /* fine */
+ #else
+ VG_(printf)("\nWARNING: DRD has not yet been tested on this operating system.\n\n");
diff --git a/valgrind-netbsd/patches/patch-helgrind_Makefile.am b/valgrind-netbsd/patches/patch-helgrind_Makefile.am
new file mode 100644
index 0000000000..c3f251bbbe
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-helgrind_Makefile.am
@@ -0,0 +1,24 @@
+$NetBSD$
+
+--- helgrind/Makefile.am.orig 2018-05-05 07:42:22.000000000 +0000
++++ helgrind/Makefile.am
+@@ -97,12 +97,12 @@ vgpreload_helgrind_@VGCONF_ARCH_PRI@_@VG
+ vgpreload_helgrind_@VGCONF_ARCH_PRI@_@VGCONF_OS@_so_CPPFLAGS = \
+ $(AM_CPPFLAGS_@VGCONF_PLATFORM_PRI_CAPS@)
+ vgpreload_helgrind_@VGCONF_ARCH_PRI@_@VGCONF_OS@_so_CFLAGS = \
+- $(AM_CFLAGS_PSO_@VGCONF_PLATFORM_PRI_CAPS@)
++ $(AM_CFLAGS_PSO_@VGCONF_PLATFORM_PRI_CAPS@) -fPIC
+ vgpreload_helgrind_@VGCONF_ARCH_PRI@_@VGCONF_OS@_so_DEPENDENCIES = \
+ $(LIBREPLACEMALLOC_@VGCONF_PLATFORM_PRI_CAPS@)
+ vgpreload_helgrind_@VGCONF_ARCH_PRI@_@VGCONF_OS@_so_LDFLAGS = \
+ $(PRELOAD_LDFLAGS_@VGCONF_PLATFORM_PRI_CAPS@) \
+- $(LIBREPLACEMALLOC_LDFLAGS_@VGCONF_PLATFORM_PRI_CAPS@)
++ $(LIBREPLACEMALLOC_LDFLAGS_@VGCONF_PLATFORM_PRI_CAPS@) -fPIC
+
+ if VGCONF_HAVE_PLATFORM_SEC
+ vgpreload_helgrind_@VGCONF_ARCH_SEC@_@VGCONF_OS@_so_SOURCES = \
+@@ -117,4 +117,3 @@ vgpreload_helgrind_@VGCONF_ARCH_SEC@_@VG
+ $(PRELOAD_LDFLAGS_@VGCONF_PLATFORM_SEC_CAPS@) \
+ $(LIBREPLACEMALLOC_LDFLAGS_@VGCONF_PLATFORM_SEC_CAPS@)
+ endif
+-
diff --git a/valgrind-netbsd/patches/patch-helgrind_hg__intercepts.c b/valgrind-netbsd/patches/patch-helgrind_hg__intercepts.c
new file mode 100644
index 0000000000..dd1149ff02
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-helgrind_hg__intercepts.c
@@ -0,0 +1,329 @@
+$NetBSD$
+
+--- helgrind/hg_intercepts.c.orig 2018-05-05 07:42:22.000000000 +0000
++++ helgrind/hg_intercepts.c
+@@ -471,6 +471,12 @@ static int pthread_create_WRK(pthread_t
+ // trap anything else
+ assert(0);
+ }
++#elif defined(VGO_netbsd)
++ PTH_FUNC(int, pthreadZucreate, // pthread_create
++ pthread_t *thread, const pthread_attr_t *attr,
++ void *(*start) (void *), void *arg) {
++ return pthread_create_WRK(thread, attr, start, arg);
++ }
+ #elif defined(VGO_solaris)
+ PTH_FUNC(int, pthreadZucreate, // pthread_create
+ pthread_t *thread, const pthread_attr_t *attr,
+@@ -576,6 +582,11 @@ static int pthread_join_WRK(pthread_t th
+ pthread_t thread, void** value_pointer) {
+ return pthread_join_WRK(thread, value_pointer);
+ }
++#elif defined(VGO_netbsd)
++ PTH_FUNC(int, pthreadZujoin, // pthread_join
++ pthread_t thread, void** value_pointer) {
++ return pthread_join_WRK(thread, value_pointer);
++ }
+ #elif defined(VGO_solaris)
+ PTH_FUNC(int, pthreadZujoin, // pthread_join
+ pthread_t thread, void** value_pointer) {
+@@ -867,7 +878,7 @@ static int mutex_destroy_WRK(pthread_mut
+ return ret;
+ }
+
+-#if defined(VGO_linux) || defined(VGO_darwin)
++#if defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_netbsd)
+ PTH_FUNC(int, pthreadZumutexZudestroy, // pthread_mutex_destroy
+ pthread_mutex_t *mutex) {
+ return mutex_destroy_WRK(mutex);
+@@ -919,7 +930,7 @@ static int mutex_lock_WRK(pthread_mutex_
+ return ret;
+ }
+
+-#if defined(VGO_linux) || defined(VGO_darwin)
++#if defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_netbsd)
+ PTH_FUNC(int, pthreadZumutexZulock, // pthread_mutex_lock
+ pthread_mutex_t *mutex) {
+ return mutex_lock_WRK(mutex);
+@@ -1005,7 +1016,7 @@ static int mutex_trylock_WRK(pthread_mut
+ return ret;
+ }
+
+-#if defined(VGO_linux) || defined(VGO_darwin)
++#if defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_netbsd)
+ PTH_FUNC(int, pthreadZumutexZutrylock, // pthread_mutex_trylock
+ pthread_mutex_t *mutex) {
+ return mutex_trylock_WRK(mutex);
+@@ -1109,7 +1120,7 @@ static int mutex_unlock_WRK(pthread_mute
+ return ret;
+ }
+
+-#if defined(VGO_linux) || defined(VGO_darwin)
++#if defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_netbsd)
+ PTH_FUNC(int, pthreadZumutexZuunlock, // pthread_mutex_unlock
+ pthread_mutex_t *mutex) {
+ return mutex_unlock_WRK(mutex);
+@@ -1234,6 +1245,11 @@ static int pthread_cond_wait_WRK(pthread
+ pthread_cond_t* cond, pthread_mutex_t* mutex) {
+ return pthread_cond_wait_WRK(cond, mutex);
+ }
++#elif defined(VGO_netbsd)
++ PTH_FUNC(int, pthreadZucondZuwait, // pthread_cond_wait
++ pthread_cond_t* cond, pthread_mutex_t* mutex) {
++ return pthread_cond_wait_WRK(cond, mutex);
++ }
+ #elif defined(VGO_solaris)
+ PTH_FUNC(int, condZuwait, // cond_wait
+ pthread_cond_t *cond, pthread_mutex_t *mutex) {
+@@ -1329,6 +1345,12 @@ static int pthread_cond_timedwait_WRK(pt
+ struct timespec* abstime) {
+ return pthread_cond_timedwait_WRK(cond, mutex, abstime, ETIMEDOUT);
+ }
++#elif defined(VGO_netbsd)
++ PTH_FUNC(int, pthreadZucondZutimedwait, // pthread_cond_timedwait
++ pthread_cond_t* cond, pthread_mutex_t* mutex,
++ struct timespec* abstime) {
++ return pthread_cond_timedwait_WRK(cond, mutex, abstime, ETIMEDOUT);
++ }
+ #elif defined(VGO_darwin)
+ PTH_FUNC(int, pthreadZucondZutimedwait, // pthread_cond_timedwait
+ pthread_cond_t* cond, pthread_mutex_t* mutex,
+@@ -1404,6 +1426,11 @@ static int pthread_cond_signal_WRK(pthre
+ pthread_cond_t* cond) {
+ return pthread_cond_signal_WRK(cond);
+ }
++#elif defined(VGO_netbsd)
++ PTH_FUNC(int, pthreadZucondZusignal, // pthread_cond_signal
++ pthread_cond_t* cond) {
++ return pthread_cond_signal_WRK(cond);
++ }
+ #elif defined(VGO_darwin)
+ PTH_FUNC(int, pthreadZucondZusignal, // pthread_cond_signal
+ pthread_cond_t* cond) {
+@@ -1465,6 +1492,11 @@ static int pthread_cond_broadcast_WRK(pt
+ pthread_cond_t* cond) {
+ return pthread_cond_broadcast_WRK(cond);
+ }
++#elif defined(VGO_netbsd)
++ PTH_FUNC(int, pthreadZucondZubroadcast, // pthread_cond_broadcast
++ pthread_cond_t* cond) {
++ return pthread_cond_broadcast_WRK(cond);
++ }
+ #elif defined(VGO_darwin)
+ PTH_FUNC(int, pthreadZucondZubroadcast, // pthread_cond_broadcast
+ pthread_cond_t* cond) {
+@@ -1521,6 +1553,11 @@ static int pthread_cond_init_WRK(pthread
+ pthread_cond_t* cond, pthread_condattr_t* cond_attr) {
+ return pthread_cond_init_WRK(cond, cond_attr);
+ }
++#elif defined(VGO_netbsd)
++ PTH_FUNC(int, pthreadZucondZuinitZAZa, // pthread_cond_init@*
++ pthread_cond_t* cond, pthread_condattr_t* cond_attr) {
++ return pthread_cond_init_WRK(cond, cond_attr);
++ }
+ #elif defined(VGO_darwin)
+ PTH_FUNC(int, pthreadZucondZuinit, // pthread_cond_init
+ pthread_cond_t* cond, pthread_condattr_t * cond_attr) {
+@@ -1611,6 +1648,11 @@ static int pthread_cond_destroy_WRK(pthr
+ pthread_cond_t* cond) {
+ return pthread_cond_destroy_WRK(cond);
+ }
++#elif defined(VGO_netbsd)
++ PTH_FUNC(int, pthreadZucondZudestroy, // pthread_cond_destroy
++ pthread_cond_t* cond) {
++ return pthread_cond_destroy_WRK(cond);
++ }
+ #elif defined(VGO_darwin)
+ PTH_FUNC(int, pthreadZucondZudestroy, // pthread_cond_destroy
+ pthread_cond_t* cond) {
+@@ -1817,6 +1859,16 @@ static int pthread_spin_init_or_unlock_W
+ /* this is never actually called */
+ return pthread_spin_init_or_unlock_WRK(lock, 0/*pshared*/);
+ }
++#elif defined(VGO_netbsd)
++ PTH_FUNC(int, pthreadZuspinZuinit, // pthread_spin_init
++ pthread_spinlock_t* lock, int pshared) {
++ return pthread_spin_init_or_unlock_WRK(lock, pshared);
++ }
++ PTH_FUNC(int, pthreadZuspinZuunlockZAZa, // pthread_spin_unlock@*
++ pthread_spinlock_t* lock) {
++ /* this is never actually called */
++ return pthread_spin_init_or_unlock_WRK(lock, 0/*pshared*/);
++ }
+ #elif defined(VGO_darwin)
+ #elif defined(VGO_solaris)
+ PTH_FUNC(int, pthreadZuspinZuinit, // pthread_spin_init
+@@ -1861,7 +1913,7 @@ static int pthread_spin_destroy_WRK(pthr
+ }
+ return ret;
+ }
+-#if defined(VGO_linux)
++#if defined(VGO_linux) || defined(VGO_netbsd)
+ PTH_FUNC(int, pthreadZuspinZudestroy, // pthread_spin_destroy
+ pthread_spinlock_t *lock) {
+ return pthread_spin_destroy_WRK(lock);
+@@ -1914,7 +1966,7 @@ static int pthread_spin_lock_WRK(pthread
+ }
+ return ret;
+ }
+-#if defined(VGO_linux)
++#if defined(VGO_linux) || defined(VGO_netbsd)
+ PTH_FUNC(int, pthreadZuspinZulock, // pthread_spin_lock
+ pthread_spinlock_t *lock) {
+ return pthread_spin_lock_WRK(lock);
+@@ -1968,7 +2020,7 @@ static int pthread_spin_trylock_WRK(pthr
+ }
+ return ret;
+ }
+-#if defined(VGO_linux)
++#if defined(VGO_linux) || defined(VGO_netbsd)
+ PTH_FUNC(int, pthreadZuspinZutrylock, // pthread_spin_trylock
+ pthread_spinlock_t *lock) {
+ return pthread_spin_trylock_WRK(lock);
+@@ -2041,6 +2093,12 @@ static int pthread_rwlock_init_WRK(pthre
+ pthread_rwlockattr_t* attr) {
+ return pthread_rwlock_init_WRK(rwl, attr);
+ }
++#elif defined(VGO_netbsd)
++ PTH_FUNC(int, pthreadZurwlockZuinit, // pthread_rwlock_init
++ pthread_rwlock_t *rwl,
++ pthread_rwlockattr_t* attr) {
++ return pthread_rwlock_init_WRK(rwl, attr);
++ }
+ #elif defined(VGO_darwin)
+ PTH_FUNC(int, pthreadZurwlockZuinitZa, // pthread_rwlock_init*
+ pthread_rwlock_t *rwl,
+@@ -2120,6 +2178,11 @@ static int pthread_rwlock_destroy_WRK(pt
+ pthread_rwlock_t *rwl) {
+ return pthread_rwlock_destroy_WRK(rwl);
+ }
++#elif defined(VGO_netbsd)
++ PTH_FUNC(int, pthreadZurwlockZudestroy, // pthread_rwlock_destroy
++ pthread_rwlock_t *rwl) {
++ return pthread_rwlock_destroy_WRK(rwl);
++ }
+ #elif defined(VGO_darwin)
+ PTH_FUNC(int, pthreadZurwlockZudestroyZa, // pthread_rwlock_destroy*
+ pthread_rwlock_t *rwl) {
+@@ -2174,6 +2237,11 @@ static int pthread_rwlock_wrlock_WRK(pth
+ pthread_rwlock_t* rwlock) {
+ return pthread_rwlock_wrlock_WRK(rwlock);
+ }
++#elif defined(VGO_netbsd)
++ PTH_FUNC(int, pthreadZurwlockZuwrlock, // pthread_rwlock_wrlock
++ pthread_rwlock_t* rwlock) {
++ return pthread_rwlock_wrlock_WRK(rwlock);
++ }
+ #elif defined(VGO_darwin)
+ PTH_FUNC(int, pthreadZurwlockZuwrlockZa, // pthread_rwlock_wrlock*
+ pthread_rwlock_t* rwlock) {
+@@ -2254,6 +2322,11 @@ static int pthread_rwlock_rdlock_WRK(pth
+ pthread_rwlock_t* rwlock) {
+ return pthread_rwlock_rdlock_WRK(rwlock);
+ }
++#elif defined(VGO_netbsd)
++ PTH_FUNC(int, pthreadZurwlockZurdlock, // pthread_rwlock_rdlock
++ pthread_rwlock_t* rwlock) {
++ return pthread_rwlock_rdlock_WRK(rwlock);
++ }
+ #elif defined(VGO_darwin)
+ PTH_FUNC(int, pthreadZurwlockZurdlockZa, // pthread_rwlock_rdlock*
+ pthread_rwlock_t* rwlock) {
+@@ -2340,6 +2413,11 @@ static int pthread_rwlock_trywrlock_WRK(
+ pthread_rwlock_t* rwlock) {
+ return pthread_rwlock_trywrlock_WRK(rwlock);
+ }
++#elif defined(VGO_netbsd)
++ PTH_FUNC(int, pthreadZurwlockZutrywrlock, // pthread_rwlock_trywrlock
++ pthread_rwlock_t* rwlock) {
++ return pthread_rwlock_trywrlock_WRK(rwlock);
++ }
+ #elif defined(VGO_darwin)
+ PTH_FUNC(int, pthreadZurwlockZutrywrlockZa, // pthread_rwlock_trywrlock*
+ pthread_rwlock_t* rwlock) {
+@@ -2401,6 +2479,11 @@ static int pthread_rwlock_tryrdlock_WRK(
+ pthread_rwlock_t* rwlock) {
+ return pthread_rwlock_tryrdlock_WRK(rwlock);
+ }
++#elif defined(VGO_netbsd)
++ PTH_FUNC(int, pthreadZurwlockZutryrdlock, // pthread_rwlock_tryrdlock
++ pthread_rwlock_t* rwlock) {
++ return pthread_rwlock_tryrdlock_WRK(rwlock);
++ }
+ #elif defined(VGO_darwin)
+ PTH_FUNC(int, pthreadZurwlockZutryrdlockZa, // pthread_rwlock_tryrdlock*
+ pthread_rwlock_t* rwlock) {
+@@ -2452,6 +2535,7 @@ static int pthread_rwlock_timedrdlock_WR
+ return ret;
+ }
+ #if defined(VGO_linux)
++#elif defined(VGO_netbsd)
+ #elif defined(VGO_darwin)
+ #elif defined(VGO_solaris)
+ PTH_FUNC(int, pthreadZurwlockZutimedrdlock, // pthread_rwlock_timedrdlock
+@@ -2506,6 +2590,7 @@ static int pthread_rwlock_timedwrlock_WR
+ }
+ #if defined(VGO_linux)
+ #elif defined(VGO_darwin)
++#elif defined(VGO_netbsd)
+ #elif defined(VGO_solaris)
+ PTH_FUNC(int, pthreadZurwlockZutimedwrlock, // pthread_rwlock_timedwrlock
+ pthread_rwlock_t *rwlock,
+@@ -2558,6 +2643,11 @@ static int pthread_rwlock_unlock_WRK(pth
+ pthread_rwlock_t* rwlock) {
+ return pthread_rwlock_unlock_WRK(rwlock);
+ }
++#elif defined(VGO_netbsd)
++ PTH_FUNC(int, pthreadZurwlockZuunlock, // pthread_rwlock_unlock
++ pthread_rwlock_t* rwlock) {
++ return pthread_rwlock_unlock_WRK(rwlock);
++ }
+ #elif defined(VGO_darwin)
+ PTH_FUNC(int, pthreadZurwlockZuunlockZa, // pthread_rwlock_unlock*
+ pthread_rwlock_t* rwlock) {
+@@ -2641,6 +2731,11 @@ static int sem_init_WRK(sem_t* sem, int
+ sem_t* sem, int pshared, unsigned long value) {
+ return sem_init_WRK(sem, pshared, value);
+ }
++#elif defined(VGO_netbsd)
++ PTH_FUNC(int, semZuinit, // sem_init
++ sem_t* sem, int pshared, unsigned long value) {
++ return sem_init_WRK(sem, pshared, value);
++ }
+ #elif defined(VGO_darwin)
+ PTH_FUNC(int, semZuinit, // sem_init
+ sem_t* sem, int pshared, unsigned long value) {
+@@ -2723,6 +2818,11 @@ static int sem_destroy_WRK(sem_t* sem)
+ sem_t* sem) {
+ return sem_destroy_WRK(sem);
+ }
++#elif defined(VGO_netbsd)
++ PTH_FUNC(int, semZudestroy, // sem_destroy
++ sem_t* sem) {
++ return sem_destroy_WRK(sem);
++ }
+ #elif defined(VGO_darwin)
+ PTH_FUNC(int, semZudestroy, // sem_destroy
+ sem_t* sem) {
+@@ -2785,6 +2885,10 @@ static int sem_wait_WRK(sem_t* sem)
+ PTH_FUNC(int, semZuwaitZAZa, sem_t* sem) { /* sem_wait@* */
+ return sem_wait_WRK(sem);
+ }
++#elif defined(VGO_netbsd)
++ PTH_FUNC(int, semZuwait, sem_t* sem) { /* sem_wait */
++ return sem_wait_WRK(sem);
++ }
+ #elif defined(VGO_darwin)
+ PTH_FUNC(int, semZuwait, sem_t* sem) { /* sem_wait */
+ return sem_wait_WRK(sem);
+@@ -2846,6 +2950,10 @@ static int sem_post_WRK(sem_t* sem)
+ PTH_FUNC(int, semZupostZAZa, sem_t* sem) { /* sem_post@* */
+ return sem_post_WRK(sem);
+ }
++#elif defined(VGO_netbsd)
++ PTH_FUNC(int, semZupost, sem_t* sem) { /* sem_post */
++ return sem_post_WRK(sem);
++ }
+ #elif defined(VGO_darwin)
+ PTH_FUNC(int, semZupost, sem_t* sem) { /* sem_post */
+ return sem_post_WRK(sem);
diff --git a/valgrind-netbsd/patches/patch-include_Makefile.am b/valgrind-netbsd/patches/patch-include_Makefile.am
new file mode 100644
index 0000000000..d23faea972
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-include_Makefile.am
@@ -0,0 +1,22 @@
+$NetBSD$
+
+--- include/Makefile.am.orig 2018-05-05 07:42:22.000000000 +0000
++++ include/Makefile.am
+@@ -47,6 +47,9 @@ nobase_pkginclude_HEADERS = \
+ pub_tool_xtmemory.h \
+ valgrind.h \
+ vki/vki-linux.h \
++ vki/vki-netbsd.h \
++ vki/vki-machine-types-amd64-netbsd.h \
++ vki/vki-amd64-netbsd.h \
+ vki/vki-darwin.h \
+ vki/vki-solaris.h \
+ vki/vki-solaris-repcache.h \
+@@ -79,6 +82,7 @@ nobase_pkginclude_HEADERS = \
+ vki/vki-scnums-mips64-linux.h \
+ vki/vki-scnums-darwin.h \
+ vki/vki-scnums-solaris.h \
++ vki/vki-scnums-netbsd.h \
+ vki/vki-xen.h \
+ vki/vki-xen-domctl.h \
+ vki/vki-xen-evtchn.h \
diff --git a/valgrind-netbsd/patches/patch-include_pub__tool__basics.h b/valgrind-netbsd/patches/patch-include_pub__tool__basics.h
new file mode 100644
index 0000000000..5fcd7e9b2b
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-include_pub__tool__basics.h
@@ -0,0 +1,38 @@
+$NetBSD$
+
+--- include/pub_tool_basics.h.orig 2018-07-13 08:52:05.000000000 +0000
++++ include/pub_tool_basics.h
+@@ -104,7 +104,7 @@ typedef Word PtrdiffT;
+ // always a signed 64-bit int. So we defined our own Off64T as well.
+ #if defined(VGO_linux) || defined(VGO_solaris)
+ typedef Word OffT; // 32 64
+-#elif defined(VGO_darwin)
++#elif defined(VGO_darwin) || defined(VGO_netbsd)
+ typedef Long OffT; // 64 64
+ #else
+ # error Unknown OS
+@@ -228,7 +228,7 @@ typedef
+ }
+ SysRes;
+
+-#elif defined(VGO_solaris)
++#elif defined(VGO_solaris) || defined(VGO_netbsd)
+ typedef
+ struct {
+ UWord _val;
+@@ -364,7 +364,14 @@ static inline Bool sr_EQ ( UInt sysno, S
+ && sr1._wLO == sr2._wLO && sr1._wHI == sr2._wHI;
+ }
+
+-#elif defined(VGO_solaris)
++#elif defined(VGO_solaris) || defined(VGO_netbsd)
++
++/*
++ NetBSD/amd64
++ X86_TF_RAX(frame) = rval[0];
++ X86_TF_RDX(frame) = rval[1];
++// X86_TF_RFLAGS(frame) &= ~PSL_C; // carry bit <- enabled ZC signs error
++*/
+
+ static inline Bool sr_isError ( SysRes sr ) {
+ return sr._isError;
diff --git a/valgrind-netbsd/patches/patch-include_pub__tool__basics__asm.h b/valgrind-netbsd/patches/patch-include_pub__tool__basics__asm.h
new file mode 100644
index 0000000000..186fba51a3
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-include_pub__tool__basics__asm.h
@@ -0,0 +1,13 @@
+$NetBSD$
+
+--- include/pub_tool_basics_asm.h.orig 2018-05-05 07:42:22.000000000 +0000
++++ include/pub_tool_basics_asm.h
+@@ -48,7 +48,7 @@
+
+ #define VGAPPEND(str1,str2) str1##str2
+
+-#if defined(VGO_linux) || defined(VGO_solaris)
++#if defined(VGO_linux) || defined(VGO_solaris) || defined(VGO_netbsd)
+ # define VG_(str) VGAPPEND( vgPlain_, str)
+ # define ML_(str) VGAPPEND( vgModuleLocal_, str)
+ #elif defined(VGO_darwin)
diff --git a/valgrind-netbsd/patches/patch-include_pub__tool__machine.h b/valgrind-netbsd/patches/patch-include_pub__tool__machine.h
new file mode 100644
index 0000000000..dccbd7da08
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-include_pub__tool__machine.h
@@ -0,0 +1,13 @@
+$NetBSD$
+
+--- include/pub_tool_machine.h.orig 2018-05-05 07:42:22.000000000 +0000
++++ include/pub_tool_machine.h
+@@ -41,7 +41,7 @@
+ // be larger than VG_MAX_INSTR_SZB
+ # define VG_STACK_REDZONE_SZB 0 // number of addressable bytes below %RSP
+
+-#elif defined(VGP_amd64_linux) || defined(VGP_amd64_solaris)
++#elif defined(VGP_amd64_linux) || defined(VGP_amd64_solaris) || defined(VGP_amd64_netbsd)
+ # define VG_MIN_INSTR_SZB 1
+ # define VG_MAX_INSTR_SZB 16
+ # define VG_CLREQ_SZB 19
diff --git a/valgrind-netbsd/patches/patch-include_pub__tool__redir.h b/valgrind-netbsd/patches/patch-include_pub__tool__redir.h
new file mode 100644
index 0000000000..8cfac39605
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-include_pub__tool__redir.h
@@ -0,0 +1,41 @@
+$NetBSD$
+
+--- include/pub_tool_redir.h.orig 2018-05-05 07:42:22.000000000 +0000
++++ include/pub_tool_redir.h
+@@ -243,7 +243,7 @@
+
+ /* --- Soname of the standard C library. --- */
+
+-#if defined(VGO_linux) || defined(VGO_solaris)
++#if defined(VGO_linux) || defined(VGO_solaris) || defined(VGO_netbsd)
+ # if defined(MUSL_LIBC)
+ # define VG_Z_LIBC_SONAME libcZdZa // libc.*
+ #else
+@@ -284,6 +284,8 @@
+ #else
+ # define VG_Z_LIBPTHREAD_SONAME libpthreadZdsoZd0 // libpthread.so.0
+ #endif
++#elif defined(VGO_netbsd)
++# define VG_Z_LIBPTHREAD_SONAME libpthreadZdsoZa // libpthread.so*
+ #elif defined(VGO_darwin)
+ # define VG_Z_LIBPTHREAD_SONAME libSystemZdZaZddylib // libSystem.*.dylib
+ #elif defined(VGO_solaris)
+@@ -318,6 +320,18 @@
+
+ #endif
+
++/* --- Sonames for NetBSD ELF linkers. --- */
++
++#if defined(VGO_netbsd)
++
++#define VG_Z_LD_ELF_SO_1 ldZdelf_so // ld.elf_so
++#define VG_U_LD_ELF_SO_1 "ld.elf_so"
++
++#define VG_Z_LD_ELF32_SO_1 ldZdelf_soZhi386 // ld.elf_so-i386
++#define VG_U_LD_ELF32_SO_1 "ld.elf_so-i386"
++
++#endif
++
+ /* --- Executable name for Darwin Mach-O linker. --- */
+
+ #if defined(VGO_darwin)
diff --git a/valgrind-netbsd/patches/patch-include_pub__tool__vki.h b/valgrind-netbsd/patches/patch-include_pub__tool__vki.h
new file mode 100644
index 0000000000..447df472e1
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-include_pub__tool__vki.h
@@ -0,0 +1,13 @@
+$NetBSD$
+
+--- include/pub_tool_vki.h.orig 2018-05-05 07:42:22.000000000 +0000
++++ include/pub_tool_vki.h
+@@ -52,6 +52,8 @@
+ # include "vki/vki-darwin.h"
+ #elif defined(VGO_solaris)
+ # include "vki/vki-solaris.h"
++#elif defined(VGO_netbsd)
++# include "vki/vki-netbsd.h"
+ #else
+ # error Unknown Plat/OS
+ #endif
diff --git a/valgrind-netbsd/patches/patch-include_pub__tool__vkiscnums.h b/valgrind-netbsd/patches/patch-include_pub__tool__vkiscnums.h
new file mode 100644
index 0000000000..38e380b859
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-include_pub__tool__vkiscnums.h
@@ -0,0 +1,16 @@
+$NetBSD$
+
+--- include/pub_tool_vkiscnums.h.orig 2018-05-05 07:42:22.000000000 +0000
++++ include/pub_tool_vkiscnums.h
+@@ -46,6 +46,11 @@ extern const HChar *VG_(sysnum_string) (
+ // Macro provided for backward compatibility purposes.
+ #define VG_SYSNUM_STRING(sysnum) VG_(sysnum_string)(sysnum)
+
++#if defined(VGO_netbsd)
++ // See the NetBSD-specific case in pub_tool_vkiscnums_asm.h for an
++ // explanation of why we include this here rather than there.
++# include "vki/vki-scnums-netbsd.h"
++#endif
+
+ #endif // __PUB_TOOL_VKISCNUMS_H
+
diff --git a/valgrind-netbsd/patches/patch-include_pub__tool__vkiscnums__asm.h b/valgrind-netbsd/patches/patch-include_pub__tool__vkiscnums__asm.h
new file mode 100644
index 0000000000..bc93bea9bf
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-include_pub__tool__vkiscnums__asm.h
@@ -0,0 +1,14 @@
+$NetBSD$
+
+--- include/pub_tool_vkiscnums_asm.h.orig 2018-05-05 07:42:22.000000000 +0000
++++ include/pub_tool_vkiscnums_asm.h
+@@ -66,6 +66,9 @@
+ #elif defined(VGP_x86_solaris) || (VGP_amd64_solaris)
+ # include "vki/vki-scnums-solaris.h"
+
++#elif defined(VGP_amd64_netbsd)
++# include "vki/vki-scnums-netbsd.h"
++
+ #else
+ # error Unknown platform
+ #endif
diff --git a/valgrind-netbsd/patches/patch-include_valgrind.h b/valgrind-netbsd/patches/patch-include_valgrind.h
new file mode 100644
index 0000000000..c1faa78697
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-include_valgrind.h
@@ -0,0 +1,49 @@
+$NetBSD$
+
+--- include/valgrind.h.orig 2018-09-18 06:34:53.000000000 +0000
++++ include/valgrind.h
+@@ -110,6 +110,7 @@
+ */
+ #undef PLAT_x86_darwin
+ #undef PLAT_amd64_darwin
++#undef PLAT_amd64_netbsd
+ #undef PLAT_x86_win32
+ #undef PLAT_amd64_win64
+ #undef PLAT_x86_linux
+@@ -130,6 +131,8 @@
+ # define PLAT_x86_darwin 1
+ #elif defined(__APPLE__) && defined(__x86_64__)
+ # define PLAT_amd64_darwin 1
++#elif defined(__NetBSD__) && defined(__amd64__)
++# define PLAT_amd64_netbsd 1
+ #elif (defined(__MINGW32__) && !defined(__MINGW64__)) \
+ || defined(__CYGWIN32__) \
+ || (defined(_WIN32) && defined(_M_IX86))
+@@ -394,7 +397,8 @@ valgrind_do_client_request_expr(uintptr_
+
+ #if defined(PLAT_amd64_linux) || defined(PLAT_amd64_darwin) \
+ || defined(PLAT_amd64_solaris) \
+- || (defined(PLAT_amd64_win64) && defined(__GNUC__))
++ || (defined(PLAT_amd64_win64) && defined(__GNUC__)) \
++ || defined(PLAT_amd64_netbsd)
+
+ typedef
+ struct {
+@@ -454,7 +458,7 @@ typedef
+ ); \
+ } while (0)
+
+-#endif /* PLAT_amd64_linux || PLAT_amd64_darwin || PLAT_amd64_solaris */
++#endif /* PLAT_amd64_linux || PLAT_amd64_darwin || PLAT_amd64_solaris || PLAT_amd64_netbsd */
+
+ /* ------------------------- amd64-Win64 ------------------------- */
+
+@@ -1577,7 +1581,7 @@ typedef
+ /* ---------------- amd64-{linux,darwin,solaris} --------------- */
+
+ #if defined(PLAT_amd64_linux) || defined(PLAT_amd64_darwin) \
+- || defined(PLAT_amd64_solaris)
++ || defined(PLAT_amd64_solaris) || defined(PLAT_amd64_netbsd)
+
+ /* ARGREGS: rdi rsi rdx rcx r8 r9 (the rest on stack in R-to-L order) */
+
diff --git a/valgrind-netbsd/patches/patch-include_vki_vki-amd64-netbsd.h b/valgrind-netbsd/patches/patch-include_vki_vki-amd64-netbsd.h
new file mode 100644
index 0000000000..664ae8a431
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-include_vki_vki-amd64-netbsd.h
@@ -0,0 +1,42 @@
+$NetBSD$
+
+--- include/vki/vki-amd64-netbsd.h.orig 2019-03-27 08:10:06.403900938 +0000
++++ include/vki/vki-amd64-netbsd.h
+@@ -0,0 +1,37 @@
++/*
++ This file is part of Valgrind, a dynamic binary instrumentation
++ framework.
++
++ Copyright (C) 2000-2005 Julian Seward
++ jseward%acm.org@localhost
++
++ This program is free software; you can redistribute it and/or
++ modify it under the terms of the GNU General Public License as
++ published by the Free Software Foundation; either version 2 of the
++ License, or (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful, but
++ WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
++ 02111-1307, USA.
++
++ The GNU General Public License is contained in the file COPYING.
++*/
++
++#ifndef __VKI_AMD64_NETBSD_H
++#define __VKI_AMD64_NETBSD_H
++
++/* PAGE_SHIFT determines the page size. */
++#define VKI_PAGE_SHIFT 12
++#define VKI_PAGE_SIZE (1UL << VKI_PAGE_SHIFT)
++#define VKI_MAX_PAGE_SHIFT VKI_PAGE_SHIFT
++#define VKI_MAX_PAGE_SIZE VKI_PAGE_SIZE
++
++#define VKI_MINSIGSTKSZ 8192
++
++#endif /* __VKI_AMD64_NETBSD_H */
diff --git a/valgrind-netbsd/patches/patch-include_vki_vki-machine-types-amd64-netbsd.h b/valgrind-netbsd/patches/patch-include_vki_vki-machine-types-amd64-netbsd.h
new file mode 100644
index 0000000000..fd8adb6eb3
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-include_vki_vki-machine-types-amd64-netbsd.h
@@ -0,0 +1,227 @@
+$NetBSD$
+
+--- include/vki/vki-machine-types-amd64-netbsd.h.orig 2019-03-30 06:21:05.020121263 +0000
++++ include/vki/vki-machine-types-amd64-netbsd.h
+@@ -0,0 +1,222 @@
++
++/*--------------------------------------------------------------------*/
++/*--- amd64/NetBSD-specific kernel interface: posix types. ---*/
++/*--- vki_posixtypes-amd64-netbsd.h ---*/
++/*--------------------------------------------------------------------*/
++
++/*
++ This file is part of Valgrind, a dynamic binary instrumentation
++ framework.
++
++ Copyright (C) 2000-2005 Julian Seward
++ jseward%acm.org@localhost
++
++ This program is free software; you can redistribute it and/or
++ modify it under the terms of the GNU General Public License as
++ published by the Free Software Foundation; either version 2 of the
++ License, or (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful, but
++ WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
++ 02111-1307, USA.
++
++ The GNU General Public License is contained in the file COPYING.
++*/
++
++#ifndef __VKI_MACHINE_TYPES_AMD64_NETBSD_H
++#define __VKI_MACHINE_TYPES_AMD64_NETBSD_H
++
++//----------------------------------------------------------------------
++// From sys/arch/amd64/include/int_types.h
++//----------------------------------------------------------------------
++
++typedef signed char vki_int8_t;
++typedef unsigned char vki_uint8_t;
++typedef short vki_int16_t;
++typedef unsigned short vki_uint16_t;
++typedef int vki_int32_t;
++typedef unsigned int vki_uint32_t;
++typedef long vki_int64_t;
++typedef unsigned long vki_uint64_t;
++typedef unsigned long vki_uintptr_t;
++typedef long vki_intptr_t;
++
++//----------------------------------------------------------------------
++// From sys/arch/amd64/include/types.h
++//----------------------------------------------------------------------
++
++typedef unsigned long vki_paddr_t;
++typedef unsigned long vki_psize_t;
++typedef unsigned long vki_vaddr_t;
++typedef unsigned long vki_vsize_t;
++typedef long int vki_register_t;
++typedef int vki_register32_t;
++
++typedef long int vki___register_t;
++typedef unsigned char vki___cpu_simple_lock_nv_t;
++
++//----------------------------------------------------------------------
++// From sys/arch/amd64/include/ansi.h
++//----------------------------------------------------------------------
++
++typedef unsigned int vki_bsd_clock_t;
++typedef long vki_bsd_ptrdiff_t;
++typedef unsigned long vki_bsd_size_t;
++typedef long vki_bsd_ssize_t;
++typedef vki_int64_t vki_bsd_time_t;
++typedef int vki_bsd_clockid_t;
++typedef int vki_bsd_timer_t;
++typedef int vki_bsd_suseconds_t;
++typedef unsigned int vki_bsd_useconds_t;
++typedef int vki_bsd_wchar_t;
++typedef int vki_bsd_wint_t;
++
++//----------------------------------------------------------------------
++// From sys/arch/amd64/include/limits.h
++//----------------------------------------------------------------------
++
++#define VKI_CHAR_BIT 8
++
++#define VKI_UCHAR_MAX 0xff
++#define VKI_SCHAR_MAX 0x7f
++#define VKI_SCHAR_MIN (-0x7f-1)
++
++#define VKI_USHRT_MAX 0xffff
++#define VKI_SHRT_MAX 0x7fff
++#define VKI_SHRT_MIN (-0x7fff-1)
++
++#define VKI_UINT_MAX 0xffffffffU
++#define VKI_INT_MAX 0x7fffffff
++#define VKI_INT_MIN (-0x7fffffff-1)
++
++#define VKI_ULONG_MAX 0xffffffffffffffffUL
++#define VKI_LONG_MAX 0x7fffffffffffffffL
++#define VKI_LONG_MIN (-0x7fffffffffffffffL-1)
++
++#define VKI_SSIZE_MAX VKI_LONG_MAX
++
++#define VKI_ULLONG_MAX 0xffffffffffffffffULL
++#define VKI_LLONG_MAX 0x7fffffffffffffffLL
++#define VKI_LLONG_MIN (-0x7fffffffffffffffLL-1)
++
++#define VKI_SSIZE_MIN VKI_LONG_MIN
++#define VKI_SIZE_T_MAX VKI_ULONG_MAX
++
++#define VKI_UQUAD_MAX 0xffffffffffffffffULL
++#define VKI_QUAD_MAX 0x7fffffffffffffffLL
++#define VKI_QUAD_MIN (-0x7fffffffffffffffLL-1)
++
++#define VKI_LONG_BIT 64
++#define VKI_WORD_BIT 32
++
++#define VKI_DBL_DIG __DBL_DIG__
++#define VKI_DBL_MAX __DBL_MAX__
++#define VKI_DBL_MIN __DBL_MIN__
++
++#define VKI_FLT_DIG __FLT_DIG__
++#define VKI_FLT_MAX __FLT_MAX__
++#define VKI_FLT_MIN __FLT_MIN__
++
++//----------------------------------------------------------------------
++// From sys/arch/amd64/include/frame_regs.h
++//----------------------------------------------------------------------
++
++#define VKI__FRAME_REG(greg, freg) \
++ greg(rdi, RDI, 0) /* tf_rdi */ \
++ greg(rsi, RSI, 1) /* tf_rsi */ \
++ greg(rdx, RDX, 2) /* tf_rdx */ \
++ greg(r10, R10, 6) /* tf_r10 */ \
++ greg(r8, R8, 4) /* tf_r8 */ \
++ greg(r9, R9, 5) /* tf_r9 */ \
++ freg(arg6, @, @) /* tf_arg6: syscall arg from stack */ \
++ freg(arg7, @, @) /* tf_arg7: syscall arg from stack */ \
++ freg(arg8, @, @) /* tf_arg8: syscall arg from stack */ \
++ freg(arg9, @, @) /* tf_arg9: syscall arg from stack */ \
++ greg(rcx, RCX, 3) /* tf_rcx */ \
++ greg(r11, R11, 7) /* tf_r11 */ \
++ greg(r12, R12, 8) /* tf_r12 */ \
++ greg(r13, R13, 9) /* tf_r13 */ \
++ greg(r14, R14, 10) /* tf_r14 */ \
++ greg(r15, R15, 11) /* tf_r15 */ \
++ greg(rbp, RBP, 12) /* tf_rbp */ \
++ greg(rbx, RBX, 13) /* tf_rbx */ \
++ greg(rax, RAX, 14) /* tf_rax */ \
++ greg(gs, GS, 15) /* tf_gs */ \
++ greg(fs, FS, 16) /* tf_fs */ \
++ greg(es, ES, 17) /* tf_es */ \
++ greg(ds, DS, 18) /* tf_ds */ \
++ greg(trapno, TRAPNO, /* tf_trapno */ \
++ 19) \
++ /* Below portion defined in hardware */ \
++ greg(err, ERR, 20) /* tf_err: Dummy inserted if not defined */ \
++ greg(rip, RIP, 21) /* tf_rip */ \
++ greg(cs, CS, 22) /* tf_cs */ \
++ greg(rflags, RFLAGS, /* tf_rflags */ \
++ 23) \
++ /* These are pushed unconditionally on the x86-64 */ \
++ greg(rsp, RSP, 24) /* tf_rsp */ \
++ greg(ss, SS, 25) /* tf_ss */
++
++#define VKI__FRAME_NOREG(reg, REG, idx)
++
++#define VKI__FRAME_GREG(greg) VKI__FRAME_REG(greg, VKI__FRAME_NOREG)
++
++//----------------------------------------------------------------------
++// From sys/arch/amd64/include/mcontext.h
++//----------------------------------------------------------------------
++
++#define VKI_GREG_OFFSETS(reg, REG, idx) VKI__REG_##REG = idx,
++enum { VKI__FRAME_GREG(VKI_GREG_OFFSETS) VKI__NGREG = 26 };
++#undef VKI_GREG_OFFSETS
++
++typedef unsigned long vki___greg_t;
++typedef vki___greg_t vki___gregset_t[VKI__NGREG];
++
++#define VKI__REG_URSP VKI__REG_RSP
++#define VKI__REG_RFL VKI__REG_RFLAGS
++
++typedef char vki___fpregset_t[512] __attribute__((__aligned__(8)));
++
++typedef struct vki_mcontext {
++ vki___gregset_t __gregs;
++ vki___greg_t _mc_tlsbase;
++ vki___fpregset_t __fpregs;
++} vki_mcontext_t;
++
++#define VKI__UC_UCONTEXT_ALIGN (~0xf)
++
++/* AMD64 ABI 128-bytes "red zone". */
++#define VKI__UC_MACHINE_SP(uc) ((uc)->uc_mcontext.__gregs[VKI__REG_RSP] - 128)
++#define VKI__UC_MACHINE_FP(uc) ((uc)->uc_mcontext.__gregs[VKI__REG_RBP])
++#define VKI__UC_MACHINE_PC(uc) ((uc)->uc_mcontext.__gregs[VKI__REG_RIP])
++#define VKI__UC_MACHINE_INTRV(uc) ((uc)->uc_mcontext.__gregs[VKI__REG_RAX])
++
++#define VKI__UC_MACHINE_SET_PC(uc, pc) VKI__UC_MACHINE_PC(uc) = (pc)
++
++#define VKI__UC_TLSBASE 0x00080000
++
++/*
++ * mcontext extensions to handle signal delivery.
++ */
++#define VKI__UC_SETSTACK 0x00010000
++#define VKI__UC_CLRSTACK 0x00020000
++
++#define VKI___UCONTEXT_SIZE 784
++
++//----------------------------------------------------------------------
++// From sys/arch/amd64/include/cdefs.h
++//----------------------------------------------------------------------
++
++#define VKI___ALIGNBYTES (sizeof(long) - 1)
++
++#endif // __VKI_MACHINE_TYPES_AMD64_NETBSD_H
++
++/*--------------------------------------------------------------------*/
++/*--- end ---*/
++/*--------------------------------------------------------------------*/
diff --git a/valgrind-netbsd/patches/patch-include_vki_vki-netbsd.h b/valgrind-netbsd/patches/patch-include_vki_vki-netbsd.h
new file mode 100644
index 0000000000..cbff0eac90
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-include_vki_vki-netbsd.h
@@ -0,0 +1,3302 @@
+$NetBSD$
+
+--- include/vki/vki-netbsd.h.orig 2019-03-31 21:41:11.006389280 +0000
++++ include/vki/vki-netbsd.h
+@@ -0,0 +1,3297 @@
++
++/*--------------------------------------------------------------------*/
++/*--- NetBSD-specific kernel interface. vki-netbsd.h ---*/
++/*--------------------------------------------------------------------*/
++
++/*
++ This file is part of Valgrind, a dynamic binary instrumentation
++ framework.
++
++ Copyright (C) 2011-2017 Petr Pavlu
++ setup%dagobah.cz@localhost
++
++ This program is free software; you can redistribute it and/or
++ modify it under the terms of the GNU General Public License as
++ published by the Free Software Foundation; either version 2 of the
++ License, or (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful, but
++ WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
++ 02111-1307, USA.
++
++ The GNU General Public License is contained in the file COPYING.
++*/
++
++/* Copyright 2013-2017, Ivo Raisr <ivosh%ivosh.net@localhost> */
++
++/* Copyright 2013, OmniTI Computer Consulting, Inc. All rights reserved. */
++
++/* The purpose of this file is described in vki-linux.h.
++
++ To avoid any copyright issues, vki-netbsd.h follows the same approach as
++ vki-darwin.h (not copying anything from kernel header files but instead
++ just including them).
++ */
++
++#ifndef __VKI_NETBSD_H
++#define __VKI_NETBSD_H
++
++//----------------------------------------------------------------------
++// Arch-specific POSIX types
++//----------------------------------------------------------------------
++
++#if defined(VGA_amd64)
++# include "vki-machine-types-amd64-netbsd.h"
++#else
++# error Unknown platform
++#endif
++
++//----------------------------------------------------------------------
++// sys/ansi.h
++//----------------------------------------------------------------------
++
++typedef char * vki___caddr_t;
++typedef vki_uint32_t vki___gid_t;
++typedef vki_uint32_t vki___in_addr_t;
++typedef vki_uint16_t vki___in_port_t;
++typedef vki_uint32_t vki___mode_t;
++typedef vki_int64_t vki___off_t;
++typedef vki_int32_t vki___pid_t;
++typedef vki_uint8_t vki___sa_family_t;
++typedef unsigned int vki___socklen_t;
++typedef vki_uint32_t vki___uid_t;
++typedef vki_uint64_t vki___fsblkcnt_t;
++typedef vki_uint64_t vki___fsfilcnt_t;
++
++struct vki___tag_wctrans_t;
++typedef struct vki___tag_wctrans_t *vki___wctrans_t;
++
++struct vki___tag_wctype_t;
++typedef struct vki___tag_wctype_t *vki___wctype_t;
++
++typedef union {
++ vki_int64_t __mbstateL; /* for alignment */
++ char __mbstate8[128];
++} vki___mbstate_t;
++
++/* NB: vki___fsfilcnt_t is already typedef'd above; duplicate dropped (typedef redefinition is invalid before C11) */
++
++//----------------------------------------------------------------------
++// sys/param.h
++//----------------------------------------------------------------------
++
++#define VKI_BSD 199506 /* System version (year & month). */
++#define VKI_BSD4_3 1
++#define VKI_BSD4_4 1
++
++#define VKI_MAXCOMLEN 16 /* max command name remembered */
++#define VKI_MAXINTERP VKI_PATH_MAX /* max interpreter file name length */
++/* DEPRECATED: use LOGIN_NAME_MAX instead. */
++#define VKI_MAXLOGNAME (VKI_LOGIN_NAME_MAX - 1) /* max login name length */
++#define VKI_NCARGS VKI_ARG_MAX /* max bytes for an exec function */
++#define VKI_NGROUPS VKI_NGROUPS_MAX /* max number groups */
++#define VKI_NOGROUP 65535 /* marker for empty group set member */
++#define VKI_MAXHOSTNAMELEN 256 /* max hostname size */
++
++#ifndef VKI_NOFILE
++#define VKI_NOFILE VKI_OPEN_MAX /* max open files per process */
++#endif
++
++#define VKI_ALIGNBYTES VKI___ALIGNBYTES
++
++//----------------------------------------------------------------------
++// sys/types.h
++//----------------------------------------------------------------------
++
++typedef vki_uint8_t vki_u_int8_t;
++typedef vki_uint16_t vki_u_int16_t;
++typedef vki_uint32_t vki_u_int32_t;
++typedef vki_uint64_t vki_u_int64_t;
++
++typedef unsigned char vki_u_char;
++typedef unsigned short vki_u_short;
++typedef unsigned int vki_u_int;
++typedef unsigned long vki_u_long;
++
++typedef unsigned char vki_unchar;
++typedef unsigned short vki_ushort;
++typedef unsigned int vki_uint;
++typedef unsigned long vki_ulong;
++
++typedef vki_uint64_t vki_u_quad_t;
++typedef vki_int64_t vki_quad_t;
++typedef vki_quad_t * vki_qaddr_t;
++
++typedef vki_int64_t vki_longlong_t;
++typedef vki_uint64_t vki_u_longlong_t;
++
++typedef vki_int64_t vki_blkcnt_t;
++typedef vki_int32_t vki_blksize_t;
++
++typedef vki___fsblkcnt_t vki_fsblkcnt_t;
++typedef vki___fsfilcnt_t vki_fsfilcnt_t;
++typedef vki___caddr_t vki_caddr_t;
++typedef vki_int64_t vki_daddr_t;
++
++typedef vki_uint64_t vki_dev_t;
++typedef vki_uint32_t vki_fixpt_t;
++typedef vki___gid_t vki_gid_t;
++
++typedef vki_uint32_t vki_id_t;
++typedef vki_uint64_t vki_ino_t;
++typedef long vki_key_t;
++
++typedef vki___mode_t vki_mode_t;
++
++typedef vki_uint32_t vki_nlink_t;
++
++typedef vki___off_t vki_off_t;
++typedef vki___pid_t vki_pid_t;
++typedef vki_int32_t vki_lwpid_t;
++typedef vki_uint64_t vki_rlim_t;
++typedef vki_int32_t vki_segsz_t;
++typedef vki_int32_t vki_swblk_t;
++
++typedef vki___uid_t vki_uid_t;
++
++typedef int vki_mqd_t;
++
++typedef unsigned long vki_cpuid_t;
++
++typedef int vki_psetid_t;
++
++typedef volatile vki___cpu_simple_lock_nv_t vki___cpu_simple_lock_t;
++
++typedef int vki_boolean_t;
++
++union vki___semun {
++ int val; /* value for SETVAL */
++ struct vki_semid_ds *buf; /* buffer for IPC_STAT & IPC_SET */
++ unsigned short *array; /* array for GETALL & SETALL */
++};
++
++#define vki_semun vki___semun // linux compat
++
++typedef vki_int32_t vki___devmajor_t;
++typedef vki_int32_t vki___devminor_t;
++
++typedef vki___devmajor_t vki_devmajor_t;
++typedef vki___devminor_t vki_devminor_t;
++
++typedef vki_bsd_clock_t vki_clock_t;
++typedef vki_bsd_ptrdiff_t vki_ptrdiff_t;
++typedef vki_bsd_size_t vki_size_t;
++typedef vki_bsd_ssize_t vki_ssize_t;
++typedef vki_bsd_time_t vki_time_t;
++typedef vki_bsd_clockid_t vki_clockid_t;
++typedef vki_bsd_timer_t vki_timer_t;
++typedef vki_bsd_suseconds_t vki_suseconds_t;
++typedef vki_bsd_useconds_t vki_useconds_t;
++
++typedef struct vki_kauth_cred * vki_kauth_cred_t;
++
++typedef int vki_pri_t;
++
++//----------------------------------------------------------------------
++// sys/cdefs.h
++//----------------------------------------------------------------------
++
++#define VKI___CAST(__dt, __st) ((__dt)(__st))
++#define VKI___CASTV(__dt, __st) VKI___CAST(__dt, VKI___CAST(void *, __st))
++#define VKI___CASTCV(__dt, __st) VKI___CAST(__dt, VKI___CAST(const void *, __st))
++
++//----------------------------------------------------------------------
++// Now the rest of the arch-specific stuff
++//----------------------------------------------------------------------
++
++#if defined(VGA_amd64)
++# include "vki-amd64-netbsd.h"
++#else
++# error Unknown platform
++#endif
++
++//----------------------------------------------------------------------
++// sys/syslimits.h
++//----------------------------------------------------------------------
++
++#define VKI_ARG_MAX (256 * 1024)
++#define VKI_CHILD_MAX 160
++#define VKI_GID_MAX 2147483647U
++#define VKI_LINK_MAX 32767
++#define VKI_MAX_CANON 255
++#define VKI_MAX_INPUT 255
++#define VKI_NAME_MAX 511
++#define VKI_NGROUPS_MAX 16
++#define VKI_UID_MAX 2147483647U
++#define VKI_OPEN_MAX 128
++#define VKI_PATH_MAX 1024
++#define VKI_PIPE_BUF 512
++#define VKI_BC_BASE_MAX VKI_INT_MAX
++#define VKI_BC_DIM_MAX 65535
++#define VKI_BC_SCALE_MAX VKI_INT_MAX
++#define VKI_BC_STRING_MAX VKI_INT_MAX
++#define VKI_COLL_WEIGHTS_MAX 2
++#define VKI_EXPR_NEST_MAX 32
++#define VKI_LINE_MAX 2048
++#define VKI_RE_DUP_MAX 255
++#define VKI_LOGIN_NAME_MAX 17
++#define VKI_IOV_MAX 1024
++#define VKI_NZERO 20
++
++//----------------------------------------------------------------------
++// sys/timespec.h
++//----------------------------------------------------------------------
++
++struct vki_timespec {
++ vki_time_t tv_sec; /* seconds */
++ long tv_nsec; /* and nanoseconds */
++};
++
++//----------------------------------------------------------------------
++// sys/time.h
++//----------------------------------------------------------------------
++
++struct vki_timeval {
++ vki_time_t tv_sec; /* seconds */
++ vki_suseconds_t tv_usec; /* and microseconds */
++};
++
++struct vki_timezone {
++ int tz_minuteswest; /* minutes west of Greenwich */
++ int tz_dsttime; /* type of dst correction */
++};
++
++struct vki_bintime {
++ vki_time_t sec;
++ vki_uint64_t frac;
++};
++
++struct vki_itimerval {
++ struct vki_timeval it_interval; /* timer interval */
++ struct vki_timeval it_value; /* current value */
++};
++
++struct vki_itimerspec {
++ struct vki_timespec it_interval;
++ struct vki_timespec it_value;
++};
++
++
++//----------------------------------------------------------------------
++// From sys/resource.h
++//----------------------------------------------------------------------
++
++#define VKI_PRIO_MIN -20
++#define VKI_PRIO_MAX 20
++
++#define VKI_PRIO_PROCESS 0
++#define VKI_PRIO_PGRP 1
++#define VKI_PRIO_USER 2
++
++#define VKI_RUSAGE_SELF 0
++#define VKI_RUSAGE_CHILDREN -1
++
++struct vki_rusage {
++ struct vki_timeval ru_utime; /* user time used */
++ struct vki_timeval ru_stime; /* system time used */
++ long ru_maxrss; /* maximum resident set size */
++#define ru_first ru_ixrss
++ long ru_ixrss; /* integral shared memory size */
++ long ru_idrss; /* integral unshared data size */
++ long ru_isrss; /* integral unshared stack size */
++ long ru_minflt; /* page reclaims */
++ long ru_majflt; /* page faults */
++ long ru_nswap; /* swaps */
++ long ru_inblock; /* block input operations */
++ long ru_oublock; /* block output operations */
++ long ru_msgsnd; /* messages sent */
++ long ru_msgrcv; /* messages received */
++ long ru_nsignals; /* signals received */
++ long ru_nvcsw; /* voluntary context switches */
++ long ru_nivcsw; /* involuntary " */
++#define ru_last ru_nivcsw
++};
++
++struct vki_wrusage {
++ struct vki_rusage wru_self;
++ struct vki_rusage wru_children;
++};
++
++#define VKI_RLIMIT_CPU 0 /* cpu time in milliseconds */
++#define VKI_RLIMIT_FSIZE 1 /* maximum file size */
++#define VKI_RLIMIT_DATA 2 /* data size */
++#define VKI_RLIMIT_STACK 3 /* stack size */
++#define VKI_RLIMIT_CORE 4 /* core file size */
++#define VKI_RLIMIT_RSS 5 /* resident set size */
++#define VKI_RLIMIT_MEMLOCK 6 /* locked-in-memory address space */
++#define VKI_RLIMIT_NPROC 7 /* number of processes */
++#define VKI_RLIMIT_NOFILE 8 /* number of open files */
++#define VKI_RLIMIT_SBSIZE 9 /* maximum size of all socket buffers */
++#define VKI_RLIMIT_AS 10 /* virtual process size (inclusive of mmap) */
++#define VKI_RLIMIT_VMEM VKI_RLIMIT_AS /* common alias */
++#define VKI_RLIMIT_NTHR 11 /* number of threads */
++
++#define VKI_RLIM_NLIMITS 12 /* number of resource limits */
++
++#define VKI_RLIM_INFINITY (~((vki_u_quad_t)1 << 63)) /* no limit */
++#define VKI_RLIM_SAVED_MAX VKI_RLIM_INFINITY /* unrepresentable hard limit */
++#define VKI_RLIM_SAVED_CUR VKI_RLIM_INFINITY /* unrepresentable soft limit */
++
++struct vki_rlimit {
++ vki_rlim_t rlim_cur; /* current (soft) limit */
++ vki_rlim_t rlim_max; /* maximum value for rlim_cur */
++};
++
++struct vki_loadavg {
++ vki_fixpt_t ldavg[3];
++ long fscale;
++};
++
++//----------------------------------------------------------------------
++// From sys/sigtypes.h
++//----------------------------------------------------------------------
++
++typedef struct {
++ vki_uint32_t sig/*__bits*/[4 /* _VKI_NSIG_WORDS */];
++} vki_sigset_t;
++
++typedef struct vki_sigaltstack {
++ void *ss_sp;
++ vki_size_t ss_size;
++ int ss_flags;
++} vki_stack_t;
++
++//----------------------------------------------------------------------
++// From sys/siginfo.h
++//----------------------------------------------------------------------
++
++typedef union vki_sigval {
++ int sival_int;
++ void *sival_ptr;
++} vki_sigval_t;
++
++struct vki__ksiginfo {
++ int _signo;
++ int _code;
++ int _errno;
++#ifdef _LP64
++ /* In _LP64 the union starts on an 8-byte boundary. */
++ int _pad;
++#endif
++ union {
++ struct {
++ vki_pid_t _pid;
++ vki_uid_t _uid;
++ vki_sigval_t _value;
++ } _rt;
++
++ struct {
++ vki_pid_t _pid;
++ vki_uid_t _uid;
++ int _status;
++ vki_clock_t _utime;
++ vki_clock_t _stime;
++ } _child;
++
++ struct {
++ void *_addr;
++ int _trap;
++ int _trap2;
++ int _trap3;
++ } _fault;
++
++ struct {
++ long _band;
++ int _fd;
++ } _poll;
++ } _reason;
++};
++
++typedef union vki_siginfo {
++ char si_pad[128]; /* Total size; for future expansion */
++ struct vki__ksiginfo _info;
++} vki_siginfo_t;
++
++#define si_signo _info._signo
++#define si_code _info._code
++#define si_errno _info._errno
++
++#define si_value _info._reason._rt._value
++#define si_pid _info._reason._child._pid
++#define si_uid _info._reason._child._uid
++#define si_status _info._reason._child._status
++#define si_utime _info._reason._child._utime
++#define si_stime _info._reason._child._stime
++
++#define si_addr _info._reason._fault._addr
++#define si_trap _info._reason._fault._trap
++#define si_trap2 _info._reason._fault._trap2
++#define si_trap3 _info._reason._fault._trap3
++
++#define si_band _info._reason._poll._band
++#define si_fd _info._reason._poll._fd
++
++#define VKI_ILL_ILLOPC 1 /* Illegal opcode */
++#define VKI_ILL_ILLOPN 2 /* Illegal operand */
++#define VKI_ILL_ILLADR 3 /* Illegal addressing mode */
++#define VKI_ILL_ILLTRP 4 /* Illegal trap */
++#define VKI_ILL_PRVOPC 5 /* Privileged opcode */
++#define VKI_ILL_PRVREG 6 /* Privileged register */
++#define VKI_ILL_COPROC 7 /* Coprocessor error */
++#define VKI_ILL_BADSTK 8 /* Internal stack error */
++
++#define VKI_FPE_INTDIV 1 /* Integer divide by zero */
++#define VKI_FPE_INTOVF 2 /* Integer overflow */
++#define VKI_FPE_FLTDIV 3 /* Floating point divide by zero */
++#define VKI_FPE_FLTOVF 4 /* Floating point overflow */
++#define VKI_FPE_FLTUND 5 /* Floating point underflow */
++#define VKI_FPE_FLTRES 6 /* Floating point inexact result */
++#define VKI_FPE_FLTINV 7 /* Invalid Floating point operation */
++#define VKI_FPE_FLTSUB 8 /* Subscript out of range */
++
++#define VKI_SEGV_MAPERR 1 /* Address not mapped to object */
++#define VKI_SEGV_ACCERR 2 /* Invalid permissions for mapped object*/
++
++#define VKI_BUS_ADRALN 1 /* Invalid address alignment */
++#define VKI_BUS_ADRERR 2 /* Non-existent physical address */
++#define VKI_BUS_OBJERR 3 /* Object specific hardware error */
++
++#define VKI_TRAP_BRKPT 1 /* Process breakpoint */
++#define VKI_TRAP_TRACE 2 /* Process trace trap */
++#define VKI_TRAP_EXEC 3 /* Process exec trap */
++#define VKI_TRAP_CHLD 4 /* Process child trap */
++#define VKI_TRAP_LWP 5 /* Process lwp trap */
++#define VKI_TRAP_DBREG 6 /* Process hardware debug register trap */
++#define VKI_TRAP_SCE 7 /* Process syscall entry trap */
++#define VKI_TRAP_SCX 8 /* Process syscall exit trap */
++
++#define VKI_CLD_EXITED 1 /* Child has exited */
++#define VKI_CLD_KILLED 2 /* Child has terminated abnormally but */
++ /* did not create a core file */
++#define VKI_CLD_DUMPED 3 /* Child has terminated abnormally and */
++ /* created a core file */
++#define VKI_CLD_TRAPPED 4 /* Traced child has trapped */
++#define VKI_CLD_STOPPED 5 /* Child has stopped */
++#define VKI_CLD_CONTINUED 6 /* Stopped child has continued */
++
++#define VKI_POLL_IN 1 /* Data input available */
++#define VKI_POLL_OUT 2 /* Output buffers available */
++#define VKI_POLL_MSG 3 /* Input message available */
++#define VKI_POLL_ERR 4 /* I/O Error */
++#define VKI_POLL_PRI 5 /* High priority input available */
++#define VKI_POLL_HUP 6 /* Device disconnected */
++
++#define VKI_SI_USER 0 /* Sent by kill(2) */
++#define VKI_SI_QUEUE -1 /* Sent by the sigqueue(2) */
++#define VKI_SI_TIMER -2 /* Generated by expiration of a timer */
++ /* set by timer_settime(2) */
++#define VKI_SI_ASYNCIO -3 /* Generated by completion of an */
++ /* asynchronous I/O signal */
++#define VKI_SI_MESGQ -4 /* Generated by arrival of a message on */
++ /* an empty message queue */
++
++#define VKI_SI_LWP -5 /* Generated by _lwp_kill(2) */
++#define VKI_SI_NOINFO 32767 /* No signal specific info available */
++
++//----------------------------------------------------------------------
++// From poll.h
++//----------------------------------------------------------------------
++
++typedef unsigned int vki_nfds_t;
++
++struct vki_pollfd {
++ int fd; /* file descriptor */
++ short events; /* events to look for */
++ short revents; /* events returned */
++};
++
++#define VKI_POLLIN 0x0001
++#define VKI_POLLPRI 0x0002
++#define VKI_POLLOUT 0x0004
++#define VKI_POLLRDNORM 0x0040
++#define VKI_POLLWRNORM VKI_POLLOUT
++#define VKI_POLLRDBAND 0x0080
++#define VKI_POLLWRBAND 0x0100
++
++#define VKI_POLLERR 0x0008
++#define VKI_POLLHUP 0x0010
++#define VKI_POLLNVAL 0x0020
++
++#define VKI_INFTIM -1
++
++//----------------------------------------------------------------------
++// From sys/uio.h
++//----------------------------------------------------------------------
++
++struct vki_iovec {
++ void *iov_base; /* Base address. */
++ vki_size_t iov_len; /* Length. */
++};
++
++enum vki_uio_rw { VKI_UIO_READ, VKI_UIO_WRITE };
++
++enum vki_uio_seg {
++ VKI_UIO_USERSPACE, /* from user data space */
++ VKI_UIO_SYSSPACE /* from system space */
++};
++
++#define VKI_UIO_MAXIOV 1024 /* max 1K of iov's */
++
++//----------------------------------------------------------------------
++// From sys/socket.h
++//----------------------------------------------------------------------
++
++typedef vki___sa_family_t vki_sa_family_t;
++
++typedef vki___socklen_t vki_socklen_t;
++
++#define VKI_SOCK_STREAM 1 /* stream socket */
++#define VKI_SOCK_DGRAM 2 /* datagram socket */
++#define VKI_SOCK_RAW 3 /* raw-protocol interface */
++#define VKI_SOCK_RDM 4 /* reliably-delivered message */
++#define VKI_SOCK_SEQPACKET 5 /* sequenced packet stream */
++#define VKI_SOCK_CONN_DGRAM 6 /* connection-orientated datagram */
++#define VKI_SOCK_DCCP VKI_SOCK_CONN_DGRAM
++
++#define VKI_SOCK_CLOEXEC 0x10000000 /* set close on exec on socket */
++#define VKI_SOCK_NONBLOCK 0x20000000 /* set non blocking i/o socket */
++#define VKI_SOCK_NOSIGPIPE 0x40000000 /* don't send sigpipe */
++#define VKI_SOCK_FLAGS_MASK 0xf0000000 /* flags mask */
++#define VKI_SO_DEBUG 0x0001 /* turn on debugging info recording */
++#define VKI_SO_ACCEPTCONN 0x0002 /* socket has had listen() */
++#define VKI_SO_REUSEADDR 0x0004 /* allow local address reuse */
++#define VKI_SO_KEEPALIVE 0x0008 /* keep connections alive */
++#define VKI_SO_DONTROUTE 0x0010 /* just use interface addresses */
++#define VKI_SO_BROADCAST 0x0020 /* permit sending of broadcast msgs */
++#define VKI_SO_USELOOPBACK 0x0040 /* bypass hardware when possible */
++#define VKI_SO_LINGER 0x0080 /* linger on close if data present */
++#define VKI_SO_OOBINLINE 0x0100 /* leave received OOB data in line */
++#define VKI_SO_REUSEPORT 0x0200 /* allow local address & port reuse */
++/* SO_OTIMESTAMP 0x0400 */
++#define VKI_SO_NOSIGPIPE 0x0800 /* no SIGPIPE from EPIPE */
++#define VKI_SO_ACCEPTFILTER 0x1000 /* there is an accept filter */
++#define VKI_SO_TIMESTAMP 0x2000 /* timestamp received dgram traffic */
++#define VKI_SO_RERROR 0x4000 /* Keep track of receive errors */
++
++#define VKI_SO_SNDBUF 0x1001 /* send buffer size */
++#define VKI_SO_RCVBUF 0x1002 /* receive buffer size */
++#define VKI_SO_SNDLOWAT 0x1003 /* send low-water mark */
++#define VKI_SO_RCVLOWAT 0x1004 /* receive low-water mark */
++/* SO_OSNDTIMEO 0x1005 */
++/* SO_ORCVTIMEO 0x1006 */
++#define VKI_SO_ERROR 0x1007 /* get error status and clear */
++#define VKI_SO_TYPE 0x1008 /* get socket type */
++#define VKI_SO_OVERFLOWED 0x1009 /* datagrams: return packets dropped */
++
++#define VKI_SO_NOHEADER 0x100a /* user supplies no header to kernel;
++ * kernel removes header and supplies
++ * payload
++ */
++#define VKI_SO_SNDTIMEO 0x100b /* send timeout */
++
++#define VKI_SO_RCVTIMEO 0x100c /* receive timeout */
++/*
++ * Structure used for manipulating linger option.
++ */
++
++struct vki_linger {
++ int l_onoff; /* option on/off */
++ int l_linger; /* linger time in seconds */
++};
++
++struct vki_accept_filter_arg {
++ char af_name[16];
++ char af_arg[256-16];
++};
++
++#define VKI_SOL_SOCKET 0xffff /* options for socket level */
++
++/*
++ * Address families.
++ */
++#define VKI_AF_UNSPEC 0 /* unspecified */
++#define VKI_AF_LOCAL 1 /* local to host */
++#define VKI_AF_UNIX VKI_AF_LOCAL /* backward compatibility */
++#define VKI_AF_INET 2 /* internetwork: UDP, TCP, etc. */
++#define VKI_AF_IMPLINK 3 /* arpanet imp addresses */
++#define VKI_AF_PUP 4 /* pup protocols: e.g. BSP */
++#define VKI_AF_CHAOS 5 /* mit CHAOS protocols */
++#define VKI_AF_NS 6 /* XEROX NS protocols */
++#define VKI_AF_ISO 7 /* ISO protocols */
++#define VKI_AF_OSI VKI_AF_ISO
++#define VKI_AF_ECMA 8 /* european computer manufacturers */
++#define VKI_AF_DATAKIT 9 /* datakit protocols */
++#define VKI_AF_CCITT 10 /* CCITT protocols, X.25 etc */
++
++#define VKI_AF_SNA 11 /* IBM SNA */
++#define VKI_AF_DECnet 12 /* DECnet */
++#define VKI_AF_DLI 13 /* DEC Direct data link interface */
++#define VKI_AF_LAT 14 /* LAT */
++#define VKI_AF_HYLINK 15 /* NSC Hyperchannel */
++#define VKI_AF_APPLETALK 16 /* Apple Talk */
++#define VKI_AF_OROUTE 17 /* Internal Routing Protocol */
++#define VKI_AF_LINK 18 /* Link layer interface */
++#define VKI_pseudo_AF_XTP 19 /* eXpress Transfer Protocol (no AF) */
++#define VKI_AF_COIP 20 /* connection-oriented IP, aka ST II */
++#define VKI_AF_CNT 21 /* Computer Network Technology */
++#define VKI_pseudo_AF_RTIP 22 /* Help Identify RTIP packets */
++#define VKI_AF_IPX 23 /* Novell Internet Protocol */
++#define VKI_AF_INET6 24 /* IP version 6 */
++#define VKI_pseudo_AF_PIP 25 /* Help Identify PIP packets */
++#define VKI_AF_ISDN 26 /* Integrated Services Digital Network*/
++#define VKI_AF_E164 VKI_AF_ISDN /* CCITT E.164 recommendation */
++#define VKI_AF_NATM 27 /* native ATM access */
++#define VKI_AF_ARP 28 /* (rev.) addr. res. prot. (RFC 826) */
++#define VKI_pseudo_AF_KEY 29 /* Internal key management protocol */
++#define VKI_pseudo_AF_HDRCMPLT 30 /* Used by BPF to not rewrite hdrs
++ in interface output routine */
++
++//#define VKI_AF_BLUETOOTH 31 /* Bluetooth: HCI, SCO, L2CAP, RFCOMM */
++
++#define VKI_AF_IEEE80211 32 /* IEEE80211 */
++#define VKI_AF_MPLS 33 /* MultiProtocol Label Switching */
++#define VKI_AF_ROUTE 34 /* Internal Routing Protocol */
++#define VKI_AF_CAN 35
++#define VKI_AF_ETHER 36
++#define VKI_AF_MAX 37
++
++struct vki_sockaddr {
++ vki_uint8_t sa_len; /* total length */
++ vki_sa_family_t sa_family; /* address family */
++ char sa_data[14]; /* actually longer; address value */
++};
++
++#define VKI__SS_MAXSIZE 128
++#define VKI__SS_ALIGNSIZE (sizeof(vki_int64_t))
++#define VKI__SS_PAD1SIZE (VKI__SS_ALIGNSIZE - 2)
++#define VKI__SS_PAD2SIZE (VKI__SS_MAXSIZE - 2 - \
++ VKI__SS_PAD1SIZE - VKI__SS_ALIGNSIZE)
++
++struct vki_sockaddr_storage {
++ vki_uint8_t ss_len; /* address length */
++ vki_sa_family_t ss_family; /* address family */
++ char __ss_pad1[VKI__SS_PAD1SIZE];
++ vki_int64_t __ss_align;/* force desired structure storage alignment */
++ char __ss_pad2[VKI__SS_PAD2SIZE];
++};
++
++#define vki_sstosa(__ss) ((struct vki_sockaddr *)(__ss))
++#define vki_sstocsa(__ss) ((const struct vki_sockaddr *)(__ss))
++
++#define VKI_PF_UNSPEC VKI_AF_UNSPEC
++#define VKI_PF_LOCAL VKI_AF_LOCAL
++#define VKI_PF_UNIX VKI_PF_LOCAL /* backward compatibility */
++#define VKI_PF_INET VKI_AF_INET
++#define VKI_PF_IMPLINK VKI_AF_IMPLINK
++#define VKI_PF_PUP VKI_AF_PUP
++#define VKI_PF_CHAOS VKI_AF_CHAOS
++#define VKI_PF_NS VKI_AF_NS
++#define VKI_PF_ISO VKI_AF_ISO
++#define VKI_PF_OSI VKI_AF_ISO
++#define VKI_PF_ECMA VKI_AF_ECMA
++#define VKI_PF_DATAKIT VKI_AF_DATAKIT
++#define VKI_PF_CCITT VKI_AF_CCITT
++#define VKI_PF_SNA VKI_AF_SNA
++#define VKI_PF_DECnet VKI_AF_DECnet
++/*
++ * NOTE(review): a copy-paste slip duplicated nine PF_* aliases here —
++ * VKI_PF_CHAOS, VKI_PF_NS, VKI_PF_ISO, VKI_PF_OSI, VKI_PF_ECMA,
++ * VKI_PF_DATAKIT, VKI_PF_CCITT, VKI_PF_SNA and VKI_PF_DECnet are all
++ * already defined identically a few lines above. Identical re-#defines
++ * are legal C and harmless, but the duplicates are dropped here to keep
++ * one definition per name, matching NetBSD's <sys/socket.h>, which
++ * defines each PF_* alias exactly once.
++ */
++#define VKI_PF_DLI VKI_AF_DLI
++#define VKI_PF_LAT VKI_AF_LAT
++#define VKI_PF_HYLINK VKI_AF_HYLINK
++#define VKI_PF_APPLETALK VKI_AF_APPLETALK
++#define VKI_PF_OROUTE VKI_AF_OROUTE
++#define VKI_PF_LINK VKI_AF_LINK
++
++#define VKI_PF_XTP VKI_pseudo_AF_XTP /* really just proto family, no AF */
++
++#define VKI_PF_COIP VKI_AF_COIP
++#define VKI_PF_CNT VKI_AF_CNT
++#define VKI_PF_INET6 VKI_AF_INET6
++#define VKI_PF_IPX VKI_AF_IPX /* same format as AF_NS */
++
++#define VKI_PF_RTIP VKI_pseudo_AF_RTIP /* same format as AF_INET */
++#define VKI_PF_PIP VKI_pseudo_AF_PIP
++
++#define VKI_PF_ISDN VKI_AF_ISDN /* same as E164 */
++#define VKI_PF_E164 VKI_AF_E164
++#define VKI_PF_NATM VKI_AF_NATM
++#define VKI_PF_ARP VKI_AF_ARP
++
++#define VKI_PF_KEY VKI_pseudo_AF_KEY /* like PF_ROUTE, only for key mgmt */
++
++//#define VKI_PF_BLUETOOTH VKI_AF_BLUETOOTH /* disabled: VKI_AF_BLUETOOTH is commented out above */
++#define VKI_PF_MPLS VKI_AF_MPLS
++#define VKI_PF_ROUTE VKI_AF_ROUTE
++#define VKI_PF_CAN VKI_AF_CAN
++#define VKI_PF_ETHER VKI_AF_ETHER
++
++#define VKI_PF_MAX VKI_AF_MAX
++
++struct vki_sockcred {
++ vki_pid_t sc_pid; /* process id */
++ vki_uid_t sc_uid; /* real user id */
++ vki_uid_t sc_euid; /* effective user id */
++ vki_gid_t sc_gid; /* real group id */
++ vki_gid_t sc_egid; /* effective group id */
++ int sc_ngroups; /* number of supplemental groups */
++ vki_gid_t sc_groups[1]; /* variable length */
++};
++
++#define VKI_SOCKCREDSIZE(ngrps) \
++ (/*CONSTCOND*/sizeof(struct vki_sockcred) + (sizeof(vki_gid_t) * \
++ ((ngrps) ? ((ngrps) - 1) : 0)))
++
++struct vki_kinfo_pcb {
++ vki_uint64_t ki_pcbaddr; /* PTR: pcb addr */
++ vki_uint64_t ki_ppcbaddr; /* PTR: ppcb addr */
++ vki_uint64_t ki_sockaddr; /* PTR: socket addr */
++
++ vki_uint32_t ki_family; /* INT: protocol family */
++ vki_uint32_t ki_type; /* INT: socket type */
++ vki_uint32_t ki_protocol; /* INT: protocol */
++ vki_uint32_t ki_pflags; /* INT: generic protocol flags */
++
++ vki_uint32_t ki_sostate; /* INT: socket state */
++ vki_uint32_t ki_prstate; /* INT: protocol state */
++ vki_int32_t ki_tstate; /* INT: tcp state */
++ vki_uint32_t ki_tflags; /* INT: tcp flags */
++
++ vki_uint64_t ki_rcvq; /* U_LONG: receive queue len */
++ vki_uint64_t ki_sndq; /* U_LONG: send queue len */
++
++ union {
++ struct vki_sockaddr _kis_src; /* STRUCT: local address */
++ char _kis_pad[256 + 8]; /* pad to max addr length */
++ } ki_s;
++ union {
++ struct vki_sockaddr _kid_dst; /* STRUCT: remote address */
++ char _kid_pad[256 + 8]; /* pad to max addr length */
++ } ki_d;
++
++ vki_uint64_t ki_inode; /* INO_T: fake inode number */
++ vki_uint64_t ki_vnode; /* PTR: if associated with file */
++ vki_uint64_t ki_conn; /* PTR: control block of peer */
++ vki_uint64_t ki_refs; /* PTR: referencing socket */
++ vki_uint64_t ki_nextref; /* PTR: link in refs list */
++};
++
++#define ki_src ki_s._kis_src
++#define ki_dst ki_d._kid_dst
++#define ki_spad ki_s._kis_pad
++#define ki_dpad ki_d._kid_pad
++
++#define VKI_PCB_SLOP 20
++#define VKI_PCB_ALL 0
++
++#define VKI_NET_RT_DUMP 1 /* dump; may limit to a.f. */
++#define VKI_NET_RT_FLAGS 2 /* by flags, e.g. RESOLVING */
++#define VKI_NET_RT_OOOIFLIST 3 /* old NET_RT_IFLIST (pre 1.5) */
++#define VKI_NET_RT_OOIFLIST 4 /* old NET_RT_IFLIST (pre-64bit time) */
++#define VKI_NET_RT_OIFLIST 5 /* old NET_RT_IFLIST (pre 8.0) */
++#define VKI_NET_RT_IFLIST 6 /* survey interface list */
++
++#ifndef VKI_SOMAXCONN
++#define VKI_SOMAXCONN 128
++#endif
++
++struct vki_msghdr {
++ void *msg_name; /* optional address */
++ vki_socklen_t msg_namelen; /* size of address */
++ struct vki_iovec *msg_iov; /* scatter/gather array */
++ int msg_iovlen; /* # elements in msg_iov */
++ void *msg_control; /* ancillary data, see below */
++ vki_socklen_t msg_controllen; /* ancillary data buffer len */
++ int msg_flags; /* flags on received message */
++};
++
++#define VKI_MSG_OOB 0x0001 /* process out-of-band data */
++#define VKI_MSG_PEEK 0x0002 /* peek at incoming message */
++#define VKI_MSG_DONTROUTE 0x0004 /* send without using routing tables */
++#define VKI_MSG_EOR 0x0008 /* data completes record */
++#define VKI_MSG_TRUNC 0x0010 /* data discarded before delivery */
++#define VKI_MSG_CTRUNC 0x0020 /* control data lost before delivery */
++#define VKI_MSG_WAITALL 0x0040 /* wait for full request or error */
++#define VKI_MSG_DONTWAIT 0x0080 /* this message should be nonblocking */
++#define VKI_MSG_BCAST 0x0100 /* this message was rcvd using link-level brdcst */
++#define VKI_MSG_MCAST 0x0200 /* this message was rcvd using link-level mcast */
++#define VKI_MSG_NOSIGNAL 0x0400 /* do not generate SIGPIPE on EOF */
++
++#define VKI_MSG_CMSG_CLOEXEC 0x0800 /* close on exec receiving fd */
++#define VKI_MSG_NBIO 0x1000 /* use non-blocking I/O */
++#define VKI_MSG_WAITFORONE 0x2000 /* recvmmsg() wait for one message */
++#define VKI_MSG_NOTIFICATION 0x4000 /* SCTP notification */
++
++struct vki_mmsghdr {
++ struct vki_msghdr msg_hdr;
++ unsigned int msg_len;
++};
++
++#define VKI_MSG_USERFLAGS 0x0ffffff
++#define VKI_MSG_NAMEMBUF 0x1000000 /* msg_name is an mbuf */
++#define VKI_MSG_CONTROLMBUF 0x2000000 /* msg_control is an mbuf */
++#define VKI_MSG_IOVUSRSPACE 0x4000000 /* msg_iov is in user space */
++#define VKI_MSG_LENUSRSPACE 0x8000000 /* address length is in user space */
++
++struct vki_cmsghdr {
++ vki_socklen_t cmsg_len; /* data byte count, including hdr */
++ int cmsg_level; /* originating protocol */
++ int cmsg_type; /* protocol-specific type */
++/* followed by u_char cmsg_data[]; */
++};
++
++#define VKI___CMSG_ALIGN(n) (((n) + VKI___ALIGNBYTES) & ~VKI___ALIGNBYTES)
++
++#define VKI_CMSG_ALIGN(n) VKI___CMSG_ALIGN(n)
++
++#define VKI___CMSG_ASIZE VKI___CMSG_ALIGN(sizeof(struct vki_cmsghdr))
++#define VKI___CMSG_MSGNEXT(cmsg) \
++ (VKI___CASTV(char *, cmsg) + VKI___CMSG_ALIGN((cmsg)->cmsg_len))
++#define VKI___CMSG_MSGEND(mhdr) \
++ (VKI___CASTV(char *, (mhdr)->msg_control) + (mhdr)->msg_controllen)
++
++/* given pointer to struct cmsghdr, return pointer to data */
++#define VKI_CMSG_DATA(cmsg) (VKI___CASTV(vki_u_char *, cmsg) + VKI___CMSG_ASIZE)
++#define VKI_CCMSG_DATA(cmsg) (VKI___CASTCV(const vki_u_char *, cmsg) + VKI___CMSG_ASIZE)
++
++#define VKI_CMSG_NXTHDR(mhdr, cmsg) \
++ VKI___CASTV(struct vki_cmsghdr *, \
++ VKI___CMSG_MSGNEXT(cmsg) + VKI___CMSG_ASIZE > VKI___CMSG_MSGEND(mhdr) ? 0 : \
++ VKI___CMSG_MSGNEXT(cmsg))
++
++#define VKI_CMSG_FIRSTHDR(mhdr) \
++ VKI___CASTV(struct vki_cmsghdr *, \
++ (mhdr)->msg_controllen < sizeof(struct vki_cmsghdr) ? 0 : \
++ (mhdr)->msg_control)
++
++#define VKI_CMSG_SPACE(l) (VKI___CMSG_ASIZE + VKI___CMSG_ALIGN(l))
++#define VKI_CMSG_LEN(l) (VKI___CMSG_ASIZE + (l))
++
++
++/* "Socket"-level control message types: */
++#define VKI_SCM_RIGHTS 0x01 /* access rights (array of int) */
++
++#define VKI_SCM_TIMESTAMP 0x08 /* timestamp (struct timeval) */
++#define VKI_SCM_CREDS 0x10 /* credentials (struct sockcred) */
++
++#define VKI_SHUT_RD 0 /* Disallow further receives. */
++#define VKI_SHUT_WR 1 /* Disallow further sends. */
++#define VKI_SHUT_RDWR 2 /* Disallow further sends/receives. */
++
++//----------------------------------------------------------------------
++// From sys/signal.h
++//----------------------------------------------------------------------
++
++#define VKI__NSIG 64
++#define VKI_NSIG VKI__NSIG
++#define _VKI_NSIG VKI_NSIG // linux compat
++#define _VKI_NSIG_BPW 32 // linux compat? NOTE(review): BPW * WORDS = 128 bits...
++#define _VKI_NSIG_WORDS 4 // ...but VKI__NSIG is 64 — confirm against vki_sigset_t
++
++#define VKI_SIGHUP 1 /* hangup */
++#define VKI_SIGINT 2 /* interrupt */
++#define VKI_SIGQUIT 3 /* quit */
++#define VKI_SIGILL 4 /* illegal instruction (not reset when caught) */
++#define VKI_SIGTRAP 5 /* trace trap (not reset when caught) */
++#define VKI_SIGABRT 6 /* abort() */
++#define VKI_SIGIOT VKI_SIGABRT /* compatibility */
++#define VKI_SIGEMT 7 /* EMT instruction */
++#define VKI_SIGFPE 8 /* floating point exception */
++#define VKI_SIGKILL 9 /* kill (cannot be caught or ignored) */
++#define VKI_SIGBUS 10 /* bus error */
++#define VKI_SIGSEGV 11 /* segmentation violation */
++#define VKI_SIGSYS 12 /* bad argument to system call */
++#define VKI_SIGPIPE 13 /* write on a pipe with no one to read it */
++#define VKI_SIGALRM 14 /* alarm clock */
++#define VKI_SIGTERM 15 /* software termination signal from kill */
++#define VKI_SIGURG 16 /* urgent condition on IO channel */
++#define VKI_SIGSTOP 17 /* sendable stop signal not from tty */
++#define VKI_SIGTSTP 18 /* stop signal from tty */
++#define VKI_SIGCONT 19 /* continue a stopped process */
++#define VKI_SIGCHLD 20 /* to parent on child stop or exit */
++#define VKI_SIGTTIN 21 /* to readers pgrp upon background tty read */
++#define VKI_SIGTTOU 22 /* like TTIN for output if (tp->t_local&LTOSTOP) */
++#define VKI_SIGIO 23 /* input/output possible signal */
++#define VKI_SIGXCPU 24 /* exceeded CPU time limit */
++#define VKI_SIGXFSZ 25 /* exceeded file size limit */
++#define VKI_SIGVTALRM 26 /* virtual time alarm */
++#define VKI_SIGPROF 27 /* profiling time alarm */
++#define VKI_SIGWINCH 28 /* window size changes */
++#define VKI_SIGINFO 29 /* information request */
++#define VKI_SIGUSR1 30 /* user defined signal 1 */
++#define VKI_SIGUSR2 31 /* user defined signal 2 */
++#define VKI_SIGPWR 32 /* power fail/restart (not reset when caught) */
++#define VKI_SIGRTMIN 33
++#define VKI_SIGRTMAX 63
++
++#define VKI_SIG_DFL ((void (*)(int)) 0)
++#define VKI_SIG_IGN ((void (*)(int)) 1)
++#define VKI_SIG_ERR ((void (*)(int)) -1)
++#define VKI_SIG_HOLD ((void (*)(int)) 3)
++
++struct vki_sigaction {
++ union {
++ void (*_sa_handler)(int);
++ void (*_sa_sigaction)(int, vki_siginfo_t *, void *);
++ } _sa_u; /* signal handler */
++ vki_sigset_t sa_mask; /* signal mask to apply */
++ int sa_flags; /* see signal options below */
++};
++
++typedef struct vki_sigaction vki_sigaction_toK_t; // compat with linux
++typedef struct vki_sigaction vki_sigaction_fromK_t; // compat with linux
++
++#define vki_sa_handler _sa_u._sa_handler
++#define ksa_handler vki_sa_handler
++
++#define VKI_SA_RESTORER 0 /* linux compat, not supported */
++
++#define VKI_SA_ONSTACK 0x0001 /* take signal on signal stack */
++#define VKI_SA_RESTART 0x0002 /* restart system call on signal return */
++#define VKI_SA_RESETHAND 0x0004 /* reset to SIG_DFL when taking signal */
++#define VKI_SA_NODEFER 0x0010 /* don't mask the signal we're delivering */
++
++#define VKI_SA_ONESHOT VKI_SA_RESETHAND /* linux compat */
++#define VKI_SA_NOMASK VKI_SA_NODEFER /* linux compat */
++
++#define VKI_SA_NOCLDSTOP 0x0008 /* do not generate SIGCHLD on child stop */
++#define VKI_SA_NOCLDWAIT 0x0020 /* do not generate zombies on unwaited child */
++
++#define VKI_SA_SIGINFO 0x0040 /* take sa_sigaction handler */
++
++#define VKI_SA_NOKERNINFO 0x0080 /* siginfo does not print kernel info on tty */
++
++#define VKI_SIG_BLOCK 1 /* block specified signal set */
++#define VKI_SIG_UNBLOCK 2 /* unblock specified signal set */
++#define VKI_SIG_SETMASK 3 /* set specified signal set */
++
++#define VKI_SS_INIT
++
++#define VKI_SS_ONSTACK 0x0001 /* take signals on alternate stack */
++#define VKI_SS_DISABLE 0x0004 /* disable taking signals on alternate stack */
++
++#define VKI_MINSIGSTKSZ 8192 /* minimum allowable stack */
++#define VKI_SIGSTKSZ (VKI_MINSIGSTKSZ + 32768) /* recommended stack size */
++
++struct vki_sigstack {
++ void *ss_sp; /* signal stack pointer */
++ int ss_onstack; /* current status */
++};
++
++#define VKI_BADSIG VKI_SIG_ERR
++
++struct vki_sigevent {
++ int sigev_notify;
++ int sigev_signo;
++ union vki_sigval sigev_value;
++ void (*sigev_notify_function)(union vki_sigval);
++ void /* pthread_attr_t */ *sigev_notify_attributes;
++};
++
++#define VKI_SIGEV_NONE 0
++#define VKI_SIGEV_SIGNAL 1
++#define VKI_SIGEV_THREAD 2
++
++#define VKI_SIGEV_SA 3
++
++//----------------------------------------------------------------------
++// From sys/ucontext.h
++//----------------------------------------------------------------------
++
++typedef struct vki___ucontext vki_ucontext_t;
++
++struct vki___ucontext {
++ unsigned int uc_flags; /* properties */
++ vki_ucontext_t * uc_link; /* context to resume */
++ vki_sigset_t uc_sigmask; /* signals blocked in this context */
++ vki_stack_t uc_stack; /* the stack used by this context */
++ vki_mcontext_t uc_mcontext; /* machine state */
++#if defined(VKI__UC_MACHINE_PAD)
++ long __uc_pad[VKI__UC_MACHINE_PAD];
++#endif
++};
++
++#define vki_ucontext vki___ucontext // compat with linux
++
++//----------------------------------------------------------------------
++// From sys/stat.h
++//----------------------------------------------------------------------
++
++struct vki_stat {
++ vki_dev_t st_dev; /* inode's device */
++ vki_mode_t st_mode; /* inode protection mode */
++ vki_ino_t st_ino; /* inode's number */
++ vki_nlink_t st_nlink; /* number of hard links */
++ vki_uid_t st_uid; /* user ID of the file's owner */
++ vki_gid_t st_gid; /* group ID of the file's group */
++ vki_dev_t st_rdev; /* device type */
++ struct vki_timespec st_atim; /* time of last access */
++ struct vki_timespec st_mtim; /* time of last data modification */
++ struct vki_timespec st_ctim; /* time of last file status change */
++ struct vki_timespec st_birthtim; /* time of creation */
++ vki_off_t st_size; /* file size, in bytes */
++ vki_blkcnt_t st_blocks; /* blocks allocated for file */
++ vki_blksize_t st_blksize; /* optimal blocksize for I/O */
++ vki_uint32_t st_flags; /* user defined flags for file */
++ vki_uint32_t st_gen; /* file generation number */
++ vki_uint32_t st_spare[2];
++};
++
++#define st_atime st_atim.tv_sec
++#define st_mtime st_mtim.tv_sec
++#define st_ctime st_ctim.tv_sec
++#define st_birthtime st_birthtim.tv_sec
++
++#define st_atimespec st_atim
++#define st_atimensec st_atim.tv_nsec
++#define st_atime_nsec st_atimensec // linux compat
++#define st_mtimespec st_mtim
++#define st_mtimensec st_mtim.tv_nsec
++#define st_mtime_nsec st_mtimensec // linux compat
++#define st_ctimespec st_ctim
++#define st_ctimensec st_ctim.tv_nsec
++#define st_ctime_nsec st_ctimensec // linux compat
++#define st_birthtimespec st_birthtim
++#define st_birthtimensec st_birthtimespec.tv_nsec
++#define st_birthtime_nsec st_birthtimensec // linux compat
++
++#define VKI_S_ISUID 0004000 /* set user id on execution */
++#define VKI_S_ISGID 0002000 /* set group id on execution */
++
++#define VKI_S_ISTXT 0001000 /* sticky bit */
++
++#define VKI_S_IRWXU 0000700 /* RWX mask for owner */
++#define VKI_S_IRUSR 0000400 /* R for owner */
++#define VKI_S_IWUSR 0000200 /* W for owner */
++#define VKI_S_IXUSR 0000100 /* X for owner */
++
++#define VKI_S_IREAD VKI_S_IRUSR
++#define VKI_S_IWRITE VKI_S_IWUSR
++#define VKI_S_IEXEC VKI_S_IXUSR
++#define VKI_S_IRWXG 0000070 /* RWX mask for group */
++#define VKI_S_IRGRP 0000040 /* R for group */
++#define VKI_S_IWGRP 0000020 /* W for group */
++#define VKI_S_IXGRP 0000010 /* X for group */
++
++#define VKI_S_IRWXO 0000007 /* RWX mask for other */
++#define VKI_S_IROTH 0000004 /* R for other */
++#define VKI_S_IWOTH 0000002 /* W for other */
++#define VKI_S_IXOTH 0000001 /* X for other */
++
++#define VKI__S_IFMT 0170000 /* type of file mask */
++#define VKI__S_IFIFO 0010000 /* named pipe (fifo) */
++#define VKI__S_IFCHR 0020000 /* character special */
++#define VKI__S_IFDIR 0040000 /* directory */
++#define VKI__S_IFBLK 0060000 /* block special */
++#define VKI__S_IFREG 0100000 /* regular */
++#define VKI__S_IFLNK 0120000 /* symbolic link */
++#define VKI__S_ISVTX 0001000 /* save swapped text even after use */
++#define VKI__S_IFSOCK 0140000 /* socket */
++#define VKI__S_IFWHT 0160000 /* whiteout */
++
++#define VKI__S_ARCH1 0200000 /* Archive state 1, ls -l shows 'a' */
++#define VKI__S_ARCH2 0400000 /* Archive state 2, ls -l shows 'A' */
++
++#define VKI_S_IFMT VKI__S_IFMT
++#define VKI_S_IFIFO VKI__S_IFIFO
++#define VKI_S_IFCHR VKI__S_IFCHR
++#define VKI_S_IFDIR VKI__S_IFDIR
++#define VKI_S_IFBLK VKI__S_IFBLK
++#define VKI_S_IFREG VKI__S_IFREG
++#define VKI_S_IFLNK VKI__S_IFLNK
++#define VKI_S_ISVTX VKI__S_ISVTX
++
++#define VKI_S_IFSOCK VKI__S_IFSOCK
++
++#define VKI_S_IFWHT VKI__S_IFWHT
++
++#define VKI_S_ARCH1 VKI__S_ARCH1
++#define VKI_S_ARCH2 VKI__S_ARCH2
++
++#define VKI_S_ISDIR(m) (((m) & VKI__S_IFMT) == VKI__S_IFDIR) /* directory */
++#define VKI_S_ISCHR(m) (((m) & VKI__S_IFMT) == VKI__S_IFCHR) /* char special */
++#define VKI_S_ISBLK(m) (((m) & VKI__S_IFMT) == VKI__S_IFBLK) /* block special */
++#define VKI_S_ISREG(m) (((m) & VKI__S_IFMT) == VKI__S_IFREG) /* regular file */
++#define VKI_S_ISFIFO(m) (((m) & VKI__S_IFMT) == VKI__S_IFIFO) /* fifo */
++
++#define VKI_S_ISLNK(m) (((m) & VKI__S_IFMT) == VKI__S_IFLNK) /* symbolic link */
++
++#define VKI_S_ISSOCK(m) (((m) & VKI__S_IFMT) == VKI__S_IFSOCK) /* socket */
++
++#define VKI_S_ISWHT(m) (((m) & VKI__S_IFMT) == VKI__S_IFWHT) /* whiteout */
++
++#define VKI_ACCESSPERMS (VKI_S_IRWXU|VKI_S_IRWXG|VKI_S_IRWXO) /* 0777 */
++ /* 7777 */
++#define VKI_ALLPERMS (VKI_S_ISUID|VKI_S_ISGID|VKI_S_ISTXT|VKI_S_IRWXU|VKI_S_IRWXG|VKI_S_IRWXO)
++ /* 0666 */
++#define VKI_DEFFILEMODE (VKI_S_IRUSR|VKI_S_IWUSR|VKI_S_IRGRP|VKI_S_IWGRP|VKI_S_IROTH|VKI_S_IWOTH)
++
++#define VKI_S_BLKSIZE 512 /* block size used in the stat struct */
++
++#define VKI_UF_SETTABLE 0x0000ffff /* mask of owner changeable flags */
++#define VKI_UF_NODUMP 0x00000001 /* do not dump file */
++#define VKI_UF_IMMUTABLE 0x00000002 /* file may not be changed */
++#define VKI_UF_APPEND 0x00000004 /* writes to file may only append */
++#define VKI_UF_OPAQUE 0x00000008 /* directory is opaque wrt. union */
++
++#define VKI_SF_SETTABLE 0xffff0000 /* mask of superuser changeable flags */
++#define VKI_SF_ARCHIVED 0x00010000 /* file is archived */
++#define VKI_SF_IMMUTABLE 0x00020000 /* file may not be changed */
++#define VKI_SF_APPEND 0x00040000 /* writes to file may only append */
++/* VKI_SF_NOUNLINK 0x00100000 [NOT IMPLEMENTED] */
++#define VKI_SF_SNAPSHOT 0x00200000 /* snapshot inode */
++#define VKI_SF_LOG 0x00400000 /* WAPBL log file inode */
++#define VKI_SF_SNAPINVAL 0x00800000 /* snapshot is invalid */
++
++#define VKI_UTIME_NOW ((1 << 30) - 1)
++#define VKI_UTIME_OMIT ((1 << 30) - 2)
++
++//----------------------------------------------------------------------
++// From sys/fcntl.h
++//----------------------------------------------------------------------
++
++#define VKI_O_RDONLY 0x00000000 /* open for reading only */
++#define VKI_O_WRONLY 0x00000001 /* open for writing only */
++#define VKI_O_RDWR 0x00000002 /* open for reading and writing */
++#define VKI_O_ACCMODE 0x00000003 /* mask for above modes */
++
++#define VKI_FREAD 0x00000001
++#define VKI_FWRITE 0x00000002
++
++#define VKI_O_NONBLOCK 0x00000004 /* no delay */
++#define VKI_O_APPEND 0x00000008 /* set append mode */
++
++#define VKI_O_SHLOCK 0x00000010 /* open with shared file lock */
++#define VKI_O_EXLOCK 0x00000020 /* open with exclusive file lock */
++#define VKI_O_ASYNC 0x00000040 /* signal pgrp when data ready */
++
++#define VKI_O_SYNC 0x00000080 /* synchronous writes */
++
++#define VKI_O_NOFOLLOW 0x00000100 /* don't follow symlinks on the last */
++
++#define VKI_O_CREAT 0x00000200 /* create if nonexistent */
++#define VKI_O_TRUNC 0x00000400 /* truncate to zero length */
++#define VKI_O_EXCL 0x00000800 /* error if already exists */
++
++#define VKI_O_NOCTTY 0x00008000 /* don't assign controlling terminal */
++
++#define VKI_O_DSYNC 0x00010000 /* write: I/O data completion */
++#define VKI_O_RSYNC 0x00020000 /* read: I/O completion as for write */
++
++#define VKI_O_ALT_IO 0x00040000 /* use alternate i/o semantics */
++#define VKI_O_DIRECT 0x00080000 /* direct I/O hint */
++
++#define VKI_O_DIRECTORY 0x00200000 /* fail if not a directory */
++#define VKI_O_CLOEXEC 0x00400000 /* set close on exec */
++
++#define VKI_O_SEARCH 0x00800000 /* skip search permission checks */
++
++#define VKI_O_NOSIGPIPE 0x01000000 /* don't deliver sigpipe */
++#define VKI_O_REGULAR 0x02000000 /* fail if not a regular file */
++
++#define VKI_FAPPEND VKI_O_APPEND /* kernel/compat */
++#define VKI_FASYNC VKI_O_ASYNC /* kernel/compat */
++#define VKI_O_FSYNC VKI_O_SYNC /* compat */
++#define VKI_FNDELAY VKI_O_NONBLOCK /* compat */
++#define VKI_O_NDELAY VKI_O_NONBLOCK /* compat */
++
++#define VKI_F_DUPFD 0 /* duplicate file descriptor */
++#define VKI_F_GETFD 1 /* get file descriptor flags */
++#define VKI_F_SETFD 2 /* set file descriptor flags */
++#define VKI_F_GETFL 3 /* get file status flags */
++#define VKI_F_SETFL 4 /* set file status flags */
++
++#define VKI_F_GETOWN 5 /* get SIGIO/SIGURG proc/pgrp */
++#define VKI_F_SETOWN 6 /* set SIGIO/SIGURG proc/pgrp */
++
++#define VKI_F_GETLK 7 /* get record locking information */
++#define VKI_F_SETLK 8 /* set record locking information */
++#define VKI_F_SETLKW 9 /* F_SETLK; wait if blocked */
++
++#define VKI_F_CLOSEM 10 /* close all fds >= to the one given */
++#define VKI_F_MAXFD 11 /* return the max open fd */
++#define VKI_F_DUPFD_CLOEXEC 12 /* close on exec duplicated fd */
++#define VKI_F_GETNOSIGPIPE 13 /* get SIGPIPE disposition */
++#define VKI_F_SETNOSIGPIPE 14 /* set SIGPIPE disposition */
++
++#define VKI_FD_CLOEXEC 1 /* close-on-exec flag */
++
++#define VKI_F_RDLCK 1 /* shared or read lock */
++#define VKI_F_UNLCK 2 /* unlock */
++#define VKI_F_WRLCK 3 /* exclusive or write lock */
++
++#define VKI_F_PARAM_MASK 0xfff
++#define VKI_F_PARAM_LEN(x) (((x) >> 16) & VKI_F_PARAM_MASK)
++#define VKI_F_PARAM_MAX 4095
++#define VKI_F_FSCTL (int)0x80000000 /* This fcntl goes to the fs */
++#define VKI_F_FSVOID (int)0x40000000 /* no parameters */
++#define VKI_F_FSOUT (int)0x20000000 /* copy out parameter */
++#define VKI_F_FSIN (int)0x10000000 /* copy in parameter */
++#define VKI_F_FSINOUT (VKI_F_FSIN | VKI_F_FSOUT)
++#define VKI_F_FSDIRMASK (int)0x70000000 /* mask for IN/OUT/VOID */
++#define VKI_F_FSPRIV (int)0x00008000 /* command is fs-specific */
++
++/* Encode a filesystem fcntl: direction bits, parameter length, command.
++ * NOTE(review): the original patch passed unprefixed F_FSVOID/F_FSIN/
++ * F_FSOUT/F_FSINOUT here, but this header only defines the VKI_-prefixed
++ * forms (above), so any use of these macros would fail to compile or
++ * silently pick up host <fcntl.h> values. Fixed to use the VKI_ names. */
++#define VKI__FCN(inout, num, len) \
++ (VKI_F_FSCTL | inout | ((len & VKI_F_PARAM_MASK) << 16) | (num))
++#define VKI__FCNO(c) VKI__FCN(VKI_F_FSVOID, (c), 0)
++#define VKI__FCNR(c, t) VKI__FCN(VKI_F_FSIN, (c), (int)sizeof(t))
++#define VKI__FCNW(c, t) VKI__FCN(VKI_F_FSOUT, (c), (int)sizeof(t))
++#define VKI__FCNRW(c, t) VKI__FCN(VKI_F_FSINOUT, (c), (int)sizeof(t))
++
++#define VKI__FCN_FSPRIV(inout, fs, num, len) \
++ (VKI_F_FSCTL | VKI_F_FSPRIV | inout | ((len & VKI_F_PARAM_MASK) << 16) | \
++ (fs) << 8 | (num))
++#define VKI__FCNO_FSPRIV(f, c) VKI__FCN_FSPRIV(VKI_F_FSVOID, (f), (c), 0)
++#define VKI__FCNR_FSPRIV(f, c, t) VKI__FCN_FSPRIV(VKI_F_FSIN, (f), (c), (int)sizeof(t))
++#define VKI__FCNW_FSPRIV(f, c, t) VKI__FCN_FSPRIV(VKI_F_FSOUT, (f), (c), (int)sizeof(t))
++#define VKI__FCNRW_FSPRIV(f, c, t) VKI__FCN_FSPRIV(VKI_F_FSINOUT, (f), (c), (int)sizeof(t))
++
++struct vki_flock {
++ vki_off_t l_start; /* starting offset */
++ vki_off_t l_len; /* len = 0 means until end of file */
++ vki_pid_t l_pid; /* lock owner */
++ short l_type; /* lock type: read/write, etc. */
++ short l_whence; /* type of l_start */
++};
++
++#define VKI_LOCK_SH 0x01 /* shared file lock */
++#define VKI_LOCK_EX 0x02 /* exclusive file lock */
++#define VKI_LOCK_NB 0x04 /* don't block when locking */
++#define VKI_LOCK_UN 0x08 /* unlock file */
++
++/* Always ensure that these are consistent with <stdio.h> and <unistd.h>! */
++#ifndef VKI_SEEK_SET
++#define VKI_SEEK_SET 0 /* set file offset to offset */
++#endif
++
++#ifndef VKI_SEEK_CUR
++#define VKI_SEEK_CUR 1 /* set file offset to current plus offset */
++#endif
++
++#ifndef VKI_SEEK_END
++#define VKI_SEEK_END 2 /* set file offset to EOF plus offset */
++#endif
++
++#define VKI_POSIX_FADV_NORMAL 0 /* default advice / no advice */
++#define VKI_POSIX_FADV_RANDOM 1 /* random access */
++#define VKI_POSIX_FADV_SEQUENTIAL 2 /* sequential access(lower to higher) */
++#define VKI_POSIX_FADV_WILLNEED 3 /* be needed in near future */
++#define VKI_POSIX_FADV_DONTNEED 4 /* not be needed in near future */
++#define VKI_POSIX_FADV_NOREUSE 5 /* be accessed once */
++
++#define VKI_AT_FDCWD -100 /* Use cwd for relative link target */
++#define VKI_AT_EACCESS 0x100 /* Use euid/egid for access checks */
++#define VKI_AT_SYMLINK_NOFOLLOW 0x200 /* Do not follow symlinks */
++#define VKI_AT_SYMLINK_FOLLOW 0x400 /* Follow symlinks */
++#define VKI_AT_REMOVEDIR 0x800 /* Remove directory only */
++
++//----------------------------------------------------------------------
++// From sys/errno.h
++//----------------------------------------------------------------------
++
++#define VKI_EPERM 1 /* Operation not permitted */
++#define VKI_ENOENT 2 /* No such file or directory */
++#define VKI_ESRCH 3 /* No such process */
++#define VKI_EINTR 4 /* Interrupted system call */
++#define VKI_EIO 5 /* Input/output error */
++#define VKI_ENXIO 6 /* Device not configured */
++#define VKI_E2BIG 7 /* Argument list too long */
++#define VKI_ENOEXEC 8 /* Exec format error */
++#define VKI_EBADF 9 /* Bad file descriptor */
++#define VKI_ECHILD 10 /* No child processes */
++#define VKI_EDEADLK 11 /* Resource deadlock avoided */
++#define VKI_ENOMEM 12 /* Cannot allocate memory */
++#define VKI_EACCES 13 /* Permission denied */
++#define VKI_EFAULT 14 /* Bad address */
++#define VKI_ENOTBLK 15 /* Block device required */
++#define VKI_EBUSY 16 /* Device busy */
++#define VKI_EEXIST 17 /* File exists */
++#define VKI_EXDEV 18 /* Cross-device link */
++#define VKI_ENODEV 19 /* Operation not supported by device */
++#define VKI_ENOTDIR 20 /* Not a directory */
++#define VKI_EISDIR 21 /* Is a directory */
++#define VKI_EINVAL 22 /* Invalid argument */
++#define VKI_ENFILE 23 /* Too many open files in system */
++#define VKI_EMFILE 24 /* Too many open files */
++#define VKI_ENOTTY 25 /* Inappropriate ioctl for device */
++#define VKI_ETXTBSY 26 /* Text file busy */
++#define VKI_EFBIG 27 /* File too large */
++#define VKI_ENOSPC 28 /* No space left on device */
++#define VKI_ESPIPE 29 /* Illegal seek */
++#define VKI_EROFS 30 /* Read-only file system */
++#define VKI_EMLINK 31 /* Too many links */
++#define VKI_EPIPE 32 /* Broken pipe */
++#define VKI_EDOM 33 /* Numerical argument out of domain */
++#define VKI_ERANGE 34 /* Result too large or too small */
++#define VKI_EAGAIN 35 /* Resource temporarily unavailable */
++#define VKI_EWOULDBLOCK VKI_EAGAIN /* Operation would block */
++#define VKI_EINPROGRESS 36 /* Operation now in progress */
++#define VKI_EALREADY 37 /* Operation already in progress */
++#define VKI_ENOTSOCK 38 /* Socket operation on non-socket */
++#define VKI_EDESTADDRREQ 39 /* Destination address required */
++#define VKI_EMSGSIZE 40 /* Message too long */
++#define VKI_EPROTOTYPE 41 /* Protocol wrong type for socket */
++#define VKI_ENOPROTOOPT 42 /* Protocol option not available */
++#define VKI_EPROTONOSUPPORT 43 /* Protocol not supported */
++#define VKI_ESOCKTNOSUPPORT 44 /* Socket type not supported */
++#define VKI_EOPNOTSUPP 45 /* Operation not supported */
++#define VKI_EPFNOSUPPORT 46 /* Protocol family not supported */
++#define VKI_EAFNOSUPPORT 47 /* Address family not supported by protocol family */
++#define VKI_EADDRINUSE 48 /* Address already in use */
++#define VKI_EADDRNOTAVAIL 49 /* Can't assign requested address */
++#define VKI_ENETDOWN 50 /* Network is down */
++#define VKI_ENETUNREACH 51 /* Network is unreachable */
++#define VKI_ENETRESET 52 /* Network dropped connection on reset */
++#define VKI_ECONNABORTED 53 /* Software caused connection abort */
++#define VKI_ECONNRESET 54 /* Connection reset by peer */
++#define VKI_ENOBUFS 55 /* No buffer space available */
++#define VKI_EISCONN 56 /* Socket is already connected */
++#define VKI_ENOTCONN 57 /* Socket is not connected */
++#define VKI_ESHUTDOWN 58 /* Can't send after socket shutdown */
++#define VKI_ETOOMANYREFS 59 /* Too many references: can't splice */
++#define VKI_ETIMEDOUT 60 /* Operation timed out */
++#define VKI_ECONNREFUSED 61 /* Connection refused */
++#define VKI_ELOOP 62 /* Too many levels of symbolic links */
++#define VKI_ENAMETOOLONG 63 /* File name too long */
++#define VKI_EHOSTDOWN 64 /* Host is down */
++#define VKI_EHOSTUNREACH 65 /* No route to host */
++#define VKI_ENOTEMPTY 66 /* Directory not empty */
++#define VKI_EPROCLIM 67 /* Too many processes */
++#define VKI_EUSERS 68 /* Too many users */
++#define VKI_EDQUOT 69 /* Disc quota exceeded */
++#define VKI_ESTALE 70 /* Stale NFS file handle */
++#define VKI_EREMOTE 71 /* Too many levels of remote in path */
++#define VKI_EBADRPC 72 /* RPC struct is bad */
++#define VKI_ERPCMISMATCH 73 /* RPC version wrong */
++#define VKI_EPROGUNAVAIL 74 /* RPC prog. not avail */
++#define VKI_EPROGMISMATCH 75 /* Program version wrong */
++#define VKI_EPROCUNAVAIL 76 /* Bad procedure for program */
++#define VKI_ENOLCK 77 /* No locks available */
++#define VKI_ENOSYS 78 /* Function not implemented */
++#define VKI_EFTYPE 79 /* Inappropriate file type or format */
++#define VKI_EAUTH 80 /* Authentication error */
++#define VKI_ENEEDAUTH 81 /* Need authenticator */
++#define VKI_EIDRM 82 /* Identifier removed */
++#define VKI_ENOMSG 83 /* No message of desired type */
++#define VKI_EOVERFLOW 84 /* Value too large to be stored in data type */
++#define VKI_EILSEQ 85 /* Illegal byte sequence */
++#define VKI_ENOTSUP 86 /* Not supported */
++#define VKI_ECANCELED 87 /* Operation canceled */
++#define VKI_EBADMSG 88 /* Bad or Corrupt message */
++#define VKI_ENODATA 89 /* No message available */
++#define VKI_ENOSR 90 /* No STREAM resources */
++#define VKI_ENOSTR 91 /* Not a STREAM */
++#define VKI_ETIME 92 /* STREAM ioctl timeout */
++#define VKI_ENOATTR 93 /* Attribute not found */
++#define VKI_EMULTIHOP 94 /* Multihop attempted */
++#define VKI_ENOLINK 95 /* Link has been severed */
++#define VKI_EPROTO 96 /* Protocol error */
++#define VKI_ELAST 96 /* Must equal largest errno */
++#define VKI_EJUSTRETURN -2 /* don't modify regs, just return */
++#define VKI_ERESTART -3 /* restart syscall */
++#define VKI_EPASSTHROUGH -4 /* ioctl not handled by this layer */
++#define VKI_EDUPFD -5 /* Dup given fd */
++#define VKI_EMOVEFD -6 /* Move given fd */
++
++//----------------------------------------------------------------------
++// From unistd.h
++//----------------------------------------------------------------------
++
++#define VKI__POSIX_JOB_CONTROL 1
++
++#define VKI__POSIX_VERSION 200112L
++#define VKI__POSIX2_VERSION 200112L
++
++#define VKI__POSIX_SPAWN 200809L
++
++#undef VKI__POSIX_ADVISORY_INFO
++ /* asynchronous I/O is available */
++#define VKI__POSIX_ASYNCHRONOUS_IO 200112L
++ /* barriers */
++#define VKI__POSIX_BARRIERS 200112L
++ /* chown requires correct privileges */
++#define VKI__POSIX_CHOWN_RESTRICTED 1
++ /* clock selection */
++#define VKI__POSIX_CLOCK_SELECTION -1
++ /* cputime clock */
++#define VKI__POSIX_CPUTIME 200112L
++ /* CPU type */
++#undef VKI__POSIX_CPUTYPE
++ /* file synchronization is available */
++#define VKI__POSIX_FSYNC 1
++
++ /* support IPv6 */
++#define VKI__POSIX_IPV6 0
++ /* job control is available */
++#define VKI__POSIX_JOB_CONTROL 1
++ /* memory mapped files */
++#define VKI__POSIX_MAPPED_FILES 1
++ /* memory locking whole address space */
++#define VKI__POSIX_MEMLOCK 1
++ /* memory locking address ranges */
++#define VKI__POSIX_MEMLOCK_RANGE 1
++ /* memory access protections */
++#define VKI__POSIX_MEMORY_PROTECTION 1
++ /* message passing is available */
++#define VKI__POSIX_MESSAGE_PASSING 200112L
++ /* monotonic clock */
++#define VKI__POSIX_MONOTONIC_CLOCK 200112L
++ /* too-long path comp generate errors */
++#define VKI__POSIX_NO_TRUNC 1
++
++
++ /* prioritized I/O */
++#define VKI__POSIX_PRIORITIZED_IO -1
++ /* priority scheduling */
++#define VKI__POSIX_PRIORITY_SCHEDULING 200112L
++ /* raw sockets */
++#define VKI__POSIX_RAW_SOCKETS 200112L
++ /* read/write locks */
++#define VKI__POSIX_READER_WRITER_LOCKS 200112L
++ /* realtime signals */
++#undef VKI__POSIX_REALTIME_SIGNALS
++ /* regular expressions */
++#define VKI__POSIX_REGEXP 1
++ /* semaphores */
++#define VKI__POSIX_SEMAPHORES 0
++ /* shared memory objects */
++#define VKI__POSIX_SHARED_MEMORY_OBJECTS 0
++ /* shell */
++#define VKI__POSIX_SHELL 1
++
++
++ /* spin locks */
++#define VKI__POSIX_SPIN_LOCKS 200112L
++ /* sporadic server */
++#undef VKI__POSIX_SPORADIC_SERVER
++ /* synchronized I/O is available */
++#define VKI__POSIX_SYNCHRONIZED_IO 1
++ /* threads */
++#define VKI__POSIX_THREADS 200112L
++ /* pthread_attr for stack size */
++#define VKI__POSIX_THREAD_ATTR_STACKSIZE 200112L
++ /* pthread_attr for stack address */
++#define VKI__POSIX_THREAD_ATTR_STACKADDR 200112L
++ /* thread cputime clock */
++#define VKI__POSIX_THREAD_CPUTIME 200112L
++ /* PTHREAD_PRIO_PROTECT */
++#define VKI__POSIX_THREAD_PRIO_PROTECT 200112L
++ /* _r functions */
++#define VKI__POSIX_THREAD_SAFE_FUNCTIONS 200112L
++
++ /* timeouts */
++#undef VKI__POSIX_TIMEOUTS
++ /* timers */
++#define VKI__POSIX_TIMERS 200112L
++ /* typed memory objects */
++#undef VKI__POSIX_TYPED_MEMORY_OBJECTS
++ /* may disable terminal spec chars */
++#define VKI__POSIX_VDISABLE ((unsigned char)'\377')
++
++ /* C binding */
++#define VKI__POSIX2_C_BIND 200112L
++
++ /* XPG4.2 shared memory */
++#define VKI__XOPEN_SHM 0
++
++/* access function */
++#define VKI_F_OK 0 /* test for existence of file */
++#define VKI_X_OK 0x01 /* test for execute or search permission */
++#define VKI_W_OK 0x02 /* test for write permission */
++#define VKI_R_OK 0x04 /* test for read permission */
++
++/* whence values for lseek(2) */
++#define VKI_SEEK_SET 0 /* set file offset to offset */
++#define VKI_SEEK_CUR 1 /* set file offset to current plus offset */
++#define VKI_SEEK_END 2 /* set file offset to EOF plus offset */
++
++#define VKI_L_SET VKI_SEEK_SET
++#define VKI_L_INCR VKI_SEEK_CUR
++#define VKI_L_XTND VKI_SEEK_END
++
++#define VKI_FDATASYNC 0x0010 /* sync data and minimal metadata */
++#define VKI_FFILESYNC 0x0020 /* sync data and metadata */
++#define VKI_FDISKSYNC 0x0040 /* flush disk caches after sync */
++
++#define VKI__PC_LINK_MAX 1
++#define VKI__PC_MAX_CANON 2
++#define VKI__PC_MAX_INPUT 3
++#define VKI__PC_NAME_MAX 4
++#define VKI__PC_PATH_MAX 5
++#define VKI__PC_PIPE_BUF 6
++#define VKI__PC_CHOWN_RESTRICTED 7
++#define VKI__PC_NO_TRUNC 8
++#define VKI__PC_VDISABLE 9
++#define VKI__PC_SYNC_IO 10
++#define VKI__PC_FILESIZEBITS 11
++#define VKI__PC_SYMLINK_MAX 12
++#define VKI__PC_2_SYMLINKS 13
++#define VKI__PC_ACL_EXTENDED 14
++
++#define VKI__PC_MIN_HOLE_SIZE 15
++
++#define VKI__SC_ARG_MAX 1
++#define VKI__SC_CHILD_MAX 2
++#define VKI__O_SC_CLK_TCK 3 /* Old version, always 100 */
++#define VKI__SC_NGROUPS_MAX 4
++#define VKI__SC_OPEN_MAX 5
++#define VKI__SC_JOB_CONTROL 6
++#define VKI__SC_SAVED_IDS 7
++#define VKI__SC_VERSION 8
++#define VKI__SC_BC_BASE_MAX 9
++#define VKI__SC_BC_DIM_MAX 10
++#define VKI__SC_BC_SCALE_MAX 11
++#define VKI__SC_BC_STRING_MAX 12
++#define VKI__SC_COLL_WEIGHTS_MAX 13
++#define VKI__SC_EXPR_NEST_MAX 14
++#define VKI__SC_LINE_MAX 15
++#define VKI__SC_RE_DUP_MAX 16
++#define VKI__SC_2_VERSION 17
++#define VKI__SC_2_C_BIND 18
++#define VKI__SC_2_C_DEV 19
++#define VKI__SC_2_CHAR_TERM 20
++#define VKI__SC_2_FORT_DEV 21
++#define VKI__SC_2_FORT_RUN 22
++#define VKI__SC_2_LOCALEDEF 23
++#define VKI__SC_2_SW_DEV 24
++#define VKI__SC_2_UPE 25
++#define VKI__SC_STREAM_MAX 26
++#define VKI__SC_TZNAME_MAX 27
++#define VKI__SC_PAGESIZE 28
++#define VKI__SC_PAGE_SIZE VKI__SC_PAGESIZE /* 1170 compatibility */
++#define VKI__SC_FSYNC 29
++#define VKI__SC_XOPEN_SHM 30
++#define VKI__SC_SYNCHRONIZED_IO 31
++#define VKI__SC_IOV_MAX 32
++#define VKI__SC_MAPPED_FILES 33
++#define VKI__SC_MEMLOCK 34
++#define VKI__SC_MEMLOCK_RANGE 35
++#define VKI__SC_MEMORY_PROTECTION 36
++#define VKI__SC_LOGIN_NAME_MAX 37
++#define VKI__SC_MONOTONIC_CLOCK 38
++#define VKI__SC_CLK_TCK 39 /* New, variable version */
++#define VKI__SC_ATEXIT_MAX 40
++#define VKI__SC_THREADS 41
++#define VKI__SC_SEMAPHORES 42
++#define VKI__SC_BARRIERS 43
++#define VKI__SC_TIMERS 44
++#define VKI__SC_SPIN_LOCKS 45
++#define VKI__SC_READER_WRITER_LOCKS 46
++#define VKI__SC_GETGR_R_SIZE_MAX 47
++#define VKI__SC_GETPW_R_SIZE_MAX 48
++#define VKI__SC_CLOCK_SELECTION 49
++#define VKI__SC_ASYNCHRONOUS_IO 50
++#define VKI__SC_AIO_LISTIO_MAX 51
++#define VKI__SC_AIO_MAX 52
++#define VKI__SC_MESSAGE_PASSING 53
++#define VKI__SC_MQ_OPEN_MAX 54
++#define VKI__SC_MQ_PRIO_MAX 55
++#define VKI__SC_PRIORITY_SCHEDULING 56
++#define VKI__SC_THREAD_DESTRUCTOR_ITERATIONS 57
++#define VKI__SC_THREAD_KEYS_MAX 58
++#define VKI__SC_THREAD_STACK_MIN 59
++#define VKI__SC_THREAD_THREADS_MAX 60
++#define VKI__SC_THREAD_ATTR_STACKADDR 61
++#define VKI__SC_THREAD_ATTR_STACKSIZE 62
++#define VKI__SC_THREAD_PRIORITY_SCHEDULING 63
++#define VKI__SC_THREAD_PRIO_INHERIT 64
++#define VKI__SC_THREAD_PRIO_PROTECT 65
++#define VKI__SC_THREAD_PROCESS_SHARED 66
++#define VKI__SC_THREAD_SAFE_FUNCTIONS 67
++#define VKI__SC_TTY_NAME_MAX 68
++#define VKI__SC_HOST_NAME_MAX 69
++#define VKI__SC_PASS_MAX 70
++#define VKI__SC_REGEXP 71
++#define VKI__SC_SHELL 72
++#define VKI__SC_SYMLOOP_MAX 73
++
++/* Actually, they are not supported or implemented yet */
++#define VKI__SC_V6_ILP32_OFF32 74
++#define VKI__SC_V6_ILP32_OFFBIG 75
++#define VKI__SC_V6_LP64_OFF64 76
++#define VKI__SC_V6_LPBIG_OFFBIG 77
++#define VKI__SC_2_PBS 80
++#define VKI__SC_2_PBS_ACCOUNTING 81
++#define VKI__SC_2_PBS_CHECKPOINT 82
++#define VKI__SC_2_PBS_LOCATE 83
++#define VKI__SC_2_PBS_MESSAGE 84
++#define VKI__SC_2_PBS_TRACK 85
++
++/* These are implemented */
++#define VKI__SC_SPAWN 86
++#define VKI__SC_SHARED_MEMORY_OBJECTS 87
++#define VKI__SC_TIMER_MAX 88
++#define VKI__SC_SEM_NSEMS_MAX 89
++#define VKI__SC_CPUTIME 90
++#define VKI__SC_THREAD_CPUTIME 91
++#define VKI__SC_DELAYTIMER_MAX 92
++#define VKI__SC_SIGQUEUE_MAX 93
++#define VKI__SC_REALTIME_SIGNALS 94
++
++/* Extensions found in Solaris and Linux. */
++#define VKI__SC_PHYS_PAGES 121
++
++/* Commonly provided sysconf() extensions */
++#define VKI__SC_NPROCESSORS_CONF 1001
++#define VKI__SC_NPROCESSORS_ONLN 1002
++/* Native variables */
++#define VKI__SC_SCHED_RT_TS 2001
++#define VKI__CS_PATH 1
++
++//----------------------------------------------------------------------
++// From netinet/in.h
++//----------------------------------------------------------------------
++
++typedef vki___in_addr_t vki_in_addr_t;
++typedef vki___in_port_t vki_in_port_t;
++typedef vki___sa_family_t vki_sa_family_t;
++
++#define VKI_IPPROTO_IP 0 /* dummy for IP */
++#define VKI_IPPROTO_HOPOPTS 0 /* IP6 hop-by-hop options */
++#define VKI_IPPROTO_ICMP 1 /* control message protocol */
++#define VKI_IPPROTO_IGMP 2 /* group mgmt protocol */
++#define VKI_IPPROTO_GGP 3 /* gateway^2 (deprecated) */
++#define VKI_IPPROTO_IPV4 4 /* IP header */
++#define VKI_IPPROTO_IPIP 4 /* IP inside IP */
++#define VKI_IPPROTO_TCP 6 /* tcp */
++#define VKI_IPPROTO_EGP 8 /* exterior gateway protocol */
++#define VKI_IPPROTO_PUP 12 /* pup */
++#define VKI_IPPROTO_UDP 17 /* user datagram protocol */
++#define VKI_IPPROTO_IDP 22 /* xns idp */
++#define VKI_IPPROTO_TP 29 /* tp-4 w/ class negotiation */
++#define VKI_IPPROTO_DCCP 33 /* DCCP */
++#define VKI_IPPROTO_IPV6 41 /* IP6 header */
++#define VKI_IPPROTO_ROUTING 43 /* IP6 routing header */
++#define VKI_IPPROTO_FRAGMENT 44 /* IP6 fragmentation header */
++#define VKI_IPPROTO_RSVP 46 /* resource reservation */
++#define VKI_IPPROTO_GRE 47 /* GRE encaps RFC 1701 */
++#define VKI_IPPROTO_ESP 50 /* encap. security payload */
++#define VKI_IPPROTO_AH 51 /* authentication header */
++#define VKI_IPPROTO_MOBILE 55 /* IP Mobility RFC 2004 */
++#define VKI_IPPROTO_IPV6_ICMP 58 /* IPv6 ICMP */
++#define VKI_IPPROTO_ICMPV6 58 /* ICMP6 */
++#define VKI_IPPROTO_NONE 59 /* IP6 no next header */
++#define VKI_IPPROTO_DSTOPTS 60 /* IP6 destination option */
++#define VKI_IPPROTO_EON 80 /* ISO cnlp */
++#define VKI_IPPROTO_ETHERIP 97 /* Ethernet-in-IP */
++#define VKI_IPPROTO_ENCAP 98 /* encapsulation header */
++#define VKI_IPPROTO_PIM 103 /* Protocol indep. multicast */
++#define VKI_IPPROTO_IPCOMP 108 /* IP Payload Comp. Protocol */
++#define VKI_IPPROTO_VRRP 112 /* VRRP RFC 2338 */
++#define VKI_IPPROTO_CARP 112 /* Common Address Resolution Protocol */
++#define VKI_IPPROTO_L2TP 115 /* L2TPv3 */
++#define VKI_IPPROTO_SCTP 132 /* SCTP */
++#define VKI_IPPROTO_PFSYNC 240 /* PFSYNC */
++#define VKI_IPPROTO_RAW 255 /* raw IP packet */
++#define VKI_IPPROTO_MAX 256
++
++/* last return value of *_input(), meaning "all job for this pkt is done". */
++#define VKI_IPPROTO_DONE 257
++
++/* sysctl placeholder for (FAST_)IPSEC */
++#define VKI_CTL_IPPROTO_IPSEC 258
++
++#define VKI_IPPORT_RESERVED 1024
++#define VKI_IPPORT_ANONMIN 49152
++#define VKI_IPPORT_ANONMAX 65535
++#define VKI_IPPORT_RESERVEDMIN 600
++#define VKI_IPPORT_RESERVEDMAX (VKI_IPPORT_RESERVED-1)
++
++struct vki_in_addr { /* mirrors NetBSD <netinet/in.h> struct in_addr */
++ vki_in_addr_t s_addr; /* IPv4 address */
++} __attribute__((__packed__));
++
++#define VKI___IPADDR(x) ((vki_uint32_t)(x))
++
++#define VKI_IN_CLASSA(i) (((vki_uint32_t)(i) & VKI___IPADDR(0x80000000)) == \
++ VKI___IPADDR(0x00000000))
++#define VKI_IN_CLASSA_NET VKI___IPADDR(0xff000000)
++#define VKI_IN_CLASSA_NSHIFT 24
++#define VKI_IN_CLASSA_HOST VKI___IPADDR(0x00ffffff)
++#define VKI_IN_CLASSA_MAX 128
++
++#define VKI_IN_CLASSB(i) (((vki_uint32_t)(i) & VKI___IPADDR(0xc0000000)) == \
++ VKI___IPADDR(0x80000000))
++#define VKI_IN_CLASSB_NET VKI___IPADDR(0xffff0000)
++#define VKI_IN_CLASSB_NSHIFT 16
++#define VKI_IN_CLASSB_HOST VKI___IPADDR(0x0000ffff)
++
++#define VKI_IN_CLASSB_MAX 65536
++
++#define VKI_IN_CLASSC(i) (((vki_uint32_t)(i) & VKI___IPADDR(0xe0000000)) == \
++ VKI___IPADDR(0xc0000000))
++#define VKI_IN_CLASSC_NET VKI___IPADDR(0xffffff00)
++#define VKI_IN_CLASSC_NSHIFT 8
++#define VKI_IN_CLASSC_HOST VKI___IPADDR(0x000000ff)
++
++#define VKI_IN_CLASSD(i) (((vki_uint32_t)(i) & VKI___IPADDR(0xf0000000)) == \
++ VKI___IPADDR(0xe0000000))
++/* These ones aren't really net and host fields, but routing needn't know. */
++#define VKI_IN_CLASSD_NET VKI___IPADDR(0xf0000000)
++#define VKI_IN_CLASSD_NSHIFT 28
++#define VKI_IN_CLASSD_HOST VKI___IPADDR(0x0fffffff)
++#define VKI_IN_MULTICAST(i) VKI_IN_CLASSD(i)
++
++#define VKI_IN_EXPERIMENTAL(i) (((vki_uint32_t)(i) & VKI___IPADDR(0xf0000000)) == \
++ VKI___IPADDR(0xf0000000))
++
++#define VKI_IN_BADCLASS(i) (((vki_uint32_t)(i) & VKI___IPADDR(0xf0000000)) == \
++ VKI___IPADDR(0xf0000000))
++
++#define VKI_IN_LINKLOCAL(i) (((vki_uint32_t)(i) & VKI___IPADDR(0xffff0000)) == \
++ VKI___IPADDR(0xa9fe0000))
++
++#define VKI_IN_PRIVATE(i) ((((vki_uint32_t)(i) & VKI___IPADDR(0xff000000)) == \
++ VKI___IPADDR(0x0a000000)) || \
++ (((vki_uint32_t)(i) & VKI___IPADDR(0xfff00000)) == \
++ VKI___IPADDR(0xac100000)) || \
++ (((vki_uint32_t)(i) & VKI___IPADDR(0xffff0000)) == \
++ VKI___IPADDR(0xc0a80000)))
++
++#define VKI_IN_LOCAL_GROUP(i) (((vki_uint32_t)(i) & VKI___IPADDR(0xffffff00)) == \
++ VKI___IPADDR(0xe0000000))
++
++#define VKI_IN_ANY_LOCAL(i) (VKI_IN_LINKLOCAL(i) || VKI_IN_LOCAL_GROUP(i))
++
++#define VKI_INADDR_ANY VKI___IPADDR(0x00000000)
++#define VKI_INADDR_LOOPBACK VKI___IPADDR(0x7f000001)
++#define VKI_INADDR_BROADCAST VKI___IPADDR(0xffffffff) /* must be masked */
++#define VKI_INADDR_NONE VKI___IPADDR(0xffffffff) /* -1 return */
++
++#define VKI_INADDR_UNSPEC_GROUP VKI___IPADDR(0xe0000000) /* 224.0.0.0 */
++#define VKI_INADDR_ALLHOSTS_GROUP VKI___IPADDR(0xe0000001) /* 224.0.0.1 */
++#define VKI_INADDR_ALLRTRS_GROUP VKI___IPADDR(0xe0000002) /* 224.0.0.2 */
++#define VKI_INADDR_CARP_GROUP VKI___IPADDR(0xe0000012) /* 224.0.0.18 */
++#define VKI_INADDR_MAX_LOCAL_GROUP VKI___IPADDR(0xe00000ff) /* 224.0.0.255 */
++
++#define VKI_IN_LOOPBACKNET 127 /* official! */
++
++struct vki_sockaddr_in { /* mirrors NetBSD <netinet/in.h> struct sockaddr_in */
++ vki_uint8_t sin_len; /* total sockaddr length */
++ vki_sa_family_t sin_family; /* AF_INET (sa_family_t) */
++ vki_in_port_t sin_port; /* transport layer port */
++ struct vki_in_addr sin_addr; /* IPv4 address */
++ vki_int8_t sin_zero[8]; /* zero padding, unused */
++};
++
++#define VKI_INET_ADDRSTRLEN 16
++
++struct vki_ip_opts {
++ struct vki_in_addr ip_dst; /* first hop, 0 w/o src rt */
++ vki_int8_t ip_opts[40]; /* actually variable in size */
++};
++
++#define VKI_IP_OPTIONS 1 /* buf/ip_opts; set/get IP options */
++#define VKI_IP_HDRINCL 2 /* int; header is included with data */
++#define VKI_IP_TOS 3 /* int; IP type of service and preced. */
++#define VKI_IP_TTL 4 /* int; IP time to live */
++#define VKI_IP_RECVOPTS 5 /* bool; receive all IP opts w/dgram */
++#define VKI_IP_RECVRETOPTS 6 /* bool; receive IP opts for response */
++#define VKI_IP_RECVDSTADDR 7 /* bool; receive IP dst addr w/dgram */
++#define VKI_IP_RETOPTS 8 /* ip_opts; set/get IP options */
++#define VKI_IP_MULTICAST_IF 9 /* in_addr; set/get IP multicast i/f */
++#define VKI_IP_MULTICAST_TTL 10 /* u_char; set/get IP multicast ttl */
++#define VKI_IP_MULTICAST_LOOP 11 /* u_char; set/get IP multicast loopback */
++/* The add and drop membership option numbers need to match with the v6 ones */
++#define VKI_IP_ADD_MEMBERSHIP 12 /* ip_mreq; add an IP group membership */
++#define VKI_IP_DROP_MEMBERSHIP 13 /* ip_mreq; drop an IP group membership */
++#define VKI_IP_PORTALGO 18 /* int; port selection algo (rfc6056) */
++#define VKI_IP_PORTRANGE 19 /* int; range to use for ephemeral port */
++#define VKI_IP_RECVIF 20 /* bool; receive reception if w/dgram */
++
++#define VKI_IP_ERRORMTU 21 /* int; get MTU of last xmit = EMSGSIZE */
++#define VKI_IP_IPSEC_POLICY 22 /* struct; get/set security policy */
++#define VKI_IP_RECVTTL 23 /* bool; receive IP TTL w/dgram */
++#define VKI_IP_MINTTL 24 /* minimum TTL for packet or drop */
++#define VKI_IP_PKTINFO 25 /* struct; set default src if/addr */
++#define VKI_IP_RECVPKTINFO 26 /* int; receive dst if/addr w/dgram */
++
++#define VKI_IP_SENDSRCADDR VKI_IP_RECVDSTADDR /* FreeBSD compatibility */
++
++/*
++ * Information sent in the control message of a datagram socket for
++ * IP_PKTINFO and IP_RECVPKTINFO.
++ */
++struct vki_in_pktinfo {
++ struct vki_in_addr ipi_addr; /* src/dst address */
++ unsigned int ipi_ifindex; /* interface index */
++};
++
++#define ipi_spec_dst ipi_addr /* Solaris/Linux compatibility */
++
++/*
++ * Defaults and limits for options
++ */
++#define VKI_IP_DEFAULT_MULTICAST_TTL 1 /* normally limit m'casts to 1 hop */
++#define VKI_IP_DEFAULT_MULTICAST_LOOP 1 /* normally hear sends if a member */
++#define VKI_IP_MAX_MEMBERSHIPS 20 /* per socket; must fit in one mbuf */
++
++/*
++ * Argument structure for IP_ADD_MEMBERSHIP and IP_DROP_MEMBERSHIP.
++ */
++struct vki_ip_mreq {
++ struct vki_in_addr imr_multiaddr; /* IP multicast address of group */
++ struct vki_in_addr imr_interface; /* local IP address of interface */
++};
++
++#define VKI_IP_PORTRANGE_DEFAULT 0 /* default range */
++#define VKI_IP_PORTRANGE_HIGH 1 /* same as DEFAULT (FreeBSD compat) */
++#define VKI_IP_PORTRANGE_LOW 2 /* use privileged range */
++
++#define VKI_IPCTL_FORWARDING 1 /* act as router */
++#define VKI_IPCTL_SENDREDIRECTS 2 /* may send redirects when forwarding */
++#define VKI_IPCTL_DEFTTL 3 /* default TTL */
++/* IPCTL_DEFMTU=4, never implemented */
++#define VKI_IPCTL_FORWSRCRT 5 /* forward source-routed packets */
++#define VKI_IPCTL_DIRECTEDBCAST 6 /* default broadcast behavior */
++#define VKI_IPCTL_ALLOWSRCRT 7 /* allow/drop all source-routed pkts */
++#define VKI_IPCTL_SUBNETSARELOCAL 8 /* treat subnets as local addresses */
++#define VKI_IPCTL_MTUDISC 9 /* allow path MTU discovery */
++
++#define VKI_IPCTL_ANONPORTMIN 10 /* minimum ephemeral port */
++#define VKI_IPCTL_ANONPORTMAX 11 /* maximum ephemeral port */
++#define VKI_IPCTL_MTUDISCTIMEOUT 12 /* allow path MTU discovery */
++#define VKI_IPCTL_MAXFLOWS 13 /* maximum ip flows allowed */
++#define VKI_IPCTL_HOSTZEROBROADCAST 14 /* is host zero a broadcast addr? */
++#define VKI_IPCTL_GIF_TTL 15 /* default TTL for gif encap packet */
++#define VKI_IPCTL_LOWPORTMIN 16 /* minimum reserved port */
++#define VKI_IPCTL_LOWPORTMAX 17 /* maximum reserved port */
++#define VKI_IPCTL_MAXFRAGPACKETS 18 /* max packets reassembly queue */
++
++#define VKI_IPCTL_GRE_TTL 19 /* default TTL for gre encap packet */
++#define VKI_IPCTL_CHECKINTERFACE 20 /* drop pkts in from 'wrong' iface */
++#define VKI_IPCTL_IFQ 21 /* IP packet input queue */
++#define VKI_IPCTL_RANDOMID 22 /* use random IP ids (if configured) */
++#define VKI_IPCTL_LOOPBACKCKSUM 23 /* do IP checksum on loopback */
++#define VKI_IPCTL_STATS 24 /* IP statistics */
++#define VKI_IPCTL_DAD_COUNT 25 /* DAD packets to send */
++
++//----------------------------------------------------------------------
++// From sys/mman.h
++//----------------------------------------------------------------------
++
++#define VKI_PROT_NONE 0x00 /* no permissions */
++#define VKI_PROT_READ 0x01 /* pages can be read */
++#define VKI_PROT_WRITE 0x02 /* pages can be written */
++#define VKI_PROT_EXEC 0x04 /* pages can be executed */
++
++#define VKI_PROT_MPROTECT(x) ((x) << 3)
++#define VKI_PROT_MPROTECT_EXTRACT(x) (((x) >> 3) & 0x7)
++
++#define VKI_MAP_SHARED 0x0001 /* share changes */
++#define VKI_MAP_PRIVATE 0x0002 /* changes are private */
++ /* old MAP_COPY 0x0004 "copy" region at mmap time */
++
++#define VKI_MAP_REMAPDUP 0x0004 /* mremap only: duplicate the mapping */
++#define VKI_MAP_FIXED 0x0010 /* map addr must be exactly as requested */
++#define VKI_MAP_RENAME 0x0020 /* Sun: rename private pages to file */
++#define VKI_MAP_NORESERVE 0x0040 /* Sun: don't reserve needed swap area */
++#define VKI_MAP_INHERIT 0x0080 /* region is retained after exec */
++#define VKI_MAP_HASSEMAPHORE 0x0200 /* region may contain semaphores */
++#define VKI_MAP_TRYFIXED 0x0400 /* attempt hint address, even within break */
++#define VKI_MAP_WIRED 0x0800 /* mlock() mapping when it is established */
++
++#define VKI_MAP_FILE 0x0000 /* map from file (default) */
++#define VKI_MAP_ANONYMOUS 0x1000 /* allocated from memory, swap space */
++#define VKI_MAP_ANON VKI_MAP_ANONYMOUS
++#define VKI_MAP_STACK 0x2000 /* allocated from memory, swap space (stack) */
++
++#define VKI_MAP_ALIGNED(n) ((int)((unsigned int)(n) << VKI_MAP_ALIGNMENT_SHIFT))
++#define VKI_MAP_ALIGNMENT_SHIFT 24
++#define VKI_MAP_ALIGNMENT_MASK VKI_MAP_ALIGNED(0xff)
++#define VKI_MAP_ALIGNMENT_64KB VKI_MAP_ALIGNED(16) /* 2^16 */
++#define VKI_MAP_ALIGNMENT_16MB VKI_MAP_ALIGNED(24) /* 2^24 */
++#define VKI_MAP_ALIGNMENT_4GB VKI_MAP_ALIGNED(32) /* 2^32 */
++#define VKI_MAP_ALIGNMENT_1TB VKI_MAP_ALIGNED(40) /* 2^40 */
++#define VKI_MAP_ALIGNMENT_256TB VKI_MAP_ALIGNED(48) /* 2^48 */
++#define VKI_MAP_ALIGNMENT_64PB VKI_MAP_ALIGNED(56) /* 2^56 */
++
++#define VKI_MAP_FAILED ((void *) -1) /* mmap() failed */
++
++/*
++ * Flags to msync
++ */
++#define VKI_MS_ASYNC 0x01 /* perform asynchronous writes */
++#define VKI_MS_INVALIDATE 0x02 /* invalidate cached data */
++#define VKI_MS_SYNC 0x04 /* perform synchronous writes */
++
++#define VKI_MCL_CURRENT 0x01 /* lock all pages currently mapped */
++#define VKI_MCL_FUTURE 0x02 /* lock all pages mapped in the future */
++
++#define VKI_POSIX_MADV_NORMAL 0 /* No further special treatment */
++#define VKI_POSIX_MADV_RANDOM 1 /* Expect random page references */
++#define VKI_POSIX_MADV_SEQUENTIAL 2 /* Expect sequential page references */
++#define VKI_POSIX_MADV_WILLNEED 3 /* Will need these pages */
++#define VKI_POSIX_MADV_DONTNEED 4 /* Don't need these pages */
++
++#define VKI_MADV_NORMAL VKI_POSIX_MADV_NORMAL
++#define VKI_MADV_RANDOM VKI_POSIX_MADV_RANDOM
++#define VKI_MADV_SEQUENTIAL VKI_POSIX_MADV_SEQUENTIAL
++#define VKI_MADV_WILLNEED VKI_POSIX_MADV_WILLNEED
++#define VKI_MADV_DONTNEED VKI_POSIX_MADV_DONTNEED
++#define VKI_MADV_SPACEAVAIL 5 /* Insure that resources are reserved */
++#define VKI_MADV_FREE 6 /* Pages are empty, free them */
++
++#define VKI_MAP_INHERIT_SHARE 0 /* share with child */
++#define VKI_MAP_INHERIT_COPY 1 /* copy into child */
++#define VKI_MAP_INHERIT_NONE 2 /* absent from child */
++#define VKI_MAP_INHERIT_DONATE_COPY 3 /* copy and delete -- not
++ implemented in UVM */
++#define VKI_MAP_INHERIT_ZERO 4 /* zero in child */
++#define VKI_MAP_INHERIT_DEFAULT VKI_MAP_INHERIT_COPY
++
++//----------------------------------------------------------------------
++// From netinet/tcp.h
++//----------------------------------------------------------------------
++
++typedef vki_uint32_t vki_tcp_seq;
++
++struct vki_tcphdr {
++ vki_uint16_t th_sport; /* source port */
++ vki_uint16_t th_dport; /* destination port */
++ vki_tcp_seq th_seq; /* sequence number */
++ vki_tcp_seq th_ack; /* acknowledgement number */
++#if __x86_64__ /* Little endian */
++ /*LINTED non-portable bitfields*/
++ vki_uint8_t th_x2:4, /* (unused) */
++ th_off:4; /* data offset */
++#elif 0 /* Big endian */
++ /*LINTED non-portable bitfields*/
++ vki_uint8_t th_off:4, /* data offset */
++ th_x2:4; /* (unused) */
++#else
++#error unknown endian
++#endif
++ vki_uint8_t th_flags; /* control flags (VKI_TH_* below) */
++#define VKI_TH_FIN 0x01
++#define VKI_TH_SYN 0x02
++#define VKI_TH_RST 0x04
++#define VKI_TH_PUSH 0x08
++#define VKI_TH_ACK 0x10
++#define VKI_TH_URG 0x20
++#define VKI_TH_ECE 0x40
++#define VKI_TH_CWR 0x80
++ vki_uint16_t th_win; /* window */
++ vki_uint16_t th_sum; /* checksum */
++ vki_uint16_t th_urp; /* urgent pointer */
++} __attribute__((__packed__));
++
++#define VKI_TCPOPT_EOL 0
++#define VKI_TCPOLEN_EOL 1
++#define VKI_TCPOPT_PAD 0
++#define VKI_TCPOLEN_PAD 1
++#define VKI_TCPOPT_NOP 1
++#define VKI_TCPOLEN_NOP 1
++#define VKI_TCPOPT_MAXSEG 2
++#define VKI_TCPOLEN_MAXSEG 4
++#define VKI_TCPOPT_WINDOW 3
++#define VKI_TCPOLEN_WINDOW 3
++#define VKI_TCPOPT_SACK_PERMITTED 4 /* Experimental */
++#define VKI_TCPOLEN_SACK_PERMITTED 2
++#define VKI_TCPOPT_SACK 5 /* Experimental */
++#define VKI_TCPOPT_TIMESTAMP 8
++#define VKI_TCPOLEN_TIMESTAMP 10
++#define VKI_TCPOLEN_TSTAMP_APPA (VKI_TCPOLEN_TIMESTAMP+2) /* appendix A */
++
++#define VKI_TCPOPT_TSTAMP_HDR \
++ (VKI_TCPOPT_NOP<<24|VKI_TCPOPT_NOP<<16|VKI_TCPOPT_TIMESTAMP<<8|VKI_TCPOLEN_TIMESTAMP)
++
++#define VKI_TCPOPT_SIGNATURE 19 /* Keyed MD5: RFC 2385 */
++#define VKI_TCPOLEN_SIGNATURE 18
++#define VKI_TCPOLEN_SIGLEN (VKI_TCPOLEN_SIGNATURE+2) /* padding */
++
++#define VKI_MAX_TCPOPTLEN 40 /* max # bytes that go in options */
++
++#define VKI_TCP_MSS 536
++
++#define VKI_TCP_MINMSS 216
++
++#define VKI_TCP_MAXWIN 65535 /* largest value for (unscaled) window */
++
++#define VKI_TCP_MAX_WINSHIFT 14 /* maximum window shift */
++
++#define VKI_TCP_MAXBURST 4 /* maximum segments in a burst */
++
++/*
++ * User-settable options (used with setsockopt).
++ */
++#define VKI_TCP_NODELAY 1 /* don't delay send to coalesce packets */
++
++#define VKI_TCP_MAXSEG 2 /* set maximum segment size */
++#define VKI_TCP_KEEPIDLE 3
++
++#define VKI_TCP_KEEPINTVL 5
++#define VKI_TCP_KEEPCNT 6
++#define VKI_TCP_KEEPINIT 7
++
++#define VKI_TCP_INFO 9 /* retrieve tcp_info structure */
++#define VKI_TCP_MD5SIG 0x10 /* use MD5 digests (RFC2385) */
++#define VKI_TCP_CONGCTL 0x20 /* selected congestion control */
++
++#define VKI_TCPI_OPT_TIMESTAMPS 0x01
++#define VKI_TCPI_OPT_SACK 0x02
++#define VKI_TCPI_OPT_WSCALE 0x04
++#define VKI_TCPI_OPT_ECN 0x08
++#define VKI_TCPI_OPT_TOE 0x10
++
++struct vki_tcp_info {
++ vki_uint8_t tcpi_state; /* TCP FSM state. */
++ vki_uint8_t __tcpi_ca_state;
++ vki_uint8_t __tcpi_retransmits;
++ vki_uint8_t __tcpi_probes;
++ vki_uint8_t __tcpi_backoff;
++ vki_uint8_t tcpi_options; /* Options enabled on conn. */
++ /*LINTED: non-portable bitfield*/
++ vki_uint8_t tcpi_snd_wscale:4, /* RFC1323 send shift value. */
++ /*LINTED: non-portable bitfield*/
++ tcpi_rcv_wscale:4; /* RFC1323 recv shift value. */
++
++ vki_uint32_t tcpi_rto; /* Retransmission timeout (usec). */
++ vki_uint32_t __tcpi_ato;
++ vki_uint32_t tcpi_snd_mss; /* Max segment size for send. */
++ vki_uint32_t tcpi_rcv_mss; /* Max segment size for receive. */
++
++ vki_uint32_t __tcpi_unacked;
++ vki_uint32_t __tcpi_sacked;
++ vki_uint32_t __tcpi_lost;
++ vki_uint32_t __tcpi_retrans;
++ vki_uint32_t __tcpi_fackets;
++
++ /* Times; measurements in usecs. */
++ vki_uint32_t __tcpi_last_data_sent;
++ vki_uint32_t __tcpi_last_ack_sent; /* Also unimpl. on Linux? */
++ vki_uint32_t tcpi_last_data_recv; /* Time since last recv data. */
++ vki_uint32_t __tcpi_last_ack_recv;
++
++ /* Metrics; variable units. */
++ vki_uint32_t __tcpi_pmtu;
++ vki_uint32_t __tcpi_rcv_ssthresh;
++ vki_uint32_t tcpi_rtt; /* Smoothed RTT in usecs. */
++ vki_uint32_t tcpi_rttvar; /* RTT variance in usecs. */
++ vki_uint32_t tcpi_snd_ssthresh; /* Slow start threshold. */
++ vki_uint32_t tcpi_snd_cwnd; /* Send congestion window. */
++ vki_uint32_t __tcpi_advmss;
++ vki_uint32_t __tcpi_reordering;
++
++ vki_uint32_t __tcpi_rcv_rtt;
++ vki_uint32_t tcpi_rcv_space; /* Advertised recv window. */
++
++ /* FreeBSD/NetBSD extensions to tcp_info. */
++ vki_uint32_t tcpi_snd_wnd; /* Advertised send window. */
++ vki_uint32_t tcpi_snd_bwnd; /* No longer used. */
++ vki_uint32_t tcpi_snd_nxt; /* Next egress seqno */
++ vki_uint32_t tcpi_rcv_nxt; /* Next ingress seqno */
++ vki_uint32_t tcpi_toe_tid; /* HWTID for TOE endpoints */
++ vki_uint32_t tcpi_snd_rexmitpack; /* Retransmitted packets */
++ vki_uint32_t tcpi_rcv_ooopack; /* Out-of-order packets */
++ vki_uint32_t tcpi_snd_zerowin; /* Zero-sized windows sent */
++
++ /* Padding to grow without breaking ABI. */
++ vki_uint32_t __tcpi_pad[26]; /* Padding. */
++};
++
++//----------------------------------------------------------------------
++// From sys/exec_elf.h
++//----------------------------------------------------------------------
++
++#define VKI_AT_NULL 0 /* Marks end of array */
++#define VKI_AT_IGNORE 1 /* No meaning, a_un is undefined */
++#define VKI_AT_EXECFD 2 /* Open file descriptor of object file */
++#define VKI_AT_PHDR 3 /* &phdr[0] */
++#define VKI_AT_PHENT 4 /* sizeof(phdr[0]) */
++#define VKI_AT_PHNUM 5 /* # phdr entries */
++#define VKI_AT_PAGESZ 6 /* PAGESIZE */
++#define VKI_AT_BASE 7 /* Interpreter base addr */
++#define VKI_AT_FLAGS 8 /* Processor flags */
++#define VKI_AT_ENTRY 9 /* Entry address of executable */
++#define VKI_AT_DCACHEBSIZE 10 /* Data cache block size */
++#define VKI_AT_ICACHEBSIZE 11 /* Instruction cache block size */
++#define VKI_AT_UCACHEBSIZE 12 /* Unified cache block size */
++#define VKI_AT_STACKBASE 13 /* Base address of the main thread */
++
++#define VKI_AT_MIPS_NOTELF 10 /* XXX a_val != 0 -> MIPS XCOFF executable */
++
++#define VKI_AT_EUID 2000 /* euid (solaris compatible numbers) */
++#define VKI_AT_RUID 2001 /* ruid (solaris compatible numbers) */
++#define VKI_AT_EGID 2002 /* egid (solaris compatible numbers) */
++#define VKI_AT_RGID 2003 /* rgid (solaris compatible numbers) */
++
++#define VKI_AT_SUN_LDELF 2004 /* dynamic linker's ELF header */
++#define VKI_AT_SUN_LDSHDR 2005 /* dynamic linker's section header */
++#define VKI_AT_SUN_LDNAME 2006 /* dynamic linker's name */
++#define VKI_AT_SUN_LPGSIZE 2007 /* large pagesize */
++
++#define VKI_AT_SUN_PLATFORM 2008 /* sysinfo(SI_PLATFORM) */
++#define VKI_AT_SUN_HWCAP 2009 /* process hardware capabilities */
++#define VKI_AT_SUN_IFLUSH 2010 /* do we need to flush the instruction cache? */
++#define VKI_AT_SUN_CPU 2011 /* CPU name */
++ /* ibcs2 emulation band aid */
++#define VKI_AT_SUN_EMUL_ENTRY 2012 /* coff entry point */
++#define VKI_AT_SUN_EMUL_EXECFD 2013 /* coff file descriptor */
++ /* Executable's fully resolved name */
++#define VKI_AT_SUN_EXECNAME 2014
++
++//----------------------------------------------------------------------
++// From sys/un.h
++//----------------------------------------------------------------------
++
++struct vki_sockaddr_un {
++ vki_uint8_t sun_len; /* total sockaddr length */
++ vki_sa_family_t sun_family; /* AF_LOCAL */
++ char sun_path[104]; /* path name (gag) */
++};
++
++#define VKI_LOCAL_OCREDS 0x0001 /* pass credentials to receiver */
++#define VKI_LOCAL_CONNWAIT 0x0002 /* connects block until accepted */
++#define VKI_LOCAL_PEEREID 0x0003 /* get peer identification */
++#define VKI_LOCAL_CREDS 0x0004 /* pass credentials to receiver */
++
++struct vki_unpcbid {
++ vki_pid_t unp_pid; /* process id */
++ vki_uid_t unp_euid; /* effective user id */
++ vki_gid_t unp_egid; /* effective group id */
++};
++
++#define VKI_SUN_LEN(su) \
++ (sizeof(*(su)) - sizeof((su)->sun_path) + strlen((su)->sun_path))
++
++//----------------------------------------------------------------------
++// From netinet6/in6.h
++//----------------------------------------------------------------------
++
++#define VKI_IPV6PORT_RESERVED 1024
++#define VKI_IPV6PORT_ANONMIN 49152
++#define VKI_IPV6PORT_ANONMAX 65535
++#define VKI_IPV6PORT_RESERVEDMIN 600
++#define VKI_IPV6PORT_RESERVEDMAX (VKI_IPV6PORT_RESERVED-1)
++
++struct vki_in6_addr { /* mirrors NetBSD <netinet6/in6.h> struct in6_addr */
++ union {
++ vki_uint8_t __u6_addr8[16]; /* address as 16 bytes */
++ vki_uint16_t __u6_addr16[8]; /* address as 8 x 16-bit */
++ vki_uint32_t __u6_addr32[4]; /* address as 4 x 32-bit */
++ } __u6_addr; /* 128-bit IP6 address */
++};
++
++#define s6_addr __u6_addr.__u6_addr8
++
++#define VKI_INET6_ADDRSTRLEN 46
++
++#define VKI_SIN6_LEN
++
++struct vki_sockaddr_in6 {
++ vki_uint8_t sin6_len; /* length of this struct(socklen_t)*/
++ vki_sa_family_t sin6_family; /* AF_INET6 (sa_family_t) */
++ vki_in_port_t sin6_port; /* Transport layer port */
++ vki_uint32_t sin6_flowinfo; /* IP6 flow information */
++ struct vki_in6_addr sin6_addr; /* IP6 address */
++ vki_uint32_t sin6_scope_id; /* scope zone index */
++};
++
++#define VKI_IN6ADDR_ANY_INIT \
++ {{{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }}}
++#define VKI_IN6ADDR_LOOPBACK_INIT \
++ {{{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01 }}}
++#define VKI_IN6ADDR_NODELOCAL_ALLNODES_INIT \
++ {{{ 0xff, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01 }}}
++#define VKI_IN6ADDR_LINKLOCAL_ALLNODES_INIT \
++ {{{ 0xff, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01 }}}
++#define VKI_IN6ADDR_LINKLOCAL_ALLROUTERS_INIT \
++ {{{ 0xff, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02 }}}
++
++#define VKI___IPV6_ADDR_SCOPE_NODELOCAL 0x01
++#define VKI___IPV6_ADDR_SCOPE_LINKLOCAL 0x02
++#define VKI___IPV6_ADDR_SCOPE_SITELOCAL 0x05
++#define VKI___IPV6_ADDR_SCOPE_ORGLOCAL 0x08 /* just used in this file */
++#define VKI___IPV6_ADDR_SCOPE_GLOBAL 0x0e
++
++#define VKI_IN6_IS_ADDR_MULTICAST(a) ((a)->s6_addr[0] == 0xff)
++
++#define VKI___IPV6_ADDR_MC_SCOPE(a) ((a)->s6_addr[1] & 0x0f)
++
++#define VKI_IPV6_SOCKOPT_RESERVED1 3 /* reserved for future use */
++#define VKI_IPV6_UNICAST_HOPS 4 /* int; IP6 hops */
++#define VKI_IPV6_MULTICAST_IF 9 /* u_int; set/get IP6 multicast i/f */
++#define VKI_IPV6_MULTICAST_HOPS 10 /* int; set/get IP6 multicast hops */
++#define VKI_IPV6_MULTICAST_LOOP 11 /* u_int; set/get IP6 multicast loopback */
++/* The join and leave membership option numbers need to match with the v4 ones */
++#define VKI_IPV6_JOIN_GROUP 12 /* ip6_mreq; join a group membership */
++#define VKI_IPV6_LEAVE_GROUP 13 /* ip6_mreq; leave a group membership */
++#define VKI_IPV6_PORTRANGE 14 /* int; range to choose for unspec port */
++
++#define VKI_IPV6_PORTALGO 17 /* int; port selection algo (rfc6056) */
++#define VKI_ICMP6_FILTER 18 /* icmp6_filter; icmp6 filter */
++
++#define VKI_IPV6_CHECKSUM 26 /* int; checksum offset for raw socket */
++#define VKI_IPV6_V6ONLY 27 /* bool; make AF_INET6 sockets v6 only */
++
++#define VKI_IPV6_IPSEC_POLICY 28 /* struct; get/set security policy */
++#define VKI_IPV6_FAITH 29 /* bool; accept FAITH'ed connections */
++
++/* new socket options introduced in RFC3542 */
++#define VKI_IPV6_RTHDRDSTOPTS 35 /* ip6_dest; send dst option before rthdr */
++
++#define VKI_IPV6_RECVPKTINFO 36 /* bool; recv if, dst addr */
++#define VKI_IPV6_RECVHOPLIMIT 37 /* bool; recv hop limit */
++
++#define VKI_IPV6_RECVRTHDR 38 /* bool; recv routing header */
++#define VKI_IPV6_RECVHOPOPTS 39 /* bool; recv hop-by-hop option */
++#define VKI_IPV6_RECVDSTOPTS 40 /* bool; recv dst option after rthdr */
++
++#define VKI_IPV6_USE_MIN_MTU 42 /* bool; send packets at the minimum MTU */
++#define VKI_IPV6_RECVPATHMTU 43 /* bool; notify an according MTU */
++#define VKI_IPV6_PATHMTU 44 /* mtuinfo; get the current path MTU (sopt),
++ 4 bytes int; MTU notification (cmsg) */
++
++/* more new socket options introduced in RFC3542 */
++#define VKI_IPV6_PKTINFO 46 /* in6_pktinfo; send if, src addr */
++#define VKI_IPV6_HOPLIMIT 47 /* int; send hop limit */
++#define VKI_IPV6_NEXTHOP 48 /* sockaddr; next hop addr */
++#define VKI_IPV6_HOPOPTS 49 /* ip6_hbh; send hop-by-hop option */
++#define VKI_IPV6_DSTOPTS 50 /* ip6_dest; send dst option before rthdr */
++#define VKI_IPV6_RTHDR 51 /* ip6_rthdr; send routing header */
++
++#define VKI_IPV6_RECVTCLASS 57 /* bool; recv traffic class values */
++
++#define VKI_IPV6_TCLASS 61 /* int; send traffic class value */
++#define VKI_IPV6_DONTFRAG 62 /* bool; disable IPv6 fragmentation */
++#define VKI_IPV6_PREFER_TEMPADDR 63 /* int; prefer temporary address as
++ * the source address */
++/* to define items, should talk with KAME guys first, for *BSD compatibility */
++
++#define VKI_IPV6_RTHDR_LOOSE 0 /* this hop need not be a neighbor. XXX old spec */
++#define VKI_IPV6_RTHDR_STRICT 1 /* this hop must be a neighbor. XXX old spec */
++#define VKI_IPV6_RTHDR_TYPE_0 0 /* IPv6 routing header type 0 */
++
++
++#define VKI_IPV6_DEFAULT_MULTICAST_HOPS 1 /* normally limit m'casts to 1 hop */
++#define VKI_IPV6_DEFAULT_MULTICAST_LOOP 1 /* normally hear sends if a member */
++
++/* Argument for the IPV6_JOIN_GROUP / IPV6_LEAVE_GROUP socket options. */
++struct vki_ipv6_mreq {
++ struct vki_in6_addr ipv6mr_multiaddr; /* IPv6 multicast group address */
++ unsigned int ipv6mr_interface; /* interface index */
++};
++
++/* Ancillary data for IPV6_PKTINFO / IPV6_RECVPKTINFO (RFC 3542). */
++struct vki_in6_pktinfo {
++ struct vki_in6_addr ipi6_addr; /* src/dst IPv6 address */
++ unsigned int ipi6_ifindex; /* send/recv interface index */
++};
++
++/*
++ * Control structure for IPV6_RECVPATHMTU socket option.
++ */
++struct vki_ip6_mtuinfo {
++ struct vki_sockaddr_in6 ip6m_addr; /* or sockaddr_storage? */
++ vki_uint32_t ip6m_mtu; /* current path MTU (see VKI_IPV6_PATHMTU) */
++};
++
++#define VKI_IPV6_PORTRANGE_DEFAULT 0 /* default range */
++#define VKI_IPV6_PORTRANGE_HIGH 1 /* "high" - request firewall bypass */
++#define VKI_IPV6_PORTRANGE_LOW 2 /* "low" - vouchsafe security */
++
++#define VKI_IPV6CTL_FORWARDING 1 /* act as router */
++#define VKI_IPV6CTL_SENDREDIRECTS 2 /* may send redirects when forwarding*/
++#define VKI_IPV6CTL_DEFHLIM 3 /* default Hop-Limit */
++/* IPV6CTL_DEFMTU=4, never implemented */
++#define VKI_IPV6CTL_FORWSRCRT 5 /* forward source-routed dgrams */
++#define VKI_IPV6CTL_STATS 6 /* stats */
++#define VKI_IPV6CTL_MRTSTATS 7 /* multicast forwarding stats */
++#define VKI_IPV6CTL_MRTPROTO 8 /* multicast routing protocol */
++#define VKI_IPV6CTL_MAXFRAGPACKETS 9 /* max packets reassembly queue */
++#define VKI_IPV6CTL_SOURCECHECK 10 /* verify source route and intf */
++#define VKI_IPV6CTL_SOURCECHECK_LOGINT 11 /* minimum logging interval */
++#define VKI_IPV6CTL_ACCEPT_RTADV 12
++#define VKI_IPV6CTL_KEEPFAITH 13
++#define VKI_IPV6CTL_LOG_INTERVAL 14
++#define VKI_IPV6CTL_HDRNESTLIMIT 15
++#define VKI_IPV6CTL_DAD_COUNT 16
++#define VKI_IPV6CTL_AUTO_FLOWLABEL 17
++
++#define VKI_IPV6CTL_DEFMCASTHLIM 18
++#define VKI_IPV6CTL_GIF_HLIM 19 /* default HLIM for gif encap packet */
++#define VKI_IPV6CTL_KAME_VERSION 20
++#define VKI_IPV6CTL_USE_DEPRECATED 21 /* use deprecated addr (RFC2462 5.5.4) */
++#define VKI_IPV6CTL_RR_PRUNE 22 /* walk timer for router renumbering */
++/* 23: reserved */
++#define VKI_IPV6CTL_V6ONLY 24
++/* 25 to 27: reserved */
++#define VKI_IPV6CTL_ANONPORTMIN 28 /* minimum ephemeral port */
++#define VKI_IPV6CTL_ANONPORTMAX 29 /* maximum ephemeral port */
++#define VKI_IPV6CTL_LOWPORTMIN 30 /* minimum reserved port */
++#define VKI_IPV6CTL_LOWPORTMAX 31 /* maximum reserved port */
++/* 32 to 34: reserved */
++#define VKI_IPV6CTL_AUTO_LINKLOCAL 35 /* automatic link-local addr assign */
++/* 36 to 37: reserved */
++#define VKI_IPV6CTL_ADDRCTLPOLICY 38 /* get/set address selection policy */
++#define VKI_IPV6CTL_USE_DEFAULTZONE 39 /* use default scope zone */
++
++#define VKI_IPV6CTL_MAXFRAGS 41 /* max fragments */
++#define VKI_IPV6CTL_IFQ 42 /* IPv6 packet input queue */
++#define VKI_IPV6CTL_RTADV_MAXROUTES 43 /* maximum number of routes */
++ /* via router advertisement */
++#define VKI_IPV6CTL_RTADV_NUMROUTES 44 /* current number of routes */
++ /* via router advertisement */
++
++#define VKI_INET6_IS_ADDR_LINKLOCAL 1
++#define VKI_INET6_IS_ADDR_MC_LINKLOCAL 2
++#define VKI_INET6_IS_ADDR_SITELOCAL 4
++
++//----------------------------------------------------------------------
++// From sys/ipc.h
++//----------------------------------------------------------------------
++
++/* SysV IPC access permissions, as in NetBSD <sys/ipc.h>. */
++struct vki_ipc_perm {
++ vki_uid_t uid; /* user id */
++ vki_gid_t gid; /* group id */
++ vki_uid_t cuid; /* creator user id */
++ vki_gid_t cgid; /* creator group id */
++ vki_mode_t mode; /* r/w permission */
++
++ /*
++ * These members are private and used only in the internal
++ * implementation of this interface.
++ */
++ unsigned short _seq; /* sequence # (to generate unique
++ msg/sem/shm id) */
++ vki_key_t _key; /* user specified msg/sem/shm key */
++};
++
++/* Fixed-width/padded variant of the above, as exported via sysctl. */
++struct vki_ipc_perm_sysctl {
++ vki_uint64_t _key;
++ vki_uid_t uid;
++ vki_gid_t gid;
++ vki_uid_t cuid;
++ vki_gid_t cgid;
++ vki_mode_t mode;
++ vki_int16_t _seq;
++ vki_int16_t pad; /* explicit padding */
++};
++
++#define VKI_IPC_R 000400 /* read permission */
++#define VKI_IPC_W 000200 /* write/alter permission */
++#define VKI_IPC_M 010000 /* permission to change control info */
++
++#define VKI_IPC_CREAT 001000 /* create entry if key does not exist */
++#define VKI_IPC_EXCL 002000 /* fail if key exists */
++#define VKI_IPC_NOWAIT 004000 /* error if request must wait */
++
++#define VKI_IPC_PRIVATE (vki_key_t)0 /* private key */
++
++#define VKI_IPC_RMID 0 /* remove identifier */
++#define VKI_IPC_SET 1 /* set options */
++#define VKI_IPC_STAT 2 /* get options */
++
++/* Build an IPC id from table index (low 16 bits) + sequence number. */
++#define VKI_IXSEQ_TO_IPCID(ix,perm) (((perm._seq) << 16) | (ix & 0xffff))
++
++//----------------------------------------------------------------------
++// From sys/sem.h
++//----------------------------------------------------------------------
++
++/* Per-semaphore state, as in NetBSD <sys/sem.h> struct __sem. */
++struct vki___sem {
++ unsigned short semval; /* semaphore value */
++ vki_pid_t sempid; /* pid of last operation */
++ unsigned short semncnt; /* # awaiting semval > cval */
++ unsigned short semzcnt; /* # awaiting semval = 0 */
++};
++
++/* Per-semaphore-set state, as returned by semctl(IPC_STAT). */
++struct vki_semid_ds {
++ struct vki_ipc_perm sem_perm; /* operation permission structure */
++ unsigned short sem_nsems; /* number of semaphores in set */
++ vki_time_t sem_otime; /* last semop() time */
++ vki_time_t sem_ctime; /* last time changed by semctl() */
++
++ /*
++ * These members are private and used only in the internal
++ * implementation of this interface.
++ */
++ struct vki___sem *_sem_base; /* pointer to first semaphore in set */
++};
++
++/* One operation of a semop() request. */
++struct vki_sembuf {
++ unsigned short sem_num; /* semaphore # */
++ short sem_op; /* semaphore operation */
++ short sem_flg; /* operation flags */
++};
++#define VKI_SEM_UNDO 010000 /* undo changes on process exit */
++
++/* semctl() command numbers. */
++#define VKI_GETNCNT 3 /* Return the value of semncnt {READ} */
++#define VKI_GETPID 4 /* Return the value of sempid {READ} */
++#define VKI_GETVAL 5 /* Return the value of semval {READ} */
++#define VKI_GETALL 6 /* Return semvals into arg.array {READ} */
++#define VKI_GETZCNT 7 /* Return the value of semzcnt {READ} */
++#define VKI_SETVAL 8 /* Set the value of semval to arg.val {ALTER} */
++#define VKI_SETALL 9 /* Set semvals from arg.array {ALTER} */
++
++/* Semaphore subsystem limits/configuration. */
++struct vki_seminfo {
++ vki_int32_t semmap; /* # of entries in semaphore map */
++ vki_int32_t semmni; /* # of semaphore identifiers */
++ vki_int32_t semmns; /* # of semaphores in system */
++ vki_int32_t semmnu; /* # of undo structures in system */
++ vki_int32_t semmsl; /* max # of semaphores per id */
++ vki_int32_t semopm; /* max # of operations per semop call */
++ vki_int32_t semume; /* max # of undo entries per process */
++ vki_int32_t semusz; /* size in bytes of undo structure */
++ vki_int32_t semvmx; /* semaphore maximum value */
++ vki_int32_t semaem; /* adjust on exit max value */
++};
++
++/* Fixed-width/padded vki_semid_ds, as exported via sysctl. */
++struct vki_semid_ds_sysctl {
++ struct vki_ipc_perm_sysctl sem_perm;
++ vki_int16_t sem_nsems;
++ vki_int16_t pad2; /* explicit padding */
++ vki_int32_t pad3; /* explicit padding */
++ vki_time_t sem_otime;
++ vki_time_t sem_ctime;
++};
++
++/* sysctl KERN_SYSVIPC reply: header followed by a trailing array
++ (declared [1] in the historical trailing-array idiom; presumably
++ variable-length in practice -- confirm against NetBSD sys/sem.h). */
++struct vki_sem_sysctl_info {
++ struct vki_seminfo seminfo;
++ struct vki_semid_ds_sysctl semids[1];
++};
++
++#define VKI_SEM_ALLOC 01000 /* semaphore is allocated */
++
++/*
++ * Select which semctl() argument carries the user data for a given cmd:
++ * IPC_SET/IPC_STAT use the semid_ds buffer, GETALL/SETVAL/SETALL use the
++ * semun-style arg; every other cmd carries no pointer argument (0).
++ */
++#define vki_get_semctl_arg(cmd, sembuf, arg) \
++ ((cmd) == VKI_IPC_SET || (cmd) == VKI_IPC_STAT ? (void *)sembuf \
++ : (cmd) == VKI_GETALL || (cmd) == VKI_SETVAL || (cmd) == VKI_SETALL ? (void *)arg \
++ : 0)
++
++//----------------------------------------------------------------------
++// From sys/shm.h
++//----------------------------------------------------------------------
++
++/* shmat() flags. */
++#define VKI_SHM_RDONLY 010000 /* Attach read-only (else read-write) */
++#define VKI_SHM_RND 020000 /* Round attach address to SHMLBA */
++
++#define VKI_SHMLBA VKI_PAGE_SIZE
++
++typedef unsigned int vki_shmatt_t; /* attach count */
++
++/* Per-segment state, as returned by shmctl(IPC_STAT). */
++struct vki_shmid_ds {
++ struct vki_ipc_perm shm_perm; /* operation permission structure */
++ vki_size_t shm_segsz; /* size of segment in bytes */
++ vki_pid_t shm_lpid; /* process ID of last shm operation */
++ vki_pid_t shm_cpid; /* process ID of creator */
++ vki_shmatt_t shm_nattch; /* number of current attaches */
++ vki_time_t shm_atime; /* time of last shmat() */
++ vki_time_t shm_dtime; /* time of last shmdt() */
++ vki_time_t shm_ctime; /* time of last change by shmctl() */
++
++ void *_shm_internal; /* kernel-private */
++};
++
++#define VKI_SHM_LOCK 3 /* Lock segment in memory. */
++#define VKI_SHM_UNLOCK 4 /* Unlock a segment locked by SHM_LOCK. */
++
++#define VKI_SHM_R VKI_IPC_R /* S_IRUSR, R for owner */
++#define VKI_SHM_W VKI_IPC_W /* S_IWUSR, W for owner */
++
++/* Shared-memory subsystem limits/configuration. */
++struct vki_shminfo {
++ vki_uint64_t shmmax; /* max shared memory segment size (bytes) */
++ vki_uint32_t shmmin; /* min shared memory segment size (bytes) */
++ vki_uint32_t shmmni; /* max number of shared memory identifiers */
++ vki_uint32_t shmseg; /* max shared memory segments per process */
++ vki_uint32_t shmall; /* max amount of shared memory (pages) */
++};
++
++/* Fixed-width/padded vki_shmid_ds, as exported via sysctl. */
++struct vki_shmid_ds_sysctl {
++ struct vki_ipc_perm_sysctl shm_perm;
++ vki_uint64_t shm_segsz;
++ vki_pid_t shm_lpid;
++ vki_pid_t shm_cpid;
++ vki_time_t shm_atime;
++ vki_time_t shm_dtime;
++ vki_time_t shm_ctime;
++ vki_uint32_t shm_nattch;
++};
++
++/* sysctl KERN_SYSVIPC reply: header followed by a trailing array
++ (declared [1], historical trailing-array idiom). */
++struct vki_shm_sysctl_info {
++ struct vki_shminfo shminfo;
++ struct vki_shmid_ds_sysctl shmids[1];
++};
++
++//----------------------------------------------------------------------
++// From sys/ioccom.h
++//----------------------------------------------------------------------
++
++/*
++ * BSD ioctl request encoding (NetBSD <sys/ioccom.h>): the 32-bit request
++ * word packs direction (top 3 bits), parameter length (13 bits), group
++ * (8 bits) and command number (8 bits).
++ */
++#define VKI_IOCPARM_MASK 0x1fff /* parameter length, at most 13 bits */
++#define VKI_IOCPARM_SHIFT 16
++#define VKI_IOCGROUP_SHIFT 8
++#define VKI_IOCPARM_LEN(x) (((x) >> VKI_IOCPARM_SHIFT) & VKI_IOCPARM_MASK)
++#define VKI_IOCBASECMD(x) ((x) & ~(VKI_IOCPARM_MASK << VKI_IOCPARM_SHIFT))
++#define VKI_IOCGROUP(x) (((x) >> VKI_IOCGROUP_SHIFT) & 0xff)
++
++#define VKI_IOCPARM_MAX VKI_NBPG /* max size of ioctl args, mult. of NBPG */
++ /* no parameters */
++#define VKI_IOC_VOID (unsigned long)0x20000000
++ /* copy parameters out */
++#define VKI_IOC_OUT (unsigned long)0x40000000
++ /* copy parameters in */
++#define VKI_IOC_IN (unsigned long)0x80000000
++ /* copy parameters in and out */
++#define VKI_IOC_INOUT (VKI_IOC_IN|VKI_IOC_OUT)
++ /* mask for IN/OUT/VOID */
++#define VKI_IOC_DIRMASK (unsigned long)0xe0000000
++
++/* Compose a request word from direction, group, number and arg length. */
++#define VKI__IOC(inout, group, num, len) \
++ ((inout) | (((len) & VKI_IOCPARM_MASK) << VKI_IOCPARM_SHIFT) | \
++ ((group) << VKI_IOCGROUP_SHIFT) | (num))
++#define VKI__IO(g,n) VKI__IOC(VKI_IOC_VOID, (g), (n), 0)
++#define VKI__IOR(g,n,t) VKI__IOC(VKI_IOC_OUT, (g), (n), sizeof(t))
++#define VKI__IOW(g,n,t) VKI__IOC(VKI_IOC_IN, (g), (n), sizeof(t))
++/* this should be _IORW, but stdio got there first */
++#define VKI__IOWR(g,n,t) VKI__IOC(VKI_IOC_INOUT, (g), (n), sizeof(t))
++
++//----------------------------------------------------------------------
++// From sys/fd_set.h
++//----------------------------------------------------------------------
++
++typedef vki_uint32_t vki___fd_mask; /* one word of the fd bitmap */
++
++#define VKI___NFDBITS (32) /* bits per vki___fd_mask word */
++#define VKI___NFDSHIFT (5) /* log2(VKI___NFDBITS) */
++#define VKI___NFDMASK (VKI___NFDBITS - 1)
++
++#ifndef VKI_FD_SETSIZE
++#define VKI_FD_SETSIZE 256
++#endif
++
++/* Number of mask words / bytes needed to hold a fds. */
++#define VKI___NFD_LEN(a) (((a) + (VKI___NFDBITS - 1)) / VKI___NFDBITS)
++#define VKI___NFD_SIZE VKI___NFD_LEN(VKI_FD_SETSIZE)
++#define VKI___NFD_BYTES(a) (VKI___NFD_LEN(a) * sizeof(vki___fd_mask))
++
++/* Bitmap of VKI_FD_SETSIZE descriptors, one bit per fd. */
++typedef struct vki_fd_set {
++ vki___fd_mask fds_bits[VKI___NFD_SIZE];
++} vki_fd_set;
++
++/* Set/clear/test fd n: word (n >> VKI___NFDSHIFT), bit (n & VKI___NFDMASK). */
++#define VKI_FD_SET(n, p) \
++ ((p)->fds_bits[(unsigned)(n) >> VKI___NFDSHIFT] |= (1U << ((n) & VKI___NFDMASK)))
++#define VKI_FD_CLR(n, p) \
++ ((p)->fds_bits[(unsigned)(n) >> VKI___NFDSHIFT] &= ~(1U << ((n) & VKI___NFDMASK)))
++#define VKI_FD_ISSET(n, p) \
++ ((p)->fds_bits[(unsigned)(n) >> VKI___NFDSHIFT] & (1U << ((n) & VKI___NFDMASK)))
++
++#define vki_fd_mask vki___fd_mask
++#define VKI_NFDBITS VKI___NFDBITS
++
++//----------------------------------------------------------------------
++// From sys/times.h
++//----------------------------------------------------------------------
++
++/* CPU time accounting, as returned by times(3). */
++struct vki_tms {
++ vki_clock_t tms_utime; /* User CPU time */
++ vki_clock_t tms_stime; /* System CPU time */
++ vki_clock_t tms_cutime; /* User CPU time of terminated child procs */
++ vki_clock_t tms_cstime; /* System CPU time of terminated child procs */
++};
++
++//----------------------------------------------------------------------
++// From uvm/uvm_param.h
++//----------------------------------------------------------------------
++
++#define VKI_VM_METER 1 /* struct vmmeter */
++#define VKI_VM_LOADAVG 2 /* struct loadavg */
++#define VKI_VM_UVMEXP 3 /* struct uvmexp */
++#define VKI_VM_NKMEMPAGES 4 /* kmem_map pages */
++#define VKI_VM_UVMEXP2 5 /* struct uvmexp_sysctl */
++#define VKI_VM_ANONMIN 6
++#define VKI_VM_EXECMIN 7
++#define VKI_VM_FILEMIN 8
++#define VKI_VM_MAXSLP 9
++#define VKI_VM_USPACE 10
++#define VKI_VM_ANONMAX 11
++#define VKI_VM_EXECMAX 12
++#define VKI_VM_FILEMAX 13
++#define VKI_VM_MINADDRESS 14
++#define VKI_VM_MAXADDRESS 15
++#define VKI_VM_PROC 16 /* process information */
++#define VKI_VM_GUARD_SIZE 17 /* guard size for main thread */
++#define VKI_VM_THREAD_GUARD_SIZE 18 /* default guard size for new threads */
++
++#define VKI_VM_PROC_MAP 1 /* struct kinfo_vmentry */
++
++typedef unsigned int vki_uvm_flag_t;
++
++typedef int vki_vm_inherit_t; /* XXX: inheritance codes */
++typedef vki_off_t vki_voff_t; /* XXX: offset within a uvm_object */
++typedef vki_voff_t vki_pgoff_t; /* XXX: number of pages within a uvm object */
++
++//----------------------------------------------------------------------
++// From sys/sysctl.h
++//----------------------------------------------------------------------
++
++#define VKI_CTL_MAXNAME 12 /* largest number of components supported */
++#define VKI_SYSCTL_NAMELEN 32 /* longest name allowed for a node */
++
++#define VKI_CREATE_BASE (1024) /* start of dynamic mib allocation */
++#define VKI_SYSCTL_DEFSIZE 8 /* initial size of a child set */
++
++/* One entry in a sysctl name-to-type mapping table. */
++struct vki_ctlname {
++ const char *ctl_name; /* subsystem name */
++ int ctl_type; /* type of name */
++};
++
++#define VKI_CTLTYPE_NODE 1 /* name is a node */
++#define VKI_CTLTYPE_INT 2 /* name describes an integer */
++#define VKI_CTLTYPE_STRING 3 /* name describes a string */
++#define VKI_CTLTYPE_QUAD 4 /* name describes a 64-bit number */
++#define VKI_CTLTYPE_STRUCT 5 /* name describes a structure */
++#define VKI_CTLTYPE_BOOL 6 /* name describes a bool */
++
++#ifdef _LP64
++#define VKI_CTLTYPE_LONG VKI_CTLTYPE_QUAD
++#else
++#define VKI_CTLTYPE_LONG VKI_CTLTYPE_INT
++#endif
++
++#define VKI_CTLFLAG_READONLY 0x00000000
++
++#define VKI_CTLFLAG_READWRITE 0x00000070
++#define VKI_CTLFLAG_ANYWRITE 0x00000080
++#define VKI_CTLFLAG_PRIVATE 0x00000100
++#define VKI_CTLFLAG_PERMANENT 0x00000200
++#define VKI_CTLFLAG_OWNDATA 0x00000400
++#define VKI_CTLFLAG_IMMEDIATE 0x00000800
++#define VKI_CTLFLAG_HEX 0x00001000
++#define VKI_CTLFLAG_ROOT 0x00002000
++#define VKI_CTLFLAG_ANYNUMBER 0x00004000
++#define VKI_CTLFLAG_HIDDEN 0x00008000
++#define VKI_CTLFLAG_ALIAS 0x00010000
++#define VKI_CTLFLAG_MMAP 0x00020000
++#define VKI_CTLFLAG_OWNDESC 0x00040000
++#define VKI_CTLFLAG_UNSIGNED 0x00080000
++
++#define VKI_SYSCTL_VERS_MASK 0xff000000
++#define VKI_SYSCTL_VERS_0 0x00000000
++#define VKI_SYSCTL_VERS_1 0x01000000
++#define VKI_SYSCTL_VERSION VKI_SYSCTL_VERS_1
++#define VKI_SYSCTL_VERS(f) ((f) & VKI_SYSCTL_VERS_MASK)
++
++#define VKI_SYSCTL_USERFLAGS (VKI_CTLFLAG_READWRITE|\
++ VKI_CTLFLAG_ANYWRITE|\
++ VKI_CTLFLAG_PRIVATE|\
++ VKI_CTLFLAG_OWNDATA|\
++ VKI_CTLFLAG_IMMEDIATE|\
++ VKI_CTLFLAG_HEX|\
++ VKI_CTLFLAG_HIDDEN)
++
++#define VKI_SYSCTL_TYPEMASK 0x0000000f
++#define VKI_SYSCTL_TYPE(x) ((x) & VKI_SYSCTL_TYPEMASK)
++#define VKI_SYSCTL_FLAGMASK 0x00fffff0
++#define VKI_SYSCTL_FLAGS(x) ((x) & VKI_SYSCTL_FLAGMASK)
++
++#define VKI_CTL_EOL (-1) /* end of createv/destroyv list */
++#define VKI_CTL_QUERY (-2) /* enumerates children of a node */
++#define VKI_CTL_CREATE (-3) /* node create request */
++#define VKI_CTL_CREATESYM (-4) /* node create request with symbol */
++#define VKI_CTL_DESTROY (-5) /* node destroy request */
++#define VKI_CTL_MMAP (-6) /* mmap request */
++#define VKI_CTL_DESCRIBE (-7) /* get node descriptions */
++
++#define VKI_CTL_UNSPEC 0 /* unused */
++#define VKI_CTL_KERN 1 /* "high kernel": proc, limits */
++#define VKI_CTL_VM 2 /* virtual memory */
++#define VKI_CTL_VFS 3 /* file system, mount type is next */
++#define VKI_CTL_NET 4 /* network, see socket.h */
++#define VKI_CTL_DEBUG 5 /* debugging parameters */
++#define VKI_CTL_HW 6 /* generic CPU/io */
++#define VKI_CTL_MACHDEP 7 /* machine dependent */
++#define VKI_CTL_USER 8 /* user-level */
++#define VKI_CTL_DDB 9 /* in-kernel debugger */
++#define VKI_CTL_PROC 10 /* per-proc attr */
++#define VKI_CTL_VENDOR 11 /* vendor-specific data */
++#define VKI_CTL_EMUL 12 /* emulation-specific data */
++#define VKI_CTL_SECURITY 13 /* security */
++
++#define VKI_KERN_OSTYPE 1 /* string: system version */
++#define VKI_KERN_OSRELEASE 2 /* string: system release */
++#define VKI_KERN_OSREV 3 /* int: system revision */
++#define VKI_KERN_VERSION 4 /* string: compile time info */
++#define VKI_KERN_MAXVNODES 5 /* int: max vnodes */
++#define VKI_KERN_MAXPROC 6 /* int: max processes */
++#define VKI_KERN_MAXFILES 7 /* int: max open files */
++#define VKI_KERN_ARGMAX 8 /* int: max arguments to exec */
++#define VKI_KERN_SECURELVL 9 /* int: system security level */
++#define VKI_KERN_HOSTNAME 10 /* string: hostname */
++#define VKI_KERN_HOSTID 11 /* int: host identifier */
++#define VKI_KERN_CLOCKRATE 12 /* struct: struct clockinfo */
++#define VKI_KERN_VNODE 13 /* struct: vnode structures */
++#define VKI_KERN_PROC 14 /* struct: process entries */
++#define VKI_KERN_FILE 15 /* struct: file entries */
++#define VKI_KERN_PROF 16 /* node: kernel profiling info */
++#define VKI_KERN_POSIX1 17 /* int: POSIX.1 version */
++#define VKI_KERN_NGROUPS 18 /* int: # of supplemental group ids */
++#define VKI_KERN_JOB_CONTROL 19 /* int: is job control available */
++#define VKI_KERN_SAVED_IDS 20 /* int: saved set-user/group-ID */
++#define VKI_KERN_OBOOTTIME 21 /* struct: time kernel was booted */
++#define VKI_KERN_DOMAINNAME 22 /* string: (YP) domainname */
++#define VKI_KERN_MAXPARTITIONS 23 /* int: number of partitions/disk */
++#define VKI_KERN_RAWPARTITION 24 /* int: raw partition number */
++#define VKI_KERN_NTPTIME 25 /* struct: extended-precision time */
++#define VKI_KERN_TIMEX 26 /* struct: ntp timekeeping state */
++#define VKI_KERN_AUTONICETIME 27 /* int: proc time before autonice */
++#define VKI_KERN_AUTONICEVAL 28 /* int: auto nice value */
++#define VKI_KERN_RTC_OFFSET 29 /* int: offset of rtc from gmt */
++#define VKI_KERN_ROOT_DEVICE 30 /* string: root device */
++#define VKI_KERN_MSGBUFSIZE 31 /* int: max # of chars in msg buffer */
++#define VKI_KERN_FSYNC 32 /* int: file synchronization support */
++#define VKI_KERN_OLDSYSVMSG 33 /* old: SysV message queue support */
++#define VKI_KERN_OLDSYSVSEM 34 /* old: SysV semaphore support */
++#define VKI_KERN_OLDSYSVSHM 35 /* old: SysV shared memory support */
++#define VKI_KERN_OLDSHORTCORENAME 36 /* old, unimplemented */
++#define VKI_KERN_SYNCHRONIZED_IO 37 /* int: POSIX synchronized I/O */
++#define VKI_KERN_IOV_MAX 38 /* int: max iovec's for readv(2) etc. */
++#define VKI_KERN_MBUF 39 /* node: mbuf parameters */
++#define VKI_KERN_MAPPED_FILES 40 /* int: POSIX memory mapped files */
++#define VKI_KERN_MEMLOCK 41 /* int: POSIX memory locking */
++#define VKI_KERN_MEMLOCK_RANGE 42 /* int: POSIX memory range locking */
++#define VKI_KERN_MEMORY_PROTECTION 43 /* int: POSIX memory protections */
++#define VKI_KERN_LOGIN_NAME_MAX 44 /* int: max length login name + NUL */
++#define VKI_KERN_DEFCORENAME 45 /* old: sort core name format */
++#define VKI_KERN_LOGSIGEXIT 46 /* int: log signaled processes */
++#define VKI_KERN_PROC2 47 /* struct: process entries */
++#define VKI_KERN_PROC_ARGS 48 /* struct: process argv/env */
++#define VKI_KERN_FSCALE 49 /* int: fixpt FSCALE */
++#define VKI_KERN_CCPU 50 /* old: fixpt ccpu */
++#define VKI_KERN_CP_TIME 51 /* struct: CPU time counters */
++#define VKI_KERN_OLDSYSVIPC_INFO 52 /* old: number of valid kern ids */
++#define VKI_KERN_MSGBUF 53 /* kernel message buffer */
++#define VKI_KERN_CONSDEV 54 /* dev_t: console terminal device */
++#define VKI_KERN_MAXPTYS 55 /* int: maximum number of ptys */
++#define VKI_KERN_PIPE 56 /* node: pipe limits */
++#define VKI_KERN_MAXPHYS 57 /* int: kernel value of MAXPHYS */
++#define VKI_KERN_SBMAX 58 /* int: max socket buffer size */
++#define VKI_KERN_TKSTAT 59 /* tty in/out counters */
++#define VKI_KERN_MONOTONIC_CLOCK 60 /* int: POSIX monotonic clock */
++#define VKI_KERN_URND 61 /* int: random integer from urandom */
++#define VKI_KERN_LABELSECTOR 62 /* int: disklabel sector */
++#define VKI_KERN_LABELOFFSET 63 /* int: offset of label within sector */
++#define VKI_KERN_LWP 64 /* struct: lwp entries */
++#define VKI_KERN_FORKFSLEEP 65 /* int: sleep length on failed fork */
++#define VKI_KERN_POSIX_THREADS 66 /* int: POSIX Threads option */
++#define VKI_KERN_POSIX_SEMAPHORES 67 /* int: POSIX Semaphores option */
++#define VKI_KERN_POSIX_BARRIERS 68 /* int: POSIX Barriers option */
++#define VKI_KERN_POSIX_TIMERS 69 /* int: POSIX Timers option */
++#define VKI_KERN_POSIX_SPIN_LOCKS 70 /* int: POSIX Spin Locks option */
++#define VKI_KERN_POSIX_READER_WRITER_LOCKS 71 /* int: POSIX R/W Locks option */
++#define VKI_KERN_DUMP_ON_PANIC 72 /* int: dump on panic */
++#define VKI_KERN_SOMAXKVA 73 /* int: max socket kernel virtual mem */
++#define VKI_KERN_ROOT_PARTITION 74 /* int: root partition */
++#define VKI_KERN_DRIVERS 75 /* struct: driver names and majors #s */
++#define VKI_KERN_BUF 76 /* struct: buffers */
++#define VKI_KERN_FILE2 77 /* struct: file entries */
++#define VKI_KERN_VERIEXEC 78 /* node: verified exec */
++#define VKI_KERN_CP_ID 79 /* struct: cpu id numbers */
++#define VKI_KERN_HARDCLOCK_TICKS 80 /* int: number of hardclock ticks */
++#define VKI_KERN_ARND 81 /* void *buf, size_t siz random */
++#define VKI_KERN_SYSVIPC 82 /* node: SysV IPC parameters */
++#define VKI_KERN_BOOTTIME 83 /* struct: time kernel was booted */
++#define VKI_KERN_EVCNT 84 /* struct: evcnts */
++
++/* Clock rates, as returned by sysctl KERN_CLOCKRATE. */
++struct vki_clockinfo {
++ int hz; /* clock frequency */
++ int tick; /* micro-seconds per hz tick */
++ int tickadj; /* clock skew rate for adjtime() */
++ int stathz; /* statistics clock frequency */
++ int profhz; /* profiling clock frequency */
++};
++
++#define VKI_KERN_PROC_ALL 0 /* everything */
++#define VKI_KERN_PROC_PID 1 /* by process id */
++#define VKI_KERN_PROC_PGRP 2 /* by process group id */
++#define VKI_KERN_PROC_SESSION 3 /* by session of pid */
++#define VKI_KERN_PROC_TTY 4 /* by controlling tty */
++#define VKI_KERN_PROC_UID 5 /* by effective uid */
++#define VKI_KERN_PROC_RUID 6 /* by real uid */
++#define VKI_KERN_PROC_GID 7 /* by effective gid */
++#define VKI_KERN_PROC_RGID 8 /* by real gid */
++
++#define VKI_KERN_PROC_TTY_NODEV VKI_NODEV /* no controlling tty */
++#define VKI_KERN_PROC_TTY_REVOKE ((vki_dev_t)-2) /* revoked tty */
++
++/* Process credentials, as exported in kernel-info (KERN_PROC*) replies. */
++struct vki_ki_pcred {
++ void *p_pad;
++ vki_uid_t p_ruid; /* Real user id */
++ vki_uid_t p_svuid; /* Saved effective user id */
++ vki_gid_t p_rgid; /* Real group id */
++ vki_gid_t p_svgid; /* Saved effective group id */
++ int p_refcnt; /* Number of references */
++};
++
++/* Effective credentials, as exported in kernel-info replies. */
++struct vki_ki_ucred {
++ vki_uint32_t cr_ref; /* reference count */
++ vki_uid_t cr_uid; /* effective user id */
++ vki_gid_t cr_gid; /* effective group id */
++ vki_uint32_t cr_ngroups; /* number of groups */
++ vki_gid_t cr_groups[VKI_NGROUPS]; /* groups */
++};
++
++/* Pointer <-> 64-bit conversions for pointer fields in sysctl structs. */
++#define VKI_PTRTOUINT64(p) ((vki_uint64_t)(vki_uintptr_t)(p))
++#define VKI_UINT64TOPTR(u) ((void *)(vki_uintptr_t)(u))
++
++/* Fixed field sizes used by struct vki_kinfo_proc2 below. */
++#define VKI_KI_NGROUPS 16
++#define VKI_KI_MAXCOMLEN 24 /* extra for 8 byte alignment */
++#define VKI_KI_WMESGLEN 8
++#define VKI_KI_MAXLOGNAME 24 /* extra for 8 byte alignment */
++#define VKI_KI_MAXEMULLEN 16
++#define VKI_KI_LNAMELEN 20 /* extra 4 for alignment */
++
++#define VKI_KI_NOCPU (~(vki_uint64_t)0)
++
++/* 128-bit signal set as exported by sysctl (4 x 32-bit words). */
++typedef struct {
++ vki_uint32_t __bits[4];
++} vki_ki_sigset_t;
++
++/*
++ * Per-process record returned by sysctl KERN_PROC2; layout mirrors
++ * NetBSD's struct kinfo_proc2 and must not be changed. Kernel pointers
++ * are exported as vki_uint64_t (see VKI_PTRTOUINT64); the uppercase
++ * type in each comment is the original kernel field type.
++ */
++struct vki_kinfo_proc2 {
++ vki_uint64_t p_forw; /* PTR: linked run/sleep queue. */
++ vki_uint64_t p_back;
++ vki_uint64_t p_paddr; /* PTR: address of proc */
++
++ vki_uint64_t p_addr; /* PTR: Kernel virtual addr of u-area */
++ vki_uint64_t p_fd; /* PTR: Ptr to open files structure. */
++ vki_uint64_t p_cwdi; /* PTR: cdir/rdir/cmask info */
++ vki_uint64_t p_stats; /* PTR: Accounting/statistics */
++ vki_uint64_t p_limit; /* PTR: Process limits. */
++ vki_uint64_t p_vmspace; /* PTR: Address space. */
++ vki_uint64_t p_sigacts; /* PTR: Signal actions, state */
++ vki_uint64_t p_sess; /* PTR: session pointer */
++ vki_uint64_t p_tsess; /* PTR: tty session pointer */
++ vki_uint64_t p_ru; /* PTR: Exit information. XXX */
++
++ vki_int32_t p_eflag; /* LONG: extra kinfo_proc2 flags */
++#define VKI_EPROC_CTTY 0x01 /* controlling tty vnode active */
++#define VKI_EPROC_SLEADER 0x02 /* session leader */
++ vki_int32_t p_exitsig; /* INT: signal to send to parent on exit */
++ vki_int32_t p_flag; /* INT: P_* flags. */
++
++ vki_int32_t p_pid; /* PID_T: Process identifier. */
++ vki_int32_t p_ppid; /* PID_T: Parent process id */
++ vki_int32_t p_sid; /* PID_T: session id */
++ vki_int32_t p__pgid; /* PID_T: process group id */
++ /* XXX: <sys/proc.h> hijacks p_pgid */
++ vki_int32_t p_tpgid; /* PID_T: tty process group id */
++ vki_uint32_t p_uid; /* UID_T: effective user id */
++ vki_uint32_t p_ruid; /* UID_T: real user id */
++ vki_uint32_t p_gid; /* GID_T: effective group id */
++ vki_uint32_t p_rgid; /* GID_T: real group id */
++
++ vki_uint32_t p_groups[VKI_KI_NGROUPS]; /* GID_T: groups */
++ vki_int16_t p_ngroups; /* SHORT: number of groups */
++
++ vki_int16_t p_jobc; /* SHORT: job control counter */
++ vki_uint32_t p_tdev; /* XXX: DEV_T: controlling tty dev */
++
++ vki_uint32_t p_estcpu; /* U_INT: Time averaged value of p_cpticks. */
++ vki_uint32_t p_rtime_sec; /* STRUCT TIMEVAL: Real time. */
++ vki_uint32_t p_rtime_usec; /* STRUCT TIMEVAL: Real time. */
++ vki_int32_t p_cpticks; /* INT: Ticks of CPU time. */
++ vki_uint32_t p_pctcpu; /* FIXPT_T: %cpu for this process during p_swtime */
++ vki_uint32_t p_swtime; /* U_INT: Time swapped in or out. */
++ vki_uint32_t p_slptime; /* U_INT: Time since last blocked. */
++ vki_int32_t p_schedflags; /* INT: PSCHED_* flags */
++
++ vki_uint64_t p_uticks; /* U_QUAD_T: Statclock hits in user mode. */
++ vki_uint64_t p_sticks; /* U_QUAD_T: Statclock hits in system mode. */
++ vki_uint64_t p_iticks; /* U_QUAD_T: Statclock hits processing intr. */
++
++ vki_uint64_t p_tracep; /* PTR: Trace to vnode or file */
++ vki_int32_t p_traceflag; /* INT: Kernel trace points. */
++
++ vki_int32_t p_holdcnt; /* INT: If non-zero, don't swap. */
++
++ vki_ki_sigset_t p_siglist; /* SIGSET_T: Signals arrived but not delivered. */
++ vki_ki_sigset_t p_sigmask; /* SIGSET_T: Current signal mask. */
++ vki_ki_sigset_t p_sigignore; /* SIGSET_T: Signals being ignored. */
++ vki_ki_sigset_t p_sigcatch; /* SIGSET_T: Signals being caught by user. */
++
++ vki_int8_t p_stat; /* CHAR: S* process status (from LWP). */
++ vki_uint8_t p_priority; /* U_CHAR: Process priority. */
++ vki_uint8_t p_usrpri; /* U_CHAR: User-priority based on p_cpu and p_nice. */
++ vki_uint8_t p_nice; /* U_CHAR: Process "nice" value. */
++
++ vki_uint16_t p_xstat; /* U_SHORT: Exit status for wait; also stop signal. */
++ vki_uint16_t p_acflag; /* U_SHORT: Accounting flags. */
++
++ char p_comm[VKI_KI_MAXCOMLEN];
++
++ char p_wmesg[VKI_KI_WMESGLEN]; /* wchan message */
++ vki_uint64_t p_wchan; /* PTR: sleep address. */
++
++ char p_login[VKI_KI_MAXLOGNAME]; /* setlogin() name */
++
++ vki_int32_t p_vm_rssize; /* SEGSZ_T: current resident set size in pages */
++ vki_int32_t p_vm_tsize; /* SEGSZ_T: text size (pages) */
++ vki_int32_t p_vm_dsize; /* SEGSZ_T: data size (pages) */
++ vki_int32_t p_vm_ssize; /* SEGSZ_T: stack size (pages) */
++ vki_int64_t p_uvalid; /* CHAR: following p_u* parameters are valid */
++ /* XXX 64 bits for alignment */
++ vki_uint32_t p_ustart_sec; /* STRUCT TIMEVAL: starting time. */
++ vki_uint32_t p_ustart_usec; /* STRUCT TIMEVAL: starting time. */
++
++ vki_uint32_t p_uutime_sec; /* STRUCT TIMEVAL: user time. */
++ vki_uint32_t p_uutime_usec; /* STRUCT TIMEVAL: user time. */
++ vki_uint32_t p_ustime_sec; /* STRUCT TIMEVAL: system time. */
++ vki_uint32_t p_ustime_usec; /* STRUCT TIMEVAL: system time. */
++
++ vki_uint64_t p_uru_maxrss; /* LONG: max resident set size. */
++ vki_uint64_t p_uru_ixrss; /* LONG: integral shared memory size. */
++ vki_uint64_t p_uru_idrss; /* LONG: integral unshared data ". */
++ vki_uint64_t p_uru_isrss; /* LONG: integral unshared stack ". */
++ vki_uint64_t p_uru_minflt; /* LONG: page reclaims. */
++ vki_uint64_t p_uru_majflt; /* LONG: page faults. */
++ vki_uint64_t p_uru_nswap; /* LONG: swaps. */
++ vki_uint64_t p_uru_inblock; /* LONG: block input operations. */
++ vki_uint64_t p_uru_oublock; /* LONG: block output operations. */
++ vki_uint64_t p_uru_msgsnd; /* LONG: messages sent. */
++ vki_uint64_t p_uru_msgrcv; /* LONG: messages received. */
++ vki_uint64_t p_uru_nsignals; /* LONG: signals received. */
++ vki_uint64_t p_uru_nvcsw; /* LONG: voluntary context switches. */
++ vki_uint64_t p_uru_nivcsw; /* LONG: involuntary ". */
++
++ vki_uint32_t p_uctime_sec; /* STRUCT TIMEVAL: child u+s time. */
++ vki_uint32_t p_uctime_usec; /* STRUCT TIMEVAL: child u+s time. */
++ vki_uint64_t p_cpuid; /* LONG: CPU id */
++ vki_uint64_t p_realflag; /* INT: P_* flags (not including LWPs). */
++ vki_uint64_t p_nlwps; /* LONG: Number of LWPs */
++ vki_uint64_t p_nrlwps; /* LONG: Number of running LWPs */
++ vki_uint64_t p_realstat; /* LONG: non-LWP process status */
++ vki_uint32_t p_svuid; /* UID_T: saved user id */
++ vki_uint32_t p_svgid; /* GID_T: saved group id */
++ char p_ename[VKI_KI_MAXEMULLEN]; /* emulation name */
++ vki_int64_t p_vm_vsize; /* SEGSZ_T: total map size (pages) */
++ vki_int64_t p_vm_msize; /* SEGSZ_T: stack-adjusted map size (pages) */
++};
++
++#define VKI_P_ADVLOCK 0x00000001
++#define VKI_P_CONTROLT 0x00000002
++#define VKI_L_INMEM 0x00000004
++#define VKI_P_INMEM /* 0x00000004 */ VKI_L_INMEM
++#define VKI_P_NOCLDSTOP 0x00000008
++#define VKI_P_PPWAIT 0x00000010
++#define VKI_P_PROFIL 0x00000020
++#define VKI_L_SELECT 0x00000040
++#define VKI_P_SELECT /* 0x00000040 */ VKI_L_SELECT
++#define VKI_L_SINTR 0x00000080
++#define VKI_P_SINTR /* 0x00000080 */ VKI_L_SINTR
++#define VKI_P_SUGID 0x00000100
++#define VKI_L_SYSTEM 0x00000200
++#define VKI_P_SYSTEM /* 0x00000200 */ VKI_L_SYSTEM
++#define VKI_L_SA 0x00000400
++#define VKI_P_SA /* 0x00000400 */ VKI_L_SA
++#define VKI_P_TRACED 0x00000800
++#define VKI_P_WAITED 0x00001000
++#define VKI_P_WEXIT 0x00002000
++#define VKI_P_EXEC 0x00004000
++#define VKI_P_OWEUPC 0x00008000
++#define VKI_P_NOCLDWAIT 0x00020000
++#define VKI_P_32 0x00040000
++#define VKI_P_CLDSIGIGN 0x00080000
++#define VKI_P_SYSTRACE 0x00200000
++#define VKI_P_CHTRACED 0x00400000
++#define VKI_P_STOPFORK 0x00800000
++#define VKI_P_STOPEXEC 0x01000000
++#define VKI_P_STOPEXIT 0x02000000
++#define VKI_P_SYSCALL 0x04000000
++
++#define VKI_L_DETACHED 0x00800000
++
++struct vki_kinfo_lwp {
++ vki_uint64_t l_forw; /* PTR: linked run/sleep queue. */
++ vki_uint64_t l_back;
++ vki_uint64_t l_laddr; /* PTR: Address of LWP */
++ vki_uint64_t l_addr; /* PTR: Kernel virtual addr of u-area */
++ vki_int32_t l_lid; /* LWPID_T: LWP identifier */
++ vki_int32_t l_flag; /* INT: L_* flags. */
++ vki_uint32_t l_swtime; /* U_INT: Time swapped in or out. */
++ vki_uint32_t l_slptime; /* U_INT: Time since last blocked. */
++ vki_int32_t l_schedflags; /* INT: PSCHED_* flags */
++ vki_int32_t l_holdcnt; /* INT: If non-zero, don't swap. */
++ vki_uint8_t l_priority; /* U_CHAR: Process priority. */
++ vki_uint8_t l_usrpri; /* U_CHAR: User-priority based on l_cpu and p_nice. */
++ vki_int8_t l_stat; /* CHAR: S* process status. */
++ vki_int8_t l_pad1; /* fill out to 4-byte boundary */
++ vki_int32_t l_pad2; /* .. and then to an 8-byte boundary */
++ char l_wmesg[VKI_KI_WMESGLEN]; /* wchan message */
++ vki_uint64_t l_wchan; /* PTR: sleep address. */
++ vki_uint64_t l_cpuid; /* LONG: CPU id */
++ vki_uint32_t l_rtime_sec; /* STRUCT TIMEVAL: Real time. */
++ vki_uint32_t l_rtime_usec; /* STRUCT TIMEVAL: Real time. */
++ vki_uint32_t l_cpticks; /* INT: ticks during l_swtime */
++ vki_uint32_t l_pctcpu; /* FIXPT_T: cpu usage for ps */
++ vki_uint32_t l_pid; /* PID_T: process identifier */
++ char l_name[VKI_KI_LNAMELEN]; /* CHAR[]: name, may be empty */
++};
++
++#define VKI_KERN_PROC_ARGV 1 /* argv */
++#define VKI_KERN_PROC_NARGV 2 /* number of strings in above */
++#define VKI_KERN_PROC_ENV 3 /* environ */
++#define VKI_KERN_PROC_NENV 4 /* number of strings in above */
++#define VKI_KERN_PROC_PATHNAME 5 /* path to executable */
++
++#define VKI_KERN_SYSVIPC_INFO 1 /* struct: number of valid kern ids */
++#define VKI_KERN_SYSVIPC_MSG 2 /* int: SysV message queue support */
++#define VKI_KERN_SYSVIPC_SEM 3 /* int: SysV semaphore support */
++#define VKI_KERN_SYSVIPC_SHM 4 /* int: SysV shared memory support */
++#define VKI_KERN_SYSVIPC_SHMMAX 5 /* int: max shared memory segment size (bytes) */
++#define VKI_KERN_SYSVIPC_SHMMNI 6 /* int: max number of shared memory identifiers */
++#define VKI_KERN_SYSVIPC_SHMSEG 7 /* int: max shared memory segments per process */
++#define VKI_KERN_SYSVIPC_SHMMAXPGS 8 /* int: max amount of shared memory (pages) */
++#define VKI_KERN_SYSVIPC_SHMUSEPHYS 9 /* int: physical memory usage */
++
++#define VKI_KERN_SYSVIPC_MSG_INFO 4 /* msginfo and msgid_ds */
++#define VKI_KERN_SYSVIPC_SEM_INFO 5 /* seminfo and semid_ds */
++#define VKI_KERN_SYSVIPC_SHM_INFO 6 /* shminfo and shmid_ds */
++
++#define VKI_KERN_TKSTAT_NIN 1 /* total input character */
++#define VKI_KERN_TKSTAT_NOUT 2 /* total output character */
++#define VKI_KERN_TKSTAT_CANCC 3 /* canonical input character */
++#define VKI_KERN_TKSTAT_RAWCC 4 /* raw input character */
++
++struct vki_kinfo_drivers {
++ vki_devmajor_t d_cmajor;
++ vki_devmajor_t d_bmajor;
++ char d_name[24];
++};
++
++#define VKI_KERN_BUF_ALL 0 /* all buffers */
++
++struct vki_buf_sysctl {
++ vki_uint32_t b_flags; /* LONG: B_* flags */
++ vki_int32_t b_error; /* INT: Errno value */
++ vki_int32_t b_prio; /* INT: Hint for buffer queue discipline */
++ vki_uint32_t b_dev; /* DEV_T: Device associated with buffer */
++ vki_uint64_t b_bufsize; /* LONG: Allocated buffer size */
++ vki_uint64_t b_bcount; /* LONG: Valid bytes in buffer */
++ vki_uint64_t b_resid; /* LONG: Remaining I/O */
++ vki_uint64_t b_addr; /* CADDR_T: Memory, superblocks, indirect... */
++ vki_uint64_t b_blkno; /* DADDR_T: Underlying physical block number */
++ vki_uint64_t b_rawblkno; /* DADDR_T: Raw underlying physical block */
++ vki_uint64_t b_iodone; /* PTR: Function called upon completion */
++ vki_uint64_t b_proc; /* PTR: Associated proc if B_PHYS set */
++ vki_uint64_t b_vp; /* PTR: File vnode */
++ vki_uint64_t b_saveaddr; /* PTR: Original b_addr for physio */
++ vki_uint64_t b_lblkno; /* DADDR_T: Logical block number */
++};
++
++struct vki_kinfo_file {
++ vki_uint64_t ki_fileaddr; /* PTR: address of struct file */
++ vki_uint32_t ki_flag; /* INT: flags (see fcntl.h) */
++ vki_uint32_t ki_iflags; /* INT: internal flags */
++ vki_uint32_t ki_ftype; /* INT: descriptor type */
++ vki_uint32_t ki_count; /* UINT: reference count */
++ vki_uint32_t ki_msgcount; /* UINT: references from msg queue */
++ vki_uint32_t ki_usecount; /* INT: number active users */
++ vki_uint64_t ki_fucred; /* PTR: creds for descriptor */
++ vki_uint32_t ki_fuid; /* UID_T: descriptor credentials */
++ vki_uint32_t ki_fgid; /* GID_T: descriptor credentials */
++ vki_uint64_t ki_fops; /* PTR: address of fileops */
++ vki_uint64_t ki_foffset; /* OFF_T: offset */
++ vki_uint64_t ki_fdata; /* PTR: descriptor data */
++
++ /* vnode information to glue this file to something */
++ vki_uint64_t ki_vun; /* PTR: socket, specinfo, etc */
++ vki_uint64_t ki_vsize; /* OFF_T: size of file */
++ vki_uint32_t ki_vtype; /* ENUM: vnode type */
++ vki_uint32_t ki_vtag; /* ENUM: type of underlying data */
++ vki_uint64_t ki_vdata; /* PTR: private data for fs */
++ /* process information when retrieved via KERN_FILE_BYPID */
++ vki_uint32_t ki_pid; /* PID_T: process id */
++ vki_int32_t ki_fd; /* INT: descriptor number */
++ vki_uint32_t ki_ofileflags; /* CHAR: open file flags */
++ vki_uint32_t _ki_padto64bits;
++};
++
++#define VKI_KERN_FILE_BYFILE 1
++#define VKI_KERN_FILE_BYPID 2
++#define VKI_KERN_FILESLOP 10
++
++struct vki_evcnt_sysctl {
++ vki_uint64_t ev_count; /* current count */
++ vki_uint64_t ev_addr; /* kernel address of evcnt */
++ vki_uint64_t ev_parent; /* kernel address of parent */
++ vki_uint8_t ev_type; /* EVCNT_TRAP_* */
++ vki_uint8_t ev_grouplen; /* length of group with NUL */
++ vki_uint8_t ev_namelen; /* length of name with NUL */
++ vki_uint8_t ev_len; /* multiply by 8 */
++ /*
++ * Now the group and name strings follow (both include the trailing
++ * NUL). ev_name start at &ev_strings[ev_grouplen+1]
++ */
++ char ev_strings[0];
++};
++
++#define VKI_KERN_EVCNT_COUNT_ANY 0
++#define VKI_KERN_EVCNT_COUNT_NONZERO 1
++
++#define VKI_KVME_TYPE_NONE 0
++#define VKI_KVME_TYPE_OBJECT 1
++#define VKI_KVME_TYPE_VNODE 2
++#define VKI_KVME_TYPE_KERN 3
++#define VKI_KVME_TYPE_DEVICE 4
++#define VKI_KVME_TYPE_ANON 5
++#define VKI_KVME_TYPE_SUBMAP 6
++#define VKI_KVME_TYPE_UNKNOWN 255
++
++#define VKI_KVME_PROT_READ 0x00000001
++#define VKI_KVME_PROT_WRITE 0x00000002
++#define VKI_KVME_PROT_EXEC 0x00000004
++
++#define VKI_KVME_FLAG_COW 0x00000001
++#define VKI_KVME_FLAG_NEEDS_COPY 0x00000002
++#define VKI_KVME_FLAG_NOCOREDUMP 0x00000004
++#define VKI_KVME_FLAG_PAGEABLE 0x00000008
++#define VKI_KVME_FLAG_GROWS_UP 0x00000010
++#define VKI_KVME_FLAG_GROWS_DOWN 0x00000020
++
++
++struct vki_kinfo_vmentry {
++ vki_uint64_t kve_start; /* Starting address. */
++ vki_uint64_t kve_end; /* Finishing address. */
++ vki_uint64_t kve_offset; /* Mapping offset in object */
++
++ vki_uint32_t kve_type; /* Type of map entry. */
++ vki_uint32_t kve_flags; /* Flags on map entry. */
++ vki_uint32_t kve_count; /* Number of pages/entries */
++ vki_uint32_t kve_wired_count; /* Number of wired pages */
++
++ vki_uint32_t kve_advice; /* Advice */
++ vki_uint32_t kve_attributes; /* Map attribute */
++
++ vki_uint32_t kve_protection; /* Protection bitmask. */
++ vki_uint32_t kve_max_protection; /* Max protection bitmask */
++
++ vki_uint32_t kve_ref_count; /* VM obj ref count. */
++ vki_uint32_t kve_inheritance; /* Inheritance */
++
++ vki_uint64_t kve_vn_fileid; /* inode number if vnode */
++ vki_uint64_t kve_vn_size; /* File size. */
++ vki_uint64_t kve_vn_fsid; /* dev_t of vnode location */
++ vki_uint64_t kve_vn_rdev; /* Device id if device. */
++
++ vki_uint32_t kve_vn_type; /* Vnode type. */
++ vki_uint32_t kve_vn_mode; /* File mode. */
++
++ char kve_path[VKI_PATH_MAX]; /* Path to VM obj, if any. */
++};
++
++#define VKI_HW_MACHINE 1 /* string: machine class */
++#define VKI_HW_MODEL 2 /* string: specific machine model */
++#define VKI_HW_NCPU 3 /* int: number of cpus */
++#define VKI_HW_BYTEORDER 4 /* int: machine byte order */
++#define VKI_HW_PHYSMEM 5 /* int: total memory (bytes) */
++#define VKI_HW_USERMEM 6 /* int: non-kernel memory (bytes) */
++#define VKI_HW_PAGESIZE 7 /* int: software page size */
++#define VKI_HW_DISKNAMES 8 /* string: disk drive names */
++#define VKI_HW_IOSTATS 9 /* struct: iostats[] */
++#define VKI_HW_MACHINE_ARCH 10 /* string: machine architecture */
++#define VKI_HW_ALIGNBYTES 11 /* int: ALIGNBYTES for the kernel */
++#define VKI_HW_CNMAGIC 12 /* string: console magic sequence(s) */
++#define VKI_HW_PHYSMEM64 13 /* quad: total memory (bytes) */
++#define VKI_HW_USERMEM64 14 /* quad: non-kernel memory (bytes) */
++#define VKI_HW_IOSTATNAMES 15 /* string: iostat names */
++#define VKI_HW_NCPUONLINE 16 /* number CPUs online */
++
++/*
++ * CTL_USER definitions
++ */
++#define VKI_USER_CS_PATH 1 /* string: _CS_PATH */
++#define VKI_USER_BC_BASE_MAX 2 /* int: BC_BASE_MAX */
++#define VKI_USER_BC_DIM_MAX 3 /* int: BC_DIM_MAX */
++#define VKI_USER_BC_SCALE_MAX 4 /* int: BC_SCALE_MAX */
++#define VKI_USER_BC_STRING_MAX 5 /* int: BC_STRING_MAX */
++#define VKI_USER_COLL_WEIGHTS_MAX 6 /* int: COLL_WEIGHTS_MAX */
++#define VKI_USER_EXPR_NEST_MAX 7 /* int: EXPR_NEST_MAX */
++#define VKI_USER_LINE_MAX 8 /* int: LINE_MAX */
++#define VKI_USER_RE_DUP_MAX 9 /* int: RE_DUP_MAX */
++#define VKI_USER_POSIX2_VERSION 10 /* int: POSIX2_VERSION */
++#define VKI_USER_POSIX2_C_BIND 11 /* int: POSIX2_C_BIND */
++#define VKI_USER_POSIX2_C_DEV 12 /* int: POSIX2_C_DEV */
++#define VKI_USER_POSIX2_CHAR_TERM 13 /* int: POSIX2_CHAR_TERM */
++#define VKI_USER_POSIX2_FORT_DEV 14 /* int: POSIX2_FORT_DEV */
++#define VKI_USER_POSIX2_FORT_RUN 15 /* int: POSIX2_FORT_RUN */
++#define VKI_USER_POSIX2_LOCALEDEF 16 /* int: POSIX2_LOCALEDEF */
++#define VKI_USER_POSIX2_SW_DEV 17 /* int: POSIX2_SW_DEV */
++#define VKI_USER_POSIX2_UPE 18 /* int: POSIX2_UPE */
++#define VKI_USER_STREAM_MAX 19 /* int: POSIX2_STREAM_MAX */
++#define VKI_USER_TZNAME_MAX 20 /* int: _POSIX_TZNAME_MAX */
++#define VKI_USER_ATEXIT_MAX 21 /* int: {ATEXIT_MAX} */
++
++#define VKI_DDBCTL_RADIX 1 /* int: Input and output radix */
++#define VKI_DDBCTL_MAXOFF 2 /* int: max symbol offset */
++#define VKI_DDBCTL_MAXWIDTH 3 /* int: width of the display line */
++#define VKI_DDBCTL_LINES 4 /* int: number of display lines */
++#define VKI_DDBCTL_TABSTOPS 5 /* int: tab width */
++#define VKI_DDBCTL_ONPANIC 6 /* int: DDB on panic if non-zero */
++#define VKI_DDBCTL_FROMCONSOLE 7 /* int: DDB via console if non-zero */
++
++#define VKI_CTL_DEBUG_NAME 0 /* string: variable name */
++#define VKI_CTL_DEBUG_VALUE 1 /* int: variable value */
++
++#define VKI_PROC_CURPROC (~((vki_u_int)1 << 31))
++
++#define VKI_PROC_PID_CORENAME 1
++#define VKI_PROC_PID_LIMIT 2
++#define VKI_PROC_PID_STOPFORK 3
++#define VKI_PROC_PID_STOPEXEC 4
++#define VKI_PROC_PID_STOPEXIT 5
++#define VKI_PROC_PID_PAXFLAGS 6
++
++#define VKI_PROC_PID_LIMIT_CPU (VKI_RLIMIT_CPU+1)
++#define VKI_PROC_PID_LIMIT_FSIZE (VKI_RLIMIT_FSIZE+1)
++#define VKI_PROC_PID_LIMIT_DATA (VKI_RLIMIT_DATA+1)
++#define VKI_PROC_PID_LIMIT_STACK (VKI_RLIMIT_STACK+1)
++#define VKI_PROC_PID_LIMIT_CORE (VKI_RLIMIT_CORE+1)
++#define VKI_PROC_PID_LIMIT_RSS (VKI_RLIMIT_RSS+1)
++#define VKI_PROC_PID_LIMIT_MEMLOCK (VKI_RLIMIT_MEMLOCK+1)
++#define VKI_PROC_PID_LIMIT_NPROC (VKI_RLIMIT_NPROC+1)
++#define VKI_PROC_PID_LIMIT_NOFILE (VKI_RLIMIT_NOFILE+1)
++#define VKI_PROC_PID_LIMIT_SBSIZE (VKI_RLIMIT_SBSIZE+1)
++#define VKI_PROC_PID_LIMIT_AS (VKI_RLIMIT_AS+1)
++#define VKI_PROC_PID_LIMIT_NTHR (VKI_RLIMIT_NTHR+1)
++
++/* for each type, either hard or soft value */
++#define VKI_PROC_PID_LIMIT_TYPE_SOFT 1
++#define VKI_PROC_PID_LIMIT_TYPE_HARD 2
++
++/*
++ * Export PAX flag definitions to userland.
++ *
++ * XXX These are duplicated from sys/pax.h but that header is not
++ * XXX installed.
++ */
++#define VKI_CTL_PROC_PAXFLAGS_ASLR 0x01
++#define VKI_CTL_PROC_PAXFLAGS_MPROTECT 0x02
++#define VKI_CTL_PROC_PAXFLAGS_GUARD 0x04
++
++#define VKI_EMUL_LINUX 1
++#define VKI_EMUL_LINUX32 5
++
++#endif // __VKI_NETBSD_H
++
++/*--------------------------------------------------------------------*/
++/*--- end ---*/
++/*--------------------------------------------------------------------*/
diff --git a/valgrind-netbsd/patches/patch-include_vki_vki-scnums-netbsd.h b/valgrind-netbsd/patches/patch-include_vki_vki-scnums-netbsd.h
new file mode 100644
index 0000000000..a78f9589aa
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-include_vki_vki-scnums-netbsd.h
@@ -0,0 +1,471 @@
+$NetBSD$
+
+--- include/vki/vki-scnums-netbsd.h.orig 2019-03-28 09:26:43.803009544 +0000
++++ include/vki/vki-scnums-netbsd.h
+@@ -0,0 +1,466 @@
++
++/*
++ This file is part of Valgrind, a dynamic binary instrumentation
++ framework.
++
++ Copyright (C) 2000-2005 Julian Seward
++ jseward%acm.org@localhost
++
++ This program is free software; you can redistribute it and/or
++ modify it under the terms of the GNU General Public License as
++ published by the Free Software Foundation; either version 2 of the
++ License, or (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful, but
++ WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
++ 02111-1307, USA.
++
++ The GNU General Public License is contained in the file COPYING.
++*/
++
++#ifndef __VKI_UNISTD_NETBSD_H
++#define __VKI_UNISTD_NETBSD_H
++
++#define VG_NETBSD_SYSCALL_STD 0
++#define VG_NETBSD_SYSCALL0 1
++#define VG_NETBSD_SYSCALL198 2
++
++// From sys/syscall.h
++
++#define __NR_syscall 0
++#define __NR_exit 1
++#define __NR_fork 2
++#define __NR_read 3
++#define __NR_write 4
++#define __NR_open 5
++#define __NR_close 6
++#define __NR_compat_50_wait4 7
++#define __NR_compat_43_ocreat 8
++#define __NR_link 9
++#define __NR_unlink 10
++#define __NR_chdir 12
++#define __NR_fchdir 13
++#define __NR_compat_50_mknod 14
++#define __NR_chmod 15
++#define __NR_chown 16
++#define __NR_break 17
++#define __NR_compat_20_getfsstat 18
++#define __NR_compat_43_olseek 19
++#define __NR_getpid 20
++#define __NR_compat_40_mount 21
++#define __NR_unmount 22
++#define __NR_setuid 23
++#define __NR_getuid 24
++#define __NR_geteuid 25
++#define __NR_ptrace 26
++#define __NR_recvmsg 27
++#define __NR_sendmsg 28
++#define __NR_recvfrom 29
++#define __NR_accept 30
++#define __NR_getpeername 31
++#define __NR_getsockname 32
++#define __NR_access 33
++#define __NR_chflags 34
++#define __NR_fchflags 35
++#define __NR_sync 36
++#define __NR_kill 37
++#define __NR_compat_43_stat43 38
++#define __NR_getppid 39
++#define __NR_compat_43_lstat43 40
++#define __NR_dup 41
++#define __NR_pipe 42
++#define __NR_getegid 43
++#define __NR_profil 44
++#define __NR_ktrace 45
++#define __NR_compat_13_sigaction13 46
++#define __NR_getgid 47
++#define __NR_compat_13_sigprocmask13 48
++#define __NR___getlogin 49
++#define __NR___setlogin 50
++#define __NR_acct 51
++#define __NR_compat_13_sigpending13 52
++#define __NR_compat_13_sigaltstack13 53
++#define __NR_ioctl 54
++#define __NR_compat_12_oreboot 55
++#define __NR_revoke 56
++#define __NR_symlink 57
++#define __NR_readlink 58
++#define __NR_execve 59
++#define __NR_umask 60
++#define __NR_chroot 61
++#define __NR_compat_43_fstat43 62
++#define __NR_compat_43_ogetkerninfo 63
++#define __NR_compat_43_ogetpagesize 64
++#define __NR_compat_12_msync 65
++#define __NR_vfork 66
++#define __NR_compat_43_ommap 71
++#define __NR_vadvise 72
++#define __NR_munmap 73
++#define __NR_mprotect 74
++#define __NR_madvise 75
++#define __NR_mincore 78
++#define __NR_getgroups 79
++#define __NR_setgroups 80
++#define __NR_getpgrp 81
++#define __NR_setpgid 82
++#define __NR_compat_50_setitimer 83
++#define __NR_compat_43_owait 84
++#define __NR_compat_12_oswapon 85
++#define __NR_compat_50_getitimer 86
++#define __NR_compat_43_ogethostname 87
++#define __NR_compat_43_osethostname 88
++#define __NR_compat_43_ogetdtablesize 89
++#define __NR_dup2 90
++#define __NR_fcntl 92
++#define __NR_compat_50_select 93
++#define __NR_fsync 95
++#define __NR_setpriority 96
++#define __NR_compat_30_socket 97
++#define __NR_connect 98
++#define __NR_compat_43_oaccept 99
++#define __NR_getpriority 100
++#define __NR_compat_43_osend 101
++#define __NR_compat_43_orecv 102
++#define __NR_compat_13_sigreturn13 103
++#define __NR_bind 104
++#define __NR_setsockopt 105
++#define __NR_listen 106
++#define __NR_compat_43_osigvec 108
++#define __NR_compat_43_osigblock 109
++#define __NR_compat_43_osigsetmask 110
++#define __NR_compat_13_sigsuspend13 111
++#define __NR_compat_43_osigstack 112
++#define __NR_compat_43_orecvmsg 113
++#define __NR_compat_43_osendmsg 114
++#define __NR_compat_50_gettimeofday 116
++#define __NR_compat_50_getrusage 117
++#define __NR_getsockopt 118
++#define __NR_readv 120
++#define __NR_writev 121
++#define __NR_compat_50_settimeofday 122
++#define __NR_fchown 123
++#define __NR_fchmod 124
++#define __NR_compat_43_orecvfrom 125
++#define __NR_setreuid 126
++#define __NR_setregid 127
++#define __NR_rename 128
++#define __NR_compat_43_otruncate 129
++#define __NR_compat_43_oftruncate 130
++#define __NR_flock 131
++#define __NR_mkfifo 132
++#define __NR_sendto 133
++#define __NR_shutdown 134
++#define __NR_socketpair 135
++#define __NR_mkdir 136
++#define __NR_rmdir 137
++#define __NR_compat_50_utimes 138
++#define __NR_compat_50_adjtime 140
++#define __NR_compat_43_ogetpeername 141
++#define __NR_compat_43_ogethostid 142
++#define __NR_compat_43_osethostid 143
++#define __NR_compat_43_ogetrlimit 144
++#define __NR_compat_43_osetrlimit 145
++#define __NR_compat_43_okillpg 146
++#define __NR_setsid 147
++#define __NR_compat_50_quotactl 148
++#define __NR_compat_43_oquota 149
++#define __NR_compat_43_ogetsockname 150
++#define __NR_nfssvc 155
++#define __NR_compat_43_ogetdirentries 156
++#define __NR_compat_20_statfs 157
++#define __NR_compat_20_fstatfs 158
++#define __NR_compat_30_getfh 161
++#define __NR_compat_09_ogetdomainname 162
++#define __NR_compat_09_osetdomainname 163
++#define __NR_compat_09_ouname 164
++#define __NR_sysarch 165
++#define __NR_compat_10_osemsys 169
++#define __NR_compat_10_omsgsys 170
++#define __NR_compat_10_oshmsys 171
++#define __NR_pread 173
++#define __NR_pwrite 174
++#define __NR_compat_30_ntp_gettime 175
++#define __NR_ntp_adjtime 176
++#define __NR_setgid 181
++#define __NR_setegid 182
++#define __NR_seteuid 183
++#define __NR_lfs_bmapv 184
++#define __NR_lfs_markv 185
++#define __NR_lfs_segclean 186
++#define __NR_compat_50_lfs_segwait 187
++#define __NR_compat_12_stat12 188
++#define __NR_compat_12_fstat12 189
++#define __NR_compat_12_lstat12 190
++#define __NR_pathconf 191
++#define __NR_fpathconf 192
++#define __NR_getsockopt2 193
++#define __NR_getrlimit 194
++#define __NR_setrlimit 195
++#define __NR_compat_12_getdirentries 196
++#define __NR_mmap 197
++#define __NR___syscall 198
++#define __NR_lseek 199
++#define __NR_truncate 200
++#define __NR_ftruncate 201
++#define __NR___sysctl 202
++#define __NR_mlock 203
++#define __NR_munlock 204
++#define __NR_undelete 205
++#define __NR_compat_50_futimes 206
++#define __NR_getpgid 207
++#define __NR_reboot 208
++#define __NR_poll 209
++#define __NR_afssys 210
++#define __NR_compat_14___semctl 220
++#define __NR_semget 221
++#define __NR_semop 222
++#define __NR_semconfig 223
++#define __NR_compat_14_msgctl 224
++#define __NR_msgget 225
++#define __NR_msgsnd 226
++#define __NR_msgrcv 227
++#define __NR_shmat 228
++#define __NR_compat_14_shmctl 229
++#define __NR_shmdt 230
++#define __NR_shmget 231
++#define __NR_compat_50_clock_gettime 232
++#define __NR_compat_50_clock_settime 233
++#define __NR_compat_50_clock_getres 234
++#define __NR_timer_create 235
++#define __NR_timer_delete 236
++#define __NR_compat_50_timer_settime 237
++#define __NR_compat_50_timer_gettime 238
++#define __NR_timer_getoverrun 239
++#define __NR_compat_50_nanosleep 240
++#define __NR_fdatasync 241
++#define __NR_mlockall 242
++#define __NR_munlockall 243
++#define __NR_compat_50___sigtimedwait 244
++#define __NR_sigqueueinfo 245
++#define __NR_modctl 246
++#define __NR__ksem_init 247
++#define __NR__ksem_open 248
++#define __NR__ksem_unlink 249
++#define __NR__ksem_close 250
++#define __NR__ksem_post 251
++#define __NR__ksem_wait 252
++#define __NR__ksem_trywait 253
++#define __NR__ksem_getvalue 254
++#define __NR__ksem_destroy 255
++#define __NR__ksem_timedwait 256
++#define __NR_mq_open 257
++#define __NR_mq_close 258
++#define __NR_mq_unlink 259
++#define __NR_mq_getattr 260
++#define __NR_mq_setattr 261
++#define __NR_mq_notify 262
++#define __NR_mq_send 263
++#define __NR_mq_receive 264
++#define __NR_compat_50_mq_timedsend 265
++#define __NR_compat_50_mq_timedreceive 266
++#define __NR___posix_rename 270
++#define __NR_swapctl 271
++#define __NR_compat_30_getdents 272
++#define __NR_minherit 273
++#define __NR_lchmod 274
++#define __NR_lchown 275
++#define __NR_compat_50_lutimes 276
++#define __NR___msync13 277
++#define __NR_compat_30___stat13 278
++#define __NR_compat_30___fstat13 279
++#define __NR_compat_30___lstat13 280
++#define __NR___sigaltstack14 281
++#define __NR___vfork14 282
++#define __NR___posix_chown 283
++#define __NR___posix_fchown 284
++#define __NR___posix_lchown 285
++#define __NR_getsid 286
++#define __NR___clone 287
++#define __NR_fktrace 288
++#define __NR_preadv 289
++#define __NR_pwritev 290
++#define __NR_compat_16___sigaction14 291
++#define __NR___sigpending14 292
++#define __NR___sigprocmask14 293
++#define __NR___sigsuspend14 294
++#define __NR_compat_16___sigreturn14 295
++#define __NR___getcwd 296
++#define __NR_fchroot 297
++#define __NR_compat_30_fhopen 298
++#define __NR_compat_30_fhstat 299
++#define __NR_compat_20_fhstatfs 300
++#define __NR_compat_50_____semctl13 301
++#define __NR_compat_50___msgctl13 302
++#define __NR_compat_50___shmctl13 303
++#define __NR_lchflags 304
++#define __NR_issetugid 305
++#define __NR_utrace 306
++#define __NR_getcontext 307
++#define __NR_setcontext 308
++#define __NR__lwp_create 309
++#define __NR__lwp_exit 310
++#define __NR__lwp_self 311
++#define __NR__lwp_wait 312
++#define __NR__lwp_suspend 313
++#define __NR__lwp_continue 314
++#define __NR__lwp_wakeup 315
++#define __NR__lwp_getprivate 316
++#define __NR__lwp_setprivate 317
++#define __NR__lwp_kill 318
++#define __NR__lwp_detach 319
++#define __NR_compat_50__lwp_park 320
++#define __NR__lwp_unpark 321
++#define __NR__lwp_unpark_all 322
++#define __NR__lwp_setname 323
++#define __NR__lwp_getname 324
++#define __NR__lwp_ctl 325
++#define __NR_compat_60_sa_register 330
++#define __NR_compat_60_sa_stacks 331
++#define __NR_compat_60_sa_enable 332
++#define __NR_compat_60_sa_setconcurrency 333
++#define __NR_compat_60_sa_yield 334
++#define __NR_compat_60_sa_preempt 335
++#define __NR___sigaction_sigtramp 340
++#define __NR_rasctl 343
++#define __NR_kqueue 344
++#define __NR_compat_50_kevent 345
++#define __NR__sched_setparam 346
++#define __NR__sched_getparam 347
++#define __NR__sched_setaffinity 348
++#define __NR__sched_getaffinity 349
++#define __NR_sched_yield 350
++#define __NR__sched_protect 351
++#define __NR_fsync_range 354
++#define __NR_uuidgen 355
++#define __NR_getvfsstat 356
++#define __NR_statvfs1 357
++#define __NR_fstatvfs1 358
++#define __NR_compat_30_fhstatvfs1 359
++#define __NR_extattrctl 360
++#define __NR_extattr_set_file 361
++#define __NR_extattr_get_file 362
++#define __NR_extattr_delete_file 363
++#define __NR_extattr_set_fd 364
++#define __NR_extattr_get_fd 365
++#define __NR_extattr_delete_fd 366
++#define __NR_extattr_set_link 367
++#define __NR_extattr_get_link 368
++#define __NR_extattr_delete_link 369
++#define __NR_extattr_list_fd 370
++#define __NR_extattr_list_file 371
++#define __NR_extattr_list_link 372
++#define __NR_compat_50_pselect 373
++#define __NR_compat_50_pollts 374
++#define __NR_setxattr 375
++#define __NR_lsetxattr 376
++#define __NR_fsetxattr 377
++#define __NR_getxattr 378
++#define __NR_lgetxattr 379
++#define __NR_fgetxattr 380
++#define __NR_listxattr 381
++#define __NR_llistxattr 382
++#define __NR_flistxattr 383
++#define __NR_removexattr 384
++#define __NR_lremovexattr 385
++#define __NR_fremovexattr 386
++#define __NR_compat_50___stat30 387
++#define __NR_compat_50___fstat30 388
++#define __NR_compat_50___lstat30 389
++#define __NR___getdents30 390
++#define __NR_compat_30___fhstat30 392
++#define __NR_compat_50___ntp_gettime30 393
++#define __NR___socket30 394
++#define __NR___getfh30 395
++#define __NR___fhopen40 396
++#define __NR___fhstatvfs140 397
++#define __NR_compat_50___fhstat40 398
++#define __NR_aio_cancel 399
++#define __NR_aio_error 400
++#define __NR_aio_fsync 401
++#define __NR_aio_read 402
++#define __NR_aio_return 403
++#define __NR_compat_50_aio_suspend 404
++#define __NR_aio_write 405
++#define __NR_lio_listio 406
++#define __NR___mount50 410
++#define __NR_mremap 411
++#define __NR_pset_create 412
++#define __NR_pset_destroy 413
++#define __NR_pset_assign 414
++#define __NR__pset_bind 415
++#define __NR___posix_fadvise50 416
++#define __NR___select50 417
++#define __NR___gettimeofday50 418
++#define __NR___settimeofday50 419
++#define __NR___utimes50 420
++#define __NR___adjtime50 421
++#define __NR___lfs_segwait50 422
++#define __NR___futimes50 423
++#define __NR___lutimes50 424
++#define __NR___setitimer50 425
++#define __NR___getitimer50 426
++#define __NR___clock_gettime50 427
++#define __NR___clock_settime50 428
++#define __NR___clock_getres50 429
++#define __NR___nanosleep50 430
++#define __NR_____sigtimedwait50 431
++#define __NR___mq_timedsend50 432
++#define __NR___mq_timedreceive50 433
++#define __NR_compat_60__lwp_park 434
++#define __NR___kevent50 435
++#define __NR___pselect50 436
++#define __NR___pollts50 437
++#define __NR___aio_suspend50 438
++#define __NR___stat50 439
++#define __NR___fstat50 440
++#define __NR___lstat50 441
++#define __NR_____semctl50 442
++#define __NR___shmctl50 443
++#define __NR___msgctl50 444
++#define __NR___getrusage50 445
++#define __NR___timer_settime50 446
++#define __NR___timer_gettime50 447
++#define __NR___ntp_gettime50 448
++#define __NR___wait450 449
++#define __NR___mknod50 450
++#define __NR___fhstat50 451
++#define __NR_pipe2 453
++#define __NR_dup3 454
++#define __NR_kqueue1 455
++#define __NR_paccept 456
++#define __NR_linkat 457
++#define __NR_renameat 458
++#define __NR_mkfifoat 459
++#define __NR_mknodat 460
++#define __NR_mkdirat 461
++#define __NR_faccessat 462
++#define __NR_fchmodat 463
++#define __NR_fchownat 464
++#define __NR_fexecve 465
++#define __NR_fstatat 466
++#define __NR_utimensat 467
++#define __NR_openat 468
++#define __NR_readlinkat 469
++#define __NR_symlinkat 470
++#define __NR_unlinkat 471
++#define __NR_futimens 472
++#define __NR___quotactl 473
++#define __NR_posix_spawn 474
++#define __NR_recvmmsg 475
++#define __NR_sendmmsg 476
++#define __NR_clock_nanosleep 477
++#define __NR____lwp_park60 478
++#define __NR_posix_fallocate 479
++#define __NR_fdiscard 480
++#define __NR_wait6 481
++#define __NR_clock_getcpuclockid2 482
++
++#define __NR_fake_sigreturn 1000
++
++#endif /* __VKI_UNISTD_NETBSD_H */
diff --git a/valgrind-netbsd/patches/patch-memcheck_Makefile.am b/valgrind-netbsd/patches/patch-memcheck_Makefile.am
new file mode 100644
index 0000000000..2dfd609623
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-memcheck_Makefile.am
@@ -0,0 +1,21 @@
+$NetBSD$
+
+--- memcheck/Makefile.am.orig 2018-05-05 07:42:22.000000000 +0000
++++ memcheck/Makefile.am
+@@ -98,6 +98,11 @@ vgpreload_memcheck_@VGCONF_ARCH_PRI@_@VG
+ $(PRELOAD_LDFLAGS_@VGCONF_PLATFORM_PRI_CAPS@) \
+ $(LIBREPLACEMALLOC_LDFLAGS_@VGCONF_PLATFORM_PRI_CAPS@)
+
++if VGCONF_OS_IS_NETBSD
++vgpreload_memcheck_@VGCONF_ARCH_PRI@_@VGCONF_OS@_so_LDFLAGS += \
++ -shared -fPIC
++endif
++
+ if VGCONF_HAVE_PLATFORM_SEC
+ vgpreload_memcheck_@VGCONF_ARCH_SEC@_@VGCONF_OS@_so_SOURCES = \
+ $(VGPRELOAD_MEMCHECK_SOURCES_COMMON)
+@@ -111,4 +116,3 @@ vgpreload_memcheck_@VGCONF_ARCH_SEC@_@VG
+ $(PRELOAD_LDFLAGS_@VGCONF_PLATFORM_SEC_CAPS@) \
+ $(LIBREPLACEMALLOC_LDFLAGS_@VGCONF_PLATFORM_SEC_CAPS@)
+ endif
+-
diff --git a/valgrind-netbsd/patches/patch-memcheck_mc__errors.c b/valgrind-netbsd/patches/patch-memcheck_mc__errors.c
new file mode 100644
index 0000000000..8dad339173
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-memcheck_mc__errors.c
@@ -0,0 +1,376 @@
+$NetBSD$
+
+--- memcheck/mc_errors.c.orig 2018-05-05 07:42:22.000000000 +0000
++++ memcheck/mc_errors.c
+@@ -202,6 +202,7 @@ struct _MC_Error {
+ Memcheck, we don't use. Hence a no-op.
+ */
+ void MC_(before_pp_Error) ( const Error* err ) {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ }
+
+ /* Do a printf-style operation on either the XML or normal output
+@@ -209,6 +210,7 @@ void MC_(before_pp_Error) ( const Error*
+ */
+ static void emit_WRK ( const HChar* format, va_list vargs )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ if (VG_(clo_xml)) {
+ VG_(vprintf_xml)(format, vargs);
+ } else {
+@@ -218,6 +220,7 @@ static void emit_WRK ( const HChar* form
+ static void emit ( const HChar* format, ... ) PRINTF_CHECK(1, 2);
+ static void emit ( const HChar* format, ... )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ va_list vargs;
+ va_start(vargs, format);
+ emit_WRK(format, vargs);
+@@ -227,6 +230,7 @@ static void emit ( const HChar* format,
+
+ static const HChar* str_leak_lossmode ( Reachedness lossmode )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ const HChar *loss = "?";
+ switch (lossmode) {
+ case Unreached: loss = "definitely lost"; break;
+@@ -239,6 +243,7 @@ static const HChar* str_leak_lossmode (
+
+ static const HChar* xml_leak_kind ( Reachedness lossmode )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ const HChar *loss = "?";
+ switch (lossmode) {
+ case Unreached: loss = "Leak_DefinitelyLost"; break;
+@@ -254,6 +259,7 @@ const HChar* MC_(parse_leak_kinds_tokens
+
+ UInt MC_(all_Reachedness)(void)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ static UInt all;
+
+ if (all == 0) {
+@@ -270,6 +276,7 @@ UInt MC_(all_Reachedness)(void)
+
+ static const HChar* pp_Reachedness_for_leak_kinds(Reachedness r)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ switch(r) {
+ case Reachable: return "reachable";
+ case Possible: return "possible";
+@@ -281,6 +288,7 @@ static const HChar* pp_Reachedness_for_l
+
+ static void mc_pp_origin ( ExeContext* ec, UInt okind )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ const HChar* src = NULL;
+ tl_assert(ec);
+
+@@ -306,6 +314,7 @@ HChar * MC_(snprintf_delta) (HChar * buf
+ SizeT current_val, SizeT old_val,
+ LeakCheckDeltaMode delta_mode)
+ {
++//VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ // Make sure the buffer size is large enough. With old_val == 0 and
+ // current_val == ULLONG_MAX the delta including inserted commas is:
+ // 18,446,744,073,709,551,615
+@@ -325,6 +334,7 @@ HChar * MC_(snprintf_delta) (HChar * buf
+ static void pp_LossRecord(UInt n_this_record, UInt n_total_records,
+ LossRecord* lr, Bool xml)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ // char arrays to produce the indication of increase/decrease in case
+ // of delta_mode != LCD_Any
+ HChar d_bytes[31];
+@@ -408,14 +418,18 @@ static void pp_LossRecord(UInt n_this_re
+ void MC_(pp_LossRecord)(UInt n_this_record, UInt n_total_records,
+ LossRecord* l)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ pp_LossRecord (n_this_record, n_total_records, l, /* xml */ False);
+ }
+
+ void MC_(pp_Error) ( const Error* err )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ const Bool xml = VG_(clo_xml); /* a shorthand */
+ MC_Error* extra = VG_(get_error_extra)(err);
+
++ VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
++
+ switch (VG_(get_error_kind)(err)) {
+ case Err_CoreMem:
+ /* What the hell *is* a CoreMemError? jrs 2005-May-18 */
+@@ -711,6 +725,7 @@ void MC_(pp_Error) ( const Error* err )
+ VG_(get_error_kind)(err));
+ VG_(tool_panic)("unknown error code in mc_pp_Error)");
+ }
++ VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ }
+
+ /*------------------------------------------------------------*/
+@@ -725,6 +740,7 @@ void MC_(pp_Error) ( const Error* err )
+ for the --workaround-gcc296-bugs kludge. */
+ static Bool is_just_below_ESP( Addr esp, Addr aa )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ esp -= VG_STACK_REDZONE_SZB;
+ if (esp > aa && (esp - aa) <= VG_GCC296_BUG_STACK_SLOP)
+ return True;
+@@ -737,9 +753,12 @@ static Bool is_just_below_ESP( Addr esp,
+ void MC_(record_address_error) ( ThreadId tid, Addr a, Int szB,
+ Bool isWrite )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ MC_Error extra;
+ Bool just_below_esp;
+
++VG_(debugLog)(2, "initimg", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
++
+ if (MC_(in_ignored_range)(a))
+ return;
+
+@@ -764,11 +783,14 @@ void MC_(record_address_error) ( ThreadI
+ extra.Err.Addr.szB = szB;
+ extra.Err.Addr.maybe_gcc = just_below_esp;
+ extra.Err.Addr.ai.tag = Addr_Undescribed;
++VG_(debugLog)(2, "initimg", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ VG_(maybe_record_error)( tid, Err_Addr, a, /*s*/NULL, &extra );
++VG_(debugLog)(2, "initimg", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ }
+
+ void MC_(record_value_error) ( ThreadId tid, Int szB, UInt otag )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ MC_Error extra;
+ tl_assert( MC_(clo_mc_level) >= 2 );
+ if (otag > 0)
+@@ -781,6 +803,7 @@ void MC_(record_value_error) ( ThreadId
+
+ void MC_(record_cond_error) ( ThreadId tid, UInt otag )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ MC_Error extra;
+ tl_assert( MC_(clo_mc_level) >= 2 );
+ if (otag > 0)
+@@ -795,11 +818,13 @@ void MC_(record_cond_error) ( ThreadId t
+ /* This is for memory errors in signal-related memory. */
+ void MC_(record_core_mem_error) ( ThreadId tid, const HChar* msg )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ VG_(maybe_record_error)( tid, Err_CoreMem, /*addr*/0, msg, /*extra*/NULL );
+ }
+
+ void MC_(record_regparam_error) ( ThreadId tid, const HChar* msg, UInt otag )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ MC_Error extra;
+ tl_assert(VG_INVALID_THREADID != tid);
+ if (otag > 0)
+@@ -812,6 +837,7 @@ void MC_(record_regparam_error) ( Thread
+ void MC_(record_memparam_error) ( ThreadId tid, Addr a,
+ Bool isAddrErr, const HChar* msg, UInt otag )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ MC_Error extra;
+ tl_assert(VG_INVALID_THREADID != tid);
+ if (!isAddrErr)
+@@ -829,6 +855,7 @@ void MC_(record_memparam_error) ( Thread
+
+ void MC_(record_jump_error) ( ThreadId tid, Addr a )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ MC_Error extra;
+ tl_assert(VG_INVALID_THREADID != tid);
+ extra.Err.Jump.ai.tag = Addr_Undescribed;
+@@ -837,6 +864,7 @@ void MC_(record_jump_error) ( ThreadId t
+
+ void MC_(record_free_error) ( ThreadId tid, Addr a )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ MC_Error extra;
+ tl_assert(VG_INVALID_THREADID != tid);
+ extra.Err.Free.ai.tag = Addr_Undescribed;
+@@ -845,6 +873,7 @@ void MC_(record_free_error) ( ThreadId t
+
+ void MC_(record_freemismatch_error) ( ThreadId tid, MC_Chunk* mc )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ MC_Error extra;
+ AddrInfo* ai = &extra.Err.FreeMismatch.ai;
+ tl_assert(VG_INVALID_THREADID != tid);
+@@ -862,6 +891,7 @@ void MC_(record_freemismatch_error) ( Th
+
+ void MC_(record_illegal_mempool_error) ( ThreadId tid, Addr a )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ MC_Error extra;
+ tl_assert(VG_INVALID_THREADID != tid);
+ extra.Err.IllegalMempool.ai.tag = Addr_Undescribed;
+@@ -871,6 +901,7 @@ void MC_(record_illegal_mempool_error) (
+ void MC_(record_overlap_error) ( ThreadId tid, const HChar* function,
+ Addr src, Addr dst, SizeT szB )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ MC_Error extra;
+ tl_assert(VG_INVALID_THREADID != tid);
+ extra.Err.Overlap.src = src;
+@@ -884,6 +915,7 @@ Bool MC_(record_leak_error) ( ThreadId t
+ UInt n_total_records, LossRecord* lr,
+ Bool print_record, Bool count_error )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ MC_Error extra;
+ extra.Err.Leak.n_this_record = n_this_record;
+ extra.Err.Leak.n_total_records = n_total_records;
+@@ -897,6 +929,7 @@ Bool MC_(record_leak_error) ( ThreadId t
+ Bool MC_(record_fishy_value_error) ( ThreadId tid, const HChar *function_name,
+ const HChar *argument_name, SizeT value)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ MC_Error extra;
+
+ tl_assert(VG_INVALID_THREADID != tid);
+@@ -916,6 +949,7 @@ Bool MC_(record_fishy_value_error) ( Thr
+ void MC_(record_user_error) ( ThreadId tid, Addr a,
+ Bool isAddrErr, UInt otag )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ MC_Error extra;
+ if (otag != 0) {
+ tl_assert(!isAddrErr);
+@@ -934,6 +968,7 @@ void MC_(record_user_error) ( ThreadId t
+
+ Bool MC_(is_mempool_block)(MC_Chunk* mc_search)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ MC_Mempool* mp;
+
+ if (!MC_(mempool_list))
+@@ -965,6 +1000,7 @@ Bool MC_(is_mempool_block)(MC_Chunk* mc_
+ are allowed to be different. */
+ Bool MC_(eq_Error) ( VgRes res, const Error* e1, const Error* e2 )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ MC_Error* extra1 = VG_(get_error_extra)(e1);
+ MC_Error* extra2 = VG_(get_error_extra)(e2);
+
+@@ -1035,12 +1071,14 @@ Bool MC_(eq_Error) ( VgRes res, const Er
+ static
+ Bool addr_is_in_MC_Chunk_default_REDZONE_SZB(MC_Chunk* mc, Addr a)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ return VG_(addr_is_in_block)( a, mc->data, mc->szB,
+ MC_(Malloc_Redzone_SzB) );
+ }
+ static
+ Bool addr_is_in_MC_Chunk_with_REDZONE_SZB(MC_Chunk* mc, Addr a, SizeT rzB)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ return VG_(addr_is_in_block)( a, mc->data, mc->szB,
+ rzB );
+ }
+@@ -1055,6 +1093,7 @@ static Bool mempool_block_maybe_describe
+ putting the result in ai. */
+ static void describe_addr ( DiEpoch ep, Addr a, /*OUT*/AddrInfo* ai )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ MC_Chunk* mc;
+
+ tl_assert(Addr_Undescribed == ai->tag);
+@@ -1126,6 +1165,7 @@ static void describe_addr ( DiEpoch ep,
+
+ void MC_(pp_describe_addr) ( DiEpoch ep, Addr a )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ AddrInfo ai;
+
+ ai.tag = Addr_Undescribed;
+@@ -1139,6 +1179,7 @@ void MC_(pp_describe_addr) ( DiEpoch ep,
+ static void update_origin ( /*OUT*/ExeContext** origin_ec,
+ UInt otag )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ UInt ecu = otag & ~3;
+ *origin_ec = NULL;
+ if (VG_(is_plausible_ECU)(ecu)) {
+@@ -1149,6 +1190,7 @@ static void update_origin ( /*OUT*/ExeCo
+ /* Updates the copy with address info if necessary (but not for all errors). */
+ UInt MC_(update_Error_extra)( const Error* err )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ MC_Error* extra = VG_(get_error_extra)(err);
+ DiEpoch ep = VG_(get_ExeContext_epoch)(VG_(get_error_where)(err));
+
+@@ -1232,6 +1274,7 @@ UInt MC_(update_Error_extra)( const Erro
+ static Bool client_block_maybe_describe( Addr a,
+ /*OUT*/AddrInfo* ai )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ UWord i;
+ CGenBlock* cgbs = NULL;
+ UWord cgb_used = 0;
+@@ -1264,6 +1307,7 @@ static Bool client_block_maybe_describe(
+ static Bool mempool_block_maybe_describe( Addr a, Bool is_metapool,
+ /*OUT*/AddrInfo* ai )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ MC_Mempool* mp;
+ tl_assert( MC_(mempool_list) );
+
+@@ -1321,6 +1365,7 @@ typedef
+
+ Bool MC_(is_recognised_suppression) ( const HChar* name, Supp* su )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ SuppKind skind;
+
+ if (VG_STREQ(name, "Param")) skind = ParamSupp;
+@@ -1375,6 +1420,7 @@ typedef struct {
+ Bool MC_(read_extra_suppression_info) ( Int fd, HChar** bufpp,
+ SizeT* nBufp, Int* lineno, Supp *su )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ Bool eof;
+ Int i;
+
+@@ -1438,6 +1484,7 @@ Bool MC_(read_extra_suppression_info) (
+
+ Bool MC_(error_matches_suppression) ( const Error* err, const Supp* su )
+ {
++//VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ Int su_szB;
+ MC_Error* extra = VG_(get_error_extra)(err);
+ ErrorKind ekind = VG_(get_error_kind)(err);
+@@ -1524,6 +1571,7 @@ Bool MC_(error_matches_suppression) ( co
+
+ const HChar* MC_(get_error_name) ( const Error* err )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ switch (VG_(get_error_kind)(err)) {
+ case Err_RegParam: return "Param";
+ case Err_MemParam: return "Param";
+@@ -1568,6 +1616,7 @@ const HChar* MC_(get_error_name) ( const
+ SizeT MC_(get_extra_suppression_info) ( const Error* err,
+ /*OUT*/HChar* buf, Int nBuf )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ ErrorKind ekind = VG_(get_error_kind)(err);
+ tl_assert(buf);
+ tl_assert(nBuf >= 1);
+@@ -1594,6 +1643,7 @@ SizeT MC_(get_extra_suppression_info) (
+ SizeT MC_(print_extra_suppression_use) ( const Supp *su,
+ /*OUT*/HChar *buf, Int nBuf )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ tl_assert(nBuf >= 1);
+
+ if (VG_(get_supp_kind)(su) == LeakSupp) {
+@@ -1614,6 +1664,7 @@ SizeT MC_(print_extra_suppression_use) (
+
+ void MC_(update_extra_suppression_use) ( const Error* err, const Supp* su)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ if (VG_(get_supp_kind)(su) == LeakSupp) {
+ MC_LeakSuppExtra *lse = (MC_LeakSuppExtra*) VG_(get_supp_extra) (su);
+ MC_Error* extra = VG_(get_error_extra)(err);
diff --git a/valgrind-netbsd/patches/patch-memcheck_mc__leakcheck.c b/valgrind-netbsd/patches/patch-memcheck_mc__leakcheck.c
new file mode 100644
index 0000000000..847c5ac3f7
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-memcheck_mc__leakcheck.c
@@ -0,0 +1,303 @@
+$NetBSD$
+
+--- memcheck/mc_leakcheck.c.orig 2018-05-05 07:42:22.000000000 +0000
++++ memcheck/mc_leakcheck.c
+@@ -251,6 +251,7 @@
+ // Compare the MC_Chunks by 'data' (i.e. the address of the block).
+ static Int compare_MC_Chunks(const void* n1, const void* n2)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ const MC_Chunk* mc1 = *(const MC_Chunk *const *)n1;
+ const MC_Chunk* mc2 = *(const MC_Chunk *const *)n2;
+ if (mc1->data < mc2->data) return -1;
+@@ -266,6 +267,7 @@ Int find_chunk_for_OLD ( Addr ptr,
+ Int n_chunks )
+
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ Int i;
+ Addr a_lo, a_hi;
+ PROF_EVENT(MCPE_FIND_CHUNK_FOR_OLD);
+@@ -290,6 +292,7 @@ Int find_chunk_for ( Addr ptr,
+ MC_Chunk** chunks,
+ Int n_chunks )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ Addr a_mid_lo, a_mid_hi;
+ Int lo, mid, hi, retVal;
+ // VG_(printf)("find chunk for %p = ", ptr);
+@@ -336,6 +339,7 @@ Int find_chunk_for ( Addr ptr,
+ static MC_Chunk**
+ find_active_chunks(Int* pn_chunks)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ // Our goal is to construct a set of chunks that includes every
+ // mempool chunk, and every malloc region that *doesn't* contain a
+ // mempool chunk.
+@@ -526,6 +530,7 @@ static SizeT MC_(blocks_heuristically_re
+ static Bool
+ lc_is_a_chunk_ptr(Addr ptr, Int* pch_no, MC_Chunk** pch, LC_Extra** pex)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ Int ch_no;
+ MC_Chunk* ch;
+ LC_Extra* ex;
+@@ -566,6 +571,7 @@ lc_is_a_chunk_ptr(Addr ptr, Int* pch_no,
+ // Push a chunk (well, just its index) onto the mark stack.
+ static void lc_push(Int ch_no, MC_Chunk* ch)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ if (!lc_extras[ch_no].pending) {
+ if (0) {
+ VG_(printf)("pushing %#lx-%#lx\n", ch->data, ch->data + ch->szB);
+@@ -582,6 +588,7 @@ static void lc_push(Int ch_no, MC_Chunk*
+ // there isn't one.
+ static Bool lc_pop(Int* ret)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ if (-1 == lc_markstack_top) {
+ return False;
+ } else {
+@@ -596,6 +603,7 @@ static Bool lc_pop(Int* ret)
+
+ static const HChar* pp_heuristic(LeakCheckHeuristic h)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ switch(h) {
+ case LchNone: return "none";
+ case LchStdString: return "stdstring";
+@@ -616,6 +624,7 @@ static const HChar* pp_heuristic(LeakChe
+ // function.
+ static Bool aligned_ptr_above_page0_is_vtable_addr(Addr ptr)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ // ??? If performance problem:
+ // ??? maybe implement a cache (array indexed by ptr % primenr)
+ // ??? of "I am a vtable ptr" ???
+@@ -681,6 +690,7 @@ static Bool aligned_ptr_above_page0_is_v
+ // true if a is properly aligned and points to 64bits of valid memory
+ static Bool is_valid_aligned_ULong ( Addr a )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ if (sizeof(Word) == 8)
+ return MC_(is_valid_aligned_word)(a);
+
+@@ -706,6 +716,7 @@ static
+ void leak_search_fault_catcher ( Int sigNo, Addr addr,
+ const HChar *who, VG_MINIMAL_JMP_BUF(jmpbuf) )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ vki_sigset_t sigmask;
+
+ if (0)
+@@ -742,6 +753,7 @@ static VG_MINIMAL_JMP_BUF(heuristic_reac
+ static
+ void heuristic_reachedness_fault_catcher ( Int sigNo, Addr addr )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ leak_search_fault_catcher (sigNo, addr,
+ "heuristic_reachedness_fault_catcher",
+ heuristic_reachedness_jmpbuf);
+@@ -760,7 +772,7 @@ static LeakCheckHeuristic heuristic_reac
+ MC_Chunk *ch, LC_Extra *ex,
+ UInt heur_set)
+ {
+-
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ fault_catcher_t prev_catcher;
+
+ prev_catcher = VG_(set_fault_catcher)(heuristic_reachedness_fault_catcher);
+@@ -887,6 +899,7 @@ static LeakCheckHeuristic heuristic_reac
+ static void
+ lc_push_without_clique_if_a_chunk_ptr(Addr ptr, Bool is_prior_definite)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ Int ch_no;
+ MC_Chunk* ch;
+ LC_Extra* ex;
+@@ -945,6 +958,7 @@ lc_push_without_clique_if_a_chunk_ptr(Ad
+ static void
+ lc_push_if_a_chunk_ptr_register(ThreadId tid, const HChar* regname, Addr ptr)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ lc_push_without_clique_if_a_chunk_ptr(ptr, /*is_prior_definite*/True);
+ }
+
+@@ -954,6 +968,7 @@ lc_push_if_a_chunk_ptr_register(ThreadId
+ static void
+ lc_push_with_clique_if_a_chunk_ptr(Addr ptr, Int clique, Int cur_clique)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ Int ch_no;
+ MC_Chunk* ch;
+ LC_Extra* ex;
+@@ -996,6 +1011,7 @@ static void
+ lc_push_if_a_chunk_ptr(Addr ptr,
+ Int clique, Int cur_clique, Bool is_prior_definite)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ if (-1 == clique)
+ lc_push_without_clique_if_a_chunk_ptr(ptr, is_prior_definite);
+ else
+@@ -1007,6 +1023,7 @@ static VG_MINIMAL_JMP_BUF(lc_scan_memory
+ static
+ void lc_scan_memory_fault_catcher ( Int sigNo, Addr addr )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ leak_search_fault_catcher (sigNo, addr,
+ "lc_scan_memory_fault_catcher",
+ lc_scan_memory_jmpbuf);
+@@ -1046,6 +1063,7 @@ lc_scan_memory(Addr start, SizeT len, Bo
+ Int clique, Int cur_clique,
+ Addr searched, SizeT szB)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ /* memory scan is based on the assumption that valid pointers are aligned
+ on a multiple of sizeof(Addr). So, we can (and must) skip the begin and
+ end portions of the block if they are not aligned on sizeof(Addr):
+@@ -1182,6 +1200,7 @@ lc_scan_memory(Addr start, SizeT len, Bo
+ // Process the mark stack until empty.
+ static void lc_process_markstack(Int clique)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ Int top = -1; // shut gcc up
+ Bool is_prior_definite;
+
+@@ -1199,6 +1218,7 @@ static void lc_process_markstack(Int cli
+
+ static Word cmp_LossRecordKey_LossRecord(const void* key, const void* elem)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ const LossRecordKey* a = key;
+ const LossRecordKey* b = &(((const LossRecord*)elem)->key);
+
+@@ -1217,6 +1237,7 @@ static Word cmp_LossRecordKey_LossRecord
+
+ static Int cmp_LossRecords(const void* va, const void* vb)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ const LossRecord* lr_a = *(const LossRecord *const *)va;
+ const LossRecord* lr_b = *(const LossRecord *const *)vb;
+ SizeT total_szB_a = lr_a->szB + lr_a->indirect_szB;
+@@ -1243,6 +1264,7 @@ static Int cmp_LossRecords(const void* v
+ // allocates or reallocates lr_array, and set its elements to the loss records
+ // contains in lr_table.
+ static UInt get_lr_array_from_lr_table(void) {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ UInt i, n_lossrecords;
+ LossRecord* lr;
+
+@@ -1268,6 +1290,7 @@ static void get_printing_rules(LeakCheck
+ Bool* count_as_error,
+ Bool* print_record)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ // Rules for printing:
+ // - We don't show suppressed loss records ever (and that's controlled
+ // within the error manager).
+@@ -1341,10 +1364,12 @@ typedef
+
+ static void MC_(XT_Leak_init)(void* xtl)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ VG_(memset) (xtl, 0, sizeof(XT_Leak));
+ }
+ static void MC_(XT_Leak_add) (void* to, const void* xtleak)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ XT_Leak* xto = to;
+ const XT_Leak* xtl = xtleak;
+
+@@ -1357,6 +1382,7 @@ static void MC_(XT_Leak_add) (void* to,
+ }
+ static void XT_insert_lr (LossRecord* lr)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ XT_Leak xtl;
+ Reachedness i = lr->key.state;
+
+@@ -1388,10 +1414,12 @@ static void XT_insert_lr (LossRecord* lr
+
+ static void MC_(XT_Leak_sub) (void* from, const void* xtleak)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ tl_assert(0); // Should not be called.
+ }
+ static const HChar* MC_(XT_Leak_img) (const void* xtleak)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ static XT_Leak zero;
+ static HChar buf[600];
+ UInt off = 0;
+@@ -1490,6 +1518,7 @@ static const HChar* XT_Leak_events =
+
+ static void print_results(ThreadId tid, LeakCheckParams* lcp)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ Int i, n_lossrecords, start_lr_output_scan;
+ LossRecord* lr;
+ Bool is_suppressed;
+@@ -1782,6 +1811,7 @@ static void print_results(ThreadId tid,
+ // Printing stops when *remaining reaches 0.
+ static void print_clique (Int clique, UInt level, UInt *remaining)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ Int ind;
+ UInt i, n_lossrecords;
+
+@@ -1821,6 +1851,7 @@ Bool MC_(print_block_list) ( UInt loss_r
+ UInt max_blocks,
+ UInt heuristics)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ UInt loss_record_nr;
+ UInt i, n_lossrecords;
+ LossRecord* lr;
+@@ -1918,6 +1949,7 @@ Bool MC_(print_block_list) ( UInt loss_r
+ // pointing inside [searched, searched+szB[.
+ static void scan_memory_root_set(Addr searched, SizeT szB)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ Int i;
+ Int n_seg_starts;
+ Addr* seg_starts = VG_(get_segment_starts)( SkFileC | SkAnonC | SkShmC,
+@@ -1973,6 +2005,7 @@ static void scan_memory_root_set(Addr se
+
+ static MC_Mempool *find_mp_of_chunk (MC_Chunk* mc_search)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ MC_Mempool* mp;
+
+ tl_assert( MC_(mempool_list) );
+@@ -1996,8 +2029,9 @@ static MC_Mempool *find_mp_of_chunk (MC_
+
+ void MC_(detect_memory_leaks) ( ThreadId tid, LeakCheckParams* lcp)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ Int i, j;
+-
++
+ tl_assert(lcp->mode != LC_Off);
+
+ // Verify some assertions which are used in lc_scan_memory.
+@@ -2234,6 +2268,7 @@ static SizeT searched_szB;
+ static void
+ search_address_in_GP_reg(ThreadId tid, const HChar* regname, Addr addr_in_reg)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ if (addr_in_reg >= searched_wpa
+ && addr_in_reg < searched_wpa + searched_szB) {
+ if (addr_in_reg == searched_wpa)
+@@ -2250,6 +2285,7 @@ search_address_in_GP_reg(ThreadId tid, c
+
+ void MC_(who_points_at) ( Addr address, SizeT szB)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ MC_Chunk** chunks;
+ Int n_chunks;
+ Int i;
+@@ -2284,4 +2320,3 @@ void MC_(who_points_at) ( Addr address,
+ /*--------------------------------------------------------------------*/
+ /*--- end ---*/
+ /*--------------------------------------------------------------------*/
+-
diff --git a/valgrind-netbsd/patches/patch-memcheck_mc__machine.c b/valgrind-netbsd/patches/patch-memcheck_mc__machine.c
new file mode 100644
index 0000000000..618d8376cc
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-memcheck_mc__machine.c
@@ -0,0 +1,43 @@
+$NetBSD$
+
+--- memcheck/mc_machine.c.orig 2018-05-05 07:42:22.000000000 +0000
++++ memcheck/mc_machine.c
+@@ -49,12 +49,14 @@
+
+ __attribute__((unused))
+ static inline Bool host_is_big_endian ( void ) {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ UInt x = 0x11223344;
+ return 0x1122 == *(UShort*)(&x);
+ }
+
+ __attribute__((unused))
+ static inline Bool host_is_little_endian ( void ) {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ UInt x = 0x11223344;
+ return 0x3344 == *(UShort*)(&x);
+ }
+@@ -96,6 +98,7 @@ static Int get_otrack_shadow_offset_wrk
+
+ Int MC_(get_otrack_shadow_offset) ( Int offset, Int szB )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ Int cand = get_otrack_shadow_offset_wrk( offset, szB );
+ if (cand == -1)
+ return cand;
+@@ -107,6 +110,7 @@ Int MC_(get_otrack_shadow_offset) ( Int
+
+ static Int get_otrack_shadow_offset_wrk ( Int offset, Int szB )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ /* -------------------- ppc64 -------------------- */
+
+ # if defined(VGA_ppc64be) || defined(VGA_ppc64le)
+@@ -1342,6 +1346,7 @@ static Int get_otrack_shadow_offset_wrk
+ */
+ IRType MC_(get_otrack_reg_array_equiv_int_type) ( IRRegArray* arr )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ /* -------------------- ppc64 -------------------- */
+ # if defined(VGA_ppc64be) || defined(VGA_ppc64le)
+ /* The redir stack. */
diff --git a/valgrind-netbsd/patches/patch-memcheck_mc__main.c b/valgrind-netbsd/patches/patch-memcheck_mc__main.c
new file mode 100644
index 0000000000..fc69b35a3e
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-memcheck_mc__main.c
@@ -0,0 +1,1687 @@
+$NetBSD$
+
+--- memcheck/mc_main.c.orig 2018-09-30 04:41:00.000000000 +0000
++++ memcheck/mc_main.c
+@@ -55,7 +55,24 @@
+ #include "memcheck.h" /* for client requests */
+
+ /* Set to 1 to do a little more sanity checking */
+-#define VG_DEBUG_MEMORY 0
++#define VG_DEBUG_MEMORY 1
++
++void __sanitizer_cov_trace_pc(void);
++
++void
++__sanitizer_cov_trace_pc(void)
++{
++ static int recursive = 0;
++
++ if (recursive > 0)
++ return;
++
++ recursive++;
++
++ VG_(debugLog)(2, "KR", "%s() %s:%d addr=%lx\n", __func__, __FILE__, __LINE__, (unsigned long)__builtin_return_address(0));
++
++ recursive--;
++}
+
+ #define DEBUG(fmt, args...) //VG_(printf)(fmt, ## args)
+
+@@ -266,9 +283,11 @@ static void ocache_sarp_Clear_Origins (
+ #define INLINE inline __attribute__((always_inline))
+
+ static INLINE Addr start_of_this_sm ( Addr a ) {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ return (a & (~SM_MASK));
+ }
+ static INLINE Bool is_start_of_sm ( Addr a ) {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ return (start_of_this_sm(a) == a);
+ }
+
+@@ -302,6 +321,7 @@ static void update_SM_counts(SecMap* old
+ */
+ static SecMap* copy_for_writing ( SecMap* dist_sm )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ SecMap* new_sm;
+ tl_assert(dist_sm == &sm_distinguished[0]
+ || dist_sm == &sm_distinguished[1]
+@@ -345,6 +365,7 @@ static Int max_secVBit_nodes = 0;
+
+ static void update_SM_counts(SecMap* oldSM, SecMap* newSM)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ if (oldSM == &sm_distinguished[SM_DIST_NOACCESS ]) n_noaccess_SMs --;
+ else if (oldSM == &sm_distinguished[SM_DIST_UNDEFINED]) n_undefined_SMs--;
+ else if (oldSM == &sm_distinguished[SM_DIST_DEFINED ]) n_defined_SMs --;
+@@ -412,6 +433,7 @@ static OSet* auxmap_L2 = NULL;
+
+ static void init_auxmap_L1_L2 ( void )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ Int i;
+ for (i = 0; i < N_AUXMAP_L1; i++) {
+ auxmap_L1[i].base = 0;
+@@ -432,6 +454,7 @@ static void init_auxmap_L1_L2 ( void )
+
+ static const HChar* check_auxmap_L1_L2_sanity ( Word* n_secmaps_found )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ Word i, j;
+ /* On a 32-bit platform, the L2 and L1 tables should
+ both remain empty forever.
+@@ -514,6 +537,7 @@ static const HChar* check_auxmap_L1_L2_s
+
+ static void insert_into_auxmap_L1_at ( Word rank, AuxMapEnt* ent )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ Word i;
+ tl_assert(ent);
+ tl_assert(rank >= 0 && rank < N_AUXMAP_L1);
+@@ -525,6 +549,7 @@ static void insert_into_auxmap_L1_at ( W
+
+ static INLINE AuxMapEnt* maybe_find_in_auxmap ( Addr a )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ AuxMapEnt key;
+ AuxMapEnt* res;
+ Word i;
+@@ -585,6 +610,7 @@ static INLINE AuxMapEnt* maybe_find_in_a
+
+ static AuxMapEnt* find_or_alloc_in_auxmap ( Addr a )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ AuxMapEnt *nyu, *res;
+
+ /* First see if we already have it. */
+@@ -612,12 +638,14 @@ static AuxMapEnt* find_or_alloc_in_auxma
+
+ static INLINE UWord get_primary_map_low_offset ( Addr a )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ UWord pm_off = a >> 16;
+ return pm_off;
+ }
+
+ static INLINE SecMap** get_secmap_low_ptr ( Addr a )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ UWord pm_off = a >> 16;
+ # if VG_DEBUG_MEMORY >= 1
+ tl_assert(pm_off < N_PRIMARY_MAP);
+@@ -627,12 +655,14 @@ static INLINE SecMap** get_secmap_low_pt
+
+ static INLINE SecMap** get_secmap_high_ptr ( Addr a )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ AuxMapEnt* am = find_or_alloc_in_auxmap(a);
+ return &am->sm;
+ }
+
+ static INLINE SecMap** get_secmap_ptr ( Addr a )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ return ( a <= MAX_PRIMARY_ADDRESS
+ ? get_secmap_low_ptr(a)
+ : get_secmap_high_ptr(a));
+@@ -640,16 +670,19 @@ static INLINE SecMap** get_secmap_ptr (
+
+ static INLINE SecMap* get_secmap_for_reading_low ( Addr a )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ return *get_secmap_low_ptr(a);
+ }
+
+ static INLINE SecMap* get_secmap_for_reading_high ( Addr a )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ return *get_secmap_high_ptr(a);
+ }
+
+ static INLINE SecMap* get_secmap_for_writing_low(Addr a)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ SecMap** p = get_secmap_low_ptr(a);
+ if (UNLIKELY(is_distinguished_sm(*p)))
+ *p = copy_for_writing(*p);
+@@ -671,6 +704,7 @@ static INLINE SecMap* get_secmap_for_wri
+ */
+ static INLINE SecMap* get_secmap_for_reading ( Addr a )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ return ( a <= MAX_PRIMARY_ADDRESS
+ ? get_secmap_for_reading_low (a)
+ : get_secmap_for_reading_high(a) );
+@@ -685,6 +719,7 @@ static INLINE SecMap* get_secmap_for_rea
+ */
+ static INLINE SecMap* get_secmap_for_writing ( Addr a )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ return ( a <= MAX_PRIMARY_ADDRESS
+ ? get_secmap_for_writing_low (a)
+ : get_secmap_for_writing_high(a) );
+@@ -696,6 +731,7 @@ static INLINE SecMap* get_secmap_for_wri
+ */
+ static SecMap* maybe_get_secmap_for ( Addr a )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ if (a <= MAX_PRIMARY_ADDRESS) {
+ return get_secmap_for_reading_low(a);
+ } else {
+@@ -709,6 +745,7 @@ static SecMap* maybe_get_secmap_for ( Ad
+ static INLINE
+ void insert_vabits2_into_vabits8 ( Addr a, UChar vabits2, UChar* vabits8 )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ UInt shift = (a & 3) << 1; // shift by 0, 2, 4, or 6
+ *vabits8 &= ~(0x3 << shift); // mask out the two old bits
+ *vabits8 |= (vabits2 << shift); // mask in the two new bits
+@@ -717,6 +754,7 @@ void insert_vabits2_into_vabits8 ( Addr
+ static INLINE
+ void insert_vabits4_into_vabits8 ( Addr a, UChar vabits4, UChar* vabits8 )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ UInt shift;
+ tl_assert(VG_IS_2_ALIGNED(a)); // Must be 2-aligned
+ shift = (a & 2) << 1; // shift by 0 or 4
+@@ -727,6 +765,7 @@ void insert_vabits4_into_vabits8 ( Addr
+ static INLINE
+ UChar extract_vabits2_from_vabits8 ( Addr a, UChar vabits8 )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ UInt shift = (a & 3) << 1; // shift by 0, 2, 4, or 6
+ vabits8 >>= shift; // shift the two bits to the bottom
+ return 0x3 & vabits8; // mask out the rest
+@@ -735,6 +774,7 @@ UChar extract_vabits2_from_vabits8 ( Add
+ static INLINE
+ UChar extract_vabits4_from_vabits8 ( Addr a, UChar vabits8 )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ UInt shift;
+ tl_assert(VG_IS_2_ALIGNED(a)); // Must be 2-aligned
+ shift = (a & 2) << 1; // shift by 0 or 4
+@@ -753,6 +793,7 @@ UChar extract_vabits4_from_vabits8 ( Add
+ static INLINE
+ void set_vabits2 ( Addr a, UChar vabits2 )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ SecMap* sm = get_secmap_for_writing(a);
+ UWord sm_off = SM_OFF(a);
+ insert_vabits2_into_vabits8( a, vabits2, &(sm->vabits8[sm_off]) );
+@@ -761,6 +802,7 @@ void set_vabits2 ( Addr a, UChar vabits2
+ static INLINE
+ UChar get_vabits2 ( Addr a )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ SecMap* sm = get_secmap_for_reading(a);
+ UWord sm_off = SM_OFF(a);
+ UChar vabits8 = sm->vabits8[sm_off];
+@@ -774,6 +816,7 @@ UChar get_vabits2 ( Addr a )
+ static INLINE
+ UChar get_vabits8_for_aligned_word32 ( Addr a )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ SecMap* sm = get_secmap_for_reading(a);
+ UWord sm_off = SM_OFF(a);
+ UChar vabits8 = sm->vabits8[sm_off];
+@@ -783,6 +826,7 @@ UChar get_vabits8_for_aligned_word32 ( A
+ static INLINE
+ void set_vabits8_for_aligned_word32 ( Addr a, UChar vabits8 )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ SecMap* sm = get_secmap_for_writing(a);
+ UWord sm_off = SM_OFF(a);
+ sm->vabits8[sm_off] = vabits8;
+@@ -797,6 +841,7 @@ static void set_sec_vbits8(Addr a, UWor
+ static INLINE
+ Bool set_vbits8 ( Addr a, UChar vbits8 )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ Bool ok = True;
+ UChar vabits2 = get_vabits2(a);
+ if ( VA_BITS2_NOACCESS != vabits2 ) {
+@@ -823,6 +868,7 @@ Bool set_vbits8 ( Addr a, UChar vbits8 )
+ static INLINE
+ Bool get_vbits8 ( Addr a, UChar* vbits8 )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ Bool ok = True;
+ UChar vabits2 = get_vabits2(a);
+
+@@ -941,6 +987,7 @@ typedef
+
+ static OSet* createSecVBitTable(void)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ OSet* newSecVBitTable;
+ newSecVBitTable = VG_(OSetGen_Create_With_Pool)
+ ( offsetof(SecVBitNode, a),
+@@ -954,6 +1001,7 @@ static OSet* createSecVBitTable(void)
+
+ static void gcSecVBitTable(void)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ OSet* secVBitTable2;
+ SecVBitNode* n;
+ Int i, n_nodes = 0, n_survivors = 0;
+@@ -1018,6 +1066,7 @@ static void gcSecVBitTable(void)
+
+ static UWord get_sec_vbits8(Addr a)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ Addr aAligned = VG_ROUNDDN(a, BYTES_PER_SEC_VBIT_NODE);
+ Int amod = a % BYTES_PER_SEC_VBIT_NODE;
+ SecVBitNode* n = VG_(OSetGen_Lookup)(secVBitTable, &aAligned);
+@@ -1032,6 +1081,7 @@ static UWord get_sec_vbits8(Addr a)
+
+ static void set_sec_vbits8(Addr a, UWord vbits8)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ Addr aAligned = VG_ROUNDDN(a, BYTES_PER_SEC_VBIT_NODE);
+ Int i, amod = a % BYTES_PER_SEC_VBIT_NODE;
+ SecVBitNode* n = VG_(OSetGen_Lookup)(secVBitTable, &aAligned);
+@@ -1073,6 +1123,7 @@ static void set_sec_vbits8(Addr a, UWord
+ in a wordszB-sized word, given the specified endianness. */
+ static INLINE UWord byte_offset_w ( UWord wordszB, Bool bigendian,
+ UWord byteno ) {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ return bigendian ? (wordszB-1-byteno) : byteno;
+ }
+
+@@ -1093,6 +1144,7 @@ typedef
+
+ static const HChar* showIARKind ( IARKind iark )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ switch (iark) {
+ case IAR_INVALID: return "INVALID";
+ case IAR_NotIgnored: return "NotIgnored";
+@@ -1107,6 +1159,7 @@ static RangeMap* gIgnoredAddressRanges =
+
+ static void init_gIgnoredAddressRanges ( void )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ if (LIKELY(gIgnoredAddressRanges != NULL))
+ return;
+ gIgnoredAddressRanges = VG_(newRangeMap)( VG_(malloc), "mc.igIAR.1",
+@@ -1115,6 +1168,7 @@ static void init_gIgnoredAddressRanges (
+
+ Bool MC_(in_ignored_range) ( Addr a )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ if (LIKELY(gIgnoredAddressRanges == NULL))
+ return False;
+ UWord how = IAR_INVALID;
+@@ -1134,6 +1188,7 @@ Bool MC_(in_ignored_range) ( Addr a )
+
+ Bool MC_(in_ignored_range_below_sp) ( Addr sp, Addr a, UInt szB )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ if (LIKELY(!MC_(clo_ignore_range_below_sp)))
+ return False;
+ tl_assert(szB >= 1 && szB <= 32);
+@@ -1159,6 +1214,7 @@ Bool MC_(in_ignored_range_below_sp) ( Ad
+
+ static Bool parse_Addr_pair ( const HChar** ppc, Addr* result1, Addr* result2 )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ Bool ok = VG_(parse_Addr) (ppc, result1);
+ if (!ok)
+ return False;
+@@ -1176,6 +1232,7 @@ static Bool parse_Addr_pair ( const HCha
+
+ static Bool parse_UInt_pair ( const HChar** ppc, UInt* result1, UInt* result2 )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ Bool ok = VG_(parse_UInt) (ppc, result1);
+ if (!ok)
+ return False;
+@@ -1193,6 +1250,7 @@ static Bool parse_UInt_pair ( const HCha
+ ranges. */
+ static Bool parse_ignore_ranges ( const HChar* str0 )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ init_gIgnoredAddressRanges();
+ const HChar* str = str0;
+ const HChar** ppc = &str;
+@@ -1218,6 +1276,7 @@ static Bool parse_ignore_ranges ( const
+ /* Add or remove [start, +len) from the set of ignored ranges. */
+ static Bool modify_ignore_ranges ( Bool addRange, Addr start, Addr len )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ init_gIgnoredAddressRanges();
+ const Bool verbose = (VG_(clo_verbosity) > 1);
+ if (len == 0) {
+@@ -1261,6 +1320,7 @@ __attribute__((noinline))
+ void mc_LOADV_128_or_256_slow ( /*OUT*/ULong* res,
+ Addr a, SizeT nBits, Bool bigendian )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ ULong pessim[4]; /* only used when p-l-ok=yes */
+ SSizeT szB = nBits / 8;
+ SSizeT szL = szB / 8; /* Size in Longs (64-bit units) */
+@@ -1391,6 +1451,8 @@ VG_REGPARM(3) /* make sure we're using a
+ this function may get called from hand written assembly. */
+ ULong mc_LOADVn_slow ( Addr a, SizeT nBits, Bool bigendian )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
++VG_(debugLog)(2, "initimg", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ PROF_EVENT(MCPE_LOADVN_SLOW);
+
+ /* ------------ BEGIN semi-fast cases ------------ */
+@@ -1474,7 +1536,9 @@ ULong mc_LOADVn_slow ( Addr a, SizeT nBi
+ /* If there's no possibility of getting a partial-loads-ok
+ exemption, report the error and quit. */
+ if (!MC_(clo_partial_loads_ok)) {
++VG_(debugLog)(2, "initimg", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ MC_(record_address_error)( VG_(get_running_tid)(), a, szB, False );
++VG_(debugLog)(2, "initimg", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ return vbits64;
+ }
+
+@@ -1550,7 +1614,9 @@ ULong mc_LOADVn_slow ( Addr a, SizeT nBi
+
+ /* Exemption doesn't apply. Flag an addressing error in the normal
+ way. */
++VG_(debugLog)(2, "initimg", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ MC_(record_address_error)( VG_(get_running_tid)(), a, szB, False );
++VG_(debugLog)(2, "initimg", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+
+ return vbits64;
+ }
+@@ -1560,6 +1626,7 @@ static
+ __attribute__((noinline))
+ void mc_STOREVn_slow ( Addr a, SizeT nBits, ULong vbytes, Bool bigendian )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ SizeT szB = nBits / 8;
+ SizeT i, n_addrs_bad = 0;
+ UChar vbits8;
+@@ -1658,6 +1725,7 @@ void mc_STOREVn_slow ( Addr a, SizeT nBi
+ static void set_address_range_perms ( Addr a, SizeT lenT, UWord vabits16,
+ UWord dsm_num )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ UWord sm_off, sm_off16;
+ UWord vabits2 = vabits16 & 0x3;
+ SizeT lenA, lenB, len_to_next_secmap;
+@@ -1891,6 +1959,7 @@ static void set_address_range_perms ( Ad
+
+ void MC_(make_mem_noaccess) ( Addr a, SizeT len )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ PROF_EVENT(MCPE_MAKE_MEM_NOACCESS);
+ DEBUG("MC_(make_mem_noaccess)(%p, %lu)\n", a, len);
+ set_address_range_perms ( a, len, VA_BITS16_NOACCESS, SM_DIST_NOACCESS );
+@@ -1900,6 +1969,7 @@ void MC_(make_mem_noaccess) ( Addr a, Si
+
+ static void make_mem_undefined ( Addr a, SizeT len )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ PROF_EVENT(MCPE_MAKE_MEM_UNDEFINED);
+ DEBUG("make_mem_undefined(%p, %lu)\n", a, len);
+ set_address_range_perms ( a, len, VA_BITS16_UNDEFINED, SM_DIST_UNDEFINED );
+@@ -1907,6 +1977,7 @@ static void make_mem_undefined ( Addr a,
+
+ void MC_(make_mem_undefined_w_otag) ( Addr a, SizeT len, UInt otag )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ PROF_EVENT(MCPE_MAKE_MEM_UNDEFINED_W_OTAG);
+ DEBUG("MC_(make_mem_undefined)(%p, %lu)\n", a, len);
+ set_address_range_perms ( a, len, VA_BITS16_UNDEFINED, SM_DIST_UNDEFINED );
+@@ -1918,6 +1989,7 @@ static
+ void make_mem_undefined_w_tid_and_okind ( Addr a, SizeT len,
+ ThreadId tid, UInt okind )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ UInt ecu;
+ ExeContext* here;
+ /* VG_(record_ExeContext) checks for validity of tid, and asserts
+@@ -1933,17 +2005,20 @@ void make_mem_undefined_w_tid_and_okind
+ static
+ void mc_new_mem_w_tid_make_ECU ( Addr a, SizeT len, ThreadId tid )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ make_mem_undefined_w_tid_and_okind ( a, len, tid, MC_OKIND_UNKNOWN );
+ }
+
+ static
+ void mc_new_mem_w_tid_no_ECU ( Addr a, SizeT len, ThreadId tid )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ MC_(make_mem_undefined_w_otag) ( a, len, MC_OKIND_UNKNOWN );
+ }
+
+ void MC_(make_mem_defined) ( Addr a, SizeT len )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ PROF_EVENT(MCPE_MAKE_MEM_DEFINED);
+ DEBUG("MC_(make_mem_defined)(%p, %lu)\n", a, len);
+ set_address_range_perms ( a, len, VA_BITS16_DEFINED, SM_DIST_DEFINED );
+@@ -1954,6 +2029,7 @@ void MC_(make_mem_defined) ( Addr a, Siz
+ __attribute__((unused))
+ static void make_mem_defined_w_tid ( Addr a, SizeT len, ThreadId tid )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ MC_(make_mem_defined)(a, len);
+ }
+
+@@ -1963,6 +2039,7 @@ static void make_mem_defined_w_tid ( Add
+ addressibility. Low-performance implementation. */
+ static void make_mem_defined_if_addressable ( Addr a, SizeT len )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ SizeT i;
+ UChar vabits2;
+ DEBUG("make_mem_defined_if_addressable(%p, %llu)\n", a, (ULong)len);
+@@ -1980,6 +2057,7 @@ static void make_mem_defined_if_addressa
+ /* Similarly (needed for mprotect handling ..) */
+ static void make_mem_defined_if_noaccess ( Addr a, SizeT len )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ SizeT i;
+ UChar vabits2;
+ DEBUG("make_mem_defined_if_noaccess(%p, %llu)\n", a, (ULong)len);
+@@ -1999,6 +2077,7 @@ static void make_mem_defined_if_noaccess
+
+ void MC_(copy_address_range_state) ( Addr src, Addr dst, SizeT len )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ SizeT i, j;
+ UChar vabits2, vabits8;
+ Bool aligned, nooverlap;
+@@ -2408,9 +2487,11 @@ static UWord stats__ocacheL2_n_nodes_max
+ #define OC_W32S_PER_LINE (1 << (OC_BITS_PER_LINE - 2))
+
+ static INLINE UWord oc_line_offset ( Addr a ) {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ return (a >> 2) & (OC_W32S_PER_LINE - 1);
+ }
+ static INLINE Bool is_valid_oc_tag ( Addr tag ) {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ return 0 == (tag & ((1 << OC_BITS_PER_LINE) - 1));
+ }
+
+@@ -2440,6 +2521,7 @@ typedef
+ and 'z' if all the represented tags are zero. */
+ static UChar classify_OCacheLine ( OCacheLine* line )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ UWord i;
+ if (line->tag == 1/*invalid*/)
+ return 'e'; /* EMPTY */
+@@ -2470,6 +2552,7 @@ static UWord ocacheL1_event_ctr = 0;
+ static void init_ocacheL2 ( void ); /* fwds */
+ static void init_OCache ( void )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ UWord line, set;
+ tl_assert(MC_(clo_mc_level) >= 3);
+ tl_assert(ocacheL1 == NULL);
+@@ -2489,6 +2572,7 @@ static void init_OCache ( void )
+
+ static void moveLineForwards ( OCacheSet* set, UWord lineno )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ OCacheLine tmp;
+ stats_ocacheL1_movefwds++;
+ tl_assert(lineno > 0 && lineno < OC_LINES_PER_SET);
+@@ -2498,6 +2582,7 @@ static void moveLineForwards ( OCacheSet
+ }
+
+ static void zeroise_OCacheLine ( OCacheLine* line, Addr tag ) {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ UWord i;
+ for (i = 0; i < OC_W32S_PER_LINE; i++) {
+ line->w32[i] = 0; /* NO ORIGIN */
+@@ -2512,9 +2597,11 @@ static void zeroise_OCacheLine ( OCacheL
+ static OSet* ocacheL2 = NULL;
+
+ static void* ocacheL2_malloc ( const HChar* cc, SizeT szB ) {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ return VG_(malloc)(cc, szB);
+ }
+ static void ocacheL2_free ( void* v ) {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ VG_(free)( v );
+ }
+
+@@ -2523,6 +2610,7 @@ static UWord stats__ocacheL2_n_nodes = 0
+
+ static void init_ocacheL2 ( void )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ tl_assert(!ocacheL2);
+ tl_assert(sizeof(Word) == sizeof(Addr)); /* since OCacheLine.tag :: Addr */
+ tl_assert(0 == offsetof(OCacheLine,tag));
+@@ -2536,6 +2624,7 @@ static void init_ocacheL2 ( void )
+ /* Find line with the given tag in the tree, or NULL if not found. */
+ static OCacheLine* ocacheL2_find_tag ( Addr tag )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ OCacheLine* line;
+ tl_assert(is_valid_oc_tag(tag));
+ stats__ocacheL2_refs++;
+@@ -2547,6 +2636,7 @@ static OCacheLine* ocacheL2_find_tag ( A
+ free up the associated memory. */
+ static void ocacheL2_del_tag ( Addr tag )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ OCacheLine* line;
+ tl_assert(is_valid_oc_tag(tag));
+ stats__ocacheL2_refs++;
+@@ -2562,6 +2652,7 @@ static void ocacheL2_del_tag ( Addr tag
+ present. */
+ static void ocacheL2_add_line ( OCacheLine* line )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ OCacheLine* copy;
+ tl_assert(is_valid_oc_tag(line->tag));
+ copy = VG_(OSetGen_AllocNode)( ocacheL2, sizeof(OCacheLine) );
+@@ -2579,6 +2670,7 @@ static void ocacheL2_add_line ( OCacheLi
+ __attribute__((noinline))
+ static OCacheLine* find_OCacheLine_SLOW ( Addr a )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ OCacheLine *victim, *inL2;
+ UChar c;
+ UWord line;
+@@ -2664,6 +2756,7 @@ static OCacheLine* find_OCacheLine_SLOW
+
+ static INLINE OCacheLine* find_OCacheLine ( Addr a )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ UWord setno = (a >> OC_BITS_PER_LINE) & (OC_N_SETS - 1);
+ UWord tagmask = ~((1 << OC_BITS_PER_LINE) - 1);
+ UWord tag = a & tagmask;
+@@ -2684,6 +2777,7 @@ static INLINE OCacheLine* find_OCacheLin
+
+ static INLINE void set_aligned_word64_Origin_to_undef ( Addr a, UInt otag )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ //// BEGIN inlined, specialised version of MC_(helperc_b_store8)
+ //// Set the origins for a+0 .. a+7
+ { OCacheLine* line;
+@@ -2713,6 +2807,7 @@ static INLINE void set_aligned_word64_Or
+
+ static INLINE void make_aligned_word32_undefined ( Addr a )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ PROF_EVENT(MCPE_MAKE_ALIGNED_WORD32_UNDEFINED);
+
+ #ifndef PERF_FAST_STACK2
+@@ -2738,6 +2833,7 @@ static INLINE void make_aligned_word32_u
+ static INLINE
+ void make_aligned_word32_undefined_w_otag ( Addr a, UInt otag )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ make_aligned_word32_undefined(a);
+ //// BEGIN inlined, specialised version of MC_(helperc_b_store4)
+ //// Set the origins for a+0 .. a+3
+@@ -2756,6 +2852,7 @@ void make_aligned_word32_undefined_w_ota
+ static INLINE
+ void make_aligned_word32_noaccess ( Addr a )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ PROF_EVENT(MCPE_MAKE_ALIGNED_WORD32_NOACCESS);
+
+ #ifndef PERF_FAST_STACK2
+@@ -2797,6 +2894,7 @@ void make_aligned_word32_noaccess ( Addr
+
+ static INLINE void make_aligned_word64_undefined ( Addr a )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ PROF_EVENT(MCPE_MAKE_ALIGNED_WORD64_UNDEFINED);
+
+ #ifndef PERF_FAST_STACK2
+@@ -2822,6 +2920,7 @@ static INLINE void make_aligned_word64_u
+ static INLINE
+ void make_aligned_word64_undefined_w_otag ( Addr a, UInt otag )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ make_aligned_word64_undefined(a);
+ //// BEGIN inlined, specialised version of MC_(helperc_b_store8)
+ //// Set the origins for a+0 .. a+7
+@@ -2841,6 +2940,7 @@ void make_aligned_word64_undefined_w_ota
+ static INLINE
+ void make_aligned_word64_noaccess ( Addr a )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ PROF_EVENT(MCPE_MAKE_ALIGNED_WORD64_NOACCESS);
+
+ #ifndef PERF_FAST_STACK2
+@@ -2892,6 +2992,7 @@ void make_aligned_word64_noaccess ( Addr
+ MAYBE_USED
+ static void VG_REGPARM(2) mc_new_mem_stack_4_w_ECU(Addr new_SP, UInt ecu)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ UInt otag = ecu | MC_OKIND_STACK;
+ PROF_EVENT(MCPE_NEW_MEM_STACK_4);
+ if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
+@@ -2904,6 +3005,7 @@ static void VG_REGPARM(2) mc_new_mem_sta
+ MAYBE_USED
+ static void VG_REGPARM(1) mc_new_mem_stack_4(Addr new_SP)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ PROF_EVENT(MCPE_NEW_MEM_STACK_4);
+ if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
+ make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
+@@ -2915,6 +3017,7 @@ static void VG_REGPARM(1) mc_new_mem_sta
+ MAYBE_USED
+ static void VG_REGPARM(1) mc_die_mem_stack_4(Addr new_SP)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ PROF_EVENT(MCPE_DIE_MEM_STACK_4);
+ if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
+ make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-4 );
+@@ -2928,6 +3031,7 @@ static void VG_REGPARM(1) mc_die_mem_sta
+ MAYBE_USED
+ static void VG_REGPARM(2) mc_new_mem_stack_8_w_ECU(Addr new_SP, UInt ecu)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ UInt otag = ecu | MC_OKIND_STACK;
+ PROF_EVENT(MCPE_NEW_MEM_STACK_8);
+ if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
+@@ -2943,6 +3047,7 @@ static void VG_REGPARM(2) mc_new_mem_sta
+ MAYBE_USED
+ static void VG_REGPARM(1) mc_new_mem_stack_8(Addr new_SP)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ PROF_EVENT(MCPE_NEW_MEM_STACK_8);
+ if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
+@@ -2957,6 +3062,7 @@ static void VG_REGPARM(1) mc_new_mem_sta
+ MAYBE_USED
+ static void VG_REGPARM(1) mc_die_mem_stack_8(Addr new_SP)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ PROF_EVENT(MCPE_DIE_MEM_STACK_8);
+ if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-8 );
+@@ -2973,6 +3079,7 @@ static void VG_REGPARM(1) mc_die_mem_sta
+ MAYBE_USED
+ static void VG_REGPARM(2) mc_new_mem_stack_12_w_ECU(Addr new_SP, UInt ecu)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ UInt otag = ecu | MC_OKIND_STACK;
+ PROF_EVENT(MCPE_NEW_MEM_STACK_12);
+ if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
+@@ -2992,6 +3099,7 @@ static void VG_REGPARM(2) mc_new_mem_sta
+ MAYBE_USED
+ static void VG_REGPARM(1) mc_new_mem_stack_12(Addr new_SP)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ PROF_EVENT(MCPE_NEW_MEM_STACK_12);
+ if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
+@@ -3010,6 +3118,7 @@ static void VG_REGPARM(1) mc_new_mem_sta
+ MAYBE_USED
+ static void VG_REGPARM(1) mc_die_mem_stack_12(Addr new_SP)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ PROF_EVENT(MCPE_DIE_MEM_STACK_12);
+ /* Note the -12 in the test */
+ if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP-12 )) {
+@@ -3033,6 +3142,7 @@ static void VG_REGPARM(1) mc_die_mem_sta
+ MAYBE_USED
+ static void VG_REGPARM(2) mc_new_mem_stack_16_w_ECU(Addr new_SP, UInt ecu)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ UInt otag = ecu | MC_OKIND_STACK;
+ PROF_EVENT(MCPE_NEW_MEM_STACK_16);
+ if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
+@@ -3053,6 +3163,7 @@ static void VG_REGPARM(2) mc_new_mem_sta
+ MAYBE_USED
+ static void VG_REGPARM(1) mc_new_mem_stack_16(Addr new_SP)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ PROF_EVENT(MCPE_NEW_MEM_STACK_16);
+ if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
+ /* Have 8-alignment at +0, hence do 8 at +0 and 8 at +8. */
+@@ -3072,6 +3183,7 @@ static void VG_REGPARM(1) mc_new_mem_sta
+ MAYBE_USED
+ static void VG_REGPARM(1) mc_die_mem_stack_16(Addr new_SP)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ PROF_EVENT(MCPE_DIE_MEM_STACK_16);
+ if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
+ /* Have 8-alignment at +0, hence do 8 at -16 and 8 at -8. */
+@@ -3092,6 +3204,7 @@ static void VG_REGPARM(1) mc_die_mem_sta
+ MAYBE_USED
+ static void VG_REGPARM(2) mc_new_mem_stack_32_w_ECU(Addr new_SP, UInt ecu)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ UInt otag = ecu | MC_OKIND_STACK;
+ PROF_EVENT(MCPE_NEW_MEM_STACK_32);
+ if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
+@@ -3116,6 +3229,7 @@ static void VG_REGPARM(2) mc_new_mem_sta
+ MAYBE_USED
+ static void VG_REGPARM(1) mc_new_mem_stack_32(Addr new_SP)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ PROF_EVENT(MCPE_NEW_MEM_STACK_32);
+ if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
+ /* Straightforward */
+@@ -3139,6 +3253,7 @@ static void VG_REGPARM(1) mc_new_mem_sta
+ MAYBE_USED
+ static void VG_REGPARM(1) mc_die_mem_stack_32(Addr new_SP)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ PROF_EVENT(MCPE_DIE_MEM_STACK_32);
+ if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
+ /* Straightforward */
+@@ -3164,6 +3279,7 @@ static void VG_REGPARM(1) mc_die_mem_sta
+ MAYBE_USED
+ static void VG_REGPARM(2) mc_new_mem_stack_112_w_ECU(Addr new_SP, UInt ecu)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ UInt otag = ecu | MC_OKIND_STACK;
+ PROF_EVENT(MCPE_NEW_MEM_STACK_112);
+ if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
+@@ -3189,6 +3305,7 @@ static void VG_REGPARM(2) mc_new_mem_sta
+ MAYBE_USED
+ static void VG_REGPARM(1) mc_new_mem_stack_112(Addr new_SP)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ PROF_EVENT(MCPE_NEW_MEM_STACK_112);
+ if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
+@@ -3213,6 +3330,7 @@ static void VG_REGPARM(1) mc_new_mem_sta
+ MAYBE_USED
+ static void VG_REGPARM(1) mc_die_mem_stack_112(Addr new_SP)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ PROF_EVENT(MCPE_DIE_MEM_STACK_112);
+ if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-112);
+@@ -3239,6 +3357,7 @@ static void VG_REGPARM(1) mc_die_mem_sta
+ MAYBE_USED
+ static void VG_REGPARM(2) mc_new_mem_stack_128_w_ECU(Addr new_SP, UInt ecu)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ UInt otag = ecu | MC_OKIND_STACK;
+ PROF_EVENT(MCPE_NEW_MEM_STACK_128);
+ if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
+@@ -3266,6 +3385,7 @@ static void VG_REGPARM(2) mc_new_mem_sta
+ MAYBE_USED
+ static void VG_REGPARM(1) mc_new_mem_stack_128(Addr new_SP)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ PROF_EVENT(MCPE_NEW_MEM_STACK_128);
+ if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
+@@ -3292,6 +3412,7 @@ static void VG_REGPARM(1) mc_new_mem_sta
+ MAYBE_USED
+ static void VG_REGPARM(1) mc_die_mem_stack_128(Addr new_SP)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ PROF_EVENT(MCPE_DIE_MEM_STACK_128);
+ if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-128);
+@@ -3320,6 +3441,7 @@ static void VG_REGPARM(1) mc_die_mem_sta
+ MAYBE_USED
+ static void VG_REGPARM(2) mc_new_mem_stack_144_w_ECU(Addr new_SP, UInt ecu)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ UInt otag = ecu | MC_OKIND_STACK;
+ PROF_EVENT(MCPE_NEW_MEM_STACK_144);
+ if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
+@@ -3349,6 +3471,7 @@ static void VG_REGPARM(2) mc_new_mem_sta
+ MAYBE_USED
+ static void VG_REGPARM(1) mc_new_mem_stack_144(Addr new_SP)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ PROF_EVENT(MCPE_NEW_MEM_STACK_144);
+ if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
+@@ -3377,6 +3500,7 @@ static void VG_REGPARM(1) mc_new_mem_sta
+ MAYBE_USED
+ static void VG_REGPARM(1) mc_die_mem_stack_144(Addr new_SP)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ PROF_EVENT(MCPE_DIE_MEM_STACK_144);
+ if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-144);
+@@ -3407,6 +3531,7 @@ static void VG_REGPARM(1) mc_die_mem_sta
+ MAYBE_USED
+ static void VG_REGPARM(2) mc_new_mem_stack_160_w_ECU(Addr new_SP, UInt ecu)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ UInt otag = ecu | MC_OKIND_STACK;
+ PROF_EVENT(MCPE_NEW_MEM_STACK_160);
+ if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
+@@ -3438,6 +3563,7 @@ static void VG_REGPARM(2) mc_new_mem_sta
+ MAYBE_USED
+ static void VG_REGPARM(1) mc_new_mem_stack_160(Addr new_SP)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ PROF_EVENT(MCPE_NEW_MEM_STACK_160);
+ if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
+ make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
+@@ -3468,6 +3594,7 @@ static void VG_REGPARM(1) mc_new_mem_sta
+ MAYBE_USED
+ static void VG_REGPARM(1) mc_die_mem_stack_160(Addr new_SP)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ PROF_EVENT(MCPE_DIE_MEM_STACK_160);
+ if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
+ make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-160);
+@@ -3499,6 +3626,7 @@ static void VG_REGPARM(1) mc_die_mem_sta
+
+ static void mc_new_mem_stack_w_ECU ( Addr a, SizeT len, UInt ecu )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ UInt otag = ecu | MC_OKIND_STACK;
+ PROF_EVENT(MCPE_NEW_MEM_STACK);
+ MC_(make_mem_undefined_w_otag) ( -VG_STACK_REDZONE_SZB + a, len, otag );
+@@ -3506,12 +3634,14 @@ static void mc_new_mem_stack_w_ECU ( Add
+
+ static void mc_new_mem_stack ( Addr a, SizeT len )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ PROF_EVENT(MCPE_NEW_MEM_STACK);
+ make_mem_undefined ( -VG_STACK_REDZONE_SZB + a, len );
+ }
+
+ static void mc_die_mem_stack ( Addr a, SizeT len )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ PROF_EVENT(MCPE_DIE_MEM_STACK);
+ MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + a, len );
+ }
+@@ -3564,6 +3694,7 @@ static WCacheEnt nia_to_ecu_cache[N_NIA_
+
+ static void init_nia_to_ecu_cache ( void )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ UWord i;
+ Addr zero_addr = 0;
+ ExeContext* zero_ec;
+@@ -3585,6 +3716,7 @@ static void init_nia_to_ecu_cache ( void
+
+ static inline UInt convert_nia_to_ecu ( Addr nia )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ UWord i;
+ UInt ecu;
+ ExeContext* ec;
+@@ -3628,6 +3760,7 @@ static inline UInt convert_nia_to_ecu (
+ VG_REGPARM(3)
+ void MC_(helperc_MAKE_STACK_UNINIT_w_o) ( Addr base, UWord len, Addr nia )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ PROF_EVENT(MCPE_MAKE_STACK_UNINIT_W_O);
+ if (0)
+ VG_(printf)("helperc_MAKE_STACK_UNINIT_w_o (%#lx,%lu,nia=%#lx)\n",
+@@ -3830,6 +3963,7 @@ void MC_(helperc_MAKE_STACK_UNINIT_w_o)
+ VG_REGPARM(2)
+ void MC_(helperc_MAKE_STACK_UNINIT_no_o) ( Addr base, UWord len )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ PROF_EVENT(MCPE_MAKE_STACK_UNINIT_NO_O);
+ if (0)
+ VG_(printf)("helperc_MAKE_STACK_UNINIT_no_o (%#lx,%lu)\n",
+@@ -3975,6 +4109,7 @@ void MC_(helperc_MAKE_STACK_UNINIT_no_o)
+ VG_REGPARM(1)
+ void MC_(helperc_MAKE_STACK_UNINIT_128_no_o) ( Addr base )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ PROF_EVENT(MCPE_MAKE_STACK_UNINIT_128_NO_O);
+ if (0)
+ VG_(printf)("helperc_MAKE_STACK_UNINIT_128_no_o (%#lx)\n", base );
+@@ -4120,6 +4255,7 @@ typedef
+ similar. */
+ Bool MC_(check_mem_is_noaccess) ( Addr a, SizeT len, Addr* bad_addr )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ SizeT i;
+ UWord vabits2;
+
+@@ -4139,6 +4275,7 @@ Bool MC_(check_mem_is_noaccess) ( Addr a
+ static Bool is_mem_addressable ( Addr a, SizeT len,
+ /*OUT*/Addr* bad_addr )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ SizeT i;
+ UWord vabits2;
+
+@@ -4159,6 +4296,7 @@ static MC_ReadResult is_mem_defined ( Ad
+ /*OUT*/Addr* bad_addr,
+ /*OUT*/UInt* otag )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ SizeT i;
+ UWord vabits2;
+
+@@ -4211,6 +4349,7 @@ static void is_mem_defined_comprehensive
+ /*OUT*/Addr* bad_addrA /* if so where? */
+ )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ SizeT i;
+ UWord vabits2;
+ Bool already_saw_errV = False;
+@@ -4258,6 +4397,7 @@ static void is_mem_defined_comprehensive
+
+ static Bool mc_is_defined_asciiz ( Addr a, Addr* bad_addr, UInt* otag )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ UWord vabits2;
+
+ PROF_EVENT(MCPE_IS_DEFINED_ASCIIZ);
+@@ -4302,6 +4442,7 @@ static
+ void check_mem_is_addressable ( CorePart part, ThreadId tid, const HChar* s,
+ Addr base, SizeT size )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ Addr bad_addr;
+ Bool ok = is_mem_addressable ( base, size, &bad_addr );
+
+@@ -4326,6 +4467,7 @@ static
+ void check_mem_is_defined ( CorePart part, ThreadId tid, const HChar* s,
+ Addr base, SizeT size )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ UInt otag = 0;
+ Addr bad_addr;
+ MC_ReadResult res = is_mem_defined ( base, size, &bad_addr, &otag );
+@@ -4359,6 +4501,7 @@ static
+ void check_mem_is_defined_asciiz ( CorePart part, ThreadId tid,
+ const HChar* s, Addr str )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ MC_ReadResult res;
+ Addr bad_addr = 0; // shut GCC up
+ UInt otag = 0;
+@@ -4430,6 +4573,7 @@ static
+ void mc_new_mem_mmap ( Addr a, SizeT len, Bool rr, Bool ww, Bool xx,
+ ULong di_handle )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ if (rr || ww || xx) {
+ /* (2) mmap/mprotect other -> defined */
+ MC_(make_mem_defined)(a, len);
+@@ -4442,6 +4586,7 @@ void mc_new_mem_mmap ( Addr a, SizeT len
+ static
+ void mc_new_mem_mprotect ( Addr a, SizeT len, Bool rr, Bool ww, Bool xx )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ if (rr || ww || xx) {
+ /* (4) mprotect other -> change any "noaccess" to "defined" */
+ make_mem_defined_if_noaccess(a, len);
+@@ -4456,6 +4601,7 @@ static
+ void mc_new_mem_startup( Addr a, SizeT len,
+ Bool rr, Bool ww, Bool xx, ULong di_handle )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ // Because code is defined, initialised variables get put in the data
+ // segment and are defined, and uninitialised variables get put in the
+ // bss segment and are auto-zeroed (and so defined).
+@@ -4479,6 +4625,7 @@ void mc_new_mem_startup( Addr a, SizeT l
+ static
+ void mc_post_mem_write(CorePart part, ThreadId tid, Addr a, SizeT len)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ MC_(make_mem_defined)(a, len);
+ }
+
+@@ -4493,6 +4640,7 @@ void mc_post_mem_write(CorePart part, Th
+ static UInt mb_get_origin_for_guest_offset ( ThreadId tid,
+ Int offset, SizeT size )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ Int sh2off;
+ UInt area[3];
+ UInt otag;
+@@ -4519,6 +4667,7 @@ static UInt mb_get_origin_for_guest_offs
+ static void mc_post_reg_write ( CorePart part, ThreadId tid,
+ PtrdiffT offset, SizeT size)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ # define MAX_REG_WRITE_SIZE 1744
+ UChar area[MAX_REG_WRITE_SIZE];
+ tl_assert(size <= MAX_REG_WRITE_SIZE);
+@@ -4531,6 +4680,7 @@ static
+ void mc_post_reg_write_clientcall ( ThreadId tid,
+ PtrdiffT offset, SizeT size, Addr f)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ mc_post_reg_write(/*dummy*/0, tid, offset, size);
+ }
+
+@@ -4541,6 +4691,7 @@ void mc_post_reg_write_clientcall ( Thre
+ static void mc_pre_reg_read ( CorePart part, ThreadId tid, const HChar* s,
+ PtrdiffT offset, SizeT size)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ Int i;
+ Bool bad;
+ UInt otag;
+@@ -4575,6 +4726,7 @@ static void mc_pre_reg_read ( CorePart p
+ static void mc_copy_mem_to_reg ( CorePart part, ThreadId tid, Addr a,
+ PtrdiffT guest_state_offset, SizeT size )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ SizeT i;
+ UChar vbits8;
+ Int offset;
+@@ -4625,6 +4777,7 @@ static void mc_copy_reg_to_mem ( CorePar
+ PtrdiffT guest_state_offset, Addr a,
+ SizeT size )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ SizeT i;
+ UChar vbits8;
+ Int offset;
+@@ -4800,6 +4953,7 @@ static INLINE
+ void mc_LOADV_128_or_256 ( /*OUT*/ULong* res,
+ Addr a, SizeT nBits, Bool isBigEndian )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ PROF_EVENT(MCPE_LOADV_128_OR_256);
+
+ #ifndef PERF_FAST_LOADV
+@@ -4846,19 +5000,23 @@ void mc_LOADV_128_or_256 ( /*OUT*/ULong*
+
+ VG_REGPARM(2) void MC_(helperc_LOADV256be) ( /*OUT*/V256* res, Addr a )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ mc_LOADV_128_or_256(&res->w64[0], a, 256, True);
+ }
+ VG_REGPARM(2) void MC_(helperc_LOADV256le) ( /*OUT*/V256* res, Addr a )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ mc_LOADV_128_or_256(&res->w64[0], a, 256, False);
+ }
+
+ VG_REGPARM(2) void MC_(helperc_LOADV128be) ( /*OUT*/V128* res, Addr a )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ mc_LOADV_128_or_256(&res->w64[0], a, 128, True);
+ }
+ VG_REGPARM(2) void MC_(helperc_LOADV128le) ( /*OUT*/V128* res, Addr a )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ mc_LOADV_128_or_256(&res->w64[0], a, 128, False);
+ }
+
+@@ -4869,6 +5027,7 @@ VG_REGPARM(2) void MC_(helperc_LOADV128l
+ static INLINE
+ ULong mc_LOADV64 ( Addr a, Bool isBigEndian )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ PROF_EVENT(MCPE_LOADV64);
+
+ #ifndef PERF_FAST_LOADV
+@@ -4906,6 +5065,7 @@ ULong mc_LOADV64 ( Addr a, Bool isBigEnd
+ // Generic for all platforms
+ VG_REGPARM(1) ULong MC_(helperc_LOADV64be) ( Addr a )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ return mc_LOADV64(a, True);
+ }
+
+@@ -4922,6 +5082,7 @@ VG_REGPARM(1) ULong MC_(helperc_LOADV64b
+ // Generic for all platforms except {arm32,x86}-linux and x86-solaris
+ VG_REGPARM(1) ULong MC_(helperc_LOADV64le) ( Addr a )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ return mc_LOADV64(a, False);
+ }
+ #endif
+@@ -4933,6 +5094,7 @@ VG_REGPARM(1) ULong MC_(helperc_LOADV64l
+ static INLINE
+ void mc_STOREV64 ( Addr a, ULong vbits64, Bool isBigEndian )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ PROF_EVENT(MCPE_STOREV64);
+
+ #ifndef PERF_FAST_STOREV
+@@ -4989,10 +5151,12 @@ void mc_STOREV64 ( Addr a, ULong vbits64
+
+ VG_REGPARM(1) void MC_(helperc_STOREV64be) ( Addr a, ULong vbits64 )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ mc_STOREV64(a, vbits64, True);
+ }
+ VG_REGPARM(1) void MC_(helperc_STOREV64le) ( Addr a, ULong vbits64 )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ mc_STOREV64(a, vbits64, False);
+ }
+
+@@ -5003,6 +5167,7 @@ VG_REGPARM(1) void MC_(helperc_STOREV64l
+ static INLINE
+ UWord mc_LOADV32 ( Addr a, Bool isBigEndian )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ PROF_EVENT(MCPE_LOADV32);
+
+ #ifndef PERF_FAST_LOADV
+@@ -5042,6 +5207,7 @@ UWord mc_LOADV32 ( Addr a, Bool isBigEnd
+ // Generic for all platforms
+ VG_REGPARM(1) UWord MC_(helperc_LOADV32be) ( Addr a )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ return mc_LOADV32(a, True);
+ }
+
+@@ -5058,7 +5224,10 @@ VG_REGPARM(1) UWord MC_(helperc_LOADV32b
+ // Generic for all platforms except {arm32,x86}-linux and x86-solaris
+ VG_REGPARM(1) UWord MC_(helperc_LOADV32le) ( Addr a )
+ {
+- return mc_LOADV32(a, False);
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
++ UWord return1 = mc_LOADV32(a, False);
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
++ return return1;
+ }
+ #endif
+
+@@ -5069,6 +5238,7 @@ VG_REGPARM(1) UWord MC_(helperc_LOADV32l
+ static INLINE
+ void mc_STOREV32 ( Addr a, UWord vbits32, Bool isBigEndian )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ PROF_EVENT(MCPE_STOREV32);
+
+ #ifndef PERF_FAST_STOREV
+@@ -5119,15 +5289,20 @@ void mc_STOREV32 ( Addr a, UWord vbits32
+ mc_STOREVn_slow( a, 32, (ULong)vbits32, isBigEndian );
+ }
+ #endif
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ }
+
+ VG_REGPARM(2) void MC_(helperc_STOREV32be) ( Addr a, UWord vbits32 )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ mc_STOREV32(a, vbits32, True);
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ }
+ VG_REGPARM(2) void MC_(helperc_STOREV32le) ( Addr a, UWord vbits32 )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ mc_STOREV32(a, vbits32, False);
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ }
+
+ /*------------------------------------------------------------*/
+@@ -5137,6 +5312,7 @@ VG_REGPARM(2) void MC_(helperc_STOREV32l
+ static INLINE
+ UWord mc_LOADV16 ( Addr a, Bool isBigEndian )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ PROF_EVENT(MCPE_LOADV16);
+
+ #ifndef PERF_FAST_LOADV
+@@ -5173,11 +5349,13 @@ UWord mc_LOADV16 ( Addr a, Bool isBigEnd
+ }
+ }
+ #endif
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ }
+
+ // Generic for all platforms
+ VG_REGPARM(1) UWord MC_(helperc_LOADV16be) ( Addr a )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ return mc_LOADV16(a, True);
+ }
+
+@@ -5295,6 +5473,7 @@ VG_REGPARM(1) UWord MC_(helperc_LOADV16l
+ static INLINE
+ Bool accessible_vabits4_in_vabits8 ( Addr a, UChar vabits8 )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ UInt shift;
+ tl_assert(VG_IS_2_ALIGNED(a)); // Must be 2-aligned
+ shift = (a & 2) << 1; // shift by 0 or 4
+@@ -5307,6 +5486,7 @@ Bool accessible_vabits4_in_vabits8 ( Add
+ static INLINE
+ void mc_STOREV16 ( Addr a, UWord vbits16, Bool isBigEndian )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ PROF_EVENT(MCPE_STOREV16);
+
+ #ifndef PERF_FAST_STOREV
+@@ -5365,10 +5545,12 @@ void mc_STOREV16 ( Addr a, UWord vbits16
+
+ VG_REGPARM(2) void MC_(helperc_STOREV16be) ( Addr a, UWord vbits16 )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ mc_STOREV16(a, vbits16, True);
+ }
+ VG_REGPARM(2) void MC_(helperc_STOREV16le) ( Addr a, UWord vbits16 )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ mc_STOREV16(a, vbits16, False);
+ }
+
+@@ -5475,6 +5657,7 @@ __asm__(
+ VG_REGPARM(1)
+ UWord MC_(helperc_LOADV8) ( Addr a )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ PROF_EVENT(MCPE_LOADV8);
+
+ #ifndef PERF_FAST_LOADV
+@@ -5521,6 +5704,7 @@ UWord MC_(helperc_LOADV8) ( Addr a )
+ VG_REGPARM(2)
+ void MC_(helperc_STOREV8) ( Addr a, UWord vbits8 )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ PROF_EVENT(MCPE_STOREV8);
+
+ #ifndef PERF_FAST_STOREV
+@@ -5624,54 +5808,74 @@ void MC_(helperc_STOREV8) ( Addr a, UWor
+ /* Call these ones when an origin is available ... */
+ VG_REGPARM(1)
+ void MC_(helperc_value_check0_fail_w_o) ( UWord origin ) {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ MC_(record_cond_error) ( VG_(get_running_tid)(), (UInt)origin );
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ }
+
+ VG_REGPARM(1)
+ void MC_(helperc_value_check1_fail_w_o) ( UWord origin ) {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ MC_(record_value_error) ( VG_(get_running_tid)(), 1, (UInt)origin );
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ }
+
+ VG_REGPARM(1)
+ void MC_(helperc_value_check4_fail_w_o) ( UWord origin ) {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ MC_(record_value_error) ( VG_(get_running_tid)(), 4, (UInt)origin );
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ }
+
+ VG_REGPARM(1)
+ void MC_(helperc_value_check8_fail_w_o) ( UWord origin ) {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ MC_(record_value_error) ( VG_(get_running_tid)(), 8, (UInt)origin );
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ }
+
+ VG_REGPARM(2)
+ void MC_(helperc_value_checkN_fail_w_o) ( HWord sz, UWord origin ) {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ MC_(record_value_error) ( VG_(get_running_tid)(), (Int)sz, (UInt)origin );
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ }
+
+ /* ... and these when an origin isn't available. */
+
+ VG_REGPARM(0)
+ void MC_(helperc_value_check0_fail_no_o) ( void ) {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ MC_(record_cond_error) ( VG_(get_running_tid)(), 0/*origin*/ );
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ }
+
+ VG_REGPARM(0)
+ void MC_(helperc_value_check1_fail_no_o) ( void ) {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ MC_(record_value_error) ( VG_(get_running_tid)(), 1, 0/*origin*/ );
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ }
+
+ VG_REGPARM(0)
+ void MC_(helperc_value_check4_fail_no_o) ( void ) {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ MC_(record_value_error) ( VG_(get_running_tid)(), 4, 0/*origin*/ );
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ }
+
+ VG_REGPARM(0)
+ void MC_(helperc_value_check8_fail_no_o) ( void ) {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ MC_(record_value_error) ( VG_(get_running_tid)(), 8, 0/*origin*/ );
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ }
+
+ VG_REGPARM(1)
+ void MC_(helperc_value_checkN_fail_no_o) ( HWord sz ) {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ MC_(record_value_error) ( VG_(get_running_tid)(), (Int)sz, 0/*origin*/ );
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ }
+
+
+@@ -5697,6 +5901,7 @@ static Int mc_get_or_set_vbits_for_clien
+ False <=> internal call from gdbserver */
+ )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ SizeT i;
+ Bool ok;
+ UChar vbits8;
+@@ -5743,6 +5948,7 @@ static Int mc_get_or_set_vbits_for_clien
+ */
+ Bool MC_(is_within_valid_secondary) ( Addr a )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ SecMap* sm = maybe_get_secmap_for ( a );
+ if (sm == NULL || sm == &sm_distinguished[SM_DIST_NOACCESS]) {
+ /* Definitely not in use. */
+@@ -5757,6 +5963,7 @@ Bool MC_(is_within_valid_secondary) ( Ad
+ address is to be regarded as valid. */
+ Bool MC_(is_valid_aligned_word) ( Addr a )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ tl_assert(sizeof(UWord) == 4 || sizeof(UWord) == 8);
+ tl_assert(VG_IS_WORD_ALIGNED(a));
+ if (get_vabits8_for_aligned_word32 (a) != VA_BITS8_DEFINED)
+@@ -5778,6 +5985,7 @@ Bool MC_(is_valid_aligned_word) ( Addr a
+
+ static void init_shadow_memory ( void )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ Int i;
+ SecMap* sm;
+
+@@ -5819,6 +6027,7 @@ static void init_shadow_memory ( void )
+
+ static Bool mc_cheap_sanity_check ( void )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ n_sanity_cheap++;
+ PROF_EVENT(MCPE_CHEAP_SANITY_CHECK);
+ /* Check for sane operating level */
+@@ -5830,6 +6039,7 @@ static Bool mc_cheap_sanity_check ( void
+
+ static Bool mc_expensive_sanity_check ( void )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ Int i;
+ Word n_secmaps_found;
+ SecMap* sm;
+@@ -5963,6 +6173,7 @@ static const HChar * MC_(parse_leak_heur
+
+ static Bool mc_process_cmd_line_options(const HChar* arg)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ const HChar* tmp_str;
+ Int tmp_show;
+
+@@ -6168,6 +6379,7 @@ static Bool mc_process_cmd_line_options(
+
+ static void mc_print_usage(void)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ VG_(printf)(
+ " --leak-check=no|summary|full search for memory leaks at exit? [summary]\n"
+ " --leak-resolution=low|med|high differentiation of leak stack traces [high]\n"
+@@ -6210,6 +6422,7 @@ static void mc_print_usage(void)
+
+ static void mc_print_debug_usage(void)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ VG_(printf)(
+ " (none)\n"
+ );
+@@ -6250,6 +6463,7 @@ static ULong cgb_search = 0; /* Numb
+ void MC_(get_ClientBlock_array)( /*OUT*/CGenBlock** blocks,
+ /*OUT*/UWord* nBlocks )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ *blocks = cgbs;
+ *nBlocks = cgb_used;
+ }
+@@ -6258,6 +6472,7 @@ void MC_(get_ClientBlock_array)( /*OUT*/
+ static
+ Int alloc_client_block ( void )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ UWord i, sz_new;
+ CGenBlock* cgbs_new;
+
+@@ -6297,6 +6512,7 @@ Int alloc_client_block ( void )
+
+ static void show_client_block_stats ( void )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ VG_(message)(Vg_DebugMsg,
+ "general CBs: %llu allocs, %llu discards, %llu maxinuse, %llu search\n",
+ cgb_allocs, cgb_discards, cgb_used_MAX, cgb_search
+@@ -6304,6 +6520,7 @@ static void show_client_block_stats ( vo
+ }
+ static void print_monitor_help ( void )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ VG_(gdb_printf)
+ (
+ "\n"
+@@ -6358,6 +6575,7 @@ static void print_monitor_help ( void )
+ res[i] == 1 indicates the corresponding byte is addressable. */
+ static void gdb_xb (Addr address, SizeT szB, Int res[])
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ UInt i;
+
+ for (i = 0; i < szB; i++) {
+@@ -6380,6 +6598,7 @@ static void gdb_xb (Addr address, SizeT
+ or address of the string terminator. */
+ static HChar* next_non_space (HChar *s)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ while (*s && *s == ' ')
+ s++;
+ return s;
+@@ -6393,6 +6612,7 @@ static HChar* next_non_space (HChar *s)
+ static Bool VG_(parse_slice) (HChar* s, HChar** saveptr,
+ UInt *from, UInt *to)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ HChar* wl;
+ HChar *endptr;
+ endptr = NULL;////
+@@ -6453,6 +6673,7 @@ static Bool VG_(parse_slice) (HChar* s,
+ /* return True if request recognised, False otherwise */
+ static Bool handle_gdb_monitor_command (ThreadId tid, HChar *req)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ HChar* wcmd;
+ HChar s[VG_(strlen)(req) + 1]; /* copy for strtok_r */
+ HChar *ssaveptr;
+@@ -6856,6 +7077,7 @@ static Bool handle_gdb_monitor_command (
+
+ static Bool mc_handle_client_request ( ThreadId tid, UWord* arg, UWord* ret )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ Int i;
+ Addr bad_addr;
+
+@@ -7334,6 +7556,7 @@ static const HChar* MC_(event_ctr_name)[
+
+ static void init_prof_mem ( void )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ Int i, name_count = 0;
+
+ for (i = 0; i < MCPE_LAST; i++) {
+@@ -7348,6 +7571,7 @@ static void init_prof_mem ( void )
+
+ static void done_prof_mem ( void )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ Int i, n;
+ Bool spaced = False;
+ for (i = n = 0; i < MCPE_LAST; i++) {
+@@ -7367,8 +7591,8 @@ static void done_prof_mem ( void )
+
+ #else
+
+-static void init_prof_mem ( void ) { }
+-static void done_prof_mem ( void ) { }
++static void init_prof_mem ( void ) { VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);}
++static void done_prof_mem ( void ) { VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);}
+
+ #endif
+
+@@ -7382,10 +7606,12 @@ static void done_prof_mem ( void ) { }
+ /*--------------------------------------------*/
+
+ static INLINE UInt merge_origins ( UInt or1, UInt or2 ) {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ return or1 > or2 ? or1 : or2;
+ }
+
+ UWord VG_REGPARM(1) MC_(helperc_b_load1)( Addr a ) {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ OCacheLine* line;
+ UChar descr;
+ UWord lineoff = oc_line_offset(a);
+@@ -7410,6 +7636,7 @@ UWord VG_REGPARM(1) MC_(helperc_b_load1)
+ }
+
+ UWord VG_REGPARM(1) MC_(helperc_b_load2)( Addr a ) {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ OCacheLine* line;
+ UChar descr;
+ UWord lineoff, byteoff;
+@@ -7442,6 +7669,7 @@ UWord VG_REGPARM(1) MC_(helperc_b_load2)
+ }
+
+ UWord VG_REGPARM(1) MC_(helperc_b_load4)( Addr a ) {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ OCacheLine* line;
+ UChar descr;
+ UWord lineoff;
+@@ -7473,6 +7701,7 @@ UWord VG_REGPARM(1) MC_(helperc_b_load4)
+ }
+
+ UWord VG_REGPARM(1) MC_(helperc_b_load8)( Addr a ) {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ OCacheLine* line;
+ UChar descrLo, descrHi, descr;
+ UWord lineoff;
+@@ -7508,6 +7737,7 @@ UWord VG_REGPARM(1) MC_(helperc_b_load8)
+ }
+
+ UWord VG_REGPARM(1) MC_(helperc_b_load16)( Addr a ) {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ UInt oLo = (UInt)MC_(helperc_b_load8)( a + 0 );
+ UInt oHi = (UInt)MC_(helperc_b_load8)( a + 8 );
+ UInt oBoth = merge_origins(oLo, oHi);
+@@ -7515,6 +7745,7 @@ UWord VG_REGPARM(1) MC_(helperc_b_load16
+ }
+
+ UWord VG_REGPARM(1) MC_(helperc_b_load32)( Addr a ) {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ UInt oQ0 = (UInt)MC_(helperc_b_load8)( a + 0 );
+ UInt oQ1 = (UInt)MC_(helperc_b_load8)( a + 8 );
+ UInt oQ2 = (UInt)MC_(helperc_b_load8)( a + 16 );
+@@ -7530,6 +7761,7 @@ UWord VG_REGPARM(1) MC_(helperc_b_load32
+ /*--------------------------------------------*/
+
+ void VG_REGPARM(2) MC_(helperc_b_store1)( Addr a, UWord d32 ) {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ OCacheLine* line;
+ UWord lineoff = oc_line_offset(a);
+ UWord byteoff = a & 3; /* 0, 1, 2 or 3 */
+@@ -7549,6 +7781,7 @@ void VG_REGPARM(2) MC_(helperc_b_store1)
+ }
+
+ void VG_REGPARM(2) MC_(helperc_b_store2)( Addr a, UWord d32 ) {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ OCacheLine* line;
+ UWord lineoff, byteoff;
+
+@@ -7577,6 +7810,7 @@ void VG_REGPARM(2) MC_(helperc_b_store2)
+ }
+
+ void VG_REGPARM(2) MC_(helperc_b_store4)( Addr a, UWord d32 ) {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ OCacheLine* line;
+ UWord lineoff;
+
+@@ -7603,6 +7837,7 @@ void VG_REGPARM(2) MC_(helperc_b_store4)
+ }
+
+ void VG_REGPARM(2) MC_(helperc_b_store8)( Addr a, UWord d32 ) {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ OCacheLine* line;
+ UWord lineoff;
+
+@@ -7632,11 +7867,13 @@ void VG_REGPARM(2) MC_(helperc_b_store8)
+ }
+
+ void VG_REGPARM(2) MC_(helperc_b_store16)( Addr a, UWord d32 ) {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ MC_(helperc_b_store8)( a + 0, d32 );
+ MC_(helperc_b_store8)( a + 8, d32 );
+ }
+
+ void VG_REGPARM(2) MC_(helperc_b_store32)( Addr a, UWord d32 ) {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ MC_(helperc_b_store8)( a + 0, d32 );
+ MC_(helperc_b_store8)( a + 8, d32 );
+ MC_(helperc_b_store8)( a + 16, d32 );
+@@ -7650,6 +7887,7 @@ void VG_REGPARM(2) MC_(helperc_b_store32
+
+ __attribute__((noinline))
+ static void ocache_sarp_Set_Origins ( Addr a, UWord len, UInt otag ) {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ if ((a & 1) && len >= 1) {
+ MC_(helperc_b_store1)( a, otag );
+ a++;
+@@ -7682,6 +7920,7 @@ static void ocache_sarp_Set_Origins ( Ad
+
+ __attribute__((noinline))
+ static void ocache_sarp_Clear_Origins ( Addr a, UWord len ) {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ if ((a & 1) && len >= 1) {
+ MC_(helperc_b_store1)( a, 0 );
+ a++;
+@@ -7719,6 +7958,7 @@ static void ocache_sarp_Clear_Origins (
+
+ static void mc_post_clo_init ( void )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ /* If we've been asked to emit XML, mash around various other
+ options so as to constrain the output somewhat. */
+ if (VG_(clo_xml)) {
+@@ -7875,6 +8115,7 @@ static void mc_post_clo_init ( void )
+
+ static void print_SM_info(const HChar* type, Int n_SMs)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ VG_(message)(Vg_DebugMsg,
+ " memcheck: SMs: %s = %d (%luk, %luM)\n",
+ type,
+@@ -7885,6 +8126,7 @@ static void print_SM_info(const HChar* t
+
+ static void mc_print_stats (void)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ SizeT max_secVBit_szB, max_SMs_szB, max_shmem_szB;
+
+ VG_(message)(Vg_DebugMsg, " memcheck: freelist: vol %lld length %lld\n",
+@@ -7979,6 +8221,7 @@ static void mc_print_stats (void)
+
+ static void mc_fini ( Int exitcode )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ MC_(xtmemory_report) (VG_(clo_xtree_memory_file), True);
+ MC_(print_malloc_stats)();
+
+@@ -8076,6 +8319,7 @@ static void mc_fini ( Int exitcode )
+ static Bool mc_mark_unaddressable_for_watchpoint (PointKind kind, Bool insert,
+ Addr addr, SizeT len)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ /* GDBTD this is somewhat fishy. We might rather have to save the previous
+ accessibility and definedness in gdbserver so as to allow restoring it
+ properly. Currently, we assume that the user only watches things
+@@ -8089,6 +8333,7 @@ static Bool mc_mark_unaddressable_for_wa
+
+ static void mc_pre_clo_init(void)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ VG_(details_name) ("Memcheck");
+ VG_(details_version) (NULL);
+ VG_(details_description) ("a memory error detector");
diff --git a/valgrind-netbsd/patches/patch-memcheck_mc__malloc__wrappers.c b/valgrind-netbsd/patches/patch-memcheck_mc__malloc__wrappers.c
new file mode 100644
index 0000000000..427c1f632c
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-memcheck_mc__malloc__wrappers.c
@@ -0,0 +1,336 @@
+$NetBSD$
+
+--- memcheck/mc_malloc_wrappers.c.orig 2018-05-05 07:42:22.000000000 +0000
++++ memcheck/mc_malloc_wrappers.c
+@@ -102,6 +102,7 @@ static MC_Chunk* freed_list_end[2] =
+ some of the oldest blocks in the queue at the same time. */
+ static void add_to_freed_queue ( MC_Chunk* mc )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ const Bool show = False;
+ const int l = (mc->szB >= MC_(clo_freelist_big_blocks) ? 0 : 1);
+
+@@ -137,6 +138,7 @@ static void add_to_freed_queue ( MC_Chun
+ On exit, VG_(free_queue_volume) will be <= MC_(clo_freelist_vol). */
+ static void release_oldest_block(void)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ const Bool show = False;
+ int i;
+ tl_assert (VG_(free_queue_volume) > MC_(clo_freelist_vol));
+@@ -174,6 +176,7 @@ static void release_oldest_block(void)
+
+ MC_Chunk* MC_(get_freed_block_bracketting) (Addr a)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ int i;
+ for (i = 0; i < 2; i++) {
+ MC_Chunk* mc;
+@@ -194,6 +197,7 @@ static
+ MC_Chunk* create_MC_Chunk ( ThreadId tid, Addr p, SizeT szB,
+ MC_AllocKind kind)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ MC_Chunk* mc = VG_(allocEltPA)(MC_(chunk_poolalloc));
+ mc->data = p;
+ mc->szB = szB;
+@@ -224,12 +228,14 @@ MC_Chunk* create_MC_Chunk ( ThreadId tid
+ static inline
+ void delete_MC_Chunk (MC_Chunk* mc)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ VG_(freeEltPA) (MC_(chunk_poolalloc), mc);
+ }
+
+ // True if mc is in the given block list.
+ static Bool in_block_list (const VgHashTable *block_list, MC_Chunk* mc)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ MC_Chunk* found_mc = VG_(HT_lookup) ( block_list, (UWord)mc->data );
+ if (found_mc) {
+ tl_assert (found_mc->data == mc->data);
+@@ -254,6 +260,7 @@ static Bool in_block_list (const VgHashT
+ // True if mc is a live block (not yet freed).
+ static Bool live_block (MC_Chunk* mc)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ if (mc->allockind == MC_AllocCustom) {
+ MC_Mempool* mp;
+ VG_(HT_ResetIter)(MC_(mempool_list));
+@@ -270,6 +277,7 @@ static Bool live_block (MC_Chunk* mc)
+
+ ExeContext* MC_(allocated_at) (MC_Chunk* mc)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ switch (MC_(clo_keep_stacktraces)) {
+ case KS_none: return VG_(null_ExeContext) ();
+ case KS_alloc: return mc->where[0];
+@@ -283,6 +291,7 @@ ExeContext* MC_(allocated_at) (MC_Chunk*
+
+ ExeContext* MC_(freed_at) (MC_Chunk* mc)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ switch (MC_(clo_keep_stacktraces)) {
+ case KS_none: return VG_(null_ExeContext) ();
+ case KS_alloc: return VG_(null_ExeContext) ();
+@@ -298,6 +307,7 @@ ExeContext* MC_(freed_at) (MC_Chunk* mc)
+
+ void MC_(set_allocated_at) (ThreadId tid, MC_Chunk* mc)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ switch (MC_(clo_keep_stacktraces)) {
+ case KS_none: return;
+ case KS_alloc: break;
+@@ -313,6 +323,7 @@ void MC_(set_allocated_at) (ThreadId ti
+
+ void MC_(set_freed_at) (ThreadId tid, MC_Chunk* mc)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ Int pos;
+ ExeContext* ec_free;
+
+@@ -342,6 +353,7 @@ void MC_(set_freed_at) (ThreadId tid, M
+
+ UInt MC_(n_where_pointers) (void)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ switch (MC_(clo_keep_stacktraces)) {
+ case KS_none: return 0;
+ case KS_alloc:
+@@ -362,6 +374,7 @@ void* MC_(new_block) ( ThreadId tid,
+ Bool is_zeroed, MC_AllocKind kind,
+ VgHashTable *table)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ MC_Chunk* mc;
+
+ // Allocate and zero if necessary
+@@ -401,6 +414,7 @@ void* MC_(new_block) ( ThreadId tid,
+
+ void* MC_(malloc) ( ThreadId tid, SizeT n )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ if (MC_(record_fishy_value_error)(tid, "malloc", "size", n)) {
+ return NULL;
+ } else {
+@@ -411,6 +425,7 @@ void* MC_(malloc) ( ThreadId tid, SizeT
+
+ void* MC_(__builtin_new) ( ThreadId tid, SizeT n )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ if (MC_(record_fishy_value_error)(tid, "__builtin_new", "size", n)) {
+ return NULL;
+ } else {
+@@ -421,6 +436,7 @@ void* MC_(__builtin_new) ( ThreadId tid,
+
+ void* MC_(__builtin_vec_new) ( ThreadId tid, SizeT n )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ if (MC_(record_fishy_value_error)(tid, "__builtin_vec_new", "size", n)) {
+ return NULL;
+ } else {
+@@ -431,6 +447,7 @@ void* MC_(__builtin_vec_new) ( ThreadId
+
+ void* MC_(memalign) ( ThreadId tid, SizeT alignB, SizeT n )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ if (MC_(record_fishy_value_error)(tid, "memalign", "size", n)) {
+ return NULL;
+ } else {
+@@ -441,6 +458,7 @@ void* MC_(memalign) ( ThreadId tid, Size
+
+ void* MC_(calloc) ( ThreadId tid, SizeT nmemb, SizeT size1 )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ if (MC_(record_fishy_value_error)(tid, "calloc", "nmemb", nmemb) ||
+ MC_(record_fishy_value_error)(tid, "calloc", "size", size1)) {
+ return NULL;
+@@ -453,6 +471,7 @@ void* MC_(calloc) ( ThreadId tid, SizeT
+ static
+ void die_and_free_mem ( ThreadId tid, MC_Chunk* mc, SizeT rzB )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ /* Note: we do not free fill the custom allocs produced
+ by MEMPOOL or by MALLOC/FREELIKE_BLOCK requests. */
+ if (MC_(clo_free_fill) != -1 && MC_AllocCustom != mc->allockind ) {
+@@ -478,6 +497,7 @@ void die_and_free_mem ( ThreadId tid, MC
+ static
+ void record_freemismatch_error (ThreadId tid, MC_Chunk* mc)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ /* Only show such an error if the user hasn't disabled doing so. */
+ if (!MC_(clo_show_mismatched_frees))
+ return;
+@@ -496,6 +516,7 @@ void record_freemismatch_error (ThreadId
+
+ void MC_(handle_free) ( ThreadId tid, Addr p, UInt rzB, MC_AllocKind kind )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ MC_Chunk* mc;
+
+ cmalloc_n_frees++;
+@@ -515,24 +536,28 @@ void MC_(handle_free) ( ThreadId tid, Ad
+
+ void MC_(free) ( ThreadId tid, void* p )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ MC_(handle_free)(
+ tid, (Addr)p, MC_(Malloc_Redzone_SzB), MC_AllocMalloc );
+ }
+
+ void MC_(__builtin_delete) ( ThreadId tid, void* p )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ MC_(handle_free)(
+ tid, (Addr)p, MC_(Malloc_Redzone_SzB), MC_AllocNew);
+ }
+
+ void MC_(__builtin_vec_delete) ( ThreadId tid, void* p )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ MC_(handle_free)(
+ tid, (Addr)p, MC_(Malloc_Redzone_SzB), MC_AllocNewVec);
+ }
+
+ void* MC_(realloc) ( ThreadId tid, void* p_old, SizeT new_szB )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ MC_Chunk* old_mc;
+ MC_Chunk* new_mc;
+ Addr a_new;
+@@ -647,6 +672,7 @@ void* MC_(realloc) ( ThreadId tid, void*
+
+ SizeT MC_(malloc_usable_size) ( ThreadId tid, void* p )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ MC_Chunk* mc = VG_(HT_lookup) ( MC_(malloc_list), (UWord)p );
+
+ // There may be slop, but pretend there isn't because only the asked-for
+@@ -661,6 +687,7 @@ SizeT MC_(malloc_usable_size) ( ThreadId
+ void MC_(handle_resizeInPlace)(ThreadId tid, Addr p,
+ SizeT oldSizeB, SizeT newSizeB, SizeT rzB)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ MC_Chunk* mc = VG_(HT_lookup) ( MC_(malloc_list), (UWord)p );
+ if (!mc || mc->szB != oldSizeB || newSizeB == 0) {
+ /* Reject if: p is not found, or oldSizeB is wrong,
+@@ -703,6 +730,7 @@ static void free_mallocs_in_mempool_bloc
+ Addr StartAddr,
+ Addr EndAddr)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ MC_Chunk *mc;
+ ThreadId tid;
+
+@@ -733,6 +761,7 @@ static void free_mallocs_in_mempool_bloc
+ void MC_(create_mempool)(Addr pool, UInt rzB, Bool is_zeroed,
+ Bool auto_free, Bool metapool)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ MC_Mempool* mp;
+
+ if (VG_(clo_verbosity) > 2 || (auto_free && !metapool)) {
+@@ -776,6 +805,7 @@ void MC_(create_mempool)(Addr pool, UInt
+
+ void MC_(destroy_mempool)(Addr pool)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ MC_Chunk* mc;
+ MC_Mempool* mp;
+
+@@ -810,6 +840,7 @@ void MC_(destroy_mempool)(Addr pool)
+ static Int
+ mp_compar(const void* n1, const void* n2)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ const MC_Chunk* mc1 = *(const MC_Chunk *const *)n1;
+ const MC_Chunk* mc2 = *(const MC_Chunk *const *)n2;
+ if (mc1->data < mc2->data) return -1;
+@@ -820,6 +851,7 @@ mp_compar(const void* n1, const void* n2
+ static void
+ check_mempool_sane(MC_Mempool* mp)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ UInt n_chunks, i, bad = 0;
+ static UInt tick = 0;
+
+@@ -895,6 +927,7 @@ check_mempool_sane(MC_Mempool* mp)
+
+ void MC_(mempool_alloc)(ThreadId tid, Addr pool, Addr addr, SizeT szB)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ MC_Mempool* mp;
+
+ if (VG_(clo_verbosity) > 2) {
+@@ -924,6 +957,7 @@ void MC_(mempool_alloc)(ThreadId tid, Ad
+
+ void MC_(mempool_free)(Addr pool, Addr addr)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ MC_Mempool* mp;
+ MC_Chunk* mc;
+ ThreadId tid = VG_(get_running_tid)();
+@@ -963,6 +997,7 @@ void MC_(mempool_free)(Addr pool, Addr a
+
+ void MC_(mempool_trim)(Addr pool, Addr addr, SizeT szB)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ MC_Mempool* mp;
+ MC_Chunk* mc;
+ ThreadId tid = VG_(get_running_tid)();
+@@ -1077,6 +1112,7 @@ void MC_(mempool_trim)(Addr pool, Addr a
+
+ void MC_(move_mempool)(Addr poolA, Addr poolB)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ MC_Mempool* mp;
+
+ if (VG_(clo_verbosity) > 2) {
+@@ -1099,6 +1135,7 @@ void MC_(move_mempool)(Addr poolA, Addr
+
+ void MC_(mempool_change)(Addr pool, Addr addrA, Addr addrB, SizeT szB)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ MC_Mempool* mp;
+ MC_Chunk* mc;
+ ThreadId tid = VG_(get_running_tid)();
+@@ -1132,6 +1169,7 @@ void MC_(mempool_change)(Addr pool, Addr
+
+ Bool MC_(mempool_exists)(Addr pool)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ MC_Mempool* mp;
+
+ mp = VG_(HT_lookup)(MC_(mempool_list), (UWord)pool);
+@@ -1143,6 +1181,7 @@ Bool MC_(mempool_exists)(Addr pool)
+
+ static void xtmemory_report_next_block(XT_Allocs* xta, ExeContext** ec_alloc)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ MC_Chunk* mc = VG_(HT_Next)(MC_(malloc_list));
+ if (mc) {
+ xta->nbytes = mc->szB;
+@@ -1154,6 +1193,7 @@ static void xtmemory_report_next_block(X
+
+ void MC_(xtmemory_report) ( const HChar* filename, Bool fini )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ // Make xtmemory_report_next_block ready to be called.
+ VG_(HT_ResetIter)(MC_(malloc_list));
+
+@@ -1167,6 +1207,7 @@ void MC_(xtmemory_report) ( const HChar*
+
+ void MC_(print_malloc_stats) ( void )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ MC_Chunk* mc;
+ SizeT nblocks = 0;
+ ULong nbytes = 0;
+@@ -1196,6 +1237,7 @@ void MC_(print_malloc_stats) ( void )
+
+ SizeT MC_(get_cmalloc_n_frees) ( void )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ return cmalloc_n_frees;
+ }
+
diff --git a/valgrind-netbsd/patches/patch-memcheck_mc__translate.c b/valgrind-netbsd/patches/patch-memcheck_mc__translate.c
new file mode 100644
index 0000000000..3cfb33880d
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-memcheck_mc__translate.c
@@ -0,0 +1,1296 @@
+$NetBSD$
+
+--- memcheck/mc_translate.c.orig 2018-09-30 04:41:00.000000000 +0000
++++ memcheck/mc_translate.c
+@@ -196,6 +196,7 @@ typedef
+ static void DetailLevelByOp__set_all ( /*OUT*/DetailLevelByOp* dlbo,
+ DetailLevel dl )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ dlbo->dl_Add32 = dl;
+ dlbo->dl_Add64 = dl;
+ dlbo->dl_Sub32 = dl;
+@@ -208,6 +209,7 @@ static void DetailLevelByOp__set_all ( /
+
+ static void DetailLevelByOp__check_sanity ( const DetailLevelByOp* dlbo )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ tl_assert(dlbo->dl_Add32 >= DLcheap && dlbo->dl_Add32 <= DLexpensive);
+ tl_assert(dlbo->dl_Add64 >= DLcheap && dlbo->dl_Add64 <= DLexpensive);
+ tl_assert(dlbo->dl_Sub32 >= DLcheap && dlbo->dl_Sub32 <= DLexpensive);
+@@ -225,6 +227,7 @@ static void DetailLevelByOp__check_sanit
+ static UInt DetailLevelByOp__count ( const DetailLevelByOp* dlbo,
+ DetailLevel dl )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ UInt n = 0;
+ n += (dlbo->dl_Add32 == dl ? 1 : 0);
+ n += (dlbo->dl_Add64 == dl ? 1 : 0);
+@@ -366,6 +369,7 @@ typedef
+ from VG_(indexXA)(mce->tmpMap) are invalidated. */
+ static IRTemp newTemp ( MCEnv* mce, IRType ty, TempKind kind )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ Word newIx;
+ TempMapEnt ent;
+ IRTemp tmp = newIRTemp(mce->sb->tyenv, ty);
+@@ -382,6 +386,7 @@ static IRTemp newTemp ( MCEnv* mce, IRTy
+ so far exists, allocate one. */
+ static IRTemp findShadowTmpV ( MCEnv* mce, IRTemp orig )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ TempMapEnt* ent;
+ /* VG_(indexXA) range-checks 'orig', hence no need to check
+ here. */
+@@ -412,6 +417,7 @@ static IRTemp findShadowTmpV ( MCEnv* mc
+ regardless. */
+ static void newShadowTmpV ( MCEnv* mce, IRTemp orig )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ TempMapEnt* ent;
+ /* VG_(indexXA) range-checks 'orig', hence no need to check
+ here. */
+@@ -446,6 +452,7 @@ typedef IRExpr IRAtom;
+ like it's from original code? */
+ static Bool isOriginalAtom ( MCEnv* mce, IRAtom* a1 )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ if (a1->tag == Iex_Const)
+ return True;
+ if (a1->tag == Iex_RdTmp) {
+@@ -459,6 +466,7 @@ static Bool isOriginalAtom ( MCEnv* mce,
+ like it's from shadow code? */
+ static Bool isShadowAtom ( MCEnv* mce, IRAtom* a1 )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ if (a1->tag == Iex_Const)
+ return True;
+ if (a1->tag == Iex_RdTmp) {
+@@ -472,6 +480,7 @@ static Bool isShadowAtom ( MCEnv* mce, I
+ are identically-kinded. */
+ static Bool sameKindedAtoms ( IRAtom* a1, IRAtom* a2 )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ if (a1->tag == Iex_RdTmp && a2->tag == Iex_RdTmp)
+ return True;
+ if (a1->tag == Iex_Const && a2->tag == Iex_Const)
+@@ -491,6 +500,7 @@ static Bool sameKindedAtoms ( IRAtom* a1
+
+ static IRType shadowTypeV ( IRType ty )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ switch (ty) {
+ case Ity_I1:
+ case Ity_I8:
+@@ -515,6 +525,7 @@ static IRType shadowTypeV ( IRType ty )
+ /* Produce a 'defined' value of the given shadow type. Should only be
+ supplied shadow types (Bit/I8/I16/I32/UI64). */
+ static IRExpr* definedOfType ( IRType ty ) {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ switch (ty) {
+ case Ity_I1: return IRExpr_Const(IRConst_U1(False));
+ case Ity_I8: return IRExpr_Const(IRConst_U8(0));
+@@ -535,6 +546,7 @@ static IRExpr* definedOfType ( IRType ty
+
+ /* add stmt to a bb */
+ static inline void stmt ( HChar cat, MCEnv* mce, IRStmt* st ) {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ if (mce->trace) {
+ VG_(printf)(" %c: ", cat);
+ ppIRStmt(st);
+@@ -546,6 +558,7 @@ static inline void stmt ( HChar cat, MCE
+ /* assign value to tmp */
+ static inline
+ void assign ( HChar cat, MCEnv* mce, IRTemp tmp, IRExpr* expr ) {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ stmt(cat, mce, IRStmt_WrTmp(tmp,expr));
+ }
+
+@@ -572,6 +585,7 @@ void assign ( HChar cat, MCEnv* mce, IRT
+ assert that the two types agree. */
+ static IRAtom* assignNew ( HChar cat, MCEnv* mce, IRType ty, IRExpr* e )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ TempKind k;
+ IRTemp t;
+ IRType tyE = typeOfIRExpr(mce->sb->tyenv, e);
+@@ -597,6 +611,7 @@ static IRAtom* assignNew ( HChar cat, MC
+
+ static IRExpr *i128_const_zero(void)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ IRAtom* z64 = IRExpr_Const(IRConst_U64(0));
+ return binop(Iop_64HLto128, z64, z64);
+ }
+@@ -613,36 +628,42 @@ static IRExpr *i128_const_zero(void)
+ /* --------- Defined-if-either-defined --------- */
+
+ static IRAtom* mkDifD8 ( MCEnv* mce, IRAtom* a1, IRAtom* a2 ) {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ tl_assert(isShadowAtom(mce,a1));
+ tl_assert(isShadowAtom(mce,a2));
+ return assignNew('V', mce, Ity_I8, binop(Iop_And8, a1, a2));
+ }
+
+ static IRAtom* mkDifD16 ( MCEnv* mce, IRAtom* a1, IRAtom* a2 ) {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ tl_assert(isShadowAtom(mce,a1));
+ tl_assert(isShadowAtom(mce,a2));
+ return assignNew('V', mce, Ity_I16, binop(Iop_And16, a1, a2));
+ }
+
+ static IRAtom* mkDifD32 ( MCEnv* mce, IRAtom* a1, IRAtom* a2 ) {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ tl_assert(isShadowAtom(mce,a1));
+ tl_assert(isShadowAtom(mce,a2));
+ return assignNew('V', mce, Ity_I32, binop(Iop_And32, a1, a2));
+ }
+
+ static IRAtom* mkDifD64 ( MCEnv* mce, IRAtom* a1, IRAtom* a2 ) {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ tl_assert(isShadowAtom(mce,a1));
+ tl_assert(isShadowAtom(mce,a2));
+ return assignNew('V', mce, Ity_I64, binop(Iop_And64, a1, a2));
+ }
+
+ static IRAtom* mkDifDV128 ( MCEnv* mce, IRAtom* a1, IRAtom* a2 ) {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ tl_assert(isShadowAtom(mce,a1));
+ tl_assert(isShadowAtom(mce,a2));
+ return assignNew('V', mce, Ity_V128, binop(Iop_AndV128, a1, a2));
+ }
+
+ static IRAtom* mkDifDV256 ( MCEnv* mce, IRAtom* a1, IRAtom* a2 ) {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ tl_assert(isShadowAtom(mce,a1));
+ tl_assert(isShadowAtom(mce,a2));
+ return assignNew('V', mce, Ity_V256, binop(Iop_AndV256, a1, a2));
+@@ -651,30 +672,35 @@ static IRAtom* mkDifDV256 ( MCEnv* mce,
+ /* --------- Undefined-if-either-undefined --------- */
+
+ static IRAtom* mkUifU8 ( MCEnv* mce, IRAtom* a1, IRAtom* a2 ) {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ tl_assert(isShadowAtom(mce,a1));
+ tl_assert(isShadowAtom(mce,a2));
+ return assignNew('V', mce, Ity_I8, binop(Iop_Or8, a1, a2));
+ }
+
+ static IRAtom* mkUifU16 ( MCEnv* mce, IRAtom* a1, IRAtom* a2 ) {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ tl_assert(isShadowAtom(mce,a1));
+ tl_assert(isShadowAtom(mce,a2));
+ return assignNew('V', mce, Ity_I16, binop(Iop_Or16, a1, a2));
+ }
+
+ static IRAtom* mkUifU32 ( MCEnv* mce, IRAtom* a1, IRAtom* a2 ) {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ tl_assert(isShadowAtom(mce,a1));
+ tl_assert(isShadowAtom(mce,a2));
+ return assignNew('V', mce, Ity_I32, binop(Iop_Or32, a1, a2));
+ }
+
+ static IRAtom* mkUifU64 ( MCEnv* mce, IRAtom* a1, IRAtom* a2 ) {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ tl_assert(isShadowAtom(mce,a1));
+ tl_assert(isShadowAtom(mce,a2));
+ return assignNew('V', mce, Ity_I64, binop(Iop_Or64, a1, a2));
+ }
+
+ static IRAtom* mkUifU128 ( MCEnv* mce, IRAtom* a1, IRAtom* a2 ) {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ IRAtom *tmp1, *tmp2, *tmp3, *tmp4, *tmp5, *tmp6;
+ tl_assert(isShadowAtom(mce,a1));
+ tl_assert(isShadowAtom(mce,a2));
+@@ -689,18 +715,21 @@ static IRAtom* mkUifU128 ( MCEnv* mce, I
+ }
+
+ static IRAtom* mkUifUV128 ( MCEnv* mce, IRAtom* a1, IRAtom* a2 ) {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ tl_assert(isShadowAtom(mce,a1));
+ tl_assert(isShadowAtom(mce,a2));
+ return assignNew('V', mce, Ity_V128, binop(Iop_OrV128, a1, a2));
+ }
+
+ static IRAtom* mkUifUV256 ( MCEnv* mce, IRAtom* a1, IRAtom* a2 ) {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ tl_assert(isShadowAtom(mce,a1));
+ tl_assert(isShadowAtom(mce,a2));
+ return assignNew('V', mce, Ity_V256, binop(Iop_OrV256, a1, a2));
+ }
+
+ static IRAtom* mkUifU ( MCEnv* mce, IRType vty, IRAtom* a1, IRAtom* a2 ) {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ switch (vty) {
+ case Ity_I8: return mkUifU8(mce, a1, a2);
+ case Ity_I16: return mkUifU16(mce, a1, a2);
+@@ -718,21 +747,25 @@ static IRAtom* mkUifU ( MCEnv* mce, IRTy
+ /* --------- The Left-family of operations. --------- */
+
+ static IRAtom* mkLeft8 ( MCEnv* mce, IRAtom* a1 ) {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ tl_assert(isShadowAtom(mce,a1));
+ return assignNew('V', mce, Ity_I8, unop(Iop_Left8, a1));
+ }
+
+ static IRAtom* mkLeft16 ( MCEnv* mce, IRAtom* a1 ) {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ tl_assert(isShadowAtom(mce,a1));
+ return assignNew('V', mce, Ity_I16, unop(Iop_Left16, a1));
+ }
+
+ static IRAtom* mkLeft32 ( MCEnv* mce, IRAtom* a1 ) {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ tl_assert(isShadowAtom(mce,a1));
+ return assignNew('V', mce, Ity_I32, unop(Iop_Left32, a1));
+ }
+
+ static IRAtom* mkLeft64 ( MCEnv* mce, IRAtom* a1 ) {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ tl_assert(isShadowAtom(mce,a1));
+ return assignNew('V', mce, Ity_I64, unop(Iop_Left64, a1));
+ }
+@@ -744,6 +777,7 @@ static IRAtom* mkLeft64 ( MCEnv* mce, IR
+ */
+ static IRAtom* mkImproveAND8 ( MCEnv* mce, IRAtom* data, IRAtom* vbits )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ tl_assert(isOriginalAtom(mce, data));
+ tl_assert(isShadowAtom(mce, vbits));
+ tl_assert(sameKindedAtoms(data, vbits));
+@@ -752,6 +786,7 @@ static IRAtom* mkImproveAND8 ( MCEnv* mc
+
+ static IRAtom* mkImproveAND16 ( MCEnv* mce, IRAtom* data, IRAtom* vbits )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ tl_assert(isOriginalAtom(mce, data));
+ tl_assert(isShadowAtom(mce, vbits));
+ tl_assert(sameKindedAtoms(data, vbits));
+@@ -760,6 +795,7 @@ static IRAtom* mkImproveAND16 ( MCEnv* m
+
+ static IRAtom* mkImproveAND32 ( MCEnv* mce, IRAtom* data, IRAtom* vbits )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ tl_assert(isOriginalAtom(mce, data));
+ tl_assert(isShadowAtom(mce, vbits));
+ tl_assert(sameKindedAtoms(data, vbits));
+@@ -768,6 +804,7 @@ static IRAtom* mkImproveAND32 ( MCEnv* m
+
+ static IRAtom* mkImproveAND64 ( MCEnv* mce, IRAtom* data, IRAtom* vbits )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ tl_assert(isOriginalAtom(mce, data));
+ tl_assert(isShadowAtom(mce, vbits));
+ tl_assert(sameKindedAtoms(data, vbits));
+@@ -776,6 +813,7 @@ static IRAtom* mkImproveAND64 ( MCEnv* m
+
+ static IRAtom* mkImproveANDV128 ( MCEnv* mce, IRAtom* data, IRAtom* vbits )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ tl_assert(isOriginalAtom(mce, data));
+ tl_assert(isShadowAtom(mce, vbits));
+ tl_assert(sameKindedAtoms(data, vbits));
+@@ -784,6 +822,7 @@ static IRAtom* mkImproveANDV128 ( MCEnv*
+
+ static IRAtom* mkImproveANDV256 ( MCEnv* mce, IRAtom* data, IRAtom* vbits )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ tl_assert(isOriginalAtom(mce, data));
+ tl_assert(isShadowAtom(mce, vbits));
+ tl_assert(sameKindedAtoms(data, vbits));
+@@ -795,6 +834,7 @@ static IRAtom* mkImproveANDV256 ( MCEnv*
+ */
+ static IRAtom* mkImproveOR8 ( MCEnv* mce, IRAtom* data, IRAtom* vbits )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ tl_assert(isOriginalAtom(mce, data));
+ tl_assert(isShadowAtom(mce, vbits));
+ tl_assert(sameKindedAtoms(data, vbits));
+@@ -807,6 +847,7 @@ static IRAtom* mkImproveOR8 ( MCEnv* mce
+
+ static IRAtom* mkImproveOR16 ( MCEnv* mce, IRAtom* data, IRAtom* vbits )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ tl_assert(isOriginalAtom(mce, data));
+ tl_assert(isShadowAtom(mce, vbits));
+ tl_assert(sameKindedAtoms(data, vbits));
+@@ -819,6 +860,7 @@ static IRAtom* mkImproveOR16 ( MCEnv* mc
+
+ static IRAtom* mkImproveOR32 ( MCEnv* mce, IRAtom* data, IRAtom* vbits )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ tl_assert(isOriginalAtom(mce, data));
+ tl_assert(isShadowAtom(mce, vbits));
+ tl_assert(sameKindedAtoms(data, vbits));
+@@ -831,6 +873,7 @@ static IRAtom* mkImproveOR32 ( MCEnv* mc
+
+ static IRAtom* mkImproveOR64 ( MCEnv* mce, IRAtom* data, IRAtom* vbits )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ tl_assert(isOriginalAtom(mce, data));
+ tl_assert(isShadowAtom(mce, vbits));
+ tl_assert(sameKindedAtoms(data, vbits));
+@@ -843,6 +886,7 @@ static IRAtom* mkImproveOR64 ( MCEnv* mc
+
+ static IRAtom* mkImproveORV128 ( MCEnv* mce, IRAtom* data, IRAtom* vbits )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ tl_assert(isOriginalAtom(mce, data));
+ tl_assert(isShadowAtom(mce, vbits));
+ tl_assert(sameKindedAtoms(data, vbits));
+@@ -855,6 +899,7 @@ static IRAtom* mkImproveORV128 ( MCEnv*
+
+ static IRAtom* mkImproveORV256 ( MCEnv* mce, IRAtom* data, IRAtom* vbits )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ tl_assert(isOriginalAtom(mce, data));
+ tl_assert(isShadowAtom(mce, vbits));
+ tl_assert(sameKindedAtoms(data, vbits));
+@@ -873,6 +918,7 @@ static IRAtom* mkImproveORV256 ( MCEnv*
+
+ static IRAtom* mkPCastTo( MCEnv* mce, IRType dst_ty, IRAtom* vbits )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ IRType src_ty;
+ IRAtom* tmp1;
+
+@@ -1024,6 +1070,7 @@ static IRAtom* mkPCastTo( MCEnv* mce, IR
+ the entire argument down to a single bit. */
+ static IRAtom* mkPCastXXtoXXlsb ( MCEnv* mce, IRAtom* varg, IRType ty )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ if (ty == Ity_V128) {
+ /* --- Case for V128 --- */
+ IRAtom* varg128 = varg;
+@@ -1065,6 +1112,7 @@ static IRAtom* mkPCastXXtoXXlsb ( MCEnv*
+ */
+ static IRAtom* mkOCastAt( MCEnv* mce, IRType ty, IRAtom* vbits )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ IROp opSUB, opSHR, opSAR;
+ UInt sh;
+
+@@ -1163,6 +1211,7 @@ static IRAtom* expensiveCmpEQorNE ( MCEn
+ IRAtom* vxx, IRAtom* vyy,
+ IRAtom* xx, IRAtom* yy )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ IRAtom *naive, *vec, *improved, *final_cast;
+ IROp opDIFD, opUIFU, opOR, opXOR, opNOT;
+
+@@ -1261,6 +1310,7 @@ static IRAtom* expensiveCmpEQorNE ( MCEn
+ */
+ static Bool isZeroU32 ( IRAtom* e )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ return
+ toBool( e->tag == Iex_Const
+ && e->Iex.Const.con->tag == Ico_U32
+@@ -1269,6 +1319,7 @@ static Bool isZeroU32 ( IRAtom* e )
+
+ static Bool isZeroU64 ( IRAtom* e )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ return
+ toBool( e->tag == Iex_Const
+ && e->Iex.Const.con->tag == Ico_U64
+@@ -1280,6 +1331,7 @@ static IRAtom* doCmpORD ( MCEnv* mce,
+ IRAtom* xxhash, IRAtom* yyhash,
+ IRAtom* xx, IRAtom* yy )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ Bool m64 = cmp_op == Iop_CmpORD64S || cmp_op == Iop_CmpORD64U;
+ Bool syned = cmp_op == Iop_CmpORD64S || cmp_op == Iop_CmpORD32S;
+ IROp opOR = m64 ? Iop_Or64 : Iop_Or32;
+@@ -1360,6 +1412,7 @@ static IRAtom* schemeE ( MCEnv* mce, IRE
+ call. */
+
+ static void setHelperAnns ( MCEnv* mce, IRDirty* di ) {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ di->nFxState = 2;
+ di->fxState[0].fx = Ifx_Read;
+ di->fxState[0].offset = mce->layout->offset_SP;
+@@ -1395,6 +1448,7 @@ static void setHelperAnns ( MCEnv* mce,
+ */
+ static void complainIfUndefined ( MCEnv* mce, IRAtom* atom, IRExpr *guard )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ IRAtom* vatom;
+ IRType ty;
+ Int sz;
+@@ -1582,6 +1636,7 @@ static void complainIfUndefined ( MCEnv*
+ */
+ static Bool isAlwaysDefd ( MCEnv* mce, Int offset, Int size )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ Int minoffD, maxoffD, i;
+ Int minoff = offset;
+ Int maxoff = minoff + size - 1;
+@@ -1616,6 +1671,7 @@ static
+ void do_shadow_PUT ( MCEnv* mce, Int offset,
+ IRAtom* atom, IRAtom* vatom, IRExpr *guard )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ IRType ty;
+
+ // Don't do shadow PUTs if we're not doing undefined value checking.
+@@ -1662,6 +1718,7 @@ void do_shadow_PUT ( MCEnv* mce, Int of
+ static
+ void do_shadow_PUTI ( MCEnv* mce, IRPutI *puti)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ IRAtom* vatom;
+ IRType ty, tyS;
+ Int arrSize;;
+@@ -1706,6 +1763,7 @@ void do_shadow_PUTI ( MCEnv* mce, IRPutI
+ static
+ IRExpr* shadow_GET ( MCEnv* mce, Int offset, IRType ty )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ IRType tyS = shadowTypeV(ty);
+ tl_assert(ty != Ity_I1);
+ tl_assert(ty != Ity_I128);
+@@ -1728,6 +1786,7 @@ static
+ IRExpr* shadow_GETI ( MCEnv* mce,
+ IRRegArray* descr, IRAtom* ix, Int bias )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ IRType ty = descr->elemTy;
+ IRType tyS = shadowTypeV(ty);
+ Int arrSize = descr->nElems * sizeofIRType(ty);
+@@ -1759,6 +1818,7 @@ IRExpr* shadow_GETI ( MCEnv* mce,
+ static
+ IRAtom* mkLazy2 ( MCEnv* mce, IRType finalVty, IRAtom* va1, IRAtom* va2 )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ IRAtom* at;
+ IRType t1 = typeOfIRExpr(mce->sb->tyenv, va1);
+ IRType t2 = typeOfIRExpr(mce->sb->tyenv, va2);
+@@ -1816,6 +1876,7 @@ static
+ IRAtom* mkLazy3 ( MCEnv* mce, IRType finalVty,
+ IRAtom* va1, IRAtom* va2, IRAtom* va3 )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ IRAtom* at;
+ IRType t1 = typeOfIRExpr(mce->sb->tyenv, va1);
+ IRType t2 = typeOfIRExpr(mce->sb->tyenv, va2);
+@@ -1950,6 +2011,7 @@ static
+ IRAtom* mkLazy4 ( MCEnv* mce, IRType finalVty,
+ IRAtom* va1, IRAtom* va2, IRAtom* va3, IRAtom* va4 )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ IRAtom* at;
+ IRType t1 = typeOfIRExpr(mce->sb->tyenv, va1);
+ IRType t2 = typeOfIRExpr(mce->sb->tyenv, va2);
+@@ -2063,6 +2125,7 @@ static
+ IRAtom* mkLazyN ( MCEnv* mce,
+ IRAtom** exprvec, IRType finalVtype, IRCallee* cee )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ Int i;
+ IRAtom* here;
+ IRAtom* curr;
+@@ -2118,6 +2181,7 @@ IRAtom* expensiveAddSub ( MCEnv* mce,
+ IRAtom* qaa, IRAtom* qbb,
+ IRAtom* aa, IRAtom* bb )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ IRAtom *a_min, *b_min, *a_max, *b_max;
+ IROp opAND, opOR, opXOR, opNOT, opADD, opSUB;
+
+@@ -2202,6 +2266,7 @@ static
+ IRAtom* expensiveCountTrailingZeroes ( MCEnv* mce, IROp czop,
+ IRAtom* atom, IRAtom* vatom )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ IRType ty;
+ IROp xorOp, subOp, andOp;
+ IRExpr *one;
+@@ -2281,6 +2346,7 @@ static IRAtom* scalarShift ( MCEnv* mce
+ IRAtom* qaa, IRAtom* qbb,
+ IRAtom* aa, IRAtom* bb )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ tl_assert(isShadowAtom(mce,qaa));
+ tl_assert(isShadowAtom(mce,qbb));
+ tl_assert(isOriginalAtom(mce,aa));
+@@ -2306,71 +2372,85 @@ static IRAtom* scalarShift ( MCEnv* mce
+
+ static IRAtom* mkPCast8x16 ( MCEnv* mce, IRAtom* at )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ return assignNew('V', mce, Ity_V128, unop(Iop_CmpNEZ8x16, at));
+ }
+
+ static IRAtom* mkPCast16x8 ( MCEnv* mce, IRAtom* at )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ return assignNew('V', mce, Ity_V128, unop(Iop_CmpNEZ16x8, at));
+ }
+
+ static IRAtom* mkPCast32x4 ( MCEnv* mce, IRAtom* at )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ return assignNew('V', mce, Ity_V128, unop(Iop_CmpNEZ32x4, at));
+ }
+
+ static IRAtom* mkPCast64x2 ( MCEnv* mce, IRAtom* at )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ return assignNew('V', mce, Ity_V128, unop(Iop_CmpNEZ64x2, at));
+ }
+
+ static IRAtom* mkPCast128x1 ( MCEnv* mce, IRAtom* at )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ return assignNew('V', mce, Ity_V128, unop(Iop_CmpNEZ128x1, at));
+ }
+
+ static IRAtom* mkPCast64x4 ( MCEnv* mce, IRAtom* at )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ return assignNew('V', mce, Ity_V256, unop(Iop_CmpNEZ64x4, at));
+ }
+
+ static IRAtom* mkPCast32x8 ( MCEnv* mce, IRAtom* at )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ return assignNew('V', mce, Ity_V256, unop(Iop_CmpNEZ32x8, at));
+ }
+
+ static IRAtom* mkPCast32x2 ( MCEnv* mce, IRAtom* at )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ return assignNew('V', mce, Ity_I64, unop(Iop_CmpNEZ32x2, at));
+ }
+
+ static IRAtom* mkPCast16x16 ( MCEnv* mce, IRAtom* at )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ return assignNew('V', mce, Ity_V256, unop(Iop_CmpNEZ16x16, at));
+ }
+
+ static IRAtom* mkPCast16x4 ( MCEnv* mce, IRAtom* at )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ return assignNew('V', mce, Ity_I64, unop(Iop_CmpNEZ16x4, at));
+ }
+
+ static IRAtom* mkPCast8x32 ( MCEnv* mce, IRAtom* at )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ return assignNew('V', mce, Ity_V256, unop(Iop_CmpNEZ8x32, at));
+ }
+
+ static IRAtom* mkPCast8x8 ( MCEnv* mce, IRAtom* at )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ return assignNew('V', mce, Ity_I64, unop(Iop_CmpNEZ8x8, at));
+ }
+
+ static IRAtom* mkPCast16x2 ( MCEnv* mce, IRAtom* at )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ return assignNew('V', mce, Ity_I32, unop(Iop_CmpNEZ16x2, at));
+ }
+
+ static IRAtom* mkPCast8x4 ( MCEnv* mce, IRAtom* at )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ return assignNew('V', mce, Ity_I32, unop(Iop_CmpNEZ8x4, at));
+ }
+
+@@ -2414,6 +2494,7 @@ static IRAtom* mkPCast8x4 ( MCEnv* mce,
+ static
+ IRAtom* binary32Fx4 ( MCEnv* mce, IRAtom* vatomX, IRAtom* vatomY )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ IRAtom* at;
+ tl_assert(isShadowAtom(mce, vatomX));
+ tl_assert(isShadowAtom(mce, vatomY));
+@@ -2425,6 +2506,7 @@ IRAtom* binary32Fx4 ( MCEnv* mce, IRAtom
+ static
+ IRAtom* unary32Fx4 ( MCEnv* mce, IRAtom* vatomX )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ IRAtom* at;
+ tl_assert(isShadowAtom(mce, vatomX));
+ at = assignNew('V', mce, Ity_V128, mkPCast32x4(mce, vatomX));
+@@ -2434,6 +2516,7 @@ IRAtom* unary32Fx4 ( MCEnv* mce, IRAtom*
+ static
+ IRAtom* binary32F0x4 ( MCEnv* mce, IRAtom* vatomX, IRAtom* vatomY )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ IRAtom* at;
+ tl_assert(isShadowAtom(mce, vatomX));
+ tl_assert(isShadowAtom(mce, vatomY));
+@@ -2447,6 +2530,7 @@ IRAtom* binary32F0x4 ( MCEnv* mce, IRAto
+ static
+ IRAtom* unary32F0x4 ( MCEnv* mce, IRAtom* vatomX )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ IRAtom* at;
+ tl_assert(isShadowAtom(mce, vatomX));
+ at = assignNew('V', mce, Ity_I32, unop(Iop_V128to32, vatomX));
+@@ -2460,6 +2544,7 @@ IRAtom* unary32F0x4 ( MCEnv* mce, IRAtom
+ static
+ IRAtom* binary64Fx2 ( MCEnv* mce, IRAtom* vatomX, IRAtom* vatomY )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ IRAtom* at;
+ tl_assert(isShadowAtom(mce, vatomX));
+ tl_assert(isShadowAtom(mce, vatomY));
+@@ -2471,6 +2556,7 @@ IRAtom* binary64Fx2 ( MCEnv* mce, IRAtom
+ static
+ IRAtom* unary64Fx2 ( MCEnv* mce, IRAtom* vatomX )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ IRAtom* at;
+ tl_assert(isShadowAtom(mce, vatomX));
+ at = assignNew('V', mce, Ity_V128, mkPCast64x2(mce, vatomX));
+@@ -2480,6 +2566,7 @@ IRAtom* unary64Fx2 ( MCEnv* mce, IRAtom*
+ static
+ IRAtom* binary64F0x2 ( MCEnv* mce, IRAtom* vatomX, IRAtom* vatomY )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ IRAtom* at;
+ tl_assert(isShadowAtom(mce, vatomX));
+ tl_assert(isShadowAtom(mce, vatomY));
+@@ -2493,6 +2580,7 @@ IRAtom* binary64F0x2 ( MCEnv* mce, IRAto
+ static
+ IRAtom* unary64F0x2 ( MCEnv* mce, IRAtom* vatomX )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ IRAtom* at;
+ tl_assert(isShadowAtom(mce, vatomX));
+ at = assignNew('V', mce, Ity_I64, unop(Iop_V128to64, vatomX));
+@@ -2506,6 +2594,7 @@ IRAtom* unary64F0x2 ( MCEnv* mce, IRAtom
+ static
+ IRAtom* binary32Fx2 ( MCEnv* mce, IRAtom* vatomX, IRAtom* vatomY )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ IRAtom* at;
+ tl_assert(isShadowAtom(mce, vatomX));
+ tl_assert(isShadowAtom(mce, vatomY));
+@@ -2517,6 +2606,7 @@ IRAtom* binary32Fx2 ( MCEnv* mce, IRAtom
+ static
+ IRAtom* unary32Fx2 ( MCEnv* mce, IRAtom* vatomX )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ IRAtom* at;
+ tl_assert(isShadowAtom(mce, vatomX));
+ at = assignNew('V', mce, Ity_I64, mkPCast32x2(mce, vatomX));
+@@ -2528,6 +2618,7 @@ IRAtom* unary32Fx2 ( MCEnv* mce, IRAtom*
+ static
+ IRAtom* binary64Fx4 ( MCEnv* mce, IRAtom* vatomX, IRAtom* vatomY )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ IRAtom* at;
+ tl_assert(isShadowAtom(mce, vatomX));
+ tl_assert(isShadowAtom(mce, vatomY));
+@@ -2539,6 +2630,7 @@ IRAtom* binary64Fx4 ( MCEnv* mce, IRAtom
+ static
+ IRAtom* unary64Fx4 ( MCEnv* mce, IRAtom* vatomX )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ IRAtom* at;
+ tl_assert(isShadowAtom(mce, vatomX));
+ at = assignNew('V', mce, Ity_V256, mkPCast64x4(mce, vatomX));
+@@ -2550,6 +2642,7 @@ IRAtom* unary64Fx4 ( MCEnv* mce, IRAtom*
+ static
+ IRAtom* binary32Fx8 ( MCEnv* mce, IRAtom* vatomX, IRAtom* vatomY )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ IRAtom* at;
+ tl_assert(isShadowAtom(mce, vatomX));
+ tl_assert(isShadowAtom(mce, vatomY));
+@@ -2561,6 +2654,7 @@ IRAtom* binary32Fx8 ( MCEnv* mce, IRAtom
+ static
+ IRAtom* unary32Fx8 ( MCEnv* mce, IRAtom* vatomX )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ IRAtom* at;
+ tl_assert(isShadowAtom(mce, vatomX));
+ at = assignNew('V', mce, Ity_V256, mkPCast32x8(mce, vatomX));
+@@ -2573,6 +2667,7 @@ static
+ IRAtom* binary64Fx2_w_rm ( MCEnv* mce, IRAtom* vRM,
+ IRAtom* vatomX, IRAtom* vatomY )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ /* This is the same as binary64Fx2, except that we subsequently
+ pessimise vRM (definedness of the rounding mode), widen to 128
+ bits and UifU it into the result. As with the scalar cases, if
+@@ -2593,6 +2688,7 @@ static
+ IRAtom* binary32Fx4_w_rm ( MCEnv* mce, IRAtom* vRM,
+ IRAtom* vatomX, IRAtom* vatomY )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ IRAtom* t1 = binary32Fx4(mce, vatomX, vatomY);
+ // PCast the RM, and widen it to 128 bits
+ IRAtom* t2 = mkPCastTo(mce, Ity_V128, vRM);
+@@ -2607,6 +2703,7 @@ static
+ IRAtom* binary64Fx4_w_rm ( MCEnv* mce, IRAtom* vRM,
+ IRAtom* vatomX, IRAtom* vatomY )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ IRAtom* t1 = binary64Fx4(mce, vatomX, vatomY);
+ // PCast the RM, and widen it to 256 bits
+ IRAtom* t2 = mkPCastTo(mce, Ity_V256, vRM);
+@@ -2621,6 +2718,7 @@ static
+ IRAtom* binary32Fx8_w_rm ( MCEnv* mce, IRAtom* vRM,
+ IRAtom* vatomX, IRAtom* vatomY )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ IRAtom* t1 = binary32Fx8(mce, vatomX, vatomY);
+ // PCast the RM, and widen it to 256 bits
+ IRAtom* t2 = mkPCastTo(mce, Ity_V256, vRM);
+@@ -2634,6 +2732,7 @@ IRAtom* binary32Fx8_w_rm ( MCEnv* mce, I
+ static
+ IRAtom* unary64Fx2_w_rm ( MCEnv* mce, IRAtom* vRM, IRAtom* vatomX )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ /* Same scheme as binary64Fx2_w_rm. */
+ // "do" the vector arg
+ IRAtom* t1 = unary64Fx2(mce, vatomX);
+@@ -2649,6 +2748,7 @@ IRAtom* unary64Fx2_w_rm ( MCEnv* mce, IR
+ static
+ IRAtom* unary32Fx4_w_rm ( MCEnv* mce, IRAtom* vRM, IRAtom* vatomX )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ /* Same scheme as unary32Fx4_w_rm. */
+ IRAtom* t1 = unary32Fx4(mce, vatomX);
+ // PCast the RM, and widen it to 128 bits
+@@ -2711,6 +2811,7 @@ IRAtom* unary32Fx4_w_rm ( MCEnv* mce, IR
+ static
+ IROp vanillaNarrowingOpOfShape ( IROp qnarrowOp )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ switch (qnarrowOp) {
+ /* Binary: (128, 128) -> 128 */
+ case Iop_QNarrowBin16Sto8Ux16:
+@@ -2753,6 +2854,7 @@ static
+ IRAtom* vectorNarrowBinV128 ( MCEnv* mce, IROp narrow_op,
+ IRAtom* vatom1, IRAtom* vatom2)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ IRAtom *at1, *at2, *at3;
+ IRAtom* (*pcast)( MCEnv*, IRAtom* );
+ switch (narrow_op) {
+@@ -2779,6 +2881,7 @@ static
+ IRAtom* vectorNarrowBin64 ( MCEnv* mce, IROp narrow_op,
+ IRAtom* vatom1, IRAtom* vatom2)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ IRAtom *at1, *at2, *at3;
+ IRAtom* (*pcast)( MCEnv*, IRAtom* );
+ switch (narrow_op) {
+@@ -2800,6 +2903,7 @@ static
+ IRAtom* vectorNarrowUnV128 ( MCEnv* mce, IROp narrow_op,
+ IRAtom* vatom1)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ IRAtom *at1, *at2;
+ IRAtom* (*pcast)( MCEnv*, IRAtom* );
+ tl_assert(isShadowAtom(mce,vatom1));
+@@ -2839,6 +2943,7 @@ static
+ IRAtom* vectorWidenI64 ( MCEnv* mce, IROp longen_op,
+ IRAtom* vatom1)
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ IRAtom *at1, *at2;
+ IRAtom* (*pcast)( MCEnv*, IRAtom* );
+ switch (longen_op) {
+@@ -2867,6 +2972,7 @@ IRAtom* vectorWidenI64 ( MCEnv* mce, IRO
+ static
+ IRAtom* binary8Ix32 ( MCEnv* mce, IRAtom* vatom1, IRAtom* vatom2 )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ IRAtom* at;
+ at = mkUifUV256(mce, vatom1, vatom2);
+ at = mkPCast8x32(mce, at);
+@@ -2876,6 +2982,7 @@ IRAtom* binary8Ix32 ( MCEnv* mce, IRAtom
+ static
+ IRAtom* binary16Ix16 ( MCEnv* mce, IRAtom* vatom1, IRAtom* vatom2 )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ IRAtom* at;
+ at = mkUifUV256(mce, vatom1, vatom2);
+ at = mkPCast16x16(mce, at);
+@@ -2885,6 +2992,7 @@ IRAtom* binary16Ix16 ( MCEnv* mce, IRAto
+ static
+ IRAtom* binary32Ix8 ( MCEnv* mce, IRAtom* vatom1, IRAtom* vatom2 )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ IRAtom* at;
+ at = mkUifUV256(mce, vatom1, vatom2);
+ at = mkPCast32x8(mce, at);
+@@ -2894,6 +3002,7 @@ IRAtom* binary32Ix8 ( MCEnv* mce, IRAtom
+ static
+ IRAtom* binary64Ix4 ( MCEnv* mce, IRAtom* vatom1, IRAtom* vatom2 )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ IRAtom* at;
+ at = mkUifUV256(mce, vatom1, vatom2);
+ at = mkPCast64x4(mce, at);
+@@ -2905,6 +3014,7 @@ IRAtom* binary64Ix4 ( MCEnv* mce, IRAtom
+ static
+ IRAtom* binary8Ix16 ( MCEnv* mce, IRAtom* vatom1, IRAtom* vatom2 )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ IRAtom* at;
+ at = mkUifUV128(mce, vatom1, vatom2);
+ at = mkPCast8x16(mce, at);
+@@ -2914,6 +3024,7 @@ IRAtom* binary8Ix16 ( MCEnv* mce, IRAtom
+ static
+ IRAtom* binary16Ix8 ( MCEnv* mce, IRAtom* vatom1, IRAtom* vatom2 )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ IRAtom* at;
+ at = mkUifUV128(mce, vatom1, vatom2);
+ at = mkPCast16x8(mce, at);
+@@ -2923,6 +3034,7 @@ IRAtom* binary16Ix8 ( MCEnv* mce, IRAtom
+ static
+ IRAtom* binary32Ix4 ( MCEnv* mce, IRAtom* vatom1, IRAtom* vatom2 )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ IRAtom* at;
+ at = mkUifUV128(mce, vatom1, vatom2);
+ at = mkPCast32x4(mce, at);
+@@ -2932,6 +3044,7 @@ IRAtom* binary32Ix4 ( MCEnv* mce, IRAtom
+ static
+ IRAtom* binary64Ix2 ( MCEnv* mce, IRAtom* vatom1, IRAtom* vatom2 )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ IRAtom* at;
+ at = mkUifUV128(mce, vatom1, vatom2);
+ at = mkPCast64x2(mce, at);
+@@ -2941,6 +3054,7 @@ IRAtom* binary64Ix2 ( MCEnv* mce, IRAtom
+ static
+ IRAtom* binary128Ix1 ( MCEnv* mce, IRAtom* vatom1, IRAtom* vatom2 )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ IRAtom* at;
+ at = mkUifUV128(mce, vatom1, vatom2);
+ at = mkPCast128x1(mce, at);
+@@ -2952,6 +3066,7 @@ IRAtom* binary128Ix1 ( MCEnv* mce, IRAto
+ static
+ IRAtom* binary8Ix8 ( MCEnv* mce, IRAtom* vatom1, IRAtom* vatom2 )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ IRAtom* at;
+ at = mkUifU64(mce, vatom1, vatom2);
+ at = mkPCast8x8(mce, at);
+@@ -2961,6 +3076,7 @@ IRAtom* binary8Ix8 ( MCEnv* mce, IRAtom*
+ static
+ IRAtom* binary16Ix4 ( MCEnv* mce, IRAtom* vatom1, IRAtom* vatom2 )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ IRAtom* at;
+ at = mkUifU64(mce, vatom1, vatom2);
+ at = mkPCast16x4(mce, at);
+@@ -2970,6 +3086,7 @@ IRAtom* binary16Ix4 ( MCEnv* mce, IRAtom
+ static
+ IRAtom* binary32Ix2 ( MCEnv* mce, IRAtom* vatom1, IRAtom* vatom2 )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ IRAtom* at;
+ at = mkUifU64(mce, vatom1, vatom2);
+ at = mkPCast32x2(mce, at);
+@@ -2979,6 +3096,7 @@ IRAtom* binary32Ix2 ( MCEnv* mce, IRAtom
+ static
+ IRAtom* binary64Ix1 ( MCEnv* mce, IRAtom* vatom1, IRAtom* vatom2 )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ IRAtom* at;
+ at = mkUifU64(mce, vatom1, vatom2);
+ at = mkPCastTo(mce, Ity_I64, at);
+@@ -2990,6 +3108,7 @@ IRAtom* binary64Ix1 ( MCEnv* mce, IRAtom
+ static
+ IRAtom* binary8Ix4 ( MCEnv* mce, IRAtom* vatom1, IRAtom* vatom2 )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ IRAtom* at;
+ at = mkUifU32(mce, vatom1, vatom2);
+ at = mkPCast8x4(mce, at);
+@@ -2999,6 +3118,7 @@ IRAtom* binary8Ix4 ( MCEnv* mce, IRAtom*
+ static
+ IRAtom* binary16Ix2 ( MCEnv* mce, IRAtom* vatom1, IRAtom* vatom2 )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ IRAtom* at;
+ at = mkUifU32(mce, vatom1, vatom2);
+ at = mkPCast16x2(mce, at);
+@@ -3016,6 +3136,7 @@ IRAtom* expr2vbits_Qop ( MCEnv* mce,
+ IRAtom* atom1, IRAtom* atom2,
+ IRAtom* atom3, IRAtom* atom4 )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ IRAtom* vatom1 = expr2vbits( mce, atom1, HuOth );
+ IRAtom* vatom2 = expr2vbits( mce, atom2, HuOth );
+ IRAtom* vatom3 = expr2vbits( mce, atom3, HuOth );
+@@ -3075,6 +3196,7 @@ IRAtom* expr2vbits_Triop ( MCEnv* mce,
+ IROp op,
+ IRAtom* atom1, IRAtom* atom2, IRAtom* atom3 )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ IRAtom* vatom1 = expr2vbits( mce, atom1, HuOth );
+ IRAtom* vatom2 = expr2vbits( mce, atom2, HuOth );
+ IRAtom* vatom3 = expr2vbits( mce, atom3, HuOth );
+@@ -3219,6 +3341,7 @@ IRAtom* expr2vbits_Binop ( MCEnv* mce,
+ IRAtom* atom1, IRAtom* atom2,
+ HowUsed hu/*use HuOth if unknown*/ )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ IRType and_or_ty;
+ IRAtom* (*uifu) (MCEnv*, IRAtom*, IRAtom*);
+ IRAtom* (*difd) (MCEnv*, IRAtom*, IRAtom*);
+@@ -4648,6 +4771,7 @@ IRAtom* expr2vbits_Binop ( MCEnv* mce,
+ static
+ IRExpr* expr2vbits_Unop ( MCEnv* mce, IROp op, IRAtom* atom )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ /* For the widening operations {8,16,32}{U,S}to{16,32,64}, the
+ selection of shadow operation implicitly duplicates the logic in
+ do_shadow_LoadG and should be kept in sync (in the very unlikely
+@@ -5017,6 +5141,7 @@ IRAtom* expr2vbits_Load_WRK ( MCEnv* mce
+ IREndness end, IRType ty,
+ IRAtom* addr, UInt bias, IRAtom* guard )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ tl_assert(isOriginalAtom(mce,addr));
+ tl_assert(end == Iend_LE || end == Iend_BE);
+
+@@ -5152,6 +5277,7 @@ IRAtom* expr2vbits_Load ( MCEnv* mce,
+ IRAtom* addr, UInt bias,
+ IRAtom* guard )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ tl_assert(end == Iend_LE || end == Iend_BE);
+ switch (shadowTypeV(ty)) {
+ case Ity_I8:
+@@ -5186,6 +5312,7 @@ IRAtom* expr2vbits_Load_guarded_General
+ IRAtom* guard,
+ IROp vwiden, IRAtom* valt )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ /* Sanity check the conversion operation, and also set TYWIDE. */
+ IRType tyWide = Ity_INVALID;
+ switch (vwiden) {
+@@ -5243,6 +5370,7 @@ IRAtom* expr2vbits_Load_guarded_Simple (
+ IRAtom* addr, UInt bias,
+ IRAtom *guard )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ return expr2vbits_Load_guarded_General(
+ mce, end, ty, addr, bias, guard, Iop_INVALID, definedOfType(ty)
+ );
+@@ -5253,6 +5381,7 @@ static
+ IRAtom* expr2vbits_ITE ( MCEnv* mce,
+ IRAtom* cond, IRAtom* iftrue, IRAtom* iffalse )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ IRAtom *vbitsC, *vbits0, *vbits1;
+ IRType ty;
+ /* Given ITE(cond, iftrue, iffalse), generate
+@@ -5281,6 +5410,7 @@ static
+ IRExpr* expr2vbits ( MCEnv* mce, IRExpr* e,
+ HowUsed hu/*use HuOth if unknown*/ )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ switch (e->tag) {
+
+ case Iex_Get:
+@@ -5356,6 +5486,7 @@ IRExpr* expr2vbits ( MCEnv* mce, IRExpr*
+ static
+ IRExpr* zwidenToHostWord ( MCEnv* mce, IRAtom* vatom )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ IRType ty, tyH;
+
+ /* vatom is vbits-value and as such can only have a shadow type. */
+@@ -5416,6 +5547,7 @@ void do_shadow_Store ( MCEnv* mce,
+ IRAtom* data, IRAtom* vdata,
+ IRAtom* guard )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ IROp mkAdd;
+ IRType ty, tyAddr;
+ void* helper = NULL;
+@@ -5666,6 +5798,7 @@ void do_shadow_Store ( MCEnv* mce,
+
+ static IRType szToITy ( Int n )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ switch (n) {
+ case 1: return Ity_I8;
+ case 2: return Ity_I16;
+@@ -5678,6 +5811,7 @@ static IRType szToITy ( Int n )
+ static
+ void do_shadow_Dirty ( MCEnv* mce, IRDirty* d )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ Int i, k, n, toDo, gSz, gOff;
+ IRAtom *src, *here, *curr;
+ IRType tySrc, tyDst;
+@@ -5906,6 +6040,7 @@ void do_shadow_Dirty ( MCEnv* mce, IRDir
+ static
+ void do_AbiHint ( MCEnv* mce, IRExpr* base, Int len, IRExpr* nia )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ IRDirty* di;
+
+ if (MC_(clo_mc_level) == 3) {
+@@ -5982,6 +6117,7 @@ static void bind_shadow_tmp_to_orig ( UC
+ MCEnv* mce,
+ IRAtom* orig, IRAtom* shadow )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ tl_assert(isOriginalAtom(mce, orig));
+ tl_assert(isShadowAtom(mce, shadow));
+ switch (orig->tag) {
+@@ -6008,6 +6144,7 @@ static void bind_shadow_tmp_to_orig ( UC
+ static
+ void do_shadow_CAS ( MCEnv* mce, IRCAS* cas )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ /* Scheme is (both single- and double- cases):
+
+ 1. fetch data#,dataB (the proposed new value)
+@@ -6141,6 +6278,7 @@ void do_shadow_CAS ( MCEnv* mce, IRCAS*
+
+ static void do_shadow_CAS_single ( MCEnv* mce, IRCAS* cas )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ IRAtom *vdataLo = NULL, *bdataLo = NULL;
+ IRAtom *vexpdLo = NULL, *bexpdLo = NULL;
+ IRAtom *voldLo = NULL, *boldLo = NULL;
+@@ -6231,6 +6369,7 @@ static void do_shadow_CAS_single ( MCEnv
+
+ static void do_shadow_CAS_double ( MCEnv* mce, IRCAS* cas )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ IRAtom *vdataHi = NULL, *bdataHi = NULL;
+ IRAtom *vdataLo = NULL, *bdataLo = NULL;
+ IRAtom *vexpdHi = NULL, *bexpdHi = NULL;
+@@ -6399,6 +6538,7 @@ static void do_shadow_LLSC ( MCEnv* m
+ IRExpr* stAddr,
+ IRExpr* stStoredata )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ /* In short: treat a load-linked like a normal load followed by an
+ assignment of the loaded (shadow) data to the result temporary.
+ Treat a store-conditional like a normal store, and mark the
+@@ -6458,6 +6598,7 @@ static void do_shadow_LLSC ( MCEnv* m
+
+ static void do_shadow_StoreG ( MCEnv* mce, IRStoreG* sg )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ complainIfUndefined(mce, sg->guard, NULL);
+ /* do_shadow_Store will generate code to check the definedness and
+ validity of sg->addr, in the case where sg->guard evaluates to
+@@ -6471,6 +6612,7 @@ static void do_shadow_StoreG ( MCEnv* mc
+
+ static void do_shadow_LoadG ( MCEnv* mce, IRLoadG* lg )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ complainIfUndefined(mce, lg->guard, NULL);
+ /* expr2vbits_Load_guarded_General will generate code to check the
+ definedness and validity of lg->addr, in the case where
+@@ -6516,6 +6658,7 @@ static void do_shadow_LoadG ( MCEnv* mce
+ /* Almost identical to findShadowTmpV. */
+ static IRTemp findShadowTmpB ( MCEnv* mce, IRTemp orig )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ TempMapEnt* ent;
+ /* VG_(indexXA) range-checks 'orig', hence no need to check
+ here. */
+@@ -6536,6 +6679,7 @@ static IRTemp findShadowTmpB ( MCEnv* mc
+
+ static IRAtom* gen_maxU32 ( MCEnv* mce, IRAtom* b1, IRAtom* b2 )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ return assignNew( 'B', mce, Ity_I32, binop(Iop_Max32U, b1, b2) );
+ }
+
+@@ -6552,6 +6696,7 @@ static IRAtom* gen_guarded_load_b ( MCEn
+ IRAtom* baseaddr,
+ Int offset, IRExpr* guard )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ void* hFun;
+ const HChar* hName;
+ IRTemp bTmp;
+@@ -6624,6 +6769,7 @@ static IRAtom* gen_guarded_load_b ( MCEn
+ static IRAtom* gen_load_b ( MCEnv* mce, Int szB, IRAtom* baseaddr,
+ Int offset )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ return gen_guarded_load_b(mce, szB, baseaddr, offset, NULL/*guard*/);
+ }
+
+@@ -6641,6 +6787,7 @@ IRAtom* expr2ori_Load_guarded_General (
+ IRAtom* addr, UInt bias,
+ IRAtom* guard, IRAtom* balt )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ /* If the guard evaluates to True, this will hold the loaded
+ origin. If the guard evaluates to False, this will be zero,
+ meaning "unknown origin", in which case we will have to replace
+@@ -6668,6 +6815,7 @@ static void gen_store_b ( MCEnv* mce, In
+ IRAtom* baseaddr, Int offset, IRAtom* dataB,
+ IRAtom* guard )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ void* hFun;
+ const HChar* hName;
+ IRDirty* di;
+@@ -6719,6 +6867,7 @@ static void gen_store_b ( MCEnv* mce, In
+ }
+
+ static IRAtom* narrowTo32 ( MCEnv* mce, IRAtom* e ) {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ IRType eTy = typeOfIRExpr(mce->sb->tyenv, e);
+ if (eTy == Ity_I64)
+ return assignNew( 'B', mce, Ity_I32, unop(Iop_64to32, e) );
+@@ -6728,6 +6877,7 @@ static IRAtom* narrowTo32 ( MCEnv* mce,
+ }
+
+ static IRAtom* zWidenFrom32 ( MCEnv* mce, IRType dstTy, IRAtom* e ) {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ IRType eTy = typeOfIRExpr(mce->sb->tyenv, e);
+ tl_assert(eTy == Ity_I32);
+ if (dstTy == Ity_I64)
+@@ -6738,6 +6888,7 @@ static IRAtom* zWidenFrom32 ( MCEnv* mce
+
+ static IRAtom* schemeE ( MCEnv* mce, IRExpr* e )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ tl_assert(MC_(clo_mc_level) == 3);
+
+ switch (e->tag) {
+@@ -6874,6 +7025,7 @@ static IRAtom* schemeE ( MCEnv* mce, IRE
+
+ static void do_origins_Dirty ( MCEnv* mce, IRDirty* d )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ // This is a hacked version of do_shadow_Dirty
+ Int i, k, n, toDo, gSz, gOff;
+ IRAtom *here, *curr;
+@@ -7084,6 +7236,7 @@ static void do_origins_Store_guarded ( M
+ IRExpr* stData,
+ IRExpr* guard )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ Int dszB;
+ IRAtom* dataB;
+ /* assert that the B value for the address is already available
+@@ -7103,6 +7256,7 @@ static void do_origins_Store_plain ( MCE
+ IRExpr* stAddr,
+ IRExpr* stData )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ do_origins_Store_guarded ( mce, stEnd, stAddr, stData,
+ NULL/*guard*/ );
+ }
+@@ -7112,12 +7266,14 @@ static void do_origins_Store_plain ( MCE
+
+ static void do_origins_StoreG ( MCEnv* mce, IRStoreG* sg )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ do_origins_Store_guarded( mce, sg->end, sg->addr,
+ sg->data, sg->guard );
+ }
+
+ static void do_origins_LoadG ( MCEnv* mce, IRLoadG* lg )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ IRType loadedTy = Ity_INVALID;
+ switch (lg->cvt) {
+ case ILGop_IdentV128: loadedTy = Ity_V128; break;
+@@ -7142,6 +7298,7 @@ static void do_origins_LoadG ( MCEnv* mc
+
+ static void schemeS ( MCEnv* mce, IRStmt* st )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ tl_assert(MC_(clo_mc_level) == 3);
+
+ switch (st->tag) {
+@@ -7347,6 +7504,7 @@ typedef
+
+ static Bool sameIRValue ( IRExpr* e1, IRExpr* e2 )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ if (e1->tag != e2->tag)
+ return False;
+ switch (e1->tag) {
+@@ -7394,6 +7552,7 @@ static Bool sameIRValue ( IRExpr* e1, IR
+ static
+ Bool check_or_add ( Pairs* tidyingEnv, IRExpr* guard, void* entry )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ UInt i, n = tidyingEnv->pairsUsed;
+ tl_assert(n <= N_TIDYING_PAIRS);
+ for (i = 0; i < n; i++) {
+@@ -7428,6 +7587,7 @@ Bool check_or_add ( Pairs* tidyingEnv, I
+
+ static Bool is_helperc_value_checkN_fail ( const HChar* name )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ /* This is expensive because it happens a lot. We are checking to
+ see whether |name| is one of the following 8 strings:
+
+@@ -7471,6 +7631,7 @@ static Bool is_helperc_value_checkN_fail
+
+ IRSB* MC_(final_tidy) ( IRSB* sb_in )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ Int i;
+ IRStmt* st;
+ IRDirty* di;
+@@ -7526,6 +7687,7 @@ IRSB* MC_(final_tidy) ( IRSB* sb_in )
+
+ void MC_(do_instrumentation_startup_checks)( void )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ /* Make a best-effort check to see that is_helperc_value_checkN_fail
+ is working as we expect. */
+
+@@ -7582,6 +7744,7 @@ void MC_(do_instrumentation_startup_chec
+
+ static Bool isBogusAtom ( IRAtom* at )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ if (at->tag == Iex_RdTmp)
+ return False;
+ tl_assert(at->tag == Iex_Const);
+@@ -7625,6 +7788,7 @@ static Bool isBogusAtom ( IRAtom* at )
+ isBogusAtom()? */
+ static inline Bool containsBogusLiterals ( /*FLAT*/ IRStmt* st )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ Int i;
+ IRExpr* e;
+ IRDirty* d;
+@@ -7834,6 +7998,7 @@ static inline void noteTmpUsesIn ( /*MOD
+ UInt tyenvUsed,
+ HowUsed newUse, IRAtom* at )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ /* For the atom |at|, declare that for any tmp |t| in |at|, we will have
+ seen a use of |newUse|. So, merge that info into |t|'s accumulated
+ use info. */
+@@ -7862,6 +8027,7 @@ static void preInstrumentationAnalysis (
+ /*OUT*/Bool* hasBogusLiteralsP,
+ const IRSB* sb_in )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ const UInt nOrigTmps = (UInt)sb_in->tyenv->types_used;
+
+ // We've seen no bogus literals so far.
+@@ -8065,6 +8231,7 @@ IRSB* MC_(instrument) ( VgCallbackClosur
+ const VexArchInfo* archinfo_host,
+ IRType gWordTy, IRType hWordTy )
+ {
++VG_(debugLog)(2, "KR", "%s() %s:%d\n", __func__, __FILE__, __LINE__);
+ Bool verboze = 0||False;
+ Int i, j, first_stmt;
+ IRStmt* st;
diff --git a/valgrind-netbsd/patches/patch-memcheck_tests_vbit-test_util.c b/valgrind-netbsd/patches/patch-memcheck_tests_vbit-test_util.c
new file mode 100644
index 0000000000..cdd5ae271c
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-memcheck_tests_vbit-test_util.c
@@ -0,0 +1,16 @@
+$NetBSD$
+
+--- memcheck/tests/vbit-test/util.c.orig 2018-05-05 07:42:22.000000000 +0000
++++ memcheck/tests/vbit-test/util.c
+@@ -39,6 +39,11 @@
+ # else
+ # define __BYTE_ORDER __BIG_ENDIAN
+ # endif
++#elif defined(__NetBSD__)
++#include <endian.h>
++#define __BYTE_ORDER _BYTE_ORDER
++#define __LITTLE_ENDIAN _LITTLE_ENDIAN
++#define __BIG_ENDIAN _BIG_ENDIAN
+ #else
+ #include <endian.h>
+ #endif
diff --git a/valgrind-netbsd/patches/patch-memcheck_tests_vbit-test_vbits.c b/valgrind-netbsd/patches/patch-memcheck_tests_vbit-test_vbits.c
new file mode 100644
index 0000000000..42008ce963
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-memcheck_tests_vbit-test_vbits.c
@@ -0,0 +1,16 @@
+$NetBSD$
+
+--- memcheck/tests/vbit-test/vbits.c.orig 2018-05-05 07:42:22.000000000 +0000
++++ memcheck/tests/vbit-test/vbits.c
+@@ -38,6 +38,11 @@
+ # else
+ # define __BYTE_ORDER __BIG_ENDIAN
+ # endif
++#elif defined(__NetBSD__)
++#include <endian.h>
++#define __BYTE_ORDER _BYTE_ORDER
++#define __LITTLE_ENDIAN _LITTLE_ENDIAN
++#define __BIG_ENDIAN _BIG_ENDIAN
+ #else
+ #include <endian.h>
+ #endif
diff --git a/valgrind-netbsd/patches/patch-netbsd.supp b/valgrind-netbsd/patches/patch-netbsd.supp
new file mode 100644
index 0000000000..fadc4a37ce
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-netbsd.supp
@@ -0,0 +1,6 @@
+$NetBSD$
+
+--- netbsd.supp.orig 2019-03-26 11:05:40.634402025 +0000
++++ netbsd.supp
+@@ -0,0 +1 @@
++# Suppressions for NetBSD
diff --git a/valgrind-netbsd/patches/patch-shared_vg__replace__strmem.c b/valgrind-netbsd/patches/patch-shared_vg__replace__strmem.c
new file mode 100644
index 0000000000..b66de6ec68
--- /dev/null
+++ b/valgrind-netbsd/patches/patch-shared_vg__replace__strmem.c
@@ -0,0 +1,17 @@
+$NetBSD$
+
+--- shared/vg_replace_strmem.c.orig 2018-05-05 07:42:22.000000000 +0000
++++ shared/vg_replace_strmem.c
+@@ -209,6 +209,12 @@ static inline void my_exit ( int x )
+ STRRCHR(NONE, __dl_strrchr); /* in /system/bin/linker */
+ #endif
+
++#elif defined(VGO_netbsd)
++// STRCHR(VG_Z_LIBC_SONAME, strchr)
++// STRCHR(VG_Z_LIBC_SONAME, index)
++// STRCHR(VG_Z_LD_ELF_SO_1, strchr)
++// STRCHR(VG_Z_LD_ELF32_SO_1, strchr)
++
+ #elif defined(VGO_darwin)
+ //STRRCHR(VG_Z_LIBC_SONAME, strrchr)
+ //STRRCHR(VG_Z_LIBC_SONAME, rindex)
Home |
Main Index |
Thread Index |
Old Index