Source-Changes-HG archive
[src/trunk]: src/sys/sys Add support for kASan on amd64. Written by me, with ...
details: https://anonhg.NetBSD.org/src/rev/5e7d982ae227
branches: trunk
changeset: 992241:5e7d982ae227
user: maxv <maxv%NetBSD.org@localhost>
date: Mon Aug 20 15:04:51 2018 +0000
description:
Add support for kASan on amd64. Written by me, with some parts inspired
by Siddharth Muralee's initial work. This feature can detect several
kinds of memory bugs at runtime, and it is an excellent debugging tool.
It can be enabled by uncommenting these three lines in GENERIC:
#makeoptions KASAN=1 # Kernel Address Sanitizer
#options KASAN
#no options SVS
The kernel is compiled without SVS, without the DMAP and without the
PCPU area. A shadow area is created at boot time, and it can cover the
upper 128TB of the address space. This area is populated gradually as
we allocate memory. With this design, memory consumption is kept to a
minimum.
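For scale: one shadow byte covers eight bytes of kernel memory
(KASAN_SHADOW_SCALE_SHIFT = 3 in asan.c below), so shadowing the
2^47-byte upper half of the address space reserves at most

	2^47 / 2^3 = 2^44 bytes = 16TB

of virtual space, of which only the parts backing actual allocations
ever receive physical pages.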
The compiler inserts calls to the __asan_* functions on each memory
access. We verify whether the access is legal by looking at the shadow
area.
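The __asan_* handlers themselves fall in the truncated part of the diff
below; as a minimal sketch, built only from the helpers visible in
asan.c (the exact body of the real handler is an assumption), a 1-byte
load handler looks roughly like:

void
__asan_load1(unsigned long addr)
{
	/* Sketch: do nothing until the shadow is initialized, and
	 * skip the areas excluded from the monitoring. */
	if (__predict_false(!kasan_enabled))
		return;
	if (__predict_false(kasan_unsupported(addr)))
		return;
	/* Report a read access if the shadow marks the byte invalid. */
	if (!kasan_shadow_1byte_isvalid(addr))
		kasan_report(addr, 1, false, __RET_ADDR);
}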
We declare our own special memcpy/memset/etc functions, because the
compiler's builtins don't add the __asan_* instrumentation.
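For illustration, a sketch of such a replacement (the kasan_memcpy name
and body here are assumptions; the real declarations go through
sys/lib/libkern/libkern.h per the diffstat):

void *
kasan_memcpy(void *dst, const void *src, size_t len)
{
	size_t i;

	/* Sketch: validate every byte of both buffers in the shadow,
	 * then defer to the (uninstrumented) builtin for the copy. */
	for (i = 0; i < len; i++) {
		if (!kasan_shadow_1byte_isvalid((unsigned long)src + i))
			kasan_report((unsigned long)src + i, 1, false,
			    __RET_ADDR);
		if (!kasan_shadow_1byte_isvalid((unsigned long)dst + i))
			kasan_report((unsigned long)dst + i, 1, true,
			    __RET_ADDR);
	}
	return __builtin_memcpy(dst, src, len);
}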
Initially, all the mappings are marked as valid. During dynamic
allocations, we add a redzone, which we mark as invalid. Any access to
it will trigger a kASan error message. Additionally, the compiler adds
redzones around global variables, and we mark these redzones as invalid
too.
The illegal-access detection works with a 1-byte granularity.
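To make the granularity concrete: for a 13-byte allocation at an
8-byte-aligned address, kasan_add_redzone() (below) rounds the size up
to 16 and appends an 8-byte redzone, and kasan_alloc() then sets the
three shadow bytes to

	{ 8, 5, 0xFB }

i.e. "all 8 bytes valid", "first 5 bytes valid", "redzone". An access
at offset 13 computes (13 & 7) + 1 = 6, which is greater than 5, so it
is reported; offsets 16-23 hit the 0xFB redzone marker directly.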
For now, we cover three areas:
- global variables
- kmem_alloc-ated areas
- malloc-ated areas
More will come, but that's a good start.
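The kmem/malloc call sites are in the truncated part of the diff; as a
sketch of how an allocator uses the kasan_* API (the call-site shape and
the alloc_backend()/free_backend() names are hypothetical):

void *
alloc_with_redzone(size_t size)
{
	size_t origsize = size;
	void *p;

	kasan_add_redzone(&size);	/* grow the request for the redzone */
	p = alloc_backend(size);	/* hypothetical backing allocator */
	kasan_alloc(p, origsize, size);	/* poison redzone, unpoison data */
	return p;
}

void
free_with_redzone(void *p, size_t size)
{
	kasan_add_redzone(&size);
	kasan_free(p, size);		/* mark the whole buffer valid again */
	free_backend(p, size);
}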
diffstat:
sys/arch/amd64/amd64/asan.c | 591 +++++++++++++++++++++++++++++++++++++
sys/arch/amd64/amd64/machdep.c | 19 +-
sys/arch/amd64/conf/GENERIC | 9 +-
sys/arch/amd64/conf/Makefile.amd64 | 7 +-
sys/arch/amd64/conf/files.amd64 | 3 +-
sys/arch/amd64/conf/kern.ldscript | 6 +-
sys/arch/amd64/include/pmap.h | 8 +-
sys/arch/amd64/include/types.h | 5 +-
sys/arch/x86/include/pmap.h | 9 +-
sys/arch/x86/x86/pmap.c | 19 +-
sys/conf/files | 3 +-
sys/kern/kern_malloc.c | 25 +-
sys/kern/subr_kmem.c | 28 +-
sys/lib/libkern/libkern.h | 14 +-
sys/sys/Makefile | 5 +-
sys/sys/asan.h | 41 ++
16 files changed, 764 insertions(+), 28 deletions(-)
diffs (truncated from 1163 to 300 lines):
diff -r b2de2fa0c249 -r 5e7d982ae227 sys/arch/amd64/amd64/asan.c
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/sys/arch/amd64/amd64/asan.c Mon Aug 20 15:04:51 2018 +0000
@@ -0,0 +1,591 @@
+/* $NetBSD: asan.c,v 1.1 2018/08/20 15:04:51 maxv Exp $ */
+
+/*
+ * Copyright (c) 2018 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Maxime Villard, and Siddharth Muralee.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__KERNEL_RCSID(0, "$NetBSD: asan.c,v 1.1 2018/08/20 15:04:51 maxv Exp $");
+
+#include <sys/param.h>
+#include <sys/device.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/conf.h>
+#include <sys/systm.h>
+#include <sys/types.h>
+#include <sys/asan.h>
+
+#include <uvm/uvm.h>
+#include <amd64/pmap.h>
+#include <amd64/vmparam.h>
+
+#define VIRTUAL_SHIFT 47 /* 48-bit address space, cut in half */
+#define CANONICAL_BASE 0xFFFF800000000000
+
+#define KASAN_SHADOW_SCALE_SHIFT 3
+#define KASAN_SHADOW_SCALE_SIZE (1UL << KASAN_SHADOW_SCALE_SHIFT)
+#define KASAN_SHADOW_MASK (KASAN_SHADOW_SCALE_SIZE - 1)
+
+#define KASAN_SHADOW_SIZE (1ULL << (VIRTUAL_SHIFT - KASAN_SHADOW_SCALE_SHIFT))
+#define KASAN_SHADOW_START (VA_SIGN_NEG((L4_SLOT_KASAN * NBPD_L4)))
+#define KASAN_SHADOW_END (KASAN_SHADOW_START + KASAN_SHADOW_SIZE)
+
+#define __RET_ADDR (unsigned long)__builtin_return_address(0)
+
+void kasan_shadow_map(void *, size_t);
+void kasan_init(void);
+
+static bool kasan_enabled __read_mostly = false;
+
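+/*
+ * Return a pointer to the shadow byte that covers the 8-byte granule
+ * containing 'addr'.
+ */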
+static inline int8_t *
+kasan_addr_to_shad(const void *addr)
+{
+ vaddr_t va = (vaddr_t)addr;
+ return (int8_t *)(KASAN_SHADOW_START +
+ ((va - CANONICAL_BASE) >> KASAN_SHADOW_SCALE_SHIFT));
+}
+
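+/* The recursive PTE area is excluded from the monitoring. */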
+static __always_inline bool
+kasan_unsupported(vaddr_t addr)
+{
+ return (addr >= (vaddr_t)PTE_BASE &&
+ addr < ((vaddr_t)PTE_BASE + NBPD_L4));
+}
+
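+/*
+ * Enter any missing L4/L3/L2/L1 entries for the shadow page at 'va',
+ * allocating page-table pages as needed.
+ */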
+static void
+kasan_shadow_map_page(vaddr_t va)
+{
+ paddr_t pa;
+
+ if (!pmap_valid_entry(L4_BASE[pl4_i(va)])) {
+ pa = pmap_get_physpage();
+ L4_BASE[pl4_i(va)] = pa | PG_KW | pmap_pg_nx | PG_V;
+ }
+ if (!pmap_valid_entry(L3_BASE[pl3_i(va)])) {
+ pa = pmap_get_physpage();
+ L3_BASE[pl3_i(va)] = pa | PG_KW | pmap_pg_nx | PG_V;
+ }
+ if (!pmap_valid_entry(L2_BASE[pl2_i(va)])) {
+ pa = pmap_get_physpage();
+ L2_BASE[pl2_i(va)] = pa | PG_KW | pmap_pg_nx | PG_V;
+ }
+ if (!pmap_valid_entry(L1_BASE[pl1_i(va)])) {
+ pa = pmap_get_physpage();
+ L1_BASE[pl1_i(va)] = pa | PG_KW | pmap_pg_g | pmap_pg_nx | PG_V;
+ }
+}
+
+/*
+ * Allocate the necessary pages in the shadow, so that we can monitor the
+ * passed area.
+ */
+void
+kasan_shadow_map(void *addr, size_t size)
+{
+ size_t sz, npages, i;
+ vaddr_t va;
+
+ va = (vaddr_t)kasan_addr_to_shad(addr);
+ sz = roundup(size, KASAN_SHADOW_SCALE_SIZE) / KASAN_SHADOW_SCALE_SIZE;
+ va = rounddown(va, PAGE_SIZE);
+ npages = roundup(sz, PAGE_SIZE) / PAGE_SIZE;
+
+ KASSERT(va >= KASAN_SHADOW_START && va < KASAN_SHADOW_END);
+
+ for (i = 0; i < npages; i++) {
+ kasan_shadow_map_page(va + i * PAGE_SIZE);
+ }
+}
+
+/* -------------------------------------------------------------------------- */
+
+#ifdef __HAVE_PCPU_AREA
+#error "PCPU area not allowed with KASAN"
+#endif
+#ifdef __HAVE_DIRECT_MAP
+#error "DMAP not allowed with KASAN"
+#endif
+
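+/*
+ * Walk the CTOR list and call each constructor. The compiler emits
+ * constructors that register the redzones of the global variables.
+ */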
+static void
+kasan_ctors(void)
+{
+ extern uint64_t __CTOR_LIST__, __CTOR_END__;
+ size_t nentries, i;
+ uint64_t *ptr;
+
+ nentries = ((size_t)&__CTOR_END__ - (size_t)&__CTOR_LIST__) /
+ sizeof(uintptr_t);
+
+ ptr = &__CTOR_LIST__;
+ for (i = 0; i < nentries; i++) {
+ void (*func)(void);
+
+ func = (void *)(*ptr);
+ (*func)();
+
+ ptr++;
+ }
+}
+
+/*
+ * Create the shadow mapping. We don't create the 'User' area, because we
+ * exclude it from the monitoring. The 'Main' area is created dynamically
+ * in pmap_growkernel.
+ */
+void
+kasan_init(void)
+{
+ extern struct bootspace bootspace;
+ size_t i;
+
+ CTASSERT((KASAN_SHADOW_SIZE / NBPD_L4) == NL4_SLOT_KASAN);
+
+ /* Kernel. */
+ for (i = 0; i < BTSPACE_NSEGS; i++) {
+ if (bootspace.segs[i].type == BTSEG_NONE) {
+ continue;
+ }
+ kasan_shadow_map((void *)bootspace.segs[i].va,
+ bootspace.segs[i].sz);
+ }
+
+ /* Boot region. */
+ kasan_shadow_map((void *)bootspace.boot.va, bootspace.boot.sz);
+
+ /* Module map. */
+ kasan_shadow_map((void *)bootspace.smodule,
+ (size_t)(bootspace.emodule - bootspace.smodule));
+
+ /* The bootstrap spare va. */
+ kasan_shadow_map((void *)bootspace.spareva, PAGE_SIZE);
+
+ kasan_enabled = true;
+
+ /* Call the ASAN constructors. */
+ kasan_ctors();
+}
+
+/* -------------------------------------------------------------------------- */
+
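+/*
+ * Log the unauthorized access: the faulting instruction pointer, the
+ * target address, the size and the direction of the access.
+ */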
+static void
+kasan_report(unsigned long addr, size_t size, bool write, unsigned long rip)
+{
+ printf("kASan: Unauthorized Access In %p: Addr %p [%zu byte%s, %s]\n",
+ (void *)rip, (void *)addr, size, (size > 1 ? "s" : ""),
+ (write ? "write" : "read"));
+}
+
+/* -------------------------------------------------------------------------- */
+
+/* Our redzone values. */
+#define KASAN_GLOBAL_REDZONE 0xFA
+#define KASAN_MEMORY_REDZONE 0xFB
+
+/* Stack redzone shadow values. Part of the compiler ABI. */
+#define KASAN_STACK_LEFT 0xF1
+#define KASAN_STACK_MID 0xF2
+#define KASAN_STACK_RIGHT 0xF3
+#define KASAN_STACK_PARTIAL 0xF4
+#define KASAN_USE_AFTER_SCOPE 0xF8
+
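+/*
+ * Fill the shadow of the [addr, addr+size) range with 'val'. Both the
+ * address and the size must be aligned to the shadow scale.
+ */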
+static void
+kasan_shadow_fill(const void *addr, size_t size, uint8_t val)
+{
+ void *shad;
+
+ if (__predict_false(!kasan_enabled))
+ return;
+ if (__predict_false(size == 0))
+ return;
+ if (__predict_false(kasan_unsupported((vaddr_t)addr)))
+ return;
+
+ KASSERT((vaddr_t)addr % KASAN_SHADOW_SCALE_SIZE == 0);
+ KASSERT(size % KASAN_SHADOW_SCALE_SIZE == 0);
+
+ shad = (void *)kasan_addr_to_shad(addr);
+ size = size >> KASAN_SHADOW_SCALE_SHIFT;
+
+ __builtin_memset(shad, val, size);
+}
+
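+/*
+ * Mark the byte at 'addr' as valid. The shadow byte records how many
+ * leading bytes of the 8-byte granule are addressable.
+ */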
+static __always_inline void
+kasan_shadow_1byte_markvalid(unsigned long addr)
+{
+ int8_t *byte = kasan_addr_to_shad((void *)addr);
+ int8_t last = (addr & KASAN_SHADOW_MASK) + 1;
+
+ *byte = last;
+}
+
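+/*
+ * Round the size up to the shadow scale and append one scale-sized
+ * (8-byte) redzone.
+ */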
+void
+kasan_add_redzone(size_t *size)
+{
+ *size = roundup(*size, KASAN_SHADOW_SCALE_SIZE);
+ *size += KASAN_SHADOW_SCALE_SIZE;
+}
+
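+/*
+ * Mark [addr, addr+size) as valid byte by byte, or fill its shadow with
+ * the memory redzone marker when invalidating.
+ */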
+static void
+kasan_markmem(const void *addr, size_t size, bool valid)
+{
+ size_t i;
+
+ KASSERT((vaddr_t)addr % KASAN_SHADOW_SCALE_SIZE == 0);
+
+ if (valid) {
+ for (i = 0; i < size; i++) {
+ kasan_shadow_1byte_markvalid((unsigned long)addr+i);
+ }
+ } else {
+ KASSERT(size % KASAN_SHADOW_SCALE_SIZE == 0);
+ kasan_shadow_fill(addr, size, KASAN_MEMORY_REDZONE);
+ }
+}
+
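+/*
+ * Called at allocation time: poison the whole buffer (redzone included),
+ * then mark the first 'size' bytes as valid.
+ */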
+void
+kasan_alloc(const void *addr, size_t size, size_t sz_with_redz)
+{
+ kasan_markmem(addr, sz_with_redz, false);
+ kasan_markmem(addr, size, true);
+}
+
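+/*
+ * Called at free time: mark the whole buffer, redzone included, as valid
+ * again. (No use-after-free detection at this point.)
+ */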
+void
+kasan_free(const void *addr, size_t sz_with_redz)
+{
+ kasan_markmem(addr, sz_with_redz, true);
+}
+
+/* -------------------------------------------------------------------------- */
+
+#define ADDR_CROSSES_SCALE_BOUNDARY(addr, size) \
+ (addr >> KASAN_SHADOW_SCALE_SHIFT) != \
+ ((addr + size - 1) >> KASAN_SHADOW_SCALE_SHIFT)
+
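+/*
+ * Return whether the byte at 'addr' is addressable: a shadow byte of 0
+ * means the whole granule is valid, otherwise it gives the number of
+ * valid leading bytes.
+ */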
+static __always_inline bool
+kasan_shadow_1byte_isvalid(unsigned long addr)
+{
+ int8_t *byte = kasan_addr_to_shad((void *)addr);
+ int8_t last = (addr & KASAN_SHADOW_MASK) + 1;
+
+ return __predict_true(*byte == 0 || last <= *byte);