Source-Changes-HG archive
[src/trunk]: src/sys Several changes and improvements in KMEM_GUARD:
details: https://anonhg.NetBSD.org/src/rev/a4d59be3fad0
branches: trunk
changeset: 809677:a4d59be3fad0
user: maxv <maxv%NetBSD.org@localhost>
date: Mon Jul 27 09:24:28 2015 +0000
description:
Several changes and improvements in KMEM_GUARD:
- merge uvm_kmguard.{c,h} into subr_kmem.c. It is only used there, and
the merge makes the code more consistent. It also allows us to enable
KMEM_GUARD without enabling DEBUG.
- rename uvm_kmguard_XXX to kmem_guard_XXX, for consistency
- improve kmem_guard_alloc() so that it supports allocations bigger than
PAGE_SIZE
- remove the canary value, and use the kmem header directly as the
underflow pattern (see the layout sketch after this description)
- fix some comments
(The UAF fifo is disabled for the moment; we actually need to register
the va and its size, and add weight support so that it does not consume
too much memory.)
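
For orientation before reading the diff: based on the hunks shown below, a
guarded allocation can be pictured as follows. This is an illustrative
sketch, not text from the commit; SIZE_SIZE is the size of the kmem header,
which now doubles as the underflow pattern.

    /*
     * Sketch of one guarded allocation, per the size computation in
     * kmem_guard_alloc() below:
     *
     *   size = round_page(requested_size + SIZE_SIZE) + PAGE_SIZE;
     *
     *   +-------+-------------+----------------------+------------+
     *   | slack | kmem header | requested_size bytes | guard page |
     *   |       | (underflow  | (pointer returned    | (unmapped) |
     *   |       |  pattern)   |  to the caller)      |            |
     *   +-------+-------------+----------------------+------------+
     *
     * The object is pushed up against the unmapped guard page, so any
     * read/write past the end faults immediately, while an underflow
     * scribbles over the kmem header and is caught at free time as a
     * size mismatch.
     */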
diffstat:
sys/kern/subr_kmem.c | 240 ++++++++++++++++++++++++++++++++++++++++++-------
sys/uvm/files.uvm | 3 +-
sys/uvm/uvm_kmguard.c | 197 -----------------------------------------
sys/uvm/uvm_kmguard.h | 46 ---------
4 files changed, 204 insertions(+), 282 deletions(-)
diffs (truncated from 622 to 300 lines):
diff -r 34516916ef7a -r a4d59be3fad0 sys/kern/subr_kmem.c
--- a/sys/kern/subr_kmem.c Mon Jul 27 07:53:46 2015 +0000
+++ b/sys/kern/subr_kmem.c Mon Jul 27 09:24:28 2015 +0000
@@ -1,11 +1,11 @@
-/* $NetBSD: subr_kmem.c,v 1.60 2014/07/22 07:38:41 maxv Exp $ */
+/* $NetBSD: subr_kmem.c,v 1.61 2015/07/27 09:24:28 maxv Exp $ */
/*-
- * Copyright (c) 2009 The NetBSD Foundation, Inc.
+ * Copyright (c) 2009-2015 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
- * by Andrew Doran.
+ * by Andrew Doran and Maxime Villard.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -87,10 +87,10 @@
* Check the pattern on allocation.
*
* KMEM_GUARD
- * A kernel with "option DEBUG" has "kmguard" debugging feature compiled
- * in. See the comment in uvm/uvm_kmguard.c for what kind of bugs it tries
- * to detect. Even if compiled in, it's disabled by default because it's
- * very expensive. You can enable it on boot by:
+ * A kernel with "option DEBUG" has the "kmem_guard" debugging feature
+ * compiled in. See the comment below for the kind of bugs it tries to
+ * detect. Even if compiled in, it's disabled by default because it's very
+ * expensive. You can enable it on boot by:
* boot -d
* db> w kmem_guard_depth 0t30000
* db> c
@@ -100,7 +100,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: subr_kmem.c,v 1.60 2014/07/22 07:38:41 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: subr_kmem.c,v 1.61 2015/07/27 09:24:28 maxv Exp $");
#include <sys/param.h>
#include <sys/callback.h>
@@ -112,7 +112,6 @@
#include <uvm/uvm_extern.h>
#include <uvm/uvm_map.h>
-#include <uvm/uvm_kmguard.h>
#include <lib/libkern/libkern.h>
@@ -182,8 +181,10 @@
#endif /* defined(DIAGNOSTIC) */
#if defined(DEBUG) && defined(_HARDKERNEL)
+#define KMEM_SIZE
#define KMEM_POISON
#define KMEM_GUARD
+static void *kmem_freecheck;
#endif /* defined(DEBUG) */
#if defined(KMEM_POISON)
@@ -222,10 +223,20 @@
#ifndef KMEM_GUARD_DEPTH
#define KMEM_GUARD_DEPTH 0
#endif
+struct kmem_guard {
+ u_int kg_depth;
+ intptr_t * kg_fifo;
+ u_int kg_rotor;
+ vmem_t * kg_vmem;
+};
+
+static bool kmem_guard_init(struct kmem_guard *, u_int, vmem_t *);
+static void *kmem_guard_alloc(struct kmem_guard *, size_t, bool);
+static void kmem_guard_free(struct kmem_guard *, size_t, void *);
+
int kmem_guard_depth = KMEM_GUARD_DEPTH;
-size_t kmem_guard_size;
-static struct uvm_kmguard kmem_guard;
-static void *kmem_freecheck;
+static bool kmem_guard_enabled;
+static struct kmem_guard kmem_guard;
#endif /* defined(KMEM_GUARD) */
CTASSERT(KM_SLEEP == PR_WAITOK);
@@ -246,8 +257,8 @@
KASSERT(requested_size > 0);
#ifdef KMEM_GUARD
- if (requested_size <= kmem_guard_size) {
- return uvm_kmguard_alloc(&kmem_guard, requested_size,
+ if (kmem_guard_enabled) {
+ return kmem_guard_alloc(&kmem_guard, requested_size,
(kmflags & KM_SLEEP) != 0);
}
#endif
@@ -324,8 +335,8 @@
KASSERT(requested_size > 0);
#ifdef KMEM_GUARD
- if (requested_size <= kmem_guard_size) {
- uvm_kmguard_free(&kmem_guard, requested_size, p);
+ if (kmem_guard_enabled) {
+ kmem_guard_free(&kmem_guard, requested_size, p);
return;
}
#endif
@@ -372,7 +383,6 @@
void *
kmem_alloc(size_t size, km_flag_t kmflags)
{
-
KASSERTMSG((!cpu_intr_p() && !cpu_softintr_p()),
"kmem(9) should not be used from the interrupt context");
return kmem_intr_alloc(size, kmflags);
@@ -386,7 +396,6 @@
void *
kmem_zalloc(size_t size, km_flag_t kmflags)
{
-
KASSERTMSG((!cpu_intr_p() && !cpu_softintr_p()),
"kmem(9) should not be used from the interrupt context");
return kmem_intr_zalloc(size, kmflags);
@@ -400,7 +409,6 @@
void
kmem_free(void *p, size_t size)
{
-
KASSERT(!cpu_intr_p());
KASSERT(!cpu_softintr_p());
kmem_intr_free(p, size);
@@ -466,9 +474,8 @@
void
kmem_init(void)
{
-
#ifdef KMEM_GUARD
- uvm_kmguard_init(&kmem_guard, &kmem_guard_depth, &kmem_guard_size,
+ kmem_guard_enabled = kmem_guard_init(&kmem_guard, kmem_guard_depth,
kmem_va_arena);
#endif
kmem_cache_maxidx = kmem_create_caches(kmem_cache_sizes,
@@ -480,8 +487,32 @@
size_t
kmem_roundup_size(size_t size)
{
+ return (size + (KMEM_ALIGN - 1)) & ~(KMEM_ALIGN - 1);
+}
- return (size + (KMEM_ALIGN - 1)) & ~(KMEM_ALIGN - 1);
+/*
+ * Used to dynamically allocate a string with kmem, according to the format.
+ */
+char *
+kmem_asprintf(const char *fmt, ...)
+{
+ int size __diagused, len;
+ va_list va;
+ char *str;
+
+ va_start(va, fmt);
+ len = vsnprintf(NULL, 0, fmt, va);
+ va_end(va);
+
+ str = kmem_alloc(len + 1, KM_SLEEP);
+
+ va_start(va, fmt);
+ size = vsnprintf(str, len + 1, fmt, va);
+ va_end(va);
+
+ KASSERT(size == len);
+
+ return str;
}
/* ------------------ DEBUG / DIAGNOSTIC ------------------ */
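
In passing: kmem_asprintf(), moved above, allocates exactly len + 1 bytes,
so the matching free must hand back the same size. A minimal usage sketch
(the format string and argument are made up for illustration):

    char *str;

    /* KM_SLEEP allocation: never returns NULL. */
    str = kmem_asprintf("example%d", 42);

    /* ... use str ... */

    /* kmem(9) frees by size; pass the exact allocated length. */
    kmem_free(str, strlen(str) + 1);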
@@ -626,27 +657,162 @@
#endif /* defined(KMEM_REDZONE) */
+#if defined(KMEM_GUARD)
/*
- * Used to dynamically allocate string with kmem accordingly to format.
+ * The ultimate memory allocator for debugging, baby. It tries to catch:
+ *
+ * 1. Overflow, in realtime. A guard page sits immediately after the
+ * requested area; a read/write overflow therefore triggers a page
+ * fault.
+ * 2. Invalid pointer/size passed, at free. A kmem_header structure sits
+ * just before the requested area, and holds the allocated size. Any
+ * mismatch with the size given at free triggers a panic.
+ * 3. Underflow, at free. If an underflow occurs, the kmem header will be
+ * modified, and 2. will trigger a panic.
+ * 4. Use-after-free. When freeing, the memory is unmapped, and depending
+ * on the value of kmem_guard_depth, the kernel will more or less delay
+ * the recycling of that memory. This means that any subsequent read/write
+ * access to the memory will trigger a page fault, provided it hasn't been
+ * recycled yet.
*/
-char *
-kmem_asprintf(const char *fmt, ...)
+
+#include <sys/atomic.h>
+#include <uvm/uvm.h>
+
+static bool
+kmem_guard_init(struct kmem_guard *kg, u_int depth, vmem_t *vm)
{
- int size __diagused, len;
- va_list va;
- char *str;
+ vaddr_t va;
+
+ /* If not enabled, we have nothing to do. */
+ if (depth == 0) {
+ return false;
+ }
+ depth = roundup(depth, PAGE_SIZE / sizeof(void *));
+ KASSERT(depth != 0);
+
+ /*
+ * Allocate fifo.
+ */
+ va = uvm_km_alloc(kernel_map, depth * sizeof(void *), PAGE_SIZE,
+ UVM_KMF_WIRED | UVM_KMF_ZERO);
+ if (va == 0) {
+ return false;
+ }
+
+ /*
+ * Init object.
+ */
+ kg->kg_vmem = vm;
+ kg->kg_fifo = (void *)va;
+ kg->kg_depth = depth;
+ kg->kg_rotor = 0;
+
+ printf("kmem_guard(%p): depth %d\n", kg, depth);
+ return true;
+}
+
+static void *
+kmem_guard_alloc(struct kmem_guard *kg, size_t requested_size, bool waitok)
+{
+ struct vm_page *pg;
+ vm_flag_t flags;
+ vmem_addr_t va;
+ vaddr_t loopva;
+ vsize_t loopsize;
+ size_t size;
+ void **p;
+
+ /*
+ * Compute the size: take the kmem header into account, and add a guard
+ * page at the end.
+ */
+ size = round_page(requested_size + SIZE_SIZE) + PAGE_SIZE;
+
+ /* Allocate pages of kernel VA, but do not map anything in yet. */
+ flags = VM_BESTFIT | (waitok ? VM_SLEEP : VM_NOSLEEP);
+ if (vmem_alloc(kg->kg_vmem, size, flags, &va) != 0) {
+ return NULL;
+ }
- va_start(va, fmt);
- len = vsnprintf(NULL, 0, fmt, va);
- va_end(va);
+ loopva = va;
+ loopsize = size - PAGE_SIZE;
+
+ while (loopsize) {
+ pg = uvm_pagealloc(NULL, loopva, NULL, 0);
+ if (__predict_false(pg == NULL)) {
+ if (waitok) {
+ uvm_wait("kmem_guard");
+ continue;
+ } else {
+ uvm_km_pgremove_intrsafe(kernel_map, va,
+ va + size);
+ vmem_free(kg->kg_vmem, va, size);
+ return NULL;
+ }
+ }
- str = kmem_alloc(len + 1, KM_SLEEP);
+ pg->flags &= ~PG_BUSY; /* new page */
+ UVM_PAGE_OWN(pg, NULL);
+ pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg),
+ VM_PROT_READ|VM_PROT_WRITE, PMAP_KMPAGE);
+
+ loopva += PAGE_SIZE;
+ loopsize -= PAGE_SIZE;
+ }
+
+ pmap_update(pmap_kernel());
+
+ /*
+ * Offset the returned pointer so that the unmapped guard page sits
+ * immediately after the returned object.
+ */
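
The diff is truncated right at this pointer-offsetting step. To make the
technique concrete without guessing at the truncated code, here is a small
standalone userland sketch of the same guard-page idea, using
mmap(2)/mprotect(2) in place of the kernel's vmem(9)/uvm/pmap machinery.
guard_alloc() is a hypothetical name, and unlike the kernel code this
sketch does not align the returned pointer:

    /*
     * Userland illustration of KMEM_GUARD's overflow detection.
     * This is NOT the truncated remainder of the diff.
     */
    #include <sys/mman.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    static void *
    guard_alloc(size_t requested_size)
    {
        size_t pgsz = (size_t)sysconf(_SC_PAGESIZE);
        size_t mapped = (requested_size + pgsz - 1) & ~(pgsz - 1);
        char *base;

        /* Map the payload pages, plus one trailing page for the guard. */
        base = mmap(NULL, mapped + pgsz, PROT_READ | PROT_WRITE,
            MAP_ANON | MAP_PRIVATE, -1, 0);
        if (base == MAP_FAILED)
            return NULL;

        /* Make the trailing page inaccessible: overflows fault at once. */
        if (mprotect(base + mapped, pgsz, PROT_NONE) == -1) {
            (void)munmap(base, mapped + pgsz);
            return NULL;
        }

        /* Push the object up against the guard page, as the kernel does. */
        return base + (mapped - requested_size);
    }

    int
    main(void)
    {
        char *p = guard_alloc(100);

        if (p == NULL)
            return 1;
        memset(p, 'A', 100);    /* in bounds: fine */
        /* p[100] = 'B';           one byte too far: SIGSEGV on the guard */
        printf("guarded object at %p\n", (void *)p);
        return 0;
    }

The use-after-free side (item 4 in the comment above) follows the same
logic in the commit: on free the pages are unmapped, and the va is meant to
be parked in the kg_fifo ring for up to kg_depth slots before being
recycled, so stale accesses keep faulting in the meantime; per the
description, that fifo is disabled for the moment.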