[src/trunk]: src/libexec/ld.elf_so PT_GNU_RELRO segments are arranged such th...
details: https://anonhg.NetBSD.org/src/rev/0273c8368fb0
branches: trunk
changeset: 969872:0273c8368fb0
user: thorpej <thorpej%NetBSD.org@localhost>
date: Wed Mar 04 01:21:17 2020 +0000
description:
PT_GNU_RELRO segments are arranged such that their vaddr + memsz ends
on a linker common page size boundary.  However, if the common page size
used by the linker is smaller than the VM page size used by the kernel,
that boundary can fall in the middle of a VM page; when the RELRO region
is then write-protected, objects in the neighboring .data section can be
incorrectly write-protected as well, resulting in a crash.

Avoid this situation by calculating the end of the RELRO region not by
rounding memsz up to the VM page size, but by adding vaddr + memsz and
then truncating down to a VM page boundary.
Fixes PR toolchain/55043.
XXX pullup-9
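
For illustration only (not part of the commit): a minimal standalone
sketch of how the two calculations differ, assuming a 4 KiB linker
common page size and a 16 KiB kernel VM page size; the round_down()/
round_up() macros below stand in for the ones ld.elf_so derives from
the run-time page size.

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	#define VM_PAGE_SIZE	((uintptr_t)0x4000)	/* assumed 16 KiB */

	#define round_down(x)	((x) & ~(VM_PAGE_SIZE - 1))
	#define round_up(x)	round_down((x) + VM_PAGE_SIZE - 1)

	int
	main(void)
	{
		/* RELRO segment ending on a 4 KiB linker common page
		 * boundary, mid-way through a 16 KiB VM page. */
		uintptr_t vaddr = 0x20000;
		size_t memsz = 0x3000;

		/* Old: round memsz up to the VM page size.  The range
		 * then spills past vaddr + memsz into neighboring .data. */
		uintptr_t old_start = round_down(vaddr);
		uintptr_t old_end = old_start + round_up(memsz);

		/* New: truncate vaddr + memsz down to a VM page boundary,
		 * so only pages wholly inside the segment are protected. */
		uintptr_t new_start = round_down(vaddr);
		uintptr_t new_end = round_down(vaddr + memsz);

		printf("old: %#" PRIxPTR "..%#" PRIxPTR "\n", old_start, old_end);
		printf("new: %#" PRIxPTR "..%#" PRIxPTR "\n", new_start, new_end);
		return 0;
	}

With these numbers the old calculation protects 0x20000..0x24000,
spilling 0x1000 bytes past the segment into whatever shares that VM
page, while the new one protects nothing at all, since no VM page lies
entirely within the segment; that is the "unable to protect everything
we'd like" trade-off mentioned in the rtld.c comment below.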
diffstat:
 libexec/ld.elf_so/headers.c    |  10 +++++-----
 libexec/ld.elf_so/map_object.c |   9 +++++----
 libexec/ld.elf_so/rtld.c       |  22 +++++++++++++++++-----
3 files changed, 27 insertions(+), 14 deletions(-)
diffs (108 lines):
diff -r 91e8eb0ce3e0 -r 0273c8368fb0 libexec/ld.elf_so/headers.c
--- a/libexec/ld.elf_so/headers.c Tue Mar 03 23:32:58 2020 +0000
+++ b/libexec/ld.elf_so/headers.c Wed Mar 04 01:21:17 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: headers.c,v 1.67 2020/02/29 18:53:55 kamil Exp $ */
+/* $NetBSD: headers.c,v 1.68 2020/03/04 01:21:17 thorpej Exp $ */
/*
* Copyright 1996 John D. Polstra.
@@ -40,7 +40,7 @@
#include <sys/cdefs.h>
#ifndef lint
-__RCSID("$NetBSD: headers.c,v 1.67 2020/02/29 18:53:55 kamil Exp $");
+__RCSID("$NetBSD: headers.c,v 1.68 2020/03/04 01:21:17 thorpej Exp $");
#endif /* not lint */
#include <err.h>
@@ -516,9 +516,9 @@
 #ifdef GNU_RELRO
 	case PT_GNU_RELRO:
-		obj->relro_page = obj->relocbase
-		    + round_down(ph->p_vaddr);
-		obj->relro_size = round_up(ph->p_memsz);
+		/* rounding happens later. */
+		obj->relro_page = obj->relocbase + ph->p_vaddr;
+		obj->relro_size = ph->p_memsz;
 		dbg(("headers: %s %p phsize %" PRImemsz,
 		    "PT_GNU_RELRO", (void *)(uintptr_t)vaddr,
 		    ph->p_memsz));
diff -r 91e8eb0ce3e0 -r 0273c8368fb0 libexec/ld.elf_so/map_object.c
--- a/libexec/ld.elf_so/map_object.c Tue Mar 03 23:32:58 2020 +0000
+++ b/libexec/ld.elf_so/map_object.c Wed Mar 04 01:21:17 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: map_object.c,v 1.60 2019/01/06 19:44:54 joerg Exp $ */
+/* $NetBSD: map_object.c,v 1.61 2020/03/04 01:21:17 thorpej Exp $ */
/*
* Copyright 1996 John D. Polstra.
@@ -34,7 +34,7 @@
#include <sys/cdefs.h>
#ifndef lint
-__RCSID("$NetBSD: map_object.c,v 1.60 2019/01/06 19:44:54 joerg Exp $");
+__RCSID("$NetBSD: map_object.c,v 1.61 2020/03/04 01:21:17 thorpej Exp $");
#endif /* not lint */
#include <errno.h>
@@ -406,8 +406,9 @@
 	obj->relocbase = mapbase - base_vaddr;
 #ifdef GNU_RELRO
-	obj->relro_page = obj->relocbase + round_down(relro_page);
-	obj->relro_size = round_up(relro_size);
+	/* rounding happens later. */
+	obj->relro_page = obj->relocbase + relro_page;
+	obj->relro_size = relro_size;
 #endif
 	if (obj->dynamic)
diff -r 91e8eb0ce3e0 -r 0273c8368fb0 libexec/ld.elf_so/rtld.c
--- a/libexec/ld.elf_so/rtld.c Tue Mar 03 23:32:58 2020 +0000
+++ b/libexec/ld.elf_so/rtld.c Wed Mar 04 01:21:17 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: rtld.c,v 1.202 2020/02/29 04:23:05 kamil Exp $ */
+/* $NetBSD: rtld.c,v 1.203 2020/03/04 01:21:17 thorpej Exp $ */
/*
* Copyright 1996 John D. Polstra.
@@ -40,7 +40,7 @@
#include <sys/cdefs.h>
#ifndef lint
-__RCSID("$NetBSD: rtld.c,v 1.202 2020/02/29 04:23:05 kamil Exp $");
+__RCSID("$NetBSD: rtld.c,v 1.203 2020/03/04 01:21:17 thorpej Exp $");
#endif /* not lint */
#include <sys/param.h>
@@ -1773,13 +1773,25 @@
 _rtld_relro(const Obj_Entry *obj, bool wantmain)
 {
 #ifdef GNU_RELRO
-	if (obj->relro_size == 0)
+	/*
+	 * If our VM page size is larger than the page size used by the
+	 * linker when laying out the object, we could end up making data
+	 * read-only that is unintended.  Detect and avoid this situation.
+	 * It may mean we are unable to protect everything we'd like, but
+	 * it's better than crashing.
+	 */
+	uintptr_t relro_end = (uintptr_t)obj->relro_page + obj->relro_size;
+	uintptr_t relro_start = round_down((uintptr_t)obj->relro_page);
+	assert(relro_end >= relro_start);
+	size_t relro_size = round_down(relro_end) - relro_start;
+
+	if (relro_size == 0)
 		return 0;
 	if (wantmain != (obj == _rtld_objmain))
 		return 0;
-	dbg(("RELRO %s %p %zx\n", obj->path, obj->relro_page, obj->relro_size));
-	if (mprotect(obj->relro_page, obj->relro_size, PROT_READ) == -1) {
+	dbg(("RELRO %s %p %zx\n", obj->path, (void *)relro_start, relro_size));
+	if (mprotect((void *)relro_start, relro_size, PROT_READ) == -1) {
 		_rtld_error("%s: Cannot enforce relro " "protection: %s",
 		    obj->path, xstrerror(errno));
 		return -1;
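
A hedged sketch of how the same end-truncation could be applied outside
ld.elf_so: protect_relro() below is a hypothetical helper (not part of
this change) that takes the VM page size from sysconf(_SC_PAGESIZE)
instead of ld.elf_so's internal round_down() macro.

	#include <sys/mman.h>

	#include <assert.h>
	#include <errno.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	static int
	protect_relro(void *relro_page, size_t relro_size)
	{
		uintptr_t pgsz = (uintptr_t)sysconf(_SC_PAGESIZE);
		uintptr_t start = (uintptr_t)relro_page & ~(pgsz - 1);
		uintptr_t end = (uintptr_t)relro_page + relro_size;

		assert(end >= start);

		/* Truncate the end down to a VM page boundary so pages
		 * shared with neighboring writable data are left alone. */
		size_t size = (end & ~(pgsz - 1)) - start;
		if (size == 0)
			return 0;	/* nothing safely protectable */

		if (mprotect((void *)start, size, PROT_READ) == -1) {
			fprintf(stderr, "cannot enforce relro protection: %s\n",
			    strerror(errno));
			return -1;
		}
		return 0;
	}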