tech-toolchain archive
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]
[RFC/WIP PATCH] [ld.elf_so] Support ELF files with number of segments != 2
Update the mapping code to support ELF files that do not follow strictly
the layout used by GNU ld on NetBSD. In particular, allow up to 3
segments, and do not rely on specific order.
The code is based on the solutions used by FreeBSD. However, I have
tried to keep the necessary changes to the bare minimum. Technically,
most of the code should work with any number of segments. However,
to avoid explicitly allocating memory it is currently bound to up to 3
segments which is the maximum exhibited by lld currently (one R+X
segment, one R+W segment and one R segment). If it ever becomes
necessary to support more segments, the limit can be trivially raised.
The code no longer assumes that the first segment is the code segment
and the second one is the data segment. Instead, it iterates over the
complete list of segments and maps all of them. BSS is detected by
comparing the size in the file with the size in memory, the way FreeBSD
does it. The code (text) segment is identified by the presence of the
execute (PF_X) flag.
Currently, the code does not unmap the header, as the segment data from
it is read afterwards to perform the segment mappings. Furthermore,
the code for unmapping gaps between segments is not implemented yet.
Once I receive feedback on the correctness of the approach, I will look
into implementing the missing features.
---
libexec/ld.elf_so/map_object.c | 93 ++++++++++++++++++++--------------
1 file changed, 54 insertions(+), 39 deletions(-)
diff --git a/libexec/ld.elf_so/map_object.c b/libexec/ld.elf_so/map_object.c
index b878bfaa0fa3..cff756558b49 100644
--- a/libexec/ld.elf_so/map_object.c
+++ b/libexec/ld.elf_so/map_object.c
@@ -71,7 +71,7 @@ _rtld_map_object(const char *path, int fd, const struct stat *sb)
#endif
size_t phsize;
Elf_Phdr *phlimit;
- Elf_Phdr *segs[2];
+ Elf_Phdr *segs[3];
int nsegs;
caddr_t mapbase = MAP_FAILED;
size_t mapsize = 0;
@@ -80,8 +80,6 @@ _rtld_map_object(const char *path, int fd, const struct stat *sb)
Elf_Addr base_alignment;
Elf_Addr base_vaddr;
Elf_Addr base_vlimit;
- Elf_Addr text_vlimit;
- int text_flags;
caddr_t base_addr;
Elf_Off data_offset;
Elf_Addr data_vaddr;
@@ -93,14 +91,19 @@ _rtld_map_object(const char *path, int fd, const struct stat *sb)
#endif
Elf_Addr phdr_vaddr;
size_t phdr_memsz;
+#if 0
caddr_t gap_addr;
size_t gap_size;
+#endif
int i;
#ifdef RTLD_LOADER
Elf_Addr clear_vaddr;
caddr_t clear_addr;
size_t nclear;
#endif
+ Elf_Addr bss_vaddr;
+ caddr_t bss_addr;
+ Elf_Addr bss_vlimit;
#ifdef GNU_RELRO
Elf_Addr relro_page;
size_t relro_size;
@@ -191,7 +194,7 @@ _rtld_map_object(const char *path, int fd, const struct stat *sb)
break;
case PT_LOAD:
- if (nsegs < 2)
+ if (nsegs < 3)
segs[nsegs] = phdr;
++nsegs;
@@ -242,7 +245,7 @@ _rtld_map_object(const char *path, int fd, const struct stat *sb)
_rtld_error("%s: not dynamically linked", path);
goto bad;
}
- if (nsegs != 2) {
+ if (nsegs < 0 || nsegs > 3) {
_rtld_error("%s: wrong number of segments (%d != 2)", path,
nsegs);
goto bad;
@@ -263,18 +266,8 @@ _rtld_map_object(const char *path, int fd, const struct stat *sb)
base_alignment = segs[0]->p_align;
base_offset = round_down(segs[0]->p_offset);
base_vaddr = round_down(segs[0]->p_vaddr);
- base_vlimit = round_up(segs[1]->p_vaddr + segs[1]->p_memsz);
- text_vlimit = round_up(segs[0]->p_vaddr + segs[0]->p_memsz);
- text_flags = protflags(segs[0]->p_flags);
- data_offset = round_down(segs[1]->p_offset);
- data_vaddr = round_down(segs[1]->p_vaddr);
- data_vlimit = round_up(segs[1]->p_vaddr + segs[1]->p_filesz);
- data_flags = protflags(segs[1]->p_flags);
-#ifdef RTLD_LOADER
- clear_vaddr = segs[1]->p_vaddr + segs[1]->p_filesz;
-#endif
+ base_vlimit = round_up(segs[nsegs-1]->p_vaddr + segs[nsegs-1]->p_memsz);
- obj->textsize = text_vlimit - base_vaddr;
obj->vaddrbase = base_vaddr;
obj->isdynamic = ehdr->e_type == ET_DYN;
@@ -322,11 +315,13 @@ _rtld_map_object(const char *path, int fd, const struct stat *sb)
dbg(("%s: phdr %p phsize %zu (%s)", obj->path, obj->phdr, obj->phsize,
obj->phdr_loaded ? "loaded" : "allocated"));
+#if 0 /* TODO? */
/* Unmap header if it overlaps the first load section. */
if (base_offset < _rtld_pagesz) {
munmap(ehdr, _rtld_pagesz);
obj->ehdr = MAP_FAILED;
}
+#endif
/*
* Calculate log2 of the base section alignment.
@@ -345,7 +340,7 @@ _rtld_map_object(const char *path, int fd, const struct stat *sb)
base_addr = NULL;
#endif
mapsize = base_vlimit - base_vaddr;
- mapbase = mmap(base_addr, mapsize, text_flags,
+ mapbase = mmap(base_addr, mapsize, PROT_NONE, // was: text_flags,
mapflags | MAP_FILE | MAP_PRIVATE, fd, base_offset);
if (mapbase == MAP_FAILED) {
_rtld_error("mmap of entire address space failed: %s",
@@ -353,23 +348,51 @@ _rtld_map_object(const char *path, int fd, const struct stat *sb)
goto bad;
}
- /* Overlay the data segment onto the proper region. */
- data_addr = mapbase + (data_vaddr - base_vaddr);
- if (mmap(data_addr, data_vlimit - data_vaddr, data_flags,
- MAP_FILE | MAP_PRIVATE | MAP_FIXED, fd, data_offset) ==
- MAP_FAILED) {
- _rtld_error("mmap of data failed: %s", xstrerror(errno));
- goto bad;
- }
+ /* Overlay the individual segments onto the proper regions. */
+ for (i = 0; i < nsegs; i++) {
+ data_offset = round_down(segs[i]->p_offset);
+ data_vaddr = round_down(segs[i]->p_vaddr);
+ data_vlimit = round_up(segs[i]->p_vaddr + segs[i]->p_filesz);
+ data_flags = protflags(segs[i]->p_flags);
+
+ data_addr = mapbase + (data_vaddr - base_vaddr);
+ if (data_flags & PF_X) { /* text segment */
+ obj->textsize = data_vlimit - data_vaddr;
+ }
- /* Overlay the bss segment onto the proper region. */
- if (mmap(mapbase + data_vlimit - base_vaddr, base_vlimit - data_vlimit,
- data_flags, MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0) ==
- MAP_FAILED) {
- _rtld_error("mmap of bss failed: %s", xstrerror(errno));
- goto bad;
+ if (mmap(data_addr, data_vlimit - data_vaddr, data_flags,
+ MAP_FILE | MAP_PRIVATE | MAP_FIXED, fd, data_offset) ==
+ MAP_FAILED) {
+ _rtld_error("mmap of data failed: %s", xstrerror(errno));
+ goto bad;
+ }
+
+ if (segs[i]->p_filesz != segs[i]->p_memsz) {
+#ifdef RTLD_LOADER
+ clear_vaddr = segs[i]->p_vaddr + segs[i]->p_filesz;
+
+ /* Clear any BSS in the last page of the data segment. */
+ clear_addr = mapbase + (clear_vaddr - base_vaddr);
+ if ((nclear = data_vlimit - clear_vaddr) > 0)
+ memset(clear_addr, 0, nclear);
+#endif
+
+ /* Overlay the bss segment onto the proper region. */
+ bss_vaddr = data_vlimit;
+ bss_addr = mapbase + (bss_vaddr - base_vaddr);
+ bss_vlimit = round_up(segs[i]->p_vaddr + segs[i]->p_memsz);
+ if (bss_vlimit > bss_vaddr) {
+ if (mmap(bss_addr, bss_vlimit - bss_vaddr, data_flags,
+ MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0) ==
+ MAP_FAILED) {
+ _rtld_error("mmap of bss failed: %s", xstrerror(errno));
+ goto bad;
+ }
+ }
+ }
}
+#if 0 /* TODO */
/* Unmap the gap between the text and data. */
gap_addr = mapbase + round_up(text_vlimit - base_vaddr);
gap_size = data_addr - gap_addr;
@@ -378,14 +401,6 @@ _rtld_map_object(const char *path, int fd, const struct stat *sb)
xstrerror(errno));
goto bad;
}
-
-#ifdef RTLD_LOADER
- /* Clear any BSS in the last page of the data segment. */
- clear_addr = mapbase + (clear_vaddr - base_vaddr);
- if ((nclear = data_vlimit - clear_vaddr) > 0)
- memset(clear_addr, 0, nclear);
-
- /* Non-file portion of BSS mapped above. */
#endif
#if defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II)
--
2.20.1
Home |
Main Index |
Thread Index |
Old Index