From: rcombs <rcombs@rcombs.me>
To: musl@lists.openwall.com
Subject: [musl] [PATCH 3/4] ldso: move (un)map_library functions to separate file
Date: Sat, 28 Mar 2020 19:19:27 -0500 [thread overview]
Message-ID: <1585441168-23444-3-git-send-email-rcombs@rcombs.me> (raw)
In-Reply-To: <1585441168-23444-1-git-send-email-rcombs@rcombs.me>
---
ldso/dynlink.c | 270 +--------------------------------------------------
ldso/map_library.h | 276 +++++++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 278 insertions(+), 268 deletions(-)
create mode 100644 ldso/map_library.h
diff --git a/ldso/dynlink.c b/ldso/dynlink.c
index 5f637fd..0e557b1 100644
--- a/ldso/dynlink.c
+++ b/ldso/dynlink.c
@@ -154,6 +154,8 @@ extern hidden void (*const __init_array_end)(void), (*const __fini_array_end)(vo
weak_alias(__init_array_start, __init_array_end);
weak_alias(__fini_array_start, __fini_array_end);
+#include "map_library.h"
+
static int dl_strcmp(const char *l, const char *r)
{
for (; *l==*r && *l; l++, r++);
@@ -161,44 +163,6 @@ static int dl_strcmp(const char *l, const char *r)
}
#define strcmp(l,r) dl_strcmp(l,r)
-/* Compute load address for a virtual address in a given dso. */
-#if DL_FDPIC
-static void *laddr(const struct dso *p, size_t v)
-{
- size_t j=0;
- if (!p->loadmap) return p->base + v;
- for (j=0; v-p->loadmap->segs[j].p_vaddr >= p->loadmap->segs[j].p_memsz; j++);
- return (void *)(v - p->loadmap->segs[j].p_vaddr + p->loadmap->segs[j].addr);
-}
-static void *laddr_pg(const struct dso *p, size_t v)
-{
- size_t j=0;
- size_t pgsz = PAGE_SIZE;
- if (!p->loadmap) return p->base + v;
- for (j=0; ; j++) {
- size_t a = p->loadmap->segs[j].p_vaddr;
- size_t b = a + p->loadmap->segs[j].p_memsz;
- a &= -pgsz;
- b += pgsz-1;
- b &= -pgsz;
- if (v-a<b-a) break;
- }
- return (void *)(v - p->loadmap->segs[j].p_vaddr + p->loadmap->segs[j].addr);
-}
-static void (*fdbarrier(void *p))()
-{
- void (*fd)();
- __asm__("" : "=r"(fd) : "0"(p));
- return fd;
-}
-#define fpaddr(p, v) fdbarrier((&(struct funcdesc){ \
- laddr(p, v), (p)->got }))
-#else
-#define laddr(p, v) (void *)((p)->base + (v))
-#define laddr_pg(p, v) laddr(p, v)
-#define fpaddr(p, v) ((void (*)())laddr(p, v))
-#endif
-
static void decode_vec(size_t *v, size_t *a, size_t cnt)
{
size_t i;
@@ -557,236 +521,6 @@ static void reclaim_gaps(struct dso *dso)
}
}
-static void *mmap_fixed(void *p, size_t n, int prot, int flags, int fd, off_t off)
-{
- static int no_map_fixed;
- char *q;
- if (!no_map_fixed) {
- q = mmap(p, n, prot, flags|MAP_FIXED, fd, off);
- if (!DL_NOMMU_SUPPORT || q != MAP_FAILED || errno != EINVAL)
- return q;
- no_map_fixed = 1;
- }
- /* Fallbacks for MAP_FIXED failure on NOMMU kernels. */
- if (flags & MAP_ANONYMOUS) {
- memset(p, 0, n);
- return p;
- }
- ssize_t r;
- if (lseek(fd, off, SEEK_SET) < 0) return MAP_FAILED;
- for (q=p; n; q+=r, off+=r, n-=r) {
- r = read(fd, q, n);
- if (r < 0 && errno != EINTR) return MAP_FAILED;
- if (!r) {
- memset(q, 0, n);
- break;
- }
- }
- return p;
-}
-
-static void unmap_library(struct dso *dso)
-{
- if (dso->loadmap) {
- size_t i;
- for (i=0; i<dso->loadmap->nsegs; i++) {
- if (!dso->loadmap->segs[i].p_memsz)
- continue;
- munmap((void *)dso->loadmap->segs[i].addr,
- dso->loadmap->segs[i].p_memsz);
- }
- free(dso->loadmap);
- } else if (dso->map && dso->map_len) {
- munmap(dso->map, dso->map_len);
- }
-}
-
-static void *map_library(int fd, struct dso *dso)
-{
- Ehdr buf[(896+sizeof(Ehdr))/sizeof(Ehdr)];
- void *allocated_buf=0;
- size_t phsize;
- size_t addr_min=SIZE_MAX, addr_max=0, map_len;
- size_t this_min, this_max;
- size_t nsegs = 0;
- off_t off_start;
- Ehdr *eh;
- Phdr *ph, *ph0;
- unsigned prot;
- unsigned char *map=MAP_FAILED, *base;
- size_t dyn=0;
- size_t tls_image=0;
- size_t i;
-
- ssize_t l = read(fd, buf, sizeof buf);
- eh = buf;
- if (l<0) return 0;
- if (l<sizeof *eh || (eh->e_type != ET_DYN && eh->e_type != ET_EXEC))
- goto noexec;
- phsize = eh->e_phentsize * eh->e_phnum;
- if (phsize > sizeof buf - sizeof *eh) {
- allocated_buf = malloc(phsize);
- if (!allocated_buf) return 0;
- l = pread(fd, allocated_buf, phsize, eh->e_phoff);
- if (l < 0) goto error;
- if (l != phsize) goto noexec;
- ph = ph0 = allocated_buf;
- } else if (eh->e_phoff + phsize > l) {
- l = pread(fd, buf+1, phsize, eh->e_phoff);
- if (l < 0) goto error;
- if (l != phsize) goto noexec;
- ph = ph0 = (void *)(buf + 1);
- } else {
- ph = ph0 = (void *)((char *)buf + eh->e_phoff);
- }
- for (i=eh->e_phnum; i; i--, ph=(void *)((char *)ph+eh->e_phentsize)) {
- if (ph->p_type == PT_DYNAMIC) {
- dyn = ph->p_vaddr;
- } else if (ph->p_type == PT_TLS) {
- tls_image = ph->p_vaddr;
- dso->tls.align = ph->p_align;
- dso->tls.len = ph->p_filesz;
- dso->tls.size = ph->p_memsz;
- } else if (ph->p_type == PT_GNU_RELRO) {
- dso->relro_start = ph->p_vaddr & -PAGE_SIZE;
- dso->relro_end = (ph->p_vaddr + ph->p_memsz) & -PAGE_SIZE;
- } else if (ph->p_type == PT_GNU_STACK) {
- if (!runtime && ph->p_memsz > __default_stacksize) {
- __default_stacksize =
- ph->p_memsz < DEFAULT_STACK_MAX ?
- ph->p_memsz : DEFAULT_STACK_MAX;
- }
- }
- if (ph->p_type != PT_LOAD) continue;
- nsegs++;
- if (ph->p_vaddr < addr_min) {
- addr_min = ph->p_vaddr;
- off_start = ph->p_offset;
- prot = (((ph->p_flags&PF_R) ? PROT_READ : 0) |
- ((ph->p_flags&PF_W) ? PROT_WRITE: 0) |
- ((ph->p_flags&PF_X) ? PROT_EXEC : 0));
- }
- if (ph->p_vaddr+ph->p_memsz > addr_max) {
- addr_max = ph->p_vaddr+ph->p_memsz;
- }
- }
- if (!dyn) goto noexec;
- if (DL_FDPIC && !(eh->e_flags & FDPIC_CONSTDISP_FLAG)) {
- dso->loadmap = calloc(1, sizeof *dso->loadmap
- + nsegs * sizeof *dso->loadmap->segs);
- if (!dso->loadmap) goto error;
- dso->loadmap->nsegs = nsegs;
- for (ph=ph0, i=0; i<nsegs; ph=(void *)((char *)ph+eh->e_phentsize)) {
- if (ph->p_type != PT_LOAD) continue;
- prot = (((ph->p_flags&PF_R) ? PROT_READ : 0) |
- ((ph->p_flags&PF_W) ? PROT_WRITE: 0) |
- ((ph->p_flags&PF_X) ? PROT_EXEC : 0));
- map = mmap(0, ph->p_memsz + (ph->p_vaddr & PAGE_SIZE-1),
- prot, MAP_PRIVATE,
- fd, ph->p_offset & -PAGE_SIZE);
- if (map == MAP_FAILED) {
- unmap_library(dso);
- goto error;
- }
- dso->loadmap->segs[i].addr = (size_t)map +
- (ph->p_vaddr & PAGE_SIZE-1);
- dso->loadmap->segs[i].p_vaddr = ph->p_vaddr;
- dso->loadmap->segs[i].p_memsz = ph->p_memsz;
- i++;
- if (prot & PROT_WRITE) {
- size_t brk = (ph->p_vaddr & PAGE_SIZE-1)
- + ph->p_filesz;
- size_t pgbrk = brk + PAGE_SIZE-1 & -PAGE_SIZE;
- size_t pgend = brk + ph->p_memsz - ph->p_filesz
- + PAGE_SIZE-1 & -PAGE_SIZE;
- if (pgend > pgbrk && mmap_fixed(map+pgbrk,
- pgend-pgbrk, prot,
- MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS,
- -1, off_start) == MAP_FAILED)
- goto error;
- memset(map + brk, 0, pgbrk-brk);
- }
- }
- map = (void *)dso->loadmap->segs[0].addr;
- map_len = 0;
- goto done_mapping;
- }
- addr_max += PAGE_SIZE-1;
- addr_max &= -PAGE_SIZE;
- addr_min &= -PAGE_SIZE;
- off_start &= -PAGE_SIZE;
- map_len = addr_max - addr_min + off_start;
- /* The first time, we map too much, possibly even more than
- * the length of the file. This is okay because we will not
- * use the invalid part; we just need to reserve the right
- * amount of virtual address space to map over later. */
- map = DL_NOMMU_SUPPORT
- ? mmap((void *)addr_min, map_len, PROT_READ|PROT_WRITE|PROT_EXEC,
- MAP_PRIVATE|MAP_ANONYMOUS, -1, 0)
- : mmap((void *)addr_min, map_len, prot,
- MAP_PRIVATE, fd, off_start);
- if (map==MAP_FAILED) goto error;
- dso->map = map;
- dso->map_len = map_len;
- /* If the loaded file is not relocatable and the requested address is
- * not available, then the load operation must fail. */
- if (eh->e_type != ET_DYN && addr_min && map!=(void *)addr_min) {
- errno = EBUSY;
- goto error;
- }
- base = map - addr_min;
- dso->phdr = 0;
- dso->phnum = 0;
- for (ph=ph0, i=eh->e_phnum; i; i--, ph=(void *)((char *)ph+eh->e_phentsize)) {
- if (ph->p_type != PT_LOAD) continue;
- /* Check if the programs headers are in this load segment, and
- * if so, record the address for use by dl_iterate_phdr. */
- if (!dso->phdr && eh->e_phoff >= ph->p_offset
- && eh->e_phoff+phsize <= ph->p_offset+ph->p_filesz) {
- dso->phdr = (void *)(base + ph->p_vaddr
- + (eh->e_phoff-ph->p_offset));
- dso->phnum = eh->e_phnum;
- dso->phentsize = eh->e_phentsize;
- }
- this_min = ph->p_vaddr & -PAGE_SIZE;
- this_max = ph->p_vaddr+ph->p_memsz+PAGE_SIZE-1 & -PAGE_SIZE;
- off_start = ph->p_offset & -PAGE_SIZE;
- prot = (((ph->p_flags&PF_R) ? PROT_READ : 0) |
- ((ph->p_flags&PF_W) ? PROT_WRITE: 0) |
- ((ph->p_flags&PF_X) ? PROT_EXEC : 0));
- /* Reuse the existing mapping for the lowest-address LOAD */
- if ((ph->p_vaddr & -PAGE_SIZE) != addr_min || DL_NOMMU_SUPPORT)
- if (mmap_fixed(base+this_min, this_max-this_min, prot, MAP_PRIVATE|MAP_FIXED, fd, off_start) == MAP_FAILED)
- goto error;
- if (ph->p_memsz > ph->p_filesz && (ph->p_flags&PF_W)) {
- size_t brk = (size_t)base+ph->p_vaddr+ph->p_filesz;
- size_t pgbrk = brk+PAGE_SIZE-1 & -PAGE_SIZE;
- memset((void *)brk, 0, pgbrk-brk & PAGE_SIZE-1);
- if (pgbrk-(size_t)base < this_max && mmap_fixed((void *)pgbrk, (size_t)base+this_max-pgbrk, prot, MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0) == MAP_FAILED)
- goto error;
- }
- }
- for (i=0; ((size_t *)(base+dyn))[i]; i+=2)
- if (((size_t *)(base+dyn))[i]==DT_TEXTREL) {
- if (mprotect(map, map_len, PROT_READ|PROT_WRITE|PROT_EXEC)
- && errno != ENOSYS)
- goto error;
- break;
- }
-done_mapping:
- dso->base = base;
- dso->dynv = laddr(dso, dyn);
- if (dso->tls.size) dso->tls.image = laddr(dso, tls_image);
- free(allocated_buf);
- return map;
-noexec:
- errno = ENOEXEC;
-error:
- if (map!=MAP_FAILED) unmap_library(dso);
- free(allocated_buf);
- return 0;
-}
-
static int path_open(const char *name, const char *s, char *buf, size_t buf_size)
{
size_t l;
diff --git a/ldso/map_library.h b/ldso/map_library.h
new file mode 100644
index 0000000..d685471
--- /dev/null
+++ b/ldso/map_library.h
@@ -0,0 +1,276 @@
+#include <errno.h>
+#include <features.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <unistd.h>
+#include "dynlink.h"
+#include "pthread_impl.h"
+
+/* Compute load address for a virtual address in a given dso. */
+#if DL_FDPIC
+static inline void *laddr(const struct dso *p, size_t v)
+{
+ size_t j=0;
+ if (!p->loadmap) return p->base + v;
+ for (j=0; v-p->loadmap->segs[j].p_vaddr >= p->loadmap->segs[j].p_memsz; j++);
+ return (void *)(v - p->loadmap->segs[j].p_vaddr + p->loadmap->segs[j].addr);
+}
+static inline void *laddr_pg(const struct dso *p, size_t v)
+{
+ size_t j=0;
+ size_t pgsz = PAGE_SIZE;
+ if (!p->loadmap) return p->base + v;
+ for (j=0; ; j++) {
+ size_t a = p->loadmap->segs[j].p_vaddr;
+ size_t b = a + p->loadmap->segs[j].p_memsz;
+ a &= -pgsz;
+ b += pgsz-1;
+ b &= -pgsz;
+ if (v-a<b-a) break;
+ }
+ return (void *)(v - p->loadmap->segs[j].p_vaddr + p->loadmap->segs[j].addr);
+}
+static void (*fdbarrier(void *p))()
+{
+ void (*fd)();
+ __asm__("" : "=r"(fd) : "0"(p));
+ return fd;
+}
+#define fpaddr(p, v) fdbarrier((&(struct funcdesc){ \
+ laddr(p, v), (p)->got }))
+#else
+#define laddr(p, v) (void *)((p)->base + (v))
+#define laddr_pg(p, v) laddr(p, v)
+#define fpaddr(p, v) ((void (*)())laddr(p, v))
+#endif
+
+static void *mmap_fixed(void *p, size_t n, int prot, int flags, int fd, off_t off)
+{
+ static int no_map_fixed;
+ char *q;
+ if (!no_map_fixed) {
+ q = mmap(p, n, prot, flags|MAP_FIXED, fd, off);
+ if (!DL_NOMMU_SUPPORT || q != MAP_FAILED || errno != EINVAL)
+ return q;
+ no_map_fixed = 1;
+ }
+ /* Fallbacks for MAP_FIXED failure on NOMMU kernels. */
+ if (flags & MAP_ANONYMOUS) {
+ memset(p, 0, n);
+ return p;
+ }
+ ssize_t r;
+ if (lseek(fd, off, SEEK_SET) < 0) return MAP_FAILED;
+ for (q=p; n; q+=r, off+=r, n-=r) {
+ r = read(fd, q, n);
+ if (r < 0 && errno != EINTR) return MAP_FAILED;
+ if (!r) {
+ memset(q, 0, n);
+ break;
+ }
+ }
+ return p;
+}
+
+static inline void unmap_library(struct dso *dso)
+{
+ if (dso->loadmap) {
+ size_t i;
+ for (i=0; i<dso->loadmap->nsegs; i++) {
+ if (!dso->loadmap->segs[i].p_memsz)
+ continue;
+ munmap((void *)dso->loadmap->segs[i].addr,
+ dso->loadmap->segs[i].p_memsz);
+ }
+ free(dso->loadmap);
+ } else if (dso->map && dso->map_len) {
+ munmap(dso->map, dso->map_len);
+ }
+}
+
+static inline void *map_library(int fd, struct dso *dso)
+{
+ Ehdr buf[(896+sizeof(Ehdr))/sizeof(Ehdr)];
+ void *allocated_buf=0;
+ size_t phsize;
+ size_t ph_allocated_size;
+ size_t addr_min=SIZE_MAX, addr_max=0, map_len;
+ size_t this_min, this_max;
+ size_t nsegs = 0;
+ off_t off_start;
+ Ehdr *eh;
+ Phdr *ph, *ph0;
+ unsigned prot;
+ unsigned char *map=MAP_FAILED, *base;
+ size_t dyn=0;
+ size_t i;
+ size_t tls_image=0;
+
+ ssize_t l = read(fd, buf, sizeof buf);
+ eh = buf;
+ if (l<0) return 0;
+ if (l<sizeof *eh || (eh->e_type != ET_DYN && eh->e_type != ET_EXEC))
+ goto noexec;
+ phsize = eh->e_phentsize * eh->e_phnum;
+ if (phsize > sizeof buf - sizeof *eh) {
+ allocated_buf = malloc(phsize);
+ if (!allocated_buf) return 0;
+ l = pread(fd, allocated_buf, phsize, eh->e_phoff);
+ if (l < 0) goto error;
+ if (l != phsize) goto noexec;
+ ph = ph0 = allocated_buf;
+ } else if (eh->e_phoff + phsize > l) {
+ l = pread(fd, buf+1, phsize, eh->e_phoff);
+ if (l < 0) goto error;
+ if (l != phsize) goto noexec;
+ ph = ph0 = (void *)(buf + 1);
+ } else {
+ ph = ph0 = (void *)((char *)buf + eh->e_phoff);
+ }
+ for (i=eh->e_phnum; i; i--, ph=(void *)((char *)ph+eh->e_phentsize)) {
+ if (ph->p_type == PT_DYNAMIC) {
+ dyn = ph->p_vaddr;
+ } else if (ph->p_type == PT_TLS) {
+ tls_image = ph->p_vaddr;
+ dso->tls.align = ph->p_align;
+ dso->tls.len = ph->p_filesz;
+ dso->tls.size = ph->p_memsz;
+ } else if (ph->p_type == PT_GNU_RELRO) {
+ dso->relro_start = ph->p_vaddr & -PAGE_SIZE;
+ dso->relro_end = (ph->p_vaddr + ph->p_memsz) & -PAGE_SIZE;
+ } else if (ph->p_type == PT_GNU_STACK) {
+ if (!runtime && ph->p_memsz > __default_stacksize) {
+ __default_stacksize =
+ ph->p_memsz < DEFAULT_STACK_MAX ?
+ ph->p_memsz : DEFAULT_STACK_MAX;
+ }
+ }
+ if (ph->p_type != PT_LOAD) continue;
+ nsegs++;
+ if (ph->p_vaddr < addr_min) {
+ addr_min = ph->p_vaddr;
+ off_start = ph->p_offset;
+ prot = (((ph->p_flags&PF_R) ? PROT_READ : 0) |
+ ((ph->p_flags&PF_W) ? PROT_WRITE: 0) |
+ ((ph->p_flags&PF_X) ? PROT_EXEC : 0));
+ }
+ if (ph->p_vaddr+ph->p_memsz > addr_max) {
+ addr_max = ph->p_vaddr+ph->p_memsz;
+ }
+ }
+ if (!dyn) goto noexec;
+ if (DL_FDPIC && !(eh->e_flags & FDPIC_CONSTDISP_FLAG)) {
+ dso->loadmap = calloc(1, sizeof *dso->loadmap
+ + nsegs * sizeof *dso->loadmap->segs);
+ if (!dso->loadmap) goto error;
+ dso->loadmap->nsegs = nsegs;
+ for (ph=ph0, i=0; i<nsegs; ph=(void *)((char *)ph+eh->e_phentsize)) {
+ if (ph->p_type != PT_LOAD) continue;
+ prot = (((ph->p_flags&PF_R) ? PROT_READ : 0) |
+ ((ph->p_flags&PF_W) ? PROT_WRITE: 0) |
+ ((ph->p_flags&PF_X) ? PROT_EXEC : 0));
+ map = mmap(0, ph->p_memsz + (ph->p_vaddr & PAGE_SIZE-1),
+ prot, MAP_PRIVATE,
+ fd, ph->p_offset & -PAGE_SIZE);
+ if (map == MAP_FAILED) {
+ unmap_library(dso);
+ goto error;
+ }
+ dso->loadmap->segs[i].addr = (size_t)map +
+ (ph->p_vaddr & PAGE_SIZE-1);
+ dso->loadmap->segs[i].p_vaddr = ph->p_vaddr;
+ dso->loadmap->segs[i].p_memsz = ph->p_memsz;
+ i++;
+ if (prot & PROT_WRITE) {
+ size_t brk = (ph->p_vaddr & PAGE_SIZE-1)
+ + ph->p_filesz;
+ size_t pgbrk = brk + PAGE_SIZE-1 & -PAGE_SIZE;
+ size_t pgend = brk + ph->p_memsz - ph->p_filesz
+ + PAGE_SIZE-1 & -PAGE_SIZE;
+ if (pgend > pgbrk && mmap_fixed(map+pgbrk,
+ pgend-pgbrk, prot,
+ MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS,
+ -1, off_start) == MAP_FAILED)
+ goto error;
+ memset(map + brk, 0, pgbrk-brk);
+ }
+ }
+ map = (void *)dso->loadmap->segs[0].addr;
+ map_len = 0;
+ goto done_mapping;
+ }
+ addr_max += PAGE_SIZE-1;
+ addr_max &= -PAGE_SIZE;
+ addr_min &= -PAGE_SIZE;
+ off_start &= -PAGE_SIZE;
+ map_len = addr_max - addr_min + off_start;
+ /* The first time, we map too much, possibly even more than
+ * the length of the file. This is okay because we will not
+ * use the invalid part; we just need to reserve the right
+ * amount of virtual address space to map over later. */
+ map = DL_NOMMU_SUPPORT
+ ? mmap((void *)addr_min, map_len, PROT_READ|PROT_WRITE|PROT_EXEC,
+ MAP_PRIVATE|MAP_ANONYMOUS, -1, 0)
+ : mmap((void *)addr_min, map_len, prot,
+ MAP_PRIVATE, fd, off_start);
+ if (map==MAP_FAILED) goto error;
+ dso->map = map;
+ dso->map_len = map_len;
+ /* If the loaded file is not relocatable and the requested address is
+ * not available, then the load operation must fail. */
+ if (eh->e_type != ET_DYN && addr_min && map!=(void *)addr_min) {
+ errno = EBUSY;
+ goto error;
+ }
+ base = map - addr_min;
+ dso->phdr = 0;
+ dso->phnum = 0;
+ for (ph=ph0, i=eh->e_phnum; i; i--, ph=(void *)((char *)ph+eh->e_phentsize)) {
+ if (ph->p_type != PT_LOAD) continue;
+ /* Check if the programs headers are in this load segment, and
+ * if so, record the address for use by dl_iterate_phdr. */
+ if (!dso->phdr && eh->e_phoff >= ph->p_offset
+ && eh->e_phoff+phsize <= ph->p_offset+ph->p_filesz) {
+ dso->phdr = (void *)(base + ph->p_vaddr
+ + (eh->e_phoff-ph->p_offset));
+ dso->phnum = eh->e_phnum;
+ dso->phentsize = eh->e_phentsize;
+ }
+ this_min = ph->p_vaddr & -PAGE_SIZE;
+ this_max = ph->p_vaddr+ph->p_memsz+PAGE_SIZE-1 & -PAGE_SIZE;
+ off_start = ph->p_offset & -PAGE_SIZE;
+ prot = (((ph->p_flags&PF_R) ? PROT_READ : 0) |
+ ((ph->p_flags&PF_W) ? PROT_WRITE: 0) |
+ ((ph->p_flags&PF_X) ? PROT_EXEC : 0));
+ /* Reuse the existing mapping for the lowest-address LOAD */
+ if ((ph->p_vaddr & -PAGE_SIZE) != addr_min || DL_NOMMU_SUPPORT)
+ if (mmap_fixed(base+this_min, this_max-this_min, prot, MAP_PRIVATE|MAP_FIXED, fd, off_start) == MAP_FAILED)
+ goto error;
+ if (ph->p_memsz > ph->p_filesz && (ph->p_flags&PF_W)) {
+ size_t brk = (size_t)base+ph->p_vaddr+ph->p_filesz;
+ size_t pgbrk = brk+PAGE_SIZE-1 & -PAGE_SIZE;
+ memset((void *)brk, 0, pgbrk-brk & PAGE_SIZE-1);
+ if (pgbrk-(size_t)base < this_max && mmap_fixed((void *)pgbrk, (size_t)base+this_max-pgbrk, prot, MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0) == MAP_FAILED)
+ goto error;
+ }
+ }
+ for (i=0; ((size_t *)(base+dyn))[i]; i+=2)
+ if (((size_t *)(base+dyn))[i]==DT_TEXTREL) {
+ if (mprotect(map, map_len, PROT_READ|PROT_WRITE|PROT_EXEC)
+ && errno != ENOSYS)
+ goto error;
+ break;
+ }
+done_mapping:
+ dso->base = base;
+ dso->dynv = laddr(dso, dyn);
+ if (dso->tls.size) dso->tls.image = laddr(dso, tls_image);
+ free(allocated_buf);
+ return map;
+noexec:
+ errno = ENOEXEC;
+error:
+ if (map!=MAP_FAILED) unmap_library(dso);
+ free(allocated_buf);
+ return 0;
+}
--
2.7.4
next prev parent reply other threads:[~2020-03-29 0:31 UTC|newest]
Thread overview: 6+ messages / expand[flat|nested] mbox.gz Atom feed top
2020-03-29 0:19 [musl] [PATCH 1/4] ldso: add option to rewrite the argv block rcombs
2020-03-29 0:19 ` [musl] [PATCH 2/4] ldso: when run via CLI, set auxv as if the app was loaded by the kernel rcombs
2020-03-29 0:19 ` rcombs [this message]
2020-03-29 3:07 ` [musl] [PATCH 3/4] ldso: move (un)map_library functions to separate file Rich Felker
2020-03-29 0:19 ` [musl] [PATCH 4/4] crt: add dcrt1, with support for locating the dynamic loader at runtime rcombs
2020-03-29 2:54 ` [musl] [PATCH 1/4] ldso: add option to rewrite the argv block Rich Felker
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1585441168-23444-3-git-send-email-rcombs@rcombs.me \
--to=rcombs@rcombs.me \
--cc=musl@lists.openwall.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
Code repositories for project(s) associated with this public inbox
https://git.vuxu.org/mirror/musl/
This is a public inbox; see mirroring instructions
for how to clone and mirror all data and code used for this inbox,
as well as URLs for NNTP newsgroup(s).