From mboxrd@z Thu Jan 1 00:00:00 1970
From: rcombs <rcombs@rcombs.me>
To: musl@lists.openwall.com
Date: Sat, 28 Mar 2020 19:19:27 -0500
Message-Id: <1585441168-23444-3-git-send-email-rcombs@rcombs.me>
X-Mailer: git-send-email 2.7.4
In-Reply-To: <1585441168-23444-1-git-send-email-rcombs@rcombs.me>
References: <1585441168-23444-1-git-send-email-rcombs@rcombs.me>
Subject: [musl] [PATCH 3/4] ldso: move (un)map_library functions to separate file

---
 ldso/dynlink.c     | 270 +--------------------------------------------------
 ldso/map_library.h | 276 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 278 insertions(+), 268 deletions(-)
 create mode 100644 ldso/map_library.h

diff --git a/ldso/dynlink.c b/ldso/dynlink.c
index 5f637fd..0e557b1 100644
--- a/ldso/dynlink.c
+++ b/ldso/dynlink.c
@@ -154,6 +154,8 @@ extern hidden void (*const __init_array_end)(void), (*const __fini_array_end)(vo
 weak_alias(__init_array_start, __init_array_end);
 weak_alias(__fini_array_start, __fini_array_end);
 
+#include "map_library.h"
+
 static int dl_strcmp(const char *l, const char *r)
 {
 	for (; *l==*r && *l; l++, r++);
@@ -161,44 +163,6 @@ static int dl_strcmp(const char *l, const char *r)
 }
 #define strcmp(l,r) dl_strcmp(l,r)
 
-/* Compute load address for a virtual address in a given dso. */
-#if DL_FDPIC
-static void *laddr(const struct dso *p, size_t v)
-{
-	size_t j=0;
-	if (!p->loadmap) return p->base + v;
-	for (j=0; v-p->loadmap->segs[j].p_vaddr >= p->loadmap->segs[j].p_memsz; j++);
-	return (void *)(v - p->loadmap->segs[j].p_vaddr + p->loadmap->segs[j].addr);
-}
-static void *laddr_pg(const struct dso *p, size_t v)
-{
-	size_t j=0;
-	size_t pgsz = PAGE_SIZE;
-	if (!p->loadmap) return p->base + v;
-	for (j=0; ; j++) {
-		size_t a = p->loadmap->segs[j].p_vaddr;
-		size_t b = a + p->loadmap->segs[j].p_memsz;
-		a &= -pgsz;
-		b += pgsz-1;
-		b &= -pgsz;
-		if (v-a<b-a) break;
-	}
-	return (void *)(v - p->loadmap->segs[j].p_vaddr + p->loadmap->segs[j].addr);
-}
-static void (*fdbarrier(void *p))()
-{
-	void (*fd)();
-	__asm__("" : "=r"(fd) : "0"(p));
-	return fd;
-}
-#define fpaddr(p, v) fdbarrier((&(struct funcdesc){ \
-	laddr(p, v), (p)->got }))
-#else
-#define laddr(p, v) (void *)((p)->base + (v))
-#define laddr_pg(p, v) laddr(p, v)
-#define fpaddr(p, v) ((void (*)())laddr(p, v))
-#endif
-
 static void decode_vec(size_t *v, size_t *a, size_t cnt)
 {
 	size_t i;
@@ -557,236 +521,6 @@ static void reclaim_gaps(struct dso *dso)
 	}
 }
 
-static void *mmap_fixed(void *p, size_t n, int prot, int flags, int fd, off_t off)
-{
-	static int no_map_fixed;
-	char *q;
-	if (!no_map_fixed) {
-		q = mmap(p, n, prot, flags|MAP_FIXED, fd, off);
-		if (!DL_NOMMU_SUPPORT || q != MAP_FAILED || errno != EINVAL)
-			return q;
-		no_map_fixed = 1;
-	}
-	/* Fallbacks for MAP_FIXED failure on NOMMU kernels. */
-	if (flags & MAP_ANONYMOUS) {
-		memset(p, 0, n);
-		return p;
-	}
-	ssize_t r;
-	if (lseek(fd, off, SEEK_SET) < 0) return MAP_FAILED;
-	for (q=p; n; q+=r, off+=r, n-=r) {
-		r = read(fd, q, n);
-		if (r < 0 && errno != EINTR) return MAP_FAILED;
-		if (!r) {
-			memset(q, 0, n);
-			break;
-		}
-	}
-	return p;
-}
-
-static void unmap_library(struct dso *dso)
-{
-	if (dso->loadmap) {
-		size_t i;
-		for (i=0; i<dso->loadmap->nsegs; i++) {
-			if (!dso->loadmap->segs[i].p_memsz)
-				continue;
-			munmap((void *)dso->loadmap->segs[i].addr,
-				dso->loadmap->segs[i].p_memsz);
-		}
-		free(dso->loadmap);
-	} else if (dso->map && dso->map_len) {
-		munmap(dso->map, dso->map_len);
-	}
-}
-
-static void *map_library(int fd, struct dso *dso)
-{
-	Ehdr buf[(896+sizeof(Ehdr))/sizeof(Ehdr)];
-	void *allocated_buf=0;
-	size_t phsize;
-	size_t addr_min=SIZE_MAX, addr_max=0, map_len;
-	size_t this_min, this_max;
-	size_t nsegs = 0;
-	off_t off_start;
-	Ehdr *eh;
-	Phdr *ph, *ph0;
-	unsigned prot;
-	unsigned char *map=MAP_FAILED, *base;
-	size_t dyn=0;
-	size_t tls_image=0;
-	size_t i;
-
-	ssize_t l = read(fd, buf, sizeof buf);
-	eh = buf;
-	if (l<0) return 0;
-	if (l<sizeof *eh || (eh->e_type != ET_DYN && eh->e_type != ET_EXEC))
-		goto noexec;
-	phsize = eh->e_phentsize * eh->e_phnum;
-	if (phsize > sizeof buf - sizeof *eh) {
-		allocated_buf = malloc(phsize);
-		if (!allocated_buf) return 0;
-		l = pread(fd, allocated_buf, phsize, eh->e_phoff);
-		if (l < 0) goto error;
-		if (l != phsize) goto noexec;
-		ph = ph0 = allocated_buf;
-	} else if (eh->e_phoff + phsize > l) {
-		l = pread(fd, buf+1, phsize, eh->e_phoff);
-		if (l < 0) goto error;
-		if (l != phsize) goto noexec;
-		ph = ph0 = (void *)(buf + 1);
-	} else {
-		ph = ph0 = (void *)((char *)buf + eh->e_phoff);
-	}
-	for (i=eh->e_phnum; i; i--, ph=(void *)((char *)ph+eh->e_phentsize)) {
-		if (ph->p_type == PT_DYNAMIC) {
-			dyn = ph->p_vaddr;
-		} else if (ph->p_type == PT_TLS) {
-			tls_image = ph->p_vaddr;
-			dso->tls.align = ph->p_align;
-			dso->tls.len = ph->p_filesz;
-			dso->tls.size = ph->p_memsz;
-		} else if (ph->p_type == PT_GNU_RELRO) {
-			dso->relro_start = ph->p_vaddr & -PAGE_SIZE;
-			dso->relro_end = (ph->p_vaddr + ph->p_memsz) & -PAGE_SIZE;
-		} else if (ph->p_type == PT_GNU_STACK) {
-			if (!runtime && ph->p_memsz > __default_stacksize) {
-				__default_stacksize =
-					ph->p_memsz < DEFAULT_STACK_MAX ?
-					ph->p_memsz : DEFAULT_STACK_MAX;
-			}
-		}
-		if (ph->p_type != PT_LOAD) continue;
-		nsegs++;
-		if (ph->p_vaddr < addr_min) {
-			addr_min = ph->p_vaddr;
-			off_start = ph->p_offset;
-			prot = (((ph->p_flags&PF_R) ? PROT_READ : 0) |
-				((ph->p_flags&PF_W) ? PROT_WRITE: 0) |
-				((ph->p_flags&PF_X) ? PROT_EXEC : 0));
-		}
-		if (ph->p_vaddr+ph->p_memsz > addr_max) {
-			addr_max = ph->p_vaddr+ph->p_memsz;
-		}
-	}
-	if (!dyn) goto noexec;
-	if (DL_FDPIC && !(eh->e_flags & FDPIC_CONSTDISP_FLAG)) {
-		dso->loadmap = calloc(1, sizeof *dso->loadmap
-			+ nsegs * sizeof *dso->loadmap->segs);
-		if (!dso->loadmap) goto error;
-		dso->loadmap->nsegs = nsegs;
-		for (ph=ph0, i=0; i<nsegs; ph=(void *)((char *)ph+eh->e_phentsize)) {
-			if (ph->p_type != PT_LOAD) continue;
-			prot = (((ph->p_flags&PF_R) ? PROT_READ : 0) |
-				((ph->p_flags&PF_W) ? PROT_WRITE: 0) |
-				((ph->p_flags&PF_X) ? PROT_EXEC : 0));
-			map = mmap(0, ph->p_memsz + (ph->p_vaddr & PAGE_SIZE-1),
-				prot, MAP_PRIVATE,
-				fd, ph->p_offset & -PAGE_SIZE);
-			if (map == MAP_FAILED) {
-				unmap_library(dso);
-				goto error;
-			}
-			dso->loadmap->segs[i].addr = (size_t)map +
-				(ph->p_vaddr & PAGE_SIZE-1);
-			dso->loadmap->segs[i].p_vaddr = ph->p_vaddr;
-			dso->loadmap->segs[i].p_memsz = ph->p_memsz;
-			i++;
-			if (prot & PROT_WRITE) {
-				size_t brk = (ph->p_vaddr & PAGE_SIZE-1)
-					+ ph->p_filesz;
-				size_t pgbrk = brk + PAGE_SIZE-1 & -PAGE_SIZE;
-				size_t pgend = brk + ph->p_memsz - ph->p_filesz
-					+ PAGE_SIZE-1 & -PAGE_SIZE;
-				if (pgend > pgbrk && mmap_fixed(map+pgbrk,
-					pgend-pgbrk, prot,
-					MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS,
-					-1, off_start) == MAP_FAILED)
-					goto error;
-				memset(map + brk, 0, pgbrk-brk);
-			}
-		}
-		map = (void *)dso->loadmap->segs[0].addr;
-		map_len = 0;
-		goto done_mapping;
-	}
-	addr_max += PAGE_SIZE-1;
-	addr_max &= -PAGE_SIZE;
-	addr_min &= -PAGE_SIZE;
-	off_start &= -PAGE_SIZE;
-	map_len = addr_max - addr_min + off_start;
-	/* The first time, we map too much, possibly even more than
-	 * the length of the file. This is okay because we will not
-	 * use the invalid part; we just need to reserve the right
-	 * amount of virtual address space to map over later. */
-	map = DL_NOMMU_SUPPORT
-		? mmap((void *)addr_min, map_len, PROT_READ|PROT_WRITE|PROT_EXEC,
-			MAP_PRIVATE|MAP_ANONYMOUS, -1, 0)
-		: mmap((void *)addr_min, map_len, prot,
-			MAP_PRIVATE, fd, off_start);
-	if (map==MAP_FAILED) goto error;
-	dso->map = map;
-	dso->map_len = map_len;
-	/* If the loaded file is not relocatable and the requested address is
-	 * not available, then the load operation must fail. */
-	if (eh->e_type != ET_DYN && addr_min && map!=(void *)addr_min) {
-		errno = EBUSY;
-		goto error;
-	}
-	base = map - addr_min;
-	dso->phdr = 0;
-	dso->phnum = 0;
-	for (ph=ph0, i=eh->e_phnum; i; i--, ph=(void *)((char *)ph+eh->e_phentsize)) {
-		if (ph->p_type != PT_LOAD) continue;
-		/* Check if the programs headers are in this load segment, and
-		 * if so, record the address for use by dl_iterate_phdr. */
-		if (!dso->phdr && eh->e_phoff >= ph->p_offset
-		    && eh->e_phoff+phsize <= ph->p_offset+ph->p_filesz) {
-			dso->phdr = (void *)(base + ph->p_vaddr
-				+ (eh->e_phoff-ph->p_offset));
-			dso->phnum = eh->e_phnum;
-			dso->phentsize = eh->e_phentsize;
-		}
-		this_min = ph->p_vaddr & -PAGE_SIZE;
-		this_max = ph->p_vaddr+ph->p_memsz+PAGE_SIZE-1 & -PAGE_SIZE;
-		off_start = ph->p_offset & -PAGE_SIZE;
-		prot = (((ph->p_flags&PF_R) ? PROT_READ : 0) |
-			((ph->p_flags&PF_W) ? PROT_WRITE: 0) |
-			((ph->p_flags&PF_X) ? PROT_EXEC : 0));
-		/* Reuse the existing mapping for the lowest-address LOAD */
-		if ((ph->p_vaddr & -PAGE_SIZE) != addr_min || DL_NOMMU_SUPPORT)
-			if (mmap_fixed(base+this_min, this_max-this_min, prot, MAP_PRIVATE|MAP_FIXED, fd, off_start) == MAP_FAILED)
-				goto error;
-		if (ph->p_memsz > ph->p_filesz && (ph->p_flags&PF_W)) {
-			size_t brk = (size_t)base+ph->p_vaddr+ph->p_filesz;
-			size_t pgbrk = brk+PAGE_SIZE-1 & -PAGE_SIZE;
-			memset((void *)brk, 0, pgbrk-brk & PAGE_SIZE-1);
-			if (pgbrk-(size_t)base < this_max && mmap_fixed((void *)pgbrk, (size_t)base+this_max-pgbrk, prot, MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0) == MAP_FAILED)
-				goto error;
-		}
-	}
-	for (i=0; ((size_t *)(base+dyn))[i]; i+=2)
-		if (((size_t *)(base+dyn))[i]==DT_TEXTREL) {
-			if (mprotect(map, map_len, PROT_READ|PROT_WRITE|PROT_EXEC)
-			    && errno != ENOSYS)
-				goto error;
-			break;
-		}
-done_mapping:
-	dso->base = base;
-	dso->dynv = laddr(dso, dyn);
-	if (dso->tls.size) dso->tls.image = laddr(dso, tls_image);
-	free(allocated_buf);
-	return map;
-noexec:
-	errno = ENOEXEC;
-error:
-	if (map!=MAP_FAILED) unmap_library(dso);
-	free(allocated_buf);
-	return 0;
-}
-
 static int path_open(const char *name, const char *s, char *buf, size_t buf_size)
 {
 	size_t l;
diff --git a/ldso/map_library.h b/ldso/map_library.h
new file mode 100644
index 0000000..d685471
--- /dev/null
+++ b/ldso/map_library.h
@@ -0,0 +1,276 @@
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/mman.h>
+#include <errno.h>
+#include "dynlink.h"
+#include "pthread_impl.h"
+
+/* Compute load address for a virtual address in a given dso. */
+#if DL_FDPIC
+static inline void *laddr(const struct dso *p, size_t v)
+{
+	size_t j=0;
+	if (!p->loadmap) return p->base + v;
+	for (j=0; v-p->loadmap->segs[j].p_vaddr >= p->loadmap->segs[j].p_memsz; j++);
+	return (void *)(v - p->loadmap->segs[j].p_vaddr + p->loadmap->segs[j].addr);
+}
+static inline void *laddr_pg(const struct dso *p, size_t v)
+{
+	size_t j=0;
+	size_t pgsz = PAGE_SIZE;
+	if (!p->loadmap) return p->base + v;
+	for (j=0; ; j++) {
+		size_t a = p->loadmap->segs[j].p_vaddr;
+		size_t b = a + p->loadmap->segs[j].p_memsz;
+		a &= -pgsz;
+		b += pgsz-1;
+		b &= -pgsz;
+		if (v-a<b-a) break;
+	}
+	return (void *)(v - p->loadmap->segs[j].p_vaddr + p->loadmap->segs[j].addr);
+}
+static void (*fdbarrier(void *p))()
+{
+	void (*fd)();
+	__asm__("" : "=r"(fd) : "0"(p));
+	return fd;
+}
+#define fpaddr(p, v) fdbarrier((&(struct funcdesc){ \
+	laddr(p, v), (p)->got }))
+#else
+#define laddr(p, v) (void *)((p)->base + (v))
+#define laddr_pg(p, v) laddr(p, v)
+#define fpaddr(p, v) ((void (*)())laddr(p, v))
+#endif
+
+static void *mmap_fixed(void *p, size_t n, int prot, int flags, int fd, off_t off)
+{
+	static int no_map_fixed;
+	char *q;
+	if (!no_map_fixed) {
+		q = mmap(p, n, prot, flags|MAP_FIXED, fd, off);
+		if (!DL_NOMMU_SUPPORT || q != MAP_FAILED || errno != EINVAL)
+			return q;
+		no_map_fixed = 1;
+	}
+	/* Fallbacks for MAP_FIXED failure on NOMMU kernels. */
+	if (flags & MAP_ANONYMOUS) {
+		memset(p, 0, n);
+		return p;
+	}
+	ssize_t r;
+	if (lseek(fd, off, SEEK_SET) < 0) return MAP_FAILED;
+	for (q=p; n; q+=r, off+=r, n-=r) {
+		r = read(fd, q, n);
+		if (r < 0 && errno != EINTR) return MAP_FAILED;
+		if (!r) {
+			memset(q, 0, n);
+			break;
+		}
+	}
+	return p;
+}
+
+static inline void unmap_library(struct dso *dso)
+{
+	if (dso->loadmap) {
+		size_t i;
+		for (i=0; i<dso->loadmap->nsegs; i++) {
+			if (!dso->loadmap->segs[i].p_memsz)
+				continue;
+			munmap((void *)dso->loadmap->segs[i].addr,
+				dso->loadmap->segs[i].p_memsz);
+		}
+		free(dso->loadmap);
+	} else if (dso->map && dso->map_len) {
+		munmap(dso->map, dso->map_len);
+	}
+}
+
+static inline void *map_library(int fd, struct dso *dso)
+{
+	Ehdr buf[(896+sizeof(Ehdr))/sizeof(Ehdr)];
+	void *allocated_buf=0;
+	size_t phsize;
+	size_t ph_allocated_size;
+	size_t addr_min=SIZE_MAX, addr_max=0, map_len;
+	size_t this_min, this_max;
+	size_t nsegs = 0;
+	off_t off_start;
+	Ehdr *eh;
+	Phdr *ph, *ph0;
+	unsigned prot;
+	unsigned char *map=MAP_FAILED, *base;
+	size_t dyn=0;
+	size_t i;
+	size_t tls_image=0;
+
+	ssize_t l = read(fd, buf, sizeof buf);
+	eh = buf;
+	if (l<0) return 0;
+	if (l<sizeof *eh || (eh->e_type != ET_DYN && eh->e_type != ET_EXEC))
+		goto noexec;
+	phsize = eh->e_phentsize * eh->e_phnum;
+	if (phsize > sizeof buf - sizeof *eh) {
+		allocated_buf = malloc(phsize);
+		if (!allocated_buf) return 0;
+		l = pread(fd, allocated_buf, phsize, eh->e_phoff);
+		if (l < 0) goto error;
+		if (l != phsize) goto noexec;
+		ph = ph0 = allocated_buf;
+	} else if (eh->e_phoff + phsize > l) {
+		l = pread(fd, buf+1, phsize, eh->e_phoff);
+		if (l < 0) goto error;
+		if (l != phsize) goto noexec;
+		ph = ph0 = (void *)(buf + 1);
+	} else {
+		ph = ph0 = (void *)((char *)buf + eh->e_phoff);
+	}
+	for (i=eh->e_phnum; i; i--, ph=(void *)((char *)ph+eh->e_phentsize)) {
+		if (ph->p_type == PT_DYNAMIC) {
+			dyn = ph->p_vaddr;
+		} else if (ph->p_type == PT_TLS) {
+			tls_image = ph->p_vaddr;
+			dso->tls.align = ph->p_align;
+			dso->tls.len = ph->p_filesz;
+			dso->tls.size = ph->p_memsz;
+		} else if (ph->p_type == PT_GNU_RELRO) {
+			dso->relro_start = ph->p_vaddr & -PAGE_SIZE;
+			dso->relro_end = (ph->p_vaddr + ph->p_memsz) & -PAGE_SIZE;
+		} else if (ph->p_type == PT_GNU_STACK) {
+			if (!runtime && ph->p_memsz > __default_stacksize) {
+				__default_stacksize =
+					ph->p_memsz < DEFAULT_STACK_MAX ?
+					ph->p_memsz : DEFAULT_STACK_MAX;
+			}
+		}
+		if (ph->p_type != PT_LOAD) continue;
+		nsegs++;
+		if (ph->p_vaddr < addr_min) {
+			addr_min = ph->p_vaddr;
+			off_start = ph->p_offset;
+			prot = (((ph->p_flags&PF_R) ? PROT_READ : 0) |
+				((ph->p_flags&PF_W) ? PROT_WRITE: 0) |
+				((ph->p_flags&PF_X) ? PROT_EXEC : 0));
+		}
+		if (ph->p_vaddr+ph->p_memsz > addr_max) {
+			addr_max = ph->p_vaddr+ph->p_memsz;
+		}
+	}
+	if (!dyn) goto noexec;
+	if (DL_FDPIC && !(eh->e_flags & FDPIC_CONSTDISP_FLAG)) {
+		dso->loadmap = calloc(1, sizeof *dso->loadmap
+			+ nsegs * sizeof *dso->loadmap->segs);
+		if (!dso->loadmap) goto error;
+		dso->loadmap->nsegs = nsegs;
+		for (ph=ph0, i=0; i<nsegs; ph=(void *)((char *)ph+eh->e_phentsize)) {
+			if (ph->p_type != PT_LOAD) continue;
+			prot = (((ph->p_flags&PF_R) ? PROT_READ : 0) |
+				((ph->p_flags&PF_W) ? PROT_WRITE: 0) |
+				((ph->p_flags&PF_X) ? PROT_EXEC : 0));
+			map = mmap(0, ph->p_memsz + (ph->p_vaddr & PAGE_SIZE-1),
+				prot, MAP_PRIVATE,
+				fd, ph->p_offset & -PAGE_SIZE);
+			if (map == MAP_FAILED) {
+				unmap_library(dso);
+				goto error;
+			}
+			dso->loadmap->segs[i].addr = (size_t)map +
+				(ph->p_vaddr & PAGE_SIZE-1);
+			dso->loadmap->segs[i].p_vaddr = ph->p_vaddr;
+			dso->loadmap->segs[i].p_memsz = ph->p_memsz;
+			i++;
+			if (prot & PROT_WRITE) {
+				size_t brk = (ph->p_vaddr & PAGE_SIZE-1)
+					+ ph->p_filesz;
+				size_t pgbrk = brk + PAGE_SIZE-1 & -PAGE_SIZE;
+				size_t pgend = brk + ph->p_memsz - ph->p_filesz
+					+ PAGE_SIZE-1 & -PAGE_SIZE;
+				if (pgend > pgbrk && mmap_fixed(map+pgbrk,
+					pgend-pgbrk, prot,
+					MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS,
+					-1, off_start) == MAP_FAILED)
+					goto error;
+				memset(map + brk, 0, pgbrk-brk);
+			}
+		}
+		map = (void *)dso->loadmap->segs[0].addr;
+		map_len = 0;
+		goto done_mapping;
+	}
+	addr_max += PAGE_SIZE-1;
+	addr_max &= -PAGE_SIZE;
+	addr_min &= -PAGE_SIZE;
+	off_start &= -PAGE_SIZE;
+	map_len = addr_max - addr_min + off_start;
+	/* The first time, we map too much, possibly even more than
+	 * the length of the file. This is okay because we will not
+	 * use the invalid part; we just need to reserve the right
+	 * amount of virtual address space to map over later. */
+	map = DL_NOMMU_SUPPORT
+		? mmap((void *)addr_min, map_len, PROT_READ|PROT_WRITE|PROT_EXEC,
+			MAP_PRIVATE|MAP_ANONYMOUS, -1, 0)
+		: mmap((void *)addr_min, map_len, prot,
+			MAP_PRIVATE, fd, off_start);
+	if (map==MAP_FAILED) goto error;
+	dso->map = map;
+	dso->map_len = map_len;
+	/* If the loaded file is not relocatable and the requested address is
+	 * not available, then the load operation must fail. */
+	if (eh->e_type != ET_DYN && addr_min && map!=(void *)addr_min) {
+		errno = EBUSY;
+		goto error;
+	}
+	base = map - addr_min;
+	dso->phdr = 0;
+	dso->phnum = 0;
+	for (ph=ph0, i=eh->e_phnum; i; i--, ph=(void *)((char *)ph+eh->e_phentsize)) {
+		if (ph->p_type != PT_LOAD) continue;
+		/* Check if the programs headers are in this load segment, and
+		 * if so, record the address for use by dl_iterate_phdr. */
+		if (!dso->phdr && eh->e_phoff >= ph->p_offset
+		    && eh->e_phoff+phsize <= ph->p_offset+ph->p_filesz) {
+			dso->phdr = (void *)(base + ph->p_vaddr
+				+ (eh->e_phoff-ph->p_offset));
+			dso->phnum = eh->e_phnum;
+			dso->phentsize = eh->e_phentsize;
+		}
+		this_min = ph->p_vaddr & -PAGE_SIZE;
+		this_max = ph->p_vaddr+ph->p_memsz+PAGE_SIZE-1 & -PAGE_SIZE;
+		off_start = ph->p_offset & -PAGE_SIZE;
+		prot = (((ph->p_flags&PF_R) ? PROT_READ : 0) |
+			((ph->p_flags&PF_W) ? PROT_WRITE: 0) |
+			((ph->p_flags&PF_X) ? PROT_EXEC : 0));
+		/* Reuse the existing mapping for the lowest-address LOAD */
+		if ((ph->p_vaddr & -PAGE_SIZE) != addr_min || DL_NOMMU_SUPPORT)
+			if (mmap_fixed(base+this_min, this_max-this_min, prot, MAP_PRIVATE|MAP_FIXED, fd, off_start) == MAP_FAILED)
+				goto error;
+		if (ph->p_memsz > ph->p_filesz && (ph->p_flags&PF_W)) {
+			size_t brk = (size_t)base+ph->p_vaddr+ph->p_filesz;
+			size_t pgbrk = brk+PAGE_SIZE-1 & -PAGE_SIZE;
+			memset((void *)brk, 0, pgbrk-brk & PAGE_SIZE-1);
+			if (pgbrk-(size_t)base < this_max && mmap_fixed((void *)pgbrk, (size_t)base+this_max-pgbrk, prot, MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0) == MAP_FAILED)
+				goto error;
+		}
+	}
+	for (i=0; ((size_t *)(base+dyn))[i]; i+=2)
+		if (((size_t *)(base+dyn))[i]==DT_TEXTREL) {
+			if (mprotect(map, map_len, PROT_READ|PROT_WRITE|PROT_EXEC)
+			    && errno != ENOSYS)
+				goto error;
+			break;
+		}
+done_mapping:
+	dso->base = base;
+	dso->dynv = laddr(dso, dyn);
+	if (dso->tls.size) dso->tls.image = laddr(dso, tls_image);
+	free(allocated_buf);
+	return map;
+noexec:
+	errno = ENOEXEC;
+error:
+	if (map!=MAP_FAILED) unmap_library(dso);
+	free(allocated_buf);
+	return 0;
+}
-- 
2.7.4