#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <stdint.h>
#include <errno.h>
#include <sys/mman.h>
#include "dynlink.h"
#include "pthread_impl.h"

/* Compute load address for a virtual address in a given dso. */
#if DL_FDPIC
static inline void *laddr(const struct dso *p, size_t v)
{
	size_t j=0;
	if (!p->loadmap) return p->base + v;
	for (j=0; v-p->loadmap->segs[j].p_vaddr >= p->loadmap->segs[j].p_memsz; j++);
	return (void *)(v - p->loadmap->segs[j].p_vaddr
		+ p->loadmap->segs[j].addr);
}
static inline void *laddr_pg(const struct dso *p, size_t v)
{
	size_t j=0;
	size_t pgsz = PAGE_SIZE;
	if (!p->loadmap) return p->base + v;
	for (j=0; ; j++) {
		size_t a = p->loadmap->segs[j].p_vaddr;
		size_t b = a + p->loadmap->segs[j].p_memsz;
		a &= -pgsz;
		b += pgsz-1;
		b &= -pgsz;
		if (v-a < b-a) break;
	}
	return (void *)(v - p->loadmap->segs[j].p_vaddr
		+ p->loadmap->segs[j].addr);
}
static void (*fdbarrier(void *p))()
{
	void (*fd)();
	/* Empty asm barrier: keeps the compiler from making assumptions
	 * about the function descriptor the converted pointer refers to. */
	__asm__("" : "=r"(fd) : "0"(p));
	return fd;
}
#define fpaddr(p, v) fdbarrier((&(struct funcdesc){ \
	laddr(p, v), (p)->got }))
#else
#define laddr(p, v) (void *)((p)->base + (v))
#define laddr_pg(p, v) laddr(p, v)
#define fpaddr(p, v) ((void (*)())laddr(p, v))
#endif
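/* Editorial illustration (not from the original source): a worked example
 * of the FDPIC laddr() translation above, with made-up addresses. Assume
 * a loadmap with two segments:
 *
 *   segs[0]: p_vaddr=0x0000, p_memsz=0x3000, addr=0x70001000
 *   segs[1]: p_vaddr=0x4000, p_memsz=0x1000, addr=0x75000000
 *
 * For v=0x4123: at j=0, v-0x0000 = 0x4123 >= 0x3000, so the loop advances;
 * at j=1, v-0x4000 = 0x123 < 0x1000, so the loop stops and the result is
 * 0x75000000 + 0x123 = 0x75000123. Because the subtraction is unsigned, a
 * v below a segment's p_vaddr wraps to a huge value and fails the bound
 * check, so a single comparison covers both ends of the range. */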
static void *mmap_fixed(void *p, size_t n, int prot, int flags, int fd, off_t off)
{
	static int no_map_fixed;
	char *q;
	if (!no_map_fixed) {
		q = mmap(p, n, prot, flags|MAP_FIXED, fd, off);
		if (!DL_NOMMU_SUPPORT || q != MAP_FAILED || errno != EINVAL)
			return q;
		no_map_fixed = 1;
	}
	/* Fallbacks for MAP_FIXED failure on NOMMU kernels. */
	if (flags & MAP_ANONYMOUS) {
		memset(p, 0, n);
		return p;
	}
	ssize_t r;
	if (lseek(fd, off, SEEK_SET) < 0) return MAP_FAILED;
	for (q=p; n; q+=r, off+=r, n-=r) {
		r = read(fd, q, n);
		if (r < 0 && errno != EINTR) return MAP_FAILED;
		if (!r) {
			memset(q, 0, n);
			break;
		}
	}
	return p;
}

static inline void unmap_library(struct dso *dso)
{
	if (dso->loadmap) {
		size_t i;
		for (i=0; i<dso->loadmap->nsegs; i++) {
			if (!dso->loadmap->segs[i].p_memsz)
				continue;
			munmap((void *)dso->loadmap->segs[i].addr,
				dso->loadmap->segs[i].p_memsz);
		}
		free(dso->loadmap);
	} else if (dso->map && dso->map_len) {
		munmap(dso->map, dso->map_len);
	}
}

static inline void *map_library(int fd, struct dso *dso)
{
	Ehdr buf[(896+sizeof(Ehdr))/sizeof(Ehdr)];
	void *allocated_buf=0;
	size_t phsize;
	size_t addr_min=SIZE_MAX, addr_max=0, map_len;
	size_t this_min, this_max;
	size_t nsegs = 0;
	off_t off_start;
	Ehdr *eh;
	Phdr *ph, *ph0;
	unsigned prot;
	unsigned char *map=MAP_FAILED, *base;
	size_t dyn=0;
	size_t i;
	size_t tls_image=0;

	ssize_t l = read(fd, buf, sizeof buf);
	eh = buf;
	if (l<0) return 0;
	if (l<sizeof *eh || (eh->e_type != ET_DYN && eh->e_type != ET_EXEC))
		goto noexec;
	phsize = eh->e_phentsize * eh->e_phnum;
	if (phsize > sizeof buf - sizeof *eh) {
		allocated_buf = malloc(phsize);
		if (!allocated_buf) return 0;
		l = pread(fd, allocated_buf, phsize, eh->e_phoff);
		if (l < 0) goto error;
		if (l != phsize) goto noexec;
		ph = ph0 = allocated_buf;
	} else if (eh->e_phoff + phsize > l) {
		l = pread(fd, buf+1, phsize, eh->e_phoff);
		if (l < 0) goto error;
		if (l != phsize) goto noexec;
		ph = ph0 = (void *)(buf + 1);
	} else {
		ph = ph0 = (void *)((char *)buf + eh->e_phoff);
	}
	for (i=eh->e_phnum; i; i--, ph=(void *)((char *)ph+eh->e_phentsize)) {
		if (ph->p_type == PT_DYNAMIC) {
			dyn = ph->p_vaddr;
		} else if (ph->p_type == PT_TLS) {
			tls_image = ph->p_vaddr;
			dso->tls.align = ph->p_align;
			dso->tls.len = ph->p_filesz;
			dso->tls.size = ph->p_memsz;
		} else if (ph->p_type == PT_GNU_RELRO) {
			dso->relro_start = ph->p_vaddr & -PAGE_SIZE;
			dso->relro_end = (ph->p_vaddr + ph->p_memsz) & -PAGE_SIZE;
		} else if (ph->p_type == PT_GNU_STACK) {
			if (!runtime && ph->p_memsz > __default_stacksize) {
				__default_stacksize =
					ph->p_memsz < DEFAULT_STACK_MAX ?
					ph->p_memsz : DEFAULT_STACK_MAX;
			}
		}
		if (ph->p_type != PT_LOAD) continue;
		nsegs++;
		if (ph->p_vaddr < addr_min) {
			addr_min = ph->p_vaddr;
			off_start = ph->p_offset;
			prot = (((ph->p_flags&PF_R) ? PROT_READ : 0) |
				((ph->p_flags&PF_W) ? PROT_WRITE: 0) |
				((ph->p_flags&PF_X) ? PROT_EXEC : 0));
		}
		if (ph->p_vaddr+ph->p_memsz > addr_max) {
			addr_max = ph->p_vaddr+ph->p_memsz;
		}
	}
	if (!dyn) goto noexec;
	if (DL_FDPIC && !(eh->e_flags & FDPIC_CONSTDISP_FLAG)) {
		dso->loadmap = calloc(1, sizeof *dso->loadmap
			+ nsegs * sizeof *dso->loadmap->segs);
		if (!dso->loadmap) goto error;
		dso->loadmap->nsegs = nsegs;
		for (ph=ph0, i=0; i<nsegs; ph=(void *)((char *)ph+eh->e_phentsize)) {
			if (ph->p_type != PT_LOAD) continue;
			prot = (((ph->p_flags&PF_R) ? PROT_READ : 0) |
				((ph->p_flags&PF_W) ? PROT_WRITE: 0) |
				((ph->p_flags&PF_X) ? PROT_EXEC : 0));
			map = mmap(0, ph->p_memsz + (ph->p_vaddr & PAGE_SIZE-1),
				prot, MAP_PRIVATE,
				fd, ph->p_offset & -PAGE_SIZE);
			if (map == MAP_FAILED) {
				unmap_library(dso);
				goto error;
			}
			dso->loadmap->segs[i].addr = (size_t)map +
				(ph->p_vaddr & PAGE_SIZE-1);
			dso->loadmap->segs[i].p_vaddr = ph->p_vaddr;
			dso->loadmap->segs[i].p_memsz = ph->p_memsz;
			i++;
			if (prot & PROT_WRITE) {
				size_t brk = (ph->p_vaddr & PAGE_SIZE-1)
					+ ph->p_filesz;
				size_t pgbrk = brk + PAGE_SIZE-1 & -PAGE_SIZE;
				size_t pgend = brk + ph->p_memsz - ph->p_filesz
					+ PAGE_SIZE-1 & -PAGE_SIZE;
				if (pgend > pgbrk && mmap_fixed(map+pgbrk,
					pgend-pgbrk, prot,
					MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS,
					-1, off_start) == MAP_FAILED)
					goto error;
				memset(map + brk, 0, pgbrk-brk);
			}
		}
		map = (void *)dso->loadmap->segs[0].addr;
		map_len = 0;
		goto done_mapping;
	}
	addr_max += PAGE_SIZE-1;
	addr_max &= -PAGE_SIZE;
	addr_min &= -PAGE_SIZE;
	off_start &= -PAGE_SIZE;
	map_len = addr_max - addr_min + off_start;
	/* The first time, we map too much, possibly even more than
	 * the length of the file. This is okay because we will not
	 * use the invalid part; we just need to reserve the right
	 * amount of virtual address space to map over later. */
	map = DL_NOMMU_SUPPORT
		? mmap((void *)addr_min, map_len, PROT_READ|PROT_WRITE|PROT_EXEC,
			MAP_PRIVATE|MAP_ANONYMOUS, -1, 0)
		: mmap((void *)addr_min, map_len, prot,
			MAP_PRIVATE, fd, off_start);
	if (map==MAP_FAILED) goto error;
	dso->map = map;
	dso->map_len = map_len;
	/* If the loaded file is not relocatable and the requested address is
	 * not available, then the load operation must fail. */
	if (eh->e_type != ET_DYN && addr_min && map!=(void *)addr_min) {
		errno = EBUSY;
		goto error;
	}
	base = map - addr_min;
	dso->phdr = 0;
	dso->phnum = 0;
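	/* Editorial note with illustrative figures (not from the original
	 * source): the loop below is the second pass over the PT_LOAD
	 * headers. The mmap above only reserved the address range; this
	 * pass maps each segment over the reservation with its own
	 * protections, then zeroes the tail of the last file-backed page
	 * and maps anonymous pages for the rest of p_memsz (the BSS).
	 * For example, with PAGE_SIZE=0x1000, base=0, p_vaddr=0x10000,
	 * p_filesz=0x1234, p_memsz=0x3000:
	 *
	 *   brk   = 0x10000+0x1234 = 0x11234
	 *   pgbrk = 0x12000 (brk rounded up to a page boundary)
	 *   this_max = 0x10000+0x3000+0xfff & -0x1000 = 0x13000
	 *
	 * so memset zeroes [0x11234,0x12000) and anonymous zero pages are
	 * mapped over [0x12000,0x13000). */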
	for (ph=ph0, i=eh->e_phnum; i; i--, ph=(void *)((char *)ph+eh->e_phentsize)) {
		if (ph->p_type != PT_LOAD) continue;
		/* Check if the program headers are in this load segment, and
		 * if so, record the address for use by dl_iterate_phdr. */
		if (!dso->phdr && eh->e_phoff >= ph->p_offset
		    && eh->e_phoff+phsize <= ph->p_offset+ph->p_filesz) {
			dso->phdr = (void *)(base + ph->p_vaddr
				+ (eh->e_phoff-ph->p_offset));
			dso->phnum = eh->e_phnum;
			dso->phentsize = eh->e_phentsize;
		}
		this_min = ph->p_vaddr & -PAGE_SIZE;
		this_max = ph->p_vaddr+ph->p_memsz+PAGE_SIZE-1 & -PAGE_SIZE;
		off_start = ph->p_offset & -PAGE_SIZE;
		prot = (((ph->p_flags&PF_R) ? PROT_READ : 0) |
			((ph->p_flags&PF_W) ? PROT_WRITE: 0) |
			((ph->p_flags&PF_X) ? PROT_EXEC : 0));
		/* Reuse the existing mapping for the lowest-address LOAD */
		if ((ph->p_vaddr & -PAGE_SIZE) != addr_min || DL_NOMMU_SUPPORT)
			if (mmap_fixed(base+this_min, this_max-this_min, prot,
				MAP_PRIVATE|MAP_FIXED, fd, off_start) == MAP_FAILED)
				goto error;
		if (ph->p_memsz > ph->p_filesz && (ph->p_flags&PF_W)) {
			size_t brk = (size_t)base+ph->p_vaddr+ph->p_filesz;
			size_t pgbrk = brk+PAGE_SIZE-1 & -PAGE_SIZE;
			memset((void *)brk, 0, pgbrk-brk & PAGE_SIZE-1);
			if (pgbrk-(size_t)base < this_max
			    && mmap_fixed((void *)pgbrk,
				(size_t)base+this_max-pgbrk,
				prot, MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS,
				-1, 0) == MAP_FAILED)
				goto error;
		}
	}
	for (i=0; ((size_t *)(base+dyn))[i]; i+=2)
		if (((size_t *)(base+dyn))[i]==DT_TEXTREL) {
			if (mprotect(map, map_len, PROT_READ|PROT_WRITE|PROT_EXEC)
			    && errno != ENOSYS)
				goto error;
			break;
		}
done_mapping:
	dso->base = base;
	dso->dynv = laddr(dso, dyn);
	if (dso->tls.size) dso->tls.image = laddr(dso, tls_image);
	free(allocated_buf);
	return map;
noexec:
	errno = ENOEXEC;
error:
	if (map!=MAP_FAILED) unmap_library(dso);
	free(allocated_buf);
	return 0;
}
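/* Illustration only, not part of the original source: the DT_TEXTREL scan
 * above treats the mapped dynamic section as a flat array of (tag, value)
 * word pairs terminated by a zero tag (DT_NULL). A hypothetical helper
 * performing the same walk for an arbitrary tag might look like this
 * sketch (example_dyn_lookup is a made-up name, not a musl API): */
static inline size_t example_dyn_lookup(size_t *dynv, size_t tag)
{
	size_t i;
	/* dynv[i] is the d_tag of entry i/2, dynv[i+1] its d_un value. */
	for (i=0; dynv[i]; i+=2)
		if (dynv[i] == tag) return dynv[i+1];
	return 0;
}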