|
@@ -354,6 +354,7 @@ struct elf_resolve *_dl_load_elf_shared_library(int secure,
|
|
|
DL_LOADADDR_TYPE lib_loadaddr;
|
|
|
DL_INIT_LOADADDR_EXTRA_DECLS
|
|
|
|
|
|
+ libaddr = 0;
|
|
|
infile = _dl_open(libname, O_RDONLY, 0);
|
|
|
if (infile < 0) {
|
|
|
_dl_internal_error_number = LD_ERROR_NOFILE;
|
|
@@ -449,6 +450,8 @@ struct elf_resolve *_dl_load_elf_shared_library(int secure,
|
|
|
ppnt++;
|
|
|
}
|
|
|
|
|
|
+ DL_CHECK_LIB_TYPE (epnt, piclib, _dl_progname, libname);
|
|
|
+
|
|
|
maxvma = (maxvma + ADDR_ALIGN) & ~ADDR_ALIGN;
|
|
|
minvma = minvma & ~0xffffU;
|
|
|
|
|
@@ -456,17 +459,19 @@ struct elf_resolve *_dl_load_elf_shared_library(int secure,
|
|
|
if (!piclib)
|
|
|
flags |= MAP_FIXED;
|
|
|
|
|
|
- status = (char *) _dl_mmap((char *) (piclib ? 0 : minvma),
|
|
|
- maxvma - minvma, PROT_NONE, flags | MAP_ANONYMOUS, -1, 0);
|
|
|
- if (_dl_mmap_check_error(status)) {
|
|
|
- _dl_dprintf(2, "%s:%i: can't map '%s'\n", _dl_progname, __LINE__, libname);
|
|
|
- _dl_internal_error_number = LD_ERROR_MMAP_FAILED;
|
|
|
- _dl_close(infile);
|
|
|
- _dl_munmap(header, _dl_pagesize);
|
|
|
- return NULL;
|
|
|
+ if (piclib == 0 || piclib == 1) {
|
|
|
+ status = (char *) _dl_mmap((char *) (piclib ? 0 : minvma),
|
|
|
+ maxvma - minvma, PROT_NONE, flags | MAP_ANONYMOUS, -1, 0);
|
|
|
+ if (_dl_mmap_check_error(status)) {
|
|
|
+ _dl_dprintf(2, "%s:%i: can't map '%s'\n", _dl_progname, __LINE__, libname);
|
|
|
+ _dl_internal_error_number = LD_ERROR_MMAP_FAILED;
|
|
|
+ _dl_close(infile);
|
|
|
+ _dl_munmap(header, _dl_pagesize);
|
|
|
+ return NULL;
|
|
|
+ }
|
|
|
+ libaddr = (unsigned long) status;
|
|
|
+ flags |= MAP_FIXED;
|
|
|
}
|
|
|
- libaddr = (unsigned long) status;
|
|
|
- flags |= MAP_FIXED;
|
|
|
|
|
|
|
|
|
ppnt = (ElfW(Phdr) *)(intptr_t) & header[epnt->e_phoff];
|
|
@@ -474,11 +479,24 @@ struct elf_resolve *_dl_load_elf_shared_library(int secure,
|
|
|
DL_INIT_LOADADDR(lib_loadaddr, libaddr, ppnt, epnt->e_phnum);
|
|
|
|
|
|
for (i = 0; i < epnt->e_phnum; i++) {
|
|
|
+ if (DL_IS_SPECIAL_SEGMENT (epnt, ppnt)) {
|
|
|
+ char *addr;
|
|
|
+
|
|
|
+ addr = DL_MAP_SEGMENT (epnt, ppnt, infile, flags);
|
|
|
+ if (addr == NULL)
|
|
|
+ goto cant_map;
|
|
|
+
|
|
|
+ DL_INIT_LOADADDR_HDR (lib_loadaddr, addr, ppnt);
|
|
|
+ ppnt++;
|
|
|
+ continue;
|
|
|
+ }
|
|
|
if (ppnt->p_type == PT_GNU_RELRO) {
|
|
|
relro_addr = ppnt->p_vaddr;
|
|
|
relro_size = ppnt->p_memsz;
|
|
|
}
|
|
|
if (ppnt->p_type == PT_LOAD) {
|
|
|
+ char *tryaddr;
|
|
|
+ ssize_t size;
|
|
|
|
|
|
|
|
|
if (i == 0 && ppnt->p_vaddr > 0x1000000) {
|
|
@@ -489,53 +507,155 @@ struct elf_resolve *_dl_load_elf_shared_library(int secure,
|
|
|
if (ppnt->p_flags & PF_W) {
|
|
|
unsigned long map_size;
|
|
|
char *cpnt;
|
|
|
-
|
|
|
- status = (char *) _dl_mmap((char *) ((piclib ? libaddr : 0) +
|
|
|
- (ppnt->p_vaddr & PAGE_ALIGN)), (ppnt->p_vaddr & ADDR_ALIGN)
|
|
|
- + ppnt->p_filesz, LXFLAGS(ppnt->p_flags), flags, infile,
|
|
|
- ppnt->p_offset & OFFS_ALIGN);
|
|
|
-
|
|
|
- if (_dl_mmap_check_error(status)) {
|
|
|
+ char *piclib2map = 0;
|
|
|
+
|
|
|
+ if (piclib == 2 &&
|
|
|
+
|
|
|
+ /* We might be able to avoid this call if memsz doesn't require
|
|
|
+ an additional page, but this
|
|
|
+ would require mmap to always
|
|
|
+ return page-aligned addresses
|
|
|
+ and a whole number of pages
|
|
|
+ allocated. Unfortunately on
|
|
|
+ uClinux may return misaligned
|
|
|
+ addresses and may allocate
|
|
|
+ partial pages, so we may end up
|
|
|
+ doing unnecessary mmap calls.
|
|
|
+
|
|
|
+ This is what we could do if we
|
|
|
+ knew mmap would always return
|
|
|
+ aligned pages:
|
|
|
+
|
|
|
+ ((ppnt->p_vaddr + ppnt->p_filesz
|
|
|
+ + ADDR_ALIGN)
|
|
|
+ & PAGE_ALIGN)
|
|
|
+ < ppnt->p_vaddr + ppnt->p_memsz)
|
|
|
+
|
|
|
+ Instead, we have to do this: */
|
|
|
+ ppnt->p_filesz < ppnt->p_memsz)
|
|
|
+ {
|
|
|
+ piclib2map = (char *)
|
|
|
+ _dl_mmap(0, (ppnt->p_vaddr & ADDR_ALIGN)
|
|
|
+ + ppnt->p_memsz,
|
|
|
+ LXFLAGS(ppnt->p_flags),
|
|
|
+ flags | MAP_ANONYMOUS, -1, 0);
|
|
|
+ if (_dl_mmap_check_error(piclib2map))
|
|
|
+ goto cant_map;
|
|
|
+ DL_INIT_LOADADDR_HDR
|
|
|
+ (lib_loadaddr, piclib2map
|
|
|
+ + (ppnt->p_vaddr & ADDR_ALIGN), ppnt);
|
|
|
+ }
|
|
|
+
|
|
|
+ tryaddr = piclib == 2 ? piclib2map
|
|
|
+ : ((char*) (piclib ? libaddr : 0) +
|
|
|
+ (ppnt->p_vaddr & PAGE_ALIGN));
|
|
|
+
|
|
|
+ size = (ppnt->p_vaddr & ADDR_ALIGN)
|
|
|
+ + ppnt->p_filesz;
|
|
|
+
|
|
|
+
|
|
|
+ /* On !MMU, mmap to a fixed address is unreliable. So instead of desperately call mmap and fail,
|
|
|
+ we set status to MAP_FAILED to save a call
|
|
|
+ to mmap (). */
|
|
|
+#ifndef __ARCH_USE_MMU__
|
|
|
+ if (piclib2map == 0)
|
|
|
+#endif
|
|
|
+ status = (char *) _dl_mmap
|
|
|
+ (tryaddr, size, LXFLAGS(ppnt->p_flags),
|
|
|
+ flags | (piclib2map ? MAP_FIXED : 0),
|
|
|
+ infile, ppnt->p_offset & OFFS_ALIGN);
|
|
|
+#ifndef __ARCH_USE_MMU__
|
|
|
+ else
|
|
|
+ status = MAP_FAILED;
|
|
|
+#endif
|
|
|
+#ifdef _DL_PREAD
|
|
|
+ if (_dl_mmap_check_error(status) && piclib2map
|
|
|
+ && (_DL_PREAD (infile, tryaddr, size,
|
|
|
+ ppnt->p_offset & OFFS_ALIGN)
|
|
|
+ == size))
|
|
|
+ status = tryaddr;
|
|
|
+#endif
|
|
|
+ if (_dl_mmap_check_error(status)
|
|
|
+ || (tryaddr && tryaddr != status)) {
|
|
|
+ cant_map:
|
|
|
_dl_dprintf(2, "%s:%i: can't map '%s'\n",
|
|
|
_dl_progname, __LINE__, libname);
|
|
|
_dl_internal_error_number = LD_ERROR_MMAP_FAILED;
|
|
|
- _dl_munmap((char *) libaddr, maxvma - minvma);
|
|
|
+ DL_LOADADDR_UNMAP (lib_loadaddr, maxvma - minvma);
|
|
|
_dl_close(infile);
|
|
|
_dl_munmap(header, _dl_pagesize);
|
|
|
return NULL;
|
|
|
}
|
|
|
|
|
|
-
|
|
|
- cpnt = (char *) (status + (ppnt->p_vaddr & ADDR_ALIGN) +
|
|
|
- ppnt->p_filesz);
|
|
|
- while (((unsigned long) cpnt) & ADDR_ALIGN)
|
|
|
- *cpnt++ = 0;
|
|
|
-
|
|
|
-
|
|
|
- * correct to do or not, but the basic way that
|
|
|
- * we handle bss segments is that we mmap
|
|
|
- * /dev/zero if there are any pages left over
|
|
|
- * that are not mapped as part of the file */
|
|
|
-
|
|
|
- map_size = (ppnt->p_vaddr + ppnt->p_filesz + ADDR_ALIGN) & PAGE_ALIGN;
|
|
|
-
|
|
|
- if (map_size < ppnt->p_vaddr + ppnt->p_memsz)
|
|
|
- status = (char *) _dl_mmap((char *) map_size +
|
|
|
- (piclib ? libaddr : 0),
|
|
|
- ppnt->p_vaddr + ppnt->p_memsz - map_size,
|
|
|
- LXFLAGS(ppnt->p_flags), flags | MAP_ANONYMOUS, -1, 0);
|
|
|
- } else
|
|
|
- status = (char *) _dl_mmap((char *) (ppnt->p_vaddr & PAGE_ALIGN)
|
|
|
- + (piclib ? libaddr : 0), (ppnt->p_vaddr & ADDR_ALIGN) +
|
|
|
- ppnt->p_filesz, LXFLAGS(ppnt->p_flags), flags,
|
|
|
- infile, ppnt->p_offset & OFFS_ALIGN);
|
|
|
- if (_dl_mmap_check_error(status)) {
|
|
|
- _dl_dprintf(2, "%s:%i: can't map '%s'\n", _dl_progname, __LINE__, libname);
|
|
|
- _dl_internal_error_number = LD_ERROR_MMAP_FAILED;
|
|
|
- _dl_munmap((char *) libaddr, maxvma - minvma);
|
|
|
- _dl_close(infile);
|
|
|
- _dl_munmap(header, _dl_pagesize);
|
|
|
- return NULL;
|
|
|
+ if (! piclib2map)
|
|
|
+ DL_INIT_LOADADDR_HDR
|
|
|
+ (lib_loadaddr, status
|
|
|
+ + (ppnt->p_vaddr & ADDR_ALIGN), ppnt);
|
|
|
+
|
|
|
+
|
|
|
+ /* Now we want to allocate and zero-out any data from the end of
|
|
|
+ the region we mapped in from the
|
|
|
+ file (filesz) to the end of the
|
|
|
+ loadable segment (memsz). We may
|
|
|
+ need additional pages for memsz,
|
|
|
+ that we map in below, and we can
|
|
|
+ count on the kernel to zero them
|
|
|
+ out, but we have to zero out stuff
|
|
|
+ in the last page that we mapped in
|
|
|
+ from the file. However, we can't
|
|
|
+ assume to have actually obtained
|
|
|
+ full pages from the kernel, since
|
|
|
+ we didn't ask for them, and uClibc
|
|
|
+ may not give us full pages for
|
|
|
+ small allocations. So only zero
|
|
|
+ out up to memsz or the end of the
|
|
|
+ page, whichever comes first. */
|
|
|
+
|
|
|
+
|
|
|
+ /* cpnt is the beginning of the memsz portion not backed by filesz. */
|
|
|
+ cpnt = (char *) (status + size);
|
|
|
+
|
|
|
+
|
|
|
+ /* map_size is the address of the beginning of the next page. */
|
|
|
+ map_size = (ppnt->p_vaddr + ppnt->p_filesz
|
|
|
+ + ADDR_ALIGN) & PAGE_ALIGN;
|
|
|
+
|
|
|
+#ifndef MIN
|
|
|
+# define MIN(a,b) ((a) < (b) ? (a) : (b))
|
|
|
+#endif
|
|
|
+ _dl_memset (cpnt, 0,
|
|
|
+ MIN (map_size
|
|
|
+ - (ppnt->p_vaddr
|
|
|
+ + ppnt->p_filesz),
|
|
|
+ ppnt->p_memsz
|
|
|
+ - ppnt->p_filesz));
|
|
|
+
|
|
|
+ if (map_size < ppnt->p_vaddr + ppnt->p_memsz
|
|
|
+ && !piclib2map) {
|
|
|
+ tryaddr = map_size + (char*)(piclib ? libaddr : 0);
|
|
|
+ status = (char *) _dl_mmap(tryaddr,
|
|
|
+ ppnt->p_vaddr + ppnt->p_memsz - map_size,
|
|
|
+ LXFLAGS(ppnt->p_flags), flags | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
|
|
|
+ if (_dl_mmap_check_error(status)
|
|
|
+ || tryaddr != status)
|
|
|
+ goto cant_map;
|
|
|
+ }
|
|
|
+ } else {
|
|
|
+ tryaddr = (piclib == 2 ? 0
|
|
|
+ : (char *) (ppnt->p_vaddr & PAGE_ALIGN)
|
|
|
+ + (piclib ? libaddr : 0));
|
|
|
+ size = (ppnt->p_vaddr & ADDR_ALIGN) + ppnt->p_filesz;
|
|
|
+ status = (char *) _dl_mmap
|
|
|
+ (tryaddr, size, LXFLAGS(ppnt->p_flags),
|
|
|
+ flags | (piclib == 2 ? MAP_EXECUTABLE
|
|
|
+ | MAP_DENYWRITE : 0),
|
|
|
+ infile, ppnt->p_offset & OFFS_ALIGN);
|
|
|
+ if (_dl_mmap_check_error(status)
|
|
|
+ || (tryaddr && tryaddr != status))
|
|
|
+ goto cant_map;
|
|
|
+ DL_INIT_LOADADDR_HDR
|
|
|
+ (lib_loadaddr, status
|
|
|
+ + (ppnt->p_vaddr & ADDR_ALIGN), ppnt);
|
|
|
}
|
|
|
|
|
|
|