@@ -314,6 +314,121 @@ goof:
 	return NULL;
 }
 
+/*
+ * Make a writeable mapping of a segment, regardless of whether PF_W is
+ * set or not.
+ */
+static void *
+map_writeable (int infile, ElfW(Phdr) *ppnt, int piclib, int flags,
+	       unsigned long libaddr)
+{
+	int prot_flags = ppnt->p_flags | PF_W;
+	char *status, *retval;
+	char *tryaddr;
+	ssize_t size;
+	unsigned long map_size;
+	char *cpnt;
+	char *piclib2map = NULL;
+
+	if (piclib == 2 &&
+	    /* We might be able to avoid this call if memsz doesn't
+	       require an additional page, but this would require mmap
+	       to always return page-aligned addresses and a whole
+	       number of pages allocated.  Unfortunately on uClinux
+	       may return misaligned addresses and may allocate
+	       partial pages, so we may end up doing unnecessary mmap
+	       calls.
+
+	       This is what we could do if we knew mmap would always
+	       return aligned pages:
+
+	       ((ppnt->p_vaddr + ppnt->p_filesz + ADDR_ALIGN) &
+	       PAGE_ALIGN) < ppnt->p_vaddr + ppnt->p_memsz)
+
+	       Instead, we have to do this:  */
+	    ppnt->p_filesz < ppnt->p_memsz)
+	{
+		piclib2map = (char *)
+			_dl_mmap(0, (ppnt->p_vaddr & ADDR_ALIGN) + ppnt->p_memsz,
+				 LXFLAGS(prot_flags), flags | MAP_ANONYMOUS, -1, 0);
+		if (_dl_mmap_check_error(piclib2map))
+			return 0;
+	}
+
+	tryaddr = piclib == 2 ? piclib2map
+		: ((char*) (piclib ? libaddr : 0) +
+		   (ppnt->p_vaddr & PAGE_ALIGN));
+
+	size = (ppnt->p_vaddr & ADDR_ALIGN) + ppnt->p_filesz;
+
+	/* For !MMU, mmap to fixed address will fail.
+	   So instead of desperately call mmap and fail,
+	   we set status to MAP_FAILED to save a call
+	   to mmap ().  */
+#ifndef __ARCH_USE_MMU__
+	if (piclib2map == 0)
+#endif
+		status = (char *) _dl_mmap
+			(tryaddr, size, LXFLAGS(prot_flags),
+			 flags | (piclib2map ? MAP_FIXED : 0),
+			 infile, ppnt->p_offset & OFFS_ALIGN);
+#ifndef __ARCH_USE_MMU__
+	else
+		status = MAP_FAILED;
+#endif
+#ifdef _DL_PREAD
+	if (_dl_mmap_check_error(status) && piclib2map
+	    && (_DL_PREAD (infile, tryaddr, size,
+			   ppnt->p_offset & OFFS_ALIGN) == size))
+		status = tryaddr;
+#endif
+	if (_dl_mmap_check_error(status) || (tryaddr && tryaddr != status))
+		return 0;
+
+	if (piclib2map)
+		retval = piclib2map;
+	else
+		retval = status;
+
+	/* Now we want to allocate and zero-out any data from the end
+	   of the region we mapped in from the file (filesz) to the
+	   end of the loadable segment (memsz).  We may need
+	   additional pages for memsz, that we map in below, and we
+	   can count on the kernel to zero them out, but we have to
+	   zero out stuff in the last page that we mapped in from the
+	   file.  However, we can't assume to have actually obtained
+	   full pages from the kernel, since we didn't ask for them,
+	   and uClibc may not give us full pages for small
+	   allocations.  So only zero out up to memsz or the end of
+	   the page, whichever comes first.  */
+
+	/* CPNT is the beginning of the memsz portion not backed by
+	   filesz.  */
+	cpnt = (char *) (status + size);
+
+	/* MAP_SIZE is the address of the
+	   beginning of the next page.  */
+	map_size = (ppnt->p_vaddr + ppnt->p_filesz
+		    + ADDR_ALIGN) & PAGE_ALIGN;
+
+	_dl_memset (cpnt, 0,
+		    MIN (map_size
+			 - (ppnt->p_vaddr
+			    + ppnt->p_filesz),
+			 ppnt->p_memsz
+			 - ppnt->p_filesz));
+
+	if (map_size < ppnt->p_vaddr + ppnt->p_memsz && !piclib2map) {
+		tryaddr = map_size + (char*)(piclib ? libaddr : 0);
+		status = (char *) _dl_mmap(tryaddr,
+					   ppnt->p_vaddr + ppnt->p_memsz - map_size,
+					   LXFLAGS(prot_flags),
+					   flags | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
+		if (_dl_mmap_check_error(status) || tryaddr != status)
+			return NULL;
+	}
+	return retval;
+}
+
 /*
  * Read one ELF library into memory, mmap it into the correct locations and
@@ -475,6 +590,7 @@ struct elf_resolve *_dl_load_elf_shared_library(int secure,
 		status = (char *) _dl_mmap((char *) (piclib ? 0 : minvma),
 				maxvma - minvma, PROT_NONE, flags | MAP_ANONYMOUS, -1, 0);
 		if (_dl_mmap_check_error(status)) {
+		cant_map:
 			_dl_dprintf(2, "%s:%i: can't map '%s'\n", _dl_progname, __LINE__, libname);
 			_dl_internal_error_number = LD_ERROR_MMAP_FAILED;
 			_dl_close(infile);
@@ -495,8 +611,11 @@ struct elf_resolve *_dl_load_elf_shared_library(int secure,
 			char *addr;
 
 			addr = DL_MAP_SEGMENT (epnt, ppnt, infile, flags);
-			if (addr == NULL)
+			if (addr == NULL) {
+			cant_map1:
+				DL_LOADADDR_UNMAP (lib_loadaddr, maxvma - minvma);
 				goto cant_map;
+			}
 
 			DL_INIT_LOADADDR_HDR (lib_loadaddr, addr, ppnt);
 			ppnt++;
@@ -517,141 +636,9 @@ struct elf_resolve *_dl_load_elf_shared_library(int secure,
 			}
 
 			if (ppnt->p_flags & PF_W) {
-				unsigned long map_size;
-				char *cpnt;
-				char *piclib2map = 0;
-
-				if (piclib == 2 &&
-				    /* We might be able to avoid this
-				       call if memsz doesn't require
-				       an additional page, but this
-				       would require mmap to always
-				       return page-aligned addresses
-				       and a whole number of pages
-				       allocated.  Unfortunately on
-				       uClinux may return misaligned
-				       addresses and may allocate
-				       partial pages, so we may end up
-				       doing unnecessary mmap calls.
-
-				       This is what we could do if we
-				       knew mmap would always return
-				       aligned pages:
-
-				       ((ppnt->p_vaddr + ppnt->p_filesz
-					 + ADDR_ALIGN)
-					& PAGE_ALIGN)
-				       < ppnt->p_vaddr + ppnt->p_memsz)
-
-				       Instead, we have to do this:  */
-				    ppnt->p_filesz < ppnt->p_memsz)
-				{
-					piclib2map = (char *)
-						_dl_mmap(0, (ppnt->p_vaddr & ADDR_ALIGN)
-							 + ppnt->p_memsz,
-							 LXFLAGS(ppnt->p_flags),
-							 flags | MAP_ANONYMOUS, -1, 0);
-					if (_dl_mmap_check_error(piclib2map))
-						goto cant_map;
-					DL_INIT_LOADADDR_HDR
-						(lib_loadaddr, piclib2map
-						 + (ppnt->p_vaddr & ADDR_ALIGN), ppnt);
-				}
-
-				tryaddr = piclib == 2 ? piclib2map
-					: ((char*) (piclib ? libaddr : 0) +
-					   (ppnt->p_vaddr & PAGE_ALIGN));
-
-				size = (ppnt->p_vaddr & ADDR_ALIGN)
-					+ ppnt->p_filesz;
-
-				/* For !MMU, mmap to fixed address will fail.
-				   So instead of desperately call mmap and fail,
-				   we set status to MAP_FAILED to save a call
-				   to mmap ().  */
-#ifndef __ARCH_USE_MMU__
-				if (piclib2map == 0)
-#endif
-				status = (char *) _dl_mmap
-					(tryaddr, size, LXFLAGS(ppnt->p_flags),
-					 flags | (piclib2map ? MAP_FIXED : 0),
-					 infile, ppnt->p_offset & OFFS_ALIGN);
-#ifndef __ARCH_USE_MMU__
-				else
-					status = MAP_FAILED;
-#endif
-#ifdef _DL_PREAD
-				if (_dl_mmap_check_error(status) && piclib2map
-				    && (_DL_PREAD (infile, tryaddr, size,
-						   ppnt->p_offset & OFFS_ALIGN)
-					== size))
-					status = tryaddr;
-#endif
-				if (_dl_mmap_check_error(status)
-				    || (tryaddr && tryaddr != status)) {
-				cant_map:
-					_dl_dprintf(2, "%s:%i: can't map '%s'\n",
-						    _dl_progname, __LINE__, libname);
-					_dl_internal_error_number = LD_ERROR_MMAP_FAILED;
-					DL_LOADADDR_UNMAP (lib_loadaddr, maxvma - minvma);
-					_dl_close(infile);
-					_dl_munmap(header, _dl_pagesize);
-					return NULL;
-				}
-
-				if (! piclib2map) {
-					DL_INIT_LOADADDR_HDR
-						(lib_loadaddr, status
-						 + (ppnt->p_vaddr & ADDR_ALIGN), ppnt);
-				}
-				/* Now we want to allocate and
-				   zero-out any data from the end of
-				   the region we mapped in from the
-				   file (filesz) to the end of the
-				   loadable segment (memsz).  We may
-				   need additional pages for memsz,
-				   that we map in below, and we can
-				   count on the kernel to zero them
-				   out, but we have to zero out stuff
-				   in the last page that we mapped in
-				   from the file.  However, we can't
-				   assume to have actually obtained
-				   full pages from the kernel, since
-				   we didn't ask for them, and uClibc
-				   may not give us full pages for
-				   small allocations.  So only zero
-				   out up to memsz or the end of the
-				   page, whichever comes first.  */
-
-				/* CPNT is the beginning of the memsz
-				   portion not backed by filesz.  */
-				cpnt = (char *) (status + size);
-
-				/* MAP_SIZE is the address of the
-				   beginning of the next page.  */
-				map_size = (ppnt->p_vaddr + ppnt->p_filesz
-					    + ADDR_ALIGN) & PAGE_ALIGN;
-
-#ifndef MIN
-# define MIN(a,b) ((a) < (b) ? (a) : (b))
-#endif
-				_dl_memset (cpnt, 0,
-					    MIN (map_size
-						 - (ppnt->p_vaddr
-						    + ppnt->p_filesz),
-						 ppnt->p_memsz
-						 - ppnt->p_filesz));
-
-				if (map_size < ppnt->p_vaddr + ppnt->p_memsz
-				    && !piclib2map) {
-					tryaddr = map_size + (char*)(piclib ? libaddr : 0);
-					status = (char *) _dl_mmap(tryaddr,
-						ppnt->p_vaddr + ppnt->p_memsz - map_size,
-						LXFLAGS(ppnt->p_flags), flags | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
-					if (_dl_mmap_check_error(status)
-					    || tryaddr != status)
-						goto cant_map;
-				}
+				status = map_writeable (infile, ppnt, piclib, flags, libaddr);
+				if (status == NULL)
+					goto cant_map1;
 			} else {
 				tryaddr = (piclib == 2 ? 0
 					   : (char *) (ppnt->p_vaddr & PAGE_ALIGN)
@@ -664,11 +651,11 @@ struct elf_resolve *_dl_load_elf_shared_library(int secure,
 					 infile, ppnt->p_offset & OFFS_ALIGN);
 				if (_dl_mmap_check_error(status)
 				    || (tryaddr && tryaddr != status))
-					goto cant_map;
-				DL_INIT_LOADADDR_HDR
-					(lib_loadaddr, status
-					 + (ppnt->p_vaddr & ADDR_ALIGN), ppnt);
+					goto cant_map1;
 			}
+			DL_INIT_LOADADDR_HDR(lib_loadaddr,
+					     status + (ppnt->p_vaddr & ADDR_ALIGN),
+					     ppnt);
 
 			libaddr = (unsigned long) status;
@@ -677,7 +664,6 @@ struct elf_resolve *_dl_load_elf_shared_library(int secure,
 		}
 		ppnt++;
 	}
-	_dl_close(infile);
 
 	if (piclib) {
@@ -696,6 +682,7 @@ struct elf_resolve *_dl_load_elf_shared_library(int secure,
 		_dl_dprintf(2, "%s: '%s' is missing a dynamic section\n",
 			    _dl_progname, libname);
 		_dl_munmap(header, _dl_pagesize);
+		_dl_close(infile);
 		return NULL;
 	}
@@ -711,10 +698,23 @@ struct elf_resolve *_dl_load_elf_shared_library(int secure,
 		ppnt = (ElfW(Phdr) *)(intptr_t) & header[epnt->e_phoff];
 		for (i = 0; i < epnt->e_phnum; i++, ppnt++) {
 			if (ppnt->p_type == PT_LOAD && !(ppnt->p_flags & PF_W)) {
+#ifdef __ARCH_USE_MMU__
 				_dl_mprotect((void *) ((piclib ? libaddr : 0) +
 					     (ppnt->p_vaddr & PAGE_ALIGN)),
 					     (ppnt->p_vaddr & ADDR_ALIGN) + (unsigned long) ppnt->p_filesz,
 					     PROT_READ | PROT_WRITE | PROT_EXEC);
+#else
+				void *new_addr;
+				new_addr = map_writeable (infile, ppnt, piclib, flags, libaddr);
+				if (!new_addr) {
+					_dl_dprintf(_dl_debug_file, "Can't modify %s's text section.",
+						    libname);
+					_dl_exit(1);
+				}
+				DL_UPDATE_LOADADDR_HDR(lib_loadaddr,
+						       new_addr + (ppnt->p_vaddr & ADDR_ALIGN),
+						       ppnt);
+#endif
 			}
 		}
 #else
@@ -725,6 +725,8 @@ struct elf_resolve *_dl_load_elf_shared_library(int secure,
 #endif
 	}
 
+	_dl_close(infile);
+
 	tpnt = _dl_add_elf_hash_table(libname, lib_loadaddr, dynamic_info,
 			dynamic_addr, 0);
 	tpnt->relro_addr = relro_addr;
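
For readers following the arithmetic in map_writeable(), the sketch below is not part of the patch: it reworks the filesz-to-memsz zero-fill calculation as a standalone user-space program, with plain printf standing in for the loader's _dl_memset/_dl_mmap calls and local PAGE_SIZE/ADDR_ALIGN/PAGE_MASK_ macros standing in for the loader's page-mask macros (the example p_vaddr/p_filesz/p_memsz values are made up). It shows how the memset length is clamped to whichever comes first, the end of the page holding the last file-backed byte or the end of the segment, and when a further anonymous MAP_FIXED mapping is still required.

/* Illustrative only -- user-space rework of the bss arithmetic in
   map_writeable().  Assumes a 4 KiB page; the loader derives the
   masks from _dl_pagesize instead. */
#include <stdio.h>

#define PAGE_SIZE   4096UL
#define ADDR_ALIGN  (PAGE_SIZE - 1)   /* low bits: offset within a page */
#define PAGE_MASK_  (~ADDR_ALIGN)     /* stand-in for the patch's PAGE_ALIGN */
#define MIN(a, b)   ((a) < (b) ? (a) : (b))

int main(void)
{
	/* Example PT_LOAD values: file-backed data ends mid-page,
	   memsz (data + bss) extends past that page. */
	unsigned long p_vaddr  = 0x10f40;
	unsigned long p_filesz = 0x02c0;
	unsigned long p_memsz  = 0x3000;

	/* End of the page that holds the last file-backed byte. */
	unsigned long map_size = (p_vaddr + p_filesz + ADDR_ALIGN) & PAGE_MASK_;

	/* Zero only up to memsz or the end of that page, whichever comes
	   first; this is the MIN() passed to _dl_memset in the patch. */
	unsigned long zero_len = MIN(map_size - (p_vaddr + p_filesz),
				     p_memsz - p_filesz);

	printf("zero %lu bytes after filesz\n", zero_len);

	/* Anything past that page still needs an anonymous MAP_FIXED
	   mapping, which is the final _dl_mmap in map_writeable(). */
	if (map_size < p_vaddr + p_memsz)
		printf("map %lu more anonymous bytes starting at 0x%lx\n",
		       p_vaddr + p_memsz - map_size, map_size);
	return 0;
}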