[PATCH] ldso: use _dl_pagesize for library-loading addresses and offsets
Jeremy Kerr
jk at ozlabs.org
Tue Nov 25 10:37:43 UTC 2008
Some powerpc machines can support 64k pages, enabled by the
CONFIG_64K_PAGES option in linux.
However, the uClibc dynamic loader won't currently work on these
machines, as it uses hard-coded values (PAGE_ALIGN, ADDR_ALIGN and
OFFS_ALIGN) in the ldso architecture-specific headers. When running on
a kernel with 64k pages, ld.so tries to mmap with 4k-aligned addresses,
rather than 64k, so mmap fails with -EINVAL.
When booting a 64k machine with a uClibc dynamic linker, init fails
with:
/init:500: can't map '/lib/libc.so.0'
/init:500: can't map '/lib/libc.so.0'
/init:500: can't map '/lib/libc.so.0'
/init: can't load library 'libc.so.0'
Kernel panic - not syncing: Attempted to kill init!
This change allows ld.so to determine these alignment masks at runtime,
rather than compile-time. Since we have the _dl_pagesize variable
available, we can use that to generate the appropriate masks.
This allows me to start a uClibc-based root fs on a 64k machine.
Signed-off-by: Jeremy Kerr <jk at ozlabs.org>
---
ldso/ldso/dl-elf.c | 39 ++++++++++++++++++++++-----------------
1 file changed, 22 insertions(+), 17 deletions(-)
Index: uClibc/ldso/ldso/dl-elf.c
===================================================================
--- uClibc.orig/ldso/ldso/dl-elf.c 2008-11-25 20:19:19.000000000 +1100
+++ uClibc/ldso/ldso/dl-elf.c 2008-11-25 20:22:26.000000000 +1100
@@ -336,6 +336,7 @@ struct elf_resolve *_dl_load_elf_shared_
unsigned long dynamic_info[DYNAMIC_SIZE];
unsigned long *lpnt;
unsigned long libaddr;
+ unsigned long page_align, addr_align, offs_align;
unsigned long minvma = 0xffffffff, maxvma = 0;
int i, flags, piclib, infile;
ElfW(Addr) relro_addr = 0;
@@ -442,7 +443,11 @@ struct elf_resolve *_dl_load_elf_shared_
DL_CHECK_LIB_TYPE (epnt, piclib, _dl_progname, libname);
- maxvma = (maxvma + ADDR_ALIGN) & ~ADDR_ALIGN;
+ addr_align = _dl_pagesize - 1;
+ page_align = ~addr_align;
+ offs_align = page_align & ~(1 << 31);
+
+ maxvma = (maxvma + addr_align) & ~addr_align;
minvma = minvma & ~0xffffU;
flags = MAP_PRIVATE /*| MAP_DENYWRITE */ ;
@@ -517,15 +522,15 @@ struct elf_resolve *_dl_load_elf_shared_
aligned pages:
((ppnt->p_vaddr + ppnt->p_filesz
- + ADDR_ALIGN)
- & PAGE_ALIGN)
+ + addr_align)
+ & page_align)
< ppnt->p_vaddr + ppnt->p_memsz)
Instead, we have to do this: */
ppnt->p_filesz < ppnt->p_memsz)
{
piclib2map = (char *)
- _dl_mmap(0, (ppnt->p_vaddr & ADDR_ALIGN)
+ _dl_mmap(0, (ppnt->p_vaddr & addr_align)
+ ppnt->p_memsz,
LXFLAGS(ppnt->p_flags),
flags | MAP_ANONYMOUS, -1, 0);
@@ -533,14 +538,14 @@ struct elf_resolve *_dl_load_elf_shared_
goto cant_map;
DL_INIT_LOADADDR_HDR
(lib_loadaddr, piclib2map
- + (ppnt->p_vaddr & ADDR_ALIGN), ppnt);
+ + (ppnt->p_vaddr & addr_align), ppnt);
}
tryaddr = piclib == 2 ? piclib2map
: ((char*) (piclib ? libaddr : 0) +
- (ppnt->p_vaddr & PAGE_ALIGN));
+ (ppnt->p_vaddr & page_align));
- size = (ppnt->p_vaddr & ADDR_ALIGN)
+ size = (ppnt->p_vaddr & addr_align)
+ ppnt->p_filesz;
/* For !MMU, mmap to fixed address will fail.
@@ -553,7 +558,7 @@ struct elf_resolve *_dl_load_elf_shared_
status = (char *) _dl_mmap
(tryaddr, size, LXFLAGS(ppnt->p_flags),
flags | (piclib2map ? MAP_FIXED : 0),
- infile, ppnt->p_offset & OFFS_ALIGN);
+ infile, ppnt->p_offset & offs_align);
#ifndef __ARCH_USE_MMU__
else
status = MAP_FAILED;
@@ -561,7 +566,7 @@ struct elf_resolve *_dl_load_elf_shared_
#ifdef _DL_PREAD
if (_dl_mmap_check_error(status) && piclib2map
&& (_DL_PREAD (infile, tryaddr, size,
- ppnt->p_offset & OFFS_ALIGN)
+ ppnt->p_offset & offs_align)
== size))
status = tryaddr;
#endif
@@ -580,7 +585,7 @@ struct elf_resolve *_dl_load_elf_shared_
if (! piclib2map) {
DL_INIT_LOADADDR_HDR
(lib_loadaddr, status
- + (ppnt->p_vaddr & ADDR_ALIGN), ppnt);
+ + (ppnt->p_vaddr & addr_align), ppnt);
}
/* Now we want to allocate and
zero-out any data from the end of
@@ -608,7 +613,7 @@ struct elf_resolve *_dl_load_elf_shared_
/* MAP_SIZE is the address of the
beginning of the next page. */
map_size = (ppnt->p_vaddr + ppnt->p_filesz
- + ADDR_ALIGN) & PAGE_ALIGN;
+ + addr_align) & page_align;
#ifndef MIN
# define MIN(a,b) ((a) < (b) ? (a) : (b))
@@ -632,20 +637,20 @@ struct elf_resolve *_dl_load_elf_shared_
}
} else {
tryaddr = (piclib == 2 ? 0
- : (char *) (ppnt->p_vaddr & PAGE_ALIGN)
+ : (char *) (ppnt->p_vaddr & page_align)
+ (piclib ? libaddr : 0));
- size = (ppnt->p_vaddr & ADDR_ALIGN) + ppnt->p_filesz;
+ size = (ppnt->p_vaddr & addr_align) + ppnt->p_filesz;
status = (char *) _dl_mmap
(tryaddr, size, LXFLAGS(ppnt->p_flags),
flags | (piclib == 2 ? MAP_EXECUTABLE
| MAP_DENYWRITE : 0),
- infile, ppnt->p_offset & OFFS_ALIGN);
+ infile, ppnt->p_offset & offs_align);
if (_dl_mmap_check_error(status)
|| (tryaddr && tryaddr != status))
goto cant_map;
DL_INIT_LOADADDR_HDR
(lib_loadaddr, status
- + (ppnt->p_vaddr & ADDR_ALIGN), ppnt);
+ + (ppnt->p_vaddr & addr_align), ppnt);
}
/* if (libaddr == 0 && piclib) {
@@ -690,8 +695,8 @@ struct elf_resolve *_dl_load_elf_shared_
for (i = 0; i < epnt->e_phnum; i++, ppnt++) {
if (ppnt->p_type == PT_LOAD && !(ppnt->p_flags & PF_W))
_dl_mprotect((void *) ((piclib ? libaddr : 0) +
- (ppnt->p_vaddr & PAGE_ALIGN)),
- (ppnt->p_vaddr & ADDR_ALIGN) + (unsigned long) ppnt->p_filesz,
+ (ppnt->p_vaddr & page_align)),
+ (ppnt->p_vaddr & addr_align) + (unsigned long) ppnt->p_filesz,
PROT_READ | PROT_WRITE | PROT_EXEC);
}
#else
More information about the uClibc
mailing list