[uClibc][PATCH] Preliminary patches for MIPS dynamic linker...
Steven J. Hill
sjhill at realitydiluted.com
Tue Mar 5 23:09:22 UTC 2002
This is the first patch set for a native MIPS dynamic linker for
uClibc. It does not work currently, but I'm debugging it right
now :). If you want to help, have at it! Cheers.
-Steve
diff -urN -x CVS uclibc/ldso/ldso/mips/README uclibc-patched/ldso/ldso/mips/README
--- uclibc/ldso/ldso/mips/README Wed Dec 31 18:00:00 1969
+++ uclibc-patched/ldso/ldso/mips/README Thu Feb 28 11:10:18 2002
@@ -0,0 +1,8 @@
+boot1_arch.h - taken from glibc-2.2.5 'sysdeps/mips/dl-machine.h' in function
+ '_start' around line 393
+elfinterp.c - taken from glibc-2.2.5 'sysdeps/mips/dl-machine.h'
+ld_syscalls.h - taken from Linux kernel 2.4.17, 'include/asm-mips/unistd.h'
+ld_sysdep.h - taken from glibc-2.2.5 'sysdeps/mips/dl-machine.h' in function
+ 'elf_machine_rel' around line 475
+resolve.S - taken from glibc-2.2.5 'sysdeps/mips/dl-machine.h' in function
+ '_dl_runtime_resolve' around line 335
diff -urN -x CVS uclibc/ldso/ldso/mips/boot1_arch.h uclibc-patched/ldso/ldso/mips/boot1_arch.h
--- uclibc/ldso/ldso/mips/boot1_arch.h Wed Dec 31 18:00:00 1969
+++ uclibc-patched/ldso/ldso/mips/boot1_arch.h Thu Feb 28 09:08:18 2002
@@ -0,0 +1,31 @@
+/* Any assembly language/system dependent hacks needed to setup boot1.c so it
+ * will work as expected and cope with whatever platform specific weirdness is
+ * needed for this architecture. */
+
+/* Override the default _dl_boot function, and replace it with a bit of asm.
+ * Then call the real _dl_boot function, which is now named _dl_boot2. */
+
+/*
+asm("\
+.text
+.globl _dl_boot
+_dl_boot:
+ .set noreorder
+ bltzal $0, 0f
+ nop
+0: .cpload $31
+ .set reorder
+ la $4, _DYNAMIC
+ sw $4, -0x7ff0($28)
+ move $4, $29
+ subu $29, 16
+ la $8, coff
+ bltzal $8, coff
+coff: subu $8, $31, $8
+ jal _dl_boot2
+");
+
+#define _dl_boot _dl_boot2
+#define DL_BOOT(X) static void * __attribute__ ((unused)) _dl_boot (X)
+*/
+#define DL_BOOT(X) void _dl_boot (X)
diff -urN -x CVS uclibc/ldso/ldso/mips/elfinterp.c uclibc-patched/ldso/ldso/mips/elfinterp.c
--- uclibc/ldso/ldso/mips/elfinterp.c Wed Dec 31 18:00:00 1969
+++ uclibc-patched/ldso/ldso/mips/elfinterp.c Thu Feb 28 10:10:57 2002
@@ -0,0 +1,244 @@
+/* Run an ELF binary on a linux system.
+
+ Copyright (C) 1993, Eric Youngdale.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+#ifndef VERBOSE_DLINKER
+#define VERBOSE_DLINKER
+#endif
+#ifdef VERBOSE_DLINKER
+static char *_dl_reltypes[] =
+ { "R_MIPS_NONE", "R_MIPS_16", "R_MIPS_32", "R_MIPS_REL32",
+ "R_MIPS_26", "R_MIPS_HI16", "R_MIPS_LO16", "R_MIPS_GPREL16",
+ "R_MIPS_LITERAL", "R_MIPS_GOT16", "R_MIPS_PC16", "R_MIPS_CALL16",
+ "R_MIPS_GPREL32", "R_MIPS_SHIFT5", "R_MIPS_SHIFT6", "R_MIPS_64",
+ "R_MIPS_GOT_DISP", "R_MIPS_GOT_PAGE", "R_MIPS_GOT_OFST",
+ "R_MIPS_GOT_HI16", "R_MIPS_GOT_LO16", "R_MIPS_SUB",
+ "R_MIPS_INSERT_A", "R_MIPS_INSERT_B", "R_MIPS_DELETE",
+ "R_MIPS_HIGHER", "R_MIPS_HIGHEST", "R_MIPS_CALL_HI16",
+ "R_MIPS_CALL_LO16", "R_MIPS_SCN_DISP", "R_MIPS_REL16",
+ "R_MIPS_ADD_IMMEDIATE", "R_MIPS_PJUMP", "R_MIPS_RELGOT",
+ "R_MIPS_JALR", "R_MIPS_NUM"
+};
+#endif
+
+/* Program to load an ELF binary on a linux system, and run it.
+ References to symbols in sharable libraries can be resolved by either
+ an ELF sharable library or a linux style of shared library. */
+
+/* Disclaimer: I have never seen any AT&T source code for SVr4, nor have
+ I ever taken any courses on internals. This program was developed using
+ information available through the book "UNIX SYSTEM V RELEASE 4,
+ Programmers guide: Ansi C and Programming Support Tools", which did
+ a more than adequate job of explaining everything required to get this
+ working. */
+
+
+extern int _dl_linux_resolve(void);
+
+unsigned long _dl_linux_resolver(struct elf_resolve *tpnt, int reloc_entry)
+{
+ int reloc_type;
+ Elf32_Rel *this_reloc;
+ char *strtab;
+ Elf32_Sym *symtab;
+ Elf32_Rel *rel_addr;
+ int symtab_index;
+ char *new_addr;
+ char **got_addr;
+ unsigned long instr_addr;
+
+ rel_addr = (Elf32_Rel *) (tpnt->dynamic_info[DT_JMPREL] + tpnt->loadaddr);
+
+ this_reloc = rel_addr + (reloc_entry >> 3);
+ reloc_type = ELF32_R_TYPE(this_reloc->r_info);
+ symtab_index = ELF32_R_SYM(this_reloc->r_info);
+
+ symtab = (Elf32_Sym *) (tpnt->dynamic_info[DT_SYMTAB] + tpnt->loadaddr);
+ strtab = (char *) (tpnt->dynamic_info[DT_STRTAB] + tpnt->loadaddr);
+
+ _dl_dprintf(2, "%s: Incorrect relocation type in jump relocations\n",
+ _dl_progname);
+ _dl_exit(1);
+
+ return (unsigned long) new_addr;
+}
+
+void _dl_parse_lazy_relocation_information(struct elf_resolve *tpnt,
+ unsigned long rel_addr, unsigned long rel_size, int type)
+{
+ int i;
+ char *strtab;
+ int reloc_type;
+ int symtab_index;
+ Elf32_Sym *symtab;
+ Elf32_Rel *rpnt;
+ unsigned long *reloc_addr;
+
+ /* Now parse the relocation information */
+ rpnt = (Elf32_Rel *) (rel_addr + tpnt->loadaddr);
+ rel_size = rel_size / sizeof(Elf32_Rel);
+
+ symtab =
+ (Elf32_Sym *) (tpnt->dynamic_info[DT_SYMTAB] + tpnt->loadaddr);
+ strtab = (char *) (tpnt->dynamic_info[DT_STRTAB] + tpnt->loadaddr);
+
+ for (i = 0; i < rel_size; i++, rpnt++) {
+ reloc_addr = (unsigned long *) (tpnt->loadaddr + (unsigned long) rpnt->r_offset);
+ reloc_type = ELF32_R_TYPE(rpnt->r_info);
+ symtab_index = ELF32_R_SYM(rpnt->r_info);
+
+ /* When the dynamic linker bootstrapped itself, it resolved some symbols.
+ Make sure we do not do them again */
+ if (!symtab_index && tpnt->libtype == program_interpreter)
+ continue;
+ if (symtab_index && tpnt->libtype == program_interpreter &&
+ _dl_symbol(strtab + symtab[symtab_index].st_name))
+ continue;
+
+ _dl_dprintf(2, "%s: (LAZY) can't handle reloc type ",
+ _dl_progname);
+#ifdef VERBOSE_DLINKER
+ _dl_dprintf(2, "%s ", _dl_reltypes[reloc_type]);
+#endif
+ if (symtab_index)
+ _dl_dprintf(2, "'%s'\n", strtab + symtab[symtab_index].st_name);
+ _dl_exit(1);
+ };
+}
+
+int _dl_parse_relocation_information(struct elf_resolve *tpnt,
+ unsigned long rel_addr, unsigned long rel_size, int type)
+{
+ int i;
+ char *strtab;
+ int reloc_type;
+ int goof = 0;
+ Elf32_Sym *symtab;
+ Elf32_Rel *rpnt;
+ unsigned long *reloc_addr;
+ unsigned long symbol_addr;
+ int symtab_index;
+
+ /* Now parse the relocation information */
+
+ rpnt = (Elf32_Rel *) (rel_addr + tpnt->loadaddr);
+ rel_size = rel_size / sizeof(Elf32_Rel);
+
+ symtab = (Elf32_Sym *) (tpnt->dynamic_info[DT_SYMTAB] + tpnt->loadaddr);
+ strtab = (char *) (tpnt->dynamic_info[DT_STRTAB] + tpnt->loadaddr);
+
+ for (i = 0; i < rel_size; i++, rpnt++) {
+ reloc_addr = (unsigned long *) (tpnt->loadaddr + (unsigned long) rpnt->r_offset);
+ reloc_type = ELF32_R_TYPE(rpnt->r_info);
+ symtab_index = ELF32_R_SYM(rpnt->r_info);
+ symbol_addr = 0;
+
+ if (!symtab_index && tpnt->libtype == program_interpreter)
+ continue;
+
+ if (symtab_index) {
+
+ if (tpnt->libtype == program_interpreter &&
+ _dl_symbol(strtab + symtab[symtab_index].st_name))
+ continue;
+
+ symbol_addr = (unsigned long) _dl_find_hash(strtab + symtab[symtab_index].st_name,
+ tpnt->symbol_scope, (unsigned long) reloc_addr,
+ (reloc_type == R_ARM_JUMP_SLOT ? tpnt : NULL), 0);
+
+ /*
+ * We want to allow undefined references to weak symbols - this might
+ * have been intentional. We should not be linking local symbols
+ * here, so all bases should be covered.
+ */
+ if (!symbol_addr && ELF32_ST_BIND(symtab[symtab_index].st_info) == STB_GLOBAL) {
+ _dl_dprintf(2, "%s: can't resolve symbol '%s'\n",
+ _dl_progname, strtab + symtab[symtab_index].st_name);
+ goof++;
+ }
+ }
+ switch (reloc_type) {
+ case R_MIPS_NONE:
+ break;
+ default:
+ _dl_dprintf(2, "%s: can't handle reloc type ", _dl_progname);
+#ifdef VERBOSE_DLINKER
+ _dl_dprintf(2, "%s ", _dl_reltypes[reloc_type]);
+#endif
+ if (symtab_index)
+ _dl_dprintf(2, "'%s'\n", strtab + symtab[symtab_index].st_name);
+ _dl_exit(1);
+ };
+
+ };
+ return goof;
+}
+
+
+/* This is done as a separate step, because there are cases where
+ information is first copied and later initialized. This results in
+ the wrong information being copied. Someone at Sun was complaining about
+ a bug in the handling of _COPY by SVr4, and this may in fact be what he
+ was talking about. Sigh. */
+
+/* No, there are cases where the SVr4 linker fails to emit COPY relocs
+ at all */
+
+int _dl_parse_copy_information(struct dyn_elf *xpnt, unsigned long rel_addr,
+ unsigned long rel_size, int type)
+{
+ int i;
+ char *strtab;
+ int reloc_type;
+ int goof = 0;
+ Elf32_Sym *symtab;
+ Elf32_Rel *rpnt;
+ unsigned long *reloc_addr;
+ unsigned long symbol_addr;
+ struct elf_resolve *tpnt;
+ int symtab_index;
+
+ /* Now parse the relocation information */
+
+ tpnt = xpnt->dyn;
+
+ rpnt = (Elf32_Rel *) (rel_addr + tpnt->loadaddr);
+ rel_size = rel_size / sizeof(Elf32_Rel);
+
+ symtab = (Elf32_Sym *) (tpnt->dynamic_info[DT_SYMTAB] + tpnt->loadaddr);
+ strtab = (char *) (tpnt->dynamic_info[DT_STRTAB] + tpnt->loadaddr);
+
+ for (i = 0; i < rel_size; i++, rpnt++) {
+ reloc_addr = (unsigned long *) (tpnt->loadaddr + (unsigned long) rpnt->r_offset);
+ reloc_type = ELF32_R_TYPE(rpnt->r_info);
+ symtab_index = ELF32_R_SYM(rpnt->r_info);
+ symbol_addr = 0;
+ if (!symtab_index && tpnt->libtype == program_interpreter)
+ continue;
+ if (symtab_index) {
+ symbol_addr = (unsigned long) _dl_find_hash(strtab +
+ symtab[symtab_index].st_name, xpnt->next,
+ (unsigned long) reloc_addr, NULL, 1);
+ if (!symbol_addr) {
+ _dl_dprintf(2, "%s: can't resolve symbol '%s'\n",
+ _dl_progname, strtab + symtab[symtab_index].st_name);
+ goof++;
+ };
+ };
+ };
+ return goof;
+}
diff -urN -x CVS uclibc/ldso/ldso/mips/ld_syscalls.h uclibc-patched/ldso/ldso/mips/ld_syscalls.h
--- uclibc/ldso/ldso/mips/ld_syscalls.h Wed Dec 31 18:00:00 1969
+++ uclibc-patched/ldso/ldso/mips/ld_syscalls.h Thu Feb 28 11:05:18 2002
@@ -0,0 +1,136 @@
+/*
+ * This file contains the system call macros and syscall
+ * numbers used by the shared library loader. Taken from
+ * Linux/MIPS 2.4.17 version kernel.
+ */
+
+#define __NR_SYSCALL_BASE 4000
+
+#define __NR_exit (__NR_SYSCALL_BASE + 1)
+#define __NR_read (__NR_SYSCALL_BASE + 3)
+#define __NR_write (__NR_SYSCALL_BASE + 4)
+#define __NR_open (__NR_SYSCALL_BASE + 5)
+#define __NR_close (__NR_SYSCALL_BASE + 6)
+#define __NR_getuid (__NR_SYSCALL_BASE + 24)
+#define __NR_getgid (__NR_SYSCALL_BASE + 47)
+#define __NR_geteuid (__NR_SYSCALL_BASE + 49)
+#define __NR_getegid (__NR_SYSCALL_BASE + 50)
+#define __NR_mmap (__NR_SYSCALL_BASE + 90)
+#define __NR_munmap (__NR_SYSCALL_BASE + 91)
+#define __NR_stat (__NR_SYSCALL_BASE + 106)
+#define __NR_mprotect (__NR_SYSCALL_BASE + 125)
+
+/* Here are the macros which define how this platform makes
+ * system calls. This particular variant does _not_ set
+ * errno (note how it is disabled in __syscall_return) since
+ * these will get called before the errno symbol is dynamically
+ * linked. */
+
+#define _syscall0(type,name) \
+type name(void) \
+{ \
+ register unsigned long __v0 asm("$2") = __NR_##name; \
+ register unsigned long __a3 asm("$7"); \
+ \
+ __asm__ volatile ( \
+ ".set\tnoreorder\n\t" \
+ "li\t$2, %2\t\t\t# " #name "\n\t" \
+ "syscall\n\t" \
+ ".set\treorder" \
+ : "=&r" (__v0), "=r" (__a3) \
+ : "i" (__NR_##name) \
+ : "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24"); \
+ \
+ if (__a3 == 0) \
+ return (type) __v0; \
+ return -1; \
+}
+
+#define _syscall1(type,name,atype,a) \
+type name(atype a) \
+{ \
+ register unsigned long __v0 asm("$2") = __NR_##name; \
+ register unsigned long __a0 asm("$4") = (unsigned long) a; \
+ register unsigned long __a3 asm("$7"); \
+ \
+ __asm__ volatile ( \
+ ".set\tnoreorder\n\t" \
+ "li\t$2, %3\t\t\t# " #name "\n\t" \
+ "syscall\n\t" \
+ ".set\treorder" \
+ : "=&r" (__v0), "=r" (__a3) \
+ : "r" (__a0), "i" (__NR_##name) \
+ : "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24"); \
+ \
+ if (__a3 == 0) \
+ return (type) __v0; \
+ return (type) -1; \
+}
+
+#define _syscall2(type,name,atype,a,btype,b) \
+type name(atype a, btype b) \
+{ \
+ register unsigned long __v0 asm("$2") = __NR_##name; \
+ register unsigned long __a0 asm("$4") = (unsigned long) a; \
+ register unsigned long __a1 asm("$5") = (unsigned long) b; \
+ register unsigned long __a3 asm("$7"); \
+ \
+ __asm__ volatile ( \
+ ".set\tnoreorder\n\t" \
+ "li\t$2, %4\t\t\t# " #name "\n\t" \
+ "syscall\n\t" \
+ ".set\treorder" \
+ : "=&r" (__v0), "=r" (__a3) \
+ : "r" (__a0), "r" (__a1), "i" (__NR_##name) \
+ : "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24"); \
+ \
+ if (__a3 == 0) \
+ return (type) __v0; \
+ return (type) -1; \
+}
+
+#define _syscall3(type,name,atype,a,btype,b,ctype,c) \
+type name(atype a, btype b, ctype c) \
+{ \
+ register unsigned long __v0 asm("$2") = __NR_##name; \
+ register unsigned long __a0 asm("$4") = (unsigned long) a; \
+ register unsigned long __a1 asm("$5") = (unsigned long) b; \
+ register unsigned long __a2 asm("$6") = (unsigned long) c; \
+ register unsigned long __a3 asm("$7"); \
+ \
+ __asm__ volatile ( \
+ ".set\tnoreorder\n\t" \
+ "li\t$2, %5\t\t\t# " #name "\n\t" \
+ "syscall\n\t" \
+ ".set\treorder" \
+ : "=&r" (__v0), "=r" (__a3) \
+ : "r" (__a0), "r" (__a1), "r" (__a2), "i" (__NR_##name) \
+ : "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24"); \
+ \
+ if (__a3 == 0) \
+ return (type) __v0; \
+ return (type) -1; \
+}
+
+#define _syscall4(type,name,atype,a,btype,b,ctype,c,dtype,d) \
+type name(atype a, btype b, ctype c, dtype d) \
+{ \
+ register unsigned long __v0 asm("$2") = __NR_##name; \
+ register unsigned long __a0 asm("$4") = (unsigned long) a; \
+ register unsigned long __a1 asm("$5") = (unsigned long) b; \
+ register unsigned long __a2 asm("$6") = (unsigned long) c; \
+ register unsigned long __a3 asm("$7") = (unsigned long) d; \
+ \
+ __asm__ volatile ( \
+ ".set\tnoreorder\n\t" \
+ "li\t$2, %5\t\t\t# " #name "\n\t" \
+ "syscall\n\t" \
+ ".set\treorder" \
+ : "=&r" (__v0), "+r" (__a3) \
+ : "r" (__a0), "r" (__a1), "r" (__a2), "i" (__NR_##name) \
+ : "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24"); \
+ \
+ if (__a3 == 0) \
+ return (type) __v0; \
+ return (type) -1; \
+}
diff -urN -x CVS uclibc/ldso/ldso/mips/ld_sysdep.h uclibc-patched/ldso/ldso/mips/ld_sysdep.h
--- uclibc/ldso/ldso/mips/ld_sysdep.h Wed Dec 31 18:00:00 1969
+++ uclibc-patched/ldso/ldso/mips/ld_sysdep.h Thu Feb 28 11:09:30 2002
@@ -0,0 +1,67 @@
+/*
+ * Various assembly language/system dependent hacks that are required
+ * so that we can minimize the amount of platform specific code.
+ */
+
+/*
+ * Define this if the system uses RELOCA.
+ */
+#undef ELF_USES_RELOCA
+
+/*
+ * Get a pointer to the argv array. On many platforms this can be just
+ * the address of the first argument, on other platforms we need to
+ * do something a little more subtle here.
+ */
+#define GET_ARGV(ARGVP, ARGS) ARGVP = ((unsigned long*) ARGS)
+
+/*
+ * Initialization sequence for a GOT.
+ */
+#define INIT_GOT(GOT_BASE,MODULE) \
+{ \
+ GOT_BASE[2] = (unsigned long) _dl_linux_resolve; \
+ GOT_BASE[1] = (unsigned long) MODULE; \
+}
+
+/*
+ * Here is a macro to perform a relocation. This is only used when
+ * bootstrapping the dynamic loader. RELP is the relocation that we
+ * are performing, REL is the pointer to the address we are relocating.
+ * SYMBOL is the symbol involved in the relocation, and LOAD is the
+ * load address.
+ */
+#define PERFORM_BOOTSTRAP_RELOC(RELP,REL,SYMBOL,LOAD) \
+ switch(ELF32_R_TYPE((RELP)->r_info)){ \
+ case R_MIPS_REL32: \
+ *REL += SYMBOL; \
+ break; \
+ case R_MIPS_NONE: \
+ break; \
+ default: \
+ SEND_STDERR("Aiieeee!"); \
+ _dl_exit(1); \
+ }
+
+
+/*
+ * Transfer control to the user's application, once the dynamic loader
+ * is done. This routine has to exit the current function, then
+ * call the _dl_elf_main function.
+ */
+
+#define START() return (void) _dl_elf_main;
+
+
+
+/* Here we define the magic numbers that this dynamic loader should accept */
+
+#define MAGIC1 EM_MIPS
+#define MAGIC2 EM_MIPS_RS3_LE
+/* Used for error messages */
+#define ELF_TARGET "MIPS"
+
+struct elf_resolve;
+unsigned long _dl_linux_resolver(struct elf_resolve * tpnt, int reloc_entry);
+
+#define do_rem(result, n, base) result = (n % base)
diff -urN -x CVS uclibc/ldso/ldso/mips/resolve.S uclibc-patched/ldso/ldso/mips/resolve.S
--- uclibc/ldso/ldso/mips/resolve.S Wed Dec 31 18:00:00 1969
+++ uclibc-patched/ldso/ldso/mips/resolve.S Thu Feb 28 10:53:37 2002
@@ -0,0 +1,48 @@
+/*
+ * Linux dynamic resolving code for MIPS. Fixes up the GOT entry as
+ * indicated in register t8 and jumps to the resolved address. Shamelessly
+ * ripped from 'sysdeps/mips/dl-machine.h' in glibc-2.2.5.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING.LIB" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1996-2001 Kazumoto Kojima <kkojima at info.kanagawa-u.ac.jp>
+ * Copyright (C) 2002 Steven J. Hill <sjhill at realitydiluted.com>
+ *
+ */
+.text
+.align 2
+.globl _dl_linux_resolve
+.type _dl_linux_resolve,@function
+.ent _dl_linux_resolve
+_dl_linux_resolve:
+ .frame $29, 40, $31
+ .set noreorder
+ move $3, $28 # Save GP
+ addu $25, 8 # t9 ($25) now points at .cpload instruction
+ .cpload $25 # Compute GP
+ .set reorder
+ move $2, $31 # Save slot call pc
+ subu $29, 40 # Save arguments and sp value in stack
+ .cprestore 32
+ sw $15, 36($29)
+ sw $4, 16($29)
+ sw $5, 20($29)
+ sw $6, 24($29)
+ sw $7, 28($29)
+ move $4, $24
+ move $5, $15
+ move $6, $3
+ move $7, $2
+ jal _dl_linux_resolver
+ lw $31, 36($29)
+ lw $4, 16($29)
+ lw $5, 20($29)
+ lw $6, 24($29)
+ lw $7, 28($29)
+ addu $29, 40
+ move $25, $2
+ jr $25
+.size _dl_linux_resolve,.-_dl_linux_resolve
+.end _dl_linux_resolve
More information about the uClibc
mailing list