svn commit: branches/uClibc-nptl/ldso/ldso

kraj at uclibc.org
Fri Jul 11 07:57:23 UTC 2008


Author: kraj
Date: 2008-07-11 00:57:22 -0700 (Fri, 11 Jul 2008)
New Revision: 22770

Log:
Signed-off-by: Khem Raj <raj.khem at gmail.com>
Replace code that could force gcc to use libgcc division functions; those libgcc functions in turn call 'raise', which is expected to be provided by the uClibc we are compiling. This avoids the circular dependency.
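
(Illustration only, not part of the commit: dividing or taking a remainder by the alignment can make gcc emit calls to libgcc helpers such as __udivsi3 / __aeabi_uidiv on targets without a hardware divider. Since the alignments here are powers of two, the same rounding can be done with add/and only. A minimal standalone sketch of the equivalence; the names n, align, by_div and by_mask are illustrative.)

#include <assert.h>
#include <stddef.h>

/* Round n up to the next multiple of p, where p is a power of two.
   Uses only add/and, so no libgcc division helper is needed. */
#define roundup_pow2(n, p) (((n) + (p) - 1) & ~((p) - 1))

int main(void)
{
	size_t align = 16;	/* must be a power of two */
	size_t n = 7;

	/* Division-based rounding: on CPUs without a hardware divider,
	   gcc may lower this to a libgcc helper call. */
	size_t by_div = ((n + align - 1) / align) * align;

	/* Mask-based rounding, as now used in dl-tls.c and ldso.c: */
	size_t by_mask = roundup_pow2(n, align);

	assert(by_div == by_mask);	/* both are 16 */
	return 0;
}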


Modified:
   branches/uClibc-nptl/ldso/ldso/dl-tls.c
   branches/uClibc-nptl/ldso/ldso/ldso.c


Changeset:
Modified: branches/uClibc-nptl/ldso/ldso/dl-tls.c
===================================================================
--- branches/uClibc-nptl/ldso/ldso/dl-tls.c	2008-07-10 23:42:36 UTC (rev 22769)
+++ branches/uClibc-nptl/ldso/ldso/dl-tls.c	2008-07-11 07:57:22 UTC (rev 22770)
@@ -33,11 +33,15 @@
 void *(*_dl_calloc_function) (size_t __nmemb, size_t __size) = NULL;
 void *(*_dl_realloc_function) (void *__ptr, size_t __size) = NULL;
 void *(*_dl_memalign_function) (size_t __boundary, size_t __size) = NULL;
-extern void (*_dl_free_function) (void *__ptr);
 
+void (*_dl_free_function) (void *__ptr);
 void *_dl_memalign (size_t __boundary, size_t __size);
 struct link_map *_dl_update_slotinfo (unsigned long int req_modid);
 
+/* Round up N to the nearest multiple of P, where P is a power of 2
+   --- without using libgcc division routines.  */
+#define roundup_pow2(n, p) (((n) + (p) - 1) & ~((p) - 1))
+
 void *
 _dl_calloc (size_t __nmemb, size_t __size)
 {
@@ -47,12 +51,6 @@
 	if (_dl_calloc_function)
 		return (*_dl_calloc_function) (__nmemb, __size);
 
-	if (__nmemb && __size != (size / __nmemb)) {
-			_dl_dprintf(2, "%si:%i: unaligned structures\n",
-				__FUNCTION__, __LINE__);
-			_dl_exit(1);
-	}
-	
 	if ((result = _dl_malloc(size)) != NULL) {
 		_dl_memset(result, 0, size);
 	}
@@ -141,9 +139,9 @@
 	if (freebytes < blsize)
 		goto fail;
 
-	n = (freebytes - blsize) / map->l_tls_align;
+	n = (freebytes - blsize) & ~(map->l_tls_align - 1);
 
-	size_t offset = _dl_tls_static_used + (freebytes - n * map->l_tls_align
+	size_t offset = _dl_tls_static_used + (freebytes - n
 		- map->l_tls_firstbyte_offset);
 
 	map->l_tls_offset = _dl_tls_static_used = offset;
@@ -151,7 +149,7 @@
 	size_t used;
 	size_t check;
 
-	size_t offset = roundup (_dl_tls_static_used, map->l_tls_align);
+	size_t offset = roundup_pow2 (_dl_tls_static_used, map->l_tls_align);
 	used = offset + map->l_tls_blocksize;
 	check = used;
 
@@ -185,6 +183,7 @@
 		map->l_need_tls_init = 1;
 }
 
+#ifdef SHARED
 /* Initialize static TLS area and DTV for current (only) thread.
    libpthread implementations should provide their own hook
    to handle all threads.  */
@@ -214,6 +213,7 @@
 	_dl_memset((dest + map->l_tls_initimage_size), '\0',
 		map->l_tls_blocksize - map->l_tls_initimage_size);
 }
+#endif
 
 /* Taken from glibc/sysdeps/generic/dl-tls.c */
 static void
@@ -331,7 +331,7 @@
   /* We simply start with zero.  */
   size_t offset = 0;
 
-  for (size_t cnt = 0; slotinfo[cnt].map != NULL; ++cnt)
+  for (size_t cnt = 1; slotinfo[cnt].map != NULL; ++cnt)
     {
       _dl_assert (cnt < _dl_tls_dtv_slotinfo_list->len);
 
@@ -342,8 +342,8 @@
 
       if (freebottom - freetop >= slotinfo[cnt].map->l_tls_blocksize)
 	{
-	  off = roundup (freetop + slotinfo[cnt].map->l_tls_blocksize
-			 - firstbyte, slotinfo[cnt].map->l_tls_align)
+	  off = roundup_pow2 (freetop + slotinfo[cnt].map->l_tls_blocksize
+                          - firstbyte, slotinfo[cnt].map->l_tls_align)
 		+ firstbyte;
 	  if (off <= freebottom)
 	    {
@@ -356,8 +356,9 @@
 	    }
 	}
 
-      off = roundup (offset + slotinfo[cnt].map->l_tls_blocksize - firstbyte,
-		     slotinfo[cnt].map->l_tls_align) + firstbyte;
+      off = roundup_pow2 (offset + slotinfo[cnt].map->l_tls_blocksize
+                          - firstbyte, slotinfo[cnt].map->l_tls_align)
+            + firstbyte;
       if (off > offset + slotinfo[cnt].map->l_tls_blocksize
 		+ (freebottom - freetop))
 	{
@@ -372,14 +373,13 @@
     }
 
   _dl_tls_static_used = offset;
-  _dl_tls_static_size = (roundup (offset + TLS_STATIC_SURPLUS, max_align)
+  _dl_tls_static_size = (roundup_pow2 (offset + TLS_STATIC_SURPLUS, max_align)
 			    + TLS_TCB_SIZE);
 # elif TLS_DTV_AT_TP
   /* The TLS blocks start right after the TCB.  */
   size_t offset = TLS_TCB_SIZE;
   size_t cnt;
 
-  /* The first slot is never used */
   for (cnt = 1; slotinfo[cnt].map != NULL; ++cnt)
     {
       _dl_assert (cnt < _dl_tls_dtv_slotinfo_list->len);
@@ -391,7 +391,7 @@
 
       if (slotinfo[cnt].map->l_tls_blocksize <= freetop - freebottom)
 	{
-	  off = roundup (freebottom, slotinfo[cnt].map->l_tls_align);
+	  off = roundup_pow2 (freebottom, slotinfo[cnt].map->l_tls_align);
 	  if (off - freebottom < firstbyte)
 	    off += slotinfo[cnt].map->l_tls_align;
 	  if (off + slotinfo[cnt].map->l_tls_blocksize - firstbyte <= freetop)
@@ -403,7 +403,7 @@
 	    }
 	}
 
-      off = roundup (offset, slotinfo[cnt].map->l_tls_align);
+      off = roundup_pow2 (offset, slotinfo[cnt].map->l_tls_align);
       if (off - offset < firstbyte)
 	off += slotinfo[cnt].map->l_tls_align;
 
@@ -418,8 +418,8 @@
     }
 
   _dl_tls_static_used = offset;
-  _dl_tls_static_size = roundup (offset + TLS_STATIC_SURPLUS,
-				    TLS_TCB_ALIGN);
+  _dl_tls_static_size = roundup_pow2 (offset + TLS_STATIC_SURPLUS,
+                                      TLS_TCB_ALIGN);
 # else
 #  error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
 # endif
@@ -431,6 +431,7 @@
 /* This is called only when the data structure setup was skipped at startup,
    when there was no need for it then.  Now we have dynamically loaded
    something needing TLS, or libpthread needs it.  */
+hidden_proto(_dl_tls_setup)
 int
 internal_function
 _dl_tls_setup (void)
@@ -457,7 +458,7 @@
 
   return 0;
 }
-rtld_hidden_def (_dl_tls_setup)
+hidden_def (_dl_tls_setup)
 
 static void *
 internal_function
@@ -944,6 +945,11 @@
   /* Add the information into the slotinfo data structure.  */
   listp->slotinfo[idx].map = l;
   listp->slotinfo[idx].gen = _dl_tls_generation + 1;
+  /* ??? ideally this would be done once per call to dlopen.  However there's
+     no easy way to indicate whether a library used TLS, so do it here
+	 instead. */
+  /* Bump the TLS generation number.  */
+  _dl_tls_generation++;
 }
 
 /* Taken from glibc/elf/rtld.c */
@@ -958,6 +964,9 @@
 	return &data;
 }
 
+#ifdef SHARED
+void *init_tls (void);
+hidden_proto(init_tls)
 void *
 internal_function
 init_tls (void)
@@ -976,6 +985,7 @@
 	size_t nelem = _dl_tls_max_dtv_idx + 1 + TLS_SLOTINFO_SURPLUS;
 
 	/* Allocate.  */
+    _dl_assert (_dl_tls_dtv_slotinfo_list == NULL);
 	_dl_tls_dtv_slotinfo_list = (struct dtv_slotinfo_list *)
 		_dl_calloc (sizeof (struct dtv_slotinfo_list)
 			+ nelem * sizeof (struct dtv_slotinfo), 1);
@@ -988,18 +998,19 @@
 
 	/* Fill in the information from the loaded modules.  No namespace
 	   but the base one can be filled at this time.  */
-	int i = 0;
-	struct link_map *l;
-	for (l =  (struct link_map *) _dl_loaded_modules; l != NULL; l = l->l_next)
+	int i = 1;                  /* The first module ID is 1. */
+	struct elf_resolve *l;
+	for (l =  _dl_loaded_modules; l != NULL; l = l->next)
 		if (l->l_tls_blocksize != 0)
 		{
 			/* This is a module with TLS data.  Store the map reference.
 			   The generation counter is zero.  */
-
-			/* Skeep slot[0]: it will be never used */			
-			slotinfo[++i].map = l;
+			_dl_assert (i == l->l_tls_modid);
+			slotinfo[i].map = (struct link_map *) l;
+			/* slotinfo[i].gen = 0; */
+			i++;
 		}
-	_dl_assert (i == _dl_tls_max_dtv_idx);
+	_dl_assert (i == _dl_tls_max_dtv_idx + 1);
 
 	/* Compute the TLS offsets for the various blocks.  */
 	_dl_determine_tlsoffset ();
@@ -1021,7 +1032,7 @@
 
 	/* And finally install it for the main thread.  If ld.so itself uses
 	   TLS we know the thread pointer was initialized earlier.  */
-	const char *lossage = (char *) TLS_INIT_TP (tcbp, USE___THREAD);
+	const char *lossage = TLS_INIT_TP (tcbp, USE___THREAD);
 	if(__builtin_expect (lossage != NULL, 0)) {
 		_dl_debug_early("cannot set up thread-local storage: %s\n", lossage);
 		_dl_exit(30);
@@ -1030,3 +1041,6 @@
 
 	return tcbp;
 }
+hidden_def (init_tls)
+#endif
+

Modified: branches/uClibc-nptl/ldso/ldso/ldso.c
===================================================================
--- branches/uClibc-nptl/ldso/ldso/ldso.c	2008-07-10 23:42:36 UTC (rev 22769)
+++ branches/uClibc-nptl/ldso/ldso/ldso.c	2008-07-11 07:57:22 UTC (rev 22770)
@@ -1062,7 +1062,7 @@
 		rounded = (1 << i++);
 	}
 
-	delta = (((size_t) _dl_malloc_addr + __size) % rounded);
+	delta = (((size_t) _dl_malloc_addr + __size) & (rounded - 1));
 
 	if ((result = _dl_malloc(rounded - delta)) == NULL)
 		return result;



