svn commit: trunk/uClibc/libpthread/linuxthreads

vapier at uclibc.org
Thu Sep 8 03:14:25 UTC 2005


Author: vapier
Date: 2005-09-07 20:14:23 -0700 (Wed, 07 Sep 2005)
New Revision: 11383

Log:
import misc defines from glibc

Modified:
   trunk/uClibc/libpthread/linuxthreads/internals.h


Changeset:
Modified: trunk/uClibc/libpthread/linuxthreads/internals.h
===================================================================
--- trunk/uClibc/libpthread/linuxthreads/internals.h	2005-09-08 03:11:58 UTC (rev 11382)
+++ trunk/uClibc/libpthread/linuxthreads/internals.h	2005-09-08 03:14:23 UTC (rev 11383)
@@ -13,7 +13,7 @@
 /* GNU Library General Public License for more details.                 */
 
 #ifndef _INTERNALS_H
-#define _INTERNALS_H   1
+#define _INTERNALS_H	1
 
 /* Internal data structures */
 
@@ -24,6 +24,7 @@
 #include <setjmp.h>
 #include <signal.h>
 #include <unistd.h>
+#include <bits/stackinfo.h>
 #include <sys/types.h>
 #include "pt-machine.h"
 #include "semaphore.h"
@@ -279,6 +280,11 @@
 
 extern int __pthread_manager_reader;
 
+#ifdef FLOATING_STACKS
+/* Maximum stack size.  */
+extern size_t __pthread_max_stacksize;
+#endif
+
 /* Limits of the thread manager stack. */
 
 extern char *__pthread_manager_thread_bos;
@@ -298,6 +304,9 @@
 /* Pointer to descriptor of thread with last event.  */
 extern volatile pthread_descr __pthread_last_event;
 
+/* Flag which tells whether we are executing on SMP kernel. */
+extern int __pthread_smp_kernel;
+
 /* Return the handle corresponding to a thread id */
 
 static inline pthread_handle thread_handle(pthread_t id)
@@ -314,14 +323,30 @@
 
 /* Fill in defaults left unspecified by pt-machine.h.  */
 
+/* We round up a value with page size. */
+#ifndef page_roundup
+#define page_roundup(v,p) ((((size_t) (v)) + (p) - 1) & ~((p) - 1))
+#endif
+
 /* The page size we can get from the system.  This should likely not be
    changed by the machine file but, you never know.  */
 extern size_t __pagesize;
 #include <bits/uClibc_page.h>
 #ifndef PAGE_SIZE
-#define PAGE_SIZE  (sysconf (_SC_PAGESIZE))
+#define PAGE_SIZE  (sysconf (_SC_PAGE_SIZE))
 #endif
 
+/* The initial size of the thread stack.  Must be a multiple of PAGE_SIZE.  */
+#ifndef INITIAL_STACK_SIZE
+#define INITIAL_STACK_SIZE  (4 * __pagesize)
+#endif
+
+/* Size of the thread manager stack. The "- 32" avoids wasting space
+   with some malloc() implementations. */
+#ifndef THREAD_MANAGER_STACK_SIZE
+#define THREAD_MANAGER_STACK_SIZE  (2 * __pagesize - 32)
+#endif
+
 /* The max size of the thread stack segments.  If the default
    THREAD_SELF implementation is used, this must be a power of two and
    a multiple of PAGE_SIZE.  */
@@ -333,17 +358,6 @@
 #endif
 #endif
 
-/* The initial size of the thread stack.  Must be a multiple of PAGE_SIZE.  */
-#ifndef INITIAL_STACK_SIZE
-#define INITIAL_STACK_SIZE  (4 * __pagesize)
-#endif
-
-/* Size of the thread manager stack. The "- 32" avoids wasting space
-   with some malloc() implementations. */
-#ifndef THREAD_MANAGER_STACK_SIZE
-#define THREAD_MANAGER_STACK_SIZE  (2 * __pagesize - 32)
-#endif
-
 /* The base of the "array" of thread stacks.  The array will grow down from
    here.  Defaults to the calculated bottom of the initial application
    stack.  */
@@ -362,6 +376,7 @@
    x86).  Still we need the compiler to respect the barrier and emit
    all outstanding operations which modify memory.  Some architectures
    distinguish between full, read and write barriers.  */
+
 #ifndef MEMORY_BARRIER
 #define MEMORY_BARRIER() asm ("" : : : "memory")
 #endif
@@ -372,6 +387,30 @@
 #define WRITE_MEMORY_BARRIER() MEMORY_BARRIER()
 #endif
 
+/* Max number of times we must spin on a spinlock calling sched_yield().
+   After MAX_SPIN_COUNT iterations, we put the calling thread to sleep. */
+
+#ifndef MAX_SPIN_COUNT
+#define MAX_SPIN_COUNT 50
+#endif
+
+/* Max number of times the spinlock in the adaptive mutex implementation
+   spins actively on SMP systems.  */
+
+#ifndef MAX_ADAPTIVE_SPIN_COUNT
+#define MAX_ADAPTIVE_SPIN_COUNT 100
+#endif
+
+/* Duration of sleep (in nanoseconds) when we can't acquire a spinlock
+   after MAX_SPIN_COUNT iterations of sched_yield().
+   With the 2.0 and 2.1 kernels, this MUST BE > 2ms.
+   (Otherwise the kernel does busy-waiting for realtime threads,
+    giving other threads no chance to run.) */
+
+#ifndef SPIN_SLEEP_DURATION
+#define SPIN_SLEEP_DURATION 2000001
+#endif
+
 /* Recover thread descriptor for the current thread */
 
 extern pthread_descr __pthread_find_self (void) __attribute__ ((const));
@@ -420,23 +459,6 @@
 #endif
 }
 
-/* Max number of times we must spin on a spinlock calling sched_yield().
-   After MAX_SPIN_COUNT iterations, we put the calling thread to sleep. */
-
-#ifndef MAX_SPIN_COUNT
-#define MAX_SPIN_COUNT 50
-#endif
-
-/* Duration of sleep (in nanoseconds) when we can't acquire a spinlock
-   after MAX_SPIN_COUNT iterations of sched_yield().
-   With the 2.0 and 2.1 kernels, this MUST BE > 2ms.
-   (Otherwise the kernel does busy-waiting for realtime threads,
-    giving other threads no chance to run.) */
-
-#ifndef SPIN_SLEEP_DURATION
-#define SPIN_SLEEP_DURATION 2000001
-#endif
-
 /* Debugging */
 
 #ifdef DEBUG
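
For context, a minimal sketch of what the imported page_roundup macro computes; the 4096-byte page size and the 10000-byte request below are illustrative assumptions, not values taken from this commit:

#include <stdio.h>
#include <stddef.h>

/* Same definition as imported above: round v up to the next multiple of
   the page size p (p is assumed to be a power of two). */
#define page_roundup(v,p) ((((size_t) (v)) + (p) - 1) & ~((p) - 1))

int main(void)
{
	size_t pagesize  = 4096;   /* assumed page size for the example */
	size_t requested = 10000;  /* hypothetical stack-size request   */

	/* 10000 rounded up to a 4096-byte boundary is 12288 (3 pages). */
	printf("%zu -> %zu\n", requested, page_roundup(requested, pagesize));
	return 0;
}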

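The MAX_SPIN_COUNT and SPIN_SLEEP_DURATION defaults serve the spin-then-sleep behaviour described in the comments above: yield the CPU a bounded number of times, then sleep a little over 2ms. A rough sketch of that pattern, with try_lock() as a hypothetical stand-in rather than a function from this tree:

#include <sched.h>
#include <time.h>

#define MAX_SPIN_COUNT      50        /* yield at most this many times        */
#define SPIN_SLEEP_DURATION 2000001   /* then sleep > 2ms (see comment above)  */

/* Hypothetical helper: returns nonzero once the lock has been taken. */
extern int try_lock(void);

static void acquire(void)
{
	int spins = 0;

	while (!try_lock()) {
		if (spins < MAX_SPIN_COUNT) {
			sched_yield();    /* let other runnable threads make progress */
			spins++;
		} else {
			/* Yielding did not help; sleep for just over 2ms so 2.0/2.1
			   kernels do not busy-wait realtime threads. */
			struct timespec ts = { 0, SPIN_SLEEP_DURATION };
			nanosleep(&ts, NULL);
			spins = 0;
		}
	}
}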


