Subject: [RFC PATCH] lowlevellock.h cleanups, LLL_SHARED vs. LLL_PRIVATE on lll locks
From: Jakub Jelinek
Date: 2007-07-25 20:11 UTC
To: Ulrich Drepper
Cc: Glibc hackers
Hi!
On top of the patch I posted yesterday, this is done only for x86_64 and
powerpc (I will finish the rest of the arches, and even write a ChangeLog
entry, if you agree this is the right direction):
1) lll_lock/lll_unlock etc. now also take a private argument (LLL_PRIVATE
or LLL_SHARED) to make the type of the futex explicit.  If the argument is
a __builtin_constant_p LLL_PRIVATE, the call is optimized: it uses the
__lll_*_private helpers, which have the same number of arguments as the old
__lll_* helpers and always use private futexes if supported.  Otherwise the
helpers take the additional argument (I believe most if not all current
LLL_SHARED users will in the end pass a variable, except perhaps for
wake_tid); see the sketch after this list.
2) removed the lll_mutex_lock vs. lll_lock duplication; on all architectures
that support NPTL they are defined the same anyway.  Similarly removed
various macros and prototypes of things that have long been gone from NPTL
or are never used.
3) on x86_64 (later i386 too) the libc !UP optimization that jumps around
the lock prefix is now handled through macros used in the __asm, to avoid
too much source duplication
4) on x86_64 (later i386 too) lowlevellock.h is now usable from __ASSEMBLER__
code and defines just the few things most of the *.S routines need, to avoid
massive code duplication
5) as lll_lock etc. now carries an explicit private status, various locks
that are always internal to libpthread.so could be made LLL_PRIVATE; in
addition, the pthread_rwlock_* and pthread_barrier_* guard locks are now
private or shared depending on whether the object is process-private or
process-shared
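
To make 1) and 5) concrete, here is a minimal sketch of what callers look
like after the change (the example functions and the guard object are made
up for illustration; only the lll_* macros and LLL_* constants come from
the patch):

  #include <lowlevellock.h>

  /* Always process-private internal lock: the constant LLL_PRIVATE lets
     lll_lock pick the __lll_lock_wait_private slow path at compile time.  */
  static int example_internal_lock = LLL_LOCK_INITIALIZER;

  static void
  example_internal_op (void)
  {
    lll_lock (example_internal_lock, LLL_PRIVATE);
    /* ... touch libpthread-internal state ...  */
    lll_unlock (example_internal_lock, LLL_PRIVATE);
  }

  /* Guard lock of an object that may be process-shared: the private
     argument is a variable, so the generic __lll_lock_wait slow path is
     used and the value is folded into the futex operation.  */
  static void
  example_guarded_op (int *guard, int private)
  {
    /* private is LLL_PRIVATE or LLL_SHARED, recorded at init time.  */
    lll_lock (*guard, private);
    /* ... */
    lll_unlock (*guard, private);
  }
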
I have kept LLL_PRIVATE as 0 and LLL_SHARED as 128, but it shouldn't be
very hard to swap the two; in some places the current choice is better
(e.g. for rwlocks), in other cases the opposite would be better (barriers,
sem_*).  All futexes/locks in pthread_mutex_* and pthread_cond_* still use
LLL_SHARED; that's the last big thing that needs to be handled.
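
For reference, with these values the private argument can be folded into
the futex operation without any branch.  Roughly, this is what the header's
helper does in the __ASSUME_PRIVATE_FUTEX case (a sketch; the non-assumed
case masks with the per-thread private_futex word instead):

  /* LLL_PRIVATE == 0, LLL_SHARED == FUTEX_PRIVATE_FLAG == 128, so:
       __lll_private_flag (FUTEX_WAKE, LLL_PRIVATE) -> FUTEX_WAKE | FUTEX_PRIVATE_FLAG
       __lll_private_flag (FUTEX_WAKE, LLL_SHARED)  -> FUTEX_WAKE  */
  #define __lll_private_flag(fl, private) \
    (((fl) | FUTEX_PRIVATE_FLAG) ^ (private))

This is also why the LOAD_FUTEX_WAIT/LOAD_FUTEX_WAKE macros in the .S files
below can simply xor the register with FUTEX_* | FUTEX_PRIVATE_FLAG.
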
Built & tested on x86_64-linux (with both 2.6.21 and 2.6.22 kernels)
and on ppc64-linux (only 2.6.18 kernel, will need to reinstall that box).
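
One more illustration before the patch itself, for how 5) feeds the *.S
callers below: the init paths record the process-shared state in a form
that can be handed straight to __lll_lock_wait/__lll_unlock_wake (for
barriers the stored field has the opposite sense, hence the xorl
$LLL_SHARED there).  Purely hypothetical type, field and function names:

  #include <pthread.h>
  #include <lowlevellock.h>

  typedef struct { int lock; int shared; } example_rwlock_t;

  static int
  example_rwlock_init (example_rwlock_t *rw, int pshared)
  {
    rw->lock = LLL_LOCK_INITIALIZER;
    /* Hypothetical: store a value that is directly usable as the lll
       'private' argument, so the lock/unlock slow paths just load it.  */
    rw->shared = (pshared == PTHREAD_PROCESS_SHARED
                  ? LLL_SHARED : LLL_PRIVATE);
    return 0;
  }
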
--- libc/nptl/sysdeps/unix/sysv/linux/powerpc/sem_post.c.jj 2007-07-23 19:36:30.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/powerpc/sem_post.c 2007-07-25 20:44:47.000000000 +0200
@@ -29,11 +29,37 @@
int
__new_sem_post (sem_t *sem)
{
+ struct new_sem *isem = (struct new_sem *) sem;
+
+ __asm __volatile (__lll_rel_instr ::: "memory");
+ atomic_increment (&isem->value);
+ atomic_full_barrier ();
+ if (isem->nwaiters > 0)
+ {
+ int err = lll_futex_wake (&isem->value, 1,
+ isem->private ^ FUTEX_PRIVATE_FLAG);
+ if (__builtin_expect (err, 0) < 0)
+ {
+ __set_errno (-err);
+ return -1;
+ }
+ }
+ return 0;
+}
+versioned_symbol (libpthread, __new_sem_post, sem_post, GLIBC_2_1);
+
+#if SHLIB_COMPAT (libpthread, GLIBC_2_0, GLIBC_2_1)
+
+int
+attribute_compat_text_section
+__old_sem_post (sem_t *sem)
+{
int *futex = (int *) sem;
__asm __volatile (__lll_rel_instr ::: "memory");
int nr = atomic_increment_val (futex);
- int err = lll_futex_wake (futex, nr, LLL_SHARED);
+ /* We always have to assume it is a shared semaphore. */
+ int err = lll_futex_wake (futex, 1, LLL_SHARED);
if (__builtin_expect (err, 0) < 0)
{
__set_errno (-err);
@@ -41,8 +67,6 @@ __new_sem_post (sem_t *sem)
}
return 0;
}
-versioned_symbol (libpthread, __new_sem_post, sem_post, GLIBC_2_1);
-#if SHLIB_COMPAT (libpthread, GLIBC_2_0, GLIBC_2_1)
-strong_alias (__new_sem_post, __old_sem_post)
+
compat_symbol (libpthread, __old_sem_post, sem_post, GLIBC_2_0);
#endif
--- libc/nptl/sysdeps/unix/sysv/linux/powerpc/lowlevellock.h.jj 2007-07-25 20:00:18.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/powerpc/lowlevellock.h 2007-07-25 20:57:08.000000000 +0200
@@ -69,9 +69,6 @@
# endif
#endif
-/* Initializer for compatibility lock. */
-#define LLL_MUTEX_LOCK_INITIALIZER (0)
-
#define lll_futex_wait(futexp, val, private) \
lll_futex_timed_wait (futexp, val, NULL, private)
@@ -97,14 +94,15 @@
INTERNAL_SYSCALL_ERROR_P (__ret, __err) ? -__ret : __ret; \
})
-#define lll_robust_mutex_dead(futexv) \
+#define lll_robust_dead(futexv, private) \
do \
{ \
INTERNAL_SYSCALL_DECL (__err); \
int *__futexp = &(futexv); \
\
atomic_or (__futexp, FUTEX_OWNER_DIED); \
- INTERNAL_SYSCALL (futex, __err, 4, __futexp, FUTEX_WAKE, 1, 0); \
+ INTERNAL_SYSCALL (futex, __err, 4, __futexp, \
+ __lll_private_flag (FUTEX_WAKE, private), 1, 0); \
} \
while (0)
@@ -171,119 +169,111 @@
__val; \
})
-#define lll_robust_mutex_trylock(lock, id) __lll_robust_trylock (&(lock), id)
+#define lll_robust_trylock(lock, id) __lll_robust_trylock (&(lock), id)
/* Set *futex to 1 if it is 0, atomically. Returns the old value */
#define __lll_trylock(futex) __lll_robust_trylock (futex, 1)
-#define lll_mutex_trylock(lock) __lll_trylock (&(lock))
+#define lll_trylock(lock) __lll_trylock (&(lock))
/* Set *futex to 2 if it is 0, atomically. Returns the old value */
#define __lll_cond_trylock(futex) __lll_robust_trylock (futex, 2)
-#define lll_mutex_cond_trylock(lock) __lll_cond_trylock (&(lock))
+#define lll_cond_trylock(lock) __lll_cond_trylock (&(lock))
-extern void __lll_lock_wait (int *futex) attribute_hidden;
-extern int __lll_robust_lock_wait (int *futex) attribute_hidden;
+extern void __lll_lock_wait_private (int *futex) attribute_hidden;
+extern void __lll_lock_wait (int *futex, int private) attribute_hidden;
+extern int __lll_robust_lock_wait (int *futex, int private) attribute_hidden;
-#define lll_mutex_lock(lock) \
+#define lll_lock(lock, private) \
(void) ({ \
int *__futex = &(lock); \
if (__builtin_expect (atomic_compare_and_exchange_val_acq (__futex, 1, 0),\
0) != 0) \
- __lll_lock_wait (__futex); \
+ { \
+ if (__builtin_constant_p (private) && (private) == LLL_PRIVATE) \
+ __lll_lock_wait_private (__futex); \
+ else \
+ __lll_lock_wait (__futex, private); \
+ } \
})
-#define lll_robust_mutex_lock(lock, id) \
+#define lll_robust_lock(lock, id, private) \
({ \
int *__futex = &(lock); \
int __val = 0; \
if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, id, \
0), 0)) \
- __val = __lll_robust_lock_wait (__futex); \
+ __val = __lll_robust_lock_wait (__futex, private); \
__val; \
})
-#define lll_mutex_cond_lock(lock) \
+#define lll_cond_lock(lock, private) \
(void) ({ \
int *__futex = &(lock); \
if (__builtin_expect (atomic_compare_and_exchange_val_acq (__futex, 2, 0),\
0) != 0) \
- __lll_lock_wait (__futex); \
+ __lll_lock_wait (__futex, private); \
})
-#define lll_robust_mutex_cond_lock(lock, id) \
+#define lll_robust_cond_lock(lock, id, private) \
({ \
int *__futex = &(lock); \
int __val = 0; \
int __id = id | FUTEX_WAITERS; \
if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, __id,\
0), 0)) \
- __val = __lll_robust_lock_wait (__futex); \
+ __val = __lll_robust_lock_wait (__futex, private); \
__val; \
})
extern int __lll_timedlock_wait
- (int *futex, const struct timespec *) attribute_hidden;
+ (int *futex, const struct timespec *, int private) attribute_hidden;
extern int __lll_robust_timedlock_wait
- (int *futex, const struct timespec *) attribute_hidden;
+ (int *futex, const struct timespec *, int private) attribute_hidden;
-#define lll_mutex_timedlock(lock, abstime) \
+#define lll_timedlock(lock, abstime, private) \
({ \
int *__futex = &(lock); \
int __val = 0; \
if (__builtin_expect (atomic_compare_and_exchange_val_acq (__futex, 1, 0),\
0) != 0) \
- __val = __lll_timedlock_wait (__futex, abstime); \
+ __val = __lll_timedlock_wait (__futex, abstime, private); \
__val; \
})
-#define lll_robust_mutex_timedlock(lock, abstime, id) \
+#define lll_robust_timedlock(lock, abstime, id, private) \
({ \
int *__futex = &(lock); \
int __val = 0; \
if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, id, \
0), 0)) \
- __val = __lll_robust_timedlock_wait (__futex, abstime); \
+ __val = __lll_robust_timedlock_wait (__futex, abstime, private); \
__val; \
})
-#define lll_mutex_unlock(lock) \
+#define lll_unlock(lock, private) \
((void) ({ \
int *__futex = &(lock); \
int __val = atomic_exchange_rel (__futex, 0); \
if (__builtin_expect (__val > 1, 0)) \
- lll_futex_wake (__futex, 1, LLL_SHARED); \
+ lll_futex_wake (__futex, 1, private); \
}))
-#define lll_robust_mutex_unlock(lock) \
+#define lll_robust_unlock(lock, private) \
((void) ({ \
int *__futex = &(lock); \
int __val = atomic_exchange_rel (__futex, 0); \
if (__builtin_expect (__val & FUTEX_WAITERS, 0)) \
- lll_futex_wake (__futex, 1, LLL_SHARED); \
- }))
-
-#define lll_mutex_unlock_force(lock) \
- ((void) ({ \
- int *__futex = &(lock); \
- *__futex = 0; \
- __asm __volatile (__lll_rel_instr ::: "memory"); \
- lll_futex_wake (__futex, 1, LLL_SHARED); \
+ lll_futex_wake (__futex, 1, private); \
}))
-#define lll_mutex_islocked(futex) \
+#define lll_islocked(futex) \
(futex != 0)
-/* Our internal lock implementation is identical to the binary-compatible
- mutex implementation. */
-
-/* Type for lock object. */
-typedef int lll_lock_t;
-
/* Initializers for lock. */
#define LLL_LOCK_INITIALIZER (0)
#define LLL_LOCK_INITIALIZER_LOCKED (1)
@@ -293,11 +283,6 @@ typedef int lll_lock_t;
1 - taken by one user
>1 - taken by more users */
-#define lll_trylock(lock) lll_mutex_trylock (lock)
-#define lll_lock(lock) lll_mutex_lock (lock)
-#define lll_unlock(lock) lll_mutex_unlock (lock)
-#define lll_islocked(lock) lll_mutex_islocked (lock)
-
/* The kernel notifies a process which uses CLONE_CLEARTID via futex
wakeup when the clone terminates. The memory location contains the
thread ID while the clone is running and is reset to zero
@@ -320,26 +305,4 @@ extern int __lll_timedwait_tid (int *, c
__res; \
})
-
-/* Conditional variable handling. */
-
-extern void __lll_cond_wait (pthread_cond_t *cond)
- attribute_hidden;
-extern int __lll_cond_timedwait (pthread_cond_t *cond,
- const struct timespec *abstime)
- attribute_hidden;
-extern void __lll_cond_wake (pthread_cond_t *cond)
- attribute_hidden;
-extern void __lll_cond_broadcast (pthread_cond_t *cond)
- attribute_hidden;
-
-#define lll_cond_wait(cond) \
- __lll_cond_wait (cond)
-#define lll_cond_timedwait(cond, abstime) \
- __lll_cond_timedwait (cond, abstime)
-#define lll_cond_wake(cond) \
- __lll_cond_wake (cond)
-#define lll_cond_broadcast(cond) \
- __lll_cond_broadcast (cond)
-
#endif /* lowlevellock.h */
--- libc/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/pthread_barrier_wait.c.jj 2007-06-19 13:10:21.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/pthread_barrier_wait.c 2007-07-25 20:00:24.000000000 +0200
@@ -39,7 +39,7 @@ pthread_barrier_wait (barrier)
int result = 0;
/* Make sure we are alone. */
- lll_lock (ibarrier->b.lock);
+ lll_lock (ibarrier->b.lock, /* XYZ */ LLL_SHARED);
/* One more arrival. */
--ibarrier->b.left;
@@ -66,7 +66,7 @@ pthread_barrier_wait (barrier)
unsigned int event = ibarrier->b.curr_event;
/* Before suspending, make the barrier available to others. */
- lll_unlock (ibarrier->b.lock);
+ lll_unlock (ibarrier->b.lock, /* XYZ */ LLL_SHARED);
/* Wait for the event counter of the barrier to change. */
do
@@ -84,7 +84,7 @@ pthread_barrier_wait (barrier)
{
if (atomic_increment_val (&ibarrier->b.left) == init_count)
/* We are done. */
- lll_unlock (ibarrier->b.lock);
+ lll_unlock (ibarrier->b.lock, /* XYZ */ LLL_SHARED);
}
else
{
@@ -97,7 +97,7 @@ pthread_barrier_wait (barrier)
__sparc32_atomic_do_unlock24 (&ibarrier->left_lock);
if (left == init_count)
/* We are done. */
- lll_unlock (ibarrier->b.lock);
+ lll_unlock (ibarrier->b.lock, /* XYZ */ LLL_SHARED);
}
return result;
--- libc/nptl/sysdeps/unix/sysv/linux/x86_64/sem_post.S.jj 2007-05-28 13:45:24.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/sem_post.S 2007-07-25 20:00:24.000000000 +0200
@@ -18,19 +18,11 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <shlib-compat.h>
#include <pthread-errnos.h>
#include <structsem.h>
-#ifndef UP
-# define LOCK lock
-#else
-# define
-#endif
-
-#define SYS_futex 202
-#define FUTEX_WAKE 1
-
.text
--- libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S.jj 2007-06-01 12:07:58.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S 2007-07-25 20:00:24.000000000 +0200
@@ -19,19 +19,10 @@
#include <sysdep.h>
#include <shlib-compat.h>
+#include <lowlevellock.h>
#include <lowlevelcond.h>
#include <tcb-offsets.h>
-#ifdef UP
-# define LOCK
-#else
-# define LOCK lock
-#endif
-
-#define SYS_futex 202
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-
.text
@@ -58,7 +49,9 @@ __condvar_cleanup:
#if cond_lock != 0
addq $cond_lock, %rdi
#endif
- callq __lll_mutex_lock_wait
+ /* XYZ */
+ movl $LLL_SHARED, %esi
+ callq __lll_lock_wait
#if cond_lock != 0
subq $cond_lock, %rdi
#endif
@@ -105,7 +98,9 @@ __condvar_cleanup:
#if cond_lock != 0
addq $cond_lock, %rdi
#endif
- callq __lll_mutex_unlock_wake
+ /* XYZ */
+ movl $LLL_SHARED, %esi
+ callq __lll_unlock_wake
/* Wake up all waiters to make sure no signal gets lost. */
2: testq %r12, %r12
@@ -307,7 +302,9 @@ __pthread_cond_wait:
#if cond_lock != 0
addq $cond_lock, %rdi
#endif
- callq __lll_mutex_lock_wait
+ /* XYZ */
+ movl $LLL_SHARED, %esi
+ callq __lll_lock_wait
jmp 2b
/* Unlock in loop requires wakeup. */
@@ -315,7 +312,9 @@ __pthread_cond_wait:
#if cond_lock != 0
addq $cond_lock, %rdi
#endif
- callq __lll_mutex_unlock_wake
+ /* XYZ */
+ movl $LLL_SHARED, %esi
+ callq __lll_unlock_wake
jmp 4b
/* Locking in loop failed. */
@@ -323,7 +322,9 @@ __pthread_cond_wait:
#if cond_lock != 0
addq $cond_lock, %rdi
#endif
- callq __lll_mutex_lock_wait
+ /* XYZ */
+ movl $LLL_SHARED, %esi
+ callq __lll_lock_wait
#if cond_lock != 0
subq $cond_lock, %rdi
#endif
@@ -334,7 +335,9 @@ __pthread_cond_wait:
#if cond_lock != 0
addq $cond_lock, %rdi
#endif
- callq __lll_mutex_unlock_wake
+ /* XYZ */
+ movl $LLL_SHARED, %esi
+ callq __lll_unlock_wake
jmp 11b
/* The initial unlocking of the mutex failed. */
@@ -351,7 +354,9 @@ __pthread_cond_wait:
#if cond_lock != 0
addq $cond_lock, %rdi
#endif
- callq __lll_mutex_unlock_wake
+ /* XYZ */
+ movl $LLL_SHARED, %esi
+ callq __lll_unlock_wake
13: movq %r10, %rax
jmp 14b
--- libc/nptl/sysdeps/unix/sysv/linux/x86_64/sem_timedwait.S.jj 2007-05-28 13:45:24.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/sem_timedwait.S 2007-07-25 20:00:24.000000000 +0200
@@ -18,23 +18,15 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <shlib-compat.h>
#include <pthread-errnos.h>
#include <structsem.h>
-#ifndef UP
-# define LOCK lock
-#else
-# define
-#endif
-
-#define SYS_futex 202
-#define FUTEX_WAIT 0
/* For the calculation see asm/vsyscall.h. */
#define VSYSCALL_ADDR_vgettimeofday 0xffffffffff600000
-
.text
.globl sem_timedwait
--- libc/nptl/sysdeps/unix/sysv/linux/x86_64/sem_wait.S.jj 2007-05-28 13:45:24.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/sem_wait.S 2007-07-25 20:00:24.000000000 +0200
@@ -18,19 +18,11 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <shlib-compat.h>
#include <pthread-errnos.h>
#include <structsem.h>
-#ifndef UP
-# define LOCK lock
-#else
-# define
-#endif
-
-#define SYS_futex 202
-#define FUTEX_WAIT 0
-
.text
--- libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_rdlock.S.jj 2007-07-23 19:36:30.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_rdlock.S 2007-07-25 20:00:24.000000000 +0200
@@ -18,23 +18,12 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <lowlevelrwlock.h>
#include <pthread-errnos.h>
#include <kernel-features.h>
-#define SYS_futex 202
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-#define FUTEX_PRIVATE_FLAG 128
-
-#ifndef UP
-# define LOCK lock
-#else
-# define LOCK
-#endif
-
-
.text
.globl __pthread_rwlock_rdlock
@@ -123,11 +112,11 @@ __pthread_rwlock_rdlock:
movq %rdx, %rax
retq
-1:
+1: movl PSHARED(%rdi), %esi
#if MUTEX != 0
addq $MUTEX, %rdi
#endif
- callq __lll_mutex_lock_wait
+ callq __lll_lock_wait
#if MUTEX != 0
subq $MUTEX, %rdi
#endif
@@ -139,11 +128,11 @@ __pthread_rwlock_rdlock:
movl $EDEADLK, %edx
jmp 9b
-6:
+6: movl PSHARED(%rdi), %esi
#if MUTEX != 0
addq $MUTEX, %rdi
#endif
- callq __lll_mutex_unlock_wake
+ callq __lll_unlock_wake
#if MUTEX != 0
subq $MUTEX, %rdi
#endif
@@ -159,21 +148,21 @@ __pthread_rwlock_rdlock:
movl $EAGAIN, %edx
jmp 9b
-10:
+10: movl PSHARED(%rdi), %esi
#if MUTEX != 0
addq $MUTEX, %rdi
#endif
- callq __lll_mutex_unlock_wake
+ callq __lll_unlock_wake
#if MUTEX != 0
subq $MUTEX, %rdi
#endif
jmp 11b
-12:
+12: movl PSHARED(%rdi), %esi
#if MUTEX == 0
addq $MUTEX, %rdi
#endif
- callq __lll_mutex_lock_wait
+ callq __lll_lock_wait
#if MUTEX != 0
subq $MUTEX, %rdi
#endif
--- libc/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h.jj 2007-07-25 20:00:18.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h 2007-07-25 20:00:24.000000000 +0200
@@ -20,17 +20,27 @@
#ifndef _LOWLEVELLOCK_H
#define _LOWLEVELLOCK_H 1
-#include <time.h>
-#include <sys/param.h>
-#include <bits/pthreadtypes.h>
-#include <kernel-features.h>
-#include <tcb-offsets.h>
-
-#ifndef LOCK_INSTR
-# ifdef UP
-# define LOCK_INSTR /* nothing */
-# else
-# define LOCK_INSTR "lock;"
+#ifndef __ASSEMBLER__
+# include <time.h>
+# include <sys/param.h>
+# include <bits/pthreadtypes.h>
+# include <kernel-features.h>
+# include <tcb-offsets.h>
+
+# ifndef LOCK_INSTR
+# ifdef UP
+# define LOCK_INSTR /* nothing */
+# else
+# define LOCK_INSTR "lock;"
+# endif
+# endif
+#else
+# ifndef LOCK
+# ifdef UP
+# define LOCK
+# else
+# define LOCK lock
+# endif
# endif
#endif
@@ -38,11 +48,13 @@
#define FUTEX_WAIT 0
#define FUTEX_WAKE 1
#define FUTEX_CMP_REQUEUE 4
+#define FUTEX_WAKE_OP 5
#define FUTEX_LOCK_PI 6
#define FUTEX_UNLOCK_PI 7
#define FUTEX_TRYLOCK_PI 8
#define FUTEX_PRIVATE_FLAG 128
+#define FUTEX_OP_CLEAR_WAKE_IF_GT_ONE ((4 << 24) | 1)
/* Values for 'private' parameter of locking macros. Yes, the
definition seems to be backwards. But it is not. The bit will be
@@ -50,6 +62,8 @@
#define LLL_PRIVATE 0
#define LLL_SHARED FUTEX_PRIVATE_FLAG
+#ifndef __ASSEMBLER__
+
#if !defined NOT_IN_libc || defined IS_IN_rtld
/* In libc.so or ld.so all futexes are private. */
# ifdef __ASSUME_PRIVATE_FUTEX
@@ -76,13 +90,13 @@
# endif
#endif
-/* Initializer for compatibility lock. */
-#define LLL_MUTEX_LOCK_INITIALIZER (0)
-#define LLL_MUTEX_LOCK_INITIALIZER_LOCKED (1)
-#define LLL_MUTEX_LOCK_INITIALIZER_WAITERS (2)
+/* Initializer for lock. */
+#define LLL_LOCK_INITIALIZER (0)
+#define LLL_LOCK_INITIALIZER_LOCKED (1)
+#define LLL_LOCK_INITIALIZER_WAITERS (2)
/* Delay in spinlock loop. */
-#define BUSY_WAIT_NOP asm ("rep; nop")
+#define BUSY_WAIT_NOP asm ("rep; nop")
#define LLL_STUB_UNWIND_INFO_START \
@@ -196,7 +210,7 @@ LLL_STUB_UNWIND_INFO_END
: "=a" (__status) \
: "0" (SYS_futex), "D" (futex), \
"S" (__lll_private_flag (FUTEX_WAIT, private)), \
- "d" (_val), "r" (__to) \
+ "d" (_val), "r" (__to) \
: "memory", "cc", "r11", "cx"); \
__status; \
})
@@ -217,240 +231,320 @@ LLL_STUB_UNWIND_INFO_END
/* Does not preserve %eax and %ecx. */
-extern int __lll_mutex_lock_wait (int *__futex, int __val) attribute_hidden;
-/* Does not preserver %eax, %ecx, and %edx. */
-extern int __lll_mutex_timedlock_wait (int *__futex, int __val,
- const struct timespec *__abstime)
+extern int __lll_lock_wait_private (int *__futex, int __val) attribute_hidden;
+extern int __lll_lock_wait (int *__futex, int __val, int private)
+ attribute_hidden;
+/* Does not preserve %eax, %ecx, and %edx. */
+extern int __lll_timedlock_wait (int *__futex, int __val,
+ const struct timespec *__abstime, int private)
attribute_hidden;
/* Preserves all registers but %eax. */
-extern int __lll_mutex_unlock_wait (int *__futex) attribute_hidden;
+extern int __lll_unlock_wake_private (int *__futex) attribute_hidden;
+extern int __lll_unlock_wake (int *__futex, int private) attribute_hidden;
-/* NB: in the lll_mutex_trylock macro we simply return the value in %eax
+/* NB: in the lll_trylock macro we simply return the value in %eax
after the cmpxchg instruction. In case the operation succeded this
value is zero. In case the operation failed, the cmpxchg instruction
has loaded the current value of the memory work which is guaranteed
to be nonzero. */
-#define lll_mutex_trylock(futex) \
+#if defined NOT_IN_libc || defined UP
+# define __lll_trylock_asm LOCK_INSTR "cmpxchgl %2, %1"
+#else
+# define __lll_trylock_asm "cmpl $0, __libc_multiple_threads(%%rip)\n\t" \
+ "je 0f\n\t" \
+ "lock; cmpxchgl %2, %1\n\t" \
+ "jmp 1f\n\t" \
+ "0:\tcmpxchgl %2, %1\n\t" \
+ "1:"
+#endif
+
+#define lll_trylock(futex) \
({ int ret; \
- __asm __volatile (LOCK_INSTR "cmpxchgl %2, %1" \
+ __asm __volatile (__lll_trylock_asm \
: "=a" (ret), "=m" (futex) \
- : "r" (LLL_MUTEX_LOCK_INITIALIZER_LOCKED), "m" (futex),\
- "0" (LLL_MUTEX_LOCK_INITIALIZER) \
+ : "r" (LLL_LOCK_INITIALIZER_LOCKED), "m" (futex), \
+ "0" (LLL_LOCK_INITIALIZER) \
: "memory"); \
ret; })
-
-#define lll_robust_mutex_trylock(futex, id) \
+#define lll_robust_trylock(futex, id) \
({ int ret; \
__asm __volatile (LOCK_INSTR "cmpxchgl %2, %1" \
: "=a" (ret), "=m" (futex) \
- : "r" (id), "m" (futex), \
- "0" (LLL_MUTEX_LOCK_INITIALIZER) \
+ : "r" (id), "m" (futex), "0" (LLL_LOCK_INITIALIZER) \
: "memory"); \
ret; })
-
-#define lll_mutex_cond_trylock(futex) \
+#define lll_cond_trylock(futex) \
({ int ret; \
__asm __volatile (LOCK_INSTR "cmpxchgl %2, %1" \
: "=a" (ret), "=m" (futex) \
- : "r" (LLL_MUTEX_LOCK_INITIALIZER_WAITERS), \
- "m" (futex), "0" (LLL_MUTEX_LOCK_INITIALIZER) \
+ : "r" (LLL_LOCK_INITIALIZER_WAITERS), \
+ "m" (futex), "0" (LLL_LOCK_INITIALIZER) \
: "memory"); \
ret; })
-
-#define lll_mutex_lock(futex) \
- (void) ({ int ignore1, ignore2, ignore3; \
- __asm __volatile (LOCK_INSTR "cmpxchgl %0, %2\n\t" \
+#if defined NOT_IN_libc || defined UP
+# define __lll_lock_asm_start LOCK_INSTR "cmpxchgl %4, %2\n\t" \
+ "jnz 1f\n\t"
+#else
+# define __lll_lock_asm_start "cmpl $0, __libc_multiple_threads(%%rip)\n\t" \
+ "je 0f\n\t" \
+ "lock; cmpxchgl %4, %2\n\t" \
"jnz 1f\n\t" \
- ".subsection 1\n\t" \
- ".type _L_mutex_lock_%=, @function\n" \
- "_L_mutex_lock_%=:\n" \
- "1:\tleaq %2, %%rdi\n" \
- "2:\tsubq $128, %%rsp\n" \
- "3:\tcallq __lll_mutex_lock_wait\n" \
- "4:\taddq $128, %%rsp\n" \
- "5:\tjmp 24f\n" \
- "6:\t.size _L_mutex_lock_%=, 6b-1b\n\t" \
- ".previous\n" \
- LLL_STUB_UNWIND_INFO_5 \
- "24:" \
- : "=S" (ignore1), "=&D" (ignore2), "=m" (futex),\
- "=a" (ignore3) \
- : "0" (1), "m" (futex), "3" (0) \
- : "cx", "r11", "cc", "memory"); })
+ "jmp 24f\n" \
+ "0:\tcmpxchgl %4, %2\n\t" \
+ "jnz 1f\n\t"
+#endif
+#define lll_lock(futex, private) \
+ (void) \
+ ({ int ignore1, ignore2, ignore3; \
+ if (__builtin_constant_p (private) && (private) == LLL_PRIVATE) \
+ __asm __volatile (__lll_lock_asm_start \
+ ".subsection 1\n\t" \
+ ".type _L_lock_%=, @function\n" \
+ "_L_lock_%=:\n" \
+ "1:\tleaq %2, %%rdi\n" \
+ "2:\tsubq $128, %%rsp\n" \
+ "3:\tcallq __lll_lock_wait_private\n" \
+ "4:\taddq $128, %%rsp\n" \
+ "5:\tjmp 24f\n" \
+ "6:\t.size _L_lock_%=, 6b-1b\n\t" \
+ ".previous\n" \
+ LLL_STUB_UNWIND_INFO_5 \
+ "24:" \
+ : "=S" (ignore1), "=&D" (ignore2), "=m" (futex), \
+ "=a" (ignore3) \
+ : "0" (1), "m" (futex), "3" (0) \
+ : "cx", "r11", "cc", "memory"); \
+ else \
+ __asm __volatile (__lll_lock_asm_start \
+ ".subsection 1\n\t" \
+ ".type _L_lock_%=, @function\n" \
+ "_L_lock_%=:\n" \
+ "1:\tleaq %2, %%rdi\n" \
+ "2:\tsubq $128, %%rsp\n" \
+ "3:\tcallq __lll_lock_wait\n" \
+ "4:\taddq $128, %%rsp\n" \
+ "5:\tjmp 24f\n" \
+ "6:\t.size _L_lock_%=, 6b-1b\n\t" \
+ ".previous\n" \
+ LLL_STUB_UNWIND_INFO_5 \
+ "24:" \
+ : "=S" (ignore1), "=D" (ignore2), "=m" (futex), \
+ "=a" (ignore3) \
+ : "1" (1), "m" (futex), "3" (0), "0" (private) \
+ : "cx", "r11", "cc", "memory"); \
+ }) \
-#define lll_robust_mutex_lock(futex, id) \
+#define lll_robust_lock(futex, id, private) \
({ int result, ignore1, ignore2; \
- __asm __volatile (LOCK_INSTR "cmpxchgl %0, %2\n\t" \
+ __asm __volatile (LOCK_INSTR "cmpxchgl %4, %2\n\t" \
"jnz 1f\n\t" \
".subsection 1\n\t" \
- ".type _L_robust_mutex_lock_%=, @function\n" \
- "_L_robust_mutex_lock_%=:\n" \
+ ".type _L_robust_lock_%=, @function\n" \
+ "_L_robust_lock_%=:\n" \
"1:\tleaq %2, %%rdi\n" \
"2:\tsubq $128, %%rsp\n" \
- "3:\tcallq __lll_robust_mutex_lock_wait\n" \
+ "3:\tcallq __lll_robust_lock_wait\n" \
"4:\taddq $128, %%rsp\n" \
"5:\tjmp 24f\n" \
- "6:\t.size _L_robust_mutex_lock_%=, 6b-1b\n\t" \
+ "6:\t.size _L_robust_lock_%=, 6b-1b\n\t" \
".previous\n" \
LLL_STUB_UNWIND_INFO_5 \
"24:" \
- : "=S" (ignore1), "=&D" (ignore2), "=m" (futex), \
+ : "=S" (ignore1), "=D" (ignore2), "=m" (futex), \
"=a" (result) \
- : "0" (id), "m" (futex), "3" (0) \
+ : "1" (id), "m" (futex), "3" (0), "0" (private) \
: "cx", "r11", "cc", "memory"); \
result; })
+#define lll_cond_lock(futex, private) \
+ (void) \
+ ({ int ignore1, ignore2, ignore3; \
+ __asm __volatile (LOCK_INSTR "cmpxchgl %4, %2\n\t" \
+ "jnz 1f\n\t" \
+ ".subsection 1\n\t" \
+ ".type _L_cond_lock_%=, @function\n" \
+ "_L_cond_lock_%=:\n" \
+ "1:\tleaq %2, %%rdi\n" \
+ "2:\tsubq $128, %%rsp\n" \
+ "3:\tcallq __lll_lock_wait\n" \
+ "4:\taddq $128, %%rsp\n" \
+ "5:\tjmp 24f\n" \
+ "6:\t.size _L_cond_lock_%=, 6b-1b\n\t" \
+ ".previous\n" \
+ LLL_STUB_UNWIND_INFO_5 \
+ "24:" \
+ : "=S" (ignore1), "=D" (ignore2), "=m" (futex), \
+ "=a" (ignore3) \
+ : "1" (2), "m" (futex), "3" (0), "0" (private) \
+ : "cx", "r11", "cc", "memory"); \
+ })
-#define lll_mutex_cond_lock(futex) \
- (void) ({ int ignore1, ignore2, ignore3; \
- __asm __volatile (LOCK_INSTR "cmpxchgl %0, %2\n\t" \
- "jnz 1f\n\t" \
- ".subsection 1\n\t" \
- ".type _L_mutex_cond_lock_%=, @function\n" \
- "_L_mutex_cond_lock_%=:\n" \
- "1:\tleaq %2, %%rdi\n" \
- "2:\tsubq $128, %%rsp\n" \
- "3:\tcallq __lll_mutex_lock_wait\n" \
- "4:\taddq $128, %%rsp\n" \
- "5:\tjmp 24f\n" \
- "6:\t.size _L_mutex_cond_lock_%=, 6b-1b\n\t" \
- ".previous\n" \
- LLL_STUB_UNWIND_INFO_5 \
- "24:" \
- : "=S" (ignore1), "=&D" (ignore2), "=m" (futex),\
- "=a" (ignore3) \
- : "0" (2), "m" (futex), "3" (0) \
- : "cx", "r11", "cc", "memory"); })
-
-
-#define lll_robust_mutex_cond_lock(futex, id) \
+#define lll_robust_cond_lock(futex, id, private) \
({ int result, ignore1, ignore2; \
- __asm __volatile (LOCK_INSTR "cmpxchgl %0, %2\n\t" \
+ __asm __volatile (LOCK_INSTR "cmpxchgl %4, %2\n\t" \
"jnz 1f\n\t" \
".subsection 1\n\t" \
- ".type _L_robust_mutex_cond_lock_%=, @function\n" \
- "_L_robust_mutex_cond_lock_%=:\n" \
+ ".type _L_robust_cond_lock_%=, @function\n" \
+ "_L_robust_cond_lock_%=:\n" \
"1:\tleaq %2, %%rdi\n" \
"2:\tsubq $128, %%rsp\n" \
- "3:\tcallq __lll_robust_mutex_lock_wait\n" \
+ "3:\tcallq __lll_robust_lock_wait\n" \
"4:\taddq $128, %%rsp\n" \
"5:\tjmp 24f\n" \
- "6:\t.size _L_robust_mutex_cond_lock_%=, 6b-1b\n\t" \
+ "6:\t.size _L_robust_cond_lock_%=, 6b-1b\n\t" \
".previous\n" \
LLL_STUB_UNWIND_INFO_5 \
"24:" \
- : "=S" (ignore1), "=&D" (ignore2), "=m" (futex), \
+ : "=S" (ignore1), "=D" (ignore2), "=m" (futex), \
"=a" (result) \
- : "0" (id | FUTEX_WAITERS), "m" (futex), "3" (0) \
+ : "1" (id | FUTEX_WAITERS), "m" (futex), "3" (0), \
+ "0" (private) \
: "cx", "r11", "cc", "memory"); \
result; })
-
-#define lll_mutex_timedlock(futex, timeout) \
+#define lll_timedlock(futex, timeout, private) \
({ int result, ignore1, ignore2, ignore3; \
- __asm __volatile (LOCK_INSTR "cmpxchgl %2, %4\n\t" \
+ __asm __volatile (LOCK_INSTR "cmpxchgl %1, %4\n\t" \
"jnz 1f\n\t" \
".subsection 1\n\t" \
- ".type _L_mutex_timedlock_%=, @function\n" \
- "_L_mutex_timedlock_%=:\n" \
+ ".type _L_timedlock_%=, @function\n" \
+ "_L_timedlock_%=:\n" \
"1:\tleaq %4, %%rdi\n" \
"0:\tmovq %8, %%rdx\n" \
"2:\tsubq $128, %%rsp\n" \
- "3:\tcallq __lll_mutex_timedlock_wait\n" \
+ "3:\tcallq __lll_timedlock_wait\n" \
"4:\taddq $128, %%rsp\n" \
"5:\tjmp 24f\n" \
- "6:\t.size _L_mutex_timedlock_%=, 6b-1b\n\t" \
+ "6:\t.size _L_timedlock_%=, 6b-1b\n\t" \
".previous\n" \
LLL_STUB_UNWIND_INFO_6 \
"24:" \
- : "=a" (result), "=&D" (ignore1), "=S" (ignore2), \
+ : "=a" (result), "=D" (ignore1), "=S" (ignore2), \
"=&d" (ignore3), "=m" (futex) \
- : "0" (0), "2" (1), "m" (futex), "m" (timeout) \
+ : "0" (0), "1" (1), "m" (futex), "m" (timeout), \
+ "2" (private) \
: "memory", "cx", "cc", "r10", "r11"); \
result; })
-
-#define lll_robust_mutex_timedlock(futex, timeout, id) \
+#define lll_robust_timedlock(futex, timeout, id, private) \
({ int result, ignore1, ignore2, ignore3; \
- __asm __volatile (LOCK_INSTR "cmpxchgl %2, %4\n\t" \
+ __asm __volatile (LOCK_INSTR "cmpxchgl %1, %4\n\t" \
"jnz 1f\n\t" \
".subsection 1\n\t" \
- ".type _L_robust_mutex_timedlock_%=, @function\n" \
- "_L_robust_mutex_timedlock_%=:\n" \
+ ".type _L_robust_timedlock_%=, @function\n" \
+ "_L_robust_timedlock_%=:\n" \
"1:\tleaq %4, %%rdi\n" \
"0:\tmovq %8, %%rdx\n" \
"2:\tsubq $128, %%rsp\n" \
- "3:\tcallq __lll_robust_mutex_timedlock_wait\n" \
+ "3:\tcallq __lll_robust_timedlock_wait\n" \
"4:\taddq $128, %%rsp\n" \
"5:\tjmp 24f\n" \
- "6:\t.size _L_robust_mutex_timedlock_%=, 6b-1b\n\t" \
+ "6:\t.size _L_robust_timedlock_%=, 6b-1b\n\t" \
".previous\n" \
LLL_STUB_UNWIND_INFO_6 \
"24:" \
- : "=a" (result), "=&D" (ignore1), "=S" (ignore2), \
+ : "=a" (result), "=D" (ignore1), "=S" (ignore2), \
"=&d" (ignore3), "=m" (futex) \
- : "0" (0), "2" (id), "m" (futex), "m" (timeout) \
+ : "0" (0), "1" (id), "m" (futex), "m" (timeout), \
+ "2" (private) \
: "memory", "cx", "cc", "r10", "r11"); \
result; })
+#if defined NOT_IN_libc || defined UP
+# define __lll_unlock_asm_start LOCK_INSTR "decl %0\n\t" \
+ "jne 1f\n\t"
+#else
+# define __lll_unlock_asm_start "cmpl $0, __libc_multiple_threads(%%rip)\n\t" \
+ "je 0f\n\t" \
+ "lock; decl %0\n\t" \
+ "jne 1f\n\t" \
+ "jmp 24f\n\t" \
+ "0:\tdecl %0\n\t" \
+ "jne 1f\n\t"
+#endif
-#define lll_mutex_unlock(futex) \
- (void) ({ int ignore; \
- __asm __volatile (LOCK_INSTR "decl %0\n\t" \
- "jne 1f\n\t" \
- ".subsection 1\n\t" \
- ".type _L_mutex_unlock_%=, @function\n" \
- "_L_mutex_unlock_%=:\n" \
- "1:\tleaq %0, %%rdi\n" \
- "2:\tsubq $128, %%rsp\n" \
- "3:\tcallq __lll_mutex_unlock_wake\n" \
- "4:\taddq $128, %%rsp\n" \
- "5:\tjmp 24f\n" \
- "6:\t.size _L_mutex_unlock_%=, 6b-1b\n\t" \
- ".previous\n" \
- LLL_STUB_UNWIND_INFO_5 \
- "24:" \
- : "=m" (futex), "=&D" (ignore) \
- : "m" (futex) \
- : "ax", "cx", "r11", "cc", "memory"); })
-
-
-#define lll_robust_mutex_unlock(futex) \
- (void) ({ int ignore; \
- __asm __volatile (LOCK_INSTR "andl %2, %0\n\t" \
- "jne 1f\n\t" \
- ".subsection 1\n\t" \
- ".type _L_robust_mutex_unlock_%=, @function\n" \
- "_L_robust_mutex_unlock_%=:\n" \
- "1:\tleaq %0, %%rdi\n" \
- "2:\tsubq $128, %%rsp\n" \
- "3:\tcallq __lll_mutex_unlock_wake\n" \
- "4:\taddq $128, %%rsp\n" \
- "5:\tjmp 24f\n" \
- "6:\t.size _L_robust_mutex_unlock_%=, 6b-1b\n\t"\
- ".previous\n" \
- LLL_STUB_UNWIND_INFO_5 \
- "24:" \
- : "=m" (futex), "=&D" (ignore) \
- : "i" (FUTEX_WAITERS), "m" (futex) \
- : "ax", "cx", "r11", "cc", "memory"); })
-
-
-#define lll_robust_mutex_dead(futex) \
- (void) ({ int ignore; \
- __asm __volatile (LOCK_INSTR "orl %3, (%2)\n\t" \
- "syscall" \
- : "=m" (futex), "=a" (ignore) \
- : "D" (&(futex)), "i" (FUTEX_OWNER_DIED), \
- "S" (FUTEX_WAKE), "1" (__NR_futex), \
- "d" (1) \
- : "cx", "r11", "cc", "memory"); })
-
+#define lll_unlock(futex, private) \
+ (void) \
+ ({ int ignore; \
+ if (__builtin_constant_p (private) && (private) == LLL_PRIVATE) \
+ __asm __volatile (__lll_unlock_asm_start \
+ ".subsection 1\n\t" \
+ ".type _L_unlock_%=, @function\n" \
+ "_L_unlock_%=:\n" \
+ "1:\tleaq %0, %%rdi\n" \
+ "2:\tsubq $128, %%rsp\n" \
+ "3:\tcallq __lll_unlock_wake_private\n" \
+ "4:\taddq $128, %%rsp\n" \
+ "5:\tjmp 24f\n" \
+ "6:\t.size _L_unlock_%=, 6b-1b\n\t" \
+ ".previous\n" \
+ LLL_STUB_UNWIND_INFO_5 \
+ "24:" \
+ : "=m" (futex), "=&D" (ignore) \
+ : "m" (futex) \
+ : "ax", "cx", "r11", "cc", "memory"); \
+ else \
+ __asm __volatile (__lll_unlock_asm_start \
+ ".subsection 1\n\t" \
+ ".type _L_unlock_%=, @function\n" \
+ "_L_unlock_%=:\n" \
+ "1:\tleaq %0, %%rdi\n" \
+ "2:\tsubq $128, %%rsp\n" \
+ "3:\tcallq __lll_unlock_wake\n" \
+ "4:\taddq $128, %%rsp\n" \
+ "5:\tjmp 24f\n" \
+ "6:\t.size _L_unlock_%=, 6b-1b\n\t" \
+ ".previous\n" \
+ LLL_STUB_UNWIND_INFO_5 \
+ "24:" \
+ : "=m" (futex), "=&D" (ignore) \
+ : "m" (futex), "S" (private) \
+ : "ax", "cx", "r11", "cc", "memory"); \
+ })
+
+#define lll_robust_unlock(futex, private) \
+ do \
+ { \
+ int ignore; \
+ __asm __volatile (LOCK_INSTR "andl %2, %0\n\t" \
+ "jne 1f\n\t" \
+ ".subsection 1\n\t" \
+ ".type _L_robust_unlock_%=, @function\n" \
+ "_L_robust_unlock_%=:\n" \
+ "1:\tleaq %0, %%rdi\n" \
+ "2:\tsubq $128, %%rsp\n" \
+ "3:\tcallq __lll_unlock_wake\n" \
+ "4:\taddq $128, %%rsp\n" \
+ "5:\tjmp 24f\n" \
+ "6:\t.size _L_robust_unlock_%=, 6b-1b\n\t" \
+ ".previous\n" \
+ LLL_STUB_UNWIND_INFO_5 \
+ "24:" \
+ : "=m" (futex), "=&D" (ignore) \
+ : "i" (FUTEX_WAITERS), "m" (futex), \
+ "S" (private) \
+ : "ax", "cx", "r11", "cc", "memory"); \
+ } \
+ while (0)
+
+#define lll_robust_dead(futex, private) \
+ do \
+ { \
+ int ignore; \
+ __asm __volatile (LOCK_INSTR "orl %3, (%2)\n\t" \
+ "syscall" \
+ : "=m" (futex), "=a" (ignore) \
+ : "D" (&(futex)), "i" (FUTEX_OWNER_DIED), \
+ "S" (__lll_private_flag (FUTEX_WAKE, private)), \
+ "1" (__NR_futex), "d" (1) \
+ : "cx", "r11", "cc", "memory"); \
+ } \
+ while (0)
/* Returns non-zero if error happened, zero if success. */
#define lll_futex_requeue(ftx, nr_wake, nr_move, mutex, val) \
@@ -461,117 +555,13 @@ extern int __lll_mutex_unlock_wait (int
__asm __volatile ("syscall" \
: "=a" (__res) \
: "0" (__NR_futex), "D" ((void *) ftx), \
- "S" (FUTEX_CMP_REQUEUE), "d" (nr_wake), \
- "r" (__nr_move), "r" (__mutex), "r" (__val) \
+ "S" (FUTEX_CMP_REQUEUE), "d" (nr_wake), \
+ "r" (__nr_move), "r" (__mutex), "r" (__val) \
: "cx", "r11", "cc", "memory"); \
__res < 0; })
-
-#define lll_mutex_islocked(futex) \
- (futex != LLL_MUTEX_LOCK_INITIALIZER)
-
-
-/* We have a separate internal lock implementation which is not tied
- to binary compatibility. */
-
-/* Type for lock object. */
-typedef int lll_lock_t;
-
-/* Initializers for lock. */
-#define LLL_LOCK_INITIALIZER (0)
-#define LLL_LOCK_INITIALIZER_LOCKED (1)
-
-
-/* The states of a lock are:
- 0 - untaken
- 1 - taken by one user
- 2 - taken by more users */
-
-
-#if defined NOT_IN_libc || defined UP
-# define lll_trylock(futex) lll_mutex_trylock (futex)
-# define lll_lock(futex) lll_mutex_lock (futex)
-# define lll_unlock(futex) lll_mutex_unlock (futex)
-#else
-/* Special versions of the macros for use in libc itself. They avoid
- the lock prefix when the thread library is not used.
-
- The code sequence to avoid unnecessary lock prefixes is what the AMD
- guys suggested. If you do not like it, bring it up with AMD.
-
- XXX In future we might even want to avoid it on UP machines. */
-
-# define lll_trylock(futex) \
- ({ unsigned char ret; \
- __asm __volatile ("cmpl $0, __libc_multiple_threads(%%rip)\n\t" \
- "je 0f\n\t" \
- "lock; cmpxchgl %2, %1\n\t" \
- "jmp 1f\n" \
- "0:\tcmpxchgl %2, %1\n\t" \
- "1:setne %0" \
- : "=a" (ret), "=m" (futex) \
- : "r" (LLL_MUTEX_LOCK_INITIALIZER_LOCKED), "m" (futex),\
- "0" (LLL_MUTEX_LOCK_INITIALIZER) \
- : "memory"); \
- ret; })
-
-
-# define lll_lock(futex) \
- (void) ({ int ignore1, ignore2, ignore3; \
- __asm __volatile ("cmpl $0, __libc_multiple_threads(%%rip)\n\t" \
- "je 0f\n\t" \
- "lock; cmpxchgl %0, %2\n\t" \
- "jnz 1f\n\t" \
- "jmp 24f\n" \
- "0:\tcmpxchgl %0, %2\n\t" \
- "jnz 1f\n\t" \
- ".subsection 1\n\t" \
- ".type _L_lock_%=, @function\n" \
- "_L_lock_%=:\n" \
- "1:\tleaq %2, %%rdi\n" \
- "2:\tsubq $128, %%rsp\n" \
- "3:\tcallq __lll_mutex_lock_wait\n" \
- "4:\taddq $128, %%rsp\n" \
- "5:\tjmp 24f\n" \
- "6:\t.size _L_lock_%=, 6b-1b\n\t" \
- ".previous\n" \
- LLL_STUB_UNWIND_INFO_5 \
- "24:" \
- : "=S" (ignore1), "=&D" (ignore2), "=m" (futex),\
- "=a" (ignore3) \
- : "0" (1), "m" (futex), "3" (0) \
- : "cx", "r11", "cc", "memory"); })
-
-
-# define lll_unlock(futex) \
- (void) ({ int ignore; \
- __asm __volatile ("cmpl $0, __libc_multiple_threads(%%rip)\n\t" \
- "je 0f\n\t" \
- "lock; decl %0\n\t" \
- "jne 1f\n\t" \
- "jmp 24f\n" \
- "0:\tdecl %0\n\t" \
- "jne 1f\n\t" \
- ".subsection 1\n\t" \
- ".type _L_unlock_%=, @function\n" \
- "_L_unlock_%=:\n" \
- "1:\tleaq %0, %%rdi\n" \
- "2:\tsubq $128, %%rsp\n" \
- "3:\tcallq __lll_mutex_unlock_wake\n" \
- "4:\taddq $128, %%rsp\n" \
- "5:\tjmp 24f\n" \
- "6:\t.size _L_unlock_%=, 6b-1b\n\t" \
- ".previous\n" \
- LLL_STUB_UNWIND_INFO_5 \
- "24:" \
- : "=m" (futex), "=&D" (ignore) \
- : "m" (futex) \
- : "ax", "cx", "r11", "cc", "memory"); })
-#endif
-
-
#define lll_islocked(futex) \
- (futex != LLL_MUTEX_LOCK_INITIALIZER)
+ (futex != LLL_LOCK_INITIALIZER)
/* The kernel notifies a process with uses CLONE_CLEARTID via futex
@@ -610,25 +600,6 @@ extern int __lll_timedwait_tid (int *tid
} \
__result; })
-
-/* Conditional variable handling. */
-
-extern void __lll_cond_wait (pthread_cond_t *cond) attribute_hidden;
-extern int __lll_cond_timedwait (pthread_cond_t *cond,
- const struct timespec *abstime)
- attribute_hidden;
-extern void __lll_cond_wake (pthread_cond_t *cond) attribute_hidden;
-extern void __lll_cond_broadcast (pthread_cond_t *cond) attribute_hidden;
-
-
-#define lll_cond_wait(cond) \
- __lll_cond_wait (cond)
-#define lll_cond_timedwait(cond, abstime) \
- __lll_cond_timedwait (cond, abstime)
-#define lll_cond_wake(cond) \
- __lll_cond_wake (cond)
-#define lll_cond_broadcast(cond) \
- __lll_cond_broadcast (cond)
-
+#endif /* !__ASSEMBLER__ */
#endif /* lowlevellock.h */
--- libc/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevelrobustlock.S.jj 2006-09-08 13:57:52.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevelrobustlock.S 2007-07-25 20:00:24.000000000 +0200
@@ -1,4 +1,5 @@
-/* Copyright (C) 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007
+ Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -19,33 +20,46 @@
#include <sysdep.h>
#include <pthread-errnos.h>
+#include <lowlevellock.h>
#include <lowlevelrobustlock.h>
+#include <kernel-features.h>
.text
-#ifndef LOCK
-# ifdef UP
-# define LOCK
+#define FUTEX_WAITERS 0x80000000
+#define FUTEX_OWNER_DIED 0x40000000
+
+#ifdef __ASSUME_PRIVATE_FUTEX
+# define LOAD_FUTEX_WAIT(reg) \
+ xorl $(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg
+# define LOAD_FUTEX_WAKE(reg) \
+ xorl $(FUTEX_WAKE | FUTEX_PRIVATE_FLAG), reg
+#else
+# if FUTEX_WAIT == 0
+# define LOAD_FUTEX_WAIT(reg) \
+ xorl $FUTEX_PRIVATE_FLAG, reg ; \
+ andl %fs:PRIVATE_FUTEX, reg
# else
-# define LOCK lock
+# define LOAD_FUTEX_WAIT(reg) \
+ xorl $FUTEX_PRIVATE_FLAG, reg ; \
+ andl %fs:PRIVATE_FUTEX, reg ; \
+ orl $FUTEX_WAIT, reg
# endif
+# define LOAD_FUTEX_WAKE(reg) \
+ xorl $FUTEX_PRIVATE_FLAG, reg ; \
+ andl %fs:PRIVATE_FUTEX, reg ; \
+ orl $FUTEX_WAKE, reg
#endif
-#define SYS_futex 202
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-#define FUTEX_WAITERS 0x80000000
-#define FUTEX_OWNER_DIED 0x40000000
-
/* For the calculation see asm/vsyscall.h. */
#define VSYSCALL_ADDR_vgettimeofday 0xffffffffff600000
- .globl __lll_robust_mutex_lock_wait
- .type __lll_robust_mutex_lock_wait,@function
- .hidden __lll_robust_mutex_lock_wait
+ .globl __lll_robust_lock_wait
+ .type __lll_robust_lock_wait,@function
+ .hidden __lll_robust_lock_wait
.align 16
-__lll_robust_mutex_lock_wait:
+__lll_robust_lock_wait:
cfi_startproc
pushq %r10
cfi_adjust_cfa_offset(8)
@@ -55,11 +69,7 @@ __lll_robust_mutex_lock_wait:
cfi_offset(%rdx, -24)
xorq %r10, %r10 /* No timeout. */
-#if FUTEX_WAIT == 0
- xorl %esi, %esi
-#else
- movl $FUTEX_WAIT, %esi
-#endif
+ LOAD_FUTEX_WAIT (%esi)
4: movl %eax, %edx
orl $FUTEX_WAITERS, %edx
@@ -97,14 +107,14 @@ __lll_robust_mutex_lock_wait:
cfi_restore(%r10)
retq
cfi_endproc
- .size __lll_robust_mutex_lock_wait,.-__lll_robust_mutex_lock_wait
+ .size __lll_robust_lock_wait,.-__lll_robust_lock_wait
- .globl __lll_robust_mutex_timedlock_wait
- .type __lll_robust_mutex_timedlock_wait,@function
- .hidden __lll_robust_mutex_timedlock_wait
+ .globl __lll_robust_timedlock_wait
+ .type __lll_robust_timedlock_wait,@function
+ .hidden __lll_robust_timedlock_wait
.align 16
-__lll_robust_mutex_timedlock_wait:
+__lll_robust_timedlock_wait:
cfi_startproc
/* Check for a valid timeout value. */
cmpq $1000000000, 8(%rdx)
@@ -122,10 +132,12 @@ __lll_robust_mutex_timedlock_wait:
cfi_offset(%r9, -24)
cfi_offset(%r12, -32)
cfi_offset(%r13, -40)
+ pushq %rsi
+ cfi_adjust_cfa_offset(8)
/* Stack frame for the timespec and timeval structs. */
- subq $24, %rsp
- cfi_adjust_cfa_offset(24)
+ subq $32, %rsp
+ cfi_adjust_cfa_offset(32)
movq %rdi, %r12
movq %rdx, %r13
@@ -174,11 +186,8 @@ __lll_robust_mutex_timedlock_wait:
jnz 5f
2: movq %rsp, %r10
-#if FUTEX_WAIT == 0
- xorl %esi, %esi
-#else
- movl $FUTEX_WAIT, %esi
-#endif
+ movl 32(%rsp), %esi
+ LOAD_FUTEX_WAIT (%esi)
movq %r12, %rdi
movl $SYS_futex, %eax
syscall
@@ -195,8 +204,8 @@ __lll_robust_mutex_timedlock_wait:
cmpxchgl %edx, (%r12)
jnz 7f
-6: addq $24, %rsp
- cfi_adjust_cfa_offset(-24)
+6: addq $40, %rsp
+ cfi_adjust_cfa_offset(-40)
popq %r13
cfi_adjust_cfa_offset(-8)
cfi_restore(%r13)
@@ -214,7 +223,7 @@ __lll_robust_mutex_timedlock_wait:
3: movl $EINVAL, %eax
retq
- cfi_adjust_cfa_offset(56)
+ cfi_adjust_cfa_offset(72)
cfi_offset(%r8, -16)
cfi_offset(%r9, -24)
cfi_offset(%r12, -32)
@@ -226,4 +235,4 @@ __lll_robust_mutex_timedlock_wait:
8: movl $ETIMEDOUT, %eax
jmp 6b
cfi_endproc
- .size __lll_robust_mutex_timedlock_wait,.-__lll_robust_mutex_timedlock_wait
+ .size __lll_robust_timedlock_wait,.-__lll_robust_timedlock_wait
--- libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedwrlock.S.jj 2007-07-25 19:23:33.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedwrlock.S 2007-07-25 20:00:24.000000000 +0200
@@ -18,26 +18,15 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <lowlevelrwlock.h>
#include <pthread-errnos.h>
#include <kernel-features.h>
-#define SYS_futex 202
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-#define FUTEX_PRIVATE_FLAG 128
-
/* For the calculation see asm/vsyscall.h. */
#define VSYSCALL_ADDR_vgettimeofday 0xffffffffff600000
-#ifndef UP
-# define LOCK lock
-#else
-# define LOCK
-#endif
-
-
.text
.globl pthread_rwlock_timedwrlock
@@ -168,11 +157,11 @@ pthread_rwlock_timedwrlock:
popq %r12
retq
-1:
+1: movl PSHARED(%rdi), %esi
#if MUTEX != 0
addq $MUTEX, %rdi
#endif
- callq __lll_mutex_lock_wait
+ callq __lll_lock_wait
jmp 2b
14: cmpl %fs:TID, %eax
@@ -180,13 +169,13 @@ pthread_rwlock_timedwrlock:
20: movl $EDEADLK, %edx
jmp 9b
-6:
+6: movl PSHARED(%r12), %esi
#if MUTEX == 0
movq %r12, %rdi
#else
leal MUTEX(%r12), %rdi
#endif
- callq __lll_mutex_unlock_wake
+ callq __lll_unlock_wake
jmp 7b
/* Overflow. */
@@ -194,22 +183,22 @@ pthread_rwlock_timedwrlock:
movl $EAGAIN, %edx
jmp 9b
-10:
+10: movl PSHARED(%r12), %esi
#if MUTEX == 0
movq %r12, %rdi
#else
leaq MUTEX(%r12), %rdi
#endif
- callq __lll_mutex_unlock_wake
+ callq __lll_unlock_wake
jmp 11b
-12:
+12: movl PSHARED(%r12), %esi
#if MUTEX == 0
movq %r12, %rdi
#else
leaq MUTEX(%r12), %rdi
#endif
- callq __lll_mutex_lock_wait
+ callq __lll_lock_wait
jmp 13b
16: movq $-ETIMEDOUT, %rdx
--- libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_barrier_wait.S.jj 2007-05-28 13:45:24.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_barrier_wait.S 2007-07-25 20:00:24.000000000 +0200
@@ -18,18 +18,9 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <lowlevelbarrier.h>
-#define SYS_futex 202
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-
-#ifndef UP
-# define LOCK lock
-#else
-# define LOCK
-#endif
-
.text
@@ -142,21 +133,29 @@ pthread_barrier_wait:
retq
-1: addq $MUTEX, %rdi
- callq __lll_mutex_lock_wait
+1: movl PRIVATE(%rdi), %esi
+ addq $MUTEX, %rdi
+ xorl $LLL_SHARED, %esi
+ callq __lll_lock_wait
subq $MUTEX, %rdi
jmp 2b
-4: addq $MUTEX, %rdi
- callq __lll_mutex_unlock_wake
+4: movl PRIVATE(%rdi), %esi
+ addq $MUTEX, %rdi
+ xorl $LLL_SHARED, %esi
+ callq __lll_unlock_wake
jmp 5b
-6: addq $MUTEX, %rdi
- callq __lll_mutex_unlock_wake
+6: movl PRIVATE(%rdi), %esi
+ addq $MUTEX, %rdi
+ xorl $LLL_SHARED, %esi
+ callq __lll_unlock_wake
subq $MUTEX, %rdi
jmp 7b
-9: addq $MUTEX, %rdi
- callq __lll_mutex_unlock_wake
+9: movl PRIVATE(%rdi), %esi
+ addq $MUTEX, %rdi
+ xorl $LLL_SHARED, %esi
+ callq __lll_unlock_wake
jmp 10b
.size pthread_barrier_wait,.-pthread_barrier_wait
--- libc/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.S.jj 2007-06-01 12:07:58.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.S 2007-07-25 20:00:24.000000000 +0200
@@ -19,33 +19,46 @@
#include <sysdep.h>
#include <pthread-errnos.h>
+#include <kernel-features.h>
+#include <lowlevellock.h>
.text
-#ifndef LOCK
-# ifdef UP
-# define LOCK
+#ifdef __ASSUME_PRIVATE_FUTEX
+# define LOAD_PRIVATE_FUTEX_WAIT(reg) \
+ movl $(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg
+# define LOAD_PRIVATE_FUTEX_WAKE(reg) \
+ movl $(FUTEX_WAKE | FUTEX_PRIVATE_FLAG), reg
+# define LOAD_FUTEX_WAIT(reg) \
+ xorl $(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg
+# define LOAD_FUTEX_WAKE(reg) \
+ xorl $(FUTEX_WAKE | FUTEX_PRIVATE_FLAG), reg
+#else
+# if FUTEX_WAIT == 0
+# define LOAD_PRIVATE_FUTEX_WAIT(reg) \
+ movl %fs:PRIVATE_FUTEX, reg
# else
-# define LOCK lock
+# define LOAD_PRIVATE_FUTEX_WAIT(reg) \
+ movl %fs:PRIVATE_FUTEX, reg ; \
+ orl $FUTEX_WAIT, reg
# endif
-#endif
-
-#define SYS_futex 202
-#ifndef FUTEX_WAIT
-# define FUTEX_WAIT 0
-# define FUTEX_WAKE 1
-#endif
-
-#ifndef LOAD_FUTEX_WAIT
+# define LOAD_PRIVATE_FUTEX_WAKE(reg) \
+ movl %fs:PRIVATE_FUTEX, reg ; \
+ orl $FUTEX_WAKE, reg
# if FUTEX_WAIT == 0
# define LOAD_FUTEX_WAIT(reg) \
- xorl reg, reg
+ xorl $FUTEX_PRIVATE_FLAG, reg ; \
+ andl %fs:PRIVATE_FUTEX, reg
# else
# define LOAD_FUTEX_WAIT(reg) \
- movl $FUTEX_WAIT, reg
+ xorl $FUTEX_PRIVATE_FLAG, reg ; \
+ andl %fs:PRIVATE_FUTEX, reg ; \
+ orl $FUTEX_WAIT, reg
# endif
# define LOAD_FUTEX_WAKE(reg) \
- movl $FUTEX_WAKE, reg
+ xorl $FUTEX_PRIVATE_FLAG, reg ; \
+ andl %fs:PRIVATE_FUTEX, reg ; \
+ orl $FUTEX_WAKE, reg
#endif
@@ -53,11 +66,11 @@
#define VSYSCALL_ADDR_vgettimeofday 0xffffffffff600000
- .globl __lll_mutex_lock_wait
- .type __lll_mutex_lock_wait,@function
- .hidden __lll_mutex_lock_wait
+ .globl __lll_lock_wait_private
+ .type __lll_lock_wait_private,@function
+ .hidden __lll_lock_wait_private
.align 16
-__lll_mutex_lock_wait:
+__lll_lock_wait_private:
cfi_startproc
pushq %r10
cfi_adjust_cfa_offset(8)
@@ -67,7 +80,7 @@ __lll_mutex_lock_wait:
cfi_offset(%rdx, -24)
xorq %r10, %r10 /* No timeout. */
movl $2, %edx
- LOAD_FUTEX_WAIT (%esi)
+ LOAD_PRIVATE_FUTEX_WAIT (%esi)
cmpl %edx, %eax /* NB: %edx == 2 */
jne 2f
@@ -89,15 +102,52 @@ __lll_mutex_lock_wait:
cfi_restore(%r10)
retq
cfi_endproc
- .size __lll_mutex_lock_wait,.-__lll_mutex_lock_wait
-
+ .size __lll_lock_wait_private,.-__lll_lock_wait_private
#ifdef NOT_IN_libc
- .globl __lll_mutex_timedlock_wait
- .type __lll_mutex_timedlock_wait,@function
- .hidden __lll_mutex_timedlock_wait
+ .globl __lll_lock_wait
+ .type __lll_lock_wait,@function
+ .hidden __lll_lock_wait
.align 16
-__lll_mutex_timedlock_wait:
+__lll_lock_wait:
+ cfi_startproc
+ pushq %r10
+ cfi_adjust_cfa_offset(8)
+ pushq %rdx
+ cfi_adjust_cfa_offset(8)
+ cfi_offset(%r10, -16)
+ cfi_offset(%rdx, -24)
+ xorq %r10, %r10 /* No timeout. */
+ movl $2, %edx
+ LOAD_FUTEX_WAIT (%esi)
+
+ cmpl %edx, %eax /* NB: %edx == 2 */
+ jne 2f
+
+1: movl $SYS_futex, %eax
+ syscall
+
+2: movl %edx, %eax
+ xchgl %eax, (%rdi) /* NB: lock is implied */
+
+ testl %eax, %eax
+ jnz 1b
+
+ popq %rdx
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%rdx)
+ popq %r10
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%r10)
+ retq
+ cfi_endproc
+ .size __lll_lock_wait,.-__lll_lock_wait
+
+ .globl __lll_timedlock_wait
+ .type __lll_timedlock_wait,@function
+ .hidden __lll_timedlock_wait
+ .align 16
+__lll_timedlock_wait:
cfi_startproc
/* Check for a valid timeout value. */
cmpq $1000000000, 8(%rdx)
@@ -118,10 +168,12 @@ __lll_mutex_timedlock_wait:
cfi_offset(%r12, -32)
cfi_offset(%r13, -40)
cfi_offset(%r14, -48)
+ pushq %rsi
+ cfi_adjust_cfa_offset(8)
/* Stack frame for the timespec and timeval structs. */
- subq $16, %rsp
- cfi_adjust_cfa_offset(16)
+ subq $24, %rsp
+ cfi_adjust_cfa_offset(24)
movq %rdi, %r12
movq %rdx, %r13
@@ -162,6 +214,7 @@ __lll_mutex_timedlock_wait:
je 8f
movq %rsp, %r10
+ movl 24(%rsp), %esi
LOAD_FUTEX_WAIT (%esi)
movq %r12, %rdi
movl $SYS_futex, %eax
@@ -174,8 +227,8 @@ __lll_mutex_timedlock_wait:
cmpxchgl %edx, (%r12)
jnz 7f
-6: addq $16, %rsp
- cfi_adjust_cfa_offset(-16)
+6: addq $32, %rsp
+ cfi_adjust_cfa_offset(-32)
popq %r14
cfi_adjust_cfa_offset(-8)
cfi_restore(%r14)
@@ -196,7 +249,7 @@ __lll_mutex_timedlock_wait:
3: movl $EINVAL, %eax
retq
- cfi_adjust_cfa_offset(56)
+ cfi_adjust_cfa_offset(72)
cfi_offset(%r8, -16)
cfi_offset(%r9, -24)
cfi_offset(%r12, -32)
@@ -216,15 +269,15 @@ __lll_mutex_timedlock_wait:
5: movl $ETIMEDOUT, %eax
jmp 6b
cfi_endproc
- .size __lll_mutex_timedlock_wait,.-__lll_mutex_timedlock_wait
+ .size __lll_timedlock_wait,.-__lll_timedlock_wait
#endif
- .globl __lll_mutex_unlock_wake
- .type __lll_mutex_unlock_wake,@function
- .hidden __lll_mutex_unlock_wake
+ .globl __lll_unlock_wake_private
+ .type __lll_unlock_wake_private,@function
+ .hidden __lll_unlock_wake_private
.align 16
-__lll_mutex_unlock_wake:
+__lll_unlock_wake_private:
cfi_startproc
pushq %rsi
cfi_adjust_cfa_offset(8)
@@ -234,7 +287,7 @@ __lll_mutex_unlock_wake:
cfi_offset(%rdx, -24)
movl $0, (%rdi)
- LOAD_FUTEX_WAKE (%esi)
+ LOAD_PRIVATE_FUTEX_WAKE (%esi)
movl $1, %edx /* Wake one thread. */
movl $SYS_futex, %eax
syscall
@@ -247,10 +300,38 @@ __lll_mutex_unlock_wake:
cfi_restore(%rsi)
retq
cfi_endproc
- .size __lll_mutex_unlock_wake,.-__lll_mutex_unlock_wake
-
+ .size __lll_unlock_wake_private,.-__lll_unlock_wake_private
#ifdef NOT_IN_libc
+ .globl __lll_unlock_wake
+ .type __lll_unlock_wake,@function
+ .hidden __lll_unlock_wake
+ .align 16
+__lll_unlock_wake:
+ cfi_startproc
+ pushq %rsi
+ cfi_adjust_cfa_offset(8)
+ pushq %rdx
+ cfi_adjust_cfa_offset(8)
+ cfi_offset(%rsi, -16)
+ cfi_offset(%rdx, -24)
+
+ movl $0, (%rdi)
+ LOAD_FUTEX_WAKE (%esi)
+ movl $1, %edx /* Wake one thread. */
+ movl $SYS_futex, %eax
+ syscall
+
+ popq %rdx
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%rdx)
+ popq %rsi
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%rsi)
+ retq
+ cfi_endproc
+ .size __lll_unlock_wake,.-__lll_unlock_wake
+
.globl __lll_timedwait_tid
.type __lll_timedwait_tid,@function
.hidden __lll_timedwait_tid
--- libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_unlock.S.jj 2007-07-23 19:36:30.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_unlock.S 2007-07-25 20:00:24.000000000 +0200
@@ -18,22 +18,11 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <lowlevelrwlock.h>
#include <kernel-features.h>
-#define SYS_futex 202
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-#define FUTEX_PRIVATE_FLAG 128
-
-#ifndef UP
-# define LOCK lock
-#else
-# define LOCK
-#endif
-
-
.text
.globl __pthread_rwlock_unlock
@@ -107,28 +96,28 @@ __pthread_rwlock_unlock:
4: xorl %eax, %eax
retq
-1:
+1: movl PSHARED(%rdi), %esi
#if MUTEX != 0
addq $MUTEX, %rdi
#endif
- callq __lll_mutex_lock_wait
+ callq __lll_lock_wait
#if MUTEX != 0
subq $MUTEX, %rdi
#endif
jmp 2b
-3:
+3: movl PSHARED(%rdi), %esi
#if MUTEX != 0
addq $MUTEX, %rdi
#endif
- callq __lll_mutex_unlock_wake
+ callq __lll_unlock_wake
jmp 4b
-7:
+7: movl PSHARED(%rdi), %esi
#if MUTEX != 0
addq $MUTEX, %rdi
#endif
- callq __lll_mutex_unlock_wake
+ callq __lll_unlock_wake
jmp 8b
.size __pthread_rwlock_unlock,.-__pthread_rwlock_unlock
--- libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_wrlock.S.jj 2007-07-23 19:36:30.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_wrlock.S 2007-07-25 20:00:24.000000000 +0200
@@ -18,23 +18,12 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <lowlevelrwlock.h>
#include <pthread-errnos.h>
#include <kernel-features.h>
-#define SYS_futex 202
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-#define FUTEX_PRIVATE_FLAG 128
-
-#ifndef UP
-# define LOCK lock
-#else
-# define LOCK
-#endif
-
-
.text
.globl __pthread_rwlock_wrlock
@@ -121,11 +110,11 @@ __pthread_rwlock_wrlock:
movq %rdx, %rax
retq
-1:
+1: movl PSHARED(%rdi), %esi
#if MUTEX != 0
addq $MUTEX, %rdi
#endif
- callq __lll_mutex_lock_wait
+ callq __lll_lock_wait
#if MUTEX != 0
subq $MUTEX, %rdi
#endif
@@ -136,32 +125,32 @@ __pthread_rwlock_wrlock:
movl $EDEADLK, %edx
jmp 9b
-6:
+6: movl PSHARED(%rdi), %esi
#if MUTEX != 0
addq $MUTEX, %rdi
#endif
- callq __lll_mutex_unlock_wake
+ callq __lll_unlock_wake
jmp 7b
4: decl WRITERS_QUEUED(%rdi)
movl $EAGAIN, %edx
jmp 9b
-10:
+10: movl PSHARED(%rdi), %esi
#if MUTEX != 0
addq $MUTEX, %rdi
#endif
- callq __lll_mutex_unlock_wake
+ callq __lll_unlock_wake
#if MUTEX != 0
subq $MUTEX, %rdi
#endif
jmp 11b
-12:
+12: movl PSHARED(%rdi), %esi
#if MUTEX != 0
addq $MUTEX, %rdi
#endif
- callq __lll_mutex_lock_wait
+ callq __lll_lock_wait
#if MUTEX != 0
subq $MUTEX, %rdi
#endif
--- libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedrdlock.S.jj 2007-07-25 19:23:33.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedrdlock.S 2007-07-25 20:00:24.000000000 +0200
@@ -18,27 +18,15 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <lowlevelrwlock.h>
#include <pthread-errnos.h>
#include <kernel-features.h>
-#define SYS_futex 202
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-#define FUTEX_PRIVATE_FLAG 128
-
/* For the calculation see asm/vsyscall.h. */
#define VSYSCALL_ADDR_vgettimeofday 0xffffffffff600000
-
-#ifndef UP
-# define LOCK lock
-#else
-# define LOCK
-#endif
-
-
.text
.globl pthread_rwlock_timedrdlock
@@ -172,11 +160,11 @@ pthread_rwlock_timedrdlock:
popq %r12
retq
-1:
+1: movl PSHARED(%rdi), %esi
#if MUTEX != 0
addq $MUTEX, %rdi
#endif
- callq __lll_mutex_lock_wait
+ callq __lll_lock_wait
jmp 2b
14: cmpl %fs:TID, %eax
@@ -184,13 +172,13 @@ pthread_rwlock_timedrdlock:
movl $EDEADLK, %edx
jmp 9b
-6:
+6: movl PSHARED(%r12), %esi
#if MUTEX == 0
movq %r12, %rdi
#else
leal MUTEX(%r12), %rdi
#endif
- callq __lll_mutex_unlock_wake
+ callq __lll_unlock_wake
jmp 7b
/* Overflow. */
@@ -203,22 +191,22 @@ pthread_rwlock_timedrdlock:
movl $EAGAIN, %edx
jmp 9b
-10:
+10: movl PSHARED(%r12), %esi
#if MUTEX == 0
movq %r12, %rdi
#else
leaq MUTEX(%r12), %rdi
#endif
- callq __lll_mutex_unlock_wake
+ callq __lll_unlock_wake
jmp 11b
-12:
+12: movl PSHARED(%r12), %esi
#if MUTEX == 0
movq %r12, %rdi
#else
leaq MUTEX(%r12), %rdi
#endif
- callq __lll_mutex_lock_wait
+ callq __lll_lock_wait
jmp 13b
16: movq $-ETIMEDOUT, %rdx
--- libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_broadcast.S.jj 2006-08-03 19:36:25.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_broadcast.S 2007-07-25 20:00:24.000000000 +0200
@@ -1,4 +1,5 @@
-/* Copyright (C) 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007
+ Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -19,23 +20,11 @@
#include <sysdep.h>
#include <shlib-compat.h>
+#include <lowlevellock.h>
#include <lowlevelcond.h>
#include <kernel-features.h>
#include <pthread-pi-defines.h>
-
-#ifdef UP
-# define LOCK
-#else
-# define LOCK lock
-#endif
-
-#define SYS_futex 202
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-#define FUTEX_REQUEUE 3
-#define FUTEX_CMP_REQUEUE 4
-
-#define EINVAL 22
+#include <pthread-errnos.h>
.text
@@ -115,7 +104,9 @@ __pthread_cond_broadcast:
#if cond_lock != 0
addq $cond_lock, %rdi
#endif
- callq __lll_mutex_lock_wait
+ /* XYZ */
+ movl $LLL_SHARED, %esi
+ callq __lll_lock_wait
#if cond_lock != 0
subq $cond_lock, %rdi
#endif
@@ -123,12 +114,16 @@ __pthread_cond_broadcast:
/* Unlock in loop requires wakeup. */
5: addq $cond_lock-cond_futex, %rdi
- callq __lll_mutex_unlock_wake
+ /* XYZ */
+ movl $LLL_SHARED, %esi
+ callq __lll_unlock_wake
jmp 6b
/* Unlock in loop requires wakeup. */
7: addq $cond_lock-cond_futex, %rdi
- callq __lll_mutex_unlock_wake
+ /* XYZ */
+ movl $LLL_SHARED, %esi
+ callq __lll_unlock_wake
subq $cond_lock-cond_futex, %rdi
jmp 8b
--- libc/nptl/sysdeps/unix/sysv/linux/x86_64/sem_trywait.S.jj 2007-05-28 13:45:24.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/sem_trywait.S 2007-07-25 20:00:24.000000000 +0200
@@ -18,15 +18,10 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <shlib-compat.h>
#include <pthread-errnos.h>
-#ifndef UP
-# define LOCK lock
-#else
-# define
-#endif
-
.text
.globl sem_trywait
--- libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_signal.S.jj 2005-09-09 12:58:42.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_signal.S 2007-07-25 20:00:24.000000000 +0200
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2004, 2005, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -19,23 +19,10 @@
#include <sysdep.h>
#include <shlib-compat.h>
+#include <lowlevellock.h>
#include <lowlevelcond.h>
#include <kernel-features.h>
-
-#ifdef UP
-# define LOCK
-#else
-# define LOCK lock
-#endif
-
-#define SYS_futex 202
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-#define FUTEX_WAKE_OP 5
-
-#define FUTEX_OP_CLEAR_WAKE_IF_GT_ONE ((4 << 24) | 1)
-
-#define EINVAL 22
+#include <pthread-errnos.h>
.text
@@ -111,7 +98,9 @@ __pthread_cond_signal:
#if cond_lock != 0
addq $cond_lock, %rdi
#endif
- callq __lll_mutex_lock_wait
+ /* XYZ */
+ movl $LLL_SHARED, %esi
+ callq __lll_lock_wait
#if cond_lock != 0
subq $cond_lock, %rdi
#endif
@@ -120,7 +109,9 @@ __pthread_cond_signal:
/* Unlock in loop requires wakeup. */
5:
movq %r8, %rdi
- callq __lll_mutex_unlock_wake
+ /* XYZ */
+ movl $LLL_SHARED, %esi
+ callq __lll_unlock_wake
jmp 6b
.size __pthread_cond_signal, .-__pthread_cond_signal
versioned_symbol (libpthread, __pthread_cond_signal, pthread_cond_signal,
--- libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_once.S.jj 2007-05-24 16:41:08.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_once.S 2007-07-25 20:00:24.000000000 +0200
@@ -19,17 +19,8 @@
#include <kernel-features.h>
#include <tcb-offsets.h>
+#include <lowlevellock.h>
-#ifndef UP
-# define LOCK lock
-#else
-# define LOCK
-#endif
-
-#define SYS_futex 202
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-#define FUTEX_PRIVATE_FLAG 128
.comm __fork_generation, 4, 4
--- libc/nptl/sysdeps/unix/sysv/linux/x86_64/libc-lowlevellock.S.jj 2007-06-01 12:07:58.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/libc-lowlevellock.S 2007-07-25 20:00:24.000000000 +0200
@@ -17,19 +17,4 @@
Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
02111-1307 USA. */
-#include <kernel-features.h>
-
-/* All locks in libc are private. Use the kernel feature if possible. */
-#define FUTEX_PRIVATE_FLAG 128
-#ifdef __ASSUME_PRIVATE_FUTEX
-# define FUTEX_WAIT (0 | FUTEX_PRIVATE_FLAG)
-# define FUTEX_WAKE (1 | FUTEX_PRIVATE_FLAG)
-#else
-# define LOAD_FUTEX_WAIT(reg) \
- movl %fs:PRIVATE_FUTEX, reg
-# define LOAD_FUTEX_WAKE(reg) \
- movl %fs:PRIVATE_FUTEX, reg ; \
- orl $FUTEX_WAKE, reg
-#endif
-
#include "lowlevellock.S"
--- libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S.jj 2007-06-01 12:07:58.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S 2007-07-25 20:00:24.000000000 +0200
@@ -19,19 +19,10 @@
#include <sysdep.h>
#include <shlib-compat.h>
+#include <lowlevellock.h>
#include <lowlevelcond.h>
#include <pthread-errnos.h>
-#ifdef UP
-# define LOCK
-#else
-# define LOCK lock
-#endif
-
-#define SYS_futex 202
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-
/* For the calculation see asm/vsyscall.h. */
#define VSYSCALL_ADDR_vgettimeofday 0xffffffffff600000
@@ -301,7 +292,9 @@ __pthread_cond_timedwait:
#if cond_lock != 0
addq $cond_lock, %rdi
#endif
- callq __lll_mutex_lock_wait
+ /* XYZ */
+ movl $LLL_SHARED, %esi
+ callq __lll_lock_wait
jmp 2b
/* Unlock in loop requires wakeup. */
@@ -309,7 +302,9 @@ __pthread_cond_timedwait:
#if cond_lock != 0
addq $cond_lock, %rdi
#endif
- callq __lll_mutex_unlock_wake
+ /* XYZ */
+ movl $LLL_SHARED, %esi
+ callq __lll_unlock_wake
jmp 4b
/* Locking in loop failed. */
@@ -317,7 +312,9 @@ __pthread_cond_timedwait:
#if cond_lock != 0
addq $cond_lock, %rdi
#endif
- callq __lll_mutex_lock_wait
+ /* XYZ */
+ movl $LLL_SHARED, %esi
+ callq __lll_lock_wait
#if cond_lock != 0
subq $cond_lock, %rdi
#endif
@@ -328,7 +325,9 @@ __pthread_cond_timedwait:
#if cond_lock != 0
addq $cond_lock, %rdi
#endif
- callq __lll_mutex_unlock_wake
+ /* XYZ */
+ movl $LLL_SHARED, %esi
+ callq __lll_unlock_wake
jmp 11b
/* The initial unlocking of the mutex failed. */
@@ -345,7 +344,9 @@ __pthread_cond_timedwait:
#if cond_lock != 0
addq $cond_lock, %rdi
#endif
- callq __lll_mutex_unlock_wake
+ /* XYZ */
+ movl $LLL_SHARED, %esi
+ callq __lll_unlock_wake
17: movq (%rsp), %rax
jmp 18b
--- libc/nptl/sysdeps/unix/sysv/linux/fork.c.jj 2007-07-25 20:00:18.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/fork.c 2007-07-25 20:00:24.000000000 +0200
@@ -183,7 +183,7 @@ __libc_fork (void)
}
/* Initialize the fork lock. */
- __fork_lock = (lll_lock_t) LLL_LOCK_INITIALIZER;
+ __fork_lock = LLL_LOCK_INITIALIZER;
}
else
{
--- libc/nptl/sysdeps/unix/sysv/linux/sem_post.c.jj 2007-06-19 13:10:21.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/sem_post.c 2007-07-25 20:00:24.000000000 +0200
@@ -36,8 +36,7 @@ __new_sem_post (sem_t *sem)
if (isem->nwaiters > 0)
{
int err = lll_futex_wake (&isem->value, 1,
- // XYZ check mutex flag
- LLL_SHARED);
+ isem->private ^ FUTEX_PRIVATE_FLAG);
if (__builtin_expect (err, 0) < 0)
{
__set_errno (-err);
--- libc/nptl/sysdeps/unix/sysv/linux/lowlevellock.c.jj 2007-06-19 13:10:21.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/lowlevellock.c 2007-07-25 20:00:24.000000000 +0200
@@ -25,22 +25,35 @@
void
-__lll_lock_wait (int *futex)
+__lll_lock_wait_private (int *futex)
{
do
{
int oldval = atomic_compare_and_exchange_val_acq (futex, 2, 1);
if (oldval != 0)
- lll_futex_wait (futex, 2,
- // XYZ check mutex flag
- LLL_SHARED);
+ lll_futex_wait (futex, 2, LLL_PRIVATE);
+ }
+ while (atomic_compare_and_exchange_bool_acq (futex, 2, 0) != 0);
+}
+
+
+/* These functions don't get included in libc.so */
+#ifdef IS_IN_libpthread
+void
+__lll_lock_wait (int *futex, int private)
+{
+ do
+ {
+ int oldval = atomic_compare_and_exchange_val_acq (futex, 2, 1);
+ if (oldval != 0)
+ lll_futex_wait (futex, 2, private);
}
while (atomic_compare_and_exchange_bool_acq (futex, 2, 0) != 0);
}
int
-__lll_timedlock_wait (int *futex, const struct timespec *abstime)
+__lll_timedlock_wait (int *futex, const struct timespec *abstime, int private)
{
/* Reject invalid timeouts. */
if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
@@ -70,9 +83,7 @@ __lll_timedlock_wait (int *futex, const
/* Wait. */
int oldval = atomic_compare_and_exchange_val_acq (futex, 2, 1);
if (oldval != 0)
- lll_futex_timed_wait (futex, 2, &rt,
- // XYZ check mutex flag
- LLL_SHARED);
+ lll_futex_timed_wait (futex, 2, &rt, private);
}
while (atomic_compare_and_exchange_bool_acq (futex, 2, 0) != 0);
@@ -80,8 +91,6 @@ __lll_timedlock_wait (int *futex, const
}
-/* This function doesn't get included in libc.so */
-#ifdef IS_IN_libpthread
int
__lll_timedwait_tid (int *tidp, const struct timespec *abstime)
{
--- libc/nptl/sysdeps/unix/sysv/linux/fork.h.jj 2006-06-21 17:36:39.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/fork.h 2007-07-25 20:00:24.000000000 +0200
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2006 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2006, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -26,7 +26,7 @@ extern unsigned long int __fork_generati
extern unsigned long int *__fork_generation_pointer attribute_hidden;
/* Lock to protect allocation and deallocation of fork handlers. */
-extern lll_lock_t __fork_lock attribute_hidden;
+extern int __fork_lock attribute_hidden;
/* Elements of the fork handler lists. */
struct fork_handler
--- libc/nptl/sysdeps/unix/sysv/linux/sem_wait.c.jj 2007-06-19 13:10:21.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/sem_wait.c 2007-07-25 20:00:24.000000000 +0200
@@ -57,8 +57,7 @@ __new_sem_wait (sem_t *sem)
int oldtype = __pthread_enable_asynccancel ();
err = lll_futex_wait (&isem->value, 0,
- // XYZ check mutex flag
- LLL_SHARED);
+ isem->private ^ FUTEX_PRIVATE_FLAG);
/* Disable asynchronous cancellation. */
__pthread_disable_asynccancel (oldtype);
--- libc/nptl/sysdeps/unix/sysv/linux/register-atfork.c.jj 2005-12-30 09:04:04.000000000 +0100
+++ libc/nptl/sysdeps/unix/sysv/linux/register-atfork.c 2007-07-25 20:00:24.000000000 +0200
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2005, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -24,7 +24,7 @@
/* Lock to protect allocation and deallocation of fork handlers. */
-lll_lock_t __fork_lock = LLL_LOCK_INITIALIZER;
+int __fork_lock = LLL_LOCK_INITIALIZER;
/* Number of pre-allocated handler entries. */
@@ -85,7 +85,7 @@ __register_atfork (prepare, parent, chil
void *dso_handle;
{
/* Get the lock to not conflict with other allocations. */
- lll_lock (__fork_lock);
+ lll_lock (__fork_lock, LLL_PRIVATE);
struct fork_handler *newp = fork_handler_alloc ();
@@ -102,7 +102,7 @@ __register_atfork (prepare, parent, chil
}
/* Release the lock. */
- lll_unlock (__fork_lock);
+ lll_unlock (__fork_lock, LLL_PRIVATE);
return newp == NULL ? ENOMEM : 0;
}
@@ -112,7 +112,7 @@ libc_hidden_def (__register_atfork)
libc_freeres_fn (free_mem)
{
/* Get the lock to not conflict with running forks. */
- lll_lock (__fork_lock);
+ lll_lock (__fork_lock, LLL_PRIVATE);
/* No more fork handlers. */
__fork_handlers = NULL;
@@ -123,7 +123,7 @@ libc_freeres_fn (free_mem)
memset (&fork_handler_pool, '\0', sizeof (fork_handler_pool));
/* Release the lock. */
- lll_unlock (__fork_lock);
+ lll_unlock (__fork_lock, LLL_PRIVATE);
/* We can free the memory after releasing the lock. */
while (runp != NULL)
--- libc/nptl/sysdeps/unix/sysv/linux/sem_timedwait.c.jj 2007-06-19 13:10:21.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/sem_timedwait.c 2007-07-25 20:00:24.000000000 +0200
@@ -85,8 +85,7 @@ sem_timedwait (sem_t *sem, const struct
int oldtype = __pthread_enable_asynccancel ();
err = lll_futex_timed_wait (&isem->value, 0, &rt,
- // XYZ check mutex flag
- LLL_SHARED);
+ isem->private ^ FUTEX_PRIVATE_FLAG);
/* Disable asynchronous cancellation. */
__pthread_disable_asynccancel (oldtype);
--- libc/nptl/sysdeps/unix/sysv/linux/lowlevelrobustlock.c.jj 2007-06-19 13:10:21.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/lowlevelrobustlock.c 2007-07-25 20:00:24.000000000 +0200
@@ -25,7 +25,7 @@
int
-__lll_robust_lock_wait (int *futex)
+__lll_robust_lock_wait (int *futex, int private)
{
int oldval = *futex;
int tid = THREAD_GETMEM (THREAD_SELF, tid);
@@ -44,9 +44,7 @@ __lll_robust_lock_wait (int *futex)
&& atomic_compare_and_exchange_bool_acq (futex, newval, oldval))
continue;
- lll_futex_wait (futex, newval,
- // XYZ check mutex flag
- LLL_SHARED);
+ lll_futex_wait (futex, newval, private);
try:
;
@@ -59,7 +57,8 @@ __lll_robust_lock_wait (int *futex)
int
-__lll_robust_timedlock_wait (int *futex, const struct timespec *abstime)
+__lll_robust_timedlock_wait (int *futex, const struct timespec *abstime,
+ int private)
{
/* Reject invalid timeouts. */
if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
@@ -102,9 +101,7 @@ __lll_robust_timedlock_wait (int *futex,
&& atomic_compare_and_exchange_bool_acq (futex, newval, oldval))
continue;
- lll_futex_timed_wait (futex, newval, &rt,
- // XYZ check mutex flag
- LLL_SHARED);
+ lll_futex_timed_wait (futex, newval, &rt, private);
try:
;
--- libc/nptl/sysdeps/unix/sysv/linux/pthread_mutex_cond_lock.c.jj 2006-02-17 09:09:45.000000000 +0100
+++ libc/nptl/sysdeps/unix/sysv/linux/pthread_mutex_cond_lock.c 2007-07-25 20:00:24.000000000 +0200
@@ -1,8 +1,8 @@
#include <pthreadP.h>
-#define LLL_MUTEX_LOCK(mutex) lll_mutex_cond_lock (mutex)
-#define LLL_MUTEX_TRYLOCK(mutex) lll_mutex_cond_trylock (mutex)
-#define LLL_ROBUST_MUTEX_LOCK(mutex, id) lll_robust_mutex_cond_lock (mutex, id)
+#define LLL_MUTEX_LOCK(mutex) lll_cond_lock (mutex, /* XYZ */ LLL_SHARED)
+#define LLL_MUTEX_TRYLOCK(mutex) lll_cond_trylock (mutex)
+#define LLL_ROBUST_MUTEX_LOCK(mutex, id) lll_robust_cond_lock (mutex, id, /* XYZ */ LLL_SHARED)
#define __pthread_mutex_lock __pthread_mutex_cond_lock
#define NO_INCR
--- libc/nptl/sysdeps/unix/sysv/linux/unregister-atfork.c.jj 2007-07-25 20:00:18.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/unregister-atfork.c 2007-07-25 20:00:24.000000000 +0200
@@ -54,7 +54,7 @@ __unregister_atfork (dso_handle)
that there couldn't have been another thread deleting something.
The __unregister_atfork function is only called from the
dlclose() code which itself serializes the operations. */
- lll_lock (__fork_lock);
+ lll_lock (__fork_lock, LLL_PRIVATE);
/* We have to create a new list with all the entries we don't remove. */
struct deleted_handler
@@ -89,7 +89,7 @@ __unregister_atfork (dso_handle)
while (runp != NULL);
/* Release the lock. */
- lll_unlock (__fork_lock);
+ lll_unlock (__fork_lock, LLL_PRIVATE);
/* Walk the list of all entries which have to be deleted. */
while (deleted != NULL)
--- libc/nptl/sysdeps/pthread/createthread.c.jj 2006-09-08 13:57:51.000000000 +0200
+++ libc/nptl/sysdeps/pthread/createthread.c 2007-07-25 20:00:24.000000000 +0200
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004, 2006 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2004, 2006, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -60,7 +60,7 @@ do_clone (struct pthread *pd, const stru
/* We Make sure the thread does not run far by forcing it to get a
lock. We lock it here too so that the new thread cannot continue
until we tell it to. */
- lll_lock (pd->lock);
+ lll_lock (pd->lock, LLL_PRIVATE);
/* One more thread. We cannot have the thread do this itself, since it
might exist but not have been scheduled yet by the time we've returned
@@ -223,7 +223,7 @@ create_thread (struct pthread *pd, const
__nptl_create_event ();
/* And finally restart the new thread. */
- lll_unlock (pd->lock);
+ lll_unlock (pd->lock, LLL_PRIVATE);
}
return res;
@@ -250,7 +250,7 @@ create_thread (struct pthread *pd, const
if (res == 0 && stopped)
/* And finally restart the new thread. */
- lll_unlock (pd->lock);
+ lll_unlock (pd->lock, LLL_PRIVATE);
return res;
}
--- libc/nptl/sysdeps/pthread/bits/stdio-lock.h.jj 2007-07-23 16:21:17.000000000 +0200
+++ libc/nptl/sysdeps/pthread/bits/stdio-lock.h 2007-07-25 20:00:24.000000000 +0200
@@ -42,7 +42,7 @@ typedef struct { int lock; int cnt; void
void *__self = THREAD_SELF; \
if ((_name).owner != __self) \
{ \
- lll_lock ((_name).lock); \
+ lll_lock ((_name).lock, LLL_PRIVATE); \
(_name).owner = __self; \
} \
++(_name).cnt; \
@@ -72,7 +72,7 @@ typedef struct { int lock; int cnt; void
if (--(_name).cnt == 0) \
{ \
(_name).owner = NULL; \
- lll_unlock ((_name).lock); \
+ lll_unlock ((_name).lock, LLL_PRIVATE); \
} \
} while (0)
--- libc/nptl/sysdeps/pthread/bits/libc-lock.h.jj 2007-03-19 17:43:11.000000000 +0100
+++ libc/nptl/sysdeps/pthread/bits/libc-lock.h 2007-07-25 20:00:24.000000000 +0200
@@ -228,7 +228,7 @@ typedef pthread_key_t __libc_key_t;
/* Lock the named lock variable. */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# define __libc_lock_lock(NAME) \
- ({ lll_lock (NAME); 0; })
+ ({ lll_lock (NAME, LLL_PRIVATE); 0; })
#else
# define __libc_lock_lock(NAME) \
__libc_maybe_call (__pthread_mutex_lock, (&(NAME)), 0)
@@ -245,7 +245,7 @@ typedef pthread_key_t __libc_key_t;
void *self = THREAD_SELF; \
if ((NAME).owner != self) \
{ \
- lll_lock ((NAME).lock); \
+ lll_lock ((NAME).lock, LLL_PRIVATE); \
(NAME).owner = self; \
} \
++(NAME).cnt; \
@@ -299,7 +299,7 @@ typedef pthread_key_t __libc_key_t;
/* Unlock the named lock variable. */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# define __libc_lock_unlock(NAME) \
- lll_unlock (NAME)
+ lll_unlock (NAME, LLL_PRIVATE)
#else
# define __libc_lock_unlock(NAME) \
__libc_maybe_call (__pthread_mutex_unlock, (&(NAME)), 0)
@@ -315,7 +315,7 @@ typedef pthread_key_t __libc_key_t;
if (--(NAME).cnt == 0) \
{ \
(NAME).owner = NULL; \
- lll_unlock ((NAME).lock); \
+ lll_unlock ((NAME).lock, LLL_PRIVATE); \
} \
} while (0)
#else
--- libc/nptl/pthread_mutex_unlock.c.jj 2007-06-19 13:10:21.000000000 +0200
+++ libc/nptl/pthread_mutex_unlock.c 2007-07-25 20:00:24.000000000 +0200
@@ -47,7 +47,7 @@ __pthread_mutex_unlock_usercnt (mutex, d
case PTHREAD_MUTEX_ERRORCHECK_NP:
/* Error checking mutex. */
if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid)
- || ! lll_mutex_islocked (mutex->__data.__lock))
+ || ! lll_islocked (mutex->__data.__lock))
return EPERM;
/* FALLTHROUGH */
@@ -61,7 +61,7 @@ __pthread_mutex_unlock_usercnt (mutex, d
--mutex->__data.__nusers;
/* Unlock. */
- lll_mutex_unlock (mutex->__data.__lock);
+ lll_unlock (mutex->__data.__lock, /* XYZ */ LLL_SHARED);
break;
case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
@@ -92,7 +92,7 @@ __pthread_mutex_unlock_usercnt (mutex, d
case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
if ((mutex->__data.__lock & FUTEX_TID_MASK)
!= THREAD_GETMEM (THREAD_SELF, tid)
- || ! lll_mutex_islocked (mutex->__data.__lock))
+ || ! lll_islocked (mutex->__data.__lock))
return EPERM;
/* If the previous owner died and the caller did not succeed in
@@ -115,7 +115,7 @@ __pthread_mutex_unlock_usercnt (mutex, d
--mutex->__data.__nusers;
/* Unlock. */
- lll_robust_mutex_unlock (mutex->__data.__lock);
+ lll_robust_unlock (mutex->__data.__lock, /* XYZ */ LLL_SHARED);
THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
break;
@@ -161,7 +161,7 @@ __pthread_mutex_unlock_usercnt (mutex, d
case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
if ((mutex->__data.__lock & FUTEX_TID_MASK)
!= THREAD_GETMEM (THREAD_SELF, tid)
- || ! lll_mutex_islocked (mutex->__data.__lock))
+ || ! lll_islocked (mutex->__data.__lock))
return EPERM;
/* If the previous owner died and the caller did not succeed in
--- libc/nptl/pthread_rwlock_unlock.c.jj 2007-06-19 13:10:21.000000000 +0200
+++ libc/nptl/pthread_rwlock_unlock.c 2007-07-25 20:00:24.000000000 +0200
@@ -27,7 +27,7 @@
int
__pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
{
- lll_mutex_lock (rwlock->__data.__lock);
+ lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);
if (rwlock->__data.__writer)
rwlock->__data.__writer = 0;
else
@@ -37,23 +37,21 @@ __pthread_rwlock_unlock (pthread_rwlock_
if (rwlock->__data.__nr_writers_queued)
{
++rwlock->__data.__writer_wakeup;
- lll_mutex_unlock (rwlock->__data.__lock);
+ lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
lll_futex_wake (&rwlock->__data.__writer_wakeup, 1,
- // XYZ check mutex flag
- LLL_SHARED);
+ rwlock->__data.__shared);
return 0;
}
else if (rwlock->__data.__nr_readers_queued)
{
++rwlock->__data.__readers_wakeup;
- lll_mutex_unlock (rwlock->__data.__lock);
+ lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
lll_futex_wake (&rwlock->__data.__readers_wakeup, INT_MAX,
- // XYZ check mutex flag
- LLL_SHARED);
+ rwlock->__data.__shared);
return 0;
}
}
- lll_mutex_unlock (rwlock->__data.__lock);
+ lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
return 0;
}
--- libc/nptl/pthread_rwlock_timedrdlock.c.jj 2007-06-19 13:10:21.000000000 +0200
+++ libc/nptl/pthread_rwlock_timedrdlock.c 2007-07-25 20:00:24.000000000 +0200
@@ -33,7 +33,7 @@ pthread_rwlock_timedrdlock (rwlock, abst
int result = 0;
/* Make sure we are along. */
- lll_mutex_lock(rwlock->__data.__lock);
+ lll_lock(rwlock->__data.__lock, rwlock->__data.__shared);
while (1)
{
@@ -110,16 +110,14 @@ pthread_rwlock_timedrdlock (rwlock, abst
int waitval = rwlock->__data.__readers_wakeup;
/* Free the lock. */
- lll_mutex_unlock (rwlock->__data.__lock);
+ lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
/* Wait for the writer to finish. */
err = lll_futex_timed_wait (&rwlock->__data.__readers_wakeup,
- waitval, &rt,
- // XYZ check mutex flag
- LLL_SHARED);
+ waitval, &rt, rwlock->__data.__shared);
/* Get the lock. */
- lll_mutex_lock (rwlock->__data.__lock);
+ lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);
--rwlock->__data.__nr_readers_queued;
@@ -133,7 +131,7 @@ pthread_rwlock_timedrdlock (rwlock, abst
}
/* We are done, free the lock. */
- lll_mutex_unlock (rwlock->__data.__lock);
+ lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
return result;
}
--- libc/nptl/old_pthread_cond_signal.c.jj 2003-03-21 09:02:07.000000000 +0100
+++ libc/nptl/old_pthread_cond_signal.c 2007-07-25 20:00:24.000000000 +0200
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -33,7 +33,7 @@ __pthread_cond_signal_2_0 (cond)
{
pthread_cond_t *newcond;
-#if LLL_MUTEX_LOCK_INITIALIZER == 0
+#if LLL_LOCK_INITIALIZER == 0
newcond = (pthread_cond_t *) calloc (sizeof (pthread_cond_t), 1);
if (newcond == NULL)
return ENOMEM;
--- libc/nptl/pthread_cond_timedwait.c.jj 2007-06-19 13:10:21.000000000 +0200
+++ libc/nptl/pthread_cond_timedwait.c 2007-07-25 20:00:24.000000000 +0200
@@ -54,13 +54,13 @@ __pthread_cond_timedwait (cond, mutex, a
return EINVAL;
/* Make sure we are along. */
- lll_mutex_lock (cond->__data.__lock);
+ lll_lock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
/* Now we can release the mutex. */
int err = __pthread_mutex_unlock_usercnt (mutex, 0);
if (err)
{
- lll_mutex_unlock (cond->__data.__lock);
+ lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
return err;
}
@@ -146,7 +146,7 @@ __pthread_cond_timedwait (cond, mutex, a
unsigned int futex_val = cond->__data.__futex;
/* Prepare to wait. Release the condvar futex. */
- lll_mutex_unlock (cond->__data.__lock);
+ lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
/* Enable asynchronous cancellation. Required by the standard. */
cbuffer.oldtype = __pthread_enable_asynccancel ();
@@ -161,7 +161,7 @@ __pthread_cond_timedwait (cond, mutex, a
__pthread_disable_asynccancel (cbuffer.oldtype);
/* We are going to look at shared data again, so get the lock. */
- lll_mutex_lock(cond->__data.__lock);
+ lll_lock(cond->__data.__lock, /* XYZ */ LLL_SHARED);
/* If a broadcast happened, we are done. */
if (cbuffer.bc_seq != cond->__data.__broadcast_seq)
@@ -203,7 +203,7 @@ __pthread_cond_timedwait (cond, mutex, a
LLL_SHARED);
/* We are done with the condvar. */
- lll_mutex_unlock (cond->__data.__lock);
+ lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
/* The cancellation handling is back to normal, remove the handler. */
__pthread_cleanup_pop (&buffer, 0);
--- libc/nptl/pthread_cond_broadcast.c.jj 2007-06-19 13:10:21.000000000 +0200
+++ libc/nptl/pthread_cond_broadcast.c 2007-07-25 20:00:24.000000000 +0200
@@ -33,7 +33,7 @@ __pthread_cond_broadcast (cond)
pthread_cond_t *cond;
{
/* Make sure we are alone. */
- lll_mutex_lock (cond->__data.__lock);
+ lll_lock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
/* Are there any waiters to be woken? */
if (cond->__data.__total_seq > cond->__data.__wakeup_seq)
@@ -47,7 +47,7 @@ __pthread_cond_broadcast (cond)
++cond->__data.__broadcast_seq;
/* We are done. */
- lll_mutex_unlock (cond->__data.__lock);
+ lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
/* Do not use requeue for pshared condvars. */
if (cond->__data.__mutex == (void *) ~0l)
@@ -79,7 +79,7 @@ __pthread_cond_broadcast (cond)
}
/* We are done. */
- lll_mutex_unlock (cond->__data.__lock);
+ lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
return 0;
}
--- libc/nptl/sem_open.c.jj 2007-05-28 13:45:23.000000000 +0200
+++ libc/nptl/sem_open.c 2007-07-25 20:00:24.000000000 +0200
@@ -147,7 +147,7 @@ __sem_search (const void *a, const void
void *__sem_mappings attribute_hidden;
/* Lock to protect the search tree. */
-lll_lock_t __sem_mappings_lock attribute_hidden = LLL_LOCK_INITIALIZER;
+int __sem_mappings_lock attribute_hidden = LLL_LOCK_INITIALIZER;
/* Search for existing mapping and if possible add the one provided. */
@@ -161,7 +161,7 @@ check_add_mapping (const char *name, siz
if (__fxstat64 (_STAT_VER, fd, &st) == 0)
{
/* Get the lock. */
- lll_lock (__sem_mappings_lock);
+ lll_lock (__sem_mappings_lock, LLL_PRIVATE);
/* Search for an existing mapping given the information we have. */
struct inuse_sem *fake;
@@ -210,7 +210,7 @@ check_add_mapping (const char *name, siz
}
/* Release the lock. */
- lll_unlock (__sem_mappings_lock);
+ lll_unlock (__sem_mappings_lock, LLL_PRIVATE);
}
if (result != existing && existing != SEM_FAILED && existing != MAP_FAILED)
--- libc/nptl/pthread_rwlock_tryrdlock.c.jj 2007-05-28 13:45:23.000000000 +0200
+++ libc/nptl/pthread_rwlock_tryrdlock.c 2007-07-25 20:00:24.000000000 +0200
@@ -28,7 +28,7 @@ __pthread_rwlock_tryrdlock (rwlock)
{
int result = EBUSY;
- lll_mutex_lock (rwlock->__data.__lock);
+ lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);
if (rwlock->__data.__writer == 0
&& (rwlock->__data.__nr_writers_queued == 0
@@ -43,7 +43,7 @@ __pthread_rwlock_tryrdlock (rwlock)
result = 0;
}
- lll_mutex_unlock (rwlock->__data.__lock);
+ lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
return result;
}
--- libc/nptl/pthread_rwlock_trywrlock.c.jj 2006-09-04 16:42:01.000000000 +0200
+++ libc/nptl/pthread_rwlock_trywrlock.c 2007-07-25 20:00:24.000000000 +0200
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -28,7 +28,7 @@ __pthread_rwlock_trywrlock (rwlock)
{
int result = EBUSY;
- lll_mutex_lock (rwlock->__data.__lock);
+ lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);
if (rwlock->__data.__writer == 0 && rwlock->__data.__nr_readers == 0)
{
@@ -36,7 +36,7 @@ __pthread_rwlock_trywrlock (rwlock)
result = 0;
}
- lll_mutex_unlock (rwlock->__data.__lock);
+ lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
return result;
}
--- libc/nptl/pthread_once.c.jj 2006-10-28 07:09:12.000000000 +0200
+++ libc/nptl/pthread_once.c 2007-07-25 20:00:24.000000000 +0200
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -22,7 +22,7 @@
-static lll_lock_t once_lock = LLL_LOCK_INITIALIZER;
+static int once_lock = LLL_LOCK_INITIALIZER;
int
@@ -35,7 +35,7 @@ __pthread_once (once_control, init_routi
object. */
if (*once_control == PTHREAD_ONCE_INIT)
{
- lll_lock (once_lock);
+ lll_lock (once_lock, LLL_PRIVATE);
/* XXX This implementation is not complete. It doesn't take
cancelation and fork into account. */
@@ -46,7 +46,7 @@ __pthread_once (once_control, init_routi
*once_control = !PTHREAD_ONCE_INIT;
}
- lll_unlock (once_lock);
+ lll_unlock (once_lock, LLL_PRIVATE);
}
return 0;
--- libc/nptl/pthread_getschedparam.c.jj 2007-06-01 12:07:58.000000000 +0200
+++ libc/nptl/pthread_getschedparam.c 2007-07-25 20:00:24.000000000 +0200
@@ -38,7 +38,7 @@ __pthread_getschedparam (threadid, polic
int result = 0;
- lll_lock (pd->lock);
+ lll_lock (pd->lock, LLL_PRIVATE);
/* The library is responsible for maintaining the values at all
times. If the user uses a interface other than
@@ -68,7 +68,7 @@ __pthread_getschedparam (threadid, polic
memcpy (param, &pd->schedparam, sizeof (struct sched_param));
}
- lll_unlock (pd->lock);
+ lll_unlock (pd->lock, LLL_PRIVATE);
return result;
}
--- libc/nptl/pthread_barrier_init.c.jj 2007-05-28 13:45:23.000000000 +0200
+++ libc/nptl/pthread_barrier_init.c 2007-07-25 20:00:24.000000000 +0200
@@ -40,7 +40,7 @@ pthread_barrier_init (barrier, attr, cou
if (__builtin_expect (count == 0, 0))
return EINVAL;
- struct pthread_barrierattr *iattr
+ const struct pthread_barrierattr *iattr
= (attr != NULL
? iattr = (struct pthread_barrierattr *) attr
: &default_attr);
--- libc/nptl/semaphoreP.h.jj 2007-05-28 13:45:23.000000000 +0200
+++ libc/nptl/semaphoreP.h 2007-07-25 20:00:24.000000000 +0200
@@ -48,7 +48,7 @@ extern pthread_once_t __namedsem_once at
extern void *__sem_mappings attribute_hidden;
/* Lock to protect the search tree. */
-extern lll_lock_t __sem_mappings_lock attribute_hidden;
+extern int __sem_mappings_lock attribute_hidden;
/* Initializer for mountpoint. */
--- libc/nptl/pthread_setschedparam.c.jj 2007-06-01 12:07:58.000000000 +0200
+++ libc/nptl/pthread_setschedparam.c 2007-07-25 20:00:24.000000000 +0200
@@ -39,7 +39,7 @@ __pthread_setschedparam (threadid, polic
int result = 0;
- lll_lock (pd->lock);
+ lll_lock (pd->lock, LLL_PRIVATE);
struct sched_param p;
const struct sched_param *orig_param = param;
@@ -67,7 +67,7 @@ __pthread_setschedparam (threadid, polic
pd->flags |= ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET;
}
- lll_unlock (pd->lock);
+ lll_unlock (pd->lock, LLL_PRIVATE);
return result;
}
--- libc/nptl/pthread_cond_init.c.jj 2007-06-01 12:07:58.000000000 +0200
+++ libc/nptl/pthread_cond_init.c 2007-07-25 20:00:24.000000000 +0200
@@ -28,7 +28,7 @@ __pthread_cond_init (cond, cond_attr)
{
struct pthread_condattr *icond_attr = (struct pthread_condattr *) cond_attr;
- cond->__data.__lock = LLL_MUTEX_LOCK_INITIALIZER;
+ cond->__data.__lock = LLL_LOCK_INITIALIZER;
cond->__data.__futex = 0;
cond->__data.__nwaiters = (icond_attr != NULL
&& ((icond_attr->value
--- libc/nptl/pthread_getattr_np.c.jj 2007-07-23 16:21:17.000000000 +0200
+++ libc/nptl/pthread_getattr_np.c 2007-07-25 20:00:24.000000000 +0200
@@ -39,7 +39,7 @@ pthread_getattr_np (thread_id, attr)
struct pthread_attr *iattr = (struct pthread_attr *) attr;
int ret = 0;
- lll_lock (thread->lock);
+ lll_lock (thread->lock, LLL_PRIVATE);
/* The thread library is responsible for keeping the values in the
thread desriptor up-to-date in case the user changes them. */
@@ -173,7 +173,7 @@ pthread_getattr_np (thread_id, attr)
}
}
- lll_unlock (thread->lock);
+ lll_unlock (thread->lock, LLL_PRIVATE);
return ret;
}
--- libc/nptl/pthread_barrier_wait.c.jj 2007-06-19 13:10:21.000000000 +0200
+++ libc/nptl/pthread_barrier_wait.c 2007-07-25 20:00:24.000000000 +0200
@@ -32,7 +32,7 @@ pthread_barrier_wait (barrier)
int result = 0;
/* Make sure we are alone. */
- lll_lock (ibarrier->lock);
+ lll_lock (ibarrier->lock, ibarrier->private ^ FUTEX_PRIVATE_FLAG);
/* One more arrival. */
--ibarrier->left;
@@ -46,8 +46,7 @@ pthread_barrier_wait (barrier)
/* Wake up everybody. */
lll_futex_wake (&ibarrier->curr_event, INT_MAX,
- // XYZ check mutex flag
- LLL_SHARED);
+ ibarrier->private ^ FUTEX_PRIVATE_FLAG);
/* This is the thread which finished the serialization. */
result = PTHREAD_BARRIER_SERIAL_THREAD;
@@ -59,13 +58,12 @@ pthread_barrier_wait (barrier)
unsigned int event = ibarrier->curr_event;
/* Before suspending, make the barrier available to others. */
- lll_unlock (ibarrier->lock);
+ lll_unlock (ibarrier->lock, ibarrier->private ^ FUTEX_PRIVATE_FLAG);
/* Wait for the event counter of the barrier to change. */
do
lll_futex_wait (&ibarrier->curr_event, event,
- // XYZ check mutex flag
- LLL_SHARED);
+ ibarrier->private ^ FUTEX_PRIVATE_FLAG);
while (event == ibarrier->curr_event);
}
@@ -75,7 +73,7 @@ pthread_barrier_wait (barrier)
/* If this was the last woken thread, unlock. */
if (atomic_increment_val (&ibarrier->left) == init_count)
/* We are done. */
- lll_unlock (ibarrier->lock);
+ lll_unlock (ibarrier->lock, ibarrier->private ^ FUTEX_PRIVATE_FLAG);
return result;
}
--- libc/nptl/pthread_barrier_destroy.c.jj 2002-11-26 23:49:50.000000000 +0100
+++ libc/nptl/pthread_barrier_destroy.c 2007-07-25 20:00:24.000000000 +0200
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -31,14 +31,14 @@ pthread_barrier_destroy (barrier)
ibarrier = (struct pthread_barrier *) barrier;
- lll_lock (ibarrier->lock);
+ lll_lock (ibarrier->lock, ibarrier->private ^ FUTEX_PRIVATE_FLAG);
if (__builtin_expect (ibarrier->left == ibarrier->init_count, 1))
/* The barrier is not used anymore. */
result = 0;
else
/* Still used, return with an error. */
- lll_unlock (ibarrier->lock);
+ lll_unlock (ibarrier->lock, ibarrier->private ^ FUTEX_PRIVATE_FLAG);
return result;
}
--- libc/nptl/descr.h.jj 2007-06-01 12:07:58.000000000 +0200
+++ libc/nptl/descr.h 2007-07-25 20:00:24.000000000 +0200
@@ -309,10 +309,10 @@ struct pthread
int parent_cancelhandling;
/* Lock to synchronize access to the descriptor. */
- lll_lock_t lock;
+ int lock;
/* Lock for synchronizing setxid calls. */
- lll_lock_t setxid_futex;
+ int setxid_futex;
#if HP_TIMING_AVAIL
/* Offset of the CPU clock at start thread start time. */
--- libc/nptl/pthread_rwlock_wrlock.c.jj 2007-07-23 19:36:30.000000000 +0200
+++ libc/nptl/pthread_rwlock_wrlock.c 2007-07-25 20:00:24.000000000 +0200
@@ -32,7 +32,7 @@ __pthread_rwlock_wrlock (rwlock)
int result = 0;
/* Make sure we are along. */
- lll_mutex_lock (rwlock->__data.__lock);
+ lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);
while (1)
{
@@ -65,22 +65,21 @@ __pthread_rwlock_wrlock (rwlock)
int waitval = rwlock->__data.__writer_wakeup;
/* Free the lock. */
- lll_mutex_unlock (rwlock->__data.__lock);
+ lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
/* Wait for the writer or reader(s) to finish. */
lll_futex_wait (&rwlock->__data.__writer_wakeup, waitval,
- // XYZ check mutex flag
- LLL_SHARED);
+ rwlock->__data.__shared);
/* Get the lock. */
- lll_mutex_lock (rwlock->__data.__lock);
+ lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);
/* To start over again, remove the thread from the writer list. */
--rwlock->__data.__nr_writers_queued;
}
/* We are done, free the lock. */
- lll_mutex_unlock (rwlock->__data.__lock);
+ lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
return result;
}
--- libc/nptl/sem_close.c.jj 2003-05-17 22:49:02.000000000 +0200
+++ libc/nptl/sem_close.c 2007-07-25 20:00:24.000000000 +0200
@@ -47,7 +47,7 @@ sem_close (sem)
int result = 0;
/* Get the lock. */
- lll_lock (__sem_mappings_lock);
+ lll_lock (__sem_mappings_lock, LLL_PRIVATE);
/* Locate the entry for the mapping the caller provided. */
rec = NULL;
@@ -75,7 +75,7 @@ sem_close (sem)
}
/* Release the lock. */
- lll_unlock (__sem_mappings_lock);
+ lll_unlock (__sem_mappings_lock, LLL_PRIVATE);
return result;
}
--- libc/nptl/pthread_rwlock_rdlock.c.jj 2007-07-23 19:36:30.000000000 +0200
+++ libc/nptl/pthread_rwlock_rdlock.c 2007-07-25 20:00:24.000000000 +0200
@@ -32,7 +32,7 @@ __pthread_rwlock_rdlock (rwlock)
int result = 0;
/* Make sure we are along. */
- lll_mutex_lock (rwlock->__data.__lock);
+ lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);
while (1)
{
@@ -74,21 +74,20 @@ __pthread_rwlock_rdlock (rwlock)
int waitval = rwlock->__data.__readers_wakeup;
/* Free the lock. */
- lll_mutex_unlock (rwlock->__data.__lock);
+ lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
/* Wait for the writer to finish. */
- lll_futex_wait (&rwlock->__data.__readers_wakeup, waitval,
- // XYZ check mutex flag
- LLL_SHARED);
+ lll_futex_wait (&rwlock->__data.__readers_wakeup, waitval,
+ rwlock->__data.__shared);
/* Get the lock. */
- lll_mutex_lock (rwlock->__data.__lock);
+ lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);
--rwlock->__data.__nr_readers_queued;
}
/* We are done, free the lock. */
- lll_mutex_unlock (rwlock->__data.__lock);
+ lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
return result;
}
--- libc/nptl/pthread_mutex_timedlock.c.jj 2007-06-19 13:10:21.000000000 +0200
+++ libc/nptl/pthread_mutex_timedlock.c 2007-07-25 20:00:24.000000000 +0200
@@ -56,7 +56,8 @@ pthread_mutex_timedlock (mutex, abstime)
}
/* We have to get the mutex. */
- result = lll_mutex_timedlock (mutex->__data.__lock, abstime);
+ result = lll_timedlock (mutex->__data.__lock, abstime,
+ /* XYZ */ LLL_SHARED);
if (result != 0)
goto out;
@@ -76,14 +77,15 @@ pthread_mutex_timedlock (mutex, abstime)
case PTHREAD_MUTEX_TIMED_NP:
simple:
/* Normal mutex. */
- result = lll_mutex_timedlock (mutex->__data.__lock, abstime);
+ result = lll_timedlock (mutex->__data.__lock, abstime,
+ /* XYZ */ LLL_SHARED);
break;
case PTHREAD_MUTEX_ADAPTIVE_NP:
if (! __is_smp)
goto simple;
- if (lll_mutex_trylock (mutex->__data.__lock) != 0)
+ if (lll_trylock (mutex->__data.__lock) != 0)
{
int cnt = 0;
int max_cnt = MIN (MAX_ADAPTIVE_COUNT,
@@ -92,7 +94,8 @@ pthread_mutex_timedlock (mutex, abstime)
{
if (cnt++ >= max_cnt)
{
- result = lll_mutex_timedlock (mutex->__data.__lock, abstime);
+ result = lll_timedlock (mutex->__data.__lock, abstime,
+ /* XYZ */ LLL_SHARED);
break;
}
@@ -100,7 +103,7 @@ pthread_mutex_timedlock (mutex, abstime)
BUSY_WAIT_NOP;
#endif
}
- while (lll_mutex_trylock (mutex->__data.__lock) != 0);
+ while (lll_trylock (mutex->__data.__lock) != 0);
mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
}
@@ -174,15 +177,15 @@ pthread_mutex_timedlock (mutex, abstime)
}
}
- result = lll_robust_mutex_timedlock (mutex->__data.__lock, abstime,
- id);
+ result = lll_robust_timedlock (mutex->__data.__lock, abstime, id,
+ /* XYZ */ LLL_SHARED);
if (__builtin_expect (mutex->__data.__owner
== PTHREAD_MUTEX_NOTRECOVERABLE, 0))
{
/* This mutex is now not recoverable. */
mutex->__data.__count = 0;
- lll_mutex_unlock (mutex->__data.__lock);
+ lll_unlock (mutex->__data.__lock, /* XYZ */ LLL_SHARED);
THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
return ENOTRECOVERABLE;
}
--- libc/nptl/pthread_cond_destroy.c.jj 2007-06-19 13:10:21.000000000 +0200
+++ libc/nptl/pthread_cond_destroy.c 2007-07-25 20:00:24.000000000 +0200
@@ -27,13 +27,13 @@ __pthread_cond_destroy (cond)
pthread_cond_t *cond;
{
/* Make sure we are alone. */
- lll_mutex_lock (cond->__data.__lock);
+ lll_lock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
if (cond->__data.__total_seq > cond->__data.__wakeup_seq)
{
/* If there are still some waiters which have not been
woken up, this is an application bug. */
- lll_mutex_unlock (cond->__data.__lock);
+ lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
return EBUSY;
}
@@ -66,13 +66,13 @@ __pthread_cond_destroy (cond)
do
{
- lll_mutex_unlock (cond->__data.__lock);
+ lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
lll_futex_wait (&cond->__data.__nwaiters, nwaiters,
// XYZ check mutex flag
LLL_SHARED);
- lll_mutex_lock (cond->__data.__lock);
+ lll_lock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
nwaiters = cond->__data.__nwaiters;
}
--- libc/nptl/old_pthread_cond_broadcast.c.jj 2003-03-21 09:02:07.000000000 +0100
+++ libc/nptl/old_pthread_cond_broadcast.c 2007-07-25 20:00:24.000000000 +0200
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -33,7 +33,7 @@ __pthread_cond_broadcast_2_0 (cond)
{
pthread_cond_t *newcond;
-#if LLL_MUTEX_LOCK_INITIALIZER == 0
+#if LLL_LOCK_INITIALIZER == 0
newcond = (pthread_cond_t *) calloc (sizeof (pthread_cond_t), 1);
if (newcond == NULL)
return ENOMEM;
--- libc/nptl/pthread_mutex_trylock.c.jj 2007-06-19 13:10:21.000000000 +0200
+++ libc/nptl/pthread_mutex_trylock.c 2007-07-25 20:00:24.000000000 +0200
@@ -48,7 +48,7 @@ __pthread_mutex_trylock (mutex)
return 0;
}
- if (lll_mutex_trylock (mutex->__data.__lock) == 0)
+ if (lll_trylock (mutex->__data.__lock) == 0)
{
/* Record the ownership. */
mutex->__data.__owner = id;
@@ -62,7 +62,7 @@ __pthread_mutex_trylock (mutex)
case PTHREAD_MUTEX_TIMED_NP:
case PTHREAD_MUTEX_ADAPTIVE_NP:
/* Normal mutex. */
- if (lll_mutex_trylock (mutex->__data.__lock) != 0)
+ if (lll_trylock (mutex->__data.__lock) != 0)
break;
/* Record the ownership. */
@@ -140,7 +140,7 @@ __pthread_mutex_trylock (mutex)
}
}
- oldval = lll_robust_mutex_trylock (mutex->__data.__lock, id);
+ oldval = lll_robust_trylock (mutex->__data.__lock, id);
if (oldval != 0 && (oldval & FUTEX_OWNER_DIED) == 0)
{
THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
@@ -154,7 +154,7 @@ __pthread_mutex_trylock (mutex)
/* This mutex is now not recoverable. */
mutex->__data.__count = 0;
if (oldval == id)
- lll_mutex_unlock (mutex->__data.__lock);
+ lll_unlock (mutex->__data.__lock, /* XYZ */ LLL_SHARED);
THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
return ENOTRECOVERABLE;
}
--- libc/nptl/pthread_cond_wait.c.jj 2007-06-19 13:10:21.000000000 +0200
+++ libc/nptl/pthread_cond_wait.c 2007-07-25 20:00:24.000000000 +0200
@@ -45,7 +45,7 @@ __condvar_cleanup (void *arg)
unsigned int destroying;
/* We are going to modify shared data. */
- lll_mutex_lock (cbuffer->cond->__data.__lock);
+ lll_lock (cbuffer->cond->__data.__lock, /* XYZ */ LLL_SHARED);
if (cbuffer->bc_seq == cbuffer->cond->__data.__broadcast_seq)
{
@@ -78,7 +78,7 @@ __condvar_cleanup (void *arg)
}
/* We are done. */
- lll_mutex_unlock (cbuffer->cond->__data.__lock);
+ lll_unlock (cbuffer->cond->__data.__lock, /* XYZ */ LLL_SHARED);
/* Wake everybody to make sure no condvar signal gets lost. */
if (! destroying)
@@ -102,13 +102,13 @@ __pthread_cond_wait (cond, mutex)
int err;
/* Make sure we are along. */
- lll_mutex_lock (cond->__data.__lock);
+ lll_lock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
/* Now we can release the mutex. */
err = __pthread_mutex_unlock_usercnt (mutex, 0);
if (__builtin_expect (err, 0))
{
- lll_mutex_unlock (cond->__data.__lock);
+ lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
return err;
}
@@ -144,7 +144,7 @@ __pthread_cond_wait (cond, mutex)
unsigned int futex_val = cond->__data.__futex;
/* Prepare to wait. Release the condvar futex. */
- lll_mutex_unlock (cond->__data.__lock);
+ lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
/* Enable asynchronous cancellation. Required by the standard. */
cbuffer.oldtype = __pthread_enable_asynccancel ();
@@ -158,7 +158,7 @@ __pthread_cond_wait (cond, mutex)
__pthread_disable_asynccancel (cbuffer.oldtype);
/* We are going to look at shared data again, so get the lock. */
- lll_mutex_lock (cond->__data.__lock);
+ lll_lock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
/* If a broadcast happened, we are done. */
if (cbuffer.bc_seq != cond->__data.__broadcast_seq)
@@ -186,7 +186,7 @@ __pthread_cond_wait (cond, mutex)
LLL_SHARED);
/* We are done with the condvar. */
- lll_mutex_unlock (cond->__data.__lock);
+ lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
/* The cancellation handling is back to normal, remove the handler. */
__pthread_cleanup_pop (&buffer, 0);
--- libc/nptl/old_pthread_cond_wait.c.jj 2003-03-21 09:02:07.000000000 +0100
+++ libc/nptl/old_pthread_cond_wait.c 2007-07-25 20:00:24.000000000 +0200
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -34,7 +34,7 @@ __pthread_cond_wait_2_0 (cond, mutex)
{
pthread_cond_t *newcond;
-#if LLL_MUTEX_LOCK_INITIALIZER == 0
+#if LLL_LOCK_INITIALIZER == 0
newcond = (pthread_cond_t *) calloc (sizeof (pthread_cond_t), 1);
if (newcond == NULL)
return ENOMEM;
--- libc/nptl/pthread_cond_signal.c.jj 2007-06-19 13:10:21.000000000 +0200
+++ libc/nptl/pthread_cond_signal.c 2007-07-25 20:00:24.000000000 +0200
@@ -33,7 +33,7 @@ __pthread_cond_signal (cond)
pthread_cond_t *cond;
{
/* Make sure we are alone. */
- lll_mutex_lock (cond->__data.__lock);
+ lll_lock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
/* Are there any waiters to be woken? */
if (cond->__data.__total_seq > cond->__data.__wakeup_seq)
@@ -56,7 +56,7 @@ __pthread_cond_signal (cond)
}
/* We are done. */
- lll_mutex_unlock (cond->__data.__lock);
+ lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
return 0;
}
--- libc/nptl/pthreadP.h.jj 2007-06-19 13:10:21.000000000 +0200
+++ libc/nptl/pthreadP.h 2007-07-25 20:00:24.000000000 +0200
@@ -151,7 +151,7 @@ hidden_proto (__stack_user)
/* Attribute handling. */
extern struct pthread_attr *__attr_list attribute_hidden;
-extern lll_lock_t __attr_list_lock attribute_hidden;
+extern int __attr_list_lock attribute_hidden;
/* First available RT signal. */
extern int __current_sigrtmin attribute_hidden;
--- libc/nptl/pthread_mutex_lock.c.jj 2007-06-19 13:10:21.000000000 +0200
+++ libc/nptl/pthread_mutex_lock.c 2007-07-25 20:00:24.000000000 +0200
@@ -27,9 +27,9 @@
#ifndef LLL_MUTEX_LOCK
-# define LLL_MUTEX_LOCK(mutex) lll_mutex_lock (mutex)
-# define LLL_MUTEX_TRYLOCK(mutex) lll_mutex_trylock (mutex)
-# define LLL_ROBUST_MUTEX_LOCK(mutex, id) lll_robust_mutex_lock (mutex, id)
+# define LLL_MUTEX_LOCK(mutex) lll_lock (mutex, /* XYZ */ LLL_SHARED)
+# define LLL_MUTEX_TRYLOCK(mutex) lll_trylock (mutex)
+# define LLL_ROBUST_MUTEX_LOCK(mutex, id) lll_robust_lock (mutex, id, /* XYZ */ LLL_SHARED)
#endif
@@ -198,7 +198,7 @@ __pthread_mutex_lock (mutex)
{
/* This mutex is now not recoverable. */
mutex->__data.__count = 0;
- lll_mutex_unlock (mutex->__data.__lock);
+ lll_unlock (mutex->__data.__lock, /* XYZ */ LLL_SHARED);
THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
return ENOTRECOVERABLE;
}
--- libc/nptl/pthread_rwlock_timedwrlock.c.jj 2007-06-19 13:10:21.000000000 +0200
+++ libc/nptl/pthread_rwlock_timedwrlock.c 2007-07-25 20:00:24.000000000 +0200
@@ -33,7 +33,7 @@ pthread_rwlock_timedwrlock (rwlock, abst
int result = 0;
/* Make sure we are along. */
- lll_mutex_lock (rwlock->__data.__lock);
+ lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);
while (1)
{
@@ -100,16 +100,14 @@ pthread_rwlock_timedwrlock (rwlock, abst
int waitval = rwlock->__data.__writer_wakeup;
/* Free the lock. */
- lll_mutex_unlock (rwlock->__data.__lock);
+ lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
/* Wait for the writer or reader(s) to finish. */
err = lll_futex_timed_wait (&rwlock->__data.__writer_wakeup,
- waitval, &rt,
- // XYZ check mutex flag
- LLL_SHARED);
+ waitval, &rt, rwlock->__data.__shared);
/* Get the lock. */
- lll_mutex_lock (rwlock->__data.__lock);
+ lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);
/* To start over again, remove the thread from the writer list. */
--rwlock->__data.__nr_writers_queued;
@@ -123,7 +121,7 @@ pthread_rwlock_timedwrlock (rwlock, abst
}
/* We are done, free the lock. */
- lll_mutex_unlock (rwlock->__data.__lock);
+ lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
return result;
}
--- libc/nptl/allocatestack.c.jj 2007-07-25 20:00:18.000000000 +0200
+++ libc/nptl/allocatestack.c 2007-07-25 20:00:24.000000000 +0200
@@ -103,7 +103,7 @@ static size_t stack_cache_maxsize = 40 *
static size_t stack_cache_actsize;
/* Mutex protecting this variable. */
-static lll_lock_t stack_cache_lock = LLL_LOCK_INITIALIZER;
+static int stack_cache_lock = LLL_LOCK_INITIALIZER;
/* List of queued stack frames. */
static LIST_HEAD (stack_cache);
@@ -139,7 +139,7 @@ get_cached_stack (size_t *sizep, void **
struct pthread *result = NULL;
list_t *entry;
- lll_lock (stack_cache_lock);
+ lll_lock (stack_cache_lock, LLL_PRIVATE);
/* Search the cache for a matching entry. We search for the
smallest stack which has at least the required size. Note that
@@ -172,7 +172,7 @@ get_cached_stack (size_t *sizep, void **
|| __builtin_expect (result->stackblock_size > 4 * size, 0))
{
/* Release the lock. */
- lll_unlock (stack_cache_lock);
+ lll_unlock (stack_cache_lock, LLL_PRIVATE);
return NULL;
}
@@ -187,7 +187,7 @@ get_cached_stack (size_t *sizep, void **
stack_cache_actsize -= result->stackblock_size;
/* Release the lock early. */
- lll_unlock (stack_cache_lock);
+ lll_unlock (stack_cache_lock, LLL_PRIVATE);
/* Report size and location of the stack to the caller. */
*sizep = result->stackblock_size;
@@ -400,12 +400,12 @@ allocate_stack (const struct pthread_att
/* Prepare to modify global data. */
- lll_lock (stack_cache_lock);
+ lll_lock (stack_cache_lock, LLL_PRIVATE);
/* And add to the list of stacks in use. */
list_add (&pd->list, &__stack_user);
- lll_unlock (stack_cache_lock);
+ lll_unlock (stack_cache_lock, LLL_PRIVATE);
}
else
{
@@ -544,12 +544,12 @@ allocate_stack (const struct pthread_att
/* Prepare to modify global data. */
- lll_lock (stack_cache_lock);
+ lll_lock (stack_cache_lock, LLL_PRIVATE);
/* And add to the list of stacks in use. */
list_add (&pd->list, &stack_used);
- lll_unlock (stack_cache_lock);
+ lll_unlock (stack_cache_lock, LLL_PRIVATE);
/* There might have been a race. Another thread might have
@@ -598,12 +598,12 @@ allocate_stack (const struct pthread_att
mprot_error:
err = errno;
- lll_lock (stack_cache_lock);
+ lll_lock (stack_cache_lock, LLL_PRIVATE);
/* Remove the thread from the list. */
list_del (&pd->list);
- lll_unlock (stack_cache_lock);
+ lll_unlock (stack_cache_lock, LLL_PRIVATE);
/* Get rid of the TLS block we allocated. */
_dl_deallocate_tls (TLS_TPADJ (pd), false);
@@ -699,7 +699,7 @@ void
internal_function
__deallocate_stack (struct pthread *pd)
{
- lll_lock (stack_cache_lock);
+ lll_lock (stack_cache_lock, LLL_PRIVATE);
/* Remove the thread from the list of threads with user defined
stacks. */
@@ -715,7 +715,7 @@ __deallocate_stack (struct pthread *pd)
/* Free the memory associated with the ELF TLS. */
_dl_deallocate_tls (TLS_TPADJ (pd), false);
- lll_unlock (stack_cache_lock);
+ lll_unlock (stack_cache_lock, LLL_PRIVATE);
}
@@ -732,7 +732,7 @@ __make_stacks_executable (void **stack_e
const size_t pagemask = ~(__getpagesize () - 1);
#endif
- lll_lock (stack_cache_lock);
+ lll_lock (stack_cache_lock, LLL_PRIVATE);
list_t *runp;
list_for_each (runp, &stack_used)
@@ -761,7 +761,7 @@ __make_stacks_executable (void **stack_e
break;
}
- lll_unlock (stack_cache_lock);
+ lll_unlock (stack_cache_lock, LLL_PRIVATE);
return err;
}
@@ -837,7 +837,7 @@ __find_thread_by_id (pid_t tid)
{
struct pthread *result = NULL;
- lll_lock (stack_cache_lock);
+ lll_lock (stack_cache_lock, LLL_PRIVATE);
/* Iterate over the list with system-allocated threads first. */
list_t *runp;
@@ -869,7 +869,7 @@ __find_thread_by_id (pid_t tid)
}
out:
- lll_unlock (stack_cache_lock);
+ lll_unlock (stack_cache_lock, LLL_PRIVATE);
return result;
}
@@ -920,7 +920,7 @@ attribute_hidden
__nptl_setxid (struct xid_command *cmdp)
{
int result;
- lll_lock (stack_cache_lock);
+ lll_lock (stack_cache_lock, LLL_PRIVATE);
__xidcmd = cmdp;
cmdp->cntr = 0;
@@ -966,7 +966,7 @@ __nptl_setxid (struct xid_command *cmdp)
result = -1;
}
- lll_unlock (stack_cache_lock);
+ lll_unlock (stack_cache_lock, LLL_PRIVATE);
return result;
}
@@ -995,7 +995,7 @@ void
attribute_hidden
__pthread_init_static_tls (struct link_map *map)
{
- lll_lock (stack_cache_lock);
+ lll_lock (stack_cache_lock, LLL_PRIVATE);
/* Iterate over the list with system-allocated threads first. */
list_t *runp;
@@ -1006,7 +1006,7 @@ __pthread_init_static_tls (struct link_m
list_for_each (runp, &__stack_user)
init_one_static_tls (list_entry (runp, struct pthread, list), map);
- lll_unlock (stack_cache_lock);
+ lll_unlock (stack_cache_lock, LLL_PRIVATE);
}
@@ -1014,7 +1014,7 @@ void
attribute_hidden
__wait_lookup_done (void)
{
- lll_lock (stack_cache_lock);
+ lll_lock (stack_cache_lock, LLL_PRIVATE);
struct pthread *self = THREAD_SELF;
@@ -1063,5 +1063,5 @@ __wait_lookup_done (void)
while (*gscope_flagp == THREAD_GSCOPE_FLAG_WAIT);
}
- lll_unlock (stack_cache_lock);
+ lll_unlock (stack_cache_lock, LLL_PRIVATE);
}
--- libc/nptl/tpp.c.jj 2006-08-15 01:02:29.000000000 +0200
+++ libc/nptl/tpp.c 2007-07-25 20:00:24.000000000 +0200
@@ -1,5 +1,5 @@
/* Thread Priority Protect helpers.
- Copyright (C) 2006 Free Software Foundation, Inc.
+ Copyright (C) 2006, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Jakub Jelinek <jakub@redhat.com>, 2006.
@@ -93,7 +93,7 @@ __pthread_tpp_change_priority (int previ
if (priomax == newpriomax)
return 0;
- lll_lock (self->lock);
+ lll_lock (self->lock, LLL_PRIVATE);
tpp->priomax = newpriomax;
@@ -129,7 +129,7 @@ __pthread_tpp_change_priority (int previ
}
}
- lll_unlock (self->lock);
+ lll_unlock (self->lock, LLL_PRIVATE);
return result;
}
@@ -144,7 +144,7 @@ __pthread_current_priority (void)
int result = 0;
- lll_lock (self->lock);
+ lll_lock (self->lock, LLL_PRIVATE);
if ((self->flags & ATTR_FLAG_SCHED_SET) == 0)
{
@@ -166,7 +166,7 @@ __pthread_current_priority (void)
if (result != -1)
result = self->schedparam.sched_priority;
- lll_unlock (self->lock);
+ lll_unlock (self->lock, LLL_PRIVATE);
return result;
}
--- libc/nptl/pthread_setschedprio.c.jj 2007-06-01 12:07:58.000000000 +0200
+++ libc/nptl/pthread_setschedprio.c 2007-07-25 20:00:24.000000000 +0200
@@ -41,7 +41,7 @@ pthread_setschedprio (threadid, prio)
struct sched_param param;
param.sched_priority = prio;
- lll_lock (pd->lock);
+ lll_lock (pd->lock, LLL_PRIVATE);
/* If the thread should have higher priority because of some
PTHREAD_PRIO_PROTECT mutexes it holds, adjust the priority. */
@@ -60,7 +60,7 @@ pthread_setschedprio (threadid, prio)
pd->flags |= ATTR_FLAG_SCHED_SET;
}
- lll_unlock (pd->lock);
+ lll_unlock (pd->lock, LLL_PRIVATE);
return result;
}
--- libc/nptl/pthread_attr_init.c.jj 2004-03-22 14:45:55.000000000 +0100
+++ libc/nptl/pthread_attr_init.c 2007-07-25 20:00:24.000000000 +0200
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2004, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -27,7 +27,7 @@
struct pthread_attr *__attr_list;
-lll_lock_t __attr_list_lock = LLL_LOCK_INITIALIZER;
+int __attr_list_lock = LLL_LOCK_INITIALIZER;
int
--- libc/nptl/old_pthread_cond_timedwait.c.jj 2003-03-21 09:02:07.000000000 +0100
+++ libc/nptl/old_pthread_cond_timedwait.c 2007-07-25 20:00:24.000000000 +0200
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -35,7 +35,7 @@ __pthread_cond_timedwait_2_0 (cond, mute
{
pthread_cond_t *newcond;
-#if LLL_MUTEX_LOCK_INITIALIZER == 0
+#if LLL_LOCK_INITIALIZER == 0
newcond = (pthread_cond_t *) calloc (sizeof (pthread_cond_t), 1);
if (newcond == NULL)
return ENOMEM;
--- libc/nptl/pthread_create.c.jj 2007-07-25 20:00:18.000000000 +0200
+++ libc/nptl/pthread_create.c 2007-07-25 20:00:24.000000000 +0200
@@ -63,7 +63,7 @@ __find_in_stack_list (pd)
list_t *entry;
struct pthread *result = NULL;
- lll_lock (stack_cache_lock);
+ lll_lock (stack_cache_lock, LLL_PRIVATE);
list_for_each (entry, &stack_used)
{
@@ -90,7 +90,7 @@ __find_in_stack_list (pd)
}
}
- lll_unlock (stack_cache_lock);
+ lll_unlock (stack_cache_lock, LLL_PRIVATE);
return result;
}
@@ -284,9 +284,9 @@ start_thread (void *arg)
int oldtype = CANCEL_ASYNC ();
/* Get the lock the parent locked to force synchronization. */
- lll_lock (pd->lock);
+ lll_lock (pd->lock, LLL_PRIVATE);
/* And give it up right away. */
- lll_unlock (pd->lock);
+ lll_unlock (pd->lock, LLL_PRIVATE);
CANCEL_RESET (oldtype);
}
@@ -370,7 +370,7 @@ start_thread (void *arg)
# endif
this->__list.__next = NULL;
- lll_robust_mutex_dead (this->__lock);
+ lll_robust_dead (this->__lock, /* XYZ */ LLL_SHARED);
}
while (robust != (void *) &pd->robust_head);
}
Jakub
^ permalink raw reply [flat|nested] 5+ messages in thread
* [PATCH] lowlevellock.h cleanups, LLL_SHARED vs. LLL_PRIVATE on lll locks
2007-07-25 20:11 [RFC PATCH] lowlevellock.h cleanups, LLL_SHARED vs. LLL_PRIVATE on lll locks Jakub Jelinek
@ 2007-07-31 11:11 ` Jakub Jelinek
2007-08-01 23:00 ` Kaz Kojima
0 siblings, 1 reply; 5+ messages in thread
From: Jakub Jelinek @ 2007-07-31 11:11 UTC (permalink / raw)
To: Ulrich Drepper; +Cc: Glibc hackers
Hi!
On Wed, Jul 25, 2007 at 10:15:02PM +0200, Jakub Jelinek wrote:
> On top of the patch I posted yesterday, done only for x86_64 and powerpc
> (will finish the rest of arches if you agree this is the right direction,
> even write a ChangeLog entry).
Here is an updated patch against CVS trunk plus
http://sources.redhat.com/ml/libc-hacker/2007-07/msg00050.html
which changes all in-tree arches but SH (will leave that to Kaz) and
sparc sem_* stuff (will do when I find time for it).
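Just to make the intended calling convention concrete, here is a tiny
illustrative sketch (not part of the patch; example_lock/example_usage are
made-up names, and it of course only builds inside nptl where the internal
headers are visible):

#include <lowlevellock.h>
#include <pthreadP.h>

/* lll locks are now plain ints.  */
static int example_lock = LLL_LOCK_INITIALIZER;

static void
example_usage (pthread_rwlock_t *rwlock)
{
  /* Always process-private lock: the constant LLL_PRIVATE argument ends
     up in the __lll_*_private paths from lowlevellock.c.  */
  lll_lock (example_lock, LLL_PRIVATE);
  lll_unlock (example_lock, LLL_PRIVATE);

  /* Possibly process-shared object: pass the flag recorded in the object
     itself.  */
  lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);
  lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
}

The first pair matches what register-atfork.c etc. do now, the second pair
what the pthread_rwlock_* functions do.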
2007-07-31 Anton Blanchard <anton@samba.org>
* sysdeps/unix/sysv/linux/powerpc/sem_post.c (__new_sem_post):
Use __asm __volatile (__lll_acq_instr ::: "memory") instead of
atomic_full_barrier.
2007-07-31 Jakub Jelinek <jakub@redhat.com>
* allocatestack.c (stack_cache_lock): Change type to int.
(get_cached_stack, allocate_stack, __deallocate_stack,
__make_stacks_executable, __find_thread_by_id, __nptl_setxid,
__pthread_init_static_tls, __wait_lookup_done): Add LLL_PRIVATE
as second argument to lll_lock and lll_unlock macros on
stack_cache_lock.
* pthread_create.c (__find_in_stack_list): Likewise.
(start_thread): Similarly with pd->lock. Use lll_robust_dead
macro instead of lll_robust_mutex_dead, pass LLL_SHARED to it
as second argument.
* descr.h (struct pthread): Change lock and setxid_futex field
type to int.
* old_pthread_cond_broadcast.c (__pthread_cond_broadcast_2_0): Use
LLL_LOCK_INITIALIZER instead of LLL_MUTEX_LOCK_INITIALIZER.
* old_pthread_cond_signal.c (__pthread_cond_signal_2_0): Likewise.
* old_pthread_cond_timedwait.c (__pthread_cond_timedwait_2_0):
Likewise.
* old_pthread_cond_wait.c (__pthread_cond_wait_2_0): Likewise.
* pthread_cond_init.c (__pthread_cond_init): Likewise.
* pthreadP.h (__attr_list_lock): Change type to int.
* pthread_attr_init.c (__attr_list_lock): Likewise.
* pthread_barrier_destroy.c (pthread_barrier_destroy): Pass
ibarrier->private ^ FUTEX_PRIVATE_FLAG as second argument to
lll_{,un}lock.
* pthread_barrier_wait.c (pthread_barrier_wait): Likewise and
also for lll_futex_{wake,wait}.
* pthread_barrier_init.c (pthread_barrier_init): Make iattr
a pointer to const.
* pthread_cond_broadcast.c (__pthread_cond_broadcast): Pass
LLL_SHARED as second argument to lll_{,un}lock.
* pthread_cond_destroy.c (__pthread_cond_destroy): Likewise.
* pthread_cond_signal.c (__pthread_cond_signal): Likewise.
* pthread_cond_timedwait.c (__pthread_cond_timedwait): Likewise.
* pthread_cond_wait.c (__condvar_cleanup, __pthread_cond_wait):
Likewise.
* pthread_getattr_np.c (pthread_getattr_np): Add LLL_PRIVATE
as second argument to lll_{,un}lock macros on pd->lock.
* pthread_getschedparam.c (__pthread_getschedparam): Likewise.
* pthread_setschedparam.c (__pthread_setschedparam): Likewise.
* pthread_setschedprio.c (pthread_setschedprio): Likewise.
* tpp.c (__pthread_tpp_change_priority, __pthread_current_priority):
Likewise.
* sysdeps/pthread/createthread.c (do_clone, create_thread):
Likewise.
* pthread_once.c (once_lock): Change type to int.
(__pthread_once): Pass LLL_PRIVATE as second argument to
lll_{,un}lock macros on once_lock.
* pthread_rwlock_rdlock.c (__pthread_rwlock_rdlock): Use
lll_{,un}lock macros instead of lll_mutex_{,un}lock, pass
rwlock->__data.__shared as second argument to them and similarly
for lll_futex_w*.
* pthread_rwlock_timedrdlock.c (pthread_rwlock_timedrdlock):
Likewise.
* pthread_rwlock_timedwrlock.c (pthread_rwlock_timedwrlock):
Likewise.
* pthread_rwlock_tryrdlock.c (__pthread_rwlock_tryrdlock): Likewise.
* pthread_rwlock_trywrlock.c (__pthread_rwlock_trywrlock): Likewise.
* pthread_rwlock_unlock.c (__pthread_rwlock_unlock): Likewise.
* pthread_rwlock_wrlock.c (__pthread_rwlock_wrlock): Likewise.
* sem_close.c (sem_close): Pass LLL_PRIVATE as second argument
to lll_{,un}lock macros on __sem_mappings_lock.
* sem_open.c (check_add_mapping): Likewise.
(__sem_mappings_lock): Change type to int.
* semaphoreP.h (__sem_mappings_lock): Likewise.
* pthread_mutex_lock.c (LLL_MUTEX_LOCK, LLL_MUTEX_TRYLOCK,
LLL_ROBUST_MUTEX_LOCK): Use lll_{,try,robust_}lock macros
instead of lll_*mutex_*, pass LLL_SHARED as last
argument.
(__pthread_mutex_lock): Use lll_unlock instead of lll_mutex_unlock,
pass LLL_SHARED as last argument.
* sysdeps/unix/sysv/linux/pthread_mutex_cond_lock.c (LLL_MUTEX_LOCK,
LLL_MUTEX_TRYLOCK, LLL_ROBUST_MUTEX_LOCK): Use
lll_{cond_,cond_try,robust_cond}lock macros instead of lll_*mutex_*,
pass LLL_SHARED as last argument.
* pthread_mutex_timedlock.c (pthread_mutex_timedlock): Use
lll_{timed,try,robust_timed,un}lock instead of lll_*mutex*, pass
LLL_SHARED as last argument.
* pthread_mutex_trylock.c (__pthread_mutex_trylock): Similarly.
* pthread_mutex_unlock.c (__pthread_mutex_unlock_usercnt):
Similarly.
* sysdeps/pthread/bits/libc-lock.h (__libc_lock_lock,
__libc_lock_lock_recursive, __libc_lock_unlock,
__libc_lock_unlock_recursive): Pass LLL_PRIVATE as second
argument to lll_{,un}lock.
* sysdeps/pthread/bits/stdio-lock.h (_IO_lock_lock,
_IO_lock_unlock): Likewise.
* sysdeps/unix/sysv/linux/fork.c (__libc_fork): Don't use
compound literal.
* sysdeps/unix/sysv/linux/unregister-atfork.c (__unregister_atfork):
Pass LLL_PRIVATE as second argument to lll_{,un}lock macros on
__fork_lock.
* sysdeps/unix/sysv/linux/register-atfork.c (__register_atfork,
free_mem): Likewise.
(__fork_lock): Change type to int.
* sysdeps/unix/sysv/linux/fork.h (__fork_lock): Likewise.
* sysdeps/unix/sysv/linux/sem_post.c (__new_sem_post): Pass
isem->private ^ FUTEX_PRIVATE_FLAG as second argument to
lll_futex_wake.
* sysdeps/unix/sysv/linux/sem_timedwait.c (sem_timedwait): Likewise.
* sysdeps/unix/sysv/linux/sem_wait.c (__new_sem_wait): Likewise.
* sysdeps/unix/sysv/linux/lowlevellock.c (__lll_lock_wait_private):
New function.
(__lll_lock_wait, __lll_timedlock_wait): Add private argument and
pass it through to lll_futex_*wait, only compile in when
IS_IN_libpthread.
* sysdeps/unix/sysv/linux/lowlevelrobustlock.c
(__lll_robust_lock_wait, __lll_robust_timedlock_wait): Add private
argument and pass it through to lll_futex_*wait.
* sysdeps/unix/sysv/linux/alpha/lowlevellock.h: Renamed all
lll_mutex_* resp. lll_robust_mutex_* macros to lll_* resp.
lll_robust_*. Renamed all __lll_mutex_* resp. __lll_robust_mutex_*
inline functions to __lll_* resp. __lll_robust_*.
(LLL_MUTEX_LOCK_INITIALIZER): Remove.
(lll_robust_dead): Add private argument.
(__lll_lock_wait_private): New prototype.
(__lll_lock_wait, __lll_robust_lock_wait, __lll_lock_timedwait,
__lll_robust_lock_timedwait): Add private argument to prototypes.
(__lll_lock): Add private argument, if it is constant LLL_PRIVATE,
call __lll_lock_wait_private, otherwise pass private to
__lll_lock_wait.
(__lll_robust_lock, __lll_cond_lock, __lll_timedlock,
__lll_robust_timedlock): Add private argument, pass it to
__lll_*wait functions.
(__lll_unlock): Add private argument, if it is constant LLL_PRIVATE,
call __lll_unlock_wake_private, otherwise pass private to
__lll_unlock_wake.
(__lll_robust_unlock): Add private argument, pass it to
__lll_robust_unlock_wake.
(lll_lock, lll_robust_lock, lll_cond_lock, lll_timedlock,
lll_robust_timedlock, lll_unlock, lll_robust_unlock): Add private
argument, pass it through to __lll_* inline function.
(__lll_mutex_unlock_force, lll_mutex_unlock_force): Remove.
(lll_lock_t): Remove.
(__lll_cond_wait, __lll_cond_timedwait, __lll_cond_wake,
__lll_cond_broadcast, lll_cond_wait, lll_cond_timedwait,
lll_cond_wake, lll_cond_broadcast): Remove.
* sysdeps/unix/sysv/linux/ia64/lowlevellock.h: Likewise.
* sysdeps/unix/sysv/linux/powerpc/lowlevellock.h: Likewise.
* sysdeps/unix/sysv/linux/s390/lowlevellock.h: Likewise.
* sysdeps/unix/sysv/linux/sparc/lowlevellock.h: Likewise.
* sysdeps/unix/sysv/linux/i386/lowlevellock.h: Allow including
the header from assembler. Renamed all lll_mutex_* resp.
lll_robust_mutex_* macros to lll_* resp. lll_robust_*.
(LOCK, FUTEX_CMP_REQUEUE, FUTEX_WAKE_OP,
FUTEX_OP_CLEAR_WAKE_IF_GT_ONE): Define.
(LLL_MUTEX_LOCK_INITIALIZER, LLL_MUTEX_LOCK_INITIALIZER_LOCKED,
LLL_MUTEX_LOCK_INITIALIZER_WAITERS): Remove.
(__lll_mutex_lock_wait, __lll_mutex_timedlock_wait,
__lll_mutex_unlock_wake, __lll_lock_wait, __lll_unlock_wake):
Remove prototype.
(__lll_trylock_asm, __lll_lock_asm_start, __lll_unlock_asm): Define.
(lll_robust_trylock, lll_cond_trylock): Use LLL_LOCK_INITIALIZER*
rather than LLL_MUTEX_LOCK_INITIALIZER* macros.
(lll_trylock): Likewise, use __lll_trylock_asm, pass
MULTIPLE_THREADS_OFFSET as another asm operand.
(lll_lock): Add private argument, use __lll_lock_asm_start, pass
MULTIPLE_THREADS_OFFSET as last asm operand, call
__lll_lock_wait_private if private is constant LLL_PRIVATE,
otherwise pass private as another argument to __lll_lock_wait.
(lll_robust_lock, lll_cond_lock, lll_robust_cond_lock,
lll_timedlock, lll_robust_timedlock): Add private argument, pass
private as another argument to __lll_*lock_wait call.
(lll_unlock): Add private argument, use __lll_unlock_asm, pass
MULTIPLE_THREADS_OFFSET as another asm operand, call
__lll_unlock_wake_private if private is constant LLL_PRIVATE,
otherwise pass private as another argument to __lll_unlock_wake.
(lll_robust_unlock): Add private argument, pass private as another
argument to __lll_unlock_wake.
(lll_robust_dead): Add private argument, use __lll_private_flag
macro.
(lll_islocked): Use LLL_LOCK_INITIALIZER instead of
LLL_MUTEX_LOCK_INITIALIZER.
(lll_lock_t): Remove.
(LLL_LOCK_INITIALIZER_WAITERS): Define.
(__lll_cond_wait, __lll_cond_timedwait, __lll_cond_wake,
__lll_cond_broadcast, lll_cond_wait, lll_cond_timedwait,
lll_cond_wake, lll_cond_broadcast): Remove.
* sysdeps/unix/sysv/linux/x86_64/lowlevellock.h: Likewise.
* sysdeps/unix/sysv/linux/i386/i486/libc-lowlevellock.S: Revert
2007-05-2{3,9} changes.
* sysdeps/unix/sysv/linux/i386/i486/lowlevellock.S: Include
kernel-features.h and lowlevellock.h.
(LOAD_PRIVATE_FUTEX_WAIT): Define.
(LOAD_FUTEX_WAIT): Rewritten.
(LOCK, SYS_gettimeofday, SYS_futex, FUTEX_WAIT, FUTEX_WAKE): Don't
define.
(__lll_lock_wait_private, __lll_unlock_wake_private): New functions.
(__lll_mutex_lock_wait): Rename to ...
(__lll_lock_wait): ... this. Take futex addr from %edx instead of
%ecx, %ecx is now private argument. Don't compile in for libc.so.
(__lll_mutex_timedlock_wait): Rename to ...
(__lll_timedlock_wait): ... this. Use __NR_gettimeofday. %esi
contains private argument. Don't compile in for libc.so.
(__lll_mutex_unlock_wake): Rename to ...
(__lll_unlock_wake): ... this. %ecx contains private argument.
Don't compile in for libc.so.
(__lll_timedwait_tid): Use __NR_gettimeofday.
* sysdeps/unix/sysv/linux/i386/i486/lowlevelrobustlock.S: Include
kernel-features.h and lowlevellock.h.
(LOAD_FUTEX_WAIT): Define.
(LOCK, SYS_gettimeofday, SYS_futex, FUTEX_WAIT, FUTEX_WAKE): Don't
define.
(__lll_robust_mutex_lock_wait): Rename to ...
(__lll_robust_lock_wait): ... this. Futex addr is now in %edx
argument, %ecx argument contains private. Use LOAD_FUTEX_WAIT
macro.
(__lll_robust_mutex_timedlock_wait): Rename to ...
(__lll_robust_timedlock_wait): ... this. Use __NR_gettimeofday.
%esi argument contains private, use LOAD_FUTEX_WAIT macro.
* sysdeps/unix/sysv/linux/i386/i486/pthread_barrier_wait.S: Include
lowlevellock.h.
(SYS_futex, FUTEX_WAIT, FUTEX_WAKE, LOCK): Don't define.
(pthread_barrier_wait): Rename __lll_mutex_* to __lll_*, pass
PRIVATE(%ebx) ^ LLL_SHARED as private argument in %ecx to
__lll_lock_wait and __lll_unlock_wake, pass MUTEX(%ebx) address
to __lll_lock_wait in %edx.
* sysdeps/unix/sysv/linux/i386/i486/pthread_cond_broadcast.S:
Include lowlevellock.h and pthread-errnos.h.
(SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_REQUEUE,
FUTEX_CMP_REQUEUE, EINVAL, LOCK): Don't define.
(__pthread_cond_broadcast): Rename __lll_mutex_* to __lll_*, pass
cond_lock address in %edx rather than %ecx to __lll_lock_wait,
pass LLL_SHARED in %ecx to both __lll_lock_wait and
__lll_unlock_wake.
* sysdeps/unix/sysv/linux/i386/i486/pthread_cond_signal.S:
Include lowlevellock.h and pthread-errnos.h.
(SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_WAKE_OP,
FUTEX_OP_CLEAR_WAKE_IF_GT_ONE, EINVAL, LOCK): Don't define.
(__pthread_cond_signal): Rename __lll_mutex_* to __lll_*, pass
cond_lock address in %edx rather than %ecx to __lll_lock_wait,
pass LLL_SHARED in %ecx to both __lll_lock_wait and
__lll_unlock_wake.
* sysdeps/unix/sysv/linux/i386/i486/pthread_cond_timedwait.S:
Include lowlevellock.h.
(SYS_futex, SYS_gettimeofday, FUTEX_WAIT, FUTEX_WAKE, LOCK):
Don't define.
(__pthread_cond_timedwait): Rename __lll_mutex_* to __lll_*, pass
cond_lock address in %edx rather than %ecx to __lll_lock_wait,
pass LLL_SHARED in %ecx to both __lll_lock_wait and
__lll_unlock_wake. Use __NR_gettimeofday.
* sysdeps/unix/sysv/linux/i386/i486/pthread_cond_wait.S:
Include lowlevellock.h.
(SYS_futex, FUTEX_WAIT, FUTEX_WAKE, LOCK): Don't define.
(__pthread_cond_wait, __condvar_w_cleanup): Rename __lll_mutex_*
to __lll_*, pass cond_lock address in %edx rather than %ecx to
__lll_lock_wait, pass LLL_SHARED in %ecx to both __lll_lock_wait
and __lll_unlock_wake.
* sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_rdlock.S:
Include lowlevellock.h.
(SYS_futex, FUTEX_WAIT, FUTEX_WAKE, LOCK): Don't define.
(__pthread_rwlock_rdlock): Rename __lll_mutex_* to __lll_*, pass
MUTEX(%ebx) address in %edx rather than %ecx to
__lll_lock_wait, pass PSHARED(%ebx) in %ecx to both __lll_lock_wait
and __lll_unlock_wake. Move return value from %ecx to %edx
register.
* sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedrdlock.S:
Include lowlevellock.h.
(SYS_futex, SYS_gettimeofday, FUTEX_WAIT, FUTEX_WAKE, LOCK):
Don't define.
(pthread_rwlock_timedrdlock): Rename __lll_mutex_* to __lll_*, pass
MUTEX(%ebp) address in %edx rather than %ecx to
__lll_lock_wait, pass PSHARED(%ebp) in %ecx to both __lll_lock_wait
and __lll_unlock_wake. Move return value from %ecx to %edx
register. Use __NR_gettimeofday.
* sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedwrlock.S:
Include lowlevellock.h.
(SYS_futex, SYS_gettimeofday, FUTEX_WAIT, FUTEX_WAKE, LOCK):
Don't define.
(pthread_rwlock_timedwrlock): Rename __lll_mutex_* to __lll_*, pass
MUTEX(%ebp) address in %edx rather than %ecx to
__lll_lock_wait, pass PSHARED(%ebp) in %ecx to both __lll_lock_wait
and __lll_unlock_wake. Move return value from %ecx to %edx
register. Use __NR_gettimeofday.
* sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_unlock.S:
Include lowlevellock.h.
(SYS_futex, FUTEX_WAIT, FUTEX_WAKE, LOCK): Don't define.
(__pthread_rwlock_unlock): Rename __lll_mutex_* to __lll_*, pass
MUTEX(%edi) address in %edx rather than %ecx to
__lll_lock_wait, pass PSHARED(%edi) in %ecx to both __lll_lock_wait
and __lll_unlock_wake.
* sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_wrlock.S:
Include lowlevellock.h.
(SYS_futex, FUTEX_WAIT, FUTEX_WAKE, LOCK): Don't define.
(__pthread_rwlock_wrlock): Rename __lll_mutex_* to __lll_*, pass
MUTEX(%ebx) address in %edx rather than %ecx to
__lll_lock_wait, pass PSHARED(%ebx) in %ecx to both __lll_lock_wait
and __lll_unlock_wake. Move return value from %ecx to %edx
register.
* sysdeps/unix/sysv/linux/i386/pthread_once.S: Include
lowlevellock.h.
(LOCK, SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_PRIVATE_FLAG): Don't
define.
* sysdeps/unix/sysv/linux/i386/i486/sem_post.S: Include lowlevellock.h.
(LOCK, SYS_futex, FUTEX_WAKE): Don't define.
* sysdeps/unix/sysv/linux/i386/i486/sem_timedwait.S: Include
lowlevellock.h.
(LOCK, SYS_futex, SYS_gettimeofday, FUTEX_WAIT): Don't define.
(sem_timedwait): Use __NR_gettimeofday.
* sysdeps/unix/sysv/linux/i386/i486/sem_trywait.S: Include
lowlevellock.h.
(LOCK): Don't define.
* sysdeps/unix/sysv/linux/i386/i486/sem_wait.S: Include
lowlevellock.h.
(LOCK, SYS_futex, FUTEX_WAIT): Don't define.
* sysdeps/unix/sysv/linux/powerpc/sem_post.c: Wake only when there
are waiters.
* sysdeps/unix/sysv/linux/x86_64/libc-lowlevellock.S: Revert
2007-05-2{3,9} changes.
* sysdeps/unix/sysv/linux/x86_64/lowlevellock.S: Include
kernel-features.h and lowlevellock.h.
(LOAD_PRIVATE_FUTEX_WAIT): Define.
(LOAD_FUTEX_WAIT): Rewritten.
(LOCK, SYS_futex, FUTEX_WAIT, FUTEX_WAKE): Don't define.
(__lll_lock_wait_private, __lll_unlock_wake_private): New functions.
(__lll_mutex_lock_wait): Rename to ...
(__lll_lock_wait): ... this. %esi is now private argument.
Don't compile in for libc.so.
(__lll_mutex_timedlock_wait): Rename to ...
(__lll_timedlock_wait): ... this. %esi contains private argument.
Don't compile in for libc.so.
(__lll_mutex_unlock_wake): Rename to ...
(__lll_unlock_wake): ... this. %esi contains private argument.
Don't compile in for libc.so.
* sysdeps/unix/sysv/linux/x86_64/lowlevelrobustlock.S: Include
kernel-features.h and lowlevellock.h.
(LOAD_FUTEX_WAIT): Define.
(LOCK, SYS_futex, FUTEX_WAIT, FUTEX_WAKE): Don't define.
(__lll_robust_mutex_lock_wait): Rename to ...
(__lll_robust_lock_wait): ... this. %esi argument contains private.
Use LOAD_FUTEX_WAIT macro.
(__lll_robust_mutex_timedlock_wait): Rename to ...
(__lll_robust_timedlock_wait): ... this. %esi argument contains
private, use LOAD_FUTEX_WAIT macro.
* sysdeps/unix/sysv/linux/x86_64/pthread_barrier_wait.S: Include
lowlevellock.h.
(SYS_futex, FUTEX_WAIT, FUTEX_WAKE, LOCK): Don't define.
(pthread_barrier_wait): Rename __lll_mutex_* to __lll_*, pass
PRIVATE(%rdi) ^ LLL_SHARED as private argument in %esi to
__lll_lock_wait and __lll_unlock_wake.
* sysdeps/unix/sysv/linux/x86_64/pthread_cond_broadcast.S:
Include lowlevellock.h and pthread-errnos.h.
(SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_REQUEUE,
FUTEX_CMP_REQUEUE, EINVAL, LOCK): Don't define.
(__pthread_cond_broadcast): Rename __lll_mutex_* to __lll_*,
pass LLL_SHARED in %esi to both __lll_lock_wait and
__lll_unlock_wake.
* sysdeps/unix/sysv/linux/x86_64/pthread_cond_signal.S:
Include lowlevellock.h and pthread-errnos.h.
(SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_WAKE_OP,
FUTEX_OP_CLEAR_WAKE_IF_GT_ONE, EINVAL, LOCK): Don't define.
(__pthread_cond_signal): Rename __lll_mutex_* to __lll_*,
pass LLL_SHARED in %esi to both __lll_lock_wait and
__lll_unlock_wake.
* sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S:
Include lowlevellock.h.
(SYS_futex, FUTEX_WAIT, FUTEX_WAKE, LOCK): Don't define.
(__pthread_cond_timedwait): Rename __lll_mutex_* to __lll_*,
pass LLL_SHARED in %esi to both __lll_lock_wait and
__lll_unlock_wake.
* sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S:
Include lowlevellock.h.
(SYS_futex, FUTEX_WAIT, FUTEX_WAKE, LOCK): Don't define.
(__pthread_cond_wait, __condvar_cleanup): Rename __lll_mutex_*
to __lll_*, pass LLL_SHARED in %esi to both __lll_lock_wait
and __lll_unlock_wake.
* sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_rdlock.S:
Include lowlevellock.h.
(SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_PRIVATE_FLAG, LOCK):
Don't define.
(__pthread_rwlock_rdlock): Rename __lll_mutex_* to __lll_*,
pass PSHARED(%rdi) in %esi to both __lll_lock_wait
and __lll_unlock_wake.
* sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedrdlock.S:
Include lowlevellock.h.
(SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_PRIVATE_FLAG, LOCK):
Don't define.
(pthread_rwlock_timedrdlock): Rename __lll_mutex_* to __lll_*,
pass PSHARED(%rdi) in %esi to both __lll_lock_wait
and __lll_unlock_wake.
* sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedwrlock.S:
Include lowlevellock.h.
(SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_PRIVATE_FLAG, LOCK):
Don't define.
(pthread_rwlock_timedwrlock): Rename __lll_mutex_* to __lll_*,
pass PSHARED(%rdi) in %esi to both __lll_lock_wait
and __lll_unlock_wake.
* sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_unlock.S:
Include lowlevellock.h.
(SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_PRIVATE_FLAG, LOCK):
Don't define.
(__pthread_rwlock_unlock): Rename __lll_mutex_* to __lll_*,
pass PSHARED(%rdi) in %esi to both __lll_lock_wait
and __lll_unlock_wake.
* sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_wrlock.S:
Include lowlevellock.h.
(SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_PRIVATE_FLAG, LOCK):
Don't define.
(__pthread_rwlock_wrlock): Rename __lll_mutex_* to __lll_*,
pass PSHARED(%rdi) in %esi to both __lll_lock_wait
and __lll_unlock_wake.
* sysdeps/unix/sysv/linux/x86_64/pthread_once.S: Include
lowlevellock.h.
(LOCK, SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_PRIVATE_FLAG): Don't
define.
* sysdeps/unix/sysv/linux/x86_64/sem_post.S: Include lowlevellock.h.
(LOCK, SYS_futex, FUTEX_WAKE): Don't define.
* sysdeps/unix/sysv/linux/x86_64/sem_timedwait.S: Include
lowlevellock.h.
(LOCK, SYS_futex, FUTEX_WAIT): Don't define.
* sysdeps/unix/sysv/linux/x86_64/sem_trywait.S: Include
lowlevellock.h.
(LOCK): Don't define.
* sysdeps/unix/sysv/linux/x86_64/sem_wait.S: Include
lowlevellock.h.
(LOCK, SYS_futex, FUTEX_WAIT): Don't define.
* sysdeps/unix/sysv/linux/sparc/internaltypes.h: New file.
* sysdeps/unix/sysv/linux/sparc/pthread_barrier_destroy.c: New file.
* sysdeps/unix/sysv/linux/sparc/pthread_barrier_init.c: New file.
* sysdeps/unix/sysv/linux/sparc/pthread_barrier_wait.c: New file.
* sysdeps/unix/sysv/linux/sparc/sparc32/lowlevellock.c
(__lll_lock_wait_private): New function.
(__lll_lock_wait, __lll_timedlock_wait): Add private argument, pass
it to lll_futex_*wait. Don't compile in for libc.so.
* sysdeps/unix/sysv/linux/sparc/sparc32/pthread_barrier_init.c:
Remove.
* sysdeps/unix/sysv/linux/sparc/sparc32/pthread_barrier_wait.c
(struct sparc_pthread_barrier): Remove.
(pthread_barrier_wait): Use union sparc_pthread_barrier instead of
struct sparc_pthread_barrier. Pass
ibarrier->s.pshared ? LLL_SHARED : LLL_PRIVATE to lll_{,un}lock
and lll_futex_wait macros.
* sysdeps/unix/sysv/linux/sparc/sparc32/sparcv9/pthread_barrier_init.c:
Remove.
* sysdeps/unix/sysv/linux/sparc/sparc32/sparcv9/pthread_barrier_wait.c:
Include sparc pthread_barrier_wait.c instead of generic one.
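Before the actual diffs, a rough sketch of the generic C shape the
lowlevellock.h entries above describe (alpha/ia64/powerpc/s390/sparc style).
This is only an illustration of the __builtin_constant_p dispatch, not the
literal header text; the helper names come from the ChangeLog and the
atomic_* names are the usual glibc <atomic.h> macros:

/* Sketch only -- see the per-arch lowlevellock.h changes for the real code.  */
static inline void
__attribute__ ((always_inline))
__lll_lock (int *futex, int private)
{
  if (__builtin_expect (atomic_compare_and_exchange_bool_acq (futex, 1, 0), 0))
    {
      if (__builtin_constant_p (private) && private == LLL_PRIVATE)
        __lll_lock_wait_private (futex);    /* new helper, old argument count  */
      else
        __lll_lock_wait (futex, private);   /* private is LLL_PRIVATE or LLL_SHARED  */
    }
}
#define lll_lock(futex, private) __lll_lock (&(futex), private)

static inline void
__attribute__ ((always_inline))
__lll_unlock (int *futex, int private)
{
  int oldval = atomic_exchange_rel (futex, 0);
  if (__builtin_expect (oldval > 1, 0))     /* somebody was waiting  */
    {
      if (__builtin_constant_p (private) && private == LLL_PRIVATE)
        __lll_unlock_wake_private (futex);
      else
        __lll_unlock_wake (futex, private);
    }
}
#define lll_unlock(futex, private) __lll_unlock (&(futex), private)

Callers that pass a compile-time LLL_PRIVATE therefore go through the new
*_private helpers with the old argument count, while callers that pass a
variable (e.g. rwlock->__data.__shared below) go through the helpers that
take the extra private argument.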
--- libc/nptl/sem_open.c.jj 2007-06-04 08:42:05.000000000 +0200
+++ libc/nptl/sem_open.c 2007-07-29 11:48:55.000000000 +0200
@@ -147,7 +147,7 @@ __sem_search (const void *a, const void
void *__sem_mappings attribute_hidden;
/* Lock to protect the search tree. */
-lll_lock_t __sem_mappings_lock attribute_hidden = LLL_LOCK_INITIALIZER;
+int __sem_mappings_lock attribute_hidden = LLL_LOCK_INITIALIZER;
/* Search for existing mapping and if possible add the one provided. */
@@ -161,7 +161,7 @@ check_add_mapping (const char *name, siz
if (__fxstat64 (_STAT_VER, fd, &st) == 0)
{
/* Get the lock. */
- lll_lock (__sem_mappings_lock);
+ lll_lock (__sem_mappings_lock, LLL_PRIVATE);
/* Search for an existing mapping given the information we have. */
struct inuse_sem *fake;
@@ -210,7 +210,7 @@ check_add_mapping (const char *name, siz
}
/* Release the lock. */
- lll_unlock (__sem_mappings_lock);
+ lll_unlock (__sem_mappings_lock, LLL_PRIVATE);
}
if (result != existing && existing != SEM_FAILED && existing != MAP_FAILED)
--- libc/nptl/pthread_mutex_timedlock.c.jj 2007-06-29 10:19:56.000000000 +0200
+++ libc/nptl/pthread_mutex_timedlock.c 2007-07-29 11:48:55.000000000 +0200
@@ -56,7 +56,8 @@ pthread_mutex_timedlock (mutex, abstime)
}
/* We have to get the mutex. */
- result = lll_mutex_timedlock (mutex->__data.__lock, abstime);
+ result = lll_timedlock (mutex->__data.__lock, abstime,
+ /* XYZ */ LLL_SHARED);
if (result != 0)
goto out;
@@ -76,14 +77,15 @@ pthread_mutex_timedlock (mutex, abstime)
case PTHREAD_MUTEX_TIMED_NP:
simple:
/* Normal mutex. */
- result = lll_mutex_timedlock (mutex->__data.__lock, abstime);
+ result = lll_timedlock (mutex->__data.__lock, abstime,
+ /* XYZ */ LLL_SHARED);
break;
case PTHREAD_MUTEX_ADAPTIVE_NP:
if (! __is_smp)
goto simple;
- if (lll_mutex_trylock (mutex->__data.__lock) != 0)
+ if (lll_trylock (mutex->__data.__lock) != 0)
{
int cnt = 0;
int max_cnt = MIN (MAX_ADAPTIVE_COUNT,
@@ -92,7 +94,8 @@ pthread_mutex_timedlock (mutex, abstime)
{
if (cnt++ >= max_cnt)
{
- result = lll_mutex_timedlock (mutex->__data.__lock, abstime);
+ result = lll_timedlock (mutex->__data.__lock, abstime,
+ /* XYZ */ LLL_SHARED);
break;
}
@@ -100,7 +103,7 @@ pthread_mutex_timedlock (mutex, abstime)
BUSY_WAIT_NOP;
#endif
}
- while (lll_mutex_trylock (mutex->__data.__lock) != 0);
+ while (lll_trylock (mutex->__data.__lock) != 0);
mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
}
@@ -174,15 +177,15 @@ pthread_mutex_timedlock (mutex, abstime)
}
}
- result = lll_robust_mutex_timedlock (mutex->__data.__lock, abstime,
- id);
+ result = lll_robust_timedlock (mutex->__data.__lock, abstime, id,
+ /* XYZ */ LLL_SHARED);
if (__builtin_expect (mutex->__data.__owner
== PTHREAD_MUTEX_NOTRECOVERABLE, 0))
{
/* This mutex is now not recoverable. */
mutex->__data.__count = 0;
- lll_mutex_unlock (mutex->__data.__lock);
+ lll_unlock (mutex->__data.__lock, /* XYZ */ LLL_SHARED);
THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
return ENOTRECOVERABLE;
}
--- libc/nptl/pthread_mutex_unlock.c.jj 2007-06-29 10:19:56.000000000 +0200
+++ libc/nptl/pthread_mutex_unlock.c 2007-07-29 11:48:55.000000000 +0200
@@ -47,7 +47,7 @@ __pthread_mutex_unlock_usercnt (mutex, d
case PTHREAD_MUTEX_ERRORCHECK_NP:
/* Error checking mutex. */
if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid)
- || ! lll_mutex_islocked (mutex->__data.__lock))
+ || ! lll_islocked (mutex->__data.__lock))
return EPERM;
/* FALLTHROUGH */
@@ -61,7 +61,7 @@ __pthread_mutex_unlock_usercnt (mutex, d
--mutex->__data.__nusers;
/* Unlock. */
- lll_mutex_unlock (mutex->__data.__lock);
+ lll_unlock (mutex->__data.__lock, /* XYZ */ LLL_SHARED);
break;
case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
@@ -92,7 +92,7 @@ __pthread_mutex_unlock_usercnt (mutex, d
case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
if ((mutex->__data.__lock & FUTEX_TID_MASK)
!= THREAD_GETMEM (THREAD_SELF, tid)
- || ! lll_mutex_islocked (mutex->__data.__lock))
+ || ! lll_islocked (mutex->__data.__lock))
return EPERM;
/* If the previous owner died and the caller did not succeed in
@@ -115,7 +115,7 @@ __pthread_mutex_unlock_usercnt (mutex, d
--mutex->__data.__nusers;
/* Unlock. */
- lll_robust_mutex_unlock (mutex->__data.__lock);
+ lll_robust_unlock (mutex->__data.__lock, /* XYZ */ LLL_SHARED);
THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
break;
@@ -161,7 +161,7 @@ __pthread_mutex_unlock_usercnt (mutex, d
case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
if ((mutex->__data.__lock & FUTEX_TID_MASK)
!= THREAD_GETMEM (THREAD_SELF, tid)
- || ! lll_mutex_islocked (mutex->__data.__lock))
+ || ! lll_islocked (mutex->__data.__lock))
return EPERM;
/* If the previous owner died and the caller did not succeed in
--- libc/nptl/old_pthread_cond_signal.c.jj 2003-03-21 09:02:07.000000000 +0100
+++ libc/nptl/old_pthread_cond_signal.c 2007-07-29 11:48:55.000000000 +0200
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -33,7 +33,7 @@ __pthread_cond_signal_2_0 (cond)
{
pthread_cond_t *newcond;
-#if LLL_MUTEX_LOCK_INITIALIZER == 0
+#if LLL_LOCK_INITIALIZER == 0
newcond = (pthread_cond_t *) calloc (sizeof (pthread_cond_t), 1);
if (newcond == NULL)
return ENOMEM;
--- libc/nptl/old_pthread_cond_timedwait.c.jj 2003-03-21 09:02:07.000000000 +0100
+++ libc/nptl/old_pthread_cond_timedwait.c 2007-07-29 11:48:55.000000000 +0200
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -35,7 +35,7 @@ __pthread_cond_timedwait_2_0 (cond, mute
{
pthread_cond_t *newcond;
-#if LLL_MUTEX_LOCK_INITIALIZER == 0
+#if LLL_LOCK_INITIALIZER == 0
newcond = (pthread_cond_t *) calloc (sizeof (pthread_cond_t), 1);
if (newcond == NULL)
return ENOMEM;
--- libc/nptl/descr.h.jj 2007-06-04 08:42:05.000000000 +0200
+++ libc/nptl/descr.h 2007-07-29 11:48:55.000000000 +0200
@@ -309,10 +309,10 @@ struct pthread
int parent_cancelhandling;
/* Lock to synchronize access to the descriptor. */
- lll_lock_t lock;
+ int lock;
/* Lock for synchronizing setxid calls. */
- lll_lock_t setxid_futex;
+ int setxid_futex;
#if HP_TIMING_AVAIL
/* Offset of the CPU clock at start thread start time. */
--- libc/nptl/allocatestack.c.jj 2007-07-29 11:45:14.000000000 +0200
+++ libc/nptl/allocatestack.c 2007-07-29 11:48:55.000000000 +0200
@@ -103,7 +103,7 @@ static size_t stack_cache_maxsize = 40 *
static size_t stack_cache_actsize;
/* Mutex protecting this variable. */
-static lll_lock_t stack_cache_lock = LLL_LOCK_INITIALIZER;
+static int stack_cache_lock = LLL_LOCK_INITIALIZER;
/* List of queued stack frames. */
static LIST_HEAD (stack_cache);
@@ -139,7 +139,7 @@ get_cached_stack (size_t *sizep, void **
struct pthread *result = NULL;
list_t *entry;
- lll_lock (stack_cache_lock);
+ lll_lock (stack_cache_lock, LLL_PRIVATE);
/* Search the cache for a matching entry. We search for the
smallest stack which has at least the required size. Note that
@@ -172,7 +172,7 @@ get_cached_stack (size_t *sizep, void **
|| __builtin_expect (result->stackblock_size > 4 * size, 0))
{
/* Release the lock. */
- lll_unlock (stack_cache_lock);
+ lll_unlock (stack_cache_lock, LLL_PRIVATE);
return NULL;
}
@@ -187,7 +187,7 @@ get_cached_stack (size_t *sizep, void **
stack_cache_actsize -= result->stackblock_size;
/* Release the lock early. */
- lll_unlock (stack_cache_lock);
+ lll_unlock (stack_cache_lock, LLL_PRIVATE);
/* Report size and location of the stack to the caller. */
*sizep = result->stackblock_size;
@@ -400,12 +400,12 @@ allocate_stack (const struct pthread_att
/* Prepare to modify global data. */
- lll_lock (stack_cache_lock);
+ lll_lock (stack_cache_lock, LLL_PRIVATE);
/* And add to the list of stacks in use. */
list_add (&pd->list, &__stack_user);
- lll_unlock (stack_cache_lock);
+ lll_unlock (stack_cache_lock, LLL_PRIVATE);
}
else
{
@@ -544,12 +544,12 @@ allocate_stack (const struct pthread_att
/* Prepare to modify global data. */
- lll_lock (stack_cache_lock);
+ lll_lock (stack_cache_lock, LLL_PRIVATE);
/* And add to the list of stacks in use. */
list_add (&pd->list, &stack_used);
- lll_unlock (stack_cache_lock);
+ lll_unlock (stack_cache_lock, LLL_PRIVATE);
/* There might have been a race. Another thread might have
@@ -598,12 +598,12 @@ allocate_stack (const struct pthread_att
mprot_error:
err = errno;
- lll_lock (stack_cache_lock);
+ lll_lock (stack_cache_lock, LLL_PRIVATE);
/* Remove the thread from the list. */
list_del (&pd->list);
- lll_unlock (stack_cache_lock);
+ lll_unlock (stack_cache_lock, LLL_PRIVATE);
/* Get rid of the TLS block we allocated. */
_dl_deallocate_tls (TLS_TPADJ (pd), false);
@@ -699,7 +699,7 @@ void
internal_function
__deallocate_stack (struct pthread *pd)
{
- lll_lock (stack_cache_lock);
+ lll_lock (stack_cache_lock, LLL_PRIVATE);
/* Remove the thread from the list of threads with user defined
stacks. */
@@ -715,7 +715,7 @@ __deallocate_stack (struct pthread *pd)
/* Free the memory associated with the ELF TLS. */
_dl_deallocate_tls (TLS_TPADJ (pd), false);
- lll_unlock (stack_cache_lock);
+ lll_unlock (stack_cache_lock, LLL_PRIVATE);
}
@@ -732,7 +732,7 @@ __make_stacks_executable (void **stack_e
const size_t pagemask = ~(__getpagesize () - 1);
#endif
- lll_lock (stack_cache_lock);
+ lll_lock (stack_cache_lock, LLL_PRIVATE);
list_t *runp;
list_for_each (runp, &stack_used)
@@ -761,7 +761,7 @@ __make_stacks_executable (void **stack_e
break;
}
- lll_unlock (stack_cache_lock);
+ lll_unlock (stack_cache_lock, LLL_PRIVATE);
return err;
}
@@ -837,7 +837,7 @@ __find_thread_by_id (pid_t tid)
{
struct pthread *result = NULL;
- lll_lock (stack_cache_lock);
+ lll_lock (stack_cache_lock, LLL_PRIVATE);
/* Iterate over the list with system-allocated threads first. */
list_t *runp;
@@ -869,7 +869,7 @@ __find_thread_by_id (pid_t tid)
}
out:
- lll_unlock (stack_cache_lock);
+ lll_unlock (stack_cache_lock, LLL_PRIVATE);
return result;
}
@@ -920,7 +920,7 @@ attribute_hidden
__nptl_setxid (struct xid_command *cmdp)
{
int result;
- lll_lock (stack_cache_lock);
+ lll_lock (stack_cache_lock, LLL_PRIVATE);
__xidcmd = cmdp;
cmdp->cntr = 0;
@@ -966,7 +966,7 @@ __nptl_setxid (struct xid_command *cmdp)
result = -1;
}
- lll_unlock (stack_cache_lock);
+ lll_unlock (stack_cache_lock, LLL_PRIVATE);
return result;
}
@@ -995,7 +995,7 @@ void
attribute_hidden
__pthread_init_static_tls (struct link_map *map)
{
- lll_lock (stack_cache_lock);
+ lll_lock (stack_cache_lock, LLL_PRIVATE);
/* Iterate over the list with system-allocated threads first. */
list_t *runp;
@@ -1006,7 +1006,7 @@ __pthread_init_static_tls (struct link_m
list_for_each (runp, &__stack_user)
init_one_static_tls (list_entry (runp, struct pthread, list), map);
- lll_unlock (stack_cache_lock);
+ lll_unlock (stack_cache_lock, LLL_PRIVATE);
}
@@ -1014,7 +1014,7 @@ void
attribute_hidden
__wait_lookup_done (void)
{
- lll_lock (stack_cache_lock);
+ lll_lock (stack_cache_lock, LLL_PRIVATE);
struct pthread *self = THREAD_SELF;
@@ -1063,5 +1063,5 @@ __wait_lookup_done (void)
while (*gscope_flagp == THREAD_GSCOPE_FLAG_WAIT);
}
- lll_unlock (stack_cache_lock);
+ lll_unlock (stack_cache_lock, LLL_PRIVATE);
}
--- libc/nptl/pthread_rwlock_tryrdlock.c.jj 2007-06-04 08:42:05.000000000 +0200
+++ libc/nptl/pthread_rwlock_tryrdlock.c 2007-07-29 11:48:55.000000000 +0200
@@ -28,7 +28,7 @@ __pthread_rwlock_tryrdlock (rwlock)
{
int result = EBUSY;
- lll_mutex_lock (rwlock->__data.__lock);
+ lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);
if (rwlock->__data.__writer == 0
&& (rwlock->__data.__nr_writers_queued == 0
@@ -43,7 +43,7 @@ __pthread_rwlock_tryrdlock (rwlock)
result = 0;
}
- lll_mutex_unlock (rwlock->__data.__lock);
+ lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
return result;
}
--- libc/nptl/pthread_rwlock_trywrlock.c.jj 2007-01-03 11:04:36.000000000 +0100
+++ libc/nptl/pthread_rwlock_trywrlock.c 2007-07-29 11:48:55.000000000 +0200
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -28,7 +28,7 @@ __pthread_rwlock_trywrlock (rwlock)
{
int result = EBUSY;
- lll_mutex_lock (rwlock->__data.__lock);
+ lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);
if (rwlock->__data.__writer == 0 && rwlock->__data.__nr_readers == 0)
{
@@ -36,7 +36,7 @@ __pthread_rwlock_trywrlock (rwlock)
result = 0;
}
- lll_mutex_unlock (rwlock->__data.__lock);
+ lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
return result;
}
--- libc/nptl/pthread_getschedparam.c.jj 2007-06-04 08:42:05.000000000 +0200
+++ libc/nptl/pthread_getschedparam.c 2007-07-29 11:48:55.000000000 +0200
@@ -38,7 +38,7 @@ __pthread_getschedparam (threadid, polic
int result = 0;
- lll_lock (pd->lock);
+ lll_lock (pd->lock, LLL_PRIVATE);
/* The library is responsible for maintaining the values at all
times. If the user uses a interface other than
@@ -68,7 +68,7 @@ __pthread_getschedparam (threadid, polic
memcpy (param, &pd->schedparam, sizeof (struct sched_param));
}
- lll_unlock (pd->lock);
+ lll_unlock (pd->lock, LLL_PRIVATE);
return result;
}
--- libc/nptl/pthread_barrier_init.c.jj 2007-06-04 08:42:05.000000000 +0200
+++ libc/nptl/pthread_barrier_init.c 2007-07-29 11:48:55.000000000 +0200
@@ -40,7 +40,7 @@ pthread_barrier_init (barrier, attr, cou
if (__builtin_expect (count == 0, 0))
return EINVAL;
- struct pthread_barrierattr *iattr
+ const struct pthread_barrierattr *iattr
= (attr != NULL
? iattr = (struct pthread_barrierattr *) attr
: &default_attr);
--- libc/nptl/old_pthread_cond_wait.c.jj 2003-03-21 09:02:07.000000000 +0100
+++ libc/nptl/old_pthread_cond_wait.c 2007-07-29 11:48:55.000000000 +0200
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -34,7 +34,7 @@ __pthread_cond_wait_2_0 (cond, mutex)
{
pthread_cond_t *newcond;
-#if LLL_MUTEX_LOCK_INITIALIZER == 0
+#if LLL_LOCK_INITIALIZER == 0
newcond = (pthread_cond_t *) calloc (sizeof (pthread_cond_t), 1);
if (newcond == NULL)
return ENOMEM;
--- libc/nptl/pthread_cond_destroy.c.jj 2007-06-08 09:13:50.000000000 +0200
+++ libc/nptl/pthread_cond_destroy.c 2007-07-29 11:48:55.000000000 +0200
@@ -27,13 +27,13 @@ __pthread_cond_destroy (cond)
pthread_cond_t *cond;
{
/* Make sure we are alone. */
- lll_mutex_lock (cond->__data.__lock);
+ lll_lock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
if (cond->__data.__total_seq > cond->__data.__wakeup_seq)
{
/* If there are still some waiters which have not been
woken up, this is an application bug. */
- lll_mutex_unlock (cond->__data.__lock);
+ lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
return EBUSY;
}
@@ -66,13 +66,13 @@ __pthread_cond_destroy (cond)
do
{
- lll_mutex_unlock (cond->__data.__lock);
+ lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
lll_futex_wait (&cond->__data.__nwaiters, nwaiters,
// XYZ check mutex flag
LLL_SHARED);
- lll_mutex_lock (cond->__data.__lock);
+ lll_lock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
nwaiters = cond->__data.__nwaiters;
}
--- libc/nptl/pthread_rwlock_rdlock.c.jj 2007-07-24 10:50:54.000000000 +0200
+++ libc/nptl/pthread_rwlock_rdlock.c 2007-07-29 11:48:55.000000000 +0200
@@ -32,7 +32,7 @@ __pthread_rwlock_rdlock (rwlock)
int result = 0;
/* Make sure we are along. */
- lll_mutex_lock (rwlock->__data.__lock);
+ lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);
while (1)
{
@@ -74,21 +74,20 @@ __pthread_rwlock_rdlock (rwlock)
int waitval = rwlock->__data.__readers_wakeup;
/* Free the lock. */
- lll_mutex_unlock (rwlock->__data.__lock);
+ lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
/* Wait for the writer to finish. */
- lll_futex_wait (&rwlock->__data.__readers_wakeup, waitval,
- // XYZ check mutex flag
- LLL_SHARED);
+ lll_futex_wait (&rwlock->__data.__readers_wakeup, waitval,
+ rwlock->__data.__shared);
/* Get the lock. */
- lll_mutex_lock (rwlock->__data.__lock);
+ lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);
--rwlock->__data.__nr_readers_queued;
}
/* We are done, free the lock. */
- lll_mutex_unlock (rwlock->__data.__lock);
+ lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
return result;
}
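The rwlock changes, unlike the barrier ones further down, pass
rwlock->__data.__shared through unmodified as the private argument to
lll_lock/lll_unlock/lll_futex_*, so that field has to hold an LLL_* value
directly.  Something like the following init-time assignment is assumed
(pthread_rwlock_init is not touched by this patch, so the exact form is a
guess):

  /* Hypothetical; __shared must end up as LLL_PRIVATE or LLL_SHARED.  */
  rwlock->__data.__shared = (pshared == PTHREAD_PROCESS_SHARED
                             ? LLL_SHARED : LLL_PRIVATE);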
--- libc/nptl/pthread_create.c.jj 2007-07-29 11:45:14.000000000 +0200
+++ libc/nptl/pthread_create.c 2007-07-29 11:48:55.000000000 +0200
@@ -63,7 +63,7 @@ __find_in_stack_list (pd)
list_t *entry;
struct pthread *result = NULL;
- lll_lock (stack_cache_lock);
+ lll_lock (stack_cache_lock, LLL_PRIVATE);
list_for_each (entry, &stack_used)
{
@@ -90,7 +90,7 @@ __find_in_stack_list (pd)
}
}
- lll_unlock (stack_cache_lock);
+ lll_unlock (stack_cache_lock, LLL_PRIVATE);
return result;
}
@@ -284,9 +284,9 @@ start_thread (void *arg)
int oldtype = CANCEL_ASYNC ();
/* Get the lock the parent locked to force synchronization. */
- lll_lock (pd->lock);
+ lll_lock (pd->lock, LLL_PRIVATE);
/* And give it up right away. */
- lll_unlock (pd->lock);
+ lll_unlock (pd->lock, LLL_PRIVATE);
CANCEL_RESET (oldtype);
}
@@ -370,7 +370,7 @@ start_thread (void *arg)
# endif
this->__list.__next = NULL;
- lll_robust_mutex_dead (this->__lock);
+ lll_robust_dead (this->__lock, /* XYZ */ LLL_SHARED);
}
while (robust != (void *) &pd->robust_head);
}
--- libc/nptl/pthread_rwlock_wrlock.c.jj 2007-07-24 10:50:54.000000000 +0200
+++ libc/nptl/pthread_rwlock_wrlock.c 2007-07-29 11:48:55.000000000 +0200
@@ -32,7 +32,7 @@ __pthread_rwlock_wrlock (rwlock)
int result = 0;
/* Make sure we are along. */
- lll_mutex_lock (rwlock->__data.__lock);
+ lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);
while (1)
{
@@ -65,22 +65,21 @@ __pthread_rwlock_wrlock (rwlock)
int waitval = rwlock->__data.__writer_wakeup;
/* Free the lock. */
- lll_mutex_unlock (rwlock->__data.__lock);
+ lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
/* Wait for the writer or reader(s) to finish. */
lll_futex_wait (&rwlock->__data.__writer_wakeup, waitval,
- // XYZ check mutex flag
- LLL_SHARED);
+ rwlock->__data.__shared);
/* Get the lock. */
- lll_mutex_lock (rwlock->__data.__lock);
+ lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);
/* To start over again, remove the thread from the writer list. */
--rwlock->__data.__nr_writers_queued;
}
/* We are done, free the lock. */
- lll_mutex_unlock (rwlock->__data.__lock);
+ lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
return result;
}
--- libc/nptl/pthread_rwlock_unlock.c.jj 2007-06-08 09:13:50.000000000 +0200
+++ libc/nptl/pthread_rwlock_unlock.c 2007-07-29 11:48:55.000000000 +0200
@@ -27,7 +27,7 @@
int
__pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
{
- lll_mutex_lock (rwlock->__data.__lock);
+ lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);
if (rwlock->__data.__writer)
rwlock->__data.__writer = 0;
else
@@ -37,23 +37,21 @@ __pthread_rwlock_unlock (pthread_rwlock_
if (rwlock->__data.__nr_writers_queued)
{
++rwlock->__data.__writer_wakeup;
- lll_mutex_unlock (rwlock->__data.__lock);
+ lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
lll_futex_wake (&rwlock->__data.__writer_wakeup, 1,
- // XYZ check mutex flag
- LLL_SHARED);
+ rwlock->__data.__shared);
return 0;
}
else if (rwlock->__data.__nr_readers_queued)
{
++rwlock->__data.__readers_wakeup;
- lll_mutex_unlock (rwlock->__data.__lock);
+ lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
lll_futex_wake (&rwlock->__data.__readers_wakeup, INT_MAX,
- // XYZ check mutex flag
- LLL_SHARED);
+ rwlock->__data.__shared);
return 0;
}
}
- lll_mutex_unlock (rwlock->__data.__lock);
+ lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
return 0;
}
--- libc/nptl/pthread_rwlock_timedwrlock.c.jj 2007-06-08 09:13:50.000000000 +0200
+++ libc/nptl/pthread_rwlock_timedwrlock.c 2007-07-29 11:48:55.000000000 +0200
@@ -33,7 +33,7 @@ pthread_rwlock_timedwrlock (rwlock, abst
int result = 0;
/* Make sure we are along. */
- lll_mutex_lock (rwlock->__data.__lock);
+ lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);
while (1)
{
@@ -100,16 +100,14 @@ pthread_rwlock_timedwrlock (rwlock, abst
int waitval = rwlock->__data.__writer_wakeup;
/* Free the lock. */
- lll_mutex_unlock (rwlock->__data.__lock);
+ lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
/* Wait for the writer or reader(s) to finish. */
err = lll_futex_timed_wait (&rwlock->__data.__writer_wakeup,
- waitval, &rt,
- // XYZ check mutex flag
- LLL_SHARED);
+ waitval, &rt, rwlock->__data.__shared);
/* Get the lock. */
- lll_mutex_lock (rwlock->__data.__lock);
+ lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);
/* To start over again, remove the thread from the writer list. */
--rwlock->__data.__nr_writers_queued;
@@ -123,7 +121,7 @@ pthread_rwlock_timedwrlock (rwlock, abst
}
/* We are done, free the lock. */
- lll_mutex_unlock (rwlock->__data.__lock);
+ lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
return result;
}
--- libc/nptl/old_pthread_cond_broadcast.c.jj 2003-03-21 09:02:07.000000000 +0100
+++ libc/nptl/old_pthread_cond_broadcast.c 2007-07-29 11:48:55.000000000 +0200
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -33,7 +33,7 @@ __pthread_cond_broadcast_2_0 (cond)
{
pthread_cond_t *newcond;
-#if LLL_MUTEX_LOCK_INITIALIZER == 0
+#if LLL_LOCK_INITIALIZER == 0
newcond = (pthread_cond_t *) calloc (sizeof (pthread_cond_t), 1);
if (newcond == NULL)
return ENOMEM;
--- libc/nptl/pthread_cond_wait.c.jj 2007-06-08 09:13:50.000000000 +0200
+++ libc/nptl/pthread_cond_wait.c 2007-07-29 11:48:55.000000000 +0200
@@ -45,7 +45,7 @@ __condvar_cleanup (void *arg)
unsigned int destroying;
/* We are going to modify shared data. */
- lll_mutex_lock (cbuffer->cond->__data.__lock);
+ lll_lock (cbuffer->cond->__data.__lock, /* XYZ */ LLL_SHARED);
if (cbuffer->bc_seq == cbuffer->cond->__data.__broadcast_seq)
{
@@ -78,7 +78,7 @@ __condvar_cleanup (void *arg)
}
/* We are done. */
- lll_mutex_unlock (cbuffer->cond->__data.__lock);
+ lll_unlock (cbuffer->cond->__data.__lock, /* XYZ */ LLL_SHARED);
/* Wake everybody to make sure no condvar signal gets lost. */
if (! destroying)
@@ -102,13 +102,13 @@ __pthread_cond_wait (cond, mutex)
int err;
/* Make sure we are along. */
- lll_mutex_lock (cond->__data.__lock);
+ lll_lock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
/* Now we can release the mutex. */
err = __pthread_mutex_unlock_usercnt (mutex, 0);
if (__builtin_expect (err, 0))
{
- lll_mutex_unlock (cond->__data.__lock);
+ lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
return err;
}
@@ -144,7 +144,7 @@ __pthread_cond_wait (cond, mutex)
unsigned int futex_val = cond->__data.__futex;
/* Prepare to wait. Release the condvar futex. */
- lll_mutex_unlock (cond->__data.__lock);
+ lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
/* Enable asynchronous cancellation. Required by the standard. */
cbuffer.oldtype = __pthread_enable_asynccancel ();
@@ -158,7 +158,7 @@ __pthread_cond_wait (cond, mutex)
__pthread_disable_asynccancel (cbuffer.oldtype);
/* We are going to look at shared data again, so get the lock. */
- lll_mutex_lock (cond->__data.__lock);
+ lll_lock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
/* If a broadcast happened, we are done. */
if (cbuffer.bc_seq != cond->__data.__broadcast_seq)
@@ -186,7 +186,7 @@ __pthread_cond_wait (cond, mutex)
LLL_SHARED);
/* We are done with the condvar. */
- lll_mutex_unlock (cond->__data.__lock);
+ lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
/* The cancellation handling is back to normal, remove the handler. */
__pthread_cleanup_pop (&buffer, 0);
--- libc/nptl/pthread_cond_init.c.jj 2007-06-04 08:42:05.000000000 +0200
+++ libc/nptl/pthread_cond_init.c 2007-07-29 11:48:55.000000000 +0200
@@ -28,7 +28,7 @@ __pthread_cond_init (cond, cond_attr)
{
struct pthread_condattr *icond_attr = (struct pthread_condattr *) cond_attr;
- cond->__data.__lock = LLL_MUTEX_LOCK_INITIALIZER;
+ cond->__data.__lock = LLL_LOCK_INITIALIZER;
cond->__data.__futex = 0;
cond->__data.__nwaiters = (icond_attr != NULL
&& ((icond_attr->value
--- libc/nptl/pthread_attr_init.c.jj 2004-03-19 00:56:31.000000000 +0100
+++ libc/nptl/pthread_attr_init.c 2007-07-29 11:48:55.000000000 +0200
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2004, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -27,7 +27,7 @@
struct pthread_attr *__attr_list;
-lll_lock_t __attr_list_lock = LLL_LOCK_INITIALIZER;
+int __attr_list_lock = LLL_LOCK_INITIALIZER;
int
--- libc/nptl/pthread_setschedparam.c.jj 2007-06-04 08:42:05.000000000 +0200
+++ libc/nptl/pthread_setschedparam.c 2007-07-29 11:48:55.000000000 +0200
@@ -39,7 +39,7 @@ __pthread_setschedparam (threadid, polic
int result = 0;
- lll_lock (pd->lock);
+ lll_lock (pd->lock, LLL_PRIVATE);
struct sched_param p;
const struct sched_param *orig_param = param;
@@ -67,7 +67,7 @@ __pthread_setschedparam (threadid, polic
pd->flags |= ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET;
}
- lll_unlock (pd->lock);
+ lll_unlock (pd->lock, LLL_PRIVATE);
return result;
}
--- libc/nptl/pthread_cond_broadcast.c.jj 2007-06-08 09:13:50.000000000 +0200
+++ libc/nptl/pthread_cond_broadcast.c 2007-07-29 11:48:55.000000000 +0200
@@ -33,7 +33,7 @@ __pthread_cond_broadcast (cond)
pthread_cond_t *cond;
{
/* Make sure we are alone. */
- lll_mutex_lock (cond->__data.__lock);
+ lll_lock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
/* Are there any waiters to be woken? */
if (cond->__data.__total_seq > cond->__data.__wakeup_seq)
@@ -47,7 +47,7 @@ __pthread_cond_broadcast (cond)
++cond->__data.__broadcast_seq;
/* We are done. */
- lll_mutex_unlock (cond->__data.__lock);
+ lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
/* Do not use requeue for pshared condvars. */
if (cond->__data.__mutex == (void *) ~0l)
@@ -79,7 +79,7 @@ __pthread_cond_broadcast (cond)
}
/* We are done. */
- lll_mutex_unlock (cond->__data.__lock);
+ lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
return 0;
}
--- libc/nptl/pthread_barrier_destroy.c.jj 2002-11-26 23:49:50.000000000 +0100
+++ libc/nptl/pthread_barrier_destroy.c 2007-07-29 11:48:55.000000000 +0200
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -31,14 +31,14 @@ pthread_barrier_destroy (barrier)
ibarrier = (struct pthread_barrier *) barrier;
- lll_lock (ibarrier->lock);
+ lll_lock (ibarrier->lock, ibarrier->private ^ FUTEX_PRIVATE_FLAG);
if (__builtin_expect (ibarrier->left == ibarrier->init_count, 1))
/* The barrier is not used anymore. */
result = 0;
else
/* Still used, return with an error. */
- lll_unlock (ibarrier->lock);
+ lll_unlock (ibarrier->lock, ibarrier->private ^ FUTEX_PRIVATE_FLAG);
return result;
}
--- libc/nptl/sem_close.c.jj 2003-05-17 22:49:02.000000000 +0200
+++ libc/nptl/sem_close.c 2007-07-29 11:48:55.000000000 +0200
@@ -47,7 +47,7 @@ sem_close (sem)
int result = 0;
/* Get the lock. */
- lll_lock (__sem_mappings_lock);
+ lll_lock (__sem_mappings_lock, LLL_PRIVATE);
/* Locate the entry for the mapping the caller provided. */
rec = NULL;
@@ -75,7 +75,7 @@ sem_close (sem)
}
/* Release the lock. */
- lll_unlock (__sem_mappings_lock);
+ lll_unlock (__sem_mappings_lock, LLL_PRIVATE);
return result;
}
--- libc/nptl/semaphoreP.h.jj 2007-06-04 08:42:05.000000000 +0200
+++ libc/nptl/semaphoreP.h 2007-07-29 11:48:55.000000000 +0200
@@ -48,7 +48,7 @@ extern pthread_once_t __namedsem_once at
extern void *__sem_mappings attribute_hidden;
/* Lock to protect the search tree. */
-extern lll_lock_t __sem_mappings_lock attribute_hidden;
+extern int __sem_mappings_lock attribute_hidden;
/* Initializer for mountpoint. */
--- libc/nptl/pthread_once.c.jj 2006-10-28 07:09:12.000000000 +0200
+++ libc/nptl/pthread_once.c 2007-07-29 11:48:55.000000000 +0200
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -22,7 +22,7 @@
-static lll_lock_t once_lock = LLL_LOCK_INITIALIZER;
+static int once_lock = LLL_LOCK_INITIALIZER;
int
@@ -35,7 +35,7 @@ __pthread_once (once_control, init_routi
object. */
if (*once_control == PTHREAD_ONCE_INIT)
{
- lll_lock (once_lock);
+ lll_lock (once_lock, LLL_PRIVATE);
/* XXX This implementation is not complete. It doesn't take
cancelation and fork into account. */
@@ -46,7 +46,7 @@ __pthread_once (once_control, init_routi
*once_control = !PTHREAD_ONCE_INIT;
}
- lll_unlock (once_lock);
+ lll_unlock (once_lock, LLL_PRIVATE);
}
return 0;
--- libc/nptl/pthread_rwlock_timedrdlock.c.jj 2007-06-08 09:13:50.000000000 +0200
+++ libc/nptl/pthread_rwlock_timedrdlock.c 2007-07-29 11:48:55.000000000 +0200
@@ -33,7 +33,7 @@ pthread_rwlock_timedrdlock (rwlock, abst
int result = 0;
/* Make sure we are along. */
- lll_mutex_lock(rwlock->__data.__lock);
+ lll_lock(rwlock->__data.__lock, rwlock->__data.__shared);
while (1)
{
@@ -110,16 +110,14 @@ pthread_rwlock_timedrdlock (rwlock, abst
int waitval = rwlock->__data.__readers_wakeup;
/* Free the lock. */
- lll_mutex_unlock (rwlock->__data.__lock);
+ lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
/* Wait for the writer to finish. */
err = lll_futex_timed_wait (&rwlock->__data.__readers_wakeup,
- waitval, &rt,
- // XYZ check mutex flag
- LLL_SHARED);
+ waitval, &rt, rwlock->__data.__shared);
/* Get the lock. */
- lll_mutex_lock (rwlock->__data.__lock);
+ lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);
--rwlock->__data.__nr_readers_queued;
@@ -133,7 +131,7 @@ pthread_rwlock_timedrdlock (rwlock, abst
}
/* We are done, free the lock. */
- lll_mutex_unlock (rwlock->__data.__lock);
+ lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
return result;
}
--- libc/nptl/pthreadP.h.jj 2007-06-29 10:19:56.000000000 +0200
+++ libc/nptl/pthreadP.h 2007-07-29 11:48:55.000000000 +0200
@@ -151,7 +151,7 @@ hidden_proto (__stack_user)
/* Attribute handling. */
extern struct pthread_attr *__attr_list attribute_hidden;
-extern lll_lock_t __attr_list_lock attribute_hidden;
+extern int __attr_list_lock attribute_hidden;
/* First available RT signal. */
extern int __current_sigrtmin attribute_hidden;
--- libc/nptl/pthread_cond_timedwait.c.jj 2007-06-08 09:13:50.000000000 +0200
+++ libc/nptl/pthread_cond_timedwait.c 2007-07-29 11:48:55.000000000 +0200
@@ -54,13 +54,13 @@ __pthread_cond_timedwait (cond, mutex, a
return EINVAL;
/* Make sure we are along. */
- lll_mutex_lock (cond->__data.__lock);
+ lll_lock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
/* Now we can release the mutex. */
int err = __pthread_mutex_unlock_usercnt (mutex, 0);
if (err)
{
- lll_mutex_unlock (cond->__data.__lock);
+ lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
return err;
}
@@ -146,7 +146,7 @@ __pthread_cond_timedwait (cond, mutex, a
unsigned int futex_val = cond->__data.__futex;
/* Prepare to wait. Release the condvar futex. */
- lll_mutex_unlock (cond->__data.__lock);
+ lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
/* Enable asynchronous cancellation. Required by the standard. */
cbuffer.oldtype = __pthread_enable_asynccancel ();
@@ -161,7 +161,7 @@ __pthread_cond_timedwait (cond, mutex, a
__pthread_disable_asynccancel (cbuffer.oldtype);
/* We are going to look at shared data again, so get the lock. */
- lll_mutex_lock(cond->__data.__lock);
+ lll_lock(cond->__data.__lock, /* XYZ */ LLL_SHARED);
/* If a broadcast happened, we are done. */
if (cbuffer.bc_seq != cond->__data.__broadcast_seq)
@@ -203,7 +203,7 @@ __pthread_cond_timedwait (cond, mutex, a
LLL_SHARED);
/* We are done with the condvar. */
- lll_mutex_unlock (cond->__data.__lock);
+ lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
/* The cancellation handling is back to normal, remove the handler. */
__pthread_cleanup_pop (&buffer, 0);
--- libc/nptl/pthread_setschedprio.c.jj 2007-06-04 08:42:05.000000000 +0200
+++ libc/nptl/pthread_setschedprio.c 2007-07-29 11:48:55.000000000 +0200
@@ -41,7 +41,7 @@ pthread_setschedprio (threadid, prio)
struct sched_param param;
param.sched_priority = prio;
- lll_lock (pd->lock);
+ lll_lock (pd->lock, LLL_PRIVATE);
/* If the thread should have higher priority because of some
PTHREAD_PRIO_PROTECT mutexes it holds, adjust the priority. */
@@ -60,7 +60,7 @@ pthread_setschedprio (threadid, prio)
pd->flags |= ATTR_FLAG_SCHED_SET;
}
- lll_unlock (pd->lock);
+ lll_unlock (pd->lock, LLL_PRIVATE);
return result;
}
--- libc/nptl/pthread_cond_signal.c.jj 2007-06-08 09:13:50.000000000 +0200
+++ libc/nptl/pthread_cond_signal.c 2007-07-29 11:48:55.000000000 +0200
@@ -33,7 +33,7 @@ __pthread_cond_signal (cond)
pthread_cond_t *cond;
{
/* Make sure we are alone. */
- lll_mutex_lock (cond->__data.__lock);
+ lll_lock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
/* Are there any waiters to be woken? */
if (cond->__data.__total_seq > cond->__data.__wakeup_seq)
@@ -56,7 +56,7 @@ __pthread_cond_signal (cond)
}
/* We are done. */
- lll_mutex_unlock (cond->__data.__lock);
+ lll_unlock (cond->__data.__lock, /* XYZ */ LLL_SHARED);
return 0;
}
--- libc/nptl/pthread_barrier_wait.c.jj 2007-06-08 09:13:50.000000000 +0200
+++ libc/nptl/pthread_barrier_wait.c 2007-07-29 11:48:55.000000000 +0200
@@ -32,7 +32,7 @@ pthread_barrier_wait (barrier)
int result = 0;
/* Make sure we are alone. */
- lll_lock (ibarrier->lock);
+ lll_lock (ibarrier->lock, ibarrier->private ^ FUTEX_PRIVATE_FLAG);
/* One more arrival. */
--ibarrier->left;
@@ -46,8 +46,7 @@ pthread_barrier_wait (barrier)
/* Wake up everybody. */
lll_futex_wake (&ibarrier->curr_event, INT_MAX,
- // XYZ check mutex flag
- LLL_SHARED);
+ ibarrier->private ^ FUTEX_PRIVATE_FLAG);
/* This is the thread which finished the serialization. */
result = PTHREAD_BARRIER_SERIAL_THREAD;
@@ -59,13 +58,12 @@ pthread_barrier_wait (barrier)
unsigned int event = ibarrier->curr_event;
/* Before suspending, make the barrier available to others. */
- lll_unlock (ibarrier->lock);
+ lll_unlock (ibarrier->lock, ibarrier->private ^ FUTEX_PRIVATE_FLAG);
/* Wait for the event counter of the barrier to change. */
do
lll_futex_wait (&ibarrier->curr_event, event,
- // XYZ check mutex flag
- LLL_SHARED);
+ ibarrier->private ^ FUTEX_PRIVATE_FLAG);
while (event == ibarrier->curr_event);
}
@@ -75,7 +73,7 @@ pthread_barrier_wait (barrier)
/* If this was the last woken thread, unlock. */
if (atomic_increment_val (&ibarrier->left) == init_count)
/* We are done. */
- lll_unlock (ibarrier->lock);
+ lll_unlock (ibarrier->lock, ibarrier->private ^ FUTEX_PRIVATE_FLAG);
return result;
}
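A note on the ibarrier->private ^ FUTEX_PRIVATE_FLAG expressions in the two
barrier diffs above (the sem_post.c change listed in the ChangeLog does the
same with isem->private): the private field presumably stores
FUTEX_PRIVATE_FLAG for a process-private object and 0 for a process-shared
one, so it can be OR-ed straight into futex operation words, while the lll_*
macros want LLL_PRIVATE resp. LLL_SHARED.  Since the asm ChangeLog entries
write the same computation as PRIVATE(%rdi) ^ LLL_SHARED, LLL_SHARED is
numerically the same as FUTEX_PRIVATE_FLAG, so one XOR converts one encoding
into the other:

/* Hedged sketch of the assumed mapping; only the field and constant names
   from the diffs above are real, the init-time assignment is a guess.  */
#include <linux/futex.h>                /* FUTEX_PRIVATE_FLAG  */
#define LLL_PRIVATE 0
#define LLL_SHARED  FUTEX_PRIVATE_FLAG  /* assumed numeric identity  */

static int
stored_private_field (int process_shared)
{
  return process_shared ? 0 : FUTEX_PRIVATE_FLAG;
}

static int
lll_private_arg (int private_field)
{
  /* FUTEX_PRIVATE_FLAG -> 0                  == LLL_PRIVATE
     0                  -> FUTEX_PRIVATE_FLAG == LLL_SHARED   */
  return private_field ^ FUTEX_PRIVATE_FLAG;
}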
--- libc/nptl/pthread_mutex_lock.c.jj 2007-06-29 10:19:56.000000000 +0200
+++ libc/nptl/pthread_mutex_lock.c 2007-07-29 11:48:55.000000000 +0200
@@ -27,9 +27,9 @@
#ifndef LLL_MUTEX_LOCK
-# define LLL_MUTEX_LOCK(mutex) lll_mutex_lock (mutex)
-# define LLL_MUTEX_TRYLOCK(mutex) lll_mutex_trylock (mutex)
-# define LLL_ROBUST_MUTEX_LOCK(mutex, id) lll_robust_mutex_lock (mutex, id)
+# define LLL_MUTEX_LOCK(mutex) lll_lock (mutex, /* XYZ */ LLL_SHARED)
+# define LLL_MUTEX_TRYLOCK(mutex) lll_trylock (mutex)
+# define LLL_ROBUST_MUTEX_LOCK(mutex, id) lll_robust_lock (mutex, id, /* XYZ */ LLL_SHARED)
#endif
@@ -198,7 +198,7 @@ __pthread_mutex_lock (mutex)
{
/* This mutex is now not recoverable. */
mutex->__data.__count = 0;
- lll_mutex_unlock (mutex->__data.__lock);
+ lll_unlock (mutex->__data.__lock, /* XYZ */ LLL_SHARED);
THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
return ENOTRECOVERABLE;
}
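The #ifndef LLL_MUTEX_LOCK defaults above are kept so that pthread_mutex_cond_lock.c can still override them with the lll_cond_* flavour before including this file; with the new names that override becomes something like (sketch only, that file's hunk is not quoted here):

  #define LLL_MUTEX_LOCK(mutex) \
    lll_cond_lock (mutex, /* XYZ */ LLL_SHARED)
  #define LLL_MUTEX_TRYLOCK(mutex) \
    lll_cond_trylock (mutex)
  #define LLL_ROBUST_MUTEX_LOCK(mutex, id) \
    lll_robust_cond_lock (mutex, id, /* XYZ */ LLL_SHARED)
  #define __pthread_mutex_lock __pthread_mutex_cond_lock
  #include <pthread_mutex_lock.c>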
--- libc/nptl/pthread_mutex_trylock.c.jj 2007-06-29 10:19:56.000000000 +0200
+++ libc/nptl/pthread_mutex_trylock.c 2007-07-29 11:48:55.000000000 +0200
@@ -48,7 +48,7 @@ __pthread_mutex_trylock (mutex)
return 0;
}
- if (lll_mutex_trylock (mutex->__data.__lock) == 0)
+ if (lll_trylock (mutex->__data.__lock) == 0)
{
/* Record the ownership. */
mutex->__data.__owner = id;
@@ -62,7 +62,7 @@ __pthread_mutex_trylock (mutex)
case PTHREAD_MUTEX_TIMED_NP:
case PTHREAD_MUTEX_ADAPTIVE_NP:
/* Normal mutex. */
- if (lll_mutex_trylock (mutex->__data.__lock) != 0)
+ if (lll_trylock (mutex->__data.__lock) != 0)
break;
/* Record the ownership. */
@@ -140,7 +140,7 @@ __pthread_mutex_trylock (mutex)
}
}
- oldval = lll_robust_mutex_trylock (mutex->__data.__lock, id);
+ oldval = lll_robust_trylock (mutex->__data.__lock, id);
if (oldval != 0 && (oldval & FUTEX_OWNER_DIED) == 0)
{
THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
@@ -154,7 +154,7 @@ __pthread_mutex_trylock (mutex)
/* This mutex is now not recoverable. */
mutex->__data.__count = 0;
if (oldval == id)
- lll_mutex_unlock (mutex->__data.__lock);
+ lll_unlock (mutex->__data.__lock, /* XYZ */ LLL_SHARED);
THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
return ENOTRECOVERABLE;
}
--- libc/nptl/pthread_getattr_np.c.jj 2007-06-29 10:19:56.000000000 +0200
+++ libc/nptl/pthread_getattr_np.c 2007-07-29 11:48:55.000000000 +0200
@@ -39,7 +39,7 @@ pthread_getattr_np (thread_id, attr)
struct pthread_attr *iattr = (struct pthread_attr *) attr;
int ret = 0;
- lll_lock (thread->lock);
+ lll_lock (thread->lock, LLL_PRIVATE);
/* The thread library is responsible for keeping the values in the
thread descriptor up-to-date in case the user changes them. */
@@ -173,7 +173,7 @@ pthread_getattr_np (thread_id, attr)
}
}
- lll_unlock (thread->lock);
+ lll_unlock (thread->lock, LLL_PRIVATE);
return ret;
}
--- libc/nptl/sysdeps/pthread/createthread.c.jj 2006-09-05 19:13:14.000000000 +0200
+++ libc/nptl/sysdeps/pthread/createthread.c 2007-07-29 11:48:55.000000000 +0200
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004, 2006 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2004, 2006, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -60,7 +60,7 @@ do_clone (struct pthread *pd, const stru
/* We make sure the thread does not run far by forcing it to get a
lock. We lock it here too so that the new thread cannot continue
until we tell it to. */
- lll_lock (pd->lock);
+ lll_lock (pd->lock, LLL_PRIVATE);
/* One more thread. We cannot have the thread do this itself, since it
might exist but not have been scheduled yet by the time we've returned
@@ -223,7 +223,7 @@ create_thread (struct pthread *pd, const
__nptl_create_event ();
/* And finally restart the new thread. */
- lll_unlock (pd->lock);
+ lll_unlock (pd->lock, LLL_PRIVATE);
}
return res;
@@ -250,7 +250,7 @@ create_thread (struct pthread *pd, const
if (res == 0 && stopped)
/* And finally restart the new thread. */
- lll_unlock (pd->lock);
+ lll_unlock (pd->lock, LLL_PRIVATE);
return res;
}
--- libc/nptl/sysdeps/pthread/bits/stdio-lock.h.jj 2007-07-19 19:46:48.000000000 +0200
+++ libc/nptl/sysdeps/pthread/bits/stdio-lock.h 2007-07-31 12:40:13.000000000 +0200
@@ -42,7 +42,7 @@ typedef struct { int lock; int cnt; void
void *__self = THREAD_SELF; \
if ((_name).owner != __self) \
{ \
- lll_lock ((_name).lock); \
+ lll_lock ((_name).lock, LLL_PRIVATE); \
(_name).owner = __self; \
} \
++(_name).cnt; \
@@ -72,7 +72,7 @@ typedef struct { int lock; int cnt; void
if (--(_name).cnt == 0) \
{ \
(_name).owner = NULL; \
- lll_unlock ((_name).lock); \
+ lll_unlock ((_name).lock, LLL_PRIVATE); \
} \
} while (0)
--- libc/nptl/sysdeps/pthread/bits/libc-lock.h.jj 2007-03-21 21:22:17.000000000 +0100
+++ libc/nptl/sysdeps/pthread/bits/libc-lock.h 2007-07-29 11:48:55.000000000 +0200
@@ -228,7 +228,7 @@ typedef pthread_key_t __libc_key_t;
/* Lock the named lock variable. */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# define __libc_lock_lock(NAME) \
- ({ lll_lock (NAME); 0; })
+ ({ lll_lock (NAME, LLL_PRIVATE); 0; })
#else
# define __libc_lock_lock(NAME) \
__libc_maybe_call (__pthread_mutex_lock, (&(NAME)), 0)
@@ -245,7 +245,7 @@ typedef pthread_key_t __libc_key_t;
void *self = THREAD_SELF; \
if ((NAME).owner != self) \
{ \
- lll_lock ((NAME).lock); \
+ lll_lock ((NAME).lock, LLL_PRIVATE); \
(NAME).owner = self; \
} \
++(NAME).cnt; \
@@ -299,7 +299,7 @@ typedef pthread_key_t __libc_key_t;
/* Unlock the named lock variable. */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# define __libc_lock_unlock(NAME) \
- lll_unlock (NAME)
+ lll_unlock (NAME, LLL_PRIVATE)
#else
# define __libc_lock_unlock(NAME) \
__libc_maybe_call (__pthread_mutex_unlock, (&(NAME)), 0)
@@ -315,7 +315,7 @@ typedef pthread_key_t __libc_key_t;
if (--(NAME).cnt == 0) \
{ \
(NAME).owner = NULL; \
- lll_unlock ((NAME).lock); \
+ lll_unlock ((NAME).lock, LLL_PRIVATE); \
} \
} while (0)
#else
--- libc/nptl/sysdeps/unix/sysv/linux/alpha/lowlevellock.h.jj 2007-07-29 11:45:14.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/alpha/lowlevellock.h 2007-07-30 23:01:58.000000000 +0200
@@ -70,9 +70,6 @@
#endif
-/* Initializer for compatibility lock. */
-#define LLL_MUTEX_LOCK_INITIALIZER (0)
-
#define lll_futex_wait(futexp, val, private) \
lll_futex_timed_wait (futexp, val, NULL, private)
@@ -96,7 +93,7 @@
INTERNAL_SYSCALL_ERROR_P (__ret, __err)? -__ret : __ret; \
})
-#define lll_robust_mutex_dead(futexv) \
+#define lll_robust_dead(futexv) \
do \
{ \
int *__futexp = &(futexv); \
@@ -132,149 +129,130 @@
static inline int __attribute__((always_inline))
-__lll_mutex_trylock(int *futex)
+__lll_trylock(int *futex)
{
return atomic_compare_and_exchange_val_acq (futex, 1, 0) != 0;
}
-#define lll_mutex_trylock(lock) __lll_mutex_trylock (&(lock))
+#define lll_trylock(lock) __lll_trylock (&(lock))
static inline int __attribute__((always_inline))
-__lll_mutex_cond_trylock(int *futex)
+__lll_cond_trylock(int *futex)
{
return atomic_compare_and_exchange_val_acq (futex, 2, 0) != 0;
}
-#define lll_mutex_cond_trylock(lock) __lll_mutex_cond_trylock (&(lock))
+#define lll_cond_trylock(lock) __lll_cond_trylock (&(lock))
static inline int __attribute__((always_inline))
-__lll_robust_mutex_trylock(int *futex, int id)
+__lll_robust_trylock(int *futex, int id)
{
return atomic_compare_and_exchange_val_acq (futex, id, 0) != 0;
}
-#define lll_robust_mutex_trylock(lock, id) \
- __lll_robust_mutex_trylock (&(lock), id)
+#define lll_robust_trylock(lock, id) \
+ __lll_robust_trylock (&(lock), id)
-extern void __lll_lock_wait (int *futex) attribute_hidden;
-extern int __lll_robust_lock_wait (int *futex) attribute_hidden;
+extern void __lll_lock_wait_private (int *futex) attribute_hidden;
+extern void __lll_lock_wait (int *futex, int private) attribute_hidden;
+extern int __lll_robust_lock_wait (int *futex, int private) attribute_hidden;
static inline void __attribute__((always_inline))
-__lll_mutex_lock(int *futex)
+__lll_lock(int *futex, int private)
{
if (atomic_compare_and_exchange_bool_acq (futex, 1, 0) != 0)
- __lll_lock_wait (futex);
+ {
+ if (__builtin_constant_p (private) && private == LLL_PRIVATE)
+ __lll_lock_wait_private (futex);
+ else
+ __lll_lock_wait (futex, private);
+ }
}
-#define lll_mutex_lock(futex) __lll_mutex_lock (&(futex))
+#define lll_lock(futex, private) __lll_lock (&(futex), private)
static inline int __attribute__ ((always_inline))
-__lll_robust_mutex_lock (int *futex, int id)
+__lll_robust_lock (int *futex, int id, int private)
{
int result = 0;
if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0)
- result = __lll_robust_lock_wait (futex);
+ result = __lll_robust_lock_wait (futex, private);
return result;
}
-#define lll_robust_mutex_lock(futex, id) \
- __lll_robust_mutex_lock (&(futex), id)
+#define lll_robust_lock(futex, id, private) \
+ __lll_robust_lock (&(futex), id, private)
static inline void __attribute__ ((always_inline))
-__lll_mutex_cond_lock (int *futex)
+__lll_cond_lock (int *futex, int private)
{
if (atomic_compare_and_exchange_bool_acq (futex, 2, 0) != 0)
- __lll_lock_wait (futex);
+ __lll_lock_wait (futex, private);
}
-#define lll_mutex_cond_lock(futex) __lll_mutex_cond_lock (&(futex))
+#define lll_cond_lock(futex, private) __lll_cond_lock (&(futex), private)
-#define lll_robust_mutex_cond_lock(futex, id) \
- __lll_robust_mutex_lock (&(futex), (id) | FUTEX_WAITERS)
+#define lll_robust_cond_lock(futex, id, private) \
+ __lll_robust_lock (&(futex), (id) | FUTEX_WAITERS, private)
-extern int __lll_timedlock_wait (int *futex, const struct timespec *)
- attribute_hidden;
-extern int __lll_robust_timedlock_wait (int *futex, const struct timespec *)
- attribute_hidden;
+extern int __lll_timedlock_wait (int *futex, const struct timespec *,
+ int private) attribute_hidden;
+extern int __lll_robust_timedlock_wait (int *futex, const struct timespec *,
+ int private) attribute_hidden;
static inline int __attribute__ ((always_inline))
-__lll_mutex_timedlock (int *futex, const struct timespec *abstime)
+__lll_timedlock (int *futex, const struct timespec *abstime, int private)
{
int result = 0;
if (atomic_compare_and_exchange_bool_acq (futex, 1, 0) != 0)
- result = __lll_timedlock_wait (futex, abstime);
+ result = __lll_timedlock_wait (futex, abstime, private);
return result;
}
-#define lll_mutex_timedlock(futex, abstime) \
- __lll_mutex_timedlock (&(futex), abstime)
+#define lll_timedlock(futex, abstime, private) \
+ __lll_timedlock (&(futex), abstime, private)
static inline int __attribute__ ((always_inline))
-__lll_robust_mutex_timedlock (int *futex, const struct timespec *abstime,
- int id)
+__lll_robust_timedlock (int *futex, const struct timespec *abstime,
+ int id, int private)
{
int result = 0;
if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0)
-    result = __lll_robust_timedlock_wait (futex, abstime);
+    result = __lll_robust_timedlock_wait (futex, abstime, private);
return result;
}
-#define lll_robust_mutex_timedlock(futex, abstime, id) \
- __lll_robust_mutex_timedlock (&(futex), abstime, id)
+#define lll_robust_timedlock(futex, abstime, id, private) \
+ __lll_robust_timedlock (&(futex), abstime, id, private)
static inline void __attribute__ ((always_inline))
-__lll_mutex_unlock (int *futex)
+__lll_unlock (int *futex, int private)
{
int val = atomic_exchange_rel (futex, 0);
if (__builtin_expect (val > 1, 0))
- lll_futex_wake (futex, 1, LLL_SHARED);
+ lll_futex_wake (futex, 1, private);
}
-#define lll_mutex_unlock(futex) __lll_mutex_unlock(&(futex))
+#define lll_unlock(futex, private) __lll_unlock(&(futex), private)
static inline void __attribute__ ((always_inline))
-__lll_robust_mutex_unlock (int *futex, int mask)
+__lll_robust_unlock (int *futex, int private)
{
int val = atomic_exchange_rel (futex, 0);
- if (__builtin_expect (val & mask, 0))
- lll_futex_wake (futex, 1, LLL_SHARED);
+ if (__builtin_expect (val & FUTEX_WAITERS, 0))
+ lll_futex_wake (futex, 1, private);
}
-#define lll_robust_mutex_unlock(futex) \
- __lll_robust_mutex_unlock(&(futex), FUTEX_WAITERS)
-
+#define lll_robust_unlock(futex, private) \
+ __lll_robust_unlock(&(futex), private)
-static inline void __attribute__ ((always_inline))
-__lll_mutex_unlock_force (int *futex)
-{
- (void) atomic_exchange_rel (futex, 0);
- lll_futex_wake (futex, 1, LLL_SHARED);
-}
-#define lll_mutex_unlock_force(futex) __lll_mutex_unlock_force(&(futex))
-
-#define lll_mutex_islocked(futex) \
+#define lll_islocked(futex) \
(futex != 0)
-
-/* Our internal lock implementation is identical to the binary-compatible
- mutex implementation. */
-
-/* Type for lock object. */
-typedef int lll_lock_t;
-
/* Initializers for lock. */
#define LLL_LOCK_INITIALIZER (0)
#define LLL_LOCK_INITIALIZER_LOCKED (1)
-/* The states of a lock are:
- 0 - untaken
- 1 - taken by one user
- >1 - taken by more users */
-
-#define lll_trylock(lock) lll_mutex_trylock (lock)
-#define lll_lock(lock) lll_mutex_lock (lock)
-#define lll_unlock(lock) lll_mutex_unlock (lock)
-#define lll_islocked(lock) lll_mutex_islocked (lock)
/* The kernel notifies a process which uses CLONE_CLEARTID via futex
wakeup when the clone terminates. The memory location contains the
@@ -298,26 +276,4 @@ extern int __lll_timedwait_tid (int *, c
__res; \
})
-
-/* Conditional variable handling. */
-
-extern void __lll_cond_wait (pthread_cond_t *cond)
- attribute_hidden;
-extern int __lll_cond_timedwait (pthread_cond_t *cond,
- const struct timespec *abstime)
- attribute_hidden;
-extern void __lll_cond_wake (pthread_cond_t *cond)
- attribute_hidden;
-extern void __lll_cond_broadcast (pthread_cond_t *cond)
- attribute_hidden;
-
-#define lll_cond_wait(cond) \
- __lll_cond_wait (cond)
-#define lll_cond_timedwait(cond, abstime) \
- __lll_cond_timedwait (cond, abstime)
-#define lll_cond_wake(cond) \
- __lll_cond_wake (cond)
-#define lll_cond_broadcast(cond) \
- __lll_cond_broadcast (cond)
-
#endif /* lowlevellock.h */
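Concretely, with the alpha __lll_lock above the two kinds of callers come out as:

  /* Constant LLL_PRIVATE: the __builtin_constant_p branch folds away and
     the contended path is __lll_lock_wait_private (&pd->lock).  */
  lll_lock (pd->lock, LLL_PRIVATE);

  /* Variable futex type: the contended path is
     __lll_lock_wait (&ibarrier->lock, private).  */
  lll_lock (ibarrier->lock, ibarrier->private ^ FUTEX_PRIVATE_FLAG);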
--- libc/nptl/sysdeps/unix/sysv/linux/ia64/lowlevellock.h.jj 2007-07-29 11:45:14.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/ia64/lowlevellock.h 2007-07-30 21:50:40.000000000 +0200
@@ -73,9 +73,6 @@
/* Delay in spinlock loop. */
#define BUSY_WAIT_NOP asm ("hint @pause")
-/* Initializer for compatibility lock. */
-#define LLL_MUTEX_LOCK_INITIALIZER (0)
-
#define lll_futex_wait(futex, val, private) \
lll_futex_timed_wait (futex, val, NULL, private)
@@ -95,12 +92,13 @@
_r10 == -1 ? -_retval : _retval; \
})
-#define lll_robust_mutex_dead(futexv) \
+#define lll_robust_dead(futexv, private) \
do \
{ \
int *__futexp = &(futexv); \
atomic_or (__futexp, FUTEX_OWNER_DIED); \
- DO_INLINE_SYSCALL(futex, 3, (long) __futexp, FUTEX_WAKE, 1); \
+ DO_INLINE_SYSCALL(futex, 3, (long) __futexp, \
+ __lll_private_flag (FUTEX_WAKE, private), 1); \
} \
while (0)
@@ -123,156 +121,144 @@ while (0)
})
-#define __lll_mutex_trylock(futex) \
+#define __lll_trylock(futex) \
(atomic_compare_and_exchange_val_acq (futex, 1, 0) != 0)
-#define lll_mutex_trylock(futex) __lll_mutex_trylock (&(futex))
+#define lll_trylock(futex) __lll_trylock (&(futex))
-#define __lll_robust_mutex_trylock(futex, id) \
+#define __lll_robust_trylock(futex, id) \
(atomic_compare_and_exchange_val_acq (futex, id, 0) != 0)
-#define lll_robust_mutex_trylock(futex, id) \
- __lll_robust_mutex_trylock (&(futex), id)
+#define lll_robust_trylock(futex, id) \
+ __lll_robust_trylock (&(futex), id)
-#define __lll_mutex_cond_trylock(futex) \
+#define __lll_cond_trylock(futex) \
(atomic_compare_and_exchange_val_acq (futex, 2, 0) != 0)
-#define lll_mutex_cond_trylock(futex) __lll_mutex_cond_trylock (&(futex))
-
-
-extern void __lll_lock_wait (int *futex) attribute_hidden;
-extern int __lll_robust_lock_wait (int *futex) attribute_hidden;
+#define lll_cond_trylock(futex) __lll_cond_trylock (&(futex))
-#define __lll_mutex_lock(futex) \
- ((void) ({ \
- int *__futex = (futex); \
- if (atomic_compare_and_exchange_bool_acq (__futex, 1, 0) != 0) \
- __lll_lock_wait (__futex); \
+extern void __lll_lock_wait_private (int *futex) attribute_hidden;
+extern void __lll_lock_wait (int *futex, int private) attribute_hidden;
+extern int __lll_robust_lock_wait (int *futex, int private) attribute_hidden;
+
+
+#define __lll_lock(futex, private) \
+ ((void) ({ \
+ int *__futex = (futex); \
+ if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, \
+ 1, 0), 0)) \
+ { \
+ if (__builtin_constant_p (private) && (private) == LLL_PRIVATE) \
+ __lll_lock_wait_private (__futex); \
+ else \
+ __lll_lock_wait (__futex, private); \
}))
-#define lll_mutex_lock(futex) __lll_mutex_lock (&(futex))
+#define lll_lock(futex, private) __lll_lock (&(futex), private)
-#define __lll_robust_mutex_lock(futex, id) \
- ({ \
- int *__futex = (futex); \
- int __val = 0; \
- \
- if (atomic_compare_and_exchange_bool_acq (__futex, id, 0) != 0) \
- __val = __lll_robust_lock_wait (__futex); \
- __val; \
+#define __lll_robust_lock(futex, id, private) \
+ ({ \
+ int *__futex = (futex); \
+ int __val = 0; \
+ \
+ if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, id, \
+ 0), 0)) \
+ __val = __lll_robust_lock_wait (__futex, private); \
+ __val; \
})
-#define lll_robust_mutex_lock(futex, id) __lll_robust_mutex_lock (&(futex), id)
+#define lll_robust_lock(futex, id, private) \
+ __lll_robust_lock (&(futex), id, private)
-#define __lll_mutex_cond_lock(futex) \
- ((void) ({ \
- int *__futex = (futex); \
- if (atomic_compare_and_exchange_bool_acq (__futex, 2, 0) != 0) \
- __lll_lock_wait (__futex); \
+#define __lll_cond_lock(futex, private) \
+ ((void) ({ \
+ int *__futex = (futex); \
+ if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, 2, \
+ 0), 0)) \
+ __lll_lock_wait (__futex, private); \
}))
-#define lll_mutex_cond_lock(futex) __lll_mutex_cond_lock (&(futex))
+#define lll_cond_lock(futex, private) __lll_cond_lock (&(futex), private)
-#define __lll_robust_mutex_cond_lock(futex, id) \
- ({ \
- int *__futex = (futex); \
- int __val = 0; \
- int __id = (id) | FUTEX_WAITERS; \
- \
- if (atomic_compare_and_exchange_bool_acq (__futex, __id, 0) != 0) \
- __val = __lll_robust_lock_wait (__futex); \
- __val; \
+#define __lll_robust_cond_lock(futex, id, private) \
+ ({ \
+ int *__futex = (futex); \
+ int __val = 0; \
+ int __id = (id) | FUTEX_WAITERS; \
+ \
+ if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, \
+ __id, 0), 0)) \
+ __val = __lll_robust_lock_wait (__futex, private); \
+ __val; \
})
-#define lll_robust_mutex_cond_lock(futex, id) \
- __lll_robust_mutex_cond_lock (&(futex), id)
-
-
-extern int __lll_timedlock_wait (int *futex, const struct timespec *)
- attribute_hidden;
-extern int __lll_robust_timedlock_wait (int *futex, const struct timespec *)
- attribute_hidden;
+#define lll_robust_cond_lock(futex, id, private) \
+ __lll_robust_cond_lock (&(futex), id, private)
-#define __lll_mutex_timedlock(futex, abstime) \
- ({ \
- int *__futex = (futex); \
- int __val = 0; \
- \
- if (atomic_compare_and_exchange_bool_acq (__futex, 1, 0) != 0) \
- __val = __lll_timedlock_wait (__futex, abstime); \
- __val; \
+extern int __lll_timedlock_wait (int *futex, const struct timespec *,
+ int private) attribute_hidden;
+extern int __lll_robust_timedlock_wait (int *futex, const struct timespec *,
+ int private) attribute_hidden;
+
+
+#define __lll_timedlock(futex, abstime, private) \
+ ({ \
+ int *__futex = (futex); \
+ int __val = 0; \
+ \
+ if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, 1, \
+ 0), 0)) \
+ __val = __lll_timedlock_wait (__futex, abstime, private); \
+ __val; \
})
-#define lll_mutex_timedlock(futex, abstime) \
- __lll_mutex_timedlock (&(futex), abstime)
+#define lll_timedlock(futex, abstime, private) \
+ __lll_timedlock (&(futex), abstime, private)
-#define __lll_robust_mutex_timedlock(futex, abstime, id) \
- ({ \
- int *__futex = (futex); \
- int __val = 0; \
- \
- if (atomic_compare_and_exchange_bool_acq (__futex, id, 0) != 0) \
- __val = __lll_robust_timedlock_wait (__futex, abstime); \
- __val; \
+#define __lll_robust_timedlock(futex, abstime, id, private) \
+ ({ \
+ int *__futex = (futex); \
+ int __val = 0; \
+ \
+ if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, id, \
+ 0), 0)) \
+ __val = __lll_robust_timedlock_wait (__futex, abstime, private); \
+ __val; \
})
-#define lll_robust_mutex_timedlock(futex, abstime, id) \
- __lll_robust_mutex_timedlock (&(futex), abstime, id)
-
-
-#define __lll_mutex_unlock(futex) \
- ((void) ({ \
- int *__futex = (futex); \
- int __val = atomic_exchange_rel (__futex, 0); \
- \
- if (__builtin_expect (__val > 1, 0)) \
- lll_futex_wake (__futex, 1, LLL_SHARED); \
- }))
-#define lll_mutex_unlock(futex) \
- __lll_mutex_unlock(&(futex))
+#define lll_robust_timedlock(futex, abstime, id, private) \
+ __lll_robust_timedlock (&(futex), abstime, id, private)
-#define __lll_robust_mutex_unlock(futex) \
- ((void) ({ \
- int *__futex = (futex); \
- int __val = atomic_exchange_rel (__futex, 0); \
- \
- if (__builtin_expect (__val & FUTEX_WAITERS, 0)) \
- lll_futex_wake (__futex, 1, LLL_SHARED); \
+#define __lll_unlock(futex, private) \
+ ((void) ({ \
+ int *__futex = (futex); \
+ int __val = atomic_exchange_rel (__futex, 0); \
+ \
+ if (__builtin_expect (__val > 1, 0)) \
+ lll_futex_wake (__futex, 1, private); \
}))
-#define lll_robust_mutex_unlock(futex) \
- __lll_robust_mutex_unlock(&(futex))
+#define lll_unlock(futex, private) __lll_unlock(&(futex), private)
-#define __lll_mutex_unlock_force(futex) \
- ((void) ({ \
- int *__futex = (futex); \
- (void) atomic_exchange_rel (__futex, 0); \
- lll_futex_wake (__futex, 1, LLL_SHARED); \
+#define __lll_robust_unlock(futex, private) \
+ ((void) ({ \
+ int *__futex = (futex); \
+ int __val = atomic_exchange_rel (__futex, 0); \
+ \
+ if (__builtin_expect (__val & FUTEX_WAITERS, 0)) \
+ lll_futex_wake (__futex, 1, private); \
}))
-#define lll_mutex_unlock_force(futex) \
- __lll_mutex_unlock_force(&(futex))
+#define lll_robust_unlock(futex, private) \
+ __lll_robust_unlock(&(futex), private)
-#define lll_mutex_islocked(futex) \
+#define lll_islocked(futex) \
(futex != 0)
-
-/* We have a separate internal lock implementation which is not tied
- to binary compatibility. We can use the lll_mutex_*. */
-
-/* Type for lock object. */
-typedef int lll_lock_t;
-
/* Initializers for lock. */
#define LLL_LOCK_INITIALIZER (0)
#define LLL_LOCK_INITIALIZER_LOCKED (1)
-#define lll_trylock(futex) lll_mutex_trylock (futex)
-#define lll_lock(futex) lll_mutex_lock (futex)
-#define lll_unlock(futex) lll_mutex_unlock (futex)
-#define lll_islocked(futex) lll_mutex_islocked (futex)
-
-
/* The kernel notifies a process which uses CLONE_CLEARTID via futex
wakeup when the clone terminates. The memory location contains the
thread ID while the clone is running and is reset to zero
@@ -297,26 +283,4 @@ extern int __lll_timedwait_tid (int *, c
__res; \
})
-
-/* Conditional variable handling. */
-
-extern void __lll_cond_wait (pthread_cond_t *cond)
- attribute_hidden;
-extern int __lll_cond_timedwait (pthread_cond_t *cond,
- const struct timespec *abstime)
- attribute_hidden;
-extern void __lll_cond_wake (pthread_cond_t *cond)
- attribute_hidden;
-extern void __lll_cond_broadcast (pthread_cond_t *cond)
- attribute_hidden;
-
-#define lll_cond_wait(cond) \
- __lll_cond_wait (cond)
-#define lll_cond_timedwait(cond, abstime) \
- __lll_cond_timedwait (cond, abstime)
-#define lll_cond_wake(cond) \
- __lll_cond_wake (cond)
-#define lll_cond_broadcast(cond) \
- __lll_cond_broadcast (cond)
-
#endif /* lowlevellock.h */
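The lll_robust_dead change above routes the FUTEX_WAKE through __lll_private_flag.  That macro is not part of this hunk; for the __ASSUME_PRIVATE_FUTEX case the intent is simply

  /* Sketch only; the non-__ASSUME_PRIVATE_FUTEX variant additionally has
     to mask with the runtime-detected private-futex support.  */
  #define __lll_private_flag(fl, private) \
    (((fl) | FUTEX_PRIVATE_FLAG) ^ (private))

so an LLL_PRIVATE (0) caller ends up with FUTEX_WAKE | FUTEX_PRIVATE_FLAG and an LLL_SHARED (128) caller with plain FUTEX_WAKE.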
--- libc/nptl/sysdeps/unix/sysv/linux/unregister-atfork.c.jj 2007-07-29 11:45:14.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/unregister-atfork.c 2007-07-29 11:48:55.000000000 +0200
@@ -54,7 +54,7 @@ __unregister_atfork (dso_handle)
that there couldn't have been another thread deleting something.
The __unregister_atfork function is only called from the
dlclose() code which itself serializes the operations. */
- lll_lock (__fork_lock);
+ lll_lock (__fork_lock, LLL_PRIVATE);
/* We have to create a new list with all the entries we don't remove. */
struct deleted_handler
@@ -89,7 +89,7 @@ __unregister_atfork (dso_handle)
while (runp != NULL);
/* Release the lock. */
- lll_unlock (__fork_lock);
+ lll_unlock (__fork_lock, LLL_PRIVATE);
/* Walk the list of all entries which have to be deleted. */
while (deleted != NULL)
--- libc/nptl/sysdeps/unix/sysv/linux/fork.h.jj 2006-05-15 22:19:43.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/fork.h 2007-07-29 11:48:55.000000000 +0200
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2006 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2006, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -26,7 +26,7 @@ extern unsigned long int __fork_generati
extern unsigned long int *__fork_generation_pointer attribute_hidden;
/* Lock to protect allocation and deallocation of fork handlers. */
-extern lll_lock_t __fork_lock attribute_hidden;
+extern int __fork_lock attribute_hidden;
/* Elements of the fork handler lists. */
struct fork_handler
--- libc/nptl/sysdeps/unix/sysv/linux/fork.c.jj 2007-07-29 11:45:14.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/fork.c 2007-07-29 11:48:55.000000000 +0200
@@ -183,7 +183,7 @@ __libc_fork (void)
}
/* Initialize the fork lock. */
- __fork_lock = (lll_lock_t) LLL_LOCK_INITIALIZER;
+ __fork_lock = LLL_LOCK_INITIALIZER;
}
else
{
--- libc/nptl/sysdeps/unix/sysv/linux/i386/pthread_once.S.jj 2007-05-24 16:41:25.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/i386/pthread_once.S 2007-07-31 12:20:52.000000000 +0200
@@ -20,19 +20,9 @@
#include <unwindbuf.h>
#include <sysdep.h>
#include <kernel-features.h>
+#include <lowlevellock.h>
-#ifndef UP
-# define LOCK lock
-#else
-# define LOCK
-#endif
-
-#define SYS_futex 240
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-#define FUTEX_PRIVATE_FLAG 128
-
.comm __fork_generation, 4, 4
.text
--- libc/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedrdlock.S.jj 2007-07-30 18:10:05.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedrdlock.S 2007-07-30 18:08:45.000000000 +0200
@@ -18,22 +18,11 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <lowlevelrwlock.h>
#include <pthread-errnos.h>
-#define SYS_gettimeofday __NR_gettimeofday
-#define SYS_futex 240
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-
-#ifndef UP
-# define LOCK lock
-#else
-# define LOCK
-#endif
-
-
.text
.globl pthread_rwlock_timedrdlock
@@ -88,7 +77,7 @@ pthread_rwlock_timedrdlock:
/* Get current time. */
11: movl %esp, %ebx
xorl %ecx, %ecx
- movl $SYS_gettimeofday, %eax
+ movl $__NR_gettimeofday, %eax
ENTER_KERNEL
/* Compute relative timeout. */
@@ -142,11 +131,11 @@ pthread_rwlock_timedrdlock:
cmpl $-ETIMEDOUT, %esi
jne 2b
-18: movl $ETIMEDOUT, %ecx
+18: movl $ETIMEDOUT, %edx
jmp 9f
-5: xorl %ecx, %ecx
+5: xorl %edx, %edx
addl $1, NR_READERS(%ebp)
je 8f
9: LOCK
@@ -157,7 +146,7 @@ pthread_rwlock_timedrdlock:
#endif
jne 6f
-7: movl %ecx, %eax
+7: movl %edx, %eax
addl $8, %esp
popl %ebp
@@ -168,16 +157,17 @@ pthread_rwlock_timedrdlock:
1:
#if MUTEX == 0
- movl %ebp, %ecx
+ movl %ebp, %edx
#else
- leal MUTEX(%ebp), %ecx
+ leal MUTEX(%ebp), %edx
#endif
- call __lll_mutex_lock_wait
+ movl PSHARED(%ebp), %ecx
+ call __lll_lock_wait
jmp 2b
14: cmpl %gs:TID, %eax
jne 3b
- movl $EDEADLK, %ecx
+ movl $EDEADLK, %edx
jmp 9b
6:
@@ -186,17 +176,18 @@ pthread_rwlock_timedrdlock:
#else
leal MUTEX(%ebp), %eax
#endif
- call __lll_mutex_unlock_wake
+ movl PSHARED(%ebp), %ecx
+ call __lll_unlock_wake
jmp 7b
/* Overflow. */
8: subl $1, NR_READERS(%ebp)
- movl $EAGAIN, %ecx
+ movl $EAGAIN, %edx
jmp 9b
/* Overflow. */
4: subl $1, READERS_QUEUED(%ebp)
- movl $EAGAIN, %ecx
+ movl $EAGAIN, %edx
jmp 9b
10:
@@ -205,21 +196,23 @@ pthread_rwlock_timedrdlock:
#else
leal MUTEX(%ebp), %eax
#endif
- call __lll_mutex_unlock_wake
+ movl PSHARED(%ebp), %ecx
+ call __lll_unlock_wake
jmp 11b
12:
#if MUTEX == 0
- movl %ebp, %ecx
+ movl %ebp, %edx
#else
- leal MUTEX(%ebp), %ecx
+ leal MUTEX(%ebp), %edx
#endif
- call __lll_mutex_lock_wait
+ movl PSHARED(%ebp), %ecx
+ call __lll_lock_wait
jmp 13b
16: movl $-ETIMEDOUT, %esi
jmp 17b
-19: movl $EINVAL, %ecx
+19: movl $EINVAL, %edx
jmp 9b
.size pthread_rwlock_timedrdlock,.-pthread_rwlock_timedrdlock
--- libc/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_wrlock.S.jj 2007-06-04 08:42:06.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_wrlock.S 2007-07-30 18:26:35.000000000 +0200
@@ -18,21 +18,11 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <lowlevelrwlock.h>
#include <pthread-errnos.h>
-#define SYS_futex 240
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-
-#ifndef UP
-# define LOCK lock
-#else
-# define LOCK
-#endif
-
-
.text
.globl __pthread_rwlock_wrlock
@@ -106,7 +96,7 @@ __pthread_rwlock_wrlock:
13: subl $1, WRITERS_QUEUED(%ebx)
jmp 2b
-5: xorl %ecx, %ecx
+5: xorl %edx, %edx
movl %gs:TID, %eax
movl %eax, WRITER(%ebx)
9: LOCK
@@ -118,23 +108,24 @@ __pthread_rwlock_wrlock:
jne 6f
7:
- movl %ecx, %eax
+ movl %edx, %eax
popl %ebx
popl %esi
ret
1:
#if MUTEX == 0
- movl %ebx, %ecx
+ movl %ebx, %edx
#else
- leal MUTEX(%ebx), %ecx
+ leal MUTEX(%ebx), %edx
#endif
- call __lll_mutex_lock_wait
+ movl PSHARED(%ebx), %ecx
+ call __lll_lock_wait
jmp 2b
14: cmpl %gs:TID , %eax
jne 3b
- movl $EDEADLK, %ecx
+ movl $EDEADLK, %edx
jmp 9b
6:
@@ -143,11 +134,12 @@ __pthread_rwlock_wrlock:
#else
leal MUTEX(%ebx), %eax
#endif
- call __lll_mutex_unlock_wake
+ movl PSHARED(%ebx), %ecx
+ call __lll_unlock_wake
jmp 7b
4: subl $1, WRITERS_QUEUED(%ebx)
- movl $EAGAIN, %ecx
+ movl $EAGAIN, %edx
jmp 9b
10:
@@ -156,16 +148,18 @@ __pthread_rwlock_wrlock:
#else
leal MUTEX(%ebx), %eax
#endif
- call __lll_mutex_unlock_wake
+ movl PSHARED(%ebx), %ecx
+ call __lll_unlock_wake
jmp 11b
12:
#if MUTEX == 0
- movl %ebx, %ecx
+ movl %ebx, %edx
#else
- leal MUTEX(%ebx), %ecx
+ leal MUTEX(%ebx), %edx
#endif
- call __lll_mutex_lock_wait
+ movl PSHARED(%ebx), %ecx
+ call __lll_lock_wait
jmp 13b
.size __pthread_rwlock_wrlock,.-__pthread_rwlock_wrlock
--- libc/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_trywait.S.jj 2006-04-09 04:42:29.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_trywait.S 2007-07-31 12:37:45.000000000 +0200
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2005, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -20,12 +20,7 @@
#include <sysdep.h>
#include <shlib-compat.h>
#include <pthread-errnos.h>
-
-#ifndef UP
-# define LOCK lock
-#else
-# define
-#endif
+#include <lowlevellock.h>
.text
--- libc/nptl/sysdeps/unix/sysv/linux/i386/i486/lowlevelrobustlock.S.jj 2006-09-05 16:46:43.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/i386/i486/lowlevelrobustlock.S 2007-07-30 15:56:10.000000000 +0200
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004, 2006 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2004, 2006, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -19,31 +19,36 @@
#include <sysdep.h>
#include <pthread-errnos.h>
+#include <lowlevellock.h>
#include <lowlevelrobustlock.h>
+#include <kernel-features.h>
.text
-#ifndef LOCK
-# ifdef UP
-# define LOCK
-# else
-# define LOCK lock
-# endif
-#endif
-
-#define SYS_gettimeofday __NR_gettimeofday
-#define SYS_futex 240
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
#define FUTEX_WAITERS 0x80000000
#define FUTEX_OWNER_DIED 0x40000000
+#ifdef __ASSUME_PRIVATE_FUTEX
+# define LOAD_FUTEX_WAIT(reg) \
+ xorl $(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg
+#else
+# if FUTEX_WAIT == 0
+# define LOAD_FUTEX_WAIT(reg) \
+ xorl $FUTEX_PRIVATE_FLAG, reg ; \
+ andl %gs:PRIVATE_FUTEX, reg
+# else
+# define LOAD_FUTEX_WAIT(reg) \
+ xorl $FUTEX_PRIVATE_FLAG, reg ; \
+ andl %gs:PRIVATE_FUTEX, reg ; \
+ orl $FUTEX_WAIT, reg
+# endif
+#endif
- .globl __lll_robust_mutex_lock_wait
- .type __lll_robust_mutex_lock_wait,@function
- .hidden __lll_robust_mutex_lock_wait
+ .globl __lll_robust_lock_wait
+ .type __lll_robust_lock_wait,@function
+ .hidden __lll_robust_lock_wait
.align 16
-__lll_robust_mutex_lock_wait:
+__lll_robust_lock_wait:
cfi_startproc
pushl %edx
cfi_adjust_cfa_offset(4)
@@ -55,9 +60,9 @@ __lll_robust_mutex_lock_wait:
cfi_offset(%ebx, -12)
cfi_offset(%esi, -16)
- movl %ecx, %ebx
+ movl %edx, %ebx
xorl %esi, %esi /* No timeout. */
- xorl %ecx, %ecx /* movl $FUTEX_WAIT, %ecx */
+ LOAD_FUTEX_WAIT (%ecx)
4: movl %eax, %edx
orl $FUTEX_WAITERS, %edx
@@ -98,14 +103,14 @@ __lll_robust_mutex_lock_wait:
cfi_restore(%edx)
ret
cfi_endproc
- .size __lll_robust_mutex_lock_wait,.-__lll_robust_mutex_lock_wait
+ .size __lll_robust_lock_wait,.-__lll_robust_lock_wait
- .globl __lll_robust_mutex_timedlock_wait
- .type __lll_robust_mutex_timedlock_wait,@function
- .hidden __lll_robust_mutex_timedlock_wait
+ .globl __lll_robust_timedlock_wait
+ .type __lll_robust_timedlock_wait,@function
+ .hidden __lll_robust_timedlock_wait
.align 16
-__lll_robust_mutex_timedlock_wait:
+__lll_robust_timedlock_wait:
cfi_startproc
/* Check for a valid timeout value. */
cmpl $1000000000, 4(%edx)
@@ -136,7 +141,7 @@ __lll_robust_mutex_timedlock_wait:
/* Get current time. */
movl %esp, %ebx
xorl %ecx, %ecx
- movl $SYS_gettimeofday, %eax
+ movl $__NR_gettimeofday, %eax
ENTER_KERNEL
/* Compute relative timeout. */
@@ -177,7 +182,8 @@ __lll_robust_mutex_timedlock_wait:
2:
/* Futex call. */
movl %esp, %esi
- xorl %ecx, %ecx /* movl $FUTEX_WAIT, %ecx */
+ movl 20(%esp), %ecx
+ LOAD_FUTEX_WAIT (%ecx)
movl $SYS_futex, %eax
ENTER_KERNEL
movl %eax, %ecx
@@ -224,4 +230,4 @@ __lll_robust_mutex_timedlock_wait:
8: movl $ETIMEDOUT, %eax
jmp 6b
cfi_endproc
- .size __lll_robust_mutex_timedlock_wait,.-__lll_robust_mutex_timedlock_wait
+ .size __lll_robust_timedlock_wait,.-__lll_robust_timedlock_wait
--- libc/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_rdlock.S.jj 2007-06-04 08:42:06.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_rdlock.S 2007-07-30 17:42:49.000000000 +0200
@@ -18,21 +18,11 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <lowlevelrwlock.h>
#include <pthread-errnos.h>
-#define SYS_futex 240
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-
-#ifndef UP
-# define LOCK lock
-#else
-# define LOCK
-#endif
-
-
.text
.globl __pthread_rwlock_rdlock
@@ -108,7 +98,7 @@ __pthread_rwlock_rdlock:
13: subl $1, READERS_QUEUED(%ebx)
jmp 2b
-5: xorl %ecx, %ecx
+5: xorl %edx, %edx
addl $1, NR_READERS(%ebx)
je 8f
9: LOCK
@@ -120,24 +110,25 @@ __pthread_rwlock_rdlock:
jne 6f
7:
- movl %ecx, %eax
+ movl %edx, %eax
popl %ebx
popl %esi
ret
1:
#if MUTEX == 0
- movl %ebx, %ecx
+ movl %ebx, %edx
#else
- leal MUTEX(%ebx), %ecx
+ leal MUTEX(%ebx), %edx
#endif
- call __lll_mutex_lock_wait
+ movl PSHARED(%ebx), %ecx
+ call __lll_lock_wait
jmp 2b
14: cmpl %gs:TID, %eax
jne 3b
/* Deadlock detected. */
- movl $EDEADLK, %ecx
+ movl $EDEADLK, %edx
jmp 9b
6:
@@ -146,17 +137,18 @@ __pthread_rwlock_rdlock:
#else
leal MUTEX(%ebx), %eax
#endif
- call __lll_mutex_unlock_wake
+ movl PSHARED(%ebx), %ecx
+ call __lll_unlock_wake
jmp 7b
/* Overflow. */
8: subl $1, NR_READERS(%ebx)
- movl $EAGAIN, %ecx
+ movl $EAGAIN, %edx
jmp 9b
/* Overflow. */
4: subl $1, READERS_QUEUED(%ebx)
- movl $EAGAIN, %ecx
+ movl $EAGAIN, %edx
jmp 9b
10:
@@ -165,16 +157,18 @@ __pthread_rwlock_rdlock:
#else
leal MUTEX(%ebx), %eax
#endif
- call __lll_mutex_unlock_wake
+ movl PSHARED(%ebx), %ecx
+ call __lll_unlock_wake
jmp 11b
12:
#if MUTEX == 0
- movl %ebx, %ecx
+ movl %ebx, %edx
#else
- leal MUTEX(%ebx), %ecx
+ leal MUTEX(%ebx), %edx
#endif
- call __lll_mutex_lock_wait
+ movl PSHARED(%ebx), %ecx
+ call __lll_lock_wait
jmp 13b
.size __pthread_rwlock_rdlock,.-__pthread_rwlock_rdlock
--- libc/nptl/sysdeps/unix/sysv/linux/i386/i486/lowlevellock.S.jj 2007-06-04 08:42:06.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/i386/i486/lowlevellock.S 2007-07-30 19:06:09.000000000 +0200
@@ -19,42 +19,53 @@
#include <sysdep.h>
#include <pthread-errnos.h>
+#include <kernel-features.h>
+#include <lowlevellock.h>
.text
-#ifndef LOCK
-# ifdef UP
-# define LOCK
+#ifdef __ASSUME_PRIVATE_FUTEX
+# define LOAD_PRIVATE_FUTEX_WAIT(reg) \
+ movl $(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg
+# define LOAD_PRIVATE_FUTEX_WAKE(reg) \
+ movl $(FUTEX_WAKE | FUTEX_PRIVATE_FLAG), reg
+# define LOAD_FUTEX_WAIT(reg) \
+ xorl $(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg
+# define LOAD_FUTEX_WAKE(reg) \
+ xorl $(FUTEX_WAKE | FUTEX_PRIVATE_FLAG), reg
+#else
+# if FUTEX_WAIT == 0
+# define LOAD_PRIVATE_FUTEX_WAIT(reg) \
+ movl %gs:PRIVATE_FUTEX, reg
# else
-# define LOCK lock
+# define LOAD_PRIVATE_FUTEX_WAIT(reg) \
+ movl %gs:PRIVATE_FUTEX, reg ; \
+ orl $FUTEX_WAIT, reg
# endif
-#endif
-
-#define SYS_gettimeofday __NR_gettimeofday
-#define SYS_futex 240
-#ifndef FUTEX_WAIT
-# define FUTEX_WAIT 0
-# define FUTEX_WAKE 1
-#endif
-
-#ifndef LOAD_FUTEX_WAIT
+# define LOAD_PRIVATE_FUTEX_WAKE(reg) \
+ movl %gs:PRIVATE_FUTEX, reg ; \
+ orl $FUTEX_WAKE, reg
# if FUTEX_WAIT == 0
# define LOAD_FUTEX_WAIT(reg) \
- xorl reg, reg
+ xorl $FUTEX_PRIVATE_FLAG, reg ; \
+ andl %gs:PRIVATE_FUTEX, reg
# else
# define LOAD_FUTEX_WAIT(reg) \
- movl $FUTEX_WAIT, reg
+ xorl $FUTEX_PRIVATE_FLAG, reg ; \
+ andl %gs:PRIVATE_FUTEX, reg ; \
+ orl $FUTEX_WAIT, reg
# endif
# define LOAD_FUTEX_WAKE(reg) \
- movl $FUTEX_WAKE, reg
+ xorl $FUTEX_PRIVATE_FLAG, reg ; \
+ andl %gs:PRIVATE_FUTEX, reg ; \
+ orl $FUTEX_WAKE, reg
#endif
-
- .globl __lll_mutex_lock_wait
- .type __lll_mutex_lock_wait,@function
- .hidden __lll_mutex_lock_wait
+ .globl __lll_lock_wait_private
+ .type __lll_lock_wait_private,@function
+ .hidden __lll_lock_wait_private
.align 16
-__lll_mutex_lock_wait:
+__lll_lock_wait_private:
cfi_startproc
pushl %edx
cfi_adjust_cfa_offset(4)
@@ -69,7 +80,7 @@ __lll_mutex_lock_wait:
movl $2, %edx
movl %ecx, %ebx
xorl %esi, %esi /* No timeout. */
- LOAD_FUTEX_WAIT (%ecx)
+ LOAD_PRIVATE_FUTEX_WAIT (%ecx)
cmpl %edx, %eax /* NB: %edx == 2 */
jne 2f
@@ -94,15 +105,60 @@ __lll_mutex_lock_wait:
cfi_restore(%edx)
ret
cfi_endproc
- .size __lll_mutex_lock_wait,.-__lll_mutex_lock_wait
-
+ .size __lll_lock_wait_private,.-__lll_lock_wait_private
#ifdef NOT_IN_libc
- .globl __lll_mutex_timedlock_wait
- .type __lll_mutex_timedlock_wait,@function
- .hidden __lll_mutex_timedlock_wait
+ .globl __lll_lock_wait
+ .type __lll_lock_wait,@function
+ .hidden __lll_lock_wait
.align 16
-__lll_mutex_timedlock_wait:
+__lll_lock_wait:
+ cfi_startproc
+ pushl %edx
+ cfi_adjust_cfa_offset(4)
+ pushl %ebx
+ cfi_adjust_cfa_offset(4)
+ pushl %esi
+ cfi_adjust_cfa_offset(4)
+ cfi_offset(%edx, -8)
+ cfi_offset(%ebx, -12)
+ cfi_offset(%esi, -16)
+
+ movl %edx, %ebx
+ movl $2, %edx
+ xorl %esi, %esi /* No timeout. */
+ LOAD_FUTEX_WAIT (%ecx)
+
+ cmpl %edx, %eax /* NB: %edx == 2 */
+ jne 2f
+
+1: movl $SYS_futex, %eax
+ ENTER_KERNEL
+
+2: movl %edx, %eax
+ xchgl %eax, (%ebx) /* NB: lock is implied */
+
+ testl %eax, %eax
+ jnz 1b
+
+ popl %esi
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%esi)
+ popl %ebx
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%ebx)
+ popl %edx
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%edx)
+ ret
+ cfi_endproc
+ .size __lll_lock_wait,.-__lll_lock_wait
+
+ .globl __lll_timedlock_wait
+ .type __lll_timedlock_wait,@function
+ .hidden __lll_timedlock_wait
+ .align 16
+__lll_timedlock_wait:
cfi_startproc
/* Check for a valid timeout value. */
cmpl $1000000000, 4(%edx)
@@ -132,7 +188,7 @@ __lll_mutex_timedlock_wait:
/* Get current time. */
movl %esp, %ebx
xorl %ecx, %ecx
- movl $SYS_gettimeofday, %eax
+ movl $__NR_gettimeofday, %eax
ENTER_KERNEL
/* Compute relative timeout. */
@@ -165,6 +221,7 @@ __lll_mutex_timedlock_wait:
/* Futex call. */
movl %esp, %esi
+ movl 16(%esp), %ecx
LOAD_FUTEX_WAIT (%ecx)
movl $SYS_futex, %eax
ENTER_KERNEL
@@ -215,15 +272,51 @@ __lll_mutex_timedlock_wait:
5: movl $ETIMEDOUT, %eax
jmp 6b
cfi_endproc
- .size __lll_mutex_timedlock_wait,.-__lll_mutex_timedlock_wait
+ .size __lll_timedlock_wait,.-__lll_timedlock_wait
#endif
+ .globl __lll_unlock_wake_private
+ .type __lll_unlock_wake_private,@function
+ .hidden __lll_unlock_wake_private
+ .align 16
+__lll_unlock_wake_private:
+ cfi_startproc
+ pushl %ebx
+ cfi_adjust_cfa_offset(4)
+ pushl %ecx
+ cfi_adjust_cfa_offset(4)
+ pushl %edx
+ cfi_adjust_cfa_offset(4)
+ cfi_offset(%ebx, -8)
+ cfi_offset(%ecx, -12)
+ cfi_offset(%edx, -16)
+
+ movl %eax, %ebx
+ movl $0, (%eax)
+ LOAD_PRIVATE_FUTEX_WAKE (%ecx)
+ movl $1, %edx /* Wake one thread. */
+ movl $SYS_futex, %eax
+ ENTER_KERNEL
+
+ popl %edx
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%edx)
+ popl %ecx
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%ecx)
+ popl %ebx
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%ebx)
+ ret
+ cfi_endproc
+ .size __lll_unlock_wake_private,.-__lll_unlock_wake_private
- .globl __lll_mutex_unlock_wake
- .type __lll_mutex_unlock_wake,@function
- .hidden __lll_mutex_unlock_wake
+#ifdef NOT_IN_libc
+ .globl __lll_unlock_wake
+ .type __lll_unlock_wake,@function
+ .hidden __lll_unlock_wake
.align 16
-__lll_mutex_unlock_wake:
+__lll_unlock_wake:
cfi_startproc
pushl %ebx
cfi_adjust_cfa_offset(4)
@@ -253,10 +346,8 @@ __lll_mutex_unlock_wake:
cfi_restore(%ebx)
ret
cfi_endproc
- .size __lll_mutex_unlock_wake,.-__lll_mutex_unlock_wake
+ .size __lll_unlock_wake,.-__lll_unlock_wake
-
-#ifdef NOT_IN_libc
.globl __lll_timedwait_tid
.type __lll_timedwait_tid,@function
.hidden __lll_timedwait_tid
@@ -274,7 +365,7 @@ __lll_timedwait_tid:
/* Get current time. */
2: movl %esp, %ebx
xorl %ecx, %ecx
- movl $SYS_gettimeofday, %eax
+ movl $__NR_gettimeofday, %eax
ENTER_KERNEL
/* Compute relative timeout. */
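To make the new i386 stubs easier to read: callers pass the LLL_PRIVATE/LLL_SHARED value in %ecx and LOAD_FUTEX_WAIT / LOAD_FUTEX_WAKE rewrite it in place into the futex operation argument.  In C terms the computation is (op and private_futex are just placeholder names here for the register resp. the %gs:PRIVATE_FUTEX tcb slot):

  /* __ASSUME_PRIVATE_FUTEX: a single xor.  */
  op = private ^ (FUTEX_WAIT | FUTEX_PRIVATE_FLAG);

  /* Otherwise private_futex caches FUTEX_PRIVATE_FLAG (or 0 if the kernel
     does not support it), so the flag is only kept when it really works.  */
  op = ((private ^ FUTEX_PRIVATE_FLAG) & private_futex) | FUTEX_WAIT;

and the same with FUTEX_WAKE on the wake side.  LLL_PRIVATE (0) becomes the private op whenever possible, LLL_SHARED (128 == FUTEX_PRIVATE_FLAG) cancels the flag and always uses the plain op.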
--- libc/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_broadcast.S.jj 2006-07-29 06:31:49.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_broadcast.S 2007-07-30 16:47:51.000000000 +0200
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004, 2006 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2004, 2006, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -19,24 +19,11 @@
#include <sysdep.h>
#include <shlib-compat.h>
+#include <lowlevellock.h>
#include <lowlevelcond.h>
#include <kernel-features.h>
#include <pthread-pi-defines.h>
-
-#ifdef UP
-# define LOCK
-#else
-# define LOCK lock
-#endif
-
-#define SYS_futex 240
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-#define FUTEX_REQUEUE 3
-#define FUTEX_CMP_REQUEUE 4
-
-#define EINVAL 22
-
+#include <pthread-errnos.h>
.text
@@ -141,21 +128,27 @@ __pthread_cond_broadcast:
/* Initial locking failed. */
1:
#if cond_lock == 0
- movl %ebx, %ecx
+ movl %ebx, %edx
#else
- leal cond_lock(%ebx), %ecx
+ leal cond_lock(%ebx), %edx
#endif
- call __lll_mutex_lock_wait
+ /* XYZ */
+ movl $LLL_SHARED, %ecx
+ call __lll_lock_wait
jmp 2b
/* Unlock in loop requires wakeup. */
5: leal cond_lock-cond_futex(%ebx), %eax
- call __lll_mutex_unlock_wake
+ /* XYZ */
+ movl $LLL_SHARED, %ecx
+ call __lll_unlock_wake
jmp 6b
/* Unlock in loop requires wakeup. */
7: leal cond_lock-cond_futex(%ebx), %eax
- call __lll_mutex_unlock_wake
+ /* XYZ */
+ movl $LLL_SHARED, %ecx
+ call __lll_unlock_wake
jmp 8b
9: /* The futex requeue functionality is not available. */
--- libc/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_post.S.jj 2007-06-04 08:42:06.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_post.S 2007-07-31 12:22:18.000000000 +0200
@@ -21,15 +21,7 @@
#include <shlib-compat.h>
#include <pthread-errnos.h>
#include <structsem.h>
-
-#ifndef UP
-# define LOCK lock
-#else
-# define
-#endif
-
-#define SYS_futex 240
-#define FUTEX_WAKE 1
+#include <lowlevellock.h>
.text
--- libc/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_unlock.S.jj 2007-07-30 18:19:01.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_unlock.S 2007-07-30 18:15:07.000000000 +0200
@@ -18,20 +18,10 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <lowlevelrwlock.h>
-#define SYS_futex 240
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-
-#ifndef UP
-# define LOCK lock
-#else
-# define LOCK
-#endif
-
-
.text
.globl __pthread_rwlock_unlock
@@ -115,11 +105,12 @@ __pthread_rwlock_unlock:
1:
#if MUTEX == 0
- movl %edi, %ecx
+ movl %edi, %edx
#else
- leal MUTEX(%edi), %ecx
+ leal MUTEX(%edi), %edx
#endif
- call __lll_mutex_lock_wait
+ movl PSHARED(%edi), %ecx
+ call __lll_lock_wait
jmp 2b
3:
@@ -128,7 +119,8 @@ __pthread_rwlock_unlock:
#else
leal MUTEX(%edi), %eax
#endif
- call __lll_mutex_unlock_wake
+ movl PSHARED(%edi), %ecx
+ call __lll_unlock_wake
jmp 4b
7:
@@ -137,7 +129,8 @@ __pthread_rwlock_unlock:
#else
leal MUTEX(%edi), %eax
#endif
- call __lll_mutex_unlock_wake
+ movl PSHARED(%edi), %ecx
+ call __lll_unlock_wake
jmp 8b
.size __pthread_rwlock_unlock,.-__pthread_rwlock_unlock
--- libc/nptl/sysdeps/unix/sysv/linux/i386/i486/libc-lowlevellock.S.jj 2007-06-04 08:42:06.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/i386/i486/libc-lowlevellock.S 2007-07-30 14:23:04.000000000 +0200
@@ -17,19 +17,4 @@
Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
02111-1307 USA. */
-#include <kernel-features.h>
-
-/* All locks in libc are private. Use the kernel feature if possible. */
-#define FUTEX_PRIVATE_FLAG 128
-#ifdef __ASSUME_PRIVATE_FUTEX
-# define FUTEX_WAIT (0 | FUTEX_PRIVATE_FLAG)
-# define FUTEX_WAKE (1 | FUTEX_PRIVATE_FLAG)
-#else
-# define LOAD_FUTEX_WAIT(reg) \
- movl %gs:PRIVATE_FUTEX, reg
-# define LOAD_FUTEX_WAKE(reg) \
- movl %gs:PRIVATE_FUTEX, reg ; \
- orl $FUTEX_WAKE, reg
-#endif
-
#include "lowlevellock.S"
--- libc/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_wait.S.jj 2007-06-04 08:42:06.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_wait.S 2007-07-30 17:23:12.000000000 +0200
@@ -19,19 +19,10 @@
#include <sysdep.h>
#include <shlib-compat.h>
+#include <lowlevellock.h>
#include <lowlevelcond.h>
#include <tcb-offsets.h>
-#ifdef UP
-# define LOCK
-#else
-# define LOCK lock
-#endif
-
-#define SYS_futex 240
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-
.text
@@ -202,11 +193,13 @@ __pthread_cond_wait:
1:
.LSbl1:
#if cond_lock == 0
- movl %ebx, %ecx
+ movl %ebx, %edx
#else
- leal cond_lock(%ebx), %ecx
+ leal cond_lock(%ebx), %edx
#endif
- call __lll_mutex_lock_wait
+ /* XYZ */
+ movl $LLL_SHARED, %ecx
+ call __lll_lock_wait
jmp 2b
/* Unlock in loop requires wakeup. */
@@ -217,17 +210,21 @@ __pthread_cond_wait:
#else
leal cond_lock(%ebx), %eax
#endif
- call __lll_mutex_unlock_wake
+ /* XYZ */
+ movl $LLL_SHARED, %ecx
+ call __lll_unlock_wake
jmp 4b
/* Locking in loop failed. */
5:
#if cond_lock == 0
- movl %ebx, %ecx
+ movl %ebx, %edx
#else
- leal cond_lock(%ebx), %ecx
+ leal cond_lock(%ebx), %edx
#endif
- call __lll_mutex_lock_wait
+ /* XYZ */
+ movl $LLL_SHARED, %ecx
+ call __lll_lock_wait
jmp 6b
/* Unlock after loop requires wakeup. */
@@ -237,7 +234,9 @@ __pthread_cond_wait:
#else
leal cond_lock(%ebx), %eax
#endif
- call __lll_mutex_unlock_wake
+ /* XYZ */
+ movl $LLL_SHARED, %ecx
+ call __lll_unlock_wake
jmp 11b
/* The initial unlocking of the mutex failed. */
@@ -257,7 +256,9 @@ __pthread_cond_wait:
#else
leal cond_lock(%ebx), %eax
#endif
- call __lll_mutex_unlock_wake
+ /* XYZ */
+ movl $LLL_SHARED, %ecx
+ call __lll_unlock_wake
movl %esi, %eax
jmp 14b
@@ -287,11 +288,13 @@ __condvar_w_cleanup:
jz 1f
#if cond_lock == 0
- movl %ebx, %ecx
+ movl %ebx, %edx
#else
- leal cond_lock(%ebx), %ecx
+ leal cond_lock(%ebx), %edx
#endif
- call __lll_mutex_lock_wait
+ /* XYZ */
+ movl $LLL_SHARED, %ecx
+ call __lll_lock_wait
1: movl broadcast_seq(%ebx), %eax
cmpl 12(%esp), %eax
@@ -348,7 +351,9 @@ __condvar_w_cleanup:
#else
leal cond_lock(%ebx), %eax
#endif
- call __lll_mutex_unlock_wake
+ /* XYZ */
+ movl $LLL_SHARED, %ecx
+ call __lll_unlock_wake
/* Wake up all waiters to make sure no signal gets lost. */
2: testl %edi, %edi
--- libc/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_signal.S.jj 2005-09-08 19:40:52.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_signal.S 2007-07-30 16:47:18.000000000 +0200
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2004, 2005, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -19,23 +19,10 @@
#include <sysdep.h>
#include <shlib-compat.h>
+#include <lowlevellock.h>
#include <lowlevelcond.h>
#include <kernel-features.h>
-
-#ifdef UP
-# define LOCK
-#else
-# define LOCK lock
-#endif
-
-#define SYS_futex 240
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-#define FUTEX_WAKE_OP 5
-
-#define FUTEX_OP_CLEAR_WAKE_IF_GT_ONE ((4 << 24) | 1)
-
-#define EINVAL 22
+#include <pthread-errnos.h>
.text
@@ -119,17 +106,21 @@ __pthread_cond_signal:
/* Unlock in loop requires wakeup. */
5: movl %edi, %eax
- call __lll_mutex_unlock_wake
+ /* XYZ */
+ movl $LLL_SHARED, %ecx
+ call __lll_unlock_wake
jmp 6b
/* Initial locking failed. */
1:
#if cond_lock == 0
- movl %edi, %ecx
+ movl %edi, %edx
#else
- leal cond_lock(%edi), %ecx
+ leal cond_lock(%edi), %edx
#endif
- call __lll_mutex_lock_wait
+ /* XYZ */
+ movl $LLL_SHARED, %ecx
+ call __lll_lock_wait
jmp 2b
.size __pthread_cond_signal, .-__pthread_cond_signal
--- libc/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_timedwait.S.jj 2007-06-04 08:42:06.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_timedwait.S 2007-07-31 12:23:11.000000000 +0200
@@ -21,16 +21,7 @@
#include <shlib-compat.h>
#include <pthread-errnos.h>
#include <structsem.h>
-
-#ifndef UP
-# define LOCK lock
-#else
-# define
-#endif
-
-#define SYS_gettimeofday __NR_gettimeofday
-#define SYS_futex 240
-#define FUTEX_WAIT 0
+#include <lowlevellock.h>
#if VALUE != 0
@@ -82,7 +73,7 @@ sem_timedwait:
7: xorl %ecx, %ecx
movl %esp, %ebx
movl %ecx, %edx
- movl $SYS_gettimeofday, %eax
+ movl $__NR_gettimeofday, %eax
ENTER_KERNEL
/* Compute relative timeout. */
--- libc/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_wait.S.jj 2007-06-04 08:42:06.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_wait.S 2007-07-31 12:22:45.000000000 +0200
@@ -21,15 +21,7 @@
#include <shlib-compat.h>
#include <pthread-errnos.h>
#include <structsem.h>
-
-#ifndef UP
-# define LOCK lock
-#else
-# define
-#endif
-
-#define SYS_futex 240
-#define FUTEX_WAIT 0
+#include <lowlevellock.h>
#if VALUE != 0
--- libc/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_barrier_wait.S.jj 2007-06-04 08:42:06.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_barrier_wait.S 2007-07-30 16:48:57.000000000 +0200
@@ -18,19 +18,9 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <lowlevelbarrier.h>
-#define SYS_futex 240
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-
-#ifndef UP
-# define LOCK lock
-#else
-# define LOCK
-#endif
-
-
.text
.globl pthread_barrier_wait
@@ -152,19 +142,27 @@ pthread_barrier_wait:
popl %ebx
ret
-1: leal MUTEX(%ebx), %ecx
- call __lll_mutex_lock_wait
+1: movl PRIVATE(%ebx), %ecx
+ leal MUTEX(%ebx), %edx
+ xorl $LLL_SHARED, %ecx
+ call __lll_lock_wait
jmp 2b
-4: leal MUTEX(%ebx), %eax
- call __lll_mutex_unlock_wake
+4: movl PRIVATE(%ebx), %ecx
+ leal MUTEX(%ebx), %eax
+ xorl $LLL_SHARED, %ecx
+ call __lll_unlock_wake
jmp 5b
-6: leal MUTEX(%ebx), %eax
- call __lll_mutex_unlock_wake
+6: movl PRIVATE(%ebx), %ecx
+ leal MUTEX(%ebx), %eax
+ xorl $LLL_SHARED, %ecx
+ call __lll_unlock_wake
jmp 7b
-9: leal MUTEX(%ebx), %eax
- call __lll_mutex_unlock_wake
+9: movl PRIVATE(%ebx), %ecx
+ leal MUTEX(%ebx), %eax
+ xorl $LLL_SHARED, %ecx
+ call __lll_unlock_wake
jmp 10b
.size pthread_barrier_wait,.-pthread_barrier_wait
--- libc/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_timedwait.S.jj 2007-06-04 08:42:06.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_timedwait.S 2007-07-30 17:15:28.000000000 +0200
@@ -19,20 +19,10 @@
#include <sysdep.h>
#include <shlib-compat.h>
+#include <lowlevellock.h>
#include <lowlevelcond.h>
#include <pthread-errnos.h>
-#ifdef UP
-# define LOCK
-#else
-# define LOCK lock
-#endif
-
-#define SYS_gettimeofday __NR_gettimeofday
-#define SYS_futex 240
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-
.text
@@ -127,7 +117,7 @@ __pthread_cond_timedwait:
/* Get the current time. */
leal 4(%esp), %ebx
xorl %ecx, %ecx
- movl $SYS_gettimeofday, %eax
+ movl $__NR_gettimeofday, %eax
ENTER_KERNEL
movl %edx, %ebx
@@ -285,11 +275,13 @@ __pthread_cond_timedwait:
1:
.LSbl1:
#if cond_lock == 0
- movl %ebx, %ecx
+ movl %ebx, %edx
#else
- leal cond_lock(%ebx), %ecx
+ leal cond_lock(%ebx), %edx
#endif
- call __lll_mutex_lock_wait
+ /* XYZ */
+ movl $LLL_SHARED, %ecx
+ call __lll_lock_wait
jmp 2b
/* Unlock in loop requires wakeup. */
@@ -300,17 +292,21 @@ __pthread_cond_timedwait:
#else
leal cond_lock(%ebx), %eax
#endif
- call __lll_mutex_unlock_wake
+ /* XYZ */
+ movl $LLL_SHARED, %ecx
+ call __lll_unlock_wake
jmp 4b
/* Locking in loop failed. */
5:
#if cond_lock == 0
- movl %ebx, %ecx
+ movl %ebx, %edx
#else
- leal cond_lock(%ebx), %ecx
+ leal cond_lock(%ebx), %edx
#endif
- call __lll_mutex_lock_wait
+ /* XYZ */
+ movl $LLL_SHARED, %ecx
+ call __lll_lock_wait
jmp 6b
/* Unlock after loop requires wakeup. */
@@ -320,7 +316,9 @@ __pthread_cond_timedwait:
#else
leal cond_lock(%ebx), %eax
#endif
- call __lll_mutex_unlock_wake
+ /* XYZ */
+ movl $LLL_SHARED, %ecx
+ call __lll_unlock_wake
jmp 11b
/* The initial unlocking of the mutex failed. */
@@ -340,7 +338,9 @@ __pthread_cond_timedwait:
#else
leal cond_lock(%ebx), %eax
#endif
- call __lll_mutex_unlock_wake
+ /* XYZ */
+ movl $LLL_SHARED, %ecx
+ call __lll_unlock_wake
movl %esi, %eax
jmp 18b
@@ -350,7 +350,7 @@ __pthread_cond_timedwait:
.LSbl4:
19: leal 4(%esp), %ebx
xorl %ecx, %ecx
- movl $SYS_gettimeofday, %eax
+ movl $__NR_gettimeofday, %eax
ENTER_KERNEL
movl %edx, %ebx
@@ -396,11 +396,13 @@ __condvar_tw_cleanup:
jz 1f
#if cond_lock == 0
- movl %ebx, %ecx
+ movl %ebx, %edx
#else
- leal cond_lock(%ebx), %ecx
+ leal cond_lock(%ebx), %edx
#endif
- call __lll_mutex_lock_wait
+ /* XYZ */
+ movl $LLL_SHARED, %ecx
+ call __lll_lock_wait
1: movl broadcast_seq(%ebx), %eax
cmpl 20(%esp), %eax
@@ -457,7 +459,9 @@ __condvar_tw_cleanup:
#else
leal cond_lock(%ebx), %eax
#endif
- call __lll_mutex_unlock_wake
+ /* XYZ */
+ movl $LLL_SHARED, %ecx
+ call __lll_unlock_wake
/* Wake up all waiters to make sure no signal gets lost. */
2: testl %edi, %edi
--- libc/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedwrlock.S.jj 2007-07-30 18:10:59.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedwrlock.S 2007-07-30 18:06:27.000000000 +0200
@@ -18,22 +18,11 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <lowlevelrwlock.h>
#include <pthread-errnos.h>
-#define SYS_gettimeofday __NR_gettimeofday
-#define SYS_futex 240
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-
-#ifndef UP
-# define LOCK lock
-#else
-# define LOCK
-#endif
-
-
.text
.globl pthread_rwlock_timedwrlock
@@ -86,7 +75,7 @@ pthread_rwlock_timedwrlock:
/* Get current time. */
11: movl %esp, %ebx
xorl %ecx, %ecx
- movl $SYS_gettimeofday, %eax
+ movl $__NR_gettimeofday, %eax
ENTER_KERNEL
/* Compute relative timeout. */
@@ -140,11 +129,11 @@ pthread_rwlock_timedwrlock:
cmpl $-ETIMEDOUT, %esi
jne 2b
-18: movl $ETIMEDOUT, %ecx
+18: movl $ETIMEDOUT, %edx
jmp 9f
-5: xorl %ecx, %ecx
+5: xorl %edx, %edx
movl %gs:TID, %eax
movl %eax, WRITER(%ebp)
9: LOCK
@@ -155,7 +144,7 @@ pthread_rwlock_timedwrlock:
#endif
jne 6f
-7: movl %ecx, %eax
+7: movl %edx, %eax
addl $8, %esp
popl %ebp
@@ -166,16 +155,17 @@ pthread_rwlock_timedwrlock:
1:
#if MUTEX == 0
- movl %ebp, %ecx
+ movl %ebp, %edx
#else
- leal MUTEX(%ebp), %ecx
+ leal MUTEX(%ebp), %edx
#endif
- call __lll_mutex_lock_wait
+ movl PSHARED(%ebp), %ecx
+ call __lll_lock_wait
jmp 2b
14: cmpl %gs:TID, %eax
jne 3b
-20: movl $EDEADLK, %ecx
+20: movl $EDEADLK, %edx
jmp 9b
6:
@@ -184,12 +174,13 @@ pthread_rwlock_timedwrlock:
#else
leal MUTEX(%ebp), %eax
#endif
- call __lll_mutex_unlock_wake
+ movl PSHARED(%ebp), %ecx
+ call __lll_unlock_wake
jmp 7b
/* Overflow. */
4: subl $1, WRITERS_QUEUED(%ebp)
- movl $EAGAIN, %ecx
+ movl $EAGAIN, %edx
jmp 9b
10:
@@ -198,21 +189,23 @@ pthread_rwlock_timedwrlock:
#else
leal MUTEX(%ebp), %eax
#endif
- call __lll_mutex_unlock_wake
+ movl PSHARED(%ebp), %ecx
+ call __lll_unlock_wake
jmp 11b
12:
#if MUTEX == 0
- movl %ebp, %ecx
+ movl %ebp, %edx
#else
- leal MUTEX(%ebp), %ecx
+ leal MUTEX(%ebp), %edx
#endif
- call __lll_mutex_lock_wait
+ movl PSHARED(%ebp), %ecx
+ call __lll_lock_wait
jmp 13b
16: movl $-ETIMEDOUT, %esi
jmp 17b
-19: movl $EINVAL, %ecx
+19: movl $EINVAL, %edx
jmp 9b
.size pthread_rwlock_timedwrlock,.-pthread_rwlock_timedwrlock
--- libc/nptl/sysdeps/unix/sysv/linux/i386/lowlevellock.h.jj 2007-07-29 12:06:52.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/i386/lowlevellock.h 2007-07-30 19:02:17.000000000 +0200
@@ -20,28 +20,41 @@
#ifndef _LOWLEVELLOCK_H
#define _LOWLEVELLOCK_H 1
-#include <time.h>
-#include <sys/param.h>
-#include <bits/pthreadtypes.h>
-#include <kernel-features.h>
-#include <tcb-offsets.h>
-
-#ifndef LOCK_INSTR
-# ifdef UP
-# define LOCK_INSTR /* nothing */
-# else
-# define LOCK_INSTR "lock;"
+#ifndef __ASSEMBLER__
+# include <time.h>
+# include <sys/param.h>
+# include <bits/pthreadtypes.h>
+# include <kernel-features.h>
+# include <tcb-offsets.h>
+
+# ifndef LOCK_INSTR
+# ifdef UP
+# define LOCK_INSTR /* nothing */
+# else
+# define LOCK_INSTR "lock;"
+# endif
+# endif
+#else
+# ifndef LOCK
+# ifdef UP
+# define LOCK
+# else
+# define LOCK lock
+# endif
# endif
#endif
#define SYS_futex 240
#define FUTEX_WAIT 0
#define FUTEX_WAKE 1
+#define FUTEX_CMP_REQUEUE 4
+#define FUTEX_WAKE_OP 5
#define FUTEX_LOCK_PI 6
#define FUTEX_UNLOCK_PI 7
#define FUTEX_TRYLOCK_PI 8
#define FUTEX_PRIVATE_FLAG 128
+#define FUTEX_OP_CLEAR_WAKE_IF_GT_ONE ((4 << 24) | 1)
/* Values for 'private' parameter of locking macros. Yes, the
definition seems to be backwards. But it is not. The bit will be
@@ -76,11 +89,12 @@
# endif
#endif
+#ifndef __ASSEMBLER__
/* Initializer for compatibility lock. */
-#define LLL_MUTEX_LOCK_INITIALIZER (0)
-#define LLL_MUTEX_LOCK_INITIALIZER_LOCKED (1)
-#define LLL_MUTEX_LOCK_INITIALIZER_WAITERS (2)
+#define LLL_LOCK_INITIALIZER (0)
+#define LLL_LOCK_INITIALIZER_LOCKED (1)
+#define LLL_LOCK_INITIALIZER_WAITERS (2)
#ifdef PIC
@@ -102,7 +116,7 @@
#endif
/* Delay in spinlock loop. */
-#define BUSY_WAIT_NOP asm ("rep; nop")
+#define BUSY_WAIT_NOP asm ("rep; nop")
#define LLL_STUB_UNWIND_INFO_START \
@@ -217,332 +231,309 @@ LLL_STUB_UNWIND_INFO_END
} while (0)
-/* Does not preserve %eax and %ecx. */
-extern int __lll_mutex_lock_wait (int val, int *__futex)
- __attribute ((regparm (2))) attribute_hidden;
-/* Does not preserve %eax, %ecx, and %edx. */
-extern int __lll_mutex_timedlock_wait (int val, int *__futex,
- const struct timespec *abstime)
- __attribute ((regparm (3))) attribute_hidden;
-/* Preserves all registers but %eax. */
-extern int __lll_mutex_unlock_wake (int *__futex)
- __attribute ((regparm (1))) attribute_hidden;
-
-
-/* NB: in the lll_mutex_trylock macro we simply return the value in %eax
+/* NB: in the lll_trylock macro we simply return the value in %eax
after the cmpxchg instruction. In case the operation succeded this
value is zero. In case the operation failed, the cmpxchg instruction
has loaded the current value of the memory work which is guaranteed
to be nonzero. */
-#define lll_mutex_trylock(futex) \
+#if defined NOT_IN_libc || defined UP
+# define __lll_trylock_asm LOCK_INSTR "cmpxchgl %2, %1"
+#else
+# define __lll_trylock_asm "cmpl $0, %%gs:%P5\n\t" \
+ "je 0f\n\t" \
+ "lock\n" \
+ "0:\tcmpxchgl %2, %1"
+#endif
+
+#define lll_trylock(futex) \
({ int ret; \
- __asm __volatile (LOCK_INSTR "cmpxchgl %2, %1" \
+ __asm __volatile (__lll_trylock_asm \
: "=a" (ret), "=m" (futex) \
- : "r" (LLL_MUTEX_LOCK_INITIALIZER_LOCKED), "m" (futex),\
- "0" (LLL_MUTEX_LOCK_INITIALIZER) \
+ : "r" (LLL_LOCK_INITIALIZER_LOCKED), "m" (futex), \
+ "0" (LLL_LOCK_INITIALIZER), \
+ "i" (MULTIPLE_THREADS_OFFSET) \
: "memory"); \
ret; })
-
-#define lll_robust_mutex_trylock(futex, id) \
+#define lll_robust_trylock(futex, id) \
({ int ret; \
__asm __volatile (LOCK_INSTR "cmpxchgl %2, %1" \
: "=a" (ret), "=m" (futex) \
: "r" (id), "m" (futex), \
- "0" (LLL_MUTEX_LOCK_INITIALIZER) \
+ "0" (LLL_LOCK_INITIALIZER) \
: "memory"); \
ret; })
-#define lll_mutex_cond_trylock(futex) \
+#define lll_cond_trylock(futex) \
({ int ret; \
__asm __volatile (LOCK_INSTR "cmpxchgl %2, %1" \
: "=a" (ret), "=m" (futex) \
- : "r" (LLL_MUTEX_LOCK_INITIALIZER_WAITERS), \
- "m" (futex), "0" (LLL_MUTEX_LOCK_INITIALIZER) \
+ : "r" (LLL_LOCK_INITIALIZER_WAITERS), \
+ "m" (futex), "0" (LLL_LOCK_INITIALIZER) \
: "memory"); \
ret; })
+#if defined NOT_IN_libc || defined UP
+# define __lll_lock_asm_start LOCK_INSTR "cmpxchgl %1, %2\n\t"
+#else
+# define __lll_lock_asm_start "cmpl $0, %%gs:%P6\n\t" \
+ "je 0f\n\t" \
+ "lock\n" \
+ "0:\tcmpxchgl %1, %2\n\t"
+#endif
-#define lll_mutex_lock(futex) \
- (void) ({ int ignore1, ignore2; \
- __asm __volatile (LOCK_INSTR "cmpxchgl %1, %2\n\t" \
- "jnz _L_mutex_lock_%=\n\t" \
- ".subsection 1\n\t" \
- ".type _L_mutex_lock_%=,@function\n" \
- "_L_mutex_lock_%=:\n" \
- "1:\tleal %2, %%ecx\n" \
- "2:\tcall __lll_mutex_lock_wait\n" \
- "3:\tjmp 18f\n" \
- "4:\t.size _L_mutex_lock_%=, 4b-1b\n\t" \
- ".previous\n" \
- LLL_STUB_UNWIND_INFO_3 \
- "18:" \
- : "=a" (ignore1), "=c" (ignore2), "=m" (futex) \
- : "0" (0), "1" (1), "m" (futex) \
- : "memory"); })
-
+#define lll_lock(futex, private) \
+ (void) \
+ ({ int ignore1, ignore2; \
+ if (__builtin_constant_p (private) && (private) == LLL_PRIVATE) \
+ __asm __volatile (__lll_lock_asm_start \
+ "jnz _L_lock_%=\n\t" \
+ ".subsection 1\n\t" \
+ ".type _L_lock_%=,@function\n" \
+ "_L_lock_%=:\n" \
+ "1:\tleal %2, %%ecx\n" \
+ "2:\tcall __lll_lock_wait_private\n" \
+ "3:\tjmp 18f\n" \
+ "4:\t.size _L_lock_%=, 4b-1b\n\t" \
+ ".previous\n" \
+ LLL_STUB_UNWIND_INFO_3 \
+ "18:" \
+ : "=a" (ignore1), "=c" (ignore2), "=m" (futex) \
+ : "0" (0), "1" (1), "m" (futex), \
+ "i" (MULTIPLE_THREADS_OFFSET) \
+ : "memory"); \
+ else \
+ { \
+ int ignore3; \
+ __asm __volatile (__lll_lock_asm_start \
+ "jnz _L_lock_%=\n\t" \
+ ".subsection 1\n\t" \
+ ".type _L_lock_%=,@function\n" \
+ "_L_lock_%=:\n" \
+ "1:\tleal %2, %%edx\n" \
+ "0:\tmovl %8, %%ecx\n" \
+ "2:\tcall __lll_lock_wait\n" \
+ "3:\tjmp 18f\n" \
+ "4:\t.size _L_lock_%=, 4b-1b\n\t" \
+ ".previous\n" \
+ LLL_STUB_UNWIND_INFO_4 \
+ "18:" \
+ : "=a" (ignore1), "=c" (ignore2), \
+ "=m" (futex), "=&d" (ignore3) \
+ : "1" (1), "m" (futex), \
+ "i" (MULTIPLE_THREADS_OFFSET), "0" (0), \
+ "g" (private) \
+ : "memory"); \
+ } \
+ })
-#define lll_robust_mutex_lock(futex, id) \
- ({ int result, ignore; \
+#define lll_robust_lock(futex, id, private) \
+ ({ int result, ignore1, ignore2; \
__asm __volatile (LOCK_INSTR "cmpxchgl %1, %2\n\t" \
- "jnz _L_robust_mutex_lock_%=\n\t" \
+ "jnz _L_robust_lock_%=\n\t" \
".subsection 1\n\t" \
- ".type _L_robust_mutex_lock_%=,@function\n" \
- "_L_robust_mutex_lock_%=:\n" \
- "1:\tleal %2, %%ecx\n" \
- "2:\tcall __lll_robust_mutex_lock_wait\n" \
+ ".type _L_robust_lock_%=,@function\n" \
+ "_L_robust_lock_%=:\n" \
+ "1:\tleal %2, %%edx\n" \
+ "0:\tmovl %7, %%ecx\n" \
+ "2:\tcall __lll_robust_lock_wait\n" \
"3:\tjmp 18f\n" \
- "4:\t.size _L_robust_mutex_lock_%=, 4b-1b\n\t" \
+ "4:\t.size _L_robust_lock_%=, 4b-1b\n\t" \
".previous\n" \
- LLL_STUB_UNWIND_INFO_3 \
+ LLL_STUB_UNWIND_INFO_4 \
"18:" \
- : "=a" (result), "=c" (ignore), "=m" (futex) \
- : "0" (0), "1" (id), "m" (futex) \
+ : "=a" (result), "=c" (ignore1), "=m" (futex), \
+ "=&d" (ignore2) \
+ : "0" (0), "1" (id), "m" (futex), "g" (private) \
: "memory"); \
result; })
-/* Special version of lll_mutex_lock which causes the unlock function to
+/* Special version of lll_lock which causes the unlock function to
always wakeup waiters. */
-#define lll_mutex_cond_lock(futex) \
- (void) ({ int ignore1, ignore2; \
- __asm __volatile (LOCK_INSTR "cmpxchgl %1, %2\n\t" \
- "jnz _L_mutex_cond_lock_%=\n\t" \
- ".subsection 1\n\t" \
- ".type _L_mutex_cond_lock_%=,@function\n" \
- "_L_mutex_cond_lock_%=:\n" \
- "1:\tleal %2, %%ecx\n" \
- "2:\tcall __lll_mutex_lock_wait\n" \
- "3:\tjmp 18f\n" \
- "4:\t.size _L_mutex_cond_lock_%=, 4b-1b\n\t" \
- ".previous\n" \
- LLL_STUB_UNWIND_INFO_3 \
- "18:" \
- : "=a" (ignore1), "=c" (ignore2), "=m" (futex) \
- : "0" (0), "1" (2), "m" (futex) \
- : "memory"); })
+#define lll_cond_lock(futex, private) \
+ (void) \
+ ({ int ignore1, ignore2, ignore3; \
+ __asm __volatile (LOCK_INSTR "cmpxchgl %1, %2\n\t" \
+ "jnz _L_cond_lock_%=\n\t" \
+ ".subsection 1\n\t" \
+ ".type _L_cond_lock_%=,@function\n" \
+ "_L_cond_lock_%=:\n" \
+ "1:\tleal %2, %%edx\n" \
+ "0:\tmovl %7, %%ecx\n" \
+ "2:\tcall __lll_lock_wait\n" \
+ "3:\tjmp 18f\n" \
+ "4:\t.size _L_cond_lock_%=, 4b-1b\n\t" \
+ ".previous\n" \
+ LLL_STUB_UNWIND_INFO_4 \
+ "18:" \
+ : "=a" (ignore1), "=c" (ignore2), "=m" (futex), \
+ "=&d" (ignore3) \
+ : "0" (0), "1" (2), "m" (futex), "g" (private) \
+ : "memory"); \
+ })
-#define lll_robust_mutex_cond_lock(futex, id) \
- ({ int result, ignore; \
+#define lll_robust_cond_lock(futex, id, private) \
+ ({ int result, ignore1, ignore2; \
__asm __volatile (LOCK_INSTR "cmpxchgl %1, %2\n\t" \
- "jnz _L_robust_mutex_cond_lock_%=\n\t" \
+ "jnz _L_robust_cond_lock_%=\n\t" \
".subsection 1\n\t" \
- ".type _L_robust_mutex_cond_lock_%=,@function\n" \
- "_L_robust_mutex_cond_lock_%=:\n" \
- "1:\tleal %2, %%ecx\n" \
- "2:\tcall __lll_robust_mutex_lock_wait\n" \
+ ".type _L_robust_cond_lock_%=,@function\n" \
+ "_L_robust_cond_lock_%=:\n" \
+ "1:\tleal %2, %%edx\n" \
+ "0:\tmovl %7, %%ecx\n" \
+ "2:\tcall __lll_robust_lock_wait\n" \
"3:\tjmp 18f\n" \
- "4:\t.size _L_robust_mutex_cond_lock_%=, 4b-1b\n\t" \
+ "4:\t.size _L_robust_cond_lock_%=, 4b-1b\n\t" \
".previous\n" \
- LLL_STUB_UNWIND_INFO_3 \
+ LLL_STUB_UNWIND_INFO_4 \
"18:" \
- : "=a" (result), "=c" (ignore), "=m" (futex) \
- : "0" (0), "1" (id | FUTEX_WAITERS), "m" (futex) \
+ : "=a" (result), "=c" (ignore1), "=m" (futex), \
+ "=&d" (ignore2) \
+ : "0" (0), "1" (id | FUTEX_WAITERS), "m" (futex), \
+ "g" (private) \
: "memory"); \
result; })
-#define lll_mutex_timedlock(futex, timeout) \
- ({ int result, ignore1, ignore2; \
+#define lll_timedlock(futex, timeout, private) \
+ ({ int result, ignore1, ignore2, ignore3; \
__asm __volatile (LOCK_INSTR "cmpxchgl %1, %3\n\t" \
- "jnz _L_mutex_timedlock_%=\n\t" \
+ "jnz _L_timedlock_%=\n\t" \
".subsection 1\n\t" \
- ".type _L_mutex_timedlock_%=,@function\n" \
- "_L_mutex_timedlock_%=:\n" \
+ ".type _L_timedlock_%=,@function\n" \
+ "_L_timedlock_%=:\n" \
"1:\tleal %3, %%ecx\n" \
- "0:\tmovl %7, %%edx\n" \
- "2:\tcall __lll_mutex_timedlock_wait\n" \
+ "0:\tmovl %8, %%edx\n" \
+ "2:\tcall __lll_timedlock_wait\n" \
"3:\tjmp 18f\n" \
- "4:\t.size _L_mutex_timedlock_%=, 4b-1b\n\t" \
+ "4:\t.size _L_timedlock_%=, 4b-1b\n\t" \
".previous\n" \
LLL_STUB_UNWIND_INFO_4 \
"18:" \
: "=a" (result), "=c" (ignore1), "=&d" (ignore2), \
- "=m" (futex) \
- : "0" (0), "1" (1), "m" (futex), "m" (timeout) \
+ "=m" (futex), "=S" (ignore3) \
+ : "0" (0), "1" (1), "m" (futex), "m" (timeout), \
+ "4" (private) \
: "memory"); \
result; })
-#define lll_robust_mutex_timedlock(futex, timeout, id) \
- ({ int result, ignore1, ignore2; \
+#define lll_robust_timedlock(futex, timeout, id, private) \
+ ({ int result, ignore1, ignore2, ignore3; \
__asm __volatile (LOCK_INSTR "cmpxchgl %1, %3\n\t" \
- "jnz _L_robust_mutex_timedlock_%=\n\t" \
+ "jnz _L_robust_timedlock_%=\n\t" \
".subsection 1\n\t" \
- ".type _L_robust_mutex_timedlock_%=,@function\n" \
- "_L_robust_mutex_timedlock_%=:\n" \
+ ".type _L_robust_timedlock_%=,@function\n" \
+ "_L_robust_timedlock_%=:\n" \
"1:\tleal %3, %%ecx\n" \
- "0:\tmovl %7, %%edx\n" \
- "2:\tcall __lll_robust_mutex_timedlock_wait\n" \
+ "0:\tmovl %8, %%edx\n" \
+ "2:\tcall __lll_robust_timedlock_wait\n" \
"3:\tjmp 18f\n" \
- "4:\t.size _L_robust_mutex_timedlock_%=, 4b-1b\n\t" \
+ "4:\t.size _L_robust_timedlock_%=, 4b-1b\n\t" \
".previous\n" \
LLL_STUB_UNWIND_INFO_4 \
"18:" \
: "=a" (result), "=c" (ignore1), "=&d" (ignore2), \
- "=m" (futex) \
- : "0" (0), "1" (id), "m" (futex), "m" (timeout) \
+ "=m" (futex), "=S" (ignore3) \
+ : "0" (0), "1" (id), "m" (futex), "m" (timeout), \
+ "4" (private) \
: "memory"); \
result; })
-
-#define lll_mutex_unlock(futex) \
- (void) ({ int ignore; \
- __asm __volatile (LOCK_INSTR "subl $1, %0\n\t" \
- "jne _L_mutex_unlock_%=\n\t" \
- ".subsection 1\n\t" \
- ".type _L_mutex_unlock_%=,@function\n" \
- "_L_mutex_unlock_%=:\n" \
- "1:\tleal %0, %%eax\n" \
- "2:\tcall __lll_mutex_unlock_wake\n" \
- "3:\tjmp 18f\n" \
- "4:\t.size _L_mutex_unlock_%=, 4b-1b\n\t" \
- ".previous\n" \
- LLL_STUB_UNWIND_INFO_3 \
- "18:" \
- : "=m" (futex), "=&a" (ignore) \
- : "m" (futex) \
- : "memory"); })
-
-
-#define lll_robust_mutex_unlock(futex) \
- (void) ({ int ignore; \
- __asm __volatile (LOCK_INSTR "andl %2, %0\n\t" \
- "jne _L_robust_mutex_unlock_%=\n\t" \
- ".subsection 1\n\t" \
- ".type _L_robust_mutex_unlock_%=,@function\n" \
- "_L_robust_mutex_unlock_%=:\n\t" \
- "1:\tleal %0, %%eax\n" \
- "2:\tcall __lll_mutex_unlock_wake\n" \
- "3:\tjmp 18f\n" \
- "4:\t.size _L_robust_mutex_unlock_%=, 4b-1b\n\t"\
- ".previous\n" \
- LLL_STUB_UNWIND_INFO_3 \
- "18:" \
- : "=m" (futex), "=&a" (ignore) \
- : "i" (FUTEX_WAITERS), "m" (futex) \
- : "memory"); })
-
-
-#define lll_robust_mutex_dead(futex) \
- (void) ({ int __ignore; \
- register int _nr asm ("edx") = 1; \
- __asm __volatile (LOCK_INSTR "orl %5, (%2)\n\t" \
- LLL_EBX_LOAD \
- LLL_ENTER_KERNEL \
- LLL_EBX_LOAD \
- : "=a" (__ignore) \
- : "0" (SYS_futex), LLL_EBX_REG (&(futex)), \
- "c" (FUTEX_WAKE), "d" (_nr), \
- "i" (FUTEX_OWNER_DIED), \
- "i" (offsetof (tcbhead_t, sysinfo))); })
-
-
-#define lll_mutex_islocked(futex) \
- (futex != 0)
-
-
-/* We have a separate internal lock implementation which is not tied
- to binary compatibility. */
-
-/* Type for lock object. */
-typedef int lll_lock_t;
-
-/* Initializers for lock. */
-#define LLL_LOCK_INITIALIZER (0)
-#define LLL_LOCK_INITIALIZER_LOCKED (1)
-
-
-extern int __lll_lock_wait (int val, int *__futex)
- __attribute ((regparm (2))) attribute_hidden;
-extern int __lll_unlock_wake (int *__futex)
- __attribute ((regparm (1))) attribute_hidden;
-
-
-/* The states of a lock are:
- 0 - untaken
- 1 - taken by one user
- 2 - taken by more users */
-
-
#if defined NOT_IN_libc || defined UP
-# define lll_trylock(futex) lll_mutex_trylock (futex)
-# define lll_lock(futex) lll_mutex_lock (futex)
-# define lll_unlock(futex) lll_mutex_unlock (futex)
+# define __lll_unlock_asm LOCK_INSTR "subl $1, %0\n\t"
#else
-/* Special versions of the macros for use in libc itself. They avoid
- the lock prefix when the thread library is not used.
-
- XXX In future we might even want to avoid it on UP machines. */
-# include <tls.h>
-
-# define lll_trylock(futex) \
- ({ unsigned char ret; \
- __asm __volatile ("cmpl $0, %%gs:%P5\n\t" \
- "je 0f\n\t" \
- "lock\n" \
- "0:\tcmpxchgl %2, %1; setne %0" \
- : "=a" (ret), "=m" (futex) \
- : "r" (LLL_MUTEX_LOCK_INITIALIZER_LOCKED), "m" (futex),\
- "0" (LLL_MUTEX_LOCK_INITIALIZER), \
- "i" (offsetof (tcbhead_t, multiple_threads)) \
- : "memory"); \
- ret; })
-
-
-# define lll_lock(futex) \
- (void) ({ int ignore1, ignore2; \
- __asm __volatile ("cmpl $0, %%gs:%P6\n\t" \
- "je 0f\n\t" \
- "lock\n" \
- "0:\tcmpxchgl %1, %2\n\t" \
- "jnz _L_lock_%=\n\t" \
- ".subsection 1\n\t" \
- ".type _L_lock_%=,@function\n" \
- "_L_lock_%=:\n" \
- "1:\tleal %2, %%ecx\n" \
- "2:\tcall __lll_mutex_lock_wait\n" \
- "3:\tjmp 18f\n" \
- "4:\t.size _L_lock_%=, 4b-1b\n\t" \
- ".previous\n" \
- LLL_STUB_UNWIND_INFO_3 \
- "18:" \
- : "=a" (ignore1), "=c" (ignore2), "=m" (futex) \
- : "0" (0), "1" (1), "m" (futex), \
- "i" (offsetof (tcbhead_t, multiple_threads)) \
- : "memory"); })
-
-
-# define lll_unlock(futex) \
- (void) ({ int ignore; \
- __asm __volatile ("cmpl $0, %%gs:%P3\n\t" \
- "je 0f\n\t" \
- "lock\n" \
- "0:\tsubl $1,%0\n\t" \
- "jne _L_unlock_%=\n\t" \
- ".subsection 1\n\t" \
- ".type _L_unlock_%=,@function\n" \
- "_L_unlock_%=:\n" \
- "1:\tleal %0, %%eax\n" \
- "2:\tcall __lll_mutex_unlock_wake\n" \
- "3:\tjmp 18f\n\t" \
- "4:\t.size _L_unlock_%=, 4b-1b\n\t" \
- ".previous\n" \
- LLL_STUB_UNWIND_INFO_3 \
- "18:" \
- : "=m" (futex), "=&a" (ignore) \
- : "m" (futex), \
- "i" (offsetof (tcbhead_t, multiple_threads)) \
- : "memory"); })
+# define __lll_unlock_asm "cmpl $0, %%gs:%P3\n\t" \
+ "je 0f\n\t" \
+ "lock\n" \
+ "0:\tsubl $1,%0\n\t"
#endif
+#define lll_unlock(futex, private) \
+ (void) \
+ ({ int ignore; \
+ if (__builtin_constant_p (private) && (private) == LLL_PRIVATE) \
+ __asm __volatile (__lll_unlock_asm \
+ "jne _L_unlock_%=\n\t" \
+ ".subsection 1\n\t" \
+ ".type _L_unlock_%=,@function\n" \
+ "_L_unlock_%=:\n" \
+ "1:\tleal %0, %%eax\n" \
+ "2:\tcall __lll_unlock_wake_private\n" \
+ "3:\tjmp 18f\n" \
+ "4:\t.size _L_unlock_%=, 4b-1b\n\t" \
+ ".previous\n" \
+ LLL_STUB_UNWIND_INFO_3 \
+ "18:" \
+ : "=m" (futex), "=&a" (ignore) \
+ : "m" (futex), "i" (MULTIPLE_THREADS_OFFSET) \
+ : "memory"); \
+ else \
+ { \
+ int ignore2; \
+ __asm __volatile (__lll_unlock_asm \
+ "jne _L_unlock_%=\n\t" \
+ ".subsection 1\n\t" \
+ ".type _L_unlock_%=,@function\n" \
+ "_L_unlock_%=:\n" \
+ "1:\tleal %0, %%eax\n" \
+ "0:\tmovl %5, %%ecx\n" \
+ "2:\tcall __lll_unlock_wake\n" \
+ "3:\tjmp 18f\n" \
+ "4:\t.size _L_unlock_%=, 4b-1b\n\t" \
+ ".previous\n" \
+ LLL_STUB_UNWIND_INFO_4 \
+ "18:" \
+ : "=m" (futex), "=&a" (ignore), "=&c" (ignore2) \
+ : "i" (MULTIPLE_THREADS_OFFSET), "m" (futex), \
+ "g" (private) \
+ : "memory"); \
+ } \
+ })
+
+#define lll_robust_unlock(futex, private) \
+ (void) \
+ ({ int ignore, ignore2; \
+ __asm __volatile (LOCK_INSTR "andl %3, %0\n\t" \
+ "jne _L_robust_unlock_%=\n\t" \
+ ".subsection 1\n\t" \
+ ".type _L_robust_unlock_%=,@function\n" \
+ "_L_robust_unlock_%=:\n\t" \
+ "1:\tleal %0, %%eax\n" \
+ "0:\tmovl %5, %%ecx\n" \
+ "2:\tcall __lll_unlock_wake\n" \
+ "3:\tjmp 18f\n" \
+ "4:\t.size _L_robust_unlock_%=, 4b-1b\n\t" \
+ ".previous\n" \
+ LLL_STUB_UNWIND_INFO_4 \
+ "18:" \
+ : "=m" (futex), "=&a" (ignore), "=&c" (ignore2) \
+ : "i" (FUTEX_WAITERS), "m" (futex), "g" (private) \
+ : "memory"); \
+ })
+
+
+#define lll_robust_dead(futex, private) \
+ (void) \
+ ({ int __ignore; \
+ register int _nr asm ("edx") = 1; \
+ __asm __volatile (LOCK_INSTR "orl %5, (%2)\n\t" \
+ LLL_EBX_LOAD \
+ LLL_ENTER_KERNEL \
+ LLL_EBX_LOAD \
+ : "=a" (__ignore) \
+ : "0" (SYS_futex), LLL_EBX_REG (&(futex)), \
+ "c" (__lll_private_flag (FUTEX_WAKE, private)), \
+ "d" (_nr), "i" (FUTEX_OWNER_DIED), \
+ "i" (offsetof (tcbhead_t, sysinfo))); \
+ })
#define lll_islocked(futex) \
(futex != LLL_LOCK_INITIALIZER)
-
/* The kernel notifies a process with uses CLONE_CLEARTID via futex
wakeup when the clone terminates. The memory location contains the
thread ID while the clone is running and is reset to zero
@@ -581,28 +572,6 @@ extern int __lll_timedwait_tid (int *tid
} \
__result; })
-
-/* Conditional variable handling. */
-
-extern void __lll_cond_wait (pthread_cond_t *cond)
- __attribute ((regparm (1))) attribute_hidden;
-extern int __lll_cond_timedwait (pthread_cond_t *cond,
- const struct timespec *abstime)
- __attribute ((regparm (2))) attribute_hidden;
-extern void __lll_cond_wake (pthread_cond_t *cond)
- __attribute ((regparm (1))) attribute_hidden;
-extern void __lll_cond_broadcast (pthread_cond_t *cond)
- __attribute ((regparm (1))) attribute_hidden;
-
-
-#define lll_cond_wait(cond) \
- __lll_cond_wait (cond)
-#define lll_cond_timedwait(cond, abstime) \
- __lll_cond_timedwait (cond, abstime)
-#define lll_cond_wake(cond) \
- __lll_cond_wake (cond)
-#define lll_cond_broadcast(cond) \
- __lll_cond_broadcast (cond)
-
+#endif /* !__ASSEMBLER__ */
#endif /* lowlevellock.h */
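
For review purposes, a minimal standalone sketch of what the LLL_PRIVATE/LLL_SHARED encoding above buys (not part of the patch; the helper name is invented, and the sketch assumes a kernel where private futexes are always usable, i.e. the __ASSUME_PRIVATE_FUTEX case -- on older kernels the runtime-detected PRIVATE_FUTEX mask has to be applied as well, which is what the assembler stubs further down do):

#define FUTEX_WAKE           1
#define FUTEX_PRIVATE_FLAG   128
#define LLL_PRIVATE          0
#define LLL_SHARED           FUTEX_PRIVATE_FLAG

/* LLL_PRIVATE keeps FUTEX_PRIVATE_FLAG set, LLL_SHARED cancels it out
   again, which is why the encoding looks "backwards" at first sight.  */
#define example_private_flag(op, private) \
  (((op) | FUTEX_PRIVATE_FLAG) ^ (private))

/* example_private_flag (FUTEX_WAKE, LLL_PRIVATE) == 129
     (FUTEX_WAKE | FUTEX_PRIVATE_FLAG, i.e. a private wakeup)
   example_private_flag (FUTEX_WAKE, LLL_SHARED)  == 1
     (plain FUTEX_WAKE, i.e. a shared wakeup)  */

With this encoding the conversion is branch free, which is what the LOAD_FUTEX_* assembler macros rely on.
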
--- libc/nptl/sysdeps/unix/sysv/linux/sem_post.c.jj 2007-06-08 09:13:52.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/sem_post.c 2007-07-29 11:48:55.000000000 +0200
@@ -36,8 +36,7 @@ __new_sem_post (sem_t *sem)
if (isem->nwaiters > 0)
{
int err = lll_futex_wake (&isem->value, 1,
- // XYZ check mutex flag
- LLL_SHARED);
+ isem->private ^ FUTEX_PRIVATE_FLAG);
if (__builtin_expect (err, 0) < 0)
{
__set_errno (-err);
--- libc/nptl/sysdeps/unix/sysv/linux/lowlevelrobustlock.c.jj 2007-06-08 09:13:52.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/lowlevelrobustlock.c 2007-07-29 11:48:55.000000000 +0200
@@ -25,7 +25,7 @@
int
-__lll_robust_lock_wait (int *futex)
+__lll_robust_lock_wait (int *futex, int private)
{
int oldval = *futex;
int tid = THREAD_GETMEM (THREAD_SELF, tid);
@@ -44,9 +44,7 @@ __lll_robust_lock_wait (int *futex)
&& atomic_compare_and_exchange_bool_acq (futex, newval, oldval))
continue;
- lll_futex_wait (futex, newval,
- // XYZ check mutex flag
- LLL_SHARED);
+ lll_futex_wait (futex, newval, private);
try:
;
@@ -59,7 +57,8 @@ __lll_robust_lock_wait (int *futex)
int
-__lll_robust_timedlock_wait (int *futex, const struct timespec *abstime)
+__lll_robust_timedlock_wait (int *futex, const struct timespec *abstime,
+ int private)
{
/* Reject invalid timeouts. */
if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
@@ -102,9 +101,7 @@ __lll_robust_timedlock_wait (int *futex,
&& atomic_compare_and_exchange_bool_acq (futex, newval, oldval))
continue;
- lll_futex_timed_wait (futex, newval, &rt,
- // XYZ check mutex flag
- LLL_SHARED);
+ lll_futex_timed_wait (futex, newval, &rt, private);
try:
;
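
A quick refresher, since the robust paths now also carry the private flag: the futex word of a robust lock holds the owner's TID plus two high flag bits, and the wait loops above only make sure FUTEX_WAITERS is set before going to sleep.  Rough sketch of the layout (illustration only; FUTEX_TID_MASK is the kernel's name for the remaining bits and is not used in this patch, the helper is invented):

/* Robust lock word, as manipulated by __lll_robust_lock_wait above.  */
#define FUTEX_WAITERS    0x80000000  /* at least one thread may be blocked */
#define FUTEX_OWNER_DIED 0x40000000  /* previous owner exited with the lock */
#define FUTEX_TID_MASK   0x3fffffff  /* TID of the current owner */

/* 0 means unlocked; otherwise the owner's TID, with FUTEX_WAITERS OR'ed
   in once somebody had to sleep on the lock.  */
static inline int
example_robust_word (int owner_tid, int contended)
{
  return owner_tid | (contended ? FUTEX_WAITERS : 0);
}
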
--- libc/nptl/sysdeps/unix/sysv/linux/register-atfork.c.jj 2005-12-21 23:17:21.000000000 +0100
+++ libc/nptl/sysdeps/unix/sysv/linux/register-atfork.c 2007-07-29 11:48:55.000000000 +0200
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2005, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -24,7 +24,7 @@
/* Lock to protect allocation and deallocation of fork handlers. */
-lll_lock_t __fork_lock = LLL_LOCK_INITIALIZER;
+int __fork_lock = LLL_LOCK_INITIALIZER;
/* Number of pre-allocated handler entries. */
@@ -85,7 +85,7 @@ __register_atfork (prepare, parent, chil
void *dso_handle;
{
/* Get the lock to not conflict with other allocations. */
- lll_lock (__fork_lock);
+ lll_lock (__fork_lock, LLL_PRIVATE);
struct fork_handler *newp = fork_handler_alloc ();
@@ -102,7 +102,7 @@ __register_atfork (prepare, parent, chil
}
/* Release the lock. */
- lll_unlock (__fork_lock);
+ lll_unlock (__fork_lock, LLL_PRIVATE);
return newp == NULL ? ENOMEM : 0;
}
@@ -112,7 +112,7 @@ libc_hidden_def (__register_atfork)
libc_freeres_fn (free_mem)
{
/* Get the lock to not conflict with running forks. */
- lll_lock (__fork_lock);
+ lll_lock (__fork_lock, LLL_PRIVATE);
/* No more fork handlers. */
__fork_handlers = NULL;
@@ -123,7 +123,7 @@ libc_freeres_fn (free_mem)
memset (&fork_handler_pool, '\0', sizeof (fork_handler_pool));
/* Release the lock. */
- lll_unlock (__fork_lock);
+ lll_unlock (__fork_lock, LLL_PRIVATE);
/* We can free the memory after releasing the lock. */
while (runp != NULL)
--- libc/nptl/sysdeps/unix/sysv/linux/s390/lowlevellock.h.jj 2007-07-29 11:45:14.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/s390/lowlevellock.h 2007-07-30 22:05:42.000000000 +0200
@@ -68,9 +68,6 @@
# endif
#endif
-/* Initializer for compatibility lock. */
-#define LLL_MUTEX_LOCK_INITIALIZER (0)
-
#define lll_futex_wait(futex, val, private) \
lll_futex_timed_wait (futex, val, NULL, private)
@@ -108,13 +105,13 @@
})
-#define lll_robust_mutex_dead(futexv) \
+#define lll_robust_dead(futexv, private) \
do \
{ \
int *__futexp = &(futexv); \
\
atomic_or (__futexp, FUTEX_OWNER_DIED); \
- lll_futex_wake (__futexp, 1, LLL_SHARED); \
+ lll_futex_wake (__futexp, 1, private); \
} \
while (0)
@@ -175,7 +172,7 @@
static inline int
__attribute__ ((always_inline))
-__lll_mutex_trylock (int *futex)
+__lll_trylock (int *futex)
{
unsigned int old;
@@ -184,12 +181,12 @@ __lll_mutex_trylock (int *futex)
: "0" (0), "d" (1), "m" (*futex) : "cc", "memory" );
return old != 0;
}
-#define lll_mutex_trylock(futex) __lll_mutex_trylock (&(futex))
+#define lll_trylock(futex) __lll_trylock (&(futex))
static inline int
__attribute__ ((always_inline))
-__lll_mutex_cond_trylock (int *futex)
+__lll_cond_trylock (int *futex)
{
unsigned int old;
@@ -198,12 +195,12 @@ __lll_mutex_cond_trylock (int *futex)
: "0" (0), "d" (2), "m" (*futex) : "cc", "memory" );
return old != 0;
}
-#define lll_mutex_cond_trylock(futex) __lll_mutex_cond_trylock (&(futex))
+#define lll_cond_trylock(futex) __lll_cond_trylock (&(futex))
static inline int
__attribute__ ((always_inline))
-__lll_robust_mutex_trylock (int *futex, int id)
+__lll_robust_trylock (int *futex, int id)
{
unsigned int old;
@@ -212,141 +209,121 @@ __lll_robust_mutex_trylock (int *futex,
: "0" (0), "d" (id), "m" (*futex) : "cc", "memory" );
return old != 0;
}
-#define lll_robust_mutex_trylock(futex, id) \
- __lll_robust_mutex_trylock (&(futex), id)
+#define lll_robust_trylock(futex, id) \
+ __lll_robust_trylock (&(futex), id)
-extern void __lll_lock_wait (int *futex) attribute_hidden;
-extern int __lll_robust_lock_wait (int *futex) attribute_hidden;
+extern void __lll_lock_wait_private (int *futex) attribute_hidden;
+extern void __lll_lock_wait (int *futex, int private) attribute_hidden;
+extern int __lll_robust_lock_wait (int *futex, int private) attribute_hidden;
static inline void
__attribute__ ((always_inline))
-__lll_mutex_lock (int *futex)
+__lll_lock (int *futex, int private)
{
- if (atomic_compare_and_exchange_bool_acq (futex, 1, 0) != 0)
- __lll_lock_wait (futex);
+ if (__builtin_expect (atomic_compare_and_exchange_bool_acq (futex, 1, 0), 0))
+ {
+ if (__builtin_constant_p (private) && private == LLL_PRIVATE)
+ __lll_lock_wait_private (futex);
+ else
+ __lll_lock_wait (futex, private);
+ }
}
-#define lll_mutex_lock(futex) __lll_mutex_lock (&(futex))
+#define lll_lock(futex, private) __lll_lock (&(futex), private)
static inline int
__attribute__ ((always_inline))
-__lll_robust_mutex_lock (int *futex, int id)
+__lll_robust_lock (int *futex, int id, int private)
{
int result = 0;
- if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0)
- result = __lll_robust_lock_wait (futex);
+ if (__builtin_expect (atomic_compare_and_exchange_bool_acq (futex, id, 0),
+ 0))
+ result = __lll_robust_lock_wait (futex, private);
return result;
}
-#define lll_robust_mutex_lock(futex, id) __lll_robust_mutex_lock (&(futex), id)
+#define lll_robust_lock(futex, id, private) \
+ __lll_robust_lock (&(futex), id, private)
static inline void
__attribute__ ((always_inline))
-__lll_mutex_cond_lock (int *futex)
+__lll_cond_lock (int *futex, int private)
{
- if (atomic_compare_and_exchange_bool_acq (futex, 2, 0) != 0)
- __lll_lock_wait (futex);
+ if (__builtin_expect (atomic_compare_and_exchange_bool_acq (futex, 2, 0), 0))
+ __lll_lock_wait (futex, private);
}
-#define lll_mutex_cond_lock(futex) __lll_mutex_cond_lock (&(futex))
+#define lll_cond_lock(futex, private) __lll_cond_lock (&(futex), private)
-#define lll_robust_mutex_cond_lock(futex, id) \
- __lll_robust_mutex_lock (&(futex), (id) | FUTEX_WAITERS)
+#define lll_robust_cond_lock(futex, id, private) \
+ __lll_robust_lock (&(futex), (id) | FUTEX_WAITERS, private)
extern int __lll_timedlock_wait
- (int *futex, const struct timespec *) attribute_hidden;
+ (int *futex, const struct timespec *, int private) attribute_hidden;
extern int __lll_robust_timedlock_wait
- (int *futex, const struct timespec *) attribute_hidden;
+ (int *futex, const struct timespec *, int private) attribute_hidden;
static inline int
__attribute__ ((always_inline))
-__lll_mutex_timedlock (int *futex, const struct timespec *abstime)
+__lll_timedlock (int *futex, const struct timespec *abstime, int private)
{
int result = 0;
- if (atomic_compare_and_exchange_bool_acq (futex, 1, 0) != 0)
- result = __lll_timedlock_wait (futex, abstime);
+ if (__builtin_expect (atomic_compare_and_exchange_bool_acq (futex, 1, 0), 0))
+ result = __lll_timedlock_wait (futex, abstime, private);
return result;
}
-#define lll_mutex_timedlock(futex, abstime) \
- __lll_mutex_timedlock (&(futex), abstime)
+#define lll_timedlock(futex, abstime, private) \
+ __lll_timedlock (&(futex), abstime, private)
static inline int
__attribute__ ((always_inline))
-__lll_robust_mutex_timedlock (int *futex, const struct timespec *abstime,
- int id)
+__lll_robust_timedlock (int *futex, const struct timespec *abstime,
+ int id, int private)
{
int result = 0;
- if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0)
- result = __lll_robust_timedlock_wait (futex, abstime);
+ if (__builtin_expect (atomic_compare_and_exchange_bool_acq (futex, id, 0),
+ 0))
+ result = __lll_robust_timedlock_wait (futex, abstime, private);
return result;
}
-#define lll_robust_mutex_timedlock(futex, abstime, id) \
- __lll_robust_mutex_timedlock (&(futex), abstime, id)
+#define lll_robust_timedlock(futex, abstime, id, private) \
+ __lll_robust_timedlock (&(futex), abstime, id, private)
static inline void
__attribute__ ((always_inline))
-__lll_mutex_unlock (int *futex)
+__lll_unlock (int *futex, int private)
{
int oldval;
int newval = 0;
lll_compare_and_swap (futex, oldval, newval, "slr %2,%2");
- if (oldval > 1)
- lll_futex_wake (futex, 1, LLL_SHARED);
+ if (__builtin_expect (oldval > 1, 0))
+ lll_futex_wake (futex, 1, private);
}
-#define lll_mutex_unlock(futex) \
- __lll_mutex_unlock(&(futex))
+#define lll_unlock(futex, private) __lll_unlock(&(futex), private)
static inline void
__attribute__ ((always_inline))
-__lll_robust_mutex_unlock (int *futex, int mask)
+__lll_robust_unlock (int *futex, int private)
{
int oldval;
int newval = 0;
lll_compare_and_swap (futex, oldval, newval, "slr %2,%2");
- if (oldval & mask)
- lll_futex_wake (futex, 1, LLL_SHARED);
+ if (__builtin_expect (oldval & FUTEX_WAITERS, 0))
+ lll_futex_wake (futex, 1, private);
}
-#define lll_robust_mutex_unlock(futex) \
- __lll_robust_mutex_unlock(&(futex), FUTEX_WAITERS)
-
+#define lll_robust_unlock(futex, private) \
+ __lll_robust_unlock(&(futex), private)
-static inline void
-__attribute__ ((always_inline))
-__lll_mutex_unlock_force (int *futex)
-{
- *futex = 0;
- lll_futex_wake (futex, 1, LLL_SHARED);
-}
-#define lll_mutex_unlock_force(futex) \
- __lll_mutex_unlock_force(&(futex))
-
-#define lll_mutex_islocked(futex) \
+#define lll_islocked(futex) \
(futex != 0)
-/* We have a separate internal lock implementation which is not tied
- to binary compatibility. We can use the lll_mutex_*. */
-
-/* Type for lock object. */
-typedef int lll_lock_t;
-
/* Initializers for lock. */
#define LLL_LOCK_INITIALIZER (0)
#define LLL_LOCK_INITIALIZER_LOCKED (1)
-#define lll_trylock(futex) lll_mutex_trylock (futex)
-#define lll_lock(futex) lll_mutex_lock (futex)
-#define lll_unlock(futex) lll_mutex_unlock (futex)
-#define lll_islocked(futex) lll_mutex_islocked (futex)
-
-/* The states of a lock are:
- 1 - untaken
- 0 - taken by one user
- <0 - taken by more users */
-
-
/* The kernel notifies a process with uses CLONE_CLEARTID via futex
wakeup when the clone terminates. The memory location contains the
thread ID while the clone is running and is reset to zero
@@ -373,25 +350,4 @@ extern int __lll_timedwait_tid (int *, c
__res; \
})
-/* Conditional variable handling. */
-
-extern void __lll_cond_wait (pthread_cond_t *cond)
- attribute_hidden;
-extern int __lll_cond_timedwait (pthread_cond_t *cond,
- const struct timespec *abstime)
- attribute_hidden;
-extern void __lll_cond_wake (pthread_cond_t *cond)
- attribute_hidden;
-extern void __lll_cond_broadcast (pthread_cond_t *cond)
- attribute_hidden;
-
-#define lll_cond_wait(cond) \
- __lll_cond_wait (cond)
-#define lll_cond_timedwait(cond, abstime) \
- __lll_cond_timedwait (cond, abstime)
-#define lll_cond_wake(cond) \
- __lll_cond_wake (cond)
-#define lll_cond_broadcast(cond) \
- __lll_cond_broadcast (cond)
-
#endif /* lowlevellock.h */
--- libc/nptl/sysdeps/unix/sysv/linux/powerpc/sem_post.c.jj 2007-07-24 10:50:54.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/powerpc/sem_post.c 2007-07-29 11:48:55.000000000 +0200
@@ -29,11 +29,37 @@
int
__new_sem_post (sem_t *sem)
{
+ struct new_sem *isem = (struct new_sem *) sem;
+
+ __asm __volatile (__lll_rel_instr ::: "memory");
+ atomic_increment (&isem->value);
+ __asm __volatile (__lll_acq_instr ::: "memory");
+ if (isem->nwaiters > 0)
+ {
+ int err = lll_futex_wake (&isem->value, 1,
+ isem->private ^ FUTEX_PRIVATE_FLAG);
+ if (__builtin_expect (err, 0) < 0)
+ {
+ __set_errno (-err);
+ return -1;
+ }
+ }
+ return 0;
+}
+versioned_symbol (libpthread, __new_sem_post, sem_post, GLIBC_2_1);
+
+#if SHLIB_COMPAT (libpthread, GLIBC_2_0, GLIBC_2_1)
+
+int
+attribute_compat_text_section
+__old_sem_post (sem_t *sem)
+{
int *futex = (int *) sem;
__asm __volatile (__lll_rel_instr ::: "memory");
int nr = atomic_increment_val (futex);
- int err = lll_futex_wake (futex, nr, LLL_SHARED);
+ /* We always have to assume it is a shared semaphore. */
+ int err = lll_futex_wake (futex, 1, LLL_SHARED);
if (__builtin_expect (err, 0) < 0)
{
__set_errno (-err);
@@ -41,8 +67,6 @@ __new_sem_post (sem_t *sem)
}
return 0;
}
-versioned_symbol (libpthread, __new_sem_post, sem_post, GLIBC_2_1);
-#if SHLIB_COMPAT (libpthread, GLIBC_2_0, GLIBC_2_1)
-strong_alias (__new_sem_post, __old_sem_post)
+
compat_symbol (libpthread, __old_sem_post, sem_post, GLIBC_2_0);
#endif
--- libc/nptl/sysdeps/unix/sysv/linux/powerpc/lowlevellock.h.jj 2007-07-29 11:45:14.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/powerpc/lowlevellock.h 2007-07-29 11:48:55.000000000 +0200
@@ -69,9 +69,6 @@
# endif
#endif
-/* Initializer for compatibility lock. */
-#define LLL_MUTEX_LOCK_INITIALIZER (0)
-
#define lll_futex_wait(futexp, val, private) \
lll_futex_timed_wait (futexp, val, NULL, private)
@@ -97,14 +94,15 @@
INTERNAL_SYSCALL_ERROR_P (__ret, __err) ? -__ret : __ret; \
})
-#define lll_robust_mutex_dead(futexv) \
+#define lll_robust_dead(futexv, private) \
do \
{ \
INTERNAL_SYSCALL_DECL (__err); \
int *__futexp = &(futexv); \
\
atomic_or (__futexp, FUTEX_OWNER_DIED); \
- INTERNAL_SYSCALL (futex, __err, 4, __futexp, FUTEX_WAKE, 1, 0); \
+ INTERNAL_SYSCALL (futex, __err, 4, __futexp, \
+ __lll_private_flag (FUTEX_WAKE, private), 1, 0); \
} \
while (0)
@@ -171,119 +169,111 @@
__val; \
})
-#define lll_robust_mutex_trylock(lock, id) __lll_robust_trylock (&(lock), id)
+#define lll_robust_trylock(lock, id) __lll_robust_trylock (&(lock), id)
/* Set *futex to 1 if it is 0, atomically. Returns the old value */
#define __lll_trylock(futex) __lll_robust_trylock (futex, 1)
-#define lll_mutex_trylock(lock) __lll_trylock (&(lock))
+#define lll_trylock(lock) __lll_trylock (&(lock))
/* Set *futex to 2 if it is 0, atomically. Returns the old value */
#define __lll_cond_trylock(futex) __lll_robust_trylock (futex, 2)
-#define lll_mutex_cond_trylock(lock) __lll_cond_trylock (&(lock))
+#define lll_cond_trylock(lock) __lll_cond_trylock (&(lock))
-extern void __lll_lock_wait (int *futex) attribute_hidden;
-extern int __lll_robust_lock_wait (int *futex) attribute_hidden;
+extern void __lll_lock_wait_private (int *futex) attribute_hidden;
+extern void __lll_lock_wait (int *futex, int private) attribute_hidden;
+extern int __lll_robust_lock_wait (int *futex, int private) attribute_hidden;
-#define lll_mutex_lock(lock) \
+#define lll_lock(lock, private) \
(void) ({ \
int *__futex = &(lock); \
if (__builtin_expect (atomic_compare_and_exchange_val_acq (__futex, 1, 0),\
0) != 0) \
- __lll_lock_wait (__futex); \
+ { \
+ if (__builtin_constant_p (private) && (private) == LLL_PRIVATE) \
+ __lll_lock_wait_private (__futex); \
+ else \
+ __lll_lock_wait (__futex, private); \
+ } \
})
-#define lll_robust_mutex_lock(lock, id) \
+#define lll_robust_lock(lock, id, private) \
({ \
int *__futex = &(lock); \
int __val = 0; \
if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, id, \
0), 0)) \
- __val = __lll_robust_lock_wait (__futex); \
+ __val = __lll_robust_lock_wait (__futex, private); \
__val; \
})
-#define lll_mutex_cond_lock(lock) \
+#define lll_cond_lock(lock, private) \
(void) ({ \
int *__futex = &(lock); \
if (__builtin_expect (atomic_compare_and_exchange_val_acq (__futex, 2, 0),\
0) != 0) \
- __lll_lock_wait (__futex); \
+ __lll_lock_wait (__futex, private); \
})
-#define lll_robust_mutex_cond_lock(lock, id) \
+#define lll_robust_cond_lock(lock, id, private) \
({ \
int *__futex = &(lock); \
int __val = 0; \
int __id = id | FUTEX_WAITERS; \
if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, __id,\
0), 0)) \
- __val = __lll_robust_lock_wait (__futex); \
+ __val = __lll_robust_lock_wait (__futex, private); \
__val; \
})
extern int __lll_timedlock_wait
- (int *futex, const struct timespec *) attribute_hidden;
+ (int *futex, const struct timespec *, int private) attribute_hidden;
extern int __lll_robust_timedlock_wait
- (int *futex, const struct timespec *) attribute_hidden;
+ (int *futex, const struct timespec *, int private) attribute_hidden;
-#define lll_mutex_timedlock(lock, abstime) \
+#define lll_timedlock(lock, abstime, private) \
({ \
int *__futex = &(lock); \
int __val = 0; \
if (__builtin_expect (atomic_compare_and_exchange_val_acq (__futex, 1, 0),\
0) != 0) \
- __val = __lll_timedlock_wait (__futex, abstime); \
+ __val = __lll_timedlock_wait (__futex, abstime, private); \
__val; \
})
-#define lll_robust_mutex_timedlock(lock, abstime, id) \
+#define lll_robust_timedlock(lock, abstime, id, private) \
({ \
int *__futex = &(lock); \
int __val = 0; \
if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, id, \
0), 0)) \
- __val = __lll_robust_timedlock_wait (__futex, abstime); \
+ __val = __lll_robust_timedlock_wait (__futex, abstime, private); \
__val; \
})
-#define lll_mutex_unlock(lock) \
+#define lll_unlock(lock, private) \
((void) ({ \
int *__futex = &(lock); \
int __val = atomic_exchange_rel (__futex, 0); \
if (__builtin_expect (__val > 1, 0)) \
- lll_futex_wake (__futex, 1, LLL_SHARED); \
+ lll_futex_wake (__futex, 1, private); \
}))
-#define lll_robust_mutex_unlock(lock) \
+#define lll_robust_unlock(lock, private) \
((void) ({ \
int *__futex = &(lock); \
int __val = atomic_exchange_rel (__futex, 0); \
if (__builtin_expect (__val & FUTEX_WAITERS, 0)) \
- lll_futex_wake (__futex, 1, LLL_SHARED); \
- }))
-
-#define lll_mutex_unlock_force(lock) \
- ((void) ({ \
- int *__futex = &(lock); \
- *__futex = 0; \
- __asm __volatile (__lll_rel_instr ::: "memory"); \
- lll_futex_wake (__futex, 1, LLL_SHARED); \
+ lll_futex_wake (__futex, 1, private); \
}))
-#define lll_mutex_islocked(futex) \
+#define lll_islocked(futex) \
(futex != 0)
-/* Our internal lock implementation is identical to the binary-compatible
- mutex implementation. */
-
-/* Type for lock object. */
-typedef int lll_lock_t;
-
/* Initializers for lock. */
#define LLL_LOCK_INITIALIZER (0)
#define LLL_LOCK_INITIALIZER_LOCKED (1)
@@ -293,11 +283,6 @@ typedef int lll_lock_t;
1 - taken by one user
>1 - taken by more users */
-#define lll_trylock(lock) lll_mutex_trylock (lock)
-#define lll_lock(lock) lll_mutex_lock (lock)
-#define lll_unlock(lock) lll_mutex_unlock (lock)
-#define lll_islocked(lock) lll_mutex_islocked (lock)
-
/* The kernel notifies a process which uses CLONE_CLEARTID via futex
wakeup when the clone terminates. The memory location contains the
thread ID while the clone is running and is reset to zero
@@ -320,26 +305,4 @@ extern int __lll_timedwait_tid (int *, c
__res; \
})
-
-/* Conditional variable handling. */
-
-extern void __lll_cond_wait (pthread_cond_t *cond)
- attribute_hidden;
-extern int __lll_cond_timedwait (pthread_cond_t *cond,
- const struct timespec *abstime)
- attribute_hidden;
-extern void __lll_cond_wake (pthread_cond_t *cond)
- attribute_hidden;
-extern void __lll_cond_broadcast (pthread_cond_t *cond)
- attribute_hidden;
-
-#define lll_cond_wait(cond) \
- __lll_cond_wait (cond)
-#define lll_cond_timedwait(cond, abstime) \
- __lll_cond_timedwait (cond, abstime)
-#define lll_cond_wake(cond) \
- __lll_cond_wake (cond)
-#define lll_cond_broadcast(cond) \
- __lll_cond_broadcast (cond)
-
#endif /* lowlevellock.h */
--- libc/nptl/sysdeps/unix/sysv/linux/lowlevellock.c.jj 2007-06-08 09:13:52.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/lowlevellock.c 2007-07-29 11:48:55.000000000 +0200
@@ -25,22 +25,35 @@
void
-__lll_lock_wait (int *futex)
+__lll_lock_wait_private (int *futex)
{
do
{
int oldval = atomic_compare_and_exchange_val_acq (futex, 2, 1);
if (oldval != 0)
- lll_futex_wait (futex, 2,
- // XYZ check mutex flag
- LLL_SHARED);
+ lll_futex_wait (futex, 2, LLL_PRIVATE);
+ }
+ while (atomic_compare_and_exchange_bool_acq (futex, 2, 0) != 0);
+}
+
+
+/* These functions don't get included in libc.so */
+#ifdef IS_IN_libpthread
+void
+__lll_lock_wait (int *futex, int private)
+{
+ do
+ {
+ int oldval = atomic_compare_and_exchange_val_acq (futex, 2, 1);
+ if (oldval != 0)
+ lll_futex_wait (futex, 2, private);
}
while (atomic_compare_and_exchange_bool_acq (futex, 2, 0) != 0);
}
int
-__lll_timedlock_wait (int *futex, const struct timespec *abstime)
+__lll_timedlock_wait (int *futex, const struct timespec *abstime, int private)
{
/* Reject invalid timeouts. */
if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
@@ -70,9 +83,7 @@ __lll_timedlock_wait (int *futex, const
/* Wait. */
int oldval = atomic_compare_and_exchange_val_acq (futex, 2, 1);
if (oldval != 0)
- lll_futex_timed_wait (futex, 2, &rt,
- // XYZ check mutex flag
- LLL_SHARED);
+ lll_futex_timed_wait (futex, 2, &rt, private);
}
while (atomic_compare_and_exchange_bool_acq (futex, 2, 0) != 0);
@@ -80,8 +91,6 @@ __lll_timedlock_wait (int *futex, const
}
-/* This function doesn't get included in libc.so */
-#ifdef IS_IN_libpthread
int
__lll_timedwait_tid (int *tidp, const struct timespec *abstime)
{
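
To make the state machine behind these helpers easier to review, here is a self-contained sketch of the 0/1/2 protocol that __lll_lock_wait{,_private} and the matching lll_unlock implement (plain C with __sync builtins and a raw futex syscall; this is an illustration, not glibc code, and all names are invented):

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

/* futex (2) has no libc wrapper; expose only the three arguments used.  */
static long
futex (int *uaddr, int op, int val)
{
  return syscall (SYS_futex, uaddr, op, val, NULL, NULL, 0);
}

/* Lock word states: 0 = unlocked, 1 = locked/no waiters, 2 = locked/waiters.
   op_wait/op_wake are FUTEX_WAIT/FUTEX_WAKE, with FUTEX_PRIVATE_FLAG OR'ed
   in for private locks; the protocol itself is identical either way.  */
static void
example_lock (int *futexp, int op_wait)
{
  /* Fast path: 0 -> 1, mirrors the inline cmpxchg in the lll_lock macros.  */
  if (__sync_val_compare_and_swap (futexp, 0, 1) == 0)
    return;
  /* Slow path, mirrors __lll_lock_wait above.  */
  do
    {
      /* Advertise a waiter (1 -> 2) and sleep as long as the word is 2.  */
      if (__sync_val_compare_and_swap (futexp, 1, 2) != 0)
	futex (futexp, op_wait, 2);
    }
  /* Grab the lock but keep the contended value 2, so the unlock below
     knows it has to issue a wake.  */
  while (__sync_val_compare_and_swap (futexp, 0, 2) != 0);
}

static void
example_unlock (int *futexp, int op_wake)
{
  /* Atomic exchange (xchg, a full barrier on x86); wake one thread only
     if the old value was 2, i.e. somebody may actually be sleeping.  */
  if (__sync_lock_test_and_set (futexp, 0) > 1)
    futex (futexp, op_wake, 1);
}

The unlock side is why the slow path leaves the word at 2 rather than 1: a thread that had to sleep never silently converts the lock back to the "no waiters" state, so a wakeup cannot be lost.
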
--- libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_once.S.jj 2007-05-24 16:41:25.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_once.S 2007-07-29 11:48:55.000000000 +0200
@@ -19,17 +19,8 @@
#include <kernel-features.h>
#include <tcb-offsets.h>
+#include <lowlevellock.h>
-#ifndef UP
-# define LOCK lock
-#else
-# define LOCK
-#endif
-
-#define SYS_futex 202
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-#define FUTEX_PRIVATE_FLAG 128
.comm __fork_generation, 4, 4
--- libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedrdlock.S.jj 2007-07-24 10:50:55.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedrdlock.S 2007-07-29 11:48:55.000000000 +0200
@@ -18,27 +18,15 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <lowlevelrwlock.h>
#include <pthread-errnos.h>
#include <kernel-features.h>
-#define SYS_futex 202
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-#define FUTEX_PRIVATE_FLAG 128
-
/* For the calculation see asm/vsyscall.h. */
#define VSYSCALL_ADDR_vgettimeofday 0xffffffffff600000
-
-#ifndef UP
-# define LOCK lock
-#else
-# define LOCK
-#endif
-
-
.text
.globl pthread_rwlock_timedrdlock
@@ -172,11 +160,11 @@ pthread_rwlock_timedrdlock:
popq %r12
retq
-1:
+1: movl PSHARED(%rdi), %esi
#if MUTEX != 0
addq $MUTEX, %rdi
#endif
- callq __lll_mutex_lock_wait
+ callq __lll_lock_wait
jmp 2b
14: cmpl %fs:TID, %eax
@@ -184,13 +172,13 @@ pthread_rwlock_timedrdlock:
movl $EDEADLK, %edx
jmp 9b
-6:
+6: movl PSHARED(%r12), %esi
#if MUTEX == 0
movq %r12, %rdi
#else
leal MUTEX(%r12), %rdi
#endif
- callq __lll_mutex_unlock_wake
+ callq __lll_unlock_wake
jmp 7b
/* Overflow. */
@@ -203,22 +191,22 @@ pthread_rwlock_timedrdlock:
movl $EAGAIN, %edx
jmp 9b
-10:
+10: movl PSHARED(%r12), %esi
#if MUTEX == 0
movq %r12, %rdi
#else
leaq MUTEX(%r12), %rdi
#endif
- callq __lll_mutex_unlock_wake
+ callq __lll_unlock_wake
jmp 11b
-12:
+12: movl PSHARED(%r12), %esi
#if MUTEX == 0
movq %r12, %rdi
#else
leaq MUTEX(%r12), %rdi
#endif
- callq __lll_mutex_lock_wait
+ callq __lll_lock_wait
jmp 13b
16: movq $-ETIMEDOUT, %rdx
--- libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_wrlock.S.jj 2007-07-24 10:50:55.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_wrlock.S 2007-07-29 11:48:55.000000000 +0200
@@ -18,23 +18,12 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <lowlevelrwlock.h>
#include <pthread-errnos.h>
#include <kernel-features.h>
-#define SYS_futex 202
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-#define FUTEX_PRIVATE_FLAG 128
-
-#ifndef UP
-# define LOCK lock
-#else
-# define LOCK
-#endif
-
-
.text
.globl __pthread_rwlock_wrlock
@@ -121,11 +110,11 @@ __pthread_rwlock_wrlock:
movq %rdx, %rax
retq
-1:
+1: movl PSHARED(%rdi), %esi
#if MUTEX != 0
addq $MUTEX, %rdi
#endif
- callq __lll_mutex_lock_wait
+ callq __lll_lock_wait
#if MUTEX != 0
subq $MUTEX, %rdi
#endif
@@ -136,32 +125,32 @@ __pthread_rwlock_wrlock:
movl $EDEADLK, %edx
jmp 9b
-6:
+6: movl PSHARED(%rdi), %esi
#if MUTEX != 0
addq $MUTEX, %rdi
#endif
- callq __lll_mutex_unlock_wake
+ callq __lll_unlock_wake
jmp 7b
4: decl WRITERS_QUEUED(%rdi)
movl $EAGAIN, %edx
jmp 9b
-10:
+10: movl PSHARED(%rdi), %esi
#if MUTEX != 0
addq $MUTEX, %rdi
#endif
- callq __lll_mutex_unlock_wake
+ callq __lll_unlock_wake
#if MUTEX != 0
subq $MUTEX, %rdi
#endif
jmp 11b
-12:
+12: movl PSHARED(%rdi), %esi
#if MUTEX != 0
addq $MUTEX, %rdi
#endif
- callq __lll_mutex_lock_wait
+ callq __lll_lock_wait
#if MUTEX != 0
subq $MUTEX, %rdi
#endif
--- libc/nptl/sysdeps/unix/sysv/linux/x86_64/sem_trywait.S.jj 2007-06-04 08:42:06.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/sem_trywait.S 2007-07-29 11:48:55.000000000 +0200
@@ -18,15 +18,10 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <shlib-compat.h>
#include <pthread-errnos.h>
-#ifndef UP
-# define LOCK lock
-#else
-# define
-#endif
-
.text
.globl sem_trywait
--- libc/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevelrobustlock.S.jj 2006-09-05 16:46:43.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevelrobustlock.S 2007-07-30 15:56:48.000000000 +0200
@@ -1,4 +1,5 @@
-/* Copyright (C) 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007
+ Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -19,33 +20,40 @@
#include <sysdep.h>
#include <pthread-errnos.h>
+#include <lowlevellock.h>
#include <lowlevelrobustlock.h>
+#include <kernel-features.h>
.text
-#ifndef LOCK
-# ifdef UP
-# define LOCK
+#define FUTEX_WAITERS 0x80000000
+#define FUTEX_OWNER_DIED 0x40000000
+
+#ifdef __ASSUME_PRIVATE_FUTEX
+# define LOAD_FUTEX_WAIT(reg) \
+ xorl $(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg
+#else
+# if FUTEX_WAIT == 0
+# define LOAD_FUTEX_WAIT(reg) \
+ xorl $FUTEX_PRIVATE_FLAG, reg ; \
+ andl %fs:PRIVATE_FUTEX, reg
# else
-# define LOCK lock
+# define LOAD_FUTEX_WAIT(reg) \
+ xorl $FUTEX_PRIVATE_FLAG, reg ; \
+ andl %fs:PRIVATE_FUTEX, reg ; \
+ orl $FUTEX_WAIT, reg
# endif
#endif
-#define SYS_futex 202
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-#define FUTEX_WAITERS 0x80000000
-#define FUTEX_OWNER_DIED 0x40000000
-
/* For the calculation see asm/vsyscall.h. */
#define VSYSCALL_ADDR_vgettimeofday 0xffffffffff600000
- .globl __lll_robust_mutex_lock_wait
- .type __lll_robust_mutex_lock_wait,@function
- .hidden __lll_robust_mutex_lock_wait
+ .globl __lll_robust_lock_wait
+ .type __lll_robust_lock_wait,@function
+ .hidden __lll_robust_lock_wait
.align 16
-__lll_robust_mutex_lock_wait:
+__lll_robust_lock_wait:
cfi_startproc
pushq %r10
cfi_adjust_cfa_offset(8)
@@ -55,11 +63,7 @@ __lll_robust_mutex_lock_wait:
cfi_offset(%rdx, -24)
xorq %r10, %r10 /* No timeout. */
-#if FUTEX_WAIT == 0
- xorl %esi, %esi
-#else
- movl $FUTEX_WAIT, %esi
-#endif
+ LOAD_FUTEX_WAIT (%esi)
4: movl %eax, %edx
orl $FUTEX_WAITERS, %edx
@@ -97,14 +101,14 @@ __lll_robust_mutex_lock_wait:
cfi_restore(%r10)
retq
cfi_endproc
- .size __lll_robust_mutex_lock_wait,.-__lll_robust_mutex_lock_wait
+ .size __lll_robust_lock_wait,.-__lll_robust_lock_wait
- .globl __lll_robust_mutex_timedlock_wait
- .type __lll_robust_mutex_timedlock_wait,@function
- .hidden __lll_robust_mutex_timedlock_wait
+ .globl __lll_robust_timedlock_wait
+ .type __lll_robust_timedlock_wait,@function
+ .hidden __lll_robust_timedlock_wait
.align 16
-__lll_robust_mutex_timedlock_wait:
+__lll_robust_timedlock_wait:
cfi_startproc
/* Check for a valid timeout value. */
cmpq $1000000000, 8(%rdx)
@@ -122,10 +126,12 @@ __lll_robust_mutex_timedlock_wait:
cfi_offset(%r9, -24)
cfi_offset(%r12, -32)
cfi_offset(%r13, -40)
+ pushq %rsi
+ cfi_adjust_cfa_offset(8)
/* Stack frame for the timespec and timeval structs. */
- subq $24, %rsp
- cfi_adjust_cfa_offset(24)
+ subq $32, %rsp
+ cfi_adjust_cfa_offset(32)
movq %rdi, %r12
movq %rdx, %r13
@@ -174,11 +180,8 @@ __lll_robust_mutex_timedlock_wait:
jnz 5f
2: movq %rsp, %r10
-#if FUTEX_WAIT == 0
- xorl %esi, %esi
-#else
- movl $FUTEX_WAIT, %esi
-#endif
+ movl 32(%rsp), %esi
+ LOAD_FUTEX_WAIT (%esi)
movq %r12, %rdi
movl $SYS_futex, %eax
syscall
@@ -195,8 +198,8 @@ __lll_robust_mutex_timedlock_wait:
cmpxchgl %edx, (%r12)
jnz 7f
-6: addq $24, %rsp
- cfi_adjust_cfa_offset(-24)
+6: addq $40, %rsp
+ cfi_adjust_cfa_offset(-40)
popq %r13
cfi_adjust_cfa_offset(-8)
cfi_restore(%r13)
@@ -214,7 +217,7 @@ __lll_robust_mutex_timedlock_wait:
3: movl $EINVAL, %eax
retq
- cfi_adjust_cfa_offset(56)
+ cfi_adjust_cfa_offset(72)
cfi_offset(%r8, -16)
cfi_offset(%r9, -24)
cfi_offset(%r12, -32)
@@ -226,4 +229,4 @@ __lll_robust_mutex_timedlock_wait:
8: movl $ETIMEDOUT, %eax
jmp 6b
cfi_endproc
- .size __lll_robust_mutex_timedlock_wait,.-__lll_robust_mutex_timedlock_wait
+ .size __lll_robust_timedlock_wait,.-__lll_robust_timedlock_wait
--- libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_rdlock.S.jj 2007-07-24 10:50:55.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_rdlock.S 2007-07-29 11:48:55.000000000 +0200
@@ -18,23 +18,12 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <lowlevelrwlock.h>
#include <pthread-errnos.h>
#include <kernel-features.h>
-#define SYS_futex 202
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-#define FUTEX_PRIVATE_FLAG 128
-
-#ifndef UP
-# define LOCK lock
-#else
-# define LOCK
-#endif
-
-
.text
.globl __pthread_rwlock_rdlock
@@ -123,11 +112,11 @@ __pthread_rwlock_rdlock:
movq %rdx, %rax
retq
-1:
+1: movl PSHARED(%rdi), %esi
#if MUTEX != 0
addq $MUTEX, %rdi
#endif
- callq __lll_mutex_lock_wait
+ callq __lll_lock_wait
#if MUTEX != 0
subq $MUTEX, %rdi
#endif
@@ -139,11 +128,11 @@ __pthread_rwlock_rdlock:
movl $EDEADLK, %edx
jmp 9b
-6:
+6: movl PSHARED(%rdi), %esi
#if MUTEX != 0
addq $MUTEX, %rdi
#endif
- callq __lll_mutex_unlock_wake
+ callq __lll_unlock_wake
#if MUTEX != 0
subq $MUTEX, %rdi
#endif
@@ -159,21 +148,21 @@ __pthread_rwlock_rdlock:
movl $EAGAIN, %edx
jmp 9b
-10:
+10: movl PSHARED(%rdi), %esi
#if MUTEX != 0
addq $MUTEX, %rdi
#endif
- callq __lll_mutex_unlock_wake
+ callq __lll_unlock_wake
#if MUTEX != 0
subq $MUTEX, %rdi
#endif
jmp 11b
-12:
+12: movl PSHARED(%rdi), %esi
#if MUTEX == 0
addq $MUTEX, %rdi
#endif
- callq __lll_mutex_lock_wait
+ callq __lll_lock_wait
#if MUTEX != 0
subq $MUTEX, %rdi
#endif
--- libc/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.S.jj 2007-06-04 08:42:06.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.S 2007-07-31 12:40:13.000000000 +0200
@@ -19,33 +19,46 @@
#include <sysdep.h>
#include <pthread-errnos.h>
+#include <kernel-features.h>
+#include <lowlevellock.h>
.text
-#ifndef LOCK
-# ifdef UP
-# define LOCK
+#ifdef __ASSUME_PRIVATE_FUTEX
+# define LOAD_PRIVATE_FUTEX_WAIT(reg) \
+ movl $(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg
+# define LOAD_PRIVATE_FUTEX_WAKE(reg) \
+ movl $(FUTEX_WAKE | FUTEX_PRIVATE_FLAG), reg
+# define LOAD_FUTEX_WAIT(reg) \
+ xorl $(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg
+# define LOAD_FUTEX_WAKE(reg) \
+ xorl $(FUTEX_WAKE | FUTEX_PRIVATE_FLAG), reg
+#else
+# if FUTEX_WAIT == 0
+# define LOAD_PRIVATE_FUTEX_WAIT(reg) \
+ movl %fs:PRIVATE_FUTEX, reg
# else
-# define LOCK lock
+# define LOAD_PRIVATE_FUTEX_WAIT(reg) \
+ movl %fs:PRIVATE_FUTEX, reg ; \
+ orl $FUTEX_WAIT, reg
# endif
-#endif
-
-#define SYS_futex 202
-#ifndef FUTEX_WAIT
-# define FUTEX_WAIT 0
-# define FUTEX_WAKE 1
-#endif
-
-#ifndef LOAD_FUTEX_WAIT
+# define LOAD_PRIVATE_FUTEX_WAKE(reg) \
+ movl %fs:PRIVATE_FUTEX, reg ; \
+ orl $FUTEX_WAKE, reg
# if FUTEX_WAIT == 0
# define LOAD_FUTEX_WAIT(reg) \
- xorl reg, reg
+ xorl $FUTEX_PRIVATE_FLAG, reg ; \
+ andl %fs:PRIVATE_FUTEX, reg
# else
# define LOAD_FUTEX_WAIT(reg) \
- movl $FUTEX_WAIT, reg
+ xorl $FUTEX_PRIVATE_FLAG, reg ; \
+ andl %fs:PRIVATE_FUTEX, reg ; \
+ orl $FUTEX_WAIT, reg
# endif
# define LOAD_FUTEX_WAKE(reg) \
- movl $FUTEX_WAKE, reg
+ xorl $FUTEX_PRIVATE_FLAG, reg ; \
+ andl %fs:PRIVATE_FUTEX, reg ; \
+ orl $FUTEX_WAKE, reg
#endif
@@ -53,11 +66,11 @@
#define VSYSCALL_ADDR_vgettimeofday 0xffffffffff600000
- .globl __lll_mutex_lock_wait
- .type __lll_mutex_lock_wait,@function
- .hidden __lll_mutex_lock_wait
+ .globl __lll_lock_wait_private
+ .type __lll_lock_wait_private,@function
+ .hidden __lll_lock_wait_private
.align 16
-__lll_mutex_lock_wait:
+__lll_lock_wait_private:
cfi_startproc
pushq %r10
cfi_adjust_cfa_offset(8)
@@ -67,7 +80,7 @@ __lll_mutex_lock_wait:
cfi_offset(%rdx, -24)
xorq %r10, %r10 /* No timeout. */
movl $2, %edx
- LOAD_FUTEX_WAIT (%esi)
+ LOAD_PRIVATE_FUTEX_WAIT (%esi)
cmpl %edx, %eax /* NB: %edx == 2 */
jne 2f
@@ -89,15 +102,52 @@ __lll_mutex_lock_wait:
cfi_restore(%r10)
retq
cfi_endproc
- .size __lll_mutex_lock_wait,.-__lll_mutex_lock_wait
-
+ .size __lll_lock_wait_private,.-__lll_lock_wait_private
#ifdef NOT_IN_libc
- .globl __lll_mutex_timedlock_wait
- .type __lll_mutex_timedlock_wait,@function
- .hidden __lll_mutex_timedlock_wait
+ .globl __lll_lock_wait
+ .type __lll_lock_wait,@function
+ .hidden __lll_lock_wait
.align 16
-__lll_mutex_timedlock_wait:
+__lll_lock_wait:
+ cfi_startproc
+ pushq %r10
+ cfi_adjust_cfa_offset(8)
+ pushq %rdx
+ cfi_adjust_cfa_offset(8)
+ cfi_offset(%r10, -16)
+ cfi_offset(%rdx, -24)
+ xorq %r10, %r10 /* No timeout. */
+ movl $2, %edx
+ LOAD_FUTEX_WAIT (%esi)
+
+ cmpl %edx, %eax /* NB: %edx == 2 */
+ jne 2f
+
+1: movl $SYS_futex, %eax
+ syscall
+
+2: movl %edx, %eax
+ xchgl %eax, (%rdi) /* NB: lock is implied */
+
+ testl %eax, %eax
+ jnz 1b
+
+ popq %rdx
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%rdx)
+ popq %r10
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%r10)
+ retq
+ cfi_endproc
+ .size __lll_lock_wait,.-__lll_lock_wait
+
+ .globl __lll_timedlock_wait
+ .type __lll_timedlock_wait,@function
+ .hidden __lll_timedlock_wait
+ .align 16
+__lll_timedlock_wait:
cfi_startproc
/* Check for a valid timeout value. */
cmpq $1000000000, 8(%rdx)
@@ -118,10 +168,12 @@ __lll_mutex_timedlock_wait:
cfi_offset(%r12, -32)
cfi_offset(%r13, -40)
cfi_offset(%r14, -48)
+ pushq %rsi
+ cfi_adjust_cfa_offset(8)
/* Stack frame for the timespec and timeval structs. */
- subq $16, %rsp
- cfi_adjust_cfa_offset(16)
+ subq $24, %rsp
+ cfi_adjust_cfa_offset(24)
movq %rdi, %r12
movq %rdx, %r13
@@ -162,6 +214,7 @@ __lll_mutex_timedlock_wait:
je 8f
movq %rsp, %r10
+ movl 24(%rsp), %esi
LOAD_FUTEX_WAIT (%esi)
movq %r12, %rdi
movl $SYS_futex, %eax
@@ -174,8 +227,8 @@ __lll_mutex_timedlock_wait:
cmpxchgl %edx, (%r12)
jnz 7f
-6: addq $16, %rsp
- cfi_adjust_cfa_offset(-16)
+6: addq $32, %rsp
+ cfi_adjust_cfa_offset(-32)
popq %r14
cfi_adjust_cfa_offset(-8)
cfi_restore(%r14)
@@ -196,7 +249,7 @@ __lll_mutex_timedlock_wait:
3: movl $EINVAL, %eax
retq
- cfi_adjust_cfa_offset(56)
+ cfi_adjust_cfa_offset(72)
cfi_offset(%r8, -16)
cfi_offset(%r9, -24)
cfi_offset(%r12, -32)
@@ -216,15 +269,15 @@ __lll_mutex_timedlock_wait:
5: movl $ETIMEDOUT, %eax
jmp 6b
cfi_endproc
- .size __lll_mutex_timedlock_wait,.-__lll_mutex_timedlock_wait
+ .size __lll_timedlock_wait,.-__lll_timedlock_wait
#endif
- .globl __lll_mutex_unlock_wake
- .type __lll_mutex_unlock_wake,@function
- .hidden __lll_mutex_unlock_wake
+ .globl __lll_unlock_wake_private
+ .type __lll_unlock_wake_private,@function
+ .hidden __lll_unlock_wake_private
.align 16
-__lll_mutex_unlock_wake:
+__lll_unlock_wake_private:
cfi_startproc
pushq %rsi
cfi_adjust_cfa_offset(8)
@@ -234,7 +287,7 @@ __lll_mutex_unlock_wake:
cfi_offset(%rdx, -24)
movl $0, (%rdi)
- LOAD_FUTEX_WAKE (%esi)
+ LOAD_PRIVATE_FUTEX_WAKE (%esi)
movl $1, %edx /* Wake one thread. */
movl $SYS_futex, %eax
syscall
@@ -247,10 +300,38 @@ __lll_mutex_unlock_wake:
cfi_restore(%rsi)
retq
cfi_endproc
- .size __lll_mutex_unlock_wake,.-__lll_mutex_unlock_wake
-
+ .size __lll_unlock_wake_private,.-__lll_unlock_wake_private
#ifdef NOT_IN_libc
+ .globl __lll_unlock_wake
+ .type __lll_unlock_wake,@function
+ .hidden __lll_unlock_wake
+ .align 16
+__lll_unlock_wake:
+ cfi_startproc
+ pushq %rsi
+ cfi_adjust_cfa_offset(8)
+ pushq %rdx
+ cfi_adjust_cfa_offset(8)
+ cfi_offset(%rsi, -16)
+ cfi_offset(%rdx, -24)
+
+ movl $0, (%rdi)
+ LOAD_FUTEX_WAKE (%esi)
+ movl $1, %edx /* Wake one thread. */
+ movl $SYS_futex, %eax
+ syscall
+
+ popq %rdx
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%rdx)
+ popq %rsi
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%rsi)
+ retq
+ cfi_endproc
+ .size __lll_unlock_wake,.-__lll_unlock_wake
+
.globl __lll_timedwait_tid
.type __lll_timedwait_tid,@function
.hidden __lll_timedwait_tid
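
In case the bit twiddling in the LOAD_FUTEX_WAIT/LOAD_FUTEX_WAKE macros above isn't obvious, the C equivalent of the !__ASSUME_PRIVATE_FUTEX variant is roughly the following (illustration only; private_futex_mask stands for the per-thread %fs:PRIVATE_FUTEX word, which holds FUTEX_PRIVATE_FLAG when the kernel supports private futexes and 0 otherwise, and the function name is invented):

/* reg = private; xorl $FUTEX_PRIVATE_FLAG; andl %fs:PRIVATE_FUTEX;
   orl $FUTEX_WAIT -- written out as an expression.  */
static inline int
example_load_futex_wait (int private, int private_futex_mask)
{
  /* private is LLL_PRIVATE (0) or LLL_SHARED (FUTEX_PRIVATE_FLAG).  */
  return ((private ^ FUTEX_PRIVATE_FLAG) & private_futex_mask) | FUTEX_WAIT;
}

So a private lock on a private-futex kernel ends up doing FUTEX_WAIT | FUTEX_PRIVATE_FLAG, while shared locks and old kernels both fall back to plain FUTEX_WAIT, without a single branch in the stub.
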
--- libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_broadcast.S.jj 2006-07-29 06:31:49.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_broadcast.S 2007-07-29 11:48:55.000000000 +0200
@@ -1,4 +1,5 @@
-/* Copyright (C) 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007
+ Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -19,23 +20,11 @@
#include <sysdep.h>
#include <shlib-compat.h>
+#include <lowlevellock.h>
#include <lowlevelcond.h>
#include <kernel-features.h>
#include <pthread-pi-defines.h>
-
-#ifdef UP
-# define LOCK
-#else
-# define LOCK lock
-#endif
-
-#define SYS_futex 202
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-#define FUTEX_REQUEUE 3
-#define FUTEX_CMP_REQUEUE 4
-
-#define EINVAL 22
+#include <pthread-errnos.h>
.text
@@ -115,7 +104,9 @@ __pthread_cond_broadcast:
#if cond_lock != 0
addq $cond_lock, %rdi
#endif
- callq __lll_mutex_lock_wait
+ /* XYZ */
+ movl $LLL_SHARED, %esi
+ callq __lll_lock_wait
#if cond_lock != 0
subq $cond_lock, %rdi
#endif
@@ -123,12 +114,16 @@ __pthread_cond_broadcast:
/* Unlock in loop requires wakeup. */
5: addq $cond_lock-cond_futex, %rdi
- callq __lll_mutex_unlock_wake
+ /* XYZ */
+ movl $LLL_SHARED, %esi
+ callq __lll_unlock_wake
jmp 6b
/* Unlock in loop requires wakeup. */
7: addq $cond_lock-cond_futex, %rdi
- callq __lll_mutex_unlock_wake
+ /* XYZ */
+ movl $LLL_SHARED, %esi
+ callq __lll_unlock_wake
subq $cond_lock-cond_futex, %rdi
jmp 8b
--- libc/nptl/sysdeps/unix/sysv/linux/x86_64/sem_post.S.jj 2007-06-04 08:42:06.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/sem_post.S 2007-07-29 11:48:55.000000000 +0200
@@ -18,19 +18,11 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <shlib-compat.h>
#include <pthread-errnos.h>
#include <structsem.h>
-#ifndef UP
-# define LOCK lock
-#else
-# define
-#endif
-
-#define SYS_futex 202
-#define FUTEX_WAKE 1
-
.text
--- libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_unlock.S.jj 2007-07-24 10:50:55.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_unlock.S 2007-07-29 11:48:55.000000000 +0200
@@ -18,22 +18,11 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <lowlevelrwlock.h>
#include <kernel-features.h>
-#define SYS_futex 202
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-#define FUTEX_PRIVATE_FLAG 128
-
-#ifndef UP
-# define LOCK lock
-#else
-# define LOCK
-#endif
-
-
.text
.globl __pthread_rwlock_unlock
@@ -107,28 +96,28 @@ __pthread_rwlock_unlock:
4: xorl %eax, %eax
retq
-1:
+1: movl PSHARED(%rdi), %esi
#if MUTEX != 0
addq $MUTEX, %rdi
#endif
- callq __lll_mutex_lock_wait
+ callq __lll_lock_wait
#if MUTEX != 0
subq $MUTEX, %rdi
#endif
jmp 2b
-3:
+3: movl PSHARED(%rdi), %esi
#if MUTEX != 0
addq $MUTEX, %rdi
#endif
- callq __lll_mutex_unlock_wake
+ callq __lll_unlock_wake
jmp 4b
-7:
+7: movl PSHARED(%rdi), %esi
#if MUTEX != 0
addq $MUTEX, %rdi
#endif
- callq __lll_mutex_unlock_wake
+ callq __lll_unlock_wake
jmp 8b
.size __pthread_rwlock_unlock,.-__pthread_rwlock_unlock
--- libc/nptl/sysdeps/unix/sysv/linux/x86_64/libc-lowlevellock.S.jj 2007-06-04 08:42:06.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/libc-lowlevellock.S 2007-07-29 11:48:55.000000000 +0200
@@ -17,19 +17,4 @@
Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
02111-1307 USA. */
-#include <kernel-features.h>
-
-/* All locks in libc are private. Use the kernel feature if possible. */
-#define FUTEX_PRIVATE_FLAG 128
-#ifdef __ASSUME_PRIVATE_FUTEX
-# define FUTEX_WAIT (0 | FUTEX_PRIVATE_FLAG)
-# define FUTEX_WAKE (1 | FUTEX_PRIVATE_FLAG)
-#else
-# define LOAD_FUTEX_WAIT(reg) \
- movl %fs:PRIVATE_FUTEX, reg
-# define LOAD_FUTEX_WAKE(reg) \
- movl %fs:PRIVATE_FUTEX, reg ; \
- orl $FUTEX_WAKE, reg
-#endif
-
#include "lowlevellock.S"
--- libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S.jj 2007-06-04 08:42:06.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S 2007-07-29 11:48:55.000000000 +0200
@@ -19,19 +19,10 @@
#include <sysdep.h>
#include <shlib-compat.h>
+#include <lowlevellock.h>
#include <lowlevelcond.h>
#include <tcb-offsets.h>
-#ifdef UP
-# define LOCK
-#else
-# define LOCK lock
-#endif
-
-#define SYS_futex 202
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-
.text
@@ -58,7 +49,9 @@ __condvar_cleanup:
#if cond_lock != 0
addq $cond_lock, %rdi
#endif
- callq __lll_mutex_lock_wait
+ /* XYZ */
+ movl $LLL_SHARED, %esi
+ callq __lll_lock_wait
#if cond_lock != 0
subq $cond_lock, %rdi
#endif
@@ -105,7 +98,9 @@ __condvar_cleanup:
#if cond_lock != 0
addq $cond_lock, %rdi
#endif
- callq __lll_mutex_unlock_wake
+ /* XYZ */
+ movl $LLL_SHARED, %esi
+ callq __lll_unlock_wake
/* Wake up all waiters to make sure no signal gets lost. */
2: testq %r12, %r12
@@ -307,7 +302,9 @@ __pthread_cond_wait:
#if cond_lock != 0
addq $cond_lock, %rdi
#endif
- callq __lll_mutex_lock_wait
+ /* XYZ */
+ movl $LLL_SHARED, %esi
+ callq __lll_lock_wait
jmp 2b
/* Unlock in loop requires wakeup. */
@@ -315,7 +312,9 @@ __pthread_cond_wait:
#if cond_lock != 0
addq $cond_lock, %rdi
#endif
- callq __lll_mutex_unlock_wake
+ /* XYZ */
+ movl $LLL_SHARED, %esi
+ callq __lll_unlock_wake
jmp 4b
/* Locking in loop failed. */
@@ -323,7 +322,9 @@ __pthread_cond_wait:
#if cond_lock != 0
addq $cond_lock, %rdi
#endif
- callq __lll_mutex_lock_wait
+ /* XYZ */
+ movl $LLL_SHARED, %esi
+ callq __lll_lock_wait
#if cond_lock != 0
subq $cond_lock, %rdi
#endif
@@ -334,7 +335,9 @@ __pthread_cond_wait:
#if cond_lock != 0
addq $cond_lock, %rdi
#endif
- callq __lll_mutex_unlock_wake
+ /* XYZ */
+ movl $LLL_SHARED, %esi
+ callq __lll_unlock_wake
jmp 11b
/* The initial unlocking of the mutex failed. */
@@ -351,7 +354,9 @@ __pthread_cond_wait:
#if cond_lock != 0
addq $cond_lock, %rdi
#endif
- callq __lll_mutex_unlock_wake
+ /* XYZ */
+ movl $LLL_SHARED, %esi
+ callq __lll_unlock_wake
13: movq %r10, %rax
jmp 14b
--- libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_signal.S.jj 2005-09-08 19:40:52.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_signal.S 2007-07-29 11:48:55.000000000 +0200
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2004, 2005, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -19,23 +19,10 @@
#include <sysdep.h>
#include <shlib-compat.h>
+#include <lowlevellock.h>
#include <lowlevelcond.h>
#include <kernel-features.h>
-
-#ifdef UP
-# define LOCK
-#else
-# define LOCK lock
-#endif
-
-#define SYS_futex 202
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-#define FUTEX_WAKE_OP 5
-
-#define FUTEX_OP_CLEAR_WAKE_IF_GT_ONE ((4 << 24) | 1)
-
-#define EINVAL 22
+#include <pthread-errnos.h>
.text
@@ -111,7 +98,9 @@ __pthread_cond_signal:
#if cond_lock != 0
addq $cond_lock, %rdi
#endif
- callq __lll_mutex_lock_wait
+ /* XYZ */
+ movl $LLL_SHARED, %esi
+ callq __lll_lock_wait
#if cond_lock != 0
subq $cond_lock, %rdi
#endif
@@ -120,7 +109,9 @@ __pthread_cond_signal:
/* Unlock in loop requires wakeup. */
5:
movq %r8, %rdi
- callq __lll_mutex_unlock_wake
+ /* XYZ */
+ movl $LLL_SHARED, %esi
+ callq __lll_unlock_wake
jmp 6b
.size __pthread_cond_signal, .-__pthread_cond_signal
versioned_symbol (libpthread, __pthread_cond_signal, pthread_cond_signal,
--- libc/nptl/sysdeps/unix/sysv/linux/x86_64/sem_timedwait.S.jj 2007-06-04 08:42:06.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/sem_timedwait.S 2007-07-29 11:48:55.000000000 +0200
@@ -18,23 +18,15 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <shlib-compat.h>
#include <pthread-errnos.h>
#include <structsem.h>
-#ifndef UP
-# define LOCK lock
-#else
-# define
-#endif
-
-#define SYS_futex 202
-#define FUTEX_WAIT 0
/* For the calculation see asm/vsyscall.h. */
#define VSYSCALL_ADDR_vgettimeofday 0xffffffffff600000
-
.text
.globl sem_timedwait
--- libc/nptl/sysdeps/unix/sysv/linux/x86_64/sem_wait.S.jj 2007-06-04 08:42:06.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/sem_wait.S 2007-07-29 11:48:55.000000000 +0200
@@ -18,19 +18,11 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <shlib-compat.h>
#include <pthread-errnos.h>
#include <structsem.h>
-#ifndef UP
-# define LOCK lock
-#else
-# define
-#endif
-
-#define SYS_futex 202
-#define FUTEX_WAIT 0
-
.text
--- libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_barrier_wait.S.jj 2007-06-04 08:42:06.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_barrier_wait.S 2007-07-29 11:48:55.000000000 +0200
@@ -18,18 +18,9 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <lowlevelbarrier.h>
-#define SYS_futex 202
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-
-#ifndef UP
-# define LOCK lock
-#else
-# define LOCK
-#endif
-
.text
@@ -142,21 +133,29 @@ pthread_barrier_wait:
retq
-1: addq $MUTEX, %rdi
- callq __lll_mutex_lock_wait
+1: movl PRIVATE(%rdi), %esi
+ addq $MUTEX, %rdi
+ xorl $LLL_SHARED, %esi
+ callq __lll_lock_wait
subq $MUTEX, %rdi
jmp 2b
-4: addq $MUTEX, %rdi
- callq __lll_mutex_unlock_wake
+4: movl PRIVATE(%rdi), %esi
+ addq $MUTEX, %rdi
+ xorl $LLL_SHARED, %esi
+ callq __lll_unlock_wake
jmp 5b
-6: addq $MUTEX, %rdi
- callq __lll_mutex_unlock_wake
+6: movl PRIVATE(%rdi), %esi
+ addq $MUTEX, %rdi
+ xorl $LLL_SHARED, %esi
+ callq __lll_unlock_wake
subq $MUTEX, %rdi
jmp 7b
-9: addq $MUTEX, %rdi
- callq __lll_mutex_unlock_wake
+9: movl PRIVATE(%rdi), %esi
+ addq $MUTEX, %rdi
+ xorl $LLL_SHARED, %esi
+ callq __lll_unlock_wake
jmp 10b
.size pthread_barrier_wait,.-pthread_barrier_wait
--- libc/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h.jj 2007-07-29 11:45:14.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h 2007-07-31 12:40:13.000000000 +0200
@@ -20,17 +20,27 @@
#ifndef _LOWLEVELLOCK_H
#define _LOWLEVELLOCK_H 1
-#include <time.h>
-#include <sys/param.h>
-#include <bits/pthreadtypes.h>
-#include <kernel-features.h>
-#include <tcb-offsets.h>
-
-#ifndef LOCK_INSTR
-# ifdef UP
-# define LOCK_INSTR /* nothing */
-# else
-# define LOCK_INSTR "lock;"
+#ifndef __ASSEMBLER__
+# include <time.h>
+# include <sys/param.h>
+# include <bits/pthreadtypes.h>
+# include <kernel-features.h>
+# include <tcb-offsets.h>
+
+# ifndef LOCK_INSTR
+# ifdef UP
+# define LOCK_INSTR /* nothing */
+# else
+# define LOCK_INSTR "lock;"
+# endif
+# endif
+#else
+# ifndef LOCK
+# ifdef UP
+# define LOCK
+# else
+# define LOCK lock
+# endif
# endif
#endif
@@ -38,11 +48,13 @@
#define FUTEX_WAIT 0
#define FUTEX_WAKE 1
#define FUTEX_CMP_REQUEUE 4
+#define FUTEX_WAKE_OP 5
#define FUTEX_LOCK_PI 6
#define FUTEX_UNLOCK_PI 7
#define FUTEX_TRYLOCK_PI 8
#define FUTEX_PRIVATE_FLAG 128
+#define FUTEX_OP_CLEAR_WAKE_IF_GT_ONE ((4 << 24) | 1)
/* Values for 'private' parameter of locking macros. Yes, the
definition seems to be backwards. But it is not. The bit will be
@@ -50,6 +62,8 @@
#define LLL_PRIVATE 0
#define LLL_SHARED FUTEX_PRIVATE_FLAG
+#ifndef __ASSEMBLER__
+
#if !defined NOT_IN_libc || defined IS_IN_rtld
/* In libc.so or ld.so all futexes are private. */
# ifdef __ASSUME_PRIVATE_FUTEX
@@ -76,13 +90,13 @@
# endif
#endif
-/* Initializer for compatibility lock. */
-#define LLL_MUTEX_LOCK_INITIALIZER (0)
-#define LLL_MUTEX_LOCK_INITIALIZER_LOCKED (1)
-#define LLL_MUTEX_LOCK_INITIALIZER_WAITERS (2)
+/* Initializer for lock. */
+#define LLL_LOCK_INITIALIZER (0)
+#define LLL_LOCK_INITIALIZER_LOCKED (1)
+#define LLL_LOCK_INITIALIZER_WAITERS (2)
/* Delay in spinlock loop. */
-#define BUSY_WAIT_NOP asm ("rep; nop")
+#define BUSY_WAIT_NOP asm ("rep; nop")
#define LLL_STUB_UNWIND_INFO_START \
@@ -196,7 +210,7 @@ LLL_STUB_UNWIND_INFO_END
: "=a" (__status) \
: "0" (SYS_futex), "D" (futex), \
"S" (__lll_private_flag (FUTEX_WAIT, private)), \
- "d" (_val), "r" (__to) \
+ "d" (_val), "r" (__to) \
: "memory", "cc", "r11", "cx"); \
__status; \
})
@@ -215,242 +229,308 @@ LLL_STUB_UNWIND_INFO_END
} while (0)
-
-/* Does not preserve %eax and %ecx. */
-extern int __lll_mutex_lock_wait (int *__futex, int __val) attribute_hidden;
-/* Does not preserver %eax, %ecx, and %edx. */
-extern int __lll_mutex_timedlock_wait (int *__futex, int __val,
- const struct timespec *__abstime)
- attribute_hidden;
-/* Preserves all registers but %eax. */
-extern int __lll_mutex_unlock_wait (int *__futex) attribute_hidden;
-
-
-/* NB: in the lll_mutex_trylock macro we simply return the value in %eax
+/* NB: in the lll_trylock macro we simply return the value in %eax
after the cmpxchg instruction. In case the operation succeded this
value is zero. In case the operation failed, the cmpxchg instruction
has loaded the current value of the memory work which is guaranteed
to be nonzero. */
-#define lll_mutex_trylock(futex) \
+#if defined NOT_IN_libc || defined UP
+# define __lll_trylock_asm LOCK_INSTR "cmpxchgl %2, %1"
+#else
+# define __lll_trylock_asm "cmpl $0, __libc_multiple_threads(%%rip)\n\t" \
+ "je 0f\n\t" \
+ "lock; cmpxchgl %2, %1\n\t" \
+ "jmp 1f\n\t" \
+ "0:\tcmpxchgl %2, %1\n\t" \
+ "1:"
+#endif
+
+#define lll_trylock(futex) \
({ int ret; \
- __asm __volatile (LOCK_INSTR "cmpxchgl %2, %1" \
+ __asm __volatile (__lll_trylock_asm \
: "=a" (ret), "=m" (futex) \
- : "r" (LLL_MUTEX_LOCK_INITIALIZER_LOCKED), "m" (futex),\
- "0" (LLL_MUTEX_LOCK_INITIALIZER) \
+ : "r" (LLL_LOCK_INITIALIZER_LOCKED), "m" (futex), \
+ "0" (LLL_LOCK_INITIALIZER) \
: "memory"); \
ret; })
-
-#define lll_robust_mutex_trylock(futex, id) \
+#define lll_robust_trylock(futex, id) \
({ int ret; \
__asm __volatile (LOCK_INSTR "cmpxchgl %2, %1" \
: "=a" (ret), "=m" (futex) \
- : "r" (id), "m" (futex), \
- "0" (LLL_MUTEX_LOCK_INITIALIZER) \
+ : "r" (id), "m" (futex), "0" (LLL_LOCK_INITIALIZER) \
: "memory"); \
ret; })
-
-#define lll_mutex_cond_trylock(futex) \
+#define lll_cond_trylock(futex) \
({ int ret; \
__asm __volatile (LOCK_INSTR "cmpxchgl %2, %1" \
: "=a" (ret), "=m" (futex) \
- : "r" (LLL_MUTEX_LOCK_INITIALIZER_WAITERS), \
- "m" (futex), "0" (LLL_MUTEX_LOCK_INITIALIZER) \
+ : "r" (LLL_LOCK_INITIALIZER_WAITERS), \
+ "m" (futex), "0" (LLL_LOCK_INITIALIZER) \
: "memory"); \
ret; })
-
-#define lll_mutex_lock(futex) \
- (void) ({ int ignore1, ignore2, ignore3; \
- __asm __volatile (LOCK_INSTR "cmpxchgl %0, %2\n\t" \
+#if defined NOT_IN_libc || defined UP
+# define __lll_lock_asm_start LOCK_INSTR "cmpxchgl %4, %2\n\t" \
+ "jnz 1f\n\t"
+#else
+# define __lll_lock_asm_start "cmpl $0, __libc_multiple_threads(%%rip)\n\t" \
+ "je 0f\n\t" \
+ "lock; cmpxchgl %4, %2\n\t" \
"jnz 1f\n\t" \
- ".subsection 1\n\t" \
- ".type _L_mutex_lock_%=, @function\n" \
- "_L_mutex_lock_%=:\n" \
- "1:\tleaq %2, %%rdi\n" \
- "2:\tsubq $128, %%rsp\n" \
- "3:\tcallq __lll_mutex_lock_wait\n" \
- "4:\taddq $128, %%rsp\n" \
- "5:\tjmp 24f\n" \
- "6:\t.size _L_mutex_lock_%=, 6b-1b\n\t" \
- ".previous\n" \
- LLL_STUB_UNWIND_INFO_5 \
- "24:" \
- : "=S" (ignore1), "=&D" (ignore2), "=m" (futex),\
- "=a" (ignore3) \
- : "0" (1), "m" (futex), "3" (0) \
- : "cx", "r11", "cc", "memory"); })
+ "jmp 24f\n" \
+ "0:\tcmpxchgl %4, %2\n\t" \
+ "jnz 1f\n\t"
+#endif
+#define lll_lock(futex, private) \
+ (void) \
+ ({ int ignore1, ignore2, ignore3; \
+ if (__builtin_constant_p (private) && (private) == LLL_PRIVATE) \
+ __asm __volatile (__lll_lock_asm_start \
+ ".subsection 1\n\t" \
+ ".type _L_lock_%=, @function\n" \
+ "_L_lock_%=:\n" \
+ "1:\tleaq %2, %%rdi\n" \
+ "2:\tsubq $128, %%rsp\n" \
+ "3:\tcallq __lll_lock_wait_private\n" \
+ "4:\taddq $128, %%rsp\n" \
+ "5:\tjmp 24f\n" \
+ "6:\t.size _L_lock_%=, 6b-1b\n\t" \
+ ".previous\n" \
+ LLL_STUB_UNWIND_INFO_5 \
+ "24:" \
+ : "=S" (ignore1), "=&D" (ignore2), "=m" (futex), \
+ "=a" (ignore3) \
+ : "0" (1), "m" (futex), "3" (0) \
+ : "cx", "r11", "cc", "memory"); \
+ else \
+ __asm __volatile (__lll_lock_asm_start \
+ ".subsection 1\n\t" \
+ ".type _L_lock_%=, @function\n" \
+ "_L_lock_%=:\n" \
+ "1:\tleaq %2, %%rdi\n" \
+ "2:\tsubq $128, %%rsp\n" \
+ "3:\tcallq __lll_lock_wait\n" \
+ "4:\taddq $128, %%rsp\n" \
+ "5:\tjmp 24f\n" \
+ "6:\t.size _L_lock_%=, 6b-1b\n\t" \
+ ".previous\n" \
+ LLL_STUB_UNWIND_INFO_5 \
+ "24:" \
+ : "=S" (ignore1), "=D" (ignore2), "=m" (futex), \
+ "=a" (ignore3) \
+ : "1" (1), "m" (futex), "3" (0), "0" (private) \
+ : "cx", "r11", "cc", "memory"); \
+ }) \
-#define lll_robust_mutex_lock(futex, id) \
+#define lll_robust_lock(futex, id, private) \
({ int result, ignore1, ignore2; \
- __asm __volatile (LOCK_INSTR "cmpxchgl %0, %2\n\t" \
+ __asm __volatile (LOCK_INSTR "cmpxchgl %4, %2\n\t" \
"jnz 1f\n\t" \
".subsection 1\n\t" \
- ".type _L_robust_mutex_lock_%=, @function\n" \
- "_L_robust_mutex_lock_%=:\n" \
+ ".type _L_robust_lock_%=, @function\n" \
+ "_L_robust_lock_%=:\n" \
"1:\tleaq %2, %%rdi\n" \
"2:\tsubq $128, %%rsp\n" \
- "3:\tcallq __lll_robust_mutex_lock_wait\n" \
+ "3:\tcallq __lll_robust_lock_wait\n" \
"4:\taddq $128, %%rsp\n" \
"5:\tjmp 24f\n" \
- "6:\t.size _L_robust_mutex_lock_%=, 6b-1b\n\t" \
+ "6:\t.size _L_robust_lock_%=, 6b-1b\n\t" \
".previous\n" \
LLL_STUB_UNWIND_INFO_5 \
"24:" \
- : "=S" (ignore1), "=&D" (ignore2), "=m" (futex), \
+ : "=S" (ignore1), "=D" (ignore2), "=m" (futex), \
"=a" (result) \
- : "0" (id), "m" (futex), "3" (0) \
+ : "1" (id), "m" (futex), "3" (0), "0" (private) \
: "cx", "r11", "cc", "memory"); \
result; })
+#define lll_cond_lock(futex, private) \
+ (void) \
+ ({ int ignore1, ignore2, ignore3; \
+ __asm __volatile (LOCK_INSTR "cmpxchgl %4, %2\n\t" \
+ "jnz 1f\n\t" \
+ ".subsection 1\n\t" \
+ ".type _L_cond_lock_%=, @function\n" \
+ "_L_cond_lock_%=:\n" \
+ "1:\tleaq %2, %%rdi\n" \
+ "2:\tsubq $128, %%rsp\n" \
+ "3:\tcallq __lll_lock_wait\n" \
+ "4:\taddq $128, %%rsp\n" \
+ "5:\tjmp 24f\n" \
+ "6:\t.size _L_cond_lock_%=, 6b-1b\n\t" \
+ ".previous\n" \
+ LLL_STUB_UNWIND_INFO_5 \
+ "24:" \
+ : "=S" (ignore1), "=D" (ignore2), "=m" (futex), \
+ "=a" (ignore3) \
+ : "1" (2), "m" (futex), "3" (0), "0" (private) \
+ : "cx", "r11", "cc", "memory"); \
+ })
-#define lll_mutex_cond_lock(futex) \
- (void) ({ int ignore1, ignore2, ignore3; \
- __asm __volatile (LOCK_INSTR "cmpxchgl %0, %2\n\t" \
- "jnz 1f\n\t" \
- ".subsection 1\n\t" \
- ".type _L_mutex_cond_lock_%=, @function\n" \
- "_L_mutex_cond_lock_%=:\n" \
- "1:\tleaq %2, %%rdi\n" \
- "2:\tsubq $128, %%rsp\n" \
- "3:\tcallq __lll_mutex_lock_wait\n" \
- "4:\taddq $128, %%rsp\n" \
- "5:\tjmp 24f\n" \
- "6:\t.size _L_mutex_cond_lock_%=, 6b-1b\n\t" \
- ".previous\n" \
- LLL_STUB_UNWIND_INFO_5 \
- "24:" \
- : "=S" (ignore1), "=&D" (ignore2), "=m" (futex),\
- "=a" (ignore3) \
- : "0" (2), "m" (futex), "3" (0) \
- : "cx", "r11", "cc", "memory"); })
-
-
-#define lll_robust_mutex_cond_lock(futex, id) \
+#define lll_robust_cond_lock(futex, id, private) \
({ int result, ignore1, ignore2; \
- __asm __volatile (LOCK_INSTR "cmpxchgl %0, %2\n\t" \
+ __asm __volatile (LOCK_INSTR "cmpxchgl %4, %2\n\t" \
"jnz 1f\n\t" \
".subsection 1\n\t" \
- ".type _L_robust_mutex_cond_lock_%=, @function\n" \
- "_L_robust_mutex_cond_lock_%=:\n" \
+ ".type _L_robust_cond_lock_%=, @function\n" \
+ "_L_robust_cond_lock_%=:\n" \
"1:\tleaq %2, %%rdi\n" \
"2:\tsubq $128, %%rsp\n" \
- "3:\tcallq __lll_robust_mutex_lock_wait\n" \
+ "3:\tcallq __lll_robust_lock_wait\n" \
"4:\taddq $128, %%rsp\n" \
"5:\tjmp 24f\n" \
- "6:\t.size _L_robust_mutex_cond_lock_%=, 6b-1b\n\t" \
+ "6:\t.size _L_robust_cond_lock_%=, 6b-1b\n\t" \
".previous\n" \
LLL_STUB_UNWIND_INFO_5 \
"24:" \
- : "=S" (ignore1), "=&D" (ignore2), "=m" (futex), \
+ : "=S" (ignore1), "=D" (ignore2), "=m" (futex), \
"=a" (result) \
- : "0" (id | FUTEX_WAITERS), "m" (futex), "3" (0) \
+ : "1" (id | FUTEX_WAITERS), "m" (futex), "3" (0), \
+ "0" (private) \
: "cx", "r11", "cc", "memory"); \
result; })
-
-#define lll_mutex_timedlock(futex, timeout) \
+#define lll_timedlock(futex, timeout, private) \
({ int result, ignore1, ignore2, ignore3; \
- __asm __volatile (LOCK_INSTR "cmpxchgl %2, %4\n\t" \
+ __asm __volatile (LOCK_INSTR "cmpxchgl %1, %4\n\t" \
"jnz 1f\n\t" \
".subsection 1\n\t" \
- ".type _L_mutex_timedlock_%=, @function\n" \
- "_L_mutex_timedlock_%=:\n" \
+ ".type _L_timedlock_%=, @function\n" \
+ "_L_timedlock_%=:\n" \
"1:\tleaq %4, %%rdi\n" \
"0:\tmovq %8, %%rdx\n" \
"2:\tsubq $128, %%rsp\n" \
- "3:\tcallq __lll_mutex_timedlock_wait\n" \
+ "3:\tcallq __lll_timedlock_wait\n" \
"4:\taddq $128, %%rsp\n" \
"5:\tjmp 24f\n" \
- "6:\t.size _L_mutex_timedlock_%=, 6b-1b\n\t" \
+ "6:\t.size _L_timedlock_%=, 6b-1b\n\t" \
".previous\n" \
LLL_STUB_UNWIND_INFO_6 \
"24:" \
- : "=a" (result), "=&D" (ignore1), "=S" (ignore2), \
+ : "=a" (result), "=D" (ignore1), "=S" (ignore2), \
"=&d" (ignore3), "=m" (futex) \
- : "0" (0), "2" (1), "m" (futex), "m" (timeout) \
+ : "0" (0), "1" (1), "m" (futex), "m" (timeout), \
+ "2" (private) \
: "memory", "cx", "cc", "r10", "r11"); \
result; })
-
-#define lll_robust_mutex_timedlock(futex, timeout, id) \
+#define lll_robust_timedlock(futex, timeout, id, private) \
({ int result, ignore1, ignore2, ignore3; \
- __asm __volatile (LOCK_INSTR "cmpxchgl %2, %4\n\t" \
+ __asm __volatile (LOCK_INSTR "cmpxchgl %1, %4\n\t" \
"jnz 1f\n\t" \
".subsection 1\n\t" \
- ".type _L_robust_mutex_timedlock_%=, @function\n" \
- "_L_robust_mutex_timedlock_%=:\n" \
+ ".type _L_robust_timedlock_%=, @function\n" \
+ "_L_robust_timedlock_%=:\n" \
"1:\tleaq %4, %%rdi\n" \
"0:\tmovq %8, %%rdx\n" \
"2:\tsubq $128, %%rsp\n" \
- "3:\tcallq __lll_robust_mutex_timedlock_wait\n" \
+ "3:\tcallq __lll_robust_timedlock_wait\n" \
"4:\taddq $128, %%rsp\n" \
"5:\tjmp 24f\n" \
- "6:\t.size _L_robust_mutex_timedlock_%=, 6b-1b\n\t" \
+ "6:\t.size _L_robust_timedlock_%=, 6b-1b\n\t" \
".previous\n" \
LLL_STUB_UNWIND_INFO_6 \
"24:" \
- : "=a" (result), "=&D" (ignore1), "=S" (ignore2), \
+ : "=a" (result), "=D" (ignore1), "=S" (ignore2), \
"=&d" (ignore3), "=m" (futex) \
- : "0" (0), "2" (id), "m" (futex), "m" (timeout) \
+ : "0" (0), "1" (id), "m" (futex), "m" (timeout), \
+ "2" (private) \
: "memory", "cx", "cc", "r10", "r11"); \
result; })
+#if defined NOT_IN_libc || defined UP
+# define __lll_unlock_asm_start LOCK_INSTR "decl %0\n\t" \
+ "jne 1f\n\t"
+#else
+# define __lll_unlock_asm_start "cmpl $0, __libc_multiple_threads(%%rip)\n\t" \
+ "je 0f\n\t" \
+ "lock; decl %0\n\t" \
+ "jne 1f\n\t" \
+ "jmp 24f\n\t" \
+ "0:\tdecl %0\n\t" \
+ "jne 1f\n\t"
+#endif
-#define lll_mutex_unlock(futex) \
- (void) ({ int ignore; \
- __asm __volatile (LOCK_INSTR "decl %0\n\t" \
- "jne 1f\n\t" \
- ".subsection 1\n\t" \
- ".type _L_mutex_unlock_%=, @function\n" \
- "_L_mutex_unlock_%=:\n" \
- "1:\tleaq %0, %%rdi\n" \
- "2:\tsubq $128, %%rsp\n" \
- "3:\tcallq __lll_mutex_unlock_wake\n" \
- "4:\taddq $128, %%rsp\n" \
- "5:\tjmp 24f\n" \
- "6:\t.size _L_mutex_unlock_%=, 6b-1b\n\t" \
- ".previous\n" \
- LLL_STUB_UNWIND_INFO_5 \
- "24:" \
- : "=m" (futex), "=&D" (ignore) \
- : "m" (futex) \
- : "ax", "cx", "r11", "cc", "memory"); })
-
-
-#define lll_robust_mutex_unlock(futex) \
- (void) ({ int ignore; \
- __asm __volatile (LOCK_INSTR "andl %2, %0\n\t" \
- "jne 1f\n\t" \
- ".subsection 1\n\t" \
- ".type _L_robust_mutex_unlock_%=, @function\n" \
- "_L_robust_mutex_unlock_%=:\n" \
- "1:\tleaq %0, %%rdi\n" \
- "2:\tsubq $128, %%rsp\n" \
- "3:\tcallq __lll_mutex_unlock_wake\n" \
- "4:\taddq $128, %%rsp\n" \
- "5:\tjmp 24f\n" \
- "6:\t.size _L_robust_mutex_unlock_%=, 6b-1b\n\t"\
- ".previous\n" \
- LLL_STUB_UNWIND_INFO_5 \
- "24:" \
- : "=m" (futex), "=&D" (ignore) \
- : "i" (FUTEX_WAITERS), "m" (futex) \
- : "ax", "cx", "r11", "cc", "memory"); })
-
-
-#define lll_robust_mutex_dead(futex) \
- (void) ({ int ignore; \
- __asm __volatile (LOCK_INSTR "orl %3, (%2)\n\t" \
- "syscall" \
- : "=m" (futex), "=a" (ignore) \
- : "D" (&(futex)), "i" (FUTEX_OWNER_DIED), \
- "S" (FUTEX_WAKE), "1" (__NR_futex), \
- "d" (1) \
- : "cx", "r11", "cc", "memory"); })
-
+#define lll_unlock(futex, private) \
+ (void) \
+ ({ int ignore; \
+ if (__builtin_constant_p (private) && (private) == LLL_PRIVATE) \
+ __asm __volatile (__lll_unlock_asm_start \
+ ".subsection 1\n\t" \
+ ".type _L_unlock_%=, @function\n" \
+ "_L_unlock_%=:\n" \
+ "1:\tleaq %0, %%rdi\n" \
+ "2:\tsubq $128, %%rsp\n" \
+ "3:\tcallq __lll_unlock_wake_private\n" \
+ "4:\taddq $128, %%rsp\n" \
+ "5:\tjmp 24f\n" \
+ "6:\t.size _L_unlock_%=, 6b-1b\n\t" \
+ ".previous\n" \
+ LLL_STUB_UNWIND_INFO_5 \
+ "24:" \
+ : "=m" (futex), "=&D" (ignore) \
+ : "m" (futex) \
+ : "ax", "cx", "r11", "cc", "memory"); \
+ else \
+ __asm __volatile (__lll_unlock_asm_start \
+ ".subsection 1\n\t" \
+ ".type _L_unlock_%=, @function\n" \
+ "_L_unlock_%=:\n" \
+ "1:\tleaq %0, %%rdi\n" \
+ "2:\tsubq $128, %%rsp\n" \
+ "3:\tcallq __lll_unlock_wake\n" \
+ "4:\taddq $128, %%rsp\n" \
+ "5:\tjmp 24f\n" \
+ "6:\t.size _L_unlock_%=, 6b-1b\n\t" \
+ ".previous\n" \
+ LLL_STUB_UNWIND_INFO_5 \
+ "24:" \
+ : "=m" (futex), "=&D" (ignore) \
+ : "m" (futex), "S" (private) \
+ : "ax", "cx", "r11", "cc", "memory"); \
+ })
+
+#define lll_robust_unlock(futex, private) \
+ do \
+ { \
+ int ignore; \
+ __asm __volatile (LOCK_INSTR "andl %2, %0\n\t" \
+ "jne 1f\n\t" \
+ ".subsection 1\n\t" \
+ ".type _L_robust_unlock_%=, @function\n" \
+ "_L_robust_unlock_%=:\n" \
+ "1:\tleaq %0, %%rdi\n" \
+ "2:\tsubq $128, %%rsp\n" \
+ "3:\tcallq __lll_unlock_wake\n" \
+ "4:\taddq $128, %%rsp\n" \
+ "5:\tjmp 24f\n" \
+ "6:\t.size _L_robust_unlock_%=, 6b-1b\n\t" \
+ ".previous\n" \
+ LLL_STUB_UNWIND_INFO_5 \
+ "24:" \
+ : "=m" (futex), "=&D" (ignore) \
+ : "i" (FUTEX_WAITERS), "m" (futex), \
+ "S" (private) \
+ : "ax", "cx", "r11", "cc", "memory"); \
+ } \
+ while (0)
+
+#define lll_robust_dead(futex, private) \
+ do \
+ { \
+ int ignore; \
+ __asm __volatile (LOCK_INSTR "orl %3, (%2)\n\t" \
+ "syscall" \
+ : "=m" (futex), "=a" (ignore) \
+ : "D" (&(futex)), "i" (FUTEX_OWNER_DIED), \
+ "S" (__lll_private_flag (FUTEX_WAKE, private)), \
+ "1" (__NR_futex), "d" (1) \
+ : "cx", "r11", "cc", "memory"); \
+ } \
+ while (0)
/* Returns non-zero if error happened, zero if success. */
#define lll_futex_requeue(ftx, nr_wake, nr_move, mutex, val) \
@@ -461,117 +541,13 @@ extern int __lll_mutex_unlock_wait (int
__asm __volatile ("syscall" \
: "=a" (__res) \
: "0" (__NR_futex), "D" ((void *) ftx), \
- "S" (FUTEX_CMP_REQUEUE), "d" (nr_wake), \
- "r" (__nr_move), "r" (__mutex), "r" (__val) \
+ "S" (FUTEX_CMP_REQUEUE), "d" (nr_wake), \
+ "r" (__nr_move), "r" (__mutex), "r" (__val) \
: "cx", "r11", "cc", "memory"); \
__res < 0; })
-
-#define lll_mutex_islocked(futex) \
- (futex != LLL_MUTEX_LOCK_INITIALIZER)
-
-
-/* We have a separate internal lock implementation which is not tied
- to binary compatibility. */
-
-/* Type for lock object. */
-typedef int lll_lock_t;
-
-/* Initializers for lock. */
-#define LLL_LOCK_INITIALIZER (0)
-#define LLL_LOCK_INITIALIZER_LOCKED (1)
-
-
-/* The states of a lock are:
- 0 - untaken
- 1 - taken by one user
- 2 - taken by more users */
-
-
-#if defined NOT_IN_libc || defined UP
-# define lll_trylock(futex) lll_mutex_trylock (futex)
-# define lll_lock(futex) lll_mutex_lock (futex)
-# define lll_unlock(futex) lll_mutex_unlock (futex)
-#else
-/* Special versions of the macros for use in libc itself. They avoid
- the lock prefix when the thread library is not used.
-
- The code sequence to avoid unnecessary lock prefixes is what the AMD
- guys suggested. If you do not like it, bring it up with AMD.
-
- XXX In future we might even want to avoid it on UP machines. */
-
-# define lll_trylock(futex) \
- ({ unsigned char ret; \
- __asm __volatile ("cmpl $0, __libc_multiple_threads(%%rip)\n\t" \
- "je 0f\n\t" \
- "lock; cmpxchgl %2, %1\n\t" \
- "jmp 1f\n" \
- "0:\tcmpxchgl %2, %1\n\t" \
- "1:setne %0" \
- : "=a" (ret), "=m" (futex) \
- : "r" (LLL_MUTEX_LOCK_INITIALIZER_LOCKED), "m" (futex),\
- "0" (LLL_MUTEX_LOCK_INITIALIZER) \
- : "memory"); \
- ret; })
-
-
-# define lll_lock(futex) \
- (void) ({ int ignore1, ignore2, ignore3; \
- __asm __volatile ("cmpl $0, __libc_multiple_threads(%%rip)\n\t" \
- "je 0f\n\t" \
- "lock; cmpxchgl %0, %2\n\t" \
- "jnz 1f\n\t" \
- "jmp 24f\n" \
- "0:\tcmpxchgl %0, %2\n\t" \
- "jnz 1f\n\t" \
- ".subsection 1\n\t" \
- ".type _L_lock_%=, @function\n" \
- "_L_lock_%=:\n" \
- "1:\tleaq %2, %%rdi\n" \
- "2:\tsubq $128, %%rsp\n" \
- "3:\tcallq __lll_mutex_lock_wait\n" \
- "4:\taddq $128, %%rsp\n" \
- "5:\tjmp 24f\n" \
- "6:\t.size _L_lock_%=, 6b-1b\n\t" \
- ".previous\n" \
- LLL_STUB_UNWIND_INFO_5 \
- "24:" \
- : "=S" (ignore1), "=&D" (ignore2), "=m" (futex),\
- "=a" (ignore3) \
- : "0" (1), "m" (futex), "3" (0) \
- : "cx", "r11", "cc", "memory"); })
-
-
-# define lll_unlock(futex) \
- (void) ({ int ignore; \
- __asm __volatile ("cmpl $0, __libc_multiple_threads(%%rip)\n\t" \
- "je 0f\n\t" \
- "lock; decl %0\n\t" \
- "jne 1f\n\t" \
- "jmp 24f\n" \
- "0:\tdecl %0\n\t" \
- "jne 1f\n\t" \
- ".subsection 1\n\t" \
- ".type _L_unlock_%=, @function\n" \
- "_L_unlock_%=:\n" \
- "1:\tleaq %0, %%rdi\n" \
- "2:\tsubq $128, %%rsp\n" \
- "3:\tcallq __lll_mutex_unlock_wake\n" \
- "4:\taddq $128, %%rsp\n" \
- "5:\tjmp 24f\n" \
- "6:\t.size _L_unlock_%=, 6b-1b\n\t" \
- ".previous\n" \
- LLL_STUB_UNWIND_INFO_5 \
- "24:" \
- : "=m" (futex), "=&D" (ignore) \
- : "m" (futex) \
- : "ax", "cx", "r11", "cc", "memory"); })
-#endif
-
-
#define lll_islocked(futex) \
- (futex != LLL_MUTEX_LOCK_INITIALIZER)
+ (futex != LLL_LOCK_INITIALIZER)
/* The kernel notifies a process with uses CLONE_CLEARTID via futex
@@ -610,25 +586,6 @@ extern int __lll_timedwait_tid (int *tid
} \
__result; })
-
-/* Conditional variable handling. */
-
-extern void __lll_cond_wait (pthread_cond_t *cond) attribute_hidden;
-extern int __lll_cond_timedwait (pthread_cond_t *cond,
- const struct timespec *abstime)
- attribute_hidden;
-extern void __lll_cond_wake (pthread_cond_t *cond) attribute_hidden;
-extern void __lll_cond_broadcast (pthread_cond_t *cond) attribute_hidden;
-
-
-#define lll_cond_wait(cond) \
- __lll_cond_wait (cond)
-#define lll_cond_timedwait(cond, abstime) \
- __lll_cond_timedwait (cond, abstime)
-#define lll_cond_wake(cond) \
- __lll_cond_wake (cond)
-#define lll_cond_broadcast(cond) \
- __lll_cond_broadcast (cond)
-
+#endif /* !__ASSEMBLER__ */
#endif /* lowlevellock.h */
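To make the LLL_PRIVATE/LLL_SHARED convention above concrete: the values
look backwards on purpose (private is 0, shared carries
FUTEX_PRIVATE_FLAG), because the flag is inverted exactly once when the
futex operation word is built, so it is the private callers that end up
passing FUTEX_PRIVATE_FLAG to the kernel.  A minimal standalone sketch,
not part of the patch; futex_op below is only a made-up stand-in for
what __lll_private_flag does when private futexes can be assumed:

#include <assert.h>

#define FUTEX_WAKE          1
#define FUTEX_PRIVATE_FLAG  128

#define LLL_PRIVATE 0
#define LLL_SHARED  FUTEX_PRIVATE_FLAG

/* Hypothetical helper standing in for __lll_private_flag in the
   __ASSUME_PRIVATE_FUTEX case.  */
static int
futex_op (int op, int private)
{
  return (op | FUTEX_PRIVATE_FLAG) ^ private;
}

int
main (void)
{
  /* Private callers get the private-futex operation...  */
  assert (futex_op (FUTEX_WAKE, LLL_PRIVATE)
          == (FUTEX_WAKE | FUTEX_PRIVATE_FLAG));
  /* ...and shared callers the plain, shared one.  */
  assert (futex_op (FUTEX_WAKE, LLL_SHARED) == FUTEX_WAKE);
  return 0;
}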
--- libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S.jj 2007-06-04 08:42:06.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S 2007-07-29 11:48:55.000000000 +0200
@@ -19,19 +19,10 @@
#include <sysdep.h>
#include <shlib-compat.h>
+#include <lowlevellock.h>
#include <lowlevelcond.h>
#include <pthread-errnos.h>
-#ifdef UP
-# define LOCK
-#else
-# define LOCK lock
-#endif
-
-#define SYS_futex 202
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-
/* For the calculation see asm/vsyscall.h. */
#define VSYSCALL_ADDR_vgettimeofday 0xffffffffff600000
@@ -301,7 +292,9 @@ __pthread_cond_timedwait:
#if cond_lock != 0
addq $cond_lock, %rdi
#endif
- callq __lll_mutex_lock_wait
+ /* XYZ */
+ movl $LLL_SHARED, %esi
+ callq __lll_lock_wait
jmp 2b
/* Unlock in loop requires wakeup. */
@@ -309,7 +302,9 @@ __pthread_cond_timedwait:
#if cond_lock != 0
addq $cond_lock, %rdi
#endif
- callq __lll_mutex_unlock_wake
+ /* XYZ */
+ movl $LLL_SHARED, %esi
+ callq __lll_unlock_wake
jmp 4b
/* Locking in loop failed. */
@@ -317,7 +312,9 @@ __pthread_cond_timedwait:
#if cond_lock != 0
addq $cond_lock, %rdi
#endif
- callq __lll_mutex_lock_wait
+ /* XYZ */
+ movl $LLL_SHARED, %esi
+ callq __lll_lock_wait
#if cond_lock != 0
subq $cond_lock, %rdi
#endif
@@ -328,7 +325,9 @@ __pthread_cond_timedwait:
#if cond_lock != 0
addq $cond_lock, %rdi
#endif
- callq __lll_mutex_unlock_wake
+ /* XYZ */
+ movl $LLL_SHARED, %esi
+ callq __lll_unlock_wake
jmp 11b
/* The initial unlocking of the mutex failed. */
@@ -345,7 +344,9 @@ __pthread_cond_timedwait:
#if cond_lock != 0
addq $cond_lock, %rdi
#endif
- callq __lll_mutex_unlock_wake
+ /* XYZ */
+ movl $LLL_SHARED, %esi
+ callq __lll_unlock_wake
17: movq (%rsp), %rax
jmp 18b
--- libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedwrlock.S.jj 2007-07-24 10:50:55.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedwrlock.S 2007-07-29 11:48:55.000000000 +0200
@@ -18,26 +18,15 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <lowlevelrwlock.h>
#include <pthread-errnos.h>
#include <kernel-features.h>
-#define SYS_futex 202
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-#define FUTEX_PRIVATE_FLAG 128
-
/* For the calculation see asm/vsyscall.h. */
#define VSYSCALL_ADDR_vgettimeofday 0xffffffffff600000
-#ifndef UP
-# define LOCK lock
-#else
-# define LOCK
-#endif
-
-
.text
.globl pthread_rwlock_timedwrlock
@@ -168,11 +157,11 @@ pthread_rwlock_timedwrlock:
popq %r12
retq
-1:
+1: movl PSHARED(%rdi), %esi
#if MUTEX != 0
addq $MUTEX, %rdi
#endif
- callq __lll_mutex_lock_wait
+ callq __lll_lock_wait
jmp 2b
14: cmpl %fs:TID, %eax
@@ -180,13 +169,13 @@ pthread_rwlock_timedwrlock:
20: movl $EDEADLK, %edx
jmp 9b
-6:
+6: movl PSHARED(%r12), %esi
#if MUTEX == 0
movq %r12, %rdi
#else
leal MUTEX(%r12), %rdi
#endif
- callq __lll_mutex_unlock_wake
+ callq __lll_unlock_wake
jmp 7b
/* Overflow. */
@@ -194,22 +183,22 @@ pthread_rwlock_timedwrlock:
movl $EAGAIN, %edx
jmp 9b
-10:
+10: movl PSHARED(%r12), %esi
#if MUTEX == 0
movq %r12, %rdi
#else
leaq MUTEX(%r12), %rdi
#endif
- callq __lll_mutex_unlock_wake
+ callq __lll_unlock_wake
jmp 11b
-12:
+12: movl PSHARED(%r12), %esi
#if MUTEX == 0
movq %r12, %rdi
#else
leaq MUTEX(%r12), %rdi
#endif
- callq __lll_mutex_lock_wait
+ callq __lll_lock_wait
jmp 13b
16: movq $-ETIMEDOUT, %rdx
--- libc/nptl/sysdeps/unix/sysv/linux/sem_timedwait.c.jj 2007-06-08 09:13:52.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/sem_timedwait.c 2007-07-29 11:48:55.000000000 +0200
@@ -85,8 +85,7 @@ sem_timedwait (sem_t *sem, const struct
int oldtype = __pthread_enable_asynccancel ();
err = lll_futex_timed_wait (&isem->value, 0, &rt,
- // XYZ check mutex flag
- LLL_SHARED);
+ isem->private ^ FUTEX_PRIVATE_FLAG);
/* Disable asynchronous cancellation. */
__pthread_disable_asynccancel (oldtype);
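Note that the sem_* hunks assume the per-semaphore private field is
stored in the opposite convention: isem->private would hold
FUTEX_PRIVATE_FLAG for a process-private semaphore and 0 for a
process-shared one (the initialization is not part of these hunks), and
the XOR with FUTEX_PRIVATE_FLAG converts that into the LLL_* value that
lll_futex_wait and lll_futex_wake expect.  A tiny standalone check, not
part of the patch:

#include <assert.h>

#define FUTEX_PRIVATE_FLAG 128
#define LLL_PRIVATE 0
#define LLL_SHARED  FUTEX_PRIVATE_FLAG

int
main (void)
{
  /* Hypothetical stored values for isem->private.  */
  int private_sem = FUTEX_PRIVATE_FLAG;
  int shared_sem = 0;

  assert ((private_sem ^ FUTEX_PRIVATE_FLAG) == LLL_PRIVATE);
  assert ((shared_sem ^ FUTEX_PRIVATE_FLAG) == LLL_SHARED);
  return 0;
}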
--- libc/nptl/sysdeps/unix/sysv/linux/sparc/pthread_barrier_init.c.jj 2007-07-30 22:47:08.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/sparc/pthread_barrier_init.c 2007-07-30 22:47:50.000000000 +0200
@@ -0,0 +1,55 @@
+/* Copyright (C) 2002, 2006, 2007 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <errno.h>
+#include "pthreadP.h"
+#include <lowlevellock.h>
+
+int
+pthread_barrier_init (barrier, attr, count)
+ pthread_barrier_t *barrier;
+ const pthread_barrierattr_t *attr;
+ unsigned int count;
+{
+ union sparc_pthread_barrier *ibarrier;
+
+ if (__builtin_expect (count == 0, 0))
+ return EINVAL;
+
+ struct pthread_barrierattr *iattr = (struct pthread_barrierattr *) attr;
+ if (iattr != NULL)
+ {
+ if (iattr->pshared != PTHREAD_PROCESS_PRIVATE
+ && __builtin_expect (iattr->pshared != PTHREAD_PROCESS_SHARED, 0))
+ /* Invalid attribute. */
+ return EINVAL;
+ }
+
+ ibarrier = (union sparc_pthread_barrier *) barrier;
+
+ /* Initialize the individual fields. */
+ ibarrier->b.lock = LLL_LOCK_INITIALIZER;
+ ibarrier->b.left = count;
+ ibarrier->b.init_count = count;
+ ibarrier->b.curr_event = 0;
+ ibarrier->s.left_lock = 0;
+ ibarrier->s.pshared = (iattr && iattr->pshared == PTHREAD_PROCESS_SHARED);
+
+ return 0;
+}
--- libc/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/pthread_barrier_init.c.jj 2006-01-04 00:46:19.000000000 +0100
+++ libc/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/pthread_barrier_init.c 2007-07-30 22:46:57.000000000 +0200
@@ -1,62 +0,0 @@
-/* Copyright (C) 2002, 2006 Free Software Foundation, Inc.
- This file is part of the GNU C Library.
- Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
-
- The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- The GNU C Library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- 02111-1307 USA. */
-
-#include <errno.h>
-#include "pthreadP.h"
-#include <lowlevellock.h>
-
-struct sparc_pthread_barrier
-{
- struct pthread_barrier b;
- unsigned char left_lock;
- unsigned char pshared;
-};
-
-int
-pthread_barrier_init (barrier, attr, count)
- pthread_barrier_t *barrier;
- const pthread_barrierattr_t *attr;
- unsigned int count;
-{
- struct sparc_pthread_barrier *ibarrier;
-
- if (__builtin_expect (count == 0, 0))
- return EINVAL;
-
- struct pthread_barrierattr *iattr = (struct pthread_barrierattr *) attr;
- if (iattr != NULL)
- {
- if (iattr->pshared != PTHREAD_PROCESS_PRIVATE
- && __builtin_expect (iattr->pshared != PTHREAD_PROCESS_SHARED, 0))
- /* Invalid attribute. */
- return EINVAL;
- }
-
- ibarrier = (struct sparc_pthread_barrier *) barrier;
-
- /* Initialize the individual fields. */
- ibarrier->b.lock = LLL_LOCK_INITIALIZER;
- ibarrier->b.left = count;
- ibarrier->b.init_count = count;
- ibarrier->b.curr_event = 0;
- ibarrier->left_lock = 0;
- ibarrier->pshared = (iattr && iattr->pshared == PTHREAD_PROCESS_SHARED);
-
- return 0;
-}
--- libc/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/lowlevellock.c.jj 2007-06-04 08:42:06.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/lowlevellock.c 2007-07-30 22:18:37.000000000 +0200
@@ -25,20 +25,35 @@
void
-__lll_lock_wait (int *futex)
+__lll_lock_wait_private (int *futex)
{
do
{
int oldval = atomic_compare_and_exchange_val_24_acq (futex, 2, 1);
if (oldval != 0)
- lll_futex_wait (futex, 2);
+ lll_futex_wait (futex, 2, LLL_PRIVATE);
+ }
+ while (atomic_compare_and_exchange_val_24_acq (futex, 2, 0) != 0);
+}
+
+#ifdef IS_IN_libpthread
+/* These functions don't get included in libc.so */
+
+void
+__lll_lock_wait (int *futex, int private)
+{
+ do
+ {
+ int oldval = atomic_compare_and_exchange_val_24_acq (futex, 2, 1);
+ if (oldval != 0)
+ lll_futex_wait (futex, 2, private);
}
while (atomic_compare_and_exchange_val_24_acq (futex, 2, 0) != 0);
}
int
-__lll_timedlock_wait (int *futex, const struct timespec *abstime)
+__lll_timedlock_wait (int *futex, const struct timespec *abstime, int private)
{
/* Reject invalid timeouts. */
if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
@@ -68,7 +83,7 @@ __lll_timedlock_wait (int *futex, const
/* Wait. */
int oldval = atomic_compare_and_exchange_val_24_acq (futex, 2, 1);
if (oldval != 0)
- lll_futex_timed_wait (futex, 2, &rt);
+ lll_futex_timed_wait (futex, 2, &rt, private);
}
while (atomic_compare_and_exchange_val_24_acq (futex, 2, 0) != 0);
@@ -76,8 +91,6 @@ __lll_timedlock_wait (int *futex, const
}
-/* This function doesn't get included in libc.so */
-#ifdef IS_IN_libpthread
int
__lll_timedwait_tid (int *tidp, const struct timespec *abstime)
{
--- libc/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/sparcv9/pthread_barrier_init.c.jj 2006-01-04 00:58:44.000000000 +0100
+++ libc/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/sparcv9/pthread_barrier_init.c 2007-07-30 22:50:27.000000000 +0200
@@ -1 +0,0 @@
-#include "../../../../../../../pthread_barrier_init.c"
--- libc/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/sparcv9/pthread_barrier_wait.c.jj 2007-01-11 00:19:18.000000000 +0100
+++ libc/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/sparcv9/pthread_barrier_wait.c 2007-07-30 22:50:42.000000000 +0200
@@ -1 +1 @@
-#include "../../../../../../../pthread_barrier_wait.c"
+#include "../../pthread_barrier_wait.c"
--- libc/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/pthread_barrier_wait.c.jj 2007-06-08 09:13:53.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/pthread_barrier_wait.c 2007-07-31 12:40:13.000000000 +0200
@@ -22,24 +22,18 @@
#include <lowlevellock.h>
#include <pthreadP.h>
-struct sparc_pthread_barrier
-{
- struct pthread_barrier b;
- unsigned char left_lock;
- unsigned char pshared;
-};
-
/* Wait on barrier. */
int
pthread_barrier_wait (barrier)
pthread_barrier_t *barrier;
{
- struct sparc_pthread_barrier *ibarrier
- = (struct sparc_pthread_barrier *) barrier;
+ union sparc_pthread_barrier *ibarrier
+ = (union sparc_pthread_barrier *) barrier;
int result = 0;
+ int private = ibarrier->s.pshared ? LLL_SHARED : LLL_PRIVATE;
/* Make sure we are alone. */
- lll_lock (ibarrier->b.lock);
+ lll_lock (ibarrier->b.lock, private);
/* One more arrival. */
--ibarrier->b.left;
@@ -52,9 +46,7 @@ pthread_barrier_wait (barrier)
++ibarrier->b.curr_event;
/* Wake up everybody. */
- lll_futex_wake (&ibarrier->b.curr_event, INT_MAX,
- // XYZ check mutex flag
- LLL_SHARED);
+ lll_futex_wake (&ibarrier->b.curr_event, INT_MAX, private);
/* This is the thread which finished the serialization. */
result = PTHREAD_BARRIER_SERIAL_THREAD;
@@ -66,13 +58,11 @@ pthread_barrier_wait (barrier)
unsigned int event = ibarrier->b.curr_event;
/* Before suspending, make the barrier available to others. */
- lll_unlock (ibarrier->b.lock);
+ lll_unlock (ibarrier->b.lock, private);
/* Wait for the event counter of the barrier to change. */
do
- lll_futex_wait (&ibarrier->b.curr_event, event,
- // XYZ check mutex flag
- LLL_SHARED);
+ lll_futex_wait (&ibarrier->b.curr_event, event, private);
while (event == ibarrier->b.curr_event);
}
@@ -80,11 +70,11 @@ pthread_barrier_wait (barrier)
unsigned int init_count = ibarrier->b.init_count;
/* If this was the last woken thread, unlock. */
- if (__atomic_is_v9 || ibarrier->pshared == 0)
+ if (__atomic_is_v9 || ibarrier->s.pshared == 0)
{
if (atomic_increment_val (&ibarrier->b.left) == init_count)
/* We are done. */
- lll_unlock (ibarrier->b.lock);
+ lll_unlock (ibarrier->b.lock, private);
}
else
{
@@ -92,12 +82,12 @@ pthread_barrier_wait (barrier)
/* Slightly more complicated. On pre-v9 CPUs, atomic_increment_val
is only atomic for threads within the same process, not for
multiple processes. */
- __sparc32_atomic_do_lock24 (&ibarrier->left_lock);
+ __sparc32_atomic_do_lock24 (&ibarrier->s.left_lock);
left = ++ibarrier->b.left;
- __sparc32_atomic_do_unlock24 (&ibarrier->left_lock);
+ __sparc32_atomic_do_unlock24 (&ibarrier->s.left_lock);
if (left == init_count)
/* We are done. */
- lll_unlock (ibarrier->b.lock);
+ lll_unlock (ibarrier->b.lock, private);
}
return result;
--- libc/nptl/sysdeps/unix/sysv/linux/sparc/pthread_barrier_destroy.c.jj 2007-07-30 22:30:29.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/sparc/pthread_barrier_destroy.c 2007-07-30 22:46:38.000000000 +0200
@@ -0,0 +1,45 @@
+/* Copyright (C) 2002, 2007 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <errno.h>
+#include "pthreadP.h"
+#include <lowlevellock.h>
+
+int
+pthread_barrier_destroy (barrier)
+ pthread_barrier_t *barrier;
+{
+ union sparc_pthread_barrier *ibarrier;
+ int result = EBUSY;
+
+ ibarrier = (union sparc_pthread_barrier *) barrier;
+
+ int private = ibarrier->s.pshared ? LLL_SHARED : LLL_PRIVATE;
+
+ lll_lock (ibarrier->b.lock, private);
+
+ if (__builtin_expect (ibarrier->b.left == ibarrier->b.init_count, 1))
+ /* The barrier is not used anymore. */
+ result = 0;
+ else
+ /* Still used, return with an error. */
+ lll_unlock (ibarrier->b.lock, private);
+
+ return result;
+}
--- libc/nptl/sysdeps/unix/sysv/linux/sparc/lowlevellock.h.jj 2007-07-29 11:45:14.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/sparc/lowlevellock.h 2007-07-31 12:40:13.000000000 +0200
@@ -70,9 +70,6 @@
#endif
-/* Initializer for compatibility lock. */
-#define LLL_MUTEX_LOCK_INITIALIZER (0)
-
#define lll_futex_wait(futexp, val, private) \
lll_futex_timed_wait (futexp, val, NULL, private)
@@ -110,12 +107,12 @@
INTERNAL_SYSCALL_ERROR_P (__ret, __err); \
})
-#define lll_robust_mutex_dead(futexv) \
+#define lll_robust_dead(futexv, private) \
do \
{ \
int *__futexp = &(futexv); \
atomic_or (__futexp, FUTEX_OWNER_DIED); \
- lll_futex_wake (__futexp, 1, LLL_SHARED); \
+ lll_futex_wake (__futexp, 1, private); \
} \
while (0)
@@ -139,146 +136,132 @@
static inline int
__attribute__ ((always_inline))
-__lll_mutex_trylock (int *futex)
+__lll_trylock (int *futex)
{
return atomic_compare_and_exchange_val_24_acq (futex, 1, 0) != 0;
}
-#define lll_mutex_trylock(futex) __lll_mutex_trylock (&(futex))
+#define lll_trylock(futex) __lll_trylock (&(futex))
static inline int
__attribute__ ((always_inline))
-__lll_mutex_cond_trylock (int *futex)
+__lll_cond_trylock (int *futex)
{
return atomic_compare_and_exchange_val_24_acq (futex, 2, 0) != 0;
}
-#define lll_mutex_cond_trylock(futex) __lll_mutex_cond_trylock (&(futex))
+#define lll_cond_trylock(futex) __lll_cond_trylock (&(futex))
static inline int
__attribute__ ((always_inline))
-__lll_robust_mutex_trylock (int *futex, int id)
+__lll_robust_trylock (int *futex, int id)
{
return atomic_compare_and_exchange_val_acq (futex, id, 0) != 0;
}
-#define lll_robust_mutex_trylock(futex, id) \
- __lll_robust_mutex_trylock (&(futex), id)
+#define lll_robust_trylock(futex, id) \
+ __lll_robust_trylock (&(futex), id)
-extern void __lll_lock_wait (int *futex) attribute_hidden;
-extern int __lll_robust_lock_wait (int *futex) attribute_hidden;
+extern void __lll_lock_wait_private (int *futex) attribute_hidden;
+extern void __lll_lock_wait (int *futex, int private) attribute_hidden;
+extern int __lll_robust_lock_wait (int *futex, int private) attribute_hidden;
static inline void
__attribute__ ((always_inline))
-__lll_mutex_lock (int *futex)
+__lll_lock (int *futex, int private)
{
int val = atomic_compare_and_exchange_val_24_acq (futex, 1, 0);
if (__builtin_expect (val != 0, 0))
- __lll_lock_wait (futex);
+ {
+ if (__builtin_constant_p (private) && private == LLL_PRIVATE)
+ __lll_lock_wait_private (futex);
+ else
+ __lll_lock_wait (futex, private);
+ }
}
-#define lll_mutex_lock(futex) __lll_mutex_lock (&(futex))
+#define lll_lock(futex, private) __lll_lock (&(futex), private)
static inline int
__attribute__ ((always_inline))
-__lll_robust_mutex_lock (int *futex, int id)
+__lll_robust_lock (int *futex, int id, int private)
{
int result = 0;
if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0)
- result = __lll_robust_lock_wait (futex);
+ result = __lll_robust_lock_wait (futex, private);
return result;
}
-#define lll_robust_mutex_lock(futex, id) \
- __lll_robust_mutex_lock (&(futex), id)
+#define lll_robust_lock(futex, id, private) \
+ __lll_robust_lock (&(futex), id, private)
static inline void
__attribute__ ((always_inline))
-__lll_mutex_cond_lock (int *futex)
+__lll_cond_lock (int *futex, int private)
{
int val = atomic_compare_and_exchange_val_24_acq (futex, 2, 0);
if (__builtin_expect (val != 0, 0))
- __lll_lock_wait (futex);
+ __lll_lock_wait (futex, private);
}
-#define lll_mutex_cond_lock(futex) __lll_mutex_cond_lock (&(futex))
+#define lll_cond_lock(futex, private) __lll_cond_lock (&(futex), private)
-#define lll_robust_mutex_cond_lock(futex, id) \
- __lll_robust_mutex_lock (&(futex), (id) | FUTEX_WAITERS)
+#define lll_robust_cond_lock(futex, id, private) \
+ __lll_robust_lock (&(futex), (id) | FUTEX_WAITERS, private)
-extern int __lll_timedlock_wait (int *futex, const struct timespec *)
- attribute_hidden;
-extern int __lll_robust_timedlock_wait (int *futex, const struct timespec *)
- attribute_hidden;
+extern int __lll_timedlock_wait (int *futex, const struct timespec *,
+ int private) attribute_hidden;
+extern int __lll_robust_timedlock_wait (int *futex, const struct timespec *,
+ int private) attribute_hidden;
static inline int
__attribute__ ((always_inline))
-__lll_mutex_timedlock (int *futex, const struct timespec *abstime)
+__lll_timedlock (int *futex, const struct timespec *abstime, int private)
{
int val = atomic_compare_and_exchange_val_24_acq (futex, 1, 0);
int result = 0;
if (__builtin_expect (val != 0, 0))
- result = __lll_timedlock_wait (futex, abstime);
+ result = __lll_timedlock_wait (futex, abstime, private);
return result;
}
-#define lll_mutex_timedlock(futex, abstime) \
- __lll_mutex_timedlock (&(futex), abstime)
+#define lll_timedlock(futex, abstime, private) \
+ __lll_timedlock (&(futex), abstime, private)
static inline int
__attribute__ ((always_inline))
-__lll_robust_mutex_timedlock (int *futex, const struct timespec *abstime,
- int id)
+__lll_robust_timedlock (int *futex, const struct timespec *abstime,
+ int id, int private)
{
int result = 0;
if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0)
- result = __lll_robust_timedlock_wait (futex, abstime);
+ result = __lll_robust_timedlock_wait (futex, abstime, private);
return result;
}
-#define lll_robust_mutex_timedlock(futex, abstime, id) \
- __lll_robust_mutex_timedlock (&(futex), abstime, id)
+#define lll_robust_timedlock(futex, abstime, id, private) \
+ __lll_robust_timedlock (&(futex), abstime, id, private)
-#define lll_mutex_unlock(lock) \
+#define lll_unlock(lock, private) \
((void) ({ \
int *__futex = &(lock); \
int __val = atomic_exchange_24_rel (__futex, 0); \
if (__builtin_expect (__val > 1, 0)) \
- lll_futex_wake (__futex, 1, LLL_SHARED); \
+ lll_futex_wake (__futex, 1, private); \
}))
-#define lll_robust_mutex_unlock(lock) \
+#define lll_robust_unlock(lock, private) \
((void) ({ \
int *__futex = &(lock); \
int __val = atomic_exchange_rel (__futex, 0); \
if (__builtin_expect (__val & FUTEX_WAITERS, 0)) \
- lll_futex_wake (__futex, 1, LLL_SHARED); \
- }))
-
-#define lll_mutex_unlock_force(lock) \
- ((void) ({ \
- int *__futex = &(lock); \
- (void) atomic_exchange_24_rel (__futex, 0); \
- lll_futex_wake (__futex, 1, LLL_SHARED); \
+ lll_futex_wake (__futex, 1, private); \
}))
-#define lll_mutex_islocked(futex) \
+#define lll_islocked(futex) \
(futex != 0)
-
-/* We have a separate internal lock implementation which is not tied
- to binary compatibility. We can use the lll_mutex_*. */
-
-/* Type for lock object. */
-typedef int lll_lock_t;
-
/* Initializers for lock. */
#define LLL_LOCK_INITIALIZER (0)
#define LLL_LOCK_INITIALIZER_LOCKED (1)
-#define lll_trylock(futex) lll_mutex_trylock (futex)
-#define lll_lock(futex) lll_mutex_lock (futex)
-#define lll_unlock(futex) lll_mutex_unlock (futex)
-#define lll_islocked(futex) lll_mutex_islocked (futex)
-
-
/* The kernel notifies a process with uses CLONE_CLEARTID via futex
wakeup when the clone terminates. The memory location contains the
thread ID while the clone is running and is reset to zero
@@ -303,26 +286,4 @@ extern int __lll_timedwait_tid (int *, c
__res; \
})
-
-/* Conditional variable handling. */
-
-extern void __lll_cond_wait (pthread_cond_t *cond)
- attribute_hidden;
-extern int __lll_cond_timedwait (pthread_cond_t *cond,
- const struct timespec *abstime)
- attribute_hidden;
-extern void __lll_cond_wake (pthread_cond_t *cond)
- attribute_hidden;
-extern void __lll_cond_broadcast (pthread_cond_t *cond)
- attribute_hidden;
-
-#define lll_cond_wait(cond) \
- __lll_cond_wait (cond)
-#define lll_cond_timedwait(cond, abstime) \
- __lll_cond_timedwait (cond, abstime)
-#define lll_cond_wake(cond) \
- __lll_cond_wake (cond)
-#define lll_cond_broadcast(cond) \
- __lll_cond_broadcast (cond)
-
#endif /* lowlevellock.h */
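The sparc inline __lll_lock above uses the same dispatch on
__builtin_constant_p as the x86_64 lll_lock macro earlier in the patch:
a literal LLL_PRIVATE at the call site statically selects the cheaper
one-argument __lll_lock_wait_private helper, while a run-time value
falls through to the generic __lll_lock_wait.  A rough standalone
illustration, not part of the patch; lll_lock_sketch and the *_stub
names are made up here, with the glibc atomics replaced by a GCC
builtin and the real wait routines by printing stubs:

#include <stdio.h>

#define LLL_PRIVATE 0
#define LLL_SHARED  128

static void
lock_wait_private_stub (int *futex)
{
  printf ("blocking on %p, always-private slow path\n", (void *) futex);
}

static void
lock_wait_stub (int *futex, int private)
{
  printf ("blocking on %p, generic slow path, private=%d\n",
          (void *) futex, private);
}

static inline void
__attribute__ ((always_inline))
lll_lock_sketch (int *futex, int private)
{
  /* Fast path: try to move the lock from 0 (unlocked) to 1 (locked).  */
  if (__sync_val_compare_and_swap (futex, 0, 1) != 0)
    {
      /* Slow path.  With optimization, a literal LLL_PRIVATE at the
         call site makes the else branch disappear entirely.  */
      if (__builtin_constant_p (private) && private == LLL_PRIVATE)
        lock_wait_private_stub (futex);
      else
        lock_wait_stub (futex, private);
    }
}

int
main (int argc, char **argv)
{
  (void) argv;
  int internal_lock = 1;        /* already taken, forces the slow path */
  int process_lock = 1;
  int pshared = argc > 1 ? LLL_SHARED : LLL_PRIVATE;  /* run-time value */

  lll_lock_sketch (&internal_lock, LLL_PRIVATE);
  lll_lock_sketch (&process_lock, pshared);
  return 0;
}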
--- libc/nptl/sysdeps/unix/sysv/linux/sparc/pthread_barrier_wait.c.jj 2007-07-30 22:49:29.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/sparc/pthread_barrier_wait.c 2007-07-30 22:50:08.000000000 +0200
@@ -0,0 +1,78 @@
+/* Copyright (C) 2003, 2004, 2006, 2007 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Martin Schwidefsky <schwidefsky@de.ibm.com>, 2003.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <errno.h>
+#include <sysdep.h>
+#include <lowlevellock.h>
+#include <pthreadP.h>
+
+/* Wait on barrier. */
+int
+pthread_barrier_wait (barrier)
+ pthread_barrier_t *barrier;
+{
+ union sparc_pthread_barrier *ibarrier
+ = (union sparc_pthread_barrier *) barrier;
+ int result = 0;
+ int private = ibarrier->s.pshared ? LLL_SHARED : LLL_PRIVATE;
+
+ /* Make sure we are alone. */
+ lll_lock (ibarrier->b.lock, private);
+
+ /* One more arrival. */
+ --ibarrier->b.left;
+
+ /* Are these all? */
+ if (ibarrier->b.left == 0)
+ {
+ /* Yes. Increment the event counter to avoid invalid wake-ups and
+ tell the current waiters that it is their turn. */
+ ++ibarrier->b.curr_event;
+
+ /* Wake up everybody. */
+ lll_futex_wake (&ibarrier->b.curr_event, INT_MAX, private);
+
+ /* This is the thread which finished the serialization. */
+ result = PTHREAD_BARRIER_SERIAL_THREAD;
+ }
+ else
+ {
+ /* The number of the event we are waiting for. The barrier's event
+ number must be bumped before we continue. */
+ unsigned int event = ibarrier->b.curr_event;
+
+ /* Before suspending, make the barrier available to others. */
+ lll_unlock (ibarrier->b.lock, private);
+
+ /* Wait for the event counter of the barrier to change. */
+ do
+ lll_futex_wait (&ibarrier->b.curr_event, event, private);
+ while (event == ibarrier->b.curr_event);
+ }
+
+ /* Make sure the init_count is stored locally or in a register. */
+ unsigned int init_count = ibarrier->b.init_count;
+
+ /* If this was the last woken thread, unlock. */
+ if (atomic_increment_val (&ibarrier->b.left) == init_count)
+ /* We are done. */
+ lll_unlock (ibarrier->b.lock, private);
+
+ return result;
+}
--- libc/nptl/sysdeps/unix/sysv/linux/sparc/internaltypes.h.jj 2007-07-30 22:44:59.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/sparc/internaltypes.h 2007-07-30 22:45:46.000000000 +0200
@@ -0,0 +1,18 @@
+#ifndef _INTERNALTYPES_H
+#include "../internaltypes.h"
+
+union sparc_pthread_barrier
+{
+ struct pthread_barrier b;
+ struct sparc_pthread_barrier_s
+ {
+ unsigned int curr_event;
+ int lock;
+ unsigned int left;
+ unsigned int init_count;
+ unsigned char left_lock;
+ unsigned char pshared;
+ } s;
+};
+
+#endif
--- libc/nptl/sysdeps/unix/sysv/linux/pthread_mutex_cond_lock.c.jj 2006-02-15 18:01:17.000000000 +0100
+++ libc/nptl/sysdeps/unix/sysv/linux/pthread_mutex_cond_lock.c 2007-07-29 11:48:55.000000000 +0200
@@ -1,8 +1,8 @@
#include <pthreadP.h>
-#define LLL_MUTEX_LOCK(mutex) lll_mutex_cond_lock (mutex)
-#define LLL_MUTEX_TRYLOCK(mutex) lll_mutex_cond_trylock (mutex)
-#define LLL_ROBUST_MUTEX_LOCK(mutex, id) lll_robust_mutex_cond_lock (mutex, id)
+#define LLL_MUTEX_LOCK(mutex) lll_cond_lock (mutex, /* XYZ */ LLL_SHARED)
+#define LLL_MUTEX_TRYLOCK(mutex) lll_cond_trylock (mutex)
+#define LLL_ROBUST_MUTEX_LOCK(mutex, id) lll_robust_cond_lock (mutex, id, /* XYZ */ LLL_SHARED)
#define __pthread_mutex_lock __pthread_mutex_cond_lock
#define NO_INCR
--- libc/nptl/sysdeps/unix/sysv/linux/sem_wait.c.jj 2007-06-08 09:13:52.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/sem_wait.c 2007-07-29 11:48:55.000000000 +0200
@@ -57,8 +57,7 @@ __new_sem_wait (sem_t *sem)
int oldtype = __pthread_enable_asynccancel ();
err = lll_futex_wait (&isem->value, 0,
- // XYZ check mutex flag
- LLL_SHARED);
+ isem->private ^ FUTEX_PRIVATE_FLAG);
/* Disable asynchronous cancellation. */
__pthread_disable_asynccancel (oldtype);
--- libc/nptl/tpp.c.jj 2006-08-15 01:02:29.000000000 +0200
+++ libc/nptl/tpp.c 2007-07-29 11:48:55.000000000 +0200
@@ -1,5 +1,5 @@
/* Thread Priority Protect helpers.
- Copyright (C) 2006 Free Software Foundation, Inc.
+ Copyright (C) 2006, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Jakub Jelinek <jakub@redhat.com>, 2006.
@@ -93,7 +93,7 @@ __pthread_tpp_change_priority (int previ
if (priomax == newpriomax)
return 0;
- lll_lock (self->lock);
+ lll_lock (self->lock, LLL_PRIVATE);
tpp->priomax = newpriomax;
@@ -129,7 +129,7 @@ __pthread_tpp_change_priority (int previ
}
}
- lll_unlock (self->lock);
+ lll_unlock (self->lock, LLL_PRIVATE);
return result;
}
@@ -144,7 +144,7 @@ __pthread_current_priority (void)
int result = 0;
- lll_lock (self->lock);
+ lll_lock (self->lock, LLL_PRIVATE);
if ((self->flags & ATTR_FLAG_SCHED_SET) == 0)
{
@@ -166,7 +166,7 @@ __pthread_current_priority (void)
if (result != -1)
result = self->schedparam.sched_priority;
- lll_unlock (self->lock);
+ lll_unlock (self->lock, LLL_PRIVATE);
return result;
}
Jakub
* Re: [PATCH] lowlevellock.h cleanups, LLL_SHARED vs. LLL_PRIVATE on lll locks
2007-07-31 11:11 ` [PATCH] " Jakub Jelinek
@ 2007-08-01 23:00 ` Kaz Kojima
2007-08-03 15:48 ` Ulrich Drepper
0 siblings, 1 reply; 5+ messages in thread
From: Kaz Kojima @ 2007-08-01 23:00 UTC (permalink / raw)
To: libc-hacker
Jakub Jelinek <jakub@redhat.com> wrote:
> Here is an updated patch against CVS trunk plus
> http://sources.redhat.com/ml/libc-hacker/2007-07/msg00050.html
> which changes all in-tree arches but SH (will leave that to Kaz) and
> sparc sem_* stuff (will do when I find time for it).
The attached patch is the SH portion for this change and
a few other code cleanups.
Regards,
kaz
--
2007-08-01 Kaz Kojima <kkojima@rr.iij4u.or.jp>
* sysdeps/unix/sysv/linux/sh/libc-lowlevellock.S: Remove
definitions for private futexes.
* sysdeps/unix/sysv/linux/sh/lowlevellock.S: Include
kernel-features.h and lowlevellock.h. Use private futexes if
they are available.
(__lll_lock_wait_private, __lll_unlock_wake_private): New.
(__lll_mutex_lock_wait): Rename to ...
(__lll_lock_wait): ... this. Don't compile in for libc.so.
(__lll_mutex_timedlock_wait): Rename to ...
(__lll_timedlock_wait): ... this. Use __NR_gettimeofday.
Don't compile in for libc.so.
(__lll_mutex_unlock_wake): Rename to ...
(__lll_unlock_wake): ... this. Don't compile in for libc.so.
(__lll_timedwait_tid): Use __NR_gettimeofday.
* sysdeps/unix/sysv/linux/sh/lowlevellock.h: Allow including
the header from assembler. Renamed all lll_mutex_* resp.
lll_robust_mutex_* macros to lll_* resp. lll_robust_*.
Renamed all LLL_MUTEX_LOCK_* macros to LLL_LOCK_*.
(FUTEX_CMP_REQUEUE, FUTEX_WAKE_OP, FUTEX_OP_CLEAR_WAKE_IF_GT_ONE):
Define.
(__lll_lock_wait_private): Add prototype.
(__lll_lock_wait, __lll_timedlock_wait, __lll_robust_lock_wait,
__lll_robust_timedlock_wait, __lll_unlock_wake_private,
__lll_unlock_wake): Likewise.
(lll_lock): Add private argument. Call __lll_lock_wait_private
if private is constant LLL_PRIVATE.
(lll_robust_lock, lll_cond_lock, lll_robust_cond_lock,
lll_timedlock, lll_robust_timedlock): Add private argument.
(lll_unlock): Add private argument. Call __lll_unlock_wake_private
if private is constant LLL_PRIVATE.
(lll_robust_unlock, lll_robust_dead): Add private argument.
(lll_lock_t): Remove.
(__lll_cond_wait, __lll_cond_timedwait, __lll_cond_wake,
__lll_cond_broadcast, lll_cond_wait, lll_cond_timedwait,
lll_cond_wake, lll_cond_broadcast): Remove.
* sysdeps/unix/sysv/linux/sh/lowlevelrobustlock.S: Include
kernel-features.h and lowlevellock.h.
(SYS_gettimeofday, SYS_futex, FUTEX_WAIT, FUTEX_WAKE): Remove.
(LOAD_FUTEX_WAIT): Define.
(__lll_robust_mutex_lock_wait): Rename to ...
(__lll_robust_lock_wait): ... this. Add private argument.
Use LOAD_FUTEX_WAIT macro.
(__lll_robust_mutex_timedlock_wait): Rename to ...
(__lll_robust_timedlock_wait): ... this. Add private argument.
Use __NR_gettimeofday. Use LOAD_FUTEX_WAIT macro.
* sysdeps/unix/sysv/linux/sh/pthread_barrier_wait.S: Include
lowlevellock.h.
(SYS_futex, FUTEX_WAIT, FUTEX_WAKE): Remove.
(pthread_barrier_wait): Use __lll_{lock,unlock}_* instead of
__lll_mutex_{lock,unlock}_*.
* sysdeps/unix/sysv/linux/sh/pthread_cond_broadcast.S: Include
lowlevellock.h and pthread-errnos.h.
(SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_REQUEUE,
FUTEX_CMP_REQUEUE, EINVAL): Remove.
(__pthread_cond_broadcast): Use __lll_{lock,unlock}_* instead of
__lll_mutex_{lock,unlock}_*.
* sysdeps/unix/sysv/linux/sh/pthread_cond_signal.S: Include
lowlevellock.h and pthread-errnos.h.
(SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_REQUEUE, EINVAL): Remove.
(__pthread_cond_signal): Use __lll_{lock,unlock}_* instead of
__lll_mutex_{lock,unlock}_*.
* sysdeps/unix/sysv/linux/sh/pthread_cond_timedwait.S: Include
lowlevellock.h.
(SYS_futex, SYS_gettimeofday, FUTEX_WAIT, FUTEX_WAKE): Remove.
(__pthread_cond_timedwait): Use __lll_{lock,unlock}_* instead of
__lll_mutex_{lock,unlock}_*. Use __NR_gettimeofday.
(__condvar_tw_cleanup): Likewise.
* sysdeps/unix/sysv/linux/sh/pthread_cond_wait.S: Include
lowlevellock.h.
(SYS_futex, FUTEX_WAIT, FUTEX_WAKE): Remove.
(__pthread_cond_wait): Use __lll_{lock,unlock}_* instead of
__lll_mutex_{lock,unlock}_*.
(__condvar_w_cleanup): Likewise.
* sysdeps/unix/sysv/linux/sh/pthread_once.S: Include lowlevellock.h.
(SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_PRIVATE_FLAG): Remove.
* sysdeps/unix/sysv/linux/sh/pthread_rwlock_rdlock.S: Include
lowlevellock.h.
(SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_PRIVATE_FLAG): Remove.
(__pthread_rwlock_rdlock): Use __lll_{lock,unlock}_* instead of
__lll_mutex_{lock,unlock}_*.
* sysdeps/unix/sysv/linux/sh/pthread_rwlock_timedrdlock.S: Include
lowlevellock.h.
(SYS_gettimeofday, SYS_futex, FUTEX_WAIT, FUTEX_WAKE,
FUTEX_PRIVATE_FLAG): Remove.
(pthread_rwlock_timedrdlock): Use __lll_{lock,unlock}_* instead of
__lll_mutex_{lock,unlock}_*. Use __NR_gettimeofday.
* sysdeps/unix/sysv/linux/sh/pthread_rwlock_timedwrlock.S: Include
lowlevellock.h.
(SYS_gettimeofday, SYS_futex, FUTEX_WAIT, FUTEX_WAKE,
FUTEX_PRIVATE_FLAG): Remove.
(pthread_rwlock_timedwrlock): Use __lll_{lock,unlock}_* instead of
__lll_mutex_{lock,unlock}_*. Use __NR_gettimeofday.
* sysdeps/unix/sysv/linux/sh/pthread_rwlock_unlock.S: Include
lowlevellock.h.
(SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_PRIVATE_FLAG): Remove.
(__pthread_rwlock_unlock): Use __lll_{lock,unlock}_* instead of
__lll_mutex_{lock,unlock}_*.
* sysdeps/unix/sysv/linux/sh/pthread_rwlock_wrlock.S: Include
lowlevellock.h.
(SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_PRIVATE_FLAG): Remove.
(__pthread_rwlock_wrlock): Use __lll_{lock,unlock}_* instead of
__lll_mutex_{lock,unlock}_*.
* sysdeps/unix/sysv/linux/sh/sem_post.S: Include lowlevellock.h.
(SYS_futex, FUTEX_WAIT, FUTEX_WAKE, FUTEX_PRIVATE_FLAG): Remove.
(__new_sem_post): Use standard initial exec code sequences.
* sysdeps/unix/sysv/linux/sh/sem_timedwait.S: Include
lowlevellock.h.
(SYS_gettimeofday, SYS_futex, FUTEX_WAIT, FUTEX_WAKE,
FUTEX_PRIVATE_FLAG): Remove.
(sem_timedwait): Use __NR_gettimeofday. Use standard initial
exec code sequences.
* sysdeps/unix/sysv/linux/sh/sem_trywait.S: Include lowlevellock.h.
(__new_sem_trywait): Use standard initial exec code sequences.
* sysdeps/unix/sysv/linux/sh/sem_wait.S: Include lowlevellock.h.
(__new_sem_wait): Use standard initial exec code sequences.
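
The lll_lock/lll_unlock entries above describe a constant-folded
dispatch between the private-only helpers and the generic ones.  A
generic C sketch of that dispatch follows; it is a simplification (the
real SH versions below do the atomic exchange in inline assembly), and
the atomic_compare_and_exchange_val_acq call and the exact control flow
are assumptions modeled on glibc's generic code:

#define lll_lock_sketch(futex, private) \
  (void) ({ int *__futex = &(futex); \
            int __val = atomic_compare_and_exchange_val_acq (__futex, 1, 0); \
            if (__builtin_expect (__val != 0, 0)) \
              { \
                if (__builtin_constant_p (private) \
                    && (private) == LLL_PRIVATE) \
                  /* Smaller helper; always uses private futex ops.  */ \
                  __lll_lock_wait_private (__val, __futex); \
                else \
                  /* Generic helper; private is passed at run time.  */ \
                  __lll_lock_wait (__val, __futex, private); \
              } \
          })

Inside libc.so every lock is private, so only the *_private helpers are
needed there; the run-time variants are built only for libpthread,
which is what the NOT_IN_libc guards in the assembly below provide.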
diff -uprN ORIG/libc/nptl/sysdeps/unix/sysv/linux/sh/libc-lowlevellock.S LOCAL/libc/nptl/sysdeps/unix/sysv/linux/sh/libc-lowlevellock.S
--- ORIG/libc/nptl/sysdeps/unix/sysv/linux/sh/libc-lowlevellock.S 2007-06-18 09:03:43.000000000 +0900
+++ LOCAL/libc/nptl/sysdeps/unix/sysv/linux/sh/libc-lowlevellock.S 2007-07-31 21:24:55.000000000 +0900
@@ -16,32 +16,4 @@
Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
02111-1307 USA. */
-#include <kernel-features.h>
-
-/* All locks in libc are private. Use the kernel feature if possible. */
-#define FUTEX_PRIVATE_FLAG 128
-#ifdef __ASSUME_PRIVATE_FUTEX
-# define FUTEX_WAIT (0 | FUTEX_PRIVATE_FLAG)
-# define FUTEX_WAKE (1 | FUTEX_PRIVATE_FLAG)
-#else
-# define LOAD_FUTEX_WAIT(reg,tmp) \
- stc gbr, tmp ; \
- mov.w 99f, reg ; \
- add reg, tmp ; \
- bra 98f ; \
- mov.l @tmp, reg ; \
-99: .word PRIVATE_FUTEX - TLS_PRE_TCB_SIZE ; \
-98:
-
-# define LOAD_FUTEX_WAKE(reg,tmp) \
- stc gbr, tmp ; \
- mov.w 99f, reg ; \
- add reg, tmp ; \
- mov.l @tmp, reg ; \
- bra 98f ; \
- mov #FUTEX_WAKE, tmp ; \
-99: .word PRIVATE_FUTEX - TLS_PRE_TCB_SIZE ; \
-98: or tmp, reg
-#endif
-
#include "lowlevellock.S"
diff -uprN ORIG/libc/nptl/sysdeps/unix/sysv/linux/sh/lowlevellock.S LOCAL/libc/nptl/sysdeps/unix/sysv/linux/sh/lowlevellock.S
--- ORIG/libc/nptl/sysdeps/unix/sysv/linux/sh/lowlevellock.S 2007-06-18 09:03:43.000000000 +0900
+++ LOCAL/libc/nptl/sysdeps/unix/sysv/linux/sh/lowlevellock.S 2007-08-01 13:28:04.000000000 +0900
@@ -18,45 +18,112 @@
#include <sysdep.h>
#include <pthread-errnos.h>
+#include <kernel-features.h>
+#include <lowlevellock.h>
#include "lowlevel-atomic.h"
.text
-#define SYS_gettimeofday __NR_gettimeofday
-#define SYS_futex 240
-#ifndef FUTEX_WAIT
-# define FUTEX_WAIT 0
-# define FUTEX_WAKE 1
-#endif
-
-#ifndef LOAD_FUTEX_WAIT
+#ifdef __ASSUME_PRIVATE_FUTEX
+# define LOAD_PRIVATE_FUTEX_WAIT(reg,tmp,tmp2) \
+ mov #(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg; \
+ extu.b reg, reg
+# define LOAD_PRIVATE_FUTEX_WAKE(reg,tmp,tmp2) \
+ mov #(FUTEX_WAKE | FUTEX_PRIVATE_FLAG), reg; \
+ extu.b reg, reg
+# define LOAD_FUTEX_WAIT(reg,tmp,tmp2) \
+ mov #(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), tmp; \
+ extu.b tmp, tmp; \
+ xor tmp, reg
+# define LOAD_FUTEX_WAKE(reg,tmp,tmp2) \
+ mov #(FUTEX_WAKE | FUTEX_PRIVATE_FLAG), tmp; \
+ extu.b tmp, tmp; \
+ xor tmp, reg
+#else
# if FUTEX_WAIT == 0
-# define LOAD_FUTEX_WAIT(reg,tmp) \
- xor reg, reg
+# define LOAD_PRIVATE_FUTEX_WAIT(reg,tmp,tmp2) \
+ stc gbr, tmp ; \
+ mov.w 99f, reg ; \
+ add reg, tmp ; \
+ bra 98f ; \
+ mov.l @tmp, reg ; \
+99: .word PRIVATE_FUTEX - TLS_PRE_TCB_SIZE ; \
+98:
# else
-# define LOAD_FUTEX_WAIT(reg,tmp) \
- mov #FUTEX_WAIT, reg; \
- extu.b reg, reg
+# define LOAD_PRIVATE_FUTEX_WAIT(reg,tmp,tmp2) \
+ stc gbr, tmp ; \
+ mov.w 99f, reg ; \
+ add reg, tmp ; \
+ mov.l @tmp, reg ; \
+ bra 98f ; \
+ mov #FUTEX_WAIT, tmp ; \
+99: .word PRIVATE_FUTEX - TLS_PRE_TCB_SIZE ; \
+98: or tmp, reg
+# endif
+# define LOAD_PRIVATE_FUTEX_WAKE(reg,tmp,tmp2) \
+ stc gbr, tmp ; \
+ mov.w 99f, reg ; \
+ add reg, tmp ; \
+ mov.l @tmp, reg ; \
+ bra 98f ; \
+ mov #FUTEX_WAKE, tmp ; \
+99: .word PRIVATE_FUTEX - TLS_PRE_TCB_SIZE ; \
+98: or tmp, reg
+# if FUTEX_WAIT == 0
+# define LOAD_FUTEX_WAIT(reg,tmp,tmp2) \
+ stc gbr, tmp ; \
+ mov.w 99f, tmp2 ; \
+ add tmp2, tmp ; \
+ mov.l @tmp, tmp2 ; \
+ bra 98f ; \
+ mov #FUTEX_PRIVATE_FLAG, tmp
+99: .word PRIVATE_FUTEX - TLS_PRE_TCB_SIZE ; \
+98: extu.b tmp, tmp ; \
+ xor tmp, reg ; \
+ and tmp2, reg
+# else
+# define LOAD_FUTEX_WAIT(reg,tmp,tmp2) \
+ stc gbr, tmp ; \
+ mov.w 99f, tmp2 ; \
+ add tmp2, tmp ; \
+ mov.l @tmp, tmp2 ; \
+ bra 98f ; \
+ mov #FUTEX_PRIVATE_FLAG, tmp
+99: .word PRIVATE_FUTEX - TLS_PRE_TCB_SIZE ; \
+98: extu.b tmp, tmp ; \
+ xor tmp, reg ; \
+ and tmp2, reg ; \
+ mov #FUTEX_WAIT, tmp ; \
+ or tmp, reg
# endif
# define LOAD_FUTEX_WAKE(reg,tmp) \
- mov #FUTEX_WAKE, reg; \
- extu.b reg, reg
+ stc gbr, tmp ; \
+ mov.w 99f, tmp2 ; \
+ add tmp2, tmp ; \
+ mov.l @tmp, tmp2 ; \
+ bra 98f ; \
+ mov #FUTEX_PRIVATE_FLAG, tmp
+99: .word PRIVATE_FUTEX - TLS_PRE_TCB_SIZE ; \
+98: extu.b tmp, tmp ; \
+ xor tmp, reg ; \
+ and tmp2, reg ; \
+ mov #FUTEX_WAKE, tmp ; \
+ or tmp, reg
#endif
-
- .globl __lll_mutex_lock_wait
- .type __lll_mutex_lock_wait,@function
- .hidden __lll_mutex_lock_wait
+ .globl __lll_lock_wait_private
+ .type __lll_lock_wait_private,@function
+ .hidden __lll_lock_wait_private
.align 5
cfi_startproc
-__lll_mutex_lock_wait:
+__lll_lock_wait_private:
mov.l r8, @-r15
cfi_adjust_cfa_offset(4)
cfi_rel_offset (r8, 0)
mov r4, r6
mov r5, r8
mov #0, r7 /* No timeout. */
- LOAD_FUTEX_WAIT (r5, r0)
+ LOAD_PRIVATE_FUTEX_WAIT (r5, r0, r1)
mov #2, r4
cmp/eq r4, r6
@@ -79,22 +146,67 @@ __lll_mutex_lock_wait:
ret
mov r2, r0
cfi_endproc
- .size __lll_mutex_lock_wait,.-__lll_mutex_lock_wait
-
+ .size __lll_lock_wait_private,.-__lll_lock_wait_private
#ifdef NOT_IN_libc
- .globl __lll_mutex_timedlock_wait
- .type __lll_mutex_timedlock_wait,@function
- .hidden __lll_mutex_timedlock_wait
+ .globl __lll_lock_wait
+ .type __lll_lock_wait,@function
+ .hidden __lll_lock_wait
.align 5
cfi_startproc
-__lll_mutex_timedlock_wait:
+__lll_lock_wait:
+ mov.l r9, @-r15
+ cfi_adjust_cfa_offset(4)
+ cfi_rel_offset (r9, 0)
+ mov.l r8, @-r15
+ cfi_adjust_cfa_offset(4)
+ cfi_rel_offset (r8, 0)
+ mov r6, r9
+ mov r4, r6
+ mov r5, r8
+ mov #0, r7 /* No timeout. */
+ mov r9, r5
+ LOAD_FUTEX_WAIT (r5, r0, r1)
+
+ mov #2, r4
+ cmp/eq r4, r6
+ bf 2f
+
+1:
+ mov r8, r4
+ mov #SYS_futex, r3
+ extu.b r3, r3
+ trapa #0x14
+ SYSCALL_INST_PAD
+
+2:
+ mov #2, r6
+ XCHG (r6, @r8, r2)
+ tst r2, r2
+ bf 1b
+
+ mov.l @r15+, r8
+ mov.l @r15+, r9
+ ret
+ mov r2, r0
+ cfi_endproc
+ .size __lll_lock_wait,.-__lll_lock_wait
+
+ .globl __lll_timedlock_wait
+ .type __lll_timedlock_wait,@function
+ .hidden __lll_timedlock_wait
+ .align 5
+ cfi_startproc
+__lll_timedlock_wait:
/* Check for a valid timeout value. */
mov.l @(4,r6), r1
mov.l .L1g, r0
cmp/hs r0, r1
bt 3f
+ mov.l r11, @-r15
+ cfi_adjust_cfa_offset(4)
+ cfi_rel_offset (r11, 0)
mov.l r10, @-r15
cfi_adjust_cfa_offset(4)
cfi_rel_offset (r10, 0)
@@ -104,6 +216,7 @@ __lll_mutex_timedlock_wait:
mov.l r8, @-r15
cfi_adjust_cfa_offset(4)
cfi_rel_offset (r8, 0)
+ mov r7, r11
mov r4, r10
mov r6, r9
mov r5, r8
@@ -116,7 +229,7 @@ __lll_mutex_timedlock_wait:
/* Get current time. */
mov r15, r4
mov #0, r5
- mov #SYS_gettimeofday, r3
+ mov #__NR_gettimeofday, r3
trapa #0x12
SYSCALL_INST_PAD
@@ -149,7 +262,8 @@ __lll_mutex_timedlock_wait:
bt 8f
mov r8, r4
- LOAD_FUTEX_WAIT (r5, r0)
+ mov r11, r5
+ LOAD_FUTEX_WAIT (r5, r0, r1)
mov r10, r6
mov r15, r7
mov #SYS_futex, r3
@@ -169,8 +283,9 @@ __lll_mutex_timedlock_wait:
add #8, r15
mov.l @r15+, r8
mov.l @r15+, r9
+ mov.l @r15+, r10
rts
- mov.l @r15+, r10
+ mov.l @r15+, r11
7:
/* Check whether the time expired. */
mov #-ETIMEDOUT, r1
@@ -198,17 +313,16 @@ __lll_mutex_timedlock_wait:
.L1g:
.long 1000000000
- .size __lll_mutex_timedlock_wait,.-__lll_mutex_timedlock_wait
+ .size __lll_timedlock_wait,.-__lll_timedlock_wait
#endif
-
- .globl __lll_mutex_unlock_wake
- .type __lll_mutex_unlock_wake,@function
- .hidden __lll_mutex_unlock_wake
+ .globl __lll_unlock_wake_private
+ .type __lll_unlock_wake_private,@function
+ .hidden __lll_unlock_wake_private
.align 5
cfi_startproc
-__lll_mutex_unlock_wake:
- LOAD_FUTEX_WAKE (r5, r0)
+__lll_unlock_wake_private:
+ LOAD_PRIVATE_FUTEX_WAKE (r5, r0, r1)
mov #1, r6 /* Wake one thread. */
mov #0, r7
mov.l r7, @r4 /* Stores 0. */
@@ -219,10 +333,28 @@ __lll_mutex_unlock_wake:
rts
nop
cfi_endproc
- .size __lll_mutex_unlock_wake,.-__lll_mutex_unlock_wake
-
+ .size __lll_unlock_wake_private,.-__lll_unlock_wake_private
#ifdef NOT_IN_libc
+ .globl __lll_unlock_wake
+ .type __lll_unlock_wake,@function
+ .hidden __lll_unlock_wake
+ .align 5
+ cfi_startproc
+__lll_unlock_wake:
+ LOAD_FUTEX_WAKE (r5, r0, r1)
+ mov #1, r6 /* Wake one thread. */
+ mov #0, r7
+ mov.l r7, @r4 /* Stores 0. */
+ mov #SYS_futex, r3
+ extu.b r3, r3
+ trapa #0x14
+ SYSCALL_INST_PAD
+ rts
+ nop
+ cfi_endproc
+ .size __lll_unlock_wake,.-__lll_unlock_wake
+
.globl __lll_timedwait_tid
.type __lll_timedwait_tid,@function
.hidden __lll_timedwait_tid
@@ -246,7 +378,7 @@ __lll_timedwait_tid:
/* Get current time. */
mov r15, r4
mov #0, r5
- mov #SYS_gettimeofday, r3
+ mov #__NR_gettimeofday, r3
trapa #0x12
SYSCALL_INST_PAD
diff -uprN ORIG/libc/nptl/sysdeps/unix/sysv/linux/sh/lowlevellock.h LOCAL/libc/nptl/sysdeps/unix/sysv/linux/sh/lowlevellock.h
--- ORIG/libc/nptl/sysdeps/unix/sysv/linux/sh/lowlevellock.h 2007-07-31 20:25:06.000000000 +0900
+++ LOCAL/libc/nptl/sysdeps/unix/sysv/linux/sh/lowlevellock.h 2007-08-01 15:41:56.000000000 +0900
@@ -19,19 +19,24 @@
#ifndef _LOWLEVELLOCK_H
#define _LOWLEVELLOCK_H 1
+#ifndef __ASSEMBLER__
#include <time.h>
#include <sys/param.h>
#include <bits/pthreadtypes.h>
#include <kernel-features.h>
+#endif
#define SYS_futex 240
#define FUTEX_WAIT 0
#define FUTEX_WAKE 1
+#define FUTEX_CMP_REQUEUE 4
+#define FUTEX_WAKE_OP 5
#define FUTEX_LOCK_PI 6
#define FUTEX_UNLOCK_PI 7
#define FUTEX_TRYLOCK_PI 8
#define FUTEX_PRIVATE_FLAG 128
+#define FUTEX_OP_CLEAR_WAKE_IF_GT_ONE ((4 << 24) | 1)
/* Values for 'private' parameter of locking macros. Yes, the
definition seems to be backwards. But it is not. The bit will be
@@ -64,20 +69,30 @@
# endif
#endif
+#ifndef __ASSEMBLER__
/* Initializer for compatibility lock. */
-#define LLL_MUTEX_LOCK_INITIALIZER (0)
-#define LLL_MUTEX_LOCK_INITIALIZER_LOCKED (1)
-#define LLL_MUTEX_LOCK_INITIALIZER_WAITERS (2)
-
-extern int __lll_mutex_lock_wait (int val, int *__futex) attribute_hidden;
-extern int __lll_mutex_timedlock_wait (int val, int *__futex,
- const struct timespec *abstime)
- attribute_hidden;
-extern int __lll_mutex_unlock_wake (int *__futex) attribute_hidden;
+#define LLL_LOCK_INITIALIZER (0)
+#define LLL_LOCK_INITIALIZER_LOCKED (1)
+#define LLL_LOCK_INITIALIZER_WAITERS (2)
+extern int __lll_lock_wait_private (int val, int *__futex)
+ attribute_hidden;
+extern int __lll_lock_wait (int val, int *__futex, int private)
+ attribute_hidden;
+extern int __lll_timedlock_wait (int val, int *__futex,
+ const struct timespec *abstime, int private)
+ attribute_hidden;
+extern int __lll_robust_lock_wait (int val, int *__futex, int private)
+ attribute_hidden;
+extern int __lll_robust_timedlock_wait (int val, int *__futex,
+ const struct timespec *abstime,
+ int private)
+ attribute_hidden;
+extern int __lll_unlock_wake_private (int *__futex) attribute_hidden;
+extern int __lll_unlock_wake (int *__futex, int private) attribute_hidden;
-#define lll_mutex_trylock(futex) \
+#define lll_trylock(futex) \
({ unsigned char __result; \
__asm __volatile ("\
.align 2\n\
@@ -94,12 +109,12 @@ extern int __lll_mutex_unlock_wake (int
negc %0,%0"\
: "=r" (__result) \
: "r" (&(futex)), \
- "r" (LLL_MUTEX_LOCK_INITIALIZER_LOCKED), \
- "r" (LLL_MUTEX_LOCK_INITIALIZER) \
+ "r" (LLL_LOCK_INITIALIZER_LOCKED), \
+ "r" (LLL_LOCK_INITIALIZER) \
: "r0", "r1", "r2", "t", "memory"); \
__result; })
-#define lll_robust_mutex_trylock(futex, id) \
+#define lll_robust_trylock(futex, id) \
({ unsigned char __result; \
__asm __volatile ("\
.align 2\n\
@@ -117,11 +132,11 @@ extern int __lll_mutex_unlock_wake (int
: "=r" (__result) \
: "r" (&(futex)), \
"r" (id), \
- "r" (LLL_MUTEX_LOCK_INITIALIZER) \
+ "r" (LLL_LOCK_INITIALIZER) \
: "r0", "r1", "r2", "t", "memory"); \
__result; })
-#define lll_mutex_cond_trylock(futex) \
+#define lll_cond_trylock(futex) \
({ unsigned char __result; \
__asm __volatile ("\
.align 2\n\
@@ -138,13 +153,13 @@ extern int __lll_mutex_unlock_wake (int
negc %0,%0"\
: "=r" (__result) \
: "r" (&(futex)), \
- "r" (LLL_MUTEX_LOCK_INITIALIZER_WAITERS), \
- "r" (LLL_MUTEX_LOCK_INITIALIZER) \
+ "r" (LLL_LOCK_INITIALIZER_WAITERS), \
+ "r" (LLL_LOCK_INITIALIZER) \
: "r0", "r1", "r2", "t", "memory"); \
__result; })
-#define lll_mutex_lock(futex) \
- (void) ({ int __result, val, *__futex = &(futex); \
+#define lll_lock(futex, private) \
+ (void) ({ int __result, *__futex = &(futex); \
__asm __volatile ("\
.align 2\n\
mova 1f,r0\n\
@@ -159,10 +174,17 @@ extern int __lll_mutex_unlock_wake (int
: "=&r" (__result) : "r" (1), "r" (__futex) \
: "r0", "r1", "t", "memory"); \
if (__result) \
- __lll_mutex_lock_wait (__result, __futex); })
+ { \
+ if (__builtin_constant_p (private) \
+ && (private) == LLL_PRIVATE) \
+ __lll_lock_wait_private (__result, __futex); \
+ else \
+ __lll_lock_wait (__result, __futex, (private)); \
+ } \
+ })
-#define lll_robust_mutex_lock(futex, id) \
- ({ int __result, val, *__futex = &(futex); \
+#define lll_robust_lock(futex, id, private) \
+ ({ int __result, *__futex = &(futex); \
__asm __volatile ("\
.align 2\n\
mova 1f,r0\n\
@@ -177,13 +199,13 @@ extern int __lll_mutex_unlock_wake (int
: "=&r" (__result) : "r" (id), "r" (__futex) \
: "r0", "r1", "t", "memory"); \
if (__result) \
- __result = __lll_robust_mutex_lock_wait (__result, __futex); \
+ __result = __lll_robust_lock_wait (__result, __futex, private); \
__result; })
/* Special version of lll_mutex_lock which causes the unlock function to
always wakeup waiters. */
-#define lll_mutex_cond_lock(futex) \
- (void) ({ int __result, val, *__futex = &(futex); \
+#define lll_cond_lock(futex, private) \
+ (void) ({ int __result, *__futex = &(futex); \
__asm __volatile ("\
.align 2\n\
mova 1f,r0\n\
@@ -198,10 +220,10 @@ extern int __lll_mutex_unlock_wake (int
: "=&r" (__result) : "r" (2), "r" (__futex) \
: "r0", "r1", "t", "memory"); \
if (__result) \
- __lll_mutex_lock_wait (__result, __futex); })
+ __lll_lock_wait (__result, __futex, private); })
-#define lll_robust_mutex_cond_lock(futex, id) \
- ({ int __result, val, *__futex = &(futex); \
+#define lll_robust_cond_lock(futex, id, private) \
+ ({ int __result, *__futex = &(futex); \
__asm __volatile ("\
.align 2\n\
mova 1f,r0\n\
@@ -216,11 +238,11 @@ extern int __lll_mutex_unlock_wake (int
: "=&r" (__result) : "r" (id | FUTEX_WAITERS), "r" (__futex) \
: "r0", "r1", "t", "memory"); \
if (__result) \
- __result = __lll_robust_mutex_lock_wait (__result, __futex); \
+ __result = __lll_robust_lock_wait (__result, __futex, private); \
__result; })
-#define lll_mutex_timedlock(futex, timeout) \
- ({ int __result, val, *__futex = &(futex); \
+#define lll_timedlock(futex, timeout, private) \
+ ({ int __result, *__futex = &(futex); \
__asm __volatile ("\
.align 2\n\
mova 1f,r0\n\
@@ -235,11 +257,11 @@ extern int __lll_mutex_unlock_wake (int
: "=&r" (__result) : "r" (1), "r" (__futex) \
: "r0", "r1", "t", "memory"); \
if (__result) \
- __result = __lll_mutex_timedlock_wait (__result, __futex, timeout); \
+ __result = __lll_timedlock_wait (__result, __futex, timeout, private); \
__result; })
-#define lll_robust_mutex_timedlock(futex, timeout, id) \
- ({ int __result, val, *__futex = &(futex); \
+#define lll_robust_timedlock(futex, timeout, id, private) \
+ ({ int __result, *__futex = &(futex); \
__asm __volatile ("\
.align 2\n\
mova 1f,r0\n\
@@ -254,11 +276,11 @@ extern int __lll_mutex_unlock_wake (int
: "=&r" (__result) : "r" (id), "r" (__futex) \
: "r0", "r1", "t", "memory"); \
if (__result) \
- __result = __lll_robust_mutex_timedlock_wait (__result, __futex, \
- timeout); \
+ __result = __lll_robust_timedlock_wait (__result, __futex, \
+ timeout, private); \
__result; })
-#define lll_mutex_unlock(futex) \
+#define lll_unlock(futex, private) \
(void) ({ int __result, *__futex = &(futex); \
__asm __volatile ("\
.align 2\n\
@@ -272,9 +294,16 @@ extern int __lll_mutex_unlock_wake (int
: "=&r" (__result) : "r" (__futex) \
: "r0", "r1", "memory"); \
if (__result) \
- __lll_mutex_unlock_wake (__futex); })
+ { \
+ if (__builtin_constant_p (private) \
+ && (private) == LLL_PRIVATE) \
+ __lll_unlock_wake_private (__futex); \
+ else \
+ __lll_unlock_wake (__futex, (private)); \
+ } \
+ })
-#define lll_robust_mutex_unlock(futex) \
+#define lll_robust_unlock(futex, private) \
(void) ({ int __result, *__futex = &(futex); \
__asm __volatile ("\
.align 2\n\
@@ -288,9 +317,9 @@ extern int __lll_mutex_unlock_wake (int
: "=&r" (__result) : "r" (__futex), "r" (FUTEX_WAITERS) \
: "r0", "r1", "memory"); \
if (__result) \
- __lll_mutex_unlock_wake (__futex); })
+ __lll_unlock_wake (__futex, private); })
-#define lll_robust_mutex_dead(futex) \
+#define lll_robust_dead(futex, private) \
(void) ({ int __ignore, *__futex = &(futex); \
__asm __volatile ("\
.align 2\n\
@@ -303,22 +332,7 @@ extern int __lll_mutex_unlock_wake (int
1: mov r1,r15"\
: "=&r" (__ignore) : "r" (__futex), "r" (FUTEX_OWNER_DIED) \
: "r0", "r1", "memory"); \
- lll_futex_wake (__futex, 1, LLL_SHARED); })
-
-#define lll_mutex_islocked(futex) \
- (futex != 0)
-
-
-/* We have a separate internal lock implementation which is not tied
- to binary compatibility. */
-
-/* Type for lock object. */
-typedef int lll_lock_t;
-
-/* Initializers for lock. */
-#define LLL_LOCK_INITIALIZER (0)
-#define LLL_LOCK_INITIALIZER_LOCKED (1)
-
+ lll_futex_wake (__futex, 1, private); })
# ifdef NEED_SYSCALL_INST_PAD
# define SYSCALL_WITH_INST_PAD "\
@@ -367,25 +381,14 @@ typedef int lll_lock_t;
} while (0)
-/* The states of a lock are:
- 0 - untaken
- 1 - taken by one user
- 2 - taken by more users */
-
-#define lll_trylock(futex) lll_mutex_trylock (futex)
-#define lll_lock(futex) lll_mutex_lock (futex)
-#define lll_unlock(futex) lll_mutex_unlock (futex)
-
#define lll_islocked(futex) \
(futex != LLL_LOCK_INITIALIZER)
-
/* The kernel notifies a process with uses CLONE_CLEARTID via futex
wakeup when the clone terminates. The memory location contains the
thread ID while the clone is running and is reset to zero
afterwards. */
-extern int __lll_wait_tid (int *tid) attribute_hidden;
#define lll_wait_tid(tid) \
do { \
__typeof (tid) *__tid = &(tid); \
@@ -407,24 +410,6 @@ extern int __lll_timedwait_tid (int *tid
} \
__result; })
-
-/* Conditional variable handling. */
-
-extern void __lll_cond_wait (pthread_cond_t *cond) attribute_hidden;
-extern int __lll_cond_timedwait (pthread_cond_t *cond,
- const struct timespec *abstime)
- attribute_hidden;
-extern void __lll_cond_wake (pthread_cond_t *cond) attribute_hidden;
-extern void __lll_cond_broadcast (pthread_cond_t *cond) attribute_hidden;
-
-
-#define lll_cond_wait(cond) \
- __lll_cond_wait (cond)
-#define lll_cond_timedwait(cond, abstime) \
- __lll_cond_timedwait (cond, abstime)
-#define lll_cond_wake(cond) \
- __lll_cond_wake (cond)
-#define lll_cond_broadcast(cond) \
- __lll_cond_broadcast (cond)
+#endif /* !__ASSEMBLER__ */
#endif /* lowlevellock.h */
diff -uprN ORIG/libc/nptl/sysdeps/unix/sysv/linux/sh/lowlevelrobustlock.S LOCAL/libc/nptl/sysdeps/unix/sysv/linux/sh/lowlevelrobustlock.S
--- ORIG/libc/nptl/sysdeps/unix/sysv/linux/sh/lowlevelrobustlock.S 2006-02-18 00:36:10.000000000 +0900
+++ LOCAL/libc/nptl/sysdeps/unix/sysv/linux/sh/lowlevelrobustlock.S 2007-08-01 14:29:53.000000000 +0900
@@ -1,4 +1,5 @@
-/* Copyright (C) 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2005, 2006, 2007
+ Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -18,31 +19,64 @@
#include <sysdep.h>
#include <pthread-errnos.h>
+#include <lowlevellock.h>
#include <lowlevelrobustlock.h>
+#include <kernel-features.h>
#include "lowlevel-atomic.h"
.text
-#define SYS_gettimeofday __NR_gettimeofday
-#define SYS_futex 240
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
#define FUTEX_WAITERS 0x80000000
#define FUTEX_OWNER_DIED 0x40000000
-
- .globl __lll_robust_mutex_lock_wait
- .type __lll_robust_mutex_lock_wait,@function
- .hidden __lll_robust_mutex_lock_wait
+#ifdef __ASSUME_PRIVATE_FUTEX
+# define LOAD_FUTEX_WAIT(reg,tmp,tmp2) \
+ mov #(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), tmp; \
+ extu.b tmp, tmp; \
+ xor tmp, reg
+#else
+# if FUTEX_WAIT == 0
+# define LOAD_FUTEX_WAIT(reg,tmp,tmp2) \
+ stc gbr, tmp ; \
+ mov.w 99f, tmp2 ; \
+ add tmp2, tmp ; \
+ mov.l @tmp, tmp2 ; \
+ bra 98f ; \
+ mov #FUTEX_PRIVATE_FLAG, tmp
+99: .word PRIVATE_FUTEX - TLS_PRE_TCB_SIZE ; \
+98: extu.b tmp, tmp ; \
+ xor tmp, reg ; \
+ and tmp2, reg
+# else
+# define LOAD_FUTEX_WAIT(reg,tmp,tmp2) \
+ stc gbr, tmp ; \
+ mov.w 99f, tmp2 ; \
+ add tmp2, tmp ; \
+ mov.l @tmp, tmp2 ; \
+ bra 98f ; \
+ mov #FUTEX_PRIVATE_FLAG, tmp
+99: .word PRIVATE_FUTEX - TLS_PRE_TCB_SIZE ; \
+98: extu.b tmp, tmp ; \
+ xor tmp, reg ; \
+ and tmp2, reg ; \
+ mov #FUTEX_WAIT, tmp ; \
+ or tmp, reg
+# endif
+#endif
+
+ .globl __lll_robust_lock_wait
+ .type __lll_robust_lock_wait,@function
+ .hidden __lll_robust_lock_wait
.align 5
cfi_startproc
-__lll_robust_mutex_lock_wait:
+__lll_robust_lock_wait:
mov.l r8, @-r15
cfi_adjust_cfa_offset(4)
cfi_rel_offset (r8, 0)
mov r5, r8
mov #0, r7 /* No timeout. */
- mov #FUTEX_WAIT, r5
+ mov r6, r5
+ LOAD_FUTEX_WAIT (r5, r0, r1)
4:
mov r4, r6
@@ -90,21 +124,24 @@ __lll_robust_mutex_lock_wait:
.long FUTEX_WAITERS
.Ltidoff:
.word TID - TLS_PRE_TCB_SIZE
- .size __lll_robust_mutex_lock_wait,.-__lll_robust_mutex_lock_wait
+ .size __lll_robust_lock_wait,.-__lll_robust_lock_wait
- .globl __lll_robust_mutex_timedlock_wait
- .type __lll_robust_mutex_timedlock_wait,@function
- .hidden __lll_robust_mutex_timedlock_wait
+ .globl __lll_robust_timedlock_wait
+ .type __lll_robust_timedlock_wait,@function
+ .hidden __lll_robust_timedlock_wait
.align 5
cfi_startproc
-__lll_robust_mutex_timedlock_wait:
+__lll_robust_timedlock_wait:
/* Check for a valid timeout value. */
mov.l @(4,r6), r1
mov.l .L1g, r0
cmp/hs r0, r1
bt 3f
+ mov.l r11, @-r15
+ cfi_adjust_cfa_offset(4)
+ cfi_rel_offset (r11, 0)
mov.l r10, @-r15
cfi_adjust_cfa_offset(4)
cfi_rel_offset (r10, 0)
@@ -114,6 +151,7 @@ __lll_robust_mutex_timedlock_wait:
mov.l r8, @-r15
cfi_adjust_cfa_offset(4)
cfi_rel_offset (r8, 0)
+ mov r7, r11
mov r4, r10
mov r6, r9
mov r5, r8
@@ -126,7 +164,7 @@ __lll_robust_mutex_timedlock_wait:
/* Get current time. */
mov r15, r4
mov #0, r5
- mov #SYS_gettimeofday, r3
+ mov #__NR_gettimeofday, r3
trapa #0x12
SYSCALL_INST_PAD
@@ -167,7 +205,8 @@ __lll_robust_mutex_timedlock_wait:
2:
mov r8, r4
- mov #FUTEX_WAIT, r5
+ mov r11, r5
+ LOAD_FUTEX_WAIT (r5, r0, r1)
mov r10, r6
mov r15, r7
mov #SYS_futex, r3
@@ -196,8 +235,9 @@ __lll_robust_mutex_timedlock_wait:
add #8, r15
mov.l @r15+, r8
mov.l @r15+, r9
+ mov.l @r15+, r10
rts
- mov.l @r15+, r10
+ mov.l @r15+, r11
7:
/* Check whether the time expired. */
@@ -221,4 +261,4 @@ __lll_robust_mutex_timedlock_wait:
.word TID - TLS_PRE_TCB_SIZE
.L1k:
.word 1000
- .size __lll_robust_mutex_timedlock_wait,.-__lll_robust_mutex_timedlock_wait
+ .size __lll_robust_timedlock_wait,.-__lll_robust_timedlock_wait
diff -uprN ORIG/libc/nptl/sysdeps/unix/sysv/linux/sh/pthread_barrier_wait.S LOCAL/libc/nptl/sysdeps/unix/sysv/linux/sh/pthread_barrier_wait.S
--- ORIG/libc/nptl/sysdeps/unix/sysv/linux/sh/pthread_barrier_wait.S 2007-06-18 09:03:43.000000000 +0900
+++ LOCAL/libc/nptl/sysdeps/unix/sysv/linux/sh/pthread_barrier_wait.S 2007-08-01 09:38:20.000000000 +0900
@@ -17,14 +17,10 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <lowlevelbarrier.h>
#include "lowlevel-atomic.h"
-#define SYS_futex 240
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-
-
.text
.globl pthread_barrier_wait
@@ -152,6 +148,10 @@ pthread_barrier_wait:
mov.l @r15+, r9
1:
+ mov.l @(PRIVATE,r8), r6
+ mov #LLL_SHARED, r0
+ extu.b r0, r0
+ xor r0, r6
mov r2, r4
mov r8, r5
mov.l .Lwait0, r1
@@ -162,6 +162,10 @@ pthread_barrier_wait:
nop
4:
+ mov.l @(PRIVATE,r8), r5
+ mov #LLL_SHARED, r0
+ extu.b r0, r0
+ xor r0, r5
mov r8, r4
mov.l .Lwake0, r1
bsrf r1
@@ -172,6 +176,10 @@ pthread_barrier_wait:
6:
mov r6, r9
+ mov.l @(PRIVATE,r8), r5
+ mov #LLL_SHARED, r0
+ extu.b r0, r0
+ xor r0, r5
mov r8, r4
mov.l .Lwake1, r1
bsrf r1
@@ -182,6 +190,10 @@ pthread_barrier_wait:
9:
mov r6, r9
+ mov.l @(PRIVATE,r8), r5
+ mov #LLL_SHARED, r0
+ extu.b r0, r0
+ xor r0, r5
mov r8, r4
mov.l .Lwake2, r1
bsrf r1
@@ -194,11 +206,11 @@ pthread_barrier_wait:
.Lall:
.long 0x7fffffff
.Lwait0:
- .long __lll_mutex_lock_wait-.Lwait0b
+ .long __lll_lock_wait-.Lwait0b
.Lwake0:
- .long __lll_mutex_unlock_wake-.Lwake0b
+ .long __lll_unlock_wake-.Lwake0b
.Lwake1:
- .long __lll_mutex_unlock_wake-.Lwake1b
+ .long __lll_unlock_wake-.Lwake1b
.Lwake2:
- .long __lll_mutex_unlock_wake-.Lwake2b
+ .long __lll_unlock_wake-.Lwake2b
.size pthread_barrier_wait,.-pthread_barrier_wait
diff -uprN ORIG/libc/nptl/sysdeps/unix/sysv/linux/sh/pthread_cond_broadcast.S LOCAL/libc/nptl/sysdeps/unix/sysv/linux/sh/pthread_cond_broadcast.S
--- ORIG/libc/nptl/sysdeps/unix/sysv/linux/sh/pthread_cond_broadcast.S 2006-09-18 06:49:04.000000000 +0900
+++ LOCAL/libc/nptl/sysdeps/unix/sysv/linux/sh/pthread_cond_broadcast.S 2007-08-01 09:49:37.000000000 +0900
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004, 2006 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2006, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -18,19 +18,13 @@
#include <sysdep.h>
#include <shlib-compat.h>
+#include <lowlevellock.h>
#include <lowlevelcond.h>
#include <kernel-features.h>
#include <pthread-pi-defines.h>
+#include <pthread-errnos.h>
#include "lowlevel-atomic.h"
-#define SYS_futex 240
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-#define FUTEX_REQUEUE 3
-#define FUTEX_CMP_REQUEUE 4
-
-#define EINVAL 22
-
.text
/* int pthread_cond_broadcast (pthread_cond_t *cond) */
@@ -162,10 +156,12 @@ __pthread_cond_broadcast:
#if cond_lock != 0
add #cond_lock, r5
#endif
- mov.l .Lmwait5, r1
+ mov #LLL_SHARED, r6
+ extu.b r6, r6
+ mov.l .Lwait5, r1
bsrf r1
mov r2, r4
-.Lmwait5b:
+.Lwait5b:
bra 2b
nop
@@ -175,10 +171,11 @@ __pthread_cond_broadcast:
#if cond_lock != 0
add #cond_lock, r4
#endif
- mov.l .Lmwake5, r1
+ mov #LLL_SHARED, r5
+ mov.l .Lwake5, r1
bsrf r1
- nop
-.Lmwake5b:
+ extu.b r5, r5
+.Lwake5b:
bra 6b
nop
@@ -188,10 +185,11 @@ __pthread_cond_broadcast:
#if cond_lock != 0
add #cond_lock, r4
#endif
- mov.l .Lmwake6, r1
+ mov #LLL_SHARED, r5
+ mov.l .Lwake6, r1
bsrf r1
- nop
-.Lmwake6b:
+ extu.b r5, r5
+.Lwake6b:
bra 8b
nop
@@ -208,12 +206,12 @@ __pthread_cond_broadcast:
nop
.align 2
-.Lmwait5:
- .long __lll_mutex_lock_wait-.Lmwait5b
-.Lmwake5:
- .long __lll_mutex_unlock_wake-.Lmwake5b
-.Lmwake6:
- .long __lll_mutex_unlock_wake-.Lmwake6b
+.Lwait5:
+ .long __lll_lock_wait-.Lwait5b
+.Lwake5:
+ .long __lll_unlock_wake-.Lwake5b
+.Lwake6:
+ .long __lll_unlock_wake-.Lwake6b
.size __pthread_cond_broadcast, .-__pthread_cond_broadcast
versioned_symbol (libpthread, __pthread_cond_broadcast, pthread_cond_broadcast,
GLIBC_2_3_2)
diff -uprN ORIG/libc/nptl/sysdeps/unix/sysv/linux/sh/pthread_cond_signal.S LOCAL/libc/nptl/sysdeps/unix/sysv/linux/sh/pthread_cond_signal.S
--- ORIG/libc/nptl/sysdeps/unix/sysv/linux/sh/pthread_cond_signal.S 2004-06-18 08:55:56.000000000 +0900
+++ LOCAL/libc/nptl/sysdeps/unix/sysv/linux/sh/pthread_cond_signal.S 2007-08-01 09:52:30.000000000 +0900
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -18,17 +18,12 @@
#include <sysdep.h>
#include <shlib-compat.h>
+#include <lowlevellock.h>
#include <lowlevelcond.h>
#include <kernel-features.h>
+#include <pthread-errnos.h>
#include "lowlevel-atomic.h"
-#define SYS_futex 240
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-#define FUTEX_REQUEUE 3
-
-#define EINVAL 22
-
.text
/* int pthread_cond_signal (pthread_cond_t *cond) */
@@ -108,10 +103,12 @@ __pthread_cond_signal:
#if cond_lock != 0
add #cond_lock, r5
#endif
- mov.l .Lmwait4, r1
+ mov #LLL_SHARED, r6
+ extu.b r6, r6
+ mov.l .Lwait4, r1
bsrf r1
mov r2, r4
-.Lmwait4b:
+.Lwait4b:
bra 2b
nop
@@ -121,18 +118,19 @@ __pthread_cond_signal:
#if cond_lock != 0
add #cond_lock, r4
#endif
- mov.l .Lmwake4, r1
+ mov #LLL_SHARED, r5
+ mov.l .Lwake4, r1
bsrf r1
- nop
-.Lmwake4b:
+ extu.b r5, r5
+.Lwake4b:
bra 6b
nop
.align 2
-.Lmwait4:
- .long __lll_mutex_lock_wait-.Lmwait4b
-.Lmwake4:
- .long __lll_mutex_unlock_wake-.Lmwake4b
+.Lwait4:
+ .long __lll_lock_wait-.Lwait4b
+.Lwake4:
+ .long __lll_unlock_wake-.Lwake4b
.size __pthread_cond_signal, .-__pthread_cond_signal
versioned_symbol (libpthread, __pthread_cond_signal, pthread_cond_signal,
GLIBC_2_3_2)
diff -uprN ORIG/libc/nptl/sysdeps/unix/sysv/linux/sh/pthread_cond_timedwait.S LOCAL/libc/nptl/sysdeps/unix/sysv/linux/sh/pthread_cond_timedwait.S
--- ORIG/libc/nptl/sysdeps/unix/sysv/linux/sh/pthread_cond_timedwait.S 2007-06-18 09:03:43.000000000 +0900
+++ LOCAL/libc/nptl/sysdeps/unix/sysv/linux/sh/pthread_cond_timedwait.S 2007-08-01 10:00:23.000000000 +0900
@@ -18,16 +18,11 @@
#include <sysdep.h>
#include <shlib-compat.h>
+#include <lowlevellock.h>
#include <lowlevelcond.h>
#include <pthread-errnos.h>
#include "lowlevel-atomic.h"
-#define SYS_gettimeofday __NR_gettimeofday
-#define SYS_futex 240
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-
-
.text
/* int pthread_cond_timedwait (pthread_cond_t *cond, pthread_mutex_t *mutex,
@@ -164,7 +159,7 @@ __pthread_cond_timedwait:
mov r15, r4
add #16, r4
mov #0, r5
- mov #SYS_gettimeofday, r3
+ mov #__NR_gettimeofday, r3
trapa #0x12
SYSCALL_INST_PAD
@@ -182,7 +177,7 @@ __pthread_cond_timedwait:
mov r15, r4
add #16, r4
mov #0, r5
- mov #SYS_gettimeofday, r3
+ mov #__NR_gettimeofday, r3
trapa #0x12
SYSCALL_INST_PAD
@@ -403,10 +398,12 @@ __pthread_cond_timedwait:
#if cond_lock != 0
add #cond_lock, r5
#endif
- mov.l .Lmwait2, r1
+ mov #LLL_SHARED, r6
+ extu.b r6, r6
+ mov.l .Lwait2, r1
bsrf r1
mov r2, r4
-.Lmwait2b:
+.Lwait2b:
bra 2b
nop
@@ -416,10 +413,11 @@ __pthread_cond_timedwait:
#if cond_lock != 0
add #cond_lock, r4
#endif
- mov.l .Lmwake2, r1
+ mov #LLL_SHARED, r5
+ mov.l .Lmwait2, r1
bsrf r1
- nop
-.Lmwake2b:
+ extu.b r5, r5
+.Lmwait2b:
bra 4b
nop
@@ -429,10 +427,12 @@ __pthread_cond_timedwait:
#if cond_lock != 0
add #cond_lock, r5
#endif
- mov.l .Lmwait3, r1
+ mov #LLL_SHARED, r6
+ extu.b r6, r6
+ mov.l .Lwait3, r1
bsrf r1
mov r2, r4
-.Lmwait3b:
+.Lwait3b:
bra 6b
nop
@@ -442,10 +442,11 @@ __pthread_cond_timedwait:
#if cond_lock != 0
add #cond_lock, r4
#endif
- mov.l .Lmwake3, r1
+ mov #LLL_SHARED, r5
+ mov.l .Lmwait3, r1
bsrf r1
- nop
-.Lmwake3b:
+ extu.b r5, r5
+.Lmwait3b:
bra 11b
nop
@@ -464,25 +465,26 @@ __pthread_cond_timedwait:
#if cond_lock != 0
add #cond_lock, r4
#endif
- mov.l .Lmwake4, r1
+ mov #LLL_SHARED, r5
+ mov.l .Lmwait4, r1
bsrf r1
- nop
-.Lmwake4b:
+ extu.b r5, r5
+.Lmwait4b:
17:
bra 18b
mov.l @(24,r15), r0
.align 2
+.Lwait2:
+ .long __lll_lock_wait-.Lwait2b
.Lmwait2:
- .long __lll_mutex_lock_wait-.Lmwait2b
-.Lmwake2:
- .long __lll_mutex_unlock_wake-.Lmwake2b
+ .long __lll_unlock_wake-.Lmwait2b
+.Lwait3:
+ .long __lll_lock_wait-.Lwait3b
.Lmwait3:
- .long __lll_mutex_lock_wait-.Lmwait3b
-.Lmwake3:
- .long __lll_mutex_unlock_wake-.Lmwake3b
-.Lmwake4:
- .long __lll_mutex_unlock_wake-.Lmwake4b
+ .long __lll_unlock_wake-.Lmwait3b
+.Lmwait4:
+ .long __lll_unlock_wake-.Lmwait4b
.size __pthread_cond_timedwait, .-__pthread_cond_timedwait
versioned_symbol (libpthread, __pthread_cond_timedwait, pthread_cond_timedwait,
GLIBC_2_3_2)
@@ -507,10 +509,12 @@ __condvar_tw_cleanup:
#if cond_lock != 0
add #cond_lock, r5
#endif
- mov.l .Lmwait5, r1
+ mov #LLL_SHARED, r6
+ extu.b r6, r6
+ mov.l .Lwait5, r1
bsrf r1
mov r2, r4
-.Lmwait5b:
+.Lwait5b:
1:
mov.l @(broadcast_seq,r8), r0
@@ -600,10 +604,11 @@ __condvar_tw_cleanup:
#if cond_lock != 0
add #cond_lock, r4
#endif
- mov.l .Lmwake5, r1
+ mov #LLL_SHARED, r5
+ mov.l .Lmwait5, r1
bsrf r1
- nop
-.Lmwake5b:
+ extu.b r5, r5
+.Lmwait5b:
2:
/* Wake up all waiters to make sure no signal gets lost. */
@@ -636,10 +641,10 @@ __condvar_tw_cleanup:
sleep
.align 2
+.Lwait5:
+ .long __lll_lock_wait-.Lwait5b
.Lmwait5:
- .long __lll_mutex_lock_wait-.Lmwait5b
-.Lmwake5:
- .long __lll_mutex_unlock_wake-.Lmwake5b
+ .long __lll_unlock_wake-.Lmwait5b
.Lmlocki5:
.long __pthread_mutex_cond_lock-.Lmlocki5b
.Lresume:
diff -uprN ORIG/libc/nptl/sysdeps/unix/sysv/linux/sh/pthread_cond_wait.S LOCAL/libc/nptl/sysdeps/unix/sysv/linux/sh/pthread_cond_wait.S
--- ORIG/libc/nptl/sysdeps/unix/sysv/linux/sh/pthread_cond_wait.S 2007-06-18 09:03:43.000000000 +0900
+++ LOCAL/libc/nptl/sysdeps/unix/sysv/linux/sh/pthread_cond_wait.S 2007-08-01 10:04:30.000000000 +0900
@@ -18,14 +18,10 @@
#include <sysdep.h>
#include <shlib-compat.h>
+#include <lowlevellock.h>
#include <lowlevelcond.h>
#include "lowlevel-atomic.h"
-#define SYS_futex 240
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-
-
.text
/* int pthread_cond_wait (pthread_cond_t *cond, pthread_mutex_t *mutex) */
@@ -267,10 +263,12 @@ __pthread_cond_wait:
#if cond_lock != 0
add #cond_lock, r5
#endif
- mov.l .Lmwait0, r1
+ mov #LLL_SHARED, r6
+ extu.b r6, r6
+ mov.l .Lwait0, r1
bsrf r1
mov r2, r4
-.Lmwait0b:
+.Lwait0b:
bra 2b
nop
3:
@@ -279,10 +277,11 @@ __pthread_cond_wait:
#if cond_lock != 0
add #cond_lock, r4
#endif
- mov.l .Lmwake0, r1
+ mov #LLL_SHARED, r5
+ mov.l .Lwake0, r1
bsrf r1
- nop
-.Lmwake0b:
+ extu.b r5, r5
+.Lwake0b:
bra 4b
nop
@@ -292,10 +291,12 @@ __pthread_cond_wait:
#if cond_lock != 0
add #cond_lock, r5
#endif
- mov.l .Lmwait1, r1
+ mov #LLL_SHARED, r6
+ extu.b r6, r6
+ mov.l .Lwait1, r1
bsrf r1
mov r2, r4
-.Lmwait1b:
+.Lwait1b:
bra 6b
nop
@@ -305,10 +306,11 @@ __pthread_cond_wait:
#if cond_lock != 0
add #cond_lock, r4
#endif
- mov.l .Lmwake1, r1
+ mov #LLL_SHARED, r5
+ mov.l .Lwake1, r1
bsrf r1
- nop
-.Lmwake1b:
+ extu.b r5, r5
+.Lwake1b:
bra 11b
nop
@@ -327,26 +329,27 @@ __pthread_cond_wait:
#if cond_lock != 0
add #cond_lock, r4
#endif
- mov.l .Lmwake2, r1
+ mov #LLL_SHARED, r5
+ mov.l .Lwake2, r1
bsrf r1
- nop
-.Lmwake2b:
+ extu.b r5, r5
+.Lwake2b:
13:
bra 14b
mov.l @(12,r15), r0
.align 2
-.Lmwait0:
- .long __lll_mutex_lock_wait-.Lmwait0b
-.Lmwake0:
- .long __lll_mutex_unlock_wake-.Lmwake0b
-.Lmwait1:
- .long __lll_mutex_lock_wait-.Lmwait1b
-.Lmwake1:
- .long __lll_mutex_unlock_wake-.Lmwake1b
-.Lmwake2:
- .long __lll_mutex_unlock_wake-.Lmwake2b
+.Lwait0:
+ .long __lll_lock_wait-.Lwait0b
+.Lwake0:
+ .long __lll_unlock_wake-.Lwake0b
+.Lwait1:
+ .long __lll_lock_wait-.Lwait1b
+.Lwake1:
+ .long __lll_unlock_wake-.Lwake1b
+.Lwake2:
+ .long __lll_unlock_wake-.Lwake2b
.size __pthread_cond_wait, .-__pthread_cond_wait
versioned_symbol (libpthread, __pthread_cond_wait, pthread_cond_wait,
GLIBC_2_3_2)
@@ -371,10 +374,12 @@ __condvar_w_cleanup:
#if cond_lock != 0
add #cond_lock, r5
#endif
- mov.l .Lmwait3, r1
+ mov #LLL_SHARED, r6
+ extu.b r6, r6
+ mov.l .Lwait3, r1
bsrf r1
mov r2, r4
-.Lmwait3b:
+.Lwait3b:
1:
mov.l @(broadcast_seq,r8), r0
@@ -464,10 +469,11 @@ __condvar_w_cleanup:
#if cond_lock != 0
add #cond_lock, r4
#endif
- mov.l .Lmwake3, r1
+ mov #LLL_SHARED, r5
+ mov.l .Lwake3, r1
bsrf r1
- nop
-.Lmwake3b:
+ extu.b r5, r5
+.Lwake3b:
2:
/* Wake up all waiters to make sure no signal gets lost. */
@@ -500,10 +506,10 @@ __condvar_w_cleanup:
sleep
.align 2
-.Lmwait3:
- .long __lll_mutex_lock_wait-.Lmwait3b
-.Lmwake3:
- .long __lll_mutex_unlock_wake-.Lmwake3b
+.Lwait3:
+ .long __lll_lock_wait-.Lwait3b
+.Lwake3:
+ .long __lll_unlock_wake-.Lwake3b
.Lmlocki3:
.long __pthread_mutex_cond_lock-.Lmlocki3b
.Lresume:
diff -uprN ORIG/libc/nptl/sysdeps/unix/sysv/linux/sh/pthread_once.S LOCAL/libc/nptl/sysdeps/unix/sysv/linux/sh/pthread_once.S
--- ORIG/libc/nptl/sysdeps/unix/sysv/linux/sh/pthread_once.S 2007-06-18 09:03:43.000000000 +0900
+++ LOCAL/libc/nptl/sysdeps/unix/sysv/linux/sh/pthread_once.S 2007-07-31 20:37:32.000000000 +0900
@@ -19,12 +19,9 @@
#include <unwindbuf.h>
#include <sysdep.h>
#include <kernel-features.h>
+#include <lowlevellock.h>
#include "lowlevel-atomic.h"
-#define SYS_futex 240
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-#define FUTEX_PRIVATE_FLAG 128
.comm __fork_generation, 4, 4
diff -uprN ORIG/libc/nptl/sysdeps/unix/sysv/linux/sh/pthread_rwlock_rdlock.S LOCAL/libc/nptl/sysdeps/unix/sysv/linux/sh/pthread_rwlock_rdlock.S
--- ORIG/libc/nptl/sysdeps/unix/sysv/linux/sh/pthread_rwlock_rdlock.S 2007-06-18 09:03:43.000000000 +0900
+++ LOCAL/libc/nptl/sysdeps/unix/sysv/linux/sh/pthread_rwlock_rdlock.S 2007-08-01 10:22:01.000000000 +0900
@@ -17,17 +17,13 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <lowlevelrwlock.h>
#include <pthread-errnos.h>
#include <tcb-offsets.h>
#include <kernel-features.h>
#include "lowlevel-atomic.h"
-#define SYS_futex 240
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-#define FUTEX_PRIVATE_FLAG 128
-
.text
@@ -156,10 +152,12 @@ __pthread_rwlock_rdlock:
#if MUTEX != 0
add #MUTEX, r5
#endif
- mov r2, r4
+ mov #PSHARED, r0
+ mov.b @(r0,r8), r6
+ extu.b r6, r6
mov.l .Lwait0, r1
bsrf r1
- nop
+ mov r2, r4
.Lwait0b:
bra 2b
nop
@@ -182,6 +180,9 @@ __pthread_rwlock_rdlock:
#if MUTEX != 0
add #MUTEX, r4
#endif
+ mov #PSHARED, r0
+ mov.b @(r0,r8), r5
+ extu.b r5, r5
mov.l .Lwake0, r1
bsrf r1
nop
@@ -210,6 +211,9 @@ __pthread_rwlock_rdlock:
#if MUTEX != 0
add #MUTEX, r4
#endif
+ mov #PSHARED, r0
+ mov.b @(r0,r8), r5
+ extu.b r5, r5
mov.l .Lwake1, r1
bsrf r1
nop
@@ -222,23 +226,25 @@ __pthread_rwlock_rdlock:
#if MUTEX != 0
add #MUTEX, r5
#endif
- mov r2, r4
+ mov #PSHARED, r0
+ mov.b @(r0,r8), r6
+ extu.b r6, r6
mov.l .Lwait1, r1
bsrf r1
- nop
+ mov r2, r4
.Lwait1b:
bra 13b
nop
.align 2
.Lwait0:
- .long __lll_mutex_lock_wait-.Lwait0b
+ .long __lll_lock_wait-.Lwait0b
.Lwake0:
- .long __lll_mutex_unlock_wake-.Lwake0b
+ .long __lll_unlock_wake-.Lwake0b
.Lwait1:
- .long __lll_mutex_lock_wait-.Lwait1b
+ .long __lll_lock_wait-.Lwait1b
.Lwake1:
- .long __lll_mutex_unlock_wake-.Lwake1b
+ .long __lll_unlock_wake-.Lwake1b
.size __pthread_rwlock_rdlock,.-__pthread_rwlock_rdlock
.globl pthread_rwlock_rdlock
diff -uprN ORIG/libc/nptl/sysdeps/unix/sysv/linux/sh/pthread_rwlock_timedrdlock.S LOCAL/libc/nptl/sysdeps/unix/sysv/linux/sh/pthread_rwlock_timedrdlock.S
--- ORIG/libc/nptl/sysdeps/unix/sysv/linux/sh/pthread_rwlock_timedrdlock.S 2007-06-18 09:03:43.000000000 +0900
+++ LOCAL/libc/nptl/sysdeps/unix/sysv/linux/sh/pthread_rwlock_timedrdlock.S 2007-08-01 14:42:21.000000000 +0900
@@ -17,18 +17,13 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <lowlevelrwlock.h>
#include <pthread-errnos.h>
#include <tcb-offsets.h>
#include <kernel-features.h>
#include "lowlevel-atomic.h"
-#define SYS_gettimeofday __NR_gettimeofday
-#define SYS_futex 240
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-#define FUTEX_PRIVATE_FLAG 128
-
.text
@@ -92,7 +87,7 @@ pthread_rwlock_timedrdlock:
/* Get current time. */
mov r15, r4
mov #0, r5
- mov #SYS_gettimeofday, r3
+ mov #__NR_gettimeofday, r3
trapa #0x12
SYSCALL_INST_PAD
@@ -213,10 +208,12 @@ pthread_rwlock_timedrdlock:
#if MUTEX != 0
add #MUTEX, r5
#endif
- mov r2, r4
+ mov #PSHARED, r0
+ mov.b @(r0,r8), r6
+ extu.b r6, r6
mov.l .Lwait2, r1
bsrf r1
- nop
+ mov r2, r4
.Lwait2b:
bra 2b
nop
@@ -239,6 +236,9 @@ pthread_rwlock_timedrdlock:
#if MUTEX != 0
add #MUTEX, r4
#endif
+ mov #PSHARED, r0
+ mov.b @(r0,r8), r5
+ extu.b r5, r5
mov.l .Lwake2, r1
bsrf r1
nop
@@ -267,6 +267,9 @@ pthread_rwlock_timedrdlock:
#if MUTEX != 0
add #MUTEX, r4
#endif
+ mov #PSHARED, r0
+ mov.b @(r0,r8), r5
+ extu.b r5, r5
mov.l .Lwake3, r1
bsrf r1
nop
@@ -279,10 +282,12 @@ pthread_rwlock_timedrdlock:
#if MUTEX != 0
add #MUTEX, r5
#endif
- mov r2, r4
+ mov #PSHARED, r0
+ mov.b @(r0,r8), r6
+ extu.b r6, r6
mov.l .Lwait3, r1
bsrf r1
- nop
+ mov r2, r4
.Lwait3b:
bra 13b
nop
@@ -297,11 +302,11 @@ pthread_rwlock_timedrdlock:
.align 2
.Lwait2:
- .long __lll_mutex_lock_wait-.Lwait2b
+ .long __lll_lock_wait-.Lwait2b
.Lwake2:
- .long __lll_mutex_unlock_wake-.Lwake2b
+ .long __lll_unlock_wake-.Lwake2b
.Lwait3:
- .long __lll_mutex_lock_wait-.Lwait3b
+ .long __lll_lock_wait-.Lwait3b
.Lwake3:
- .long __lll_mutex_unlock_wake-.Lwake3b
+ .long __lll_unlock_wake-.Lwake3b
.size pthread_rwlock_timedrdlock,.-pthread_rwlock_timedrdlock
diff -uprN ORIG/libc/nptl/sysdeps/unix/sysv/linux/sh/pthread_rwlock_timedwrlock.S LOCAL/libc/nptl/sysdeps/unix/sysv/linux/sh/pthread_rwlock_timedwrlock.S
--- ORIG/libc/nptl/sysdeps/unix/sysv/linux/sh/pthread_rwlock_timedwrlock.S 2007-06-18 09:03:43.000000000 +0900
+++ LOCAL/libc/nptl/sysdeps/unix/sysv/linux/sh/pthread_rwlock_timedwrlock.S 2007-08-01 10:28:41.000000000 +0900
@@ -17,18 +17,13 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <lowlevelrwlock.h>
#include <pthread-errnos.h>
#include <tcb-offsets.h>
#include <kernel-features.h>
#include "lowlevel-atomic.h"
-#define SYS_gettimeofday __NR_gettimeofday
-#define SYS_futex 240
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-#define FUTEX_PRIVATE_FLAG 128
-
.text
@@ -88,7 +83,7 @@ pthread_rwlock_timedwrlock:
/* Get current time. */
mov r15, r4
mov #0, r5
- mov #SYS_gettimeofday, r3
+ mov #__NR_gettimeofday, r3
trapa #0x12
SYSCALL_INST_PAD
@@ -211,10 +206,12 @@ pthread_rwlock_timedwrlock:
#if MUTEX != 0
add #MUTEX, r5
#endif
- mov r2, r4
+ mov #PSHARED, r0
+ mov.b @(r0,r8), r6
+ extu.b r6, r6
mov.l .Lwait6, r1
bsrf r1
- nop
+ mov r2, r4
.Lwait6b:
bra 2b
nop
@@ -232,6 +229,9 @@ pthread_rwlock_timedwrlock:
#if MUTEX != 0
add #MUTEX, r4
#endif
+ mov #PSHARED, r0
+ mov.b @(r0,r8), r5
+ extu.b r5, r5
mov.l .Lwake6, r1
bsrf r1
nop
@@ -255,6 +255,9 @@ pthread_rwlock_timedwrlock:
#if MUTEX != 0
add #MUTEX, r4
#endif
+ mov #PSHARED, r0
+ mov.b @(r0,r8), r5
+ extu.b r5, r5
mov.l .Lwake7, r1
bsrf r1
nop
@@ -267,10 +270,12 @@ pthread_rwlock_timedwrlock:
#if MUTEX != 0
add #MUTEX, r5
#endif
- mov r2, r4
+ mov #PSHARED, r0
+ mov.b @(r0,r8), r6
+ extu.b r6, r6
mov.l .Lwait7, r1
bsrf r1
- nop
+ mov r2, r4
.Lwait7b:
bra 13b
nop
@@ -281,11 +286,11 @@ pthread_rwlock_timedwrlock:
.align 2
.Lwait6:
- .long __lll_mutex_lock_wait-.Lwait6b
+ .long __lll_lock_wait-.Lwait6b
.Lwake6:
- .long __lll_mutex_unlock_wake-.Lwake6b
+ .long __lll_unlock_wake-.Lwake6b
.Lwait7:
- .long __lll_mutex_lock_wait-.Lwait7b
+ .long __lll_lock_wait-.Lwait7b
.Lwake7:
- .long __lll_mutex_unlock_wake-.Lwake7b
+ .long __lll_unlock_wake-.Lwake7b
.size pthread_rwlock_timedwrlock,.-pthread_rwlock_timedwrlock
diff -uprN ORIG/libc/nptl/sysdeps/unix/sysv/linux/sh/pthread_rwlock_unlock.S LOCAL/libc/nptl/sysdeps/unix/sysv/linux/sh/pthread_rwlock_unlock.S
--- ORIG/libc/nptl/sysdeps/unix/sysv/linux/sh/pthread_rwlock_unlock.S 2007-06-18 09:03:43.000000000 +0900
+++ LOCAL/libc/nptl/sysdeps/unix/sysv/linux/sh/pthread_rwlock_unlock.S 2007-08-01 10:32:50.000000000 +0900
@@ -17,15 +17,11 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <lowlevelrwlock.h>
#include <kernel-features.h>
#include "lowlevel-atomic.h"
-#define SYS_futex 240
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-#define FUTEX_PRIVATE_FLAG 128
-
.text
@@ -138,10 +134,12 @@ __pthread_rwlock_unlock:
#if MUTEX != 0
add #MUTEX, r5
#endif
- mov r2, r4
+ mov #PSHARED, r0
+ mov.b @(r0,r8), r6
+ extu.b r6, r6
mov.l .Lwait8, r1
bsrf r1
- nop
+ mov r2, r4
.Lwait8b:
bra 2b
nop
@@ -150,6 +148,9 @@ __pthread_rwlock_unlock:
#if MUTEX != 0
add #MUTEX, r4
#endif
+ mov #PSHARED, r0
+ mov.b @(r0,r8), r5
+ extu.b r5, r5
mov.l .Lwake8, r1
bsrf r1
nop
@@ -164,6 +165,9 @@ __pthread_rwlock_unlock:
#if MUTEX != 0
add #MUTEX, r4
#endif
+ mov #PSHARED, r0
+ mov.b @(r0,r8), r5
+ extu.b r5, r5
mov.l .Lwake9, r1
bsrf r1
nop
@@ -179,11 +183,11 @@ __pthread_rwlock_unlock:
#endif
.align 2
.Lwait8:
- .long __lll_mutex_lock_wait-.Lwait8b
+ .long __lll_lock_wait-.Lwait8b
.Lwake8:
- .long __lll_mutex_unlock_wake-.Lwake8b
+ .long __lll_unlock_wake-.Lwake8b
.Lwake9:
- .long __lll_mutex_unlock_wake-.Lwake9b
+ .long __lll_unlock_wake-.Lwake9b
.size __pthread_rwlock_unlock,.-__pthread_rwlock_unlock
.globl pthread_rwlock_unlock
diff -uprN ORIG/libc/nptl/sysdeps/unix/sysv/linux/sh/pthread_rwlock_wrlock.S LOCAL/libc/nptl/sysdeps/unix/sysv/linux/sh/pthread_rwlock_wrlock.S
--- ORIG/libc/nptl/sysdeps/unix/sysv/linux/sh/pthread_rwlock_wrlock.S 2007-06-18 09:03:43.000000000 +0900
+++ LOCAL/libc/nptl/sysdeps/unix/sysv/linux/sh/pthread_rwlock_wrlock.S 2007-08-01 10:36:18.000000000 +0900
@@ -17,17 +17,13 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <lowlevelrwlock.h>
#include <pthread-errnos.h>
#include <tcb-offsets.h>
#include <kernel-features.h>
#include "lowlevel-atomic.h"
-#define SYS_futex 240
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-#define FUTEX_PRIVATE_FLAG 128
-
.text
@@ -145,10 +141,12 @@ __pthread_rwlock_wrlock:
#if MUTEX != 0
add #MUTEX, r5
#endif
- mov r2, r4
+ mov #PSHARED, r0
+ mov.b @(r0,r8), r6
+ extu.b r6, r6
mov.l .Lwait4, r1
bsrf r1
- nop
+ mov r2, r4
.Lwait4b:
bra 2b
nop
@@ -166,6 +164,9 @@ __pthread_rwlock_wrlock:
#if MUTEX != 0
add #MUTEX, r4
#endif
+ mov #PSHARED, r0
+ mov.b @(r0,r8), r5
+ extu.b r5, r5
mov.l .Lwake4, r1
bsrf r1
nop
@@ -192,6 +193,9 @@ __pthread_rwlock_wrlock:
#if MUTEX != 0
add #MUTEX, r4
#endif
+ mov #PSHARED, r0
+ mov.b @(r0,r8), r5
+ extu.b r5, r5
mov.l .Lwake5, r1
bsrf r1
nop
@@ -204,23 +208,25 @@ __pthread_rwlock_wrlock:
#if MUTEX != 0
add #MUTEX, r5
#endif
- mov r2, r4
+ mov #PSHARED, r0
+ mov.b @(r0,r8), r6
+ extu.b r6, r6
mov.l .Lwait5, r1
bsrf r1
- nop
+ mov r2, r4
.Lwait5b:
bra 13b
nop
.align 2
.Lwait4:
- .long __lll_mutex_lock_wait-.Lwait4b
+ .long __lll_lock_wait-.Lwait4b
.Lwake4:
- .long __lll_mutex_unlock_wake-.Lwake4b
+ .long __lll_unlock_wake-.Lwake4b
.Lwait5:
- .long __lll_mutex_lock_wait-.Lwait5b
+ .long __lll_lock_wait-.Lwait5b
.Lwake5:
- .long __lll_mutex_unlock_wake-.Lwake5b
+ .long __lll_unlock_wake-.Lwake5b
.globl pthread_rwlock_wrlock
pthread_rwlock_wrlock = __pthread_rwlock_wrlock
diff -uprN ORIG/libc/nptl/sysdeps/unix/sysv/linux/sh/sem_post.S LOCAL/libc/nptl/sysdeps/unix/sysv/linux/sh/sem_post.S
--- ORIG/libc/nptl/sysdeps/unix/sysv/linux/sh/sem_post.S 2007-06-18 09:03:43.000000000 +0900
+++ LOCAL/libc/nptl/sysdeps/unix/sysv/linux/sh/sem_post.S 2007-08-01 17:23:23.000000000 +0900
@@ -20,14 +20,10 @@
#include <shlib-compat.h>
#include <pthread-errnos.h>
#include <structsem.h>
+#include <lowlevellock.h>
#include "lowlevel-atomic.h"
-#define SYS_gettimeofday __NR_gettimeofday
-#define SYS_futex 240
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-
.text
.globl __new_sem_post
@@ -65,7 +61,12 @@ __new_sem_post:
mov.l .Lerrno3, r0
stc gbr, r1
mov.l @(r0, r12), r0
- add r1, r0
+ bra .Lexit
+ add r1, r0
+ .align 2
+.Lerrno3:
+ .long errno@GOTTPOFF
+.Lexit:
#else
mov.l .Lerrloc3, r1
bsrf r1
@@ -81,10 +82,7 @@ __new_sem_post:
.align 2
.Lgot3:
.long _GLOBAL_OFFSET_TABLE_
-#if USE___THREAD
-.Lerrno3:
- .long errno@GOTTPOFF
-#else
+#if !USE___THREAD
.Lerrloc3:
.long __errno_location@PLT-(.Lerrloc3b-.)
#endif
diff -uprN ORIG/libc/nptl/sysdeps/unix/sysv/linux/sh/sem_timedwait.S LOCAL/libc/nptl/sysdeps/unix/sysv/linux/sh/sem_timedwait.S
--- ORIG/libc/nptl/sysdeps/unix/sysv/linux/sh/sem_timedwait.S 2007-06-18 09:03:43.000000000 +0900
+++ LOCAL/libc/nptl/sysdeps/unix/sysv/linux/sh/sem_timedwait.S 2007-08-01 17:24:02.000000000 +0900
@@ -21,13 +21,10 @@
#include <pthread-errnos.h>
#include <tcb-offsets.h>
#include <structsem.h>
+#include <lowlevellock.h>
#include "lowlevel-atomic.h"
-#define SYS_gettimeofday __NR_gettimeofday
-#define SYS_futex 240
-#define FUTEX_WAIT 0
-
#if VALUE != 0
# error "code needs to be rewritten for VALUE != 0"
#endif
@@ -81,7 +78,7 @@ sem_timedwait:
/* Compute relative timeout. */
mov r15, r4
mov #0, r5
- mov #SYS_gettimeofday, r3
+ mov #__NR_gettimeofday, r3
trapa #0x12
SYSCALL_INST_PAD
@@ -180,15 +177,19 @@ sem_timedwait:
mov.l .Lerrno2, r0
stc gbr, r1
mov.l @(r0, r12), r0
- add r1, r0
- mov.l r10, @r0
+ bra .Lexit
+ add r1, r0
+ .align 2
+.Lerrno2:
+ .long errno@GOTTPOFF
+.Lexit:
#else
mov.l .Lerrloc2, r1
bsrf r1
nop
.Lerrloc2b:
- mov.l r10, @r0
#endif
+ mov.l r10, @r0
DEC (@(NWAITERS,r8), r2)
bra 10b
mov #-1, r0
@@ -200,10 +201,7 @@ sem_timedwait:
.long 1000000000
.Lgot2:
.long _GLOBAL_OFFSET_TABLE_
-#if USE___THREAD
-.Lerrno2:
- .long errno@GOTTPOFF
-#else
+#if !USE___THREAD
.Lerrloc2:
.long __errno_location@PLT-(.Lerrloc2b-.)
#endif
diff -uprN ORIG/libc/nptl/sysdeps/unix/sysv/linux/sh/sem_trywait.S LOCAL/libc/nptl/sysdeps/unix/sysv/linux/sh/sem_trywait.S
--- ORIG/libc/nptl/sysdeps/unix/sysv/linux/sh/sem_trywait.S 2004-06-30 00:51:02.000000000 +0900
+++ LOCAL/libc/nptl/sysdeps/unix/sysv/linux/sh/sem_trywait.S 2007-08-01 17:22:26.000000000 +0900
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -19,6 +19,7 @@
#include <sysdep.h>
#include <shlib-compat.h>
#include <pthread-errnos.h>
+#include <lowlevellock.h>
#include "lowlevel-atomic.h"
@@ -60,15 +61,19 @@ __new_sem_trywait:
mov.l .Lerrno1, r0
stc gbr, r1
mov.l @(r0, r12), r0
- add r1, r0
- mov.l r8, @r0
+ bra .Lexit
+ add r1, r0
+ .align 2
+.Lerrno1:
+ .long errno@GOTTPOFF
+.Lexit:
#else
mov.l .Lerrloc1, r1
bsrf r1
nop
.Lerrloc1b:
- mov.l r8, @r0
#endif
+ mov.l r8, @r0
lds.l @r15+, pr
mov.l @r15+, r8
mov.l @r15+, r12
@@ -78,10 +83,7 @@ __new_sem_trywait:
.align 2
.Lgot1:
.long _GLOBAL_OFFSET_TABLE_
-#if USE___THREAD
-.Lerrno1:
- .long errno@GOTTPOFF
-#else
+#if !USE___THREAD
.Lerrloc1:
.long __errno_location@PLT-(.Lerrloc1b-.)
#endif
diff -uprN ORIG/libc/nptl/sysdeps/unix/sysv/linux/sh/sem_wait.S LOCAL/libc/nptl/sysdeps/unix/sysv/linux/sh/sem_wait.S
--- ORIG/libc/nptl/sysdeps/unix/sysv/linux/sh/sem_wait.S 2007-06-18 09:03:43.000000000 +0900
+++ LOCAL/libc/nptl/sysdeps/unix/sysv/linux/sh/sem_wait.S 2007-08-01 17:21:55.000000000 +0900
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -21,13 +21,10 @@
#include <pthread-errnos.h>
#include <tcb-offsets.h>
#include <structsem.h>
+#include <lowlevellock.h>
#include "lowlevel-atomic.h"
-#define SYS_gettimeofday __NR_gettimeofday
-#define SYS_futex 240
-#define FUTEX_WAIT 0
-
#if VALUE != 0
# error "code needs to be rewritten for VALUE != 0"
#endif
@@ -138,24 +135,26 @@ __new_sem_wait:
mov.l .Lerrno0, r0
stc gbr, r1
mov.l @(r0, r12), r0
- add r1, r0
- mov.l r8, @r0
+ bra .Lexit
+ add r1, r0
+ .align 2
+.Lerrno0:
+ .long errno@GOTTPOFF
+.Lexit:
#else
mov.l .Lerrloc0, r1
bsrf r1
nop
.Lerrloc0b:
- mov.l r8, @r0
#endif
+ mov.l r8, @r0
bra 9b
mov #-1, r0
+ .align 2
.Lgot0:
.long _GLOBAL_OFFSET_TABLE_
-#if USE___THREAD
-.Lerrno0:
- .long errno@GOTTPOFF
-#else
+#if !USE___THREAD
.Lerrloc0:
.long __errno_location@PLT-(.Lerrloc0b-.)
#endif
* Re: [PATCH] lowlevellock.h cleanups, LLL_SHARED vs. LLL_PRIVATE on lll locks
2007-08-01 23:00 ` Kaz Kojima
@ 2007-08-03 15:48 ` Ulrich Drepper
2007-08-04 1:38 ` Kaz Kojima
0 siblings, 1 reply; 5+ messages in thread
From: Ulrich Drepper @ 2007-08-03 15:48 UTC (permalink / raw)
To: Kaz Kojima; +Cc: libc-hacker
-----BEGIN PGP SIGNED MESSAGE-----
Hash: SHA1
Applied.
- --
⧫ Ulrich Drepper ⧫ Red Hat, Inc. ⧫ 444 Castro St ⧫ Mountain View, CA ❖
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1.4.7 (GNU/Linux)
iD8DBQFGs0422ijCOnn/RHQRAjJ0AJ4kH4+GaJbXvsCKYnT418gmeKvTDQCfTgXS
QZtQ8kPhh8CGRDJOot8JNbg=
=dWBY
-----END PGP SIGNATURE-----
* Re: [PATCH] lowlevellock.h cleanups, LLL_SHARED vs. LLL_PRIVATE on lll locks
2007-08-03 15:48 ` Ulrich Drepper
@ 2007-08-04 1:38 ` Kaz Kojima
0 siblings, 0 replies; 5+ messages in thread
From: Kaz Kojima @ 2007-08-04 1:38 UTC (permalink / raw)
To: drepper; +Cc: libc-hacker
Ulrich Drepper <drepper@redhat.com> wrote:
> Applied.
Thanks!
Regards,
kaz