* [PATCH] glibc build fixes
@ 2006-02-13 13:02 Jakub Jelinek
2006-02-15 17:20 ` Ulrich Drepper
0 siblings, 1 reply; 7+ messages in thread
From: Jakub Jelinek @ 2006-02-13 13:02 UTC (permalink / raw)
To: Ulrich Drepper, Roland McGrath; +Cc: Glibc hackers
Hi!
With this I got glibc to build again on the 7 arches I did the build on.
2006-02-13 Jakub Jelinek <jakub@redhat.com>
* sysdeps/unix/sysv/linux/not-cancel.h (__openat_not_cancel,
__openat64_not_cancel): Remove prototypes.
(__openat_nocancel, __openat64_nocancel): New prototypes or defines.
(openat_not_cancel, openat_not_cancel_3, openat64_not_cancel,
openat64_not_cancel_3): Use them.
nptl/
* descr.h [!__PTHREAD_MUTEX_HAVE_PREV] (DEQUEUE_MUTEX):
Set robust_list.__next rather than robust_list.
* sysdeps/unix/sysv/linux/alpha/bits/pthreadtypes.h
(__pthread_list_t): New typedef.
(pthread_mutex_t): Replace __next and __prev fields with __list.
* sysdeps/unix/sysv/linux/ia64/bits/pthreadtypes.h
(__pthread_list_t): New typedef.
(pthread_mutex_t): Replace __next and __prev fields with __list.
* sysdeps/unix/sysv/linux/powerpc/bits/pthreadtypes.h
(__pthread_list_t, __pthread_slist_t): New typedefs.
(pthread_mutex_t): Replace __next and __prev fields with __list.
* sysdeps/unix/sysv/linux/s390/bits/pthreadtypes.h
(__pthread_list_t, __pthread_slist_t): New typedefs.
(pthread_mutex_t): Replace __next and __prev fields with __list.
* sysdeps/unix/sysv/linux/sparc/bits/pthreadtypes.h
(__pthread_list_t, __pthread_slist_t): New typedefs.
(pthread_mutex_t): Replace __next and __prev fields with __list.
* sysdeps/unix/sysv/linux/sh/bits/pthreadtypes.h
(__pthread_slist_t): New typedef.
(pthread_mutex_t): Replace __next field with __list.
--- libc/nptl/sysdeps/unix/sysv/linux/alpha/bits/pthreadtypes.h 30 Jan 2006 09:29:48 -0000 1.7.2.4
+++ libc/nptl/sysdeps/unix/sysv/linux/alpha/bits/pthreadtypes.h 13 Feb 2006 08:18:12 -0000
@@ -43,6 +43,13 @@ typedef union
} pthread_attr_t;
+typedef struct __pthread_internal_list
+{
+ struct __pthread_internal_list *__prev;
+ struct __pthread_internal_list *__next;
+} __pthread_list_t;
+
+
/* Data structures for mutex handling. The structure of the attribute
type is deliberately not exposed. */
typedef union
@@ -57,8 +64,7 @@ typedef union
binary compatibility. */
int __kind;
int __spins;
- struct __pthread_mutex_s *__next;
- struct __pthread_mutex_s *__prev;
+ __pthread_list_t __list;
#define __PTHREAD_MUTEX_HAVE_PREV 1
} __data;
char __size[__SIZEOF_PTHREAD_MUTEX_T];
--- libc/nptl/sysdeps/unix/sysv/linux/ia64/bits/pthreadtypes.h 6 Jan 2006 21:55:55 -0000 1.13.2.3
+++ libc/nptl/sysdeps/unix/sysv/linux/ia64/bits/pthreadtypes.h 13 Feb 2006 08:18:12 -0000
@@ -43,6 +43,13 @@ typedef union
} pthread_attr_t;
+typedef struct __pthread_internal_list
+{
+ struct __pthread_internal_list *__prev;
+ struct __pthread_internal_list *__next;
+} __pthread_list_t;
+
+
/* Data structures for mutex handling. The structure of the attribute
type is not exposed on purpose. */
typedef union
@@ -57,8 +64,7 @@ typedef union
binary compatibility. */
int __kind;
int __spins;
- struct __pthread_mutex_s *__next;
- struct __pthread_mutex_s *__prev;
+ __pthread_list_t __list;
#define __PTHREAD_MUTEX_HAVE_PREV 1
} __data;
char __size[__SIZEOF_PTHREAD_MUTEX_T];
--- libc/nptl/sysdeps/unix/sysv/linux/powerpc/bits/pthreadtypes.h 9 Jan 2006 21:54:59 -0000 1.14.2.4
+++ libc/nptl/sysdeps/unix/sysv/linux/powerpc/bits/pthreadtypes.h 13 Feb 2006 08:18:12 -0000
@@ -58,6 +58,20 @@ typedef union
} pthread_attr_t;
+#if __WORDSIZE == 64
+typedef struct __pthread_internal_list
+{
+ struct __pthread_internal_list *__prev;
+ struct __pthread_internal_list *__next;
+} __pthread_list_t;
+#else
+typedef struct __pthread_internal_slist
+{
+ struct __pthread_internal_slist *__next;
+} __pthread_slist_t;
+#endif
+
+
/* Data structures for mutex handling. The structure of the attribute
type is deliberately not exposed. */
typedef union
@@ -75,15 +89,14 @@ typedef union
int __kind;
#if __WORDSIZE == 64
int __spins;
- struct __pthread_mutex_s *__next;
- struct __pthread_mutex_s *__prev;
+ __pthread_list_t __list;
# define __PTHREAD_MUTEX_HAVE_PREV 1
#else
unsigned int __nusers;
__extension__ union
{
int __spins;
- struct __pthread_mutex_s *__next;
+ __pthread_slist_t __list;
};
#endif
} __data;
--- libc/nptl/sysdeps/unix/sysv/linux/s390/bits/pthreadtypes.h 9 Jan 2006 21:54:59 -0000 1.13.2.4
+++ libc/nptl/sysdeps/unix/sysv/linux/s390/bits/pthreadtypes.h 13 Feb 2006 08:18:12 -0000
@@ -57,6 +57,20 @@ typedef union
} pthread_attr_t;
+#if __WORDSIZE == 64
+typedef struct __pthread_internal_list
+{
+ struct __pthread_internal_list *__prev;
+ struct __pthread_internal_list *__next;
+} __pthread_list_t;
+#else
+typedef struct __pthread_internal_slist
+{
+ struct __pthread_internal_slist *__next;
+} __pthread_slist_t;
+#endif
+
+
/* Data structures for mutex handling. The structure of the attribute
type is not exposed on purpose. */
typedef union
@@ -74,15 +88,14 @@ typedef union
int __kind;
#if __WORDSIZE == 64
int __spins;
- struct __pthread_mutex_s *__next;
- struct __pthread_mutex_s *__prev;
+ __pthread_list_t __list;
# define __PTHREAD_MUTEX_HAVE_PREV 1
#else
unsigned int __nusers;
__extension__ union
{
int __spins;
- struct __pthread_mutex_s *__next;
+ __pthread_slist_t __list;
};
#endif
} __data;
--- libc/nptl/sysdeps/unix/sysv/linux/sh/bits/pthreadtypes.h 30 Jan 2006 09:29:48 -0000 1.10.2.4
+++ libc/nptl/sysdeps/unix/sysv/linux/sh/bits/pthreadtypes.h 13 Feb 2006 08:18:12 -0000
@@ -44,6 +44,12 @@ typedef union
} pthread_attr_t;
+typedef struct __pthread_internal_slist
+{
+ struct __pthread_internal_slist *__next;
+} __pthread_slist_t;
+
+
/* Data structures for mutex handling. The structure of the attribute
type is not exposed on purpose. */
typedef union
@@ -60,7 +66,7 @@ typedef union
__extension__ union
{
int __spins;
- struct __pthread_mutex_s *__next;
+ __pthread_slist_t __list;
};
} __data;
char __size[__SIZEOF_PTHREAD_MUTEX_T];
--- libc/nptl/sysdeps/unix/sysv/linux/sparc/bits/pthreadtypes.h 30 Jan 2006 09:29:48 -0000 1.7.2.4
+++ libc/nptl/sysdeps/unix/sysv/linux/sparc/bits/pthreadtypes.h 13 Feb 2006 08:18:12 -0000
@@ -58,6 +58,20 @@ typedef union
} pthread_attr_t;
+#if __WORDSIZE == 64
+typedef struct __pthread_internal_list
+{
+ struct __pthread_internal_list *__prev;
+ struct __pthread_internal_list *__next;
+} __pthread_list_t;
+#else
+typedef struct __pthread_internal_slist
+{
+ struct __pthread_internal_slist *__next;
+} __pthread_slist_t;
+#endif
+
+
/* Data structures for mutex handling. The structure of the attribute
type is deliberately not exposed. */
typedef union
@@ -75,15 +89,14 @@ typedef union
int __kind;
#if __WORDSIZE == 64
int __spins;
- struct __pthread_mutex_s *__next;
- struct __pthread_mutex_s *__prev;
+ __pthread_list_t __list;
# define __PTHREAD_MUTEX_HAVE_PREV 1
#else
unsigned int __nusers;
__extension__ union
{
int __spins;
- struct __pthread_mutex_s *__next;
+ __pthread_slist_t __list;
};
#endif
} __data;
--- libc/nptl/descr.h 13 Feb 2006 07:30:04 -0000 1.23.2.10
+++ libc/nptl/descr.h 13 Feb 2006 09:52:50 -0000
@@ -166,7 +166,7 @@ struct pthread
do { \
__pthread_slist_t *runp = THREAD_GETMEM (THREAD_SELF, robust_list.__next);\
if (runp == &mutex->__data.__list) \
- THREAD_SETMEM (THREAD_SELF, robust_list, runp->__next); \
+ THREAD_SETMEM (THREAD_SELF, robust_list.__next, runp->__next); \
else \
{ \
while (runp->__next != &mutex->__data.__list) \
--- libc/sysdeps/unix/sysv/linux/not-cancel.h 13 Feb 2006 07:30:05 -0000 1.5.2.1
+++ libc/sysdeps/unix/sysv/linux/not-cancel.h 13 Feb 2006 09:52:50 -0000
@@ -28,18 +28,26 @@
INLINE_SYSCALL (open, 2, (const char *) (name), (flags))
/* Uncancelable openat. */
-extern int __openat_not_cancel (int fd, const char *fname, int oflag,
+#if !defined NOT_IN_libc || defined IS_IN_libpthread || defined IS_IN_librt
+extern int __openat_nocancel (int fd, const char *fname, int oflag,
+ mode_t mode) attribute_hidden;
+extern int __openat64_nocancel (int fd, const char *fname, int oflag,
mode_t mode) attribute_hidden;
+#else
+# define __openat_nocancel(fd, fname, oflag, mode) \
+ openat (fd, fname, oflag, mode)
+# define __openat64_nocancel(fd, fname, oflag, mode) \
+ openat64 (fd, fname, oflag, mode)
+#endif
+
#define openat_not_cancel(fd, fname, oflag, mode) \
- __openat_not_cancel (fd, fname, oflag, mode)
+ __openat_nocancel (fd, fname, oflag, mode)
#define openat_not_cancel_3(fd, fname, oflag) \
- __openat_not_cancel (fd, fname, oflag, 0)
-extern int __openat64_not_cancel (int fd, const char *fname, int oflag,
- mode_t mode) attribute_hidden;
+ __openat_nocancel (fd, fname, oflag, 0)
#define openat64_not_cancel(fd, fname, oflag, mode) \
- __openat64_not_cancel (fd, fname, oflag, mode)
+ __openat64_nocancel (fd, fname, oflag, mode)
#define openat64_not_cancel_3(fd, fname, oflag) \
- __openat64_not_cancel (fd, fname, oflag, 0)
+ __openat64_nocancel (fd, fname, oflag, 0)
/* Uncancelable close. */
#define close_not_cancel(fd) \
Jakub
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCH] glibc build fixes
2006-02-13 13:02 [PATCH] glibc build fixes Jakub Jelinek
@ 2006-02-15 17:20 ` Ulrich Drepper
2006-02-17 13:27 ` Kaz Kojima
0 siblings, 1 reply; 7+ messages in thread
From: Ulrich Drepper @ 2006-02-15 17:20 UTC (permalink / raw)
To: Jakub Jelinek; +Cc: Glibc hackers
[-- Attachment #1: Type: text/plain, Size: 292 bytes --]
Applied. But with the changes I checked in before all archs but x86 and
x86-64 are broken again. Somebody needs to write the generic
lowlevelrobustlock.c file and adjust the lowlevellock.h files.
--
➧ Ulrich Drepper ➧ Red Hat, Inc. ➧ 444 Castro St ➧ Mountain View, CA ❖
[-- Attachment #2: OpenPGP digital signature --]
[-- Type: application/pgp-signature, Size: 251 bytes --]
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCH] glibc build fixes
2006-02-15 17:20 ` Ulrich Drepper
@ 2006-02-17 13:27 ` Kaz Kojima
2006-02-17 15:37 ` Ulrich Drepper
2006-02-17 16:08 ` Jakub Jelinek
0 siblings, 2 replies; 7+ messages in thread
From: Kaz Kojima @ 2006-02-17 13:27 UTC (permalink / raw)
To: libc-hacker
> Applied. But with the changes I checked in before all archs but x86 and
> x86-64 are broken again. Somebody needs to write the generic
> lowlevelrobustlock.c file and adjust the lowlevellock.h files.
Here is a patch for SH to add *_robust_mutex_* stuff. It's tested
only with "make -k check" with no new failures.
Regards,
kaz
--
nptl/ChangeLog
2006-02-17 Kaz Kojima <kkojima@rr.iij4u.or.jp>
* sysdeps/unix/sysv/linux/sh/lowlevellock.h: Add lll_robust_mutex_*
definitions.
* sysdeps/unix/sysv/linux/sh/lowlevelrobustlock.S: New file.
diff -uprN ORIG/libc/nptl/sysdeps/unix/sysv/linux/sh/lowlevellock.h LOCAL/libc/nptl/sysdeps/unix/sysv/linux/sh/lowlevellock.h
--- ORIG/libc/nptl/sysdeps/unix/sysv/linux/sh/lowlevellock.h 2006-01-21 07:09:32.000000000 +0900
+++ LOCAL/libc/nptl/sysdeps/unix/sysv/linux/sh/lowlevellock.h 2006-02-17 17:07:33.000000000 +0900
@@ -62,6 +62,28 @@ extern int __lll_mutex_unlock_wake (int
: "r0", "r1", "r2", "t", "memory"); \
__result; })
+#define lll_robust_mutex_trylock(futex, id) \
+ ({ unsigned char __result; \
+ __asm __volatile ("\
+ .align 2\n\
+ mova 1f,r0\n\
+ nop\n\
+ mov r15,r1\n\
+ mov #-8,r15\n\
+ 0: mov.l @%1,r2\n\
+ cmp/eq r2,%3\n\
+ bf 1f\n\
+ mov.l %2,@%1\n\
+ 1: mov r1,r15\n\
+ mov #-1,%0\n\
+ negc %0,%0"\
+ : "=r" (__result) \
+ : "r" (&(futex)), \
+ "r" (id), \
+ "r" (LLL_MUTEX_LOCK_INITIALIZER) \
+ : "r0", "r1", "r2", "t", "memory"); \
+ __result; })
+
#define lll_mutex_cond_trylock(futex) \
({ unsigned char __result; \
__asm __volatile ("\
@@ -102,6 +124,25 @@ extern int __lll_mutex_unlock_wake (int
if (__result) \
__lll_mutex_lock_wait (__result, __futex); })
+#define lll_robust_mutex_lock(futex, id) \
+ ({ int __result, val, *__futex = &(futex); \
+ __asm __volatile ("\
+ .align 2\n\
+ mova 1f,r0\n\
+ nop\n\
+ mov r15,r1\n\
+ mov #-8,r15\n\
+ 0: mov.l @%2,%0\n\
+ tst %0,%0\n\
+ bf 1f\n\
+ mov.l %1,@%2\n\
+ 1: mov r1,r15"\
+ : "=&r" (__result) : "r" (id), "r" (__futex) \
+ : "r0", "r1", "t", "memory"); \
+ if (__result) \
+ __result = __lll_robust_mutex_lock_wait (__result, __futex); \
+ __result; })
+
/* Special version of lll_mutex_lock which causes the unlock function to
always wakeup waiters. */
#define lll_mutex_cond_lock(futex) \
@@ -122,6 +163,25 @@ extern int __lll_mutex_unlock_wake (int
if (__result) \
__lll_mutex_lock_wait (__result, __futex); })
+#define lll_robust_mutex_cond_lock(futex, id) \
+ ({ int __result, val, *__futex = &(futex); \
+ __asm __volatile ("\
+ .align 2\n\
+ mova 1f,r0\n\
+ nop\n\
+ mov r15,r1\n\
+ mov #-8,r15\n\
+ 0: mov.l @%2,%0\n\
+ tst %0,%0\n\
+ bf 1f\n\
+ mov.l %1,@%2\n\
+ 1: mov r1,r15"\
+ : "=&r" (__result) : "r" (id | FUTEX_WAITERS), "r" (__futex) \
+ : "r0", "r1", "t", "memory"); \
+ if (__result) \
+ __result = __lll_robust_mutex_lock_wait (__result, __futex); \
+ __result; })
+
#define lll_mutex_timedlock(futex, timeout) \
({ int __result, val, *__futex = &(futex); \
__asm __volatile ("\
@@ -141,6 +201,26 @@ extern int __lll_mutex_unlock_wake (int
__result = __lll_mutex_timedlock_wait (__result, __futex, timeout); \
__result; })
+#define lll_robust_mutex_timedlock(futex, timeout, id) \
+ ({ int __result, val, *__futex = &(futex); \
+ __asm __volatile ("\
+ .align 2\n\
+ mova 1f,r0\n\
+ nop\n\
+ mov r15,r1\n\
+ mov #-8,r15\n\
+ 0: mov.l @%2,%0\n\
+ tst %0,%0\n\
+ bf 1f\n\
+ mov.l %1,@%2\n\
+ 1: mov r1,r15"\
+ : "=&r" (__result) : "r" (id), "r" (__futex) \
+ : "r0", "r1", "t", "memory"); \
+ if (__result) \
+ __result = __lll_robust_mutex_timedlock_wait (__result, __futex, \
+ timeout); \
+ __result; })
+
#define lll_mutex_unlock(futex) \
(void) ({ int __result, *__futex = &(futex); \
__asm __volatile ("\
@@ -157,6 +237,37 @@ extern int __lll_mutex_unlock_wake (int
if (__result) \
__lll_mutex_unlock_wake (__futex); })
+#define lll_robust_mutex_unlock(futex) \
+ (void) ({ int __result, *__futex = &(futex); \
+ __asm __volatile ("\
+ .align 2\n\
+ mova 1f,r0\n\
+ mov r15,r1\n\
+ mov #-6,r15\n\
+ 0: mov.l @%1,%0\n\
+ and %2,%0\n\
+ mov.l %0,@%1\n\
+ 1: mov r1,r15"\
+ : "=&r" (__result) : "r" (__futex), "r" (FUTEX_TID_MASK) \
+ : "r0", "r1", "memory"); \
+ if (__result) \
+ __lll_mutex_unlock_wake (__futex); })
+
+#define lll_robust_mutex_dead(futex) \
+ (void) ({ int __ignore, *__futex = &(futex); \
+ __asm __volatile ("\
+ .align 2\n\
+ mova 1f,r0\n\
+ mov r15,r1\n\
+ mov #-6,r15\n\
+ 0: mov.l @%1,%0\n\
+ or %2,%0\n\
+ mov.l %0,@%1\n\
+ 1: mov r1,r15"\
+ : "=&r" (__ignore) : "r" (__futex), "r" (FUTEX_OWNER_DIED) \
+ : "r0", "r1", "memory"); \
+ lll_futex_wake (__futex, 1); })
+
#define lll_mutex_islocked(futex) \
(futex != 0)
diff -uprN ORIG/libc/nptl/sysdeps/unix/sysv/linux/sh/lowlevelrobustlock.S LOCAL/libc/nptl/sysdeps/unix/sysv/linux/sh/lowlevelrobustlock.S
--- ORIG/libc/nptl/sysdeps/unix/sysv/linux/sh/lowlevelrobustlock.S 1970-01-01 09:00:00.000000000 +0900
+++ LOCAL/libc/nptl/sysdeps/unix/sysv/linux/sh/lowlevelrobustlock.S 2006-02-17 17:23:47.000000000 +0900
@@ -0,0 +1,224 @@
+/* Copyright (C) 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#include <pthread-errnos.h>
+#include <lowlevelrobustlock.h>
+#include "lowlevel-atomic.h"
+
+ .text
+
+#define SYS_gettimeofday __NR_gettimeofday
+#define SYS_futex 240
+#define FUTEX_WAIT 0
+#define FUTEX_WAKE 1
+#define FUTEX_WAITERS 0x80000000
+#define FUTEX_OWNER_DIED 0x40000000
+
+
+ .globl __lll_robust_mutex_lock_wait
+ .type __lll_robust_mutex_lock_wait,@function
+ .hidden __lll_robust_mutex_lock_wait
+ .align 5
+ cfi_startproc
+__lll_robust_mutex_lock_wait:
+ mov.l r8, @-r15
+ cfi_adjust_cfa_offset(4)
+ cfi_rel_offset (r8, 0)
+ mov r5, r8
+ mov #0, r7 /* No timeout. */
+ mov #FUTEX_WAIT, r5
+
+4:
+ mov r4, r6
+ mov.l .L_FUTEX_WAITERS, r0
+ or r0, r6
+ shlr r0 /* r0 = FUTEX_OWNER_DIED */
+ tst r0, r4
+ bf/s 3f
+ cmp/eq r4, r6
+ bt 1f
+
+ CMPXCHG (r4, @r8, r6, r2)
+ bf 2f
+
+1:
+ mov r8, r4
+ mov #SYS_futex, r3
+ extu.b r3, r3
+ trapa #0x14
+ SYSCALL_INST_PAD
+
+ mov.l @r8, r2
+
+2:
+ tst r2, r2
+ bf/s 4b
+ mov r2, r4
+
+ stc gbr, r1
+ mov.w .Ltidoff, r2
+ add r2, r1
+ mov.l @r1, r6
+ mov #0, r3
+ CMPXCHG (r3, @r8, r6, r4)
+ bf 4b
+ mov #0, r4
+
+3:
+ mov.l @r15+, r8
+ ret
+ mov r4, r0
+ cfi_endproc
+ .align 2
+.L_FUTEX_WAITERS:
+ .long FUTEX_WAITERS
+.Ltidoff:
+ .word TID - TLS_PRE_TCB_SIZE
+ .size __lll_robust_mutex_lock_wait,.-__lll_robust_mutex_lock_wait
+
+
+ .globl __lll_robust_mutex_timedlock_wait
+ .type __lll_robust_mutex_timedlock_wait,@function
+ .hidden __lll_robust_mutex_timedlock_wait
+ .align 5
+ cfi_startproc
+__lll_robust_mutex_timedlock_wait:
+ /* Check for a valid timeout value. */
+ mov.l @(4,r6), r1
+ mov.l .L1g, r0
+ cmp/hs r0, r1
+ bt 3f
+
+ mov.l r10, @-r15
+ cfi_adjust_cfa_offset(4)
+ cfi_rel_offset (r10, 0)
+ mov.l r9, @-r15
+ cfi_adjust_cfa_offset(4)
+ cfi_rel_offset (r9, 0)
+ mov.l r8, @-r15
+ cfi_adjust_cfa_offset(4)
+ cfi_rel_offset (r8, 0)
+ mov r4, r10
+ mov r6, r9
+ mov r5, r8
+
+ /* Stack frame for the timespec and timeval structs. */
+ add #-8, r15
+ cfi_adjust_cfa_offset(8)
+
+1:
+ /* Get current time. */
+ mov r15, r4
+ mov #0, r5
+ mov #SYS_gettimeofday, r3
+ trapa #0x12
+ SYSCALL_INST_PAD
+
+ /* Compute relative timeout. */
+ mov.l @(4,r15), r0
+ mov.w .L1k, r1
+ dmulu.l r0, r1 /* Micro seconds to nano seconds. */
+ mov.l @r9, r2
+ mov.l @(4,r9), r3
+ mov.l @r15, r0
+ sts macl, r1
+ sub r0, r2
+ clrt
+ subc r1, r3
+ bf 4f
+ mov.l .L1g, r1
+ add r1, r3
+ add #-1, r2
+4:
+ cmp/pz r2
+ bf 8f /* Time is already up. */
+
+ mov.l r2, @r15 /* Store relative timeout. */
+ mov.l r3, @(4,r15)
+
+ mov r10, r6
+ mov.l .L_FUTEX_WAITERS2, r0
+ or r0, r6
+ shlr r0 /* r0 = FUTEX_OWNER_DIED */
+ tst r0, r4
+ bf/s 6f
+ cmp/eq r4, r6
+ bt 2f
+
+ CMPXCHG (r4, @r8, r6, r2)
+ bf/s 5f
+ mov #0, r5
+
+2:
+ mov r8, r4
+ mov #FUTEX_WAIT, r5
+ mov r10, r6
+ mov r15, r7
+ mov #SYS_futex, r3
+ extu.b r3, r3
+ trapa #0x14
+ SYSCALL_INST_PAD
+ mov r0, r5
+
+ mov.l @r8, r2
+
+5:
+ tst r2, r2
+ bf/s 7f
+ mov r2, r10
+
+ stc gbr, r1
+ mov.w .Ltidoff2, r2
+ add r2, r1
+ mov.l @r1, r4
+ mov #0, r3
+ CMPXCHG (r3, @r8, r4, r10)
+ bf 7f
+ mov #0, r0
+
+6:
+ add #8, r15
+ mov.l @r15+, r8
+ mov.l @r15+, r9
+ rts
+ mov.l @r15+, r10
+
+7:
+ /* Check whether the time expired. */
+ mov #-ETIMEDOUT, r1
+ cmp/eq r5, r1
+ bf 1b
+
+8:
+ bra 6b
+ mov #ETIMEDOUT, r0
+3:
+ rts
+ mov #EINVAL, r0
+ cfi_endproc
+ .align 2
+.L_FUTEX_WAITERS2:
+ .long FUTEX_WAITERS
+.L1g:
+ .long 1000000000
+.Ltidoff2:
+ .word TID - TLS_PRE_TCB_SIZE
+.L1k:
+ .word 1000
+ .size __lll_robust_mutex_timedlock_wait,.-__lll_robust_mutex_timedlock_wait
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCH] glibc build fixes
2006-02-17 13:27 ` Kaz Kojima
@ 2006-02-17 15:37 ` Ulrich Drepper
2006-02-17 16:08 ` Jakub Jelinek
1 sibling, 0 replies; 7+ messages in thread
From: Ulrich Drepper @ 2006-02-17 15:37 UTC (permalink / raw)
To: Kaz Kojima; +Cc: libc-hacker
[-- Attachment #1: Type: text/plain, Size: 109 bytes --]
Thanks, applied.
--
➧ Ulrich Drepper ➧ Red Hat, Inc. ➧ 444 Castro St ➧ Mountain View, CA ❖
[-- Attachment #2: OpenPGP digital signature --]
[-- Type: application/pgp-signature, Size: 251 bytes --]
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCH] glibc build fixes
2006-02-17 13:27 ` Kaz Kojima
2006-02-17 15:37 ` Ulrich Drepper
@ 2006-02-17 16:08 ` Jakub Jelinek
2006-02-17 18:23 ` Jakub Jelinek
1 sibling, 1 reply; 7+ messages in thread
From: Jakub Jelinek @ 2006-02-17 16:08 UTC (permalink / raw)
To: Ulrich Drepper; +Cc: libc-hacker
On Fri, Feb 17, 2006 at 10:27:47PM +0900, Kaz Kojima wrote:
> > Applied. But with the changes I checked in before all archs but x86 and
> > x86-64 are broken again. Somebody needs to write the generic
> > lowlevelrobustlock.c file and adjust the lowlevellock.h files.
>
> Here is a patch for SH to add *_robust_mutex_* stuff. It's tested
> only with "make -k check" with no new failures.
And here it is for ppc*/s390*/ia64/alpha, so far tested on ppc64
only. sparc* not done yet, as although sparcv9/sparc64 is easy,
sparc32 pre-v9 will be much harder (and probably will need to
never register the robust list with kernel, so on 32-bit sparc
kernels the only robustness guaranteed will be if a thread is cancelled,
but not if it crashes).
2006-02-17 Jakub Jelinek <jakub@redhat.com>
* include/atomic.h (atomic_and, atomic_or): Define.
nptl/
* sysdeps/unix/sysv/linux/alpha/lowlevellock.h (lll_robust_mutex_dead,
lll_robust_mutex_trylock, lll_robust_mutex_lock,
lll_robust_mutex_cond_lock, lll_robust_mutex_timedlock,
lll_robust_mutex_unlock): New macros.
(__lll_robust_lock_wait, __lll_robust_timedlock_wait): New prototypes.
* sysdeps/unix/sysv/linux/s390/lowlevellock.h: Likewise.
* sysdeps/unix/sysv/linux/powerpc/lowlevellock.h: Likewise.
* sysdeps/unix/sysv/linux/ia64/lowlevellock.h: Likewise.
* sysdeps/unix/sysv/linux/lowlevelrobustlock.c: New file.
--- libc/nptl/sysdeps/unix/sysv/linux/alpha/lowlevellock.h.jj 2005-09-09 12:58:42.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/alpha/lowlevellock.h 2006-02-17 16:55:38.000000000 +0100
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2006 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -64,6 +64,15 @@
INTERNAL_SYSCALL_ERROR_P (__ret, __err)? -__ret : __ret; \
})
+#define lll_robust_mutex_dead(futexv) \
+ do \
+ { \
+ int *__futexp = &(futexv); \
+ atomic_or (__futexp, FUTEX_OWNER_DIED); \
+ lll_futex_wake (__futexp, 1); \
+ } \
+ while (0)
+
/* Returns non-zero if error happened, zero if success. */
#define lll_futex_requeue(futexp, nr_wake, nr_move, mutex, val) \
({ \
@@ -106,7 +115,16 @@ __lll_mutex_cond_trylock(int *futex)
#define lll_mutex_cond_trylock(lock) __lll_mutex_cond_trylock (&(lock))
+static inline int __attribute__((always_inline))
+__lll_robust_mutex_trylock(int *futex, int id)
+{
+ return atomic_compare_and_exchange_val_acq (futex, id, 0) != 0;
+}
+#define lll_robust_mutex_trylock(lock, id) \
+ __lll_robust_mutex_trylock (&(lock), id)
+
extern void __lll_lock_wait (int *futex) attribute_hidden;
+extern int __lll_robust_lock_wait (int *futex) attribute_hidden;
static inline void __attribute__((always_inline))
__lll_mutex_lock(int *futex)
@@ -117,6 +135,18 @@ __lll_mutex_lock(int *futex)
#define lll_mutex_lock(futex) __lll_mutex_lock (&(futex))
+static inline int __attribute__ ((always_inline))
+__lll_robust_mutex_lock (int *futex, int id)
+{
+ int result = 0;
+ if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0)
+ result = __lll_robust_lock_wait (futex);
+ return result;
+}
+#define lll_robust_mutex_lock(futex, id) \
+ __lll_robust_mutex_lock (&(futex), id)
+
+
static inline void __attribute__ ((always_inline))
__lll_mutex_cond_lock (int *futex)
{
@@ -126,8 +156,14 @@ __lll_mutex_cond_lock (int *futex)
#define lll_mutex_cond_lock(futex) __lll_mutex_cond_lock (&(futex))
+#define lll_robust_mutex_cond_lock(futex, id) \
+ __lll_robust_mutex_lock (&(futex), (id) | FUTEX_WAITERS)
+
+
extern int __lll_timedlock_wait (int *futex, const struct timespec *)
attribute_hidden;
+extern int __lll_robust_timedlock_wait (int *futex, const struct timespec *)
+ attribute_hidden;
static inline int __attribute__ ((always_inline))
__lll_mutex_timedlock (int *futex, const struct timespec *abstime)
@@ -141,6 +177,19 @@ __lll_mutex_timedlock (int *futex, const
__lll_mutex_timedlock (&(futex), abstime)
+static inline int __attribute__ ((always_inline))
+__lll_robust_mutex_timedlock (int *futex, const struct timespec *abstime,
+ int id)
+{
+ int result = 0;
+ if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0)
+ result = __lll_robust_timedlock_wait (futex, abstime);
+ return result;
+}
+#define lll_robust_mutex_timedlock(futex, abstime, id) \
+ __lll_robust_mutex_timedlock (&(futex), abstime, id)
+
+
static inline void __attribute__ ((always_inline))
__lll_mutex_unlock (int *futex)
{
@@ -152,6 +201,16 @@ __lll_mutex_unlock (int *futex)
static inline void __attribute__ ((always_inline))
+__lll_robust_mutex_unlock (int *futex)
+{
+ int val = atomic_exchange_rel (futex, 0);
+ if (__builtin_expect (val & FUTEX_WAITERS, 0))
+ lll_futex_wake (futex, 1);
+}
+#define lll_robust_mutex_unlock(futex) __lll_robust_mutex_unlock(&(futex))
+
+
+static inline void __attribute__ ((always_inline))
__lll_mutex_unlock_force (int *futex)
{
(void) atomic_exchange_rel (futex, 0);
--- libc/nptl/sysdeps/unix/sysv/linux/s390/lowlevellock.h.jj 2005-09-09 12:58:42.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/s390/lowlevellock.h 2006-02-17 16:57:18.000000000 +0100
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2006 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Martin Schwidefsky <schwidefsky@de.ibm.com>, 2003.
@@ -85,6 +85,17 @@
})
+#define lll_robust_mutex_dead(futexv) \
+ do \
+ { \
+ int *__futexp = &(futexv); \
+ \
+ atomic_or (__futexp, FUTEX_OWNER_DIED); \
+ lll_futex_wake (__futexp, 1); \
+ } \
+ while (0)
+
+
/* Returns non-zero if error happened, zero if success. */
#define lll_futex_requeue(futex, nr_wake, nr_move, mutex, val) \
({ \
@@ -167,7 +178,23 @@ __lll_mutex_cond_trylock (int *futex)
#define lll_mutex_cond_trylock(futex) __lll_mutex_cond_trylock (&(futex))
+static inline int
+__attribute__ ((always_inline))
+__lll_robust_mutex_trylock (int *futex, int id)
+{
+ unsigned int old;
+
+ __asm __volatile ("cs %0,%3,%1"
+ : "=d" (old), "=Q" (*futex)
+ : "0" (0), "d" (id), "m" (*futex) : "cc", "memory" );
+ return old != 0;
+}
+#define lll_robust_mutex_trylock(futex, id) \
+ __lll_robust_mutex_trylock (&(futex), id)
+
+
extern void __lll_lock_wait (int *futex) attribute_hidden;
+extern int __lll_robust_lock_wait (int *futex) attribute_hidden;
static inline void
__attribute__ ((always_inline))
@@ -178,6 +205,17 @@ __lll_mutex_lock (int *futex)
}
#define lll_mutex_lock(futex) __lll_mutex_lock (&(futex))
+static inline int
+__attribute__ ((always_inline))
+__lll_robust_mutex_lock (int *futex, int id)
+{
+ int result = 0;
+ if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0)
+ result = __lll_robust_lock_wait (futex);
+ return result;
+}
+#define lll_robust_mutex_lock(futex, id) __lll_robust_mutex_lock (&(futex), id)
+
static inline void
__attribute__ ((always_inline))
__lll_mutex_cond_lock (int *futex)
@@ -187,8 +225,13 @@ __lll_mutex_cond_lock (int *futex)
}
#define lll_mutex_cond_lock(futex) __lll_mutex_cond_lock (&(futex))
+#define lll_robust_mutex_cond_lock(futex, id) \
+ __lll_robust_mutex_lock (&(futex), (id) | FUTEX_WAITERS)
+
extern int __lll_timedlock_wait
(int *futex, const struct timespec *) attribute_hidden;
+extern int __lll_robust_timedlock_wait
+ (int *futex, const struct timespec *) attribute_hidden;
static inline int
__attribute__ ((always_inline))
@@ -202,6 +245,19 @@ __lll_mutex_timedlock (int *futex, const
#define lll_mutex_timedlock(futex, abstime) \
__lll_mutex_timedlock (&(futex), abstime)
+static inline int
+__attribute__ ((always_inline))
+__lll_robust_mutex_timedlock (int *futex, const struct timespec *abstime,
+ int id)
+{
+ int result = 0;
+ if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0)
+ result = __lll_robust_timedlock_wait (futex, abstime);
+ return result;
+}
+#define lll_robust_mutex_timedlock(futex, abstime, id) \
+ __lll_robust_mutex_timedlock (&(futex), abstime, id)
+
static inline void
__attribute__ ((always_inline))
@@ -220,6 +276,21 @@ __lll_mutex_unlock (int *futex)
static inline void
__attribute__ ((always_inline))
+__lll_robust_mutex_unlock (int *futex)
+{
+ int oldval;
+ int newval = 0;
+
+ lll_compare_and_swap (futex, oldval, newval, "slr %2,%2");
+ if (oldval & FUTEX_WAITERS)
+ lll_futex_wake (futex, 1);
+}
+#define lll_robust_mutex_unlock(futex) \
+ __lll_robust_mutex_unlock(&(futex))
+
+
+static inline void
+__attribute__ ((always_inline))
__lll_mutex_unlock_force (int *futex)
{
*futex = 0;
--- libc/nptl/sysdeps/unix/sysv/linux/powerpc/lowlevellock.h.jj 2005-09-09 12:58:42.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/powerpc/lowlevellock.h 2006-02-17 14:36:19.000000000 +0100
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2006 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Paul Mackerras <paulus@au.ibm.com>, 2003.
@@ -69,6 +69,17 @@
INTERNAL_SYSCALL_ERROR_P (__ret, __err) ? -__ret : __ret; \
})
+#define lll_robust_mutex_dead(futexv) \
+ do \
+ { \
+ INTERNAL_SYSCALL_DECL (__err); \
+ int *__futexp = &(futexv); \
+ \
+ atomic_or (__futexp, FUTEX_OWNER_DIED); \
+ INTERNAL_SYSCALL (futex, __err, 4, __futexp, FUTEX_WAKE, 1, 0); \
+ } \
+ while (0)
+
/* Returns non-zero if error happened, zero if success. */
#define lll_futex_requeue(futexp, nr_wake, nr_move, mutex, val) \
({ \
@@ -102,8 +113,8 @@
# define __lll_rel_instr "sync"
#endif
-/* Set *futex to 1 if it is 0, atomically. Returns the old value */
-#define __lll_trylock(futex) \
+/* Set *futex to ID if it is 0, atomically. Returns the old value */
+#define __lll_robust_trylock(futex, id) \
({ int __val; \
__asm __volatile ("1: lwarx %0,0,%2\n" \
" cmpwi 0,%0,0\n" \
@@ -112,31 +123,26 @@
" bne- 1b\n" \
"2: " __lll_acq_instr \
: "=&r" (__val), "=m" (*futex) \
- : "r" (futex), "r" (1), "m" (*futex) \
+ : "r" (futex), "r" (id), "m" (*futex) \
: "cr0", "memory"); \
__val; \
})
+#define lll_robust_mutex_trylock(lock, id) __lll_robust_trylock (&(lock), id)
+
+/* Set *futex to 1 if it is 0, atomically. Returns the old value */
+#define __lll_trylock(futex) __lll_robust_trylock (futex, 1)
+
#define lll_mutex_trylock(lock) __lll_trylock (&(lock))
/* Set *futex to 2 if it is 0, atomically. Returns the old value */
-#define __lll_cond_trylock(futex) \
- ({ int __val; \
- __asm __volatile ("1: lwarx %0,0,%2\n" \
- " cmpwi 0,%0,0\n" \
- " bne 2f\n" \
- " stwcx. %3,0,%2\n" \
- " bne- 1b\n" \
- "2: " __lll_acq_instr \
- : "=&r" (__val), "=m" (*futex) \
- : "r" (futex), "r" (2), "m" (*futex) \
- : "cr0", "memory"); \
- __val; \
- })
+#define __lll_cond_trylock(futex) __lll_robust_trylock (futex, 2)
+
#define lll_mutex_cond_trylock(lock) __lll_cond_trylock (&(lock))
extern void __lll_lock_wait (int *futex) attribute_hidden;
+extern int __lll_robust_lock_wait (int *futex) attribute_hidden;
#define lll_mutex_lock(lock) \
(void) ({ \
@@ -146,6 +152,16 @@ extern void __lll_lock_wait (int *futex)
__lll_lock_wait (__futex); \
})
+#define lll_robust_mutex_lock(lock, id) \
+ ({ \
+ int *__futex = &(lock); \
+ int __val = 0; \
+ if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, id, \
+ 0), 0)) \
+ __val = __lll_robust_lock_wait (__futex); \
+ __val; \
+ })
+
#define lll_mutex_cond_lock(lock) \
(void) ({ \
int *__futex = &(lock); \
@@ -154,8 +170,22 @@ extern void __lll_lock_wait (int *futex)
__lll_lock_wait (__futex); \
})
+#define lll_robust_mutex_cond_lock(lock, id) \
+ ({ \
+ int *__futex = &(lock); \
+ int __val = 0; \
+ int __id = id | FUTEX_WAITERS; \
+ if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, __id,\
+ 0), 0)) \
+ __val = __lll_robust_lock_wait (__futex); \
+ __val; \
+ })
+
+
extern int __lll_timedlock_wait
(int *futex, const struct timespec *) attribute_hidden;
+extern int __lll_robust_timedlock_wait
+ (int *futex, const struct timespec *) attribute_hidden;
#define lll_mutex_timedlock(lock, abstime) \
({ \
@@ -167,6 +197,16 @@ extern int __lll_timedlock_wait
__val; \
})
+#define lll_robust_mutex_timedlock(lock, abstime, id) \
+ ({ \
+ int *__futex = &(lock); \
+ int __val = 0; \
+ if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, id, \
+ 0), 0)) \
+ __val = __lll_robust_timedlock_wait (__futex, abstime); \
+ __val; \
+ })
+
#define lll_mutex_unlock(lock) \
((void) ({ \
int *__futex = &(lock); \
@@ -175,6 +215,14 @@ extern int __lll_timedlock_wait
lll_futex_wake (__futex, 1); \
}))
+#define lll_robust_mutex_unlock(lock) \
+ ((void) ({ \
+ int *__futex = &(lock); \
+ int __val = atomic_exchange_rel (__futex, 0); \
+ if (__builtin_expect (__val & FUTEX_WAITERS, 0)) \
+ lll_futex_wake (__futex, 1); \
+ }))
+
#define lll_mutex_unlock_force(lock) \
((void) ({ \
int *__futex = &(lock); \
--- libc/nptl/sysdeps/unix/sysv/linux/ia64/lowlevellock.h.jj 2005-09-09 12:58:42.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/ia64/lowlevellock.h 2006-02-17 15:31:59.000000000 +0100
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2006 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Jakub Jelinek <jakub@redhat.com>, 2003.
@@ -55,6 +55,15 @@
_r10 == -1 ? -_retval : _retval; \
})
+#define lll_robust_mutex_dead(futexv) \
+do \
+ { \
+ int *__futexp = &(futexv); \
+ atomic_or (__futexp, FUTEX_OWNER_DIED); \
+ DO_INLINE_SYSCALL(futex, 3, (long) __futexp, FUTEX_WAKE, 1); \
+ } \
+while (0)
+
/* Returns non-zero if error happened, zero if success. */
#define lll_futex_requeue(ftx, nr_wake, nr_move, mutex, val) \
({ \
@@ -79,12 +88,19 @@
#define lll_mutex_trylock(futex) __lll_mutex_trylock (&(futex))
+#define __lll_robust_mutex_trylock(futex, id) \
+ (atomic_compare_and_exchange_val_acq (futex, id, 0) != 0)
+#define lll_robust_mutex_trylock(futex, id) \
+ __lll_robust_mutex_trylock (&(futex), id)
+
+
#define __lll_mutex_cond_trylock(futex) \
(atomic_compare_and_exchange_val_acq (futex, 2, 0) != 0)
#define lll_mutex_cond_trylock(futex) __lll_mutex_cond_trylock (&(futex))
extern void __lll_lock_wait (int *futex) attribute_hidden;
+extern int __lll_robust_lock_wait (int *futex) attribute_hidden;
#define __lll_mutex_lock(futex) \
@@ -96,6 +112,18 @@ extern void __lll_lock_wait (int *futex)
#define lll_mutex_lock(futex) __lll_mutex_lock (&(futex))
+#define __lll_robust_mutex_lock(futex, id) \
+ ({ \
+ int *__futex = (futex); \
+ int __val = 0; \
+ \
+ if (atomic_compare_and_exchange_bool_acq (__futex, id, 0) != 0) \
+ __val = __lll_robust_lock_wait (__futex); \
+ __val; \
+ }))
+#define lll_robust_mutex_lock(futex, id) __lll_robust_mutex_lock (&(futex), id)
+
+
#define __lll_mutex_cond_lock(futex) \
((void) ({ \
int *__futex = (futex); \
@@ -105,8 +133,24 @@ extern void __lll_lock_wait (int *futex)
#define lll_mutex_cond_lock(futex) __lll_mutex_cond_lock (&(futex))
+#define __lll_robust_mutex_cond_lock(futex, id) \
+ ({ \
+ int *__futex = (futex); \
+ int __val = 0; \
+ int __id = (id) | FUTEX_WAITERS; \
+ \
+ if (atomic_compare_and_exchange_bool_acq (__futex, __id, 0) != 0) \
+ __val = __lll_robust_lock_wait (__futex); \
+ __val; \
+ }))
+#define lll_robust_mutex_cond_lock(futex, id) \
+ __lll_robust_mutex_cond_lock (&(futex), id)
+
+
extern int __lll_timedlock_wait (int *futex, const struct timespec *)
attribute_hidden;
+extern int __lll_robust_timedlock_wait (int *futex, const struct timespec *)
+ attribute_hidden;
#define __lll_mutex_timedlock(futex, abstime) \
@@ -122,6 +166,19 @@ extern int __lll_timedlock_wait (int *fu
__lll_mutex_timedlock (&(futex), abstime)
+#define __lll_robust_mutex_timedlock(futex, abstime, id) \
+ ({ \
+ int *__futex = (futex); \
+ int __val = 0; \
+ \
+ if (atomic_compare_and_exchange_bool_acq (__futex, id, 0) != 0) \
+ __val = __lll_robust_timedlock_wait (__futex, abstime); \
+ __val; \
+ }))
+#define lll_robust_mutex_timedlock(futex, abstime, id) \
+ __lll_robust_mutex_timedlock (&(futex), abstime, id)
+
+
#define __lll_mutex_unlock(futex) \
((void) ({ \
int *__futex = (futex); \
@@ -134,6 +191,18 @@ extern int __lll_timedlock_wait (int *fu
__lll_mutex_unlock(&(futex))
+#define __lll_robust_mutex_unlock(futex) \
+ ((void) ({ \
+ int *__futex = (futex); \
+ int __val = atomic_exchange_rel (__futex, 0); \
+ \
+ if (__builtin_expect (__val & FUTEX_WAITERS, 0)) \
+ lll_futex_wake (__futex, 1); \
+ }))
+#define lll_robust_mutex_unlock(futex) \
+ __lll_robust_mutex_unlock(&(futex))
+
+
#define __lll_mutex_unlock_force(futex) \
((void) ({ \
int *__futex = (futex); \
--- libc/nptl/sysdeps/unix/sysv/linux/lowlevelrobustlock.c.jj 2006-02-17 09:13:58.000000000 +0100
+++ libc/nptl/sysdeps/unix/sysv/linux/lowlevelrobustlock.c 2006-02-17 09:13:58.000000000 +0100
@@ -0,0 +1,95 @@
+/* Copyright (C) 2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Jakub Jelinek <jakub@redhat.com>, 2006.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <errno.h>
+#include <sysdep.h>
+#include <lowlevellock.h>
+#include <sys/time.h>
+#include <pthreadP.h>
+
+
+int
+__lll_robust_lock_wait (int *futex)
+{
+ int oldval = *futex;
+ int tid = THREAD_GETMEM (THREAD_SELF, tid);
+
+ do
+ {
+ if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
+ return oldval;
+
+ int newval = oldval | FUTEX_WAITERS;
+ if (oldval != newval
+ && atomic_compare_and_exchange_bool_acq (futex, newval, oldval))
+ continue;
+
+ lll_futex_wait (futex, newval);
+ }
+ while ((oldval = atomic_compare_and_exchange_val_acq (futex, tid, 0)) != 0);
+ return 0;
+}
+
+
+int
+__lll_robust_timedlock_wait (int *futex, const struct timespec *abstime)
+{
+ /* Reject invalid timeouts. */
+ if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
+ return EINVAL;
+
+ int tid = THREAD_GETMEM (THREAD_SELF, tid);
+
+ do
+ {
+ struct timeval tv;
+ struct timespec rt;
+
+ /* Get the current time. */
+ (void) __gettimeofday (&tv, NULL);
+
+ /* Compute relative timeout. */
+ rt.tv_sec = abstime->tv_sec - tv.tv_sec;
+ rt.tv_nsec = abstime->tv_nsec - tv.tv_usec * 1000;
+ if (rt.tv_nsec < 0)
+ {
+ rt.tv_nsec += 1000000000;
+ --rt.tv_sec;
+ }
+
+ /* Already timed out? */
+ if (rt.tv_sec < 0)
+ return ETIMEDOUT;
+
+ /* Wait. */
+ int oldval = *futex;
+ if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
+ return oldval;
+
+ int newval = oldval | FUTEX_WAITERS;
+ if (oldval != newval
+ && atomic_compare_and_exchange_bool_acq (futex, newval, oldval))
+ continue;
+
+ lll_futex_timed_wait (futex, newval, &rt);
+ }
+ while (atomic_compare_and_exchange_bool_acq (futex, tid, 0));
+
+ return 0;
+}
--- libc/include/atomic.h.jj 2005-08-23 12:00:25.000000000 +0200
+++ libc/include/atomic.h 2006-02-17 09:13:58.000000000 +0100
@@ -1,5 +1,5 @@
/* Internal macros for atomic operations for GNU C Library.
- Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+ Copyright (C) 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -273,6 +273,41 @@
__oldval & __mask; })
#endif
+/* Atomically *mem &= mask and return the old value of *mem. */
+#ifndef atomic_and
+# define atomic_and(mem, mask) \
+ ({ __typeof (*(mem)) __oldval; \
+ __typeof (mem) __memp = (mem); \
+ __typeof (*(mem)) __mask = (mask); \
+ \
+ do \
+ __oldval = (*__memp); \
+ while (__builtin_expect (atomic_compare_and_exchange_bool_acq (__memp, \
+ __oldval \
+ & __mask, \
+ __oldval),\
+ 0)); \
+ \
+ __oldval; })
+#endif
+
+/* Atomically *mem |= mask and return the old value of *mem. */
+#ifndef atomic_or
+# define atomic_or(mem, mask) \
+ ({ __typeof (*(mem)) __oldval; \
+ __typeof (mem) __memp = (mem); \
+ __typeof (*(mem)) __mask = (mask); \
+ \
+ do \
+ __oldval = (*__memp); \
+ while (__builtin_expect (atomic_compare_and_exchange_bool_acq (__memp, \
+ __oldval \
+ | __mask, \
+ __oldval),\
+ 0)); \
+ \
+ __oldval; })
+#endif
#ifndef atomic_full_barrier
# define atomic_full_barrier() __asm ("" ::: "memory")
Jakub
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCH] glibc build fixes
2006-02-17 16:08 ` Jakub Jelinek
@ 2006-02-17 18:23 ` Jakub Jelinek
2006-02-17 18:51 ` Ulrich Drepper
0 siblings, 1 reply; 7+ messages in thread
From: Jakub Jelinek @ 2006-02-17 18:23 UTC (permalink / raw)
To: Ulrich Drepper; +Cc: libc-hacker
On Fri, Feb 17, 2006 at 05:08:14PM +0100, Jakub Jelinek wrote:
> On Fri, Feb 17, 2006 at 10:27:47PM +0900, Kaz Kojima wrote:
> > > Applied. But with the changes I checked in before all archs but x86 and
> > > x86-64 are broken again. Somebody needs to write the generic
> > > lowlevelrobustlock.c file and adjust the lowlevellock.h files.
> >
> > Here is a patch for SH to add *_robust_mutex_* stuff. It's tested
> > only with "make -k check" with no new failures.
>
> And here it is for ppc*/s390*/ia64/alpha, so far tested on ppc64
> only. sparc* not done yet, as although sparcv9/sparc64 is easy,
> sparc32 pre-v9 will be much harder (and probably will need to
> never register the robust list with the kernel, so on 32-bit sparc
> kernels the only robustness guaranteed will be if a thread is cancelled,
> but not if it crashes).
That one had a few minor issues; here is one tested on
ia64/ppc32/ppc64/s390/s390x:
2006-02-17 Jakub Jelinek <jakub@redhat.com>
* include/atomic.h (atomic_and, atomic_or): Define.
nptl/
* sysdeps/unix/sysv/linux/alpha/lowlevellock.h (lll_robust_mutex_dead,
lll_robust_mutex_trylock, lll_robust_mutex_lock,
lll_robust_mutex_cond_lock, lll_robust_mutex_timedlock,
lll_robust_mutex_unlock): New macros.
(__lll_robust_lock_wait, __lll_robust_timedlock_wait): New prototypes.
* sysdeps/unix/sysv/linux/s390/lowlevellock.h: Likewise.
* sysdeps/unix/sysv/linux/powerpc/lowlevellock.h: Likewise.
* sysdeps/unix/sysv/linux/ia64/lowlevellock.h: Likewise.
* sysdeps/unix/sysv/linux/lowlevelrobustlock.c: New file.
--- libc/nptl/sysdeps/unix/sysv/linux/alpha/lowlevellock.h.jj 2005-09-09 12:58:42.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/alpha/lowlevellock.h 2006-02-17 19:20:29.000000000 +0100
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2006 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -64,6 +64,15 @@
INTERNAL_SYSCALL_ERROR_P (__ret, __err)? -__ret : __ret; \
})
+#define lll_robust_mutex_dead(futexv) \
+ do \
+ { \
+ int *__futexp = &(futexv); \
+ atomic_or (__futexp, FUTEX_OWNER_DIED); \
+ lll_futex_wake (__futexp, 1); \
+ } \
+ while (0)
+
/* Returns non-zero if error happened, zero if success. */
#define lll_futex_requeue(futexp, nr_wake, nr_move, mutex, val) \
({ \
@@ -106,7 +115,16 @@ __lll_mutex_cond_trylock(int *futex)
#define lll_mutex_cond_trylock(lock) __lll_mutex_cond_trylock (&(lock))
+static inline int __attribute__((always_inline))
+__lll_robust_mutex_trylock(int *futex, int id)
+{
+ return atomic_compare_and_exchange_val_acq (futex, id, 0) != 0;
+}
+#define lll_robust_mutex_trylock(lock, id) \
+ __lll_robust_mutex_trylock (&(lock), id)
+
extern void __lll_lock_wait (int *futex) attribute_hidden;
+extern int __lll_robust_lock_wait (int *futex) attribute_hidden;
static inline void __attribute__((always_inline))
__lll_mutex_lock(int *futex)
@@ -117,6 +135,18 @@ __lll_mutex_lock(int *futex)
#define lll_mutex_lock(futex) __lll_mutex_lock (&(futex))
+static inline int __attribute__ ((always_inline))
+__lll_robust_mutex_lock (int *futex, int id)
+{
+ int result = 0;
+ if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0)
+ result = __lll_robust_lock_wait (futex);
+ return result;
+}
+#define lll_robust_mutex_lock(futex, id) \
+ __lll_robust_mutex_lock (&(futex), id)
+
+
static inline void __attribute__ ((always_inline))
__lll_mutex_cond_lock (int *futex)
{
@@ -126,8 +156,14 @@ __lll_mutex_cond_lock (int *futex)
#define lll_mutex_cond_lock(futex) __lll_mutex_cond_lock (&(futex))
+#define lll_robust_mutex_cond_lock(futex, id) \
+ __lll_robust_mutex_lock (&(futex), (id) | FUTEX_WAITERS)
+
+
extern int __lll_timedlock_wait (int *futex, const struct timespec *)
attribute_hidden;
+extern int __lll_robust_timedlock_wait (int *futex, const struct timespec *)
+ attribute_hidden;
static inline int __attribute__ ((always_inline))
__lll_mutex_timedlock (int *futex, const struct timespec *abstime)
@@ -141,6 +177,19 @@ __lll_mutex_timedlock (int *futex, const
__lll_mutex_timedlock (&(futex), abstime)
+static inline int __attribute__ ((always_inline))
+__lll_robust_mutex_timedlock (int *futex, const struct timespec *abstime,
+ int id)
+{
+ int result = 0;
+ if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0)
+ result = __lll_robust_timedlock_wait (futex, abstime);
+ return result;
+}
+#define lll_robust_mutex_timedlock(futex, abstime, id) \
+ __lll_robust_mutex_timedlock (&(futex), abstime, id)
+
+
static inline void __attribute__ ((always_inline))
__lll_mutex_unlock (int *futex)
{
@@ -152,6 +201,17 @@ __lll_mutex_unlock (int *futex)
static inline void __attribute__ ((always_inline))
+__lll_robust_mutex_unlock (int *futex, int mask)
+{
+ int val = atomic_exchange_rel (futex, 0);
+ if (__builtin_expect (val & mask, 0))
+ lll_futex_wake (futex, 1);
+}
+#define lll_robust_mutex_unlock(futex) \
+ __lll_robust_mutex_unlock(&(futex), FUTEX_WAITERS)
+
+
+static inline void __attribute__ ((always_inline))
__lll_mutex_unlock_force (int *futex)
{
(void) atomic_exchange_rel (futex, 0);
--- libc/nptl/sysdeps/unix/sysv/linux/s390/lowlevellock.h.jj 2005-09-09 12:58:42.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/s390/lowlevellock.h 2006-02-17 19:20:29.000000000 +0100
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2006 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Martin Schwidefsky <schwidefsky@de.ibm.com>, 2003.
@@ -85,6 +85,17 @@
})
+#define lll_robust_mutex_dead(futexv) \
+ do \
+ { \
+ int *__futexp = &(futexv); \
+ \
+ atomic_or (__futexp, FUTEX_OWNER_DIED); \
+ lll_futex_wake (__futexp, 1); \
+ } \
+ while (0)
+
+
/* Returns non-zero if error happened, zero if success. */
#define lll_futex_requeue(futex, nr_wake, nr_move, mutex, val) \
({ \
@@ -167,7 +178,23 @@ __lll_mutex_cond_trylock (int *futex)
#define lll_mutex_cond_trylock(futex) __lll_mutex_cond_trylock (&(futex))
+static inline int
+__attribute__ ((always_inline))
+__lll_robust_mutex_trylock (int *futex, int id)
+{
+ unsigned int old;
+
+ __asm __volatile ("cs %0,%3,%1"
+ : "=d" (old), "=Q" (*futex)
+ : "0" (0), "d" (id), "m" (*futex) : "cc", "memory" );
+ return old != 0;
+}
+#define lll_robust_mutex_trylock(futex, id) \
+ __lll_robust_mutex_trylock (&(futex), id)
+
+
extern void __lll_lock_wait (int *futex) attribute_hidden;
+extern int __lll_robust_lock_wait (int *futex) attribute_hidden;
static inline void
__attribute__ ((always_inline))
@@ -178,6 +205,17 @@ __lll_mutex_lock (int *futex)
}
#define lll_mutex_lock(futex) __lll_mutex_lock (&(futex))
+static inline int
+__attribute__ ((always_inline))
+__lll_robust_mutex_lock (int *futex, int id)
+{
+ int result = 0;
+ if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0)
+ result = __lll_robust_lock_wait (futex);
+ return result;
+}
+#define lll_robust_mutex_lock(futex, id) __lll_robust_mutex_lock (&(futex), id)
+
static inline void
__attribute__ ((always_inline))
__lll_mutex_cond_lock (int *futex)
@@ -187,8 +225,13 @@ __lll_mutex_cond_lock (int *futex)
}
#define lll_mutex_cond_lock(futex) __lll_mutex_cond_lock (&(futex))
+#define lll_robust_mutex_cond_lock(futex, id) \
+ __lll_robust_mutex_lock (&(futex), (id) | FUTEX_WAITERS)
+
extern int __lll_timedlock_wait
(int *futex, const struct timespec *) attribute_hidden;
+extern int __lll_robust_timedlock_wait
+ (int *futex, const struct timespec *) attribute_hidden;
static inline int
__attribute__ ((always_inline))
@@ -202,6 +245,19 @@ __lll_mutex_timedlock (int *futex, const
#define lll_mutex_timedlock(futex, abstime) \
__lll_mutex_timedlock (&(futex), abstime)
+static inline int
+__attribute__ ((always_inline))
+__lll_robust_mutex_timedlock (int *futex, const struct timespec *abstime,
+ int id)
+{
+ int result = 0;
+ if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0)
+ result = __lll_robust_timedlock_wait (futex, abstime);
+ return result;
+}
+#define lll_robust_mutex_timedlock(futex, abstime, id) \
+ __lll_robust_mutex_timedlock (&(futex), abstime, id)
+
static inline void
__attribute__ ((always_inline))
@@ -220,6 +276,21 @@ __lll_mutex_unlock (int *futex)
static inline void
__attribute__ ((always_inline))
+__lll_robust_mutex_unlock (int *futex, int mask)
+{
+ int oldval;
+ int newval = 0;
+
+ lll_compare_and_swap (futex, oldval, newval, "slr %2,%2");
+ if (oldval & mask)
+ lll_futex_wake (futex, 1);
+}
+#define lll_robust_mutex_unlock(futex) \
+ __lll_robust_mutex_unlock(&(futex), FUTEX_WAITERS)
+
+
+static inline void
+__attribute__ ((always_inline))
__lll_mutex_unlock_force (int *futex)
{
*futex = 0;
--- libc/nptl/sysdeps/unix/sysv/linux/powerpc/lowlevellock.h.jj 2005-09-09 12:58:42.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/powerpc/lowlevellock.h 2006-02-17 14:36:19.000000000 +0100
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2006 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Paul Mackerras <paulus@au.ibm.com>, 2003.
@@ -69,6 +69,17 @@
INTERNAL_SYSCALL_ERROR_P (__ret, __err) ? -__ret : __ret; \
})
+#define lll_robust_mutex_dead(futexv) \
+ do \
+ { \
+ INTERNAL_SYSCALL_DECL (__err); \
+ int *__futexp = &(futexv); \
+ \
+ atomic_or (__futexp, FUTEX_OWNER_DIED); \
+ INTERNAL_SYSCALL (futex, __err, 4, __futexp, FUTEX_WAKE, 1, 0); \
+ } \
+ while (0)
+
/* Returns non-zero if error happened, zero if success. */
#define lll_futex_requeue(futexp, nr_wake, nr_move, mutex, val) \
({ \
@@ -102,8 +113,8 @@
# define __lll_rel_instr "sync"
#endif
-/* Set *futex to 1 if it is 0, atomically. Returns the old value */
-#define __lll_trylock(futex) \
+/* Set *futex to ID if it is 0, atomically. Returns the old value */
+#define __lll_robust_trylock(futex, id) \
({ int __val; \
__asm __volatile ("1: lwarx %0,0,%2\n" \
" cmpwi 0,%0,0\n" \
@@ -112,31 +123,26 @@
" bne- 1b\n" \
"2: " __lll_acq_instr \
: "=&r" (__val), "=m" (*futex) \
- : "r" (futex), "r" (1), "m" (*futex) \
+ : "r" (futex), "r" (id), "m" (*futex) \
: "cr0", "memory"); \
__val; \
})
+#define lll_robust_mutex_trylock(lock, id) __lll_robust_trylock (&(lock), id)
+
+/* Set *futex to 1 if it is 0, atomically. Returns the old value */
+#define __lll_trylock(futex) __lll_robust_trylock (futex, 1)
+
#define lll_mutex_trylock(lock) __lll_trylock (&(lock))
/* Set *futex to 2 if it is 0, atomically. Returns the old value */
-#define __lll_cond_trylock(futex) \
- ({ int __val; \
- __asm __volatile ("1: lwarx %0,0,%2\n" \
- " cmpwi 0,%0,0\n" \
- " bne 2f\n" \
- " stwcx. %3,0,%2\n" \
- " bne- 1b\n" \
- "2: " __lll_acq_instr \
- : "=&r" (__val), "=m" (*futex) \
- : "r" (futex), "r" (2), "m" (*futex) \
- : "cr0", "memory"); \
- __val; \
- })
+#define __lll_cond_trylock(futex) __lll_robust_trylock (futex, 2)
+
#define lll_mutex_cond_trylock(lock) __lll_cond_trylock (&(lock))
extern void __lll_lock_wait (int *futex) attribute_hidden;
+extern int __lll_robust_lock_wait (int *futex) attribute_hidden;
#define lll_mutex_lock(lock) \
(void) ({ \
@@ -146,6 +152,16 @@ extern void __lll_lock_wait (int *futex)
__lll_lock_wait (__futex); \
})
+#define lll_robust_mutex_lock(lock, id) \
+ ({ \
+ int *__futex = &(lock); \
+ int __val = 0; \
+ if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, id, \
+ 0), 0)) \
+ __val = __lll_robust_lock_wait (__futex); \
+ __val; \
+ })
+
#define lll_mutex_cond_lock(lock) \
(void) ({ \
int *__futex = &(lock); \
@@ -154,8 +170,22 @@ extern void __lll_lock_wait (int *futex)
__lll_lock_wait (__futex); \
})
+#define lll_robust_mutex_cond_lock(lock, id) \
+ ({ \
+ int *__futex = &(lock); \
+ int __val = 0; \
+ int __id = id | FUTEX_WAITERS; \
+ if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, __id,\
+ 0), 0)) \
+ __val = __lll_robust_lock_wait (__futex); \
+ __val; \
+ })
+
+
extern int __lll_timedlock_wait
(int *futex, const struct timespec *) attribute_hidden;
+extern int __lll_robust_timedlock_wait
+ (int *futex, const struct timespec *) attribute_hidden;
#define lll_mutex_timedlock(lock, abstime) \
({ \
@@ -167,6 +197,16 @@ extern int __lll_timedlock_wait
__val; \
})
+#define lll_robust_mutex_timedlock(lock, abstime, id) \
+ ({ \
+ int *__futex = &(lock); \
+ int __val = 0; \
+ if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, id, \
+ 0), 0)) \
+ __val = __lll_robust_timedlock_wait (__futex, abstime); \
+ __val; \
+ })
+
#define lll_mutex_unlock(lock) \
((void) ({ \
int *__futex = &(lock); \
@@ -175,6 +215,14 @@ extern int __lll_timedlock_wait
lll_futex_wake (__futex, 1); \
}))
+#define lll_robust_mutex_unlock(lock) \
+ ((void) ({ \
+ int *__futex = &(lock); \
+ int __val = atomic_exchange_rel (__futex, 0); \
+ if (__builtin_expect (__val & FUTEX_WAITERS, 0)) \
+ lll_futex_wake (__futex, 1); \
+ }))
+
#define lll_mutex_unlock_force(lock) \
((void) ({ \
int *__futex = &(lock); \
--- libc/nptl/sysdeps/unix/sysv/linux/ia64/lowlevellock.h.jj 2005-09-09 12:58:42.000000000 +0200
+++ libc/nptl/sysdeps/unix/sysv/linux/ia64/lowlevellock.h 2006-02-17 19:20:29.000000000 +0100
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2006 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Jakub Jelinek <jakub@redhat.com>, 2003.
@@ -55,6 +55,15 @@
_r10 == -1 ? -_retval : _retval; \
})
+#define lll_robust_mutex_dead(futexv) \
+do \
+ { \
+ int *__futexp = &(futexv); \
+ atomic_or (__futexp, FUTEX_OWNER_DIED); \
+ DO_INLINE_SYSCALL(futex, 3, (long) __futexp, FUTEX_WAKE, 1); \
+ } \
+while (0)
+
/* Returns non-zero if error happened, zero if success. */
#define lll_futex_requeue(ftx, nr_wake, nr_move, mutex, val) \
({ \
@@ -79,12 +88,19 @@
#define lll_mutex_trylock(futex) __lll_mutex_trylock (&(futex))
+#define __lll_robust_mutex_trylock(futex, id) \
+ (atomic_compare_and_exchange_val_acq (futex, id, 0) != 0)
+#define lll_robust_mutex_trylock(futex, id) \
+ __lll_robust_mutex_trylock (&(futex), id)
+
+
#define __lll_mutex_cond_trylock(futex) \
(atomic_compare_and_exchange_val_acq (futex, 2, 0) != 0)
#define lll_mutex_cond_trylock(futex) __lll_mutex_cond_trylock (&(futex))
extern void __lll_lock_wait (int *futex) attribute_hidden;
+extern int __lll_robust_lock_wait (int *futex) attribute_hidden;
#define __lll_mutex_lock(futex) \
@@ -96,6 +112,18 @@ extern void __lll_lock_wait (int *futex)
#define lll_mutex_lock(futex) __lll_mutex_lock (&(futex))
+#define __lll_robust_mutex_lock(futex, id) \
+ ({ \
+ int *__futex = (futex); \
+ int __val = 0; \
+ \
+ if (atomic_compare_and_exchange_bool_acq (__futex, id, 0) != 0) \
+ __val = __lll_robust_lock_wait (__futex); \
+ __val; \
+ })
+#define lll_robust_mutex_lock(futex, id) __lll_robust_mutex_lock (&(futex), id)
+
+
#define __lll_mutex_cond_lock(futex) \
((void) ({ \
int *__futex = (futex); \
@@ -105,8 +133,24 @@ extern void __lll_lock_wait (int *futex)
#define lll_mutex_cond_lock(futex) __lll_mutex_cond_lock (&(futex))
+#define __lll_robust_mutex_cond_lock(futex, id) \
+ ({ \
+ int *__futex = (futex); \
+ int __val = 0; \
+ int __id = (id) | FUTEX_WAITERS; \
+ \
+ if (atomic_compare_and_exchange_bool_acq (__futex, __id, 0) != 0) \
+ __val = __lll_robust_lock_wait (__futex); \
+ __val; \
+ })
+#define lll_robust_mutex_cond_lock(futex, id) \
+ __lll_robust_mutex_cond_lock (&(futex), id)
+
+
extern int __lll_timedlock_wait (int *futex, const struct timespec *)
attribute_hidden;
+extern int __lll_robust_timedlock_wait (int *futex, const struct timespec *)
+ attribute_hidden;
#define __lll_mutex_timedlock(futex, abstime) \
@@ -122,6 +166,19 @@ extern int __lll_timedlock_wait (int *fu
__lll_mutex_timedlock (&(futex), abstime)
+#define __lll_robust_mutex_timedlock(futex, abstime, id) \
+ ({ \
+ int *__futex = (futex); \
+ int __val = 0; \
+ \
+ if (atomic_compare_and_exchange_bool_acq (__futex, id, 0) != 0) \
+ __val = __lll_robust_timedlock_wait (__futex, abstime); \
+ __val; \
+ })
+#define lll_robust_mutex_timedlock(futex, abstime, id) \
+ __lll_robust_mutex_timedlock (&(futex), abstime, id)
+
+
#define __lll_mutex_unlock(futex) \
((void) ({ \
int *__futex = (futex); \
@@ -134,6 +191,18 @@ extern int __lll_timedlock_wait (int *fu
__lll_mutex_unlock(&(futex))
+#define __lll_robust_mutex_unlock(futex) \
+ ((void) ({ \
+ int *__futex = (futex); \
+ int __val = atomic_exchange_rel (__futex, 0); \
+ \
+ if (__builtin_expect (__val & FUTEX_WAITERS, 0)) \
+ lll_futex_wake (__futex, 1); \
+ }))
+#define lll_robust_mutex_unlock(futex) \
+ __lll_robust_mutex_unlock(&(futex))
+
+
#define __lll_mutex_unlock_force(futex) \
((void) ({ \
int *__futex = (futex); \
--- libc/nptl/sysdeps/unix/sysv/linux/lowlevelrobustlock.c.jj 2006-02-17 09:13:58.000000000 +0100
+++ libc/nptl/sysdeps/unix/sysv/linux/lowlevelrobustlock.c 2006-02-17 09:13:58.000000000 +0100
@@ -0,0 +1,95 @@
+/* Copyright (C) 2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Jakub Jelinek <jakub@redhat.com>, 2006.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <errno.h>
+#include <sysdep.h>
+#include <lowlevellock.h>
+#include <sys/time.h>
+#include <pthreadP.h>
+
+
+int
+__lll_robust_lock_wait (int *futex)
+{
+ int oldval = *futex;
+ int tid = THREAD_GETMEM (THREAD_SELF, tid);
+
+ do
+ {
+ if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
+ return oldval;
+
+ int newval = oldval | FUTEX_WAITERS;
+ if (oldval != newval
+ && atomic_compare_and_exchange_bool_acq (futex, newval, oldval))
+ continue;
+
+ lll_futex_wait (futex, newval);
+ }
+ while ((oldval = atomic_compare_and_exchange_val_acq (futex, tid, 0)) != 0);
+ return 0;
+}
+
+
+int
+__lll_robust_timedlock_wait (int *futex, const struct timespec *abstime)
+{
+ /* Reject invalid timeouts. */
+ if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
+ return EINVAL;
+
+ int tid = THREAD_GETMEM (THREAD_SELF, tid);
+
+ do
+ {
+ struct timeval tv;
+ struct timespec rt;
+
+ /* Get the current time. */
+ (void) __gettimeofday (&tv, NULL);
+
+ /* Compute relative timeout. */
+ rt.tv_sec = abstime->tv_sec - tv.tv_sec;
+ rt.tv_nsec = abstime->tv_nsec - tv.tv_usec * 1000;
+ if (rt.tv_nsec < 0)
+ {
+ rt.tv_nsec += 1000000000;
+ --rt.tv_sec;
+ }
+
+ /* Already timed out? */
+ if (rt.tv_sec < 0)
+ return ETIMEDOUT;
+
+ /* Wait. */
+ int oldval = *futex;
+ if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
+ return oldval;
+
+ int newval = oldval | FUTEX_WAITERS;
+ if (oldval != newval
+ && atomic_compare_and_exchange_bool_acq (futex, newval, oldval))
+ continue;
+
+ lll_futex_timed_wait (futex, newval, &rt);
+ }
+ while (atomic_compare_and_exchange_bool_acq (futex, tid, 0));
+
+ return 0;
+}
--- libc/include/atomic.h.jj 2005-08-23 12:00:25.000000000 +0200
+++ libc/include/atomic.h 2006-02-17 09:13:58.000000000 +0100
@@ -1,5 +1,5 @@
/* Internal macros for atomic operations for GNU C Library.
- Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+ Copyright (C) 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -273,6 +273,41 @@
__oldval & __mask; })
#endif
+/* Atomically *mem &= mask and return the old value of *mem. */
+#ifndef atomic_and
+# define atomic_and(mem, mask) \
+ ({ __typeof (*(mem)) __oldval; \
+ __typeof (mem) __memp = (mem); \
+ __typeof (*(mem)) __mask = (mask); \
+ \
+ do \
+ __oldval = (*__memp); \
+ while (__builtin_expect (atomic_compare_and_exchange_bool_acq (__memp, \
+ __oldval \
+ & __mask, \
+ __oldval),\
+ 0)); \
+ \
+ __oldval; })
+#endif
+
+/* Atomically *mem |= mask and return the old value of *mem. */
+#ifndef atomic_or
+# define atomic_or(mem, mask) \
+ ({ __typeof (*(mem)) __oldval; \
+ __typeof (mem) __memp = (mem); \
+ __typeof (*(mem)) __mask = (mask); \
+ \
+ do \
+ __oldval = (*__memp); \
+ while (__builtin_expect (atomic_compare_and_exchange_bool_acq (__memp, \
+ __oldval \
+ | __mask, \
+ __oldval),\
+ 0)); \
+ \
+ __oldval; })
+#endif
#ifndef atomic_full_barrier
# define atomic_full_barrier() __asm ("" ::: "memory")
Jakub
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCH] glibc build fixes
2006-02-17 18:23 ` Jakub Jelinek
@ 2006-02-17 18:51 ` Ulrich Drepper
0 siblings, 0 replies; 7+ messages in thread
From: Ulrich Drepper @ 2006-02-17 18:51 UTC (permalink / raw)
To: Jakub Jelinek; +Cc: libc-hacker
[-- Attachment #1: Type: text/plain, Size: 101 bytes --]
Applied.
--
➧ Ulrich Drepper ➧ Red Hat, Inc. ➧ 444 Castro St ➧ Mountain View, CA ❖
[-- Attachment #2: OpenPGP digital signature --]
[-- Type: application/pgp-signature, Size: 251 bytes --]
^ permalink raw reply [flat|nested] 7+ messages in thread
end of thread, other threads:[~2006-02-17 18:51 UTC | newest]
Thread overview: 7+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2006-02-13 13:02 [PATCH] glibc build fixes Jakub Jelinek
2006-02-15 17:20 ` Ulrich Drepper
2006-02-17 13:27 ` Kaz Kojima
2006-02-17 15:37 ` Ulrich Drepper
2006-02-17 16:08 ` Jakub Jelinek
2006-02-17 18:23 ` Jakub Jelinek
2006-02-17 18:51 ` Ulrich Drepper
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for read-only IMAP folder(s) and NNTP newsgroup(s).