* [hurd,commited 0/7] hurd: add pshared semaphore support.
@ 2020-12-16 0:59 Samuel Thibault
2020-12-16 0:59 ` [hurd, commited 1/7] hurd: Rename LLL_INITIALIZER to LLL_LOCK_INITIALIZER Samuel Thibault
` (6 more replies)
0 siblings, 7 replies; 8+ messages in thread
From: Samuel Thibault @ 2020-12-16 0:59 UTC (permalink / raw)
To: libc-alpha; +Cc: Samuel Thibault, commit-hurd
Samuel Thibault (7):
hurd: Rename LLL_INITIALIZER to LLL_LOCK_INITIALIZER
hurd: make lll_* take a variable instead of a ptr
hurd: Add __lll_abstimed_wait_intr
htl: Add futex-internal.h
hurd: Add __libc_open and __libc_close
hurd: Add LLL_PRIVATE and LLL_SHARED
htl: Add pshared semaphore support
htl/Makefile | 2 +-
htl/pt-internal.h | 33 +++
hurd/Makefile | 1 +
hurd/RPC_gsync_wait_intr.c | 4 +
hurd/Versions | 3 +-
hurd/hurdlock.c | 25 +-
hurd/hurdlock.h | 54 +++--
hurd/hurdpid.c | 2 +-
hurd/setauth.c | 2 +-
mach/lock-intern.h | 8 +-
mach/lowlevellock.h | 47 +++-
mach/mutex-init.c | 2 +-
sysdeps/htl/bits/semaphore.h | 20 +-
sysdeps/htl/futex-internal.h | 39 +++
sysdeps/htl/sem-destroy.c | 10 +-
sysdeps/htl/sem-getvalue.c | 10 +-
sysdeps/htl/sem-init.c | 10 +-
sysdeps/htl/sem-post.c | 54 +++--
sysdeps/htl/sem-timedwait.c | 263 +++++++++++----------
sysdeps/htl/sem-trywait.c | 15 +-
sysdeps/htl/sem-waitfast.c | 55 +++++
sysdeps/mach/hurd/close.c | 1 +
sysdeps/mach/hurd/htl/pt-mutex-lock.c | 8 +-
sysdeps/mach/hurd/htl/pt-mutex-timedlock.c | 6 +-
sysdeps/mach/hurd/htl/pt-mutex-trylock.c | 8 +-
sysdeps/mach/hurd/htl/pt-mutex-unlock.c | 8 +-
sysdeps/mach/hurd/htl/pt-mutex.h | 2 +-
sysdeps/mach/hurd/i386/Makefile | 1 -
sysdeps/mach/hurd/setpgid.c | 2 +-
sysdeps/mach/hurd/setsid.c | 2 +-
sysdeps/mach/hurd/tls.h | 4 +-
sysdeps/mach/libc-lock.h | 20 +-
32 files changed, 466 insertions(+), 255 deletions(-)
create mode 100644 hurd/RPC_gsync_wait_intr.c
create mode 100644 sysdeps/htl/futex-internal.h
create mode 100644 sysdeps/htl/sem-waitfast.c
--
2.29.2
^ permalink raw reply [flat|nested] 8+ messages in thread
* [hurd, commited 1/7] hurd: Rename LLL_INITIALIZER to LLL_LOCK_INITIALIZER
2020-12-16 0:59 [hurd,commited 0/7] hurd: add pshared semaphore support Samuel Thibault
@ 2020-12-16 0:59 ` Samuel Thibault
2020-12-16 0:59 ` [hurd, commited 2/7] hurd: make lll_* take a variable instead of a ptr Samuel Thibault
` (5 subsequent siblings)
6 siblings, 0 replies; 8+ messages in thread
From: Samuel Thibault @ 2020-12-16 0:59 UTC (permalink / raw)
To: libc-alpha; +Cc: Samuel Thibault, commit-hurd
To be coherent with other ports.
---
hurd/setauth.c | 2 +-
mach/lock-intern.h | 2 +-
mach/lowlevellock.h | 2 +-
mach/mutex-init.c | 2 +-
sysdeps/mach/libc-lock.h | 8 ++++----
5 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/hurd/setauth.c b/hurd/setauth.c
index 5493db5e5d..b1037172ba 100644
--- a/hurd/setauth.c
+++ b/hurd/setauth.c
@@ -24,7 +24,7 @@
/* Things in the library which want to be run when the auth port changes. */
DEFINE_HOOK (_hurd_reauth_hook, (auth_t new_auth));
-static unsigned int reauth_lock = LLL_INITIALIZER;
+static unsigned int reauth_lock = LLL_LOCK_INITIALIZER;
/* Set the auth port to NEW, and reauthenticate
everything used by the library. */
diff --git a/mach/lock-intern.h b/mach/lock-intern.h
index a68674cef4..62faf98039 100644
--- a/mach/lock-intern.h
+++ b/mach/lock-intern.h
@@ -31,7 +31,7 @@
typedef unsigned int __spin_lock_t;
/* Static initializer for spinlocks. */
-#define __SPIN_LOCK_INITIALIZER LLL_INITIALIZER
+#define __SPIN_LOCK_INITIALIZER LLL_LOCK_INITIALIZER
/* Initialize LOCK. */
diff --git a/mach/lowlevellock.h b/mach/lowlevellock.h
index 6d92adca1c..cf67ccd589 100644
--- a/mach/lowlevellock.h
+++ b/mach/lowlevellock.h
@@ -32,7 +32,7 @@
#endif
/* Static initializer for low-level locks. */
-#define LLL_INITIALIZER 0
+#define LLL_LOCK_INITIALIZER 0
/* Wait on address PTR, without blocking if its contents
* are different from VAL. */
diff --git a/mach/mutex-init.c b/mach/mutex-init.c
index acacec2fb0..fc898f66d4 100644
--- a/mach/mutex-init.c
+++ b/mach/mutex-init.c
@@ -22,6 +22,6 @@
void
__mutex_init (void *lock)
{
- *(int *)lock = LLL_INITIALIZER;
+ *(int *)lock = LLL_LOCK_INITIALIZER;
}
libc_hidden_def (__mutex_init)
diff --git a/sysdeps/mach/libc-lock.h b/sysdeps/mach/libc-lock.h
index e04dcc445d..3993a57b26 100644
--- a/sysdeps/mach/libc-lock.h
+++ b/sysdeps/mach/libc-lock.h
@@ -57,13 +57,13 @@ typedef struct __libc_lock_recursive_opaque__ __libc_lock_recursive_t;
CLASS __libc_lock_t NAME;
/* Define an initialized lock variable NAME with storage class CLASS. */
-#define _LIBC_LOCK_INITIALIZER LLL_INITIALIZER
+#define _LIBC_LOCK_INITIALIZER LLL_LOCK_INITIALIZER
#define __libc_lock_define_initialized(CLASS,NAME) \
- CLASS __libc_lock_t NAME = LLL_INITIALIZER;
+ CLASS __libc_lock_t NAME = LLL_LOCK_INITIALIZER;
/* Initialize the named lock variable, leaving it in a consistent, unlocked
state. */
-#define __libc_lock_init(NAME) (NAME) = LLL_INITIALIZER
+#define __libc_lock_init(NAME) (NAME) = LLL_LOCK_INITIALIZER
/* Finalize the named lock variable, which must be locked. It cannot be
used again until __libc_lock_init is called again on it. This must be
@@ -86,7 +86,7 @@ typedef struct __libc_lock_recursive_opaque__ __libc_lock_recursive_t;
#define __libc_lock_define_recursive(CLASS,NAME) \
CLASS __libc_lock_recursive_t NAME;
-#define _LIBC_LOCK_RECURSIVE_INITIALIZER { LLL_INITIALIZER, 0, 0 }
+#define _LIBC_LOCK_RECURSIVE_INITIALIZER { LLL_LOCK_INITIALIZER, 0, 0 }
#define __libc_lock_define_initialized_recursive(CLASS,NAME) \
CLASS __libc_lock_recursive_t NAME = _LIBC_LOCK_RECURSIVE_INITIALIZER;
--
2.29.2
^ permalink raw reply [flat|nested] 8+ messages in thread
* [hurd, commited 2/7] hurd: make lll_* take a variable instead of a ptr
2020-12-16 0:59 [hurd,commited 0/7] hurd: add pshared semaphore support Samuel Thibault
2020-12-16 0:59 ` [hurd, commited 1/7] hurd: Rename LLL_INITIALIZER to LLL_LOCK_INITIALIZER Samuel Thibault
@ 2020-12-16 0:59 ` Samuel Thibault
2020-12-16 0:59 ` [hurd,commited 3/7] hurd: Add __lll_abstimed_wait_intr Samuel Thibault
` (4 subsequent siblings)
6 siblings, 0 replies; 8+ messages in thread
From: Samuel Thibault @ 2020-12-16 0:59 UTC (permalink / raw)
To: libc-alpha; +Cc: Samuel Thibault, commit-hurd
To be coherent with other ports, let's make lll_* take a variable, and
rename those that keep taking a ptr into __lll_*.
---
hurd/hurdlock.c | 14 ++++----
hurd/hurdlock.h | 38 +++++++++++++---------
hurd/hurdpid.c | 2 +-
mach/lock-intern.h | 6 ++--
mach/lowlevellock.h | 24 ++++++++++----
sysdeps/mach/hurd/htl/pt-mutex-lock.c | 8 ++---
sysdeps/mach/hurd/htl/pt-mutex-timedlock.c | 6 ++--
sysdeps/mach/hurd/htl/pt-mutex-trylock.c | 8 ++---
sysdeps/mach/hurd/htl/pt-mutex-unlock.c | 8 ++---
sysdeps/mach/hurd/htl/pt-mutex.h | 2 +-
sysdeps/mach/hurd/setpgid.c | 2 +-
sysdeps/mach/hurd/setsid.c | 2 +-
sysdeps/mach/hurd/tls.h | 4 +--
sysdeps/mach/libc-lock.h | 12 +++----
14 files changed, 76 insertions(+), 60 deletions(-)
diff --git a/hurd/hurdlock.c b/hurd/hurdlock.c
index 59d017fc02..3b9974bee5 100644
--- a/hurd/hurdlock.c
+++ b/hurd/hurdlock.c
@@ -51,7 +51,7 @@ __lll_abstimed_wait (void *ptr, int val,
return EINVAL;
int mlsec = compute_reltime (tsp, clk);
- return mlsec < 0 ? KERN_TIMEDOUT : lll_timed_wait (ptr, val, mlsec, flags);
+ return mlsec < 0 ? KERN_TIMEDOUT : __lll_timed_wait (ptr, val, mlsec, flags);
}
int
@@ -62,7 +62,7 @@ __lll_abstimed_xwait (void *ptr, int lo, int hi,
return EINVAL;
int mlsec = compute_reltime (tsp, clk);
- return mlsec < 0 ? KERN_TIMEDOUT : lll_timed_xwait (ptr, lo, hi, mlsec,
+ return mlsec < 0 ? KERN_TIMEDOUT : __lll_timed_xwait (ptr, lo, hi, mlsec,
flags);
}
@@ -73,7 +73,7 @@ __lll_abstimed_lock (void *ptr,
if (clk != CLOCK_REALTIME)
return EINVAL;
- if (lll_trylock (ptr) == 0)
+ if (__lll_trylock (ptr) == 0)
return 0;
while (1)
@@ -84,7 +84,7 @@ __lll_abstimed_lock (void *ptr,
return EINVAL;
int mlsec = compute_reltime (tsp, clk);
- if (mlsec < 0 || lll_timed_wait (ptr, 2, mlsec, flags) == KERN_TIMEDOUT)
+ if (mlsec < 0 || __lll_timed_wait (ptr, 2, mlsec, flags) == KERN_TIMEDOUT)
return ETIMEDOUT;
}
}
@@ -140,7 +140,7 @@ __lll_robust_lock (void *ptr, int flags)
}
else
{
- lll_timed_wait (iptr, val, wait_time, flags);
+ __lll_timed_wait (iptr, val, wait_time, flags);
if (wait_time < MAX_WAIT_TIME)
wait_time <<= 1;
}
@@ -187,7 +187,7 @@ __lll_robust_abstimed_lock (void *ptr,
else if (mlsec > wait_time)
mlsec = wait_time;
- int res = lll_timed_wait (iptr, val, mlsec, flags);
+ int res = __lll_timed_wait (iptr, val, mlsec, flags);
if (res == KERN_TIMEDOUT)
return ETIMEDOUT;
else if (wait_time < MAX_WAIT_TIME)
@@ -223,7 +223,7 @@ __lll_robust_unlock (void *ptr, int flags)
{
if (val & LLL_WAITERS)
{
- lll_set_wake (ptr, 0, flags);
+ __lll_set_wake (ptr, 0, flags);
break;
}
else if (atomic_compare_exchange_weak_release ((unsigned int *)ptr, &val, 0))
diff --git a/hurd/hurdlock.h b/hurd/hurdlock.h
index 362bcf6cc2..c1df42bea4 100644
--- a/hurd/hurdlock.h
+++ b/hurd/hurdlock.h
@@ -31,21 +31,21 @@ struct timespec;
/* Wait on 64-bit address PTR, without blocking if its contents
are different from the pair <LO, HI>. */
-#define lll_xwait(ptr, lo, hi, flags) \
+#define __lll_xwait(ptr, lo, hi, flags) \
__gsync_wait (__mach_task_self (), \
(vm_offset_t)ptr, lo, hi, 0, flags | GSYNC_QUAD)
-/* Same as 'lll_wait', but only block for MLSEC milliseconds. */
-#define lll_timed_wait(ptr, val, mlsec, flags) \
+/* Same as '__lll_wait', but only block for MLSEC milliseconds. */
+#define __lll_timed_wait(ptr, val, mlsec, flags) \
__gsync_wait (__mach_task_self (), \
(vm_offset_t)ptr, val, 0, mlsec, flags | GSYNC_TIMED)
-/* Same as 'lll_xwait', but only block for MLSEC milliseconds. */
-#define lll_timed_xwait(ptr, lo, hi, mlsec, flags) \
+/* Same as '__lll_xwait', but only block for MLSEC milliseconds. */
+#define __lll_timed_xwait(ptr, lo, hi, mlsec, flags) \
__gsync_wait (__mach_task_self (), (vm_offset_t)ptr, \
lo, hi, mlsec, flags | GSYNC_TIMED | GSYNC_QUAD)
-/* Same as 'lll_wait', but only block until TSP elapses,
+/* Same as '__lll_wait', but only block until TSP elapses,
using clock CLK. */
extern int __lll_abstimed_wait (void *__ptr, int __val,
const struct timespec *__tsp, int __flags, int __clk);
@@ -63,6 +63,8 @@ extern int __lll_abstimed_lock (void *__ptr,
/* Acquire the lock at PTR, but return with an error if
the process containing the owner thread dies. */
extern int __lll_robust_lock (void *__ptr, int __flags);
+#define lll_robust_lock(var, flags) \
+ __lll_robust_lock (&(var), flags)
/* Same as '__lll_robust_lock', but only block until TSP
elapses, using clock CLK. */
@@ -72,19 +74,23 @@ extern int __lll_robust_abstimed_lock (void *__ptr,
/* Same as '__lll_robust_lock', but return with an error
if the lock cannot be acquired without blocking. */
extern int __lll_robust_trylock (void *__ptr);
+#define lll_robust_trylock(var) \
+ __lll_robust_trylock (&(var))
/* Wake one or more threads waiting on address PTR,
setting its value to VAL before doing so. */
-#define lll_set_wake(ptr, val, flags) \
+#define __lll_set_wake(ptr, val, flags) \
__gsync_wake (__mach_task_self (), \
(vm_offset_t)ptr, val, flags | GSYNC_MUTATE)
/* Release the robust lock at PTR. */
extern void __lll_robust_unlock (void *__ptr, int __flags);
+#define lll_robust_unlock(var, flags) \
+ __lll_robust_unlock (&(var), flags)
/* Rearrange threads waiting on address SRC to instead wait on
DST, waking one of them if WAIT_ONE is non-zero. */
-#define lll_requeue(src, dst, wake_one, flags) \
+#define __lll_requeue(src, dst, wake_one, flags) \
__gsync_requeue (__mach_task_self (), (vm_offset_t)src, \
(vm_offset_t)dst, (boolean_t)wake_one, flags)
@@ -93,31 +99,31 @@ extern void __lll_robust_unlock (void *__ptr, int __flags);
every one of these calls, defaulting to CLOCK_REALTIME if
no argument is passed. */
-#define lll_abstimed_wait(ptr, val, tsp, flags, ...) \
+#define lll_abstimed_wait(var, val, tsp, flags, ...) \
({ \
const clockid_t __clk[] = { CLOCK_REALTIME, ##__VA_ARGS__ }; \
- __lll_abstimed_wait ((ptr), (val), (tsp), (flags), \
+ __lll_abstimed_wait (&(var), (val), (tsp), (flags), \
__clk[sizeof (__clk) / sizeof (__clk[0]) - 1]); \
})
-#define lll_abstimed_xwait(ptr, lo, hi, tsp, flags, ...) \
+#define lll_abstimed_xwait(var, lo, hi, tsp, flags, ...) \
({ \
const clockid_t __clk[] = { CLOCK_REALTIME, ##__VA_ARGS__ }; \
- __lll_abstimed_xwait ((ptr), (lo), (hi), (tsp), (flags), \
+ __lll_abstimed_xwait (&(var), (lo), (hi), (tsp), (flags), \
__clk[sizeof (__clk) / sizeof (__clk[0]) - 1]); \
})
-#define lll_abstimed_lock(ptr, tsp, flags, ...) \
+#define lll_abstimed_lock(var, tsp, flags, ...) \
({ \
const clockid_t __clk[] = { CLOCK_REALTIME, ##__VA_ARGS__ }; \
- __lll_abstimed_lock ((ptr), (tsp), (flags), \
+ __lll_abstimed_lock (&(var), (tsp), (flags), \
__clk[sizeof (__clk) / sizeof (__clk[0]) - 1]); \
})
-#define lll_robust_abstimed_lock(ptr, tsp, flags, ...) \
+#define lll_robust_abstimed_lock(var, tsp, flags, ...) \
({ \
const clockid_t __clk[] = { CLOCK_REALTIME, ##__VA_ARGS__ }; \
- __lll_robust_abstimed_lock ((ptr), (tsp), (flags), \
+ __lll_robust_abstimed_lock (&(var), (tsp), (flags), \
__clk[sizeof (__clk) / sizeof (__clk[0]) - 1]); \
})
diff --git a/hurd/hurdpid.c b/hurd/hurdpid.c
index dd8281cda7..5607570d4b 100644
--- a/hurd/hurdpid.c
+++ b/hurd/hurdpid.c
@@ -66,7 +66,7 @@ _S_msg_proc_newids (mach_port_t me,
/* Notify any waiting user threads that the id change as been completed. */
++_hurd_pids_changed_stamp;
- lll_wake (&_hurd_pids_changed_stamp, GSYNC_BROADCAST);
+ lll_wake (_hurd_pids_changed_stamp, GSYNC_BROADCAST);
return 0;
}
diff --git a/mach/lock-intern.h b/mach/lock-intern.h
index 62faf98039..77fc4d17f8 100644
--- a/mach/lock-intern.h
+++ b/mach/lock-intern.h
@@ -57,7 +57,7 @@ extern void __spin_lock (__spin_lock_t *__lock);
_EXTERN_INLINE void
__spin_lock (__spin_lock_t *__lock)
{
- lll_lock (__lock, 0);
+ __lll_lock (__lock, 0);
}
#endif
@@ -68,7 +68,7 @@ extern void __spin_unlock (__spin_lock_t *__lock);
_EXTERN_INLINE void
__spin_unlock (__spin_lock_t *__lock)
{
- lll_unlock (__lock, 0);
+ __lll_unlock (__lock, 0);
}
#endif
@@ -79,7 +79,7 @@ extern int __spin_try_lock (__spin_lock_t *__lock);
_EXTERN_INLINE int
__spin_try_lock (__spin_lock_t *__lock)
{
- return (lll_trylock (__lock) == 0);
+ return (__lll_trylock (__lock) == 0);
}
#endif
diff --git a/mach/lowlevellock.h b/mach/lowlevellock.h
index cf67ccd589..0a22a030b4 100644
--- a/mach/lowlevellock.h
+++ b/mach/lowlevellock.h
@@ -36,16 +36,20 @@
/* Wait on address PTR, without blocking if its contents
* are different from VAL. */
-#define lll_wait(ptr, val, flags) \
+#define __lll_wait(ptr, val, flags) \
__gsync_wait (__mach_task_self (), \
(vm_offset_t)(ptr), (val), 0, 0, (flags))
+#define lll_wait(var, val, flags) \
+ __lll_wait (&(var), val, flags)
/* Wake one or more threads waiting on address PTR. */
-#define lll_wake(ptr, flags) \
+#define __lll_wake(ptr, flags) \
__gsync_wake (__mach_task_self (), (vm_offset_t)(ptr), 0, (flags))
+#define lll_wake(var, flags) \
+ __lll_wake (&(var), flags)
/* Acquire the lock at PTR. */
-#define lll_lock(ptr, flags) \
+#define __lll_lock(ptr, flags) \
({ \
int *__iptr = (int *)(ptr); \
int __flags = (flags); \
@@ -55,27 +59,33 @@
{ \
if (atomic_exchange_acq (__iptr, 2) == 0) \
break; \
- lll_wait (__iptr, 2, __flags); \
+ __lll_wait (__iptr, 2, __flags); \
} \
(void)0; \
})
+#define lll_lock(var, flags) \
+ __lll_lock (&(var), flags)
/* Try to acquire the lock at PTR, without blocking.
Evaluates to zero on success. */
-#define lll_trylock(ptr) \
+#define __lll_trylock(ptr) \
({ \
int *__iptr = (int *)(ptr); \
*__iptr == 0 \
&& atomic_compare_and_exchange_bool_acq (__iptr, 1, 0) == 0 ? 0 : -1; \
})
+#define lll_trylock(var) \
+ __lll_trylock (&(var))
/* Release the lock at PTR. */
-#define lll_unlock(ptr, flags) \
+#define __lll_unlock(ptr, flags) \
({ \
int *__iptr = (int *)(ptr); \
if (atomic_exchange_rel (__iptr, 0) == 2) \
- lll_wake (__iptr, (flags)); \
+ __lll_wake (__iptr, (flags)); \
(void)0; \
})
+#define lll_unlock(var, flags) \
+ __lll_unlock (&(var), flags)
#endif
diff --git a/sysdeps/mach/hurd/htl/pt-mutex-lock.c b/sysdeps/mach/hurd/htl/pt-mutex-lock.c
index 22510701d8..ed1f6c13a1 100644
--- a/sysdeps/mach/hurd/htl/pt-mutex-lock.c
+++ b/sysdeps/mach/hurd/htl/pt-mutex-lock.c
@@ -33,7 +33,7 @@ __pthread_mutex_lock (pthread_mutex_t *mtxp)
switch (MTX_TYPE (mtxp))
{
case PT_MTX_NORMAL:
- lll_lock (&mtxp->__lock, flags);
+ lll_lock (mtxp->__lock, flags);
break;
case PT_MTX_RECURSIVE:
@@ -47,7 +47,7 @@ __pthread_mutex_lock (pthread_mutex_t *mtxp)
return ret;
}
- lll_lock (&mtxp->__lock, flags);
+ lll_lock (mtxp->__lock, flags);
mtx_set_owner (mtxp, self, flags);
mtxp->__cnt = 1;
break;
@@ -57,7 +57,7 @@ __pthread_mutex_lock (pthread_mutex_t *mtxp)
if (mtx_owned_p (mtxp, self, flags))
return EDEADLK;
- lll_lock (&mtxp->__lock, flags);
+ lll_lock (mtxp->__lock, flags);
mtx_set_owner (mtxp, self, flags);
break;
@@ -65,7 +65,7 @@ __pthread_mutex_lock (pthread_mutex_t *mtxp)
case PT_MTX_RECURSIVE | PTHREAD_MUTEX_ROBUST:
case PT_MTX_ERRORCHECK | PTHREAD_MUTEX_ROBUST:
self = _pthread_self ();
- ROBUST_LOCK (self, mtxp, __lll_robust_lock, flags);
+ ROBUST_LOCK (self, mtxp, lll_robust_lock, flags);
break;
default:
diff --git a/sysdeps/mach/hurd/htl/pt-mutex-timedlock.c b/sysdeps/mach/hurd/htl/pt-mutex-timedlock.c
index 198b340429..965e8b24fb 100644
--- a/sysdeps/mach/hurd/htl/pt-mutex-timedlock.c
+++ b/sysdeps/mach/hurd/htl/pt-mutex-timedlock.c
@@ -34,7 +34,7 @@ __pthread_mutex_clocklock (pthread_mutex_t *mtxp,
switch (MTX_TYPE (mtxp))
{
case PT_MTX_NORMAL:
- ret = lll_abstimed_lock (&mtxp->__lock, tsp, flags, clockid);
+ ret = lll_abstimed_lock (mtxp->__lock, tsp, flags, clockid);
break;
case PT_MTX_RECURSIVE:
@@ -47,7 +47,7 @@ __pthread_mutex_clocklock (pthread_mutex_t *mtxp,
++mtxp->__cnt;
ret = 0;
}
- else if ((ret = lll_abstimed_lock (&mtxp->__lock, tsp, flags, clockid)) == 0)
+ else if ((ret = lll_abstimed_lock (mtxp->__lock, tsp, flags, clockid)) == 0)
{
mtx_set_owner (mtxp, self, flags);
mtxp->__cnt = 1;
@@ -59,7 +59,7 @@ __pthread_mutex_clocklock (pthread_mutex_t *mtxp,
self = _pthread_self ();
if (mtx_owned_p (mtxp, self, flags))
ret = EDEADLK;
- else if ((ret = lll_abstimed_lock (&mtxp->__lock, tsp, flags, clockid)) == 0)
+ else if ((ret = lll_abstimed_lock (mtxp->__lock, tsp, flags, clockid)) == 0)
mtx_set_owner (mtxp, self, flags);
break;
diff --git a/sysdeps/mach/hurd/htl/pt-mutex-trylock.c b/sysdeps/mach/hurd/htl/pt-mutex-trylock.c
index f883ec3f30..62183b0299 100644
--- a/sysdeps/mach/hurd/htl/pt-mutex-trylock.c
+++ b/sysdeps/mach/hurd/htl/pt-mutex-trylock.c
@@ -32,7 +32,7 @@ __pthread_mutex_trylock (pthread_mutex_t *mtxp)
switch (MTX_TYPE (mtxp))
{
case PT_MTX_NORMAL:
- ret = lll_trylock (&mtxp->__lock);
+ ret = lll_trylock (mtxp->__lock);
if (ret)
ret = EBUSY;
break;
@@ -47,7 +47,7 @@ __pthread_mutex_trylock (pthread_mutex_t *mtxp)
++mtxp->__cnt;
ret = 0;
}
- else if ((ret = lll_trylock (&mtxp->__lock)) == 0)
+ else if ((ret = lll_trylock (mtxp->__lock)) == 0)
{
mtx_set_owner (mtxp, self, mtxp->__flags);
mtxp->__cnt = 1;
@@ -59,7 +59,7 @@ __pthread_mutex_trylock (pthread_mutex_t *mtxp)
case PT_MTX_ERRORCHECK:
self = _pthread_self ();
- if ((ret = lll_trylock (&mtxp->__lock)) == 0)
+ if ((ret = lll_trylock (mtxp->__lock)) == 0)
mtx_set_owner (mtxp, self, mtxp->__flags);
else
ret = EBUSY;
@@ -69,7 +69,7 @@ __pthread_mutex_trylock (pthread_mutex_t *mtxp)
case PT_MTX_RECURSIVE | PTHREAD_MUTEX_ROBUST:
case PT_MTX_ERRORCHECK | PTHREAD_MUTEX_ROBUST:
self = _pthread_self ();
- ROBUST_LOCK (self, mtxp, __lll_robust_trylock);
+ ROBUST_LOCK (self, mtxp, lll_robust_trylock);
break;
default:
diff --git a/sysdeps/mach/hurd/htl/pt-mutex-unlock.c b/sysdeps/mach/hurd/htl/pt-mutex-unlock.c
index aabe9eafbb..f2e87a7c6f 100644
--- a/sysdeps/mach/hurd/htl/pt-mutex-unlock.c
+++ b/sysdeps/mach/hurd/htl/pt-mutex-unlock.c
@@ -32,7 +32,7 @@ __pthread_mutex_unlock (pthread_mutex_t *mtxp)
switch (MTX_TYPE (mtxp))
{
case PT_MTX_NORMAL:
- lll_unlock (&mtxp->__lock, flags);
+ lll_unlock (mtxp->__lock, flags);
break;
case PT_MTX_RECURSIVE:
@@ -42,7 +42,7 @@ __pthread_mutex_unlock (pthread_mutex_t *mtxp)
else if (--mtxp->__cnt == 0)
{
mtxp->__owner_id = mtxp->__shpid = 0;
- lll_unlock (&mtxp->__lock, flags);
+ lll_unlock (mtxp->__lock, flags);
}
break;
@@ -54,7 +54,7 @@ __pthread_mutex_unlock (pthread_mutex_t *mtxp)
else
{
mtxp->__owner_id = mtxp->__shpid = 0;
- lll_unlock (&mtxp->__lock, flags);
+ lll_unlock (mtxp->__lock, flags);
}
break;
@@ -74,7 +74,7 @@ __pthread_mutex_unlock (pthread_mutex_t *mtxp)
* state, mark it as irrecoverable. */
mtxp->__owner_id = ((mtxp->__lock & LLL_DEAD_OWNER)
? NOTRECOVERABLE_ID : 0);
- __lll_robust_unlock (&mtxp->__lock, flags);
+ lll_robust_unlock (mtxp->__lock, flags);
}
break;
diff --git a/sysdeps/mach/hurd/htl/pt-mutex.h b/sysdeps/mach/hurd/htl/pt-mutex.h
index 578478fcaf..ead7c91c4f 100644
--- a/sysdeps/mach/hurd/htl/pt-mutex.h
+++ b/sysdeps/mach/hurd/htl/pt-mutex.h
@@ -42,7 +42,7 @@
return EDEADLK; \
} \
\
- ret = cb (&mtxp->__lock, ##__VA_ARGS__); \
+ ret = cb (mtxp->__lock, ##__VA_ARGS__); \
if (ret == 0 || ret == EOWNERDEAD) \
{ \
if (mtxp->__owner_id == ENOTRECOVERABLE) \
diff --git a/sysdeps/mach/hurd/setpgid.c b/sysdeps/mach/hurd/setpgid.c
index 4bb90c48c7..41562b77e5 100644
--- a/sysdeps/mach/hurd/setpgid.c
+++ b/sysdeps/mach/hurd/setpgid.c
@@ -39,7 +39,7 @@ __setpgid (pid_t pid, pid_t pgid)
/* Synchronize with the signal thread to make sure we have
received and processed proc_newids before returning to the user. */
while (_hurd_pids_changed_stamp == stamp)
- lll_wait (&_hurd_pids_changed_stamp, stamp, 0);
+ lll_wait (_hurd_pids_changed_stamp, stamp, 0);
return 0;
diff --git a/sysdeps/mach/hurd/setsid.c b/sysdeps/mach/hurd/setsid.c
index b297473a86..f5c95a334e 100644
--- a/sysdeps/mach/hurd/setsid.c
+++ b/sysdeps/mach/hurd/setsid.c
@@ -56,7 +56,7 @@ __setsid (void)
returned by `getpgrp ()' in other threads) has been updated before
we return. */
while (_hurd_pids_changed_stamp == stamp)
- lll_wait (&_hurd_pids_changed_stamp, stamp, 0);
+ lll_wait (_hurd_pids_changed_stamp, stamp, 0);
}
HURD_CRITICAL_END;
diff --git a/sysdeps/mach/hurd/tls.h b/sysdeps/mach/hurd/tls.h
index a6a3586785..a0d70c2026 100644
--- a/sysdeps/mach/hurd/tls.h
+++ b/sysdeps/mach/hurd/tls.h
@@ -60,7 +60,7 @@
#define THREAD_GSCOPE_RESET_FLAG() \
do \
if (atomic_exchange_and_add_rel (&GL(dl_thread_gscope_count), -1) == 1) \
- lll_wake (&GL(dl_thread_gscope_count), 0); \
+ lll_wake (GL(dl_thread_gscope_count), 0); \
while (0)
#define THREAD_GSCOPE_WAIT() \
do \
@@ -68,7 +68,7 @@
int count; \
atomic_write_barrier (); \
while ((count = GL(dl_thread_gscope_count))) \
- lll_wait (&GL(dl_thread_gscope_count), count, 0); \
+ lll_wait (GL(dl_thread_gscope_count), count, 0); \
} \
while (0)
diff --git a/sysdeps/mach/libc-lock.h b/sysdeps/mach/libc-lock.h
index 3993a57b26..d9a2c42ebe 100644
--- a/sysdeps/mach/libc-lock.h
+++ b/sysdeps/mach/libc-lock.h
@@ -74,14 +74,14 @@ typedef struct __libc_lock_recursive_opaque__ __libc_lock_recursive_t;
/* Lock the named lock variable. */
#define __libc_lock_lock(NAME) \
- ({ lll_lock (&(NAME), 0); 0; })
+ ({ lll_lock ((NAME), 0); 0; })
/* Lock the named lock variable. */
-#define __libc_lock_trylock(NAME) lll_trylock (&(NAME))
+#define __libc_lock_trylock(NAME) lll_trylock (NAME)
/* Unlock the named lock variable. */
#define __libc_lock_unlock(NAME) \
- ({ lll_unlock (&(NAME), 0); 0; })
+ ({ lll_unlock ((NAME), 0); 0; })
#define __libc_lock_define_recursive(CLASS,NAME) \
CLASS __libc_lock_recursive_t NAME;
@@ -111,7 +111,7 @@ typedef struct __libc_lock_recursive_opaque__ __libc_lock_recursive_t;
int __r = 0; \
if (__self == __lock->owner) \
++__lock->cnt; \
- else if ((__r = lll_trylock (&__lock->lock)) == 0) \
+ else if ((__r = lll_trylock (__lock->lock)) == 0) \
__lock->owner = __self, __lock->cnt = 1; \
__r; \
})
@@ -122,7 +122,7 @@ typedef struct __libc_lock_recursive_opaque__ __libc_lock_recursive_t;
void *__self = __libc_lock_owner_self (); \
if (__self != __lock->owner) \
{ \
- lll_lock (&__lock->lock, 0); \
+ lll_lock (__lock->lock, 0); \
__lock->owner = __self; \
} \
++__lock->cnt; \
@@ -135,7 +135,7 @@ typedef struct __libc_lock_recursive_opaque__ __libc_lock_recursive_t;
if (--__lock->cnt == 0) \
{ \
__lock->owner = 0; \
- lll_unlock (&__lock->lock, 0); \
+ lll_unlock (__lock->lock, 0); \
} \
})
--
2.29.2
^ permalink raw reply [flat|nested] 8+ messages in thread
* [hurd,commited 3/7] hurd: Add __lll_abstimed_wait_intr
2020-12-16 0:59 [hurd,commited 0/7] hurd: add pshared semaphore support Samuel Thibault
2020-12-16 0:59 ` [hurd, commited 1/7] hurd: Rename LLL_INITIALIZER to LLL_LOCK_INITIALIZER Samuel Thibault
2020-12-16 0:59 ` [hurd, commited 2/7] hurd: make lll_* take a variable instead of a ptr Samuel Thibault
@ 2020-12-16 0:59 ` Samuel Thibault
2020-12-16 0:59 ` [hurd,commited 4/7] htl: Add futex-internal.h Samuel Thibault
` (3 subsequent siblings)
6 siblings, 0 replies; 8+ messages in thread
From: Samuel Thibault @ 2020-12-16 0:59 UTC (permalink / raw)
To: libc-alpha; +Cc: Samuel Thibault, commit-hurd
For semaphores, we need an interruptible version of low-level locks.
---
hurd/Makefile | 1 +
hurd/RPC_gsync_wait_intr.c | 4 ++++
hurd/Versions | 2 +-
hurd/hurdlock.c | 11 +++++++++++
hurd/hurdlock.h | 16 ++++++++++++++++
mach/lowlevellock.h | 18 ++++++++++++++++++
6 files changed, 51 insertions(+), 1 deletion(-)
create mode 100644 hurd/RPC_gsync_wait_intr.c
diff --git a/hurd/Makefile b/hurd/Makefile
index 861bbf7842..02b2456aa0 100644
--- a/hurd/Makefile
+++ b/hurd/Makefile
@@ -93,6 +93,7 @@ CFLAGS-RPC_exec_startup_get_info.o = $(no-stack-protector)
# Make generated headers compatible with all support standards
migheaderpipe := | sed -e 's/\<ino64_t\>/__ino64_t/' -e 's/\<loff_t\>/__loff_t/'
include ../mach/Machrules
+libhurduser-routines += RPC_gsync_wait_intr
include ../Rules
\f
# intr-rpc.defs defines the INTR_INTERFACE macro to make the generated RPC
diff --git a/hurd/RPC_gsync_wait_intr.c b/hurd/RPC_gsync_wait_intr.c
new file mode 100644
index 0000000000..51b63217d6
--- /dev/null
+++ b/hurd/RPC_gsync_wait_intr.c
@@ -0,0 +1,4 @@
+#include "intr-rpc.h"
+#define gsync_wait gsync_wait_intr
+#define __gsync_wait __gsync_wait_intr
+#include "RPC_gsync_wait.c"
diff --git a/hurd/Versions b/hurd/Versions
index f37e359ac8..1aad27d91f 100644
--- a/hurd/Versions
+++ b/hurd/Versions
@@ -149,7 +149,7 @@ libc {
GLIBC_PRIVATE {
# Used by other libs.
- __lll_abstimed_wait; __lll_abstimed_xwait;
+ __lll_abstimed_wait; __lll_abstimed_wait_intr; __lll_abstimed_xwait;
__lll_abstimed_lock; __lll_robust_lock;
__lll_robust_abstimed_lock; __lll_robust_trylock;
__lll_robust_unlock;
diff --git a/hurd/hurdlock.c b/hurd/hurdlock.c
index 3b9974bee5..e2a5312036 100644
--- a/hurd/hurdlock.c
+++ b/hurd/hurdlock.c
@@ -54,6 +54,17 @@ __lll_abstimed_wait (void *ptr, int val,
return mlsec < 0 ? KERN_TIMEDOUT : __lll_timed_wait (ptr, val, mlsec, flags);
}
+int
+__lll_abstimed_wait_intr (void *ptr, int val,
+ const struct timespec *tsp, int flags, int clk)
+{
+ if (clk != CLOCK_REALTIME)
+ return EINVAL;
+
+ int mlsec = compute_reltime (tsp, clk);
+ return mlsec < 0 ? KERN_TIMEDOUT : __lll_timed_wait_intr (ptr, val, mlsec, flags);
+}
+
int
__lll_abstimed_xwait (void *ptr, int lo, int hi,
const struct timespec *tsp, int flags, int clk)
diff --git a/hurd/hurdlock.h b/hurd/hurdlock.h
index c1df42bea4..0a7f6eaf90 100644
--- a/hurd/hurdlock.h
+++ b/hurd/hurdlock.h
@@ -40,6 +40,11 @@ struct timespec;
__gsync_wait (__mach_task_self (), \
(vm_offset_t)ptr, val, 0, mlsec, flags | GSYNC_TIMED)
+/* Interruptible version. */
+#define __lll_timed_wait_intr(ptr, val, mlsec, flags) \
+ __gsync_wait_intr (__mach_task_self (), \
+ (vm_offset_t)ptr, val, 0, mlsec, flags | GSYNC_TIMED)
+
/* Same as '__lll_xwait', but only block for MLSEC milliseconds. */
#define __lll_timed_xwait(ptr, lo, hi, mlsec, flags) \
__gsync_wait (__mach_task_self (), (vm_offset_t)ptr, \
@@ -50,6 +55,10 @@ struct timespec;
extern int __lll_abstimed_wait (void *__ptr, int __val,
const struct timespec *__tsp, int __flags, int __clk);
+/* Interruptible version. */
+extern int __lll_abstimed_wait_intr (void *__ptr, int __val,
+ const struct timespec *__tsp, int __flags, int __clk);
+
/* Same as 'lll_xwait', but only block until TSP elapses,
using clock CLK. */
extern int __lll_abstimed_xwait (void *__ptr, int __lo, int __hi,
@@ -106,6 +115,13 @@ extern void __lll_robust_unlock (void *__ptr, int __flags);
__clk[sizeof (__clk) / sizeof (__clk[0]) - 1]); \
})
+#define lll_abstimed_wait_intr(var, val, tsp, flags, ...) \
+ ({ \
+ const clockid_t __clk[] = { CLOCK_REALTIME, ##__VA_ARGS__ }; \
+ __lll_abstimed_wait_intr (&(var), (val), (tsp), (flags), \
+ __clk[sizeof (__clk) / sizeof (__clk[0]) - 1]); \
+ })
+
#define lll_abstimed_xwait(var, lo, hi, tsp, flags, ...) \
({ \
const clockid_t __clk[] = { CLOCK_REALTIME, ##__VA_ARGS__ }; \
diff --git a/mach/lowlevellock.h b/mach/lowlevellock.h
index 0a22a030b4..b872d0fe1e 100644
--- a/mach/lowlevellock.h
+++ b/mach/lowlevellock.h
@@ -34,6 +34,17 @@
/* Static initializer for low-level locks. */
#define LLL_LOCK_INITIALIZER 0
+/* Interruptible version of __gsync_wait. */
+extern kern_return_t __gsync_wait_intr
+(
+ mach_port_t task,
+ vm_offset_t addr,
+ unsigned val1,
+ unsigned val2,
+ natural_t msec,
+ int flags
+);
+
/* Wait on address PTR, without blocking if its contents
* are different from VAL. */
#define __lll_wait(ptr, val, flags) \
@@ -42,6 +53,13 @@
#define lll_wait(var, val, flags) \
__lll_wait (&(var), val, flags)
+/* Interruptible version. */
+#define __lll_wait_intr(ptr, val, flags) \
+ __gsync_wait_intr (__mach_task_self (), \
+ (vm_offset_t)(ptr), (val), 0, 0, (flags))
+#define lll_wait_intr(var, val, flags) \
+ __lll_wait_intr ((&var), val, flags)
+
/* Wake one or more threads waiting on address PTR. */
#define __lll_wake(ptr, flags) \
__gsync_wake (__mach_task_self (), (vm_offset_t)(ptr), 0, (flags))
--
2.29.2
^ permalink raw reply [flat|nested] 8+ messages in thread
* [hurd,commited 4/7] htl: Add futex-internal.h
2020-12-16 0:59 [hurd,commited 0/7] hurd: add pshared semaphore support Samuel Thibault
` (2 preceding siblings ...)
2020-12-16 0:59 ` [hurd,commited 3/7] hurd: Add __lll_abstimed_wait_intr Samuel Thibault
@ 2020-12-16 0:59 ` Samuel Thibault
2020-12-16 0:59 ` [hurd,commited 5/7] hurd: Add __libc_open and __libc_close Samuel Thibault
` (2 subsequent siblings)
6 siblings, 0 replies; 8+ messages in thread
From: Samuel Thibault @ 2020-12-16 0:59 UTC (permalink / raw)
To: libc-alpha; +Cc: Samuel Thibault, commit-hurd
That provides futex_supports_pshared.
---
sysdeps/htl/futex-internal.h | 39 ++++++++++++++++++++++++++++++++++++
1 file changed, 39 insertions(+)
create mode 100644 sysdeps/htl/futex-internal.h
diff --git a/sysdeps/htl/futex-internal.h b/sysdeps/htl/futex-internal.h
new file mode 100644
index 0000000000..2b7a13aff0
--- /dev/null
+++ b/sysdeps/htl/futex-internal.h
@@ -0,0 +1,39 @@
+/* futex operations for glibc-internal use. Stub version; do not include
+ this file directly.
+ Copyright (C) 2014-2020 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <https://www.gnu.org/licenses/>. */
+
+#ifndef STUB_FUTEX_INTERNAL_H
+#define STUB_FUTEX_INTERNAL_H
+
+#include <pthread.h>
+
+/* Returns EINVAL if PSHARED is neither PTHREAD_PROCESS_PRIVATE nor
+ PTHREAD_PROCESS_SHARED; otherwise, returns 0 if PSHARED is supported, and
+ ENOTSUP if not. */
+static __always_inline int
+futex_supports_pshared (int pshared)
+{
+ if (__glibc_likely (pshared == PTHREAD_PROCESS_PRIVATE))
+ return 0;
+ else if (pshared == PTHREAD_PROCESS_SHARED)
+ return 0;
+ else
+ return EINVAL;
+}
+
+#endif /* futex-internal.h */
--
2.29.2
^ permalink raw reply [flat|nested] 8+ messages in thread
* [hurd,commited 5/7] hurd: Add __libc_open and __libc_close
2020-12-16 0:59 [hurd,commited 0/7] hurd: add pshared semaphore support Samuel Thibault
` (3 preceding siblings ...)
2020-12-16 0:59 ` [hurd,commited 4/7] htl: Add futex-internal.h Samuel Thibault
@ 2020-12-16 0:59 ` Samuel Thibault
2020-12-16 0:59 ` [hurd,commited 6/7] hurd: Add LLL_PRIVATE and LLL_SHARED Samuel Thibault
2020-12-16 0:59 ` [hurd,commited 7/7] htl: Add pshared semaphore support Samuel Thibault
6 siblings, 0 replies; 8+ messages in thread
From: Samuel Thibault @ 2020-12-16 0:59 UTC (permalink / raw)
To: libc-alpha; +Cc: Samuel Thibault, commit-hurd
Needed by libpthread for sem_open and sem_close
---
hurd/Versions | 1 +
sysdeps/mach/hurd/close.c | 1 +
2 files changed, 2 insertions(+)
diff --git a/hurd/Versions b/hurd/Versions
index 1aad27d91f..b66d37be07 100644
--- a/hurd/Versions
+++ b/hurd/Versions
@@ -153,6 +153,7 @@ libc {
__lll_abstimed_lock; __lll_robust_lock;
__lll_robust_abstimed_lock; __lll_robust_trylock;
__lll_robust_unlock;
+ __libc_open; __libc_close;
# Used by libpthread.
_hurd_sigstate_set_global_rcv;
diff --git a/sysdeps/mach/hurd/close.c b/sysdeps/mach/hurd/close.c
index b461087447..b6530dfeee 100644
--- a/sysdeps/mach/hurd/close.c
+++ b/sysdeps/mach/hurd/close.c
@@ -35,4 +35,5 @@ __close (int fd)
return err ? __hurd_fail (err) : 0;
}
libc_hidden_def (__close)
+strong_alias (__close, __libc_close)
weak_alias (__close, close)
--
2.29.2
^ permalink raw reply [flat|nested] 8+ messages in thread
* [hurd,commited 6/7] hurd: Add LLL_PRIVATE and LLL_SHARED
2020-12-16 0:59 [hurd,commited 0/7] hurd: add pshared semaphore support Samuel Thibault
` (4 preceding siblings ...)
2020-12-16 0:59 ` [hurd,commited 5/7] hurd: Add __libc_open and __libc_close Samuel Thibault
@ 2020-12-16 0:59 ` Samuel Thibault
2020-12-16 0:59 ` [hurd,commited 7/7] htl: Add pshared semaphore support Samuel Thibault
6 siblings, 0 replies; 8+ messages in thread
From: Samuel Thibault @ 2020-12-16 0:59 UTC (permalink / raw)
To: libc-alpha; +Cc: Samuel Thibault, commit-hurd
---
mach/lowlevellock.h | 3 +++
1 file changed, 3 insertions(+)
diff --git a/mach/lowlevellock.h b/mach/lowlevellock.h
index b872d0fe1e..9798d153cf 100644
--- a/mach/lowlevellock.h
+++ b/mach/lowlevellock.h
@@ -34,6 +34,9 @@
/* Static initializer for low-level locks. */
#define LLL_LOCK_INITIALIZER 0
+#define LLL_PRIVATE 0
+#define LLL_SHARED SYNC_SHARED
+
/* Interruptible version of __gsync_wait. */
extern kern_return_t __gsync_wait_intr
(
--
2.29.2
^ permalink raw reply [flat|nested] 8+ messages in thread
* [hurd,commited 7/7] htl: Add pshared semaphore support
2020-12-16 0:59 [hurd,commited 0/7] hurd: add pshared semaphore support Samuel Thibault
` (5 preceding siblings ...)
2020-12-16 0:59 ` [hurd,commited 6/7] hurd: Add LLL_PRIVATE and LLL_SHARED Samuel Thibault
@ 2020-12-16 0:59 ` Samuel Thibault
6 siblings, 0 replies; 8+ messages in thread
From: Samuel Thibault @ 2020-12-16 0:59 UTC (permalink / raw)
To: libc-alpha; +Cc: Samuel Thibault, commit-hurd
The implementation is extremely similar to the nptl implementation, but
with slight differences in the futex interface. This fixes some of BZ
25521.
---
htl/Makefile | 2 +-
htl/pt-internal.h | 33 ++++
sysdeps/htl/bits/semaphore.h | 20 +--
sysdeps/htl/sem-destroy.c | 10 +-
sysdeps/htl/sem-getvalue.c | 10 +-
sysdeps/htl/sem-init.c | 10 +-
sysdeps/htl/sem-post.c | 54 +++----
sysdeps/htl/sem-timedwait.c | 263 +++++++++++++++++---------------
sysdeps/htl/sem-trywait.c | 15 +-
sysdeps/htl/sem-waitfast.c | 55 +++++++
sysdeps/mach/hurd/i386/Makefile | 1 -
11 files changed, 287 insertions(+), 186 deletions(-)
create mode 100644 sysdeps/htl/sem-waitfast.c
diff --git a/htl/Makefile b/htl/Makefile
index 326a920fb3..901deae5f9 100644
--- a/htl/Makefile
+++ b/htl/Makefile
@@ -130,7 +130,7 @@ libpthread-routines := pt-attr pt-attr-destroy pt-attr-getdetachstate \
\
sem-close sem-destroy sem-getvalue sem-init sem-open \
sem-post sem-timedwait sem-trywait sem-unlink \
- sem-wait \
+ sem-wait sem-waitfast \
\
shm-directory \
\
diff --git a/htl/pt-internal.h b/htl/pt-internal.h
index 9dffa0e32e..62204d79e5 100644
--- a/htl/pt-internal.h
+++ b/htl/pt-internal.h
@@ -331,4 +331,37 @@ extern const struct __pthread_rwlockattr __pthread_default_rwlockattr;
/* Default condition attributes. */
extern const struct __pthread_condattr __pthread_default_condattr;
+/* Semaphore encoding.
+ See nptl implementation for the details. */
+struct new_sem
+{
+#if __HAVE_64B_ATOMICS
+ /* The data field holds both value (in the least-significant 32 bits) and
+ nwaiters. */
+# if __BYTE_ORDER == __LITTLE_ENDIAN
+# define SEM_VALUE_OFFSET 0
+# elif __BYTE_ORDER == __BIG_ENDIAN
+# define SEM_VALUE_OFFSET 1
+# else
+# error Unsupported byte order.
+# endif
+# define SEM_NWAITERS_SHIFT 32
+# define SEM_VALUE_MASK (~(unsigned int)0)
+ uint64_t data;
+ int pshared;
+#define __SEMAPHORE_INITIALIZER(value, pshared) \
+ { (value), (pshared) }
+#else
+# define SEM_VALUE_SHIFT 1
+# define SEM_NWAITERS_MASK ((unsigned int)1)
+ unsigned int value;
+ unsigned int nwaiters;
+ int pshared;
+#define __SEMAPHORE_INITIALIZER(value, pshared) \
+ { (value) << SEM_VALUE_SHIFT, 0, (pshared) }
+#endif
+};
+
+extern int __sem_waitfast (struct new_sem *isem, int definitive_result);
+
#endif /* pt-internal.h */
diff --git a/sysdeps/htl/bits/semaphore.h b/sysdeps/htl/bits/semaphore.h
index 8611bac5ce..77a2be13d3 100644
--- a/sysdeps/htl/bits/semaphore.h
+++ b/sysdeps/htl/bits/semaphore.h
@@ -27,21 +27,15 @@
#include <bits/pthread.h>
/* User visible part of a semaphore. */
-struct __semaphore
-{
- __pthread_spinlock_t __lock;
- struct __pthread *__queue;
- int __pshared;
- int __value;
- void *__data;
-};
-typedef struct __semaphore sem_t;
+#define __SIZEOF_SEM_T 20
-#define SEM_FAILED ((void *) 0)
+typedef union
+{
+ char __size[__SIZEOF_SEM_T];
+ long int __align;
+} sem_t;
-/* Initializer for a semaphore. */
-#define __SEMAPHORE_INITIALIZER(pshared, value) \
- { __PTHREAD_SPIN_LOCK_INITIALIZER, NULL, (pshared), (value), NULL }
+#define SEM_FAILED ((void *) 0)
#endif /* bits/semaphore.h */
diff --git a/sysdeps/htl/sem-destroy.c b/sysdeps/htl/sem-destroy.c
index 4caa004444..ebfeb2a0e6 100644
--- a/sysdeps/htl/sem-destroy.c
+++ b/sysdeps/htl/sem-destroy.c
@@ -24,7 +24,15 @@
int
__sem_destroy (sem_t *sem)
{
- if (sem->__queue)
+ struct new_sem *isem = (struct new_sem *) sem;
+ if (
+#if __HAVE_64B_ATOMICS
+ atomic_load_relaxed (&isem->data) >> SEM_NWAITERS_SHIFT
+#else
+ atomic_load_relaxed (&isem->value) & SEM_NWAITERS_MASK
+ || isem->nwaiters
+#endif
+ )
/* There are threads waiting on *SEM. */
{
errno = EBUSY;
diff --git a/sysdeps/htl/sem-getvalue.c b/sysdeps/htl/sem-getvalue.c
index 2d72a63824..728f763f9e 100644
--- a/sysdeps/htl/sem-getvalue.c
+++ b/sysdeps/htl/sem-getvalue.c
@@ -22,9 +22,13 @@
int
__sem_getvalue (sem_t *restrict sem, int *restrict value)
{
- __pthread_spin_wait (&sem->__lock);
- *value = sem->__value;
- __pthread_spin_unlock (&sem->__lock);
+ struct new_sem *isem = (struct new_sem *) sem;
+
+#if __HAVE_64B_ATOMICS
+ *value = atomic_load_relaxed (&isem->data) & SEM_VALUE_MASK;
+#else
+ *value = atomic_load_relaxed (&isem->value) >> SEM_VALUE_SHIFT;
+#endif
return 0;
}
diff --git a/sysdeps/htl/sem-init.c b/sysdeps/htl/sem-init.c
index 2be6ab449b..196846d311 100644
--- a/sysdeps/htl/sem-init.c
+++ b/sysdeps/htl/sem-init.c
@@ -24,12 +24,6 @@
int
__sem_init (sem_t *sem, int pshared, unsigned value)
{
- if (pshared != 0)
- {
- errno = EOPNOTSUPP;
- return -1;
- }
-
#ifdef SEM_VALUE_MAX
if (value > SEM_VALUE_MAX)
{
@@ -38,7 +32,9 @@ __sem_init (sem_t *sem, int pshared, unsigned value)
}
#endif
- *sem = (sem_t) __SEMAPHORE_INITIALIZER (pshared, value);
+ struct new_sem *isem = (struct new_sem *) sem;
+
+ *isem = (struct new_sem) __SEMAPHORE_INITIALIZER (value, pshared);
return 0;
}
diff --git a/sysdeps/htl/sem-post.c b/sysdeps/htl/sem-post.c
index 720b73a059..83a3279c84 100644
--- a/sysdeps/htl/sem-post.c
+++ b/sysdeps/htl/sem-post.c
@@ -19,48 +19,50 @@
#include <semaphore.h>
#include <assert.h>
+#include <hurdlock.h>
+
#include <pt-internal.h>
int
__sem_post (sem_t *sem)
{
- struct __pthread *wakeup;
+ struct new_sem *isem = (struct new_sem *) sem;
+ int flags = isem->pshared ? GSYNC_SHARED : 0;
+
+#if __HAVE_64B_ATOMICS
+ uint64_t d = atomic_load_relaxed (&isem->data);
- __pthread_spin_wait (&sem->__lock);
- if (sem->__value > 0)
- /* Do a quick up. */
+ do
{
- if (sem->__value == SEM_VALUE_MAX)
+ if ((d & SEM_VALUE_MASK) == SEM_VALUE_MAX)
{
- __pthread_spin_unlock (&sem->__lock);
errno = EOVERFLOW;
return -1;
}
-
- assert (sem->__queue == NULL);
- sem->__value++;
- __pthread_spin_unlock (&sem->__lock);
- return 0;
}
+ while (!atomic_compare_exchange_weak_release (&isem->data, &d, d + 1));
- if (sem->__queue == NULL)
- /* No one waiting. */
+ if ((d >> SEM_NWAITERS_SHIFT) != 0)
+ /* Wake one waiter. */
+ __lll_wake (((unsigned int *) &isem->data) + SEM_VALUE_OFFSET, flags);
+#else
+ unsigned int v = atomic_load_relaxed (&isem->value);
+
+ do
{
- sem->__value = 1;
- __pthread_spin_unlock (&sem->__lock);
- return 0;
+ if ((v >> SEM_VALUE_SHIFT) == SEM_VALUE_MAX)
+ {
+ errno = EOVERFLOW;
+ return -1;
+ }
}
+ while (!atomic_compare_exchange_weak_release
+ (&isem->value, &v, v + (1 << SEM_VALUE_SHIFT)));
- /* Wake someone up. */
-
- /* First dequeue someone. */
- wakeup = sem->__queue;
- __pthread_dequeue (wakeup);
-
- /* Then drop the lock and transfer control. */
- __pthread_spin_unlock (&sem->__lock);
-
- __pthread_wakeup (wakeup);
+ if ((v & SEM_NWAITERS_MASK) != 0)
+ /* Wake one waiter. */
+ __lll_wake (&isem->value, flags);
+#endif
return 0;
}
diff --git a/sysdeps/htl/sem-timedwait.c b/sysdeps/htl/sem-timedwait.c
index 5095d49b28..4afccd88fc 100644
--- a/sysdeps/htl/sem-timedwait.c
+++ b/sysdeps/htl/sem-timedwait.c
@@ -20,37 +20,27 @@
#include <errno.h>
#include <assert.h>
#include <time.h>
+#include <hurdlock.h>
+#include <hurd/hurd.h>
+#include <sysdep-cancel.h>
#include <pt-internal.h>
-struct cancel_ctx
-{
- struct __pthread *wakeup;
- sem_t *sem;
- int cancel_wake;
-};
+#if !__HAVE_64B_ATOMICS
+static void
+__sem_wait_32_finish (struct new_sem *isem);
+#endif
static void
-cancel_hook (void *arg)
+__sem_wait_cleanup (void *arg)
{
- struct cancel_ctx *ctx = arg;
- struct __pthread *wakeup = ctx->wakeup;
- sem_t *sem = ctx->sem;
- int unblock;
-
- __pthread_spin_wait (&sem->__lock);
- /* The thread only needs to be awaken if it's blocking or about to block.
- If it was already unblocked, it's not queued any more. */
- unblock = wakeup->prevp != NULL;
- if (unblock)
- {
- __pthread_dequeue (wakeup);
- ctx->cancel_wake = 1;
- }
- __pthread_spin_unlock (&sem->__lock);
+ struct new_sem *isem = arg;
- if (unblock)
- __pthread_wakeup (wakeup);
+#if __HAVE_64B_ATOMICS
+ atomic_fetch_add_relaxed (&isem->data, -((uint64_t) 1 << SEM_NWAITERS_SHIFT));
+#else
+ __sem_wait_32_finish (isem);
+#endif
}
int
@@ -58,123 +48,148 @@ __sem_timedwait_internal (sem_t *restrict sem,
clockid_t clock_id,
const struct timespec *restrict timeout)
{
- error_t err;
- int cancelled, oldtype, drain;
- int ret = 0;
-
- struct __pthread *self = _pthread_self ();
- struct cancel_ctx ctx;
- ctx.wakeup = self;
- ctx.sem = sem;
- ctx.cancel_wake = 0;
-
- /* Test for a pending cancellation request, switch to deferred mode for
- safer resource handling, and prepare the hook to call in case we're
- cancelled while blocking. Once CANCEL_LOCK is released, the cancellation
- hook can be called by another thread at any time. Whatever happens,
- this function must exit with MUTEX locked.
-
- This function contains inline implementations of pthread_testcancel and
- pthread_setcanceltype to reduce locking overhead. */
- __pthread_mutex_lock (&self->cancel_lock);
- cancelled = (self->cancel_state == PTHREAD_CANCEL_ENABLE)
- && self->cancel_pending;
-
- if (cancelled)
- {
- __pthread_mutex_unlock (&self->cancel_lock);
- __pthread_exit (PTHREAD_CANCELED);
- }
+ struct new_sem *isem = (struct new_sem *) sem;
+ int err, ret = 0;
+ int flags = isem->pshared ? GSYNC_SHARED : 0;
- self->cancel_hook = cancel_hook;
- self->cancel_hook_arg = &ctx;
- oldtype = self->cancel_type;
+ __pthread_testcancel ();
- if (oldtype != PTHREAD_CANCEL_DEFERRED)
- self->cancel_type = PTHREAD_CANCEL_DEFERRED;
+ if (__sem_waitfast (isem, 0) == 0)
+ return 0;
- /* Add ourselves to the list of waiters. This is done while setting
- the cancellation hook to simplify the cancellation procedure, i.e.
- if the thread is queued, it can be cancelled, otherwise it is
- already unblocked, progressing on the return path. */
- __pthread_spin_wait (&sem->__lock);
- if (sem->__value > 0)
- /* Successful down. */
- {
- sem->__value--;
- __pthread_spin_unlock (&sem->__lock);
- goto out_locked;
- }
+ int cancel_oldtype = LIBC_CANCEL_ASYNC();
- if (timeout != NULL && ! valid_nanoseconds (timeout->tv_nsec))
- {
- errno = EINVAL;
- ret = -1;
- __pthread_spin_unlock (&sem->__lock);
- goto out_locked;
- }
+#if __HAVE_64B_ATOMICS
+ uint64_t d = atomic_fetch_add_relaxed (&sem->data,
+ (uint64_t) 1 << SEM_NWAITERS_SHIFT);
+
+ pthread_cleanup_push (__sem_wait_cleanup, isem);
- /* Add ourselves to the queue. */
- __pthread_enqueue (&sem->__queue, self);
- __pthread_spin_unlock (&sem->__lock);
-
- __pthread_mutex_unlock (&self->cancel_lock);
-
- /* Block the thread. */
- if (timeout != NULL)
- err = __pthread_timedblock_intr (self, timeout, clock_id);
- else
- err = __pthread_block_intr (self);
-
- __pthread_spin_wait (&sem->__lock);
- if (self->prevp == NULL)
- /* Another thread removed us from the queue, which means a wakeup message
- has been sent. It was either consumed while we were blocking, or
- queued after we timed out and before we acquired the semaphore lock, in
- which case the message queue must be drained. */
- drain = err ? 1 : 0;
- else
+ for (;;)
{
- /* We're still in the queue. Noone attempted to wake us up, i.e. we
- timed out. */
- __pthread_dequeue (self);
- drain = 0;
+ if ((d & SEM_VALUE_MASK) == 0)
+ {
+ /* No token, sleep. */
+ if (timeout)
+ err = __lll_abstimed_wait_intr (
+ ((unsigned int *) &sem->data) + SEM_VALUE_OFFSET,
+ 0, timeout, flags, clock_id);
+ else
+ err = __lll_wait_intr (
+ ((unsigned int *) &sem->data) + SEM_VALUE_OFFSET,
+ 0, flags);
+
+ if (err != 0)
+ {
+ /* Error, interruption or timeout, abort. */
+ if (err == KERN_TIMEDOUT)
+ err = ETIMEDOUT;
+ if (err == KERN_INTERRUPTED)
+ err = EINTR;
+ ret = __hurd_fail (err);
+ __sem_wait_cleanup (isem);
+ break;
+ }
+
+ /* Token changed */
+ d = atomic_load_relaxed (&sem->data);
+ }
+ else
+ {
+ /* Try to acquire and dequeue. */
+ if (atomic_compare_exchange_weak_acquire (&sem->data,
+ &d, d - 1 - ((uint64_t) 1 << SEM_NWAITERS_SHIFT)))
+ {
+ /* Success */
+ ret = 0;
+ break;
+ }
+ }
}
- __pthread_spin_unlock (&sem->__lock);
- if (drain)
- __pthread_block (self);
+ pthread_cleanup_pop (0);
+#else
+ unsigned int v;
+
+ atomic_fetch_add_acquire (&isem->nwaiters, 1);
- if (err)
+ pthread_cleanup_push (__sem_wait_cleanup, isem);
+
+ v = atomic_load_relaxed (&isem->value);
+ do
{
- assert (err == ETIMEDOUT || err == EINTR);
- errno = err;
- ret = -1;
+ do
+ {
+ do
+ {
+ if ((v & SEM_NWAITERS_MASK) != 0)
+ break;
+ }
+ while (!atomic_compare_exchange_weak_release (&isem->value,
+ &v, v | SEM_NWAITERS_MASK));
+
+ if ((v >> SEM_VALUE_SHIFT) == 0)
+ {
+ /* No token, sleep. */
+ if (timeout)
+ err = __lll_abstimed_wait_intr (&isem->value,
+ SEM_NWAITERS_MASK, timeout, flags, clock_id);
+ else
+ err = __lll_wait_intr (&isem->value,
+ SEM_NWAITERS_MASK, flags);
+
+ if (err != 0)
+ {
+ /* Error, interruption or timeout, abort. */
+ if (err == KERN_TIMEDOUT)
+ err = ETIMEDOUT;
+ if (err == KERN_INTERRUPTED)
+ err = EINTR;
+ ret = __hurd_fail (err);
+ goto error;
+ }
+
+ /* Token changed */
+ v = atomic_load_relaxed (&isem->value);
+ }
+ }
+ while ((v >> SEM_VALUE_SHIFT) == 0);
}
+ while (!atomic_compare_exchange_weak_acquire (&isem->value,
+ &v, v - (1 << SEM_VALUE_SHIFT)));
- /* We're almost done. Remove the unblock hook, restore the previous
- cancellation type, and check for a pending cancellation request. */
- __pthread_mutex_lock (&self->cancel_lock);
-out_locked:
- self->cancel_hook = NULL;
- self->cancel_hook_arg = NULL;
- self->cancel_type = oldtype;
- cancelled = (self->cancel_state == PTHREAD_CANCEL_ENABLE)
- && self->cancel_pending;
- __pthread_mutex_unlock (&self->cancel_lock);
-
- if (cancelled)
- {
- if (ret == 0 && ctx.cancel_wake == 0)
- /* We were cancelled while waking up with a token, put it back. */
- __sem_post (sem);
+error:
+ pthread_cleanup_pop (0);
- __pthread_exit (PTHREAD_CANCELED);
- }
+ __sem_wait_32_finish (isem);
+#endif
+
+ LIBC_CANCEL_RESET (cancel_oldtype);
return ret;
}
+#if !__HAVE_64B_ATOMICS
+/* Stop being a registered waiter (non-64b-atomics code only). */
+static void
+__sem_wait_32_finish (struct new_sem *isem)
+{
+ unsigned int wguess = atomic_load_relaxed (&isem->nwaiters);
+ if (wguess == 1)
+ atomic_fetch_and_acquire (&isem->value, ~SEM_NWAITERS_MASK);
+
+ unsigned int wfinal = atomic_fetch_add_release (&isem->nwaiters, -1);
+ if (wfinal > 1 && wguess == 1)
+ {
+ unsigned int v = atomic_fetch_or_relaxed (&isem->value,
+ SEM_NWAITERS_MASK);
+ v >>= SEM_VALUE_SHIFT;
+ while (v--)
+ __lll_wake (&isem->value, isem->pshared ? GSYNC_SHARED : 0);
+ }
+}
+#endif
+
int
__sem_clockwait (sem_t *sem, clockid_t clockid,
const struct timespec *restrict timeout)
diff --git a/sysdeps/htl/sem-trywait.c b/sysdeps/htl/sem-trywait.c
index 6a0633bfef..b9301963ab 100644
--- a/sysdeps/htl/sem-trywait.c
+++ b/sysdeps/htl/sem-trywait.c
@@ -24,18 +24,13 @@
int
__sem_trywait (sem_t *sem)
{
- __pthread_spin_wait (&sem->__lock);
- if (sem->__value > 0)
- /* Successful down. */
- {
- sem->__value--;
- __pthread_spin_unlock (&sem->__lock);
- return 0;
- }
- __pthread_spin_unlock (&sem->__lock);
+ struct new_sem *isem = (struct new_sem *) sem;
+
+ if (__sem_waitfast (isem, 1) == 0)
+ return 0;
errno = EAGAIN;
return -1;
}
-strong_alias (__sem_trywait, sem_trywait);
+weak_alias (__sem_trywait, sem_trywait);
diff --git a/sysdeps/htl/sem-waitfast.c b/sysdeps/htl/sem-waitfast.c
new file mode 100644
index 0000000000..7ece73da26
--- /dev/null
+++ b/sysdeps/htl/sem-waitfast.c
@@ -0,0 +1,55 @@
+/* Lock a semaphore if it does not require blocking. Generic version.
+ Copyright (C) 2005-2020 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <https://www.gnu.org/licenses/>. */
+
+#include <semaphore.h>
+#include <errno.h>
+
+#include <pt-internal.h>
+
+int
+__sem_waitfast (struct new_sem *isem, int definitive_result)
+{
+#if __HAVE_64B_ATOMICS
+ uint64_t d = atomic_load_relaxed (&isem->data);
+
+ do
+ {
+ if ((d & SEM_VALUE_MASK) == 0)
+ break;
+ if (atomic_compare_exchange_weak_acquire (&isem->data, &d, d - 1))
+ /* Successful down. */
+ return 0;
+ }
+ while (definitive_result);
+ return -1;
+#else
+ unsigned v = atomic_load_relaxed (&isem->value);
+
+ do
+ {
+ if ((v >> SEM_VALUE_SHIFT) == 0)
+ break;
+ if (atomic_compare_exchange_weak_acquire (&isem->value,
+ &v, v - (1 << SEM_VALUE_SHIFT)))
+ /* Successful down. */
+ return 0;
+ }
+ while (definitive_result);
+ return -1;
+#endif
+}
diff --git a/sysdeps/mach/hurd/i386/Makefile b/sysdeps/mach/hurd/i386/Makefile
index 8e5f12a533..d056e06278 100644
--- a/sysdeps/mach/hurd/i386/Makefile
+++ b/sysdeps/mach/hurd/i386/Makefile
@@ -116,7 +116,6 @@ test-xfail-tst-cond13 = yes
test-xfail-tst-cond23 = yes
test-xfail-tst-rwlock4 = yes
test-xfail-tst-rwlock12 = yes
-test-xfail-tst-sem3 = yes
test-xfail-tst-barrier2 = yes
test-xfail-tst-pututxline-cache = yes
test-xfail-tst-pututxline-lockfail = yes
--
2.29.2
^ permalink raw reply [flat|nested] 8+ messages in thread
end of thread, other threads:[~2020-12-16 0:59 UTC | newest]
Thread overview: 8+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-12-16 0:59 [hurd,commited 0/7] hurd: add pshared semaphore support Samuel Thibault
2020-12-16 0:59 ` [hurd, commited 1/7] hurd: Rename LLL_INITIALIZER to LLL_LOCK_INITIALIZER Samuel Thibault
2020-12-16 0:59 ` [hurd, commited 2/7] hurd: make lll_* take a variable instead of a ptr Samuel Thibault
2020-12-16 0:59 ` [hurd,commited 3/7] hurd: Add __lll_abstimed_wait_intr Samuel Thibault
2020-12-16 0:59 ` [hurd,commited 4/7] htl: Add futex-internal.h Samuel Thibault
2020-12-16 0:59 ` [hurd,commited 5/7] hurd: Add __libc_open and __libc_close Samuel Thibault
2020-12-16 0:59 ` [hurd,commited 6/7] hurd: Add LLL_PRIVATE and LLL_SHARED Samuel Thibault
2020-12-16 0:59 ` [hurd,commited 7/7] htl: Add pshared semaphore support Samuel Thibault
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for read-only IMAP folder(s) and NNTP newsgroup(s).