* [PATCH] Fix synchronization of TPP min/max priorities.
@ 2014-11-25 18:55 Torvald Riegel
2014-11-25 21:44 ` Roland McGrath
0 siblings, 1 reply; 2+ messages in thread
From: Torvald Riegel @ 2014-11-25 18:55 UTC (permalink / raw)
To: GLIBC Devel
[-- Attachment #1: Type: text/plain, Size: 597 bytes --]
This fixes a synchronization bug in nptl/tpp.c and consumers. The
previous code attempts to do at-least-once initialization using one of
the initialized values as a flag for initialization and an
atomic_write_barrier -- but none of the readers had a matching
atomic_read_barrier. This is not correct on weak-memory-model
architectures, or if the compiler reorders the accesses.
The new scheme fixes the synchronization and avoids the acquire barrier
by treating each of the two values as separate at-least-once initialized
values. See the code comments for details.
No regressions on x86_64. OK to commit?
[-- Attachment #2: atomics-fixes-tpp.patch --]
[-- Type: text/x-patch, Size: 9033 bytes --]
commit f4150b812bc02af8f801fcae8e0e4f9043468630
Author: Torvald Riegel <triegel@redhat.com>
Date: Tue Nov 25 19:48:56 2014 +0100
Fix synchronization of TPP min/max priorities.
* nptl/tpp.c (__init_sched_fifo_prio, __pthread_tpp_change_priority):
Change synchronization of __sched_fifo_min_prio and
__sched_fifo_max_prio.
* nptl/pthread_mutexattr_getprioceiling.c
(pthread_mutexattr_getprioceiling): Likewise.
* nptl/pthread_mutexattr_setprioceiling.c
(pthread_mutexattr_setprioceiling): Likewise.
* nptl/pthread_mutex_init.c (__pthread_mutex_init): Likewise.
* nptl/pthread_mutex_setprioceiling.c (pthread_mutex_setprioceiling):
Likewise.
diff --git a/nptl/pthread_mutex_init.c b/nptl/pthread_mutex_init.c
index 9f28b8d..38597ab 100644
--- a/nptl/pthread_mutex_init.c
+++ b/nptl/pthread_mutex_init.c
@@ -22,6 +22,7 @@
#include <string.h>
#include <kernel-features.h>
#include "pthreadP.h"
+#include <atomic.h>
#include <stap-probe.h>
@@ -117,10 +118,11 @@ __pthread_mutex_init (mutex, mutexattr)
>> PTHREAD_MUTEXATTR_PRIO_CEILING_SHIFT;
if (! ceiling)
{
- if (__sched_fifo_min_prio == -1)
+ /* See __init_sched_fifo_prio. */
+ if (atomic_load_relaxed (&__sched_fifo_min_prio) == -1)
__init_sched_fifo_prio ();
- if (ceiling < __sched_fifo_min_prio)
- ceiling = __sched_fifo_min_prio;
+ if (ceiling < atomic_load_relaxed (&__sched_fifo_min_prio))
+ ceiling = atomic_load_relaxed (&__sched_fifo_min_prio);
}
mutex->__data.__lock = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
break;
diff --git a/nptl/pthread_mutex_setprioceiling.c b/nptl/pthread_mutex_setprioceiling.c
index 52f65a0..63f11bb 100644
--- a/nptl/pthread_mutex_setprioceiling.c
+++ b/nptl/pthread_mutex_setprioceiling.c
@@ -20,6 +20,7 @@
#include <stdbool.h>
#include <errno.h>
#include <pthreadP.h>
+#include <atomic.h>
int
@@ -33,15 +34,19 @@ pthread_mutex_setprioceiling (mutex, prioceiling, old_ceiling)
if ((mutex->__data.__kind & PTHREAD_MUTEX_PRIO_PROTECT_NP) == 0)
return EINVAL;
- if (__sched_fifo_min_prio == -1)
+ /* See __init_sched_fifo_prio. */
+ if (atomic_load_relaxed (&__sched_fifo_min_prio) == -1
+ || atomic_load_relaxed (&__sched_fifo_max_prio) == -1)
__init_sched_fifo_prio ();
- if (__builtin_expect (prioceiling < __sched_fifo_min_prio, 0)
- || __builtin_expect (prioceiling > __sched_fifo_max_prio, 0)
- || __builtin_expect ((prioceiling
+ if (__glibc_unlikely (prioceiling
+ < atomic_load_relaxed (&__sched_fifo_min_prio))
+ || __glibc_unlikely (prioceiling
+ > atomic_load_relaxed (&__sched_fifo_max_prio))
+ || __glibc_unlikely ((prioceiling
& (PTHREAD_MUTEXATTR_PRIO_CEILING_MASK
>> PTHREAD_MUTEXATTR_PRIO_CEILING_SHIFT))
- != prioceiling, 0))
+ != prioceiling))
return EINVAL;
/* Check whether we already hold the mutex. */
diff --git a/nptl/pthread_mutexattr_getprioceiling.c b/nptl/pthread_mutexattr_getprioceiling.c
index c3e93fa..df265e7 100644
--- a/nptl/pthread_mutexattr_getprioceiling.c
+++ b/nptl/pthread_mutexattr_getprioceiling.c
@@ -18,6 +18,7 @@
<http://www.gnu.org/licenses/>. */
#include <pthreadP.h>
+#include <atomic.h>
int
@@ -35,10 +36,11 @@ pthread_mutexattr_getprioceiling (attr, prioceiling)
if (! ceiling)
{
- if (__sched_fifo_min_prio == -1)
+ /* See __init_sched_fifo_prio. */
+ if (atomic_load_relaxed (&__sched_fifo_min_prio) == -1)
__init_sched_fifo_prio ();
- if (ceiling < __sched_fifo_min_prio)
- ceiling = __sched_fifo_min_prio;
+ if (ceiling < atomic_load_relaxed (&__sched_fifo_min_prio))
+ ceiling = atomic_load_relaxed (&__sched_fifo_min_prio);
}
*prioceiling = ceiling;
diff --git a/nptl/pthread_mutexattr_setprioceiling.c b/nptl/pthread_mutexattr_setprioceiling.c
index d10e51c..d155bf0 100644
--- a/nptl/pthread_mutexattr_setprioceiling.c
+++ b/nptl/pthread_mutexattr_setprioceiling.c
@@ -19,6 +19,7 @@
#include <errno.h>
#include <pthreadP.h>
+#include <atomic.h>
int
@@ -26,15 +27,19 @@ pthread_mutexattr_setprioceiling (attr, prioceiling)
pthread_mutexattr_t *attr;
int prioceiling;
{
- if (__sched_fifo_min_prio == -1)
+ /* See __init_sched_fifo_prio. */
+ if (atomic_load_relaxed (&__sched_fifo_min_prio) == -1
+ || atomic_load_relaxed (&__sched_fifo_max_prio) == -1)
__init_sched_fifo_prio ();
- if (__builtin_expect (prioceiling < __sched_fifo_min_prio, 0)
- || __builtin_expect (prioceiling > __sched_fifo_max_prio, 0)
- || __builtin_expect ((prioceiling
+ if (__glibc_unlikely (prioceiling
+ < atomic_load_relaxed (&__sched_fifo_min_prio))
+ || __glibc_unlikely (prioceiling
+ > atomic_load_relaxed (&__sched_fifo_max_prio))
+ || __glibc_unlikely ((prioceiling
& (PTHREAD_MUTEXATTR_PRIO_CEILING_MASK
>> PTHREAD_MUTEXATTR_PRIO_CEILING_SHIFT))
- != prioceiling, 0))
+ != prioceiling))
return EINVAL;
struct pthread_mutexattr *iattr = (struct pthread_mutexattr *) attr;
diff --git a/nptl/tpp.c b/nptl/tpp.c
index ee9a2fe..9cfeea1 100644
--- a/nptl/tpp.c
+++ b/nptl/tpp.c
@@ -23,17 +23,29 @@
#include <pthreadP.h>
#include <sched.h>
#include <stdlib.h>
+#include <atomic.h>
int __sched_fifo_min_prio = -1;
int __sched_fifo_max_prio = -1;
+/* We only want to initialize __sched_fifo_min_prio and __sched_fifo_max_prio
+ once. The standard solution would be similar to pthread_once, but then
+ readers would need to use an acquire fence. In this specific case,
+ initialization is comprised of just idempotent writes to two variables
+ that have an initial value of -1. Therefore, we can treat each variable as
+ a separate, at-least-once initialized value. This enables using just
+ relaxed MO loads and stores, but requires that consumers check for
+ initialization of each value that is to be used; see
+ __pthread_tpp_change_priority for an example.
+ */
void
__init_sched_fifo_prio (void)
{
- __sched_fifo_max_prio = sched_get_priority_max (SCHED_FIFO);
- atomic_write_barrier ();
- __sched_fifo_min_prio = sched_get_priority_min (SCHED_FIFO);
+ atomic_store_relaxed (&__sched_fifo_max_prio,
+ sched_get_priority_max (SCHED_FIFO));
+ atomic_store_relaxed (&__sched_fifo_min_prio,
+ sched_get_priority_min (SCHED_FIFO));
}
int
@@ -41,49 +53,59 @@ __pthread_tpp_change_priority (int previous_prio, int new_prio)
{
struct pthread *self = THREAD_SELF;
struct priority_protection_data *tpp = THREAD_GETMEM (self, tpp);
+ int fifo_min_prio = atomic_load_relaxed (&__sched_fifo_min_prio);
+ int fifo_max_prio = atomic_load_relaxed (&__sched_fifo_max_prio);
if (tpp == NULL)
{
- if (__sched_fifo_min_prio == -1)
- __init_sched_fifo_prio ();
+ /* See __init_sched_fifo_prio. We need both the min and max prio,
+ so need to check both, and run initialization if either one is
+ not initialized. The memory model's write-read coherence rule
+ makes this work. */
+ if (fifo_min_prio == -1 || fifo_max_prio == -1)
+ {
+ __init_sched_fifo_prio ();
+ fifo_min_prio = atomic_load_relaxed (&__sched_fifo_min_prio);
+ fifo_max_prio = atomic_load_relaxed (&__sched_fifo_max_prio);
+ }
size_t size = sizeof *tpp;
- size += (__sched_fifo_max_prio - __sched_fifo_min_prio + 1)
+ size += (fifo_max_prio - fifo_min_prio + 1)
* sizeof (tpp->priomap[0]);
tpp = calloc (size, 1);
if (tpp == NULL)
return ENOMEM;
- tpp->priomax = __sched_fifo_min_prio - 1;
+ tpp->priomax = fifo_min_prio - 1;
THREAD_SETMEM (self, tpp, tpp);
}
assert (new_prio == -1
- || (new_prio >= __sched_fifo_min_prio
- && new_prio <= __sched_fifo_max_prio));
+ || (new_prio >= fifo_min_prio
+ && new_prio <= fifo_max_prio));
assert (previous_prio == -1
- || (previous_prio >= __sched_fifo_min_prio
- && previous_prio <= __sched_fifo_max_prio));
+ || (previous_prio >= fifo_min_prio
+ && previous_prio <= fifo_max_prio));
int priomax = tpp->priomax;
int newpriomax = priomax;
if (new_prio != -1)
{
- if (tpp->priomap[new_prio - __sched_fifo_min_prio] + 1 == 0)
+ if (tpp->priomap[new_prio - fifo_min_prio] + 1 == 0)
return EAGAIN;
- ++tpp->priomap[new_prio - __sched_fifo_min_prio];
+ ++tpp->priomap[new_prio - fifo_min_prio];
if (new_prio > priomax)
newpriomax = new_prio;
}
if (previous_prio != -1)
{
- if (--tpp->priomap[previous_prio - __sched_fifo_min_prio] == 0
+ if (--tpp->priomap[previous_prio - fifo_min_prio] == 0
&& priomax == previous_prio
&& previous_prio > new_prio)
{
int i;
- for (i = previous_prio - 1; i >= __sched_fifo_min_prio; --i)
- if (tpp->priomap[i - __sched_fifo_min_prio])
+ for (i = previous_prio - 1; i >= fifo_min_prio; --i)
+ if (tpp->priomap[i - fifo_min_prio])
break;
newpriomax = i;
}
^ permalink raw reply [flat|nested] 2+ messages in thread
* Re: [PATCH] Fix synchronization of TPP min/max priorities.
2014-11-25 18:55 [PATCH] Fix synchronization of TPP min/max priorities Torvald Riegel
@ 2014-11-25 21:44 ` Roland McGrath
0 siblings, 0 replies; 2+ messages in thread
From: Roland McGrath @ 2014-11-25 21:44 UTC (permalink / raw)
To: Torvald Riegel; +Cc: GLIBC Devel
Looks OK.
^ permalink raw reply [flat|nested] 2+ messages in thread
end of thread, other threads:[~2014-11-25 21:44 UTC | newest]
Thread overview: 2+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2014-11-25 18:55 [PATCH] Fix synchronization of TPP min/max priorities Torvald Riegel
2014-11-25 21:44 ` Roland McGrath
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for read-only IMAP folder(s) and NNTP newsgroup(s).