From: malteskarupke@fastmail.fm
To: libc-alpha@sourceware.org
Cc: Malte Skarupke <malteskarupke@fastmail.fm>
Subject: [PATCH v3 6/6] nptl: Clean up __g1_start and related code in pthread_cond_wait
Date: Thu,  6 Oct 2022 17:43:29 -0400
Message-ID: <20221006214329.1084244-7-malteskarupke@fastmail.fm>
In-Reply-To: <20221006214329.1084244-1-malteskarupke@fastmail.fm>

From: Malte Skarupke <malteskarupke@fastmail.fm>

After my previous changes, __g1_start can be simpler. It cannot
change while a thread is in pthread_cond_wait, because groups are only
allowed to switch once all threads in G1 have left that function. So
there is no need to check whether it has changed in pthread_cond_wait.
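
Concretely, this removes re-checks like the one in the spin loop of
__pthread_cond_wait_common (deleted in the diff below):

  /* Check that we are not spinning on a group that's already closed.
     No longer possible: groups only switch after we have left.  */
  if (seq < (__condvar_load_g1_start_relaxed (cond) >> 1))
    goto done;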

Once __g1_start was no longer read in pthread_cond_wait, it was only
read in code that holds the condvar-internal lock. So there is no
longer a need for __g1_start to be atomic.
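
For illustration, the remaining readers follow the pattern of
__condvar_cancel_waiting (a sketch, not the full function):

  __condvar_acquire_lock (cond, private);
  uint64_t g1_start = cond->__data.__g1_start; /* Plain load, lock held.  */
  ...
  __condvar_release_lock (cond, private);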

Finally, the low bit of __g1_start was only used in the block that was
supposed to handle potential stealing of signals. Since I deleted that
block, we can stop shifting the count in __g1_start.
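
In short, the representation of __g1_start changes like this (sketch):

  /* Before: an atomic wide counter; the count shifted left by one,
     with the LSB holding the index of the current G2.  */
  uint64_t g1_start_old = __condvar_load_g1_start_relaxed (cond) >> 1;

  /* After: a plain count, only accessed with the internal lock held.  */
  uint64_t g1_start_new = cond->__data.__g1_start;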
---
 nptl/pthread_cond_common.c              | 40 +++++-----------------
 nptl/pthread_cond_wait.c                | 45 +++++--------------------
 nptl/tst-cond22.c                       | 10 +++---
 sysdeps/nptl/bits/thread-shared-types.h |  2 +-
 sysdeps/nptl/pthread.h                  |  2 +-
 5 files changed, 23 insertions(+), 76 deletions(-)

diff --git a/nptl/pthread_cond_common.c b/nptl/pthread_cond_common.c
index 220db22e9f..9f3e71fd69 100644
--- a/nptl/pthread_cond_common.c
+++ b/nptl/pthread_cond_common.c
@@ -21,11 +21,7 @@
 #include <stdint.h>
 #include <pthread.h>
 
-/* We need 3 least-significant bits on __crefs for something else.
-   This also matches __atomic_wide_counter requirements: The highest
-   value we add is __PTHREAD_COND_MAX_GROUP_SIZE << 2 to __g1_start
-   (the two extra bits are for the lock in the two LSBs of
-   __g1_start).  */
+/* We need 3 least-significant bits on __crefs for something else.  */
 #define __PTHREAD_COND_MAX_GROUP_SIZE ((unsigned) 1 << 29)
 
 static inline uint64_t
@@ -40,18 +36,6 @@ __condvar_fetch_add_wseq_acquire (pthread_cond_t *cond, unsigned int val)
   return __atomic_wide_counter_fetch_add_acquire (&cond->__data.__wseq, val);
 }
 
-static inline uint64_t
-__condvar_load_g1_start_relaxed (pthread_cond_t *cond)
-{
-  return __atomic_wide_counter_load_relaxed (&cond->__data.__g1_start);
-}
-
-static inline void
-__condvar_add_g1_start_relaxed (pthread_cond_t *cond, unsigned int val)
-{
-  __atomic_wide_counter_add_relaxed (&cond->__data.__g1_start, val);
-}
-
 #if __HAVE_64B_ATOMICS == 1
 
 static inline uint64_t
@@ -210,7 +194,7 @@ __condvar_quiesce_and_switch_g1 (pthread_cond_t *cond, uint64_t wseq,
      behavior.
      Note that this works correctly for a zero-initialized condvar too.  */
   unsigned int old_orig_size = __condvar_get_orig_size (cond);
-  uint64_t old_g1_start = __condvar_load_g1_start_relaxed (cond) >> 1;
+  uint64_t old_g1_start = cond->__data.__g1_start;
   if (((unsigned) (wseq - old_g1_start - old_orig_size)
 	  + cond->__data.__g_size[g1 ^ 1]) == 0)
 	return false;
@@ -240,10 +224,10 @@ __condvar_quiesce_and_switch_g1 (pthread_cond_t *cond, uint64_t wseq,
   /* Wait until there are no group references anymore.  The fetch-or operation
      injects us into the modification order of __g_refs; release MO ensures
      that waiters incrementing __g_refs after our fetch-or see the previous
-     changes to __g_signals and to __g1_start that had to happen before we can
-     switch this G1 and alias with an older group (we have two groups, so
-     aliasing requires switching group roles twice).  Note that nobody else
-     can have set the wake-request flag, so we do not have to act upon it.
+     change to __g_signals that had to happen before we can switch this G1
+     and alias with an older group (we have two groups, so aliasing requires
+     switching group roles twice).  Note that nobody else can have set the
+     wake-request flag, so we do not have to act upon it.
 
      Also note that it is harmless if older waiters or waiters from this G1
      get a group reference after we have quiesced the group because it will
@@ -281,15 +265,9 @@ __condvar_quiesce_and_switch_g1 (pthread_cond_t *cond, uint64_t wseq,
      after the waiters we waited for.  */
   atomic_fetch_and_acquire (cond->__data.__g_refs + g1, ~(unsigned int)1);
 
-  /* Update __g1_start, which finishes closing this group.  The value we add
-     will never be negative because old_orig_size can only be zero when we
-     switch groups the first time after a condvar was initialized, in which
-     case G1 will be at index 1 and we will add a value of 1.  See above for
-     why this takes place after waiting for quiescence of the group.
-     Relaxed MO is fine because the change comes with no additional
-     constraints that others would have to observe.  */
-  __condvar_add_g1_start_relaxed (cond,
-      (old_orig_size << 1) + (g1 == 1 ? 1 : - 1));
+  /* Update __g1_start, which finishes closing this group.  See above for
+     why this takes place after waiting for quiescence of the group.  */
+  cond->__data.__g1_start += old_orig_size;
 
   /* Now reopen the group, thus enabling waiters to again block using the
      futex controlled by __g_signals.  Release MO so that observers that see
diff --git a/nptl/pthread_cond_wait.c b/nptl/pthread_cond_wait.c
index 515b4ba60e..7fb3dfb5ac 100644
--- a/nptl/pthread_cond_wait.c
+++ b/nptl/pthread_cond_wait.c
@@ -71,7 +71,7 @@ __condvar_cancel_waiting (pthread_cond_t *cond, uint64_t seq, unsigned int g,
      not hold a reference on the group.  */
   __condvar_acquire_lock (cond, private);
 
-  uint64_t g1_start = __condvar_load_g1_start_relaxed (cond) >> 1;
+  uint64_t g1_start = cond->__data.__g1_start;
   if (g1_start > seq)
     {
       /* Our group is closed, so someone provided enough signals for it.
@@ -274,9 +274,8 @@ __condvar_cleanup_waiting (void *arg)
      * Waiters fetch-add while having acquire the mutex associated with the
        condvar.  Signalers load it and fetch-xor it concurrently.
    __g1_start: Starting position of G1 (inclusive)
-     * LSB is index of current G2.
-     * Modified by signalers while having acquired the condvar-internal lock
-       and observed concurrently by waiters.
+     * Modified by signalers and observed by waiters, both only while having
+       acquired the condvar-internal lock.
    __g1_orig_size: Initial size of G1
      * The two least-significant bits represent the condvar-internal lock.
      * Only accessed while having acquired the condvar-internal lock.
@@ -313,16 +312,6 @@ __condvar_cleanup_waiting (void *arg)
    A PTHREAD_COND_INITIALIZER condvar has all fields set to zero, which yields
    a condvar that has G2 starting at position 0 and a G1 that is closed.
 
-   Because waiters do not claim ownership of a group right when obtaining a
-   position in __wseq but only reference count the group when using futexes
-   to block, it can happen that a group gets closed before a waiter can
-   increment the reference count.  Therefore, waiters have to check whether
-   their group is already closed using __g1_start.  They also have to perform
-   this check when spinning when trying to grab a signal from __g_signals.
-   Note that for these checks, using relaxed MO to load __g1_start is
-   sufficient because if a waiter can see a sufficiently large value, it could
-   have also consume a signal in the waiters group.
-
    It is essential that the last field in pthread_cond_t is __g_signals[1]:
    The previous condvar used a pointer-sized field in pthread_cond_t, so a
    PTHREAD_COND_INITIALIZER from that condvar implementation might only
@@ -414,8 +403,7 @@ __pthread_cond_wait_common (pthread_cond_t *cond, pthread_mutex_t *mutex,
   /* Now wait until a signal is available in our group or it is closed.
      Acquire MO so that if we observe a value of zero written after group
      switching in __condvar_quiesce_and_switch_g1, we synchronize with that
-     store and will see the prior update of __g1_start done while switching
-     groups too.  */
+     store.  */
   unsigned int signals = atomic_load_acquire (cond->__data.__g_signals + g);
 
   do
@@ -435,11 +423,6 @@ __pthread_cond_wait_common (pthread_cond_t *cond, pthread_mutex_t *mutex,
 	  unsigned int spin = maxspin;
 	  while (signals == 0 && spin > 0)
 	    {
-	      /* Check that we are not spinning on a group that's already
-		 closed.  */
-	      if (seq < (__condvar_load_g1_start_relaxed (cond) >> 1))
-		goto done;
-
 	      /* TODO Back off.  */
 
 	      /* Reload signals.  See above for MO.  */
@@ -456,19 +439,7 @@ __pthread_cond_wait_common (pthread_cond_t *cond, pthread_mutex_t *mutex,
 	  if (signals != 0)
 	    break;
 
-	  /* No signals available after spinning, so prepare to block.
-	     First check the closed flag on __g_signals that designates a
-	     concurrent attempt to reuse the group's slot. We use acquire MO for
-	     the __g_signals check to make sure we read the current value of
-	     __g1_start (see above).  */
-	  if (((atomic_load_acquire (cond->__data.__g_signals + g) & 1) != 0)
-	      || (seq < (__condvar_load_g1_start_relaxed (cond) >> 1)))
-	    {
-	      /* Our group is closed.  */
-	      goto done;
-	    }
-
-	  // Now block.
+	  // No signals available after spinning, so block.
 	  struct _pthread_cleanup_buffer buffer;
 	  struct _condvar_cleanup_buffer cbuffer;
 	  cbuffer.wseq = wseq;
@@ -500,9 +471,9 @@ __pthread_cond_wait_common (pthread_cond_t *cond, pthread_mutex_t *mutex,
 	}
 
     }
-  /* Try to grab a signal.  Use acquire MO so that we see an up-to-date value
-     of __g1_start when spinning above.  */
-  while (!atomic_compare_exchange_weak_acquire (cond->__data.__g_signals + g,
+  /* Try to grab a signal.  Relaxed MO is enough because the group can't be
+     closed while we're in this loop, so there are no writes we could miss.  */
+  while (!atomic_compare_exchange_weak_relaxed (cond->__data.__g_signals + g,
 						&signals, signals - 2));
 
  done:
diff --git a/nptl/tst-cond22.c b/nptl/tst-cond22.c
index 9f8cfea5c3..f5ee62e639 100644
--- a/nptl/tst-cond22.c
+++ b/nptl/tst-cond22.c
@@ -106,11 +106,10 @@ do_test (void)
       status = 1;
     }
 
-  printf ("cond = { 0x%x:%x, 0x%x:%x, %u/%u/%u, %u/%u/%u, %u, %u }\n",
+  printf ("cond = { 0x%x:%x, %llu, %u/%u/%u, %u/%u/%u, %u, %u }\n",
 	  c.__data.__wseq.__value32.__high,
 	  c.__data.__wseq.__value32.__low,
-	  c.__data.__g1_start.__value32.__high,
-	  c.__data.__g1_start.__value32.__low,
+	  c.__data.__g1_start,
 	  c.__data.__g_signals[0], c.__data.__g_refs[0], c.__data.__g_size[0],
 	  c.__data.__g_signals[1], c.__data.__g_refs[1], c.__data.__g_size[1],
 	  c.__data.__g1_orig_size, c.__data.__crefs);
@@ -152,11 +151,10 @@ do_test (void)
       status = 1;
     }
 
-  printf ("cond = { 0x%x:%x, 0x%x:%x, %u/%u/%u, %u/%u/%u, %u, %u }\n",
+  printf ("cond = { 0x%x:%x, %llu, %u/%u/%u, %u/%u/%u, %u, %u }\n",
 	  c.__data.__wseq.__value32.__high,
 	  c.__data.__wseq.__value32.__low,
-	  c.__data.__g1_start.__value32.__high,
-	  c.__data.__g1_start.__value32.__low,
+	  c.__data.__g1_start,
 	  c.__data.__g_signals[0], c.__data.__g_refs[0], c.__data.__g_size[0],
 	  c.__data.__g_signals[1], c.__data.__g_refs[1], c.__data.__g_size[1],
 	  c.__data.__g1_orig_size, c.__data.__crefs);
diff --git a/sysdeps/nptl/bits/thread-shared-types.h b/sysdeps/nptl/bits/thread-shared-types.h
index 52decc49d6..52234b1512 100644
--- a/sysdeps/nptl/bits/thread-shared-types.h
+++ b/sysdeps/nptl/bits/thread-shared-types.h
@@ -94,7 +94,7 @@ typedef struct __pthread_internal_slist
 struct __pthread_cond_s
 {
   __atomic_wide_counter __wseq;
-  __atomic_wide_counter __g1_start;
+  unsigned long long int __g1_start;
   unsigned int __g_refs[2] __LOCK_ALIGNMENT;
   unsigned int __g_size[2];
   unsigned int __g1_orig_size;
diff --git a/sysdeps/nptl/pthread.h b/sysdeps/nptl/pthread.h
index dedad4ec86..b0ddbe2ee3 100644
--- a/sysdeps/nptl/pthread.h
+++ b/sysdeps/nptl/pthread.h
@@ -152,7 +152,7 @@ enum
 
 
 /* Conditional variable handling.  */
-#define PTHREAD_COND_INITIALIZER { { {0}, {0}, {0, 0}, {0, 0}, 0, 0, {0, 0} } }
+#define PTHREAD_COND_INITIALIZER { { {0}, 0, {0, 0}, {0, 0}, 0, 0, {0, 0} } }
 
 
 /* Cleanup buffers */
-- 
2.25.1


Thread overview: 7+ messages
2022-10-06 21:43 [PATCH v3 0/6] nptl: Fix pthread_cond_signal missing a sleeper malteskarupke
2022-10-06 21:43 ` [PATCH v3 1/6] nptl: Fix pthread_cond_signal missing a sleeper (#BZ 25847) malteskarupke
2022-10-06 21:43 ` [PATCH v3 2/6] nptl: Remove the signal-stealing code. It is no longer needed malteskarupke
2022-10-06 21:43 ` [PATCH v3 3/6] nptl: Optimization by not incrementing wrefs in pthread_cond_wait malteskarupke
2022-10-06 21:43 ` [PATCH v3 4/6] nptl: Make test-cond-printers check the number of waiters malteskarupke
2022-10-06 21:43 ` [PATCH v3 5/6] nptl: Rename __wrefs to __crefs because its meaning has changed malteskarupke
2022-10-06 21:43 ` malteskarupke [this message]
