From: Florian Weimer <fweimer@redhat.com>
To: libc-alpha@sourceware.org
Subject: [PATCH 08/13] nptl: Move more stack management variables into _rtld_global
Date: Thu, 06 May 2021 20:10:54 +0200
Message-ID: <452b4e33920e6d9e1dd7fa1699af443dbb83ee88.1620323953.git.fweimer@redhat.com>
In-Reply-To: <cover.1620323953.git.fweimer@redhat.com>

Permissions of the cached stacks may have to be updated if an object
is loaded that requires executable stacks, so the dynamic loader
needs to know about these cached stacks.
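
As an illustration only, here is a minimal stand-alone sketch of the kind
of permission update that has to reach every cached stack as well.  This
is a simplified, hypothetical helper, not the change_stack_perm code the
patch touches; it assumes the whole page-aligned stack block is
re-protected in a single call:

  #include <sys/mman.h>

  /* Make one cached stack block executable after an object that
     requires executable stacks has been loaded.  Stack blocks are
     page-aligned, so one mprotect call covers the whole mapping.  */
  static int
  make_cached_stack_executable (void *block, size_t size)
  {
    return mprotect (block, size, PROT_READ | PROT_WRITE | PROT_EXEC);
  }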

The move of in_flight_stack and stack_cache_actsize is a requirement for
merging __reclaim_stacks into the fork implementation in libc.
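
For illustration, a simplified stand-alone sketch of the recovery scheme
that in_flight_stack (now GL (dl_in_flight_stack)) supports; this is
hypothetical example code, not the glibc implementation.  The address of
the list element being manipulated is published with bit 0 encoding the
kind of operation, so a child created by fork can detect and repair an
interrupted list update:

  #include <stdbool.h>
  #include <stdint.h>

  typedef struct list_head { struct list_head *next, *prev; } list_t;

  static uintptr_t in_flight_stack;  /* stand-in for GL (dl_in_flight_stack) */

  static void
  record_pending_add (list_t *elem)
  {
    in_flight_stack = (uintptr_t) elem | 1;  /* bit 0 set: add in progress */
  }

  static void
  record_pending_del (list_t *elem)
  {
    in_flight_stack = (uintptr_t) elem;      /* bit 0 clear: delete in progress */
  }

  static void
  repair_after_fork (void)
  {
    if (in_flight_stack == 0)
      return;                                /* no list operation was interrupted */
    bool add_p = in_flight_stack & 1;
    list_t *elem = (list_t *) (in_flight_stack & ~(uintptr_t) 1);
    /* Complete or undo the operation on ELEM here, as __reclaim_stacks
       does with the real lists, then clear the marker.  */
    (void) add_p;
    (void) elem;
    in_flight_stack = 0;
  }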
---
 elf/dl-support.c              |  3 +++
 nptl/allocatestack.c          | 51 +++++++++++++++--------------------
 sysdeps/generic/ldsodefs.h    | 11 ++++++++
 sysdeps/nptl/dl-tls_init_tp.c |  1 +
 4 files changed, 36 insertions(+), 30 deletions(-)

diff --git a/elf/dl-support.c b/elf/dl-support.c
index f966a2e7cd..580b0202ad 100644
--- a/elf/dl-support.c
+++ b/elf/dl-support.c
@@ -192,6 +192,9 @@ int (*_dl_make_stack_executable_hook) (void **) = _dl_make_stack_executable;
 #if THREAD_GSCOPE_IN_TCB
 list_t _dl_stack_used;
 list_t _dl_stack_user;
+list_t _dl_stack_cache;
+size_t _dl_stack_cache_actsize;
+uintptr_t _dl_in_flight_stack;
 int _dl_stack_cache_lock;
 #else
 int _dl_thread_gscope_count;
diff --git a/nptl/allocatestack.c b/nptl/allocatestack.c
index 88c49f8154..71cfa874d1 100644
--- a/nptl/allocatestack.c
+++ b/nptl/allocatestack.c
@@ -103,15 +103,6 @@
 
 /* Maximum size in kB of cache.  */
 static size_t stack_cache_maxsize = 40 * 1024 * 1024; /* 40MiBi by default.  */
-static size_t stack_cache_actsize;
-
-/* List of queued stack frames.  */
-static LIST_HEAD (stack_cache);
-
-/* We need to record what list operations we are going to do so that,
-   in case of an asynchronous interruption due to a fork() call, we
-   can correct for the work.  */
-static uintptr_t in_flight_stack;
 
 /* Check whether the stack is still used or not.  */
 #define FREE_P(descr) ((descr)->tid <= 0)
@@ -120,7 +111,7 @@ static uintptr_t in_flight_stack;
 static void
 stack_list_del (list_t *elem)
 {
-  in_flight_stack = (uintptr_t) elem;
+  GL (dl_in_flight_stack) = (uintptr_t) elem;
 
   atomic_write_barrier ();
 
@@ -128,14 +119,14 @@ stack_list_del (list_t *elem)
 
   atomic_write_barrier ();
 
-  in_flight_stack = 0;
+  GL (dl_in_flight_stack) = 0;
 }
 
 
 static void
 stack_list_add (list_t *elem, list_t *list)
 {
-  in_flight_stack = (uintptr_t) elem | 1;
+  GL (dl_in_flight_stack) = (uintptr_t) elem | 1;
 
   atomic_write_barrier ();
 
@@ -143,7 +134,7 @@ stack_list_add (list_t *elem, list_t *list)
 
   atomic_write_barrier ();
 
-  in_flight_stack = 0;
+  GL (dl_in_flight_stack) = 0;
 }
 
 
@@ -168,7 +159,7 @@ get_cached_stack (size_t *sizep, void **memp)
      same.  As the very least there are only a few different sizes.
      Therefore this loop will exit early most of the time with an
      exact match.  */
-  list_for_each (entry, &stack_cache)
+  list_for_each (entry, &GL (dl_stack_cache))
     {
       struct pthread *curr;
 
@@ -208,7 +199,7 @@ get_cached_stack (size_t *sizep, void **memp)
   stack_list_add (&result->list, &GL (dl_stack_used));
 
   /* And decrease the cache size.  */
-  stack_cache_actsize -= result->stackblock_size;
+  GL (dl_stack_cache_actsize) -= result->stackblock_size;
 
   /* Release the lock early.  */
   lll_unlock (GL (dl_stack_cache_lock), LLL_PRIVATE);
@@ -249,7 +240,7 @@ free_stacks (size_t limit)
   list_t *prev;
 
   /* Search from the end of the list.  */
-  list_for_each_prev_safe (entry, prev, &stack_cache)
+  list_for_each_prev_safe (entry, prev, &GL (dl_stack_cache))
     {
       struct pthread *curr;
 
@@ -260,7 +251,7 @@ free_stacks (size_t limit)
 	  stack_list_del (entry);
 
 	  /* Account for the freed memory.  */
-	  stack_cache_actsize -= curr->stackblock_size;
+	  GL (dl_stack_cache_actsize) -= curr->stackblock_size;
 
 	  /* Free the memory associated with the ELF TLS.  */
 	  _dl_deallocate_tls (TLS_TPADJ (curr), false);
@@ -271,7 +262,7 @@ free_stacks (size_t limit)
 	    abort ();
 
 	  /* Maybe we have freed enough.  */
-	  if (stack_cache_actsize <= limit)
+	  if (GL (dl_stack_cache_actsize) <= limit)
 	    break;
 	}
     }
@@ -293,10 +284,10 @@ queue_stack (struct pthread *stack)
   /* We unconditionally add the stack to the list.  The memory may
      still be in use but it will not be reused until the kernel marks
      the stack as not used anymore.  */
-  stack_list_add (&stack->list, &stack_cache);
+  stack_list_add (&stack->list, &GL (dl_stack_cache));
 
-  stack_cache_actsize += stack->stackblock_size;
-  if (__glibc_unlikely (stack_cache_actsize > stack_cache_maxsize))
+  GL (dl_stack_cache_actsize) += stack->stackblock_size;
+  if (__glibc_unlikely (GL (dl_stack_cache_actsize) > stack_cache_maxsize))
     free_stacks (stack_cache_maxsize);
 }
 
@@ -827,7 +818,7 @@ __make_stacks_executable (void **stack_endp)
      might be wasted time but better spend it here than adding a check
      in the fast path.  */
   if (err == 0)
-    list_for_each (runp, &stack_cache)
+    list_for_each (runp, &GL (dl_stack_cache))
       {
 	err = change_stack_perm (list_entry (runp, struct pthread, list)
 #ifdef NEED_SEPARATE_REGISTER_STACK
@@ -857,10 +848,10 @@ __reclaim_stacks (void)
      we have to be aware that we might have interrupted a list
      operation.  */
 
-  if (in_flight_stack != 0)
+  if (GL (dl_in_flight_stack) != 0)
     {
-      bool add_p = in_flight_stack & 1;
-      list_t *elem = (list_t *) (in_flight_stack & ~(uintptr_t) 1);
+      bool add_p = GL (dl_in_flight_stack) & 1;
+      list_t *elem = (list_t *) (GL (dl_in_flight_stack) & ~(uintptr_t) 1);
 
       if (add_p)
 	{
@@ -871,8 +862,8 @@ __reclaim_stacks (void)
 
 	  if (GL (dl_stack_used).next->prev != &GL (dl_stack_used))
 	    l = &GL (dl_stack_used);
-	  else if (stack_cache.next->prev != &stack_cache)
-	    l = &stack_cache;
+	  else if (GL (dl_stack_cache).next->prev != &GL (dl_stack_cache))
+	    l = &GL (dl_stack_cache);
 
 	  if (l != NULL)
 	    {
@@ -901,7 +892,7 @@ __reclaim_stacks (void)
 	  curp->tid = 0;
 
 	  /* Account for the size of the stack.  */
-	  stack_cache_actsize += curp->stackblock_size;
+	  GL (dl_stack_cache_actsize) += curp->stackblock_size;
 
 	  if (curp->specific_used)
 	    {
@@ -926,7 +917,7 @@ __reclaim_stacks (void)
     }
 
   /* Add the stack of all running threads to the cache.  */
-  list_splice (&GL (dl_stack_used), &stack_cache);
+  list_splice (&GL (dl_stack_used), &GL (dl_stack_cache));
 
   /* Remove the entry for the current thread to from the cache list
      and add it to the list of running threads.  Which of the two
@@ -945,7 +936,7 @@ __reclaim_stacks (void)
   /* There is one thread running.  */
   __nptl_nthreads = 1;
 
-  in_flight_stack = 0;
+  GL (dl_in_flight_stack) = 0;
 
   /* Initialize locks.  */
   GL (dl_stack_cache_lock) = LLL_LOCK_INITIALIZER;
diff --git a/sysdeps/generic/ldsodefs.h b/sysdeps/generic/ldsodefs.h
index ee851ac789..81cce2e4d5 100644
--- a/sysdeps/generic/ldsodefs.h
+++ b/sysdeps/generic/ldsodefs.h
@@ -481,6 +481,17 @@ struct rtld_global
   /* List of thread stacks that were allocated by the application.  */
   EXTERN list_t _dl_stack_user;
 
+  /* List of queued thread stacks.  */
+  EXTERN list_t _dl_stack_cache;
+
+  /* Total size of all stacks in the cache (sum over stackblock_size).  */
+  EXTERN size_t _dl_stack_cache_actsize;
+
+  /* We need to record what list operations we are going to do so
+     that, in case of an asynchronous interruption due to a fork()
+     call, we can correct for the work.  */
+  EXTERN uintptr_t _dl_in_flight_stack;
+
   /* Mutex protecting the stack lists.  */
   EXTERN int _dl_stack_cache_lock;
 #else
diff --git a/sysdeps/nptl/dl-tls_init_tp.c b/sysdeps/nptl/dl-tls_init_tp.c
index cb29222727..f1aaa5aa9d 100644
--- a/sysdeps/nptl/dl-tls_init_tp.c
+++ b/sysdeps/nptl/dl-tls_init_tp.c
@@ -43,6 +43,7 @@ __tls_pre_init_tp (void)
      initialized.  */
   INIT_LIST_HEAD (&GL (dl_stack_used));
   INIT_LIST_HEAD (&GL (dl_stack_user));
+  INIT_LIST_HEAD (&GL (dl_stack_cache));
 
 #ifdef SHARED
   ___rtld_mutex_lock = rtld_mutex_dummy;
-- 
2.30.2




Thread overview: 30+ messages
2021-05-06 18:08 [PATCH 00/13] Linux: Move most stack management out of libpthread Florian Weimer
2021-05-06 18:08 ` [PATCH 01/13] scripts/versions.awk: Add strings and hashes to <first-versions.h> Florian Weimer
2021-05-09 21:42   ` Carlos O'Donell
2021-05-06 18:09 ` [PATCH v2 02/13] elf, nptl: Resolve recursive lock implementation early Florian Weimer
2021-05-09 21:42   ` Carlos O'Donell
2021-05-10  5:54     ` Florian Weimer
2021-05-06 18:10 ` [PATCH 03/13] nptl: Export __libc_multiple_threads from libc as an internal symbol Florian Weimer
2021-05-09 21:42   ` Carlos O'Donell
2021-05-06 18:10 ` [PATCH 04/13] Linux: Explicitly disable cancellation checking in the dynamic loader Florian Weimer
2021-05-09 21:42   ` Carlos O'Donell
2021-05-06 18:10 ` [PATCH 05/13] Linux: Simplify and fix the definition of SINGLE_THREAD_P Florian Weimer
2021-05-09 21:42   ` Carlos O'Donell
2021-05-06 18:10 ` [PATCH 06/13] nptl: Eliminate __pthread_multiple_threads Florian Weimer
2021-05-09 21:42   ` Carlos O'Donell
2021-05-06 18:10 ` [PATCH 07/13] elf: Introduce __tls_pre_init_tp Florian Weimer
2021-05-09 21:42   ` Carlos O'Donell
2021-05-06 18:10 ` Florian Weimer [this message]
2021-05-09 21:42   ` [PATCH 08/13] nptl: Move more stack management variables into _rtld_global Carlos O'Donell
2021-05-06 18:11 ` [PATCH 09/13] nptl: Simplify the change_stack_perm calling convention Florian Weimer
2021-05-09 21:42   ` Carlos O'Donell
2021-05-06 18:11 ` [PATCH 10/13] nptl: Move changing of stack permissions into ld.so Florian Weimer
2021-05-09 21:42   ` Carlos O'Donell
2021-05-06 18:11 ` [PATCH 11/13] nptl: Simplify resetting the in-flight stack in __reclaim_stacks Florian Weimer
2021-05-09 21:42   ` Carlos O'Donell
2021-05-06 18:11 ` [PATCH 12/13] nptl: Move __default_pthread_attr, __default_pthread_attr_lock into libc Florian Weimer
2021-05-09 21:41   ` Carlos O'Donell
2021-05-09 21:42   ` Carlos O'Donell
2021-05-06 18:11 ` [PATCH 13/13] Linux: Move __reclaim_stacks into the fork implementation in libc Florian Weimer
2021-05-09 21:41   ` Carlos O'Donell
2021-05-09 21:42 ` [PATCH 00/13] Linux: Move most stack management out of libpthread Carlos O'Donell
