* [gcc r12-8121] libstdc++: Reduce memory usage in std::stacktrace::current
@ 2022-04-12 21:39 Jonathan Wakely
From: Jonathan Wakely @ 2022-04-12 21:39 UTC (permalink / raw)
  To: gcc-cvs, libstdc++-cvs

https://gcc.gnu.org/g:2ce0f5185ba95131b3c538507323d8ecb561a0c2

commit r12-8121-g2ce0f5185ba95131b3c538507323d8ecb561a0c2
Author: Jonathan Wakely <jwakely@redhat.com>
Date:   Mon Apr 11 21:15:40 2022 +0100

    libstdc++: Reduce memory usage in std::stacktrace::current
    
    This adds an alternative callback for use in the overload of
    basic_stacktrace::current that takes a max_depth parameter. The new
    callback will not allow the container to grow past the initial capacity,
    which is set to the specified maximum depth.  This avoids allocating
    memory for hundreds of frames only to discard them again because of a
    small maximum depth limit.
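    
    As an illustrative usage sketch (not part of this commit), a caller that
    only wants a shallow trace now takes the fixed-capacity path; the program
    would be linked with -lstdc++_libbacktrace, as the new test below is:
    
        #include <stacktrace>
    
        int main()
        {
          // With max_depth == 16, capacity for exactly 16 frames is
          // allocated up front, and tracing stops after 16 entries instead
          // of walking (and storing) the whole call stack.
          auto trace = std::stacktrace::current(0, 16);
          return trace.size() <= 16 ? 0 : 1;
        }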
    
    For larger maximum depths the normal callback is used, with a smaller
    initial capacity that can grow as needed. The container will be resized
    to the given max depth after the entire backtrace has been produced
    (relying on the fact that std::stacktrace_entry objects are trivially
    destructible to elide their destruction).
    
    Currently the threshold for "larger" limits is 128, so a max depth <= 128
    will allocate capacity for exactly that many frames. A larger max depth
    (or an unspecified max depth) will use an initial capacity of 64 frames
    and grow as needed. Since each frame is only a uintptr_t value, it might
    be reasonable to increase the 128 threshold so that memory usage can be
    capped for larger maximum depths as well.
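    
    In outline, the capacity choice described above behaves like the
    following sketch (a restatement of the new _M_prepare logic shown in
    the diff below, not additional behaviour):
    
        #include <cstddef>
    
        // max_depth is SIZE_MAX when the caller did not specify a limit.
        std::size_t initial_capacity(std::size_t max_depth)
        {
          if (max_depth > 128)
            return 64;       // soft limit, _M_push_back reallocates on demand
          return max_depth;  // hard limit, the callback stops at this depth
        }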
    
    This change also delays the creation of the libbacktrace state until we
    actually need it, so that the state is not created if allocation fails.
    
    libstdc++-v3/ChangeLog:
    
            * include/std/stacktrace (basic_stacktrace::current): Replace
            calls to _M_reserve and _S_curr_cb with call to _M_prepare.
            Check return value of backtrace_simple when max depth given.
            (basic_stacktrace::_M_reserve): Remove.
            (basic_stacktrace::_S_curr_cb): Remove.
            (basic_stacktrace::_M_prepare(size_type)): New function to
            reserve initial capacity and return callback.
            (basic_stacktrace::_Impl::_M_allocate): Remove check for 0 < n
            and remove redundant zeroing of _M_frames and _M_capacity.
            (basic_stacktrace::_Impl::_M_push_back): Add [[unlikely]]
            attribute. Assign _Impl instead of swapping.
            * testsuite/19_diagnostics/stacktrace/current.cc: New test.

Diff:
---
 libstdc++-v3/include/std/stacktrace                | 110 ++++++++++++---------
 .../testsuite/19_diagnostics/stacktrace/current.cc |  86 ++++++++++++++++
 2 files changed, 148 insertions(+), 48 deletions(-)

diff --git a/libstdc++-v3/include/std/stacktrace b/libstdc++-v3/include/std/stacktrace
index 79038e803f2..5f928f10dee 100644
--- a/libstdc++-v3/include/std/stacktrace
+++ b/libstdc++-v3/include/std/stacktrace
@@ -237,15 +237,14 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       static basic_stacktrace
       current(const allocator_type& __alloc = allocator_type()) noexcept
       {
-	auto __state = stacktrace_entry::_S_init();
 	basic_stacktrace __ret(__alloc);
-	if (!__ret._M_reserve(64)) [[unlikely]]
-	  return __ret;
-
-	if (__glibcxx_backtrace_simple(__state, 1, _S_curr_cb(),
-				       nullptr, std::__addressof(__ret)))
-	  __ret._M_clear();
-
+	if (auto __cb = __ret._M_prepare()) [[likely]]
+	  {
+	    auto __state = stacktrace_entry::_S_init();
+	    if (__glibcxx_backtrace_simple(__state, 1, __cb, nullptr,
+					   std::__addressof(__ret)))
+	      __ret._M_clear();
+	  }
 	return __ret;
       }
 
@@ -254,16 +253,16 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       current(size_type __skip,
 	      const allocator_type& __alloc = allocator_type()) noexcept
       {
-	auto __state = stacktrace_entry::_S_init();
 	basic_stacktrace __ret(__alloc);
 	if (__skip >= __INT_MAX__) [[unlikely]]
 	  return __ret;
-	if (!__ret._M_reserve(64)) [[unlikely]]
-	  return __ret;
-
-	if (__glibcxx_backtrace_simple(__state, __skip + 1, _S_curr_cb(),
-				       nullptr, std::__addressof(__ret)))
-	  __ret._M_clear();
+	if (auto __cb = __ret._M_prepare()) [[likely]]
+	  {
+	    auto __state = stacktrace_entry::_S_init();
+	    if (__glibcxx_backtrace_simple(__state, __skip + 1, __cb, nullptr,
+					   std::__addressof(__ret)))
+	      __ret._M_clear();
+	  }
 
 	return __ret;
       }
@@ -275,19 +274,22 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       {
 	__glibcxx_assert(__skip <= (size_type(-1) - __max_depth));
 
-	auto __state = stacktrace_entry::_S_init();
 	basic_stacktrace __ret(__alloc);
-	if (__max_depth == 0 || __skip >= __INT_MAX__) [[unlikely]]
+	if (__max_depth == 0) [[unlikely]]
 	  return __ret;
-	if (!__ret._M_reserve(std::min<int>(__max_depth, 64))) [[unlikely]]
+	if (__skip >= __INT_MAX__) [[unlikely]]
 	  return __ret;
-
-	if (__glibcxx_backtrace_simple(__state, __skip + 1, _S_curr_cb(),
-				       nullptr, std::__addressof(__ret)))
-	  __ret._M_clear();
-	else if (__ret.size() > __max_depth)
-	  __ret.resize(__max_depth);
-
+	if (auto __cb = __ret._M_prepare(__max_depth)) [[likely]]
+	  {
+	    auto __state = stacktrace_entry::_S_init();
+	    int __err = __glibcxx_backtrace_simple(__state, __skip + 1, __cb,
+						   nullptr,
+						   std::__addressof(__ret));
+	    if (__err < 0)
+	      __ret._M_clear();
+	    else if (__ret.size() > __max_depth)
+	      __ret._M_impl._M_size = __max_depth;
+	  }
 	return __ret;
       }
 
@@ -524,12 +526,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       }
 
     private:
-      bool
-      _M_reserve(size_type __n) noexcept
-      {
-	return _M_impl._M_allocate(_M_alloc, __n) != nullptr;
-      }
-
+      // Precondition: _M_capacity != 0
       bool
       _M_push_back(const value_type& __x) noexcept
       {
@@ -543,18 +540,37 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	_M_impl._M_deallocate(_M_alloc);
       }
 
-      static auto
-      _S_curr_cb() noexcept
+      // Precondition: __max_depth != 0
+      auto
+      _M_prepare(size_type __max_depth = -1) noexcept
       -> int (*) (void*, uintptr_t)
       {
-	return [](void* __data, uintptr_t __pc) {
+	auto __cb = +[](void* __data, uintptr_t __pc) {
 	  auto& __s = *static_cast<basic_stacktrace*>(__data);
 	  stacktrace_entry __f;
 	  __f._M_pc = __pc;
-	  if (__s._M_push_back(__f))
-	    return 0;
-	  return 1;
+	  if (__s._M_push_back(__f)) [[likely]]
+	    return 0; // continue tracing
+	  return -1; // stop tracing due to error
 	};
+
+	if (__max_depth > 128)
+	  __max_depth = 64; // soft limit, _M_push_back will reallocate
+	else
+	  __cb = [](void* __data, uintptr_t __pc) {
+	    auto& __s = *static_cast<basic_stacktrace*>(__data);
+	    stacktrace_entry __f;
+	    __f._M_pc = __pc;
+	    if (__s.size() == __s._M_impl._M_capacity) [[unlikely]]
+	      return 1; // stop tracing due to reaching max depth
+	    if (__s._M_push_back(__f)) [[likely]]
+	      return 0; // continue tracing
+	    return -1; // stop tracing due to error
+	  };
+
+	if (_M_impl._M_allocate(_M_alloc, __max_depth)) [[likely]]
+	  return __cb;
+	return nullptr;
       }
 
       struct _Impl
@@ -573,24 +589,22 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	  return std::min(__size_max, __alloc_max);
 	}
 
-	// Precondition: _M_frames == nullptr
+	// Precondition: _M_frames == nullptr && __n != 0
 	pointer
 	_M_allocate(allocator_type& __alloc, size_type __n) noexcept
 	{
-	  __try
+	  if (__n <= _S_max_size(__alloc)) [[likely]]
 	    {
-	      if (0 < __n && __n <= _S_max_size(__alloc)) [[likely]]
+	      __try
 		{
 		  _M_frames = __alloc.allocate(__n);
 		  _M_capacity = __n;
 		  return _M_frames;
 		}
+	      __catch (...)
+		{
+		}
 	    }
-	  __catch (...)
-	    {
-	    }
-	  _M_frames = nullptr;
-	  _M_capacity = 0;
 	  return nullptr;
 	}
 
@@ -612,11 +626,12 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	  _M_size = 0;
 	}
 
+	// Precondition: _M_capacity != 0
 	bool
 	_M_push_back(allocator_type& __alloc,
 		     const stacktrace_entry& __f) noexcept
 	{
-	  if (_M_size == _M_capacity)
+	  if (_M_size == _M_capacity) [[unlikely]]
 	    {
 	      _Impl __tmp;
 	      if (auto __f = __tmp._M_allocate(__alloc, _M_capacity * 2))
@@ -624,13 +639,12 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	      else
 		return false;
 	      _M_deallocate(__alloc);
-	      std::swap(*this, __tmp);
+	      *this = __tmp;
 	    }
 	  stacktrace_entry* __addr = std::to_address(_M_frames + _M_size++);
 	  std::construct_at(__addr, __f);
 	  return true;
 	}
-
       };
 
       [[no_unique_address]] allocator_type  _M_alloc{};
diff --git a/libstdc++-v3/testsuite/19_diagnostics/stacktrace/current.cc b/libstdc++-v3/testsuite/19_diagnostics/stacktrace/current.cc
new file mode 100644
index 00000000000..184e23b460e
--- /dev/null
+++ b/libstdc++-v3/testsuite/19_diagnostics/stacktrace/current.cc
@@ -0,0 +1,86 @@
+// { dg-options "-std=gnu++23 -lstdc++_libbacktrace" }
+// { dg-do run { target c++23 } }
+// { dg-require-effective-target stacktrace }
+
+#include <stacktrace>
+#include <memory>
+#include <new>
+#include "testsuite_hooks.h"
+
+template<typename T>
+struct Allocator
+{
+  using value_type = T;
+  using propagate_on_container_move_assignment = std::true_type;
+
+  explicit
+  Allocator(unsigned max = -1u) : max_size(max) { }
+
+  template<typename U>
+    Allocator(const Allocator<U>& a) : max_size(a.max_size) { }
+
+  T*
+  allocate(std::size_t n)
+  {
+    if (n > max_size)
+      throw std::bad_alloc();
+
+    return std::allocator<T>().allocate(n);
+  }
+
+  void
+  deallocate(T* p, std::size_t n) noexcept
+  {
+    std::allocator<T>().deallocate(p, n);
+  }
+
+  bool operator==(const Allocator&) const = default;
+
+private:
+  unsigned max_size;
+};
+
+[[gnu::optimize("O0")]]
+void
+test_max_depth()
+{
+  using Stacktrace = std::basic_stacktrace<Allocator<std::stacktrace_entry>>;
+  using Alloc = typename Stacktrace::allocator_type;
+
+  [] { [] { [] { [] { [] { [] { [] { [] {
+    auto t = Stacktrace::current();
+    VERIFY( ! t.empty() );
+    const auto n = t.size(); // total number of frames
+    t = Stacktrace::current(8);
+    VERIFY( t.size() == (n - 8) );
+    t = Stacktrace::current(n);
+    VERIFY( t.empty() );
+    t = Stacktrace::current(n - 2);
+    VERIFY( t.size() == 2 );
+    t = Stacktrace::current(2, 6);
+    VERIFY( t.size() == 6 );
+    t = Stacktrace::current(n - 2, 6);
+    VERIFY( t.size() == 2 );
+
+    t = Stacktrace::current(Alloc(3));
+    // Full stacktrace is larger than 3 frames, so allocation fails:
+    VERIFY( t.empty() );
+    t = Stacktrace::current(3, Alloc(2));
+    // Stacktrace still too large after skipping 3 frames, so allocation fails:
+    VERIFY( t.empty() );
+    t = Stacktrace::current(0, 3, Alloc(3));
+    // Capacity for exactly 3 frames is allocated:
+    VERIFY( t.size() == 3 );
+    t = Stacktrace::current(2, 4, Alloc(4));
+    // Capacity for exactly 4 frames is allocated:
+    VERIFY( t.size() == 4 );
+    t = Stacktrace::current(0, 4, Alloc(3));
+    // Capacity for exactly 4 frames is requested, but allocation fails:
+    VERIFY( t.empty() );
+  }(); }(); }(); }(); }(); }(); }(); }();
+}
+
+int main()
+{
+  test_max_depth();
+}

