public inbox for gdb-patches@sourceware.org
 help / color / mirror / Atom feed
From: Tom Tromey <tom@tromey.com>
To: gdb-patches@sourceware.org
Cc: Tom Tromey <tom@tromey.com>
Subject: [PATCH 11/30] Return vector of results from parallel_for_each
Date: Wed, 25 Aug 2021 20:19:18 -0600	[thread overview]
Message-ID: <20210826021937.1490292-12-tom@tromey.com> (raw)
In-Reply-To: <20210826021937.1490292-1-tom@tromey.com>

This changes gdb::parallel_for_each to return a vector of the results.
However, if the passed-in function returns void, the return type
remains 'void'.  This functionality is used later to parallelize the
new indexer.
---
 gdbsupport/parallel-for.h | 142 +++++++++++++++++++++++++++++++-------
 gdbsupport/thread-pool.cc |   6 +-
 gdbsupport/thread-pool.h  |  23 +++++-
 3 files changed, 140 insertions(+), 31 deletions(-)

diff --git a/gdbsupport/parallel-for.h b/gdbsupport/parallel-for.h
index 54027e69402..e9098f94fb3 100644
--- a/gdbsupport/parallel-for.h
+++ b/gdbsupport/parallel-for.h
@@ -21,6 +21,7 @@
 #define GDBSUPPORT_PARALLEL_FOR_H
 
 #include <algorithm>
+#include <type_traits>
 #if CXX_STD_THREAD
 #include <thread>
 #include "gdbsupport/thread-pool.h"
@@ -29,6 +30,89 @@
 namespace gdb
 {
 
+namespace detail
+{
+
+/* This is a helper class that is used to accumulate results for
+   parallel_for.  There is a specialization for 'void', below.  */
+template<typename T>
+struct par_for_accumulator
+{
+public:
+
+  explicit par_for_accumulator (size_t n_threads)
+    : m_futures (n_threads)
+  {
+  }
+
+  /* The result type that is accumulated.  */
+  typedef std::vector<T> result_type;
+
+  /* Post the Ith task to a background thread, and store a future for
+     later.  */
+  void post (size_t i, std::function<T ()> task)
+  {
+    m_futures[i]
+      = gdb::thread_pool::g_thread_pool->post_task (std::move (task));
+  }
+
+  /* Invoke TASK in the current thread, then compute all the results
+     from all background tasks and put them into a result vector,
+     which is returned.  */
+  result_type finish (std::function<T ()> task)
+  {
+    result_type result (m_futures.size () + 1);
+
+    result.back () = task ();
+
+    for (size_t i = 0; i < m_futures.size (); ++i)
+      result[i] = m_futures[i].get ();
+
+    return result;
+  }
+
+private:
+  
+  /* A vector of futures coming from the tasks run in the
+     background.  */
+  std::vector<std::future<T>> m_futures;
+};
+
+/* See the generic template.  */
+template<>
+struct par_for_accumulator<void>
+{
+public:
+
+  explicit par_for_accumulator (size_t n_threads)
+    : m_futures (n_threads)
+  {
+  }
+
+  /* This specialization does not compute results.  */
+  typedef void result_type;
+
+  void post (size_t i, std::function<void ()> task)
+  {
+    m_futures[i]
+      = gdb::thread_pool::g_thread_pool->post_task (std::move (task));
+  }
+
+  result_type finish (std::function<void ()> task)
+  {
+    task ();
+
+    for (auto &future : m_futures)
+      future.wait ();
+  }
+
+private:
+
+  std::vector<std::future<void>> m_futures;
+};
+
+}
+
 /* A very simple "parallel for".  This splits the range of iterators
    into subranges, and then passes each subrange to the callback.  The
    work may or may not be done in separate threads.
@@ -39,22 +123,28 @@ namespace gdb
 
    The parameter N says how batching ought to be done -- there will be
    at least N elements processed per thread.  Setting N to 0 is not
-   allowed.  */
+   allowed.
+
+   If the function returns a non-void type, then a vector of the
+   results is returned.  The size of the resulting vector depends on
+   the number of threads that were used.  */
 
 template<class RandomIt, class RangeFunction>
-void
+typename gdb::detail::par_for_accumulator<
+    std::result_of_t<RangeFunction (RandomIt, RandomIt)>
+  >::result_type
 parallel_for_each (unsigned n, RandomIt first, RandomIt last,
 		   RangeFunction callback)
 {
-#if CXX_STD_THREAD
-  /* So we can use a local array below.  */
-  const size_t local_max = 16;
-  size_t n_threads = std::min (thread_pool::g_thread_pool->thread_count (),
-			       local_max);
-  size_t n_actual_threads = 0;
-  std::future<void> futures[local_max];
+  typedef typename std::result_of_t<RangeFunction (RandomIt, RandomIt)>
+    result_type;
+
+  size_t n_threads = 1;
 
+#if CXX_STD_THREAD
+  n_threads = thread_pool::g_thread_pool->thread_count ();
   size_t n_elements = last - first;
+  size_t elts_per_thread = 0;
   if (n_threads > 1)
     {
       /* Require that there should be at least N elements in a
@@ -62,29 +152,29 @@ parallel_for_each (unsigned n, RandomIt first, RandomIt last,
       gdb_assert (n > 0);
       if (n_elements / n_threads < n)
 	n_threads = std::max (n_elements / n, (size_t) 1);
-      size_t elts_per_thread = n_elements / n_threads;
-      n_actual_threads = n_threads - 1;
-      for (int i = 0; i < n_actual_threads; ++i)
-	{
-	  RandomIt end = first + elts_per_thread;
-	  auto task = [=] ()
-		      {
-			callback (first, end);
-		      };
-
-	  futures[i] = gdb::thread_pool::g_thread_pool->post_task (task);
-	  first = end;
-	}
+      elts_per_thread = n_elements / n_threads;
     }
 #endif /* CXX_STD_THREAD */
 
-  /* Process all the remaining elements in the main thread.  */
-  callback (first, last);
+  gdb::detail::par_for_accumulator<result_type> results (n_threads - 1);
 
 #if CXX_STD_THREAD
-  for (int i = 0; i < n_actual_threads; ++i)
-    futures[i].wait ();
+  for (int i = 0; i < n_threads - 1; ++i)
+    {
+      RandomIt end = first + elts_per_thread;
+      results.post (i, [=] ()
+        {
+	  return callback (first, end);
+	});
+      first = end;
+    }
 #endif /* CXX_STD_THREAD */
+
+  /* Process all the remaining elements in the main thread.  */
+  return results.finish ([=] ()
+    {
+      return callback (first, last);
+    });
 }
 
 }
diff --git a/gdbsupport/thread-pool.cc b/gdbsupport/thread-pool.cc
index 2bb75cc9cef..f2252ffc9fd 100644
--- a/gdbsupport/thread-pool.cc
+++ b/gdbsupport/thread-pool.cc
@@ -129,11 +129,10 @@ thread_pool::set_thread_count (size_t num_threads)
   m_thread_count = num_threads;
 }
 
-std::future<void>
-thread_pool::post_task (std::function<void ()> &&func)
+void
+thread_pool::do_post_task (std::packaged_task<void ()> &&func)
 {
   std::packaged_task<void ()> t (std::move (func));
-  std::future<void> f = t.get_future ();
 
   if (m_thread_count == 0)
     {
@@ -146,7 +145,6 @@ thread_pool::post_task (std::function<void ()> &&func)
       m_tasks.emplace (std::move (t));
       m_tasks_cv.notify_one ();
     }
-  return f;
 }
 
 void
diff --git a/gdbsupport/thread-pool.h b/gdbsupport/thread-pool.h
index 9bddaa9eaae..bf0e03f005a 100644
--- a/gdbsupport/thread-pool.h
+++ b/gdbsupport/thread-pool.h
@@ -58,7 +58,24 @@ class thread_pool
 
   /* Post a task to the thread pool.  A future is returned, which can
      be used to wait for the result.  */
-  std::future<void> post_task (std::function<void ()> &&func);
+  std::future<void> post_task (std::function<void ()> &&func)
+  {
+    std::packaged_task<void ()> task (std::move (func));
+    std::future<void> result = task.get_future ();
+    do_post_task (std::packaged_task<void ()> (std::move (task)));
+    return result;
+  }
+
+  /* Post a task to the thread pool.  A future is returned, which can
+     be used to wait for the result.  */
+  template<typename T>
+  std::future<T> post_task (std::function<T ()> &&func)
+  {
+    std::packaged_task<T ()> task (std::move (func));
+    std::future<T> result = task.get_future ();
+    do_post_task (std::packaged_task<void ()> (std::move (task)));
+    return result;
+  }
 
 private:
 
@@ -67,6 +84,10 @@ class thread_pool
   /* The callback for each worker thread.  */
   void thread_function ();
 
+  /* Post a task to the thread pool.  A future is returned, which can
+     be used to wait for the result.  */
+  void do_post_task (std::packaged_task<void ()> &&func);
+
   /* The current thread count.  */
   size_t m_thread_count = 0;
 
-- 
2.31.1


  parent reply	other threads:[~2021-08-26  2:19 UTC|newest]

Thread overview: 48+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2021-08-26  2:19 [PATCH 00/30] Rewrite the DWARF "partial" reader Tom Tromey
2021-08-26  2:19 ` [PATCH 01/30] Introduce make_unique_xstrndup Tom Tromey
2021-08-26  2:19 ` [PATCH 02/30] Split create_addrmap_from_aranges Tom Tromey
2021-08-26  2:19 ` [PATCH 03/30] Add dwarf2_per_cu_data::addresses_seen Tom Tromey
2021-08-26  2:19 ` [PATCH 04/30] Refactor dwarf2_get_pc_bounds Tom Tromey
2021-08-26  2:19 ` [PATCH 05/30] Allow ada_decode not to decode operators Tom Tromey
2021-08-26  2:19 ` [PATCH 06/30] Let skip_one_die not skip children Tom Tromey
2021-08-26  2:19 ` [PATCH 07/30] Add name splitting Tom Tromey
2021-08-26  2:19 ` [PATCH 08/30] Add new overload of dwarf5_djb_hash Tom Tromey
2021-08-26  2:19 ` [PATCH 09/30] Refactor build_type_psymtabs_reader Tom Tromey
2021-08-26  2:19 ` [PATCH 10/30] Add batching parameter to parallel_for_each Tom Tromey
2021-08-26  2:19 ` Tom Tromey [this message]
2021-08-27  6:20   ` [PATCH 11/30] Return vector of results from parallel_for_each Tom de Vries
2021-08-28 19:20     ` Tom Tromey
2021-08-26  2:19 ` [PATCH 12/30] Introduce DWARF abbrev cache Tom Tromey
2021-08-26  2:19 ` [PATCH 13/30] Statically examine abbrev properties Tom Tromey
2021-09-06 22:31   ` Lancelot SIX
2021-11-04 18:00     ` Tom Tromey
2021-08-26  2:19 ` [PATCH 14/30] Update skip_one_die for new " Tom Tromey
2021-08-26  2:19 ` [PATCH 15/30] Introduce the new DWARF index class Tom Tromey
2021-09-09 23:32   ` Lancelot SIX
2021-11-04 18:03     ` Tom Tromey
2021-08-26  2:19 ` [PATCH 16/30] The new DWARF indexer Tom Tromey
2021-08-26  2:19 ` [PATCH 17/30] Implement quick_symbol_functions for cooked DWARF index Tom Tromey
2021-08-26  2:19 ` [PATCH 18/30] Wire in the new DWARF indexer Tom Tromey
2021-08-26  2:19 ` [PATCH 19/30] Pre-read DWARF section data Tom Tromey
2021-08-26  2:19 ` [PATCH 20/30] Parallelize DWARF indexing Tom Tromey
2021-08-26  2:19 ` [PATCH 21/30] "Finalize" the DWARF index in the background Tom Tromey
2021-08-26  2:19 ` [PATCH 22/30] Rename write_psymtabs_to_index Tom Tromey
2021-08-26  2:19 ` [PATCH 23/30] Change the key type in psym_index_map Tom Tromey
2021-08-26  2:19 ` [PATCH 24/30] Change parameters to write_address_map Tom Tromey
2021-08-26  2:19 ` [PATCH 25/30] Genericize addrmap handling in the DWARF index writer Tom Tromey
2021-08-26  2:19 ` [PATCH 26/30] Adapt .gdb_index writer to new DWARF scanner Tom Tromey
2021-08-26  2:19 ` [PATCH 27/30] Adapt .debug_names " Tom Tromey
2021-08-26  2:19 ` [PATCH 28/30] Enable the new DWARF indexer Tom Tromey
2021-08-26  2:19 ` [PATCH 29/30] Delete DWARF psymtab code Tom Tromey
2021-08-26  2:19 ` [PATCH 30/30] Remove dwarf2_per_cu_data::v Tom Tromey
2021-08-26 20:32 ` [PATCH 00/30] Rewrite the DWARF "partial" reader Tom de Vries
2021-08-26 21:29   ` Tom Tromey
2021-08-27  7:31     ` Tom de Vries
2021-08-30 15:04       ` Tom Tromey
2021-09-06 19:46         ` Tom Tromey
2021-09-07 10:58           ` Tom de Vries
2021-09-07 12:16             ` Tom de Vries
2021-10-29 23:06               ` Tom Tromey
2021-09-09 19:00 ` Wei-min Pan
2021-09-11 21:08   ` Tom Tromey
2021-09-13 16:50     ` Weimin Pan

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20210826021937.1490292-12-tom@tromey.com \
    --to=tom@tromey.com \
    --cc=gdb-patches@sourceware.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox; see mirroring instructions
for how to clone and mirror all data and code used for this inbox,
as well as URLs for read-only IMAP folder(s) and NNTP newsgroup(s).