* [PATCH v5 01/25] gdb: rename abbrev_cache to abbrev_table_cache
From: Simon Marchi @ 2024-11-04 18:27 UTC (permalink / raw)
To: gdb-patches; +Cc: Simon Marchi
This cache holds `abbrev_table` objects, so I think it's clearer and
more consistent to name it `abbrev_table_cache`. Rename it and
everything that goes along with it.
Change-Id: I43448c0aa538dd2c3ae5efd2f7b3e7b827409d8c
---
gdb/Makefile.in | 2 +-
.../{abbrev-cache.c => abbrev-table-cache.c} | 10 +++++-----
.../{abbrev-cache.h => abbrev-table-cache.h} | 14 +++++++-------
gdb/dwarf2/cooked-index.h | 15 +++++++--------
gdb/dwarf2/read.c | 12 ++++++------
5 files changed, 26 insertions(+), 27 deletions(-)
rename gdb/dwarf2/{abbrev-cache.c => abbrev-table-cache.c} (87%)
rename gdb/dwarf2/{abbrev-cache.h => abbrev-table-cache.h} (86%)
diff --git a/gdb/Makefile.in b/gdb/Makefile.in
index ecb323d8f022..823817889920 100644
--- a/gdb/Makefile.in
+++ b/gdb/Makefile.in
@@ -1088,7 +1088,7 @@ COMMON_SFILES = \
displaced-stepping.c \
dummy-frame.c \
dwarf2/abbrev.c \
- dwarf2/abbrev-cache.c \
+ dwarf2/abbrev-table-cache.c \
dwarf2/ada-imported.c \
dwarf2/aranges.c \
dwarf2/attribute.c \
diff --git a/gdb/dwarf2/abbrev-cache.c b/gdb/dwarf2/abbrev-table-cache.c
similarity index 87%
rename from gdb/dwarf2/abbrev-cache.c
rename to gdb/dwarf2/abbrev-table-cache.c
index 7e1ff9ceb15d..c29cd09478f4 100644
--- a/gdb/dwarf2/abbrev-cache.c
+++ b/gdb/dwarf2/abbrev-table-cache.c
@@ -17,12 +17,12 @@
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>. */
-#include "dwarf2/abbrev-cache.h"
+#include "dwarf2/abbrev-table-cache.h"
/* Hash function for an abbrev table. */
hashval_t
-abbrev_cache::hash_table (const void *item)
+abbrev_table_cache::hash_table (const void *item)
{
const struct abbrev_table *table = (const struct abbrev_table *) item;
return to_underlying (table->sect_off);
@@ -31,7 +31,7 @@ abbrev_cache::hash_table (const void *item)
/* Comparison function for abbrev table. */
int
-abbrev_cache::eq_table (const void *lhs, const void *rhs)
+abbrev_table_cache::eq_table (const void *lhs, const void *rhs)
{
const struct abbrev_table *l_table = (const struct abbrev_table *) lhs;
const search_key *key = (const search_key *) rhs;
@@ -39,7 +39,7 @@ abbrev_cache::eq_table (const void *lhs, const void *rhs)
&& l_table->sect_off == key->offset);
}
-abbrev_cache::abbrev_cache ()
+abbrev_table_cache::abbrev_table_cache ()
: m_tables (htab_create_alloc (20, hash_table, eq_table,
htab_delete_entry<abbrev_table>,
xcalloc, xfree))
@@ -47,7 +47,7 @@ abbrev_cache::abbrev_cache ()
}
void
-abbrev_cache::add (abbrev_table_up table)
+abbrev_table_cache::add (abbrev_table_up table)
{
/* We allow this as a convenience to the caller. */
if (table == nullptr)
diff --git a/gdb/dwarf2/abbrev-cache.h b/gdb/dwarf2/abbrev-table-cache.h
similarity index 86%
rename from gdb/dwarf2/abbrev-cache.h
rename to gdb/dwarf2/abbrev-table-cache.h
index 99d1f93f750a..4f9111c51f0a 100644
--- a/gdb/dwarf2/abbrev-cache.h
+++ b/gdb/dwarf2/abbrev-table-cache.h
@@ -17,17 +17,17 @@
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>. */
-#ifndef GDB_DWARF2_ABBREV_CACHE_H
-#define GDB_DWARF2_ABBREV_CACHE_H
+#ifndef GDB_DWARF2_ABBREV_TABLE_CACHE_H
+#define GDB_DWARF2_ABBREV_TABLE_CACHE_H
#include "dwarf2/abbrev.h"
-/* An abbrev cache holds abbrev tables for easier reuse. */
-class abbrev_cache
+/* An abbrev table cache holds abbrev tables for easier reuse. */
+class abbrev_table_cache
{
public:
- abbrev_cache ();
- DISABLE_COPY_AND_ASSIGN (abbrev_cache);
+ abbrev_table_cache ();
+ DISABLE_COPY_AND_ASSIGN (abbrev_table_cache);
/* Find an abbrev table coming from the abbrev section SECTION at
offset OFFSET. Return the table, or nullptr if it has not yet
@@ -62,4 +62,4 @@ class abbrev_cache
htab_up m_tables;
};
-#endif /* GDB_DWARF2_ABBREV_CACHE_H */
+#endif /* GDB_DWARF2_ABBREV_TABLE_CACHE_H */
diff --git a/gdb/dwarf2/cooked-index.h b/gdb/dwarf2/cooked-index.h
index 2807f5c3b4ab..4b781529c53e 100644
--- a/gdb/dwarf2/cooked-index.h
+++ b/gdb/dwarf2/cooked-index.h
@@ -30,7 +30,7 @@
#include "gdbsupport/iterator-range.h"
#include "dwarf2/mapped-index.h"
#include "dwarf2/read.h"
-#include "dwarf2/abbrev-cache.h"
+#include "dwarf2/abbrev-table-cache.h"
#include "dwarf2/parent-map.h"
#include "gdbsupport/range-chain.h"
#include "complaints.h"
@@ -376,11 +376,9 @@ class cooked_index_storage
cooked_index_storage ();
DISABLE_COPY_AND_ASSIGN (cooked_index_storage);
- /* Return the current abbrev cache. */
- abbrev_cache *get_abbrev_cache ()
- {
- return &m_abbrev_cache;
- }
+ /* Return the current abbrev table cache. */
+ abbrev_table_cache *get_abbrev_table_cache ()
+ { return &m_abbrev_table_cache; }
/* Return the DIE reader corresponding to PER_CU. If no such reader
has been registered, return NULL. */
@@ -436,8 +434,9 @@ class cooked_index_storage
/* Equality function for cutu_reader. */
static int eq_cutu_reader (const void *a, const void *b);
- /* The abbrev cache used by this indexer. */
- abbrev_cache m_abbrev_cache;
+ /* The abbrev table cache used by this indexer. */
+ abbrev_table_cache m_abbrev_table_cache;
+
/* A hash table of cutu_reader objects. */
htab_up m_reader_hash;
/* The index shard that is being constructed. */
diff --git a/gdb/dwarf2/read.c b/gdb/dwarf2/read.c
index 30ef69aea303..1a67aff8a84f 100644
--- a/gdb/dwarf2/read.c
+++ b/gdb/dwarf2/read.c
@@ -89,7 +89,7 @@
#include "gdbsupport/pathstuff.h"
#include "count-one-bits.h"
#include <unordered_set>
-#include "dwarf2/abbrev-cache.h"
+#include "dwarf2/abbrev-table-cache.h"
#include "cooked-index.h"
#include "gdbsupport/thread-pool.h"
#include "run-on-main-thread.h"
@@ -588,7 +588,7 @@ class cutu_reader : public die_reader_specs
struct abbrev_table *abbrev_table,
dwarf2_cu *existing_cu,
bool skip_partial,
- abbrev_cache *cache = nullptr);
+ abbrev_table_cache *cache = nullptr);
explicit cutu_reader (struct dwarf2_per_cu_data *this_cu,
dwarf2_per_objfile *per_objfile,
@@ -4012,7 +4012,7 @@ cutu_reader::cutu_reader (dwarf2_per_cu_data *this_cu,
struct abbrev_table *abbrev_table,
dwarf2_cu *existing_cu,
bool skip_partial,
- abbrev_cache *cache)
+ abbrev_table_cache *cache)
: die_reader_specs {},
m_this_cu (this_cu)
{
@@ -4431,7 +4431,7 @@ cooked_index_storage::get_reader (dwarf2_per_cu_data *per_cu)
cutu_reader *
cooked_index_storage::preserve (std::unique_ptr<cutu_reader> reader)
{
- m_abbrev_cache.add (reader->release_abbrev_table ());
+ m_abbrev_table_cache.add (reader->release_abbrev_table ());
int index = reader->cu->per_cu->index;
void **slot = htab_find_slot_with_hash (m_reader_hash.get (), &index,
@@ -4609,7 +4609,7 @@ process_psymtab_comp_unit (dwarf2_per_cu_data *this_cu,
if (reader == nullptr)
{
cutu_reader new_reader (this_cu, per_objfile, nullptr, nullptr, false,
- storage->get_abbrev_cache ());
+ storage->get_abbrev_table_cache ());
if (new_reader.comp_unit_die == nullptr || new_reader.dummy_p)
return;
@@ -16140,7 +16140,7 @@ cooked_indexer::ensure_cu_exists (cutu_reader *reader,
if (result == nullptr)
{
cutu_reader new_reader (per_cu, per_objfile, nullptr, nullptr, false,
- m_index_storage->get_abbrev_cache ());
+ m_index_storage->get_abbrev_table_cache ());
if (new_reader.dummy_p || new_reader.comp_unit_die == nullptr
|| !new_reader.comp_unit_die->has_children)
--
2.47.0
* [PATCH v5 02/25] gdb: constification around abbrev_table_cache and abbrev_table
From: Simon Marchi @ 2024-11-04 18:27 UTC (permalink / raw)
To: gdb-patches; +Cc: Simon Marchi
Make `abbrev_table_cache::find` const, make it return a pointer to
`const abbrev_table`, and adjust the fallout.
Make `cooked_index_storage::get_abbrev_table_cache` const, and make it
return a pointer to const `abbrev_table_cache`.
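As an illustration (a sketch only, not part of this patch; the helper and
variable names are made up), the constified interface lets code that only
reads the cache take it by const reference:

/* Hypothetical helper: look up an abbrev table through a const
   reference to the cache.  The returned table is read-only.  */
static const abbrev_table *
lookup_table (const abbrev_table_cache &cache,
              dwarf2_section_info *section, sect_offset off)
{
  /* find is const and yields a pointer to const abbrev_table.  */
  return cache.find (section, off);
}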
Change-Id: If63b4b3a4c253f3bd640b13bce4a854eb2d75ece
---
gdb/dwarf2/abbrev-table-cache.h | 3 ++-
gdb/dwarf2/cooked-index.h | 2 +-
gdb/dwarf2/read.c | 12 ++++++------
3 files changed, 9 insertions(+), 8 deletions(-)
diff --git a/gdb/dwarf2/abbrev-table-cache.h b/gdb/dwarf2/abbrev-table-cache.h
index 4f9111c51f0a..bcec005f2c98 100644
--- a/gdb/dwarf2/abbrev-table-cache.h
+++ b/gdb/dwarf2/abbrev-table-cache.h
@@ -32,7 +32,8 @@ class abbrev_table_cache
/* Find an abbrev table coming from the abbrev section SECTION at
offset OFFSET. Return the table, or nullptr if it has not yet
been registered. */
- abbrev_table *find (struct dwarf2_section_info *section, sect_offset offset)
+ const abbrev_table *find (dwarf2_section_info *section,
+ sect_offset offset) const
{
search_key key = { section, offset };
diff --git a/gdb/dwarf2/cooked-index.h b/gdb/dwarf2/cooked-index.h
index 4b781529c53e..6f9311bd108c 100644
--- a/gdb/dwarf2/cooked-index.h
+++ b/gdb/dwarf2/cooked-index.h
@@ -377,7 +377,7 @@ class cooked_index_storage
DISABLE_COPY_AND_ASSIGN (cooked_index_storage);
/* Return the current abbrev table cache. */
- abbrev_table_cache *get_abbrev_table_cache ()
+ const abbrev_table_cache *get_abbrev_table_cache () const
{ return &m_abbrev_table_cache; }
/* Return the DIE reader corresponding to PER_CU. If no such reader
diff --git a/gdb/dwarf2/read.c b/gdb/dwarf2/read.c
index 1a67aff8a84f..34d2b7d5abad 100644
--- a/gdb/dwarf2/read.c
+++ b/gdb/dwarf2/read.c
@@ -573,7 +573,7 @@ struct die_reader_specs
const gdb_byte *buffer_end;
/* The abbreviation table to use when reading the DIEs. */
- struct abbrev_table *abbrev_table;
+ const struct abbrev_table *abbrev_table;
};
/* A subclass of die_reader_specs that holds storage and has complex
@@ -585,10 +585,10 @@ class cutu_reader : public die_reader_specs
cutu_reader (dwarf2_per_cu_data *this_cu,
dwarf2_per_objfile *per_objfile,
- struct abbrev_table *abbrev_table,
+ const struct abbrev_table *abbrev_table,
dwarf2_cu *existing_cu,
bool skip_partial,
- abbrev_table_cache *cache = nullptr);
+ const abbrev_table_cache *cache = nullptr);
explicit cutu_reader (struct dwarf2_per_cu_data *this_cu,
dwarf2_per_objfile *per_objfile,
@@ -3710,7 +3710,7 @@ init_cu_die_reader (struct die_reader_specs *reader,
struct dwarf2_cu *cu,
struct dwarf2_section_info *section,
struct dwo_file *dwo_file,
- struct abbrev_table *abbrev_table)
+ const abbrev_table *abbrev_table)
{
gdb_assert (section->readin && section->buffer != NULL);
reader->abfd = section->get_bfd_owner ();
@@ -4009,10 +4009,10 @@ cutu_reader::init_tu_and_read_dwo_dies (dwarf2_per_cu_data *this_cu,
cutu_reader::cutu_reader (dwarf2_per_cu_data *this_cu,
dwarf2_per_objfile *per_objfile,
- struct abbrev_table *abbrev_table,
+ const struct abbrev_table *abbrev_table,
dwarf2_cu *existing_cu,
bool skip_partial,
- abbrev_table_cache *cache)
+ const abbrev_table_cache *cache)
: die_reader_specs {},
m_this_cu (this_cu)
{
--
2.47.0
* [PATCH v5 03/25] gdb: make `cooked_index_storage::get_abbrev_table_cache` return a reference
From: Simon Marchi @ 2024-11-04 18:27 UTC (permalink / raw)
To: gdb-patches; +Cc: Simon Marchi
It can never return nullptr, so return a reference instead of a pointer.
Change-Id: Ibc6f16eb74dc16059152982600ca9f426d7f80a4
---
gdb/dwarf2/cooked-index.h | 4 ++--
gdb/dwarf2/read.c | 4 ++--
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/gdb/dwarf2/cooked-index.h b/gdb/dwarf2/cooked-index.h
index 6f9311bd108c..d1d81f8e2a54 100644
--- a/gdb/dwarf2/cooked-index.h
+++ b/gdb/dwarf2/cooked-index.h
@@ -377,8 +377,8 @@ class cooked_index_storage
DISABLE_COPY_AND_ASSIGN (cooked_index_storage);
/* Return the current abbrev table cache. */
- const abbrev_table_cache *get_abbrev_table_cache () const
- { return &m_abbrev_table_cache; }
+ const abbrev_table_cache &get_abbrev_table_cache () const
+ { return m_abbrev_table_cache; }
/* Return the DIE reader corresponding to PER_CU. If no such reader
has been registered, return NULL. */
diff --git a/gdb/dwarf2/read.c b/gdb/dwarf2/read.c
index 34d2b7d5abad..e2be5b150c84 100644
--- a/gdb/dwarf2/read.c
+++ b/gdb/dwarf2/read.c
@@ -4609,7 +4609,7 @@ process_psymtab_comp_unit (dwarf2_per_cu_data *this_cu,
if (reader == nullptr)
{
cutu_reader new_reader (this_cu, per_objfile, nullptr, nullptr, false,
- storage->get_abbrev_table_cache ());
+ &storage->get_abbrev_table_cache ());
if (new_reader.comp_unit_die == nullptr || new_reader.dummy_p)
return;
@@ -16140,7 +16140,7 @@ cooked_indexer::ensure_cu_exists (cutu_reader *reader,
if (result == nullptr)
{
cutu_reader new_reader (per_cu, per_objfile, nullptr, nullptr, false,
- m_index_storage->get_abbrev_table_cache ());
+ &m_index_storage->get_abbrev_table_cache ());
if (new_reader.dummy_p || new_reader.comp_unit_die == nullptr
|| !new_reader.comp_unit_die->has_children)
--
2.47.0
* [PATCH v5 04/25] gdbsupport: add unordered_dense.h 4.4.0
From: Simon Marchi @ 2024-11-04 18:27 UTC (permalink / raw)
To: gdb-patches; +Cc: Simon Marchi
Add a copy of unordered_dense.h from [1]. This file implements an
efficient hash map and hash set with a nice C++ interface (a near
drop-in for std::unordered_map and std::unordered_set). This is
expected to be used to replace uses of `htab_t`, for improved code
readability and type safety. Performance-wise, it is preferred over
the std types (std::unordered_map and std::unordered_set) because it
uses open addressing, whereas the std types use closed addressing
(separate chaining).
I chose this particular implementation because it is simple to use, it's
a standalone header and it typically performs well in benchmarks I have
seen (e.g. [2]). The license being MIT, my understanding is that it's
not a problem to use it and re-distribute it.
Add two additional files, gdbsupport/unordered_map.h and
gdbsupport/unordered_set.h, which make the map and set offered by
gdbsupport/unordered_dense.h available as gdb::unordered_map and
gdb::unordered_set.
[1] https://github.com/martinus/unordered_dense
[2] https://jacksonallan.github.io/c_cpp_hash_tables_benchmark/#conclusion-which-hash-table-to-choose
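For illustration, here is a minimal usage sketch of the resulting wrappers
(the functions and data below are made up for the example, not part of this
patch); the interface mirrors std::unordered_map:

#include "gdbsupport/unordered_map.h"
#include <string>
#include <vector>

/* Hypothetical example: count how many times each word appears.  */
static gdb::unordered_map<std::string, int>
count_words (const std::vector<std::string> &words)
{
  /* Near drop-in for std::unordered_map, backed by
     ankerl::unordered_dense::map (open addressing, densely stored).  */
  gdb::unordered_map<std::string, int> counts;

  for (const std::string &w : words)
    ++counts[w];

  return counts;
}

/* Hypothetical example: look up a word's count, defaulting to 0.  */
static int
word_count (const gdb::unordered_map<std::string, int> &counts,
            const std::string &word)
{
  auto it = counts.find (word);
  return it != counts.end () ? it->second : 0;
}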
Change-Id: I0c7469ccf9a14540465479e58b2a5140a2440a7d
---
gdbsupport/unordered_dense.h | 2032 ++++++++++++++++++++++++++++++++++
gdbsupport/unordered_map.h | 37 +
gdbsupport/unordered_set.h | 36 +
3 files changed, 2105 insertions(+)
create mode 100644 gdbsupport/unordered_dense.h
create mode 100644 gdbsupport/unordered_map.h
create mode 100644 gdbsupport/unordered_set.h
diff --git a/gdbsupport/unordered_dense.h b/gdbsupport/unordered_dense.h
new file mode 100644
index 000000000000..2aaacd617a6f
--- /dev/null
+++ b/gdbsupport/unordered_dense.h
@@ -0,0 +1,2032 @@
+///////////////////////// ankerl::unordered_dense::{map, set} /////////////////////////
+
+// A fast & densely stored hashmap and hashset based on robin-hood backward shift deletion.
+// Version 4.4.0
+// https://github.com/martinus/unordered_dense
+//
+// Licensed under the MIT License <http://opensource.org/licenses/MIT>.
+// SPDX-License-Identifier: MIT
+// Copyright (c) 2022-2023 Martin Leitner-Ankerl <martin.ankerl@gmail.com>
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+#ifndef ANKERL_UNORDERED_DENSE_H
+#define ANKERL_UNORDERED_DENSE_H
+
+// see https://semver.org/spec/v2.0.0.html
+#define ANKERL_UNORDERED_DENSE_VERSION_MAJOR 4 // NOLINT(cppcoreguidelines-macro-usage) incompatible API changes
+#define ANKERL_UNORDERED_DENSE_VERSION_MINOR 4 // NOLINT(cppcoreguidelines-macro-usage) backwards compatible functionality
+#define ANKERL_UNORDERED_DENSE_VERSION_PATCH 0 // NOLINT(cppcoreguidelines-macro-usage) backwards compatible bug fixes
+
+// API versioning with inline namespace, see https://www.foonathan.net/2018/11/inline-namespaces/
+
+// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
+#define ANKERL_UNORDERED_DENSE_VERSION_CONCAT1(major, minor, patch) v##major##_##minor##_##patch
+// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
+#define ANKERL_UNORDERED_DENSE_VERSION_CONCAT(major, minor, patch) ANKERL_UNORDERED_DENSE_VERSION_CONCAT1(major, minor, patch)
+#define ANKERL_UNORDERED_DENSE_NAMESPACE \
+ ANKERL_UNORDERED_DENSE_VERSION_CONCAT( \
+ ANKERL_UNORDERED_DENSE_VERSION_MAJOR, ANKERL_UNORDERED_DENSE_VERSION_MINOR, ANKERL_UNORDERED_DENSE_VERSION_PATCH)
+
+#if defined(_MSVC_LANG)
+# define ANKERL_UNORDERED_DENSE_CPP_VERSION _MSVC_LANG
+#else
+# define ANKERL_UNORDERED_DENSE_CPP_VERSION __cplusplus
+#endif
+
+#if defined(__GNUC__)
+// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
+# define ANKERL_UNORDERED_DENSE_PACK(decl) decl __attribute__((__packed__))
+#elif defined(_MSC_VER)
+// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
+# define ANKERL_UNORDERED_DENSE_PACK(decl) __pragma(pack(push, 1)) decl __pragma(pack(pop))
+#endif
+
+// exceptions
+#if defined(__cpp_exceptions) || defined(__EXCEPTIONS) || defined(_CPPUNWIND)
+# define ANKERL_UNORDERED_DENSE_HAS_EXCEPTIONS() 1 // NOLINT(cppcoreguidelines-macro-usage)
+#else
+# define ANKERL_UNORDERED_DENSE_HAS_EXCEPTIONS() 0 // NOLINT(cppcoreguidelines-macro-usage)
+#endif
+#ifdef _MSC_VER
+# define ANKERL_UNORDERED_DENSE_NOINLINE __declspec(noinline)
+#else
+# define ANKERL_UNORDERED_DENSE_NOINLINE __attribute__((noinline))
+#endif
+
+// defined in unordered_dense.cpp
+#if !defined(ANKERL_UNORDERED_DENSE_EXPORT)
+# define ANKERL_UNORDERED_DENSE_EXPORT
+#endif
+
+#if ANKERL_UNORDERED_DENSE_CPP_VERSION < 201703L
+# error ankerl::unordered_dense requires C++17 or higher
+#else
+# include <array> // for array
+# include <cstdint> // for uint64_t, uint32_t, uint8_t, UINT64_C
+# include <cstring> // for size_t, memcpy, memset
+# include <functional> // for equal_to, hash
+# include <initializer_list> // for initializer_list
+# include <iterator> // for pair, distance
+# include <limits> // for numeric_limits
+# include <memory> // for allocator, allocator_traits, shared_ptr
+# include <optional> // for optional
+# include <stdexcept> // for out_of_range
+# include <string> // for basic_string
+# include <string_view> // for basic_string_view, hash
+# include <tuple> // for forward_as_tuple
+# include <type_traits> // for enable_if_t, declval, conditional_t, ena...
+# include <utility> // for forward, exchange, pair, as_const, piece...
+# include <vector> // for vector
+# if ANKERL_UNORDERED_DENSE_HAS_EXCEPTIONS() == 0
+# include <cstdlib> // for abort
+# endif
+
+# if defined(__has_include)
+# if __has_include(<memory_resource>)
+# define ANKERL_UNORDERED_DENSE_PMR std::pmr // NOLINT(cppcoreguidelines-macro-usage)
+# include <memory_resource> // for polymorphic_allocator
+# elif __has_include(<experimental/memory_resource>)
+# define ANKERL_UNORDERED_DENSE_PMR std::experimental::pmr // NOLINT(cppcoreguidelines-macro-usage)
+# include <experimental/memory_resource> // for polymorphic_allocator
+# endif
+# endif
+
+# if defined(_MSC_VER) && defined(_M_X64)
+# include <intrin.h>
+# pragma intrinsic(_umul128)
+# endif
+
+# if defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__clang__)
+# define ANKERL_UNORDERED_DENSE_LIKELY(x) __builtin_expect(x, 1) // NOLINT(cppcoreguidelines-macro-usage)
+# define ANKERL_UNORDERED_DENSE_UNLIKELY(x) __builtin_expect(x, 0) // NOLINT(cppcoreguidelines-macro-usage)
+# else
+# define ANKERL_UNORDERED_DENSE_LIKELY(x) (x) // NOLINT(cppcoreguidelines-macro-usage)
+# define ANKERL_UNORDERED_DENSE_UNLIKELY(x) (x) // NOLINT(cppcoreguidelines-macro-usage)
+# endif
+
+namespace ankerl::unordered_dense {
+inline namespace ANKERL_UNORDERED_DENSE_NAMESPACE {
+
+namespace detail {
+
+# if ANKERL_UNORDERED_DENSE_HAS_EXCEPTIONS()
+
+// make sure this is not inlined as it is slow and dramatically enlarges code, thus making other
+// inlinings more difficult. Throws are also generally the slow path.
+[[noreturn]] inline ANKERL_UNORDERED_DENSE_NOINLINE void on_error_key_not_found() {
+ throw std::out_of_range("ankerl::unordered_dense::map::at(): key not found");
+}
+[[noreturn]] inline ANKERL_UNORDERED_DENSE_NOINLINE void on_error_bucket_overflow() {
+ throw std::overflow_error("ankerl::unordered_dense: reached max bucket size, cannot increase size");
+}
+[[noreturn]] inline ANKERL_UNORDERED_DENSE_NOINLINE void on_error_too_many_elements() {
+ throw std::out_of_range("ankerl::unordered_dense::map::replace(): too many elements");
+}
+
+# else
+
+[[noreturn]] inline void on_error_key_not_found() {
+ abort();
+}
+[[noreturn]] inline void on_error_bucket_overflow() {
+ abort();
+}
+[[noreturn]] inline void on_error_too_many_elements() {
+ abort();
+}
+
+# endif
+
+} // namespace detail
+
+// hash ///////////////////////////////////////////////////////////////////////
+
+// This is a stripped-down implementation of wyhash: https://github.com/wangyi-fudan/wyhash
+// No big-endian support (because different values on different machines don't matter),
+// hardcodes seed and the secret, reformats the code, and clang-tidy fixes.
+namespace detail::wyhash {
+
+inline void mum(uint64_t* a, uint64_t* b) {
+# if defined(__SIZEOF_INT128__)
+ __uint128_t r = *a;
+ r *= *b;
+ *a = static_cast<uint64_t>(r);
+ *b = static_cast<uint64_t>(r >> 64U);
+# elif defined(_MSC_VER) && defined(_M_X64)
+ *a = _umul128(*a, *b, b);
+# else
+ uint64_t ha = *a >> 32U;
+ uint64_t hb = *b >> 32U;
+ uint64_t la = static_cast<uint32_t>(*a);
+ uint64_t lb = static_cast<uint32_t>(*b);
+ uint64_t hi{};
+ uint64_t lo{};
+ uint64_t rh = ha * hb;
+ uint64_t rm0 = ha * lb;
+ uint64_t rm1 = hb * la;
+ uint64_t rl = la * lb;
+ uint64_t t = rl + (rm0 << 32U);
+ auto c = static_cast<uint64_t>(t < rl);
+ lo = t + (rm1 << 32U);
+ c += static_cast<uint64_t>(lo < t);
+ hi = rh + (rm0 >> 32U) + (rm1 >> 32U) + c;
+ *a = lo;
+ *b = hi;
+# endif
+}
+
+// multiply and xor mix function, aka MUM
+[[nodiscard]] inline auto mix(uint64_t a, uint64_t b) -> uint64_t {
+ mum(&a, &b);
+ return a ^ b;
+}
+
+// read functions. WARNING: we don't care about endianness, so results are different on big endian!
+[[nodiscard]] inline auto r8(const uint8_t* p) -> uint64_t {
+ uint64_t v{};
+ std::memcpy(&v, p, 8U);
+ return v;
+}
+
+[[nodiscard]] inline auto r4(const uint8_t* p) -> uint64_t {
+ uint32_t v{};
+ std::memcpy(&v, p, 4);
+ return v;
+}
+
+// reads 1, 2, or 3 bytes
+[[nodiscard]] inline auto r3(const uint8_t* p, size_t k) -> uint64_t {
+ return (static_cast<uint64_t>(p[0]) << 16U) | (static_cast<uint64_t>(p[k >> 1U]) << 8U) | p[k - 1];
+}
+
+[[maybe_unused]] [[nodiscard]] inline auto hash(void const* key, size_t len) -> uint64_t {
+ static constexpr auto secret = std::array{UINT64_C(0xa0761d6478bd642f),
+ UINT64_C(0xe7037ed1a0b428db),
+ UINT64_C(0x8ebc6af09c88c6e3),
+ UINT64_C(0x589965cc75374cc3)};
+
+ auto const* p = static_cast<uint8_t const*>(key);
+ uint64_t seed = secret[0];
+ uint64_t a{};
+ uint64_t b{};
+ if (ANKERL_UNORDERED_DENSE_LIKELY(len <= 16)) {
+ if (ANKERL_UNORDERED_DENSE_LIKELY(len >= 4)) {
+ a = (r4(p) << 32U) | r4(p + ((len >> 3U) << 2U));
+ b = (r4(p + len - 4) << 32U) | r4(p + len - 4 - ((len >> 3U) << 2U));
+ } else if (ANKERL_UNORDERED_DENSE_LIKELY(len > 0)) {
+ a = r3(p, len);
+ b = 0;
+ } else {
+ a = 0;
+ b = 0;
+ }
+ } else {
+ size_t i = len;
+ if (ANKERL_UNORDERED_DENSE_UNLIKELY(i > 48)) {
+ uint64_t see1 = seed;
+ uint64_t see2 = seed;
+ do {
+ seed = mix(r8(p) ^ secret[1], r8(p + 8) ^ seed);
+ see1 = mix(r8(p + 16) ^ secret[2], r8(p + 24) ^ see1);
+ see2 = mix(r8(p + 32) ^ secret[3], r8(p + 40) ^ see2);
+ p += 48;
+ i -= 48;
+ } while (ANKERL_UNORDERED_DENSE_LIKELY(i > 48));
+ seed ^= see1 ^ see2;
+ }
+ while (ANKERL_UNORDERED_DENSE_UNLIKELY(i > 16)) {
+ seed = mix(r8(p) ^ secret[1], r8(p + 8) ^ seed);
+ i -= 16;
+ p += 16;
+ }
+ a = r8(p + i - 16);
+ b = r8(p + i - 8);
+ }
+
+ return mix(secret[1] ^ len, mix(a ^ secret[1], b ^ seed));
+}
+
+[[nodiscard]] inline auto hash(uint64_t x) -> uint64_t {
+ return detail::wyhash::mix(x, UINT64_C(0x9E3779B97F4A7C15));
+}
+
+} // namespace detail::wyhash
+
+ANKERL_UNORDERED_DENSE_EXPORT template <typename T, typename Enable = void>
+struct hash {
+ auto operator()(T const& obj) const noexcept(noexcept(std::declval<std::hash<T>>().operator()(std::declval<T const&>())))
+ -> uint64_t {
+ return std::hash<T>{}(obj);
+ }
+};
+
+template <typename CharT>
+struct hash<std::basic_string<CharT>> {
+ using is_avalanching = void;
+ auto operator()(std::basic_string<CharT> const& str) const noexcept -> uint64_t {
+ return detail::wyhash::hash(str.data(), sizeof(CharT) * str.size());
+ }
+};
+
+template <typename CharT>
+struct hash<std::basic_string_view<CharT>> {
+ using is_avalanching = void;
+ auto operator()(std::basic_string_view<CharT> const& sv) const noexcept -> uint64_t {
+ return detail::wyhash::hash(sv.data(), sizeof(CharT) * sv.size());
+ }
+};
+
+template <class T>
+struct hash<T*> {
+ using is_avalanching = void;
+ auto operator()(T* ptr) const noexcept -> uint64_t {
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-reinterpret-cast)
+ return detail::wyhash::hash(reinterpret_cast<uintptr_t>(ptr));
+ }
+};
+
+template <class T>
+struct hash<std::unique_ptr<T>> {
+ using is_avalanching = void;
+ auto operator()(std::unique_ptr<T> const& ptr) const noexcept -> uint64_t {
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-reinterpret-cast)
+ return detail::wyhash::hash(reinterpret_cast<uintptr_t>(ptr.get()));
+ }
+};
+
+template <class T>
+struct hash<std::shared_ptr<T>> {
+ using is_avalanching = void;
+ auto operator()(std::shared_ptr<T> const& ptr) const noexcept -> uint64_t {
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-reinterpret-cast)
+ return detail::wyhash::hash(reinterpret_cast<uintptr_t>(ptr.get()));
+ }
+};
+
+template <typename Enum>
+struct hash<Enum, typename std::enable_if<std::is_enum<Enum>::value>::type> {
+ using is_avalanching = void;
+ auto operator()(Enum e) const noexcept -> uint64_t {
+ using underlying = typename std::underlying_type_t<Enum>;
+ return detail::wyhash::hash(static_cast<underlying>(e));
+ }
+};
+
+template <typename... Args>
+struct tuple_hash_helper {
+ // Converts the value into 64bit. If it is an integral type, just cast it. Mixing is doing the rest.
+ // If it isn't an integral we need to hash it.
+ template <typename Arg>
+ [[nodiscard]] constexpr static auto to64(Arg const& arg) -> uint64_t {
+ if constexpr (std::is_integral_v<Arg> || std::is_enum_v<Arg>) {
+ return static_cast<uint64_t>(arg);
+ } else {
+ return hash<Arg>{}(arg);
+ }
+ }
+
+ [[nodiscard]] static auto mix64(uint64_t state, uint64_t v) -> uint64_t {
+ return detail::wyhash::mix(state + v, uint64_t{0x9ddfea08eb382d69});
+ }
+
+ // Creates a buffer that holds all the data from each element of the tuple. If possible we memcpy the data directly. If
+ // not, we hash the object and use this for the array. Size of the array is known at compile time, and memcpy is optimized
+ // away, so filling the buffer is highly efficient. Finally, call wyhash with this buffer.
+ template <typename T, std::size_t... Idx>
+ [[nodiscard]] static auto calc_hash(T const& t, std::index_sequence<Idx...>) noexcept -> uint64_t {
+ auto h = uint64_t{};
+ ((h = mix64(h, to64(std::get<Idx>(t)))), ...);
+ return h;
+ }
+};
+
+template <typename... Args>
+struct hash<std::tuple<Args...>> : tuple_hash_helper<Args...> {
+ using is_avalanching = void;
+ auto operator()(std::tuple<Args...> const& t) const noexcept -> uint64_t {
+ return tuple_hash_helper<Args...>::calc_hash(t, std::index_sequence_for<Args...>{});
+ }
+};
+
+template <typename A, typename B>
+struct hash<std::pair<A, B>> : tuple_hash_helper<A, B> {
+ using is_avalanching = void;
+ auto operator()(std::pair<A, B> const& t) const noexcept -> uint64_t {
+ return tuple_hash_helper<A, B>::calc_hash(t, std::index_sequence_for<A, B>{});
+ }
+};
+
+// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
+# define ANKERL_UNORDERED_DENSE_HASH_STATICCAST(T) \
+ template <> \
+ struct hash<T> { \
+ using is_avalanching = void; \
+ auto operator()(T const& obj) const noexcept -> uint64_t { \
+ return detail::wyhash::hash(static_cast<uint64_t>(obj)); \
+ } \
+ }
+
+# if defined(__GNUC__) && !defined(__clang__)
+# pragma GCC diagnostic push
+# pragma GCC diagnostic ignored "-Wuseless-cast"
+# endif
+// see https://en.cppreference.com/w/cpp/utility/hash
+ANKERL_UNORDERED_DENSE_HASH_STATICCAST(bool);
+ANKERL_UNORDERED_DENSE_HASH_STATICCAST(char);
+ANKERL_UNORDERED_DENSE_HASH_STATICCAST(signed char);
+ANKERL_UNORDERED_DENSE_HASH_STATICCAST(unsigned char);
+# if ANKERL_UNORDERED_DENSE_CPP_VERSION >= 202002L && defined(__cpp_char8_t)
+ANKERL_UNORDERED_DENSE_HASH_STATICCAST(char8_t);
+# endif
+ANKERL_UNORDERED_DENSE_HASH_STATICCAST(char16_t);
+ANKERL_UNORDERED_DENSE_HASH_STATICCAST(char32_t);
+ANKERL_UNORDERED_DENSE_HASH_STATICCAST(wchar_t);
+ANKERL_UNORDERED_DENSE_HASH_STATICCAST(short);
+ANKERL_UNORDERED_DENSE_HASH_STATICCAST(unsigned short);
+ANKERL_UNORDERED_DENSE_HASH_STATICCAST(int);
+ANKERL_UNORDERED_DENSE_HASH_STATICCAST(unsigned int);
+ANKERL_UNORDERED_DENSE_HASH_STATICCAST(long);
+ANKERL_UNORDERED_DENSE_HASH_STATICCAST(long long);
+ANKERL_UNORDERED_DENSE_HASH_STATICCAST(unsigned long);
+ANKERL_UNORDERED_DENSE_HASH_STATICCAST(unsigned long long);
+
+# if defined(__GNUC__) && !defined(__clang__)
+# pragma GCC diagnostic pop
+# endif
+
+// bucket_type //////////////////////////////////////////////////////////
+
+namespace bucket_type {
+
+struct standard {
+ static constexpr uint32_t dist_inc = 1U << 8U; // skip 1 byte fingerprint
+ static constexpr uint32_t fingerprint_mask = dist_inc - 1; // mask for 1 byte of fingerprint
+
+ uint32_t m_dist_and_fingerprint; // upper 3 byte: distance to original bucket. lower byte: fingerprint from hash
+ uint32_t m_value_idx; // index into the m_values vector.
+};
+
+ANKERL_UNORDERED_DENSE_PACK(struct big {
+ static constexpr uint32_t dist_inc = 1U << 8U; // skip 1 byte fingerprint
+ static constexpr uint32_t fingerprint_mask = dist_inc - 1; // mask for 1 byte of fingerprint
+
+ uint32_t m_dist_and_fingerprint; // upper 3 byte: distance to original bucket. lower byte: fingerprint from hash
+ size_t m_value_idx; // index into the m_values vector.
+});
+
+} // namespace bucket_type
+
+namespace detail {
+
+struct nonesuch {};
+
+template <class Default, class AlwaysVoid, template <class...> class Op, class... Args>
+struct detector {
+ using value_t = std::false_type;
+ using type = Default;
+};
+
+template <class Default, template <class...> class Op, class... Args>
+struct detector<Default, std::void_t<Op<Args...>>, Op, Args...> {
+ using value_t = std::true_type;
+ using type = Op<Args...>;
+};
+
+template <template <class...> class Op, class... Args>
+using is_detected = typename detail::detector<detail::nonesuch, void, Op, Args...>::value_t;
+
+template <template <class...> class Op, class... Args>
+constexpr bool is_detected_v = is_detected<Op, Args...>::value;
+
+template <typename T>
+using detect_avalanching = typename T::is_avalanching;
+
+template <typename T>
+using detect_is_transparent = typename T::is_transparent;
+
+template <typename T>
+using detect_iterator = typename T::iterator;
+
+template <typename T>
+using detect_reserve = decltype(std::declval<T&>().reserve(size_t{}));
+
+// enable_if helpers
+
+template <typename Mapped>
+constexpr bool is_map_v = !std::is_void_v<Mapped>;
+
+// clang-format off
+template <typename Hash, typename KeyEqual>
+constexpr bool is_transparent_v = is_detected_v<detect_is_transparent, Hash> && is_detected_v<detect_is_transparent, KeyEqual>;
+// clang-format on
+
+template <typename From, typename To1, typename To2>
+constexpr bool is_neither_convertible_v = !std::is_convertible_v<From, To1> && !std::is_convertible_v<From, To2>;
+
+template <typename T>
+constexpr bool has_reserve = is_detected_v<detect_reserve, T>;
+
+// base type for map has mapped_type
+template <class T>
+struct base_table_type_map {
+ using mapped_type = T;
+};
+
+// base type for set doesn't have mapped_type
+struct base_table_type_set {};
+
+} // namespace detail
+
+// Very much like std::deque, but faster for indexing (in most cases). As of now this doesn't implement the full std::vector
+// API, but merely what's necessary to work as an underlying container for ankerl::unordered_dense::{map, set}.
+// It allocates blocks of equal size and puts them into the m_blocks vector. That means it can grow simply by adding a new
+// block to the back of m_blocks, and doesn't double its size like an std::vector. The disadvantage is that memory is not
+// linear and thus there is one more indirection necessary for indexing.
+template <typename T, typename Allocator = std::allocator<T>, size_t MaxSegmentSizeBytes = 4096>
+class segmented_vector {
+ template <bool IsConst>
+ class iter_t;
+
+public:
+ using allocator_type = Allocator;
+ using pointer = typename std::allocator_traits<allocator_type>::pointer;
+ using const_pointer = typename std::allocator_traits<allocator_type>::const_pointer;
+ using difference_type = typename std::allocator_traits<allocator_type>::difference_type;
+ using value_type = T;
+ using size_type = std::size_t;
+ using reference = T&;
+ using const_reference = T const&;
+ using iterator = iter_t<false>;
+ using const_iterator = iter_t<true>;
+
+private:
+ using vec_alloc = typename std::allocator_traits<Allocator>::template rebind_alloc<pointer>;
+ std::vector<pointer, vec_alloc> m_blocks{};
+ size_t m_size{};
+
+ // Calculates the maximum number for x in (s << x) <= max_val
+ static constexpr auto num_bits_closest(size_t max_val, size_t s) -> size_t {
+ auto f = size_t{0};
+ while (s << (f + 1) <= max_val) {
+ ++f;
+ }
+ return f;
+ }
+
+ using self_t = segmented_vector<T, Allocator, MaxSegmentSizeBytes>;
+ static constexpr auto num_bits = num_bits_closest(MaxSegmentSizeBytes, sizeof(T));
+ static constexpr auto num_elements_in_block = 1U << num_bits;
+ static constexpr auto mask = num_elements_in_block - 1U;
+
+ /**
+ * Iterator class doubles as const_iterator and iterator
+ */
+ template <bool IsConst>
+ class iter_t {
+ using ptr_t = typename std::conditional_t<IsConst, segmented_vector::const_pointer const*, segmented_vector::pointer*>;
+ ptr_t m_data{};
+ size_t m_idx{};
+
+ template <bool B>
+ friend class iter_t;
+
+ public:
+ using difference_type = segmented_vector::difference_type;
+ using value_type = T;
+ using reference = typename std::conditional_t<IsConst, value_type const&, value_type&>;
+ using pointer = typename std::conditional_t<IsConst, segmented_vector::const_pointer, segmented_vector::pointer>;
+ using iterator_category = std::forward_iterator_tag;
+
+ iter_t() noexcept = default;
+
+ template <bool OtherIsConst, typename = typename std::enable_if<IsConst && !OtherIsConst>::type>
+ // NOLINTNEXTLINE(google-explicit-constructor,hicpp-explicit-conversions)
+ constexpr iter_t(iter_t<OtherIsConst> const& other) noexcept
+ : m_data(other.m_data)
+ , m_idx(other.m_idx) {}
+
+ constexpr iter_t(ptr_t data, size_t idx) noexcept
+ : m_data(data)
+ , m_idx(idx) {}
+
+ template <bool OtherIsConst, typename = typename std::enable_if<IsConst && !OtherIsConst>::type>
+ constexpr auto operator=(iter_t<OtherIsConst> const& other) noexcept -> iter_t& {
+ m_data = other.m_data;
+ m_idx = other.m_idx;
+ return *this;
+ }
+
+ constexpr auto operator++() noexcept -> iter_t& {
+ ++m_idx;
+ return *this;
+ }
+
+ constexpr auto operator+(difference_type diff) noexcept -> iter_t {
+ return {m_data, static_cast<size_t>(static_cast<difference_type>(m_idx) + diff)};
+ }
+
+ template <bool OtherIsConst>
+ constexpr auto operator-(iter_t<OtherIsConst> const& other) noexcept -> difference_type {
+ return static_cast<difference_type>(m_idx) - static_cast<difference_type>(other.m_idx);
+ }
+
+ constexpr auto operator*() const noexcept -> reference {
+ return m_data[m_idx >> num_bits][m_idx & mask];
+ }
+
+ constexpr auto operator->() const noexcept -> pointer {
+ return &m_data[m_idx >> num_bits][m_idx & mask];
+ }
+
+ template <bool O>
+ constexpr auto operator==(iter_t<O> const& o) const noexcept -> bool {
+ return m_idx == o.m_idx;
+ }
+
+ template <bool O>
+ constexpr auto operator!=(iter_t<O> const& o) const noexcept -> bool {
+ return !(*this == o);
+ }
+ };
+
+ // slow path: need to allocate a new segment every once in a while
+ void increase_capacity() {
+ auto ba = Allocator(m_blocks.get_allocator());
+ pointer block = std::allocator_traits<Allocator>::allocate(ba, num_elements_in_block);
+ m_blocks.push_back(block);
+ }
+
+ // Moves everything from other
+ void append_everything_from(segmented_vector&& other) {
+ reserve(size() + other.size());
+ for (auto&& o : other) {
+ emplace_back(std::move(o));
+ }
+ }
+
+ // Copies everything from other
+ void append_everything_from(segmented_vector const& other) {
+ reserve(size() + other.size());
+ for (auto const& o : other) {
+ emplace_back(o);
+ }
+ }
+
+ void dealloc() {
+ auto ba = Allocator(m_blocks.get_allocator());
+ for (auto ptr : m_blocks) {
+ std::allocator_traits<Allocator>::deallocate(ba, ptr, num_elements_in_block);
+ }
+ }
+
+ [[nodiscard]] static constexpr auto calc_num_blocks_for_capacity(size_t capacity) {
+ return (capacity + num_elements_in_block - 1U) / num_elements_in_block;
+ }
+
+public:
+ segmented_vector() = default;
+
+ // NOLINTNEXTLINE(google-explicit-constructor,hicpp-explicit-conversions)
+ segmented_vector(Allocator alloc)
+ : m_blocks(vec_alloc(alloc)) {}
+
+ segmented_vector(segmented_vector&& other, Allocator alloc)
+ : segmented_vector(alloc) {
+ *this = std::move(other);
+ }
+
+ segmented_vector(segmented_vector const& other, Allocator alloc)
+ : m_blocks(vec_alloc(alloc)) {
+ append_everything_from(other);
+ }
+
+ segmented_vector(segmented_vector&& other) noexcept
+ : segmented_vector(std::move(other), get_allocator()) {}
+
+ segmented_vector(segmented_vector const& other) {
+ append_everything_from(other);
+ }
+
+ auto operator=(segmented_vector const& other) -> segmented_vector& {
+ if (this == &other) {
+ return *this;
+ }
+ clear();
+ append_everything_from(other);
+ return *this;
+ }
+
+ auto operator=(segmented_vector&& other) noexcept -> segmented_vector& {
+ clear();
+ dealloc();
+ if (other.get_allocator() == get_allocator()) {
+ m_blocks = std::move(other.m_blocks);
+ m_size = std::exchange(other.m_size, {});
+ } else {
+ // make sure to construct with other's allocator!
+ m_blocks = std::vector<pointer, vec_alloc>(vec_alloc(other.get_allocator()));
+ append_everything_from(std::move(other));
+ }
+ return *this;
+ }
+
+ ~segmented_vector() {
+ clear();
+ dealloc();
+ }
+
+ [[nodiscard]] constexpr auto size() const -> size_t {
+ return m_size;
+ }
+
+ [[nodiscard]] constexpr auto capacity() const -> size_t {
+ return m_blocks.size() * num_elements_in_block;
+ }
+
+ // Indexing is highly performance critical
+ [[nodiscard]] constexpr auto operator[](size_t i) const noexcept -> T const& {
+ return m_blocks[i >> num_bits][i & mask];
+ }
+
+ [[nodiscard]] constexpr auto operator[](size_t i) noexcept -> T& {
+ return m_blocks[i >> num_bits][i & mask];
+ }
+
+ [[nodiscard]] constexpr auto begin() -> iterator {
+ return {m_blocks.data(), 0U};
+ }
+ [[nodiscard]] constexpr auto begin() const -> const_iterator {
+ return {m_blocks.data(), 0U};
+ }
+ [[nodiscard]] constexpr auto cbegin() const -> const_iterator {
+ return {m_blocks.data(), 0U};
+ }
+
+ [[nodiscard]] constexpr auto end() -> iterator {
+ return {m_blocks.data(), m_size};
+ }
+ [[nodiscard]] constexpr auto end() const -> const_iterator {
+ return {m_blocks.data(), m_size};
+ }
+ [[nodiscard]] constexpr auto cend() const -> const_iterator {
+ return {m_blocks.data(), m_size};
+ }
+
+ [[nodiscard]] constexpr auto back() -> reference {
+ return operator[](m_size - 1);
+ }
+ [[nodiscard]] constexpr auto back() const -> const_reference {
+ return operator[](m_size - 1);
+ }
+
+ void pop_back() {
+ back().~T();
+ --m_size;
+ }
+
+ [[nodiscard]] auto empty() const {
+ return 0 == m_size;
+ }
+
+ void reserve(size_t new_capacity) {
+ m_blocks.reserve(calc_num_blocks_for_capacity(new_capacity));
+ while (new_capacity > capacity()) {
+ increase_capacity();
+ }
+ }
+
+ [[nodiscard]] auto get_allocator() const -> allocator_type {
+ return allocator_type{m_blocks.get_allocator()};
+ }
+
+ template <class... Args>
+ auto emplace_back(Args&&... args) -> reference {
+ if (m_size == capacity()) {
+ increase_capacity();
+ }
+ auto* ptr = static_cast<void*>(&operator[](m_size));
+ auto& ref = *new (ptr) T(std::forward<Args>(args)...);
+ ++m_size;
+ return ref;
+ }
+
+ void clear() {
+ if constexpr (!std::is_trivially_destructible_v<T>) {
+ for (size_t i = 0, s = size(); i < s; ++i) {
+ operator[](i).~T();
+ }
+ }
+ m_size = 0;
+ }
+
+ void shrink_to_fit() {
+ auto ba = Allocator(m_blocks.get_allocator());
+ auto num_blocks_required = calc_num_blocks_for_capacity(m_size);
+ while (m_blocks.size() > num_blocks_required) {
+ std::allocator_traits<Allocator>::deallocate(ba, m_blocks.back(), num_elements_in_block);
+ m_blocks.pop_back();
+ }
+ m_blocks.shrink_to_fit();
+ }
+};
+
+namespace detail {
+
+// This is it, the table. Doubles as map and set, and uses `void` for T when its used as a set.
+template <class Key,
+ class T, // when void, treat it as a set.
+ class Hash,
+ class KeyEqual,
+ class AllocatorOrContainer,
+ class Bucket,
+ bool IsSegmented>
+class table : public std::conditional_t<is_map_v<T>, base_table_type_map<T>, base_table_type_set> {
+ using underlying_value_type = typename std::conditional_t<is_map_v<T>, std::pair<Key, T>, Key>;
+ using underlying_container_type = std::conditional_t<IsSegmented,
+ segmented_vector<underlying_value_type, AllocatorOrContainer>,
+ std::vector<underlying_value_type, AllocatorOrContainer>>;
+
+public:
+ using value_container_type = std::
+ conditional_t<is_detected_v<detect_iterator, AllocatorOrContainer>, AllocatorOrContainer, underlying_container_type>;
+
+private:
+ using bucket_alloc =
+ typename std::allocator_traits<typename value_container_type::allocator_type>::template rebind_alloc<Bucket>;
+ using bucket_alloc_traits = std::allocator_traits<bucket_alloc>;
+
+ static constexpr uint8_t initial_shifts = 64 - 2; // 2^(64-m_shift) number of buckets
+ static constexpr float default_max_load_factor = 0.8F;
+
+public:
+ using key_type = Key;
+ using value_type = typename value_container_type::value_type;
+ using size_type = typename value_container_type::size_type;
+ using difference_type = typename value_container_type::difference_type;
+ using hasher = Hash;
+ using key_equal = KeyEqual;
+ using allocator_type = typename value_container_type::allocator_type;
+ using reference = typename value_container_type::reference;
+ using const_reference = typename value_container_type::const_reference;
+ using pointer = typename value_container_type::pointer;
+ using const_pointer = typename value_container_type::const_pointer;
+ using const_iterator = typename value_container_type::const_iterator;
+ using iterator = std::conditional_t<is_map_v<T>, typename value_container_type::iterator, const_iterator>;
+ using bucket_type = Bucket;
+
+private:
+ using value_idx_type = decltype(Bucket::m_value_idx);
+ using dist_and_fingerprint_type = decltype(Bucket::m_dist_and_fingerprint);
+
+ static_assert(std::is_trivially_destructible_v<Bucket>, "assert there's no need to call destructor / std::destroy");
+ static_assert(std::is_trivially_copyable_v<Bucket>, "assert we can just memset / memcpy");
+
+ value_container_type m_values{}; // Contains all the key-value pairs in one densely stored container. No holes.
+ using bucket_pointer = typename std::allocator_traits<bucket_alloc>::pointer;
+ bucket_pointer m_buckets{};
+ size_t m_num_buckets = 0;
+ size_t m_max_bucket_capacity = 0;
+ float m_max_load_factor = default_max_load_factor;
+ Hash m_hash{};
+ KeyEqual m_equal{};
+ uint8_t m_shifts = initial_shifts;
+
+ [[nodiscard]] auto next(value_idx_type bucket_idx) const -> value_idx_type {
+ return ANKERL_UNORDERED_DENSE_UNLIKELY(bucket_idx + 1U == m_num_buckets)
+ ? 0
+ : static_cast<value_idx_type>(bucket_idx + 1U);
+ }
+
+ // Helper to access bucket through pointer types
+ [[nodiscard]] static constexpr auto at(bucket_pointer bucket_ptr, size_t offset) -> Bucket& {
+ return *(bucket_ptr + static_cast<typename std::allocator_traits<bucket_alloc>::difference_type>(offset));
+ }
+
+ // use the dist_inc and dist_dec functions so that uint16_t types work without warning
+ [[nodiscard]] static constexpr auto dist_inc(dist_and_fingerprint_type x) -> dist_and_fingerprint_type {
+ return static_cast<dist_and_fingerprint_type>(x + Bucket::dist_inc);
+ }
+
+ [[nodiscard]] static constexpr auto dist_dec(dist_and_fingerprint_type x) -> dist_and_fingerprint_type {
+ return static_cast<dist_and_fingerprint_type>(x - Bucket::dist_inc);
+ }
+
+ // The goal of mixed_hash is to always produce a high quality 64bit hash.
+ template <typename K>
+ [[nodiscard]] constexpr auto mixed_hash(K const& key) const -> uint64_t {
+ if constexpr (is_detected_v<detect_avalanching, Hash>) {
+ // we know that the hash is good because is_avalanching.
+ if constexpr (sizeof(decltype(m_hash(key))) < sizeof(uint64_t)) {
+ // 32bit hash and is_avalanching => multiply with a constant to avalanche bits upwards
+ return m_hash(key) * UINT64_C(0x9ddfea08eb382d69);
+ } else {
+ // 64bit and is_avalanching => only use the hash itself.
+ return m_hash(key);
+ }
+ } else {
+ // not is_avalanching => apply wyhash
+ return wyhash::hash(m_hash(key));
+ }
+ }
+
+ [[nodiscard]] constexpr auto dist_and_fingerprint_from_hash(uint64_t hash) const -> dist_and_fingerprint_type {
+ return Bucket::dist_inc | (static_cast<dist_and_fingerprint_type>(hash) & Bucket::fingerprint_mask);
+ }
+
+ [[nodiscard]] constexpr auto bucket_idx_from_hash(uint64_t hash) const -> value_idx_type {
+ return static_cast<value_idx_type>(hash >> m_shifts);
+ }
+
+ [[nodiscard]] static constexpr auto get_key(value_type const& vt) -> key_type const& {
+ if constexpr (is_map_v<T>) {
+ return vt.first;
+ } else {
+ return vt;
+ }
+ }
+
+ template <typename K>
+ [[nodiscard]] auto next_while_less(K const& key) const -> Bucket {
+ auto hash = mixed_hash(key);
+ auto dist_and_fingerprint = dist_and_fingerprint_from_hash(hash);
+ auto bucket_idx = bucket_idx_from_hash(hash);
+
+ while (dist_and_fingerprint < at(m_buckets, bucket_idx).m_dist_and_fingerprint) {
+ dist_and_fingerprint = dist_inc(dist_and_fingerprint);
+ bucket_idx = next(bucket_idx);
+ }
+ return {dist_and_fingerprint, bucket_idx};
+ }
+
+ void place_and_shift_up(Bucket bucket, value_idx_type place) {
+ while (0 != at(m_buckets, place).m_dist_and_fingerprint) {
+ bucket = std::exchange(at(m_buckets, place), bucket);
+ bucket.m_dist_and_fingerprint = dist_inc(bucket.m_dist_and_fingerprint);
+ place = next(place);
+ }
+ at(m_buckets, place) = bucket;
+ }
+
+ [[nodiscard]] static constexpr auto calc_num_buckets(uint8_t shifts) -> size_t {
+ return (std::min)(max_bucket_count(), size_t{1} << (64U - shifts));
+ }
+
+ [[nodiscard]] constexpr auto calc_shifts_for_size(size_t s) const -> uint8_t {
+ auto shifts = initial_shifts;
+ while (shifts > 0 && static_cast<size_t>(static_cast<float>(calc_num_buckets(shifts)) * max_load_factor()) < s) {
+ --shifts;
+ }
+ return shifts;
+ }
+
+ // assumes m_values has data, m_buckets=m_buckets_end=nullptr, m_shifts is INITIAL_SHIFTS
+ void copy_buckets(table const& other) {
+ // assumes m_values has already the correct data copied over.
+ if (empty()) {
+ // when empty, at least allocate an initial buckets and clear them.
+ allocate_buckets_from_shift();
+ clear_buckets();
+ } else {
+ m_shifts = other.m_shifts;
+ allocate_buckets_from_shift();
+ std::memcpy(m_buckets, other.m_buckets, sizeof(Bucket) * bucket_count());
+ }
+ }
+
+ /**
+ * True when no element can be added any more without increasing the size
+ */
+ [[nodiscard]] auto is_full() const -> bool {
+ return size() > m_max_bucket_capacity;
+ }
+
+ void deallocate_buckets() {
+ auto ba = bucket_alloc(m_values.get_allocator());
+ if (nullptr != m_buckets) {
+ bucket_alloc_traits::deallocate(ba, m_buckets, bucket_count());
+ m_buckets = nullptr;
+ }
+ m_num_buckets = 0;
+ m_max_bucket_capacity = 0;
+ }
+
+ void allocate_buckets_from_shift() {
+ auto ba = bucket_alloc(m_values.get_allocator());
+ m_num_buckets = calc_num_buckets(m_shifts);
+ m_buckets = bucket_alloc_traits::allocate(ba, m_num_buckets);
+ if (m_num_buckets == max_bucket_count()) {
+ // reached the maximum, make sure we can use each bucket
+ m_max_bucket_capacity = max_bucket_count();
+ } else {
+ m_max_bucket_capacity = static_cast<value_idx_type>(static_cast<float>(m_num_buckets) * max_load_factor());
+ }
+ }
+
+ void clear_buckets() {
+ if (m_buckets != nullptr) {
+ std::memset(&*m_buckets, 0, sizeof(Bucket) * bucket_count());
+ }
+ }
+
+ void clear_and_fill_buckets_from_values() {
+ clear_buckets();
+ for (value_idx_type value_idx = 0, end_idx = static_cast<value_idx_type>(m_values.size()); value_idx < end_idx;
+ ++value_idx) {
+ auto const& key = get_key(m_values[value_idx]);
+ auto [dist_and_fingerprint, bucket] = next_while_less(key);
+
+ // we know for certain that key has not yet been inserted, so no need to check it.
+ place_and_shift_up({dist_and_fingerprint, value_idx}, bucket);
+ }
+ }
+
+ void increase_size() {
+ if (m_max_bucket_capacity == max_bucket_count()) {
+ // remove the value again, we can't add it!
+ m_values.pop_back();
+ on_error_bucket_overflow();
+ }
+ --m_shifts;
+ deallocate_buckets();
+ allocate_buckets_from_shift();
+ clear_and_fill_buckets_from_values();
+ }
+
+ template <typename Op>
+ void do_erase(value_idx_type bucket_idx, Op handle_erased_value) {
+ auto const value_idx_to_remove = at(m_buckets, bucket_idx).m_value_idx;
+
+ // shift down until either empty or an element with correct spot is found
+ auto next_bucket_idx = next(bucket_idx);
+ while (at(m_buckets, next_bucket_idx).m_dist_and_fingerprint >= Bucket::dist_inc * 2) {
+ at(m_buckets, bucket_idx) = {dist_dec(at(m_buckets, next_bucket_idx).m_dist_and_fingerprint),
+ at(m_buckets, next_bucket_idx).m_value_idx};
+ bucket_idx = std::exchange(next_bucket_idx, next(next_bucket_idx));
+ }
+ at(m_buckets, bucket_idx) = {};
+ handle_erased_value(std::move(m_values[value_idx_to_remove]));
+
+ // update m_values
+ if (value_idx_to_remove != m_values.size() - 1) {
+ // no luck, we'll have to replace the value with the last one and update the index accordingly
+ auto& val = m_values[value_idx_to_remove];
+ val = std::move(m_values.back());
+
+ // update the values_idx of the moved entry. No need to play the info game, just look until we find the values_idx
+ auto mh = mixed_hash(get_key(val));
+ bucket_idx = bucket_idx_from_hash(mh);
+
+ auto const values_idx_back = static_cast<value_idx_type>(m_values.size() - 1);
+ while (values_idx_back != at(m_buckets, bucket_idx).m_value_idx) {
+ bucket_idx = next(bucket_idx);
+ }
+ at(m_buckets, bucket_idx).m_value_idx = value_idx_to_remove;
+ }
+ m_values.pop_back();
+ }
+
+ template <typename K, typename Op>
+ auto do_erase_key(K&& key, Op handle_erased_value) -> size_t {
+ if (empty()) {
+ return 0;
+ }
+
+ auto [dist_and_fingerprint, bucket_idx] = next_while_less(key);
+
+ while (dist_and_fingerprint == at(m_buckets, bucket_idx).m_dist_and_fingerprint &&
+ !m_equal(key, get_key(m_values[at(m_buckets, bucket_idx).m_value_idx]))) {
+ dist_and_fingerprint = dist_inc(dist_and_fingerprint);
+ bucket_idx = next(bucket_idx);
+ }
+
+ if (dist_and_fingerprint != at(m_buckets, bucket_idx).m_dist_and_fingerprint) {
+ return 0;
+ }
+ do_erase(bucket_idx, handle_erased_value);
+ return 1;
+ }
+
+ template <class K, class M>
+ auto do_insert_or_assign(K&& key, M&& mapped) -> std::pair<iterator, bool> {
+ auto it_isinserted = try_emplace(std::forward<K>(key), std::forward<M>(mapped));
+ if (!it_isinserted.second) {
+ it_isinserted.first->second = std::forward<M>(mapped);
+ }
+ return it_isinserted;
+ }
+
+ template <typename... Args>
+ auto do_place_element(dist_and_fingerprint_type dist_and_fingerprint, value_idx_type bucket_idx, Args&&... args)
+ -> std::pair<iterator, bool> {
+
+ // emplace the new value. If that throws an exception, no harm done; index is still in a valid state
+ m_values.emplace_back(std::forward<Args>(args)...);
+
+ auto value_idx = static_cast<value_idx_type>(m_values.size() - 1);
+ if (ANKERL_UNORDERED_DENSE_UNLIKELY(is_full())) {
+ increase_size();
+ } else {
+ place_and_shift_up({dist_and_fingerprint, value_idx}, bucket_idx);
+ }
+
+ // place element and shift up until we find an empty spot
+ return {begin() + static_cast<difference_type>(value_idx), true};
+ }
+
+ template <typename K, typename... Args>
+ auto do_try_emplace(K&& key, Args&&... args) -> std::pair<iterator, bool> {
+ auto hash = mixed_hash(key);
+ auto dist_and_fingerprint = dist_and_fingerprint_from_hash(hash);
+ auto bucket_idx = bucket_idx_from_hash(hash);
+
+ while (true) {
+ auto* bucket = &at(m_buckets, bucket_idx);
+ if (dist_and_fingerprint == bucket->m_dist_and_fingerprint) {
+ if (m_equal(key, get_key(m_values[bucket->m_value_idx]))) {
+ return {begin() + static_cast<difference_type>(bucket->m_value_idx), false};
+ }
+ } else if (dist_and_fingerprint > bucket->m_dist_and_fingerprint) {
+ return do_place_element(dist_and_fingerprint,
+ bucket_idx,
+ std::piecewise_construct,
+ std::forward_as_tuple(std::forward<K>(key)),
+ std::forward_as_tuple(std::forward<Args>(args)...));
+ }
+ dist_and_fingerprint = dist_inc(dist_and_fingerprint);
+ bucket_idx = next(bucket_idx);
+ }
+ }
+
+ template <typename K>
+ auto do_find(K const& key) -> iterator {
+ if (ANKERL_UNORDERED_DENSE_UNLIKELY(empty())) {
+ return end();
+ }
+
+ auto mh = mixed_hash(key);
+ auto dist_and_fingerprint = dist_and_fingerprint_from_hash(mh);
+ auto bucket_idx = bucket_idx_from_hash(mh);
+ auto* bucket = &at(m_buckets, bucket_idx);
+
+ // unrolled loop. *Always* check a few directly, then enter the loop. This is faster.
+ if (dist_and_fingerprint == bucket->m_dist_and_fingerprint && m_equal(key, get_key(m_values[bucket->m_value_idx]))) {
+ return begin() + static_cast<difference_type>(bucket->m_value_idx);
+ }
+ dist_and_fingerprint = dist_inc(dist_and_fingerprint);
+ bucket_idx = next(bucket_idx);
+ bucket = &at(m_buckets, bucket_idx);
+
+ if (dist_and_fingerprint == bucket->m_dist_and_fingerprint && m_equal(key, get_key(m_values[bucket->m_value_idx]))) {
+ return begin() + static_cast<difference_type>(bucket->m_value_idx);
+ }
+ dist_and_fingerprint = dist_inc(dist_and_fingerprint);
+ bucket_idx = next(bucket_idx);
+ bucket = &at(m_buckets, bucket_idx);
+
+ while (true) {
+ if (dist_and_fingerprint == bucket->m_dist_and_fingerprint) {
+ if (m_equal(key, get_key(m_values[bucket->m_value_idx]))) {
+ return begin() + static_cast<difference_type>(bucket->m_value_idx);
+ }
+ } else if (dist_and_fingerprint > bucket->m_dist_and_fingerprint) {
+ return end();
+ }
+ dist_and_fingerprint = dist_inc(dist_and_fingerprint);
+ bucket_idx = next(bucket_idx);
+ bucket = &at(m_buckets, bucket_idx);
+ }
+ }
+
+ template <typename K>
+ auto do_find(K const& key) const -> const_iterator {
+ return const_cast<table*>(this)->do_find(key); // NOLINT(cppcoreguidelines-pro-type-const-cast)
+ }
+
+ template <typename K, typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
+ auto do_at(K const& key) -> Q& {
+ if (auto it = find(key); ANKERL_UNORDERED_DENSE_LIKELY(end() != it)) {
+ return it->second;
+ }
+ on_error_key_not_found();
+ }
+
+ template <typename K, typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
+ auto do_at(K const& key) const -> Q const& {
+ return const_cast<table*>(this)->at(key); // NOLINT(cppcoreguidelines-pro-type-const-cast)
+ }
+
+public:
+ explicit table(size_t bucket_count,
+ Hash const& hash = Hash(),
+ KeyEqual const& equal = KeyEqual(),
+ allocator_type const& alloc_or_container = allocator_type())
+ : m_values(alloc_or_container)
+ , m_hash(hash)
+ , m_equal(equal) {
+ if (0 != bucket_count) {
+ reserve(bucket_count);
+ } else {
+ allocate_buckets_from_shift();
+ clear_buckets();
+ }
+ }
+
+ table()
+ : table(0) {}
+
+ table(size_t bucket_count, allocator_type const& alloc)
+ : table(bucket_count, Hash(), KeyEqual(), alloc) {}
+
+ table(size_t bucket_count, Hash const& hash, allocator_type const& alloc)
+ : table(bucket_count, hash, KeyEqual(), alloc) {}
+
+ explicit table(allocator_type const& alloc)
+ : table(0, Hash(), KeyEqual(), alloc) {}
+
+ template <class InputIt>
+ table(InputIt first,
+ InputIt last,
+ size_type bucket_count = 0,
+ Hash const& hash = Hash(),
+ KeyEqual const& equal = KeyEqual(),
+ allocator_type const& alloc = allocator_type())
+ : table(bucket_count, hash, equal, alloc) {
+ insert(first, last);
+ }
+
+ template <class InputIt>
+ table(InputIt first, InputIt last, size_type bucket_count, allocator_type const& alloc)
+ : table(first, last, bucket_count, Hash(), KeyEqual(), alloc) {}
+
+ template <class InputIt>
+ table(InputIt first, InputIt last, size_type bucket_count, Hash const& hash, allocator_type const& alloc)
+ : table(first, last, bucket_count, hash, KeyEqual(), alloc) {}
+
+ table(table const& other)
+ : table(other, other.m_values.get_allocator()) {}
+
+ table(table const& other, allocator_type const& alloc)
+ : m_values(other.m_values, alloc)
+ , m_max_load_factor(other.m_max_load_factor)
+ , m_hash(other.m_hash)
+ , m_equal(other.m_equal) {
+ copy_buckets(other);
+ }
+
+ table(table&& other) noexcept
+ : table(std::move(other), other.m_values.get_allocator()) {}
+
+ table(table&& other, allocator_type const& alloc) noexcept
+ : m_values(alloc) {
+ *this = std::move(other);
+ }
+
+ table(std::initializer_list<value_type> ilist,
+ size_t bucket_count = 0,
+ Hash const& hash = Hash(),
+ KeyEqual const& equal = KeyEqual(),
+ allocator_type const& alloc = allocator_type())
+ : table(bucket_count, hash, equal, alloc) {
+ insert(ilist);
+ }
+
+ table(std::initializer_list<value_type> ilist, size_type bucket_count, allocator_type const& alloc)
+ : table(ilist, bucket_count, Hash(), KeyEqual(), alloc) {}
+
+ table(std::initializer_list<value_type> init, size_type bucket_count, Hash const& hash, allocator_type const& alloc)
+ : table(init, bucket_count, hash, KeyEqual(), alloc) {}
+
+ ~table() {
+ if (nullptr != m_buckets) {
+ auto ba = bucket_alloc(m_values.get_allocator());
+ bucket_alloc_traits::deallocate(ba, m_buckets, bucket_count());
+ }
+ }
+
+ auto operator=(table const& other) -> table& {
+ if (&other != this) {
+ deallocate_buckets(); // deallocate before m_values is set (might have another allocator)
+ m_values = other.m_values;
+ m_max_load_factor = other.m_max_load_factor;
+ m_hash = other.m_hash;
+ m_equal = other.m_equal;
+ m_shifts = initial_shifts;
+ copy_buckets(other);
+ }
+ return *this;
+ }
+
+ auto operator=(table&& other) noexcept(noexcept(std::is_nothrow_move_assignable_v<value_container_type> &&
+ std::is_nothrow_move_assignable_v<Hash> &&
+ std::is_nothrow_move_assignable_v<KeyEqual>)) -> table& {
+ if (&other != this) {
+ deallocate_buckets(); // deallocate before m_values is set (might have another allocator)
+ m_values = std::move(other.m_values);
+ other.m_values.clear();
+
+ // we can only reuse m_buckets when both maps have the same allocator!
+ if (get_allocator() == other.get_allocator()) {
+ m_buckets = std::exchange(other.m_buckets, nullptr);
+ m_num_buckets = std::exchange(other.m_num_buckets, 0);
+ m_max_bucket_capacity = std::exchange(other.m_max_bucket_capacity, 0);
+ m_shifts = std::exchange(other.m_shifts, initial_shifts);
+ m_max_load_factor = std::exchange(other.m_max_load_factor, default_max_load_factor);
+ m_hash = std::exchange(other.m_hash, {});
+ m_equal = std::exchange(other.m_equal, {});
+ other.allocate_buckets_from_shift();
+ other.clear_buckets();
+ } else {
+ // set max_load_factor *before* copying the other's buckets, so we have the same
+ // behavior
+ m_max_load_factor = other.m_max_load_factor;
+
+ // copy_buckets sets m_buckets, m_num_buckets, m_max_bucket_capacity, m_shifts
+ copy_buckets(other);
+ // clears the other's buckets so other is now already usable.
+ other.clear_buckets();
+ m_hash = other.m_hash;
+ m_equal = other.m_equal;
+ }
+ // map "other" is now already usable, it's empty.
+ }
+ return *this;
+ }
+
+ auto operator=(std::initializer_list<value_type> ilist) -> table& {
+ clear();
+ insert(ilist);
+ return *this;
+ }
+
+ auto get_allocator() const noexcept -> allocator_type {
+ return m_values.get_allocator();
+ }
+
+ // iterators //////////////////////////////////////////////////////////////
+
+ auto begin() noexcept -> iterator {
+ return m_values.begin();
+ }
+
+ auto begin() const noexcept -> const_iterator {
+ return m_values.begin();
+ }
+
+ auto cbegin() const noexcept -> const_iterator {
+ return m_values.cbegin();
+ }
+
+ auto end() noexcept -> iterator {
+ return m_values.end();
+ }
+
+ auto cend() const noexcept -> const_iterator {
+ return m_values.cend();
+ }
+
+ auto end() const noexcept -> const_iterator {
+ return m_values.end();
+ }
+
+ // capacity ///////////////////////////////////////////////////////////////
+
+ [[nodiscard]] auto empty() const noexcept -> bool {
+ return m_values.empty();
+ }
+
+ [[nodiscard]] auto size() const noexcept -> size_t {
+ return m_values.size();
+ }
+
+ [[nodiscard]] static constexpr auto max_size() noexcept -> size_t {
+ if constexpr ((std::numeric_limits<value_idx_type>::max)() == (std::numeric_limits<size_t>::max)()) {
+ return size_t{1} << (sizeof(value_idx_type) * 8 - 1);
+ } else {
+ return size_t{1} << (sizeof(value_idx_type) * 8);
+ }
+ }
+
+ // modifiers //////////////////////////////////////////////////////////////
+
+ void clear() {
+ m_values.clear();
+ clear_buckets();
+ }
+
+ auto insert(value_type const& value) -> std::pair<iterator, bool> {
+ return emplace(value);
+ }
+
+ auto insert(value_type&& value) -> std::pair<iterator, bool> {
+ return emplace(std::move(value));
+ }
+
+ template <class P, std::enable_if_t<std::is_constructible_v<value_type, P&&>, bool> = true>
+ auto insert(P&& value) -> std::pair<iterator, bool> {
+ return emplace(std::forward<P>(value));
+ }
+
+ auto insert(const_iterator /*hint*/, value_type const& value) -> iterator {
+ return insert(value).first;
+ }
+
+ auto insert(const_iterator /*hint*/, value_type&& value) -> iterator {
+ return insert(std::move(value)).first;
+ }
+
+ template <class P, std::enable_if_t<std::is_constructible_v<value_type, P&&>, bool> = true>
+ auto insert(const_iterator /*hint*/, P&& value) -> iterator {
+ return insert(std::forward<P>(value)).first;
+ }
+
+ template <class InputIt>
+ void insert(InputIt first, InputIt last) {
+ while (first != last) {
+ insert(*first);
+ ++first;
+ }
+ }
+
+ void insert(std::initializer_list<value_type> ilist) {
+ insert(ilist.begin(), ilist.end());
+ }
+
+ // nonstandard API: *this is emptied.
+ // Also see "A Standard flat_map" https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2022/p0429r9.pdf
+ auto extract() && -> value_container_type {
+ return std::move(m_values);
+ }
+
+ // nonstandard API:
+ // Discards the internally held container and replaces it with the one passed. Erases non-unique elements.
+ auto replace(value_container_type&& container) {
+ if (ANKERL_UNORDERED_DENSE_UNLIKELY(container.size() > max_size())) {
+ on_error_too_many_elements();
+ }
+ auto shifts = calc_shifts_for_size(container.size());
+ if (0 == m_num_buckets || shifts < m_shifts || container.get_allocator() != m_values.get_allocator()) {
+ m_shifts = shifts;
+ deallocate_buckets();
+ allocate_buckets_from_shift();
+ }
+ clear_buckets();
+
+ m_values = std::move(container);
+
+ // can't use clear_and_fill_buckets_from_values() because container elements might not be unique
+ auto value_idx = value_idx_type{};
+
+ // loop until we reach the end of the container. duplicated entries will be replaced with back().
+ while (value_idx != static_cast<value_idx_type>(m_values.size())) {
+ auto const& key = get_key(m_values[value_idx]);
+
+ auto hash = mixed_hash(key);
+ auto dist_and_fingerprint = dist_and_fingerprint_from_hash(hash);
+ auto bucket_idx = bucket_idx_from_hash(hash);
+
+ bool key_found = false;
+ while (true) {
+ auto const& bucket = at(m_buckets, bucket_idx);
+ if (dist_and_fingerprint > bucket.m_dist_and_fingerprint) {
+ break;
+ }
+ if (dist_and_fingerprint == bucket.m_dist_and_fingerprint &&
+ m_equal(key, get_key(m_values[bucket.m_value_idx]))) {
+ key_found = true;
+ break;
+ }
+ dist_and_fingerprint = dist_inc(dist_and_fingerprint);
+ bucket_idx = next(bucket_idx);
+ }
+
+ if (key_found) {
+ if (value_idx != static_cast<value_idx_type>(m_values.size() - 1)) {
+ m_values[value_idx] = std::move(m_values.back());
+ }
+ m_values.pop_back();
+ } else {
+ place_and_shift_up({dist_and_fingerprint, value_idx}, bucket_idx);
+ ++value_idx;
+ }
+ }
+ }
+
+ template <class M, typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
+ auto insert_or_assign(Key const& key, M&& mapped) -> std::pair<iterator, bool> {
+ return do_insert_or_assign(key, std::forward<M>(mapped));
+ }
+
+ template <class M, typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
+ auto insert_or_assign(Key&& key, M&& mapped) -> std::pair<iterator, bool> {
+ return do_insert_or_assign(std::move(key), std::forward<M>(mapped));
+ }
+
+ template <typename K,
+ typename M,
+ typename Q = T,
+ typename H = Hash,
+ typename KE = KeyEqual,
+ std::enable_if_t<is_map_v<Q> && is_transparent_v<H, KE>, bool> = true>
+ auto insert_or_assign(K&& key, M&& mapped) -> std::pair<iterator, bool> {
+ return do_insert_or_assign(std::forward<K>(key), std::forward<M>(mapped));
+ }
+
+ template <class M, typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
+ auto insert_or_assign(const_iterator /*hint*/, Key const& key, M&& mapped) -> iterator {
+ return do_insert_or_assign(key, std::forward<M>(mapped)).first;
+ }
+
+ template <class M, typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
+ auto insert_or_assign(const_iterator /*hint*/, Key&& key, M&& mapped) -> iterator {
+ return do_insert_or_assign(std::move(key), std::forward<M>(mapped)).first;
+ }
+
+ template <typename K,
+ typename M,
+ typename Q = T,
+ typename H = Hash,
+ typename KE = KeyEqual,
+ std::enable_if_t<is_map_v<Q> && is_transparent_v<H, KE>, bool> = true>
+ auto insert_or_assign(const_iterator /*hint*/, K&& key, M&& mapped) -> iterator {
+ return do_insert_or_assign(std::forward<K>(key), std::forward<M>(mapped)).first;
+ }
+
+ // Single arguments for unordered_set can be used without having to construct the value_type
+ template <class K,
+ typename Q = T,
+ typename H = Hash,
+ typename KE = KeyEqual,
+ std::enable_if_t<!is_map_v<Q> && is_transparent_v<H, KE>, bool> = true>
+ auto emplace(K&& key) -> std::pair<iterator, bool> {
+ auto hash = mixed_hash(key);
+ auto dist_and_fingerprint = dist_and_fingerprint_from_hash(hash);
+ auto bucket_idx = bucket_idx_from_hash(hash);
+
+ while (dist_and_fingerprint <= at(m_buckets, bucket_idx).m_dist_and_fingerprint) {
+ if (dist_and_fingerprint == at(m_buckets, bucket_idx).m_dist_and_fingerprint &&
+ m_equal(key, m_values[at(m_buckets, bucket_idx).m_value_idx])) {
+ // found it, return without ever actually creating anything
+ return {begin() + static_cast<difference_type>(at(m_buckets, bucket_idx).m_value_idx), false};
+ }
+ dist_and_fingerprint = dist_inc(dist_and_fingerprint);
+ bucket_idx = next(bucket_idx);
+ }
+
+ // value is new, insert element first, so when exception happens we are in a valid state
+ return do_place_element(dist_and_fingerprint, bucket_idx, std::forward<K>(key));
+ }
+
+ template <class... Args>
+ auto emplace(Args&&... args) -> std::pair<iterator, bool> {
+ // we have to instantiate the value_type to be able to access the key.
+ // 1. emplace_back the object so it is constructed. 2. If the key is already there, pop it later in the loop.
+ auto& key = get_key(m_values.emplace_back(std::forward<Args>(args)...));
+ auto hash = mixed_hash(key);
+ auto dist_and_fingerprint = dist_and_fingerprint_from_hash(hash);
+ auto bucket_idx = bucket_idx_from_hash(hash);
+
+ while (dist_and_fingerprint <= at(m_buckets, bucket_idx).m_dist_and_fingerprint) {
+ if (dist_and_fingerprint == at(m_buckets, bucket_idx).m_dist_and_fingerprint &&
+ m_equal(key, get_key(m_values[at(m_buckets, bucket_idx).m_value_idx]))) {
+ m_values.pop_back(); // value was already there, so get rid of it
+ return {begin() + static_cast<difference_type>(at(m_buckets, bucket_idx).m_value_idx), false};
+ }
+ dist_and_fingerprint = dist_inc(dist_and_fingerprint);
+ bucket_idx = next(bucket_idx);
+ }
+
+ // value is new, place the bucket and shift up until we find an empty spot
+ auto value_idx = static_cast<value_idx_type>(m_values.size() - 1);
+ if (ANKERL_UNORDERED_DENSE_UNLIKELY(is_full())) {
+ // increase_size just rehashes all the data we have in m_values
+ increase_size();
+ } else {
+ // place element and shift up until we find an empty spot
+ place_and_shift_up({dist_and_fingerprint, value_idx}, bucket_idx);
+ }
+ return {begin() + static_cast<difference_type>(value_idx), true};
+ }
+
+ template <class... Args>
+ auto emplace_hint(const_iterator /*hint*/, Args&&... args) -> iterator {
+ return emplace(std::forward<Args>(args)...).first;
+ }
+
+ template <class... Args, typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
+ auto try_emplace(Key const& key, Args&&... args) -> std::pair<iterator, bool> {
+ return do_try_emplace(key, std::forward<Args>(args)...);
+ }
+
+ template <class... Args, typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
+ auto try_emplace(Key&& key, Args&&... args) -> std::pair<iterator, bool> {
+ return do_try_emplace(std::move(key), std::forward<Args>(args)...);
+ }
+
+ template <class... Args, typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
+ auto try_emplace(const_iterator /*hint*/, Key const& key, Args&&... args) -> iterator {
+ return do_try_emplace(key, std::forward<Args>(args)...).first;
+ }
+
+ template <class... Args, typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
+ auto try_emplace(const_iterator /*hint*/, Key&& key, Args&&... args) -> iterator {
+ return do_try_emplace(std::move(key), std::forward<Args>(args)...).first;
+ }
+
+ template <
+ typename K,
+ typename... Args,
+ typename Q = T,
+ typename H = Hash,
+ typename KE = KeyEqual,
+ std::enable_if_t<is_map_v<Q> && is_transparent_v<H, KE> && is_neither_convertible_v<K&&, iterator, const_iterator>,
+ bool> = true>
+ auto try_emplace(K&& key, Args&&... args) -> std::pair<iterator, bool> {
+ return do_try_emplace(std::forward<K>(key), std::forward<Args>(args)...);
+ }
+
+ template <
+ typename K,
+ typename... Args,
+ typename Q = T,
+ typename H = Hash,
+ typename KE = KeyEqual,
+ std::enable_if_t<is_map_v<Q> && is_transparent_v<H, KE> && is_neither_convertible_v<K&&, iterator, const_iterator>,
+ bool> = true>
+ auto try_emplace(const_iterator /*hint*/, K&& key, Args&&... args) -> iterator {
+ return do_try_emplace(std::forward<K>(key), std::forward<Args>(args)...).first;
+ }
+
+ auto erase(iterator it) -> iterator {
+ auto hash = mixed_hash(get_key(*it));
+ auto bucket_idx = bucket_idx_from_hash(hash);
+
+ auto const value_idx_to_remove = static_cast<value_idx_type>(it - cbegin());
+ while (at(m_buckets, bucket_idx).m_value_idx != value_idx_to_remove) {
+ bucket_idx = next(bucket_idx);
+ }
+
+ do_erase(bucket_idx, [](value_type&& /*unused*/) {
+ });
+ return begin() + static_cast<difference_type>(value_idx_to_remove);
+ }
+
+ auto extract(iterator it) -> value_type {
+ auto hash = mixed_hash(get_key(*it));
+ auto bucket_idx = bucket_idx_from_hash(hash);
+
+ auto const value_idx_to_remove = static_cast<value_idx_type>(it - cbegin());
+ while (at(m_buckets, bucket_idx).m_value_idx != value_idx_to_remove) {
+ bucket_idx = next(bucket_idx);
+ }
+
+ auto tmp = std::optional<value_type>{};
+ do_erase(bucket_idx, [&tmp](value_type&& val) {
+ tmp = std::move(val);
+ });
+ return std::move(tmp).value();
+ }
+
+ template <typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
+ auto erase(const_iterator it) -> iterator {
+ return erase(begin() + (it - cbegin()));
+ }
+
+ template <typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
+ auto extract(const_iterator it) -> value_type {
+ return extract(begin() + (it - cbegin()));
+ }
+
+ auto erase(const_iterator first, const_iterator last) -> iterator {
+ auto const idx_first = first - cbegin();
+ auto const idx_last = last - cbegin();
+ auto const first_to_last = std::distance(first, last);
+ auto const last_to_end = std::distance(last, cend());
+
+ // remove elements from left to right which moves elements from the end back
+ auto const mid = idx_first + (std::min)(first_to_last, last_to_end);
+ auto idx = idx_first;
+ while (idx != mid) {
+ erase(begin() + idx);
+ ++idx;
+ }
+
+ // all elements from the right are moved, now remove the last element until all done
+ idx = idx_last;
+ while (idx != mid) {
+ --idx;
+ erase(begin() + idx);
+ }
+
+ return begin() + idx_first;
+ }
+
+ auto erase(Key const& key) -> size_t {
+ return do_erase_key(key, [](value_type&& /*unused*/) {
+ });
+ }
+
+ auto extract(Key const& key) -> std::optional<value_type> {
+ auto tmp = std::optional<value_type>{};
+ do_erase_key(key, [&tmp](value_type&& val) {
+ tmp = std::move(val);
+ });
+ return tmp;
+ }
+
+ template <class K, class H = Hash, class KE = KeyEqual, std::enable_if_t<is_transparent_v<H, KE>, bool> = true>
+ auto erase(K&& key) -> size_t {
+ return do_erase_key(std::forward<K>(key), [](value_type&& /*unused*/) {
+ });
+ }
+
+ template <class K, class H = Hash, class KE = KeyEqual, std::enable_if_t<is_transparent_v<H, KE>, bool> = true>
+ auto extract(K&& key) -> std::optional<value_type> {
+ auto tmp = std::optional<value_type>{};
+ do_erase_key(std::forward<K>(key), [&tmp](value_type&& val) {
+ tmp = std::move(val);
+ });
+ return tmp;
+ }
+
+ void swap(table& other) noexcept(noexcept(std::is_nothrow_swappable_v<value_container_type> &&
+ std::is_nothrow_swappable_v<Hash> && std::is_nothrow_swappable_v<KeyEqual>)) {
+ using std::swap;
+ swap(other, *this);
+ }
+
+ // lookup /////////////////////////////////////////////////////////////////
+
+ template <typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
+ auto at(key_type const& key) -> Q& {
+ return do_at(key);
+ }
+
+ template <typename K,
+ typename Q = T,
+ typename H = Hash,
+ typename KE = KeyEqual,
+ std::enable_if_t<is_map_v<Q> && is_transparent_v<H, KE>, bool> = true>
+ auto at(K const& key) -> Q& {
+ return do_at(key);
+ }
+
+ template <typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
+ auto at(key_type const& key) const -> Q const& {
+ return do_at(key);
+ }
+
+ template <typename K,
+ typename Q = T,
+ typename H = Hash,
+ typename KE = KeyEqual,
+ std::enable_if_t<is_map_v<Q> && is_transparent_v<H, KE>, bool> = true>
+ auto at(K const& key) const -> Q const& {
+ return do_at(key);
+ }
+
+ template <typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
+ auto operator[](Key const& key) -> Q& {
+ return try_emplace(key).first->second;
+ }
+
+ template <typename Q = T, std::enable_if_t<is_map_v<Q>, bool> = true>
+ auto operator[](Key&& key) -> Q& {
+ return try_emplace(std::move(key)).first->second;
+ }
+
+ template <typename K,
+ typename Q = T,
+ typename H = Hash,
+ typename KE = KeyEqual,
+ std::enable_if_t<is_map_v<Q> && is_transparent_v<H, KE>, bool> = true>
+ auto operator[](K&& key) -> Q& {
+ return try_emplace(std::forward<K>(key)).first->second;
+ }
+
+ auto count(Key const& key) const -> size_t {
+ return find(key) == end() ? 0 : 1;
+ }
+
+ template <class K, class H = Hash, class KE = KeyEqual, std::enable_if_t<is_transparent_v<H, KE>, bool> = true>
+ auto count(K const& key) const -> size_t {
+ return find(key) == end() ? 0 : 1;
+ }
+
+ auto find(Key const& key) -> iterator {
+ return do_find(key);
+ }
+
+ auto find(Key const& key) const -> const_iterator {
+ return do_find(key);
+ }
+
+ template <class K, class H = Hash, class KE = KeyEqual, std::enable_if_t<is_transparent_v<H, KE>, bool> = true>
+ auto find(K const& key) -> iterator {
+ return do_find(key);
+ }
+
+ template <class K, class H = Hash, class KE = KeyEqual, std::enable_if_t<is_transparent_v<H, KE>, bool> = true>
+ auto find(K const& key) const -> const_iterator {
+ return do_find(key);
+ }
+
+ auto contains(Key const& key) const -> bool {
+ return find(key) != end();
+ }
+
+ template <class K, class H = Hash, class KE = KeyEqual, std::enable_if_t<is_transparent_v<H, KE>, bool> = true>
+ auto contains(K const& key) const -> bool {
+ return find(key) != end();
+ }
+
+ auto equal_range(Key const& key) -> std::pair<iterator, iterator> {
+ auto it = do_find(key);
+ return {it, it == end() ? end() : it + 1};
+ }
+
+ auto equal_range(const Key& key) const -> std::pair<const_iterator, const_iterator> {
+ auto it = do_find(key);
+ return {it, it == end() ? end() : it + 1};
+ }
+
+ template <class K, class H = Hash, class KE = KeyEqual, std::enable_if_t<is_transparent_v<H, KE>, bool> = true>
+ auto equal_range(K const& key) -> std::pair<iterator, iterator> {
+ auto it = do_find(key);
+ return {it, it == end() ? end() : it + 1};
+ }
+
+ template <class K, class H = Hash, class KE = KeyEqual, std::enable_if_t<is_transparent_v<H, KE>, bool> = true>
+ auto equal_range(K const& key) const -> std::pair<const_iterator, const_iterator> {
+ auto it = do_find(key);
+ return {it, it == end() ? end() : it + 1};
+ }
+
+ // bucket interface ///////////////////////////////////////////////////////
+
+ auto bucket_count() const noexcept -> size_t { // NOLINT(modernize-use-nodiscard)
+ return m_num_buckets;
+ }
+
+ static constexpr auto max_bucket_count() noexcept -> size_t { // NOLINT(modernize-use-nodiscard)
+ return max_size();
+ }
+
+ // hash policy ////////////////////////////////////////////////////////////
+
+ [[nodiscard]] auto load_factor() const -> float {
+ return bucket_count() ? static_cast<float>(size()) / static_cast<float>(bucket_count()) : 0.0F;
+ }
+
+ [[nodiscard]] auto max_load_factor() const -> float {
+ return m_max_load_factor;
+ }
+
+ void max_load_factor(float ml) {
+ m_max_load_factor = ml;
+ if (m_num_buckets != max_bucket_count()) {
+ m_max_bucket_capacity = static_cast<value_idx_type>(static_cast<float>(bucket_count()) * max_load_factor());
+ }
+ }
+
+ void rehash(size_t count) {
+ count = (std::min)(count, max_size());
+ auto shifts = calc_shifts_for_size((std::max)(count, size()));
+ if (shifts != m_shifts) {
+ m_shifts = shifts;
+ deallocate_buckets();
+ m_values.shrink_to_fit();
+ allocate_buckets_from_shift();
+ clear_and_fill_buckets_from_values();
+ }
+ }
+
+ void reserve(size_t capa) {
+ capa = (std::min)(capa, max_size());
+ if constexpr (has_reserve<value_container_type>) {
+ // std::deque doesn't have reserve(). Make sure we only call when available
+ m_values.reserve(capa);
+ }
+ auto shifts = calc_shifts_for_size((std::max)(capa, size()));
+ if (0 == m_num_buckets || shifts < m_shifts) {
+ m_shifts = shifts;
+ deallocate_buckets();
+ allocate_buckets_from_shift();
+ clear_and_fill_buckets_from_values();
+ }
+ }
+
+ // observers //////////////////////////////////////////////////////////////
+
+ auto hash_function() const -> hasher {
+ return m_hash;
+ }
+
+ auto key_eq() const -> key_equal {
+ return m_equal;
+ }
+
+ // nonstandard API: expose the underlying values container
+ [[nodiscard]] auto values() const noexcept -> value_container_type const& {
+ return m_values;
+ }
+
+ // non-member functions ///////////////////////////////////////////////////
+
+ friend auto operator==(table const& a, table const& b) -> bool {
+ if (&a == &b) {
+ return true;
+ }
+ if (a.size() != b.size()) {
+ return false;
+ }
+ for (auto const& b_entry : b) {
+ auto it = a.find(get_key(b_entry));
+ if constexpr (is_map_v<T>) {
+ // map: check that key is here, then also check that value is the same
+ if (a.end() == it || !(b_entry.second == it->second)) {
+ return false;
+ }
+ } else {
+ // set: only check that the key is here
+ if (a.end() == it) {
+ return false;
+ }
+ }
+ }
+ return true;
+ }
+
+ friend auto operator!=(table const& a, table const& b) -> bool {
+ return !(a == b);
+ }
+};
+
+} // namespace detail
+
+ANKERL_UNORDERED_DENSE_EXPORT template <class Key,
+ class T,
+ class Hash = hash<Key>,
+ class KeyEqual = std::equal_to<Key>,
+ class AllocatorOrContainer = std::allocator<std::pair<Key, T>>,
+ class Bucket = bucket_type::standard>
+using map = detail::table<Key, T, Hash, KeyEqual, AllocatorOrContainer, Bucket, false>;
+
+ANKERL_UNORDERED_DENSE_EXPORT template <class Key,
+ class T,
+ class Hash = hash<Key>,
+ class KeyEqual = std::equal_to<Key>,
+ class AllocatorOrContainer = std::allocator<std::pair<Key, T>>,
+ class Bucket = bucket_type::standard>
+using segmented_map = detail::table<Key, T, Hash, KeyEqual, AllocatorOrContainer, Bucket, true>;
+
+ANKERL_UNORDERED_DENSE_EXPORT template <class Key,
+ class Hash = hash<Key>,
+ class KeyEqual = std::equal_to<Key>,
+ class AllocatorOrContainer = std::allocator<Key>,
+ class Bucket = bucket_type::standard>
+using set = detail::table<Key, void, Hash, KeyEqual, AllocatorOrContainer, Bucket, false>;
+
+ANKERL_UNORDERED_DENSE_EXPORT template <class Key,
+ class Hash = hash<Key>,
+ class KeyEqual = std::equal_to<Key>,
+ class AllocatorOrContainer = std::allocator<Key>,
+ class Bucket = bucket_type::standard>
+using segmented_set = detail::table<Key, void, Hash, KeyEqual, AllocatorOrContainer, Bucket, true>;
+
+# if defined(ANKERL_UNORDERED_DENSE_PMR)
+
+namespace pmr {
+
+ANKERL_UNORDERED_DENSE_EXPORT template <class Key,
+ class T,
+ class Hash = hash<Key>,
+ class KeyEqual = std::equal_to<Key>,
+ class Bucket = bucket_type::standard>
+using map =
+ detail::table<Key, T, Hash, KeyEqual, ANKERL_UNORDERED_DENSE_PMR::polymorphic_allocator<std::pair<Key, T>>, Bucket, false>;
+
+ANKERL_UNORDERED_DENSE_EXPORT template <class Key,
+ class T,
+ class Hash = hash<Key>,
+ class KeyEqual = std::equal_to<Key>,
+ class Bucket = bucket_type::standard>
+using segmented_map =
+ detail::table<Key, T, Hash, KeyEqual, ANKERL_UNORDERED_DENSE_PMR::polymorphic_allocator<std::pair<Key, T>>, Bucket, true>;
+
+ANKERL_UNORDERED_DENSE_EXPORT template <class Key,
+ class Hash = hash<Key>,
+ class KeyEqual = std::equal_to<Key>,
+ class Bucket = bucket_type::standard>
+using set = detail::table<Key, void, Hash, KeyEqual, ANKERL_UNORDERED_DENSE_PMR::polymorphic_allocator<Key>, Bucket, false>;
+
+ANKERL_UNORDERED_DENSE_EXPORT template <class Key,
+ class Hash = hash<Key>,
+ class KeyEqual = std::equal_to<Key>,
+ class Bucket = bucket_type::standard>
+using segmented_set =
+ detail::table<Key, void, Hash, KeyEqual, ANKERL_UNORDERED_DENSE_PMR::polymorphic_allocator<Key>, Bucket, true>;
+
+} // namespace pmr
+
+# endif
+
+// deduction guides ///////////////////////////////////////////////////////////
+
+// deduction guides for alias templates are only possible since C++20
+// see https://en.cppreference.com/w/cpp/language/class_template_argument_deduction
+
+} // namespace ANKERL_UNORDERED_DENSE_NAMESPACE
+} // namespace ankerl::unordered_dense
+
+// std extensions /////////////////////////////////////////////////////////////
+
+namespace std { // NOLINT(cert-dcl58-cpp)
+
+ANKERL_UNORDERED_DENSE_EXPORT template <class Key,
+ class T,
+ class Hash,
+ class KeyEqual,
+ class AllocatorOrContainer,
+ class Bucket,
+ class Pred,
+ bool IsSegmented>
+// NOLINTNEXTLINE(cert-dcl58-cpp)
+auto erase_if(ankerl::unordered_dense::detail::table<Key, T, Hash, KeyEqual, AllocatorOrContainer, Bucket, IsSegmented>& map,
+ Pred pred) -> size_t {
+ using map_t = ankerl::unordered_dense::detail::table<Key, T, Hash, KeyEqual, AllocatorOrContainer, Bucket, IsSegmented>;
+
+ // going back to front because erase() invalidates the end iterator
+ auto const old_size = map.size();
+ auto idx = old_size;
+ while (idx) {
+ --idx;
+ auto it = map.begin() + static_cast<typename map_t::difference_type>(idx);
+ if (pred(*it)) {
+ map.erase(it);
+ }
+ }
+
+ return old_size - map.size();
+}
+
+} // namespace std
+
+#endif
+#endif
diff --git a/gdbsupport/unordered_map.h b/gdbsupport/unordered_map.h
new file mode 100644
index 000000000000..96407b5edcf3
--- /dev/null
+++ b/gdbsupport/unordered_map.h
@@ -0,0 +1,37 @@
+/* Copyright (C) 2024 Free Software Foundation, Inc.
+
+ This file is part of GDB.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>. */
+
+#ifndef GDBSUPPORT_UNORDERED_MAP_H
+#define GDBSUPPORT_UNORDERED_MAP_H
+
+#include "unordered_dense.h"
+
+namespace gdb
+{
+
+template<typename Key,
+ typename T,
+ typename Hash = ankerl::unordered_dense::hash<Key>,
+ typename KeyEqual = std::equal_to<Key>>
+using unordered_map
+ = ankerl::unordered_dense::map
+ <Key, T, Hash, KeyEqual, std::allocator<std::pair<Key, T>>,
+ ankerl::unordered_dense::bucket_type::standard>;
+
+} /* namespace gdb */
+
+#endif /* GDBSUPPORT_UNORDERED_MAP_H */
diff --git a/gdbsupport/unordered_set.h b/gdbsupport/unordered_set.h
new file mode 100644
index 000000000000..73a4fa45d864
--- /dev/null
+++ b/gdbsupport/unordered_set.h
@@ -0,0 +1,36 @@
+/* Copyright (C) 2024 Free Software Foundation, Inc.
+
+ This file is part of GDB.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>. */
+
+#ifndef GDBSUPPORT_UNORDERED_SET_H
+#define GDBSUPPORT_UNORDERED_SET_H
+
+#include "unordered_dense.h"
+
+namespace gdb
+{
+
+template<typename Key,
+ typename Hash = ankerl::unordered_dense::hash<Key>,
+ typename KeyEqual = std::equal_to<Key>>
+using unordered_set
+ = ankerl::unordered_dense::set
+ <Key, Hash, KeyEqual, std::allocator<Key>,
+ ankerl::unordered_dense::bucket_type::standard>;
+
+} /* namespace gdb */
+
+#endif /* GDBSUPPORT_UNORDERED_SET_H */
--
2.47.0
* [PATCH v5 05/25] Convert compile-c-symbols.c to new hash table
2024-11-04 18:27 [PATCH v5 00/25] Add a C++ hash table to gdbsupport Simon Marchi
` (3 preceding siblings ...)
2024-11-04 18:27 ` [PATCH v5 04/25] gdbsupport: add unordered_dense.h 4.4.0 Simon Marchi
@ 2024-11-04 18:27 ` Simon Marchi
2024-11-04 18:27 ` [PATCH v5 06/25] Convert filename-seen-cache.h " Simon Marchi
` (20 subsequent siblings)
25 siblings, 0 replies; 27+ messages in thread
From: Simon Marchi @ 2024-11-04 18:27 UTC (permalink / raw)
To: gdb-patches; +Cc: Simon Marchi, Tom Tromey
This converts compile-c-symbols.c to use the new hash table.
I made it use a set of string_view instead of a set of `symbol *`, to
avoid calling `symbol::natural_name` over and over. This appears safe
to do, since I don't expect the storage behind the natural names to
change during the lifetime of the set.
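For illustration, the deduplication pattern is roughly the sketch below.
std::unordered_set and the made-up symbol_like type stand in for
gdb::unordered_set and gdb's symbol class; this is not the exact code
from the patch.

#include <string_view>
#include <unordered_set>
#include <vector>

/* Made-up stand-in for gdb's symbol; only natural_name matters here.  */
struct symbol_like
{
  const char *natural_name () const
  { return m_name; }

  const char *m_name;
};

static void
generate_once_per_name (const std::vector<symbol_like *> &block_syms)
{
  /* std::unordered_set stands in for gdb::unordered_set.  */
  std::unordered_set<std::string_view> symset;

  for (symbol_like *sym : block_syms)
    if (symset.insert (sym->natural_name ()).second)
      {
        /* First time this name is seen: generate code for it.  */
      }
}

The set only stores views into the symbols' own name storage, which is
why the lifetime assumption above matters.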
Change-Id: Ie9f9334d4f03b9a8ae6886287f82cd435eee217c
Co-Authored-By: Tom Tromey <tom@tromey.com>
---
gdb/compile/compile-c-symbols.c | 53 ++++-----------------------------
1 file changed, 5 insertions(+), 48 deletions(-)
diff --git a/gdb/compile/compile-c-symbols.c b/gdb/compile/compile-c-symbols.c
index 7a38d3a6d593..7070e5e88f38 100644
--- a/gdb/compile/compile-c-symbols.c
+++ b/gdb/compile/compile-c-symbols.c
@@ -30,7 +30,7 @@
#include "gdbtypes.h"
#include "dwarf2/loc.h"
#include "inferior.h"
-
+#include "gdbsupport/unordered_set.h"
/* Compute the name of the pointer representing a local symbol's
address. */
@@ -441,46 +441,6 @@ gcc_symbol_address (void *datum, struct gcc_c_context *gcc_context,
return result;
}
-\f
-
-/* A hash function for symbol names. */
-
-static hashval_t
-hash_symname (const void *a)
-{
- const struct symbol *sym = (const struct symbol *) a;
-
- return htab_hash_string (sym->natural_name ());
-}
-
-/* A comparison function for hash tables that just looks at symbol
- names. */
-
-static int
-eq_symname (const void *a, const void *b)
-{
- const struct symbol *syma = (const struct symbol *) a;
- const struct symbol *symb = (const struct symbol *) b;
-
- return strcmp (syma->natural_name (), symb->natural_name ()) == 0;
-}
-
-/* If a symbol with the same name as SYM is already in HASHTAB, return
- 1. Otherwise, add SYM to HASHTAB and return 0. */
-
-static int
-symbol_seen (htab_t hashtab, struct symbol *sym)
-{
- void **slot;
-
- slot = htab_find_slot (hashtab, sym, INSERT);
- if (*slot != NULL)
- return 1;
-
- *slot = sym;
- return 0;
-}
-
/* Generate C code to compute the length of a VLA. */
static void
@@ -626,19 +586,16 @@ generate_c_for_variable_locations (compile_instance *compiler,
/* Ensure that a given name is only entered once. This reflects the
reality of shadowing. */
- htab_up symhash (htab_create_alloc (1, hash_symname, eq_symname, NULL,
- xcalloc, xfree));
+ gdb::unordered_set<std::string_view> symset;
while (1)
{
/* Iterate over symbols in this block, generating code to
compute the location of each local variable. */
for (struct symbol *sym : block_iterator_range (block))
- {
- if (!symbol_seen (symhash.get (), sym))
- generate_c_for_for_one_variable (compiler, stream, gdbarch,
- registers_used, pc, sym);
- }
+ if (symset.insert (sym->natural_name ()).second)
+ generate_c_for_for_one_variable (compiler, stream, gdbarch,
+ registers_used, pc, sym);
/* If we just finished the outermost block of a function, we're
done. */
--
2.47.0
* [PATCH v5 06/25] Convert filename-seen-cache.h to new hash table
2024-11-04 18:27 [PATCH v5 00/25] Add a C++ hash table to gdbsupport Simon Marchi
` (4 preceding siblings ...)
2024-11-04 18:27 ` [PATCH v5 05/25] Convert compile-c-symbols.c to new hash table Simon Marchi
@ 2024-11-04 18:27 ` Simon Marchi
2024-11-04 18:27 ` [PATCH v5 07/25] Convert linespec.c " Simon Marchi
` (19 subsequent siblings)
25 siblings, 0 replies; 27+ messages in thread
From: Simon Marchi @ 2024-11-04 18:27 UTC (permalink / raw)
To: gdb-patches; +Cc: Simon Marchi, Tom Tromey
This converts filename-seen-cache.h to use the new hash table.
filename-seen-cache.c is removed.
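For illustration, keying a set directly on `const char *' with custom
hash and equality functors looks roughly like the sketch below.
std::unordered_set and the trivial functors stand in for
gdb::unordered_set and gdb's filename_hash / filename_eq, which know
about gdb's file name comparison rules.

#include <cstring>
#include <functional>
#include <string_view>
#include <unordered_set>

/* Simple stand-ins for gdb's filename_hash / filename_eq.  */
struct filename_hash_f
{
  std::size_t operator() (const char *s) const noexcept
  { return std::hash<std::string_view> () (s); }
};

struct filename_eq_f
{
  bool operator() (const char *lhs, const char *rhs) const noexcept
  { return std::strcmp (lhs, rhs) == 0; }
};

/* The set stores the pointers themselves, so FILE must outlive the
   cache, just as with the old htab-based code.  */
using seen_set
  = std::unordered_set<const char *, filename_hash_f, filename_eq_f>;

static bool
seen (seen_set &cache, const char *file)
{
  return !cache.insert (file).second;
}

As before, seen () returns true only when FILE compares equal to a file
name that was already inserted.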
Change-Id: Iffac1d5e49d1610049b7deeef6e98d49e644366a
Co-Authored-By: Tom Tromey <tom@tromey.com>
---
gdb/Makefile.in | 1 -
gdb/filename-seen-cache.c | 58 ---------------------------------------
gdb/filename-seen-cache.h | 41 ++++++++++++++-------------
3 files changed, 20 insertions(+), 80 deletions(-)
delete mode 100644 gdb/filename-seen-cache.c
diff --git a/gdb/Makefile.in b/gdb/Makefile.in
index 823817889920..93a789cae33d 100644
--- a/gdb/Makefile.in
+++ b/gdb/Makefile.in
@@ -1122,7 +1122,6 @@ COMMON_SFILES = \
f-lang.c \
f-typeprint.c \
f-valprint.c \
- filename-seen-cache.c \
filesystem.c \
findcmd.c \
findvar.c \
diff --git a/gdb/filename-seen-cache.c b/gdb/filename-seen-cache.c
deleted file mode 100644
index a08927fb9fd4..000000000000
--- a/gdb/filename-seen-cache.c
+++ /dev/null
@@ -1,58 +0,0 @@
-/* Filename-seen cache for the GNU debugger, GDB.
-
- Copyright (C) 1986-2024 Free Software Foundation, Inc.
-
- This file is part of GDB.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <http://www.gnu.org/licenses/>. */
-
-#include "filename-seen-cache.h"
-#include "filenames.h"
-
- /* Initial size of the table. It automagically grows from here. */
-#define INITIAL_FILENAME_SEEN_CACHE_SIZE 100
-
-/* filename_seen_cache constructor. */
-
-filename_seen_cache::filename_seen_cache ()
- : m_tab (htab_create_alloc (INITIAL_FILENAME_SEEN_CACHE_SIZE,
- filename_hash, filename_eq,
- NULL, xcalloc, xfree))
-{
-}
-
-/* See filename-seen-cache.h. */
-
-void
-filename_seen_cache::clear ()
-{
- htab_empty (m_tab.get ());
-}
-
-/* See filename-seen-cache.h. */
-
-bool
-filename_seen_cache::seen (const char *file)
-{
- void **slot;
-
- /* Is FILE in tab? */
- slot = htab_find_slot (m_tab.get (), file, INSERT);
- if (*slot != NULL)
- return true;
-
- /* No; add it to tab. */
- *slot = (char *) file;
- return false;
-}
diff --git a/gdb/filename-seen-cache.h b/gdb/filename-seen-cache.h
index 5dc800d2b163..4bcfeb5c8983 100644
--- a/gdb/filename-seen-cache.h
+++ b/gdb/filename-seen-cache.h
@@ -20,46 +20,45 @@
#ifndef FILENAME_SEEN_CACHE_H
#define FILENAME_SEEN_CACHE_H
-#include "gdbsupport/function-view.h"
-#include "gdbsupport/gdb-hashtab.h"
+#include "gdbsupport/unordered_set.h"
+#include "filenames.h"
/* Cache to watch for file names already seen. */
class filename_seen_cache
{
public:
- filename_seen_cache ();
+ filename_seen_cache () = default;
DISABLE_COPY_AND_ASSIGN (filename_seen_cache);
- /* Empty the cache, but do not delete it. */
- void clear ();
+ /* Empty the cache. */
+ void clear ()
+ { m_tab.clear (); }
- /* If FILE is not already in the table of files in CACHE, add it and
+ /* If FILE is not already in the table of files of the cache, add it and
return false; otherwise return true.
NOTE: We don't manage space for FILE, we assume FILE lives as
long as the caller needs. */
- bool seen (const char *file);
+ bool seen (const char *file)
+ { return !m_tab.insert (file).second; }
- /* Traverse all cache entries, calling CALLBACK on each. The
- filename is passed as argument to CALLBACK. */
- void traverse (gdb::function_view<void (const char *filename)> callback)
+private:
+ struct hash
{
- auto erased_cb = [] (void **slot, void *info) -> int
- {
- auto filename = (const char *) *slot;
- auto restored_cb = (decltype (callback) *) info;
- (*restored_cb) (filename);
- return 1;
- };
+ std::size_t operator() (const char *s) const noexcept
+ { return filename_hash (s); }
+ };
- htab_traverse_noresize (m_tab.get (), erased_cb, &callback);
- }
+ struct eq
+ {
+ bool operator() (const char *lhs, const char *rhs) const noexcept
+ { return filename_eq (lhs, rhs); }
+ };
-private:
/* Table of files seen so far. */
- htab_up m_tab;
+ gdb::unordered_set<const char *, hash, eq> m_tab;
};
#endif /* FILENAME_SEEN_CACHE_H */
--
2.47.0
* [PATCH v5 07/25] Convert linespec.c to new hash table
2024-11-04 18:27 [PATCH v5 00/25] Add a C++ hash table to gdbsupport Simon Marchi
` (5 preceding siblings ...)
2024-11-04 18:27 ` [PATCH v5 06/25] Convert filename-seen-cache.h " Simon Marchi
@ 2024-11-04 18:27 ` Simon Marchi
2024-11-04 18:27 ` [PATCH v5 08/25] Convert target-descriptions.c " Simon Marchi
` (18 subsequent siblings)
25 siblings, 0 replies; 27+ messages in thread
From: Simon Marchi @ 2024-11-04 18:27 UTC (permalink / raw)
To: gdb-patches; +Cc: Simon Marchi, Tom Tromey
This converts linespec.c to use the new hash table.
Note that more simplification could perhaps be done. Currently, the
collectors in this code insert an element into a set and then, if the
element has not been seen before, append it to a vector. If we know
the order does not matter, or if the results can be sorted later, we
could dispense with the vector. This would simplify the code some
more. (This technique is used in the vtable patch, later in this
series.)
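For illustration, the "set for uniqueness, vector for order" collector
pattern described above is roughly the sketch below, with std::
containers standing in for the gdb:: ones.

#include <unordered_set>
#include <vector>

struct symtab;  /* Opaque here; pointers are enough for the sketch.  */

struct symtab_collector_sketch
{
  /* Record SYMTAB_PTR, keeping only its first occurrence but
     preserving the first-seen order in m_symtabs.  */
  bool operator() (symtab *symtab_ptr)
  {
    if (m_seen.insert (symtab_ptr).second)
      m_symtabs.push_back (symtab_ptr);

    /* Mirrors the gdb callback, which returns false to keep
       iterating.  */
    return false;
  }

  std::unordered_set<const symtab *> m_seen;
  std::vector<symtab *> m_symtabs;
};

If the order turned out not to matter, m_symtabs could be dropped and
callers could iterate m_seen directly.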
Change-Id: Ie6828b1520d918d189ab5140dc8094a609152cf2
Co-Authored-By: Tom Tromey <tom@tromey.com>
---
gdb/linespec.c | 54 +++++++++++++++++---------------------------------
1 file changed, 18 insertions(+), 36 deletions(-)
diff --git a/gdb/linespec.c b/gdb/linespec.c
index d5256261eff5..7eacc390b68c 100644
--- a/gdb/linespec.c
+++ b/gdb/linespec.c
@@ -45,6 +45,7 @@
#include "gdbsupport/def-vector.h"
#include <algorithm>
#include "inferior.h"
+#include "gdbsupport/unordered_set.h"
/* An enumeration of the various things a user might attempt to
complete for a linespec location. */
@@ -3404,12 +3405,9 @@ namespace {
class decode_compound_collector
{
public:
- decode_compound_collector ()
- : m_unique_syms (htab_create_alloc (1, htab_hash_pointer,
- htab_eq_pointer, NULL,
- xcalloc, xfree))
- {
- }
+ decode_compound_collector () = default;
+
+ DISABLE_COPY_AND_ASSIGN (decode_compound_collector);
/* Return all symbols collected. */
std::vector<block_symbol> release_symbols ()
@@ -3423,7 +3421,7 @@ class decode_compound_collector
private:
/* A hash table of all symbols we found. We use this to avoid
adding any symbol more than once. */
- htab_up m_unique_syms;
+ gdb::unordered_set<const symbol *> m_unique_syms;
/* The result vector. */
std::vector<block_symbol> m_symbols;
@@ -3432,7 +3430,6 @@ class decode_compound_collector
bool
decode_compound_collector::operator () (block_symbol *bsym)
{
- void **slot;
struct type *t;
struct symbol *sym = bsym->symbol;
@@ -3446,12 +3443,8 @@ decode_compound_collector::operator () (block_symbol *bsym)
&& t->code () != TYPE_CODE_NAMESPACE)
return true; /* Continue iterating. */
- slot = htab_find_slot (m_unique_syms.get (), sym, INSERT);
- if (!*slot)
- {
- *slot = sym;
- m_symbols.push_back (*bsym);
- }
+ if (m_unique_syms.insert (sym).second)
+ m_symbols.push_back (*bsym);
return true; /* Continue iterating. */
}
@@ -3675,14 +3668,18 @@ namespace {
class symtab_collector
{
public:
- symtab_collector ()
- : m_symtab_table (htab_create (1, htab_hash_pointer, htab_eq_pointer,
- NULL))
- {
- }
+ symtab_collector () = default;
+
+ DISABLE_COPY_AND_ASSIGN (symtab_collector);
/* Callable as a symbol_found_callback_ftype callback. */
- bool operator () (symtab *sym);
+ bool operator () (struct symtab *symtab)
+ {
+ if (m_symtab_table.insert (symtab).second)
+ m_symtabs.push_back (symtab);
+
+ return false;
+ }
/* Return an rvalue reference to the collected symtabs. */
std::vector<symtab *> &&release_symtabs ()
@@ -3695,24 +3692,9 @@ class symtab_collector
std::vector<symtab *> m_symtabs;
/* This is used to ensure the symtabs are unique. */
- htab_up m_symtab_table;
+ gdb::unordered_set<const symtab *> m_symtab_table;
};
-bool
-symtab_collector::operator () (struct symtab *symtab)
-{
- void **slot;
-
- slot = htab_find_slot (m_symtab_table.get (), symtab, INSERT);
- if (!*slot)
- {
- *slot = symtab;
- m_symtabs.push_back (symtab);
- }
-
- return false;
-}
-
} // namespace
/* Given a file name, return a list of all matching symtabs. If
--
2.47.0
* [PATCH v5 08/25] Convert target-descriptions.c to new hash table
2024-11-04 18:27 [PATCH v5 00/25] Add a C++ hash table to gdbsupport Simon Marchi
` (6 preceding siblings ...)
2024-11-04 18:27 ` [PATCH v5 07/25] Convert linespec.c " Simon Marchi
@ 2024-11-04 18:27 ` Simon Marchi
2024-11-04 18:27 ` [PATCH v5 09/25] Convert dwarf2/macro.c " Simon Marchi
` (17 subsequent siblings)
25 siblings, 0 replies; 27+ messages in thread
From: Simon Marchi @ 2024-11-04 18:27 UTC (permalink / raw)
To: gdb-patches; +Cc: Simon Marchi, Tom Tromey
This converts target-descriptions.c to use the new hash table.
Change-Id: I03dfc6053c9856c5578548afcfdf58abf8b7ec2c
Co-Authored-By: Tom Tromey <tom@tromey.com>
---
gdb/target-descriptions.c | 20 +++++++++-----------
1 file changed, 9 insertions(+), 11 deletions(-)
diff --git a/gdb/target-descriptions.c b/gdb/target-descriptions.c
index 1bd22c273a29..3ee8a751f3b3 100644
--- a/gdb/target-descriptions.c
+++ b/gdb/target-descriptions.c
@@ -21,6 +21,7 @@
#include "arch-utils.h"
#include "cli/cli-cmds.h"
+#include "gdbsupport/unordered_set.h"
#include "gdbtypes.h"
#include "reggroups.h"
#include "target.h"
@@ -30,7 +31,6 @@
#include "osabi.h"
#include "gdbsupport/gdb_obstack.h"
-#include "hashtab.h"
#include "inferior.h"
#include <algorithm>
#include "completer.h"
@@ -1042,16 +1042,14 @@ tdesc_use_registers (struct gdbarch *gdbarch,
data->arch_regs = std::move (early_data->arch_regs);
/* Build up a set of all registers, so that we can assign register
- numbers where needed. The hash table expands as necessary, so
- the initial size is arbitrary. */
- htab_up reg_hash (htab_create (37, htab_hash_pointer, htab_eq_pointer,
- NULL));
+ numbers where needed. */
+ gdb::unordered_set<tdesc_reg *> reg_hash;
+
for (const tdesc_feature_up &feature : target_desc->features)
for (const tdesc_reg_up &reg : feature->registers)
{
- void **slot = htab_find_slot (reg_hash.get (), reg.get (), INSERT);
+ reg_hash.insert (reg.get ());
- *slot = reg.get ();
/* Add reggroup if its new. */
if (!reg->group.empty ())
if (reggroup_find (gdbarch, reg->group.c_str ()) == NULL)
@@ -1064,7 +1062,7 @@ tdesc_use_registers (struct gdbarch *gdbarch,
architecture. */
for (const tdesc_arch_reg &arch_reg : data->arch_regs)
if (arch_reg.reg != NULL)
- htab_remove_elt (reg_hash.get (), arch_reg.reg);
+ reg_hash.erase (arch_reg.reg);
/* Assign numbers to the remaining registers and add them to the
list of registers. The new numbers are always above gdbarch_num_regs.
@@ -1082,7 +1080,7 @@ tdesc_use_registers (struct gdbarch *gdbarch,
{
for (const tdesc_feature_up &feature : target_desc->features)
for (const tdesc_reg_up &reg : feature->registers)
- if (htab_find (reg_hash.get (), reg.get ()) != NULL)
+ if (reg_hash.contains (reg.get ()))
{
int regno = unk_reg_cb (gdbarch, feature.get (),
reg->name.c_str (), num_regs);
@@ -1093,7 +1091,7 @@ tdesc_use_registers (struct gdbarch *gdbarch,
data->arch_regs.emplace_back (nullptr, nullptr);
data->arch_regs[regno] = tdesc_arch_reg (reg.get (), NULL);
num_regs = regno + 1;
- htab_remove_elt (reg_hash.get (), reg.get ());
+ reg_hash.erase (reg.get ());
}
}
}
@@ -1105,7 +1103,7 @@ tdesc_use_registers (struct gdbarch *gdbarch,
unnumbered registers. */
for (const tdesc_feature_up &feature : target_desc->features)
for (const tdesc_reg_up &reg : feature->registers)
- if (htab_find (reg_hash.get (), reg.get ()) != NULL)
+ if (reg_hash.contains (reg.get ()))
{
data->arch_regs.emplace_back (reg.get (), nullptr);
num_regs++;
--
2.47.0
* [PATCH v5 09/25] Convert dwarf2/macro.c to new hash table
2024-11-04 18:27 [PATCH v5 00/25] Add a C++ hash table to gdbsupport Simon Marchi
` (7 preceding siblings ...)
2024-11-04 18:27 ` [PATCH v5 08/25] Convert target-descriptions.c " Simon Marchi
@ 2024-11-04 18:27 ` Simon Marchi
2024-11-04 18:27 ` [PATCH v5 10/25] Convert breakpoint.c " Simon Marchi
` (16 subsequent siblings)
25 siblings, 0 replies; 27+ messages in thread
From: Simon Marchi @ 2024-11-04 18:27 UTC (permalink / raw)
To: gdb-patches; +Cc: Simon Marchi, Tom Tromey
This converts dwarf2/macro.c to use the new hash table.
Change-Id: I6af0d1178e2db330fe3a89d57763974145ed17c4
Co-Authored-By: Tom Tromey <tom@tromey.com>
---
gdb/dwarf2/macro.c | 22 ++++++++--------------
1 file changed, 8 insertions(+), 14 deletions(-)
diff --git a/gdb/dwarf2/macro.c b/gdb/dwarf2/macro.c
index 5371acf3d59c..aab855ac28a6 100644
--- a/gdb/dwarf2/macro.c
+++ b/gdb/dwarf2/macro.c
@@ -35,6 +35,7 @@
#include "macrotab.h"
#include "complaints.h"
#include "objfiles.h"
+#include "gdbsupport/unordered_set.h"
static void
dwarf2_macro_malformed_definition_complaint (const char *arg1)
@@ -422,7 +423,8 @@ dwarf_decode_macro_bytes (dwarf2_per_objfile *per_objfile,
struct dwarf2_section_info *str_section,
struct dwarf2_section_info *str_offsets_section,
std::optional<ULONGEST> str_offsets_base,
- htab_t include_hash, struct dwarf2_cu *cu)
+ gdb::unordered_set<const gdb_byte *> &include_hash,
+ struct dwarf2_cu *cu)
{
struct objfile *objfile = per_objfile->objfile;
enum dwarf_macro_record_type macinfo_type;
@@ -697,7 +699,6 @@ dwarf_decode_macro_bytes (dwarf2_per_objfile *per_objfile,
case DW_MACRO_import_sup:
{
LONGEST offset;
- void **slot;
bfd *include_bfd = abfd;
const struct dwarf2_section_info *include_section = section;
const gdb_byte *include_mac_end = mac_end;
@@ -719,9 +720,8 @@ dwarf_decode_macro_bytes (dwarf2_per_objfile *per_objfile,
}
new_mac_ptr = include_section->buffer + offset;
- slot = htab_find_slot (include_hash, new_mac_ptr, INSERT);
- if (*slot != NULL)
+ if (!include_hash.insert (new_mac_ptr).second)
{
/* This has actually happened; see
http://sourceware.org/bugzilla/show_bug.cgi?id=13568. */
@@ -730,8 +730,6 @@ dwarf_decode_macro_bytes (dwarf2_per_objfile *per_objfile,
}
else
{
- *slot = (void *) new_mac_ptr;
-
dwarf_decode_macro_bytes (per_objfile, builder, include_bfd,
new_mac_ptr, include_mac_end,
current_file, lh, section,
@@ -739,7 +737,7 @@ dwarf_decode_macro_bytes (dwarf2_per_objfile *per_objfile,
str_section, str_offsets_section,
str_offsets_base, include_hash, cu);
- htab_remove_elt (include_hash, (void *) new_mac_ptr);
+ include_hash.erase (new_mac_ptr);
}
}
break;
@@ -788,7 +786,6 @@ dwarf_decode_macros (dwarf2_per_objfile *per_objfile,
struct macro_source_file *current_file = 0;
enum dwarf_macro_record_type macinfo_type;
const gdb_byte *opcode_definitions[256];
- void **slot;
abfd = section->get_bfd_owner ();
@@ -933,14 +930,11 @@ dwarf_decode_macros (dwarf2_per_objfile *per_objfile,
command-line macro definitions/undefinitions. This flag is unset when we
reach the first DW_MACINFO_start_file entry. */
- htab_up include_hash (htab_create_alloc (1, htab_hash_pointer,
- htab_eq_pointer,
- NULL, xcalloc, xfree));
+ gdb::unordered_set<const gdb_byte *> include_hash;
mac_ptr = section->buffer + offset;
- slot = htab_find_slot (include_hash.get (), mac_ptr, INSERT);
- *slot = (void *) mac_ptr;
+ include_hash.insert (mac_ptr);
dwarf_decode_macro_bytes (per_objfile, builder, abfd, mac_ptr, mac_end,
current_file, lh, section, section_is_gnu, 0,
offset_size, str_section, str_offsets_section,
- str_offsets_base, include_hash.get (), cu);
+ str_offsets_base, include_hash, cu);
}
--
2.47.0
* [PATCH v5 10/25] Convert breakpoint.c to new hash table
2024-11-04 18:27 [PATCH v5 00/25] Add a C++ hash table to gdbsupport Simon Marchi
` (8 preceding siblings ...)
2024-11-04 18:27 ` [PATCH v5 09/25] Convert dwarf2/macro.c " Simon Marchi
@ 2024-11-04 18:27 ` Simon Marchi
2024-11-04 18:27 ` [PATCH v5 11/25] Convert py-framefilter.c " Simon Marchi
` (15 subsequent siblings)
25 siblings, 0 replies; 27+ messages in thread
From: Simon Marchi @ 2024-11-04 18:27 UTC (permalink / raw)
To: gdb-patches; +Cc: Simon Marchi, Tom Tromey
This converts breakpoint.c to use the new hash table.
Change-Id: I6d997a6242969586a7f8f9eb22cc8dd8d3ac97ff
Co-Authored-By: Tom Tromey <tom@tromey.com>
---
gdb/breakpoint.c | 13 +++----------
1 file changed, 3 insertions(+), 10 deletions(-)
diff --git a/gdb/breakpoint.c b/gdb/breakpoint.c
index b7e4f5d0a454..eb8c91dc1a10 100644
--- a/gdb/breakpoint.c
+++ b/gdb/breakpoint.c
@@ -21,7 +21,7 @@
#include <ctype.h>
#include "event-top.h"
#include "exceptions.h"
-#include "hashtab.h"
+#include "gdbsupport/unordered_set.h"
#include "symtab.h"
#include "frame.h"
#include "breakpoint.h"
@@ -12736,25 +12736,18 @@ all_locations_are_pending (struct breakpoint *b, struct program_space *pspace)
static bool
ambiguous_names_p (const bp_location_range &locs)
{
- htab_up htab (htab_create_alloc (13, htab_hash_string, htab_eq_string, NULL,
- xcalloc, xfree));
+ gdb::unordered_set<std::string_view> htab;
for (const bp_location &l : locs)
{
- const char **slot;
const char *name = l.function_name.get ();
/* Allow for some names to be NULL, ignore them. */
if (name == NULL)
continue;
- slot = (const char **) htab_find_slot (htab.get (), (const void *) name,
- INSERT);
- /* NOTE: We can assume slot != NULL here because xcalloc never
- returns NULL. */
- if (*slot != NULL)
+ if (!htab.insert (name).second)
return true;
- *slot = name;
}
return false;
--
2.47.0
* [PATCH v5 11/25] Convert py-framefilter.c to new hash table
2024-11-04 18:27 [PATCH v5 00/25] Add a C++ hash table to gdbsupport Simon Marchi
` (9 preceding siblings ...)
2024-11-04 18:27 ` [PATCH v5 10/25] Convert breakpoint.c " Simon Marchi
@ 2024-11-04 18:27 ` Simon Marchi
2024-11-04 18:27 ` [PATCH v5 12/25] Convert disasm.c " Simon Marchi
` (14 subsequent siblings)
25 siblings, 0 replies; 27+ messages in thread
From: Simon Marchi @ 2024-11-04 18:27 UTC (permalink / raw)
To: gdb-patches; +Cc: Simon Marchi, Tom Tromey
This converts py-framefilter.c to use the new hash table.
Change-Id: I38f4eaa8ebbcd4fd6e5e8ddc462502a92bf62f5e
Co-Authored-By: Tom Tromey <tom@tromey.com>
---
gdb/python/py-framefilter.c | 62 +++++++++++++++++++------------------
1 file changed, 32 insertions(+), 30 deletions(-)
diff --git a/gdb/python/py-framefilter.c b/gdb/python/py-framefilter.c
index daec6dd91514..567b070ccec4 100644
--- a/gdb/python/py-framefilter.c
+++ b/gdb/python/py-framefilter.c
@@ -17,6 +17,7 @@
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>. */
+#include "gdbsupport/unordered_set.h"
#include "objfiles.h"
#include "symtab.h"
#include "language.h"
@@ -27,7 +28,6 @@
#include "stack.h"
#include "source.h"
#include "annotate.h"
-#include "hashtab.h"
#include "demangle.h"
#include "mi/mi-cmds.h"
#include "python-internal.h"
@@ -731,25 +731,37 @@ py_print_args (PyObject *filter,
return EXT_LANG_BT_OK;
}
+using levels_printed_hash = gdb::unordered_set<frame_info *>;
+
/* Print a single frame to the designated output stream, detecting
whether the output is MI or console, and formatting the output
- according to the conventions of that protocol. FILTER is the
- frame-filter associated with this frame. FLAGS is an integer
- describing the various print options. The FLAGS variables is
- described in "apply_frame_filter" function. ARGS_TYPE is an
- enumerator describing the argument format. OUT is the output
- stream to print, INDENT is the level of indention for this frame
- (in the case of elided frames), and LEVELS_PRINTED is a hash-table
- containing all the frames level that have already been printed.
- If a frame level has been printed, do not print it again (in the
- case of elided frames). Returns EXT_LANG_BT_ERROR on error, with any
- GDB exceptions converted to a Python exception, or EXT_LANG_BT_OK
- on success. It can also throw an exception RETURN_QUIT. */
+ according to the conventions of that protocol.
+
+ FILTER is the frame-filter associated with this frame.
+
+ FLAGS is an integer describing the various print options. The FLAGS
+ variable is described in the "apply_frame_filter" function.
+
+ ARGS_TYPE is an enumerator describing the argument format.
+
+ OUT is the output stream to print to.
+
+ INDENT is the level of indention for this frame (in the case of elided
+ frames).
+
+ LEVELS_PRINTED is a hash-table containing all the frames for which the
+ level has already been printed. If a level has been printed, do not print
+ it again (in the case of elided frames).
+
+ Returns EXT_LANG_BT_ERROR on error, with any GDB exceptions converted to a
+ Python exception, or EXT_LANG_BT_OK on success. It can also throw an
+ exception RETURN_QUIT. */
static enum ext_lang_bt_status
py_print_frame (PyObject *filter, frame_filter_flags flags,
enum ext_lang_frame_args args_type,
- struct ui_out *out, int indent, htab_t levels_printed)
+ struct ui_out *out, int indent,
+ levels_printed_hash &levels_printed)
{
int has_addr = 0;
CORE_ADDR address = 0;
@@ -859,23 +871,16 @@ py_print_frame (PyObject *filter, frame_filter_flags flags,
&& (location_print
|| (out->is_mi_like_p () && (print_frame_info || print_args))))
{
- struct frame_info **slot;
- int level;
-
- slot = (frame_info **) htab_find_slot (levels_printed,
- frame.get(), INSERT);
-
- level = frame_relative_level (frame);
-
/* Check if this frame has already been printed (there are cases
where elided synthetic dummy-frames have to 'borrow' the frame
architecture from the eliding frame. If that is the case, do
- not print 'level', but print spaces. */
- if (*slot == frame)
+ not print the level, but print spaces. */
+ if (!levels_printed.insert (frame.get ()).second)
out->field_skip ("level");
else
{
- *slot = frame.get ();
+ int level = frame_relative_level (frame);
+
annotate_frame_begin (print_level ? level : 0,
gdbarch, address);
out->text ("#");
@@ -1197,10 +1202,7 @@ gdbpy_apply_frame_filter (const struct extension_language_defn *extlang,
if (iterable == Py_None)
return EXT_LANG_BT_NO_FILTERS;
- htab_up levels_printed (htab_create (20,
- htab_hash_pointer,
- htab_eq_pointer,
- NULL));
+ levels_printed_hash levels_printed;
while (true)
{
@@ -1232,7 +1234,7 @@ gdbpy_apply_frame_filter (const struct extension_language_defn *extlang,
try
{
success = py_print_frame (item.get (), flags, args_type, out, 0,
- levels_printed.get ());
+ levels_printed);
}
catch (const gdb_exception_error &except)
{
--
2.47.0
* [PATCH v5 12/25] Convert disasm.c to new hash table
2024-11-04 18:27 [PATCH v5 00/25] Add a C++ hash table to gdbsupport Simon Marchi
` (10 preceding siblings ...)
2024-11-04 18:27 ` [PATCH v5 11/25] Convert py-framefilter.c " Simon Marchi
@ 2024-11-04 18:27 ` Simon Marchi
2024-11-04 18:27 ` [PATCH v5 13/25] Convert compile/compile.c " Simon Marchi
` (13 subsequent siblings)
25 siblings, 0 replies; 27+ messages in thread
From: Simon Marchi @ 2024-11-04 18:27 UTC (permalink / raw)
To: gdb-patches; +Cc: Simon Marchi, Tom Tromey
This converts disasm.c to use the new hash table.
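For readers new to the container: instead of free-standing hash and equality callbacks, the key becomes a small struct with operator== plus a hash functor. A rough standalone sketch of that pattern, written against std::unordered_set with a placeholder pointer type instead of the real symtab:
#include <functional>
#include <unordered_set>
struct line_key
{
  const void *symtab;		/* Placeholder for struct symtab *.  */
  int line;
  bool operator== (const line_key &other) const noexcept
  { return symtab == other.symtab && line == other.line; }
};
struct line_key_hash
{
  std::size_t operator() (const line_key &k) const noexcept
  {
    /* Same simple combination the patch uses: pointer hash plus line.  */
    return std::hash<const void *> () (k.symtab) + std::hash<int> () (k.line);
  }
};
int
main ()
{
  std::unordered_set<line_key, line_key_hash> table;
  int dummy;
  table.insert ({&dummy, 42});
  /* contains () is available from C++20; find () works everywhere.  */
  return table.find ({&dummy, 42}) != table.end () ? 0 : 1;
}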
Change-Id: I2efbe7ecc2964ec49e0b726ad4674e8eafc929f7
Co-Authored-By: Tom Tromey <tom@tromey.com>
---
gdb/disasm.c | 85 +++++++++++-----------------------------------------
1 file changed, 18 insertions(+), 67 deletions(-)
diff --git a/gdb/disasm.c b/gdb/disasm.c
index 541293ac4fcd..11d6efd923ad 100644
--- a/gdb/disasm.c
+++ b/gdb/disasm.c
@@ -19,6 +19,7 @@
#include "arch-utils.h"
#include "event-top.h"
+#include "gdbsupport/unordered_set.h"
#include "target.h"
#include "value.h"
#include "ui-out.h"
@@ -122,73 +123,25 @@ struct deprecated_dis_line_entry
struct dis_line_entry
{
+ dis_line_entry (struct symtab *symtab, int line) noexcept
+ : symtab (symtab),
+ line (line)
+ {}
+
+ bool operator== (const dis_line_entry &other) const noexcept
+ { return this->symtab == other.symtab && this->line == other.line; }
+
struct symtab *symtab;
int line;
};
/* Hash function for dis_line_entry. */
-static hashval_t
-hash_dis_line_entry (const void *item)
-{
- const struct dis_line_entry *dle = (const struct dis_line_entry *) item;
-
- return htab_hash_pointer (dle->symtab) + dle->line;
-}
-
-/* Equal function for dis_line_entry. */
-
-static int
-eq_dis_line_entry (const void *item_lhs, const void *item_rhs)
-{
- const struct dis_line_entry *lhs = (const struct dis_line_entry *) item_lhs;
- const struct dis_line_entry *rhs = (const struct dis_line_entry *) item_rhs;
-
- return (lhs->symtab == rhs->symtab
- && lhs->line == rhs->line);
-}
-
-/* Create the table to manage lines for mixed source/disassembly. */
-
-static htab_t
-allocate_dis_line_table (void)
-{
- return htab_create_alloc (41,
- hash_dis_line_entry, eq_dis_line_entry,
- xfree, xcalloc, xfree);
-}
-
-/* Add a new dis_line_entry containing SYMTAB and LINE to TABLE. */
-
-static void
-add_dis_line_entry (htab_t table, struct symtab *symtab, int line)
-{
- void **slot;
- struct dis_line_entry dle, *dlep;
-
- dle.symtab = symtab;
- dle.line = line;
- slot = htab_find_slot (table, &dle, INSERT);
- if (*slot == NULL)
- {
- dlep = XNEW (struct dis_line_entry);
- dlep->symtab = symtab;
- dlep->line = line;
- *slot = dlep;
- }
-}
-
-/* Return non-zero if SYMTAB, LINE are in TABLE. */
-
-static int
-line_has_code_p (htab_t table, struct symtab *symtab, int line)
+struct dis_line_entry_hash
{
- struct dis_line_entry dle;
-
- dle.symtab = symtab;
- dle.line = line;
- return htab_find (table, &dle) != NULL;
-}
+ std::size_t operator() (const dis_line_entry &x) const noexcept
+ { return std::hash<symtab *> () (x.symtab) + std::hash<int> () (x.line); }
+};
/* Wrapper of target_read_code. */
@@ -747,7 +700,7 @@ do_mixed_source_and_assembly (struct gdbarch *gdbarch,
but if that text is for code that will be disassembled later, then
we'll want to defer printing it until later with its associated code. */
- htab_up dis_line_table (allocate_dis_line_table ());
+ gdb::unordered_set<dis_line_entry, dis_line_entry_hash> dis_line_table;
struct objfile *objfile = main_symtab->compunit ()->objfile ();
@@ -786,7 +739,7 @@ do_mixed_source_and_assembly (struct gdbarch *gdbarch,
pc += length;
if (sal.symtab != NULL)
- add_dis_line_entry (dis_line_table.get (), sal.symtab, sal.line);
+ dis_line_table.emplace (sal.symtab, sal.line);
}
/* Second pass: print the disassembly.
@@ -859,11 +812,9 @@ do_mixed_source_and_assembly (struct gdbarch *gdbarch,
/* Several preceding source lines. Print the trailing ones
not associated with code that we'll print later. */
for (l = sal.line - 1; l > last_line; --l)
- {
- if (line_has_code_p (dis_line_table.get (),
- sal.symtab, l))
- break;
- }
+ if (dis_line_table.contains ({sal.symtab, l}))
+ break;
+
if (l < sal.line - 1)
{
start_preceding_line_to_display = l + 1;
--
2.47.0
* [PATCH v5 13/25] Convert compile/compile.c to new hash table
2024-11-04 18:27 [PATCH v5 00/25] Add a C++ hash table to gdbsupport Simon Marchi
` (11 preceding siblings ...)
2024-11-04 18:27 ` [PATCH v5 12/25] Convert disasm.c " Simon Marchi
@ 2024-11-04 18:27 ` Simon Marchi
2024-11-04 18:27 ` [PATCH v5 14/25] Convert type copying " Simon Marchi
` (12 subsequent siblings)
25 siblings, 0 replies; 27+ messages in thread
From: Simon Marchi @ 2024-11-04 18:27 UTC (permalink / raw)
To: gdb-patches; +Cc: Simon Marchi, Tom Tromey
This converts compile/compile.c to use the new hash table.
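The main idiom in the conversion is get-or-insert: emplace returns an (iterator, inserted) pair, which replaces the old htab_find_slot dance. A rough sketch of that idiom using std::unordered_map and made-up key/value types:
#include <cstdio>
#include <unordered_map>
int
main ()
{
  /* Hypothetical stand-in for the type -> gcc_type handle map.  */
  std::unordered_map<int, long> type_map;
  auto [it, inserted] = type_map.emplace (1, 100L);
  std::printf ("inserted=%d value=%ld\n", inserted, it->second);
  /* A second emplace with the same key is a no-op; the iterator points
     at the existing entry, so a mismatch can be detected.  */
  auto [it2, inserted2] = type_map.emplace (1, 200L);
  if (!inserted2 && it2->second != 200L)
    std::printf ("key already mapped to %ld\n", it2->second);
  return 0;
}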
Change-Id: I7df3b8d791ece731ae0d1d64cdc91a2e372f5d4f
Co-Authored-By: Tom Tromey <tom@tromey.com>
---
gdb/compile/compile.c | 154 ++++--------------------------------------
gdb/compile/compile.h | 11 +--
2 files changed, 19 insertions(+), 146 deletions(-)
diff --git a/gdb/compile/compile.c b/gdb/compile/compile.c
index 89f979000baa..d6bcc1f61f11 100644
--- a/gdb/compile/compile.c
+++ b/gdb/compile/compile.c
@@ -60,112 +60,15 @@ static struct cmd_list_element *compile_command_list;
bool compile_debug;
-/* Object of this type are stored in the compiler's symbol_err_map. */
-
-struct symbol_error
-{
- /* The symbol. */
-
- const struct symbol *sym;
-
- /* The error message to emit. This is malloc'd and owned by the
- hash table. */
-
- char *message;
-};
-
-/* An object that maps a gdb type to a gcc type. */
-
-struct type_map_instance
-{
- /* The gdb type. */
-
- struct type *type;
-
- /* The corresponding gcc type handle. */
-
- gcc_type gcc_type_handle;
-};
-
-/* Hash a type_map_instance. */
-
-static hashval_t
-hash_type_map_instance (const void *p)
-{
- const struct type_map_instance *inst = (const struct type_map_instance *) p;
-
- return htab_hash_pointer (inst->type);
-}
-
-/* Check two type_map_instance objects for equality. */
-
-static int
-eq_type_map_instance (const void *a, const void *b)
-{
- const struct type_map_instance *insta = (const struct type_map_instance *) a;
- const struct type_map_instance *instb = (const struct type_map_instance *) b;
-
- return insta->type == instb->type;
-}
-
-/* Hash function for struct symbol_error. */
-
-static hashval_t
-hash_symbol_error (const void *a)
-{
- const struct symbol_error *se = (const struct symbol_error *) a;
-
- return htab_hash_pointer (se->sym);
-}
-
-/* Equality function for struct symbol_error. */
-
-static int
-eq_symbol_error (const void *a, const void *b)
-{
- const struct symbol_error *sea = (const struct symbol_error *) a;
- const struct symbol_error *seb = (const struct symbol_error *) b;
-
- return sea->sym == seb->sym;
-}
-
-/* Deletion function for struct symbol_error. */
-
-static void
-del_symbol_error (void *a)
-{
- struct symbol_error *se = (struct symbol_error *) a;
-
- xfree (se->message);
- xfree (se);
-}
-
-/* Constructor for compile_instance. */
-
-compile_instance::compile_instance (struct gcc_base_context *gcc_fe,
- const char *options)
- : m_gcc_fe (gcc_fe), m_gcc_target_options (options),
- m_type_map (htab_create_alloc (10, hash_type_map_instance,
- eq_type_map_instance,
- xfree, xcalloc, xfree)),
- m_symbol_err_map (htab_create_alloc (10, hash_symbol_error,
- eq_symbol_error, del_symbol_error,
- xcalloc, xfree))
-{
-}
-
/* See compile-internal.h. */
bool
compile_instance::get_cached_type (struct type *type, gcc_type *ret) const
{
- struct type_map_instance inst, *found;
-
- inst.type = type;
- found = (struct type_map_instance *) htab_find (m_type_map.get (), &inst);
- if (found != NULL)
+ if (auto iter = m_type_map.find (type);
+ iter != m_type_map.end ())
{
- *ret = found->gcc_type_handle;
+ *ret = iter->second;
return true;
}
@@ -177,25 +80,12 @@ compile_instance::get_cached_type (struct type *type, gcc_type *ret) const
void
compile_instance::insert_type (struct type *type, gcc_type gcc_type)
{
- struct type_map_instance inst, *add;
- void **slot;
-
- inst.type = type;
- inst.gcc_type_handle = gcc_type;
- slot = htab_find_slot (m_type_map.get (), &inst, INSERT);
+ auto [it, inserted] = m_type_map.emplace (type, gcc_type);
- add = (struct type_map_instance *) *slot;
/* The type might have already been inserted in order to handle
recursive types. */
- if (add != NULL && add->gcc_type_handle != gcc_type)
+ if (!inserted && it->second != gcc_type)
error (_("Unexpected type id from GCC, check you use recent enough GCC."));
-
- if (add == NULL)
- {
- add = XNEW (struct type_map_instance);
- *add = inst;
- *slot = add;
- }
}
/* See compile-internal.h. */
@@ -204,19 +94,7 @@ void
compile_instance::insert_symbol_error (const struct symbol *sym,
const char *text)
{
- struct symbol_error e;
- void **slot;
-
- e.sym = sym;
- slot = htab_find_slot (m_symbol_err_map.get (), &e, INSERT);
- if (*slot == NULL)
- {
- struct symbol_error *ep = XNEW (struct symbol_error);
-
- ep->sym = sym;
- ep->message = xstrdup (text);
- *slot = ep;
- }
+ m_symbol_err_map.emplace (sym, text);
}
/* See compile-internal.h. */
@@ -224,20 +102,12 @@ compile_instance::insert_symbol_error (const struct symbol *sym,
void
compile_instance::error_symbol_once (const struct symbol *sym)
{
- struct symbol_error search;
- struct symbol_error *err;
-
- if (m_symbol_err_map == NULL)
- return;
-
- search.sym = sym;
- err = (struct symbol_error *) htab_find (m_symbol_err_map.get (), &search);
- if (err == NULL || err->message == NULL)
- return;
-
- gdb::unique_xmalloc_ptr<char> message (err->message);
- err->message = NULL;
- error (_("%s"), message.get ());
+ if (auto iter = m_symbol_err_map.find (sym);
+ iter != m_symbol_err_map.end () && !iter->second.empty ())
+ {
+ std::string message = std::move (iter->second);
+ error (_("%s"), message.c_str ());
+ }
}
/* Implement "show debug compile". */
diff --git a/gdb/compile/compile.h b/gdb/compile/compile.h
index 4be6f50d4f38..1f57d670c988 100644
--- a/gdb/compile/compile.h
+++ b/gdb/compile/compile.h
@@ -19,7 +19,7 @@
#define COMPILE_COMPILE_H
#include "gcc-c-interface.h"
-#include "gdbsupport/gdb-hashtab.h"
+#include "gdbsupport/unordered_map.h"
struct ui_file;
struct gdbarch;
@@ -61,7 +61,10 @@ enum compile_i_scope_types
class compile_instance
{
public:
- compile_instance (struct gcc_base_context *gcc_fe, const char *options);
+ compile_instance (struct gcc_base_context *gcc_fe, const char *options)
+ : m_gcc_fe (gcc_fe),
+ m_gcc_target_options (options)
+ {}
virtual ~compile_instance ()
{
@@ -163,10 +166,10 @@ class compile_instance
std::string m_gcc_target_options;
/* Map from gdb types to gcc types. */
- htab_up m_type_map;
+ gdb::unordered_map<type *, gcc_type> m_type_map;
/* Map from gdb symbols to gcc error messages to emit. */
- htab_up m_symbol_err_map;
+ gdb::unordered_map<const symbol *, std::string> m_symbol_err_map;
};
/* Public function that is called from compile_control case in the
--
2.47.0
* [PATCH v5 14/25] Convert type copying to new hash table
2024-11-04 18:27 [PATCH v5 00/25] Add a C++ hash table to gdbsupport Simon Marchi
` (12 preceding siblings ...)
2024-11-04 18:27 ` [PATCH v5 13/25] Convert compile/compile.c " Simon Marchi
@ 2024-11-04 18:27 ` Simon Marchi
2024-11-04 18:27 ` [PATCH v5 15/25] Convert static links " Simon Marchi
` (11 subsequent siblings)
25 siblings, 0 replies; 27+ messages in thread
From: Simon Marchi @ 2024-11-04 18:27 UTC (permalink / raw)
To: gdb-patches; +Cc: Simon Marchi, Tom Tromey
This converts the type copying code to use the new hash map.
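The shape being preserved is a memoized deep copy: the copy is entered into the map before recursing, so self-referential types terminate instead of recursing forever. A rough sketch of that pattern with a toy node type (not the real struct type; ownership handling omitted for brevity):
#include <cassert>
#include <unordered_map>
struct node
{
  node *next = nullptr;		/* Toy stand-in for a type referring to a type.  */
};
using copied_nodes_map = std::unordered_map<node *, node *>;
static node *
copy_node_recursive (node *n, copied_nodes_map &copied)
{
  if (auto iter = copied.find (n); iter != copied.end ())
    return iter->second;
  node *copy = new node;
  /* Register the copy before recursing, so cycles hit the lookup above
     instead of recursing forever.  */
  copied.emplace (n, copy);
  if (n->next != nullptr)
    copy->next = copy_node_recursive (n->next, copied);
  return copy;
}
int
main ()
{
  node a, b;
  a.next = &b;
  b.next = &a;			/* A cycle, like a self-referential type.  */
  copied_nodes_map copied;
  node *copy = copy_node_recursive (&a, copied);
  assert (copy->next->next == copy);
}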
Change-Id: I35f0a4946dcc5c5eb84820126cf716b600f3302f
Co-Authored-By: Tom Tromey <tom@tromey.com>
---
gdb/compile/compile-object-run.c | 6 ++--
gdb/extension-priv.h | 3 +-
gdb/extension.c | 3 +-
gdb/extension.h | 5 +--
gdb/gdbtypes.c | 59 ++++----------------------------
gdb/gdbtypes.h | 7 ++--
gdb/guile/guile-internal.h | 2 +-
gdb/guile/scm-type.c | 7 ++--
gdb/guile/scm-value.c | 3 +-
gdb/python/py-type.c | 7 ++--
gdb/python/py-value.c | 3 +-
gdb/python/python-internal.h | 2 +-
gdb/value.c | 17 +++++----
gdb/value.h | 4 +--
14 files changed, 41 insertions(+), 87 deletions(-)
diff --git a/gdb/compile/compile-object-run.c b/gdb/compile/compile-object-run.c
index 5a810d5a16d4..83163774662c 100644
--- a/gdb/compile/compile-object-run.c
+++ b/gdb/compile/compile-object-run.c
@@ -105,9 +105,9 @@ do_module_cleanup (void *arg, int registers_valid)
static type *
create_copied_type_recursive (objfile *objfile, type *func_type)
{
- htab_up copied_types = create_copied_types_hash ();
- func_type = copy_type_recursive (func_type, copied_types.get ());
- return func_type;
+ copied_types_hash_t copied_types;
+
+ return copy_type_recursive (func_type, copied_types);
}
/* Perform inferior call of MODULE. This function may throw an error.
diff --git a/gdb/extension-priv.h b/gdb/extension-priv.h
index 653fd51e2f11..c14143e98b86 100644
--- a/gdb/extension-priv.h
+++ b/gdb/extension-priv.h
@@ -201,7 +201,8 @@ struct extension_language_ops
COPIED_TYPES is used to prevent cycles / duplicates and is passed to
preserve_one_value. */
void (*preserve_values) (const struct extension_language_defn *,
- struct objfile *objfile, htab_t copied_types);
+ struct objfile *objfile,
+ copied_types_hash_t &copied_types);
/* Return non-zero if there is a stop condition for the breakpoint.
This is used to implement the restriction that a breakpoint may have
diff --git a/gdb/extension.c b/gdb/extension.c
index 897bf25dad4a..b49c538d1ff3 100644
--- a/gdb/extension.c
+++ b/gdb/extension.c
@@ -584,7 +584,8 @@ apply_ext_lang_ptwrite_filter (btrace_thread_info *btinfo)
preserve_one_value. */
void
-preserve_ext_lang_values (struct objfile *objfile, htab_t copied_types)
+preserve_ext_lang_values (struct objfile *objfile,
+ copied_types_hash_t &copied_types)
{
for (const struct extension_language_defn *extlang : extension_languages)
{
diff --git a/gdb/extension.h b/gdb/extension.h
index 5b0830b66751..8a70e72888a0 100644
--- a/gdb/extension.h
+++ b/gdb/extension.h
@@ -22,8 +22,8 @@
#include "mi/mi-cmds.h"
#include "gdbsupport/array-view.h"
-#include "hashtab.h"
#include <optional>
+#include "gdbtypes.h"
struct breakpoint;
struct command_line;
@@ -306,7 +306,8 @@ extern enum ext_lang_bt_status apply_ext_lang_frame_filter
extern void apply_ext_lang_ptwrite_filter
(struct btrace_thread_info *btinfo);
-extern void preserve_ext_lang_values (struct objfile *, htab_t copied_types);
+extern void preserve_ext_lang_values (struct objfile *,
+ copied_types_hash_t &copied_types);
extern const struct extension_language_defn *get_breakpoint_cond_ext_lang
(struct breakpoint *b, enum extension_language skip_lang);
diff --git a/gdb/gdbtypes.c b/gdb/gdbtypes.c
index 2a3aea229cb0..1497149ef103 100644
--- a/gdb/gdbtypes.c
+++ b/gdb/gdbtypes.c
@@ -5407,46 +5407,6 @@ recursive_dump_type (struct type *type, int spaces)
obstack_free (&dont_print_type_obstack, NULL);
}
\f
-/* Trivial helpers for the libiberty hash table, for mapping one
- type to another. */
-
-struct type_pair
-{
- type_pair (struct type *old_, struct type *newobj_)
- : old (old_), newobj (newobj_)
- {}
-
- struct type * const old, * const newobj;
-};
-
-static hashval_t
-type_pair_hash (const void *item)
-{
- const struct type_pair *pair = (const struct type_pair *) item;
-
- return htab_hash_pointer (pair->old);
-}
-
-static int
-type_pair_eq (const void *item_lhs, const void *item_rhs)
-{
- const struct type_pair *lhs = (const struct type_pair *) item_lhs;
- const struct type_pair *rhs = (const struct type_pair *) item_rhs;
-
- return lhs->old == rhs->old;
-}
-
-/* Allocate the hash table used by copy_type_recursive to walk
- types without duplicates. */
-
-htab_up
-create_copied_types_hash ()
-{
- return htab_up (htab_create_alloc (1, type_pair_hash, type_pair_eq,
- htab_delete_entry<type_pair>,
- xcalloc, xfree));
-}
-
/* Recursively copy (deep copy) a dynamic attribute list of a type. */
static struct dynamic_prop_list *
@@ -5478,27 +5438,20 @@ copy_dynamic_prop_list (struct obstack *storage,
it is not associated with OBJFILE. */
struct type *
-copy_type_recursive (struct type *type, htab_t copied_types)
+copy_type_recursive (struct type *type, copied_types_hash_t &copied_types)
{
- void **slot;
- struct type *new_type;
-
if (!type->is_objfile_owned ())
return type;
- struct type_pair pair (type, nullptr);
+ if (auto iter = copied_types.find (type);
+ iter != copied_types.end ())
+ return iter->second;
- slot = htab_find_slot (copied_types, &pair, INSERT);
- if (*slot != NULL)
- return ((struct type_pair *) *slot)->newobj;
-
- new_type = type_allocator (type->arch ()).new_type ();
+ struct type *new_type = type_allocator (type->arch ()).new_type ();
/* We must add the new type to the hash table immediately, in case
we encounter this type again during a recursive call below. */
- struct type_pair *stored = new type_pair (type, new_type);
-
- *slot = stored;
+ copied_types.emplace (type, new_type);
/* Copy the common fields of types. For the main type, we simply
copy the entire thing and then update specific fields as needed. */
diff --git a/gdb/gdbtypes.h b/gdb/gdbtypes.h
index 7fdb377c263a..4cdc48b9a2f9 100644
--- a/gdb/gdbtypes.h
+++ b/gdb/gdbtypes.h
@@ -44,14 +44,13 @@
written such that they can be used as both rvalues and lvalues.
*/
-#include "hashtab.h"
#include "gdbsupport/array-view.h"
-#include "gdbsupport/gdb-hashtab.h"
#include <optional>
#include "gdbsupport/enum-flags.h"
#include "dwarf2.h"
#include "gdbsupport/gdb_obstack.h"
#include "gmp-utils.h"
+#include "gdbsupport/unordered_map.h"
/* Forward declarations for prototypes. */
struct field;
@@ -2785,10 +2784,10 @@ extern int class_or_union_p (const struct type *);
extern void maintenance_print_type (const char *, int);
-extern htab_up create_copied_types_hash ();
+using copied_types_hash_t = gdb::unordered_map<type *, type *>;
extern struct type *copy_type_recursive (struct type *type,
- htab_t copied_types);
+ copied_types_hash_t &copied_types);
extern struct type *copy_type (const struct type *type);
diff --git a/gdb/guile/guile-internal.h b/gdb/guile/guile-internal.h
index 9b9bb21951dd..fbf689848a25 100644
--- a/gdb/guile/guile-internal.h
+++ b/gdb/guile/guile-internal.h
@@ -603,7 +603,7 @@ extern bool gdbscm_auto_load_enabled (const struct extension_language_defn *);
extern void gdbscm_preserve_values
(const struct extension_language_defn *,
- struct objfile *, htab_t copied_types);
+ struct objfile *, copied_types_hash_t &copied_types);
extern enum ext_lang_rc gdbscm_apply_val_pretty_printer
(const struct extension_language_defn *,
diff --git a/gdb/guile/scm-type.c b/gdb/guile/scm-type.c
index 19324a69810c..f22876265074 100644
--- a/gdb/guile/scm-type.c
+++ b/gdb/guile/scm-type.c
@@ -94,8 +94,8 @@ struct tyscm_deleter
return;
gdb_assert (htab != nullptr);
- htab_up copied_types = create_copied_types_hash ();
- htab_traverse_noresize (htab, tyscm_copy_type_recursive, copied_types.get ());
+ copied_types_hash_t copied_types;
+ htab_traverse_noresize (htab, tyscm_copy_type_recursive, &copied_types);
htab_delete (htab);
}
};
@@ -375,12 +375,11 @@ static int
tyscm_copy_type_recursive (void **slot, void *info)
{
type_smob *t_smob = (type_smob *) *slot;
- htab_t copied_types = (htab_t) info;
+ copied_types_hash_t &copied_types = *static_cast<copied_types_hash_t *> (info);
htab_t htab;
eqable_gdb_smob **new_slot;
type_smob t_smob_for_lookup;
- htab_empty (copied_types);
t_smob->type = copy_type_recursive (t_smob->type, copied_types);
/* The eq?-hashtab that the type lived in is going away.
diff --git a/gdb/guile/scm-value.c b/gdb/guile/scm-value.c
index a7b21707eba1..0f4a6a46da0e 100644
--- a/gdb/guile/scm-value.c
+++ b/gdb/guile/scm-value.c
@@ -86,7 +86,8 @@ static SCM substitute_symbol;
void
gdbscm_preserve_values (const struct extension_language_defn *extlang,
- struct objfile *objfile, htab_t copied_types)
+ struct objfile *objfile,
+ copied_types_hash_t &copied_types)
{
value_smob *iter;
diff --git a/gdb/python/py-type.c b/gdb/python/py-type.c
index 284960a3a879..11a96d52c2e0 100644
--- a/gdb/python/py-type.c
+++ b/gdb/python/py-type.c
@@ -1174,15 +1174,14 @@ struct typy_deleter
operating on. */
gdbpy_enter enter_py;
- htab_up copied_types = create_copied_types_hash ();
+ copied_types_hash_t copied_types;
while (obj)
{
type_object *next = obj->next;
- htab_empty (copied_types.get ());
-
- obj->type = copy_type_recursive (obj->type, copied_types.get ());
+ copied_types.clear ();
+ obj->type = copy_type_recursive (obj->type, copied_types);
obj->next = NULL;
obj->prev = NULL;
diff --git a/gdb/python/py-value.c b/gdb/python/py-value.c
index 119bf9f76d76..0e6024c370f8 100644
--- a/gdb/python/py-value.c
+++ b/gdb/python/py-value.c
@@ -233,7 +233,8 @@ valpy_init (PyObject *self, PyObject *args, PyObject *kwds)
each. */
void
gdbpy_preserve_values (const struct extension_language_defn *extlang,
- struct objfile *objfile, htab_t copied_types)
+ struct objfile *objfile,
+ copied_types_hash_t &copied_types)
{
value_object *iter;
diff --git a/gdb/python/python-internal.h b/gdb/python/python-internal.h
index d723c4d577b1..a3a7294c303b 100644
--- a/gdb/python/python-internal.h
+++ b/gdb/python/python-internal.h
@@ -474,7 +474,7 @@ extern enum ext_lang_bt_status gdbpy_apply_frame_filter
struct ui_out *out, int frame_low, int frame_high);
extern void gdbpy_preserve_values (const struct extension_language_defn *,
struct objfile *objfile,
- htab_t copied_types);
+ copied_types_hash_t &copied_types);
extern enum ext_lang_bp_stop gdbpy_breakpoint_cond_says_stop
(const struct extension_language_defn *, struct breakpoint *);
extern int gdbpy_breakpoint_has_cond (const struct extension_language_defn *,
diff --git a/gdb/value.c b/gdb/value.c
index d9b3c6ece04c..a18491602a8a 100644
--- a/gdb/value.c
+++ b/gdb/value.c
@@ -2468,7 +2468,7 @@ add_internal_function (gdb::unique_xmalloc_ptr<char> &&name,
}
void
-value::preserve (struct objfile *objfile, htab_t copied_types)
+value::preserve (struct objfile *objfile, copied_types_hash_t &copied_types)
{
if (m_type->objfile_owner () == objfile)
m_type = copy_type_recursive (m_type, copied_types);
@@ -2481,7 +2481,7 @@ value::preserve (struct objfile *objfile, htab_t copied_types)
static void
preserve_one_internalvar (struct internalvar *var, struct objfile *objfile,
- htab_t copied_types)
+ copied_types_hash_t &copied_types)
{
switch (var->kind)
{
@@ -2504,7 +2504,7 @@ preserve_one_internalvar (struct internalvar *var, struct objfile *objfile,
static void
preserve_one_varobj (struct varobj *varobj, struct objfile *objfile,
- htab_t copied_types)
+ copied_types_hash_t &copied_types)
{
if (varobj->type->is_objfile_owned ()
&& varobj->type->objfile_owner () == objfile)
@@ -2528,22 +2528,21 @@ preserve_values (struct objfile *objfile)
{
/* Create the hash table. We allocate on the objfile's obstack, since
it is soon to be deleted. */
- htab_up copied_types = create_copied_types_hash ();
+ copied_types_hash_t copied_types;
for (const value_ref_ptr &item : value_history)
- item->preserve (objfile, copied_types.get ());
+ item->preserve (objfile, copied_types);
for (auto &pair : internalvars)
- preserve_one_internalvar (&pair.second, objfile, copied_types.get ());
+ preserve_one_internalvar (&pair.second, objfile, copied_types);
/* For the remaining varobj, check that none has type owned by OBJFILE. */
all_root_varobjs ([&copied_types, objfile] (struct varobj *varobj)
{
- preserve_one_varobj (varobj, objfile,
- copied_types.get ());
+ preserve_one_varobj (varobj, objfile, copied_types);
});
- preserve_ext_lang_values (objfile, copied_types.get ());
+ preserve_ext_lang_values (objfile, copied_types);
}
static void
diff --git a/gdb/value.h b/gdb/value.h
index 13cfb007aa2c..c8166a77332e 100644
--- a/gdb/value.h
+++ b/gdb/value.h
@@ -24,12 +24,12 @@
#include "extension.h"
#include "gdbsupport/gdb_ref_ptr.h"
#include "gmp-utils.h"
+#include "gdbtypes.h"
struct block;
struct expression;
struct regcache;
struct symbol;
-struct type;
struct ui_file;
struct language_defn;
struct value_print_options;
@@ -593,7 +593,7 @@ struct value
/* Update this value before discarding OBJFILE. COPIED_TYPES is
used to prevent cycles / duplicates. */
- void preserve (struct objfile *objfile, htab_t copied_types);
+ void preserve (struct objfile *objfile, copied_types_hash_t &copied_types);
/* Unpack a bitfield of BITSIZE bits found at BITPOS in the object
at VALADDR + EMBEDDEDOFFSET that has the type of DEST_VAL and
--
2.47.0
* [PATCH v5 15/25] Convert static links to new hash table
2024-11-04 18:27 [PATCH v5 00/25] Add a C++ hash table to gdbsupport Simon Marchi
` (13 preceding siblings ...)
2024-11-04 18:27 ` [PATCH v5 14/25] Convert type copying " Simon Marchi
@ 2024-11-04 18:27 ` Simon Marchi
2024-11-04 18:27 ` [PATCH v5 16/25] Convert gnu-v3-abi.c " Simon Marchi
` (10 subsequent siblings)
25 siblings, 0 replies; 27+ messages in thread
From: Simon Marchi @ 2024-11-04 18:27 UTC (permalink / raw)
To: gdb-patches; +Cc: Simon Marchi, Tom Tromey
This converts the objfile static link table to the new hash map.
Change-Id: If978e895679899ca2af4ef01c12842b4184d88e6
Co-Authored-By: Tom Tromey <tom@tromey.com>
---
gdb/objfiles.c | 74 ++++++--------------------------------------------
gdb/objfiles.h | 4 ++-
gdb/symfile.c | 2 +-
3 files changed, 13 insertions(+), 67 deletions(-)
diff --git a/gdb/objfiles.c b/gdb/objfiles.c
index 555195dc61f9..207a0cb22eb0 100644
--- a/gdb/objfiles.c
+++ b/gdb/objfiles.c
@@ -153,39 +153,6 @@ set_objfile_main_name (struct objfile *objfile,
objfile->per_bfd->language_of_main = lang;
}
-/* Helper structure to map blocks to static link properties in hash tables. */
-
-struct static_link_htab_entry
-{
- const struct block *block;
- const struct dynamic_prop *static_link;
-};
-
-/* Return a hash code for struct static_link_htab_entry *P. */
-
-static hashval_t
-static_link_htab_entry_hash (const void *p)
-{
- const struct static_link_htab_entry *e
- = (const struct static_link_htab_entry *) p;
-
- return htab_hash_pointer (e->block);
-}
-
-/* Return whether P1 an P2 (pointers to struct static_link_htab_entry) are
- mappings for the same block. */
-
-static int
-static_link_htab_entry_eq (const void *p1, const void *p2)
-{
- const struct static_link_htab_entry *e1
- = (const struct static_link_htab_entry *) p1;
- const struct static_link_htab_entry *e2
- = (const struct static_link_htab_entry *) p2;
-
- return e1->block == e2->block;
-}
-
/* Register STATIC_LINK as the static link for BLOCK, which is part of OBJFILE.
Must not be called more than once for each BLOCK. */
@@ -194,25 +161,10 @@ objfile_register_static_link (struct objfile *objfile,
const struct block *block,
const struct dynamic_prop *static_link)
{
- void **slot;
- struct static_link_htab_entry lookup_entry;
- struct static_link_htab_entry *entry;
-
- if (objfile->static_links == NULL)
- objfile->static_links.reset (htab_create_alloc
- (1, &static_link_htab_entry_hash, static_link_htab_entry_eq, NULL,
- xcalloc, xfree));
-
- /* Create a slot for the mapping, make sure it's the first mapping for this
- block and then create the mapping itself. */
- lookup_entry.block = block;
- slot = htab_find_slot (objfile->static_links.get (), &lookup_entry, INSERT);
- gdb_assert (*slot == NULL);
-
- entry = XOBNEW (&objfile->objfile_obstack, static_link_htab_entry);
- entry->block = block;
- entry->static_link = static_link;
- *slot = (void *) entry;
+ /* Enter the mapping and make sure it's the first mapping for this
+ block. */
+ bool inserted = objfile->static_links.emplace (block, static_link).second;
+ gdb_assert (inserted);
}
/* Look for a static link for BLOCK, which is part of OBJFILE. Return NULL if
@@ -222,19 +174,11 @@ const struct dynamic_prop *
objfile_lookup_static_link (struct objfile *objfile,
const struct block *block)
{
- struct static_link_htab_entry *entry;
- struct static_link_htab_entry lookup_entry;
-
- if (objfile->static_links == NULL)
- return NULL;
- lookup_entry.block = block;
- entry = ((struct static_link_htab_entry *)
- htab_find (objfile->static_links.get (), &lookup_entry));
- if (entry == NULL)
- return NULL;
-
- gdb_assert (entry->block == block);
- return entry->static_link;
+ if (auto iter = objfile->static_links.find (block);
+ iter != objfile->static_links.end ())
+ return iter->second;
+
+ return nullptr;
}
\f
diff --git a/gdb/objfiles.h b/gdb/objfiles.h
index d92570a00bdb..5e7d298d4ea7 100644
--- a/gdb/objfiles.h
+++ b/gdb/objfiles.h
@@ -32,6 +32,7 @@
#include "jit.h"
#include "quick-symbol.h"
#include <forward_list>
+#include "gdbsupport/unordered_map.h"
struct htab;
struct objfile_data;
@@ -857,7 +858,8 @@ struct objfile
Very few blocks have a static link, so it's more memory efficient to
store these here rather than in struct block. Static links must be
allocated on the objfile's obstack. */
- htab_up static_links;
+ gdb::unordered_map<const block *, const dynamic_prop *>
+ static_links;
/* JIT-related data for this objfile, if the objfile is a JITer;
that is, it produces JITed objfiles. */
diff --git a/gdb/symfile.c b/gdb/symfile.c
index 1502fdbe500b..89f7ab2dd759 100644
--- a/gdb/symfile.c
+++ b/gdb/symfile.c
@@ -2585,7 +2585,7 @@ reread_symbols (int from_tty)
objfile->sect_index_text = -1;
objfile->compunit_symtabs = NULL;
objfile->template_symbols = NULL;
- objfile->static_links.reset (nullptr);
+ objfile->static_links.clear ();
/* obstack_init also initializes the obstack so it is
empty. We could use obstack_specify_allocation but
--
2.47.0
* [PATCH v5 16/25] Convert gnu-v3-abi.c to new hash table
2024-11-04 18:27 [PATCH v5 00/25] Add a C++ hash table to gdbsupport Simon Marchi
` (14 preceding siblings ...)
2024-11-04 18:27 ` [PATCH v5 15/25] Convert static links " Simon Marchi
@ 2024-11-04 18:27 ` Simon Marchi
2024-11-04 18:27 ` [PATCH v5 17/25] Convert abbrev cache " Simon Marchi
` (9 subsequent siblings)
25 siblings, 0 replies; 27+ messages in thread
From: Simon Marchi @ 2024-11-04 18:27 UTC (permalink / raw)
To: gdb-patches; +Cc: Simon Marchi, Tom Tromey
This converts gnu-v3-abi.c to use the new hash table.
This change shows how a std::vector can easily be made directly from
the hash table, simplifying the earlier approach of constructing a
vector and a hash table at the same time.
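Stripped of the gdb specifics, the idiom is simply range-constructing the vector from the hash table and sorting it afterwards. A rough standalone sketch with std::unordered_map and int keys standing in for the value/voffset pairs:
#include <algorithm>
#include <cstdio>
#include <unordered_map>
#include <utility>
#include <vector>
int
main ()
{
  /* Hypothetical stand-in for the value -> max voffset map.  */
  std::unordered_map<int, int> offset_hash
    = { { 30, 2 }, { 10, -1 }, { 20, 5 } };
  /* Build the vector directly from the hash table's range...  */
  std::vector<std::pair<int, int>> result_vec (offset_hash.begin (),
						offset_hash.end ());
  /* ... then impose the desired order afterwards.  */
  std::sort (result_vec.begin (), result_vec.end (),
	     [] (const auto &a, const auto &b) { return a.first < b.first; });
  for (const auto &item : result_vec)
    std::printf ("%d -> %d\n", item.first, item.second);
  return 0;
}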
Change-Id: Ia0c387a035a52300db6b6f5a3a2e5c69efa01155
Co-Authored-By: Tom Tromey <tom@tromey.com>
---
gdb/gnu-v3-abi.c | 98 ++++++++++++++++--------------------------------
1 file changed, 32 insertions(+), 66 deletions(-)
diff --git a/gdb/gnu-v3-abi.c b/gdb/gnu-v3-abi.c
index aefbee542703..b51c4d23fc7f 100644
--- a/gdb/gnu-v3-abi.c
+++ b/gdb/gnu-v3-abi.c
@@ -33,6 +33,7 @@
#include "cli/cli-style.h"
#include "dwarf2/loc.h"
#include "inferior.h"
+#include "gdbsupport/unordered_map.h"
static struct cp_abi_ops gnu_v3_abi_ops;
@@ -791,51 +792,32 @@ gnuv3_method_ptr_to_value (struct value **this_p, struct value *method_ptr)
return value_from_pointer (lookup_pointer_type (method_type), ptr_value);
}
-/* Objects of this type are stored in a hash table and a vector when
- printing the vtables for a class. */
-
-struct value_and_voffset
+struct vtable_value_hash_t
{
- /* The value representing the object. */
- struct value *value;
-
- /* The maximum vtable offset we've found for any object at this
- offset in the outermost object. */
- int max_voffset;
+ std::size_t operator() (value *val) const noexcept
+ { return val->address () + val->embedded_offset (); }
};
-/* Hash function for value_and_voffset. */
-
-static hashval_t
-hash_value_and_voffset (const void *p)
-{
- const struct value_and_voffset *o = (const struct value_and_voffset *) p;
-
- return o->value->address () + o->value->embedded_offset ();
-}
-
-/* Equality function for value_and_voffset. */
-
-static int
-eq_value_and_voffset (const void *a, const void *b)
+struct vtable_value_eq_t
{
- const struct value_and_voffset *ova = (const struct value_and_voffset *) a;
- const struct value_and_voffset *ovb = (const struct value_and_voffset *) b;
+ bool operator() (value *lhs, value *rhs) const noexcept
+ {
+ return (lhs->address () + lhs->embedded_offset ()
+ == rhs->address () + rhs->embedded_offset ());
+ }
+};
- return (ova->value->address () + ova->value->embedded_offset ()
- == ovb->value->address () + ovb->value->embedded_offset ());
-}
+using vtable_hash_t
+ = gdb::unordered_map<value *, int, vtable_value_hash_t, vtable_value_eq_t>;
-/* Comparison function for value_and_voffset. */
+/* Comparison function used for sorting the vtable entries. */
static bool
-compare_value_and_voffset (const struct value_and_voffset *va,
- const struct value_and_voffset *vb)
+compare_value_and_voffset (const std::pair<value *, int> &va,
+ const std::pair<value *, int> &vb)
{
- CORE_ADDR addra = (va->value->address ()
- + va->value->embedded_offset ());
- CORE_ADDR addrb = (vb->value->address ()
- + vb->value->embedded_offset ());
+ CORE_ADDR addra = va.first->address () + va.first->embedded_offset ();
+ CORE_ADDR addrb = vb.first->address () + vb.first->embedded_offset ();
return addra < addrb;
}
@@ -843,18 +825,14 @@ compare_value_and_voffset (const struct value_and_voffset *va,
/* A helper function used when printing vtables. This determines the
key (most derived) sub-object at each address and also computes the
maximum vtable offset seen for the corresponding vtable. Updates
- OFFSET_HASH and OFFSET_VEC with a new value_and_voffset object, if
- needed. VALUE is the object to examine. */
+ OFFSET_HASH with a new entry, if needed. VALUE
+ is the object to examine. */
static void
-compute_vtable_size (htab_t offset_hash,
- std::vector<value_and_voffset *> *offset_vec,
- struct value *value)
+compute_vtable_size (vtable_hash_t &offset_hash, struct value *value)
{
int i;
struct type *type = check_typedef (value->type ());
- void **slot;
- struct value_and_voffset search_vo, *current_vo;
gdb_assert (type->code () == TYPE_CODE_STRUCT);
@@ -864,18 +842,7 @@ compute_vtable_size (htab_t offset_hash,
return;
/* Update the hash and the vec, if needed. */
- search_vo.value = value;
- slot = htab_find_slot (offset_hash, &search_vo, INSERT);
- if (*slot)
- current_vo = (struct value_and_voffset *) *slot;
- else
- {
- current_vo = XNEW (struct value_and_voffset);
- current_vo->value = value;
- current_vo->max_voffset = -1;
- *slot = current_vo;
- offset_vec->push_back (current_vo);
- }
+ int &current_max_voffset = offset_hash.emplace (value, -1).first->second;
/* Update the value_and_voffset object with the highest vtable
offset from this class. */
@@ -890,15 +857,15 @@ compute_vtable_size (htab_t offset_hash,
{
int voffset = TYPE_FN_FIELD_VOFFSET (fn, j);
- if (voffset > current_vo->max_voffset)
- current_vo->max_voffset = voffset;
+ if (voffset > current_max_voffset)
+ current_max_voffset = voffset;
}
}
}
/* Recurse into base classes. */
for (i = 0; i < TYPE_N_BASECLASSES (type); ++i)
- compute_vtable_size (offset_hash, offset_vec, value_field (value, i));
+ compute_vtable_size (offset_hash, value_field (value, i));
}
/* Helper for gnuv3_print_vtable that prints a single vtable. */
@@ -999,23 +966,22 @@ gnuv3_print_vtable (struct value *value)
return;
}
- htab_up offset_hash (htab_create_alloc (1, hash_value_and_voffset,
- eq_value_and_voffset,
- xfree, xcalloc, xfree));
- std::vector<value_and_voffset *> result_vec;
+ vtable_hash_t offset_hash;
+ compute_vtable_size (offset_hash, value);
- compute_vtable_size (offset_hash.get (), &result_vec, value);
+ std::vector<std::pair<struct value *, int>> result_vec (offset_hash.begin (),
+ offset_hash.end ());
std::sort (result_vec.begin (), result_vec.end (),
compare_value_and_voffset);
count = 0;
- for (value_and_voffset *iter : result_vec)
+ for (auto &item : result_vec)
{
- if (iter->max_voffset >= 0)
+ if (item.second >= 0)
{
if (count > 0)
gdb_printf ("\n");
- print_one_vtable (gdbarch, iter->value, iter->max_voffset, &opts);
+ print_one_vtable (gdbarch, item.first, item.second, &opts);
++count;
}
}
--
2.47.0
* [PATCH v5 17/25] Convert abbrev cache to new hash table
2024-11-04 18:27 [PATCH v5 00/25] Add a C++ hash table to gdbsupport Simon Marchi
` (15 preceding siblings ...)
2024-11-04 18:27 ` [PATCH v5 16/25] Convert gnu-v3-abi.c " Simon Marchi
@ 2024-11-04 18:27 ` Simon Marchi
2024-11-04 18:27 ` [PATCH v5 18/25] Convert abbrevs " Simon Marchi
` (8 subsequent siblings)
25 siblings, 0 replies; 27+ messages in thread
From: Simon Marchi @ 2024-11-04 18:27 UTC (permalink / raw)
To: gdb-patches; +Cc: Simon Marchi, Tom Tromey
This converts the DWARF abbrev cache to use the new hash table.
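As background for the conversion: the lookup by (section, offset) relies on heterogeneous ("transparent") lookup, so a lightweight search key can be hashed and compared against the owned abbrev_table_up elements without building a table. A rough sketch of the idiom with toy types rather than the real ones, written against std::unordered_set, which supports this from C++20 when both functors declare is_transparent (the gdb container provides the same mechanism):
#include <cstddef>
#include <functional>
#include <memory>
#include <string>
#include <unordered_set>
/* Toy element owned by the set, looked up by its name.  */
struct table
{
  std::string name;
};
using table_up = std::unique_ptr<table>;
struct table_hash
{
  using is_transparent = void;
  std::size_t operator() (const std::string &name) const noexcept
  { return std::hash<std::string> () (name); }
  std::size_t operator() (const table_up &t) const noexcept
  { return (*this) (t->name); }
};
struct table_eq
{
  using is_transparent = void;
  /* Both argument orders are provided, since implementations differ in
     which one they call for transparent lookups.  */
  bool operator() (const std::string &name, const table_up &t) const noexcept
  { return name == t->name; }
  bool operator() (const table_up &t, const std::string &name) const noexcept
  { return t->name == name; }
  bool operator() (const table_up &lhs, const table_up &rhs) const noexcept
  { return lhs->name == rhs->name; }
};
int
main ()
{
  std::unordered_set<table_up, table_hash, table_eq> tables;
  auto t = std::make_unique<table> ();
  t->name = "foo";
  tables.insert (std::move (t));
  /* Look up by name without constructing a table_up.  */
  return tables.find (std::string ("foo")) != tables.end () ? 0 : 1;
}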
Change-Id: I5e88cd4030715954db2c43f873b77b6b8e73f5aa
Co-Authored-By: Tom Tromey <tom@tromey.com>
---
gdb/dwarf2/abbrev-table-cache.c | 36 ++----------------------
gdb/dwarf2/abbrev-table-cache.h | 49 ++++++++++++++++++++++++++-------
2 files changed, 42 insertions(+), 43 deletions(-)
diff --git a/gdb/dwarf2/abbrev-table-cache.c b/gdb/dwarf2/abbrev-table-cache.c
index c29cd09478f4..2395ae478119 100644
--- a/gdb/dwarf2/abbrev-table-cache.c
+++ b/gdb/dwarf2/abbrev-table-cache.c
@@ -19,33 +19,6 @@
#include "dwarf2/abbrev-table-cache.h"
-/* Hash function for an abbrev table. */
-
-hashval_t
-abbrev_table_cache::hash_table (const void *item)
-{
- const struct abbrev_table *table = (const struct abbrev_table *) item;
- return to_underlying (table->sect_off);
-}
-
-/* Comparison function for abbrev table. */
-
-int
-abbrev_table_cache::eq_table (const void *lhs, const void *rhs)
-{
- const struct abbrev_table *l_table = (const struct abbrev_table *) lhs;
- const search_key *key = (const search_key *) rhs;
- return (l_table->section == key->section
- && l_table->sect_off == key->offset);
-}
-
-abbrev_table_cache::abbrev_table_cache ()
- : m_tables (htab_create_alloc (20, hash_table, eq_table,
- htab_delete_entry<abbrev_table>,
- xcalloc, xfree))
-{
-}
-
void
abbrev_table_cache::add (abbrev_table_up table)
{
@@ -53,11 +26,8 @@ abbrev_table_cache::add (abbrev_table_up table)
if (table == nullptr)
return;
- search_key key = { table->section, table->sect_off };
- void **slot = htab_find_slot_with_hash (m_tables.get (), &key,
- to_underlying (table->sect_off),
- INSERT);
+ bool inserted = m_tables.emplace (std::move (table)).second;
+
/* If this one already existed, then it should have been reused. */
- gdb_assert (*slot == nullptr);
- *slot = (void *) table.release ();
+ gdb_assert (inserted);
}
diff --git a/gdb/dwarf2/abbrev-table-cache.h b/gdb/dwarf2/abbrev-table-cache.h
index bcec005f2c98..84699487657a 100644
--- a/gdb/dwarf2/abbrev-table-cache.h
+++ b/gdb/dwarf2/abbrev-table-cache.h
@@ -21,12 +21,13 @@
#define GDB_DWARF2_ABBREV_TABLE_CACHE_H
#include "dwarf2/abbrev.h"
+#include "gdbsupport/unordered_set.h"
/* An abbrev table cache holds abbrev tables for easier reuse. */
class abbrev_table_cache
{
public:
- abbrev_table_cache ();
+ abbrev_table_cache () = default;
DISABLE_COPY_AND_ASSIGN (abbrev_table_cache);
/* Find an abbrev table coming from the abbrev section SECTION at
@@ -35,10 +36,13 @@ class abbrev_table_cache
const abbrev_table *find (dwarf2_section_info *section,
sect_offset offset) const
{
- search_key key = { section, offset };
+ abbrev_table_search_key key {section, offset};
- return (abbrev_table *) htab_find_with_hash (m_tables.get (), &key,
- to_underlying (offset));
+ if (auto iter = m_tables.find (key);
+ iter != m_tables.end ())
+ return iter->get ();
+
+ return nullptr;
}
/* Add TABLE to this cache. Ownership of TABLE is transferred to
@@ -49,18 +53,43 @@ class abbrev_table_cache
void add (abbrev_table_up table);
private:
+ /* Key used to search for an existing abbrev table in M_TABLES. */
+ struct abbrev_table_search_key
+ {
+ const dwarf2_section_info *section;
+ sect_offset sect_off;
+ };
+
+ struct abbrev_table_hash
+ {
+ using is_transparent = void;
- static hashval_t hash_table (const void *item);
- static int eq_table (const void *lhs, const void *rhs);
+ std::size_t operator() (const abbrev_table_search_key &key) const noexcept
+ {
+ return (std::hash<const dwarf2_section_info *> () (key.section)
+ + (std::hash<std::underlying_type_t<decltype (key.sect_off)>> ()
+ (to_underlying (key.sect_off))));
+ }
- struct search_key
+ std::size_t operator() (const abbrev_table_up &table) const noexcept
+ { return (*this) ({ table->section, table->sect_off }); }
+ };
+
+ struct abbrev_table_eq
{
- struct dwarf2_section_info *section;
- sect_offset offset;
+ using is_transparent = void;
+
+ bool operator() (const abbrev_table_search_key &key,
+ const abbrev_table_up &table) const noexcept
+ { return key.section == table->section && key.sect_off == table->sect_off; }
+
+ bool operator() (const abbrev_table_up &lhs,
+ const abbrev_table_up &rhs) const noexcept
+ { return (*this) ({ lhs->section, lhs->sect_off }, rhs); }
};
/* Hash table of abbrev tables. */
- htab_up m_tables;
+ gdb::unordered_set<abbrev_table_up, abbrev_table_hash, abbrev_table_eq> m_tables;
};
#endif /* GDB_DWARF2_ABBREV_TABLE_CACHE_H */
--
2.47.0
* [PATCH v5 18/25] Convert abbrevs to new hash table
2024-11-04 18:27 [PATCH v5 00/25] Add a C++ hash table to gdbsupport Simon Marchi
` (16 preceding siblings ...)
2024-11-04 18:27 ` [PATCH v5 17/25] Convert abbrev cache " Simon Marchi
@ 2024-11-04 18:27 ` Simon Marchi
2024-11-04 18:27 ` [PATCH v5 19/25] Convert typedef hash " Simon Marchi
` (7 subsequent siblings)
25 siblings, 0 replies; 27+ messages in thread
From: Simon Marchi @ 2024-11-04 18:27 UTC (permalink / raw)
To: gdb-patches; +Cc: Simon Marchi, Tom Tromey
This converts the DWARF abbrevs themselves to use the new hash table.
Change-Id: I0320a733ecefe2cffeb25c068f17322dd3ab23e2
Co-Authored-By: Tom Tromey <tom@tromey.com>
---
gdb/dwarf2/abbrev.c | 46 ----------------------------------
gdb/dwarf2/abbrev.h | 61 +++++++++++++++++++++++++++++++++++----------
2 files changed, 48 insertions(+), 59 deletions(-)
diff --git a/gdb/dwarf2/abbrev.c b/gdb/dwarf2/abbrev.c
index c30db1ee31ae..9f7ead88f1fa 100644
--- a/gdb/dwarf2/abbrev.c
+++ b/gdb/dwarf2/abbrev.c
@@ -29,52 +29,6 @@
#include "dwarf2/section.h"
#include "bfd.h"
-/* Hash function for an abbrev. */
-
-static hashval_t
-hash_abbrev (const void *item)
-{
- const struct abbrev_info *info = (const struct abbrev_info *) item;
- /* Warning: if you change this next line, you must also update the
- other code in this class using the _with_hash functions. */
- return info->number;
-}
-
-/* Comparison function for abbrevs. */
-
-static int
-eq_abbrev (const void *lhs, const void *rhs)
-{
- const struct abbrev_info *l_info = (const struct abbrev_info *) lhs;
- const struct abbrev_info *r_info = (const struct abbrev_info *) rhs;
- return l_info->number == r_info->number;
-}
-
-/* Abbreviation tables.
-
- In DWARF version 2, the description of the debugging information is
- stored in a separate .debug_abbrev section. Before we read any
- dies from a section we read in all abbreviations and install them
- in a hash table. */
-
-abbrev_table::abbrev_table (sect_offset off, struct dwarf2_section_info *sect)
- : sect_off (off),
- section (sect),
- m_abbrevs (htab_create_alloc (20, hash_abbrev, eq_abbrev,
- nullptr, xcalloc, xfree))
-{
-}
-
-/* Add an abbreviation to the table. */
-
-void
-abbrev_table::add_abbrev (struct abbrev_info *abbrev)
-{
- void **slot = htab_find_slot_with_hash (m_abbrevs.get (), abbrev,
- abbrev->number, INSERT);
- *slot = abbrev;
-}
-
/* Helper function that returns true if a DIE with the given tag might
plausibly be indexed. */
diff --git a/gdb/dwarf2/abbrev.h b/gdb/dwarf2/abbrev.h
index 7eda95179dab..93e3b90c70e6 100644
--- a/gdb/dwarf2/abbrev.h
+++ b/gdb/dwarf2/abbrev.h
@@ -28,9 +28,8 @@
#define GDB_DWARF2_ABBREV_H
#include "dwarf2.h"
-#include "gdbsupport/gdb-hashtab.h"
#include "gdbsupport/gdb_obstack.h"
-#include "hashtab.h"
+#include "gdbsupport/unordered_set.h"
#include "types.h"
struct attr_abbrev
@@ -64,7 +63,12 @@ struct abbrev_info
struct abbrev_table;
typedef std::unique_ptr<struct abbrev_table> abbrev_table_up;
-/* Top level data structure to contain an abbreviation table. */
+/* Top level data structure to contain an abbreviation table.
+
+ In DWARF version 2, the description of the debugging information is
+ stored in a separate .debug_abbrev section. Before we read any
+ dies from a section we read in all abbreviations and install them
+ in a hash table. */
struct abbrev_table
{
@@ -78,14 +82,13 @@ struct abbrev_table
/* Look up an abbrev in the table.
Returns NULL if the abbrev is not found. */
- const struct abbrev_info *lookup_abbrev (unsigned int abbrev_number) const
+ const abbrev_info *lookup_abbrev (unsigned int abbrev_number) const
{
- struct abbrev_info search;
- search.number = abbrev_number;
+ if (auto iter = m_abbrevs.find (abbrev_number);
+ iter != m_abbrevs.end ())
+ return *iter;
- return (struct abbrev_info *) htab_find_with_hash (m_abbrevs.get (),
- &search,
- abbrev_number);
+ return nullptr;
}
/* Where the abbrev table came from.
@@ -96,15 +99,47 @@ struct abbrev_table
private:
- abbrev_table (sect_offset off, struct dwarf2_section_info *sect);
+ abbrev_table (sect_offset off, struct dwarf2_section_info *sect)
+ : sect_off (off),
+ section (sect)
+ {}
DISABLE_COPY_AND_ASSIGN (abbrev_table);
/* Add an abbreviation to the table. */
- void add_abbrev (struct abbrev_info *abbrev);
+ void add_abbrev (const abbrev_info *abbrev)
+ { m_abbrevs.emplace (abbrev); }
+
+ struct abbrev_info_number_eq
+ {
+ using is_transparent = void;
+
+ bool operator() (unsigned int number,
+ const abbrev_info *abbrev) const noexcept
+ { return number == abbrev->number; }
+
+ bool operator() (const abbrev_info *lhs,
+ const abbrev_info *rhs) const noexcept
+ { return (*this) (lhs->number, rhs); }
+ };
+
+ struct abbrev_info_number_hash
+ {
+ using is_transparent = void;
+
+ std::size_t operator() (unsigned int number) const noexcept
+ { return std::hash<unsigned int> () (number); }
+
+ std::size_t operator() (const abbrev_info *abbrev) const noexcept
+ { return (*this) (abbrev->number); }
+
+ };
- /* Hash table of abbrevs. */
- htab_up m_abbrevs;
+ /* Hash table of abbrev, identified by their number. */
+ gdb::unordered_set<const abbrev_info *,
+ abbrev_info_number_hash,
+ abbrev_info_number_eq>
+ m_abbrevs;
/* Storage for the abbrev table. */
auto_obstack m_abbrev_obstack;
--
2.47.0
* [PATCH v5 19/25] Convert typedef hash to new hash table
2024-11-04 18:27 [PATCH v5 00/25] Add a C++ hash table to gdbsupport Simon Marchi
` (17 preceding siblings ...)
2024-11-04 18:27 ` [PATCH v5 18/25] Convert abbrevs " Simon Marchi
@ 2024-11-04 18:27 ` Simon Marchi
2024-11-04 18:27 ` [PATCH v5 20/25] Convert all_bfds " Simon Marchi
` (6 subsequent siblings)
25 siblings, 0 replies; 27+ messages in thread
From: Simon Marchi @ 2024-11-04 18:27 UTC (permalink / raw)
To: gdb-patches; +Cc: Simon Marchi, Tom Tromey
This converts the typedef hash to use the new hash table.
This patch found a latent bug in the typedef code. Previously, the
hash function looked at the type name, but the hash equality function
used types_equal -- but that strips typedefs, meaning that equality of
types did not imply equality of hashes. This patch fixes the problem
and updates the relevant test.
Change-Id: I0d10236b01e74bac79621244a1c0c56f90d65594
Co-Authored-By: Tom Tromey <tom@tromey.com>
---
gdb/testsuite/gdb.cp/ptype-flags.exp | 25 ++++--
gdb/typeprint.c | 109 ++++-----------------------
gdb/typeprint.h | 47 ++++++++++--
3 files changed, 71 insertions(+), 110 deletions(-)
diff --git a/gdb/testsuite/gdb.cp/ptype-flags.exp b/gdb/testsuite/gdb.cp/ptype-flags.exp
index bb92da6122ae..476c075f2872 100644
--- a/gdb/testsuite/gdb.cp/ptype-flags.exp
+++ b/gdb/testsuite/gdb.cp/ptype-flags.exp
@@ -78,10 +78,18 @@ proc do_check_holder {name {flags ""} {show_typedefs 1} {show_methods 1}
proc do_check_typedef_holder {name {flags ""} {show_typedefs 1} {show_methods 1}
{raw 0}} {
- set contents {
- { field public "double a;" }
- { field public "ns::scoped_double b;" }
- { field public "global_double c;" }
+ if {$raw} {
+ set contents {
+ { field public "double a;" }
+ { field public "ns::scoped_double b;" }
+ { field public "global_double c;" }
+ }
+ } else {
+ set contents {
+ { field public "class_double a;" }
+ { field public "class_double b;" }
+ { field public "class_double c;" }
+ }
}
if {$show_typedefs} {
@@ -89,8 +97,13 @@ proc do_check_typedef_holder {name {flags ""} {show_typedefs 1} {show_methods 1}
}
if {$show_methods} {
- lappend contents { method private "double method1(ns::scoped_double);" }
- lappend contents { method private "double method2(global_double);" }
+ if {$raw} {
+ lappend contents { method private "double method1(ns::scoped_double);" }
+ lappend contents { method private "double method2(global_double);" }
+ } else {
+ lappend contents { method private "class_double method1(class_double);" }
+ lappend contents { method private "class_double method2(class_double);" }
+ }
}
if {$raw} {
diff --git a/gdb/typeprint.c b/gdb/typeprint.c
index 456d8dcb00b4..a4553098011c 100644
--- a/gdb/typeprint.c
+++ b/gdb/typeprint.c
@@ -194,49 +194,16 @@ print_offset_data::finish (struct type *type, int level,
\f
-/* A hash function for a typedef_field. */
-
-static hashval_t
-hash_typedef_field (const void *p)
-{
- const struct decl_field *tf = (const struct decl_field *) p;
-
- return htab_hash_string (TYPE_SAFE_NAME (tf->type));
-}
-
-/* An equality function for a typedef field. */
-
-static int
-eq_typedef_field (const void *a, const void *b)
-{
- const struct decl_field *tfa = (const struct decl_field *) a;
- const struct decl_field *tfb = (const struct decl_field *) b;
-
- return types_equal (tfa->type, tfb->type);
-}
-
/* See typeprint.h. */
void
typedef_hash_table::recursively_update (struct type *t)
{
- int i;
-
- for (i = 0; i < TYPE_TYPEDEF_FIELD_COUNT (t); ++i)
- {
- struct decl_field *tdef = &TYPE_TYPEDEF_FIELD (t, i);
- void **slot;
-
- slot = htab_find_slot (m_table.get (), tdef, INSERT);
- /* Only add a given typedef name once. Really this shouldn't
- happen; but it is safe enough to do the updates breadth-first
- and thus use the most specific typedef. */
- if (*slot == NULL)
- *slot = tdef;
- }
+ for (int i = 0; i < TYPE_TYPEDEF_FIELD_COUNT (t); ++i)
+ m_table.emplace (&TYPE_TYPEDEF_FIELD (t, i));
/* Recurse into superclasses. */
- for (i = 0; i < TYPE_N_BASECLASSES (t); ++i)
+ for (int i = 0; i < TYPE_N_BASECLASSES (t); ++i)
recursively_update (TYPE_BASECLASS (t, i));
}
@@ -250,7 +217,6 @@ typedef_hash_table::add_template_parameters (struct type *t)
for (i = 0; i < TYPE_N_TEMPLATE_ARGUMENTS (t); ++i)
{
struct decl_field *tf;
- void **slot;
/* We only want type-valued template parameters in the hash. */
if (TYPE_TEMPLATE_ARGUMENT (t, i)->aclass () != LOC_TYPEDEF)
@@ -260,45 +226,10 @@ typedef_hash_table::add_template_parameters (struct type *t)
tf->name = TYPE_TEMPLATE_ARGUMENT (t, i)->linkage_name ();
tf->type = TYPE_TEMPLATE_ARGUMENT (t, i)->type ();
- slot = htab_find_slot (m_table.get (), tf, INSERT);
- if (*slot == NULL)
- *slot = tf;
+ m_table.emplace (tf);
}
}
-/* See typeprint.h. */
-
-typedef_hash_table::typedef_hash_table ()
- : m_table (htab_create_alloc (10, hash_typedef_field, eq_typedef_field,
- NULL, xcalloc, xfree))
-{
-}
-
-/* Helper function for typedef_hash_table::copy. */
-
-static int
-copy_typedef_hash_element (void **slot, void *nt)
-{
- htab_t new_table = (htab_t) nt;
- void **new_slot;
-
- new_slot = htab_find_slot (new_table, *slot, INSERT);
- if (*new_slot == NULL)
- *new_slot = *slot;
-
- return 1;
-}
-
-/* See typeprint.h. */
-
-typedef_hash_table::typedef_hash_table (const typedef_hash_table &table)
-{
- m_table.reset (htab_create_alloc (10, hash_typedef_field, eq_typedef_field,
- NULL, xcalloc, xfree));
- htab_traverse_noresize (table.m_table.get (), copy_typedef_hash_element,
- m_table.get ());
-}
-
/* Look up the type T in the global typedef hash. If it is found,
return the typedef name. If it is not found, apply the
type-printers, if any, given by start_script_type_printers and return the
@@ -308,29 +239,21 @@ const char *
typedef_hash_table::find_global_typedef (const struct type_print_options *flags,
struct type *t)
{
- void **slot;
- struct decl_field tf, *new_tf;
-
if (flags->global_typedefs == NULL)
return NULL;
- tf.name = NULL;
- tf.type = t;
-
- slot = htab_find_slot (flags->global_typedefs->m_table.get (), &tf, INSERT);
- if (*slot != NULL)
- {
- new_tf = (struct decl_field *) *slot;
- return new_tf->name;
- }
+ if (auto it = flags->global_typedefs->m_table.find (t);
+ it != flags->global_typedefs->m_table.end ())
+ return (*it)->name;
/* Put an entry into the hash table now, in case
apply_ext_lang_type_printers recurses. */
- new_tf = XOBNEW (&flags->global_typedefs->m_storage, struct decl_field);
+ decl_field *new_tf
+ = XOBNEW (&flags->global_typedefs->m_storage, struct decl_field);
new_tf->name = NULL;
new_tf->type = t;
- *slot = new_tf;
+ flags->global_typedefs->m_table.emplace (new_tf);
gdb::unique_xmalloc_ptr<char> applied
= apply_ext_lang_type_printers (flags->global_printers, t);
@@ -350,15 +273,9 @@ typedef_hash_table::find_typedef (const struct type_print_options *flags,
{
if (flags->local_typedefs != NULL)
{
- struct decl_field tf, *found;
-
- tf.name = NULL;
- tf.type = t;
- htab_t table = flags->local_typedefs->m_table.get ();
- found = (struct decl_field *) htab_find (table, &tf);
-
- if (found != NULL)
- return found->name;
+ if (auto iter = flags->local_typedefs->m_table.find (t);
+ iter != flags->local_typedefs->m_table.end ())
+ return (*iter)->name;
}
return find_global_typedef (flags, t);
diff --git a/gdb/typeprint.h b/gdb/typeprint.h
index fe43fc148f49..3cba02f7172c 100644
--- a/gdb/typeprint.h
+++ b/gdb/typeprint.h
@@ -19,8 +19,10 @@
#ifndef TYPEPRINT_H
#define TYPEPRINT_H
-#include "gdbsupport/gdb-hashtab.h"
#include "gdbsupport/gdb_obstack.h"
+#include "gdbsupport/unordered_set.h"
+#include "gdbtypes.h"
+#include "hashtab.h"
enum language;
struct ui_file;
@@ -115,19 +117,21 @@ struct print_offset_data
extern const struct type_print_options type_print_raw_options;
-/* A hash table holding typedef_field objects. This is more
- complicated than an ordinary hash because it must also track the
- lifetime of some -- but not all -- of the contained objects. */
+/* A hash table holding decl_field objects. This is more complicated than an
+ ordinary hash because it must also track the lifetime of some -- but not all
+ -- of the contained objects. */
class typedef_hash_table
{
public:
/* Create a new typedef-lookup hash table. */
- typedef_hash_table ();
+ typedef_hash_table () = default;
/* Copy a typedef hash. */
- typedef_hash_table (const typedef_hash_table &);
+ typedef_hash_table (const typedef_hash_table &other)
+ : m_table (other.m_table)
+ {}
typedef_hash_table &operator= (const typedef_hash_table &) = delete;
@@ -150,9 +154,36 @@ class typedef_hash_table
static const char *find_global_typedef (const struct type_print_options *flags,
struct type *t);
+ struct decl_field_type_hash
+ {
+ using is_transparent = void;
- /* The actual hash table. */
- htab_up m_table;
+ std::size_t operator() (type *t) const noexcept
+ {
+ /* Use check_typedef: the hash must agree with equals, and types_equal
+ strips typedefs. */
+ return htab_hash_string (TYPE_SAFE_NAME (check_typedef (t)));
+ }
+
+ std::size_t operator() (const decl_field *f) const noexcept
+ { return (*this) (f->type); }
+ };
+
+ struct decl_field_type_eq
+ {
+ using is_transparent = void;
+
+ bool operator () (type *t, const decl_field *f) const noexcept
+ { return types_equal (t, f->type); }
+
+ bool operator() (const decl_field *lhs,
+ const decl_field *rhs) const noexcept
+ { return (*this) (lhs->type, rhs); }
+ };
+
+ /* The actual hash table of `decl_field *` identified by their type field. */
+ gdb::unordered_set<decl_field *, decl_field_type_hash, decl_field_type_eq>
+ m_table;
/* Storage for typedef_field objects that must be synthesized. */
auto_obstack m_storage;
--
2.47.0
^ permalink raw reply [flat|nested] 27+ messages in thread
* [PATCH v5 20/25] Convert all_bfds to new hash table
2024-11-04 18:27 [PATCH v5 00/25] Add a C++ hash table to gdbsupport Simon Marchi
` (18 preceding siblings ...)
2024-11-04 18:27 ` [PATCH v5 19/25] Convert typedef hash " Simon Marchi
@ 2024-11-04 18:27 ` Simon Marchi
2024-11-04 18:27 ` [PATCH v5 21/25] Convert more DWARF code " Simon Marchi
` (5 subsequent siblings)
25 siblings, 0 replies; 27+ messages in thread
From: Simon Marchi @ 2024-11-04 18:27 UTC (permalink / raw)
To: gdb-patches; +Cc: Simon Marchi, Tom Tromey
This converts gdb_bfd.c to use the new hash table for all_bfds.
This patch slightly changes the htab_t pretty-printer test, which was
relying on all_bfds. Note that with the new hash table, gdb-specific
printers aren't needed; the libstdc++ printers suffice -- in fact,
they are better, because the true types of the contents are available.
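
As an aside for readers following the conversion, here is a minimal,
self-contained sketch of the pattern (illustrative only, not GDB code):
std::unordered_set stands in for gdb::unordered_set, on the assumption that
the gdb wrapper follows the std interface, and a hypothetical fake_bfd type
stands in for struct bfd.  emplace ().second replaces the find-slot/assert/
fill sequence, erase replaces htab_remove_elt, and a range-for replaces the
htab_traverse callback.

#include <cassert>
#include <cstdio>
#include <string>
#include <unordered_set>

/* Hypothetical stand-in for struct bfd.  */
struct fake_bfd
{
  std::string filename;
  int refcount = 1;
};

/* Hashed by pointer identity, like the old htab_hash_pointer table.  */
static std::unordered_set<fake_bfd *> all_bfds;

static void
register_bfd (fake_bfd *abfd)
{
  /* .second tells us whether the element was newly inserted.  */
  bool inserted = all_bfds.emplace (abfd).second;
  assert (inserted);
}

static void
unregister_bfd (fake_bfd *abfd)
{
  all_bfds.erase (abfd);
}

int
main ()
{
  fake_bfd a {"a.out"}, b {"libfoo.so"};
  register_bfd (&a);
  register_bfd (&b);

  /* A range-for replaces the htab_traverse_noresize callback.  */
  for (fake_bfd *abfd : all_bfds)
    std::printf ("%d %p %s\n", abfd->refcount, (void *) abfd,
                 abfd->filename.c_str ());

  unregister_bfd (&a);
  unregister_bfd (&b);
}
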
Change-Id: I48b7bd142085287b34bdef8b6db5587581f94280
Co-Authored-By: Tom Tromey <tom@tromey.com>
---
gdb/gdb_bfd.c | 47 +++++++++----------------
gdb/testsuite/gdb.gdb/python-helper.exp | 3 +-
2 files changed, 19 insertions(+), 31 deletions(-)
diff --git a/gdb/gdb_bfd.c b/gdb/gdb_bfd.c
index 330d91828034..32132342f392 100644
--- a/gdb/gdb_bfd.c
+++ b/gdb/gdb_bfd.c
@@ -34,6 +34,7 @@
#include "inferior.h"
#include "cli/cli-style.h"
#include <unordered_map>
+#include "gdbsupport/unordered_set.h"
#if CXX_STD_THREAD
@@ -80,12 +81,12 @@ struct gdb_bfd_section_data
void *map_addr;
};
-/* A hash table holding every BFD that gdb knows about. This is not
+/* A hash set holding every BFD that gdb knows about. This is not
to be confused with 'gdb_bfd_cache', which is used for sharing
BFDs; in contrast, this hash is used just to implement
"maint info bfd". */
-static htab_t all_bfds;
+static gdb::unordered_set<bfd *> all_bfds;
/* An object of this type is stored in each BFD's user data. */
@@ -495,7 +496,6 @@ static void
gdb_bfd_init_data (struct bfd *abfd, struct stat *st)
{
struct gdb_bfd_data *gdata;
- void **slot;
gdb_assert (bfd_usrdata (abfd) == nullptr);
@@ -506,9 +506,8 @@ gdb_bfd_init_data (struct bfd *abfd, struct stat *st)
bfd_set_usrdata (abfd, gdata);
/* This is the first we've seen it, so add it to the hash table. */
- slot = htab_find_slot (all_bfds, abfd, INSERT);
- gdb_assert (slot && !*slot);
- *slot = abfd;
+ bool inserted = all_bfds.emplace (abfd).second;
+ gdb_assert (inserted);
}
/* See gdb_bfd.h. */
@@ -749,7 +748,7 @@ gdb_bfd_unref (struct bfd *abfd)
delete gdata;
bfd_set_usrdata (abfd, NULL); /* Paranoia. */
- htab_remove_elt (all_bfds, abfd);
+ all_bfds.erase (abfd);
gdb_bfd_close_or_warn (abfd);
@@ -1170,25 +1169,6 @@ gdb_bfd_errmsg (bfd_error_type error_tag, char **matching)
return ret;
}
-/* A callback for htab_traverse that prints a single BFD. */
-
-static int
-print_one_bfd (void **slot, void *data)
-{
- bfd *abfd = (struct bfd *) *slot;
- struct gdb_bfd_data *gdata = (struct gdb_bfd_data *) bfd_usrdata (abfd);
- struct ui_out *uiout = (struct ui_out *) data;
-
- ui_out_emit_tuple tuple_emitter (uiout, NULL);
- uiout->field_signed ("refcount", gdata->refc);
- uiout->field_string ("addr", host_address_to_string (abfd));
- uiout->field_string ("filename", bfd_get_filename (abfd),
- file_name_style.style ());
- uiout->text ("\n");
-
- return 1;
-}
-
/* Implement the 'maint info bfd' command. */
static void
@@ -1202,7 +1182,17 @@ maintenance_info_bfds (const char *arg, int from_tty)
uiout->table_header (40, ui_left, "filename", "Filename");
uiout->table_body ();
- htab_traverse_noresize (all_bfds, print_one_bfd, uiout);
+
+ for (auto abfd : all_bfds)
+ {
+ auto gdata = static_cast<gdb_bfd_data *> (bfd_usrdata (abfd));
+ ui_out_emit_tuple tuple_emitter (uiout, nullptr);
+ uiout->field_signed ("refcount", gdata->refc);
+ uiout->field_string ("addr", host_address_to_string (abfd));
+ uiout->field_string ("filename", bfd_get_filename (abfd),
+ file_name_style.style ());
+ uiout->text ("\n");
+ }
}
/* BFD related per-inferior data. */
@@ -1297,9 +1287,6 @@ void _initialize_gdb_bfd ();
void
_initialize_gdb_bfd ()
{
- all_bfds = htab_create_alloc (10, htab_hash_pointer, htab_eq_pointer,
- NULL, xcalloc, xfree);
-
add_cmd ("bfds", class_maintenance, maintenance_info_bfds, _("\
List the BFDs that are currently open."),
&maintenanceinfolist);
diff --git a/gdb/testsuite/gdb.gdb/python-helper.exp b/gdb/testsuite/gdb.gdb/python-helper.exp
index c17523a1576d..d136d687daca 100644
--- a/gdb/testsuite/gdb.gdb/python-helper.exp
+++ b/gdb/testsuite/gdb.gdb/python-helper.exp
@@ -261,7 +261,8 @@ proc test_python_helper {} {
}
# Test the htab_t pretty-printer.
- gdb_test -prompt $outer_prompt_re "print all_bfds" "htab_t with ${::decimal} elements = \\{${::hex}.*\\}"
+ gdb_test -prompt $outer_prompt_re "print varobj_table" \
+ "htab_t with ${::decimal} elements"
# Test the intrusive_list pretty-printer. A bug occurred in the
# pretty-printer for lists with more than one element. Verify that
--
2.47.0
^ permalink raw reply [flat|nested] 27+ messages in thread
* [PATCH v5 21/25] Convert more DWARF code to new hash table
2024-11-04 18:27 [PATCH v5 00/25] Add a C++ hash table to gdbsupport Simon Marchi
` (19 preceding siblings ...)
2024-11-04 18:27 ` [PATCH v5 20/25] Convert all_bfds " Simon Marchi
@ 2024-11-04 18:27 ` Simon Marchi
2024-11-04 18:27 ` [PATCH v5 22/25] Convert gdb_bfd.c " Simon Marchi
` (4 subsequent siblings)
25 siblings, 0 replies; 27+ messages in thread
From: Simon Marchi @ 2024-11-04 18:27 UTC (permalink / raw)
To: gdb-patches; +Cc: Simon Marchi, Tom Tromey
From: Simon Marchi <simon.marchi@polymtl.ca>
This converts more code in the DWARF reader to use the new hash table.
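
For context, the largest hunk below keeps the existing memoization scheme,
just with the new container.  Here is a minimal sketch of that scheme
(illustrative only, not GDB code): std::unordered_set stands in for
gdb::unordered_set (contains requires C++20), and file_names / matches are
hypothetical stand-ins for quick_file_names and the file matcher.  Both
positive and negative results are cached, so a shared object is only
examined once.

#include <cstdio>
#include <cstring>
#include <unordered_set>
#include <vector>

/* Hypothetical stand-in for quick_file_names.  */
struct file_names { const char *name; };

static bool
matches (const file_names *f)
{
  /* Stand-in for a potentially expensive check.  */
  return std::strcmp (f->name, "main.c") == 0;
}

int
main ()
{
  file_names main_c {"main.c"}, util_c {"util.c"};

  /* Several "CUs" can share the same file_names object.  */
  std::vector<const file_names *> per_cu_files = {&main_c, &util_c, &main_c};

  std::unordered_set<const file_names *> visited_found;
  std::unordered_set<const file_names *> visited_not_found;

  for (const file_names *f : per_cu_files)
    {
      if (visited_not_found.contains (f))
        continue;
      if (visited_found.contains (f))
        {
          std::puts ("already known to match");
          continue;
        }

      if (matches (f))
        visited_found.insert (f);
      else
        visited_not_found.insert (f);
    }
}
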
Change-Id: I86f8c0072f0a09642de3d6f033fefd0c8acbc4a3
Co-Authored-By: Tom Tromey <tom@tromey.com>
---
gdb/dwarf2/cu.c | 52 ++++++++++++------------------------------
gdb/dwarf2/cu.h | 8 ++++---
gdb/dwarf2/read.c | 57 ++++++++++++++++++++---------------------------
3 files changed, 43 insertions(+), 74 deletions(-)
diff --git a/gdb/dwarf2/cu.c b/gdb/dwarf2/cu.c
index 76cdd6fb4b91..1029b2426bba 100644
--- a/gdb/dwarf2/cu.c
+++ b/gdb/dwarf2/cu.c
@@ -122,53 +122,29 @@ dwarf2_cu::addr_type () const
return addr_type;
}
-/* A hashtab traversal function that marks the dependent CUs. */
-
-static int
-dwarf2_mark_helper (void **slot, void *data)
-{
- dwarf2_per_cu_data *per_cu = (dwarf2_per_cu_data *) *slot;
- dwarf2_per_objfile *per_objfile = (dwarf2_per_objfile *) data;
- dwarf2_cu *cu = per_objfile->get_cu (per_cu);
-
- /* cu->m_dependencies references may not yet have been ever read if
- QUIT aborts reading of the chain. As such dependencies remain
- valid it is not much useful to track and undo them during QUIT
- cleanups. */
- if (cu != nullptr)
- cu->mark ();
- return 1;
-}
-
/* See dwarf2/cu.h. */
void
dwarf2_cu::mark ()
{
- if (!m_mark)
- {
- m_mark = true;
- if (m_dependencies != nullptr)
- htab_traverse_noresize (m_dependencies.get (), dwarf2_mark_helper,
- per_objfile);
- }
-}
+ if (m_mark)
+ return;
-/* See dwarf2/cu.h. */
+ m_mark = true;
-void
-dwarf2_cu::add_dependence (struct dwarf2_per_cu_data *ref_per_cu)
-{
- void **slot;
+ for (dwarf2_per_cu_data *per_cu : m_dependencies)
+ {
+ /* cu->m_dependencies references may not yet have been ever
+ read if QUIT aborts reading of the chain. As such
+ dependencies remain valid it is not much useful to track
+ and undo them during QUIT cleanups. */
+ dwarf2_cu *cu = per_objfile->get_cu (per_cu);
- if (m_dependencies == nullptr)
- m_dependencies.reset (htab_create_alloc
- (5, htab_hash_pointer, htab_eq_pointer,
- nullptr, xcalloc, xfree));
+ if (cu == nullptr)
+ continue;
- slot = htab_find_slot (m_dependencies.get (), ref_per_cu, INSERT);
- if (*slot == nullptr)
- *slot = ref_per_cu;
+ cu->mark ();
+ }
}
/* See dwarf2/cu.h. */
diff --git a/gdb/dwarf2/cu.h b/gdb/dwarf2/cu.h
index b0ec2d6fabce..ea8e14770bd2 100644
--- a/gdb/dwarf2/cu.h
+++ b/gdb/dwarf2/cu.h
@@ -24,6 +24,7 @@
#include "dwarf2/comp-unit-head.h"
#include <optional>
#include "language.h"
+#include "gdbsupport/unordered_set.h"
/* Type used for delaying computation of method physnames.
See comments for compute_delayed_physnames. */
@@ -95,7 +96,8 @@ struct dwarf2_cu
}
/* Add a dependence relationship from this cu to REF_PER_CU. */
- void add_dependence (struct dwarf2_per_cu_data *ref_per_cu);
+ void add_dependence (struct dwarf2_per_cu_data *ref_per_cu)
+ { m_dependencies.emplace (ref_per_cu); }
/* The header of the compilation unit. */
struct comp_unit_head header;
@@ -120,9 +122,9 @@ struct dwarf2_cu
std::unique_ptr<buildsym_compunit> m_builder;
/* A set of pointers to dwarf2_per_cu_data objects for compilation
- units referenced by this one. Only set during full symbol processing;
+ units referenced by this one. Only used during full symbol processing;
partial symbol tables do not have dependencies. */
- htab_up m_dependencies;
+ gdb::unordered_set<dwarf2_per_cu_data *> m_dependencies;
public:
/* The generic symbol table building routines have separate lists for
diff --git a/gdb/dwarf2/read.c b/gdb/dwarf2/read.c
index e2be5b150c84..d0a7a9dd1b2a 100644
--- a/gdb/dwarf2/read.c
+++ b/gdb/dwarf2/read.c
@@ -96,6 +96,7 @@
#include "dwarf2/parent-map.h"
#include "dwarf2/error.h"
#include <variant>
+#include "gdbsupport/unordered_set.h"
/* When == 1, print basic high level tracing messages.
When > 1, be more verbose.
@@ -2901,12 +2902,8 @@ dw_expand_symtabs_matching_file_matcher
if (file_matcher == NULL)
return;
- htab_up visited_found (htab_create_alloc (10, htab_hash_pointer,
- htab_eq_pointer,
- NULL, xcalloc, xfree));
- htab_up visited_not_found (htab_create_alloc (10, htab_hash_pointer,
- htab_eq_pointer,
- NULL, xcalloc, xfree));
+ gdb::unordered_set<quick_file_names *> visited_found;
+ gdb::unordered_set<quick_file_names *> visited_not_found;
/* The rule is CUs specify all the files, including those used by
any TU, so there's no need to scan TUs here. */
@@ -2949,9 +2946,9 @@ dw_expand_symtabs_matching_file_matcher
if (file_data == NULL)
continue;
- if (htab_find (visited_not_found.get (), file_data) != NULL)
+ if (visited_not_found.contains (file_data))
continue;
- else if (htab_find (visited_found.get (), file_data) != NULL)
+ else if (visited_found.contains (file_data))
{
per_cu->mark = 1;
continue;
@@ -2982,11 +2979,10 @@ dw_expand_symtabs_matching_file_matcher
}
}
- void **slot = htab_find_slot (per_cu->mark
- ? visited_found.get ()
- : visited_not_found.get (),
- file_data, INSERT);
- *slot = file_data;
+ if (per_cu->mark)
+ visited_found.insert (file_data);
+ else
+ visited_not_found.insert (file_data);
}
}
@@ -6117,21 +6113,21 @@ void dwarf2_per_objfile::set_type_for_signatured_type
included by PER_CU. */
static void
-recursively_compute_inclusions (std::vector<compunit_symtab *> *result,
- htab_t all_children, htab_t all_type_symtabs,
- dwarf2_per_cu_data *per_cu,
- dwarf2_per_objfile *per_objfile,
- struct compunit_symtab *immediate_parent)
+recursively_compute_inclusions
+ (std::vector<compunit_symtab *> *result,
+ gdb::unordered_set<dwarf2_per_cu_data *> &all_children,
+ gdb::unordered_set<compunit_symtab *> &all_type_symtabs,
+ dwarf2_per_cu_data *per_cu,
+ dwarf2_per_objfile *per_objfile,
+ struct compunit_symtab *immediate_parent)
{
- void **slot = htab_find_slot (all_children, per_cu, INSERT);
- if (*slot != NULL)
+ if (bool inserted = all_children.emplace (per_cu).second;
+ !inserted)
{
/* This inclusion and its children have been processed. */
return;
}
- *slot = per_cu;
-
/* Only add a CU if it has a symbol table. */
compunit_symtab *cust = per_objfile->get_symtab (per_cu);
if (cust != NULL)
@@ -6140,10 +6136,9 @@ recursively_compute_inclusions (std::vector<compunit_symtab *> *result,
seen it yet (type unit per_cu's can share symtabs). */
if (per_cu->is_debug_types)
{
- slot = htab_find_slot (all_type_symtabs, cust, INSERT);
- if (*slot == NULL)
+ if (bool inserted = all_type_symtabs.insert (cust).second;
+ inserted)
{
- *slot = cust;
result->push_back (cust);
if (cust->user == NULL)
cust->user = immediate_parent;
@@ -6182,16 +6177,12 @@ compute_compunit_symtab_includes (dwarf2_per_cu_data *per_cu,
if (cust == NULL)
return;
- htab_up all_children (htab_create_alloc (1, htab_hash_pointer,
- htab_eq_pointer,
- NULL, xcalloc, xfree));
- htab_up all_type_symtabs (htab_create_alloc (1, htab_hash_pointer,
- htab_eq_pointer,
- NULL, xcalloc, xfree));
+ gdb::unordered_set<dwarf2_per_cu_data *> all_children;
+ gdb::unordered_set<compunit_symtab *> all_type_symtabs;
for (dwarf2_per_cu_data *ptr : per_cu->imported_symtabs)
- recursively_compute_inclusions (&result_symtabs, all_children.get (),
- all_type_symtabs.get (), ptr,
+ recursively_compute_inclusions (&result_symtabs, all_children,
+ all_type_symtabs, ptr,
per_objfile, cust);
/* Now we have a transitive closure of all the included symtabs. */
--
2.47.0
^ permalink raw reply [flat|nested] 27+ messages in thread
* [PATCH v5 22/25] Convert gdb_bfd.c to new hash table
2024-11-04 18:27 [PATCH v5 00/25] Add a C++ hash table to gdbsupport Simon Marchi
` (20 preceding siblings ...)
2024-11-04 18:27 ` [PATCH v5 21/25] Convert more DWARF code " Simon Marchi
@ 2024-11-04 18:27 ` Simon Marchi
2024-11-04 18:27 ` [PATCH v5 23/25] Convert dwarf_cu::die_hash " Simon Marchi
` (3 subsequent siblings)
25 siblings, 0 replies; 27+ messages in thread
From: Simon Marchi @ 2024-11-04 18:27 UTC (permalink / raw)
To: gdb-patches; +Cc: Simon Marchi, Tom Tromey
This converts the BFD cache in gdb_bfd.c to use the new hash table.
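
The interesting part of the conversion is the transparent (heterogeneous)
lookup: the cache can now be probed with a lightweight search key instead of
a dummy bfd.  A minimal C++20 sketch of that idea, illustrative only:
std::unordered_set stands in for gdb::unordered_set, assuming the gdb
wrapper supports the same is_transparent protocol, and entry / entry_search
are hypothetical stand-ins for bfd and gdb_bfd_cache_search.

#include <cstdio>
#include <functional>
#include <string>
#include <string_view>
#include <unordered_set>

/* Hypothetical stand-ins for bfd and gdb_bfd_cache_search.  */
struct entry
{
  std::string filename;
  long mtime;
};

struct entry_search
{
  const char *filename;
  long mtime;
};

struct entry_hash
{
  using is_transparent = void;

  std::size_t operator() (const entry *e) const noexcept
  { return std::hash<std::string_view> () (e->filename); }

  std::size_t operator() (const entry_search &s) const noexcept
  { return std::hash<std::string_view> () (s.filename); }
};

struct entry_eq
{
  using is_transparent = void;

  bool operator() (const entry *lhs, const entry *rhs) const noexcept
  { return lhs == rhs; }

  bool operator() (const entry_search &s, const entry *e) const noexcept
  { return s.mtime == e->mtime && s.filename == e->filename; }

  bool operator() (const entry *e, const entry_search &s) const noexcept
  { return (*this) (s, e); }
};

int
main ()
{
  std::unordered_set<entry *, entry_hash, entry_eq> cache;

  entry a {"a.out", 1700000000};
  cache.emplace (&a);

  /* Probe with a search key; no entry object has to be built.  */
  entry_search probe {"a.out", 1700000000};
  if (auto it = cache.find (probe); it != cache.end ())
    std::printf ("cache hit: %s\n", (*it)->filename.c_str ());
}
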
Change-Id: Ib6257fe9d4f7f8ef793a2c82d53935a8d2c245a3
Co-Authored-By: Tom Tromey <tom@tromey.com>
---
gdb/gdb_bfd.c | 112 +++++++++++++++++++++-----------------------------
1 file changed, 47 insertions(+), 65 deletions(-)
diff --git a/gdb/gdb_bfd.c b/gdb/gdb_bfd.c
index 32132342f392..3683eeba52bf 100644
--- a/gdb/gdb_bfd.c
+++ b/gdb/gdb_bfd.c
@@ -167,10 +167,6 @@ registry_accessor<bfd>::get (bfd *abfd)
return &gdata->registry_fields;
}
-/* A hash table storing all the BFDs maintained in the cache. */
-
-static htab_t gdb_bfd_cache;
-
/* When true gdb will reuse an existing bfd object if the filename,
modification time, and file size all match. */
@@ -216,34 +212,42 @@ struct gdb_bfd_cache_search
dev_t device_id;
};
-/* A hash function for BFDs. */
-
-static hashval_t
-hash_bfd (const void *b)
+struct bfd_cache_hash
{
- const bfd *abfd = (const struct bfd *) b;
+ using is_transparent = void;
- /* It is simplest to just hash the filename. */
- return htab_hash_string (bfd_get_filename (abfd));
-}
+ std::size_t operator() (bfd *abfd) const noexcept
+ {
+ /* It is simplest to just hash the filename. */
+ return htab_hash_string (bfd_get_filename (abfd));
+ }
-/* An equality function for BFDs. Note that this expects the caller
- to search using struct gdb_bfd_cache_search only, not BFDs. */
+ std::size_t operator() (const gdb_bfd_cache_search &search) const noexcept
+ { return htab_hash_string (search.filename); }
+};
-static int
-eq_bfd (const void *a, const void *b)
+struct bfd_cache_eq
{
- const bfd *abfd = (const struct bfd *) a;
- const struct gdb_bfd_cache_search *s
- = (const struct gdb_bfd_cache_search *) b;
- struct gdb_bfd_data *gdata = (struct gdb_bfd_data *) bfd_usrdata (abfd);
+ using is_transparent = void;
- return (gdata->mtime == s->mtime
- && gdata->size == s->size
- && gdata->inode == s->inode
- && gdata->device_id == s->device_id
- && strcmp (bfd_get_filename (abfd), s->filename) == 0);
-}
+ bool operator() (bfd *lhs, bfd *rhs) const noexcept
+ { return lhs == rhs; }
+
+ bool operator() (const gdb_bfd_cache_search &s, bfd *abfd) const noexcept
+ {
+ auto gdata = static_cast<gdb_bfd_data *> (bfd_usrdata (abfd));
+
+ return (gdata->mtime == s.mtime
+ && gdata->size == s.size
+ && gdata->inode == s.inode
+ && gdata->device_id == s.device_id
+ && strcmp (bfd_get_filename (abfd), s.filename) == 0);
+ }
+};
+
+/* A hash set storing all the BFDs maintained in the cache. */
+
+static gdb::unordered_set<bfd *, bfd_cache_hash, bfd_cache_eq> gdb_bfd_cache;
/* See gdb_bfd.h. */
@@ -516,10 +520,7 @@ gdb_bfd_ref_ptr
gdb_bfd_open (const char *name, const char *target, int fd,
bool warn_if_slow)
{
- hashval_t hash;
- void **slot;
bfd *abfd;
- struct gdb_bfd_cache_search search;
struct stat st;
if (is_target_filename (name))
@@ -544,10 +545,6 @@ gdb_bfd_open (const char *name, const char *target, int fd,
std::lock_guard<std::recursive_mutex> guard (gdb_bfd_mutex);
#endif
- if (gdb_bfd_cache == NULL)
- gdb_bfd_cache = htab_create_alloc (1, hash_bfd, eq_bfd, NULL,
- xcalloc, xfree);
-
if (fd == -1)
{
fd = gdb_open_cloexec (name, O_RDONLY | O_BINARY, 0).release ();
@@ -568,25 +565,26 @@ gdb_bfd_open (const char *name, const char *target, int fd,
return gdb_bfd_ref_ptr::new_reference (abfd);
}
+ gdb_bfd_cache_search search;
+
search.filename = name;
search.mtime = st.st_mtime;
search.size = st.st_size;
search.inode = st.st_ino;
search.device_id = st.st_dev;
- /* Note that this must compute the same result as hash_bfd. */
- hash = htab_hash_string (name);
- /* Note that we cannot use htab_find_slot_with_hash here, because
- opening the BFD may fail; and this would violate hashtab
- invariants. */
- abfd = (struct bfd *) htab_find_with_hash (gdb_bfd_cache, &search, hash);
- if (bfd_sharing && abfd != NULL)
+ if (bfd_sharing)
{
- bfd_cache_debug_printf ("Reusing cached bfd %s for %s",
- host_address_to_string (abfd),
- bfd_get_filename (abfd));
- close (fd);
- return gdb_bfd_ref_ptr::new_reference (abfd);
+ if (auto iter = gdb_bfd_cache.find (search);
+ iter != gdb_bfd_cache.end ())
+ {
+ abfd = *iter;
+ bfd_cache_debug_printf ("Reusing cached bfd %s for %s",
+ host_address_to_string (abfd),
+ bfd_get_filename (abfd));
+ close (fd);
+ return gdb_bfd_ref_ptr::new_reference (abfd);
+ }
}
abfd = bfd_fopen (name, target, FOPEN_RB, fd);
@@ -609,9 +607,8 @@ gdb_bfd_open (const char *name, const char *target, int fd,
if (bfd_sharing)
{
- slot = htab_find_slot_with_hash (gdb_bfd_cache, &search, hash, INSERT);
- gdb_assert (!*slot);
- *slot = abfd;
+ bool inserted = gdb_bfd_cache.emplace (abfd).second;
+ gdb_assert (inserted);
}
return gdb_bfd_ref_ptr (abfd);
@@ -700,7 +697,6 @@ void
gdb_bfd_unref (struct bfd *abfd)
{
struct gdb_bfd_data *gdata;
- struct gdb_bfd_cache_search search;
bfd *archive_bfd;
if (abfd == NULL)
@@ -727,23 +723,9 @@ gdb_bfd_unref (struct bfd *abfd)
bfd_get_filename (abfd));
archive_bfd = gdata->archive_bfd;
- search.filename = bfd_get_filename (abfd);
- if (gdb_bfd_cache && search.filename)
- {
- hashval_t hash = htab_hash_string (search.filename);
- void **slot;
-
- search.mtime = gdata->mtime;
- search.size = gdata->size;
- search.inode = gdata->inode;
- search.device_id = gdata->device_id;
- slot = htab_find_slot_with_hash (gdb_bfd_cache, &search, hash,
- NO_INSERT);
-
- if (slot && *slot)
- htab_clear_slot (gdb_bfd_cache, slot);
- }
+ if (bfd_get_filename (abfd) != nullptr)
+ gdb_bfd_cache.erase (abfd);
delete gdata;
bfd_set_usrdata (abfd, NULL); /* Paranoia. */
--
2.47.0
^ permalink raw reply [flat|nested] 27+ messages in thread
* [PATCH v5 23/25] Convert dwarf_cu::die_hash to new hash table
2024-11-04 18:27 [PATCH v5 00/25] Add a C++ hash table to gdbsupport Simon Marchi
` (21 preceding siblings ...)
2024-11-04 18:27 ` [PATCH v5 22/25] Convert gdb_bfd.c " Simon Marchi
@ 2024-11-04 18:27 ` Simon Marchi
2024-11-04 18:27 ` [PATCH v5 24/25] Convert dwarf2_cu::call_site_htab " Simon Marchi
` (2 subsequent siblings)
25 siblings, 0 replies; 27+ messages in thread
From: Simon Marchi @ 2024-11-04 18:27 UTC (permalink / raw)
To: gdb-patches; +Cc: Simon Marchi
From: Simon Marchi <simon.marchi@polymtl.ca>
Convert one use of htab_t, mapping section offsets to die_info
objects, to `gdb::unordered_set`.
Change-Id: Ic80df22bda551e2d4c2511d167e057f4d6cd2b3e
---
gdb/dwarf2/cu.h | 5 ++++-
gdb/dwarf2/die.c | 21 ---------------------
gdb/dwarf2/die.h | 37 ++++++++++++++++++++++++++++---------
gdb/dwarf2/read.c | 39 ++++++++++++---------------------------
4 files changed, 44 insertions(+), 58 deletions(-)
diff --git a/gdb/dwarf2/cu.h b/gdb/dwarf2/cu.h
index ea8e14770bd2..23b06dc1fb68 100644
--- a/gdb/dwarf2/cu.h
+++ b/gdb/dwarf2/cu.h
@@ -25,6 +25,7 @@
#include <optional>
#include "language.h"
#include "gdbsupport/unordered_set.h"
+#include "dwarf2/die.h"
/* Type used for delaying computation of method physnames.
See comments for compute_delayed_physnames. */
@@ -153,7 +154,9 @@ struct dwarf2_cu
/* A hash table of DIE cu_offset for following references with
die_info->offset.sect_off as hash. */
- htab_up die_hash;
+ using die_hash_t = gdb::unordered_set<die_info *, die_info_hash_sect_off,
+ die_info_eq_sect_off>;
+ die_hash_t die_hash;
/* Full DIEs if read in. */
struct die_info *dies = nullptr;
diff --git a/gdb/dwarf2/die.c b/gdb/dwarf2/die.c
index bfa54e517abb..500d7bfe0818 100644
--- a/gdb/dwarf2/die.c
+++ b/gdb/dwarf2/die.c
@@ -35,27 +35,6 @@ die_info::allocate (struct obstack *obstack, int num_attrs)
return die;
}
-/* See die.h. */
-
-hashval_t
-die_info::hash (const void *item)
-{
- const struct die_info *die = (const struct die_info *) item;
-
- return to_underlying (die->sect_off);
-}
-
-/* See die.h. */
-
-int
-die_info::eq (const void *item_lhs, const void *item_rhs)
-{
- const struct die_info *die_lhs = (const struct die_info *) item_lhs;
- const struct die_info *die_rhs = (const struct die_info *) item_rhs;
-
- return die_lhs->sect_off == die_rhs->sect_off;
-}
-
static void
dump_die_shallow (struct ui_file *f, int indent, struct die_info *die)
{
diff --git a/gdb/dwarf2/die.h b/gdb/dwarf2/die.h
index d4eab0838bfe..770964eb9a83 100644
--- a/gdb/dwarf2/die.h
+++ b/gdb/dwarf2/die.h
@@ -22,7 +22,6 @@
#include "complaints.h"
#include "dwarf2/attribute.h"
-#include "hashtab.h"
/* This data structure holds a complete die structure. */
struct die_info
@@ -31,14 +30,6 @@ struct die_info
attributes that are needed. */
static die_info *allocate (struct obstack *obstack, int num_attrs);
- /* Trivial hash function for die_info: the hash value of a DIE is
- its offset in .debug_info for this objfile. */
- static hashval_t hash (const void *item);
-
- /* Trivial comparison function for die_info structures: two DIEs
- are equal if they have the same offset. */
- static int eq (const void *item_lhs, const void *item_rhs);
-
/* Dump this DIE and any children to MAX_LEVEL. They are written to
gdb_stdlog. Note this is called from the pdie user command in
gdb-gdb.gdb. */
@@ -148,4 +139,32 @@ struct die_info
struct attribute attrs[1];
};
+/* Key hash type to store die_info objects in gdb::unordered_set, identified by
+ their section offsets. */
+
+struct die_info_hash_sect_off final
+{
+ using is_transparent = void;
+
+ std::size_t operator() (const die_info *die) const noexcept
+ { return (*this) (die->sect_off); }
+
+ std::size_t operator() (sect_offset offset) const noexcept
+ { return std::hash<sect_offset> () (offset); }
+};
+
+/* Key equal type to store die_info objects in gdb::unordered_set, identified by
+ their section offsets. */
+
+struct die_info_eq_sect_off final
+{
+ using is_transparent = void;
+
+ bool operator() (const die_info *a, const die_info *b) const noexcept
+ { return (*this) (a->sect_off, b); }
+
+ bool operator() (sect_offset offset, const die_info *die) const noexcept
+ { return offset == die->sect_off; }
+};
+
#endif /* GDB_DWARF2_DIE_H */
diff --git a/gdb/dwarf2/read.c b/gdb/dwarf2/read.c
index d0a7a9dd1b2a..c543517dbb3d 100644
--- a/gdb/dwarf2/read.c
+++ b/gdb/dwarf2/read.c
@@ -5572,11 +5572,8 @@ load_full_comp_unit (dwarf2_per_cu_data *this_cu,
struct dwarf2_cu *cu = reader.cu;
const gdb_byte *info_ptr = reader.info_ptr;
- gdb_assert (cu->die_hash == NULL);
- cu->die_hash.reset (htab_create_alloc
- (cu->header.get_length_without_initial () / 12,
- die_info::hash, die_info::eq,
- nullptr, xcalloc, xfree));
+ gdb_assert (cu->die_hash.empty ());
+ cu->die_hash.reserve (cu->header.get_length_without_initial () / 12);
if (reader.comp_unit_die->has_children)
reader.comp_unit_die->child
@@ -15864,10 +15861,8 @@ read_die_and_children (const struct die_reader_specs *reader,
return NULL;
}
- void **slot = htab_find_slot_with_hash (reader->cu->die_hash.get (), die,
- to_underlying (die->sect_off),
- INSERT);
- *slot = die;
+ bool inserted = reader->cu->die_hash.emplace (die).second;
+ gdb_assert (inserted);
if (die->has_children)
die->child = read_die_and_siblings_1 (reader, cur_ptr, new_info_ptr, die);
@@ -20484,7 +20479,6 @@ static struct die_info *
follow_die_offset (sect_offset sect_off, int offset_in_dwz,
struct dwarf2_cu **ref_cu)
{
- struct die_info temp_die;
struct dwarf2_cu *target_cu, *cu = *ref_cu;
dwarf2_per_objfile *per_objfile = cu->per_objfile;
@@ -20545,11 +20539,9 @@ follow_die_offset (sect_offset sect_off, int offset_in_dwz,
}
*ref_cu = target_cu;
- temp_die.sect_off = sect_off;
- return (struct die_info *) htab_find_with_hash (target_cu->die_hash.get (),
- &temp_die,
- to_underlying (sect_off));
+ auto it = target_cu->die_hash.find (sect_off);
+ return it != target_cu->die_hash.end () ? *it : nullptr;
}
/* Follow reference attribute ATTR of SRC_DIE.
@@ -20908,9 +20900,7 @@ static struct die_info *
follow_die_sig_1 (struct die_info *src_die, struct signatured_type *sig_type,
struct dwarf2_cu **ref_cu)
{
- struct die_info temp_die;
struct dwarf2_cu *sig_cu;
- struct die_info *die;
dwarf2_per_objfile *per_objfile = (*ref_cu)->per_objfile;
@@ -20931,11 +20921,9 @@ follow_die_sig_1 (struct die_info *src_die, struct signatured_type *sig_type,
sig_cu = per_objfile->get_cu (sig_type);
gdb_assert (sig_cu != NULL);
gdb_assert (to_underlying (sig_type->type_offset_in_section) != 0);
- temp_die.sect_off = sig_type->type_offset_in_section;
- die = (struct die_info *) htab_find_with_hash (sig_cu->die_hash.get (),
- &temp_die,
- to_underlying (temp_die.sect_off));
- if (die)
+
+ if (auto die_it = sig_cu->die_hash.find (sig_type->type_offset_in_section);
+ die_it != sig_cu->die_hash.end ())
{
/* For .gdb_index version 7 keep track of included TUs.
http://sourceware.org/bugzilla/show_bug.cgi?id=15021. */
@@ -20944,7 +20932,7 @@ follow_die_sig_1 (struct die_info *src_die, struct signatured_type *sig_type,
(*ref_cu)->per_cu->imported_symtabs.push_back (sig_cu->per_cu);
*ref_cu = sig_cu;
- return die;
+ return *die_it;
}
return NULL;
@@ -21126,11 +21114,8 @@ read_signatured_type (signatured_type *sig_type,
struct dwarf2_cu *cu = reader.cu;
const gdb_byte *info_ptr = reader.info_ptr;
- gdb_assert (cu->die_hash == NULL);
- cu->die_hash.reset (htab_create_alloc
- (cu->header.get_length_without_initial () / 12,
- die_info::hash, die_info::eq,
- nullptr, xcalloc, xfree));
+ gdb_assert (cu->die_hash.empty ());
+ cu->die_hash.reserve (cu->header.get_length_without_initial () / 12);
if (reader.comp_unit_die->has_children)
reader.comp_unit_die->child
--
2.47.0
^ permalink raw reply [flat|nested] 27+ messages in thread
* [PATCH v5 24/25] Convert dwarf2_cu::call_site_htab to new hash table
2024-11-04 18:27 [PATCH v5 00/25] Add a C++ hash table to gdbsupport Simon Marchi
` (22 preceding siblings ...)
2024-11-04 18:27 ` [PATCH v5 23/25] Convert dwarf_cu::die_hash " Simon Marchi
@ 2024-11-04 18:27 ` Simon Marchi
2024-11-04 18:27 ` [PATCH v5 25/25] Convert dwarf2_per_objfile::die_type_hash " Simon Marchi
2024-11-22 19:10 ` [PATCH v5 00/25] Add a C++ hash table to gdbsupport Tom Tromey
25 siblings, 0 replies; 27+ messages in thread
From: Simon Marchi @ 2024-11-04 18:27 UTC (permalink / raw)
To: gdb-patches; +Cc: Simon Marchi
From: Simon Marchi <simon.marchi@polymtl.ca>
Convert one use of htab_t, mapping unrelocated PCs to call_site
objects, to a `gdb::unordered_set` of `call_site *` keyed by their
unrelocated PC (`call_site_htab_t`).
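
To see why a set rather than a map suffices here: with a hash and equality
that are transparent on the PC member, a set of call_site pointers can be
probed with a bare address, so it behaves like a map from PC to call site
without storing the key twice.  A C++20 sketch of that idea, illustrative
only, with std::unordered_set standing in for gdb::unordered_set and a
hypothetical site type standing in for call_site:

#include <cstdint>
#include <cstdio>
#include <functional>
#include <unordered_set>

using addr = std::uint64_t;

/* Hypothetical stand-in for call_site.  */
struct site
{
  addr pc;
  int nparams;
};

struct site_hash
{
  using is_transparent = void;

  std::size_t operator() (const site *s) const noexcept
  { return (*this) (s->pc); }

  std::size_t operator() (addr pc) const noexcept
  { return std::hash<addr> () (pc); }
};

struct site_eq
{
  using is_transparent = void;

  bool operator() (const site *a, const site *b) const noexcept
  { return a->pc == b->pc; }

  bool operator() (addr pc, const site *s) const noexcept
  { return pc == s->pc; }

  bool operator() (const site *s, addr pc) const noexcept
  { return pc == s->pc; }
};

static site *
find_site (const std::unordered_set<site *, site_hash, site_eq> &table,
           addr pc)
{
  /* Look up by PC alone, as the converted find_call_site does.  */
  auto it = table.find (pc);
  return it != table.end () ? *it : nullptr;
}

int
main ()
{
  std::unordered_set<site *, site_hash, site_eq> table;

  site s1 {0x1000, 2};
  if (!table.emplace (&s1).second)
    std::puts ("duplicate PC");

  if (site *found = find_site (table, 0x1000))
    std::printf ("call site at 0x%llx has %d params\n",
                 (unsigned long long) found->pc, found->nparams);
}
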
Change-Id: I40a0903253a8589dbdcb75d52ad4d233931f6641
---
gdb/dwarf2/call-site.h | 65 ++++++++++++++++++++++++++----------------
gdb/dwarf2/cu.h | 2 +-
gdb/dwarf2/read.c | 26 ++++++-----------
gdb/symtab.c | 30 +++++++------------
gdb/symtab.h | 5 ++--
5 files changed, 65 insertions(+), 63 deletions(-)
diff --git a/gdb/dwarf2/call-site.h b/gdb/dwarf2/call-site.h
index 0a0c7e83b813..82426c71c1ba 100644
--- a/gdb/dwarf2/call-site.h
+++ b/gdb/dwarf2/call-site.h
@@ -25,6 +25,7 @@
#include "dwarf2/types.h"
#include "../frame.h"
#include "gdbsupport/function-view.h"
+#include "gdbsupport/unordered_set.h"
struct dwarf2_locexpr_baton;
struct dwarf2_per_cu_data;
@@ -168,33 +169,16 @@ struct call_site
: per_cu (per_cu), per_objfile (per_objfile), m_unrelocated_pc (pc)
{}
- static int
- eq (const call_site *a, const call_site *b)
- {
- return a->m_unrelocated_pc == b->m_unrelocated_pc;
- }
+ /* Return the relocated (using the objfile from PER_OBJFILE) address of the
+ first instruction after this call. */
- static hashval_t
- hash (const call_site *a)
- {
- return (hashval_t) a->m_unrelocated_pc;
- }
-
- static int
- eq (const void *a, const void *b)
- {
- return eq ((const call_site *)a, (const call_site *)b);
- }
-
- static hashval_t
- hash (const void *a)
- {
- return hash ((const call_site *)a);
- }
+ CORE_ADDR pc () const;
- /* Return the address of the first instruction after this call. */
+ /* Return the unrelocated address of the first instruction after this
+ call. */
- CORE_ADDR pc () const;
+ unrelocated_addr unrelocated_pc () const noexcept
+ { return m_unrelocated_pc; }
/* Call CALLBACK for each target address. CALLER_FRAME (for
registers) can be NULL if it is not known. This function may
@@ -241,4 +225,37 @@ struct call_site
struct call_site_parameter parameter[];
};
+/* Key hash type to store call_site objects in gdb::unordered_set, identified by
+ their unrelocated PC. */
+
+struct call_site_hash_pc
+{
+ using is_transparent = void;
+
+ std::size_t operator() (const call_site *site) const noexcept
+ { return (*this) (site->unrelocated_pc ()); }
+
+ std::size_t operator() (unrelocated_addr pc) const noexcept
+ { return std::hash<unrelocated_addr> () (pc); }
+};
+
+/* Key equal type to store call_site objects in gdb::unordered_set, identified
+ by their unrelocated PC. */
+
+struct call_site_eq_pc
+{
+ using is_transparent = void;
+
+ bool operator() (const call_site *a, const call_site *b) const noexcept
+ { return (*this) (a->unrelocated_pc (), b); }
+
+ bool operator() (unrelocated_addr pc, const call_site *site) const noexcept
+ { return pc == site->unrelocated_pc (); }
+};
+
+/* Set of call_site objects identified by their unrelocated PC. */
+
+using call_site_htab_t
+ = gdb::unordered_set<call_site *, call_site_hash_pc, call_site_eq_pc>;
+
#endif /* CALL_SITE_H */
diff --git a/gdb/dwarf2/cu.h b/gdb/dwarf2/cu.h
index 23b06dc1fb68..737d3ba9acc3 100644
--- a/gdb/dwarf2/cu.h
+++ b/gdb/dwarf2/cu.h
@@ -175,7 +175,7 @@ struct dwarf2_cu
std::vector<delayed_method_info> method_list;
/* To be copied to symtab->call_site_htab. */
- htab_up call_site_htab;
+ call_site_htab_t call_site_htab;
/* Non-NULL if this CU came from a DWO file.
There is an invariant here that is important to remember:
diff --git a/gdb/dwarf2/read.c b/gdb/dwarf2/read.c
index c543517dbb3d..3abf4e7b47ba 100644
--- a/gdb/dwarf2/read.c
+++ b/gdb/dwarf2/read.c
@@ -10286,7 +10286,6 @@ read_call_site_scope (struct die_info *die, struct dwarf2_cu *cu)
struct objfile *objfile = per_objfile->objfile;
struct gdbarch *gdbarch = objfile->arch ();
struct attribute *attr;
- void **slot;
int nparams;
struct die_info *child_die;
@@ -10306,21 +10305,6 @@ read_call_site_scope (struct die_info *die, struct dwarf2_cu *cu)
}
unrelocated_addr pc = attr->as_address ();
- if (cu->call_site_htab == nullptr)
- cu->call_site_htab.reset (htab_create_alloc (16, call_site::hash,
- call_site::eq, nullptr,
- xcalloc, xfree));
- struct call_site call_site_local (pc, nullptr, nullptr);
- slot = htab_find_slot (cu->call_site_htab.get (), &call_site_local, INSERT);
- if (*slot != NULL)
- {
- complaint (_("Duplicate PC %s for DW_TAG_call_site "
- "DIE %s [in module %s]"),
- paddress (gdbarch, (CORE_ADDR) pc), sect_offset_str (die->sect_off),
- objfile_name (objfile));
- return;
- }
-
/* Count parameters at the caller. */
nparams = 0;
@@ -10345,7 +10329,15 @@ read_call_site_scope (struct die_info *die, struct dwarf2_cu *cu)
struct call_site,
sizeof (*call_site) + sizeof (call_site->parameter[0]) * nparams))
struct call_site (pc, cu->per_cu, per_objfile);
- *slot = call_site;
+
+ if (!cu->call_site_htab.emplace (call_site).second)
+ {
+ complaint (_("Duplicate PC %s for DW_TAG_call_site "
+ "DIE %s [in module %s]"),
+ paddress (gdbarch, (CORE_ADDR) pc), sect_offset_str (die->sect_off),
+ objfile_name (objfile));
+ return;
+ }
/* We never call the destructor of call_site, so we must ensure it is
trivially destructible. */
diff --git a/gdb/symtab.c b/gdb/symtab.c
index 7b11d43bd706..27dc33cc80f9 100644
--- a/gdb/symtab.c
+++ b/gdb/symtab.c
@@ -396,42 +396,35 @@ linetable_entry::pc (const struct objfile *objfile) const
call_site *
compunit_symtab::find_call_site (CORE_ADDR pc) const
{
- if (m_call_site_htab == nullptr)
- return nullptr;
-
CORE_ADDR delta = this->objfile ()->text_section_offset ();
- unrelocated_addr unrelocated_pc = (unrelocated_addr) (pc - delta);
- struct call_site call_site_local (unrelocated_pc, nullptr, nullptr);
- void **slot
- = htab_find_slot (m_call_site_htab, &call_site_local, NO_INSERT);
- if (slot != nullptr)
- return (call_site *) *slot;
+ if (auto it = m_call_site_htab->find (static_cast<unrelocated_addr> (pc - delta));
+ it != m_call_site_htab->end ())
+ return *it;
/* See if the arch knows another PC we should try. On some
platforms, GCC emits a DWARF call site that is offset from the
actual return location. */
struct gdbarch *arch = objfile ()->arch ();
CORE_ADDR new_pc = gdbarch_update_call_site_pc (arch, pc);
+
if (pc == new_pc)
return nullptr;
- unrelocated_pc = (unrelocated_addr) (new_pc - delta);
- call_site new_call_site_local (unrelocated_pc, nullptr, nullptr);
- slot = htab_find_slot (m_call_site_htab, &new_call_site_local, NO_INSERT);
- if (slot == nullptr)
- return nullptr;
+ if (auto it = m_call_site_htab->find (static_cast<unrelocated_addr> (new_pc - delta));
+ it != m_call_site_htab->end ())
+ return *it;
- return (call_site *) *slot;
+ return nullptr;
}
/* See symtab.h. */
void
-compunit_symtab::set_call_site_htab (htab_up call_site_htab)
+compunit_symtab::set_call_site_htab (call_site_htab_t &&call_site_htab)
{
gdb_assert (m_call_site_htab == nullptr);
- m_call_site_htab = call_site_htab.release ();
+ m_call_site_htab = new call_site_htab_t (std::move (call_site_htab));
}
/* See symtab.h. */
@@ -500,8 +493,7 @@ void
compunit_symtab::finalize ()
{
this->forget_cached_source_info ();
- if (m_call_site_htab != nullptr)
- htab_delete (m_call_site_htab);
+ delete m_call_site_htab;
}
/* The relocated address of the minimal symbol, using the section
diff --git a/gdb/symtab.h b/gdb/symtab.h
index aa86a8041211..fbe40729ef51 100644
--- a/gdb/symtab.h
+++ b/gdb/symtab.h
@@ -24,6 +24,7 @@
#include <vector>
#include <string>
#include <set>
+#include "dwarf2/call-site.h"
#include "gdbsupport/gdb_vecs.h"
#include "gdbtypes.h"
#include "gdbsupport/gdb_obstack.h"
@@ -1947,7 +1948,7 @@ struct compunit_symtab
symtab *primary_filetab () const;
/* Set m_call_site_htab. */
- void set_call_site_htab (htab_up call_site_htab);
+ void set_call_site_htab (call_site_htab_t &&call_site_htab);
/* Find call_site info for PC. */
call_site *find_call_site (CORE_ADDR pc) const;
@@ -2014,7 +2015,7 @@ struct compunit_symtab
unsigned int m_epilogue_unwind_valid : 1;
/* struct call_site entries for this compilation unit or NULL. */
- htab_t m_call_site_htab;
+ call_site_htab_t *m_call_site_htab;
/* The macro table for this symtab. Like the blockvector, this
is shared between different symtabs in a given compilation unit.
--
2.47.0
^ permalink raw reply [flat|nested] 27+ messages in thread
* [PATCH v5 25/25] Convert dwarf2_per_objfile::die_type_hash to new hash table
2024-11-04 18:27 [PATCH v5 00/25] Add a C++ hash table to gdbsupport Simon Marchi
` (23 preceding siblings ...)
2024-11-04 18:27 ` [PATCH v5 24/25] Convert dwarf2_cu::call_site_htab " Simon Marchi
@ 2024-11-04 18:27 ` Simon Marchi
2024-11-22 19:10 ` [PATCH v5 00/25] Add a C++ hash table to gdbsupport Tom Tromey
25 siblings, 0 replies; 27+ messages in thread
From: Simon Marchi @ 2024-11-04 18:27 UTC (permalink / raw)
To: gdb-patches; +Cc: Simon Marchi
Convert dwarf2_per_objfile::die_type_hash, which maps (CU, debug info
offset) pairs to `type *`, to `gdb::unordered_map`.
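
Since the key is now a (CU, offset) pair, the patch pairs a small key struct
with a dedicated hash functor.  A minimal sketch of the shape, illustrative
only: std::unordered_map stands in for gdb::unordered_map, and unit /
die_type are hypothetical stand-ins for dwarf2_per_cu_data and struct type.

#include <cstdint>
#include <cstdio>
#include <functional>
#include <unordered_map>

/* Hypothetical stand-ins for dwarf2_per_cu_data and struct type.  */
struct unit {};
struct die_type { const char *name; };

struct unit_and_offset
{
  const unit *cu;
  std::uint64_t offset;

  bool operator== (const unit_and_offset &other) const noexcept
  { return cu == other.cu && offset == other.offset; }
};

struct unit_and_offset_hash
{
  std::size_t operator() (const unit_and_offset &key) const noexcept
  {
    /* Combining by addition mirrors the old htab hash function; any
       reasonable mix of the two parts would do.  */
    return (std::hash<const unit *> () (key.cu)
            + std::hash<std::uint64_t> () (key.offset));
  }
};

static std::unordered_map<unit_and_offset, die_type *, unit_and_offset_hash>
  die_types;

int
main ()
{
  unit cu;
  die_type t {"int"};

  /* emplace reports whether the key was already present, which the
     converted set_die_type uses to keep the "type already set" complaint.  */
  if (!die_types.emplace (unit_and_offset {&cu, 0x2a}, &t).second)
    std::puts ("type already set");

  auto it = die_types.find ({&cu, 0x2a});
  std::printf ("%s\n", it != die_types.end () ? it->second->name : "<none>");
}
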
Change-Id: I5c174af64ee46d38a465008090e812acf03704ec
---
gdb/dwarf2/read.c | 83 ++++-------------------------------------------
gdb/dwarf2/read.h | 40 ++++++++++++++++++++---
2 files changed, 43 insertions(+), 80 deletions(-)
diff --git a/gdb/dwarf2/read.c b/gdb/dwarf2/read.c
index 3abf4e7b47ba..1b5f2734f5d2 100644
--- a/gdb/dwarf2/read.c
+++ b/gdb/dwarf2/read.c
@@ -21915,52 +21915,6 @@ dwarf2_per_objfile::~dwarf2_per_objfile ()
remove_all_cus ();
}
-/* A set of CU "per_cu" pointer, DIE offset, and GDB type pointer.
- We store these in a hash table separate from the DIEs, and preserve them
- when the DIEs are flushed out of cache.
-
- The CU "per_cu" pointer is needed because offset alone is not enough to
- uniquely identify the type. A file may have multiple .debug_types sections,
- or the type may come from a DWO file. Furthermore, while it's more logical
- to use per_cu->section+offset, with Fission the section with the data is in
- the DWO file but we don't know that section at the point we need it.
- We have to use something in dwarf2_per_cu_data (or the pointer to it)
- because we can enter the lookup routine, get_die_type_at_offset, from
- outside this file, and thus won't necessarily have PER_CU->cu.
- Fortunately, PER_CU is stable for the life of the objfile. */
-
-struct dwarf2_per_cu_offset_and_type
-{
- const struct dwarf2_per_cu_data *per_cu;
- sect_offset sect_off;
- struct type *type;
-};
-
-/* Hash function for a dwarf2_per_cu_offset_and_type. */
-
-static hashval_t
-per_cu_offset_and_type_hash (const void *item)
-{
- const struct dwarf2_per_cu_offset_and_type *ofs
- = (const struct dwarf2_per_cu_offset_and_type *) item;
-
- return (uintptr_t) ofs->per_cu + to_underlying (ofs->sect_off);
-}
-
-/* Equality function for a dwarf2_per_cu_offset_and_type. */
-
-static int
-per_cu_offset_and_type_eq (const void *item_lhs, const void *item_rhs)
-{
- const struct dwarf2_per_cu_offset_and_type *ofs_lhs
- = (const struct dwarf2_per_cu_offset_and_type *) item_lhs;
- const struct dwarf2_per_cu_offset_and_type *ofs_rhs
- = (const struct dwarf2_per_cu_offset_and_type *) item_rhs;
-
- return (ofs_lhs->per_cu == ofs_rhs->per_cu
- && ofs_lhs->sect_off == ofs_rhs->sect_off);
-}
-
/* Set the type associated with DIE to TYPE. Save it in CU's hash
table if necessary. For convenience, return TYPE.
@@ -21984,8 +21938,6 @@ set_die_type (struct die_info *die, struct type *type, struct dwarf2_cu *cu,
bool skip_data_location)
{
dwarf2_per_objfile *per_objfile = cu->per_objfile;
- struct dwarf2_per_cu_offset_and_type **slot, ofs;
- struct objfile *objfile = per_objfile->objfile;
struct attribute *attr;
struct dynamic_prop prop;
@@ -22041,24 +21993,13 @@ set_die_type (struct die_info *die, struct type *type, struct dwarf2_cu *cu,
type->add_dyn_prop (DYN_PROP_DATA_LOCATION, prop);
}
- if (per_objfile->die_type_hash == NULL)
- per_objfile->die_type_hash
- = htab_up (htab_create_alloc (127,
- per_cu_offset_and_type_hash,
- per_cu_offset_and_type_eq,
- NULL, xcalloc, xfree));
-
- ofs.per_cu = cu->per_cu;
- ofs.sect_off = die->sect_off;
- ofs.type = type;
- slot = (struct dwarf2_per_cu_offset_and_type **)
- htab_find_slot (per_objfile->die_type_hash.get (), &ofs, INSERT);
- if (*slot)
+ bool inserted
+ = per_objfile->die_type_hash.emplace
+ (per_cu_and_offset {cu->per_cu, die->sect_off}, type).second;
+ if (!inserted)
complaint (_("A problem internal to GDB: DIE %s has type already set"),
sect_offset_str (die->sect_off));
- *slot = XOBNEW (&objfile->objfile_obstack,
- struct dwarf2_per_cu_offset_and_type);
- **slot = ofs;
+
return type;
}
@@ -22070,19 +22011,9 @@ get_die_type_at_offset (sect_offset sect_off,
dwarf2_per_cu_data *per_cu,
dwarf2_per_objfile *per_objfile)
{
- struct dwarf2_per_cu_offset_and_type *slot, ofs;
-
- if (per_objfile->die_type_hash == NULL)
- return NULL;
+ auto it = per_objfile->die_type_hash.find ({per_cu, sect_off});
- ofs.per_cu = per_cu;
- ofs.sect_off = sect_off;
- slot = ((struct dwarf2_per_cu_offset_and_type *)
- htab_find (per_objfile->die_type_hash.get (), &ofs));
- if (slot)
- return slot->type;
- else
- return NULL;
+ return it != per_objfile->die_type_hash.end () ? it->second : nullptr;
}
/* Look up the type for DIE in CU in die_type_hash,
diff --git a/gdb/dwarf2/read.h b/gdb/dwarf2/read.h
index e311309dd5c5..1cc30eb285a0 100644
--- a/gdb/dwarf2/read.h
+++ b/gdb/dwarf2/read.h
@@ -655,6 +655,26 @@ struct type_unit_group_unshareable
struct symtab **symtabs = nullptr;
};
+struct per_cu_and_offset
+{
+ dwarf2_per_cu_data *per_cu;
+ sect_offset offset;
+
+ bool operator== (const per_cu_and_offset &other) const noexcept
+ {
+ return this->per_cu == other.per_cu && this->offset == other.offset;
+ }
+};
+
+struct per_cu_and_offset_hash
+{
+ std::uint64_t operator() (const per_cu_and_offset &key) const noexcept
+ {
+ return (std::hash<dwarf2_per_cu_data *> () (key.per_cu)
+ + std::hash<sect_offset> () (key.offset));
+ }
+};
+
/* Collection of data recorded per objfile.
This hangs off of dwarf2_objfile_data_key.
@@ -729,10 +749,22 @@ struct dwarf2_per_objfile
other objfiles backed by the same BFD. */
struct dwarf2_per_bfd *per_bfd;
- /* Table mapping type DIEs to their struct type *.
- This is nullptr if not allocated yet.
- The mapping is done via (CU/TU + DIE offset) -> type. */
- htab_up die_type_hash;
+ /* A mapping of (CU "per_cu" pointer, DIE offset) to GDB type pointer.
+
+ We store these in a hash table separate from the DIEs, and preserve them
+ when the DIEs are flushed out of cache.
+
+ The CU "per_cu" pointer is needed because offset alone is not enough to
+ uniquely identify the type. A file may have multiple .debug_types sections,
+ or the type may come from a DWO file. Furthermore, while it's more logical
+ to use per_cu->section+offset, with Fission the section with the data is in
+ the DWO file but we don't know that section at the point we need it.
+ We have to use something in dwarf2_per_cu_data (or the pointer to it)
+ because we can enter the lookup routine, get_die_type_at_offset, from
+ outside this file, and thus won't necessarily have PER_CU->cu.
+ Fortunately, PER_CU is stable for the life of the objfile. */
+ gdb::unordered_map<per_cu_and_offset, type *, per_cu_and_offset_hash>
+ die_type_hash;
/* Table containing line_header indexed by offset and offset_in_dwz. */
htab_up line_header_hash;
--
2.47.0
^ permalink raw reply [flat|nested] 27+ messages in thread
* Re: [PATCH v5 00/25] Add a C++ hash table to gdbsupport
2024-11-04 18:27 [PATCH v5 00/25] Add a C++ hash table to gdbsupport Simon Marchi
` (24 preceding siblings ...)
2024-11-04 18:27 ` [PATCH v5 25/25] Convert dwarf2_per_objfile::die_type_hash " Simon Marchi
@ 2024-11-22 19:10 ` Tom Tromey
25 siblings, 0 replies; 27+ messages in thread
From: Tom Tromey @ 2024-11-22 19:10 UTC (permalink / raw)
To: Simon Marchi; +Cc: gdb-patches
>>>>> "Simon" == Simon Marchi <simon.marchi@efficios.com> writes:
Simon> This is v5 of:
Simon> https://inbox.sourceware.org/gdb-patches/20240823184910.883268-1-simon.marchi@efficios.com/T/#mf7bcbcdcff5141ee39fc7c5d02298b6ebaccc11b
Thanks.
Approved-By: Tom Tromey <tom@tromey.com>
Tom
^ permalink raw reply [flat|nested] 27+ messages in thread