From: "François Dumont" <frs.dumont@gmail.com>
To: "libstdc++@gcc.gnu.org" <libstdc++@gcc.gnu.org>,
gcc-patches <gcc-patches@gcc.gnu.org>
Subject: Hashtable comment cleanups & renamings
Date: Fri, 17 May 2019 16:19:00 -0000 [thread overview]
Message-ID: <08e5ed09-2950-09d9-56a5-f33cf1ca60a0@gmail.com> (raw)
[-- Attachment #1: Type: text/plain, Size: 835 bytes --]
Hi
   I got tired of '__n' being used in _Hashtable for many different
purposes: node, bucket, bucket count, bucket hint. It makes the code
difficult to read. This code makes sure that __n is a node except in
some very limited use cases where the method name is clear enough to
tell what __n means.
   So I'd like to commit this patch which only changes that and some
comments before moving forward to more serious stuff. The only code
change is a use of auto return type on _M_allocate_node.
   My main concern is the ChangeLog entry. Is the following entry ok?
   Rename variables and cleanup comments.
   * include/bits/hashtable_policy.h
   * include/bits/hashtable.h
   Tested under Linux x86_64 (even though it could hardly be otherwise)
François
[-- Attachment #2: hashtable_renames.patch --]
[-- Type: text/x-patch, Size: 38046 bytes --]
diff --git a/libstdc++-v3/include/bits/hashtable.h b/libstdc++-v3/include/bits/hashtable.h
index ab24b5bb537..78e6aeed5b1 100644
--- a/libstdc++-v3/include/bits/hashtable.h
+++ b/libstdc++-v3/include/bits/hashtable.h
@@ -253,7 +253,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
_Equal, _H1, _H2, _Hash,
_RehashPolicy, _Traits>;
- using __reuse_or_alloc_node_type =
+ using __reuse_or_alloc_node_gen_t =
__detail::_ReuseOrAllocNode<__node_alloc_type>;
// Metaprogramming for picking apart hash caching.
@@ -278,9 +278,6 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
"Cache the hash code or qualify your functors involved"
" in hash code and bucket index computation with noexcept");
- // Following two static assertions are necessary to guarantee
- // that local_iterator will be default constructible.
-
// When hash codes are cached local iterator inherits from H2 functor
// which must then be default constructible.
static_assert(__if_hash_cached<is_default_constructible<_H2>>::value,
@@ -331,7 +328,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
_RehashPolicy _M_rehash_policy;
// A single bucket used when only need for 1 bucket. Especially
- // interesting in move semantic to leave hashtable with only 1 buckets
+ // interesting in move semantic to leave hashtable with only 1 bucket
// which is not allocated so that we can have those operations noexcept
// qualified.
// Note that we can't leave hashtable with 0 bucket without adding
@@ -350,24 +347,24 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
_M_base_alloc() { return *this; }
__bucket_type*
- _M_allocate_buckets(size_type __n)
+ _M_allocate_buckets(size_type __bkt_count)
{
- if (__builtin_expect(__n == 1, false))
+ if (__builtin_expect(__bkt_count == 1, false))
{
_M_single_bucket = nullptr;
return &_M_single_bucket;
}
- return __hashtable_alloc::_M_allocate_buckets(__n);
+ return __hashtable_alloc::_M_allocate_buckets(__bkt_count);
}
void
- _M_deallocate_buckets(__bucket_type* __bkts, size_type __n)
+ _M_deallocate_buckets(__bucket_type* __bkts, size_type __bkt_count)
{
if (_M_uses_single_bucket(__bkts))
return;
- __hashtable_alloc::_M_deallocate_buckets(__bkts, __n);
+ __hashtable_alloc::_M_deallocate_buckets(__bkts, __bkt_count);
}
void
@@ -394,10 +391,10 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
_M_assign(const _Hashtable&, const _NodeGenerator&);
void
- _M_move_assign(_Hashtable&&, std::true_type);
+ _M_move_assign(_Hashtable&&, true_type);
void
- _M_move_assign(_Hashtable&&, std::false_type);
+ _M_move_assign(_Hashtable&&, false_type);
void
_M_reset() noexcept;
@@ -439,30 +436,31 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
{ }
explicit
- _Hashtable(size_type __n,
+ _Hashtable(size_type __bkt_hint,
const _H1& __hf = _H1(),
const key_equal& __eql = key_equal(),
const allocator_type& __a = allocator_type())
- : _Hashtable(__n, __hf, _H2(), _Hash(), __eql,
+ : _Hashtable(__bkt_hint, __hf, _H2(), _Hash(), __eql,
__key_extract(), __a)
{ }
template<typename _InputIterator>
_Hashtable(_InputIterator __f, _InputIterator __l,
- size_type __n = 0,
+ size_type __bkt_hint = 0,
const _H1& __hf = _H1(),
const key_equal& __eql = key_equal(),
const allocator_type& __a = allocator_type())
- : _Hashtable(__f, __l, __n, __hf, _H2(), _Hash(), __eql,
+ : _Hashtable(__f, __l, __bkt_hint, __hf, _H2(), _Hash(), __eql,
__key_extract(), __a)
{ }
_Hashtable(initializer_list<value_type> __l,
- size_type __n = 0,
+ size_type __bkt_hint = 0,
const _H1& __hf = _H1(),
const key_equal& __eql = key_equal(),
const allocator_type& __a = allocator_type())
- : _Hashtable(__l.begin(), __l.end(), __n, __hf, _H2(), _Hash(), __eql,
+ : _Hashtable(__l.begin(), __l.end(), __bkt_hint,
+ __hf, _H2(), _Hash(), __eql,
__key_extract(), __a)
{ }
@@ -485,7 +483,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
_Hashtable&
operator=(initializer_list<value_type> __l)
{
- __reuse_or_alloc_node_type __roan(_M_begin(), *this);
+ __reuse_or_alloc_node_gen_t __roan(_M_begin(), *this);
_M_before_begin._M_nxt = nullptr;
clear();
this->_M_insert_range(__l.begin(), __l.end(), __roan, __unique_keys());
@@ -557,46 +555,46 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
{ return max_size(); }
size_type
- bucket_size(size_type __n) const
- { return std::distance(begin(__n), end(__n)); }
+ bucket_size(size_type __bkt) const
+ { return std::distance(begin(__bkt), end(__bkt)); }
size_type
bucket(const key_type& __k) const
{ return _M_bucket_index(__k, this->_M_hash_code(__k)); }
local_iterator
- begin(size_type __n)
+ begin(size_type __bkt)
{
- return local_iterator(*this, _M_bucket_begin(__n),
- __n, _M_bucket_count);
+ return local_iterator(*this, _M_bucket_begin(__bkt),
+ __bkt, _M_bucket_count);
}
local_iterator
- end(size_type __n)
- { return local_iterator(*this, nullptr, __n, _M_bucket_count); }
+ end(size_type __bkt)
+ { return local_iterator(*this, nullptr, __bkt, _M_bucket_count); }
const_local_iterator
- begin(size_type __n) const
+ begin(size_type __bkt) const
{
- return const_local_iterator(*this, _M_bucket_begin(__n),
- __n, _M_bucket_count);
+ return const_local_iterator(*this, _M_bucket_begin(__bkt),
+ __bkt, _M_bucket_count);
}
const_local_iterator
- end(size_type __n) const
- { return const_local_iterator(*this, nullptr, __n, _M_bucket_count); }
+ end(size_type __bkt) const
+ { return const_local_iterator(*this, nullptr, __bkt, _M_bucket_count); }
// DR 691.
const_local_iterator
- cbegin(size_type __n) const
+ cbegin(size_type __bkt) const
{
- return const_local_iterator(*this, _M_bucket_begin(__n),
- __n, _M_bucket_count);
+ return const_local_iterator(*this, _M_bucket_begin(__bkt),
+ __bkt, _M_bucket_count);
}
const_local_iterator
- cend(size_type __n) const
- { return const_local_iterator(*this, nullptr, __n, _M_bucket_count); }
+ cend(size_type __bkt) const
+ { return const_local_iterator(*this, nullptr, __bkt, _M_bucket_count); }
float
load_factor() const noexcept
@@ -686,22 +684,22 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
template<typename... _Args>
std::pair<iterator, bool>
- _M_emplace(std::true_type, _Args&&... __args);
+ _M_emplace(true_type, _Args&&... __args);
template<typename... _Args>
iterator
- _M_emplace(std::false_type __uk, _Args&&... __args)
+ _M_emplace(false_type __uk, _Args&&... __args)
{ return _M_emplace(cend(), __uk, std::forward<_Args>(__args)...); }
// Emplace with hint, useless when keys are unique.
template<typename... _Args>
iterator
- _M_emplace(const_iterator, std::true_type __uk, _Args&&... __args)
+ _M_emplace(const_iterator, true_type __uk, _Args&&... __args)
{ return _M_emplace(__uk, std::forward<_Args>(__args)...).first; }
template<typename... _Args>
iterator
- _M_emplace(const_iterator, std::false_type, _Args&&... __args);
+ _M_emplace(const_iterator, false_type, _Args&&... __args);
template<typename _Arg, typename _NodeGenerator>
std::pair<iterator, bool>
@@ -733,10 +731,10 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
const _NodeGenerator&, false_type);
size_type
- _M_erase(std::true_type, const key_type&);
+ _M_erase(true_type, const key_type&);
size_type
- _M_erase(std::false_type, const key_type&);
+ _M_erase(false_type, const key_type&);
iterator
_M_erase(size_type __bkt, __node_base* __prev_n, __node_type* __n);
@@ -777,8 +775,9 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
void
clear() noexcept;
- // Set number of buckets to be appropriate for container of n element.
- void rehash(size_type __n);
+ // Set number of buckets keeping it appropriate for container's number
+ // of elements.
+ void rehash(size_type __bkt_count);
// DR 1189.
// reserve, if present, comes from _Rehash_base.
@@ -918,14 +917,14 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
private:
// Helper rehash method used when keys are unique.
- void _M_rehash_aux(size_type __n, std::true_type);
+ void _M_rehash_aux(size_type __bkt_count, true_type);
// Helper rehash method used when keys can be non-unique.
- void _M_rehash_aux(size_type __n, std::false_type);
+ void _M_rehash_aux(size_type __bkt_count, false_type);
// Unconditionally change size of bucket array to n, restore
// hash policy state to __state on exception.
- void _M_rehash(size_type __n, const __rehash_state& __state);
+ void _M_rehash(size_type __bkt_count, const __rehash_state& __state);
};
@@ -1044,7 +1043,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
// Reuse allocated buckets and nodes.
_M_assign_elements(__ht,
- [](const __reuse_or_alloc_node_type& __roan, const __node_type* __n)
+ [](const __reuse_or_alloc_node_gen_t& __roan, const __node_type* __n)
{ return __roan(__n->_M_v()); });
return *this;
}
@@ -1078,7 +1077,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
__hashtable_base::operator=(std::forward<_Ht>(__ht));
_M_element_count = __ht._M_element_count;
_M_rehash_policy = __ht._M_rehash_policy;
- __reuse_or_alloc_node_type __roan(_M_begin(), *this);
+ __reuse_or_alloc_node_gen_t __roan(_M_begin(), *this);
_M_before_begin._M_nxt = nullptr;
_M_assign(__ht,
[&__node_gen, &__roan](__node_type* __n)
@@ -1175,7 +1174,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
void
_Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
_H1, _H2, _Hash, _RehashPolicy, _Traits>::
- _M_move_assign(_Hashtable&& __ht, std::true_type)
+ _M_move_assign(_Hashtable&& __ht, true_type)
{
this->_M_deallocate_nodes(_M_begin());
_M_deallocate_buckets();
@@ -1207,15 +1206,15 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
void
_Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
_H1, _H2, _Hash, _RehashPolicy, _Traits>::
- _M_move_assign(_Hashtable&& __ht, std::false_type)
+ _M_move_assign(_Hashtable&& __ht, false_type)
{
if (__ht._M_node_allocator() == this->_M_node_allocator())
- _M_move_assign(std::move(__ht), std::true_type());
+ _M_move_assign(std::move(__ht), true_type());
else
{
// Can't move memory, move elements then.
_M_assign_elements(std::move(__ht),
- [](const __reuse_or_alloc_node_type& __roan, __node_type* __n)
+ [](const __reuse_or_alloc_node_gen_t& __roan, __node_type* __n)
{ return __roan(std::move_if_noexcept(__n->_M_v())); });
__ht.clear();
}
@@ -1415,9 +1414,9 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
-> iterator
{
__hash_code __code = this->_M_hash_code(__k);
- std::size_t __n = _M_bucket_index(__k, __code);
- __node_type* __p = _M_find_node(__n, __k, __code);
- return __p ? iterator(__p) : end();
+ std::size_t __bkt = _M_bucket_index(__k, __code);
+ __node_type* __n = _M_find_node(__bkt, __k, __code);
+ return __n ? iterator(__n) : end();
}
template<typename _Key, typename _Value,
@@ -1431,9 +1430,9 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
-> const_iterator
{
__hash_code __code = this->_M_hash_code(__k);
- std::size_t __n = _M_bucket_index(__k, __code);
- __node_type* __p = _M_find_node(__n, __k, __code);
- return __p ? const_iterator(__p) : end();
+ std::size_t __bkt = _M_bucket_index(__k, __code);
+ __node_type* __n = _M_find_node(__bkt, __k, __code);
+ return __n ? const_iterator(__n) : end();
}
template<typename _Key, typename _Value,
@@ -1447,22 +1446,22 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
-> size_type
{
__hash_code __code = this->_M_hash_code(__k);
- std::size_t __n = _M_bucket_index(__k, __code);
- __node_type* __p = _M_bucket_begin(__n);
- if (!__p)
+ std::size_t __bkt = _M_bucket_index(__k, __code);
+ __node_type* __n = _M_bucket_begin(__bkt);
+ if (!__n)
return 0;
std::size_t __result = 0;
- for (;; __p = __p->_M_next())
+ for (;; __n = __n->_M_next())
{
- if (this->_M_equals(__k, __code, __p))
+ if (this->_M_equals(__k, __code, __n))
++__result;
else if (__result)
// All equivalent values are next to each other, if we
// found a non-equivalent value after an equivalent one it
// means that we won't find any new equivalent value.
break;
- if (!__p->_M_nxt || _M_bucket_index(__p->_M_next()) != __n)
+ if (!__n->_M_nxt || _M_bucket_index(__n->_M_next()) != __bkt)
break;
}
return __result;
@@ -1479,17 +1478,17 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
-> pair<iterator, iterator>
{
__hash_code __code = this->_M_hash_code(__k);
- std::size_t __n = _M_bucket_index(__k, __code);
- __node_type* __p = _M_find_node(__n, __k, __code);
+ std::size_t __bkt = _M_bucket_index(__k, __code);
+ __node_type* __n = _M_find_node(__bkt, __k, __code);
- if (__p)
+ if (__n)
{
- __node_type* __p1 = __p->_M_next();
- while (__p1 && _M_bucket_index(__p1) == __n
- && this->_M_equals(__k, __code, __p1))
- __p1 = __p1->_M_next();
+ __node_type* __n1 = __n->_M_next();
+ while (__n1 && _M_bucket_index(__n1) == __bkt
+ && this->_M_equals(__k, __code, __n1))
+ __n1 = __n1->_M_next();
- return std::make_pair(iterator(__p), iterator(__p1));
+ return std::make_pair(iterator(__n), iterator(__n1));
}
else
return std::make_pair(end(), end());
@@ -1506,23 +1505,23 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
-> pair<const_iterator, const_iterator>
{
__hash_code __code = this->_M_hash_code(__k);
- std::size_t __n = _M_bucket_index(__k, __code);
- __node_type* __p = _M_find_node(__n, __k, __code);
+ std::size_t __bkt = _M_bucket_index(__k, __code);
+ __node_type* __n = _M_find_node(__bkt, __k, __code);
- if (__p)
+ if (__n)
{
- __node_type* __p1 = __p->_M_next();
- while (__p1 && _M_bucket_index(__p1) == __n
- && this->_M_equals(__k, __code, __p1))
- __p1 = __p1->_M_next();
+ __node_type* __n1 = __n->_M_next();
+ while (__n1 && _M_bucket_index(__n1) == __bkt
+ && this->_M_equals(__k, __code, __n1))
+ __n1 = __n1->_M_next();
- return std::make_pair(const_iterator(__p), const_iterator(__p1));
+ return std::make_pair(const_iterator(__n), const_iterator(__n1));
}
else
return std::make_pair(end(), end());
}
- // Find the node whose key compares equal to k in the bucket n.
+ // Find the node whose key compares equal to k in the bucket bkt.
// Return nullptr if no node is found.
template<typename _Key, typename _Value,
typename _Alloc, typename _ExtractKey, typename _Equal,
@@ -1531,23 +1530,23 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
auto
_Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
_H1, _H2, _Hash, _RehashPolicy, _Traits>::
- _M_find_before_node(size_type __n, const key_type& __k,
+ _M_find_before_node(size_type __bkt, const key_type& __k,
__hash_code __code) const
-> __node_base*
{
- __node_base* __prev_p = _M_buckets[__n];
- if (!__prev_p)
+ __node_base* __prev_n = _M_buckets[__bkt];
+ if (!__prev_n)
return nullptr;
- for (__node_type* __p = static_cast<__node_type*>(__prev_p->_M_nxt);;
- __p = __p->_M_next())
+ for (__node_type* __n = static_cast<__node_type*>(__prev_n->_M_nxt);;
+ __n = __n->_M_next())
{
- if (this->_M_equals(__k, __code, __p))
- return __prev_p;
+ if (this->_M_equals(__k, __code, __n))
+ return __prev_n;
- if (!__p->_M_nxt || _M_bucket_index(__p->_M_next()) != __n)
+ if (!__n->_M_nxt || _M_bucket_index(__n->_M_next()) != __bkt)
break;
- __prev_p = __p;
+ __prev_n = __n;
}
return nullptr;
}
@@ -1631,11 +1630,12 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
auto
_Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
_H1, _H2, _Hash, _RehashPolicy, _Traits>::
- _M_emplace(std::true_type, _Args&&... __args)
+ _M_emplace(true_type, _Args&&... __args)
-> pair<iterator, bool>
{
// First build the node to get access to the hash code
- __node_type* __node = this->_M_allocate_node(std::forward<_Args>(__args)...);
+ __node_type* __node
+ = this->_M_allocate_node(std::forward<_Args>(__args)...);
const key_type& __k = this->_M_extract()(__node->_M_v());
__hash_code __code;
__try
@@ -1649,11 +1649,11 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
}
size_type __bkt = _M_bucket_index(__k, __code);
- if (__node_type* __p = _M_find_node(__bkt, __k, __code))
+ if (__node_type* __n = _M_find_node(__bkt, __k, __code))
{
// There is already an equivalent node, no insertion
this->_M_deallocate_node(__node);
- return std::make_pair(iterator(__p), false);
+ return std::make_pair(iterator(__n), false);
}
// Insert the node
@@ -1669,7 +1669,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
auto
_Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
_H1, _H2, _Hash, _RehashPolicy, _Traits>::
- _M_emplace(const_iterator __hint, std::false_type, _Args&&... __args)
+ _M_emplace(const_iterator __hint, false_type, _Args&&... __args)
-> iterator
{
// First build the node to get its hash code.
@@ -1711,7 +1711,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
if (__do_rehash.first)
{
_M_rehash(__do_rehash.second, __saved_state);
- __bkt = _M_bucket_index(this->_M_extract()(__node->_M_v()), __code);
+ __bkt
+ = _M_bucket_index(this->_M_extract()(__node->_M_v()), __code);
}
this->_M_store_code(__node, __code);
@@ -1896,7 +1897,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
auto
_Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
_H1, _H2, _Hash, _RehashPolicy, _Traits>::
- _M_erase(std::true_type, const key_type& __k)
+ _M_erase(true_type, const key_type& __k)
-> size_type
{
__hash_code __code = this->_M_hash_code(__k);
@@ -1920,7 +1921,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
auto
_Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
_H1, _H2, _Hash, _RehashPolicy, _Traits>::
- _M_erase(std::false_type, const key_type& __k)
+ _M_erase(false_type, const key_type& __k)
-> size_type
{
__hash_code __code = this->_M_hash_code(__k);
@@ -2038,16 +2039,16 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
void
_Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
_H1, _H2, _Hash, _RehashPolicy, _Traits>::
- rehash(size_type __n)
+ rehash(size_type __bkt_count)
{
const __rehash_state& __saved_state = _M_rehash_policy._M_state();
- std::size_t __buckets
+ __bkt_count
= std::max(_M_rehash_policy._M_bkt_for_elements(_M_element_count + 1),
- __n);
- __buckets = _M_rehash_policy._M_next_bkt(__buckets);
+ __bkt_count);
+ __bkt_count = _M_rehash_policy._M_next_bkt(__bkt_count);
- if (__buckets != _M_bucket_count)
- _M_rehash(__buckets, __saved_state);
+ if (__bkt_count != _M_bucket_count)
+ _M_rehash(__bkt_count, __saved_state);
else
// No rehash, restore previous state to keep it consistent with
// container state.
@@ -2061,11 +2062,11 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
void
_Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
_H1, _H2, _Hash, _RehashPolicy, _Traits>::
- _M_rehash(size_type __n, const __rehash_state& __state)
+ _M_rehash(size_type __bkt_count, const __rehash_state& __state)
{
__try
{
- _M_rehash_aux(__n, __unique_keys());
+ _M_rehash_aux(__bkt_count, __unique_keys());
}
__catch(...)
{
@@ -2084,35 +2085,36 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
void
_Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
_H1, _H2, _Hash, _RehashPolicy, _Traits>::
- _M_rehash_aux(size_type __n, std::true_type)
+ _M_rehash_aux(size_type __bkt_count, true_type)
{
- __bucket_type* __new_buckets = _M_allocate_buckets(__n);
- __node_type* __p = _M_begin();
+ __bucket_type* __new_buckets = _M_allocate_buckets(__bkt_count);
+ __node_type* __n = _M_begin();
_M_before_begin._M_nxt = nullptr;
std::size_t __bbegin_bkt = 0;
- while (__p)
+ while (__n)
{
- __node_type* __next = __p->_M_next();
- std::size_t __bkt = __hash_code_base::_M_bucket_index(__p, __n);
+ __node_type* __next = __n->_M_next();
+ std::size_t __bkt
+ = __hash_code_base::_M_bucket_index(__n, __bkt_count);
if (!__new_buckets[__bkt])
{
- __p->_M_nxt = _M_before_begin._M_nxt;
- _M_before_begin._M_nxt = __p;
+ __n->_M_nxt = _M_before_begin._M_nxt;
+ _M_before_begin._M_nxt = __n;
__new_buckets[__bkt] = &_M_before_begin;
- if (__p->_M_nxt)
- __new_buckets[__bbegin_bkt] = __p;
+ if (__n->_M_nxt)
+ __new_buckets[__bbegin_bkt] = __n;
__bbegin_bkt = __bkt;
}
else
{
- __p->_M_nxt = __new_buckets[__bkt]->_M_nxt;
- __new_buckets[__bkt]->_M_nxt = __p;
+ __n->_M_nxt = __new_buckets[__bkt]->_M_nxt;
+ __new_buckets[__bkt]->_M_nxt = __n;
}
- __p = __next;
+ __n = __next;
}
_M_deallocate_buckets();
- _M_bucket_count = __n;
+ _M_bucket_count = __bkt_count;
_M_buckets = __new_buckets;
}
@@ -2125,29 +2127,30 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
void
_Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
_H1, _H2, _Hash, _RehashPolicy, _Traits>::
- _M_rehash_aux(size_type __n, std::false_type)
+ _M_rehash_aux(size_type __bkt_count, false_type)
{
- __bucket_type* __new_buckets = _M_allocate_buckets(__n);
+ __bucket_type* __new_buckets = _M_allocate_buckets(__bkt_count);
- __node_type* __p = _M_begin();
+ __node_type* __n = _M_begin();
_M_before_begin._M_nxt = nullptr;
std::size_t __bbegin_bkt = 0;
std::size_t __prev_bkt = 0;
- __node_type* __prev_p = nullptr;
+ __node_type* __prev_n = nullptr;
bool __check_bucket = false;
- while (__p)
+ while (__n)
{
- __node_type* __next = __p->_M_next();
- std::size_t __bkt = __hash_code_base::_M_bucket_index(__p, __n);
+ __node_type* __next = __n->_M_next();
+ std::size_t __bkt
+ = __hash_code_base::_M_bucket_index(__n, __bkt_count);
- if (__prev_p && __prev_bkt == __bkt)
+ if (__prev_n && __prev_bkt == __bkt)
{
// Previous insert was already in this bucket, we insert after
// the previously inserted one to preserve equivalent elements
// relative order.
- __p->_M_nxt = __prev_p->_M_nxt;
- __prev_p->_M_nxt = __p;
+ __n->_M_nxt = __prev_n->_M_nxt;
+ __prev_n->_M_nxt = __n;
// Inserting after a node in a bucket require to check that we
// haven't change the bucket last node, in this case next
@@ -2162,47 +2165,48 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
{
// Check if we shall update the next bucket because of
// insertions into __prev_bkt bucket.
- if (__prev_p->_M_nxt)
+ if (__prev_n->_M_nxt)
{
std::size_t __next_bkt
- = __hash_code_base::_M_bucket_index(__prev_p->_M_next(),
- __n);
+ = __hash_code_base::_M_bucket_index(__prev_n->_M_next(),
+ __bkt_count);
if (__next_bkt != __prev_bkt)
- __new_buckets[__next_bkt] = __prev_p;
+ __new_buckets[__next_bkt] = __prev_n;
}
__check_bucket = false;
}
if (!__new_buckets[__bkt])
{
- __p->_M_nxt = _M_before_begin._M_nxt;
- _M_before_begin._M_nxt = __p;
+ __n->_M_nxt = _M_before_begin._M_nxt;
+ _M_before_begin._M_nxt = __n;
__new_buckets[__bkt] = &_M_before_begin;
- if (__p->_M_nxt)
- __new_buckets[__bbegin_bkt] = __p;
+ if (__n->_M_nxt)
+ __new_buckets[__bbegin_bkt] = __n;
__bbegin_bkt = __bkt;
}
else
{
- __p->_M_nxt = __new_buckets[__bkt]->_M_nxt;
- __new_buckets[__bkt]->_M_nxt = __p;
+ __n->_M_nxt = __new_buckets[__bkt]->_M_nxt;
+ __new_buckets[__bkt]->_M_nxt = __n;
}
}
- __prev_p = __p;
+ __prev_n = __n;
__prev_bkt = __bkt;
- __p = __next;
+ __n = __next;
}
- if (__check_bucket && __prev_p->_M_nxt)
+ if (__check_bucket && __prev_n->_M_nxt)
{
std::size_t __next_bkt
- = __hash_code_base::_M_bucket_index(__prev_p->_M_next(), __n);
+ = __hash_code_base::_M_bucket_index(__prev_n->_M_next(),
+ __bkt_count);
if (__next_bkt != __prev_bkt)
- __new_buckets[__next_bkt] = __prev_p;
+ __new_buckets[__next_bkt] = __prev_n;
}
_M_deallocate_buckets();
- _M_bucket_count = __n;
+ _M_bucket_count = __bkt_count;
_M_buckets = __new_buckets;
}
diff --git a/libstdc++-v3/include/bits/hashtable_policy.h b/libstdc++-v3/include/bits/hashtable_policy.h
index a4d2a97f4f3..bb2e7b762ff 100644
--- a/libstdc++-v3/include/bits/hashtable_policy.h
+++ b/libstdc++-v3/include/bits/hashtable_policy.h
@@ -181,7 +181,7 @@ namespace __detail
* @tparam _Cache_hash_code Boolean value. True if the value of
* the hash function is stored along with the value. This is a
* time-space tradeoff. Storing it may improve lookup speed by
- * reducing the number of times we need to call the _Equal
+ * reducing the number of times we need to call the _Hash
* function.
*
* @tparam _Constant_iterators Boolean value. True if iterator and
@@ -291,8 +291,8 @@ namespace __detail
__node_type* _M_cur;
- _Node_iterator_base(__node_type* __p) noexcept
- : _M_cur(__p) { }
+ _Node_iterator_base(__node_type* __n) noexcept
+ : _M_cur(__n) { }
void
_M_incr() noexcept
@@ -337,8 +337,8 @@ namespace __detail
: __base_type(0) { }
explicit
- _Node_iterator(__node_type* __p) noexcept
- : __base_type(__p) { }
+ _Node_iterator(__node_type* __n) noexcept
+ : __base_type(__n) { }
reference
operator*() const noexcept
@@ -385,8 +385,8 @@ namespace __detail
: __base_type(0) { }
explicit
- _Node_const_iterator(__node_type* __p) noexcept
- : __base_type(__p) { }
+ _Node_const_iterator(__node_type* __n) noexcept
+ : __base_type(__n) { }
_Node_const_iterator(const _Node_iterator<_Value, __constant_iterators,
__cache>& __x) noexcept
@@ -444,7 +444,7 @@ namespace __detail
/// smallest prime that keeps the load factor small enough.
struct _Prime_rehash_policy
{
- using __has_load_factor = std::true_type;
+ using __has_load_factor = true_type;
_Prime_rehash_policy(float __z = 1.0) noexcept
: _M_max_load_factor(__z), _M_next_resize(0) { }
@@ -521,7 +521,7 @@ namespace __detail
/// operations.
struct _Power2_rehash_policy
{
- using __has_load_factor = std::true_type;
+ using __has_load_factor = true_type;
_Power2_rehash_policy(float __z = 1.0) noexcept
: _M_max_load_factor(__z), _M_next_resize(0) { }
@@ -705,18 +705,18 @@ namespace __detail
{
__hashtable* __h = static_cast<__hashtable*>(this);
__hash_code __code = __h->_M_hash_code(__k);
- std::size_t __n = __h->_M_bucket_index(__k, __code);
- __node_type* __p = __h->_M_find_node(__n, __k, __code);
+ std::size_t __bkt = __h->_M_bucket_index(__k, __code);
+ __node_type* __n = __h->_M_find_node(__bkt, __k, __code);
- if (!__p)
+ if (!__n)
{
- __p = __h->_M_allocate_node(std::piecewise_construct,
+ __n = __h->_M_allocate_node(std::piecewise_construct,
std::tuple<const key_type&>(__k),
std::tuple<>());
- return __h->_M_insert_unique_node(__n, __code, __p)->second;
+ return __h->_M_insert_unique_node(__bkt, __code, __n)->second;
}
- return __p->_M_v().second;
+ return __n->_M_v().second;
}
template<typename _Key, typename _Pair, typename _Alloc, typename _Equal,
@@ -730,18 +730,18 @@ namespace __detail
{
__hashtable* __h = static_cast<__hashtable*>(this);
__hash_code __code = __h->_M_hash_code(__k);
- std::size_t __n = __h->_M_bucket_index(__k, __code);
- __node_type* __p = __h->_M_find_node(__n, __k, __code);
+ std::size_t __bkt = __h->_M_bucket_index(__k, __code);
+ __node_type* __n = __h->_M_find_node(__bkt, __k, __code);
- if (!__p)
+ if (!__n)
{
- __p = __h->_M_allocate_node(std::piecewise_construct,
+ __n = __h->_M_allocate_node(std::piecewise_construct,
std::forward_as_tuple(std::move(__k)),
std::tuple<>());
- return __h->_M_insert_unique_node(__n, __code, __p)->second;
+ return __h->_M_insert_unique_node(__bkt, __code, __n)->second;
}
- return __p->_M_v().second;
+ return __n->_M_v().second;
}
template<typename _Key, typename _Pair, typename _Alloc, typename _Equal,
@@ -755,12 +755,12 @@ namespace __detail
{
__hashtable* __h = static_cast<__hashtable*>(this);
__hash_code __code = __h->_M_hash_code(__k);
- std::size_t __n = __h->_M_bucket_index(__k, __code);
- __node_type* __p = __h->_M_find_node(__n, __k, __code);
+ std::size_t __bkt = __h->_M_bucket_index(__k, __code);
+ __node_type* __n = __h->_M_find_node(__bkt, __k, __code);
- if (!__p)
+ if (!__n)
__throw_out_of_range(__N("_Map_base::at"));
- return __p->_M_v().second;
+ return __n->_M_v().second;
}
template<typename _Key, typename _Pair, typename _Alloc, typename _Equal,
@@ -774,12 +774,12 @@ namespace __detail
{
const __hashtable* __h = static_cast<const __hashtable*>(this);
__hash_code __code = __h->_M_hash_code(__k);
- std::size_t __n = __h->_M_bucket_index(__k, __code);
- __node_type* __p = __h->_M_find_node(__n, __k, __code);
+ std::size_t __bkt = __h->_M_bucket_index(__k, __code);
+ __node_type* __n = __h->_M_find_node(__bkt, __k, __code);
- if (!__p)
+ if (!__n)
__throw_out_of_range(__N("_Map_base::at"));
- return __p->_M_v().second;
+ return __n->_M_v().second;
}
/**
@@ -1041,7 +1041,7 @@ namespace __detail
typename _H1, typename _H2, typename _Hash,
typename _RehashPolicy, typename _Traits,
typename =
- __detected_or_t<std::false_type, __has_load_factor, _RehashPolicy>>
+ __detected_or_t<false_type, __has_load_factor, _RehashPolicy>>
struct _Rehash_base;
/// Specialization when rehash policy doesn't provide load factor management.
@@ -1051,7 +1051,7 @@ namespace __detail
typename _RehashPolicy, typename _Traits>
struct _Rehash_base<_Key, _Value, _Alloc, _ExtractKey, _Equal,
_H1, _H2, _Hash, _RehashPolicy, _Traits,
- std::false_type>
+ false_type>
{
};
@@ -1062,7 +1062,7 @@ namespace __detail
typename _RehashPolicy, typename _Traits>
struct _Rehash_base<_Key, _Value, _Alloc, _ExtractKey, _Equal,
_H1, _H2, _Hash, _RehashPolicy, _Traits,
- std::true_type>
+ true_type>
{
using __hashtable = _Hashtable<_Key, _Value, _Alloc, _ExtractKey,
_Equal, _H1, _H2, _Hash,
@@ -1199,14 +1199,15 @@ namespace __detail
{ return 0; }
std::size_t
- _M_bucket_index(const _Key& __k, __hash_code, std::size_t __n) const
- { return _M_ranged_hash()(__k, __n); }
+ _M_bucket_index(const _Key& __k, __hash_code,
+ std::size_t __bkt_count) const
+ { return _M_ranged_hash()(__k, __bkt_count); }
std::size_t
- _M_bucket_index(const __node_type* __p, std::size_t __n) const
+ _M_bucket_index(const __node_type* __n, std::size_t __bkt_count) const
noexcept( noexcept(declval<const _Hash&>()(declval<const _Key&>(),
(std::size_t)0)) )
- { return _M_ranged_hash()(_M_extract()(__p->_M_v()), __n); }
+ { return _M_ranged_hash()(_M_extract()(__n->_M_v()), __bkt_count); }
void
_M_store_code(__node_type*, __hash_code) const
@@ -1290,15 +1291,16 @@ namespace __detail
}
std::size_t
- _M_bucket_index(const _Key&, __hash_code __c, std::size_t __n) const
- { return _M_h2()(__c, __n); }
+ _M_bucket_index(const _Key&, __hash_code __c,
+ std::size_t __bkt_count) const
+ { return _M_h2()(__c, __bkt_count); }
std::size_t
- _M_bucket_index(const __node_type* __p, std::size_t __n) const
+ _M_bucket_index(const __node_type* __n, std::size_t __bkt_count) const
noexcept( noexcept(declval<const _H1&>()(declval<const _Key&>()))
&& noexcept(declval<const _H2&>()((__hash_code)0,
(std::size_t)0)) )
- { return _M_h2()(_M_h1()(_M_extract()(__p->_M_v())), __n); }
+ { return _M_h2()(_M_h1()(_M_extract()(__n->_M_v())), __bkt_count); }
void
_M_store_code(__node_type*, __hash_code) const
@@ -1375,14 +1377,14 @@ namespace __detail
std::size_t
_M_bucket_index(const _Key&, __hash_code __c,
- std::size_t __n) const
- { return _M_h2()(__c, __n); }
+ std::size_t __bkt_count) const
+ { return _M_h2()(__c, __bkt_count); }
std::size_t
- _M_bucket_index(const __node_type* __p, std::size_t __n) const
+ _M_bucket_index(const __node_type* __n, std::size_t __bkt_count) const
noexcept( noexcept(declval<const _H2&>()((__hash_code)0,
(std::size_t)0)) )
- { return _M_h2()(__p->_M_hash_code, __n); }
+ { return _M_h2()(__n->_M_hash_code, __bkt_count); }
void
_M_store_code(__node_type* __n, __hash_code __c) const
@@ -1425,10 +1427,10 @@ namespace __detail
_Local_iterator_base() = default;
_Local_iterator_base(const __hash_code_base& __base,
- _Hash_node<_Value, true>* __p,
+ _Hash_node<_Value, true>* __n,
std::size_t __bkt, std::size_t __bkt_count)
: __base_type(__base._M_h2()),
- _M_cur(__p), _M_bucket(__bkt), _M_bucket_count(__bkt_count) { }
+ _M_cur(__n), _M_bucket(__bkt), _M_bucket_count(__bkt_count) { }
void
_M_incr()
@@ -1507,9 +1509,9 @@ namespace __detail
_Local_iterator_base() : _M_bucket_count(-1) { }
_Local_iterator_base(const __hash_code_base& __base,
- _Hash_node<_Value, false>* __p,
+ _Hash_node<_Value, false>* __n,
std::size_t __bkt, std::size_t __bkt_count)
- : _M_cur(__p), _M_bucket(__bkt), _M_bucket_count(__bkt_count)
+ : _M_cur(__n), _M_bucket(__bkt), _M_bucket_count(__bkt_count)
{ _M_init(__base); }
~_Local_iterator_base()
@@ -1615,9 +1617,9 @@ namespace __detail
_Local_iterator() = default;
_Local_iterator(const __hash_code_base& __base,
- _Hash_node<_Value, __cache>* __p,
+ _Hash_node<_Value, __cache>* __n,
std::size_t __bkt, std::size_t __bkt_count)
- : __base_type(__base, __p, __bkt, __bkt_count)
+ : __base_type(__base, __n, __bkt, __bkt_count)
{ }
reference
@@ -1667,9 +1669,9 @@ namespace __detail
_Local_const_iterator() = default;
_Local_const_iterator(const __hash_code_base& __base,
- _Hash_node<_Value, __cache>* __p,
+ _Hash_node<_Value, __cache>* __n,
std::size_t __bkt, std::size_t __bkt_count)
- : __base_type(__base, __p, __bkt, __bkt_count)
+ : __base_type(__base, __n, __bkt, __bkt_count)
{ }
_Local_const_iterator(const _Local_iterator<_Key, _Value, _ExtractKey,
@@ -2025,18 +2027,19 @@ namespace __detail
_M_deallocate_nodes(__node_type* __n);
__bucket_type*
- _M_allocate_buckets(std::size_t __n);
+ _M_allocate_buckets(std::size_t __bkt_count);
void
- _M_deallocate_buckets(__bucket_type*, std::size_t __n);
+ _M_deallocate_buckets(__bucket_type*, std::size_t __bkt_count);
};
// Definitions of class template _Hashtable_alloc's out-of-line member
// functions.
template<typename _NodeAlloc>
template<typename... _Args>
- typename _Hashtable_alloc<_NodeAlloc>::__node_type*
+ auto
_Hashtable_alloc<_NodeAlloc>::_M_allocate_node(_Args&&... __args)
+ -> __node_type*
{
auto __nptr = __node_alloc_traits::allocate(_M_node_allocator(), 1);
__node_type* __n = std::__to_address(__nptr);
@@ -2087,25 +2090,25 @@ namespace __detail
template<typename _NodeAlloc>
typename _Hashtable_alloc<_NodeAlloc>::__bucket_type*
- _Hashtable_alloc<_NodeAlloc>::_M_allocate_buckets(std::size_t __n)
+ _Hashtable_alloc<_NodeAlloc>::_M_allocate_buckets(std::size_t __bkt_count)
{
__bucket_alloc_type __alloc(_M_node_allocator());
- auto __ptr = __bucket_alloc_traits::allocate(__alloc, __n);
+ auto __ptr = __bucket_alloc_traits::allocate(__alloc, __bkt_count);
__bucket_type* __p = std::__to_address(__ptr);
- __builtin_memset(__p, 0, __n * sizeof(__bucket_type));
+ __builtin_memset(__p, 0, __bkt_count * sizeof(__bucket_type));
return __p;
}
template<typename _NodeAlloc>
void
_Hashtable_alloc<_NodeAlloc>::_M_deallocate_buckets(__bucket_type* __bkts,
- std::size_t __n)
+ std::size_t __bkt_count)
{
typedef typename __bucket_alloc_traits::pointer _Ptr;
auto __ptr = std::pointer_traits<_Ptr>::pointer_to(*__bkts);
__bucket_alloc_type __alloc(_M_node_allocator());
- __bucket_alloc_traits::deallocate(__alloc, __ptr, __n);
+ __bucket_alloc_traits::deallocate(__alloc, __ptr, __bkt_count);
}
//@} hashtable-detail
next reply other threads:[~2019-05-17 16:19 UTC|newest]
Thread overview: 6+ messages / expand[flat|nested] mbox.gz Atom feed top
2019-05-17 16:19 François Dumont [this message]
2019-05-17 20:24 ` Jonathan Wakely
2019-05-20 5:52 ` François Dumont
2019-05-21 5:42 ` François Dumont
2019-05-27 21:20 ` François Dumont
2019-05-31 10:53 ` Jonathan Wakely
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=08e5ed09-2950-09d9-56a5-f33cf1ca60a0@gmail.com \
--to=frs.dumont@gmail.com \
--cc=gcc-patches@gcc.gnu.org \
--cc=libstdc++@gcc.gnu.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for read-only IMAP folder(s) and NNTP newsgroup(s).