public inbox for gcc-patches@gcc.gnu.org
* Hashtable comment cleanups & renamings
@ 2019-05-17 16:19 François Dumont
  2019-05-17 20:24 ` Jonathan Wakely
  0 siblings, 1 reply; 6+ messages in thread
From: François Dumont @ 2019-05-17 16:19 UTC (permalink / raw)
  To: libstdc++, gcc-patches

[-- Attachment #1: Type: text/plain, Size: 835 bytes --]

Hi

     I got tired of '__n' being used in _Hashtable for many different 
purposes: node, bucket, bucket count, bucket hint. It makes the code 
difficult to read. This patch makes sure that __n is a node except in
some very limited use cases where the method name is clear enough to 
tell what __n means.
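
     For illustration, this is the kind of change involved (a condensed
excerpt modelled on the find() hunk further down in the patch):

       // Before: '__n' holds a bucket index and '__p' a node pointer.
       std::size_t __n = _M_bucket_index(__k, __code);
       __node_type* __p = _M_find_node(__n, __k, __code);

       // After: the bucket index is named '__bkt' and '__n' is the node.
       std::size_t __bkt = _M_bucket_index(__k, __code);
       __node_type* __n = _M_find_node(__bkt, __k, __code);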

     So I'd like to commit this patch which only changes that and some
comments before moving forward to more serious stuff. The only code 
change is a use of auto return type on _M_allocate_node.
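
     (For reference, that is only a change of declaration style, as in the
_Hashtable_alloc hunk of the patch; behaviour is unchanged:)

       // Old out-of-line declaration of the return type:
       //   typename _Hashtable_alloc<_NodeAlloc>::__node_type*
       //   _Hashtable_alloc<_NodeAlloc>::_M_allocate_node(_Args&&... __args)
       // New form, using auto with a trailing return type:
       auto
       _Hashtable_alloc<_NodeAlloc>::_M_allocate_node(_Args&&... __args)
       -> __node_type*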

     My main concern is the ChangeLog entry. Is the following entry ok ?

     Rename variables and cleanup comments.
     * include/bits/hashtable_policy.h
     * include/bits/hashtable.h

     Tested under Linux x86_64 (even if it can't be otherwise)

François


[-- Attachment #2: hashtable_renames.patch --]
[-- Type: text/x-patch, Size: 38046 bytes --]

diff --git a/libstdc++-v3/include/bits/hashtable.h b/libstdc++-v3/include/bits/hashtable.h
index ab24b5bb537..78e6aeed5b1 100644
--- a/libstdc++-v3/include/bits/hashtable.h
+++ b/libstdc++-v3/include/bits/hashtable.h
@@ -253,7 +253,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 					    _Equal, _H1, _H2, _Hash,
 					    _RehashPolicy, _Traits>;
 
-      using __reuse_or_alloc_node_type =
+      using __reuse_or_alloc_node_gen_t =
 	__detail::_ReuseOrAllocNode<__node_alloc_type>;
 
       // Metaprogramming for picking apart hash caching.
@@ -278,9 +278,6 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 		    "Cache the hash code or qualify your functors involved"
 		    " in hash code and bucket index computation with noexcept");
 
-      // Following two static assertions are necessary to guarantee
-      // that local_iterator will be default constructible.
-
       // When hash codes are cached local iterator inherits from H2 functor
       // which must then be default constructible.
       static_assert(__if_hash_cached<is_default_constructible<_H2>>::value,
@@ -331,7 +328,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       _RehashPolicy		_M_rehash_policy;
 
       // A single bucket used when only need for 1 bucket. Especially
-      // interesting in move semantic to leave hashtable with only 1 buckets
+      // interesting in move semantic to leave hashtable with only 1 bucket
       // which is not allocated so that we can have those operations noexcept
       // qualified.
       // Note that we can't leave hashtable with 0 bucket without adding
@@ -350,24 +347,24 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       _M_base_alloc() { return *this; }
 
       __bucket_type*
-      _M_allocate_buckets(size_type __n)
+      _M_allocate_buckets(size_type __bkt_count)
       {
-	if (__builtin_expect(__n == 1, false))
+	if (__builtin_expect(__bkt_count == 1, false))
 	  {
 	    _M_single_bucket = nullptr;
 	    return &_M_single_bucket;
 	  }
 
-	return __hashtable_alloc::_M_allocate_buckets(__n);
+	return __hashtable_alloc::_M_allocate_buckets(__bkt_count);
       }
 
       void
-      _M_deallocate_buckets(__bucket_type* __bkts, size_type __n)
+      _M_deallocate_buckets(__bucket_type* __bkts, size_type __bkt_count)
       {
 	if (_M_uses_single_bucket(__bkts))
 	  return;
 
-	__hashtable_alloc::_M_deallocate_buckets(__bkts, __n);
+	__hashtable_alloc::_M_deallocate_buckets(__bkts, __bkt_count);
       }
 
       void
@@ -394,10 +391,10 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	_M_assign(const _Hashtable&, const _NodeGenerator&);
 
       void
-      _M_move_assign(_Hashtable&&, std::true_type);
+      _M_move_assign(_Hashtable&&, true_type);
 
       void
-      _M_move_assign(_Hashtable&&, std::false_type);
+      _M_move_assign(_Hashtable&&, false_type);
 
       void
       _M_reset() noexcept;
@@ -439,30 +436,31 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       { }
 
       explicit
-      _Hashtable(size_type __n,
+      _Hashtable(size_type __bkt_hint,
 		 const _H1& __hf = _H1(),
 		 const key_equal& __eql = key_equal(),
 		 const allocator_type& __a = allocator_type())
-      : _Hashtable(__n, __hf, _H2(), _Hash(), __eql,
+      : _Hashtable(__bkt_hint, __hf, _H2(), _Hash(), __eql,
 		   __key_extract(), __a)
       { }
 
       template<typename _InputIterator>
 	_Hashtable(_InputIterator __f, _InputIterator __l,
-		   size_type __n = 0,
+		   size_type __bkt_hint = 0,
 		   const _H1& __hf = _H1(),
 		   const key_equal& __eql = key_equal(),
 		   const allocator_type& __a = allocator_type())
-	: _Hashtable(__f, __l, __n, __hf, _H2(), _Hash(), __eql,
+	: _Hashtable(__f, __l, __bkt_hint, __hf, _H2(), _Hash(), __eql,
 		     __key_extract(), __a)
 	{ }
 
       _Hashtable(initializer_list<value_type> __l,
-		 size_type __n = 0,
+		 size_type __bkt_hint = 0,
 		 const _H1& __hf = _H1(),
 		 const key_equal& __eql = key_equal(),
 		 const allocator_type& __a = allocator_type())
-      : _Hashtable(__l.begin(), __l.end(), __n, __hf, _H2(), _Hash(), __eql,
+      : _Hashtable(__l.begin(), __l.end(), __bkt_hint,
+		   __hf, _H2(), _Hash(), __eql,
 		   __key_extract(), __a)
       { }
 
@@ -485,7 +483,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       _Hashtable&
       operator=(initializer_list<value_type> __l)
       {
-	__reuse_or_alloc_node_type __roan(_M_begin(), *this);
+	__reuse_or_alloc_node_gen_t __roan(_M_begin(), *this);
 	_M_before_begin._M_nxt = nullptr;
 	clear();
 	this->_M_insert_range(__l.begin(), __l.end(), __roan, __unique_keys());
@@ -557,46 +555,46 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       { return max_size(); }
 
       size_type
-      bucket_size(size_type __n) const
-      { return std::distance(begin(__n), end(__n)); }
+      bucket_size(size_type __bkt) const
+      { return std::distance(begin(__bkt), end(__bkt)); }
 
       size_type
       bucket(const key_type& __k) const
       { return _M_bucket_index(__k, this->_M_hash_code(__k)); }
 
       local_iterator
-      begin(size_type __n)
+      begin(size_type __bkt)
       {
-	return local_iterator(*this, _M_bucket_begin(__n),
-			      __n, _M_bucket_count);
+	return local_iterator(*this, _M_bucket_begin(__bkt),
+			      __bkt, _M_bucket_count);
       }
 
       local_iterator
-      end(size_type __n)
-      { return local_iterator(*this, nullptr, __n, _M_bucket_count); }
+      end(size_type __bkt)
+      { return local_iterator(*this, nullptr, __bkt, _M_bucket_count); }
 
       const_local_iterator
-      begin(size_type __n) const
+      begin(size_type __bkt) const
       {
-	return const_local_iterator(*this, _M_bucket_begin(__n),
-				    __n, _M_bucket_count);
+	return const_local_iterator(*this, _M_bucket_begin(__bkt),
+				    __bkt, _M_bucket_count);
       }
 
       const_local_iterator
-      end(size_type __n) const
-      { return const_local_iterator(*this, nullptr, __n, _M_bucket_count); }
+      end(size_type __bkt) const
+      { return const_local_iterator(*this, nullptr, __bkt, _M_bucket_count); }
 
       // DR 691.
       const_local_iterator
-      cbegin(size_type __n) const
+      cbegin(size_type __bkt) const
       {
-	return const_local_iterator(*this, _M_bucket_begin(__n),
-				    __n, _M_bucket_count);
+	return const_local_iterator(*this, _M_bucket_begin(__bkt),
+				    __bkt, _M_bucket_count);
       }
 
       const_local_iterator
-      cend(size_type __n) const
-      { return const_local_iterator(*this, nullptr, __n, _M_bucket_count); }
+      cend(size_type __bkt) const
+      { return const_local_iterator(*this, nullptr, __bkt, _M_bucket_count); }
 
       float
       load_factor() const noexcept
@@ -686,22 +684,22 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 
       template<typename... _Args>
 	std::pair<iterator, bool>
-	_M_emplace(std::true_type, _Args&&... __args);
+	_M_emplace(true_type, _Args&&... __args);
 
       template<typename... _Args>
 	iterator
-	_M_emplace(std::false_type __uk, _Args&&... __args)
+	_M_emplace(false_type __uk, _Args&&... __args)
 	{ return _M_emplace(cend(), __uk, std::forward<_Args>(__args)...); }
 
       // Emplace with hint, useless when keys are unique.
       template<typename... _Args>
 	iterator
-	_M_emplace(const_iterator, std::true_type __uk, _Args&&... __args)
+	_M_emplace(const_iterator, true_type __uk, _Args&&... __args)
 	{ return _M_emplace(__uk, std::forward<_Args>(__args)...).first; }
 
       template<typename... _Args>
 	iterator
-	_M_emplace(const_iterator, std::false_type, _Args&&... __args);
+	_M_emplace(const_iterator, false_type, _Args&&... __args);
 
       template<typename _Arg, typename _NodeGenerator>
 	std::pair<iterator, bool>
@@ -733,10 +731,10 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 		  const _NodeGenerator&, false_type);
 
       size_type
-      _M_erase(std::true_type, const key_type&);
+      _M_erase(true_type, const key_type&);
 
       size_type
-      _M_erase(std::false_type, const key_type&);
+      _M_erase(false_type, const key_type&);
 
       iterator
       _M_erase(size_type __bkt, __node_base* __prev_n, __node_type* __n);
@@ -777,8 +775,9 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       void
       clear() noexcept;
 
-      // Set number of buckets to be appropriate for container of n element.
-      void rehash(size_type __n);
+      // Set number of buckets keeping it appropriate for container's number
+      // of elements.
+      void rehash(size_type __bkt_count);
 
       // DR 1189.
       // reserve, if present, comes from _Rehash_base.
@@ -918,14 +917,14 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 
     private:
       // Helper rehash method used when keys are unique.
-      void _M_rehash_aux(size_type __n, std::true_type);
+      void _M_rehash_aux(size_type __bkt_count, true_type);
 
       // Helper rehash method used when keys can be non-unique.
-      void _M_rehash_aux(size_type __n, std::false_type);
+      void _M_rehash_aux(size_type __bkt_count, false_type);
 
       // Unconditionally change size of bucket array to n, restore
       // hash policy state to __state on exception.
-      void _M_rehash(size_type __n, const __rehash_state& __state);
+      void _M_rehash(size_type __bkt_count, const __rehash_state& __state);
     };
 
 
@@ -1044,7 +1043,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 
       // Reuse allocated buckets and nodes.
       _M_assign_elements(__ht,
-	[](const __reuse_or_alloc_node_type& __roan, const __node_type* __n)
+	[](const __reuse_or_alloc_node_gen_t& __roan, const __node_type* __n)
 	{ return __roan(__n->_M_v()); });
       return *this;
     }
@@ -1078,7 +1077,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	    __hashtable_base::operator=(std::forward<_Ht>(__ht));
 	    _M_element_count = __ht._M_element_count;
 	    _M_rehash_policy = __ht._M_rehash_policy;
-	    __reuse_or_alloc_node_type __roan(_M_begin(), *this);
+	    __reuse_or_alloc_node_gen_t __roan(_M_begin(), *this);
 	    _M_before_begin._M_nxt = nullptr;
 	    _M_assign(__ht,
 		      [&__node_gen, &__roan](__node_type* __n)
@@ -1175,7 +1174,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     void
     _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 	       _H1, _H2, _Hash, _RehashPolicy, _Traits>::
-    _M_move_assign(_Hashtable&& __ht, std::true_type)
+    _M_move_assign(_Hashtable&& __ht, true_type)
     {
       this->_M_deallocate_nodes(_M_begin());
       _M_deallocate_buckets();
@@ -1207,15 +1206,15 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     void
     _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 	       _H1, _H2, _Hash, _RehashPolicy, _Traits>::
-    _M_move_assign(_Hashtable&& __ht, std::false_type)
+    _M_move_assign(_Hashtable&& __ht, false_type)
     {
       if (__ht._M_node_allocator() == this->_M_node_allocator())
-	_M_move_assign(std::move(__ht), std::true_type());
+	_M_move_assign(std::move(__ht), true_type());
       else
 	{
 	  // Can't move memory, move elements then.
 	  _M_assign_elements(std::move(__ht),
-		[](const __reuse_or_alloc_node_type& __roan, __node_type* __n)
+		[](const __reuse_or_alloc_node_gen_t& __roan, __node_type* __n)
 		{ return __roan(std::move_if_noexcept(__n->_M_v())); });
 	  __ht.clear();
 	}
@@ -1415,9 +1414,9 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     -> iterator
     {
       __hash_code __code = this->_M_hash_code(__k);
-      std::size_t __n = _M_bucket_index(__k, __code);
-      __node_type* __p = _M_find_node(__n, __k, __code);
-      return __p ? iterator(__p) : end();
+      std::size_t __bkt = _M_bucket_index(__k, __code);
+      __node_type* __n = _M_find_node(__bkt, __k, __code);
+      return __n ? iterator(__n) : end();
     }
 
   template<typename _Key, typename _Value,
@@ -1431,9 +1430,9 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     -> const_iterator
     {
       __hash_code __code = this->_M_hash_code(__k);
-      std::size_t __n = _M_bucket_index(__k, __code);
-      __node_type* __p = _M_find_node(__n, __k, __code);
-      return __p ? const_iterator(__p) : end();
+      std::size_t __bkt = _M_bucket_index(__k, __code);
+      __node_type* __n = _M_find_node(__bkt, __k, __code);
+      return __n ? const_iterator(__n) : end();
     }
 
   template<typename _Key, typename _Value,
@@ -1447,22 +1446,22 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     -> size_type
     {
       __hash_code __code = this->_M_hash_code(__k);
-      std::size_t __n = _M_bucket_index(__k, __code);
-      __node_type* __p = _M_bucket_begin(__n);
-      if (!__p)
+      std::size_t __bkt = _M_bucket_index(__k, __code);
+      __node_type* __n = _M_bucket_begin(__bkt);
+      if (!__n)
 	return 0;
 
       std::size_t __result = 0;
-      for (;; __p = __p->_M_next())
+      for (;; __n = __n->_M_next())
 	{
-	  if (this->_M_equals(__k, __code, __p))
+	  if (this->_M_equals(__k, __code, __n))
 	    ++__result;
 	  else if (__result)
 	    // All equivalent values are next to each other, if we
 	    // found a non-equivalent value after an equivalent one it
 	    // means that we won't find any new equivalent value.
 	    break;
-	  if (!__p->_M_nxt || _M_bucket_index(__p->_M_next()) != __n)
+	  if (!__n->_M_nxt || _M_bucket_index(__n->_M_next()) != __bkt)
 	    break;
 	}
       return __result;
@@ -1479,17 +1478,17 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     -> pair<iterator, iterator>
     {
       __hash_code __code = this->_M_hash_code(__k);
-      std::size_t __n = _M_bucket_index(__k, __code);
-      __node_type* __p = _M_find_node(__n, __k, __code);
+      std::size_t __bkt = _M_bucket_index(__k, __code);
+      __node_type* __n = _M_find_node(__bkt, __k, __code);
 
-      if (__p)
+      if (__n)
 	{
-	  __node_type* __p1 = __p->_M_next();
-	  while (__p1 && _M_bucket_index(__p1) == __n
-		 && this->_M_equals(__k, __code, __p1))
-	    __p1 = __p1->_M_next();
+	  __node_type* __n1 = __n->_M_next();
+	  while (__n1 && _M_bucket_index(__n1) == __bkt
+		 && this->_M_equals(__k, __code, __n1))
+	    __n1 = __n1->_M_next();
 
-	  return std::make_pair(iterator(__p), iterator(__p1));
+	  return std::make_pair(iterator(__n), iterator(__n1));
 	}
       else
 	return std::make_pair(end(), end());
@@ -1506,23 +1505,23 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     -> pair<const_iterator, const_iterator>
     {
       __hash_code __code = this->_M_hash_code(__k);
-      std::size_t __n = _M_bucket_index(__k, __code);
-      __node_type* __p = _M_find_node(__n, __k, __code);
+      std::size_t __bkt = _M_bucket_index(__k, __code);
+      __node_type* __n = _M_find_node(__bkt, __k, __code);
 
-      if (__p)
+      if (__n)
 	{
-	  __node_type* __p1 = __p->_M_next();
-	  while (__p1 && _M_bucket_index(__p1) == __n
-		 && this->_M_equals(__k, __code, __p1))
-	    __p1 = __p1->_M_next();
+	  __node_type* __n1 = __n->_M_next();
+	  while (__n1 && _M_bucket_index(__n1) == __bkt
+		 && this->_M_equals(__k, __code, __n1))
+	    __n1 = __n1->_M_next();
 
-	  return std::make_pair(const_iterator(__p), const_iterator(__p1));
+	  return std::make_pair(const_iterator(__n), const_iterator(__n1));
 	}
       else
 	return std::make_pair(end(), end());
     }
 
-  // Find the node whose key compares equal to k in the bucket n.
+  // Find the node whose key compares equal to k in the bucket bkt.
   // Return nullptr if no node is found.
   template<typename _Key, typename _Value,
 	   typename _Alloc, typename _ExtractKey, typename _Equal,
@@ -1531,23 +1530,23 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     auto
     _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 	       _H1, _H2, _Hash, _RehashPolicy, _Traits>::
-    _M_find_before_node(size_type __n, const key_type& __k,
+    _M_find_before_node(size_type __bkt, const key_type& __k,
 			__hash_code __code) const
     -> __node_base*
     {
-      __node_base* __prev_p = _M_buckets[__n];
-      if (!__prev_p)
+      __node_base* __prev_n = _M_buckets[__bkt];
+      if (!__prev_n)
 	return nullptr;
 
-      for (__node_type* __p = static_cast<__node_type*>(__prev_p->_M_nxt);;
-	   __p = __p->_M_next())
+      for (__node_type* __n = static_cast<__node_type*>(__prev_n->_M_nxt);;
+	   __n = __n->_M_next())
 	{
-	  if (this->_M_equals(__k, __code, __p))
-	    return __prev_p;
+	  if (this->_M_equals(__k, __code, __n))
+	    return __prev_n;
 
-	  if (!__p->_M_nxt || _M_bucket_index(__p->_M_next()) != __n)
+	  if (!__n->_M_nxt || _M_bucket_index(__n->_M_next()) != __bkt)
 	    break;
-	  __prev_p = __p;
+	  __prev_n = __n;
 	}
       return nullptr;
     }
@@ -1631,11 +1630,12 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       auto
       _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 		 _H1, _H2, _Hash, _RehashPolicy, _Traits>::
-      _M_emplace(std::true_type, _Args&&... __args)
+      _M_emplace(true_type, _Args&&... __args)
       -> pair<iterator, bool>
       {
 	// First build the node to get access to the hash code
-	__node_type* __node = this->_M_allocate_node(std::forward<_Args>(__args)...);
+	__node_type* __node
+	  = this->_M_allocate_node(std::forward<_Args>(__args)...);
 	const key_type& __k = this->_M_extract()(__node->_M_v());
 	__hash_code __code;
 	__try
@@ -1649,11 +1649,11 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	  }
 
 	size_type __bkt = _M_bucket_index(__k, __code);
-	if (__node_type* __p = _M_find_node(__bkt, __k, __code))
+	if (__node_type* __n = _M_find_node(__bkt, __k, __code))
 	  {
 	    // There is already an equivalent node, no insertion
 	    this->_M_deallocate_node(__node);
-	    return std::make_pair(iterator(__p), false);
+	    return std::make_pair(iterator(__n), false);
 	  }
 
 	// Insert the node
@@ -1669,7 +1669,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       auto
       _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 		 _H1, _H2, _Hash, _RehashPolicy, _Traits>::
-      _M_emplace(const_iterator __hint, std::false_type, _Args&&... __args)
+      _M_emplace(const_iterator __hint, false_type, _Args&&... __args)
       -> iterator
       {
 	// First build the node to get its hash code.
@@ -1711,7 +1711,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	  if (__do_rehash.first)
 	    {
 	      _M_rehash(__do_rehash.second, __saved_state);
-	      __bkt = _M_bucket_index(this->_M_extract()(__node->_M_v()), __code);
+	      __bkt
+		= _M_bucket_index(this->_M_extract()(__node->_M_v()), __code);
 	    }
 
 	  this->_M_store_code(__node, __code);
@@ -1896,7 +1897,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     auto
     _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 	       _H1, _H2, _Hash, _RehashPolicy, _Traits>::
-    _M_erase(std::true_type, const key_type& __k)
+    _M_erase(true_type, const key_type& __k)
     -> size_type
     {
       __hash_code __code = this->_M_hash_code(__k);
@@ -1920,7 +1921,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     auto
     _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 	       _H1, _H2, _Hash, _RehashPolicy, _Traits>::
-    _M_erase(std::false_type, const key_type& __k)
+    _M_erase(false_type, const key_type& __k)
     -> size_type
     {
       __hash_code __code = this->_M_hash_code(__k);
@@ -2038,16 +2039,16 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     void
     _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 	       _H1, _H2, _Hash, _RehashPolicy, _Traits>::
-    rehash(size_type __n)
+    rehash(size_type __bkt_count)
     {
       const __rehash_state& __saved_state = _M_rehash_policy._M_state();
-      std::size_t __buckets
+      __bkt_count
 	= std::max(_M_rehash_policy._M_bkt_for_elements(_M_element_count + 1),
-		   __n);
-      __buckets = _M_rehash_policy._M_next_bkt(__buckets);
+		   __bkt_count);
+      __bkt_count = _M_rehash_policy._M_next_bkt(__bkt_count);
 
-      if (__buckets != _M_bucket_count)
-	_M_rehash(__buckets, __saved_state);
+      if (__bkt_count != _M_bucket_count)
+	_M_rehash(__bkt_count, __saved_state);
       else
 	// No rehash, restore previous state to keep it consistent with
 	// container state.
@@ -2061,11 +2062,11 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     void
     _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 	       _H1, _H2, _Hash, _RehashPolicy, _Traits>::
-    _M_rehash(size_type __n, const __rehash_state& __state)
+    _M_rehash(size_type __bkt_count, const __rehash_state& __state)
     {
       __try
 	{
-	  _M_rehash_aux(__n, __unique_keys());
+	  _M_rehash_aux(__bkt_count, __unique_keys());
 	}
       __catch(...)
 	{
@@ -2084,35 +2085,36 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     void
     _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 	       _H1, _H2, _Hash, _RehashPolicy, _Traits>::
-    _M_rehash_aux(size_type __n, std::true_type)
+    _M_rehash_aux(size_type __bkt_count, true_type)
     {
-      __bucket_type* __new_buckets = _M_allocate_buckets(__n);
-      __node_type* __p = _M_begin();
+      __bucket_type* __new_buckets = _M_allocate_buckets(__bkt_count);
+      __node_type* __n = _M_begin();
       _M_before_begin._M_nxt = nullptr;
       std::size_t __bbegin_bkt = 0;
-      while (__p)
+      while (__n)
 	{
-	  __node_type* __next = __p->_M_next();
-	  std::size_t __bkt = __hash_code_base::_M_bucket_index(__p, __n);
+	  __node_type* __next = __n->_M_next();
+	  std::size_t __bkt
+	    = __hash_code_base::_M_bucket_index(__n, __bkt_count);
 	  if (!__new_buckets[__bkt])
 	    {
-	      __p->_M_nxt = _M_before_begin._M_nxt;
-	      _M_before_begin._M_nxt = __p;
+	      __n->_M_nxt = _M_before_begin._M_nxt;
+	      _M_before_begin._M_nxt = __n;
 	      __new_buckets[__bkt] = &_M_before_begin;
-	      if (__p->_M_nxt)
-		__new_buckets[__bbegin_bkt] = __p;
+	      if (__n->_M_nxt)
+		__new_buckets[__bbegin_bkt] = __n;
 	      __bbegin_bkt = __bkt;
 	    }
 	  else
 	    {
-	      __p->_M_nxt = __new_buckets[__bkt]->_M_nxt;
-	      __new_buckets[__bkt]->_M_nxt = __p;
+	      __n->_M_nxt = __new_buckets[__bkt]->_M_nxt;
+	      __new_buckets[__bkt]->_M_nxt = __n;
 	    }
-	  __p = __next;
+	  __n = __next;
 	}
 
       _M_deallocate_buckets();
-      _M_bucket_count = __n;
+      _M_bucket_count = __bkt_count;
       _M_buckets = __new_buckets;
     }
 
@@ -2125,29 +2127,30 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     void
     _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 	       _H1, _H2, _Hash, _RehashPolicy, _Traits>::
-    _M_rehash_aux(size_type __n, std::false_type)
+    _M_rehash_aux(size_type __bkt_count, false_type)
     {
-      __bucket_type* __new_buckets = _M_allocate_buckets(__n);
+      __bucket_type* __new_buckets = _M_allocate_buckets(__bkt_count);
 
-      __node_type* __p = _M_begin();
+      __node_type* __n = _M_begin();
       _M_before_begin._M_nxt = nullptr;
       std::size_t __bbegin_bkt = 0;
       std::size_t __prev_bkt = 0;
-      __node_type* __prev_p = nullptr;
+      __node_type* __prev_n = nullptr;
       bool __check_bucket = false;
 
-      while (__p)
+      while (__n)
 	{
-	  __node_type* __next = __p->_M_next();
-	  std::size_t __bkt = __hash_code_base::_M_bucket_index(__p, __n);
+	  __node_type* __next = __n->_M_next();
+	  std::size_t __bkt
+	    = __hash_code_base::_M_bucket_index(__n, __bkt_count);
 
-	  if (__prev_p && __prev_bkt == __bkt)
+	  if (__prev_n && __prev_bkt == __bkt)
 	    {
 	      // Previous insert was already in this bucket, we insert after
 	      // the previously inserted one to preserve equivalent elements
 	      // relative order.
-	      __p->_M_nxt = __prev_p->_M_nxt;
-	      __prev_p->_M_nxt = __p;
+	      __n->_M_nxt = __prev_n->_M_nxt;
+	      __prev_n->_M_nxt = __n;
 
 	      // Inserting after a node in a bucket require to check that we
 	      // haven't change the bucket last node, in this case next
@@ -2162,47 +2165,48 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 		{
 		  // Check if we shall update the next bucket because of
 		  // insertions into __prev_bkt bucket.
-		  if (__prev_p->_M_nxt)
+		  if (__prev_n->_M_nxt)
 		    {
 		      std::size_t __next_bkt
-			= __hash_code_base::_M_bucket_index(__prev_p->_M_next(),
-							    __n);
+			= __hash_code_base::_M_bucket_index(__prev_n->_M_next(),
+							    __bkt_count);
 		      if (__next_bkt != __prev_bkt)
-			__new_buckets[__next_bkt] = __prev_p;
+			__new_buckets[__next_bkt] = __prev_n;
 		    }
 		  __check_bucket = false;
 		}
 
 	      if (!__new_buckets[__bkt])
 		{
-		  __p->_M_nxt = _M_before_begin._M_nxt;
-		  _M_before_begin._M_nxt = __p;
+		  __n->_M_nxt = _M_before_begin._M_nxt;
+		  _M_before_begin._M_nxt = __n;
 		  __new_buckets[__bkt] = &_M_before_begin;
-		  if (__p->_M_nxt)
-		    __new_buckets[__bbegin_bkt] = __p;
+		  if (__n->_M_nxt)
+		    __new_buckets[__bbegin_bkt] = __n;
 		  __bbegin_bkt = __bkt;
 		}
 	      else
 		{
-		  __p->_M_nxt = __new_buckets[__bkt]->_M_nxt;
-		  __new_buckets[__bkt]->_M_nxt = __p;
+		  __n->_M_nxt = __new_buckets[__bkt]->_M_nxt;
+		  __new_buckets[__bkt]->_M_nxt = __n;
 		}
 	    }
-	  __prev_p = __p;
+	  __prev_n = __n;
 	  __prev_bkt = __bkt;
-	  __p = __next;
+	  __n = __next;
 	}
 
-      if (__check_bucket && __prev_p->_M_nxt)
+      if (__check_bucket && __prev_n->_M_nxt)
 	{
 	  std::size_t __next_bkt
-	    = __hash_code_base::_M_bucket_index(__prev_p->_M_next(), __n);
+	    = __hash_code_base::_M_bucket_index(__prev_n->_M_next(),
+						__bkt_count);
 	  if (__next_bkt != __prev_bkt)
-	    __new_buckets[__next_bkt] = __prev_p;
+	    __new_buckets[__next_bkt] = __prev_n;
 	}
 
       _M_deallocate_buckets();
-      _M_bucket_count = __n;
+      _M_bucket_count = __bkt_count;
       _M_buckets = __new_buckets;
     }
 
diff --git a/libstdc++-v3/include/bits/hashtable_policy.h b/libstdc++-v3/include/bits/hashtable_policy.h
index a4d2a97f4f3..bb2e7b762ff 100644
--- a/libstdc++-v3/include/bits/hashtable_policy.h
+++ b/libstdc++-v3/include/bits/hashtable_policy.h
@@ -181,7 +181,7 @@ namespace __detail
    *  @tparam _Cache_hash_code  Boolean value. True if the value of
    *  the hash function is stored along with the value. This is a
    *  time-space tradeoff.  Storing it may improve lookup speed by
-   *  reducing the number of times we need to call the _Equal
+   *  reducing the number of times we need to call the _Hash
    *  function.
    *
    *  @tparam _Constant_iterators  Boolean value. True if iterator and
@@ -291,8 +291,8 @@ namespace __detail
 
       __node_type*  _M_cur;
 
-      _Node_iterator_base(__node_type* __p) noexcept
-      : _M_cur(__p) { }
+      _Node_iterator_base(__node_type* __n) noexcept
+      : _M_cur(__n) { }
 
       void
       _M_incr() noexcept
@@ -337,8 +337,8 @@ namespace __detail
       : __base_type(0) { }
 
       explicit
-      _Node_iterator(__node_type* __p) noexcept
-      : __base_type(__p) { }
+      _Node_iterator(__node_type* __n) noexcept
+      : __base_type(__n) { }
 
       reference
       operator*() const noexcept
@@ -385,8 +385,8 @@ namespace __detail
       : __base_type(0) { }
 
       explicit
-      _Node_const_iterator(__node_type* __p) noexcept
-      : __base_type(__p) { }
+      _Node_const_iterator(__node_type* __n) noexcept
+      : __base_type(__n) { }
 
       _Node_const_iterator(const _Node_iterator<_Value, __constant_iterators,
 			   __cache>& __x) noexcept
@@ -444,7 +444,7 @@ namespace __detail
   /// smallest prime that keeps the load factor small enough.
   struct _Prime_rehash_policy
   {
-    using __has_load_factor = std::true_type;
+    using __has_load_factor = true_type;
 
     _Prime_rehash_policy(float __z = 1.0) noexcept
     : _M_max_load_factor(__z), _M_next_resize(0) { }
@@ -521,7 +521,7 @@ namespace __detail
   /// operations.
   struct _Power2_rehash_policy
   {
-    using __has_load_factor = std::true_type;
+    using __has_load_factor = true_type;
 
     _Power2_rehash_policy(float __z = 1.0) noexcept
     : _M_max_load_factor(__z), _M_next_resize(0) { }
@@ -705,18 +705,18 @@ namespace __detail
     {
       __hashtable* __h = static_cast<__hashtable*>(this);
       __hash_code __code = __h->_M_hash_code(__k);
-      std::size_t __n = __h->_M_bucket_index(__k, __code);
-      __node_type* __p = __h->_M_find_node(__n, __k, __code);
+      std::size_t __bkt = __h->_M_bucket_index(__k, __code);
+      __node_type* __n = __h->_M_find_node(__bkt, __k, __code);
 
-      if (!__p)
+      if (!__n)
 	{
-	  __p = __h->_M_allocate_node(std::piecewise_construct,
+	  __n = __h->_M_allocate_node(std::piecewise_construct,
 				      std::tuple<const key_type&>(__k),
 				      std::tuple<>());
-	  return __h->_M_insert_unique_node(__n, __code, __p)->second;
+	  return __h->_M_insert_unique_node(__bkt, __code, __n)->second;
 	}
 
-      return __p->_M_v().second;
+      return __n->_M_v().second;
     }
 
   template<typename _Key, typename _Pair, typename _Alloc, typename _Equal,
@@ -730,18 +730,18 @@ namespace __detail
     {
       __hashtable* __h = static_cast<__hashtable*>(this);
       __hash_code __code = __h->_M_hash_code(__k);
-      std::size_t __n = __h->_M_bucket_index(__k, __code);
-      __node_type* __p = __h->_M_find_node(__n, __k, __code);
+      std::size_t __bkt = __h->_M_bucket_index(__k, __code);
+      __node_type* __n = __h->_M_find_node(__bkt, __k, __code);
 
-      if (!__p)
+      if (!__n)
 	{
-	  __p = __h->_M_allocate_node(std::piecewise_construct,
+	  __n = __h->_M_allocate_node(std::piecewise_construct,
 				      std::forward_as_tuple(std::move(__k)),
 				      std::tuple<>());
-	  return __h->_M_insert_unique_node(__n, __code, __p)->second;
+	  return __h->_M_insert_unique_node(__bkt, __code, __n)->second;
 	}
 
-      return __p->_M_v().second;
+      return __n->_M_v().second;
     }
 
   template<typename _Key, typename _Pair, typename _Alloc, typename _Equal,
@@ -755,12 +755,12 @@ namespace __detail
     {
       __hashtable* __h = static_cast<__hashtable*>(this);
       __hash_code __code = __h->_M_hash_code(__k);
-      std::size_t __n = __h->_M_bucket_index(__k, __code);
-      __node_type* __p = __h->_M_find_node(__n, __k, __code);
+      std::size_t __bkt = __h->_M_bucket_index(__k, __code);
+      __node_type* __n = __h->_M_find_node(__bkt, __k, __code);
 
-      if (!__p)
+      if (!__n)
 	__throw_out_of_range(__N("_Map_base::at"));
-      return __p->_M_v().second;
+      return __n->_M_v().second;
     }
 
   template<typename _Key, typename _Pair, typename _Alloc, typename _Equal,
@@ -774,12 +774,12 @@ namespace __detail
     {
       const __hashtable* __h = static_cast<const __hashtable*>(this);
       __hash_code __code = __h->_M_hash_code(__k);
-      std::size_t __n = __h->_M_bucket_index(__k, __code);
-      __node_type* __p = __h->_M_find_node(__n, __k, __code);
+      std::size_t __bkt = __h->_M_bucket_index(__k, __code);
+      __node_type* __n = __h->_M_find_node(__bkt, __k, __code);
 
-      if (!__p)
+      if (!__n)
 	__throw_out_of_range(__N("_Map_base::at"));
-      return __p->_M_v().second;
+      return __n->_M_v().second;
     }
 
   /**
@@ -1041,7 +1041,7 @@ namespace __detail
 	   typename _H1, typename _H2, typename _Hash,
 	   typename _RehashPolicy, typename _Traits,
 	   typename =
-	     __detected_or_t<std::false_type, __has_load_factor, _RehashPolicy>>
+	     __detected_or_t<false_type, __has_load_factor, _RehashPolicy>>
     struct _Rehash_base;
 
   /// Specialization when rehash policy doesn't provide load factor management.
@@ -1051,7 +1051,7 @@ namespace __detail
 	   typename _RehashPolicy, typename _Traits>
     struct _Rehash_base<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 		      _H1, _H2, _Hash, _RehashPolicy, _Traits,
-		      std::false_type>
+		      false_type>
     {
     };
 
@@ -1062,7 +1062,7 @@ namespace __detail
 	   typename _RehashPolicy, typename _Traits>
     struct _Rehash_base<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 			_H1, _H2, _Hash, _RehashPolicy, _Traits,
-			std::true_type>
+			true_type>
     {
       using __hashtable = _Hashtable<_Key, _Value, _Alloc, _ExtractKey,
 				     _Equal, _H1, _H2, _Hash,
@@ -1199,14 +1199,15 @@ namespace __detail
       { return 0; }
 
       std::size_t
-      _M_bucket_index(const _Key& __k, __hash_code, std::size_t __n) const
-      { return _M_ranged_hash()(__k, __n); }
+      _M_bucket_index(const _Key& __k, __hash_code,
+		      std::size_t __bkt_count) const
+      { return _M_ranged_hash()(__k, __bkt_count); }
 
       std::size_t
-      _M_bucket_index(const __node_type* __p, std::size_t __n) const
+      _M_bucket_index(const __node_type* __n, std::size_t __bkt_count) const
 	noexcept( noexcept(declval<const _Hash&>()(declval<const _Key&>(),
 						   (std::size_t)0)) )
-      { return _M_ranged_hash()(_M_extract()(__p->_M_v()), __n); }
+      { return _M_ranged_hash()(_M_extract()(__n->_M_v()), __bkt_count); }
 
       void
       _M_store_code(__node_type*, __hash_code) const
@@ -1290,15 +1291,16 @@ namespace __detail
       }
 
       std::size_t
-      _M_bucket_index(const _Key&, __hash_code __c, std::size_t __n) const
-      { return _M_h2()(__c, __n); }
+      _M_bucket_index(const _Key&, __hash_code __c,
+		      std::size_t __bkt_count) const
+      { return _M_h2()(__c, __bkt_count); }
 
       std::size_t
-      _M_bucket_index(const __node_type* __p, std::size_t __n) const
+      _M_bucket_index(const __node_type* __n, std::size_t __bkt_count) const
 	noexcept( noexcept(declval<const _H1&>()(declval<const _Key&>()))
 		  && noexcept(declval<const _H2&>()((__hash_code)0,
 						    (std::size_t)0)) )
-      { return _M_h2()(_M_h1()(_M_extract()(__p->_M_v())), __n); }
+      { return _M_h2()(_M_h1()(_M_extract()(__n->_M_v())), __bkt_count); }
 
       void
       _M_store_code(__node_type*, __hash_code) const
@@ -1375,14 +1377,14 @@ namespace __detail
 
       std::size_t
       _M_bucket_index(const _Key&, __hash_code __c,
-		      std::size_t __n) const
-      { return _M_h2()(__c, __n); }
+		      std::size_t __bkt_count) const
+      { return _M_h2()(__c, __bkt_count); }
 
       std::size_t
-      _M_bucket_index(const __node_type* __p, std::size_t __n) const
+      _M_bucket_index(const __node_type* __n, std::size_t __bkt_count) const
 	noexcept( noexcept(declval<const _H2&>()((__hash_code)0,
 						 (std::size_t)0)) )
-      { return _M_h2()(__p->_M_hash_code, __n); }
+      { return _M_h2()(__n->_M_hash_code, __bkt_count); }
 
       void
       _M_store_code(__node_type* __n, __hash_code __c) const
@@ -1425,10 +1427,10 @@ namespace __detail
 
       _Local_iterator_base() = default;
       _Local_iterator_base(const __hash_code_base& __base,
-			   _Hash_node<_Value, true>* __p,
+			   _Hash_node<_Value, true>* __n,
 			   std::size_t __bkt, std::size_t __bkt_count)
       : __base_type(__base._M_h2()),
-	_M_cur(__p), _M_bucket(__bkt), _M_bucket_count(__bkt_count) { }
+	_M_cur(__n), _M_bucket(__bkt), _M_bucket_count(__bkt_count) { }
 
       void
       _M_incr()
@@ -1507,9 +1509,9 @@ namespace __detail
       _Local_iterator_base() : _M_bucket_count(-1) { }
 
       _Local_iterator_base(const __hash_code_base& __base,
-			   _Hash_node<_Value, false>* __p,
+			   _Hash_node<_Value, false>* __n,
 			   std::size_t __bkt, std::size_t __bkt_count)
-      : _M_cur(__p), _M_bucket(__bkt), _M_bucket_count(__bkt_count)
+      : _M_cur(__n), _M_bucket(__bkt), _M_bucket_count(__bkt_count)
       { _M_init(__base); }
 
       ~_Local_iterator_base()
@@ -1615,9 +1617,9 @@ namespace __detail
       _Local_iterator() = default;
 
       _Local_iterator(const __hash_code_base& __base,
-		      _Hash_node<_Value, __cache>* __p,
+		      _Hash_node<_Value, __cache>* __n,
 		      std::size_t __bkt, std::size_t __bkt_count)
-	: __base_type(__base, __p, __bkt, __bkt_count)
+      : __base_type(__base, __n, __bkt, __bkt_count)
       { }
 
       reference
@@ -1667,9 +1669,9 @@ namespace __detail
       _Local_const_iterator() = default;
 
       _Local_const_iterator(const __hash_code_base& __base,
-			    _Hash_node<_Value, __cache>* __p,
+			    _Hash_node<_Value, __cache>* __n,
 			    std::size_t __bkt, std::size_t __bkt_count)
-	: __base_type(__base, __p, __bkt, __bkt_count)
+      : __base_type(__base, __n, __bkt, __bkt_count)
       { }
 
       _Local_const_iterator(const _Local_iterator<_Key, _Value, _ExtractKey,
@@ -2025,18 +2027,19 @@ namespace __detail
       _M_deallocate_nodes(__node_type* __n);
 
       __bucket_type*
-      _M_allocate_buckets(std::size_t __n);
+      _M_allocate_buckets(std::size_t __bkt_count);
 
       void
-      _M_deallocate_buckets(__bucket_type*, std::size_t __n);
+      _M_deallocate_buckets(__bucket_type*, std::size_t __bkt_count);
     };
 
   // Definitions of class template _Hashtable_alloc's out-of-line member
   // functions.
   template<typename _NodeAlloc>
     template<typename... _Args>
-      typename _Hashtable_alloc<_NodeAlloc>::__node_type*
+      auto
       _Hashtable_alloc<_NodeAlloc>::_M_allocate_node(_Args&&... __args)
+      -> __node_type*
       {
 	auto __nptr = __node_alloc_traits::allocate(_M_node_allocator(), 1);
 	__node_type* __n = std::__to_address(__nptr);
@@ -2087,25 +2090,25 @@ namespace __detail
 
   template<typename _NodeAlloc>
     typename _Hashtable_alloc<_NodeAlloc>::__bucket_type*
-    _Hashtable_alloc<_NodeAlloc>::_M_allocate_buckets(std::size_t __n)
+    _Hashtable_alloc<_NodeAlloc>::_M_allocate_buckets(std::size_t __bkt_count)
     {
       __bucket_alloc_type __alloc(_M_node_allocator());
 
-      auto __ptr = __bucket_alloc_traits::allocate(__alloc, __n);
+      auto __ptr = __bucket_alloc_traits::allocate(__alloc, __bkt_count);
       __bucket_type* __p = std::__to_address(__ptr);
-      __builtin_memset(__p, 0, __n * sizeof(__bucket_type));
+      __builtin_memset(__p, 0, __bkt_count * sizeof(__bucket_type));
       return __p;
     }
 
   template<typename _NodeAlloc>
     void
     _Hashtable_alloc<_NodeAlloc>::_M_deallocate_buckets(__bucket_type* __bkts,
-							std::size_t __n)
+							std::size_t __bkt_count)
     {
       typedef typename __bucket_alloc_traits::pointer _Ptr;
       auto __ptr = std::pointer_traits<_Ptr>::pointer_to(*__bkts);
       __bucket_alloc_type __alloc(_M_node_allocator());
-      __bucket_alloc_traits::deallocate(__alloc, __ptr, __n);
+      __bucket_alloc_traits::deallocate(__alloc, __ptr, __bkt_count);
     }
 
  //@} hashtable-detail


* Re: Hashtable comment cleanups & renamings
  2019-05-17 16:19 Hashtable comment cleanups & renamings François Dumont
@ 2019-05-17 20:24 ` Jonathan Wakely
  2019-05-20  5:52   ` François Dumont
  2019-05-21  5:42   ` François Dumont
  0 siblings, 2 replies; 6+ messages in thread
From: Jonathan Wakely @ 2019-05-17 20:24 UTC (permalink / raw)
  To: François Dumont; +Cc: libstdc++, gcc-patches

On 17/05/19 18:19 +0200, François Dumont wrote:
>Hi
>
>    I got tired of '__n' being used in _Hashtable for many different 
>purposes: node, bucket, bucket count, bucket hint. It makes the code 
>difficult to read. This patch makes sure that __n is a node except in
>some very limited use cases where the method name is clear enough to 
>tell what __n means.
>
>    So I'd like to commit this patch which only changes that and some
>comments before moving forward to more serious stuff. The only code 
>change is a use of auto return type on _M_allocate_node.
>
>    My main concern is the ChangeLog entry. Is the following entry ok ?
>
>    Rename variables and cleanup comments.
>    * include/bits/hashtable_policy.h
>    * include/bits/hashtable.h
>
>    Tested under Linux x86_64 (even if it can't be otherwise)
>
>François
>

>@@ -350,24 +347,24 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
>       _M_base_alloc() { return *this; }
> 
>       __bucket_type*
>-      _M_allocate_buckets(size_type __n)
>+      _M_allocate_buckets(size_type __bkt_count)

This is a much more helpful name, thanks.

>@@ -439,30 +436,31 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
>       { }
> 
>       explicit
>-      _Hashtable(size_type __n,
>+      _Hashtable(size_type __bkt_hint,

This seems less helpful. Would __num_bkts_hint be clearer?
Or for consistency, __bkt_count_hint?

>@@ -1415,9 +1414,9 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
>     -> iterator
>     {
>       __hash_code __code = this->_M_hash_code(__k);
>-      std::size_t __n = _M_bucket_index(__k, __code);
>-      __node_type* __p = _M_find_node(__n, __k, __code);
>-      return __p ? iterator(__p) : end();
>+      std::size_t __bkt = _M_bucket_index(__k, __code);
>+      __node_type* __n = _M_find_node(__bkt, __k, __code);
>+      return __n ? iterator(__n) : end();

Is __n really an improvement over __p here?

If you're changing it, __node or __ptr might be an improvement, but
changing __p to __n seems like unnecessary churn.

I'm not convinced that __n is a big enough improvement over __p to
bother changing dozens of lines, for not much benefit. All those
changes will make it slower to use git blame to track down when things
changed, and will make it harder to review diffs between trunk and
older branches.


>@@ -1479,17 +1478,17 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
>     -> pair<iterator, iterator>
>     {
>       __hash_code __code = this->_M_hash_code(__k);
>-      std::size_t __n = _M_bucket_index(__k, __code);
>-      __node_type* __p = _M_find_node(__n, __k, __code);
>+      std::size_t __bkt = _M_bucket_index(__k, __code);
>+      __node_type* __n = _M_find_node(__bkt, __k, __code);
> 
>-      if (__p)
>+      if (__n)
> 	{
>-	  __node_type* __p1 = __p->_M_next();
>-	  while (__p1 && _M_bucket_index(__p1) == __n
>-		 && this->_M_equals(__k, __code, __p1))
>-	    __p1 = __p1->_M_next();
>+	  __node_type* __n1 = __n->_M_next();

__p1 is not a good name, but __n1 is no better.

At least with __p the second pointer could be __q, which is a fairly
idiomatic pairing of letters :-)

How about __first and __last? Or __n and __next?  Even __n1 and __n2
seems better than __n and __n1. Those pointers end up being used for
the 'first' and 'second' members of a pair, so __n1 and __n2 makes
more sense than setting 'first' from __n and 'second' from __n1.
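
(For instance, a sketch of that naming applied to the equal_range hunk
above; this is only an illustration, not part of the patch:)

      __node_type* __n1 = _M_find_node(__bkt, __k, __code);
      if (__n1)
        {
          __node_type* __n2 = __n1->_M_next();
          while (__n2 && _M_bucket_index(__n2) == __bkt
                 && this->_M_equals(__k, __code, __n2))
            __n2 = __n2->_M_next();

          return std::make_pair(iterator(__n1), iterator(__n2));
        }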

But I don't feel strongly about it, so if it's just me who dislikes
__n and __n1 then it doesn't matter.

>diff --git a/libstdc++-v3/include/bits/hashtable_policy.h b/libstdc++-v3/include/bits/hashtable_policy.h
>index a4d2a97f4f3..bb2e7b762ff 100644
>--- a/libstdc++-v3/include/bits/hashtable_policy.h
>+++ b/libstdc++-v3/include/bits/hashtable_policy.h
>@@ -181,7 +181,7 @@ namespace __detail
>    *  @tparam _Cache_hash_code  Boolean value. True if the value of
>    *  the hash function is stored along with the value. This is a
>    *  time-space tradeoff.  Storing it may improve lookup speed by
>-   *  reducing the number of times we need to call the _Equal
>+   *  reducing the number of times we need to call the _Hash

Doesn't it reduce both?

In _M_equals we don't bother calling the _Equal predicate if the
cached hash code doesn't match the one for the key we're comparing.
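
(Roughly, a simplified sketch of that check; this is not the exact library
code and the helper names are only indicative:)

      // With cached hash codes, compare the stored code first and only
      // call the _Equal predicate when the codes match.
      bool
      _M_equals(const _Key& __k, __hash_code __c, __node_type* __n) const
      {
        return __c == __n->_M_hash_code
               && _M_eq()(__k, this->_M_extract()(__n->_M_v()));
      }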



* Re: Hashtable comment cleanups & renamings
  2019-05-17 20:24 ` Jonathan Wakely
@ 2019-05-20  5:52   ` François Dumont
  2019-05-21  5:42   ` François Dumont
  1 sibling, 0 replies; 6+ messages in thread
From: François Dumont @ 2019-05-20  5:52 UTC (permalink / raw)
  To: Jonathan Wakely; +Cc: libstdc++, gcc-patches

On 5/17/19 10:24 PM, Jonathan Wakely wrote:
> On 17/05/19 18:19 +0200, François Dumont wrote:
>> Hi
>>
>>     I got tired of '__n' being used in _Hashtable for many different 
>> purposes: node, bucket, bucket count, bucket hint. It makes the code 
>> difficult to read. This patch makes sure that __n is a node except in
>> some very limited use cases where the method name is clear enough to 
>> tell what __n means.
>>
>>     So I'd like to commit this patch which only changes that and some
>> comments before moving forward to more serious stuff. The only code 
>> change is a use of auto return type on _M_allocate_node.
>>
>>     My main concern is the ChangeLog entry. Is the following entry ok ?
>>
>>     Rename variables and cleanup comments.
>>     * include/bits/hashtable_policy.h
>>     * include/bits/hashtable.h
>>
>>     Tested under Linux x86_64 (even if it can't be otherwise)
>>
>> François
>>
>
>> @@ -350,24 +347,24 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
>>       _M_base_alloc() { return *this; }
>>
>>       __bucket_type*
>> -      _M_allocate_buckets(size_type __n)
>> +      _M_allocate_buckets(size_type __bkt_count)
>
> This is a much more helpful name, thanks.
>
>> @@ -439,30 +436,31 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
>>       { }
>>
>>       explicit
>> -      _Hashtable(size_type __n,
>> +      _Hashtable(size_type __bkt_hint,
>
> This seems less helpful. Would __num_bkts_hint be clearer?
> Or for consistency, __bkt_count_hint?

I also thought about a longer name like this one but then I considered
that I didn't want to make it too long and that '__bkt_hint' was enough
to know that this parameter was related to the buckets. But I can use
__bkt_count_hint if you prefer.

>
>> @@ -1415,9 +1414,9 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
>>     -> iterator
>>     {
>>       __hash_code __code = this->_M_hash_code(__k);
>> -      std::size_t __n = _M_bucket_index(__k, __code);
>> -      __node_type* __p = _M_find_node(__n, __k, __code);
>> -      return __p ? iterator(__p) : end();
>> +      std::size_t __bkt = _M_bucket_index(__k, __code);
>> +      __node_type* __n = _M_find_node(__bkt, __k, __code);
>> +      return __n ? iterator(__n) : end();
>
> Is __n really an improvement over __p here?

Outside any context, no, but within the _Hashtable implementation __n is
already used most of the time to indicate a node. This patch just tries
to fix this 'most of the time' part.


>
> If you're changing it, __node or __ptr might be an improvement, but
> changing __p to __n seems like unnecessary churn.
>
> I'm not convinced that __n is a big enough improvement over __p to
> bother changing dozens of lines, for not much benefit. All those
> changes will make it slower to use git blame to track down when things
> changed, and will make it harder to review diffs between trunk and
> older branches.

Yes, this is why I wanted to commit it outside of any real change so
that this commit can be ignored in any git log or blame more easily.

So I can limit the patch to renaming __n occurrences into __bkt,
__bkt_count_hint, or __bkt_count when possible and leave the other __n,
but also __p et al., untouched.

>
>
>> @@ -1479,17 +1478,17 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
>>     -> pair<iterator, iterator>
>>     {
>>       __hash_code __code = this->_M_hash_code(__k);
>> -      std::size_t __n = _M_bucket_index(__k, __code);
>> -      __node_type* __p = _M_find_node(__n, __k, __code);
>> +      std::size_t __bkt = _M_bucket_index(__k, __code);
>> +      __node_type* __n = _M_find_node(__bkt, __k, __code);
>>
>> -      if (__p)
>> +      if (__n)
>>     {
>> -      __node_type* __p1 = __p->_M_next();
>> -      while (__p1 && _M_bucket_index(__p1) == __n
>> -         && this->_M_equals(__k, __code, __p1))
>> -        __p1 = __p1->_M_next();
>> +      __node_type* __n1 = __n->_M_next();
>
> __p1 is not a good name, but __n1 is no better.
>
> At least with __p the second pointer could be __q, which is a fairly
> idiomatic pairing of letters :-)
>
> How about __first and __last? Or __n and __next?  Even __n1 and __n2
> seems better than __n and __n1. Those pointers end up being used for
> the 'first' and 'second' members of a pair, so __n1 and __n2 makes
> more sense than setting 'first' from __n and 'second' from __n1.
>
> But I don't feel strongly about it, so if it's just me who dislikes
> __n and __n1 then it doesn't matter.
>
>> diff --git a/libstdc++-v3/include/bits/hashtable_policy.h 
>> b/libstdc++-v3/include/bits/hashtable_policy.h
>> index a4d2a97f4f3..bb2e7b762ff 100644
>> --- a/libstdc++-v3/include/bits/hashtable_policy.h
>> +++ b/libstdc++-v3/include/bits/hashtable_policy.h
>> @@ -181,7 +181,7 @@ namespace __detail
>>    *  @tparam _Cache_hash_code  Boolean value. True if the value of
>>    *  the hash function is stored along with the value. This is a
>>    *  time-space tradeoff.  Storing it may improve lookup speed by
>> -   *  reducing the number of times we need to call the _Equal
>> +   *  reducing the number of times we need to call the _Hash
>
> Doesn't it reduce both?
>
> In _M_equals we don't bother calling the _Equal predicate if the
> cached hash code doesn't match the one for the key we're comparing.
>
>
>
Sure, it reduces both; I'll just add _Hash (listing it first).


* Re: Hashtable comment cleanups & renamings
  2019-05-17 20:24 ` Jonathan Wakely
  2019-05-20  5:52   ` François Dumont
@ 2019-05-21  5:42   ` François Dumont
  2019-05-27 21:20     ` François Dumont
  1 sibling, 1 reply; 6+ messages in thread
From: François Dumont @ 2019-05-21  5:42 UTC (permalink / raw)
  To: Jonathan Wakely; +Cc: libstdc++, gcc-patches

[-- Attachment #1: Type: text/plain, Size: 5263 bytes --]

Here is a simplified form.

     Rename variables and cleanup comments.
     * include/bits/hashtable_policy.h
     * include/bits/hashtable.h

Ok to commit ?

François

On 5/17/19 10:24 PM, Jonathan Wakely wrote:
> On 17/05/19 18:19 +0200, François Dumont wrote:
>> Hi
>>
>>     I got tired of '__n' being used in _Hashtable for many different 
>> purposes: node, bucket, bucket count, bucket hint. It makes the code 
>> difficult to read. This patch makes sure that __n is a node except in
>> some very limited use cases where the method name is clear enough to 
>> tell what __n means.
>>
>>     So I'd like to commit this patch which only changes that and some
>> comments before moving forward to more serious stuff. The only code 
>> change is a use of auto return type on _M_allocate_node.
>>
>>     My main concern is the ChangeLog entry. Is the following entry ok ?
>>
>>     Rename variables and cleanup comments.
>>     * include/bits/hashtable_policy.h
>>     * include/bits/hashtable.h
>>
>>     Tested under Linux x86_64 (even if it can't be otherwise)
>>
>> François
>>
>
>> @@ -350,24 +347,24 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
>>       _M_base_alloc() { return *this; }
>>
>>       __bucket_type*
>> -      _M_allocate_buckets(size_type __n)
>> +      _M_allocate_buckets(size_type __bkt_count)
>
> This is a much more helpful name, thanks.
>
>> @@ -439,30 +436,31 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
>>       { }
>>
>>       explicit
>> -      _Hashtable(size_type __n,
>> +      _Hashtable(size_type __bkt_hint,
>
> This seems less helpful. Would __num_bkts_hint be clearer?
> Or for consistency, __bkt_count_hint?
>
>> @@ -1415,9 +1414,9 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
>>     -> iterator
>>     {
>>       __hash_code __code = this->_M_hash_code(__k);
>> -      std::size_t __n = _M_bucket_index(__k, __code);
>> -      __node_type* __p = _M_find_node(__n, __k, __code);
>> -      return __p ? iterator(__p) : end();
>> +      std::size_t __bkt = _M_bucket_index(__k, __code);
>> +      __node_type* __n = _M_find_node(__bkt, __k, __code);
>> +      return __n ? iterator(__n) : end();
>
> Is __n really an improvement over __p here?
>
> If you're changing it, __node or __ptr might be an improvement, but
> changing __p to __n seems like unnecessary churn.
>
> I'm not convinced that __n is a big enough improvement over __p to
> bother changing dozens of lines, for not much benefit. All those
> changes will make it slower to use git blame to track down when things
> changed, and will make it harder to review diffs between trunk and
> older branches.
>
>
>> @@ -1479,17 +1478,17 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
>>     -> pair<iterator, iterator>
>>     {
>>       __hash_code __code = this->_M_hash_code(__k);
>> -      std::size_t __n = _M_bucket_index(__k, __code);
>> -      __node_type* __p = _M_find_node(__n, __k, __code);
>> +      std::size_t __bkt = _M_bucket_index(__k, __code);
>> +      __node_type* __n = _M_find_node(__bkt, __k, __code);
>>
>> -      if (__p)
>> +      if (__n)
>>     {
>> -      __node_type* __p1 = __p->_M_next();
>> -      while (__p1 && _M_bucket_index(__p1) == __n
>> -         && this->_M_equals(__k, __code, __p1))
>> -        __p1 = __p1->_M_next();
>> +      __node_type* __n1 = __n->_M_next();
>
> __p1 is not a good name, but __n1 is no better.
>
> At least with __p the second pointer could be __q, which is a fairly
> idiomatic pairing of letters :-)
>
> How about __first and __last? Or __n and __next?  Even __n1 and __n2
> seems better than __n and __n1. Those pointers end up being used for
> the 'first' and 'second' members of a pair, so __n1 and __n2 makes
> more sense than setting 'first' from __n and 'second' from __n1.
>
> But I don't feel strongly about it, so if it's just me who dislikes
> __n and __n1 then it doesn't matter.
>
>> diff --git a/libstdc++-v3/include/bits/hashtable_policy.h 
>> b/libstdc++-v3/include/bits/hashtable_policy.h
>> index a4d2a97f4f3..bb2e7b762ff 100644
>> --- a/libstdc++-v3/include/bits/hashtable_policy.h
>> +++ b/libstdc++-v3/include/bits/hashtable_policy.h
>> @@ -181,7 +181,7 @@ namespace __detail
>>    *  @tparam _Cache_hash_code  Boolean value. True if the value of
>>    *  the hash function is stored along with the value. This is a
>>    *  time-space tradeoff.  Storing it may improve lookup speed by
>> -   *  reducing the number of times we need to call the _Equal
>> +   *  reducing the number of times we need to call the _Hash
>
> Doesn't it reduce both?
>
> In _M_equals we don't bother calling the _Equal predicate if the
> cached hash code doesn't match the one for the key we're comparing.
>
>
>


[-- Attachment #2: hashtable_renames.patch --]
[-- Type: text/x-patch, Size: 34315 bytes --]

diff --git a/libstdc++-v3/include/bits/hashtable.h b/libstdc++-v3/include/bits/hashtable.h
index ab24b5bb537..444c247a315 100644
--- a/libstdc++-v3/include/bits/hashtable.h
+++ b/libstdc++-v3/include/bits/hashtable.h
@@ -253,7 +253,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 					    _Equal, _H1, _H2, _Hash,
 					    _RehashPolicy, _Traits>;
 
-      using __reuse_or_alloc_node_type =
+      using __reuse_or_alloc_node_gen_t =
 	__detail::_ReuseOrAllocNode<__node_alloc_type>;
 
       // Metaprogramming for picking apart hash caching.
@@ -278,9 +278,6 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 		    "Cache the hash code or qualify your functors involved"
 		    " in hash code and bucket index computation with noexcept");
 
-      // Following two static assertions are necessary to guarantee
-      // that local_iterator will be default constructible.
-
       // When hash codes are cached local iterator inherits from H2 functor
       // which must then be default constructible.
       static_assert(__if_hash_cached<is_default_constructible<_H2>>::value,
@@ -331,7 +328,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       _RehashPolicy		_M_rehash_policy;
 
       // A single bucket used when only need for 1 bucket. Especially
-      // interesting in move semantic to leave hashtable with only 1 buckets
+      // interesting in move semantic to leave hashtable with only 1 bucket
       // which is not allocated so that we can have those operations noexcept
       // qualified.
       // Note that we can't leave hashtable with 0 bucket without adding
@@ -350,24 +347,24 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       _M_base_alloc() { return *this; }
 
       __bucket_type*
-      _M_allocate_buckets(size_type __n)
+      _M_allocate_buckets(size_type __bkt_count)
       {
-	if (__builtin_expect(__n == 1, false))
+	if (__builtin_expect(__bkt_count == 1, false))
 	  {
 	    _M_single_bucket = nullptr;
 	    return &_M_single_bucket;
 	  }
 
-	return __hashtable_alloc::_M_allocate_buckets(__n);
+	return __hashtable_alloc::_M_allocate_buckets(__bkt_count);
       }
 
       void
-      _M_deallocate_buckets(__bucket_type* __bkts, size_type __n)
+      _M_deallocate_buckets(__bucket_type* __bkts, size_type __bkt_count)
       {
 	if (_M_uses_single_bucket(__bkts))
 	  return;
 
-	__hashtable_alloc::_M_deallocate_buckets(__bkts, __n);
+	__hashtable_alloc::_M_deallocate_buckets(__bkts, __bkt_count);
       }
 
       void
@@ -394,10 +391,10 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	_M_assign(const _Hashtable&, const _NodeGenerator&);
 
       void
-      _M_move_assign(_Hashtable&&, std::true_type);
+      _M_move_assign(_Hashtable&&, true_type);
 
       void
-      _M_move_assign(_Hashtable&&, std::false_type);
+      _M_move_assign(_Hashtable&&, false_type);
 
       void
       _M_reset() noexcept;
@@ -405,8 +402,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       _Hashtable(const _H1& __h1, const _H2& __h2, const _Hash& __h,
 		 const _Equal& __eq, const _ExtractKey& __exk,
 		 const allocator_type& __a)
-	: __hashtable_base(__exk, __h1, __h2, __h, __eq),
-	  __hashtable_alloc(__node_alloc_type(__a))
+      : __hashtable_base(__exk, __h1, __h2, __h, __eq),
+	__hashtable_alloc(__node_alloc_type(__a))
       { }
 
     public:
@@ -435,34 +432,35 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       // Use delegating constructors.
       explicit
       _Hashtable(const allocator_type& __a)
-	: __hashtable_alloc(__node_alloc_type(__a))
+      : __hashtable_alloc(__node_alloc_type(__a))
       { }
 
       explicit
-      _Hashtable(size_type __n,
+      _Hashtable(size_type __bkt_count_hint,
 		 const _H1& __hf = _H1(),
 		 const key_equal& __eql = key_equal(),
 		 const allocator_type& __a = allocator_type())
-      : _Hashtable(__n, __hf, _H2(), _Hash(), __eql,
+      : _Hashtable(__bkt_count_hint, __hf, _H2(), _Hash(), __eql,
 		   __key_extract(), __a)
       { }
 
       template<typename _InputIterator>
 	_Hashtable(_InputIterator __f, _InputIterator __l,
-		   size_type __n = 0,
+		   size_type __bkt_count_hint = 0,
 		   const _H1& __hf = _H1(),
 		   const key_equal& __eql = key_equal(),
 		   const allocator_type& __a = allocator_type())
-	: _Hashtable(__f, __l, __n, __hf, _H2(), _Hash(), __eql,
+	: _Hashtable(__f, __l, __bkt_count_hint, __hf, _H2(), _Hash(), __eql,
 		     __key_extract(), __a)
 	{ }
 
       _Hashtable(initializer_list<value_type> __l,
-		 size_type __n = 0,
+		 size_type __bkt_count_hint = 0,
 		 const _H1& __hf = _H1(),
 		 const key_equal& __eql = key_equal(),
 		 const allocator_type& __a = allocator_type())
-      : _Hashtable(__l.begin(), __l.end(), __n, __hf, _H2(), _Hash(), __eql,
+      : _Hashtable(__l.begin(), __l.end(), __bkt_count_hint,
+		   __hf, _H2(), _Hash(), __eql,
 		   __key_extract(), __a)
       { }
 
@@ -485,7 +483,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       _Hashtable&
       operator=(initializer_list<value_type> __l)
       {
-	__reuse_or_alloc_node_type __roan(_M_begin(), *this);
+	__reuse_or_alloc_node_gen_t __roan(_M_begin(), *this);
 	_M_before_begin._M_nxt = nullptr;
 	clear();
 	this->_M_insert_range(__l.begin(), __l.end(), __roan, __unique_keys());
@@ -557,46 +555,46 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       { return max_size(); }
 
       size_type
-      bucket_size(size_type __n) const
-      { return std::distance(begin(__n), end(__n)); }
+      bucket_size(size_type __bkt) const
+      { return std::distance(begin(__bkt), end(__bkt)); }
 
       size_type
       bucket(const key_type& __k) const
       { return _M_bucket_index(__k, this->_M_hash_code(__k)); }
 
       local_iterator
-      begin(size_type __n)
+      begin(size_type __bkt)
       {
-	return local_iterator(*this, _M_bucket_begin(__n),
-			      __n, _M_bucket_count);
+	return local_iterator(*this, _M_bucket_begin(__bkt),
+			      __bkt, _M_bucket_count);
       }
 
       local_iterator
-      end(size_type __n)
-      { return local_iterator(*this, nullptr, __n, _M_bucket_count); }
+      end(size_type __bkt)
+      { return local_iterator(*this, nullptr, __bkt, _M_bucket_count); }
 
       const_local_iterator
-      begin(size_type __n) const
+      begin(size_type __bkt) const
       {
-	return const_local_iterator(*this, _M_bucket_begin(__n),
-				    __n, _M_bucket_count);
+	return const_local_iterator(*this, _M_bucket_begin(__bkt),
+				    __bkt, _M_bucket_count);
       }
 
       const_local_iterator
-      end(size_type __n) const
-      { return const_local_iterator(*this, nullptr, __n, _M_bucket_count); }
+      end(size_type __bkt) const
+      { return const_local_iterator(*this, nullptr, __bkt, _M_bucket_count); }
 
       // DR 691.
       const_local_iterator
-      cbegin(size_type __n) const
+      cbegin(size_type __bkt) const
       {
-	return const_local_iterator(*this, _M_bucket_begin(__n),
-				    __n, _M_bucket_count);
+	return const_local_iterator(*this, _M_bucket_begin(__bkt),
+				    __bkt, _M_bucket_count);
       }
 
       const_local_iterator
-      cend(size_type __n) const
-      { return const_local_iterator(*this, nullptr, __n, _M_bucket_count); }
+      cend(size_type __bkt) const
+      { return const_local_iterator(*this, nullptr, __bkt, _M_bucket_count); }
 
       float
       load_factor() const noexcept
@@ -686,22 +684,22 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 
       template<typename... _Args>
 	std::pair<iterator, bool>
-	_M_emplace(std::true_type, _Args&&... __args);
+	_M_emplace(true_type, _Args&&... __args);
 
       template<typename... _Args>
 	iterator
-	_M_emplace(std::false_type __uk, _Args&&... __args)
+	_M_emplace(false_type __uk, _Args&&... __args)
 	{ return _M_emplace(cend(), __uk, std::forward<_Args>(__args)...); }
 
       // Emplace with hint, useless when keys are unique.
       template<typename... _Args>
 	iterator
-	_M_emplace(const_iterator, std::true_type __uk, _Args&&... __args)
+	_M_emplace(const_iterator, true_type __uk, _Args&&... __args)
 	{ return _M_emplace(__uk, std::forward<_Args>(__args)...).first; }
 
       template<typename... _Args>
 	iterator
-	_M_emplace(const_iterator, std::false_type, _Args&&... __args);
+	_M_emplace(const_iterator, false_type, _Args&&... __args);
 
       template<typename _Arg, typename _NodeGenerator>
 	std::pair<iterator, bool>
@@ -733,10 +731,10 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 		  const _NodeGenerator&, false_type);
 
       size_type
-      _M_erase(std::true_type, const key_type&);
+      _M_erase(true_type, const key_type&);
 
       size_type
-      _M_erase(std::false_type, const key_type&);
+      _M_erase(false_type, const key_type&);
 
       iterator
       _M_erase(size_type __bkt, __node_base* __prev_n, __node_type* __n);
@@ -777,8 +775,9 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       void
       clear() noexcept;
 
-      // Set number of buckets to be appropriate for container of n element.
-      void rehash(size_type __n);
+      // Set number of buckets keeping it appropriate for container's number
+      // of elements.
+      void rehash(size_type __bkt_count);
 
       // DR 1189.
       // reserve, if present, comes from _Rehash_base.
@@ -918,14 +917,14 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 
     private:
       // Helper rehash method used when keys are unique.
-      void _M_rehash_aux(size_type __n, std::true_type);
+      void _M_rehash_aux(size_type __bkt_count, true_type);
 
       // Helper rehash method used when keys can be non-unique.
-      void _M_rehash_aux(size_type __n, std::false_type);
+      void _M_rehash_aux(size_type __bkt_count, false_type);
 
       // Unconditionally change size of bucket array to n, restore
       // hash policy state to __state on exception.
-      void _M_rehash(size_type __n, const __rehash_state& __state);
+      void _M_rehash(size_type __bkt_count, const __rehash_state& __state);
     };
 
 
@@ -954,7 +953,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	       const _H1& __h1, const _H2& __h2, const _Hash& __h,
 	       const _Equal& __eq, const _ExtractKey& __exk,
 	       const allocator_type& __a)
-      : _Hashtable(__h1, __h2, __h, __eq, __exk, __a)
+    : _Hashtable(__h1, __h2, __h, __eq, __exk, __a)
     {
       auto __bkt = _M_rehash_policy._M_next_bkt(__bucket_hint);
       if (__bkt > _M_bucket_count)
@@ -976,7 +975,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 		 const _H1& __h1, const _H2& __h2, const _Hash& __h,
 		 const _Equal& __eq, const _ExtractKey& __exk,
 		 const allocator_type& __a)
-	: _Hashtable(__h1, __h2, __h, __eq, __exk, __a)
+      : _Hashtable(__h1, __h2, __h, __eq, __exk, __a)
       {
 	auto __nb_elems = __detail::__distance_fw(__f, __l);
 	auto __bkt_count =
@@ -1044,7 +1043,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 
       // Reuse allocated buckets and nodes.
       _M_assign_elements(__ht,
-	[](const __reuse_or_alloc_node_type& __roan, const __node_type* __n)
+	[](const __reuse_or_alloc_node_gen_t& __roan, const __node_type* __n)
 	{ return __roan(__n->_M_v()); });
       return *this;
     }
@@ -1078,7 +1077,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	    __hashtable_base::operator=(std::forward<_Ht>(__ht));
 	    _M_element_count = __ht._M_element_count;
 	    _M_rehash_policy = __ht._M_rehash_policy;
-	    __reuse_or_alloc_node_type __roan(_M_begin(), *this);
+	    __reuse_or_alloc_node_gen_t __roan(_M_begin(), *this);
 	    _M_before_begin._M_nxt = nullptr;
 	    _M_assign(__ht,
 		      [&__node_gen, &__roan](__node_type* __n)
@@ -1175,7 +1174,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     void
     _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 	       _H1, _H2, _Hash, _RehashPolicy, _Traits>::
-    _M_move_assign(_Hashtable&& __ht, std::true_type)
+    _M_move_assign(_Hashtable&& __ht, true_type)
     {
       this->_M_deallocate_nodes(_M_begin());
       _M_deallocate_buckets();
@@ -1207,15 +1206,15 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     void
     _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 	       _H1, _H2, _Hash, _RehashPolicy, _Traits>::
-    _M_move_assign(_Hashtable&& __ht, std::false_type)
+    _M_move_assign(_Hashtable&& __ht, false_type)
     {
       if (__ht._M_node_allocator() == this->_M_node_allocator())
-	_M_move_assign(std::move(__ht), std::true_type());
+	_M_move_assign(std::move(__ht), true_type());
       else
 	{
 	  // Can't move memory, move elements then.
 	  _M_assign_elements(std::move(__ht),
-		[](const __reuse_or_alloc_node_type& __roan, __node_type* __n)
+		[](const __reuse_or_alloc_node_gen_t& __roan, __node_type* __n)
 		{ return __roan(std::move_if_noexcept(__n->_M_v())); });
 	  __ht.clear();
 	}
@@ -1415,8 +1414,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     -> iterator
     {
       __hash_code __code = this->_M_hash_code(__k);
-      std::size_t __n = _M_bucket_index(__k, __code);
-      __node_type* __p = _M_find_node(__n, __k, __code);
+      std::size_t __bkt = _M_bucket_index(__k, __code);
+      __node_type* __p = _M_find_node(__bkt, __k, __code);
       return __p ? iterator(__p) : end();
     }
 
@@ -1431,8 +1430,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     -> const_iterator
     {
       __hash_code __code = this->_M_hash_code(__k);
-      std::size_t __n = _M_bucket_index(__k, __code);
-      __node_type* __p = _M_find_node(__n, __k, __code);
+      std::size_t __bkt = _M_bucket_index(__k, __code);
+      __node_type* __p = _M_find_node(__bkt, __k, __code);
       return __p ? const_iterator(__p) : end();
     }
 
@@ -1447,8 +1446,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     -> size_type
     {
       __hash_code __code = this->_M_hash_code(__k);
-      std::size_t __n = _M_bucket_index(__k, __code);
-      __node_type* __p = _M_bucket_begin(__n);
+      std::size_t __bkt = _M_bucket_index(__k, __code);
+      __node_type* __p = _M_bucket_begin(__bkt);
       if (!__p)
 	return 0;
 
@@ -1462,7 +1461,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	    // found a non-equivalent value after an equivalent one it
 	    // means that we won't find any new equivalent value.
 	    break;
-	  if (!__p->_M_nxt || _M_bucket_index(__p->_M_next()) != __n)
+	  if (!__p->_M_nxt || _M_bucket_index(__p->_M_next()) != __bkt)
 	    break;
 	}
       return __result;
@@ -1479,13 +1478,13 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     -> pair<iterator, iterator>
     {
       __hash_code __code = this->_M_hash_code(__k);
-      std::size_t __n = _M_bucket_index(__k, __code);
-      __node_type* __p = _M_find_node(__n, __k, __code);
+      std::size_t __bkt = _M_bucket_index(__k, __code);
+      __node_type* __p = _M_find_node(__bkt, __k, __code);
 
       if (__p)
 	{
 	  __node_type* __p1 = __p->_M_next();
-	  while (__p1 && _M_bucket_index(__p1) == __n
+	  while (__p1 && _M_bucket_index(__p1) == __bkt
 		 && this->_M_equals(__k, __code, __p1))
 	    __p1 = __p1->_M_next();
 
@@ -1506,13 +1505,13 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     -> pair<const_iterator, const_iterator>
     {
       __hash_code __code = this->_M_hash_code(__k);
-      std::size_t __n = _M_bucket_index(__k, __code);
-      __node_type* __p = _M_find_node(__n, __k, __code);
+      std::size_t __bkt = _M_bucket_index(__k, __code);
+      __node_type* __p = _M_find_node(__bkt, __k, __code);
 
       if (__p)
 	{
 	  __node_type* __p1 = __p->_M_next();
-	  while (__p1 && _M_bucket_index(__p1) == __n
+	  while (__p1 && _M_bucket_index(__p1) == __bkt
 		 && this->_M_equals(__k, __code, __p1))
 	    __p1 = __p1->_M_next();
 
@@ -1522,7 +1521,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	return std::make_pair(end(), end());
     }
 
-  // Find the node whose key compares equal to k in the bucket n.
+  // Find the node whose key compares equal to k in the bucket bkt.
   // Return nullptr if no node is found.
   template<typename _Key, typename _Value,
 	   typename _Alloc, typename _ExtractKey, typename _Equal,
@@ -1531,11 +1530,11 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     auto
     _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 	       _H1, _H2, _Hash, _RehashPolicy, _Traits>::
-    _M_find_before_node(size_type __n, const key_type& __k,
+    _M_find_before_node(size_type __bkt, const key_type& __k,
 			__hash_code __code) const
     -> __node_base*
     {
-      __node_base* __prev_p = _M_buckets[__n];
+      __node_base* __prev_p = _M_buckets[__bkt];
       if (!__prev_p)
 	return nullptr;
 
@@ -1545,7 +1544,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	  if (this->_M_equals(__k, __code, __p))
 	    return __prev_p;
 
-	  if (!__p->_M_nxt || _M_bucket_index(__p->_M_next()) != __n)
+	  if (!__p->_M_nxt || _M_bucket_index(__p->_M_next()) != __bkt)
 	    break;
 	  __prev_p = __p;
 	}
@@ -1631,11 +1630,12 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       auto
       _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 		 _H1, _H2, _Hash, _RehashPolicy, _Traits>::
-      _M_emplace(std::true_type, _Args&&... __args)
+      _M_emplace(true_type, _Args&&... __args)
       -> pair<iterator, bool>
       {
 	// First build the node to get access to the hash code
-	__node_type* __node = this->_M_allocate_node(std::forward<_Args>(__args)...);
+	__node_type* __node
+	  = this->_M_allocate_node(std::forward<_Args>(__args)...);
 	const key_type& __k = this->_M_extract()(__node->_M_v());
 	__hash_code __code;
 	__try
@@ -1669,7 +1669,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       auto
       _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 		 _H1, _H2, _Hash, _RehashPolicy, _Traits>::
-      _M_emplace(const_iterator __hint, std::false_type, _Args&&... __args)
+      _M_emplace(const_iterator __hint, false_type, _Args&&... __args)
       -> iterator
       {
 	// First build the node to get its hash code.
@@ -1711,7 +1711,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	  if (__do_rehash.first)
 	    {
 	      _M_rehash(__do_rehash.second, __saved_state);
-	      __bkt = _M_bucket_index(this->_M_extract()(__node->_M_v()), __code);
+	      __bkt
+		= _M_bucket_index(this->_M_extract()(__node->_M_v()), __code);
 	    }
 
 	  this->_M_store_code(__node, __code);
@@ -1896,7 +1897,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     auto
     _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 	       _H1, _H2, _Hash, _RehashPolicy, _Traits>::
-    _M_erase(std::true_type, const key_type& __k)
+    _M_erase(true_type, const key_type& __k)
     -> size_type
     {
       __hash_code __code = this->_M_hash_code(__k);
@@ -1920,7 +1921,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     auto
     _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 	       _H1, _H2, _Hash, _RehashPolicy, _Traits>::
-    _M_erase(std::false_type, const key_type& __k)
+    _M_erase(false_type, const key_type& __k)
     -> size_type
     {
       __hash_code __code = this->_M_hash_code(__k);
@@ -2038,16 +2039,16 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     void
     _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 	       _H1, _H2, _Hash, _RehashPolicy, _Traits>::
-    rehash(size_type __n)
+    rehash(size_type __bkt_count)
     {
       const __rehash_state& __saved_state = _M_rehash_policy._M_state();
-      std::size_t __buckets
+      __bkt_count
 	= std::max(_M_rehash_policy._M_bkt_for_elements(_M_element_count + 1),
-		   __n);
-      __buckets = _M_rehash_policy._M_next_bkt(__buckets);
+		   __bkt_count);
+      __bkt_count = _M_rehash_policy._M_next_bkt(__bkt_count);
 
-      if (__buckets != _M_bucket_count)
-	_M_rehash(__buckets, __saved_state);
+      if (__bkt_count != _M_bucket_count)
+	_M_rehash(__bkt_count, __saved_state);
       else
 	// No rehash, restore previous state to keep it consistent with
 	// container state.
@@ -2061,11 +2062,11 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     void
     _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 	       _H1, _H2, _Hash, _RehashPolicy, _Traits>::
-    _M_rehash(size_type __n, const __rehash_state& __state)
+    _M_rehash(size_type __bkt_count, const __rehash_state& __state)
     {
       __try
 	{
-	  _M_rehash_aux(__n, __unique_keys());
+	  _M_rehash_aux(__bkt_count, __unique_keys());
 	}
       __catch(...)
 	{
@@ -2084,16 +2085,17 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     void
     _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 	       _H1, _H2, _Hash, _RehashPolicy, _Traits>::
-    _M_rehash_aux(size_type __n, std::true_type)
+    _M_rehash_aux(size_type __bkt_count, true_type)
     {
-      __bucket_type* __new_buckets = _M_allocate_buckets(__n);
+      __bucket_type* __new_buckets = _M_allocate_buckets(__bkt_count);
       __node_type* __p = _M_begin();
       _M_before_begin._M_nxt = nullptr;
       std::size_t __bbegin_bkt = 0;
       while (__p)
 	{
 	  __node_type* __next = __p->_M_next();
-	  std::size_t __bkt = __hash_code_base::_M_bucket_index(__p, __n);
+	  std::size_t __bkt
+	    = __hash_code_base::_M_bucket_index(__p, __bkt_count);
 	  if (!__new_buckets[__bkt])
 	    {
 	      __p->_M_nxt = _M_before_begin._M_nxt;
@@ -2112,7 +2114,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	}
 
       _M_deallocate_buckets();
-      _M_bucket_count = __n;
+      _M_bucket_count = __bkt_count;
       _M_buckets = __new_buckets;
     }
 
@@ -2125,9 +2127,9 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     void
     _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 	       _H1, _H2, _Hash, _RehashPolicy, _Traits>::
-    _M_rehash_aux(size_type __n, std::false_type)
+    _M_rehash_aux(size_type __bkt_count, false_type)
     {
-      __bucket_type* __new_buckets = _M_allocate_buckets(__n);
+      __bucket_type* __new_buckets = _M_allocate_buckets(__bkt_count);
 
       __node_type* __p = _M_begin();
       _M_before_begin._M_nxt = nullptr;
@@ -2139,7 +2141,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       while (__p)
 	{
 	  __node_type* __next = __p->_M_next();
-	  std::size_t __bkt = __hash_code_base::_M_bucket_index(__p, __n);
+	  std::size_t __bkt
+	    = __hash_code_base::_M_bucket_index(__p, __bkt_count);
 
 	  if (__prev_p && __prev_bkt == __bkt)
 	    {
@@ -2166,7 +2169,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 		    {
 		      std::size_t __next_bkt
 			= __hash_code_base::_M_bucket_index(__prev_p->_M_next(),
-							    __n);
+							    __bkt_count);
 		      if (__next_bkt != __prev_bkt)
 			__new_buckets[__next_bkt] = __prev_p;
 		    }
@@ -2196,13 +2199,14 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       if (__check_bucket && __prev_p->_M_nxt)
 	{
 	  std::size_t __next_bkt
-	    = __hash_code_base::_M_bucket_index(__prev_p->_M_next(), __n);
+	    = __hash_code_base::_M_bucket_index(__prev_p->_M_next(),
+						__bkt_count);
 	  if (__next_bkt != __prev_bkt)
 	    __new_buckets[__next_bkt] = __prev_p;
 	}
 
       _M_deallocate_buckets();
-      _M_bucket_count = __n;
+      _M_bucket_count = __bkt_count;
       _M_buckets = __new_buckets;
     }
 
diff --git a/libstdc++-v3/include/bits/hashtable_policy.h b/libstdc++-v3/include/bits/hashtable_policy.h
index a4d2a97f4f3..878154ae210 100644
--- a/libstdc++-v3/include/bits/hashtable_policy.h
+++ b/libstdc++-v3/include/bits/hashtable_policy.h
@@ -111,7 +111,7 @@ namespace __detail
 
     public:
       _ReuseOrAllocNode(__node_type* __nodes, __hashtable_alloc& __h)
-	: _M_nodes(__nodes), _M_h(__h) { }
+      : _M_nodes(__nodes), _M_h(__h) { }
       _ReuseOrAllocNode(const _ReuseOrAllocNode&) = delete;
 
       ~_ReuseOrAllocNode()
@@ -159,7 +159,7 @@ namespace __detail
 
     public:
       _AllocNode(__hashtable_alloc& __h)
-	: _M_h(__h) { }
+      : _M_h(__h) { }
 
       template<typename _Arg>
 	__node_type*
@@ -181,8 +181,8 @@ namespace __detail
    *  @tparam _Cache_hash_code  Boolean value. True if the value of
    *  the hash function is stored along with the value. This is a
    *  time-space tradeoff.  Storing it may improve lookup speed by
-   *  reducing the number of times we need to call the _Equal
-   *  function.
+   *  reducing the number of times we need to call the _Hash or _Equal
+   *  functors.
    *
    *  @tparam _Constant_iterators  Boolean value. True if iterator and
    *  const_iterator are both constant iterator types. This is true
@@ -444,7 +444,7 @@ namespace __detail
   /// smallest prime that keeps the load factor small enough.
   struct _Prime_rehash_policy
   {
-    using __has_load_factor = std::true_type;
+    using __has_load_factor = true_type;
 
     _Prime_rehash_policy(float __z = 1.0) noexcept
     : _M_max_load_factor(__z), _M_next_resize(0) { }
@@ -521,7 +521,7 @@ namespace __detail
   /// operations.
   struct _Power2_rehash_policy
   {
-    using __has_load_factor = std::true_type;
+    using __has_load_factor = true_type;
 
     _Power2_rehash_policy(float __z = 1.0) noexcept
     : _M_max_load_factor(__z), _M_next_resize(0) { }
@@ -705,15 +705,15 @@ namespace __detail
     {
       __hashtable* __h = static_cast<__hashtable*>(this);
       __hash_code __code = __h->_M_hash_code(__k);
-      std::size_t __n = __h->_M_bucket_index(__k, __code);
-      __node_type* __p = __h->_M_find_node(__n, __k, __code);
+      std::size_t __bkt = __h->_M_bucket_index(__k, __code);
+      __node_type* __p = __h->_M_find_node(__bkt, __k, __code);
 
       if (!__p)
 	{
 	  __p = __h->_M_allocate_node(std::piecewise_construct,
 				      std::tuple<const key_type&>(__k),
 				      std::tuple<>());
-	  return __h->_M_insert_unique_node(__n, __code, __p)->second;
+	  return __h->_M_insert_unique_node(__bkt, __code, __p)->second;
 	}
 
       return __p->_M_v().second;
@@ -730,15 +730,15 @@ namespace __detail
     {
       __hashtable* __h = static_cast<__hashtable*>(this);
       __hash_code __code = __h->_M_hash_code(__k);
-      std::size_t __n = __h->_M_bucket_index(__k, __code);
-      __node_type* __p = __h->_M_find_node(__n, __k, __code);
+      std::size_t __bkt = __h->_M_bucket_index(__k, __code);
+      __node_type* __p = __h->_M_find_node(__bkt, __k, __code);
 
       if (!__p)
 	{
 	  __p = __h->_M_allocate_node(std::piecewise_construct,
 				      std::forward_as_tuple(std::move(__k)),
 				      std::tuple<>());
-	  return __h->_M_insert_unique_node(__n, __code, __p)->second;
+	  return __h->_M_insert_unique_node(__bkt, __code, __p)->second;
 	}
 
       return __p->_M_v().second;
@@ -755,8 +755,8 @@ namespace __detail
     {
       __hashtable* __h = static_cast<__hashtable*>(this);
       __hash_code __code = __h->_M_hash_code(__k);
-      std::size_t __n = __h->_M_bucket_index(__k, __code);
-      __node_type* __p = __h->_M_find_node(__n, __k, __code);
+      std::size_t __bkt = __h->_M_bucket_index(__k, __code);
+      __node_type* __p = __h->_M_find_node(__bkt, __k, __code);
 
       if (!__p)
 	__throw_out_of_range(__N("_Map_base::at"));
@@ -774,8 +774,8 @@ namespace __detail
     {
       const __hashtable* __h = static_cast<const __hashtable*>(this);
       __hash_code __code = __h->_M_hash_code(__k);
-      std::size_t __n = __h->_M_bucket_index(__k, __code);
-      __node_type* __p = __h->_M_find_node(__n, __k, __code);
+      std::size_t __bkt = __h->_M_bucket_index(__k, __code);
+      __node_type* __p = __h->_M_find_node(__bkt, __k, __code);
 
       if (!__p)
 	__throw_out_of_range(__N("_Map_base::at"));
@@ -1041,7 +1041,7 @@ namespace __detail
 	   typename _H1, typename _H2, typename _Hash,
 	   typename _RehashPolicy, typename _Traits,
 	   typename =
-	     __detected_or_t<std::false_type, __has_load_factor, _RehashPolicy>>
+	     __detected_or_t<false_type, __has_load_factor, _RehashPolicy>>
     struct _Rehash_base;
 
   /// Specialization when rehash policy doesn't provide load factor management.
@@ -1051,7 +1051,7 @@ namespace __detail
 	   typename _RehashPolicy, typename _Traits>
     struct _Rehash_base<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 		      _H1, _H2, _Hash, _RehashPolicy, _Traits,
-		      std::false_type>
+		      false_type>
     {
     };
 
@@ -1062,7 +1062,7 @@ namespace __detail
 	   typename _RehashPolicy, typename _Traits>
     struct _Rehash_base<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 			_H1, _H2, _Hash, _RehashPolicy, _Traits,
-			std::true_type>
+			true_type>
     {
       using __hashtable = _Hashtable<_Key, _Value, _Alloc, _ExtractKey,
 				     _Equal, _H1, _H2, _Hash,
@@ -1109,7 +1109,7 @@ namespace __detail
 
       template<typename _OtherTp>
 	_Hashtable_ebo_helper(_OtherTp&& __tp)
-	  : _Tp(std::forward<_OtherTp>(__tp))
+	: _Tp(std::forward<_OtherTp>(__tp))
 	{ }
 
       const _Tp& _M_cget() const { return static_cast<const _Tp&>(*this); }
@@ -1124,7 +1124,7 @@ namespace __detail
 
       template<typename _OtherTp>
 	_Hashtable_ebo_helper(_OtherTp&& __tp)
-	  : _M_tp(std::forward<_OtherTp>(__tp))
+	: _M_tp(std::forward<_OtherTp>(__tp))
 	{ }
 
       const _Tp& _M_cget() const { return _M_tp; }
@@ -1199,14 +1199,15 @@ namespace __detail
       { return 0; }
 
       std::size_t
-      _M_bucket_index(const _Key& __k, __hash_code, std::size_t __n) const
-      { return _M_ranged_hash()(__k, __n); }
+      _M_bucket_index(const _Key& __k, __hash_code,
+		      std::size_t __bkt_count) const
+      { return _M_ranged_hash()(__k, __bkt_count); }
 
       std::size_t
-      _M_bucket_index(const __node_type* __p, std::size_t __n) const
+      _M_bucket_index(const __node_type* __p, std::size_t __bkt_count) const
 	noexcept( noexcept(declval<const _Hash&>()(declval<const _Key&>(),
 						   (std::size_t)0)) )
-      { return _M_ranged_hash()(_M_extract()(__p->_M_v()), __n); }
+      { return _M_ranged_hash()(_M_extract()(__p->_M_v()), __bkt_count); }
 
       void
       _M_store_code(__node_type*, __hash_code) const
@@ -1290,15 +1291,16 @@ namespace __detail
       }
 
       std::size_t
-      _M_bucket_index(const _Key&, __hash_code __c, std::size_t __n) const
-      { return _M_h2()(__c, __n); }
+      _M_bucket_index(const _Key&, __hash_code __c,
+		      std::size_t __bkt_count) const
+      { return _M_h2()(__c, __bkt_count); }
 
       std::size_t
-      _M_bucket_index(const __node_type* __p, std::size_t __n) const
+      _M_bucket_index(const __node_type* __p, std::size_t __bkt_count) const
 	noexcept( noexcept(declval<const _H1&>()(declval<const _Key&>()))
 		  && noexcept(declval<const _H2&>()((__hash_code)0,
 						    (std::size_t)0)) )
-      { return _M_h2()(_M_h1()(_M_extract()(__p->_M_v())), __n); }
+      { return _M_h2()(_M_h1()(_M_extract()(__p->_M_v())), __bkt_count); }
 
       void
       _M_store_code(__node_type*, __hash_code) const
@@ -1375,14 +1377,14 @@ namespace __detail
 
       std::size_t
       _M_bucket_index(const _Key&, __hash_code __c,
-		      std::size_t __n) const
-      { return _M_h2()(__c, __n); }
+		      std::size_t __bkt_count) const
+      { return _M_h2()(__c, __bkt_count); }
 
       std::size_t
-      _M_bucket_index(const __node_type* __p, std::size_t __n) const
+      _M_bucket_index(const __node_type* __p, std::size_t __bkt_count) const
 	noexcept( noexcept(declval<const _H2&>()((__hash_code)0,
 						 (std::size_t)0)) )
-      { return _M_h2()(__p->_M_hash_code, __n); }
+      { return _M_h2()(__p->_M_hash_code, __bkt_count); }
 
       void
       _M_store_code(__node_type* __n, __hash_code __c) const
@@ -1615,9 +1617,9 @@ namespace __detail
       _Local_iterator() = default;
 
       _Local_iterator(const __hash_code_base& __base,
-		      _Hash_node<_Value, __cache>* __p,
+		      _Hash_node<_Value, __cache>* __n,
 		      std::size_t __bkt, std::size_t __bkt_count)
-	: __base_type(__base, __p, __bkt, __bkt_count)
+      : __base_type(__base, __n, __bkt, __bkt_count)
       { }
 
       reference
@@ -1667,16 +1669,16 @@ namespace __detail
       _Local_const_iterator() = default;
 
       _Local_const_iterator(const __hash_code_base& __base,
-			    _Hash_node<_Value, __cache>* __p,
+			    _Hash_node<_Value, __cache>* __n,
 			    std::size_t __bkt, std::size_t __bkt_count)
-	: __base_type(__base, __p, __bkt, __bkt_count)
+      : __base_type(__base, __n, __bkt, __bkt_count)
       { }
 
       _Local_const_iterator(const _Local_iterator<_Key, _Value, _ExtractKey,
 						  _H1, _H2, _Hash,
 						  __constant_iterators,
 						  __cache>& __x)
-	: __base_type(__x)
+      : __base_type(__x)
       { }
 
       reference
@@ -1999,7 +2001,7 @@ namespace __detail
 
       template<typename _Alloc>
 	_Hashtable_alloc(_Alloc&& __a)
-	  : __ebo_node_alloc(std::forward<_Alloc>(__a))
+	: __ebo_node_alloc(std::forward<_Alloc>(__a))
 	{ }
 
       __node_alloc_type&
@@ -2025,18 +2027,19 @@ namespace __detail
       _M_deallocate_nodes(__node_type* __n);
 
       __bucket_type*
-      _M_allocate_buckets(std::size_t __n);
+      _M_allocate_buckets(std::size_t __bkt_count);
 
       void
-      _M_deallocate_buckets(__bucket_type*, std::size_t __n);
+      _M_deallocate_buckets(__bucket_type*, std::size_t __bkt_count);
     };
 
   // Definitions of class template _Hashtable_alloc's out-of-line member
   // functions.
   template<typename _NodeAlloc>
     template<typename... _Args>
-      typename _Hashtable_alloc<_NodeAlloc>::__node_type*
+      auto
       _Hashtable_alloc<_NodeAlloc>::_M_allocate_node(_Args&&... __args)
+      -> __node_type*
       {
 	auto __nptr = __node_alloc_traits::allocate(_M_node_allocator(), 1);
 	__node_type* __n = std::__to_address(__nptr);
@@ -2087,25 +2090,25 @@ namespace __detail
 
   template<typename _NodeAlloc>
     typename _Hashtable_alloc<_NodeAlloc>::__bucket_type*
-    _Hashtable_alloc<_NodeAlloc>::_M_allocate_buckets(std::size_t __n)
+    _Hashtable_alloc<_NodeAlloc>::_M_allocate_buckets(std::size_t __bkt_count)
     {
       __bucket_alloc_type __alloc(_M_node_allocator());
 
-      auto __ptr = __bucket_alloc_traits::allocate(__alloc, __n);
+      auto __ptr = __bucket_alloc_traits::allocate(__alloc, __bkt_count);
       __bucket_type* __p = std::__to_address(__ptr);
-      __builtin_memset(__p, 0, __n * sizeof(__bucket_type));
+      __builtin_memset(__p, 0, __bkt_count * sizeof(__bucket_type));
       return __p;
     }
 
   template<typename _NodeAlloc>
     void
     _Hashtable_alloc<_NodeAlloc>::_M_deallocate_buckets(__bucket_type* __bkts,
-							std::size_t __n)
+							std::size_t __bkt_count)
     {
       typedef typename __bucket_alloc_traits::pointer _Ptr;
       auto __ptr = std::pointer_traits<_Ptr>::pointer_to(*__bkts);
       __bucket_alloc_type __alloc(_M_node_allocator());
-      __bucket_alloc_traits::deallocate(__alloc, __ptr, __n);
+      __bucket_alloc_traits::deallocate(__alloc, __ptr, __bkt_count);
     }
 
  //@} hashtable-detail

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: Hashtable comment cleanups & renamings
  2019-05-21  5:42   ` François Dumont
@ 2019-05-27 21:20     ` François Dumont
  2019-05-31 10:53       ` Jonathan Wakely
  0 siblings, 1 reply; 6+ messages in thread
From: François Dumont @ 2019-05-27 21:20 UTC (permalink / raw)
  To: Jonathan Wakely; +Cc: libstdc++, gcc-patches

[-- Attachment #1: Type: text/plain, Size: 5605 bytes --]

I had missed some occurrences of __bucket_hint to replace with
__bkt_count_hint, so here is a new version.

Ok to commit with the simple ChangeLog entry below?


On 5/21/19 7:42 AM, François Dumont wrote:
> Here is a simplified form.
>
>     Rename variables and cleanup comments.
>     * include/bits/hashtable_policy.h
>     * include/bits/hashtable.h
>
> Ok to commit ?
>
> François
>
> On 5/17/19 10:24 PM, Jonathan Wakely wrote:
>> On 17/05/19 18:19 +0200, François Dumont wrote:
>>> Hi
>>>
>>>     I got tired of '__n' being used in _Hashtable for many different 
>>> purposes: node, bucket, bucket count, bucket hint. It makes the code 
>>> difficult to read. This code makes sure that __n is a node except is 
>>> some very limited use cases where the method name is clear enough to 
>>> tell what __n means.
>>>
>>>     So I'd like to commit this patch which only change that and some 
>>> comments before moving forward to more serious stuff. The only code 
>>> change is a use of auto return type on _M_allocate_node.
>>>
>>>     My main concern is the ChangeLog entry. Is the following entry ok ?
>>>
>>>     Rename variables and cleanup comments.
>>>     * include/bits/hashtable_policy.h
>>>     * include/bits/hashtable.h
>>>
>>>     Tested under Linux x86_64 (even if it can't be otherwise)
>>>
>>> François
>>>
>>
>>> @@ -350,24 +347,24 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
>>>       _M_base_alloc() { return *this; }
>>>
>>>       __bucket_type*
>>> -      _M_allocate_buckets(size_type __n)
>>> +      _M_allocate_buckets(size_type __bkt_count)
>>
>> This is a much more helpful name, thanks.
>>
>>> @@ -439,30 +436,31 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
>>>       { }
>>>
>>>       explicit
>>> -      _Hashtable(size_type __n,
>>> +      _Hashtable(size_type __bkt_hint,
>>
>> This seems less helpful. Would __num_bkts_hint be clearer?
>> Or for consistency, __bkt_count_hint?
>>
>>> @@ -1415,9 +1414,9 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
>>>     -> iterator
>>>     {
>>>       __hash_code __code = this->_M_hash_code(__k);
>>> -      std::size_t __n = _M_bucket_index(__k, __code);
>>> -      __node_type* __p = _M_find_node(__n, __k, __code);
>>> -      return __p ? iterator(__p) : end();
>>> +      std::size_t __bkt = _M_bucket_index(__k, __code);
>>> +      __node_type* __n = _M_find_node(__bkt, __k, __code);
>>> +      return __n ? iterator(__n) : end();
>>
>> Is __n really an improvement over __p here?
>>
>> If you're changing it, __node or __ptr might be an improvement, but
>> changing __p to __n seems like unnecessary churn.
>>
>> I'm not convinced that __n is a big enough improvement over __p to
>> bother changing dozens of lines, for not much benefit. All those
>> changes will make it slower to use git blame to track down when things
>> changed, and will make it harder to review diffs between trunk and
>> older branches.
>>
>>
>>> @@ -1479,17 +1478,17 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
>>>     -> pair<iterator, iterator>
>>>     {
>>>       __hash_code __code = this->_M_hash_code(__k);
>>> -      std::size_t __n = _M_bucket_index(__k, __code);
>>> -      __node_type* __p = _M_find_node(__n, __k, __code);
>>> +      std::size_t __bkt = _M_bucket_index(__k, __code);
>>> +      __node_type* __n = _M_find_node(__bkt, __k, __code);
>>>
>>> -      if (__p)
>>> +      if (__n)
>>>     {
>>> -      __node_type* __p1 = __p->_M_next();
>>> -      while (__p1 && _M_bucket_index(__p1) == __n
>>> -         && this->_M_equals(__k, __code, __p1))
>>> -        __p1 = __p1->_M_next();
>>> +      __node_type* __n1 = __n->_M_next();
>>
>> __p1 is not a good name, but __n1 is no better.
>>
>> At least with __p the second pointer could be __q, which is a fairly
>> idiomatic pairing of letters :-)
>>
>> How about __first and __last? Or __n and __next?  Even __n1 and __n2
>> seems better than __n and __n1. Those pointers end up being used for
>> the 'first' and 'second' members of a pair, so __n1 and __n2 makes
>> more sense than setting 'first' from __n and 'second' from __n1.
>>
>> But I don't feel strongly about it, so if it's just me who dislikes
>> __n and __n1 then it doesn't matter.
>>
>>> diff --git a/libstdc++-v3/include/bits/hashtable_policy.h 
>>> b/libstdc++-v3/include/bits/hashtable_policy.h
>>> index a4d2a97f4f3..bb2e7b762ff 100644
>>> --- a/libstdc++-v3/include/bits/hashtable_policy.h
>>> +++ b/libstdc++-v3/include/bits/hashtable_policy.h
>>> @@ -181,7 +181,7 @@ namespace __detail
>>>    *  @tparam _Cache_hash_code  Boolean value. True if the value of
>>>    *  the hash function is stored along with the value. This is a
>>>    *  time-space tradeoff.  Storing it may improve lookup speed by
>>> -   *  reducing the number of times we need to call the _Equal
>>> +   *  reducing the number of times we need to call the _Hash
>>
>> Doesn't it reduce both?
>>
>> In _M_equals we don't bother calling the _Equal predicate if the
>> cached hash code doesn't match the one for the key we're comparing.
>>
>>
>>
>
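
As a footnote to the __n1/__n2 exchange quoted above, a simplified sketch
(hypothetical node type and helper, not the _Hashtable code itself) of how
the two pointers end up as the 'first' and 'second' members of the pair
returned by an equal_range-style lookup:

#include <utility>

struct node
{
  int   key;
  node* next;
};

// Hypothetical helper: assumes 'first' already points at the first node
// whose key is equal to k and that equivalent keys are adjacent, as in the
// bucket lists discussed above.  Returns the half-open range [first, last).
std::pair<node*, node*>
bucket_equal_range(node* first, int k)
{
  node* last = first->next;
  while (last && last->key == k)
    last = last->next;
  return { first, last };   // becomes the pair's 'first' and 'second'
}

int main()
{
  node c{7, nullptr};
  node b{5, &c};
  node a{5, &b};            // list: 5 -> 5 -> 7

  auto r = bucket_equal_range(&a, 5);
  return (r.first == &a && r.second == &c) ? 0 : 1;
}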


[-- Attachment #2: hashtable_renames.patch --]
[-- Type: text/x-patch, Size: 33664 bytes --]

diff --git a/libstdc++-v3/include/bits/hashtable.h b/libstdc++-v3/include/bits/hashtable.h
index ab24b5bb537..33711ea4573 100644
--- a/libstdc++-v3/include/bits/hashtable.h
+++ b/libstdc++-v3/include/bits/hashtable.h
@@ -253,7 +253,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 					    _Equal, _H1, _H2, _Hash,
 					    _RehashPolicy, _Traits>;
 
-      using __reuse_or_alloc_node_type =
+      using __reuse_or_alloc_node_gen_t =
 	__detail::_ReuseOrAllocNode<__node_alloc_type>;
 
       // Metaprogramming for picking apart hash caching.
@@ -278,9 +278,6 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 		    "Cache the hash code or qualify your functors involved"
 		    " in hash code and bucket index computation with noexcept");
 
-      // Following two static assertions are necessary to guarantee
-      // that local_iterator will be default constructible.
-
       // When hash codes are cached local iterator inherits from H2 functor
       // which must then be default constructible.
       static_assert(__if_hash_cached<is_default_constructible<_H2>>::value,
@@ -331,7 +328,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       _RehashPolicy		_M_rehash_policy;
 
       // A single bucket used when only need for 1 bucket. Especially
-      // interesting in move semantic to leave hashtable with only 1 buckets
+      // interesting in move semantic to leave hashtable with only 1 bucket
       // which is not allocated so that we can have those operations noexcept
       // qualified.
       // Note that we can't leave hashtable with 0 bucket without adding
@@ -350,24 +347,24 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       _M_base_alloc() { return *this; }
 
       __bucket_type*
-      _M_allocate_buckets(size_type __n)
+      _M_allocate_buckets(size_type __bkt_count)
       {
-	if (__builtin_expect(__n == 1, false))
+	if (__builtin_expect(__bkt_count == 1, false))
 	  {
 	    _M_single_bucket = nullptr;
 	    return &_M_single_bucket;
 	  }
 
-	return __hashtable_alloc::_M_allocate_buckets(__n);
+	return __hashtable_alloc::_M_allocate_buckets(__bkt_count);
       }
 
       void
-      _M_deallocate_buckets(__bucket_type* __bkts, size_type __n)
+      _M_deallocate_buckets(__bucket_type* __bkts, size_type __bkt_count)
       {
 	if (_M_uses_single_bucket(__bkts))
 	  return;
 
-	__hashtable_alloc::_M_deallocate_buckets(__bkts, __n);
+	__hashtable_alloc::_M_deallocate_buckets(__bkts, __bkt_count);
       }
 
       void
@@ -394,10 +391,10 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	_M_assign(const _Hashtable&, const _NodeGenerator&);
 
       void
-      _M_move_assign(_Hashtable&&, std::true_type);
+      _M_move_assign(_Hashtable&&, true_type);
 
       void
-      _M_move_assign(_Hashtable&&, std::false_type);
+      _M_move_assign(_Hashtable&&, false_type);
 
       void
       _M_reset() noexcept;
@@ -412,14 +409,14 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     public:
       // Constructor, destructor, assignment, swap
       _Hashtable() = default;
-      _Hashtable(size_type __bucket_hint,
+      _Hashtable(size_type __bkt_count_hint,
 		 const _H1&, const _H2&, const _Hash&,
 		 const _Equal&, const _ExtractKey&,
 		 const allocator_type&);
 
       template<typename _InputIterator>
 	_Hashtable(_InputIterator __first, _InputIterator __last,
-		   size_type __bucket_hint,
+		   size_type __bkt_count_hint,
 		   const _H1&, const _H2&, const _Hash&,
 		   const _Equal&, const _ExtractKey&,
 		   const allocator_type&);
@@ -439,30 +436,31 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       { }
 
       explicit
-      _Hashtable(size_type __n,
+      _Hashtable(size_type __bkt_count_hint,
 		 const _H1& __hf = _H1(),
 		 const key_equal& __eql = key_equal(),
 		 const allocator_type& __a = allocator_type())
-      : _Hashtable(__n, __hf, _H2(), _Hash(), __eql,
+      : _Hashtable(__bkt_count_hint, __hf, _H2(), _Hash(), __eql,
 		   __key_extract(), __a)
       { }
 
       template<typename _InputIterator>
 	_Hashtable(_InputIterator __f, _InputIterator __l,
-		   size_type __n = 0,
+		   size_type __bkt_count_hint = 0,
 		   const _H1& __hf = _H1(),
 		   const key_equal& __eql = key_equal(),
 		   const allocator_type& __a = allocator_type())
-	: _Hashtable(__f, __l, __n, __hf, _H2(), _Hash(), __eql,
+	: _Hashtable(__f, __l, __bkt_count_hint, __hf, _H2(), _Hash(), __eql,
 		     __key_extract(), __a)
 	{ }
 
       _Hashtable(initializer_list<value_type> __l,
-		 size_type __n = 0,
+		 size_type __bkt_count_hint = 0,
 		 const _H1& __hf = _H1(),
 		 const key_equal& __eql = key_equal(),
 		 const allocator_type& __a = allocator_type())
-      : _Hashtable(__l.begin(), __l.end(), __n, __hf, _H2(), _Hash(), __eql,
+      : _Hashtable(__l.begin(), __l.end(), __bkt_count_hint,
+		   __hf, _H2(), _Hash(), __eql,
 		   __key_extract(), __a)
       { }
 
@@ -485,7 +483,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       _Hashtable&
       operator=(initializer_list<value_type> __l)
       {
-	__reuse_or_alloc_node_type __roan(_M_begin(), *this);
+	__reuse_or_alloc_node_gen_t __roan(_M_begin(), *this);
 	_M_before_begin._M_nxt = nullptr;
 	clear();
 	this->_M_insert_range(__l.begin(), __l.end(), __roan, __unique_keys());
@@ -557,46 +555,46 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       { return max_size(); }
 
       size_type
-      bucket_size(size_type __n) const
-      { return std::distance(begin(__n), end(__n)); }
+      bucket_size(size_type __bkt) const
+      { return std::distance(begin(__bkt), end(__bkt)); }
 
       size_type
       bucket(const key_type& __k) const
       { return _M_bucket_index(__k, this->_M_hash_code(__k)); }
 
       local_iterator
-      begin(size_type __n)
+      begin(size_type __bkt)
       {
-	return local_iterator(*this, _M_bucket_begin(__n),
-			      __n, _M_bucket_count);
+	return local_iterator(*this, _M_bucket_begin(__bkt),
+			      __bkt, _M_bucket_count);
       }
 
       local_iterator
-      end(size_type __n)
-      { return local_iterator(*this, nullptr, __n, _M_bucket_count); }
+      end(size_type __bkt)
+      { return local_iterator(*this, nullptr, __bkt, _M_bucket_count); }
 
       const_local_iterator
-      begin(size_type __n) const
+      begin(size_type __bkt) const
       {
-	return const_local_iterator(*this, _M_bucket_begin(__n),
-				    __n, _M_bucket_count);
+	return const_local_iterator(*this, _M_bucket_begin(__bkt),
+				    __bkt, _M_bucket_count);
       }
 
       const_local_iterator
-      end(size_type __n) const
-      { return const_local_iterator(*this, nullptr, __n, _M_bucket_count); }
+      end(size_type __bkt) const
+      { return const_local_iterator(*this, nullptr, __bkt, _M_bucket_count); }
 
       // DR 691.
       const_local_iterator
-      cbegin(size_type __n) const
+      cbegin(size_type __bkt) const
       {
-	return const_local_iterator(*this, _M_bucket_begin(__n),
-				    __n, _M_bucket_count);
+	return const_local_iterator(*this, _M_bucket_begin(__bkt),
+				    __bkt, _M_bucket_count);
       }
 
       const_local_iterator
-      cend(size_type __n) const
-      { return const_local_iterator(*this, nullptr, __n, _M_bucket_count); }
+      cend(size_type __bkt) const
+      { return const_local_iterator(*this, nullptr, __bkt, _M_bucket_count); }
 
       float
       load_factor() const noexcept
@@ -686,22 +684,22 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 
       template<typename... _Args>
 	std::pair<iterator, bool>
-	_M_emplace(std::true_type, _Args&&... __args);
+	_M_emplace(true_type, _Args&&... __args);
 
       template<typename... _Args>
 	iterator
-	_M_emplace(std::false_type __uk, _Args&&... __args)
+	_M_emplace(false_type __uk, _Args&&... __args)
 	{ return _M_emplace(cend(), __uk, std::forward<_Args>(__args)...); }
 
       // Emplace with hint, useless when keys are unique.
       template<typename... _Args>
 	iterator
-	_M_emplace(const_iterator, std::true_type __uk, _Args&&... __args)
+	_M_emplace(const_iterator, true_type __uk, _Args&&... __args)
 	{ return _M_emplace(__uk, std::forward<_Args>(__args)...).first; }
 
       template<typename... _Args>
 	iterator
-	_M_emplace(const_iterator, std::false_type, _Args&&... __args);
+	_M_emplace(const_iterator, false_type, _Args&&... __args);
 
       template<typename _Arg, typename _NodeGenerator>
 	std::pair<iterator, bool>
@@ -733,10 +731,10 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 		  const _NodeGenerator&, false_type);
 
       size_type
-      _M_erase(std::true_type, const key_type&);
+      _M_erase(true_type, const key_type&);
 
       size_type
-      _M_erase(std::false_type, const key_type&);
+      _M_erase(false_type, const key_type&);
 
       iterator
       _M_erase(size_type __bkt, __node_base* __prev_n, __node_type* __n);
@@ -777,8 +775,9 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       void
       clear() noexcept;
 
-      // Set number of buckets to be appropriate for container of n element.
-      void rehash(size_type __n);
+      // Set number of buckets keeping it appropriate for container's number
+      // of elements.
+      void rehash(size_type __bkt_count);
 
       // DR 1189.
       // reserve, if present, comes from _Rehash_base.
@@ -918,14 +917,14 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 
     private:
       // Helper rehash method used when keys are unique.
-      void _M_rehash_aux(size_type __n, std::true_type);
+      void _M_rehash_aux(size_type __bkt_count, true_type);
 
       // Helper rehash method used when keys can be non-unique.
-      void _M_rehash_aux(size_type __n, std::false_type);
+      void _M_rehash_aux(size_type __bkt_count, false_type);
 
       // Unconditionally change size of bucket array to n, restore
       // hash policy state to __state on exception.
-      void _M_rehash(size_type __n, const __rehash_state& __state);
+      void _M_rehash(size_type __bkt_count, const __rehash_state& __state);
     };
 
 
@@ -950,17 +949,17 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	   typename _Traits>
     _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 	       _H1, _H2, _Hash, _RehashPolicy, _Traits>::
-    _Hashtable(size_type __bucket_hint,
+    _Hashtable(size_type __bkt_count_hint,
 	       const _H1& __h1, const _H2& __h2, const _Hash& __h,
 	       const _Equal& __eq, const _ExtractKey& __exk,
 	       const allocator_type& __a)
     : _Hashtable(__h1, __h2, __h, __eq, __exk, __a)
     {
-      auto __bkt = _M_rehash_policy._M_next_bkt(__bucket_hint);
-      if (__bkt > _M_bucket_count)
+      auto __bkt_count = _M_rehash_policy._M_next_bkt(__bkt_count_hint);
+      if (__bkt_count > _M_bucket_count)
 	{
-	  _M_buckets = _M_allocate_buckets(__bkt);
-	  _M_bucket_count = __bkt;
+	  _M_buckets = _M_allocate_buckets(__bkt_count);
+	  _M_bucket_count = __bkt_count;
 	}
     }
 
@@ -972,7 +971,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 		 _H1, _H2, _Hash, _RehashPolicy, _Traits>::
       _Hashtable(_InputIterator __f, _InputIterator __l,
-		 size_type __bucket_hint,
+		 size_type __bkt_count_hint,
 		 const _H1& __h1, const _H2& __h2, const _Hash& __h,
 		 const _Equal& __eq, const _ExtractKey& __exk,
 		 const allocator_type& __a)
@@ -982,7 +981,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	auto __bkt_count =
 	  _M_rehash_policy._M_next_bkt(
 	    std::max(_M_rehash_policy._M_bkt_for_elements(__nb_elems),
-		     __bucket_hint));
+		     __bkt_count_hint));
 
 	if (__bkt_count > _M_bucket_count)
 	  {
@@ -1044,7 +1043,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 
       // Reuse allocated buckets and nodes.
       _M_assign_elements(__ht,
-	[](const __reuse_or_alloc_node_type& __roan, const __node_type* __n)
+	[](const __reuse_or_alloc_node_gen_t& __roan, const __node_type* __n)
 	{ return __roan(__n->_M_v()); });
       return *this;
     }
@@ -1078,7 +1077,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	    __hashtable_base::operator=(std::forward<_Ht>(__ht));
 	    _M_element_count = __ht._M_element_count;
 	    _M_rehash_policy = __ht._M_rehash_policy;
-	    __reuse_or_alloc_node_type __roan(_M_begin(), *this);
+	    __reuse_or_alloc_node_gen_t __roan(_M_begin(), *this);
 	    _M_before_begin._M_nxt = nullptr;
 	    _M_assign(__ht,
 		      [&__node_gen, &__roan](__node_type* __n)
@@ -1175,7 +1174,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     void
     _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 	       _H1, _H2, _Hash, _RehashPolicy, _Traits>::
-    _M_move_assign(_Hashtable&& __ht, std::true_type)
+    _M_move_assign(_Hashtable&& __ht, true_type)
     {
       this->_M_deallocate_nodes(_M_begin());
       _M_deallocate_buckets();
@@ -1207,15 +1206,15 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     void
     _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 	       _H1, _H2, _Hash, _RehashPolicy, _Traits>::
-    _M_move_assign(_Hashtable&& __ht, std::false_type)
+    _M_move_assign(_Hashtable&& __ht, false_type)
     {
       if (__ht._M_node_allocator() == this->_M_node_allocator())
-	_M_move_assign(std::move(__ht), std::true_type());
+	_M_move_assign(std::move(__ht), true_type());
       else
 	{
 	  // Can't move memory, move elements then.
 	  _M_assign_elements(std::move(__ht),
-		[](const __reuse_or_alloc_node_type& __roan, __node_type* __n)
+		[](const __reuse_or_alloc_node_gen_t& __roan, __node_type* __n)
 		{ return __roan(std::move_if_noexcept(__n->_M_v())); });
 	  __ht.clear();
 	}
@@ -1415,8 +1414,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     -> iterator
     {
       __hash_code __code = this->_M_hash_code(__k);
-      std::size_t __n = _M_bucket_index(__k, __code);
-      __node_type* __p = _M_find_node(__n, __k, __code);
+      std::size_t __bkt = _M_bucket_index(__k, __code);
+      __node_type* __p = _M_find_node(__bkt, __k, __code);
       return __p ? iterator(__p) : end();
     }
 
@@ -1431,8 +1430,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     -> const_iterator
     {
       __hash_code __code = this->_M_hash_code(__k);
-      std::size_t __n = _M_bucket_index(__k, __code);
-      __node_type* __p = _M_find_node(__n, __k, __code);
+      std::size_t __bkt = _M_bucket_index(__k, __code);
+      __node_type* __p = _M_find_node(__bkt, __k, __code);
       return __p ? const_iterator(__p) : end();
     }
 
@@ -1447,8 +1446,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     -> size_type
     {
       __hash_code __code = this->_M_hash_code(__k);
-      std::size_t __n = _M_bucket_index(__k, __code);
-      __node_type* __p = _M_bucket_begin(__n);
+      std::size_t __bkt = _M_bucket_index(__k, __code);
+      __node_type* __p = _M_bucket_begin(__bkt);
       if (!__p)
 	return 0;
 
@@ -1462,7 +1461,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	    // found a non-equivalent value after an equivalent one it
 	    // means that we won't find any new equivalent value.
 	    break;
-	  if (!__p->_M_nxt || _M_bucket_index(__p->_M_next()) != __n)
+	  if (!__p->_M_nxt || _M_bucket_index(__p->_M_next()) != __bkt)
 	    break;
 	}
       return __result;
@@ -1479,13 +1478,13 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     -> pair<iterator, iterator>
     {
       __hash_code __code = this->_M_hash_code(__k);
-      std::size_t __n = _M_bucket_index(__k, __code);
-      __node_type* __p = _M_find_node(__n, __k, __code);
+      std::size_t __bkt = _M_bucket_index(__k, __code);
+      __node_type* __p = _M_find_node(__bkt, __k, __code);
 
       if (__p)
 	{
 	  __node_type* __p1 = __p->_M_next();
-	  while (__p1 && _M_bucket_index(__p1) == __n
+	  while (__p1 && _M_bucket_index(__p1) == __bkt
 		 && this->_M_equals(__k, __code, __p1))
 	    __p1 = __p1->_M_next();
 
@@ -1506,13 +1505,13 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     -> pair<const_iterator, const_iterator>
     {
       __hash_code __code = this->_M_hash_code(__k);
-      std::size_t __n = _M_bucket_index(__k, __code);
-      __node_type* __p = _M_find_node(__n, __k, __code);
+      std::size_t __bkt = _M_bucket_index(__k, __code);
+      __node_type* __p = _M_find_node(__bkt, __k, __code);
 
       if (__p)
 	{
 	  __node_type* __p1 = __p->_M_next();
-	  while (__p1 && _M_bucket_index(__p1) == __n
+	  while (__p1 && _M_bucket_index(__p1) == __bkt
 		 && this->_M_equals(__k, __code, __p1))
 	    __p1 = __p1->_M_next();
 
@@ -1522,7 +1521,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	return std::make_pair(end(), end());
     }
 
-  // Find the node whose key compares equal to k in the bucket n.
+  // Find the node whose key compares equal to k in the bucket bkt.
   // Return nullptr if no node is found.
   template<typename _Key, typename _Value,
 	   typename _Alloc, typename _ExtractKey, typename _Equal,
@@ -1531,11 +1530,11 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     auto
     _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 	       _H1, _H2, _Hash, _RehashPolicy, _Traits>::
-    _M_find_before_node(size_type __n, const key_type& __k,
+    _M_find_before_node(size_type __bkt, const key_type& __k,
 			__hash_code __code) const
     -> __node_base*
     {
-      __node_base* __prev_p = _M_buckets[__n];
+      __node_base* __prev_p = _M_buckets[__bkt];
       if (!__prev_p)
 	return nullptr;
 
@@ -1545,7 +1544,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	  if (this->_M_equals(__k, __code, __p))
 	    return __prev_p;
 
-	  if (!__p->_M_nxt || _M_bucket_index(__p->_M_next()) != __n)
+	  if (!__p->_M_nxt || _M_bucket_index(__p->_M_next()) != __bkt)
 	    break;
 	  __prev_p = __p;
 	}
@@ -1631,11 +1630,12 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       auto
       _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 		 _H1, _H2, _Hash, _RehashPolicy, _Traits>::
-      _M_emplace(std::true_type, _Args&&... __args)
+      _M_emplace(true_type, _Args&&... __args)
       -> pair<iterator, bool>
       {
 	// First build the node to get access to the hash code
-	__node_type* __node = this->_M_allocate_node(std::forward<_Args>(__args)...);
+	__node_type* __node
+	  = this->_M_allocate_node(std::forward<_Args>(__args)...);
 	const key_type& __k = this->_M_extract()(__node->_M_v());
 	__hash_code __code;
 	__try
@@ -1669,7 +1669,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       auto
       _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 		 _H1, _H2, _Hash, _RehashPolicy, _Traits>::
-      _M_emplace(const_iterator __hint, std::false_type, _Args&&... __args)
+      _M_emplace(const_iterator __hint, false_type, _Args&&... __args)
       -> iterator
       {
 	// First build the node to get its hash code.
@@ -1711,7 +1711,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	  if (__do_rehash.first)
 	    {
 	      _M_rehash(__do_rehash.second, __saved_state);
-	      __bkt = _M_bucket_index(this->_M_extract()(__node->_M_v()), __code);
+	      __bkt
+		= _M_bucket_index(this->_M_extract()(__node->_M_v()), __code);
 	    }
 
 	  this->_M_store_code(__node, __code);
@@ -1896,7 +1897,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     auto
     _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 	       _H1, _H2, _Hash, _RehashPolicy, _Traits>::
-    _M_erase(std::true_type, const key_type& __k)
+    _M_erase(true_type, const key_type& __k)
     -> size_type
     {
       __hash_code __code = this->_M_hash_code(__k);
@@ -1920,7 +1921,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     auto
     _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 	       _H1, _H2, _Hash, _RehashPolicy, _Traits>::
-    _M_erase(std::false_type, const key_type& __k)
+    _M_erase(false_type, const key_type& __k)
     -> size_type
     {
       __hash_code __code = this->_M_hash_code(__k);
@@ -2038,16 +2039,16 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     void
     _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 	       _H1, _H2, _Hash, _RehashPolicy, _Traits>::
-    rehash(size_type __n)
+    rehash(size_type __bkt_count)
     {
       const __rehash_state& __saved_state = _M_rehash_policy._M_state();
-      std::size_t __buckets
+      __bkt_count
 	= std::max(_M_rehash_policy._M_bkt_for_elements(_M_element_count + 1),
-		   __n);
-      __buckets = _M_rehash_policy._M_next_bkt(__buckets);
+		   __bkt_count);
+      __bkt_count = _M_rehash_policy._M_next_bkt(__bkt_count);
 
-      if (__buckets != _M_bucket_count)
-	_M_rehash(__buckets, __saved_state);
+      if (__bkt_count != _M_bucket_count)
+	_M_rehash(__bkt_count, __saved_state);
       else
 	// No rehash, restore previous state to keep it consistent with
 	// container state.
@@ -2061,11 +2062,11 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     void
     _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 	       _H1, _H2, _Hash, _RehashPolicy, _Traits>::
-    _M_rehash(size_type __n, const __rehash_state& __state)
+    _M_rehash(size_type __bkt_count, const __rehash_state& __state)
     {
       __try
 	{
-	  _M_rehash_aux(__n, __unique_keys());
+	  _M_rehash_aux(__bkt_count, __unique_keys());
 	}
       __catch(...)
 	{
@@ -2084,16 +2085,17 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     void
     _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 	       _H1, _H2, _Hash, _RehashPolicy, _Traits>::
-    _M_rehash_aux(size_type __n, std::true_type)
+    _M_rehash_aux(size_type __bkt_count, true_type)
     {
-      __bucket_type* __new_buckets = _M_allocate_buckets(__n);
+      __bucket_type* __new_buckets = _M_allocate_buckets(__bkt_count);
       __node_type* __p = _M_begin();
       _M_before_begin._M_nxt = nullptr;
       std::size_t __bbegin_bkt = 0;
       while (__p)
 	{
 	  __node_type* __next = __p->_M_next();
-	  std::size_t __bkt = __hash_code_base::_M_bucket_index(__p, __n);
+	  std::size_t __bkt
+	    = __hash_code_base::_M_bucket_index(__p, __bkt_count);
 	  if (!__new_buckets[__bkt])
 	    {
 	      __p->_M_nxt = _M_before_begin._M_nxt;
@@ -2112,7 +2114,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	}
 
       _M_deallocate_buckets();
-      _M_bucket_count = __n;
+      _M_bucket_count = __bkt_count;
       _M_buckets = __new_buckets;
     }
 
@@ -2125,9 +2127,9 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     void
     _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 	       _H1, _H2, _Hash, _RehashPolicy, _Traits>::
-    _M_rehash_aux(size_type __n, std::false_type)
+    _M_rehash_aux(size_type __bkt_count, false_type)
     {
-      __bucket_type* __new_buckets = _M_allocate_buckets(__n);
+      __bucket_type* __new_buckets = _M_allocate_buckets(__bkt_count);
 
       __node_type* __p = _M_begin();
       _M_before_begin._M_nxt = nullptr;
@@ -2139,7 +2141,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       while (__p)
 	{
 	  __node_type* __next = __p->_M_next();
-	  std::size_t __bkt = __hash_code_base::_M_bucket_index(__p, __n);
+	  std::size_t __bkt
+	    = __hash_code_base::_M_bucket_index(__p, __bkt_count);
 
 	  if (__prev_p && __prev_bkt == __bkt)
 	    {
@@ -2166,7 +2169,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 		    {
 		      std::size_t __next_bkt
 			= __hash_code_base::_M_bucket_index(__prev_p->_M_next(),
-							    __n);
+							    __bkt_count);
 		      if (__next_bkt != __prev_bkt)
 			__new_buckets[__next_bkt] = __prev_p;
 		    }
@@ -2196,13 +2199,14 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       if (__check_bucket && __prev_p->_M_nxt)
 	{
 	  std::size_t __next_bkt
-	    = __hash_code_base::_M_bucket_index(__prev_p->_M_next(), __n);
+	    = __hash_code_base::_M_bucket_index(__prev_p->_M_next(),
+						__bkt_count);
 	  if (__next_bkt != __prev_bkt)
 	    __new_buckets[__next_bkt] = __prev_p;
 	}
 
       _M_deallocate_buckets();
-      _M_bucket_count = __n;
+      _M_bucket_count = __bkt_count;
       _M_buckets = __new_buckets;
     }
 
diff --git a/libstdc++-v3/include/bits/hashtable_policy.h b/libstdc++-v3/include/bits/hashtable_policy.h
index a4d2a97f4f3..878154ae210 100644
--- a/libstdc++-v3/include/bits/hashtable_policy.h
+++ b/libstdc++-v3/include/bits/hashtable_policy.h
@@ -181,8 +181,8 @@ namespace __detail
    *  @tparam _Cache_hash_code  Boolean value. True if the value of
    *  the hash function is stored along with the value. This is a
    *  time-space tradeoff.  Storing it may improve lookup speed by
-   *  reducing the number of times we need to call the _Equal
-   *  function.
+   *  reducing the number of times we need to call the _Hash or _Equal
+   *  functors.
    *
    *  @tparam _Constant_iterators  Boolean value. True if iterator and
    *  const_iterator are both constant iterator types. This is true
@@ -444,7 +444,7 @@ namespace __detail
   /// smallest prime that keeps the load factor small enough.
   struct _Prime_rehash_policy
   {
-    using __has_load_factor = std::true_type;
+    using __has_load_factor = true_type;
 
     _Prime_rehash_policy(float __z = 1.0) noexcept
     : _M_max_load_factor(__z), _M_next_resize(0) { }
@@ -521,7 +521,7 @@ namespace __detail
   /// operations.
   struct _Power2_rehash_policy
   {
-    using __has_load_factor = std::true_type;
+    using __has_load_factor = true_type;
 
     _Power2_rehash_policy(float __z = 1.0) noexcept
     : _M_max_load_factor(__z), _M_next_resize(0) { }
@@ -705,15 +705,15 @@ namespace __detail
     {
       __hashtable* __h = static_cast<__hashtable*>(this);
       __hash_code __code = __h->_M_hash_code(__k);
-      std::size_t __n = __h->_M_bucket_index(__k, __code);
-      __node_type* __p = __h->_M_find_node(__n, __k, __code);
+      std::size_t __bkt = __h->_M_bucket_index(__k, __code);
+      __node_type* __p = __h->_M_find_node(__bkt, __k, __code);
 
       if (!__p)
 	{
 	  __p = __h->_M_allocate_node(std::piecewise_construct,
 				      std::tuple<const key_type&>(__k),
 				      std::tuple<>());
-	  return __h->_M_insert_unique_node(__n, __code, __p)->second;
+	  return __h->_M_insert_unique_node(__bkt, __code, __p)->second;
 	}
 
       return __p->_M_v().second;
@@ -730,15 +730,15 @@ namespace __detail
     {
       __hashtable* __h = static_cast<__hashtable*>(this);
       __hash_code __code = __h->_M_hash_code(__k);
-      std::size_t __n = __h->_M_bucket_index(__k, __code);
-      __node_type* __p = __h->_M_find_node(__n, __k, __code);
+      std::size_t __bkt = __h->_M_bucket_index(__k, __code);
+      __node_type* __p = __h->_M_find_node(__bkt, __k, __code);
 
       if (!__p)
 	{
 	  __p = __h->_M_allocate_node(std::piecewise_construct,
 				      std::forward_as_tuple(std::move(__k)),
 				      std::tuple<>());
-	  return __h->_M_insert_unique_node(__n, __code, __p)->second;
+	  return __h->_M_insert_unique_node(__bkt, __code, __p)->second;
 	}
 
       return __p->_M_v().second;
@@ -755,8 +755,8 @@ namespace __detail
     {
       __hashtable* __h = static_cast<__hashtable*>(this);
       __hash_code __code = __h->_M_hash_code(__k);
-      std::size_t __n = __h->_M_bucket_index(__k, __code);
-      __node_type* __p = __h->_M_find_node(__n, __k, __code);
+      std::size_t __bkt = __h->_M_bucket_index(__k, __code);
+      __node_type* __p = __h->_M_find_node(__bkt, __k, __code);
 
       if (!__p)
 	__throw_out_of_range(__N("_Map_base::at"));
@@ -774,8 +774,8 @@ namespace __detail
     {
       const __hashtable* __h = static_cast<const __hashtable*>(this);
       __hash_code __code = __h->_M_hash_code(__k);
-      std::size_t __n = __h->_M_bucket_index(__k, __code);
-      __node_type* __p = __h->_M_find_node(__n, __k, __code);
+      std::size_t __bkt = __h->_M_bucket_index(__k, __code);
+      __node_type* __p = __h->_M_find_node(__bkt, __k, __code);
 
       if (!__p)
 	__throw_out_of_range(__N("_Map_base::at"));
@@ -1041,7 +1041,7 @@ namespace __detail
 	   typename _H1, typename _H2, typename _Hash,
 	   typename _RehashPolicy, typename _Traits,
 	   typename =
-	     __detected_or_t<std::false_type, __has_load_factor, _RehashPolicy>>
+	     __detected_or_t<false_type, __has_load_factor, _RehashPolicy>>
     struct _Rehash_base;
 
   /// Specialization when rehash policy doesn't provide load factor management.
@@ -1051,7 +1051,7 @@ namespace __detail
 	   typename _RehashPolicy, typename _Traits>
     struct _Rehash_base<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 		      _H1, _H2, _Hash, _RehashPolicy, _Traits,
-		      std::false_type>
+		      false_type>
     {
     };
 
@@ -1062,7 +1062,7 @@ namespace __detail
 	   typename _RehashPolicy, typename _Traits>
     struct _Rehash_base<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 			_H1, _H2, _Hash, _RehashPolicy, _Traits,
-			std::true_type>
+			true_type>
     {
       using __hashtable = _Hashtable<_Key, _Value, _Alloc, _ExtractKey,
 				     _Equal, _H1, _H2, _Hash,
@@ -1199,14 +1199,15 @@ namespace __detail
       { return 0; }
 
       std::size_t
-      _M_bucket_index(const _Key& __k, __hash_code, std::size_t __n) const
-      { return _M_ranged_hash()(__k, __n); }
+      _M_bucket_index(const _Key& __k, __hash_code,
+		      std::size_t __bkt_count) const
+      { return _M_ranged_hash()(__k, __bkt_count); }
 
       std::size_t
-      _M_bucket_index(const __node_type* __p, std::size_t __n) const
+      _M_bucket_index(const __node_type* __p, std::size_t __bkt_count) const
 	noexcept( noexcept(declval<const _Hash&>()(declval<const _Key&>(),
 						   (std::size_t)0)) )
-      { return _M_ranged_hash()(_M_extract()(__p->_M_v()), __n); }
+      { return _M_ranged_hash()(_M_extract()(__p->_M_v()), __bkt_count); }
 
       void
       _M_store_code(__node_type*, __hash_code) const
@@ -1290,15 +1291,16 @@ namespace __detail
       }
 
       std::size_t
-      _M_bucket_index(const _Key&, __hash_code __c, std::size_t __n) const
-      { return _M_h2()(__c, __n); }
+      _M_bucket_index(const _Key&, __hash_code __c,
+		      std::size_t __bkt_count) const
+      { return _M_h2()(__c, __bkt_count); }
 
       std::size_t
-      _M_bucket_index(const __node_type* __p, std::size_t __n) const
+      _M_bucket_index(const __node_type* __p, std::size_t __bkt_count) const
 	noexcept( noexcept(declval<const _H1&>()(declval<const _Key&>()))
 		  && noexcept(declval<const _H2&>()((__hash_code)0,
 						    (std::size_t)0)) )
-      { return _M_h2()(_M_h1()(_M_extract()(__p->_M_v())), __n); }
+      { return _M_h2()(_M_h1()(_M_extract()(__p->_M_v())), __bkt_count); }
 
       void
       _M_store_code(__node_type*, __hash_code) const
@@ -1375,14 +1377,14 @@ namespace __detail
 
       std::size_t
       _M_bucket_index(const _Key&, __hash_code __c,
-		      std::size_t __n) const
-      { return _M_h2()(__c, __n); }
+		      std::size_t __bkt_count) const
+      { return _M_h2()(__c, __bkt_count); }
 
       std::size_t
-      _M_bucket_index(const __node_type* __p, std::size_t __n) const
+      _M_bucket_index(const __node_type* __p, std::size_t __bkt_count) const
 	noexcept( noexcept(declval<const _H2&>()((__hash_code)0,
 						 (std::size_t)0)) )
-      { return _M_h2()(__p->_M_hash_code, __n); }
+      { return _M_h2()(__p->_M_hash_code, __bkt_count); }
 
       void
       _M_store_code(__node_type* __n, __hash_code __c) const
@@ -1615,9 +1617,9 @@ namespace __detail
       _Local_iterator() = default;
 
       _Local_iterator(const __hash_code_base& __base,
-		      _Hash_node<_Value, __cache>* __p,
+		      _Hash_node<_Value, __cache>* __n,
 		      std::size_t __bkt, std::size_t __bkt_count)
-	: __base_type(__base, __p, __bkt, __bkt_count)
+      : __base_type(__base, __n, __bkt, __bkt_count)
       { }
 
       reference
@@ -1667,9 +1669,9 @@ namespace __detail
       _Local_const_iterator() = default;
 
       _Local_const_iterator(const __hash_code_base& __base,
-			    _Hash_node<_Value, __cache>* __p,
+			    _Hash_node<_Value, __cache>* __n,
 			    std::size_t __bkt, std::size_t __bkt_count)
-	: __base_type(__base, __p, __bkt, __bkt_count)
+      : __base_type(__base, __n, __bkt, __bkt_count)
       { }
 
       _Local_const_iterator(const _Local_iterator<_Key, _Value, _ExtractKey,
@@ -2025,18 +2027,19 @@ namespace __detail
       _M_deallocate_nodes(__node_type* __n);
 
       __bucket_type*
-      _M_allocate_buckets(std::size_t __n);
+      _M_allocate_buckets(std::size_t __bkt_count);
 
       void
-      _M_deallocate_buckets(__bucket_type*, std::size_t __n);
+      _M_deallocate_buckets(__bucket_type*, std::size_t __bkt_count);
     };
 
   // Definitions of class template _Hashtable_alloc's out-of-line member
   // functions.
   template<typename _NodeAlloc>
     template<typename... _Args>
-      typename _Hashtable_alloc<_NodeAlloc>::__node_type*
+      auto
       _Hashtable_alloc<_NodeAlloc>::_M_allocate_node(_Args&&... __args)
+      -> __node_type*
       {
 	auto __nptr = __node_alloc_traits::allocate(_M_node_allocator(), 1);
 	__node_type* __n = std::__to_address(__nptr);
@@ -2087,25 +2090,25 @@ namespace __detail
 
   template<typename _NodeAlloc>
     typename _Hashtable_alloc<_NodeAlloc>::__bucket_type*
-    _Hashtable_alloc<_NodeAlloc>::_M_allocate_buckets(std::size_t __n)
+    _Hashtable_alloc<_NodeAlloc>::_M_allocate_buckets(std::size_t __bkt_count)
     {
       __bucket_alloc_type __alloc(_M_node_allocator());
 
-      auto __ptr = __bucket_alloc_traits::allocate(__alloc, __n);
+      auto __ptr = __bucket_alloc_traits::allocate(__alloc, __bkt_count);
       __bucket_type* __p = std::__to_address(__ptr);
-      __builtin_memset(__p, 0, __n * sizeof(__bucket_type));
+      __builtin_memset(__p, 0, __bkt_count * sizeof(__bucket_type));
       return __p;
     }
 
   template<typename _NodeAlloc>
     void
     _Hashtable_alloc<_NodeAlloc>::_M_deallocate_buckets(__bucket_type* __bkts,
-							std::size_t __n)
+							std::size_t __bkt_count)
     {
       typedef typename __bucket_alloc_traits::pointer _Ptr;
       auto __ptr = std::pointer_traits<_Ptr>::pointer_to(*__bkts);
       __bucket_alloc_type __alloc(_M_node_allocator());
-      __bucket_alloc_traits::deallocate(__alloc, __ptr, __n);
+      __bucket_alloc_traits::deallocate(__alloc, __ptr, __bkt_count);
     }
 
  //@} hashtable-detail


* Re: Hashtable comment cleanups & renamings
  2019-05-27 21:20     ` François Dumont
@ 2019-05-31 10:53       ` Jonathan Wakely
  0 siblings, 0 replies; 6+ messages in thread
From: Jonathan Wakely @ 2019-05-31 10:53 UTC (permalink / raw)
  To: François Dumont; +Cc: libstdc++, gcc-patches

On 27/05/19 22:11 +0200, François Dumont wrote:
>I had missed some occurrences of __bucket_hint to replace with
>__bucket_count_hint, so here is a new version.
>
>Ok to commit with the simple ChangeLog entry below?

OK for trunk, thanks.



end of thread, other threads:[~2019-05-31 10:43 UTC | newest]

Thread overview: 6+ messages
2019-05-17 16:19 Hashtable comment cleanups & renamings François Dumont
2019-05-17 20:24 ` Jonathan Wakely
2019-05-20  5:52   ` François Dumont
2019-05-21  5:42   ` François Dumont
2019-05-27 21:20     ` François Dumont
2019-05-31 10:53       ` Jonathan Wakely
