public inbox for libstdc++@gcc.gnu.org
 help / color / mirror / Atom feed
* libstdc++ PR 57272 Fancy pointer support in Hashtable
@ 2020-04-19 17:31 François Dumont
  2020-05-15 21:12 ` François Dumont
  0 siblings, 1 reply; 10+ messages in thread
From: François Dumont @ 2020-04-19 17:31 UTC (permalink / raw)
  To: libstdc++

[-- Attachment #1: Type: text/plain, Size: 774 bytes --]

Here is my work in progress to use the allocator's pointer type. This type
is used both as the node pointer and as the buckets pointer.

Rather than adapting _Local_iterator_base the way _Node_iterator_base was
adapted, I prefer to just make it inherit from _Node_iterator_base. This
simplifies its implementation and avoids having to provide dedicated
comparison operators.

Now I wonder if I need to consider Phil Bouchard's comment regarding how
node pointers are being passed, either by value or by reference. I have
already chosen to pass them as rvalue references on some occasions, and
even as lvalue references, as in the _M_bucket_index method. Do you think
I should continue this way? Maybe I should use some conditional type: if
the pointer is a raw pointer we pass it by value, and otherwise we pass it
by reference?

François


[-- Attachment #2: hashtable_ext_ptr.patch --]
[-- Type: text/x-patch, Size: 83210 bytes --]

diff --git a/libstdc++-v3/include/bits/hashtable.h b/libstdc++-v3/include/bits/hashtable.h
index b00319a668b..995a7a568a3 100644
--- a/libstdc++-v3/include/bits/hashtable.h
+++ b/libstdc++-v3/include/bits/hashtable.h
@@ -171,8 +171,9 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	   typename _H1, typename _H2, typename _Hash,
 	   typename _RehashPolicy, typename _Traits>
     class _Hashtable
-    : public __detail::_Hashtable_base<_Key, _Value, _ExtractKey, _Equal,
-				       _H1, _H2, _Hash, _Traits>,
+    : public __detail::_Hashtable_base<_Key, _Value, _Alloc,
+					 _ExtractKey, _Equal,
+					 _H1, _H2, _Hash, _Traits>,
       public __detail::_Map_base<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 				 _H1, _H2, _Hash, _RehashPolicy, _Traits>,
       public __detail::_Insert<_Key, _Value, _Alloc, _ExtractKey, _Equal,
@@ -182,9 +183,9 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       public __detail::_Equality<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 				 _H1, _H2, _Hash, _RehashPolicy, _Traits>,
       private __detail::_Hashtable_alloc<
-	__alloc_rebind<_Alloc,
-		       __detail::_Hash_node<_Value,
-					    _Traits::__hash_cached::value>>>
+	__alloc_rebind<_Alloc, __detail::_Hash_node<
+	  typename std::allocator_traits<_Alloc>::pointer, _Value,
+				 _Traits::__hash_cached::value>>>
     {
       static_assert(is_same<typename remove_cv<_Value>::type, _Value>::value,
 	  "unordered container must have a non-const, non-volatile value_type");
@@ -195,8 +196,13 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 
       using __traits_type = _Traits;
       using __hash_cached = typename __traits_type::__hash_cached;
-      using __node_type = __detail::_Hash_node<_Value, __hash_cached::value>;
+      using __hashtable_base = __detail::
+			      _Hashtable_base<_Key, _Value, _Alloc, _ExtractKey,
+					      _Equal, _H1, _H2, _Hash, _Traits>;
+
+      using __node_type = typename __hashtable_base::__node_type;
       using __node_alloc_type = __alloc_rebind<_Alloc, __node_type>;
+      using __node_pointer = typename __hashtable_base::__node_pointer;
 
       using __hashtable_alloc = __detail::_Hashtable_alloc<__node_alloc_type>;
 
@@ -206,6 +212,9 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	typename __hashtable_alloc::__node_alloc_traits;
       using __node_base = typename __hashtable_alloc::__node_base;
       using __bucket_type = typename __hashtable_alloc::__bucket_type;
+      using __bucket_pointer = typename __hashtable_alloc::__bucket_pointer;
+      using __bucket_ptr_traits = std::pointer_traits<__bucket_pointer>;
+      using __node_base_ptr_traits = std::pointer_traits<__bucket_type>;
 
     public:
       typedef _Key						key_type;
@@ -232,10 +241,6 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 				       	     __detail::_Identity,
 					     __detail::_Select1st>::type;
 
-      using __hashtable_base = __detail::
-			       _Hashtable_base<_Key, _Value, _ExtractKey,
-					      _Equal, _H1, _H2, _Hash, _Traits>;
-
       using __hash_code_base =  typename __hashtable_base::__hash_code_base;
       using __hash_code =  typename __hashtable_base::__hash_code;
       using __ireturn_type = typename __hashtable_base::__ireturn_type;
@@ -262,8 +267,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       struct _Scoped_node
       {
 	// Take ownership of a node with a constructed element.
-	_Scoped_node(__node_type* __n, __hashtable_alloc* __h)
-	: _M_h(__h), _M_node(__n) { }
+	_Scoped_node(__node_pointer&& __n, __hashtable_alloc* __h)
+	  : _M_h(__h), _M_node(std::move(__n)) { }
 
 	// Allocate a node and construct an element within it.
 	template<typename... _Args>
@@ -279,7 +284,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	_Scoped_node& operator=(const _Scoped_node&) = delete;
 
 	__hashtable_alloc* _M_h;
-	__node_type* _M_node;
+	__node_pointer _M_node;
       };
 
       template<typename _Ht>
@@ -306,7 +311,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       // Getting a bucket index from a node shall not throw because it is used
       // in methods (erase, swap...) that shall not throw.
       static_assert(noexcept(declval<const __hash_code_base_access&>()
-			     ._M_bucket_index((const __node_type*)nullptr,
+			     ._M_bucket_index(declval<const __node_pointer&>(),
 					      (std::size_t)0)),
 		    "Cache the hash code or qualify your functors involved"
 		    " in hash code and bucket index computation with noexcept");
@@ -361,7 +366,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 #endif
 
     private:
-      __bucket_type*		_M_buckets		= &_M_single_bucket;
+      __bucket_pointer		_M_buckets
+	= __bucket_ptr_traits::pointer_to(_M_single_bucket);
       size_type			_M_bucket_count		= 1;
       __node_base		_M_before_begin;
       size_type			_M_element_count	= 0;
@@ -376,8 +382,11 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       __bucket_type		_M_single_bucket	= nullptr;
 
       bool
-      _M_uses_single_bucket(__bucket_type* __bkts) const
-      { return __builtin_expect(__bkts == &_M_single_bucket, false); }
+      _M_uses_single_bucket(const __bucket_pointer& __bkts) const
+      {
+	return __builtin_expect(std::__to_address(__bkts) == &_M_single_bucket,
+				false);
+      }
 
       bool
       _M_uses_single_bucket() const
@@ -386,20 +395,20 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       __hashtable_alloc&
       _M_base_alloc() { return *this; }
 
-      __bucket_type*
+      __bucket_pointer
       _M_allocate_buckets(size_type __bkt_count)
       {
 	if (__builtin_expect(__bkt_count == 1, false))
 	  {
 	    _M_single_bucket = nullptr;
-	    return &_M_single_bucket;
+	    return __bucket_ptr_traits::pointer_to(_M_single_bucket);
 	  }
 
 	return __hashtable_alloc::_M_allocate_buckets(__bkt_count);
       }
 
       void
-      _M_deallocate_buckets(__bucket_type* __bkts, size_type __bkt_count)
+      _M_deallocate_buckets(__bucket_pointer __bkts, size_type __bkt_count)
       {
 	if (_M_uses_single_bucket(__bkts))
 	  return;
@@ -413,13 +422,9 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 
       // Gets bucket begin, deals with the fact that non-empty buckets contain
       // their before begin node.
-      __node_type*
+      __node_pointer
       _M_bucket_begin(size_type __bkt) const;
 
-      __node_type*
-      _M_begin() const
-      { return static_cast<__node_type*>(_M_before_begin._M_nxt); }
-
       // Assign *this using another _Hashtable instance. Whether elements
       // are copied or moved depends on the _Ht reference.
       template<typename _Ht>
@@ -523,7 +528,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       _Hashtable&
       operator=(initializer_list<value_type> __l)
       {
-	__reuse_or_alloc_node_gen_t __roan(_M_begin(), *this);
+	__reuse_or_alloc_node_gen_t __roan(std::move(_M_before_begin._M_nxt),
+					   *this);
 	_M_before_begin._M_nxt = nullptr;
 	clear();
 	this->_M_insert_range(__l.begin(), __l.end(), __roan, __unique_keys());
@@ -540,11 +546,11 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       // Basic container operations
       iterator
       begin() noexcept
-      { return iterator(_M_begin()); }
+      { return iterator(_M_before_begin._M_nxt); }
 
       const_iterator
       begin() const noexcept
-      { return const_iterator(_M_begin()); }
+      { return const_iterator(_M_before_begin._M_nxt); }
 
       iterator
       end() noexcept
@@ -556,7 +562,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 
       const_iterator
       cbegin() const noexcept
-      { return const_iterator(_M_begin()); }
+      { return const_iterator(_M_before_begin._M_nxt); }
 
       const_iterator
       cend() const noexcept
@@ -674,7 +680,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     protected:
       // Bucket index computation helpers.
       size_type
-      _M_bucket_index(__node_type* __n) const noexcept
+      _M_bucket_index(const __node_pointer& __n) const noexcept
       { return __hash_code_base::_M_bucket_index(__n, _M_bucket_count); }
 
       size_type
@@ -683,45 +689,45 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 
       // Find and insert helper functions and types
       // Find the node before the one matching the criteria.
-      __node_base*
+      __bucket_type
       _M_find_before_node(size_type, const key_type&, __hash_code) const;
 
-      __node_type*
+      __node_pointer
       _M_find_node(size_type __bkt, const key_type& __key,
 		   __hash_code __c) const
       {
-	__node_base* __before_n = _M_find_before_node(__bkt, __key, __c);
+	__bucket_type __before_n = _M_find_before_node(__bkt, __key, __c);
 	if (__before_n)
-	  return static_cast<__node_type*>(__before_n->_M_nxt);
+	  return __before_n->_M_nxt;
 	return nullptr;
       }
 
       // Insert a node at the beginning of a bucket.
       void
-      _M_insert_bucket_begin(size_type, __node_type*);
+      _M_insert_bucket_begin(size_type, __node_pointer);
 
       // Remove the bucket first node
       void
-      _M_remove_bucket_begin(size_type __bkt, __node_type* __next_n,
+      _M_remove_bucket_begin(size_type __bkt, __node_pointer __next_n,
 			     size_type __next_bkt);
 
       // Get the node before __n in the bucket __bkt
-      __node_base*
-      _M_get_previous_node(size_type __bkt, __node_base* __n);
+      __bucket_type
+      _M_get_previous_node(size_type __bkt, const __node_pointer& __n);
 
       // Insert node __n with key __k and hash code __code, in bucket __bkt
       // if no rehash (assumes no element with same key already present).
       // Takes ownership of __n if insertion succeeds, throws otherwise.
       iterator
       _M_insert_unique_node(const key_type& __k, size_type __bkt,
-			    __hash_code __code, __node_type* __n,
+			    __hash_code __code, __node_pointer __n,
 			    size_type __n_elt = 1);
 
       // Insert node __n with key __k and hash code __code.
       // Takes ownership of __n if insertion succeeds, throws otherwise.
       iterator
-      _M_insert_multi_node(__node_type* __hint, const key_type& __k,
-			   __hash_code __code, __node_type* __n);
+      _M_insert_multi_node(__node_pointer __hint, const key_type& __k,
+			   __hash_code __code, __node_pointer __n);
 
       template<typename... _Args>
 	std::pair<iterator, bool>
@@ -778,7 +784,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       _M_erase(false_type, const key_type&);
 
       iterator
-      _M_erase(size_type __bkt, __node_base* __prev_n, __node_type* __n);
+      _M_erase(size_type __bkt, __bucket_type __prev_n, __node_pointer __n);
 
     public:
       // Emplace
@@ -838,7 +844,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	    const key_type& __k = __nh._M_key();
 	    __hash_code __code = this->_M_hash_code(__k);
 	    size_type __bkt = _M_bucket_index(__k, __code);
-	    if (__node_type* __n = _M_find_node(__bkt, __k, __code))
+	    if (__node_pointer __n = _M_find_node(__bkt, __k, __code))
 	      {
 		__ret.node = std::move(__nh);
 		__ret.position = iterator(__n);
@@ -874,15 +880,15 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 
     private:
       node_type
-      _M_extract_node(size_t __bkt, __node_base* __prev_n)
+      _M_extract_node(size_t __bkt, __bucket_type __prev_n)
       {
-	__node_type* __n = static_cast<__node_type*>(__prev_n->_M_nxt);
+	__node_pointer __n = __prev_n->_M_nxt;
 	if (__prev_n == _M_buckets[__bkt])
-	  _M_remove_bucket_begin(__bkt, __n->_M_next(),
-	     __n->_M_nxt ? _M_bucket_index(__n->_M_next()) : 0);
+	  _M_remove_bucket_begin(__bkt, __n->_M_nxt,
+	     __n->_M_nxt ? _M_bucket_index(__n->_M_nxt) : 0);
 	else if (__n->_M_nxt)
 	  {
-	    size_type __next_bkt = _M_bucket_index(__n->_M_next());
+	    size_type __next_bkt = _M_bucket_index(__n->_M_nxt);
 	    if (__next_bkt != __bkt)
 	      _M_buckets[__next_bkt] = __prev_n;
 	  }
@@ -910,7 +916,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	node_type __nh;
 	__hash_code __code = this->_M_hash_code(__k);
 	std::size_t __bkt = _M_bucket_index(__k, __code);
-	if (__node_base* __prev_node = _M_find_before_node(__bkt, __k, __code))
+	if (__bucket_type __prev_node = _M_find_before_node(__bkt, __k, __code))
 	  __nh = _M_extract_node(__bkt, __prev_node);
 	return __nh;
       }
@@ -981,10 +987,10 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 	       _H1, _H2, _Hash, _RehashPolicy, _Traits>::
     _M_bucket_begin(size_type __bkt) const
-    -> __node_type*
+    -> __node_pointer
     {
-      __node_base* __n = _M_buckets[__bkt];
-      return __n ? static_cast<__node_type*>(__n->_M_nxt) : nullptr;
+      __bucket_type __n = _M_buckets[__bkt];
+      return __n ? __n->_M_nxt : nullptr;
     }
 
   template<typename _Key, typename _Value,
@@ -1058,7 +1064,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	      && __this_alloc != __that_alloc)
 	    {
 	      // Replacement allocator cannot free existing storage.
-	      this->_M_deallocate_nodes(_M_begin());
+	      this->_M_deallocate_nodes(_M_before_begin._M_nxt);
 	      _M_before_begin._M_nxt = nullptr;
 	      _M_deallocate_buckets();
 	      _M_buckets = nullptr;
@@ -1099,7 +1105,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 		 _H1, _H2, _Hash, _RehashPolicy, _Traits>::
       _M_assign_elements(_Ht&& __ht)
       {
-	__bucket_type* __former_buckets = nullptr;
+	__bucket_pointer __former_buckets = nullptr;
 	std::size_t __former_bucket_count = _M_bucket_count;
 	const __rehash_state& __former_state = _M_rehash_policy._M_state();
 
@@ -1118,7 +1124,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	    __hashtable_base::operator=(std::forward<_Ht>(__ht));
 	    _M_element_count = __ht._M_element_count;
 	    _M_rehash_policy = __ht._M_rehash_policy;
-	    __reuse_or_alloc_node_gen_t __roan(_M_begin(), *this);
+	    __reuse_or_alloc_node_gen_t
+	      __roan(std::move(_M_before_begin._M_nxt), *this);
 	    _M_before_begin._M_nxt = nullptr;
 	    _M_assign(std::forward<_Ht>(__ht), __roan);
 	    if (__former_buckets)
@@ -1150,7 +1157,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 		 _H1, _H2, _Hash, _RehashPolicy, _Traits>::
       _M_assign(_Ht&& __ht, const _NodeGenerator& __node_gen)
       {
-	__bucket_type* __buckets = nullptr;
+	__bucket_pointer __buckets = nullptr;
 	if (!_M_buckets)
 	  _M_buckets = __buckets = _M_allocate_buckets(_M_bucket_count);
 
@@ -1161,16 +1168,17 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 
 	    // First deal with the special first node pointed to by
 	    // _M_before_begin.
-	    __node_type* __ht_n = __ht._M_begin();
-	    __node_type* __this_n
+	    __node_pointer __ht_n = __ht._M_before_begin._M_nxt;
+	    __node_pointer __this_n
 	      = __node_gen(__fwd_value_for<_Ht>(__ht_n->_M_v()));
 	    this->_M_copy_code(__this_n, __ht_n);
 	    _M_before_begin._M_nxt = __this_n;
-	    _M_buckets[_M_bucket_index(__this_n)] = &_M_before_begin;
+	    _M_buckets[_M_bucket_index(__this_n)] =
+	      __node_base_ptr_traits::pointer_to(_M_before_begin);
 
 	    // Then deal with other nodes.
-	    __node_base* __prev_n = __this_n;
-	    for (__ht_n = __ht_n->_M_next(); __ht_n; __ht_n = __ht_n->_M_next())
+	    __node_pointer __prev_n = __this_n;
+	    for (__ht_n = __ht_n->_M_nxt; __ht_n; __ht_n = __ht_n->_M_nxt)
 	      {
 		__this_n = __node_gen(__fwd_value_for<_Ht>(__ht_n->_M_v()));
 		__prev_n->_M_nxt = __this_n;
@@ -1202,7 +1210,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       _M_rehash_policy._M_reset();
       _M_bucket_count = 1;
       _M_single_bucket = nullptr;
-      _M_buckets = &_M_single_bucket;
+      _M_buckets = __bucket_ptr_traits::pointer_to(_M_single_bucket);
       _M_before_begin._M_nxt = nullptr;
       _M_element_count = 0;
     }
@@ -1216,26 +1224,28 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	       _H1, _H2, _Hash, _RehashPolicy, _Traits>::
     _M_move_assign(_Hashtable&& __ht, true_type)
     {
-      this->_M_deallocate_nodes(_M_begin());
+      this->_M_deallocate_nodes(_M_before_begin._M_nxt);
       _M_deallocate_buckets();
       __hashtable_base::operator=(std::move(__ht));
       _M_rehash_policy = __ht._M_rehash_policy;
       if (!__ht._M_uses_single_bucket())
-	_M_buckets = __ht._M_buckets;
+	_M_buckets = std::move(__ht._M_buckets);
       else
 	{
-	  _M_buckets = &_M_single_bucket;
-	  _M_single_bucket = __ht._M_single_bucket;
+	  _M_buckets = __bucket_ptr_traits::pointer_to(_M_single_bucket);
+	  _M_single_bucket = std::move(__ht._M_single_bucket);
 	}
+
       _M_bucket_count = __ht._M_bucket_count;
-      _M_before_begin._M_nxt = __ht._M_before_begin._M_nxt;
+      _M_before_begin._M_nxt = std::move(__ht._M_before_begin._M_nxt);
       _M_element_count = __ht._M_element_count;
       std::__alloc_on_move(this->_M_node_allocator(), __ht._M_node_allocator());
 
       // Fix buckets containing the _M_before_begin pointers that can't be
       // moved.
-      if (_M_begin())
-	_M_buckets[_M_bucket_index(_M_begin())] = &_M_before_begin;
+      if (_M_before_begin._M_nxt)
+	_M_buckets[_M_bucket_index(_M_before_begin._M_nxt)] =
+	  __node_base_ptr_traits::pointer_to(_M_before_begin);
       __ht._M_reset();
     }
 
@@ -1290,23 +1300,24 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       __map_base(__ht),
       __rehash_base(__ht),
       __hashtable_alloc(std::move(__ht._M_base_alloc())),
-      _M_buckets(__ht._M_buckets),
+      _M_buckets(std::move(__ht._M_buckets)),
       _M_bucket_count(__ht._M_bucket_count),
-      _M_before_begin(__ht._M_before_begin._M_nxt),
+      _M_before_begin(std::move(__ht._M_before_begin._M_nxt)),
       _M_element_count(__ht._M_element_count),
       _M_rehash_policy(__ht._M_rehash_policy)
     {
       // Update, if necessary, buckets if __ht is using its single bucket.
-      if (__ht._M_uses_single_bucket())
+      if (std::__to_address(_M_buckets) == &__ht._M_single_bucket)
 	{
-	  _M_buckets = &_M_single_bucket;
-	  _M_single_bucket = __ht._M_single_bucket;
+	  _M_buckets = __bucket_ptr_traits::pointer_to(_M_single_bucket);
+	  _M_single_bucket = std::move(__ht._M_single_bucket);
 	}
 
       // Update, if necessary, bucket pointing to before begin that hasn't
       // moved.
-      if (_M_begin())
-	_M_buckets[_M_bucket_index(_M_begin())] = &_M_before_begin;
+      if (_M_before_begin._M_nxt)
+	_M_buckets[_M_bucket_index(_M_before_begin._M_nxt)] =
+	  __node_base_ptr_traits::pointer_to(_M_before_begin);
 
       __ht._M_reset();
     }
@@ -1322,7 +1333,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       __map_base(__ht),
       __rehash_base(__ht),
       __hashtable_alloc(__node_alloc_type(__a)),
-      _M_buckets(),
+      _M_buckets(nullptr),
       _M_bucket_count(__ht._M_bucket_count),
       _M_element_count(__ht._M_element_count),
       _M_rehash_policy(__ht._M_rehash_policy)
@@ -1351,17 +1362,18 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	{
 	  if (__ht._M_uses_single_bucket())
 	    {
-	      _M_buckets = &_M_single_bucket;
-	      _M_single_bucket = __ht._M_single_bucket;
+	      _M_buckets = __bucket_ptr_traits::pointer_to(_M_single_bucket);
+	      _M_single_bucket = std::move(__ht._M_single_bucket);
 	    }
 	  else
-	    _M_buckets = __ht._M_buckets;
+	    _M_buckets = std::move(__ht._M_buckets);
 
-	  _M_before_begin._M_nxt = __ht._M_before_begin._M_nxt;
+	  _M_before_begin._M_nxt = std::move(__ht._M_before_begin._M_nxt);
 	  // Update, if necessary, bucket pointing to before begin that hasn't
 	  // moved.
-	  if (_M_begin())
-	    _M_buckets[_M_bucket_index(_M_begin())] = &_M_before_begin;
+	  if (_M_before_begin._M_nxt)
+	    _M_buckets[_M_bucket_index(_M_before_begin._M_nxt)] =
+	      __node_base_ptr_traits::pointer_to(_M_before_begin);
 	  __ht._M_reset();
 	}
       else
@@ -1413,13 +1425,14 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	  if (!__x._M_uses_single_bucket())
 	    {
 	      _M_buckets = __x._M_buckets;
-	      __x._M_buckets = &__x._M_single_bucket;
+	      __x._M_buckets =
+		__bucket_ptr_traits::pointer_to(__x._M_single_bucket);
 	    }
 	}
       else if (__x._M_uses_single_bucket())
 	{
 	  __x._M_buckets = _M_buckets;
-	  _M_buckets = &_M_single_bucket;
+	  _M_buckets = __bucket_ptr_traits::pointer_to(_M_single_bucket);
 	}	
       else
 	std::swap(_M_buckets, __x._M_buckets);
@@ -1431,12 +1444,13 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 
       // Fix buckets containing the _M_before_begin pointers that can't be
       // swapped.
-      if (_M_begin())
-	_M_buckets[_M_bucket_index(_M_begin())] = &_M_before_begin;
+      if (_M_before_begin._M_nxt)
+	_M_buckets[_M_bucket_index(_M_before_begin._M_nxt)] =
+	  __node_base_ptr_traits::pointer_to(_M_before_begin);
 
-      if (__x._M_begin())
-	__x._M_buckets[__x._M_bucket_index(__x._M_begin())]
-	  = &__x._M_before_begin;
+      if (__x._M_before_begin._M_nxt)
+	__x._M_buckets[__x._M_bucket_index(__x._M_before_begin._M_nxt)]
+	  = __node_base_ptr_traits::pointer_to(__x._M_before_begin);
     }
 
   template<typename _Key, typename _Value,
@@ -1451,7 +1465,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     {
       __hash_code __code = this->_M_hash_code(__k);
       std::size_t __bkt = _M_bucket_index(__k, __code);
-      __node_type* __p = _M_find_node(__bkt, __k, __code);
+      __node_pointer __p = _M_find_node(__bkt, __k, __code);
       return __p ? iterator(__p) : end();
     }
 
@@ -1467,7 +1481,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     {
       __hash_code __code = this->_M_hash_code(__k);
       std::size_t __bkt = _M_bucket_index(__k, __code);
-      __node_type* __p = _M_find_node(__bkt, __k, __code);
+      __node_pointer __p = _M_find_node(__bkt, __k, __code);
       return __p ? const_iterator(__p) : end();
     }
 
@@ -1483,12 +1497,12 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     {
       __hash_code __code = this->_M_hash_code(__k);
       std::size_t __bkt = _M_bucket_index(__k, __code);
-      __node_type* __p = _M_bucket_begin(__bkt);
+      __node_pointer __p = _M_bucket_begin(__bkt);
       if (!__p)
 	return 0;
 
       std::size_t __result = 0;
-      for (;; __p = __p->_M_next())
+      for (;; __p = __p->_M_nxt)
 	{
 	  if (this->_M_equals(__k, __code, __p))
 	    ++__result;
@@ -1497,7 +1511,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	    // found a non-equivalent value after an equivalent one it
 	    // means that we won't find any new equivalent value.
 	    break;
-	  if (!__p->_M_nxt || _M_bucket_index(__p->_M_next()) != __bkt)
+	  if (!__p->_M_nxt || _M_bucket_index(__p->_M_nxt) != __bkt)
 	    break;
 	}
       return __result;
@@ -1515,14 +1529,14 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     {
       __hash_code __code = this->_M_hash_code(__k);
       std::size_t __bkt = _M_bucket_index(__k, __code);
-      __node_type* __p = _M_find_node(__bkt, __k, __code);
+      __node_pointer __p = _M_find_node(__bkt, __k, __code);
 
       if (__p)
 	{
-	  __node_type* __p1 = __p->_M_next();
+	  __node_pointer __p1 = __p->_M_nxt;
 	  while (__p1 && _M_bucket_index(__p1) == __bkt
 		 && this->_M_equals(__k, __code, __p1))
-	    __p1 = __p1->_M_next();
+	    __p1 = __p1->_M_nxt;
 
 	  return std::make_pair(iterator(__p), iterator(__p1));
 	}
@@ -1542,14 +1556,14 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     {
       __hash_code __code = this->_M_hash_code(__k);
       std::size_t __bkt = _M_bucket_index(__k, __code);
-      __node_type* __p = _M_find_node(__bkt, __k, __code);
+      __node_pointer __p = _M_find_node(__bkt, __k, __code);
 
       if (__p)
 	{
-	  __node_type* __p1 = __p->_M_next();
+	  __node_pointer __p1 = __p->_M_nxt;
 	  while (__p1 && _M_bucket_index(__p1) == __bkt
 		 && this->_M_equals(__k, __code, __p1))
-	    __p1 = __p1->_M_next();
+	    __p1 = __p1->_M_nxt;
 
 	  return std::make_pair(const_iterator(__p), const_iterator(__p1));
 	}
@@ -1568,19 +1582,18 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	       _H1, _H2, _Hash, _RehashPolicy, _Traits>::
     _M_find_before_node(size_type __bkt, const key_type& __k,
 			__hash_code __code) const
-    -> __node_base*
+    -> __bucket_type
     {
-      __node_base* __prev_p = _M_buckets[__bkt];
+      __bucket_type __prev_p = _M_buckets[__bkt];
       if (!__prev_p)
 	return nullptr;
 
-      for (__node_type* __p = static_cast<__node_type*>(__prev_p->_M_nxt);;
-	   __p = __p->_M_next())
+      for (__node_pointer __p = __prev_p->_M_nxt;; __p = __p->_M_nxt)
 	{
 	  if (this->_M_equals(__k, __code, __p))
 	    return __prev_p;
 
-	  if (!__p->_M_nxt || _M_bucket_index(__p->_M_next()) != __bkt)
+	  if (!__p->_M_nxt || _M_bucket_index(__p->_M_nxt) != __bkt)
 	    break;
 	  __prev_p = __p;
 	}
@@ -1594,7 +1607,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     void
     _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 	       _H1, _H2, _Hash, _RehashPolicy, _Traits>::
-    _M_insert_bucket_begin(size_type __bkt, __node_type* __node)
+    _M_insert_bucket_begin(size_type __bkt, __node_pointer __node)
     {
       if (_M_buckets[__bkt])
 	{
@@ -1613,8 +1626,9 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	  if (__node->_M_nxt)
 	    // We must update former begin bucket that is pointing to
 	    // _M_before_begin.
-	    _M_buckets[_M_bucket_index(__node->_M_next())] = __node;
-	  _M_buckets[__bkt] = &_M_before_begin;
+	    _M_buckets[_M_bucket_index(__node->_M_nxt)] = __node;
+	  _M_buckets[__bkt] =
+	    __node_base_ptr_traits::pointer_to(_M_before_begin);
 	}
     }
 
@@ -1625,7 +1639,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     void
     _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 	       _H1, _H2, _Hash, _RehashPolicy, _Traits>::
-    _M_remove_bucket_begin(size_type __bkt, __node_type* __next,
+    _M_remove_bucket_begin(size_type __bkt, __node_pointer __next,
 			   size_type __next_bkt)
     {
       if (!__next || __next_bkt != __bkt)
@@ -1636,7 +1650,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	    _M_buckets[__next_bkt] = _M_buckets[__bkt];
 
 	  // Second update before begin node if necessary
-	  if (&_M_before_begin == _M_buckets[__bkt])
+	  if (&_M_before_begin == std::__to_address(_M_buckets[__bkt]))
 	    _M_before_begin._M_nxt = __next;
 	  _M_buckets[__bkt] = nullptr;
 	}
@@ -1649,10 +1663,10 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     auto
     _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 	       _H1, _H2, _Hash, _RehashPolicy, _Traits>::
-    _M_get_previous_node(size_type __bkt, __node_base* __n)
-    -> __node_base*
+    _M_get_previous_node(size_type __bkt, const __node_pointer& __n)
+    -> __bucket_type
     {
-      __node_base* __prev_n = _M_buckets[__bkt];
+      __bucket_type __prev_n = _M_buckets[__bkt];
       while (__prev_n->_M_nxt != __n)
 	__prev_n = __prev_n->_M_nxt;
       return __prev_n;
@@ -1674,7 +1688,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	const key_type& __k = this->_M_extract()(__node._M_node->_M_v());
 	__hash_code __code = this->_M_hash_code(__k);
 	size_type __bkt = _M_bucket_index(__k, __code);
-	if (__node_type* __p = _M_find_node(__bkt, __k, __code))
+	if (__node_pointer __p = _M_find_node(__bkt, __k, __code))
 	  // There is already an equivalent node, no insertion
 	  return std::make_pair(iterator(__p), false);
 
@@ -1714,7 +1728,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 	       _H1, _H2, _Hash, _RehashPolicy, _Traits>::
     _M_insert_unique_node(const key_type& __k, size_type __bkt,
-			  __hash_code __code, __node_type* __node,
+			  __hash_code __code, __node_pointer __node,
 			  size_type __n_elt)
     -> iterator
     {
@@ -1744,8 +1758,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     auto
     _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 	       _H1, _H2, _Hash, _RehashPolicy, _Traits>::
-    _M_insert_multi_node(__node_type* __hint, const key_type& __k,
-			 __hash_code __code, __node_type* __node)
+    _M_insert_multi_node(__node_pointer __hint, const key_type& __k,
+			 __hash_code __code, __node_pointer __node)
     -> iterator
     {
       const __rehash_state& __saved_state = _M_rehash_policy._M_state();
@@ -1760,7 +1774,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 
       // Find the node before an equivalent one or use hint if it exists and
       // if it is equivalent.
-      __node_base* __prev
+      __bucket_type __prev
 	= __builtin_expect(__hint != nullptr, false)
 	  && this->_M_equals(__k, __code, __hint)
 	    ? __hint
@@ -1774,9 +1788,9 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	    // hint might be the last bucket node, in this case we need to
 	    // update next bucket.
 	    if (__node->_M_nxt
-		&& !this->_M_equals(__k, __code, __node->_M_next()))
+		&& !this->_M_equals(__k, __code, __node->_M_nxt))
 	      {
-		size_type __next_bkt = _M_bucket_index(__node->_M_next());
+		size_type __next_bkt = _M_bucket_index(__node->_M_nxt);
 		if (__next_bkt != __bkt)
 		  _M_buckets[__next_bkt] = __node;
 	      }
@@ -1807,7 +1821,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	__hash_code __code = this->_M_hash_code(__k);
 	size_type __bkt = _M_bucket_index(__k, __code);
 
-	if (__node_type* __node = _M_find_node(__bkt, __k, __code))
+	if (__node_pointer __node = _M_find_node(__bkt, __k, __code))
 	  return { iterator(__node), false };
 
 	_Scoped_node __node{ __node_gen(std::forward<_Arg>(__v)), this };
@@ -1853,14 +1867,13 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     erase(const_iterator __it)
     -> iterator
     {
-      __node_type* __n = __it._M_cur;
-      std::size_t __bkt = _M_bucket_index(__n);
+      std::size_t __bkt = _M_bucket_index(__it._M_cur);
 
       // Look for previous node to unlink it from the erased one, this
       // is why we need buckets to contain the before begin to make
       // this search fast.
-      __node_base* __prev_n = _M_get_previous_node(__bkt, __n);
-      return _M_erase(__bkt, __prev_n, __n);
+      __bucket_type __prev_n = _M_get_previous_node(__bkt, __it._M_cur);
+      return _M_erase(__bkt, __prev_n, __it._M_cur);
     }
 
   template<typename _Key, typename _Value,
@@ -1870,21 +1883,21 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     auto
     _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 	       _H1, _H2, _Hash, _RehashPolicy, _Traits>::
-    _M_erase(size_type __bkt, __node_base* __prev_n, __node_type* __n)
+    _M_erase(size_type __bkt, __bucket_type __prev_n, __node_pointer __n)
     -> iterator
     {
       if (__prev_n == _M_buckets[__bkt])
-	_M_remove_bucket_begin(__bkt, __n->_M_next(),
-	   __n->_M_nxt ? _M_bucket_index(__n->_M_next()) : 0);
+	_M_remove_bucket_begin(__bkt, __n->_M_nxt,
+	   __n->_M_nxt ? _M_bucket_index(__n->_M_nxt) : 0);
       else if (__n->_M_nxt)
 	{
-	  size_type __next_bkt = _M_bucket_index(__n->_M_next());
+	  size_type __next_bkt = _M_bucket_index(__n->_M_nxt);
 	  if (__next_bkt != __bkt)
 	    _M_buckets[__next_bkt] = __prev_n;
 	}
 
       __prev_n->_M_nxt = __n->_M_nxt;
-      iterator __result(__n->_M_next());
+      iterator __result(__n->_M_nxt);
       this->_M_deallocate_node(__n);
       --_M_element_count;
 
@@ -1905,13 +1918,12 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       std::size_t __bkt = _M_bucket_index(__k, __code);
 
       // Look for the node before the first matching node.
-      __node_base* __prev_n = _M_find_before_node(__bkt, __k, __code);
+      __bucket_type __prev_n = _M_find_before_node(__bkt, __k, __code);
       if (!__prev_n)
 	return 0;
 
       // We found a matching node, erase it.
-      __node_type* __n = static_cast<__node_type*>(__prev_n->_M_nxt);
-      _M_erase(__bkt, __prev_n, __n);
+      _M_erase(__bkt, __prev_n, __prev_n->_M_nxt);
       return 1;
     }
 
@@ -1929,7 +1941,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       std::size_t __bkt = _M_bucket_index(__k, __code);
 
       // Look for the node before the first matching node.
-      __node_base* __prev_n = _M_find_before_node(__bkt, __k, __code);
+      __bucket_type __prev_n = _M_find_before_node(__bkt, __k, __code);
       if (!__prev_n)
 	return 0;
 
@@ -1939,12 +1951,12 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       // We use one loop to find all matching nodes and another to deallocate
       // them so that the key stays valid during the first loop. It might be
       // invalidated indirectly when destroying nodes.
-      __node_type* __n = static_cast<__node_type*>(__prev_n->_M_nxt);
-      __node_type* __n_last = __n;
+      __node_pointer __n = __prev_n->_M_nxt;
+      __node_pointer __n_last = __n;
       std::size_t __n_last_bkt = __bkt;
       do
 	{
-	  __n_last = __n_last->_M_next();
+	  __n_last = __n_last->_M_nxt;
 	  if (!__n_last)
 	    break;
 	  __n_last_bkt = _M_bucket_index(__n_last);
@@ -1955,7 +1967,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       size_type __result = 0;
       do
 	{
-	  __node_type* __p = __n->_M_next();
+	  __node_pointer __p = __n->_M_nxt;
 	  this->_M_deallocate_node(__n);
 	  __n = __p;
 	  ++__result;
@@ -1981,22 +1993,22 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     erase(const_iterator __first, const_iterator __last)
     -> iterator
     {
-      __node_type* __n = __first._M_cur;
-      __node_type* __last_n = __last._M_cur;
+      __node_pointer __n = __first._M_cur;
+      __node_pointer __last_n = __last._M_cur;
       if (__n == __last_n)
 	return iterator(__n);
 
       std::size_t __bkt = _M_bucket_index(__n);
 
-      __node_base* __prev_n = _M_get_previous_node(__bkt, __n);
+      __bucket_type __prev_n = _M_get_previous_node(__bkt, __n);
       bool __is_bucket_begin = __n == _M_bucket_begin(__bkt);
       std::size_t __n_bkt = __bkt;
       for (;;)
 	{
 	  do
 	    {
-	      __node_type* __tmp = __n;
-	      __n = __n->_M_next();
+	      __node_pointer __tmp = __n;
+	      __n = __n->_M_nxt;
 	      this->_M_deallocate_node(__tmp);
 	      --_M_element_count;
 	      if (!__n)
@@ -2027,8 +2039,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	       _H1, _H2, _Hash, _RehashPolicy, _Traits>::
     clear() noexcept
     {
-      this->_M_deallocate_nodes(_M_begin());
-      __builtin_memset(_M_buckets, 0, _M_bucket_count * sizeof(__bucket_type));
+      this->_M_deallocate_nodes(_M_before_begin._M_nxt);
+      std::fill_n(_M_buckets, _M_bucket_count, nullptr);
       _M_element_count = 0;
       _M_before_begin._M_nxt = nullptr;
     }
@@ -2088,20 +2100,22 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	       _H1, _H2, _Hash, _RehashPolicy, _Traits>::
     _M_rehash_aux(size_type __bkt_count, true_type)
     {
-      __bucket_type* __new_buckets = _M_allocate_buckets(__bkt_count);
-      __node_type* __p = _M_begin();
+      __bucket_pointer __new_buckets = _M_allocate_buckets(__bkt_count);
+      auto __before_begin_ptr =
+	__node_base_ptr_traits::pointer_to(_M_before_begin);
+      __node_pointer __p = _M_before_begin._M_nxt;
       _M_before_begin._M_nxt = nullptr;
       std::size_t __bbegin_bkt = 0;
       while (__p)
 	{
-	  __node_type* __next = __p->_M_next();
+	  __node_pointer __next = __p->_M_nxt;
 	  std::size_t __bkt
 	    = __hash_code_base::_M_bucket_index(__p, __bkt_count);
 	  if (!__new_buckets[__bkt])
 	    {
 	      __p->_M_nxt = _M_before_begin._M_nxt;
 	      _M_before_begin._M_nxt = __p;
-	      __new_buckets[__bkt] = &_M_before_begin;
+	      __new_buckets[__bkt] = __before_begin_ptr;
 	      if (__p->_M_nxt)
 		__new_buckets[__bbegin_bkt] = __p;
 	      __bbegin_bkt = __bkt;
@@ -2132,16 +2146,18 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     {
       __bucket_type* __new_buckets = _M_allocate_buckets(__bkt_count);
 
-      __node_type* __p = _M_begin();
+      auto __before_begin_ptr =
+	__node_base_ptr_traits::pointer_to(_M_before_begin);
+      __node_pointer __p = _M_before_begin._M_nxt;
       _M_before_begin._M_nxt = nullptr;
       std::size_t __bbegin_bkt = 0;
       std::size_t __prev_bkt = 0;
-      __node_type* __prev_p = nullptr;
+      __node_pointer __prev_p{};
       bool __check_bucket = false;
 
       while (__p)
 	{
-	  __node_type* __next = __p->_M_next();
+	  __node_pointer __next = __p->_M_nxt;
 	  std::size_t __bkt
 	    = __hash_code_base::_M_bucket_index(__p, __bkt_count);
 
@@ -2169,7 +2185,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 		  if (__prev_p->_M_nxt)
 		    {
 		      std::size_t __next_bkt
-			= __hash_code_base::_M_bucket_index(__prev_p->_M_next(),
+			= __hash_code_base::_M_bucket_index(__prev_p->_M_nxt,
 							    __bkt_count);
 		      if (__next_bkt != __prev_bkt)
 			__new_buckets[__next_bkt] = __prev_p;
@@ -2181,7 +2197,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 		{
 		  __p->_M_nxt = _M_before_begin._M_nxt;
 		  _M_before_begin._M_nxt = __p;
-		  __new_buckets[__bkt] = &_M_before_begin;
+		  __new_buckets[__bkt] = __before_begin_ptr;
 		  if (__p->_M_nxt)
 		    __new_buckets[__bbegin_bkt] = __p;
 		  __bbegin_bkt = __bkt;
@@ -2200,7 +2216,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       if (__check_bucket && __prev_p->_M_nxt)
 	{
 	  std::size_t __next_bkt
-	    = __hash_code_base::_M_bucket_index(__prev_p->_M_next(),
+	    = __hash_code_base::_M_bucket_index(__prev_p->_M_nxt,
 						__bkt_count);
 	  if (__next_bkt != __prev_bkt)
 	    __new_buckets[__next_bkt] = __prev_p;
diff --git a/libstdc++-v3/include/bits/hashtable_policy.h b/libstdc++-v3/include/bits/hashtable_policy.h
index ef120134914..35de9eaf2b0 100644
--- a/libstdc++-v3/include/bits/hashtable_policy.h
+++ b/libstdc++-v3/include/bits/hashtable_policy.h
@@ -52,7 +52,7 @@ namespace __detail
    *  @ingroup unordered_associative_containers
    *  @{
    */
-  template<typename _Key, typename _Value,
+  template<typename _Key, typename _Value, typename _Alloc,
 	   typename _ExtractKey, typename _Equal,
 	   typename _H1, typename _H2, typename _Hash, typename _Traits>
     struct _Hashtable_base;
@@ -107,24 +107,24 @@ namespace __detail
       using __hashtable_alloc = _Hashtable_alloc<__node_alloc_type>;
       using __node_alloc_traits =
 	typename __hashtable_alloc::__node_alloc_traits;
-      using __node_type = typename __hashtable_alloc::__node_type;
+      using __node_pointer = typename __hashtable_alloc::__node_pointer;
 
     public:
-      _ReuseOrAllocNode(__node_type* __nodes, __hashtable_alloc& __h)
-      : _M_nodes(__nodes), _M_h(__h) { }
+      _ReuseOrAllocNode(__node_pointer&& __nodes, __hashtable_alloc& __h)
+      : _M_nodes(std::move(__nodes)), _M_h(__h) { }
       _ReuseOrAllocNode(const _ReuseOrAllocNode&) = delete;
 
       ~_ReuseOrAllocNode()
       { _M_h._M_deallocate_nodes(_M_nodes); }
 
       template<typename _Arg>
-	__node_type*
+	__node_pointer
 	operator()(_Arg&& __arg) const
 	{
 	  if (_M_nodes)
 	    {
-	      __node_type* __node = _M_nodes;
-	      _M_nodes = _M_nodes->_M_next();
+	      __node_pointer __node = _M_nodes;
+	      _M_nodes = _M_nodes->_M_nxt;
 	      __node->_M_nxt = nullptr;
 	      auto& __a = _M_h._M_node_allocator();
 	      __node_alloc_traits::destroy(__a, __node->_M_valptr());
@@ -144,7 +144,7 @@ namespace __detail
 	}
 
     private:
-      mutable __node_type* _M_nodes;
+      mutable __node_pointer _M_nodes;
       __hashtable_alloc& _M_h;
     };
 
@@ -155,14 +155,14 @@ namespace __detail
     {
     private:
       using __hashtable_alloc = _Hashtable_alloc<_NodeAlloc>;
-      using __node_type = typename __hashtable_alloc::__node_type;
+      using __node_pointer = typename __hashtable_alloc::__node_pointer;
 
     public:
       _AllocNode(__hashtable_alloc& __h)
       : _M_h(__h) { }
 
       template<typename _Arg>
-	__node_type*
+	__node_pointer
 	operator()(_Arg&& __arg) const
 	{ return _M_h._M_allocate_node(std::forward<_Arg>(__arg)); }
 
@@ -211,22 +211,25 @@ namespace __detail
    *  nodes also store a hash code. In some cases (e.g. strings) this
    *  may be a performance win.
    */
-  struct _Hash_node_base
-  {
-    _Hash_node_base* _M_nxt;
+  template<typename _NodePtr>
+    struct _Hash_node_base
+    {
+      _NodePtr _M_nxt;
 
-    _Hash_node_base() noexcept : _M_nxt() { }
+      _Hash_node_base() noexcept : _M_nxt(nullptr) { }
 
-    _Hash_node_base(_Hash_node_base* __next) noexcept : _M_nxt(__next) { }
-  };
+      template<typename _Ptr>
+	_Hash_node_base(_Ptr&& __next) noexcept
+	: _M_nxt(std::forward<_Ptr>(__next)) { }
+    };
 
   /**
    *  struct _Hash_node_value_base
    *
    *  Node type with the value to store.
    */
-  template<typename _Value>
-    struct _Hash_node_value_base : _Hash_node_base
+  template<typename _NodePtr, typename _Value>
+    struct _Hash_node_value_base : _Hash_node_base<_NodePtr>
     {
       typedef _Value value_type;
 
@@ -252,7 +255,7 @@ namespace __detail
   /**
    *  Primary template struct _Hash_node.
    */
-  template<typename _Value, bool _Cache_hash_code>
+  template<typename _Ptr, typename _Value, bool _Cache_hash_code>
     struct _Hash_node;
 
   /**
@@ -260,14 +263,14 @@ namespace __detail
    *
    *  Base class is __detail::_Hash_node_value_base.
    */
-  template<typename _Value>
-    struct _Hash_node<_Value, true> : _Hash_node_value_base<_Value>
+  template<typename _Ptr, typename _Value>
+    struct _Hash_node<_Ptr, _Value, true>
+    : _Hash_node_value_base<__ptr_rebind<_Ptr, _Hash_node<_Ptr, _Value, true>>,
+			    _Value>
     {
-      std::size_t  _M_hash_code;
+      using __node_pointer = __ptr_rebind<_Ptr, _Hash_node>;
 
-      _Hash_node*
-      _M_next() const noexcept
-      { return static_cast<_Hash_node*>(this->_M_nxt); }
+      std::size_t  _M_hash_code;
     };
 
   /**
@@ -275,69 +278,66 @@ namespace __detail
    *
    *  Base class is __detail::_Hash_node_value_base.
    */
-  template<typename _Value>
-    struct _Hash_node<_Value, false> : _Hash_node_value_base<_Value>
-    {
-      _Hash_node*
-      _M_next() const noexcept
-      { return static_cast<_Hash_node*>(this->_M_nxt); }
-    };
+  template<typename _Ptr, typename _Value>
+    struct _Hash_node<_Ptr, _Value, false>
+    : _Hash_node_value_base<__ptr_rebind<_Ptr, _Hash_node<_Ptr, _Value, false>>,
+			    _Value>
+    { using __node_pointer = __ptr_rebind<_Ptr, _Hash_node>; };
 
   /// Base class for node iterators.
-  template<typename _Value, bool _Cache_hash_code>
+  template<typename _NodePtr>
     struct _Node_iterator_base
     {
-      using __node_type = _Hash_node<_Value, _Cache_hash_code>;
+      using __node_type = typename std::pointer_traits<_NodePtr>::element_type;
 
-      __node_type*  _M_cur;
+      _NodePtr _M_cur;
 
-      _Node_iterator_base(__node_type* __p) noexcept
+      _Node_iterator_base(_NodePtr __p) noexcept
       : _M_cur(__p) { }
+      _Node_iterator_base() noexcept
+      : _Node_iterator_base(nullptr) { }
 
       void
       _M_incr() noexcept
-      { _M_cur = _M_cur->_M_next(); }
-    };
+      { _M_cur = _M_cur->_M_nxt; }
 
-  template<typename _Value, bool _Cache_hash_code>
-    inline bool
-    operator==(const _Node_iterator_base<_Value, _Cache_hash_code>& __x,
-	       const _Node_iterator_base<_Value, _Cache_hash_code >& __y)
-    noexcept
-    { return __x._M_cur == __y._M_cur; }
+      friend inline bool
+      operator==(const _Node_iterator_base& __x, const _Node_iterator_base& __y)
+      noexcept
+      { return __x._M_cur == __y._M_cur; }
 
-  template<typename _Value, bool _Cache_hash_code>
-    inline bool
-    operator!=(const _Node_iterator_base<_Value, _Cache_hash_code>& __x,
-	       const _Node_iterator_base<_Value, _Cache_hash_code>& __y)
-    noexcept
-    { return __x._M_cur != __y._M_cur; }
+      friend inline bool
+      operator!=(const _Node_iterator_base& __x, const _Node_iterator_base& __y)
+      noexcept
+      { return __x._M_cur != __y._M_cur; }
+    };
 
   /// Node iterators, used to iterate through all the hashtable.
-  template<typename _Value, bool __constant_iterators, bool __cache>
+  template<typename _NodePtr, bool __constant_iterators>
     struct _Node_iterator
-    : public _Node_iterator_base<_Value, __cache>
+    : public _Node_iterator_base<_NodePtr>
     {
     private:
-      using __base_type = _Node_iterator_base<_Value, __cache>;
+      using __base_type = _Node_iterator_base<_NodePtr>;
       using __node_type = typename __base_type::__node_type;
 
     public:
-      typedef _Value					value_type;
-      typedef std::ptrdiff_t				difference_type;
-      typedef std::forward_iterator_tag			iterator_category;
+      typedef typename __node_type::value_type	value_type;
+      typedef typename std::pointer_traits<_NodePtr>::difference_type
+						difference_type;
+      typedef std::forward_iterator_tag		iterator_category;
 
       using pointer = typename std::conditional<__constant_iterators,
-						const _Value*, _Value*>::type;
+				  const value_type*, value_type*>::type;
 
       using reference = typename std::conditional<__constant_iterators,
-						  const _Value&, _Value&>::type;
+				  const value_type&, value_type&>::type;
 
       _Node_iterator() noexcept
-      : __base_type(0) { }
+      : __base_type(nullptr) { }
 
       explicit
-      _Node_iterator(__node_type* __p) noexcept
+      _Node_iterator(_NodePtr __p) noexcept
       : __base_type(__p) { }
 
       reference
@@ -365,31 +365,32 @@ namespace __detail
     };
 
   /// Node const_iterators, used to iterate through all the hashtable.
-  template<typename _Value, bool __constant_iterators, bool __cache>
+  template<typename _NodePtr, bool __constant_iterators>
     struct _Node_const_iterator
-    : public _Node_iterator_base<_Value, __cache>
+    : public _Node_iterator_base<_NodePtr>
     {
     private:
-      using __base_type = _Node_iterator_base<_Value, __cache>;
+      using __base_type = _Node_iterator_base<_NodePtr>;
       using __node_type = typename __base_type::__node_type;
 
     public:
-      typedef _Value					value_type;
-      typedef std::ptrdiff_t				difference_type;
-      typedef std::forward_iterator_tag			iterator_category;
+      typedef typename __node_type::value_type	value_type;
+      typedef typename std::pointer_traits<_NodePtr>::difference_type
+						difference_type;
+      typedef std::forward_iterator_tag		iterator_category;
 
-      typedef const _Value*				pointer;
-      typedef const _Value&				reference;
+      typedef const value_type*			pointer;
+      typedef const value_type&			reference;
 
       _Node_const_iterator() noexcept
-      : __base_type(0) { }
+      : __base_type(nullptr) { }
 
       explicit
-      _Node_const_iterator(__node_type* __p) noexcept
+      _Node_const_iterator(_NodePtr __p) noexcept
       : __base_type(__p) { }
 
-      _Node_const_iterator(const _Node_iterator<_Value, __constant_iterators,
-			   __cache>& __x) noexcept
+      _Node_const_iterator(const _Node_iterator<_NodePtr,
+			   __constant_iterators>& __x) noexcept
       : __base_type(__x._M_cur) { }
 
       reference
@@ -662,17 +663,17 @@ namespace __detail
 		     _H1, _H2, _Hash, _RehashPolicy, _Traits, true>
     {
     private:
-      using __hashtable_base = __detail::_Hashtable_base<_Key, _Pair,
+      using __hashtable_base = __detail::_Hashtable_base<_Key, _Pair, _Alloc,
 							 _Select1st,
 							_Equal, _H1, _H2, _Hash,
-							  _Traits>;
+							 _Traits>;
 
       using __hashtable = _Hashtable<_Key, _Pair, _Alloc,
 				     _Select1st, _Equal,
 				     _H1, _H2, _Hash, _RehashPolicy, _Traits>;
 
       using __hash_code = typename __hashtable_base::__hash_code;
-      using __node_type = typename __hashtable_base::__node_type;
+      using __node_pointer = typename __hashtable_base::__node_pointer;
 
     public:
       using key_type = typename __hashtable_base::key_type;
@@ -706,7 +707,7 @@ namespace __detail
       __hashtable* __h = static_cast<__hashtable*>(this);
       __hash_code __code = __h->_M_hash_code(__k);
       std::size_t __bkt = __h->_M_bucket_index(__k, __code);
-      if (__node_type* __node = __h->_M_find_node(__bkt, __k, __code))
+      if (__node_pointer __node = __h->_M_find_node(__bkt, __k, __code))
 	return __node->_M_v().second;
 
       typename __hashtable::_Scoped_node __node {
@@ -733,7 +734,7 @@ namespace __detail
       __hashtable* __h = static_cast<__hashtable*>(this);
       __hash_code __code = __h->_M_hash_code(__k);
       std::size_t __bkt = __h->_M_bucket_index(__k, __code);
-      if (__node_type* __node = __h->_M_find_node(__bkt, __k, __code))
+      if (__node_pointer __node = __h->_M_find_node(__bkt, __k, __code))
 	return __node->_M_v().second;
 
       typename __hashtable::_Scoped_node __node {
@@ -760,7 +761,7 @@ namespace __detail
       __hashtable* __h = static_cast<__hashtable*>(this);
       __hash_code __code = __h->_M_hash_code(__k);
       std::size_t __bkt = __h->_M_bucket_index(__k, __code);
-      __node_type* __p = __h->_M_find_node(__bkt, __k, __code);
+      __node_pointer __p = __h->_M_find_node(__bkt, __k, __code);
 
       if (!__p)
 	__throw_out_of_range(__N("_Map_base::at"));
@@ -779,7 +780,7 @@ namespace __detail
       const __hashtable* __h = static_cast<const __hashtable*>(this);
       __hash_code __code = __h->_M_hash_code(__k);
       std::size_t __bkt = __h->_M_bucket_index(__k, __code);
-      __node_type* __p = __h->_M_find_node(__bkt, __k, __code);
+      __node_pointer __p = __h->_M_find_node(__bkt, __k, __code);
 
       if (!__p)
 	__throw_out_of_range(__N("_Map_base::at"));
@@ -802,7 +803,8 @@ namespace __detail
 				     _Equal, _H1, _H2, _Hash,
 				     _RehashPolicy, _Traits>;
 
-      using __hashtable_base = _Hashtable_base<_Key, _Value, _ExtractKey,
+      using __hashtable_base = _Hashtable_base<_Key, _Value, _Alloc,
+					       _ExtractKey,
 					       _Equal, _H1, _H2, _Hash,
 					       _Traits>;
 
@@ -813,7 +815,7 @@ namespace __detail
 
       using __unique_keys = typename __hashtable_base::__unique_keys;
       using __ireturn_type = typename __hashtable_base::__ireturn_type;
-      using __node_type = _Hash_node<_Value, _Traits::__hash_cached::value>;
+      using __node_type = typename __hashtable_base::__node_type;
       using __node_alloc_type = __alloc_rebind<_Alloc, __node_type>;
       using __node_gen_type = _AllocNode<__node_alloc_type>;
 
@@ -948,7 +950,8 @@ namespace __detail
 					_Equal, _H1, _H2, _Hash,
 					_RehashPolicy, _Traits>;
 
-      using __hashtable_base = _Hashtable_base<_Key, _Value, _ExtractKey,
+      using __hashtable_base = _Hashtable_base<_Key, _Value, _Alloc,
+					       _ExtractKey,
 					       _Equal, _H1, _H2, _Hash,
 					       _Traits>;
 
@@ -1144,9 +1147,9 @@ namespace __detail
    *  Base class for local iterators, used to iterate within a bucket
    *  but not between buckets.
    */
-  template<typename _Key, typename _Value, typename _ExtractKey,
-	   typename _H1, typename _H2, typename _Hash,
-	   bool __cache_hash_code>
+  template<typename _Key, typename _Value, typename _Alloc,
+	   typename _ExtractKey, typename _H1, typename _H2, typename _Hash,
+	   typename _NodePtr, bool __cache_hash_code>
     struct _Local_iterator_base;
 
   /**
@@ -1169,26 +1172,30 @@ namespace __detail
    *
    *  Primary template is unused except as a hook for specializations.
    */
-  template<typename _Key, typename _Value, typename _ExtractKey,
+  template<typename _Key, typename _Value, typename _Alloc,
+	   typename _ExtractKey,
 	   typename _H1, typename _H2, typename _Hash,
 	   bool __cache_hash_code>
     struct _Hash_code_base;
 
   /// Specialization: ranged hash function, no caching hash codes.  H1
   /// and H2 are provided but ignored.  We define a dummy hash code type.
-  template<typename _Key, typename _Value, typename _ExtractKey,
-	   typename _H1, typename _H2, typename _Hash>
-    struct _Hash_code_base<_Key, _Value, _ExtractKey, _H1, _H2, _Hash, false>
+  template<typename _Key, typename _Value, typename _Alloc,
+	   typename _ExtractKey, typename _H1, typename _H2, typename _Hash>
+    struct _Hash_code_base<_Key, _Value, _Alloc, _ExtractKey, _H1, _H2,
+			   _Hash, false>
     : private _Hashtable_ebo_helper<0, _ExtractKey>,
       private _Hashtable_ebo_helper<1, _Hash>
     {
     private:
       using __ebo_extract_key = _Hashtable_ebo_helper<0, _ExtractKey>;
       using __ebo_hash = _Hashtable_ebo_helper<1, _Hash>;
+      using __pointer = typename std::allocator_traits<_Alloc>::pointer;
 
     protected:
       typedef void* 					__hash_code;
-      typedef _Hash_node<_Value, false>			__node_type;
+      typedef _Hash_node<__pointer, _Value, false>	__node_type;
+      using __node_pointer = typename __node_type::__node_pointer;
 
       // We need the default constructor for the local iterators and _Hashtable
       // default constructor.
@@ -1208,17 +1215,17 @@ namespace __detail
       { return _M_ranged_hash()(__k, __bkt_count); }
 
       std::size_t
-      _M_bucket_index(const __node_type* __p, std::size_t __bkt_count) const
+      _M_bucket_index(const __node_pointer& __p, std::size_t __bkt_count) const
 	noexcept( noexcept(declval<const _Hash&>()(declval<const _Key&>(),
 						   (std::size_t)0)) )
       { return _M_ranged_hash()(_M_extract()(__p->_M_v()), __bkt_count); }
 
       void
-      _M_store_code(__node_type*, __hash_code) const
+      _M_store_code(const __node_pointer&, __hash_code) const
       { }
 
       void
-      _M_copy_code(__node_type*, const __node_type*) const
+      _M_copy_code(const __node_pointer&, const __node_pointer&) const
       { }
 
       void
@@ -1242,16 +1249,17 @@ namespace __detail
   /// Specialization: ranged hash function, cache hash codes.  This
   /// combination is meaningless, so we provide only a declaration
   /// and no definition.
-  template<typename _Key, typename _Value, typename _ExtractKey,
-	   typename _H1, typename _H2, typename _Hash>
-    struct _Hash_code_base<_Key, _Value, _ExtractKey, _H1, _H2, _Hash, true>;
+  template<typename _Key, typename _Value, typename _Alloc,
+	   typename _ExtractKey, typename _H1, typename _H2, typename _Hash>
+    struct _Hash_code_base<_Key, _Value, _Alloc, _ExtractKey, _H1, _H2,
+			   _Hash, true>;
 
   /// Specialization: hash function and range-hashing function, no
   /// caching of hash codes.
   /// Provides typedef and accessor required by C++ 11.
-  template<typename _Key, typename _Value, typename _ExtractKey,
-	   typename _H1, typename _H2>
-    struct _Hash_code_base<_Key, _Value, _ExtractKey, _H1, _H2,
+  template<typename _Key, typename _Value, typename _Alloc,
+	   typename _ExtractKey, typename _H1, typename _H2>
+    struct _Hash_code_base<_Key, _Value, _Alloc, _ExtractKey, _H1, _H2,
 			   _Default_ranged_hash, false>
     : private _Hashtable_ebo_helper<0, _ExtractKey>,
       private _Hashtable_ebo_helper<1, _H1>,
@@ -1261,10 +1269,7 @@ namespace __detail
       using __ebo_extract_key = _Hashtable_ebo_helper<0, _ExtractKey>;
       using __ebo_h1 = _Hashtable_ebo_helper<1, _H1>;
       using __ebo_h2 = _Hashtable_ebo_helper<2, _H2>;
-
-      // Gives the local iterator implementation access to _M_bucket_index().
-      friend struct _Local_iterator_base<_Key, _Value, _ExtractKey, _H1, _H2,
-					 _Default_ranged_hash, false>;
+      using __pointer = typename std::allocator_traits<_Alloc>::pointer;
 
     public:
       typedef _H1 					hasher;
@@ -1275,7 +1280,13 @@ namespace __detail
 
     protected:
       typedef std::size_t 				__hash_code;
-      typedef _Hash_node<_Value, false>			__node_type;
+      typedef _Hash_node<__pointer, _Value, false>	__node_type;
+      using __node_pointer = typename __node_type::__node_pointer;
+
+      // Gives the local iterator implementation access to _M_bucket_index().
+      friend struct _Local_iterator_base<_Key, _Value, _Alloc, _ExtractKey,
+					 _H1, _H2, _Default_ranged_hash,
+					 __node_pointer, false>;
 
       // We need the default constructor for the local iterators and _Hashtable
       // default constructor.
@@ -1300,18 +1311,18 @@ namespace __detail
       { return _M_h2()(__c, __bkt_count); }
 
       std::size_t
-      _M_bucket_index(const __node_type* __p, std::size_t __bkt_count) const
+      _M_bucket_index(const __node_pointer& __p, std::size_t __bkt_count) const
 	noexcept( noexcept(declval<const _H1&>()(declval<const _Key&>()))
 		  && noexcept(declval<const _H2&>()((__hash_code)0,
 						    (std::size_t)0)) )
       { return _M_h2()(_M_h1()(_M_extract()(__p->_M_v())), __bkt_count); }
 
       void
-      _M_store_code(__node_type*, __hash_code) const
+      _M_store_code(const __node_pointer&, __hash_code) const
       { }
 
       void
-      _M_copy_code(__node_type*, const __node_type*) const
+      _M_copy_code(const __node_pointer&, const __node_pointer&) const
       { }
 
       void
@@ -1336,22 +1347,19 @@ namespace __detail
   /// Specialization: hash function and range-hashing function,
   /// caching hash codes.  H is provided but ignored.  Provides
   /// typedef and accessor required by C++ 11.
-  template<typename _Key, typename _Value, typename _ExtractKey,
-	   typename _H1, typename _H2>
-    struct _Hash_code_base<_Key, _Value, _ExtractKey, _H1, _H2,
+  template<typename _Key, typename _Value, typename _Alloc,
+	   typename _ExtractKey, typename _H1, typename _H2>
+    struct _Hash_code_base<_Key, _Value, _Alloc, _ExtractKey, _H1, _H2,
 			   _Default_ranged_hash, true>
     : private _Hashtable_ebo_helper<0, _ExtractKey>,
       private _Hashtable_ebo_helper<1, _H1>,
       private _Hashtable_ebo_helper<2, _H2>
     {
     private:
-      // Gives the local iterator implementation access to _M_h2().
-      friend struct _Local_iterator_base<_Key, _Value, _ExtractKey, _H1, _H2,
-					 _Default_ranged_hash, true>;
-
       using __ebo_extract_key = _Hashtable_ebo_helper<0, _ExtractKey>;
       using __ebo_h1 = _Hashtable_ebo_helper<1, _H1>;
       using __ebo_h2 = _Hashtable_ebo_helper<2, _H2>;
+      using __pointer = typename std::allocator_traits<_Alloc>::pointer;
 
     public:
       typedef _H1 					hasher;
@@ -1362,7 +1370,13 @@ namespace __detail
 
     protected:
       typedef std::size_t 				__hash_code;
-      typedef _Hash_node<_Value, true>			__node_type;
+      typedef _Hash_node<__pointer, _Value, true>	__node_type;
+      using __node_pointer = typename __node_type::__node_pointer;
+
+      // Gives the local iterator implementation access to _M_h2().
+      friend struct _Local_iterator_base<_Key, _Value, _Alloc, _ExtractKey,
+					 _H1, _H2, _Default_ranged_hash,
+					 __node_pointer, true>;
 
       // We need the default constructor for _Hashtable default constructor.
       _Hash_code_base() = default;
@@ -1385,17 +1399,18 @@ namespace __detail
       { return _M_h2()(__c, __bkt_count); }
 
       std::size_t
-      _M_bucket_index(const __node_type* __p, std::size_t __bkt_count) const
+      _M_bucket_index(const __node_pointer& __p, std::size_t __bkt_count) const
 	noexcept( noexcept(declval<const _H2&>()((__hash_code)0,
 						 (std::size_t)0)) )
       { return _M_h2()(__p->_M_hash_code, __bkt_count); }
 
       void
-      _M_store_code(__node_type* __n, __hash_code __c) const
+      _M_store_code(const __node_pointer& __n, __hash_code __c) const
       { __n->_M_hash_code = __c; }
 
       void
-      _M_copy_code(__node_type* __to, const __node_type* __from) const
+      _M_copy_code(const __node_pointer& __to,
+		   const __node_pointer& __from) const
       { __to->_M_hash_code = __from->_M_hash_code; }
 
       void
@@ -1418,46 +1433,44 @@ namespace __detail
     };
 
   /// Partial specialization used when nodes contain a cached hash code.
-  template<typename _Key, typename _Value, typename _ExtractKey,
-	   typename _H1, typename _H2, typename _Hash>
-    struct _Local_iterator_base<_Key, _Value, _ExtractKey,
-				_H1, _H2, _Hash, true>
+  template<typename _Key, typename _Value, typename _Alloc,
+	   typename _ExtractKey, typename _H1, typename _H2,
+	   typename _Hash, typename _NodePtr>
+    struct _Local_iterator_base<_Key, _Value, _Alloc, _ExtractKey,
+				_H1, _H2, _Hash, _NodePtr, true>
     : private _Hashtable_ebo_helper<0, _H2>
+    , public _Node_iterator_base<_NodePtr>
     {
     protected:
       using __base_type = _Hashtable_ebo_helper<0, _H2>;
-      using __hash_code_base = _Hash_code_base<_Key, _Value, _ExtractKey,
+      using __base_node_iter = _Node_iterator_base<_NodePtr>;
+      using __hash_code_base = _Hash_code_base<_Key, _Value, _Alloc, _ExtractKey,
 					       _H1, _H2, _Hash, true>;
 
       _Local_iterator_base() = default;
-      _Local_iterator_base(const __hash_code_base& __base,
-			   _Hash_node<_Value, true>* __p,
+      _Local_iterator_base(const __hash_code_base& __base, _NodePtr __p,
 			   std::size_t __bkt, std::size_t __bkt_count)
-      : __base_type(__base._M_h2()),
-	_M_cur(__p), _M_bucket(__bkt), _M_bucket_count(__bkt_count) { }
+	: __base_type(__base._M_h2()), __base_node_iter(__p)
+	, _M_bucket(__bkt), _M_bucket_count(__bkt_count) { }
 
       void
       _M_incr()
       {
-	_M_cur = _M_cur->_M_next();
-	if (_M_cur)
+	__base_node_iter::_M_incr();
+	if (this->_M_cur)
 	  {
 	    std::size_t __bkt
-	      = __base_type::_M_get()(_M_cur->_M_hash_code,
-					   _M_bucket_count);
+	      = __base_type::_M_get()(this->_M_cur->_M_hash_code,
+				      _M_bucket_count);
 	    if (__bkt != _M_bucket)
-	      _M_cur = nullptr;
+	      this->_M_cur = nullptr;
 	  }
       }
 
-      _Hash_node<_Value, true>*  _M_cur;
       std::size_t _M_bucket;
       std::size_t _M_bucket_count;
 
     public:
-      const void*
-      _M_curr() const { return _M_cur; }  // for equality ops
-
       std::size_t
       _M_get_bucket() const { return _M_bucket; }  // for debug mode
     };
@@ -1493,29 +1506,33 @@ namespace __detail
       _M_h() const { return reinterpret_cast<const _Tp*>(this); }
     };
 
-  template<typename _Key, typename _Value, typename _ExtractKey,
-	   typename _H1, typename _H2, typename _Hash>
-    using __hash_code_for_local_iter
-      = _Hash_code_storage<_Hash_code_base<_Key, _Value, _ExtractKey,
-					   _H1, _H2, _Hash, false>>;
+  template<typename _Key, typename _Value, typename _Alloc,
+	   typename _ExtractKey, typename _H1, typename _H2,
+	   typename _Hash>
+    using __hash_code_for_local_iter =
+      _Hash_code_storage<_Hash_code_base<_Key, _Value, _Alloc, _ExtractKey,
+					 _H1, _H2, _Hash, false>>;
 
   // Partial specialization used when hash codes are not cached
-  template<typename _Key, typename _Value, typename _ExtractKey,
-	   typename _H1, typename _H2, typename _Hash>
-    struct _Local_iterator_base<_Key, _Value, _ExtractKey,
-				_H1, _H2, _Hash, false>
-    : __hash_code_for_local_iter<_Key, _Value, _ExtractKey, _H1, _H2, _Hash>
+  template<typename _Key, typename _Value, typename _Alloc,
+	   typename _ExtractKey, typename _H1, typename _H2,
+	   typename _Hash, typename _NodePtr>
+    struct _Local_iterator_base<_Key, _Value, _Alloc, _ExtractKey,
+				_H1, _H2, _Hash, _NodePtr, false>
+    : __hash_code_for_local_iter<_Key, _Value, _Alloc, _ExtractKey,
+				 _H1, _H2, _Hash>
+    , public _Node_iterator_base<_NodePtr>
     {
     protected:
-      using __hash_code_base = _Hash_code_base<_Key, _Value, _ExtractKey,
-					       _H1, _H2, _Hash, false>;
+      using __hash_code_base = _Hash_code_base<_Key, _Value, _Alloc,
+				_ExtractKey, _H1, _H2, _Hash, false>;
+      using __node_iter_base = _Node_iterator_base<_NodePtr>;
 
       _Local_iterator_base() : _M_bucket_count(-1) { }
 
-      _Local_iterator_base(const __hash_code_base& __base,
-			   _Hash_node<_Value, false>* __p,
+      _Local_iterator_base(const __hash_code_base& __base, _NodePtr __p,
 			   std::size_t __bkt, std::size_t __bkt_count)
-      : _M_cur(__p), _M_bucket(__bkt), _M_bucket_count(__bkt_count)
+      : __node_iter_base(__p), _M_bucket(__bkt), _M_bucket_count(__bkt_count)
       { _M_init(__base); }
 
       ~_Local_iterator_base()
@@ -1525,7 +1542,7 @@ namespace __detail
       }
 
       _Local_iterator_base(const _Local_iterator_base& __iter)
-      : _M_cur(__iter._M_cur), _M_bucket(__iter._M_bucket),
+      : __node_iter_base(__iter._M_cur), _M_bucket(__iter._M_bucket),
         _M_bucket_count(__iter._M_bucket_count)
       {
 	if (_M_bucket_count != -1)
@@ -1537,7 +1554,7 @@ namespace __detail
       {
 	if (_M_bucket_count != -1)
 	  _M_destroy();
-	_M_cur = __iter._M_cur;
+	this->_M_cur = __iter._M_cur;
 	_M_bucket = __iter._M_bucket;
 	_M_bucket_count = __iter._M_bucket_count;
 	if (_M_bucket_count != -1)
@@ -1548,17 +1565,16 @@ namespace __detail
       void
       _M_incr()
       {
-	_M_cur = _M_cur->_M_next();
-	if (_M_cur)
+	__node_iter_base::_M_incr();
+	if (this->_M_cur)
 	  {
-	    std::size_t __bkt = this->_M_h()->_M_bucket_index(_M_cur,
+	    std::size_t __bkt = this->_M_h()->_M_bucket_index(this->_M_cur,
 							      _M_bucket_count);
 	    if (__bkt != _M_bucket)
-	      _M_cur = nullptr;
+	      this->_M_cur = nullptr;
 	  }
       }
 
-      _Hash_node<_Value, false>*  _M_cur;
       std::size_t _M_bucket;
       std::size_t _M_bucket_count;
 
@@ -1570,42 +1586,22 @@ namespace __detail
       _M_destroy() { this->_M_h()->~__hash_code_base(); }
 
     public:
-      const void*
-      _M_curr() const { return _M_cur; }  // for equality ops and debug mode
-
       std::size_t
       _M_get_bucket() const { return _M_bucket; }  // for debug mode
     };
 
-  template<typename _Key, typename _Value, typename _ExtractKey,
-	   typename _H1, typename _H2, typename _Hash, bool __cache>
-    inline bool
-    operator==(const _Local_iterator_base<_Key, _Value, _ExtractKey,
-					  _H1, _H2, _Hash, __cache>& __x,
-	       const _Local_iterator_base<_Key, _Value, _ExtractKey,
-					  _H1, _H2, _Hash, __cache>& __y)
-    { return __x._M_curr() == __y._M_curr(); }
-
-  template<typename _Key, typename _Value, typename _ExtractKey,
-	   typename _H1, typename _H2, typename _Hash, bool __cache>
-    inline bool
-    operator!=(const _Local_iterator_base<_Key, _Value, _ExtractKey,
-					  _H1, _H2, _Hash, __cache>& __x,
-	       const _Local_iterator_base<_Key, _Value, _ExtractKey,
-					  _H1, _H2, _Hash, __cache>& __y)
-    { return __x._M_curr() != __y._M_curr(); }
-
   /// local iterators
-  template<typename _Key, typename _Value, typename _ExtractKey,
-	   typename _H1, typename _H2, typename _Hash,
+  template<typename _Key, typename _Value, typename _Alloc,
+	   typename _ExtractKey, typename _H1, typename _H2,
+	   typename _Hash, typename _NodePtr,
 	   bool __constant_iterators, bool __cache>
     struct _Local_iterator
-    : public _Local_iterator_base<_Key, _Value, _ExtractKey,
-				  _H1, _H2, _Hash, __cache>
+    : public _Local_iterator_base<_Key, _Value, _Alloc, _ExtractKey,
+				  _H1, _H2, _Hash, _NodePtr, __cache>
     {
     private:
-      using __base_type = _Local_iterator_base<_Key, _Value, _ExtractKey,
-					       _H1, _H2, _Hash, __cache>;
+      using __base_type = _Local_iterator_base<_Key, _Value, _Alloc,
+			_ExtractKey, _H1, _H2, _Hash, _NodePtr, __cache>;
       using __hash_code_base = typename __base_type::__hash_code_base;
     public:
       typedef _Value					value_type;
@@ -1621,7 +1617,7 @@ namespace __detail
       _Local_iterator() = default;
 
       _Local_iterator(const __hash_code_base& __base,
-		      _Hash_node<_Value, __cache>* __n,
+		      _NodePtr __n,
 		      std::size_t __bkt, std::size_t __bkt_count)
       : __base_type(__base, __n, __bkt, __bkt_count)
       { }
@@ -1651,16 +1647,17 @@ namespace __detail
     };
 
   /// local const_iterators
-  template<typename _Key, typename _Value, typename _ExtractKey,
-	   typename _H1, typename _H2, typename _Hash,
+  template<typename _Key, typename _Value, typename _Alloc,
+	   typename _ExtractKey, typename _H1, typename _H2,
+	   typename _Hash, typename _NodePtr,
 	   bool __constant_iterators, bool __cache>
     struct _Local_const_iterator
-    : public _Local_iterator_base<_Key, _Value, _ExtractKey,
-				  _H1, _H2, _Hash, __cache>
+    : public _Local_iterator_base<_Key, _Value, _Alloc, _ExtractKey,
+				  _H1, _H2, _Hash, _NodePtr, __cache>
     {
     private:
-      using __base_type = _Local_iterator_base<_Key, _Value, _ExtractKey,
-					       _H1, _H2, _Hash, __cache>;
+      using __base_type = _Local_iterator_base<_Key, _Value, _Alloc,
+			_ExtractKey, _H1, _H2, _Hash, _NodePtr, __cache>;
       using __hash_code_base = typename __base_type::__hash_code_base;
 
     public:
@@ -1673,15 +1670,14 @@ namespace __detail
       _Local_const_iterator() = default;
 
       _Local_const_iterator(const __hash_code_base& __base,
-			    _Hash_node<_Value, __cache>* __n,
+			    _NodePtr __n,
 			    std::size_t __bkt, std::size_t __bkt_count)
       : __base_type(__base, __n, __bkt, __bkt_count)
       { }
 
-      _Local_const_iterator(const _Local_iterator<_Key, _Value, _ExtractKey,
-						  _H1, _H2, _Hash,
-						  __constant_iterators,
-						  __cache>& __x)
+      _Local_const_iterator(const _Local_iterator<_Key, _Value, _Alloc,
+			    _ExtractKey, _H1, _H2, _Hash, _NodePtr,
+			    __constant_iterators, __cache>& __x)
       : __base_type(__x)
       { }
 
@@ -1719,13 +1715,14 @@ namespace __detail
    *    - __detail::_Hash_code_base
    *    - __detail::_Hashtable_ebo_helper
    */
-  template<typename _Key, typename _Value,
+  template<typename _Key, typename _Value, typename _Alloc,
 	   typename _ExtractKey, typename _Equal,
-	   typename _H1, typename _H2, typename _Hash, typename _Traits>
-  struct _Hashtable_base
-  : public _Hash_code_base<_Key, _Value, _ExtractKey, _H1, _H2, _Hash,
-			   _Traits::__hash_cached::value>,
-    private _Hashtable_ebo_helper<0, _Equal>
+	   typename _H1, typename _H2, typename _Hash,
+	   typename _Traits>
+    struct _Hashtable_base
+    : public _Hash_code_base<_Key, _Value, _Alloc, _ExtractKey, _H1, _H2, _Hash,
+			     _Traits::__hash_cached::value>
+    , private _Hashtable_ebo_helper<0, _Equal>
   {
   public:
     typedef _Key					key_type;
@@ -1739,31 +1736,28 @@ namespace __detail
     using __constant_iterators = typename __traits_type::__constant_iterators;
     using __unique_keys = typename __traits_type::__unique_keys;
 
-    using __hash_code_base = _Hash_code_base<_Key, _Value, _ExtractKey,
+    using __hash_code_base = _Hash_code_base<_Key, _Value, _Alloc, _ExtractKey,
 					     _H1, _H2, _Hash,
 					     __hash_cached::value>;
 
     using __hash_code = typename __hash_code_base::__hash_code;
     using __node_type = typename __hash_code_base::__node_type;
+    using __node_pointer = typename __hash_code_base::__node_pointer;
 
-    using iterator = __detail::_Node_iterator<value_type,
-					      __constant_iterators::value,
-					      __hash_cached::value>;
+    using iterator = __detail::_Node_iterator<__node_pointer,
+					      __constant_iterators::value>;
 
-    using const_iterator = __detail::_Node_const_iterator<value_type,
-						   __constant_iterators::value,
-						   __hash_cached::value>;
+    using const_iterator = __detail::_Node_const_iterator<__node_pointer,
+						   __constant_iterators::value>;
 
-    using local_iterator = __detail::_Local_iterator<key_type, value_type,
-						  _ExtractKey, _H1, _H2, _Hash,
-						  __constant_iterators::value,
-						     __hash_cached::value>;
+    using local_iterator = __detail::_Local_iterator<key_type, value_type, _Alloc,
+			_ExtractKey, _H1, _H2, _Hash, __node_pointer,
+			__constant_iterators::value, __hash_cached::value>;
 
-    using const_local_iterator = __detail::_Local_const_iterator<key_type,
-								 value_type,
-					_ExtractKey, _H1, _H2, _Hash,
-					__constant_iterators::value,
-					__hash_cached::value>;
+    using const_local_iterator = __detail::_Local_const_iterator<
+			key_type, value_type, _Alloc,
+			_ExtractKey, _H1, _H2, _Hash, __node_pointer,
+			__constant_iterators::value, __hash_cached::value>;
 
     using __ireturn_type = typename std::conditional<__unique_keys::value,
 						     std::pair<iterator, bool>,
@@ -1779,11 +1773,11 @@ namespace __detail
        { return true; }
       };
 
-    template<typename _Ptr2>
-      struct _Equal_hash_code<_Hash_node<_Ptr2, true>>
+    template<typename _Ptr2, typename _Value2>
+    struct _Equal_hash_code<_Hash_node<_Ptr2, _Value2, true>>
       {
        static bool
-       _S_equals(__hash_code __c, const _Hash_node<_Ptr2, true>& __n)
+       _S_equals(__hash_code __c, const _Hash_node<_Ptr2, _Value2, true>& __n)
        { return __c == __n._M_hash_code; }
       };
 
@@ -1795,7 +1789,7 @@ namespace __detail
     { }
 
     bool
-    _M_equals(const _Key& __k, __hash_code __c, __node_type* __n) const
+    _M_equals(const _Key& __k, __hash_code __c, const __node_pointer& __n) const
     {
       static_assert(__is_invocable<const _Equal&, const _Key&, const _Key&>{},
 	  "key equality predicate must be invocable with two arguments of "
@@ -1854,8 +1848,8 @@ namespace __detail
 	      _H1, _H2, _Hash, _RehashPolicy, _Traits, true>::
     _M_equal(const __hashtable& __other) const
     {
-      using __node_base = typename __hashtable::__node_base;
-      using __node_type = typename __hashtable::__node_type;
+      using __bucket_type = typename __hashtable::__bucket_type;
+      using __node_pointer = typename __hashtable::__node_pointer;
       const __hashtable* __this = static_cast<const __hashtable*>(this);
       if (__this->size() != __other.size())
 	return false;
@@ -1863,18 +1857,17 @@ namespace __detail
       for (auto __itx = __this->begin(); __itx != __this->end(); ++__itx)
 	{
 	  std::size_t __ybkt = __other._M_bucket_index(__itx._M_cur);
-	  __node_base* __prev_n = __other._M_buckets[__ybkt];
+	  __bucket_type __prev_n = __other._M_buckets[__ybkt];
 	  if (!__prev_n)
 	    return false;
 
-	  for (__node_type* __n = static_cast<__node_type*>(__prev_n->_M_nxt);;
-	       __n = __n->_M_next())
+	  for (__node_pointer __n = __prev_n->_M_nxt;; __n = __n->_M_nxt)
 	    {
 	      if (__n->_M_v() == *__itx)
 		break;
 
 	      if (!__n->_M_nxt
-		  || __other._M_bucket_index(__n->_M_next()) != __ybkt)
+		  || __other._M_bucket_index(__n->_M_nxt) != __ybkt)
 		return false;
 	    }
 	}
@@ -1906,8 +1899,8 @@ namespace __detail
 	      _H1, _H2, _Hash, _RehashPolicy, _Traits, false>::
     _M_equal(const __hashtable& __other) const
     {
-      using __node_base = typename __hashtable::__node_base;
-      using __node_type = typename __hashtable::__node_type;
+      using __bucket_type = typename __hashtable::__bucket_type;
+      using __node_pointer = typename __hashtable::__node_pointer;
       const __hashtable* __this = static_cast<const __hashtable*>(this);
       if (__this->size() != __other.size())
 	return false;
@@ -1923,19 +1916,19 @@ namespace __detail
 	    ++__x_count;
 
 	  std::size_t __ybkt = __other._M_bucket_index(__itx._M_cur);
-	  __node_base* __y_prev_n = __other._M_buckets[__ybkt];
+	  __bucket_type __y_prev_n = __other._M_buckets[__ybkt];
 	  if (!__y_prev_n)
 	    return false;
 
-	  __node_type* __y_n = static_cast<__node_type*>(__y_prev_n->_M_nxt);
-	  for (;; __y_n = __y_n->_M_next())
+	  __node_pointer __y_n = __y_prev_n->_M_nxt;
+	  for (;; __y_n = __y_n->_M_nxt)
 	    {
 	      if (__this->key_eq()(_ExtractKey()(__y_n->_M_v()),
 				   _ExtractKey()(*__itx)))
 		break;
 
 	      if (!__y_n->_M_nxt
-		  || __other._M_bucket_index(__y_n->_M_next()) != __ybkt)
+		  || __other._M_bucket_index(__y_n->_M_nxt) != __ybkt)
 		return false;
 	    }
 
@@ -1973,11 +1966,13 @@ namespace __detail
       using __value_alloc_traits = typename __node_alloc_traits::template
 	rebind_traits<typename __node_type::value_type>;
 
-      using __node_base = __detail::_Hash_node_base;
-      using __bucket_type = __node_base*;      
+      using __node_pointer = typename __node_alloc_traits::pointer;
+      using __node_base = __detail::_Hash_node_base<__node_pointer>;
+      using __bucket_type = __ptr_rebind<__node_pointer, __node_base>;
       using __bucket_alloc_type =
 	__alloc_rebind<__node_alloc_type, __bucket_type>;
       using __bucket_alloc_traits = std::allocator_traits<__bucket_alloc_type>;
+      using __bucket_pointer = typename __bucket_alloc_traits::pointer;
 
       _Hashtable_alloc() = default;
       _Hashtable_alloc(const _Hashtable_alloc&) = default;
@@ -1998,27 +1993,27 @@ namespace __detail
 
       // Allocate a node and construct an element within it.
       template<typename... _Args>
-	__node_type*
+	__node_pointer
 	_M_allocate_node(_Args&&... __args);
 
       // Destroy the element within a node and deallocate the node.
       void
-      _M_deallocate_node(__node_type* __n);
+      _M_deallocate_node(__node_pointer __n);
 
       // Deallocate a node.
       void
-      _M_deallocate_node_ptr(__node_type* __n);
+      _M_deallocate_node_ptr(__node_pointer __n);
 
       // Deallocate the linked list of nodes pointed to by __n.
       // The elements within the nodes are destroyed.
       void
-      _M_deallocate_nodes(__node_type* __n);
+      _M_deallocate_nodes(__node_pointer __n);
 
-      __bucket_type*
+      __bucket_pointer
       _M_allocate_buckets(std::size_t __bkt_count);
 
       void
-      _M_deallocate_buckets(__bucket_type*, std::size_t __bkt_count);
+      _M_deallocate_buckets(__bucket_pointer, std::size_t __bkt_count);
     };
 
   // Definitions of class template _Hashtable_alloc's out-of-line member
@@ -2027,17 +2022,16 @@ namespace __detail
     template<typename... _Args>
       auto
       _Hashtable_alloc<_NodeAlloc>::_M_allocate_node(_Args&&... __args)
-      -> __node_type*
+      -> __node_pointer
       {
 	auto __nptr = __node_alloc_traits::allocate(_M_node_allocator(), 1);
-	__node_type* __n = std::__to_address(__nptr);
 	__try
 	  {
-	    ::new ((void*)__n) __node_type;
+	    ::new ((void*)std::__to_address(__nptr)) __node_type;
 	    __node_alloc_traits::construct(_M_node_allocator(),
-					   __n->_M_valptr(),
+					   __nptr->_M_valptr(),
 					   std::forward<_Args>(__args)...);
-	    return __n;
+	    return __nptr;
 	  }
 	__catch(...)
 	  {
@@ -2048,55 +2042,51 @@ namespace __detail
 
   template<typename _NodeAlloc>
     void
-    _Hashtable_alloc<_NodeAlloc>::_M_deallocate_node(__node_type* __n)
+    _Hashtable_alloc<_NodeAlloc>::_M_deallocate_node(__node_pointer __nptr)
     {
-      __node_alloc_traits::destroy(_M_node_allocator(), __n->_M_valptr());
-      _M_deallocate_node_ptr(__n);
+      __node_alloc_traits::destroy(_M_node_allocator(), __nptr->_M_valptr());
+      _M_deallocate_node_ptr(__nptr);
     }
 
   template<typename _NodeAlloc>
     void
-    _Hashtable_alloc<_NodeAlloc>::_M_deallocate_node_ptr(__node_type* __n)
+    _Hashtable_alloc<_NodeAlloc>::_M_deallocate_node_ptr(__node_pointer __nptr)
     {
-      typedef typename __node_alloc_traits::pointer _Ptr;
-      auto __ptr = std::pointer_traits<_Ptr>::pointer_to(*__n);
-      __n->~__node_type();
-      __node_alloc_traits::deallocate(_M_node_allocator(), __ptr, 1);
+      __nptr->~__node_type();
+      __node_alloc_traits::deallocate(_M_node_allocator(), __nptr, 1);
     }
 
   template<typename _NodeAlloc>
     void
-    _Hashtable_alloc<_NodeAlloc>::_M_deallocate_nodes(__node_type* __n)
+    _Hashtable_alloc<_NodeAlloc>::_M_deallocate_nodes(__node_pointer __nptr)
     {
-      while (__n)
+      while (__nptr)
 	{
-	  __node_type* __tmp = __n;
-	  __n = __n->_M_next();
+	  __node_pointer __tmp = __nptr;
+	  __nptr = __nptr->_M_nxt;
 	  _M_deallocate_node(__tmp);
 	}
     }
 
   template<typename _NodeAlloc>
-    typename _Hashtable_alloc<_NodeAlloc>::__bucket_type*
+    auto
     _Hashtable_alloc<_NodeAlloc>::_M_allocate_buckets(std::size_t __bkt_count)
+    -> __bucket_pointer
     {
       __bucket_alloc_type __alloc(_M_node_allocator());
 
       auto __ptr = __bucket_alloc_traits::allocate(__alloc, __bkt_count);
-      __bucket_type* __p = std::__to_address(__ptr);
-      __builtin_memset(__p, 0, __bkt_count * sizeof(__bucket_type));
-      return __p;
+      std::fill_n(__ptr, __bkt_count, nullptr);
+      return __ptr;
     }
 
   template<typename _NodeAlloc>
     void
-    _Hashtable_alloc<_NodeAlloc>::_M_deallocate_buckets(__bucket_type* __bkts,
+    _Hashtable_alloc<_NodeAlloc>::_M_deallocate_buckets(__bucket_pointer __bkts,
 							std::size_t __bkt_count)
     {
-      typedef typename __bucket_alloc_traits::pointer _Ptr;
-      auto __ptr = std::pointer_traits<_Ptr>::pointer_to(*__bkts);
       __bucket_alloc_type __alloc(_M_node_allocator());
-      __bucket_alloc_traits::deallocate(__alloc, __ptr, __bkt_count);
+      __bucket_alloc_traits::deallocate(__alloc, __bkts, __bkt_count);
     }
 
  //@} hashtable-detail
diff --git a/libstdc++-v3/include/debug/unordered_map b/libstdc++-v3/include/debug/unordered_map
index 7be1d2ee952..41f169168aa 100644
--- a/libstdc++-v3/include/debug/unordered_map
+++ b/libstdc++-v3/include/debug/unordered_map
@@ -621,7 +621,7 @@ namespace __debug
 	  [__victim](_Base_const_iterator __it) { return __it == __victim; });
 	this->_M_invalidate_local_if(
 	  [__victim](_Base_const_local_iterator __it)
-	  { return __it._M_curr() == __victim._M_cur; });
+	  { return __it == __victim; });
       }
 
       _Base_iterator
@@ -1227,7 +1227,7 @@ namespace __debug
 	  [__victim](_Base_const_iterator __it) { return __it == __victim; });
 	this->_M_invalidate_local_if(
 	  [__victim](_Base_const_local_iterator __it)
-	  { return __it._M_curr() == __victim._M_cur; });
+	  { return __it == __victim; });
       }
 
       _Base_iterator
diff --git a/libstdc++-v3/include/debug/unordered_set b/libstdc++-v3/include/debug/unordered_set
index 9941bbe1c24..65539b75350 100644
--- a/libstdc++-v3/include/debug/unordered_set
+++ b/libstdc++-v3/include/debug/unordered_set
@@ -506,7 +506,7 @@ namespace __debug
 	  [__victim](_Base_const_iterator __it) { return __it == __victim; });
 	this->_M_invalidate_local_if(
 	  [__victim](_Base_const_local_iterator __it)
-	  { return __it._M_curr() == __victim._M_cur; });
+	  { return __it == __victim; });
       }
 
       _Base_iterator
@@ -1066,7 +1066,7 @@ namespace __debug
 	  [__victim](_Base_const_iterator __it) { return __it == __victim; });
 	this->_M_invalidate_local_if(
 	  [__victim](_Base_const_local_iterator __it)
-	  { return __it._M_curr() == __victim._M_cur; });
+	  { return __it == __victim; });
       }
 
       _Base_iterator
diff --git a/libstdc++-v3/testsuite/23_containers/unordered_set/allocator/ext_ptr.cc b/libstdc++-v3/testsuite/23_containers/unordered_set/allocator/ext_ptr.cc
index f6b908ac03e..6026aeff140 100644
--- a/libstdc++-v3/testsuite/23_containers/unordered_set/allocator/ext_ptr.cc
+++ b/libstdc++-v3/testsuite/23_containers/unordered_set/allocator/ext_ptr.cc
@@ -17,8 +17,7 @@
 
 // This test fails to compile since C++17 (see xfail-if below) so we can only
 // do a "run" test for C++11 and C++14, and a "compile" test for C++17 and up.
-// { dg-do run { target { c++11_only || c++14_only } } }
-// { dg-do compile { target c++17 } }
+// { dg-do run { target c++11 } }
 
 #include <unordered_set>
 #include <memory>
@@ -26,15 +25,22 @@
 #include <testsuite_allocator.h>
 
 struct T { int i; };
-bool operator==(const T& l, const T& r) { return l.i == r.i; }
-struct H { std::size_t operator()(const T& t) const noexcept { return t.i; }
+
+bool operator==(const T& l, const T& r)
+{ return l.i == r.i; }
+
+struct H
+{
+  std::size_t
+  operator()(const T& t) const noexcept
+  { return t.i; }
 };
-struct E : std::equal_to<T> { };
+
+struct E : std::equal_to<T>
+{ };
 
 using __gnu_test::CustomPointerAlloc;
 
-// { dg-xfail-if "node reinsertion assumes raw pointers" { c++17 } }
-// TODO when removing this xfail change the test back to "dg-do run".
 template class std::unordered_set<T, H, E, CustomPointerAlloc<T>>;
 
 void test01()

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: libstdc++ PR 57272 Fancy pointer support in Hashtable
  2020-04-19 17:31 libstdc++ PR 57272 Fancy pointer support in Hashtable François Dumont
@ 2020-05-15 21:12 ` François Dumont
  2020-09-28 20:37   ` François Dumont
  0 siblings, 1 reply; 10+ messages in thread
From: François Dumont @ 2020-05-15 21:12 UTC (permalink / raw)
  To: libstdc++, gcc-patches

[-- Attachment #1: Type: text/plain, Size: 4700 bytes --]

I think I completed this evolution.

I eventually used references to the node pointer as much as possible and even
used move semantics on it.

My prerequisite for this to work is that nullptr can be assigned to the
fancy pointer and that a fancy pointer to __node_type is implicitly
convertible to a fancy pointer to __node_base.

     * include/bits/hashtable_policy.h (_Hashtable_base): Add _Alloc
     template parameter.
         (_ReuseOrAllocNode<>::__node_type): Remove.
         (_ReuseOrAllocNode<>::__node_pointer): New.
         (_ReuseOrAllocNode(__node_pointer, __hashtable_alloc&)): Adapt 
to use
         latter.
         (_ReuseOrAllocNode<>::operator()(_Arg&&)): Return latter.
         (_AllocNode<>::__node_type): Remove.
         (_AllocNode<>::__node_pointer): New.
         (_AllocNode<>::operator()<>(_Arg&&)): Return latter.
         (_Hash_node_base<>): Add _NodePtr template parameter.
         (_Hash_node_value_base<>): Likewise.
         (_Hash_node<>): Add _Ptr template parameter.
         (_Hash_node<>::_M_next()): Remove.
         (_Node_iterator_base<>): Use _NodePtr template parameter.
         (operator==(const _Node_iterator_base&, const 
_Node_iterator_base&)):
         Make inline friend.
         (operator!=(const _Node_iterator_base&, const 
_Node_iterator_base&)):
         Likewise.
         (_Node_iterator<>): Use _NodePtr template parameter.
         (_Node_const_iterator<>): Use _NodePtr template parameter.
         (_Map_base<>::__node_type): Remove.
         (_Map_base<>::__node_pointer): New.
         (_Hash_code_base<>): Add _Alloc template parameter.
         (_Hash_code_base<>::__pointer): New.
         (_Hash_code_base<>::__node_pointer): New.
         (_Hash_code_base<>::__node_ptr_arg_t): New.
         (_Local_iterator_base<>): Add _Alloc template parameter. 
Inherit from
         _Node_iterator_base<>.
         (_Local_iterator_base<>::__base_node_iter): New.
         (_Local_iterator_base<>::_M_cur): Remove.
         (_Local_iterator_base<>::_M_incr()): Adapt.
         (_Local_iterator_base<>::_M_curr()): Remove.
     (operator==(const _Local_iterator_base<>&,
     const _Local_iterator_base<>&)): Remove.
         (operator!=(const _Local_iterator_base<>&,
         const _Local_iterator_base<>&)): Remove.
         (_Local_iterator<>): Add _Alloc and _NodePtr template parameters.
         (_Local_const_iterator<>): Likewise.
         (_Hashtable_base<>): Add _Alloc template parameter.
         (_Hashtable_alloc<>::__node_pointer): New.
         (_Hashtable_alloc<>::__bucket_pointer): New.
         (_Hashtable_alloc<>::_M_allocate_node): Adapt.
         (_Hashtable_alloc<>::_M_deallocate_node): Adapt.
         (_Hashtable_alloc<>::_M_deallocate_node_ptr): Adapt.
         (_Hashtable_alloc<>::_M_deallocate_nodes): Adapt.
         (_Hashtable_alloc<>::_M_allocate_buckets): Adapt.
         (_Hashtable_alloc<>::_M_deallocate_buckets): Adapt.
         * include/bits/hashtable.h (_Hashtable<>): Adapt.
     (_Hashtable<>::_M_begin()): Remove.
         * include/debug/unordered_map: Adapt.
         * include/debug/unordered_set: Adapt.
         * testsuite/23_containers/unordered_map/allocator/ext_ptr.cc: New.
         * 
testsuite/23_containers/unordered_multimap/allocator/ext_ptr.cc: New.
         * 
testsuite/23_containers/unordered_multiset/allocator/ext_ptr.cc: New.
        * testsuite/23_containers/unordered_set/allocator/ext_ptr.cc: Adapt.

Tested under Linux x86_64.

Ok to commit ?

François

On 19/04/20 7:31 pm, François Dumont wrote:
> Here is my work in progress to use allocator pointer type. This type 
> is used both as the node pointer and as the buckets pointer.
>
> Rather than adapting _Local_iterator_base like _Node_iterator_base I 
> prefer to just make it inherits from _Node_iterator_base. It 
> simplifies its implementation and avoids to provided dedicated 
> comparison operators.
>
> Now I wonder if I need to consider Phil Bouchard comment regarding how 
> node pointers are being passed, either by value or reference. I 
> already chose to pass them as rvalue references in some occasions and 
> even lvalue reference like in _M_bucket_index method. Do you think I 
> need to continue this way ? Maybe I should use some conditional type, 
> if raw pointer we pass by value and otherwise we pass by ref ?
>
> François
>


[-- Attachment #2: hashtable_ext_ptr.patch --]
[-- Type: text/x-patch, Size: 94662 bytes --]

diff --git a/libstdc++-v3/include/bits/hashtable.h b/libstdc++-v3/include/bits/hashtable.h
index b00319a668b..b735291bb56 100644
--- a/libstdc++-v3/include/bits/hashtable.h
+++ b/libstdc++-v3/include/bits/hashtable.h
@@ -171,7 +171,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	   typename _H1, typename _H2, typename _Hash,
 	   typename _RehashPolicy, typename _Traits>
     class _Hashtable
-    : public __detail::_Hashtable_base<_Key, _Value, _ExtractKey, _Equal,
+    : public __detail::_Hashtable_base<_Key, _Value, _Alloc,
+				       _ExtractKey, _Equal,
 				       _H1, _H2, _Hash, _Traits>,
       public __detail::_Map_base<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 				 _H1, _H2, _Hash, _RehashPolicy, _Traits>,
@@ -182,9 +183,9 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       public __detail::_Equality<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 				 _H1, _H2, _Hash, _RehashPolicy, _Traits>,
       private __detail::_Hashtable_alloc<
-	__alloc_rebind<_Alloc,
-		       __detail::_Hash_node<_Value,
-					    _Traits::__hash_cached::value>>>
+	__alloc_rebind<_Alloc, __detail::_Hash_node<
+	  typename std::allocator_traits<_Alloc>::pointer, _Value,
+				 _Traits::__hash_cached::value>>>
     {
       static_assert(is_same<typename remove_cv<_Value>::type, _Value>::value,
 	  "unordered container must have a non-const, non-volatile value_type");
@@ -195,8 +196,14 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 
       using __traits_type = _Traits;
       using __hash_cached = typename __traits_type::__hash_cached;
-      using __node_type = __detail::_Hash_node<_Value, __hash_cached::value>;
+      using __hashtable_base = __detail::
+			      _Hashtable_base<_Key, _Value, _Alloc, _ExtractKey,
+					      _Equal, _H1, _H2, _Hash, _Traits>;
+
+      using __node_type = typename __hashtable_base::__node_type;
       using __node_alloc_type = __alloc_rebind<_Alloc, __node_type>;
+      using __node_pointer = typename __hashtable_base::__node_pointer;
+      using __node_ptr_arg_t = typename __hashtable_base::__node_ptr_arg_t;
 
       using __hashtable_alloc = __detail::_Hashtable_alloc<__node_alloc_type>;
 
@@ -206,6 +213,9 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	typename __hashtable_alloc::__node_alloc_traits;
       using __node_base = typename __hashtable_alloc::__node_base;
       using __bucket_type = typename __hashtable_alloc::__bucket_type;
+      using __bucket_pointer = typename __hashtable_alloc::__bucket_pointer;
+      using __bucket_ptr_traits = std::pointer_traits<__bucket_pointer>;
+      using __node_base_ptr_traits = std::pointer_traits<__bucket_type>;
 
     public:
       typedef _Key						key_type;
@@ -232,10 +242,6 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 				       	     __detail::_Identity,
 					     __detail::_Select1st>::type;
 
-      using __hashtable_base = __detail::
-			       _Hashtable_base<_Key, _Value, _ExtractKey,
-					      _Equal, _H1, _H2, _Hash, _Traits>;
-
       using __hash_code_base =  typename __hashtable_base::__hash_code_base;
       using __hash_code =  typename __hashtable_base::__hash_code;
       using __ireturn_type = typename __hashtable_base::__ireturn_type;
@@ -262,8 +268,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       struct _Scoped_node
       {
 	// Take ownership of a node with a constructed element.
-	_Scoped_node(__node_type* __n, __hashtable_alloc* __h)
-	: _M_h(__h), _M_node(__n) { }
+	_Scoped_node(__node_pointer&& __n, __hashtable_alloc* __h)
+	: _M_h(__h), _M_node(std::move(__n)) { }
 
 	// Allocate a node and construct an element within it.
 	template<typename... _Args>
@@ -279,7 +285,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	_Scoped_node& operator=(const _Scoped_node&) = delete;
 
 	__hashtable_alloc* _M_h;
-	__node_type* _M_node;
+	__node_pointer _M_node;
       };
 
       template<typename _Ht>
@@ -306,7 +312,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       // Getting a bucket index from a node shall not throw because it is used
       // in methods (erase, swap...) that shall not throw.
       static_assert(noexcept(declval<const __hash_code_base_access&>()
-			     ._M_bucket_index((const __node_type*)nullptr,
+			     ._M_bucket_index(declval<__node_ptr_arg_t>(),
 					      (std::size_t)0)),
 		    "Cache the hash code or qualify your functors involved"
 		    " in hash code and bucket index computation with noexcept");
@@ -361,7 +367,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 #endif
 
     private:
-      __bucket_type*		_M_buckets		= &_M_single_bucket;
+      __bucket_pointer		_M_buckets
+	= __bucket_ptr_traits::pointer_to(_M_single_bucket);
       size_type			_M_bucket_count		= 1;
       __node_base		_M_before_begin;
       size_type			_M_element_count	= 0;
@@ -376,8 +383,11 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       __bucket_type		_M_single_bucket	= nullptr;
 
       bool
-      _M_uses_single_bucket(__bucket_type* __bkts) const
-      { return __builtin_expect(__bkts == &_M_single_bucket, false); }
+      _M_uses_single_bucket(__bucket_pointer __bkts) const
+      {
+	return __builtin_expect(std::__to_address(__bkts) == &_M_single_bucket,
+				false);
+      }
 
       bool
       _M_uses_single_bucket() const
@@ -386,20 +396,20 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       __hashtable_alloc&
       _M_base_alloc() { return *this; }
 
-      __bucket_type*
+      __bucket_pointer
       _M_allocate_buckets(size_type __bkt_count)
       {
 	if (__builtin_expect(__bkt_count == 1, false))
 	  {
 	    _M_single_bucket = nullptr;
-	    return &_M_single_bucket;
+	    return __bucket_ptr_traits::pointer_to(_M_single_bucket);
 	  }
 
 	return __hashtable_alloc::_M_allocate_buckets(__bkt_count);
       }
 
       void
-      _M_deallocate_buckets(__bucket_type* __bkts, size_type __bkt_count)
+      _M_deallocate_buckets(__bucket_pointer __bkts, size_type __bkt_count)
       {
 	if (_M_uses_single_bucket(__bkts))
 	  return;
@@ -413,13 +423,9 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 
       // Gets bucket begin, deals with the fact that non-empty buckets contain
       // their before begin node.
-      __node_type*
+      __node_pointer
       _M_bucket_begin(size_type __bkt) const;
 
-      __node_type*
-      _M_begin() const
-      { return static_cast<__node_type*>(_M_before_begin._M_nxt); }
-
       // Assign *this using another _Hashtable instance. Whether elements
       // are copied or moved depends on the _Ht reference.
       template<typename _Ht>
@@ -523,7 +529,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       _Hashtable&
       operator=(initializer_list<value_type> __l)
       {
-	__reuse_or_alloc_node_gen_t __roan(_M_begin(), *this);
+	__reuse_or_alloc_node_gen_t __roan(std::move(_M_before_begin._M_nxt),
+					   *this);
 	_M_before_begin._M_nxt = nullptr;
 	clear();
 	this->_M_insert_range(__l.begin(), __l.end(), __roan, __unique_keys());
@@ -540,11 +547,11 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       // Basic container operations
       iterator
       begin() noexcept
-      { return iterator(_M_begin()); }
+      { return iterator(_M_before_begin._M_nxt); }
 
       const_iterator
       begin() const noexcept
-      { return const_iterator(_M_begin()); }
+      { return const_iterator(_M_before_begin._M_nxt); }
 
       iterator
       end() noexcept
@@ -556,7 +563,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 
       const_iterator
       cbegin() const noexcept
-      { return const_iterator(_M_begin()); }
+      { return const_iterator(_M_before_begin._M_nxt); }
 
       const_iterator
       cend() const noexcept
@@ -674,7 +681,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     protected:
       // Bucket index computation helpers.
       size_type
-      _M_bucket_index(__node_type* __n) const noexcept
+      _M_bucket_index(__node_ptr_arg_t __n) const noexcept
       { return __hash_code_base::_M_bucket_index(__n, _M_bucket_count); }
 
       size_type
@@ -683,45 +690,45 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 
       // Find and insert helper functions and types
       // Find the node before the one matching the criteria.
-      __node_base*
+      __bucket_type
       _M_find_before_node(size_type, const key_type&, __hash_code) const;
 
-      __node_type*
+      __node_pointer
       _M_find_node(size_type __bkt, const key_type& __key,
 		   __hash_code __c) const
       {
-	__node_base* __before_n = _M_find_before_node(__bkt, __key, __c);
+	__bucket_type __before_n = _M_find_before_node(__bkt, __key, __c);
 	if (__before_n)
-	  return static_cast<__node_type*>(__before_n->_M_nxt);
+	  return __before_n->_M_nxt;
 	return nullptr;
       }
 
       // Insert a node at the beginning of a bucket.
-      void
-      _M_insert_bucket_begin(size_type, __node_type*);
+      __node_pointer
+      _M_insert_bucket_begin(size_type, __node_pointer&&);
 
       // Remove the bucket first node
       void
-      _M_remove_bucket_begin(size_type __bkt, __node_type* __next_n,
+      _M_remove_bucket_begin(size_type __bkt, __node_ptr_arg_t __next_n,
 			     size_type __next_bkt);
 
       // Get the node before __n in the bucket __bkt
-      __node_base*
-      _M_get_previous_node(size_type __bkt, __node_base* __n);
+      __bucket_type
+      _M_get_previous_node(size_type __bkt, __node_ptr_arg_t __n);
 
       // Insert node __n with key __k and hash code __code, in bucket __bkt
       // if no rehash (assumes no element with same key already present).
       // Takes ownership of __n if insertion succeeds, throws otherwise.
       iterator
       _M_insert_unique_node(const key_type& __k, size_type __bkt,
-			    __hash_code __code, __node_type* __n,
+			    __hash_code __code, __node_pointer&& __n,
 			    size_type __n_elt = 1);
 
       // Insert node __n with key __k and hash code __code.
       // Takes ownership of __n if insertion succeeds, throws otherwise.
       iterator
-      _M_insert_multi_node(__node_type* __hint, const key_type& __k,
-			   __hash_code __code, __node_type* __n);
+      _M_insert_multi_node(__node_pointer __hint, const key_type& __k,
+			   __hash_code __code, __node_pointer&& __n);
 
       template<typename... _Args>
 	std::pair<iterator, bool>
@@ -778,7 +785,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       _M_erase(false_type, const key_type&);
 
       iterator
-      _M_erase(size_type __bkt, __node_base* __prev_n, __node_type* __n);
+      _M_erase(size_type __bkt, __bucket_type __prev_n, __node_pointer __n);
 
     public:
       // Emplace
@@ -838,7 +845,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	    const key_type& __k = __nh._M_key();
 	    __hash_code __code = this->_M_hash_code(__k);
 	    size_type __bkt = _M_bucket_index(__k, __code);
-	    if (__node_type* __n = _M_find_node(__bkt, __k, __code))
+	    if (__node_pointer __n = _M_find_node(__bkt, __k, __code))
 	      {
 		__ret.node = std::move(__nh);
 		__ret.position = iterator(__n);
@@ -846,8 +853,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	      }
 	    else
 	      {
-		__ret.position
-		  = _M_insert_unique_node(__k, __bkt, __code, __nh._M_ptr);
+		__ret.position = _M_insert_unique_node(__k, __bkt, __code,
+						       std::move(__nh._M_ptr));
 		__nh._M_ptr = nullptr;
 		__ret.inserted = true;
 	      }
@@ -866,23 +873,23 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 
 	const key_type& __k = __nh._M_key();
 	auto __code = this->_M_hash_code(__k);
-	auto __ret
-	  = _M_insert_multi_node(__hint._M_cur, __k, __code, __nh._M_ptr);
+	auto __ret = _M_insert_multi_node(__hint._M_cur, __k, __code,
+					  std::move(__nh._M_ptr));
 	__nh._M_ptr = nullptr;
 	return __ret;
       }
 
     private:
       node_type
-      _M_extract_node(size_t __bkt, __node_base* __prev_n)
+      _M_extract_node(size_t __bkt, __bucket_type __prev_n)
       {
-	__node_type* __n = static_cast<__node_type*>(__prev_n->_M_nxt);
+	__node_pointer __n = __prev_n->_M_nxt;
 	if (__prev_n == _M_buckets[__bkt])
-	  _M_remove_bucket_begin(__bkt, __n->_M_next(),
-	     __n->_M_nxt ? _M_bucket_index(__n->_M_next()) : 0);
+	  _M_remove_bucket_begin(__bkt, __n->_M_nxt,
+	     __n->_M_nxt ? _M_bucket_index(__n->_M_nxt) : 0);
 	else if (__n->_M_nxt)
 	  {
-	    size_type __next_bkt = _M_bucket_index(__n->_M_next());
+	    size_type __next_bkt = _M_bucket_index(__n->_M_nxt);
 	    if (__next_bkt != __bkt)
 	      _M_buckets[__next_bkt] = __prev_n;
 	  }
@@ -910,7 +917,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	node_type __nh;
 	__hash_code __code = this->_M_hash_code(__k);
 	std::size_t __bkt = _M_bucket_index(__k, __code);
-	if (__node_base* __prev_node = _M_find_before_node(__bkt, __k, __code))
+	if (__bucket_type __prev_node = _M_find_before_node(__bkt, __k, __code))
 	  __nh = _M_extract_node(__bkt, __prev_node);
 	return __nh;
       }
@@ -934,8 +941,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	      if (_M_find_node(__bkt, __k, __code) == nullptr)
 		{
 		  auto __nh = __src.extract(__pos);
-		  _M_insert_unique_node(__k, __bkt, __code, __nh._M_ptr,
-					__n_elt);
+		  _M_insert_unique_node(__k, __bkt, __code,
+					std::move(__nh._M_ptr), __n_elt);
 		  __nh._M_ptr = nullptr;
 		  __n_elt = 1;
 		}
@@ -981,10 +988,10 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 	       _H1, _H2, _Hash, _RehashPolicy, _Traits>::
     _M_bucket_begin(size_type __bkt) const
-    -> __node_type*
+    -> __node_pointer
     {
-      __node_base* __n = _M_buckets[__bkt];
-      return __n ? static_cast<__node_type*>(__n->_M_nxt) : nullptr;
+      __bucket_type __n = _M_buckets[__bkt];
+      return __n ? __n->_M_nxt : nullptr;
     }
 
   template<typename _Key, typename _Value,
@@ -1058,7 +1065,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	      && __this_alloc != __that_alloc)
 	    {
 	      // Replacement allocator cannot free existing storage.
-	      this->_M_deallocate_nodes(_M_begin());
+	      this->_M_deallocate_nodes(_M_before_begin._M_nxt);
 	      _M_before_begin._M_nxt = nullptr;
 	      _M_deallocate_buckets();
 	      _M_buckets = nullptr;
@@ -1099,7 +1106,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 		 _H1, _H2, _Hash, _RehashPolicy, _Traits>::
       _M_assign_elements(_Ht&& __ht)
       {
-	__bucket_type* __former_buckets = nullptr;
+	__bucket_pointer __former_buckets = nullptr;
 	std::size_t __former_bucket_count = _M_bucket_count;
 	const __rehash_state& __former_state = _M_rehash_policy._M_state();
 
@@ -1118,7 +1125,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	    __hashtable_base::operator=(std::forward<_Ht>(__ht));
 	    _M_element_count = __ht._M_element_count;
 	    _M_rehash_policy = __ht._M_rehash_policy;
-	    __reuse_or_alloc_node_gen_t __roan(_M_begin(), *this);
+	    __reuse_or_alloc_node_gen_t
+	      __roan(std::move(_M_before_begin._M_nxt), *this);
 	    _M_before_begin._M_nxt = nullptr;
 	    _M_assign(std::forward<_Ht>(__ht), __roan);
 	    if (__former_buckets)
@@ -1150,7 +1158,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 		 _H1, _H2, _Hash, _RehashPolicy, _Traits>::
       _M_assign(_Ht&& __ht, const _NodeGenerator& __node_gen)
       {
-	__bucket_type* __buckets = nullptr;
+	__bucket_pointer __buckets = nullptr;
 	if (!_M_buckets)
 	  _M_buckets = __buckets = _M_allocate_buckets(_M_bucket_count);
 
@@ -1161,16 +1169,17 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 
 	    // First deal with the special first node pointed to by
 	    // _M_before_begin.
-	    __node_type* __ht_n = __ht._M_begin();
-	    __node_type* __this_n
+	    __node_pointer __ht_n = __ht._M_before_begin._M_nxt;
+	    __node_pointer __this_n
 	      = __node_gen(__fwd_value_for<_Ht>(__ht_n->_M_v()));
 	    this->_M_copy_code(__this_n, __ht_n);
 	    _M_before_begin._M_nxt = __this_n;
-	    _M_buckets[_M_bucket_index(__this_n)] = &_M_before_begin;
+	    _M_buckets[_M_bucket_index(__this_n)] =
+	      __node_base_ptr_traits::pointer_to(_M_before_begin);
 
 	    // Then deal with other nodes.
-	    __node_base* __prev_n = __this_n;
-	    for (__ht_n = __ht_n->_M_next(); __ht_n; __ht_n = __ht_n->_M_next())
+	    __node_pointer __prev_n = __this_n;
+	    for (__ht_n = __ht_n->_M_nxt; __ht_n; __ht_n = __ht_n->_M_nxt)
 	      {
 		__this_n = __node_gen(__fwd_value_for<_Ht>(__ht_n->_M_v()));
 		__prev_n->_M_nxt = __this_n;
@@ -1202,7 +1211,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       _M_rehash_policy._M_reset();
       _M_bucket_count = 1;
       _M_single_bucket = nullptr;
-      _M_buckets = &_M_single_bucket;
+      _M_buckets = __bucket_ptr_traits::pointer_to(_M_single_bucket);
       _M_before_begin._M_nxt = nullptr;
       _M_element_count = 0;
     }
@@ -1216,7 +1225,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	       _H1, _H2, _Hash, _RehashPolicy, _Traits>::
     _M_move_assign(_Hashtable&& __ht, true_type)
     {
-      this->_M_deallocate_nodes(_M_begin());
+      this->_M_deallocate_nodes(_M_before_begin._M_nxt);
       _M_deallocate_buckets();
       __hashtable_base::operator=(std::move(__ht));
       _M_rehash_policy = __ht._M_rehash_policy;
@@ -1224,9 +1233,10 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	_M_buckets = __ht._M_buckets;
       else
 	{
-	  _M_buckets = &_M_single_bucket;
+	  _M_buckets = __bucket_ptr_traits::pointer_to(_M_single_bucket);
 	  _M_single_bucket = __ht._M_single_bucket;
 	}
+
       _M_bucket_count = __ht._M_bucket_count;
       _M_before_begin._M_nxt = __ht._M_before_begin._M_nxt;
       _M_element_count = __ht._M_element_count;
@@ -1234,8 +1244,9 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 
       // Fix buckets containing the _M_before_begin pointers that can't be
       // moved.
-      if (_M_begin())
-	_M_buckets[_M_bucket_index(_M_begin())] = &_M_before_begin;
+      if (_M_before_begin._M_nxt)
+	_M_buckets[_M_bucket_index(_M_before_begin._M_nxt)] =
+	  __node_base_ptr_traits::pointer_to(_M_before_begin);
       __ht._M_reset();
     }
 
@@ -1290,23 +1301,24 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       __map_base(__ht),
       __rehash_base(__ht),
       __hashtable_alloc(std::move(__ht._M_base_alloc())),
-      _M_buckets(__ht._M_buckets),
+      _M_buckets(std::move(__ht._M_buckets)),
       _M_bucket_count(__ht._M_bucket_count),
-      _M_before_begin(__ht._M_before_begin._M_nxt),
+      _M_before_begin(std::move(__ht._M_before_begin._M_nxt)),
       _M_element_count(__ht._M_element_count),
       _M_rehash_policy(__ht._M_rehash_policy)
     {
       // Update, if necessary, buckets if __ht is using its single bucket.
-      if (__ht._M_uses_single_bucket())
+      if (std::__to_address(_M_buckets) == &__ht._M_single_bucket)
 	{
-	  _M_buckets = &_M_single_bucket;
+	  _M_buckets = __bucket_ptr_traits::pointer_to(_M_single_bucket);
 	  _M_single_bucket = __ht._M_single_bucket;
 	}
 
       // Update, if necessary, bucket pointing to before begin that hasn't
       // moved.
-      if (_M_begin())
-	_M_buckets[_M_bucket_index(_M_begin())] = &_M_before_begin;
+      if (_M_before_begin._M_nxt)
+	_M_buckets[_M_bucket_index(_M_before_begin._M_nxt)] =
+	  __node_base_ptr_traits::pointer_to(_M_before_begin);
 
       __ht._M_reset();
     }
@@ -1322,7 +1334,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       __map_base(__ht),
       __rehash_base(__ht),
       __hashtable_alloc(__node_alloc_type(__a)),
-      _M_buckets(),
+      _M_buckets(nullptr),
       _M_bucket_count(__ht._M_bucket_count),
       _M_element_count(__ht._M_element_count),
       _M_rehash_policy(__ht._M_rehash_policy)
@@ -1351,17 +1363,18 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	{
 	  if (__ht._M_uses_single_bucket())
 	    {
-	      _M_buckets = &_M_single_bucket;
+	      _M_buckets = __bucket_ptr_traits::pointer_to(_M_single_bucket);
 	      _M_single_bucket = __ht._M_single_bucket;
 	    }
 	  else
-	    _M_buckets = __ht._M_buckets;
+	    _M_buckets = std::move(__ht._M_buckets);
 
-	  _M_before_begin._M_nxt = __ht._M_before_begin._M_nxt;
+	  _M_before_begin._M_nxt = std::move(__ht._M_before_begin._M_nxt);
 	  // Update, if necessary, bucket pointing to before begin that hasn't
 	  // moved.
-	  if (_M_begin())
-	    _M_buckets[_M_bucket_index(_M_begin())] = &_M_before_begin;
+	  if (_M_before_begin._M_nxt)
+	    _M_buckets[_M_bucket_index(_M_before_begin._M_nxt)] =
+	      __node_base_ptr_traits::pointer_to(_M_before_begin);
 	  __ht._M_reset();
 	}
       else
@@ -1413,13 +1426,14 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	  if (!__x._M_uses_single_bucket())
 	    {
 	      _M_buckets = __x._M_buckets;
-	      __x._M_buckets = &__x._M_single_bucket;
+	      __x._M_buckets =
+		__bucket_ptr_traits::pointer_to(__x._M_single_bucket);
 	    }
 	}
       else if (__x._M_uses_single_bucket())
 	{
 	  __x._M_buckets = _M_buckets;
-	  _M_buckets = &_M_single_bucket;
+	  _M_buckets = __bucket_ptr_traits::pointer_to(_M_single_bucket);
 	}	
       else
 	std::swap(_M_buckets, __x._M_buckets);
@@ -1431,12 +1445,13 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 
       // Fix buckets containing the _M_before_begin pointers that can't be
       // swapped.
-      if (_M_begin())
-	_M_buckets[_M_bucket_index(_M_begin())] = &_M_before_begin;
+      if (_M_before_begin._M_nxt)
+	_M_buckets[_M_bucket_index(_M_before_begin._M_nxt)] =
+	  __node_base_ptr_traits::pointer_to(_M_before_begin);
 
-      if (__x._M_begin())
-	__x._M_buckets[__x._M_bucket_index(__x._M_begin())]
-	  = &__x._M_before_begin;
+      if (__x._M_before_begin._M_nxt)
+	__x._M_buckets[__x._M_bucket_index(__x._M_before_begin._M_nxt)]
+	  = __node_base_ptr_traits::pointer_to(__x._M_before_begin);
     }
 
   template<typename _Key, typename _Value,
@@ -1451,7 +1466,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     {
       __hash_code __code = this->_M_hash_code(__k);
       std::size_t __bkt = _M_bucket_index(__k, __code);
-      __node_type* __p = _M_find_node(__bkt, __k, __code);
+      __node_pointer __p = _M_find_node(__bkt, __k, __code);
       return __p ? iterator(__p) : end();
     }
 
@@ -1467,7 +1482,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     {
       __hash_code __code = this->_M_hash_code(__k);
       std::size_t __bkt = _M_bucket_index(__k, __code);
-      __node_type* __p = _M_find_node(__bkt, __k, __code);
+      __node_pointer __p = _M_find_node(__bkt, __k, __code);
       return __p ? const_iterator(__p) : end();
     }
 
@@ -1483,12 +1498,12 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     {
       __hash_code __code = this->_M_hash_code(__k);
       std::size_t __bkt = _M_bucket_index(__k, __code);
-      __node_type* __p = _M_bucket_begin(__bkt);
+      __node_pointer __p = _M_bucket_begin(__bkt);
       if (!__p)
 	return 0;
 
       std::size_t __result = 0;
-      for (;; __p = __p->_M_next())
+      for (;; __p = __p->_M_nxt)
 	{
 	  if (this->_M_equals(__k, __code, __p))
 	    ++__result;
@@ -1497,7 +1512,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	    // found a non-equivalent value after an equivalent one it
 	    // means that we won't find any new equivalent value.
 	    break;
-	  if (!__p->_M_nxt || _M_bucket_index(__p->_M_next()) != __bkt)
+	  if (!__p->_M_nxt || _M_bucket_index(__p->_M_nxt) != __bkt)
 	    break;
 	}
       return __result;
@@ -1515,14 +1530,14 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     {
       __hash_code __code = this->_M_hash_code(__k);
       std::size_t __bkt = _M_bucket_index(__k, __code);
-      __node_type* __p = _M_find_node(__bkt, __k, __code);
+      __node_pointer __p = _M_find_node(__bkt, __k, __code);
 
       if (__p)
 	{
-	  __node_type* __p1 = __p->_M_next();
+	  __node_pointer __p1 = __p->_M_nxt;
 	  while (__p1 && _M_bucket_index(__p1) == __bkt
 		 && this->_M_equals(__k, __code, __p1))
-	    __p1 = __p1->_M_next();
+	    __p1 = __p1->_M_nxt;
 
 	  return std::make_pair(iterator(__p), iterator(__p1));
 	}
@@ -1542,14 +1557,14 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     {
       __hash_code __code = this->_M_hash_code(__k);
       std::size_t __bkt = _M_bucket_index(__k, __code);
-      __node_type* __p = _M_find_node(__bkt, __k, __code);
+      __node_pointer __p = _M_find_node(__bkt, __k, __code);
 
       if (__p)
 	{
-	  __node_type* __p1 = __p->_M_next();
+	  __node_pointer __p1 = __p->_M_nxt;
 	  while (__p1 && _M_bucket_index(__p1) == __bkt
 		 && this->_M_equals(__k, __code, __p1))
-	    __p1 = __p1->_M_next();
+	    __p1 = __p1->_M_nxt;
 
 	  return std::make_pair(const_iterator(__p), const_iterator(__p1));
 	}
@@ -1568,19 +1583,18 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	       _H1, _H2, _Hash, _RehashPolicy, _Traits>::
     _M_find_before_node(size_type __bkt, const key_type& __k,
 			__hash_code __code) const
-    -> __node_base*
+    -> __bucket_type
     {
-      __node_base* __prev_p = _M_buckets[__bkt];
+      __bucket_type __prev_p = _M_buckets[__bkt];
       if (!__prev_p)
 	return nullptr;
 
-      for (__node_type* __p = static_cast<__node_type*>(__prev_p->_M_nxt);;
-	   __p = __p->_M_next())
+      for (__node_pointer __p = __prev_p->_M_nxt;; __p = __p->_M_nxt)
 	{
 	  if (this->_M_equals(__k, __code, __p))
 	    return __prev_p;
 
-	  if (!__p->_M_nxt || _M_bucket_index(__p->_M_next()) != __bkt)
+	  if (!__p->_M_nxt || _M_bucket_index(__p->_M_nxt) != __bkt)
 	    break;
 	  __prev_p = __p;
 	}
@@ -1591,17 +1605,19 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	   typename _Alloc, typename _ExtractKey, typename _Equal,
 	   typename _H1, typename _H2, typename _Hash, typename _RehashPolicy,
 	   typename _Traits>
-    void
+    auto
     _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 	       _H1, _H2, _Hash, _RehashPolicy, _Traits>::
-    _M_insert_bucket_begin(size_type __bkt, __node_type* __node)
+    _M_insert_bucket_begin(size_type __bkt, __node_pointer&& __node)
+    -> __node_pointer
     {
       if (_M_buckets[__bkt])
 	{
 	  // Bucket is not empty, we just need to insert the new node
 	  // after the bucket before begin.
 	  __node->_M_nxt = _M_buckets[__bkt]->_M_nxt;
-	  _M_buckets[__bkt]->_M_nxt = __node;
+	  _M_buckets[__bkt]->_M_nxt = std::move(__node);
+	  return _M_buckets[__bkt]->_M_nxt;
 	}
       else
 	{
@@ -1609,12 +1625,15 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	  // beginning of the singly-linked list and the bucket will
 	  // contain _M_before_begin pointer.
 	  __node->_M_nxt = _M_before_begin._M_nxt;
-	  _M_before_begin._M_nxt = __node;
-	  if (__node->_M_nxt)
+	  _M_before_begin._M_nxt = std::move(__node);
+	  if (_M_before_begin._M_nxt->_M_nxt)
 	    // We must update former begin bucket that is pointing to
 	    // _M_before_begin.
-	    _M_buckets[_M_bucket_index(__node->_M_next())] = __node;
-	  _M_buckets[__bkt] = &_M_before_begin;
+	    _M_buckets[_M_bucket_index(_M_before_begin._M_nxt->_M_nxt)] =
+	      _M_before_begin._M_nxt;
+	  _M_buckets[__bkt] =
+	    __node_base_ptr_traits::pointer_to(_M_before_begin);
+	  return _M_before_begin._M_nxt;
 	}
     }
 
@@ -1625,7 +1644,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     void
     _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 	       _H1, _H2, _Hash, _RehashPolicy, _Traits>::
-    _M_remove_bucket_begin(size_type __bkt, __node_type* __next,
+    _M_remove_bucket_begin(size_type __bkt, __node_ptr_arg_t __next,
 			   size_type __next_bkt)
     {
       if (!__next || __next_bkt != __bkt)
@@ -1636,7 +1655,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	    _M_buckets[__next_bkt] = _M_buckets[__bkt];
 
 	  // Second update before begin node if necessary
-	  if (&_M_before_begin == _M_buckets[__bkt])
+	  if (&_M_before_begin == std::__to_address(_M_buckets[__bkt]))
 	    _M_before_begin._M_nxt = __next;
 	  _M_buckets[__bkt] = nullptr;
 	}
@@ -1649,10 +1668,10 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     auto
     _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 	       _H1, _H2, _Hash, _RehashPolicy, _Traits>::
-    _M_get_previous_node(size_type __bkt, __node_base* __n)
-    -> __node_base*
+    _M_get_previous_node(size_type __bkt, __node_ptr_arg_t __n)
+    -> __bucket_type
     {
-      __node_base* __prev_n = _M_buckets[__bkt];
+      __bucket_type __prev_n = _M_buckets[__bkt];
       while (__prev_n->_M_nxt != __n)
 	__prev_n = __prev_n->_M_nxt;
       return __prev_n;
@@ -1674,12 +1693,13 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	const key_type& __k = this->_M_extract()(__node._M_node->_M_v());
 	__hash_code __code = this->_M_hash_code(__k);
 	size_type __bkt = _M_bucket_index(__k, __code);
-	if (__node_type* __p = _M_find_node(__bkt, __k, __code))
+	if (__node_pointer __p = _M_find_node(__bkt, __k, __code))
 	  // There is already an equivalent node, no insertion
 	  return std::make_pair(iterator(__p), false);
 
 	// Insert the node
-	auto __pos = _M_insert_unique_node(__k, __bkt, __code, __node._M_node);
+	auto __pos = _M_insert_unique_node(__k, __bkt, __code,
+					   std::move(__node._M_node));
 	__node._M_node = nullptr;
 	return { __pos, true };
       }
@@ -1700,8 +1720,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	const key_type& __k = this->_M_extract()(__node._M_node->_M_v());
 
 	__hash_code __code = this->_M_hash_code(__k);
-	auto __pos
-	  = _M_insert_multi_node(__hint._M_cur, __k, __code, __node._M_node);
+	auto __pos = _M_insert_multi_node(__hint._M_cur, __k, __code,
+					  std::move(__node._M_node));
 	__node._M_node = nullptr;
 	return __pos;
       }
@@ -1714,7 +1734,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 	       _H1, _H2, _Hash, _RehashPolicy, _Traits>::
     _M_insert_unique_node(const key_type& __k, size_type __bkt,
-			  __hash_code __code, __node_type* __node,
+			  __hash_code __code, __node_pointer&& __node,
 			  size_type __n_elt)
     -> iterator
     {
@@ -1732,9 +1752,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       this->_M_store_code(__node, __code);
 
       // Always insert at the beginning of the bucket.
-      _M_insert_bucket_begin(__bkt, __node);
       ++_M_element_count;
-      return iterator(__node);
+      return iterator(_M_insert_bucket_begin(__bkt, std::move(__node)));
     }
 
   template<typename _Key, typename _Value,
@@ -1744,8 +1763,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     auto
     _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 	       _H1, _H2, _Hash, _RehashPolicy, _Traits>::
-    _M_insert_multi_node(__node_type* __hint, const key_type& __k,
-			 __hash_code __code, __node_type* __node)
+    _M_insert_multi_node(__node_pointer __hint, const key_type& __k,
+			 __hash_code __code, __node_pointer&& __node)
     -> iterator
     {
       const __rehash_state& __saved_state = _M_rehash_policy._M_state();
@@ -1760,34 +1779,39 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 
       // Find the node before an equivalent one or use hint if it exists and
       // if it is equivalent.
-      __node_base* __prev
-	= __builtin_expect(__hint != nullptr, false)
-	  && this->_M_equals(__k, __code, __hint)
-	    ? __hint
-	    : _M_find_before_node(__bkt, __k, __code);
+      __bucket_type __prev;
+      if (__builtin_expect(__hint != nullptr, false)
+	  && this->_M_equals(__k, __code, __hint))
+	__prev = __hint;
+      else
+	__prev = _M_find_before_node(__bkt, __k, __code);
+
       if (__prev)
 	{
 	  // Insert after the node before the equivalent one.
 	  __node->_M_nxt = __prev->_M_nxt;
-	  __prev->_M_nxt = __node;
+	  __prev->_M_nxt = std::move(__node);
 	  if (__builtin_expect(__prev == __hint, false))
 	    // hint might be the last bucket node, in this case we need to
 	    // update next bucket.
-	    if (__node->_M_nxt
-		&& !this->_M_equals(__k, __code, __node->_M_next()))
+	    if (__prev->_M_nxt->_M_nxt
+		&& !this->_M_equals(__k, __code, __prev->_M_nxt->_M_nxt))
 	      {
-		size_type __next_bkt = _M_bucket_index(__node->_M_next());
+		size_type __next_bkt = _M_bucket_index(__prev->_M_nxt->_M_nxt);
 		if (__next_bkt != __bkt)
-		  _M_buckets[__next_bkt] = __node;
+		  _M_buckets[__next_bkt] = __prev->_M_nxt;
 	      }
+	  ++_M_element_count;
+	  return iterator(__prev->_M_nxt);
 	}
       else
-	// The inserted node has no equivalent in the hashtable. We must
-	// insert the new node at the beginning of the bucket to preserve
-	// equivalent elements' relative positions.
-	_M_insert_bucket_begin(__bkt, __node);
-      ++_M_element_count;
-      return iterator(__node);
+	{
+	  // The inserted node has no equivalent in the hashtable. We must
+	  // insert the new node at the beginning of the bucket to preserve
+	  // equivalent elements' relative positions.
+	  ++_M_element_count;
+	  return iterator(_M_insert_bucket_begin(__bkt, std::move(__node)));
+	}
     }
 
   // Insert v if no element with its key is already present.
@@ -1807,12 +1831,13 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	__hash_code __code = this->_M_hash_code(__k);
 	size_type __bkt = _M_bucket_index(__k, __code);
 
-	if (__node_type* __node = _M_find_node(__bkt, __k, __code))
+	if (__node_pointer __node = _M_find_node(__bkt, __k, __code))
 	  return { iterator(__node), false };
 
 	_Scoped_node __node{ __node_gen(std::forward<_Arg>(__v)), this };
 	auto __pos
-	  = _M_insert_unique_node(__k, __bkt, __code, __node._M_node, __n_elt);
+	  = _M_insert_unique_node(__k, __bkt, __code,
+				  std::move(__node._M_node), __n_elt);
 	__node._M_node = nullptr;
 	return { __pos, true };
       }
@@ -1837,8 +1862,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	// Second allocate new node so that we don't rehash if it throws.
 	_Scoped_node __node{ __node_gen(std::forward<_Arg>(__v)), this };
 	const key_type& __k = this->_M_extract()(__node._M_node->_M_v());
-	auto __pos
-	  = _M_insert_multi_node(__hint._M_cur, __k, __code, __node._M_node);
+	auto __pos = _M_insert_multi_node(__hint._M_cur, __k, __code,
+					  std::move(__node._M_node));
 	__node._M_node = nullptr;
 	return __pos;
       }
@@ -1853,14 +1878,13 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     erase(const_iterator __it)
     -> iterator
     {
-      __node_type* __n = __it._M_cur;
-      std::size_t __bkt = _M_bucket_index(__n);
+      std::size_t __bkt = _M_bucket_index(__it._M_cur);
 
       // Look for previous node to unlink it from the erased one, this
       // is why we need buckets to contain the before begin to make
       // this search fast.
-      __node_base* __prev_n = _M_get_previous_node(__bkt, __n);
-      return _M_erase(__bkt, __prev_n, __n);
+      __bucket_type __prev_n = _M_get_previous_node(__bkt, __it._M_cur);
+      return _M_erase(__bkt, __prev_n, __it._M_cur);
     }
 
   template<typename _Key, typename _Value,
@@ -1870,21 +1894,21 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     auto
     _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 	       _H1, _H2, _Hash, _RehashPolicy, _Traits>::
-    _M_erase(size_type __bkt, __node_base* __prev_n, __node_type* __n)
+    _M_erase(size_type __bkt, __bucket_type __prev_n, __node_pointer __n)
     -> iterator
     {
       if (__prev_n == _M_buckets[__bkt])
-	_M_remove_bucket_begin(__bkt, __n->_M_next(),
-	   __n->_M_nxt ? _M_bucket_index(__n->_M_next()) : 0);
+	_M_remove_bucket_begin(__bkt, __n->_M_nxt,
+	   __n->_M_nxt ? _M_bucket_index(__n->_M_nxt) : 0);
       else if (__n->_M_nxt)
 	{
-	  size_type __next_bkt = _M_bucket_index(__n->_M_next());
+	  size_type __next_bkt = _M_bucket_index(__n->_M_nxt);
 	  if (__next_bkt != __bkt)
 	    _M_buckets[__next_bkt] = __prev_n;
 	}
 
       __prev_n->_M_nxt = __n->_M_nxt;
-      iterator __result(__n->_M_next());
+      iterator __result(__n->_M_nxt);
       this->_M_deallocate_node(__n);
       --_M_element_count;
 
@@ -1905,13 +1929,12 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       std::size_t __bkt = _M_bucket_index(__k, __code);
 
       // Look for the node before the first matching node.
-      __node_base* __prev_n = _M_find_before_node(__bkt, __k, __code);
+      __bucket_type __prev_n = _M_find_before_node(__bkt, __k, __code);
       if (!__prev_n)
 	return 0;
 
       // We found a matching node, erase it.
-      __node_type* __n = static_cast<__node_type*>(__prev_n->_M_nxt);
-      _M_erase(__bkt, __prev_n, __n);
+      _M_erase(__bkt, __prev_n, __prev_n->_M_nxt);
       return 1;
     }
 
@@ -1929,7 +1952,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       std::size_t __bkt = _M_bucket_index(__k, __code);
 
       // Look for the node before the first matching node.
-      __node_base* __prev_n = _M_find_before_node(__bkt, __k, __code);
+      __bucket_type __prev_n = _M_find_before_node(__bkt, __k, __code);
       if (!__prev_n)
 	return 0;
 
@@ -1939,12 +1962,12 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       // We use one loop to find all matching nodes and another to deallocate
       // them so that the key stays valid during the first loop. It might be
       // invalidated indirectly when destroying nodes.
-      __node_type* __n = static_cast<__node_type*>(__prev_n->_M_nxt);
-      __node_type* __n_last = __n;
+      __node_pointer __n = __prev_n->_M_nxt;
+      __node_pointer __n_last = __n;
       std::size_t __n_last_bkt = __bkt;
       do
 	{
-	  __n_last = __n_last->_M_next();
+	  __n_last = __n_last->_M_nxt;
 	  if (!__n_last)
 	    break;
 	  __n_last_bkt = _M_bucket_index(__n_last);
@@ -1955,7 +1978,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       size_type __result = 0;
       do
 	{
-	  __node_type* __p = __n->_M_next();
+	  __node_pointer __p = __n->_M_nxt;
 	  this->_M_deallocate_node(__n);
 	  __n = __p;
 	  ++__result;
@@ -1981,22 +2004,22 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     erase(const_iterator __first, const_iterator __last)
     -> iterator
     {
-      __node_type* __n = __first._M_cur;
-      __node_type* __last_n = __last._M_cur;
+      __node_pointer __n = __first._M_cur;
+      __node_pointer __last_n = __last._M_cur;
       if (__n == __last_n)
 	return iterator(__n);
 
       std::size_t __bkt = _M_bucket_index(__n);
 
-      __node_base* __prev_n = _M_get_previous_node(__bkt, __n);
+      __bucket_type __prev_n = _M_get_previous_node(__bkt, __n);
       bool __is_bucket_begin = __n == _M_bucket_begin(__bkt);
       std::size_t __n_bkt = __bkt;
       for (;;)
 	{
 	  do
 	    {
-	      __node_type* __tmp = __n;
-	      __n = __n->_M_next();
+	      __node_pointer __tmp = __n;
+	      __n = __n->_M_nxt;
 	      this->_M_deallocate_node(__tmp);
 	      --_M_element_count;
 	      if (!__n)
@@ -2027,8 +2050,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	       _H1, _H2, _Hash, _RehashPolicy, _Traits>::
     clear() noexcept
     {
-      this->_M_deallocate_nodes(_M_begin());
-      __builtin_memset(_M_buckets, 0, _M_bucket_count * sizeof(__bucket_type));
+      this->_M_deallocate_nodes(_M_before_begin._M_nxt);
+      std::fill_n(_M_buckets, _M_bucket_count, nullptr);
       _M_element_count = 0;
       _M_before_begin._M_nxt = nullptr;
     }
@@ -2088,20 +2111,22 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	       _H1, _H2, _Hash, _RehashPolicy, _Traits>::
     _M_rehash_aux(size_type __bkt_count, true_type)
     {
-      __bucket_type* __new_buckets = _M_allocate_buckets(__bkt_count);
-      __node_type* __p = _M_begin();
+      __bucket_pointer __new_buckets = _M_allocate_buckets(__bkt_count);
+      auto __before_begin_ptr =
+	__node_base_ptr_traits::pointer_to(_M_before_begin);
+      __node_pointer __p = _M_before_begin._M_nxt;
       _M_before_begin._M_nxt = nullptr;
       std::size_t __bbegin_bkt = 0;
       while (__p)
 	{
-	  __node_type* __next = __p->_M_next();
+	  __node_pointer __next = __p->_M_nxt;
 	  std::size_t __bkt
 	    = __hash_code_base::_M_bucket_index(__p, __bkt_count);
 	  if (!__new_buckets[__bkt])
 	    {
 	      __p->_M_nxt = _M_before_begin._M_nxt;
 	      _M_before_begin._M_nxt = __p;
-	      __new_buckets[__bkt] = &_M_before_begin;
+	      __new_buckets[__bkt] = __before_begin_ptr;
 	      if (__p->_M_nxt)
 		__new_buckets[__bbegin_bkt] = __p;
 	      __bbegin_bkt = __bkt;
@@ -2130,18 +2155,19 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	       _H1, _H2, _Hash, _RehashPolicy, _Traits>::
     _M_rehash_aux(size_type __bkt_count, false_type)
     {
-      __bucket_type* __new_buckets = _M_allocate_buckets(__bkt_count);
-
-      __node_type* __p = _M_begin();
+      auto __new_buckets = _M_allocate_buckets(__bkt_count);
+      auto __before_begin_ptr =
+	__node_base_ptr_traits::pointer_to(_M_before_begin);
+      __node_pointer __p = _M_before_begin._M_nxt;
       _M_before_begin._M_nxt = nullptr;
       std::size_t __bbegin_bkt = 0;
       std::size_t __prev_bkt = 0;
-      __node_type* __prev_p = nullptr;
+      __node_pointer __prev_p{};
       bool __check_bucket = false;
 
       while (__p)
 	{
-	  __node_type* __next = __p->_M_next();
+	  __node_pointer __next = __p->_M_nxt;
 	  std::size_t __bkt
 	    = __hash_code_base::_M_bucket_index(__p, __bkt_count);
 
@@ -2169,7 +2195,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 		  if (__prev_p->_M_nxt)
 		    {
 		      std::size_t __next_bkt
-			= __hash_code_base::_M_bucket_index(__prev_p->_M_next(),
+			= __hash_code_base::_M_bucket_index(__prev_p->_M_nxt,
 							    __bkt_count);
 		      if (__next_bkt != __prev_bkt)
 			__new_buckets[__next_bkt] = __prev_p;
@@ -2181,7 +2207,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 		{
 		  __p->_M_nxt = _M_before_begin._M_nxt;
 		  _M_before_begin._M_nxt = __p;
-		  __new_buckets[__bkt] = &_M_before_begin;
+		  __new_buckets[__bkt] = __before_begin_ptr;
 		  if (__p->_M_nxt)
 		    __new_buckets[__bbegin_bkt] = __p;
 		  __bbegin_bkt = __bkt;
@@ -2200,7 +2226,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       if (__check_bucket && __prev_p->_M_nxt)
 	{
 	  std::size_t __next_bkt
-	    = __hash_code_base::_M_bucket_index(__prev_p->_M_next(),
+	    = __hash_code_base::_M_bucket_index(__prev_p->_M_nxt,
 						__bkt_count);
 	  if (__next_bkt != __prev_bkt)
 	    __new_buckets[__next_bkt] = __prev_p;
diff --git a/libstdc++-v3/include/bits/hashtable_policy.h b/libstdc++-v3/include/bits/hashtable_policy.h
index ef120134914..1ad8193f595 100644
--- a/libstdc++-v3/include/bits/hashtable_policy.h
+++ b/libstdc++-v3/include/bits/hashtable_policy.h
@@ -52,7 +52,7 @@ namespace __detail
    *  @ingroup unordered_associative_containers
    *  @{
    */
-  template<typename _Key, typename _Value,
+  template<typename _Key, typename _Value, typename _Alloc,
 	   typename _ExtractKey, typename _Equal,
 	   typename _H1, typename _H2, typename _Hash, typename _Traits>
     struct _Hashtable_base;
@@ -107,24 +107,24 @@ namespace __detail
       using __hashtable_alloc = _Hashtable_alloc<__node_alloc_type>;
       using __node_alloc_traits =
 	typename __hashtable_alloc::__node_alloc_traits;
-      using __node_type = typename __hashtable_alloc::__node_type;
+      using __node_pointer = typename __hashtable_alloc::__node_pointer;
 
     public:
-      _ReuseOrAllocNode(__node_type* __nodes, __hashtable_alloc& __h)
-      : _M_nodes(__nodes), _M_h(__h) { }
+      _ReuseOrAllocNode(__node_pointer&& __nodes, __hashtable_alloc& __h)
+      : _M_nodes(std::move(__nodes)), _M_h(__h) { }
       _ReuseOrAllocNode(const _ReuseOrAllocNode&) = delete;
 
       ~_ReuseOrAllocNode()
       { _M_h._M_deallocate_nodes(_M_nodes); }
 
       template<typename _Arg>
-	__node_type*
+	__node_pointer
 	operator()(_Arg&& __arg) const
 	{
 	  if (_M_nodes)
 	    {
-	      __node_type* __node = _M_nodes;
-	      _M_nodes = _M_nodes->_M_next();
+	      __node_pointer __node = _M_nodes;
+	      _M_nodes = _M_nodes->_M_nxt;
 	      __node->_M_nxt = nullptr;
 	      auto& __a = _M_h._M_node_allocator();
 	      __node_alloc_traits::destroy(__a, __node->_M_valptr());
@@ -144,7 +144,7 @@ namespace __detail
 	}
 
     private:
-      mutable __node_type* _M_nodes;
+      mutable __node_pointer _M_nodes;
       __hashtable_alloc& _M_h;
     };
 
@@ -155,14 +155,14 @@ namespace __detail
     {
     private:
       using __hashtable_alloc = _Hashtable_alloc<_NodeAlloc>;
-      using __node_type = typename __hashtable_alloc::__node_type;
+      using __node_pointer = typename __hashtable_alloc::__node_pointer;
 
     public:
       _AllocNode(__hashtable_alloc& __h)
       : _M_h(__h) { }
 
       template<typename _Arg>
-	__node_type*
+	__node_pointer
 	operator()(_Arg&& __arg) const
 	{ return _M_h._M_allocate_node(std::forward<_Arg>(__arg)); }
 
@@ -211,22 +211,27 @@ namespace __detail
    *  nodes also store a hash code. In some cases (e.g. strings) this
    *  may be a performance win.
    */
-  struct _Hash_node_base
-  {
-    _Hash_node_base* _M_nxt;
+  template<typename _NodePtr>
+    struct _Hash_node_base
+    {
+      using __node_pointer = _NodePtr;
 
-    _Hash_node_base() noexcept : _M_nxt() { }
+      __node_pointer _M_nxt;
 
-    _Hash_node_base(_Hash_node_base* __next) noexcept : _M_nxt(__next) { }
-  };
+      _Hash_node_base() noexcept : _M_nxt() { }
+
+      template<typename _Ptr>
+	_Hash_node_base(_Ptr&& __next) noexcept
+	: _M_nxt(std::forward<_Ptr>(__next)) { }
+    };
 
   /**
    *  struct _Hash_node_value_base
    *
    *  Node type with the value to store.
    */
-  template<typename _Value>
-    struct _Hash_node_value_base : _Hash_node_base
+  template<typename _NodePtr, typename _Value>
+    struct _Hash_node_value_base : _Hash_node_base<_NodePtr>
     {
       typedef _Value value_type;
 
@@ -252,7 +257,7 @@ namespace __detail
   /**
    *  Primary template struct _Hash_node.
    */
-  template<typename _Value, bool _Cache_hash_code>
+  template<typename _Ptr, typename _Value, bool _Cache_hash_code>
     struct _Hash_node;
 
   /**
@@ -260,84 +265,77 @@ namespace __detail
    *
    *  Base class is __detail::_Hash_node_value_base.
    */
-  template<typename _Value>
-    struct _Hash_node<_Value, true> : _Hash_node_value_base<_Value>
-    {
-      std::size_t  _M_hash_code;
-
-      _Hash_node*
-      _M_next() const noexcept
-      { return static_cast<_Hash_node*>(this->_M_nxt); }
-    };
+  template<typename _Ptr, typename _Value>
+    struct _Hash_node<_Ptr, _Value, true>
+    : _Hash_node_value_base<__ptr_rebind<_Ptr, _Hash_node<_Ptr, _Value, true>>,
+			    _Value>
+    { std::size_t  _M_hash_code; };
 
   /**
    *  Specialization for nodes without caches, struct _Hash_node.
    *
    *  Base class is __detail::_Hash_node_value_base.
    */
-  template<typename _Value>
-    struct _Hash_node<_Value, false> : _Hash_node_value_base<_Value>
-    {
-      _Hash_node*
-      _M_next() const noexcept
-      { return static_cast<_Hash_node*>(this->_M_nxt); }
-    };
+  template<typename _Ptr, typename _Value>
+    struct _Hash_node<_Ptr, _Value, false>
+    : _Hash_node_value_base<__ptr_rebind<_Ptr, _Hash_node<_Ptr, _Value, false>>,
+			    _Value>
+    { };
 
   /// Base class for node iterators.
-  template<typename _Value, bool _Cache_hash_code>
+  template<typename _NodePtr>
     struct _Node_iterator_base
     {
-      using __node_type = _Hash_node<_Value, _Cache_hash_code>;
+      using __node_type = typename std::pointer_traits<_NodePtr>::element_type;
 
-      __node_type*  _M_cur;
+      _NodePtr _M_cur;
 
-      _Node_iterator_base(__node_type* __p) noexcept
+      _Node_iterator_base(_NodePtr __p) noexcept
       : _M_cur(__p) { }
+      _Node_iterator_base() noexcept
+      : _Node_iterator_base(nullptr) { }
 
       void
       _M_incr() noexcept
-      { _M_cur = _M_cur->_M_next(); }
-    };
+      { _M_cur = _M_cur->_M_nxt; }
 
-  template<typename _Value, bool _Cache_hash_code>
-    inline bool
-    operator==(const _Node_iterator_base<_Value, _Cache_hash_code>& __x,
-	       const _Node_iterator_base<_Value, _Cache_hash_code >& __y)
-    noexcept
-    { return __x._M_cur == __y._M_cur; }
+      friend inline bool
+      operator==(const _Node_iterator_base& __x, const _Node_iterator_base& __y)
+      noexcept
+      { return __x._M_cur == __y._M_cur; }
 
-  template<typename _Value, bool _Cache_hash_code>
-    inline bool
-    operator!=(const _Node_iterator_base<_Value, _Cache_hash_code>& __x,
-	       const _Node_iterator_base<_Value, _Cache_hash_code>& __y)
-    noexcept
-    { return __x._M_cur != __y._M_cur; }
+      friend inline bool
+      operator!=(const _Node_iterator_base& __x, const _Node_iterator_base& __y)
+      noexcept
+      { return __x._M_cur != __y._M_cur; }
+    };
 
   /// Node iterators, used to iterate through all the hashtable.
-  template<typename _Value, bool __constant_iterators, bool __cache>
+  template<typename _NodePtr, bool __constant_iterators>
     struct _Node_iterator
-    : public _Node_iterator_base<_Value, __cache>
+    : public _Node_iterator_base<_NodePtr>
     {
     private:
-      using __base_type = _Node_iterator_base<_Value, __cache>;
+      using __base_type = _Node_iterator_base<_NodePtr>;
       using __node_type = typename __base_type::__node_type;
 
     public:
-      typedef _Value					value_type;
-      typedef std::ptrdiff_t				difference_type;
-      typedef std::forward_iterator_tag			iterator_category;
+      typedef typename __node_type::value_type	value_type;
+      typedef typename std::pointer_traits<_NodePtr>::difference_type
+						difference_type;
+      typedef std::forward_iterator_tag		iterator_category;
 
       using pointer = typename std::conditional<__constant_iterators,
-						const _Value*, _Value*>::type;
+				  const value_type*, value_type*>::type;
 
       using reference = typename std::conditional<__constant_iterators,
-						  const _Value&, _Value&>::type;
+				  const value_type&, value_type&>::type;
 
       _Node_iterator() noexcept
-      : __base_type(0) { }
+      : __base_type(nullptr) { }
 
       explicit
-      _Node_iterator(__node_type* __p) noexcept
+      _Node_iterator(_NodePtr __p) noexcept
       : __base_type(__p) { }
 
       reference
@@ -365,31 +363,32 @@ namespace __detail
     };
 
   /// Node const_iterators, used to iterate through all the hashtable.
-  template<typename _Value, bool __constant_iterators, bool __cache>
+  template<typename _NodePtr, bool __constant_iterators>
     struct _Node_const_iterator
-    : public _Node_iterator_base<_Value, __cache>
+    : public _Node_iterator_base<_NodePtr>
     {
     private:
-      using __base_type = _Node_iterator_base<_Value, __cache>;
+      using __base_type = _Node_iterator_base<_NodePtr>;
       using __node_type = typename __base_type::__node_type;
 
     public:
-      typedef _Value					value_type;
-      typedef std::ptrdiff_t				difference_type;
-      typedef std::forward_iterator_tag			iterator_category;
+      typedef typename __node_type::value_type	value_type;
+      typedef typename std::pointer_traits<_NodePtr>::difference_type
+						difference_type;
+      typedef std::forward_iterator_tag		iterator_category;
 
-      typedef const _Value*				pointer;
-      typedef const _Value&				reference;
+      typedef const value_type*			pointer;
+      typedef const value_type&			reference;
 
       _Node_const_iterator() noexcept
-      : __base_type(0) { }
+      : __base_type(nullptr) { }
 
       explicit
-      _Node_const_iterator(__node_type* __p) noexcept
+      _Node_const_iterator(_NodePtr __p) noexcept
       : __base_type(__p) { }
 
-      _Node_const_iterator(const _Node_iterator<_Value, __constant_iterators,
-			   __cache>& __x) noexcept
+      _Node_const_iterator(const _Node_iterator<_NodePtr,
+			   __constant_iterators>& __x) noexcept
       : __base_type(__x._M_cur) { }
 
       reference
@@ -662,17 +661,17 @@ namespace __detail
 		     _H1, _H2, _Hash, _RehashPolicy, _Traits, true>
     {
     private:
-      using __hashtable_base = __detail::_Hashtable_base<_Key, _Pair,
+      using __hashtable_base = __detail::_Hashtable_base<_Key, _Pair, _Alloc,
 							 _Select1st,
 							_Equal, _H1, _H2, _Hash,
-							  _Traits>;
+							 _Traits>;
 
       using __hashtable = _Hashtable<_Key, _Pair, _Alloc,
 				     _Select1st, _Equal,
 				     _H1, _H2, _Hash, _RehashPolicy, _Traits>;
 
       using __hash_code = typename __hashtable_base::__hash_code;
-      using __node_type = typename __hashtable_base::__node_type;
+      using __node_pointer = typename __hashtable_base::__node_pointer;
 
     public:
       using key_type = typename __hashtable_base::key_type;
@@ -706,7 +705,7 @@ namespace __detail
       __hashtable* __h = static_cast<__hashtable*>(this);
       __hash_code __code = __h->_M_hash_code(__k);
       std::size_t __bkt = __h->_M_bucket_index(__k, __code);
-      if (__node_type* __node = __h->_M_find_node(__bkt, __k, __code))
+      if (__node_pointer __node = __h->_M_find_node(__bkt, __k, __code))
 	return __node->_M_v().second;
 
       typename __hashtable::_Scoped_node __node {
@@ -715,8 +714,8 @@ namespace __detail
 	std::tuple<const key_type&>(__k),
 	std::tuple<>()
       };
-      auto __pos
-	= __h->_M_insert_unique_node(__k, __bkt, __code, __node._M_node);
+      auto __pos = __h->_M_insert_unique_node(__k, __bkt, __code,
+					      std::move(__node._M_node));
       __node._M_node = nullptr;
       return __pos->second;
     }
@@ -733,7 +732,7 @@ namespace __detail
       __hashtable* __h = static_cast<__hashtable*>(this);
       __hash_code __code = __h->_M_hash_code(__k);
       std::size_t __bkt = __h->_M_bucket_index(__k, __code);
-      if (__node_type* __node = __h->_M_find_node(__bkt, __k, __code))
+      if (__node_pointer __node = __h->_M_find_node(__bkt, __k, __code))
 	return __node->_M_v().second;
 
       typename __hashtable::_Scoped_node __node {
@@ -742,8 +741,8 @@ namespace __detail
 	std::forward_as_tuple(std::move(__k)),
 	std::tuple<>()
       };
-      auto __pos
-	= __h->_M_insert_unique_node(__k, __bkt, __code, __node._M_node);
+      auto __pos = __h->_M_insert_unique_node(__k, __bkt, __code,
+					      std::move(__node._M_node));
       __node._M_node = nullptr;
       return __pos->second;
     }
@@ -760,7 +759,7 @@ namespace __detail
       __hashtable* __h = static_cast<__hashtable*>(this);
       __hash_code __code = __h->_M_hash_code(__k);
       std::size_t __bkt = __h->_M_bucket_index(__k, __code);
-      __node_type* __p = __h->_M_find_node(__bkt, __k, __code);
+      __node_pointer __p = __h->_M_find_node(__bkt, __k, __code);
 
       if (!__p)
 	__throw_out_of_range(__N("_Map_base::at"));
@@ -779,7 +778,7 @@ namespace __detail
       const __hashtable* __h = static_cast<const __hashtable*>(this);
       __hash_code __code = __h->_M_hash_code(__k);
       std::size_t __bkt = __h->_M_bucket_index(__k, __code);
-      __node_type* __p = __h->_M_find_node(__bkt, __k, __code);
+      __node_pointer __p = __h->_M_find_node(__bkt, __k, __code);
 
       if (!__p)
 	__throw_out_of_range(__N("_Map_base::at"));
@@ -802,7 +801,8 @@ namespace __detail
 				     _Equal, _H1, _H2, _Hash,
 				     _RehashPolicy, _Traits>;
 
-      using __hashtable_base = _Hashtable_base<_Key, _Value, _ExtractKey,
+      using __hashtable_base = _Hashtable_base<_Key, _Value, _Alloc,
+					       _ExtractKey,
 					       _Equal, _H1, _H2, _Hash,
 					       _Traits>;
 
@@ -813,7 +813,7 @@ namespace __detail
 
       using __unique_keys = typename __hashtable_base::__unique_keys;
       using __ireturn_type = typename __hashtable_base::__ireturn_type;
-      using __node_type = _Hash_node<_Value, _Traits::__hash_cached::value>;
+      using __node_type = typename __hashtable_base::__node_type;
       using __node_alloc_type = __alloc_rebind<_Alloc, __node_type>;
       using __node_gen_type = _AllocNode<__node_alloc_type>;
 
@@ -948,7 +948,8 @@ namespace __detail
 					_Equal, _H1, _H2, _Hash,
 					_RehashPolicy, _Traits>;
 
-      using __hashtable_base = _Hashtable_base<_Key, _Value, _ExtractKey,
+      using __hashtable_base = _Hashtable_base<_Key, _Value, _Alloc,
+					       _ExtractKey,
 					       _Equal, _H1, _H2, _Hash,
 					       _Traits>;
 
@@ -1144,9 +1145,10 @@ namespace __detail
    *  Base class for local iterators, used to iterate within a bucket
    *  but not between buckets.
    */
-  template<typename _Key, typename _Value, typename _ExtractKey,
+  template<typename _Key, typename _Value,
+	   typename _Alloc, typename _ExtractKey,
 	   typename _H1, typename _H2, typename _Hash,
-	   bool __cache_hash_code>
+	   typename _NodePtr, bool __cache_hash_code>
     struct _Local_iterator_base;
 
   /**
@@ -1169,26 +1171,35 @@ namespace __detail
    *
    *  Primary template is unused except as a hook for specializations.
    */
-  template<typename _Key, typename _Value, typename _ExtractKey,
+  template<typename _Key, typename _Value,
+	   typename _Alloc, typename _ExtractKey,
 	   typename _H1, typename _H2, typename _Hash,
 	   bool __cache_hash_code>
     struct _Hash_code_base;
 
   /// Specialization: ranged hash function, no caching hash codes.  H1
   /// and H2 are provided but ignored.  We define a dummy hash code type.
-  template<typename _Key, typename _Value, typename _ExtractKey,
+  template<typename _Key, typename _Value,
+	   typename _Alloc, typename _ExtractKey,
 	   typename _H1, typename _H2, typename _Hash>
-    struct _Hash_code_base<_Key, _Value, _ExtractKey, _H1, _H2, _Hash, false>
+    struct _Hash_code_base<_Key, _Value, _Alloc, _ExtractKey, _H1, _H2,
+			   _Hash, false>
     : private _Hashtable_ebo_helper<0, _ExtractKey>,
       private _Hashtable_ebo_helper<1, _Hash>
     {
     private:
       using __ebo_extract_key = _Hashtable_ebo_helper<0, _ExtractKey>;
       using __ebo_hash = _Hashtable_ebo_helper<1, _Hash>;
+      using __pointer = typename std::allocator_traits<_Alloc>::pointer;
 
     protected:
       typedef void* 					__hash_code;
-      typedef _Hash_node<_Value, false>			__node_type;
+      typedef _Hash_node<__pointer, _Value, false>	__node_type;
+      using __node_pointer = typename __node_type::__node_pointer;
+      using __node_ptr_arg_t = typename std::conditional<
+	std::__is_pointer<__node_pointer>::__value,
+	__node_pointer,
+	const __node_pointer&>::type;
 
       // We need the default constructor for the local iterators and _Hashtable
       // default constructor.
@@ -1208,17 +1219,17 @@ namespace __detail
       { return _M_ranged_hash()(__k, __bkt_count); }
 
       std::size_t
-      _M_bucket_index(const __node_type* __p, std::size_t __bkt_count) const
+      _M_bucket_index(__node_ptr_arg_t __p, std::size_t __bkt_count) const
 	noexcept( noexcept(declval<const _Hash&>()(declval<const _Key&>(),
 						   (std::size_t)0)) )
       { return _M_ranged_hash()(_M_extract()(__p->_M_v()), __bkt_count); }
 
       void
-      _M_store_code(__node_type*, __hash_code) const
+      _M_store_code(__node_ptr_arg_t, __hash_code) const
       { }
 
       void
-      _M_copy_code(__node_type*, const __node_type*) const
+      _M_copy_code(__node_ptr_arg_t, __node_ptr_arg_t) const
       { }
 
       void
@@ -1242,16 +1253,19 @@ namespace __detail
   /// Specialization: ranged hash function, cache hash codes.  This
   /// combination is meaningless, so we provide only a declaration
   /// and no definition.
-  template<typename _Key, typename _Value, typename _ExtractKey,
+  template<typename _Key, typename _Value,
+	   typename _Alloc, typename _ExtractKey,
 	   typename _H1, typename _H2, typename _Hash>
-    struct _Hash_code_base<_Key, _Value, _ExtractKey, _H1, _H2, _Hash, true>;
+    struct _Hash_code_base<_Key, _Value, _Alloc, _ExtractKey, _H1, _H2,
+			   _Hash, true>;
 
   /// Specialization: hash function and range-hashing function, no
   /// caching of hash codes.
   /// Provides typedef and accessor required by C++ 11.
-  template<typename _Key, typename _Value, typename _ExtractKey,
+  template<typename _Key, typename _Value,
+	   typename _Alloc, typename _ExtractKey,
 	   typename _H1, typename _H2>
-    struct _Hash_code_base<_Key, _Value, _ExtractKey, _H1, _H2,
+    struct _Hash_code_base<_Key, _Value, _Alloc, _ExtractKey, _H1, _H2,
 			   _Default_ranged_hash, false>
     : private _Hashtable_ebo_helper<0, _ExtractKey>,
       private _Hashtable_ebo_helper<1, _H1>,
@@ -1261,10 +1275,7 @@ namespace __detail
       using __ebo_extract_key = _Hashtable_ebo_helper<0, _ExtractKey>;
       using __ebo_h1 = _Hashtable_ebo_helper<1, _H1>;
       using __ebo_h2 = _Hashtable_ebo_helper<2, _H2>;
-
-      // Gives the local iterator implementation access to _M_bucket_index().
-      friend struct _Local_iterator_base<_Key, _Value, _ExtractKey, _H1, _H2,
-					 _Default_ranged_hash, false>;
+      using __pointer = typename std::allocator_traits<_Alloc>::pointer;
 
     public:
       typedef _H1 					hasher;
@@ -1275,7 +1286,17 @@ namespace __detail
 
     protected:
       typedef std::size_t 				__hash_code;
-      typedef _Hash_node<_Value, false>			__node_type;
+      typedef _Hash_node<__pointer, _Value, false>	__node_type;
+      using __node_pointer = typename __node_type::__node_pointer;
+      using __node_ptr_arg_t = typename std::conditional<
+	std::__is_pointer<__node_pointer>::__value,
+	__node_pointer,
+	const __node_pointer&>::type;
+
+      // Gives the local iterator implementation access to _M_bucket_index().
+      friend struct _Local_iterator_base<_Key, _Value, _Alloc, _ExtractKey,
+					 _H1, _H2, _Default_ranged_hash,
+					 __node_pointer, false>;
 
       // We need the default constructor for the local iterators and _Hashtable
       // default constructor.
@@ -1300,18 +1321,18 @@ namespace __detail
       { return _M_h2()(__c, __bkt_count); }
 
       std::size_t
-      _M_bucket_index(const __node_type* __p, std::size_t __bkt_count) const
+      _M_bucket_index(__node_ptr_arg_t __p, std::size_t __bkt_count) const
 	noexcept( noexcept(declval<const _H1&>()(declval<const _Key&>()))
 		  && noexcept(declval<const _H2&>()((__hash_code)0,
 						    (std::size_t)0)) )
       { return _M_h2()(_M_h1()(_M_extract()(__p->_M_v())), __bkt_count); }
 
       void
-      _M_store_code(__node_type*, __hash_code) const
+      _M_store_code(__node_ptr_arg_t, __hash_code) const
       { }
 
       void
-      _M_copy_code(__node_type*, const __node_type*) const
+      _M_copy_code(__node_ptr_arg_t, __node_ptr_arg_t) const
       { }
 
       void
@@ -1336,22 +1357,20 @@ namespace __detail
   /// Specialization: hash function and range-hashing function,
   /// caching hash codes.  H is provided but ignored.  Provides
   /// typedef and accessor required by C++ 11.
-  template<typename _Key, typename _Value, typename _ExtractKey,
+  template<typename _Key, typename _Value,
+	   typename _Alloc, typename _ExtractKey,
 	   typename _H1, typename _H2>
-    struct _Hash_code_base<_Key, _Value, _ExtractKey, _H1, _H2,
+    struct _Hash_code_base<_Key, _Value, _Alloc, _ExtractKey, _H1, _H2,
 			   _Default_ranged_hash, true>
     : private _Hashtable_ebo_helper<0, _ExtractKey>,
       private _Hashtable_ebo_helper<1, _H1>,
       private _Hashtable_ebo_helper<2, _H2>
     {
     private:
-      // Gives the local iterator implementation access to _M_h2().
-      friend struct _Local_iterator_base<_Key, _Value, _ExtractKey, _H1, _H2,
-					 _Default_ranged_hash, true>;
-
       using __ebo_extract_key = _Hashtable_ebo_helper<0, _ExtractKey>;
       using __ebo_h1 = _Hashtable_ebo_helper<1, _H1>;
       using __ebo_h2 = _Hashtable_ebo_helper<2, _H2>;
+      using __pointer = typename std::allocator_traits<_Alloc>::pointer;
 
     public:
       typedef _H1 					hasher;
@@ -1362,7 +1381,17 @@ namespace __detail
 
     protected:
       typedef std::size_t 				__hash_code;
-      typedef _Hash_node<_Value, true>			__node_type;
+      typedef _Hash_node<__pointer, _Value, true>	__node_type;
+      using __node_pointer = typename __node_type::__node_pointer;
+      using __node_ptr_arg_t = typename std::conditional<
+	std::__is_pointer<__node_pointer>::__value,
+	__node_pointer,
+	const __node_pointer&>::type;
+
+      // Gives the local iterator implementation access to _M_h2().
+      friend struct _Local_iterator_base<_Key, _Value, _Alloc, _ExtractKey,
+					 _H1, _H2, _Default_ranged_hash,
+					 __node_pointer, true>;
 
       // We need the default constructor for _Hashtable default constructor.
       _Hash_code_base() = default;
@@ -1385,17 +1414,17 @@ namespace __detail
       { return _M_h2()(__c, __bkt_count); }
 
       std::size_t
-      _M_bucket_index(const __node_type* __p, std::size_t __bkt_count) const
+      _M_bucket_index(__node_ptr_arg_t __p, std::size_t __bkt_count) const
 	noexcept( noexcept(declval<const _H2&>()((__hash_code)0,
 						 (std::size_t)0)) )
       { return _M_h2()(__p->_M_hash_code, __bkt_count); }
 
       void
-      _M_store_code(__node_type* __n, __hash_code __c) const
+      _M_store_code(__node_ptr_arg_t __n, __hash_code __c) const
       { __n->_M_hash_code = __c; }
 
       void
-      _M_copy_code(__node_type* __to, const __node_type* __from) const
+      _M_copy_code(__node_ptr_arg_t __to, __node_ptr_arg_t __from) const
       { __to->_M_hash_code = __from->_M_hash_code; }
 
       void
@@ -1418,46 +1447,45 @@ namespace __detail
     };
 
   /// Partial specialization used when nodes contain a cached hash code.
-  template<typename _Key, typename _Value, typename _ExtractKey,
-	   typename _H1, typename _H2, typename _Hash>
-    struct _Local_iterator_base<_Key, _Value, _ExtractKey,
-				_H1, _H2, _Hash, true>
+  template<typename _Key, typename _Value,
+	   typename _Alloc, typename _ExtractKey,
+	   typename _H1, typename _H2, typename _Hash,
+	   typename _NodePtr>
+    struct _Local_iterator_base<_Key, _Value, _Alloc, _ExtractKey,
+				_H1, _H2, _Hash, _NodePtr, true>
     : private _Hashtable_ebo_helper<0, _H2>
+    , public _Node_iterator_base<_NodePtr>
     {
     protected:
       using __base_type = _Hashtable_ebo_helper<0, _H2>;
-      using __hash_code_base = _Hash_code_base<_Key, _Value, _ExtractKey,
+      using __base_node_iter = _Node_iterator_base<_NodePtr>;
+      using __hash_code_base = _Hash_code_base<_Key, _Value, _Alloc, _ExtractKey,
 					       _H1, _H2, _Hash, true>;
 
       _Local_iterator_base() = default;
-      _Local_iterator_base(const __hash_code_base& __base,
-			   _Hash_node<_Value, true>* __p,
+      _Local_iterator_base(const __hash_code_base& __base, _NodePtr __p,
 			   std::size_t __bkt, std::size_t __bkt_count)
-      : __base_type(__base._M_h2()),
-	_M_cur(__p), _M_bucket(__bkt), _M_bucket_count(__bkt_count) { }
+	: __base_type(__base._M_h2()), __base_node_iter(__p)
+	, _M_bucket(__bkt), _M_bucket_count(__bkt_count) { }
 
       void
       _M_incr()
       {
-	_M_cur = _M_cur->_M_next();
-	if (_M_cur)
+	__base_node_iter::_M_incr();
+	if (this->_M_cur)
 	  {
 	    std::size_t __bkt
-	      = __base_type::_M_get()(_M_cur->_M_hash_code,
-					   _M_bucket_count);
+	      = __base_type::_M_get()(this->_M_cur->_M_hash_code,
+				      _M_bucket_count);
 	    if (__bkt != _M_bucket)
-	      _M_cur = nullptr;
+	      this->_M_cur = nullptr;
 	  }
       }
 
-      _Hash_node<_Value, true>*  _M_cur;
       std::size_t _M_bucket;
       std::size_t _M_bucket_count;
 
     public:
-      const void*
-      _M_curr() const { return _M_cur; }  // for equality ops
-
       std::size_t
       _M_get_bucket() const { return _M_bucket; }  // for debug mode
     };
@@ -1493,29 +1521,33 @@ namespace __detail
       _M_h() const { return reinterpret_cast<const _Tp*>(this); }
     };
 
-  template<typename _Key, typename _Value, typename _ExtractKey,
+  template<typename _Key, typename _Value,
+	   typename _Alloc, typename _ExtractKey,
 	   typename _H1, typename _H2, typename _Hash>
     using __hash_code_for_local_iter
-      = _Hash_code_storage<_Hash_code_base<_Key, _Value, _ExtractKey,
+      = _Hash_code_storage<_Hash_code_base<_Key, _Value, _Alloc, _ExtractKey,
 					   _H1, _H2, _Hash, false>>;
 
   // Partial specialization used when hash codes are not cached
-  template<typename _Key, typename _Value, typename _ExtractKey,
-	   typename _H1, typename _H2, typename _Hash>
-    struct _Local_iterator_base<_Key, _Value, _ExtractKey,
-				_H1, _H2, _Hash, false>
-    : __hash_code_for_local_iter<_Key, _Value, _ExtractKey, _H1, _H2, _Hash>
+  template<typename _Key, typename _Value, typename _Alloc,
+	   typename _ExtractKey, typename _H1, typename _H2,
+	   typename _Hash, typename _NodePtr>
+    struct _Local_iterator_base<_Key, _Value, _Alloc, _ExtractKey,
+				_H1, _H2, _Hash, _NodePtr, false>
+    : __hash_code_for_local_iter<_Key, _Value, _Alloc, _ExtractKey,
+				 _H1, _H2, _Hash>
+    , public _Node_iterator_base<_NodePtr>
     {
     protected:
-      using __hash_code_base = _Hash_code_base<_Key, _Value, _ExtractKey,
-					       _H1, _H2, _Hash, false>;
+      using __hash_code_base = _Hash_code_base<_Key, _Value, _Alloc,
+				_ExtractKey, _H1, _H2, _Hash, false>;
+      using __base_node_iter = _Node_iterator_base<_NodePtr>;
 
       _Local_iterator_base() : _M_bucket_count(-1) { }
 
-      _Local_iterator_base(const __hash_code_base& __base,
-			   _Hash_node<_Value, false>* __p,
+      _Local_iterator_base(const __hash_code_base& __base, _NodePtr __p,
 			   std::size_t __bkt, std::size_t __bkt_count)
-      : _M_cur(__p), _M_bucket(__bkt), _M_bucket_count(__bkt_count)
+      : __base_node_iter(__p), _M_bucket(__bkt), _M_bucket_count(__bkt_count)
       { _M_init(__base); }
 
       ~_Local_iterator_base()
@@ -1525,7 +1557,7 @@ namespace __detail
       }
 
       _Local_iterator_base(const _Local_iterator_base& __iter)
-      : _M_cur(__iter._M_cur), _M_bucket(__iter._M_bucket),
+      : __base_node_iter(__iter._M_cur), _M_bucket(__iter._M_bucket),
         _M_bucket_count(__iter._M_bucket_count)
       {
 	if (_M_bucket_count != -1)
@@ -1537,7 +1569,7 @@ namespace __detail
       {
 	if (_M_bucket_count != -1)
 	  _M_destroy();
-	_M_cur = __iter._M_cur;
+	this->_M_cur = __iter._M_cur;
 	_M_bucket = __iter._M_bucket;
 	_M_bucket_count = __iter._M_bucket_count;
 	if (_M_bucket_count != -1)
@@ -1548,17 +1580,16 @@ namespace __detail
       void
       _M_incr()
       {
-	_M_cur = _M_cur->_M_next();
-	if (_M_cur)
+	__base_node_iter::_M_incr();
+	if (this->_M_cur)
 	  {
-	    std::size_t __bkt = this->_M_h()->_M_bucket_index(_M_cur,
+	    std::size_t __bkt = this->_M_h()->_M_bucket_index(this->_M_cur,
 							      _M_bucket_count);
 	    if (__bkt != _M_bucket)
-	      _M_cur = nullptr;
+	      this->_M_cur = nullptr;
 	  }
       }
 
-      _Hash_node<_Value, false>*  _M_cur;
       std::size_t _M_bucket;
       std::size_t _M_bucket_count;
 
@@ -1570,42 +1601,22 @@ namespace __detail
       _M_destroy() { this->_M_h()->~__hash_code_base(); }
 
     public:
-      const void*
-      _M_curr() const { return _M_cur; }  // for equality ops and debug mode
-
       std::size_t
       _M_get_bucket() const { return _M_bucket; }  // for debug mode
     };
 
-  template<typename _Key, typename _Value, typename _ExtractKey,
-	   typename _H1, typename _H2, typename _Hash, bool __cache>
-    inline bool
-    operator==(const _Local_iterator_base<_Key, _Value, _ExtractKey,
-					  _H1, _H2, _Hash, __cache>& __x,
-	       const _Local_iterator_base<_Key, _Value, _ExtractKey,
-					  _H1, _H2, _Hash, __cache>& __y)
-    { return __x._M_curr() == __y._M_curr(); }
-
-  template<typename _Key, typename _Value, typename _ExtractKey,
-	   typename _H1, typename _H2, typename _Hash, bool __cache>
-    inline bool
-    operator!=(const _Local_iterator_base<_Key, _Value, _ExtractKey,
-					  _H1, _H2, _Hash, __cache>& __x,
-	       const _Local_iterator_base<_Key, _Value, _ExtractKey,
-					  _H1, _H2, _Hash, __cache>& __y)
-    { return __x._M_curr() != __y._M_curr(); }
-
   /// local iterators
-  template<typename _Key, typename _Value, typename _ExtractKey,
+  template<typename _Key, typename _Value,
+	   typename _Alloc, typename _ExtractKey,
 	   typename _H1, typename _H2, typename _Hash,
-	   bool __constant_iterators, bool __cache>
+	   typename _NodePtr, bool __constant_iterators, bool __cache>
     struct _Local_iterator
-    : public _Local_iterator_base<_Key, _Value, _ExtractKey,
-				  _H1, _H2, _Hash, __cache>
+    : public _Local_iterator_base<_Key, _Value, _Alloc, _ExtractKey,
+				  _H1, _H2, _Hash, _NodePtr, __cache>
     {
     private:
-      using __base_type = _Local_iterator_base<_Key, _Value, _ExtractKey,
-					       _H1, _H2, _Hash, __cache>;
+      using __base_type = _Local_iterator_base<_Key, _Value, _Alloc,
+			_ExtractKey, _H1, _H2, _Hash, _NodePtr, __cache>;
       using __hash_code_base = typename __base_type::__hash_code_base;
     public:
       typedef _Value					value_type;
@@ -1621,7 +1632,7 @@ namespace __detail
       _Local_iterator() = default;
 
       _Local_iterator(const __hash_code_base& __base,
-		      _Hash_node<_Value, __cache>* __n,
+		      _NodePtr __n,
 		      std::size_t __bkt, std::size_t __bkt_count)
       : __base_type(__base, __n, __bkt, __bkt_count)
       { }
@@ -1651,16 +1662,17 @@ namespace __detail
     };
 
   /// local const_iterators
-  template<typename _Key, typename _Value, typename _ExtractKey,
+  template<typename _Key, typename _Value,
+	   typename _Alloc, typename _ExtractKey,
 	   typename _H1, typename _H2, typename _Hash,
-	   bool __constant_iterators, bool __cache>
+	   typename _NodePtr, bool __constant_iterators, bool __cache>
     struct _Local_const_iterator
-    : public _Local_iterator_base<_Key, _Value, _ExtractKey,
-				  _H1, _H2, _Hash, __cache>
+    : public _Local_iterator_base<_Key, _Value, _Alloc, _ExtractKey,
+				  _H1, _H2, _Hash, _NodePtr, __cache>
     {
     private:
-      using __base_type = _Local_iterator_base<_Key, _Value, _ExtractKey,
-					       _H1, _H2, _Hash, __cache>;
+      using __base_type = _Local_iterator_base<_Key, _Value, _Alloc,
+			_ExtractKey, _H1, _H2, _Hash, _NodePtr, __cache>;
       using __hash_code_base = typename __base_type::__hash_code_base;
 
     public:
@@ -1673,14 +1685,15 @@ namespace __detail
       _Local_const_iterator() = default;
 
       _Local_const_iterator(const __hash_code_base& __base,
-			    _Hash_node<_Value, __cache>* __n,
+			    _NodePtr __n,
 			    std::size_t __bkt, std::size_t __bkt_count)
       : __base_type(__base, __n, __bkt, __bkt_count)
       { }
 
-      _Local_const_iterator(const _Local_iterator<_Key, _Value, _ExtractKey,
+      _Local_const_iterator(const _Local_iterator<_Key, _Value,
+						  _Alloc, _ExtractKey,
 						  _H1, _H2, _Hash,
-						  __constant_iterators,
+						  _NodePtr, __constant_iterators,
 						  __cache>& __x)
       : __base_type(__x)
       { }
@@ -1719,13 +1732,13 @@ namespace __detail
    *    - __detail::_Hash_code_base
    *    - __detail::_Hashtable_ebo_helper
    */
-  template<typename _Key, typename _Value,
+  template<typename _Key, typename _Value, typename _Alloc,
 	   typename _ExtractKey, typename _Equal,
 	   typename _H1, typename _H2, typename _Hash, typename _Traits>
-  struct _Hashtable_base
-  : public _Hash_code_base<_Key, _Value, _ExtractKey, _H1, _H2, _Hash,
-			   _Traits::__hash_cached::value>,
-    private _Hashtable_ebo_helper<0, _Equal>
+    struct _Hashtable_base
+    : public _Hash_code_base<_Key, _Value, _Alloc, _ExtractKey, _H1, _H2, _Hash,
+			     _Traits::__hash_cached::value>,
+      private _Hashtable_ebo_helper<0, _Equal>
   {
   public:
     typedef _Key					key_type;
@@ -1739,31 +1752,29 @@ namespace __detail
     using __constant_iterators = typename __traits_type::__constant_iterators;
     using __unique_keys = typename __traits_type::__unique_keys;
 
-    using __hash_code_base = _Hash_code_base<_Key, _Value, _ExtractKey,
+    using __hash_code_base = _Hash_code_base<_Key, _Value, _Alloc, _ExtractKey,
 					     _H1, _H2, _Hash,
 					     __hash_cached::value>;
 
     using __hash_code = typename __hash_code_base::__hash_code;
     using __node_type = typename __hash_code_base::__node_type;
+    using __node_pointer = typename __hash_code_base::__node_pointer;
+    using __node_ptr_arg_t = typename __hash_code_base::__node_ptr_arg_t;
 
-    using iterator = __detail::_Node_iterator<value_type,
-					      __constant_iterators::value,
-					      __hash_cached::value>;
+    using iterator = __detail::_Node_iterator<__node_pointer,
+					      __constant_iterators::value>;
 
-    using const_iterator = __detail::_Node_const_iterator<value_type,
-						   __constant_iterators::value,
-						   __hash_cached::value>;
+    using const_iterator = __detail::_Node_const_iterator<__node_pointer,
+						   __constant_iterators::value>;
 
-    using local_iterator = __detail::_Local_iterator<key_type, value_type,
-						  _ExtractKey, _H1, _H2, _Hash,
-						  __constant_iterators::value,
-						     __hash_cached::value>;
+    using local_iterator = __detail::_Local_iterator<key_type, value_type, _Alloc,
+			_ExtractKey, _H1, _H2, _Hash, __node_pointer,
+			__constant_iterators::value, __hash_cached::value>;
 
-    using const_local_iterator = __detail::_Local_const_iterator<key_type,
-								 value_type,
-					_ExtractKey, _H1, _H2, _Hash,
-					__constant_iterators::value,
-					__hash_cached::value>;
+    using const_local_iterator = __detail::_Local_const_iterator<
+			key_type, value_type, _Alloc,
+			_ExtractKey, _H1, _H2, _Hash, __node_pointer,
+			__constant_iterators::value, __hash_cached::value>;
 
     using __ireturn_type = typename std::conditional<__unique_keys::value,
 						     std::pair<iterator, bool>,
@@ -1774,17 +1785,17 @@ namespace __detail
     template<typename _NodeT>
       struct _Equal_hash_code
       {
-       static bool
-       _S_equals(__hash_code, const _NodeT&)
-       { return true; }
+	static bool
+	_S_equals(__hash_code, const _NodeT&)
+	{ return true; }
       };
 
-    template<typename _Ptr2>
-      struct _Equal_hash_code<_Hash_node<_Ptr2, true>>
+    template<typename _Ptr2, typename _Value2>
+      struct _Equal_hash_code<_Hash_node<_Ptr2, _Value2, true>>
       {
-       static bool
-       _S_equals(__hash_code __c, const _Hash_node<_Ptr2, true>& __n)
-       { return __c == __n._M_hash_code; }
+	static bool
+	_S_equals(__hash_code __c, const _Hash_node<_Ptr2, _Value2, true>& __n)
+	{ return __c == __n._M_hash_code; }
       };
 
   protected:
@@ -1795,7 +1806,7 @@ namespace __detail
     { }
 
     bool
-    _M_equals(const _Key& __k, __hash_code __c, __node_type* __n) const
+    _M_equals(const _Key& __k, __hash_code __c, __node_ptr_arg_t __n) const
     {
       static_assert(__is_invocable<const _Equal&, const _Key&, const _Key&>{},
 	  "key equality predicate must be invocable with two arguments of "
@@ -1854,8 +1865,8 @@ namespace __detail
 	      _H1, _H2, _Hash, _RehashPolicy, _Traits, true>::
     _M_equal(const __hashtable& __other) const
     {
-      using __node_base = typename __hashtable::__node_base;
-      using __node_type = typename __hashtable::__node_type;
+      using __bucket_type = typename __hashtable::__bucket_type;
+      using __node_pointer = typename __hashtable::__node_pointer;
       const __hashtable* __this = static_cast<const __hashtable*>(this);
       if (__this->size() != __other.size())
 	return false;
@@ -1863,18 +1874,17 @@ namespace __detail
       for (auto __itx = __this->begin(); __itx != __this->end(); ++__itx)
 	{
 	  std::size_t __ybkt = __other._M_bucket_index(__itx._M_cur);
-	  __node_base* __prev_n = __other._M_buckets[__ybkt];
+	  __bucket_type __prev_n = __other._M_buckets[__ybkt];
 	  if (!__prev_n)
 	    return false;
 
-	  for (__node_type* __n = static_cast<__node_type*>(__prev_n->_M_nxt);;
-	       __n = __n->_M_next())
+	  for (__node_pointer __n = __prev_n->_M_nxt;; __n = __n->_M_nxt)
 	    {
 	      if (__n->_M_v() == *__itx)
 		break;
 
 	      if (!__n->_M_nxt
-		  || __other._M_bucket_index(__n->_M_next()) != __ybkt)
+		  || __other._M_bucket_index(__n->_M_nxt) != __ybkt)
 		return false;
 	    }
 	}
@@ -1906,8 +1916,8 @@ namespace __detail
 	      _H1, _H2, _Hash, _RehashPolicy, _Traits, false>::
     _M_equal(const __hashtable& __other) const
     {
-      using __node_base = typename __hashtable::__node_base;
-      using __node_type = typename __hashtable::__node_type;
+      using __bucket_type = typename __hashtable::__bucket_type;
+      using __node_pointer = typename __hashtable::__node_pointer;
       const __hashtable* __this = static_cast<const __hashtable*>(this);
       if (__this->size() != __other.size())
 	return false;
@@ -1923,19 +1933,19 @@ namespace __detail
 	    ++__x_count;
 
 	  std::size_t __ybkt = __other._M_bucket_index(__itx._M_cur);
-	  __node_base* __y_prev_n = __other._M_buckets[__ybkt];
+	  __bucket_type __y_prev_n = __other._M_buckets[__ybkt];
 	  if (!__y_prev_n)
 	    return false;
 
-	  __node_type* __y_n = static_cast<__node_type*>(__y_prev_n->_M_nxt);
-	  for (;; __y_n = __y_n->_M_next())
+	  __node_pointer __y_n = __y_prev_n->_M_nxt;
+	  for (;; __y_n = __y_n->_M_nxt)
 	    {
 	      if (__this->key_eq()(_ExtractKey()(__y_n->_M_v()),
 				   _ExtractKey()(*__itx)))
 		break;
 
 	      if (!__y_n->_M_nxt
-		  || __other._M_bucket_index(__y_n->_M_next()) != __ybkt)
+		  || __other._M_bucket_index(__y_n->_M_nxt) != __ybkt)
 		return false;
 	    }
 
@@ -1973,11 +1983,13 @@ namespace __detail
       using __value_alloc_traits = typename __node_alloc_traits::template
 	rebind_traits<typename __node_type::value_type>;
 
-      using __node_base = __detail::_Hash_node_base;
-      using __bucket_type = __node_base*;      
+      using __node_pointer = typename __node_alloc_traits::pointer;
+      using __node_base = __detail::_Hash_node_base<__node_pointer>;
+      using __bucket_type = __ptr_rebind<__node_pointer, __node_base>;
       using __bucket_alloc_type =
 	__alloc_rebind<__node_alloc_type, __bucket_type>;
       using __bucket_alloc_traits = std::allocator_traits<__bucket_alloc_type>;
+      using __bucket_pointer = typename __bucket_alloc_traits::pointer;
 
       _Hashtable_alloc() = default;
       _Hashtable_alloc(const _Hashtable_alloc&) = default;
@@ -1998,27 +2010,27 @@ namespace __detail
 
       // Allocate a node and construct an element within it.
       template<typename... _Args>
-	__node_type*
+	__node_pointer
 	_M_allocate_node(_Args&&... __args);
 
       // Destroy the element within a node and deallocate the node.
       void
-      _M_deallocate_node(__node_type* __n);
+      _M_deallocate_node(__node_pointer __n);
 
       // Deallocate a node.
       void
-      _M_deallocate_node_ptr(__node_type* __n);
+      _M_deallocate_node_ptr(__node_pointer __n);
 
       // Deallocate the linked list of nodes pointed to by __n.
       // The elements within the nodes are destroyed.
       void
-      _M_deallocate_nodes(__node_type* __n);
+      _M_deallocate_nodes(__node_pointer __n);
 
-      __bucket_type*
+      __bucket_pointer
       _M_allocate_buckets(std::size_t __bkt_count);
 
       void
-      _M_deallocate_buckets(__bucket_type*, std::size_t __bkt_count);
+      _M_deallocate_buckets(__bucket_pointer, std::size_t __bkt_count);
     };
 
   // Definitions of class template _Hashtable_alloc's out-of-line member
@@ -2027,17 +2039,16 @@ namespace __detail
     template<typename... _Args>
       auto
       _Hashtable_alloc<_NodeAlloc>::_M_allocate_node(_Args&&... __args)
-      -> __node_type*
+      -> __node_pointer
       {
 	auto __nptr = __node_alloc_traits::allocate(_M_node_allocator(), 1);
-	__node_type* __n = std::__to_address(__nptr);
 	__try
 	  {
-	    ::new ((void*)__n) __node_type;
+	    ::new ((void*)std::__to_address(__nptr)) __node_type;
 	    __node_alloc_traits::construct(_M_node_allocator(),
-					   __n->_M_valptr(),
+					   __nptr->_M_valptr(),
 					   std::forward<_Args>(__args)...);
-	    return __n;
+	    return __nptr;
 	  }
 	__catch(...)
 	  {
@@ -2048,55 +2059,51 @@ namespace __detail
 
   template<typename _NodeAlloc>
     void
-    _Hashtable_alloc<_NodeAlloc>::_M_deallocate_node(__node_type* __n)
+    _Hashtable_alloc<_NodeAlloc>::_M_deallocate_node(__node_pointer __nptr)
     {
-      __node_alloc_traits::destroy(_M_node_allocator(), __n->_M_valptr());
-      _M_deallocate_node_ptr(__n);
+      __node_alloc_traits::destroy(_M_node_allocator(), __nptr->_M_valptr());
+      _M_deallocate_node_ptr(__nptr);
     }
 
   template<typename _NodeAlloc>
     void
-    _Hashtable_alloc<_NodeAlloc>::_M_deallocate_node_ptr(__node_type* __n)
+    _Hashtable_alloc<_NodeAlloc>::_M_deallocate_node_ptr(__node_pointer __nptr)
     {
-      typedef typename __node_alloc_traits::pointer _Ptr;
-      auto __ptr = std::pointer_traits<_Ptr>::pointer_to(*__n);
-      __n->~__node_type();
-      __node_alloc_traits::deallocate(_M_node_allocator(), __ptr, 1);
+      __nptr->~__node_type();
+      __node_alloc_traits::deallocate(_M_node_allocator(), __nptr, 1);
     }
 
   template<typename _NodeAlloc>
     void
-    _Hashtable_alloc<_NodeAlloc>::_M_deallocate_nodes(__node_type* __n)
+    _Hashtable_alloc<_NodeAlloc>::_M_deallocate_nodes(__node_pointer __nptr)
     {
-      while (__n)
+      while (__nptr)
 	{
-	  __node_type* __tmp = __n;
-	  __n = __n->_M_next();
+	  __node_pointer __tmp = __nptr;
+	  __nptr = __nptr->_M_nxt;
 	  _M_deallocate_node(__tmp);
 	}
     }
 
   template<typename _NodeAlloc>
-    typename _Hashtable_alloc<_NodeAlloc>::__bucket_type*
+    auto
     _Hashtable_alloc<_NodeAlloc>::_M_allocate_buckets(std::size_t __bkt_count)
+    -> __bucket_pointer
     {
       __bucket_alloc_type __alloc(_M_node_allocator());
 
       auto __ptr = __bucket_alloc_traits::allocate(__alloc, __bkt_count);
-      __bucket_type* __p = std::__to_address(__ptr);
-      __builtin_memset(__p, 0, __bkt_count * sizeof(__bucket_type));
-      return __p;
+      std::fill_n(__ptr, __bkt_count, nullptr);
+      return __ptr;
     }
 
   template<typename _NodeAlloc>
     void
-    _Hashtable_alloc<_NodeAlloc>::_M_deallocate_buckets(__bucket_type* __bkts,
+    _Hashtable_alloc<_NodeAlloc>::_M_deallocate_buckets(__bucket_pointer __bkts,
 							std::size_t __bkt_count)
     {
-      typedef typename __bucket_alloc_traits::pointer _Ptr;
-      auto __ptr = std::pointer_traits<_Ptr>::pointer_to(*__bkts);
       __bucket_alloc_type __alloc(_M_node_allocator());
-      __bucket_alloc_traits::deallocate(__alloc, __ptr, __bkt_count);
+      __bucket_alloc_traits::deallocate(__alloc, __bkts, __bkt_count);
     }
 
  //@} hashtable-detail
diff --git a/libstdc++-v3/include/debug/unordered_map b/libstdc++-v3/include/debug/unordered_map
index 17fbba3aade..0635e9e8100 100644
--- a/libstdc++-v3/include/debug/unordered_map
+++ b/libstdc++-v3/include/debug/unordered_map
@@ -621,7 +621,7 @@ namespace __debug
 	  [__victim](_Base_const_iterator __it) { return __it == __victim; });
 	this->_M_invalidate_local_if(
 	  [__victim](_Base_const_local_iterator __it)
-	  { return __it._M_curr() == __victim._M_cur; });
+	  { return __it == __victim; });
       }
 
       _Base_iterator
@@ -1228,7 +1228,7 @@ namespace __debug
 	  [__victim](_Base_const_iterator __it) { return __it == __victim; });
 	this->_M_invalidate_local_if(
 	  [__victim](_Base_const_local_iterator __it)
-	  { return __it._M_curr() == __victim._M_cur; });
+	  { return __it == __victim; });
       }
 
       _Base_iterator
diff --git a/libstdc++-v3/include/debug/unordered_set b/libstdc++-v3/include/debug/unordered_set
index 4d30852186c..d1ebea98e8a 100644
--- a/libstdc++-v3/include/debug/unordered_set
+++ b/libstdc++-v3/include/debug/unordered_set
@@ -506,7 +506,7 @@ namespace __debug
 	  [__victim](_Base_const_iterator __it) { return __it == __victim; });
 	this->_M_invalidate_local_if(
 	  [__victim](_Base_const_local_iterator __it)
-	  { return __it._M_curr() == __victim._M_cur; });
+	  { return __it == __victim; });
       }
 
       _Base_iterator
@@ -1067,7 +1067,7 @@ namespace __debug
 	  [__victim](_Base_const_iterator __it) { return __it == __victim; });
 	this->_M_invalidate_local_if(
 	  [__victim](_Base_const_local_iterator __it)
-	  { return __it._M_curr() == __victim._M_cur; });
+	  { return __it == __victim; });
       }
 
       _Base_iterator
diff --git a/libstdc++-v3/testsuite/23_containers/unordered_map/allocator/ext_ptr.cc b/libstdc++-v3/testsuite/23_containers/unordered_map/allocator/ext_ptr.cc
new file mode 100644
index 00000000000..e9d7ada7151
--- /dev/null
+++ b/libstdc++-v3/testsuite/23_containers/unordered_map/allocator/ext_ptr.cc
@@ -0,0 +1,57 @@
+// Copyright (C) 2020 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library.  This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING3.  If not see
+// <http://www.gnu.org/licenses/>.
+
+// { dg-do run { target c++11 } }
+
+#include <unordered_map>
+#include <memory>
+#include <testsuite_hooks.h>
+#include <testsuite_allocator.h>
+
+struct T { int i; };
+
+bool operator==(const T& l, const T& r)
+{ return l.i == r.i; }
+
+struct H
+{
+  std::size_t
+  operator()(const T& t) const noexcept
+  { return t.i; }
+};
+
+struct E : std::equal_to<T>
+{ };
+
+using __gnu_test::CustomPointerAlloc;
+
+template class std::unordered_map<T, int, H, E,
+				  CustomPointerAlloc<std::pair<const T, int>>>;
+
+void test01()
+{
+  typedef CustomPointerAlloc<std::pair<const T, int>> alloc_type;
+  typedef std::unordered_map<T, int, H, E, alloc_type> test_type;
+  test_type v;
+  v.insert({ T(), 0 });
+  VERIFY( ++v.begin() == v.end() );
+}
+
+int main()
+{
+  test01();
+}
diff --git a/libstdc++-v3/testsuite/23_containers/unordered_multimap/allocator/ext_ptr.cc b/libstdc++-v3/testsuite/23_containers/unordered_multimap/allocator/ext_ptr.cc
new file mode 100644
index 00000000000..4a895a6302c
--- /dev/null
+++ b/libstdc++-v3/testsuite/23_containers/unordered_multimap/allocator/ext_ptr.cc
@@ -0,0 +1,57 @@
+// Copyright (C) 2020 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library.  This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING3.  If not see
+// <http://www.gnu.org/licenses/>.
+
+// { dg-do run { target c++11 } }
+
+#include <unordered_map>
+#include <memory>
+#include <testsuite_hooks.h>
+#include <testsuite_allocator.h>
+
+struct T { int i; };
+
+bool operator==(const T& l, const T& r)
+{ return l.i == r.i; }
+
+struct H
+{
+  std::size_t
+  operator()(const T& t) const noexcept
+  { return t.i; }
+};
+
+struct E : std::equal_to<T>
+{ };
+
+using __gnu_test::CustomPointerAlloc;
+
+template class std::unordered_multimap<T, int, H, E,
+				       CustomPointerAlloc<std::pair<const T, int>>>;
+
+void test01()
+{
+  typedef CustomPointerAlloc<std::pair<const T, int>> alloc_type;
+  typedef std::unordered_multimap<T, int, H, E, alloc_type> test_type;
+  test_type v;
+  v.insert({ T(), 0 });
+  VERIFY( ++v.begin() == v.end() );
+}
+
+int main()
+{
+  test01();
+}
diff --git a/libstdc++-v3/testsuite/23_containers/unordered_multiset/allocator/ext_ptr.cc b/libstdc++-v3/testsuite/23_containers/unordered_multiset/allocator/ext_ptr.cc
new file mode 100644
index 00000000000..36b5e10cc7b
--- /dev/null
+++ b/libstdc++-v3/testsuite/23_containers/unordered_multiset/allocator/ext_ptr.cc
@@ -0,0 +1,56 @@
+// Copyright (C) 2020 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library.  This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING3.  If not see
+// <http://www.gnu.org/licenses/>.
+
+// { dg-do run { target c++11 } }
+
+#include <unordered_set>
+#include <memory>
+#include <testsuite_hooks.h>
+#include <testsuite_allocator.h>
+
+struct T { int i; };
+
+bool operator==(const T& l, const T& r)
+{ return l.i == r.i; }
+
+struct H
+{
+  std::size_t
+  operator()(const T& t) const noexcept
+  { return t.i; }
+};
+
+struct E : std::equal_to<T>
+{ };
+
+using __gnu_test::CustomPointerAlloc;
+
+template class std::unordered_multiset<T, H, E, CustomPointerAlloc<T>>;
+
+void test01()
+{
+  typedef CustomPointerAlloc<T> alloc_type;
+  typedef std::unordered_multiset<T, H, E, alloc_type> test_type;
+  test_type v;
+  v.insert(T());
+  VERIFY( ++v.begin() == v.end() );
+}
+
+int main()
+{
+  test01();
+}
diff --git a/libstdc++-v3/testsuite/23_containers/unordered_set/allocator/ext_ptr.cc b/libstdc++-v3/testsuite/23_containers/unordered_set/allocator/ext_ptr.cc
index f6b908ac03e..479104709fb 100644
--- a/libstdc++-v3/testsuite/23_containers/unordered_set/allocator/ext_ptr.cc
+++ b/libstdc++-v3/testsuite/23_containers/unordered_set/allocator/ext_ptr.cc
@@ -15,10 +15,7 @@
 // with this library; see the file COPYING3.  If not see
 // <http://www.gnu.org/licenses/>.
 
-// This test fails to compile since C++17 (see xfail-if below) so we can only
-// do a "run" test for C++11 and C++14, and a "compile" test for C++17 and up.
-// { dg-do run { target { c++11_only || c++14_only } } }
-// { dg-do compile { target c++17 } }
+// { dg-do run { target { c++11 } } }
 
 #include <unordered_set>
 #include <memory>
@@ -26,15 +23,22 @@
 #include <testsuite_allocator.h>
 
 struct T { int i; };
-bool operator==(const T& l, const T& r) { return l.i == r.i; }
-struct H { std::size_t operator()(const T& t) const noexcept { return t.i; }
+
+bool operator==(const T& l, const T& r)
+{ return l.i == r.i; }
+
+struct H
+{
+  std::size_t
+  operator()(const T& t) const noexcept
+  { return t.i; }
 };
-struct E : std::equal_to<T> { };
+
+struct E : std::equal_to<T>
+{ };
 
 using __gnu_test::CustomPointerAlloc;
 
-// { dg-xfail-if "node reinsertion assumes raw pointers" { c++17 } }
-// TODO when removing this xfail change the test back to "dg-do run".
 template class std::unordered_set<T, H, E, CustomPointerAlloc<T>>;
 
 void test01()

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: libstdc++ PR 57272 Fancy pointer support in Hashtable
  2020-05-15 21:12 ` François Dumont
@ 2020-09-28 20:37   ` François Dumont
  2020-10-20 11:04     ` Jonathan Wakely
  0 siblings, 1 reply; 10+ messages in thread
From: François Dumont @ 2020-09-28 20:37 UTC (permalink / raw)
  To: libstdc++

[-- Attachment #1: Type: text/plain, Size: 5956 bytes --]

Following the recent changes to _Hashtable, I have rebased the patch and
completely reviewed it.

I managed to integrate the allocator's custom pointer type without
touching the _Hashtable base types such as _Hash_code_base or
_Hashtable_base. However, I cannot see how to use the custom pointer type
without impacting the node types such as _Hash_node_base, which now takes
the custom pointer type as a template parameter.

From an ABI point of view the node types are different, but the data
structure is the same. The only difference is that the _Hash_node_base
_M_nxt member is now a custom pointer to _Hash_node<> rather than a plain
_Hash_node_base*.

Even if this patch cannot go in because of the ABI-breaking change, I am
going to adapt some of its code simplifications for master — especially
the _Hash_code_base and _Local_iterator_base simplifications.

Let me know if you can think of a way to integrate the custom pointer
type without impacting the ABI. Alternatively, let me know if changing
the node types and the associated iterator types is acceptable — although
I have already noticed that the pretty-printer tests break with those
changes.

François


On 15/05/20 11:12 pm, François Dumont wrote:
> I think I completed this evolution.
>
> I eventually used ref to node pointer as much as possible and even use 
> move semantic on it.
>
> My prerequisite for this to work is that nullptr can be assign on the 
> fancy pointer and that a fancy pointer to __node_type is assignable 
> implicitely to a fancy pointer to __node_base.
>
>     * include/bits/hashtable_policy.h (_Hashtable_base): Add _Alloc
>     template parameter.
>         (_ReuseOrAllocNode<>::__node_type): Remove.
>         (_ReuseOrAllocNode<>::__node_pointer): New.
>         (_ReuseOrAllocNode(__node_pointer, __hashtable_alloc&)): Adapt 
> to use
>         latter.
>         (_ReuseOrAllocNode<>::operator()(_Arg&&)): Return latter.
>         (_AllocNode<>::__node_type): Remove.
>         (_AllocNode<>::__node_pointer): New.
>         (_AllocNode<>::operator()<>(_Arg&&)): Return latter.
>         (_Hash_node_base<>): Add _NodePtr template parameter.
>         (_Hash_node_value_base<>): Likewise.
>         (_Hash_node<>): Add _Ptr template parameter.
>         (_Hash_node<>::_M_next()): Remove.
>         (_Node_iterator_base<>): Use _NodePtr template parameter.
>         (operator==(const _Node_iterator_base&, const 
> _Node_iterator_base&)):
>         Make inline friend.
>         (operator!=(const _Node_iterator_base&, const 
> _Node_iterator_base&)):
>         Likewise.
>         (_Node_iterator<>): Use _NodePtr template parameter.
>         (_Node_const_iterator<>): Use _NodePtr template parameter.
>         (_Map_base<>::__node_type): Remove.
>         (_Map_base<>::__node_pointer): New.
>         (_Hash_code_base<>): Add _Alloc template parameter.
>         (_Hash_code_base<>::__pointer): New.
>         (_Hash_code_base<>::__node_pointer): New.
>         (_Hash_code_base<>::__node_ptr_arg_t): New.
>         (_Local_iterator_base<>): Add _Alloc template parameter. 
> Inherit from
>         _Node_iterator_base<>.
>         (_Local_iterator_base<>::__base_node_iter): New.
>         (_Local_iterator_base<>::_M_cur): Remove.
>         (_Local_iterator_base<>::_M_incr()): Adapt.
>         (_Local_iterator_base<>::_M_curr()): Remove.
>     (operator==(const _Local_iterator_base<>&,
>     const _Local_iterator_base<>&)): Remove.
>         (operator!=(const _Local_iterator_base<>&,
>         const _Local_iterator_base<>&)): Remove.
>         (_Local_iterator<>): Add _Alloc and _NodePtr template parameters.
>         (_Local_const_iterator<>): Likewise.
>         (_Hashtable_base<>): Add _Alloc template parameter.
>         (_Hashtable_alloc<>::__node_pointer): New.
>         (_Hashtable_alloc<>::__bucket_pointer): New.
>         (_Hashtable_alloc<>::_M_allocate_node): Adapt.
>         (_Hashtable_alloc<>::_M_deallocate_node): Adapt.
>         (_Hashtable_alloc<>::_M_deallocate_node_ptr): Adapt.
>         (_Hashtable_alloc<>::_M_deallocate_nodes): Adapt.
>         (_Hashtable_alloc<>::_M_allocate_buckets): Adapt.
>         (_Hashtable_alloc<>::_M_deallocate_buckets): Adapt.
>         * include/bits/hashtable.h (_Hashtable<>): Adapt.
>     (_Hashtable<>::_M_begin()): Remove.
>         * include/debug/unordered_map: Adapt.
>         * include/debug/unordered_set: Adapt.
>         * testsuite/23_containers/unordered_map/allocator/ext_ptr.cc: 
> New.
>         * 
> testsuite/23_containers/unordered_multimap/allocator/ext_ptr.cc: New.
>         * 
> testsuite/23_containers/unordered_multiset/allocator/ext_ptr.cc: New.
>         * testsuite/23_containers/unordered_set/allocator/ext_ptr.cc
>
> Tested under Linux x86_64.
>
> Ok to commit ?
>
> François
>
> On 19/04/20 7:31 pm, François Dumont wrote:
>> Here is my work in progress to use allocator pointer type. This type 
>> is used both as the node pointer and as the buckets pointer.
>>
>> Rather than adapting _Local_iterator_base like _Node_iterator_base I 
>> prefer to just make it inherits from _Node_iterator_base. It 
>> simplifies its implementation and avoids to provided dedicated 
>> comparison operators.
>>
>> Now I wonder if I need to consider Phil Bouchard comment regarding 
>> how node pointers are being passed, either by value or reference. I 
>> already chose to pass them as rvalue references in some occasions and 
>> even lvalue reference like in _M_bucket_index method. Do you think I 
>> need to continue this way ? Maybe I should use some conditional type, 
>> if raw pointer we pass by value and otherwise we pass by ref ?
>>
>> François
>>
>


[-- Attachment #2: hashtable_ext_ptr.patch --]
[-- Type: text/x-patch, Size: 88755 bytes --]

diff --git a/libstdc++-v3/include/bits/hashtable.h b/libstdc++-v3/include/bits/hashtable.h
index 07a4abe5c33..ea3411dec5f 100644
--- a/libstdc++-v3/include/bits/hashtable.h
+++ b/libstdc++-v3/include/bits/hashtable.h
@@ -101,7 +101,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
    *  - size_type       _M_bucket_count
    *  - size_type       _M_element_count
    *
-   *  with _Bucket being _Hash_node* and _Hash_node containing:
+   *  with _Bucket being _Hash_node_base* and _Hash_node containing:
    *
    *  - _Hash_node*   _M_next
    *  - Tp            _M_value
@@ -182,8 +182,14 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 				 _RehashPolicy, _Traits>,
       private __detail::_Hashtable_alloc<
 	__alloc_rebind<_Alloc,
-		       __detail::_Hash_node<_Value,
-					    _Traits::__hash_cached::value>>>
+		       __detail::_Hash_node<
+#if __cplusplus > 201703L || defined __STRICT_ANSI__
+			 typename std::allocator_traits<_Alloc>::pointer,
+#else
+			 typename std::allocator_traits<
+			   __alloc_rebind<_Alloc, _Value>>::pointer,
+#endif
+			 _Traits::__hash_cached::value>>>
     {
       static_assert(is_same<typename remove_cv<_Value>::type, _Value>::value,
 	  "unordered container must have a non-const, non-volatile value_type");
@@ -194,17 +200,40 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 
       using __traits_type = _Traits;
       using __hash_cached = typename __traits_type::__hash_cached;
-      using __node_type = __detail::_Hash_node<_Value, __hash_cached::value>;
-      using __node_alloc_type = __alloc_rebind<_Alloc, __node_type>;
-
-      using __hashtable_alloc = __detail::_Hashtable_alloc<__node_alloc_type>;
+      using __constant_iterators = typename __traits_type::__constant_iterators;
+      using __unique_keys = typename __traits_type::__unique_keys;
 
+      using __hashtable_alloc = __detail::_Hashtable_alloc<
+	__alloc_rebind<_Alloc,
+		       __detail::_Hash_node<
+#if __cplusplus > 201703L || defined __STRICT_ANSI__
+			 typename std::allocator_traits<_Alloc>::pointer,
+#else
+			 typename std::allocator_traits<
+			   __alloc_rebind<_Alloc, _Value>>::pointer,
+#endif
+			 __hash_cached::value>>>;
+
+      using __node_value_type =
+	__detail::_Hash_node_value<_Value, __hash_cached::value>;
+      using __node_type = typename __hashtable_alloc::__node_type;
+      using __node_pointer = typename __hashtable_alloc::__node_pointer;
+      using __node_alloc_type =
+	typename __hashtable_alloc::__node_alloc_type;
       using __value_alloc_traits =
 	typename __hashtable_alloc::__value_alloc_traits;
       using __node_alloc_traits =
 	typename __hashtable_alloc::__node_alloc_traits;
       using __node_base = typename __hashtable_alloc::__node_base;
-      using __bucket_type = typename __hashtable_alloc::__bucket_type;
+      using __node_base_ptr = typename __hashtable_alloc::__node_base_ptr;
+      using __buckets_pointer = typename __hashtable_alloc::__buckets_pointer;
+      using __bucket_ptr_traits = std::pointer_traits<__buckets_pointer>;
+      using __node_base_ptr_traits = std::pointer_traits<__node_base_ptr>;
+
+      using __insert_base = __detail::_Insert<_Key, _Value, _Alloc, _ExtractKey,
+					      _Equal, _Hash,
+					      _RangeHash, _Unused,
+					      _RehashPolicy, _Traits>;
 
     public:
       typedef _Key						key_type;
@@ -219,20 +248,32 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       typedef value_type&					reference;
       typedef const value_type&					const_reference;
 
+      using iterator = typename __insert_base::iterator;
+
+      using const_iterator = typename __insert_base::const_iterator;
+
+      using local_iterator = __detail::_Local_iterator<key_type, __node_pointer,
+			_ExtractKey, _Hash, _RangeHash, _Unused,
+					     __constant_iterators::value,
+					     __hash_cached::value>;
+
+      using const_local_iterator = __detail::_Local_const_iterator<
+			key_type, __node_pointer,
+			_ExtractKey, _Hash, _RangeHash, _Unused,
+			__constant_iterators::value, __hash_cached::value>;
+
     private:
       using __rehash_type = _RehashPolicy;
       using __rehash_state = typename __rehash_type::_State;
 
-      using __constant_iterators = typename __traits_type::__constant_iterators;
-      using __unique_keys = typename __traits_type::__unique_keys;
-
       using __hashtable_base = __detail::
 	_Hashtable_base<_Key, _Value, _ExtractKey,
 			_Equal, _Hash, _RangeHash, _Unused, _Traits>;
 
       using __hash_code_base =  typename __hashtable_base::__hash_code_base;
       using __hash_code =  typename __hashtable_base::__hash_code;
-      using __ireturn_type = typename __hashtable_base::__ireturn_type;
+
+      using __ireturn_type = typename __insert_base::__ireturn_type;
 
       using __map_base = __detail::_Map_base<_Key, _Value, _Alloc, _ExtractKey,
 					     _Equal, _Hash, _RangeHash, _Unused,
@@ -256,7 +297,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       struct _Scoped_node
       {
 	// Take ownership of a node with a constructed element.
-	_Scoped_node(__node_type* __n, __hashtable_alloc* __h)
+	_Scoped_node(__node_pointer __n, __hashtable_alloc* __h)
 	: _M_h(__h), _M_node(__n) { }
 
 	// Allocate a node and construct an element within it.
@@ -273,7 +314,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	_Scoped_node& operator=(const _Scoped_node&) = delete;
 
 	__hashtable_alloc* _M_h;
-	__node_type* _M_node;
+	__node_pointer _M_node;
       };
 
       template<typename _Ht>
@@ -293,8 +334,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       // Getting a bucket index from a node shall not throw because it is used
       // in methods (erase, swap...) that shall not throw.
       static_assert(noexcept(declval<const __hash_code_base_access&>()
-			     ._M_bucket_index((const __node_type*)nullptr,
-					      (std::size_t)0)),
+			._M_bucket_index(declval<const __node_value_type&>(),
+					 (std::size_t)0)),
 		    "Cache the hash code or qualify your functors involved"
 		    " in hash code and bucket index computation with noexcept");
 
@@ -345,20 +386,14 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       using size_type = typename __hashtable_base::size_type;
       using difference_type = typename __hashtable_base::difference_type;
 
-      using iterator = typename __hashtable_base::iterator;
-      using const_iterator = typename __hashtable_base::const_iterator;
-
-      using local_iterator = typename __hashtable_base::local_iterator;
-      using const_local_iterator = typename __hashtable_base::
-				   const_local_iterator;
-
 #if __cplusplus > 201402L
       using node_type = _Node_handle<_Key, _Value, __node_alloc_type>;
       using insert_return_type = _Node_insert_return<iterator, node_type>;
 #endif
 
     private:
-      __bucket_type*		_M_buckets		= &_M_single_bucket;
+      __buckets_pointer		_M_buckets
+	= __bucket_ptr_traits::pointer_to(_M_single_bucket);
       size_type			_M_bucket_count		= 1;
       __node_base		_M_before_begin;
       size_type			_M_element_count	= 0;
@@ -370,25 +405,29 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       // qualified.
       // Note that we can't leave hashtable with 0 bucket without adding
       // numerous checks in the code to avoid 0 modulus.
-      __bucket_type		_M_single_bucket	= nullptr;
+      __node_base_ptr		_M_single_bucket	= nullptr;
 
       void
       _M_update_bbegin()
       {
-	if (_M_begin())
-	  _M_buckets[_M_bucket_index(_M_begin())] = &_M_before_begin;
+	if (_M_before_begin._M_nxt)
+	  _M_buckets[_M_bucket_index(*_M_before_begin._M_nxt)] =
+	    __node_base_ptr_traits::pointer_to(_M_before_begin);
       }
 
       void
-      _M_update_bbegin(__node_type* __n)
+      _M_update_bbegin(__node_pointer __n)
       {
 	_M_before_begin._M_nxt = __n;
 	_M_update_bbegin();
       }
 
       bool
-      _M_uses_single_bucket(__bucket_type* __bkts) const
-      { return __builtin_expect(__bkts == &_M_single_bucket, false); }
+      _M_uses_single_bucket(__buckets_pointer __bkts) const
+      {
+	return __builtin_expect(std::__to_address(__bkts) == &_M_single_bucket,
+				false);
+      }
 
       bool
       _M_uses_single_bucket() const
@@ -397,20 +436,20 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       __hashtable_alloc&
       _M_base_alloc() { return *this; }
 
-      __bucket_type*
+      __buckets_pointer
       _M_allocate_buckets(size_type __bkt_count)
       {
 	if (__builtin_expect(__bkt_count == 1, false))
 	  {
 	    _M_single_bucket = nullptr;
-	    return &_M_single_bucket;
+	    return __bucket_ptr_traits::pointer_to(_M_single_bucket);
 	  }
 
 	return __hashtable_alloc::_M_allocate_buckets(__bkt_count);
       }
 
       void
-      _M_deallocate_buckets(__bucket_type* __bkts, size_type __bkt_count)
+      _M_deallocate_buckets(__buckets_pointer __bkts, size_type __bkt_count)
       {
 	if (_M_uses_single_bucket(__bkts))
 	  return;
@@ -424,13 +463,9 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 
       // Gets bucket begin, deals with the fact that non-empty buckets contain
       // their before begin node.
-      __node_type*
+      __node_pointer
       _M_bucket_begin(size_type __bkt) const;
 
-      __node_type*
-      _M_begin() const
-      { return static_cast<__node_type*>(_M_before_begin._M_nxt); }
-
       // Assign *this using another _Hashtable instance. Whether elements
       // are copied or moved depends on the _Ht reference.
       template<typename _Ht>
@@ -552,7 +587,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       _Hashtable&
       operator=(initializer_list<value_type> __l)
       {
-	__reuse_or_alloc_node_gen_t __roan(_M_begin(), *this);
+	__reuse_or_alloc_node_gen_t __roan(_M_before_begin._M_nxt,
+					   *this);
 	_M_before_begin._M_nxt = nullptr;
 	clear();
 
@@ -577,11 +613,11 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       // Basic container operations
       iterator
       begin() noexcept
-      { return iterator(_M_begin()); }
+      { return iterator(_M_before_begin._M_nxt); }
 
       const_iterator
       begin() const noexcept
-      { return const_iterator(_M_begin()); }
+      { return const_iterator(_M_before_begin._M_nxt); }
 
       iterator
       end() noexcept
@@ -593,7 +629,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 
       const_iterator
       cbegin() const noexcept
-      { return const_iterator(_M_begin()); }
+      { return const_iterator(_M_before_begin._M_nxt); }
 
       const_iterator
       cend() const noexcept
@@ -642,36 +678,45 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       local_iterator
       begin(size_type __bkt)
       {
-	return local_iterator(*this, _M_bucket_begin(__bkt),
+	return local_iterator(this->hash_function(), _M_bucket_begin(__bkt),
 			      __bkt, _M_bucket_count);
       }
 
       local_iterator
       end(size_type __bkt)
-      { return local_iterator(*this, nullptr, __bkt, _M_bucket_count); }
+      {
+	return local_iterator(this->hash_function(), nullptr,
+			      __bkt, _M_bucket_count);
+      }
 
       const_local_iterator
       begin(size_type __bkt) const
       {
-	return const_local_iterator(*this, _M_bucket_begin(__bkt),
+	return const_local_iterator(this->hash_function(), _M_bucket_begin(__bkt),
 				    __bkt, _M_bucket_count);
       }
 
       const_local_iterator
       end(size_type __bkt) const
-      { return const_local_iterator(*this, nullptr, __bkt, _M_bucket_count); }
+      {
+	return const_local_iterator(this->hash_function(), nullptr,
+				    __bkt, _M_bucket_count);
+      }
 
       // DR 691.
       const_local_iterator
       cbegin(size_type __bkt) const
       {
-	return const_local_iterator(*this, _M_bucket_begin(__bkt),
+	return const_local_iterator(this->hash_function(), _M_bucket_begin(__bkt),
 				    __bkt, _M_bucket_count);
       }
 
       const_local_iterator
       cend(size_type __bkt) const
-      { return const_local_iterator(*this, nullptr, __bkt, _M_bucket_count); }
+      {
+	return const_local_iterator(this->hash_function(), nullptr,
+				    __bkt, _M_bucket_count);
+      }
 
       float
       load_factor() const noexcept
@@ -711,7 +756,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     private:
       // Bucket index computation helpers.
       size_type
-      _M_bucket_index(__node_type* __n) const noexcept
+      _M_bucket_index(const __node_value_type& __n) const noexcept
       { return __hash_code_base::_M_bucket_index(__n, _M_bucket_count); }
 
       size_type
@@ -720,44 +765,44 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 
       // Find and insert helper functions and types
       // Find the node before the one matching the criteria.
-      __node_base*
+      __node_base_ptr
       _M_find_before_node(size_type, const key_type&, __hash_code) const;
 
-      __node_type*
+      __node_pointer
       _M_find_node(size_type __bkt, const key_type& __key,
 		   __hash_code __c) const
       {
-	__node_base* __before_n = _M_find_before_node(__bkt, __key, __c);
+	__node_base_ptr __before_n = _M_find_before_node(__bkt, __key, __c);
 	if (__before_n)
-	  return static_cast<__node_type*>(__before_n->_M_nxt);
+	  return __before_n->_M_nxt;
 	return nullptr;
       }
 
       // Insert a node at the beginning of a bucket.
       void
-      _M_insert_bucket_begin(size_type, __node_type*);
+      _M_insert_bucket_begin(size_type, __node_pointer);
 
       // Remove the bucket first node
       void
-      _M_remove_bucket_begin(size_type __bkt, __node_type* __next_n,
+      _M_remove_bucket_begin(size_type __bkt, __node_pointer __next_n,
 			     size_type __next_bkt);
 
       // Get the node before __n in the bucket __bkt
-      __node_base*
-      _M_get_previous_node(size_type __bkt, __node_base* __n);
+      __node_base_ptr
+      _M_get_previous_node(size_type __bkt, __node_pointer __n);
 
       // Insert node __n with hash code __code, in bucket __bkt if no
       // rehash (assumes no element with same key already present).
       // Takes ownership of __n if insertion succeeds, throws otherwise.
       iterator
       _M_insert_unique_node(size_type __bkt, __hash_code,
-			    __node_type* __n, size_type __n_elt = 1);
+			    __node_pointer __n, size_type __n_elt = 1);
 
       // Insert node __n with key __k and hash code __code.
       // Takes ownership of __n if insertion succeeds, throws otherwise.
       iterator
-      _M_insert_multi_node(__node_type* __hint,
-			   __hash_code __code, __node_type* __n);
+      _M_insert_multi_node(__node_pointer __hint,
+			   __hash_code __code, __node_pointer __n);
 
       template<typename... _Args>
 	std::pair<iterator, bool>
@@ -814,7 +859,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       _M_erase(false_type __uks, const key_type&);
 
       iterator
-      _M_erase(size_type __bkt, __node_base* __prev_n, __node_type* __n);
+      _M_erase(size_type __bkt, __node_base_ptr __prev_n, __node_pointer __n);
 
     public:
       // Emplace
@@ -874,7 +919,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	    const key_type& __k = __nh._M_key();
 	    __hash_code __code = this->_M_hash_code(__k);
 	    size_type __bkt = _M_bucket_index(__code);
-	    if (__node_type* __n = _M_find_node(__bkt, __k, __code))
+	    if (__node_pointer __n = _M_find_node(__bkt, __k, __code))
 	      {
 		__ret.node = std::move(__nh);
 		__ret.position = iterator(__n);
@@ -910,15 +955,15 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 
     private:
       node_type
-      _M_extract_node(size_t __bkt, __node_base* __prev_n)
+      _M_extract_node(size_t __bkt, __node_base_ptr __prev_n)
       {
-	__node_type* __n = static_cast<__node_type*>(__prev_n->_M_nxt);
+	__node_pointer __n = __prev_n->_M_nxt;
 	if (__prev_n == _M_buckets[__bkt])
-	  _M_remove_bucket_begin(__bkt, __n->_M_next(),
-	     __n->_M_nxt ? _M_bucket_index(__n->_M_next()) : 0);
+	  _M_remove_bucket_begin(__bkt, __n->_M_nxt,
+	     __n->_M_nxt ? _M_bucket_index(*__n->_M_nxt) : 0);
 	else if (__n->_M_nxt)
 	  {
-	    size_type __next_bkt = _M_bucket_index(__n->_M_next());
+	    size_type __next_bkt = _M_bucket_index(*__n->_M_nxt);
 	    if (__next_bkt != __bkt)
 	      _M_buckets[__next_bkt] = __prev_n;
 	  }
@@ -934,7 +979,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       node_type
       extract(const_iterator __pos)
       {
-	size_t __bkt = _M_bucket_index(__pos._M_cur);
+	size_t __bkt = _M_bucket_index(*__pos._M_cur);
 	return _M_extract_node(__bkt,
 			       _M_get_previous_node(__bkt, __pos._M_cur));
       }
@@ -946,7 +991,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	node_type __nh;
 	__hash_code __code = this->_M_hash_code(__k);
 	std::size_t __bkt = _M_bucket_index(__code);
-	if (__node_base* __prev_node = _M_find_before_node(__bkt, __k, __code))
+	if (__node_base_ptr __prev_node = _M_find_before_node(__bkt, __k, __code))
 	  __nh = _M_extract_node(__bkt, __prev_node);
 	return __nh;
       }
@@ -1016,10 +1061,10 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
     _M_bucket_begin(size_type __bkt) const
-    -> __node_type*
+    -> __node_pointer
     {
-      __node_base* __n = _M_buckets[__bkt];
-      return __n ? static_cast<__node_type*>(__n->_M_nxt) : nullptr;
+      __node_base_ptr __n = _M_buckets[__bkt];
+      return __n ? __n->_M_nxt : nullptr;
     }
 
   template<typename _Key, typename _Value, typename _Alloc,
@@ -1107,7 +1152,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	      && __this_alloc != __that_alloc)
 	    {
 	      // Replacement allocator cannot free existing storage.
-	      this->_M_deallocate_nodes(_M_begin());
+	      this->_M_deallocate_nodes(_M_before_begin._M_nxt);
 	      _M_before_begin._M_nxt = nullptr;
 	      _M_deallocate_buckets();
 	      _M_buckets = nullptr;
@@ -1148,7 +1193,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 		 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
       _M_assign_elements(_Ht&& __ht)
       {
-	__bucket_type* __former_buckets = nullptr;
+	__buckets_pointer __former_buckets = nullptr;
 	std::size_t __former_bucket_count = _M_bucket_count;
 	const __rehash_state& __former_state = _M_rehash_policy._M_state();
 
@@ -1159,15 +1204,14 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	    _M_bucket_count = __ht._M_bucket_count;
 	  }
 	else
-	  __builtin_memset(_M_buckets, 0,
-			   _M_bucket_count * sizeof(__bucket_type));
+	  std::fill_n(_M_buckets, _M_bucket_count, nullptr);
 
 	__try
 	  {
 	    __hashtable_base::operator=(std::forward<_Ht>(__ht));
 	    _M_element_count = __ht._M_element_count;
 	    _M_rehash_policy = __ht._M_rehash_policy;
-	    __reuse_or_alloc_node_gen_t __roan(_M_begin(), *this);
+	    __reuse_or_alloc_node_gen_t __roan(_M_before_begin._M_nxt, *this);
 	    _M_before_begin._M_nxt = nullptr;
 	    _M_assign(std::forward<_Ht>(__ht), __roan);
 	    if (__former_buckets)
@@ -1183,8 +1227,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 		_M_buckets = __former_buckets;
 		_M_bucket_count = __former_bucket_count;
 	      }
-	    __builtin_memset(_M_buckets, 0,
-			     _M_bucket_count * sizeof(__bucket_type));
+	    std::fill_n(_M_buckets, _M_bucket_count, nullptr);
 	    __throw_exception_again;
 	  }
       }
@@ -1199,7 +1242,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 		 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
       _M_assign(_Ht&& __ht, const _NodeGenerator& __node_gen)
       {
-	__bucket_type* __buckets = nullptr;
+	__buckets_pointer __buckets = nullptr;
 	if (!_M_buckets)
 	  _M_buckets = __buckets = _M_allocate_buckets(_M_bucket_count);
 
@@ -1210,20 +1253,20 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 
 	    // First deal with the special first node pointed to by
 	    // _M_before_begin.
-	    __node_type* __ht_n = __ht._M_begin();
-	    __node_type* __this_n
+	    __node_pointer __ht_n = __ht._M_before_begin._M_nxt;
+	    __node_pointer __this_n
 	      = __node_gen(__fwd_value_for<_Ht>(__ht_n->_M_v()));
-	    this->_M_copy_code(__this_n, __ht_n);
+	    this->_M_copy_code(*__this_n, *__ht_n);
 	    _M_update_bbegin(__this_n);
 
 	    // Then deal with other nodes.
-	    __node_base* __prev_n = __this_n;
-	    for (__ht_n = __ht_n->_M_next(); __ht_n; __ht_n = __ht_n->_M_next())
+	    __node_pointer __prev_n = __this_n;
+	    for (__ht_n = __ht_n->_M_nxt; __ht_n; __ht_n = __ht_n->_M_nxt)
 	      {
 		__this_n = __node_gen(__fwd_value_for<_Ht>(__ht_n->_M_v()));
 		__prev_n->_M_nxt = __this_n;
-		this->_M_copy_code(__this_n, __ht_n);
-		size_type __bkt = _M_bucket_index(__this_n);
+		this->_M_copy_code(*__this_n, *__ht_n);
+		size_type __bkt = _M_bucket_index(*__this_n);
 		if (!_M_buckets[__bkt])
 		  _M_buckets[__bkt] = __prev_n;
 		__prev_n = __this_n;
@@ -1250,7 +1293,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       _M_rehash_policy._M_reset();
       _M_bucket_count = 1;
       _M_single_bucket = nullptr;
-      _M_buckets = &_M_single_bucket;
+      _M_buckets = __bucket_ptr_traits::pointer_to(_M_single_bucket);
       _M_before_begin._M_nxt = nullptr;
       _M_element_count = 0;
     }
@@ -1267,7 +1310,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       if (__builtin_expect(std::__addressof(__ht) == this, false))
 	return;
 
-      this->_M_deallocate_nodes(_M_begin());
+      this->_M_deallocate_nodes(_M_before_begin._M_nxt);
       _M_deallocate_buckets();
       __hashtable_base::operator=(std::move(__ht));
       _M_rehash_policy = __ht._M_rehash_policy;
@@ -1275,7 +1318,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	_M_buckets = __ht._M_buckets;
       else
 	{
-	  _M_buckets = &_M_single_bucket;
+	  _M_buckets = __bucket_ptr_traits::pointer_to(_M_single_bucket);
 	  _M_single_bucket = __ht._M_single_bucket;
 	}
 
@@ -1350,9 +1393,9 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       _M_rehash_policy(__ht._M_rehash_policy)
     {
       // Update buckets if __ht is using its single bucket.
-      if (__ht._M_uses_single_bucket())
+      if (std::__to_address(_M_buckets) == &__ht._M_single_bucket)
 	{
-	  _M_buckets = &_M_single_bucket;
+	  _M_buckets = __bucket_ptr_traits::pointer_to(_M_single_bucket);
 	  _M_single_bucket = __ht._M_single_bucket;
 	}
 
@@ -1373,7 +1416,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       __map_base(__ht),
       __rehash_base(__ht),
       __hashtable_alloc(__node_alloc_type(__a)),
-      _M_buckets(),
+      _M_buckets(nullptr),
       _M_bucket_count(__ht._M_bucket_count),
       _M_element_count(__ht._M_element_count),
       _M_rehash_policy(__ht._M_rehash_policy)
@@ -1403,7 +1446,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	{
 	  if (__ht._M_uses_single_bucket())
 	    {
-	      _M_buckets = &_M_single_bucket;
+	      _M_buckets = __bucket_ptr_traits::pointer_to(_M_single_bucket);
 	      _M_single_bucket = __ht._M_single_bucket;
 	    }
 	  else
@@ -1411,7 +1454,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 
 	  // Fix bucket containing the _M_before_begin pointer that can't be
 	  // moved.
-	  _M_update_bbegin(__ht._M_begin());
+	  _M_update_bbegin(__ht._M_before_begin._M_nxt);
 
 	  __ht._M_reset();
 	}
@@ -1464,13 +1507,14 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	  if (!__x._M_uses_single_bucket())
 	    {
 	      _M_buckets = __x._M_buckets;
-	      __x._M_buckets = &__x._M_single_bucket;
+	      __x._M_buckets =
+		__bucket_ptr_traits::pointer_to(__x._M_single_bucket);
 	    }
 	}
       else if (__x._M_uses_single_bucket())
 	{
 	  __x._M_buckets = _M_buckets;
-	  _M_buckets = &_M_single_bucket;
+	  _M_buckets = __bucket_ptr_traits::pointer_to(_M_single_bucket);
 	}	
       else
 	std::swap(_M_buckets, __x._M_buckets);
@@ -1538,7 +1582,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       // find any new equivalent value.
       size_type __result = 1;
       for (auto __ref = __it++;
-	   __it._M_cur && this->_M_node_equals(__ref._M_cur, __it._M_cur);
+	   __it._M_cur && this->_M_node_equals(*__ref._M_cur, *__it._M_cur);
 	   ++__it)
 	++__result;
 
@@ -1566,7 +1610,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       // All equivalent values are next to each other, if we find a
       // non-equivalent value after an equivalent one it means that we won't
       // find any new equivalent value.
-      while (__ite._M_cur && this->_M_node_equals(__beg._M_cur, __ite._M_cur))
+      while (__ite._M_cur && this->_M_node_equals(*__beg._M_cur, *__ite._M_cur))
 	++__ite;
 
       return { __beg, __ite };
@@ -1593,7 +1637,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       // All equivalent values are next to each other, if we find a
       // non-equivalent value after an equivalent one it means that we won't
       // find any new equivalent value.
-      while (__ite._M_cur && this->_M_node_equals(__beg._M_cur, __ite._M_cur))
+      while (__ite._M_cur && this->_M_node_equals(*__beg._M_cur, *__ite._M_cur))
 	++__ite;
 
       return { __beg, __ite };
@@ -1610,19 +1654,18 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
     _M_find_before_node(size_type __bkt, const key_type& __k,
 			__hash_code __code) const
-    -> __node_base*
+    -> __node_base_ptr
     {
-      __node_base* __prev_p = _M_buckets[__bkt];
+      __node_base_ptr __prev_p = _M_buckets[__bkt];
       if (!__prev_p)
 	return nullptr;
 
-      for (__node_type* __p = static_cast<__node_type*>(__prev_p->_M_nxt);;
-	   __p = __p->_M_next())
+      for (__node_pointer __p = __prev_p->_M_nxt;; __p = __p->_M_nxt)
 	{
-	  if (this->_M_equals(__k, __code, __p))
+	  if (this->_M_equals(__k, __code, *__p))
 	    return __prev_p;
 
-	  if (!__p->_M_nxt || _M_bucket_index(__p->_M_next()) != __bkt)
+	  if (!__p->_M_nxt || _M_bucket_index(*__p->_M_nxt) != __bkt)
 	    break;
 	  __prev_p = __p;
 	}
@@ -1637,7 +1680,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     void
     _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
-    _M_insert_bucket_begin(size_type __bkt, __node_type* __node)
+    _M_insert_bucket_begin(size_type __bkt, __node_pointer __node)
     {
       if (_M_buckets[__bkt])
 	{
@@ -1657,9 +1700,9 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	  if (__node->_M_nxt)
 	    // We must update former begin bucket that is pointing to
 	    // _M_before_begin.
-	    _M_buckets[_M_bucket_index(__node->_M_next())] = __node;
-
-	  _M_buckets[__bkt] = &_M_before_begin;
+	    _M_buckets[_M_bucket_index(*__node->_M_nxt)] = __node;
+	  _M_buckets[__bkt] =
+	    __node_base_ptr_traits::pointer_to(_M_before_begin);
 	}
     }
 
@@ -1670,7 +1713,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     void
     _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
-    _M_remove_bucket_begin(size_type __bkt, __node_type* __next,
+    _M_remove_bucket_begin(size_type __bkt, __node_pointer __next,
 			   size_type __next_bkt)
     {
       if (!__next || __next_bkt != __bkt)
@@ -1681,7 +1724,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	    _M_buckets[__next_bkt] = _M_buckets[__bkt];
 
 	  // Second update before begin node if necessary
-	  if (&_M_before_begin == _M_buckets[__bkt])
+	  if (&_M_before_begin == std::__to_address(_M_buckets[__bkt]))
 	    _M_before_begin._M_nxt = __next;
 	  _M_buckets[__bkt] = nullptr;
 	}
@@ -1694,10 +1737,10 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     auto
     _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
-    _M_get_previous_node(size_type __bkt, __node_base* __n)
-    -> __node_base*
+    _M_get_previous_node(size_type __bkt, __node_pointer __n)
+    -> __node_base_ptr
     {
-      __node_base* __prev_n = _M_buckets[__bkt];
+      __node_base_ptr __prev_n = _M_buckets[__bkt];
       while (__prev_n->_M_nxt != __n)
 	__prev_n = __prev_n->_M_nxt;
       return __prev_n;
@@ -1719,7 +1762,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	const key_type& __k = _ExtractKey{}(__node._M_node->_M_v());
 	__hash_code __code = this->_M_hash_code(__k);
 	size_type __bkt = _M_bucket_index(__code);
-	if (__node_type* __p = _M_find_node(__bkt, __k, __code))
+	if (__node_pointer __p = _M_find_node(__bkt, __k, __code))
 	  // There is already an equivalent node, no insertion
 	  return std::make_pair(iterator(__p), false);
 
@@ -1760,7 +1803,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
     _M_insert_unique_node(size_type __bkt, __hash_code __code,
-			  __node_type* __node, size_type __n_elt)
+			  __node_pointer __node, size_type __n_elt)
     -> iterator
     {
       const __rehash_state& __saved_state = _M_rehash_policy._M_state();
@@ -1774,7 +1817,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	  __bkt = _M_bucket_index(__code);
 	}
 
-      this->_M_store_code(__node, __code);
+      this->_M_store_code(*__node, __code);
 
       // Always insert at the beginning of the bucket.
       _M_insert_bucket_begin(__bkt, __node);
@@ -1789,8 +1832,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     auto
     _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
-    _M_insert_multi_node(__node_type* __hint,
-			 __hash_code __code, __node_type* __node)
+    _M_insert_multi_node(__node_pointer __hint,
+			 __hash_code __code, __node_pointer __node)
     -> iterator
     {
       const __rehash_state& __saved_state = _M_rehash_policy._M_state();
@@ -1800,17 +1843,19 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       if (__do_rehash.first)
 	_M_rehash(__do_rehash.second, __saved_state);
 
-      this->_M_store_code(__node, __code);
+      this->_M_store_code(*__node, __code);
       const key_type& __k = _ExtractKey{}(__node->_M_v());
       size_type __bkt = _M_bucket_index(__code);
 
       // Find the node before an equivalent one or use hint if it exists and
       // if it is equivalent.
-      __node_base* __prev
-	= __builtin_expect(__hint != nullptr, false)
-	  && this->_M_equals(__k, __code, __hint)
-	    ? __hint
-	    : _M_find_before_node(__bkt, __k, __code);
+      __node_base_ptr __prev;
+      if (__builtin_expect(__hint != nullptr, false)
+	  && this->_M_equals(__k, __code, *__hint))
+	__prev = __hint;
+      else
+	__prev = _M_find_before_node(__bkt, __k, __code);
+
       if (__prev)
 	{
 	  // Insert after the node before the equivalent one.
@@ -1820,9 +1865,9 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	    // hint might be the last bucket node, in this case we need to
 	    // update next bucket.
 	    if (__node->_M_nxt
-		&& !this->_M_equals(__k, __code, __node->_M_next()))
+		&& !this->_M_equals(__k, __code, *__node->_M_nxt))
 	      {
-		size_type __next_bkt = _M_bucket_index(__node->_M_next());
+		size_type __next_bkt = _M_bucket_index(*__node->_M_nxt);
 		if (__next_bkt != __bkt)
 		  _M_buckets[__next_bkt] = __node;
 	      }
@@ -1853,7 +1898,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	__hash_code __code = this->_M_hash_code(__k);
 	size_type __bkt = _M_bucket_index(__code);
 
-	if (__node_type* __node = _M_find_node(__bkt, __k, __code))
+	if (__node_pointer __node = _M_find_node(__bkt, __k, __code))
 	  return { iterator(__node), false };
 
 	_Scoped_node __node{ __node_gen(std::forward<_Arg>(__v)), this };
@@ -1899,14 +1944,13 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     erase(const_iterator __it)
     -> iterator
     {
-      __node_type* __n = __it._M_cur;
-      std::size_t __bkt = _M_bucket_index(__n);
+      std::size_t __bkt = _M_bucket_index(*__it._M_cur);
 
       // Look for previous node to unlink it from the erased one, this
       // is why we need buckets to contain the before begin to make
       // this search fast.
-      __node_base* __prev_n = _M_get_previous_node(__bkt, __n);
-      return _M_erase(__bkt, __prev_n, __n);
+      __node_base_ptr __prev_n = _M_get_previous_node(__bkt, __it._M_cur);
+      return _M_erase(__bkt, __prev_n, __it._M_cur);
     }
 
   template<typename _Key, typename _Value, typename _Alloc,
@@ -1916,21 +1960,21 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     auto
     _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
-    _M_erase(size_type __bkt, __node_base* __prev_n, __node_type* __n)
+    _M_erase(size_type __bkt, __node_base_ptr __prev_n, __node_pointer __n)
     -> iterator
     {
       if (__prev_n == _M_buckets[__bkt])
-	_M_remove_bucket_begin(__bkt, __n->_M_next(),
-	   __n->_M_nxt ? _M_bucket_index(__n->_M_next()) : 0);
+	_M_remove_bucket_begin(__bkt, __n->_M_nxt,
+	   __n->_M_nxt ? _M_bucket_index(*__n->_M_nxt) : 0);
       else if (__n->_M_nxt)
 	{
-	  size_type __next_bkt = _M_bucket_index(__n->_M_next());
+	  size_type __next_bkt = _M_bucket_index(*__n->_M_nxt);
 	  if (__next_bkt != __bkt)
 	    _M_buckets[__next_bkt] = __prev_n;
 	}
 
       __prev_n->_M_nxt = __n->_M_nxt;
-      iterator __result(__n->_M_next());
+      iterator __result(__n->_M_nxt);
       this->_M_deallocate_node(__n);
       --_M_element_count;
 
@@ -1951,13 +1995,12 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       std::size_t __bkt = _M_bucket_index(__code);
 
       // Look for the node before the first matching node.
-      __node_base* __prev_n = _M_find_before_node(__bkt, __k, __code);
+      __node_base_ptr __prev_n = _M_find_before_node(__bkt, __k, __code);
       if (!__prev_n)
 	return 0;
 
       // We found a matching node, erase it.
-      __node_type* __n = static_cast<__node_type*>(__prev_n->_M_nxt);
-      _M_erase(__bkt, __prev_n, __n);
+      _M_erase(__bkt, __prev_n, __prev_n->_M_nxt);
       return 1;
     }
 
@@ -1975,7 +2018,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       std::size_t __bkt = _M_bucket_index(__code);
 
       // Look for the node before the first matching node.
-      __node_base* __prev_n = _M_find_before_node(__bkt, __k, __code);
+      __node_base_ptr __prev_n = _M_find_before_node(__bkt, __k, __code);
       if (!__prev_n)
 	return 0;
 
@@ -1985,18 +2028,18 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       // We use one loop to find all matching nodes and another to deallocate
       // them so that the key stays valid during the first loop. It might be
       // invalidated indirectly when destroying nodes.
-      __node_type* __n = static_cast<__node_type*>(__prev_n->_M_nxt);
-      __node_type* __n_last = __n->_M_next();
-      while (__n_last && this->_M_node_equals(__n, __n_last))
-	__n_last = __n_last->_M_next();
+      __node_pointer __n = __prev_n->_M_nxt;
+      __node_pointer __n_last = __n->_M_nxt;
+      while (__n_last && this->_M_node_equals(*__n, *__n_last))
+	__n_last = __n_last->_M_nxt;
 
-      std::size_t __n_last_bkt = __n_last ? _M_bucket_index(__n_last) : __bkt;
+      std::size_t __n_last_bkt = __n_last ? _M_bucket_index(*__n_last) : __bkt;
 
       // Deallocate nodes.
       size_type __result = 0;
       do
 	{
-	  __node_type* __p = __n->_M_next();
+	  __node_pointer __p = __n->_M_nxt;
 	  this->_M_deallocate_node(__n);
 	  __n = __p;
 	  ++__result;
@@ -2022,27 +2065,27 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     erase(const_iterator __first, const_iterator __last)
     -> iterator
     {
-      __node_type* __n = __first._M_cur;
-      __node_type* __last_n = __last._M_cur;
+      __node_pointer __n = __first._M_cur;
+      __node_pointer __last_n = __last._M_cur;
       if (__n == __last_n)
 	return iterator(__n);
 
-      std::size_t __bkt = _M_bucket_index(__n);
+      std::size_t __bkt = _M_bucket_index(*__n);
 
-      __node_base* __prev_n = _M_get_previous_node(__bkt, __n);
+      __node_base_ptr __prev_n = _M_get_previous_node(__bkt, __n);
       bool __is_bucket_begin = __n == _M_bucket_begin(__bkt);
       std::size_t __n_bkt = __bkt;
       for (;;)
 	{
 	  do
 	    {
-	      __node_type* __tmp = __n;
-	      __n = __n->_M_next();
+	      __node_pointer __tmp = __n;
+	      __n = __n->_M_nxt;
 	      this->_M_deallocate_node(__tmp);
 	      --_M_element_count;
 	      if (!__n)
 		break;
-	      __n_bkt = _M_bucket_index(__n);
+	      __n_bkt = _M_bucket_index(*__n);
 	    }
 	  while (__n != __last_n && __n_bkt == __bkt);
 	  if (__is_bucket_begin)
@@ -2068,8 +2111,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
     clear() noexcept
     {
-      this->_M_deallocate_nodes(_M_begin());
-      __builtin_memset(_M_buckets, 0, _M_bucket_count * sizeof(__bucket_type));
+      this->_M_deallocate_nodes(_M_before_begin._M_nxt);
+      std::fill_n(_M_buckets, _M_bucket_count, nullptr);
       _M_element_count = 0;
       _M_before_begin._M_nxt = nullptr;
     }
@@ -2129,20 +2172,22 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
     _M_rehash_aux(size_type __bkt_count, true_type /* __uks */)
     {
-      __bucket_type* __new_buckets = _M_allocate_buckets(__bkt_count);
-      __node_type* __p = _M_begin();
+      __buckets_pointer __new_buckets = _M_allocate_buckets(__bkt_count);
+      auto __before_begin_ptr =
+	__node_base_ptr_traits::pointer_to(_M_before_begin);
+      __node_pointer __p = _M_before_begin._M_nxt;
       _M_before_begin._M_nxt = nullptr;
       std::size_t __bbegin_bkt = 0;
       while (__p)
 	{
-	  __node_type* __next = __p->_M_next();
+	  __node_pointer __next = __p->_M_nxt;
 	  std::size_t __bkt
-	    = __hash_code_base::_M_bucket_index(__p, __bkt_count);
+	    = __hash_code_base::_M_bucket_index(*__p, __bkt_count);
 	  if (!__new_buckets[__bkt])
 	    {
 	      __p->_M_nxt = _M_before_begin._M_nxt;
 	      _M_before_begin._M_nxt = __p;
-	      __new_buckets[__bkt] = &_M_before_begin;
+	      __new_buckets[__bkt] = __before_begin_ptr;
 	      if (__p->_M_nxt)
 		__new_buckets[__bbegin_bkt] = __p;
 	      __bbegin_bkt = __bkt;
@@ -2172,20 +2217,21 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
     _M_rehash_aux(size_type __bkt_count, false_type /* __uks */)
     {
-      __bucket_type* __new_buckets = _M_allocate_buckets(__bkt_count);
-
-      __node_type* __p = _M_begin();
+      auto __new_buckets = _M_allocate_buckets(__bkt_count);
+      auto __before_begin_ptr =
+	__node_base_ptr_traits::pointer_to(_M_before_begin);
+      __node_pointer __p = _M_before_begin._M_nxt;
       _M_before_begin._M_nxt = nullptr;
       std::size_t __bbegin_bkt = 0;
       std::size_t __prev_bkt = 0;
-      __node_type* __prev_p = nullptr;
+      __node_pointer __prev_p{};
       bool __check_bucket = false;
 
       while (__p)
 	{
-	  __node_type* __next = __p->_M_next();
+	  __node_pointer __next = __p->_M_nxt;
 	  std::size_t __bkt
-	    = __hash_code_base::_M_bucket_index(__p, __bkt_count);
+	    = __hash_code_base::_M_bucket_index(*__p, __bkt_count);
 
 	  if (__prev_p && __prev_bkt == __bkt)
 	    {
@@ -2211,7 +2257,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 		  if (__prev_p->_M_nxt)
 		    {
 		      std::size_t __next_bkt
-			= __hash_code_base::_M_bucket_index(__prev_p->_M_next(),
+			= __hash_code_base::_M_bucket_index(*__prev_p->_M_nxt,
 							    __bkt_count);
 		      if (__next_bkt != __prev_bkt)
 			__new_buckets[__next_bkt] = __prev_p;
@@ -2223,7 +2269,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 		{
 		  __p->_M_nxt = _M_before_begin._M_nxt;
 		  _M_before_begin._M_nxt = __p;
-		  __new_buckets[__bkt] = &_M_before_begin;
+		  __new_buckets[__bkt] = __before_begin_ptr;
 		  if (__p->_M_nxt)
 		    __new_buckets[__bbegin_bkt] = __p;
 		  __bbegin_bkt = __bkt;
@@ -2242,7 +2288,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       if (__check_bucket && __prev_p->_M_nxt)
 	{
 	  std::size_t __next_bkt
-	    = __hash_code_base::_M_bucket_index(__prev_p->_M_next(),
+	    = __hash_code_base::_M_bucket_index(*__prev_p->_M_nxt,
 						__bkt_count);
 	  if (__next_bkt != __prev_bkt)
 	    __new_buckets[__next_bkt] = __prev_p;
diff --git a/libstdc++-v3/include/bits/hashtable_policy.h b/libstdc++-v3/include/bits/hashtable_policy.h
index 0109ef86a7b..3f5faa7dba8 100644
--- a/libstdc++-v3/include/bits/hashtable_policy.h
+++ b/libstdc++-v3/include/bits/hashtable_policy.h
@@ -107,10 +107,10 @@ namespace __detail
       using __hashtable_alloc = _Hashtable_alloc<__node_alloc_type>;
       using __node_alloc_traits =
 	typename __hashtable_alloc::__node_alloc_traits;
-      using __node_type = typename __hashtable_alloc::__node_type;
+      using __node_pointer = typename __hashtable_alloc::__node_pointer;
 
     public:
-      _ReuseOrAllocNode(__node_type* __nodes, __hashtable_alloc& __h)
+      _ReuseOrAllocNode(__node_pointer __nodes, __hashtable_alloc& __h)
       : _M_nodes(__nodes), _M_h(__h) { }
       _ReuseOrAllocNode(const _ReuseOrAllocNode&) = delete;
 
@@ -118,13 +118,13 @@ namespace __detail
       { _M_h._M_deallocate_nodes(_M_nodes); }
 
       template<typename _Arg>
-	__node_type*
+	__node_pointer
 	operator()(_Arg&& __arg) const
 	{
 	  if (_M_nodes)
 	    {
-	      __node_type* __node = _M_nodes;
-	      _M_nodes = _M_nodes->_M_next();
+	      __node_pointer __node = _M_nodes;
+	      _M_nodes = _M_nodes->_M_nxt;
 	      __node->_M_nxt = nullptr;
 	      auto& __a = _M_h._M_node_allocator();
 	      __node_alloc_traits::destroy(__a, __node->_M_valptr());
@@ -144,7 +144,7 @@ namespace __detail
 	}
 
     private:
-      mutable __node_type* _M_nodes;
+      mutable __node_pointer _M_nodes;
       __hashtable_alloc& _M_h;
     };
 
@@ -155,14 +155,14 @@ namespace __detail
     {
     private:
       using __hashtable_alloc = _Hashtable_alloc<_NodeAlloc>;
-      using __node_type = typename __hashtable_alloc::__node_type;
+      using __node_pointer = typename __hashtable_alloc::__node_pointer;
 
     public:
       _AllocNode(__hashtable_alloc& __h)
       : _M_h(__h) { }
 
       template<typename _Arg>
-	__node_type*
+	__node_pointer
 	operator()(_Arg&& __arg) const
 	{ return _M_h._M_allocate_node(std::forward<_Arg>(__arg)); }
 
@@ -211,14 +211,17 @@ namespace __detail
    *  nodes also store a hash code. In some cases (e.g. strings) this
    *  may be a performance win.
    */
-  struct _Hash_node_base
-  {
-    _Hash_node_base* _M_nxt;
+  template<typename _NodePtr>
+    struct _Hash_node_base
+    {
+      using __node_pointer = _NodePtr;
 
-    _Hash_node_base() noexcept : _M_nxt() { }
+      __node_pointer _M_nxt;
 
-    _Hash_node_base(_Hash_node_base* __next) noexcept : _M_nxt(__next) { }
-  };
+      _Hash_node_base() noexcept : _M_nxt() { }
+
+      _Hash_node_base(__node_pointer __next) noexcept : _M_nxt(__next) { }
+    };
 
   /**
    *  struct _Hash_node_value_base
@@ -226,7 +229,7 @@ namespace __detail
    *  Node type with the value to store.
    */
   template<typename _Value>
-    struct _Hash_node_value_base : _Hash_node_base
+    struct _Hash_node_value_base
     {
       typedef _Value value_type;
 
@@ -250,54 +253,49 @@ namespace __detail
     };
 
   /**
-   *  Primary template struct _Hash_node.
+   *  Primary template struct _Hash_node_code_cache.
    */
-  template<typename _Value, bool _Cache_hash_code>
-    struct _Hash_node;
+  template<bool _Cache_hash_code>
+    struct _Hash_node_code_cache
+    { };
 
   /**
-   *  Specialization for nodes with caches, struct _Hash_node.
-   *
-   *  Base class is __detail::_Hash_node_value_base.
+   *  Specialization for nodes with a cached hash code, struct _Hash_node_code_cache.
    */
-  template<typename _Value>
-    struct _Hash_node<_Value, true> : _Hash_node_value_base<_Value>
-    {
-      std::size_t  _M_hash_code;
+  template<>
+    struct _Hash_node_code_cache<true>
+    { std::size_t  _M_hash_code; };
 
-      _Hash_node*
-      _M_next() const noexcept
-      { return static_cast<_Hash_node*>(this->_M_nxt); }
-    };
+  template<typename _Value, bool _Cache_hash_code>
+    struct _Hash_node_value
+    : _Hash_node_value_base<_Value>
+    , _Hash_node_code_cache<_Cache_hash_code>
+    { };
 
   /**
-   *  Specialization for nodes without caches, struct _Hash_node.
-   *
-   *  Base class is __detail::_Hash_node_value_base.
+   *  Primary template struct _Hash_node.
    */
-  template<typename _Value>
-    struct _Hash_node<_Value, false> : _Hash_node_value_base<_Value>
-    {
-      _Hash_node*
-      _M_next() const noexcept
-      { return static_cast<_Hash_node*>(this->_M_nxt); }
-    };
+  template<typename _Ptr, bool _Cache_hash_code>
+    struct _Hash_node
+    : _Hash_node_base<__ptr_rebind<_Ptr,
+				   _Hash_node<_Ptr, _Cache_hash_code>>>
+    , _Hash_node_value<typename std::pointer_traits<_Ptr>::element_type,
+		       _Cache_hash_code>
+    { };
 
   /// Base class for node iterators.
-  template<typename _Value, bool _Cache_hash_code>
+  template<typename _NodePtr>
     struct _Node_iterator_base
     {
-      using __node_type = _Hash_node<_Value, _Cache_hash_code>;
-
-      __node_type*  _M_cur;
+      _NodePtr _M_cur;
 
       _Node_iterator_base() = default;
-      _Node_iterator_base(__node_type* __p) noexcept
+      _Node_iterator_base(_NodePtr __p) noexcept
       : _M_cur(__p) { }
 
       void
       _M_incr() noexcept
-      { _M_cur = _M_cur->_M_next(); }
+      { _M_cur = _M_cur->_M_nxt; }
 
       friend bool
       operator==(const _Node_iterator_base& __x, const _Node_iterator_base& __y)
@@ -313,30 +311,31 @@ namespace __detail
     };
 
   /// Node iterators, used to iterate through all the hashtable.
-  template<typename _Value, bool __constant_iterators, bool __cache>
+  template<typename _NodePtr, bool __constant_iterators>
     struct _Node_iterator
-    : public _Node_iterator_base<_Value, __cache>
+    : public _Node_iterator_base<_NodePtr>
     {
     private:
-      using __base_type = _Node_iterator_base<_Value, __cache>;
-      using __node_type = typename __base_type::__node_type;
+      using __base_type = _Node_iterator_base<_NodePtr>;
+      using __node_type = typename std::pointer_traits<_NodePtr>::element_type;
 
     public:
-      typedef _Value					value_type;
-      typedef std::ptrdiff_t				difference_type;
+      typedef typename __node_type::value_type		value_type;
+      typedef typename std::pointer_traits<_NodePtr>::difference_type
+							difference_type;
       typedef std::forward_iterator_tag			iterator_category;
 
       using pointer = typename std::conditional<__constant_iterators,
-						const _Value*, _Value*>::type;
+				  const value_type*, value_type*>::type;
 
       using reference = typename std::conditional<__constant_iterators,
-						  const _Value&, _Value&>::type;
+				  const value_type&, value_type&>::type;
 
       _Node_iterator() noexcept
       : __base_type(nullptr) { }
 
       explicit
-      _Node_iterator(__node_type* __p) noexcept
+      _Node_iterator(_NodePtr __p) noexcept
       : __base_type(__p) { }
 
       reference
@@ -364,31 +363,32 @@ namespace __detail
     };
 
   /// Node const_iterators, used to iterate through all the hashtable.
-  template<typename _Value, bool __constant_iterators, bool __cache>
+  template<typename _NodePtr, bool __constant_iterators>
     struct _Node_const_iterator
-    : public _Node_iterator_base<_Value, __cache>
+    : public _Node_iterator_base<_NodePtr>
     {
     private:
-      using __base_type = _Node_iterator_base<_Value, __cache>;
-      using __node_type = typename __base_type::__node_type;
+      using __base_type = _Node_iterator_base<_NodePtr>;
+      using __node_type = typename std::pointer_traits<_NodePtr>::element_type;
 
     public:
-      typedef _Value					value_type;
-      typedef std::ptrdiff_t				difference_type;
+      typedef typename __node_type::value_type		value_type;
+      typedef typename std::pointer_traits<_NodePtr>::difference_type
+							difference_type;
       typedef std::forward_iterator_tag			iterator_category;
 
-      typedef const _Value*				pointer;
-      typedef const _Value&				reference;
+      typedef const value_type*				pointer;
+      typedef const value_type&				reference;
 
       _Node_const_iterator() noexcept
       : __base_type(nullptr) { }
 
       explicit
-      _Node_const_iterator(__node_type* __p) noexcept
+      _Node_const_iterator(_NodePtr __p) noexcept
       : __base_type(__p) { }
 
-      _Node_const_iterator(const _Node_iterator<_Value, __constant_iterators,
-			   __cache>& __x) noexcept
+      _Node_const_iterator(const _Node_iterator<_NodePtr,
+			   __constant_iterators>& __x) noexcept
       : __base_type(__x._M_cur) { }
 
       reference
@@ -670,11 +670,9 @@ namespace __detail
 				     _Unused, _RehashPolicy, _Traits>;
 
       using __hash_code = typename __hashtable_base::__hash_code;
-      using __node_type = typename __hashtable_base::__node_type;
 
     public:
       using key_type = typename __hashtable_base::key_type;
-      using iterator = typename __hashtable_base::iterator;
       using mapped_type = typename std::tuple_element<1, _Pair>::type;
 
       mapped_type&
@@ -704,7 +702,7 @@ namespace __detail
       __hashtable* __h = static_cast<__hashtable*>(this);
       __hash_code __code = __h->_M_hash_code(__k);
       std::size_t __bkt = __h->_M_bucket_index(__code);
-      if (__node_type* __node = __h->_M_find_node(__bkt, __k, __code))
+      if (auto __node = __h->_M_find_node(__bkt, __k, __code))
 	return __node->_M_v().second;
 
       typename __hashtable::_Scoped_node __node {
@@ -731,7 +729,7 @@ namespace __detail
       __hashtable* __h = static_cast<__hashtable*>(this);
       __hash_code __code = __h->_M_hash_code(__k);
       std::size_t __bkt = __h->_M_bucket_index(__code);
-      if (__node_type* __node = __h->_M_find_node(__bkt, __k, __code))
+      if (auto __node = __h->_M_find_node(__bkt, __k, __code))
 	return __node->_M_v().second;
 
       typename __hashtable::_Scoped_node __node {
@@ -800,15 +798,25 @@ namespace __detail
 				     _Hash, _RangeHash,
 				     _Unused, _RehashPolicy, _Traits>;
 
+      using __hash_cached = typename _Traits::__hash_cached;
+      using __constant_iterators = typename _Traits::__constant_iterators;
+      using __unique_keys = typename _Traits::__unique_keys;
+
+      using __hashtable_alloc = _Hashtable_alloc<
+	__alloc_rebind<_Alloc, _Hash_node<
+#if __cplusplus > 201703L || defined __STRICT_ANSI__
+			 typename std::allocator_traits<_Alloc>::pointer,
+#else
+			 typename std::allocator_traits<
+			   __alloc_rebind<_Alloc, _Value>>::pointer,
+#endif
+				 __hash_cached::value>>>;
+
       using value_type = typename __hashtable_base::value_type;
-      using iterator = typename __hashtable_base::iterator;
-      using const_iterator =  typename __hashtable_base::const_iterator;
       using size_type = typename __hashtable_base::size_type;
 
-      using __unique_keys = typename __hashtable_base::__unique_keys;
-      using __ireturn_type = typename __hashtable_base::__ireturn_type;
-      using __node_type = _Hash_node<_Value, _Traits::__hash_cached::value>;
-      using __node_alloc_type = __alloc_rebind<_Alloc, __node_type>;
+      using __node_pointer = typename __hashtable_alloc::__node_pointer;
+      using __node_alloc_type = typename __hashtable_alloc::__node_alloc_type;
       using __node_gen_type = _AllocNode<__node_alloc_type>;
 
       __hashtable&
@@ -826,6 +834,16 @@ namespace __detail
 			const _NodeGetter&, false_type __uks);
 
     public:
+      using iterator = _Node_iterator<__node_pointer,
+				      __constant_iterators::value>;
+
+      using const_iterator = _Node_const_iterator<__node_pointer,
+						  __constant_iterators::value>;
+
+      using __ireturn_type = typename std::conditional<__unique_keys::value,
+						     std::pair<iterator, bool>,
+						     iterator>::type;
+
       __ireturn_type
       insert(const value_type& __v)
       {
@@ -849,7 +867,7 @@ namespace __detail
 	  __hashtable& __h = _M_conjure_hashtable();
 	  auto __code = __h._M_hash_code(__k);
 	  std::size_t __bkt = __h._M_bucket_index(__code);
-	  if (__node_type* __node = __h._M_find_node(__bkt, __k, __code))
+	  if (__node_pointer __node = __h._M_find_node(__bkt, __k, __code))
 	    return { iterator(__node), false };
 
 	  typename __hashtable::_Scoped_node __node {
@@ -957,16 +975,12 @@ namespace __detail
 				       _Equal, _Hash, _RangeHash, _Unused,
 				       _RehashPolicy, _Traits>;
 
-      using __hashtable_base = _Hashtable_base<_Key, _Value, _ExtractKey,
-					       _Equal, _Hash, _RangeHash,
-					       _Unused, _Traits>;
-
       using value_type = typename __base_type::value_type;
       using iterator = typename __base_type::iterator;
       using const_iterator =  typename __base_type::const_iterator;
+      using __ireturn_type = typename __base_type::__ireturn_type;
 
       using __unique_keys = typename __base_type::__unique_keys;
-      using __ireturn_type = typename __hashtable_base::__ireturn_type;
       using __hashtable = typename __base_type::__hashtable;
       using __node_gen_type = typename __base_type::__node_gen_type;
 
@@ -1153,7 +1167,7 @@ namespace __detail
    *  Base class for local iterators, used to iterate within a bucket
    *  but not between buckets.
    */
-  template<typename _Key, typename _Value, typename _ExtractKey,
+  template<typename _Key, typename _NodePtr, typename _ExtractKey,
 	   typename _Hash, typename _RangeHash, typename _Unused,
 	   bool __cache_hash_code>
     struct _Local_iterator_base;
@@ -1175,30 +1189,16 @@ namespace __detail
    *  is inherited in some cases by the _Local_iterator_base type used
    *  to implement local_iterator and const_local_iterator. As with
    *  any iterator type we prefer to make it as small as possible.
-   *
-   *  Primary template is unused except as a hook for specializations.
    */
   template<typename _Key, typename _Value, typename _ExtractKey,
 	   typename _Hash, typename _RangeHash, typename _Unused,
 	   bool __cache_hash_code>
-    struct _Hash_code_base;
-
-  /// Specialization: hash function and range-hashing function, no
-  /// caching of hash codes.
-  /// Provides typedef and accessor required by C++ 11.
-  template<typename _Key, typename _Value, typename _ExtractKey,
-	   typename _Hash, typename _RangeHash, typename _Unused>
-    struct _Hash_code_base<_Key, _Value, _ExtractKey, _Hash, _RangeHash,
-			   _Unused, false>
+    struct _Hash_code_base
     : private _Hashtable_ebo_helper<1, _Hash>
     {
     private:
       using __ebo_hash = _Hashtable_ebo_helper<1, _Hash>;
 
-      // Gives the local iterator implementation access to _M_bucket_index().
-      friend struct _Local_iterator_base<_Key, _Value, _ExtractKey,
-					 _Hash, _RangeHash, _Unused, false>;
-
     public:
       typedef _Hash					hasher;
 
@@ -1208,7 +1208,6 @@ namespace __detail
 
     protected:
       typedef std::size_t 				__hash_code;
-      typedef _Hash_node<_Value, false>			__node_type;
 
       // We need the default constructor for the local iterators and _Hashtable
       // default constructor.
@@ -1228,83 +1227,40 @@ namespace __detail
       { return _RangeHash{}(__c, __bkt_count); }
 
       std::size_t
-      _M_bucket_index(const __node_type* __p, std::size_t __bkt_count) const
+      _M_bucket_index(const _Hash_node_value<_Value, false>& __n,
+		      std::size_t __bkt_count) const
 	noexcept( noexcept(declval<const _Hash&>()(declval<const _Key&>()))
 		  && noexcept(declval<const _RangeHash&>()((__hash_code)0,
 							   (std::size_t)0)) )
       {
-	return _RangeHash{}(_M_hash()(_ExtractKey{}(__p->_M_v())),
+	return _RangeHash{}(_M_hash_code(_ExtractKey{}(__n._M_v())),
 			    __bkt_count);
       }
 
-      void
-      _M_store_code(__node_type*, __hash_code) const
-      { }
+      std::size_t
+      _M_bucket_index(const _Hash_node_value<_Value, true>& __n,
+		      std::size_t __bkt_count) const
+	noexcept( noexcept(declval<const _RangeHash&>()((__hash_code)0,
+							(std::size_t)0)) )
+      { return _RangeHash{}(__n._M_hash_code, __bkt_count); }
 
       void
-      _M_copy_code(__node_type*, const __node_type*) const
+      _M_store_code(_Hash_node_code_cache<false>&, __hash_code) const
       { }
 
       void
-      _M_swap(_Hash_code_base& __x)
-      { std::swap(__ebo_hash::_M_get(), __x.__ebo_hash::_M_get()); }
-
-      const _Hash&
-      _M_hash() const { return __ebo_hash::_M_cget(); }
-    };
-
-  /// Specialization: hash function and range-hashing function,
-  /// caching hash codes.  H is provided but ignored.  Provides
-  /// typedef and accessor required by C++ 11.
-  template<typename _Key, typename _Value, typename _ExtractKey,
-	   typename _Hash, typename _RangeHash, typename _Unused>
-    struct _Hash_code_base<_Key, _Value, _ExtractKey, _Hash, _RangeHash,
-			   _Unused, true>
-    : private _Hashtable_ebo_helper<1, _Hash>
-    {
-    private:
-      using __ebo_hash = _Hashtable_ebo_helper<1, _Hash>;
-
-    public:
-      typedef _Hash					hasher;
-
-      hasher
-      hash_function() const
-      { return _M_hash(); }
-
-    protected:
-      typedef std::size_t 				__hash_code;
-      typedef _Hash_node<_Value, true>			__node_type;
-
-      // We need the default constructor for _Hashtable default constructor.
-      _Hash_code_base() = default;
-      _Hash_code_base(const _Hash& __hash) : __ebo_hash(__hash) { }
-
-      __hash_code
-      _M_hash_code(const _Key& __k) const
-      {
-	static_assert(__is_invocable<const _Hash&, const _Key&>{},
-	    "hash function must be invocable with an argument of key type");
-	return _M_hash()(__k);
-      }
-
-      std::size_t
-      _M_bucket_index(__hash_code __c, std::size_t __bkt_count) const
-      { return _RangeHash{}(__c, __bkt_count); }
-
-      std::size_t
-      _M_bucket_index(const __node_type* __p, std::size_t __bkt_count) const
-	noexcept( noexcept(declval<const _RangeHash&>()((__hash_code)0,
-							(std::size_t)0)) )
-      { return _RangeHash{}(__p->_M_hash_code, __bkt_count); }
+      _M_copy_code(_Hash_node_code_cache<false>&,
+		   const _Hash_node_code_cache<false>&) const
+      { }
 
       void
-      _M_store_code(__node_type* __n, __hash_code __c) const
-      { __n->_M_hash_code = __c; }
+      _M_store_code(_Hash_node_code_cache<true>& __n, __hash_code __c) const
+      { __n._M_hash_code = __c; }
 
       void
-      _M_copy_code(__node_type* __to, const __node_type* __from) const
-      { __to->_M_hash_code = __from->_M_hash_code; }
+      _M_copy_code(_Hash_node_code_cache<true>& __to,
+		   const _Hash_node_code_cache<true>& __from) const
+      { __to._M_hash_code = __from._M_hash_code; }
 
       void
       _M_swap(_Hash_code_base& __x)
@@ -1315,20 +1271,17 @@ namespace __detail
     };
 
   /// Partial specialization used when nodes contain a cached hash code.
-  template<typename _Key, typename _Value, typename _ExtractKey,
+  template<typename _Key, typename _NodePtr, typename _ExtractKey,
 	   typename _Hash, typename _RangeHash, typename _Unused>
-    struct _Local_iterator_base<_Key, _Value, _ExtractKey,
+    struct _Local_iterator_base<_Key, _NodePtr, _ExtractKey,
 				_Hash, _RangeHash, _Unused, true>
-    : public _Node_iterator_base<_Value, true>
+    : public _Node_iterator_base<_NodePtr>
     {
     protected:
-      using __base_node_iter = _Node_iterator_base<_Value, true>;
-      using __hash_code_base = _Hash_code_base<_Key, _Value, _ExtractKey,
-					      _Hash, _RangeHash, _Unused, true>;
+      using __base_node_iter = _Node_iterator_base<_NodePtr>;
 
       _Local_iterator_base() = default;
-      _Local_iterator_base(const __hash_code_base&,
-			   _Hash_node<_Value, true>* __p,
+      _Local_iterator_base(const _Hash&, _NodePtr __p,
 			   std::size_t __bkt, std::size_t __bkt_count)
       : __base_node_iter(__p), _M_bucket(__bkt), _M_bucket_count(__bkt_count)
       { }
@@ -1354,12 +1307,12 @@ namespace __detail
       _M_get_bucket() const { return _M_bucket; }  // for debug mode
     };
 
-  // Uninitialized storage for a _Hash_code_base.
+  // Uninitialized storage for a _Hash.
   // This type is DefaultConstructible and Assignable even if the
-  // _Hash_code_base type isn't, so that _Local_iterator_base<..., false>
+  // _Hash type isn't, so that _Local_iterator_base<..., false>
   // can be DefaultConstructible and Assignable.
   template<typename _Tp, bool _IsEmpty = std::is_empty<_Tp>::value>
-    struct _Hash_code_storage
+    struct _Hash_storage
     {
       __gnu_cxx::__aligned_buffer<_Tp> _M_storage;
 
@@ -1370,9 +1323,9 @@ namespace __detail
       _M_h() const { return _M_storage._M_ptr(); }
     };
 
-  // Empty partial specialization for empty _Hash_code_base types.
+  // Empty partial specialization for empty _Hash types.
   template<typename _Tp>
-    struct _Hash_code_storage<_Tp, true>
+    struct _Hash_storage<_Tp, true>
     {
       static_assert( std::is_empty<_Tp>::value, "Type must be empty" );
 
@@ -1385,33 +1338,23 @@ namespace __detail
       _M_h() const { return reinterpret_cast<const _Tp*>(this); }
     };
 
-  template<typename _Key, typename _Value, typename _ExtractKey,
-	   typename _Hash, typename _RangeHash, typename _Unused>
-    using __hash_code_for_local_iter
-      = _Hash_code_storage<_Hash_code_base<_Key, _Value, _ExtractKey,
-					   _Hash, _RangeHash, _Unused, false>>;
-
   // Partial specialization used when hash codes are not cached
-  template<typename _Key, typename _Value, typename _ExtractKey,
+  template<typename _Key, typename _NodePtr, typename _ExtractKey,
 	   typename _Hash, typename _RangeHash, typename _Unused>
-    struct _Local_iterator_base<_Key, _Value, _ExtractKey,
+    struct _Local_iterator_base<_Key, _NodePtr, _ExtractKey,
 				_Hash, _RangeHash, _Unused, false>
-    : __hash_code_for_local_iter<_Key, _Value, _ExtractKey, _Hash, _RangeHash,
-				 _Unused>
-    , _Node_iterator_base<_Value, false>
+    : _Hash_storage<_Hash>
+    , _Node_iterator_base<_NodePtr>
     {
     protected:
-      using __hash_code_base = _Hash_code_base<_Key, _Value, _ExtractKey,
-					     _Hash, _RangeHash, _Unused, false>;
-      using __node_iter_base = _Node_iterator_base<_Value, false>;
+      using __node_iter_base = _Node_iterator_base<_NodePtr>;
 
       _Local_iterator_base() : _M_bucket_count(-1) { }
 
-      _Local_iterator_base(const __hash_code_base& __base,
-			   _Hash_node<_Value, false>* __p,
+      _Local_iterator_base(const _Hash& __hasher, _NodePtr __p,
 			   std::size_t __bkt, std::size_t __bkt_count)
       : __node_iter_base(__p), _M_bucket(__bkt), _M_bucket_count(__bkt_count)
-      { _M_init(__base); }
+      { _M_init(__hasher); }
 
       ~_Local_iterator_base()
       {
@@ -1420,7 +1363,7 @@ namespace __detail
       }
 
       _Local_iterator_base(const _Local_iterator_base& __iter)
-      : __node_iter_base(__iter), _M_bucket(__iter._M_bucket)
+      : __node_iter_base(__iter._M_cur), _M_bucket(__iter._M_bucket)
       , _M_bucket_count(__iter._M_bucket_count)
       {
 	if (_M_bucket_count != -1)
@@ -1446,8 +1389,9 @@ namespace __detail
 	__node_iter_base::_M_incr();
 	if (this->_M_cur)
 	  {
-	    std::size_t __bkt = this->_M_h()->_M_bucket_index(this->_M_cur,
-							      _M_bucket_count);
+	    std::size_t __bkt =
+	      _RangeHash{}((*this->_M_h())(_ExtractKey{}(this->_M_cur->_M_v())),
+			   _M_bucket_count);
 	    if (__bkt != _M_bucket)
 	      this->_M_cur = nullptr;
 	  }
@@ -1457,11 +1401,11 @@ namespace __detail
       std::size_t _M_bucket_count;
 
       void
-      _M_init(const __hash_code_base& __base)
-      { ::new(this->_M_h()) __hash_code_base(__base); }
+      _M_init(const _Hash& __hasher)
+      { ::new(this->_M_h()) _Hash(__hasher); }
 
       void
-      _M_destroy() { this->_M_h()->~__hash_code_base(); }
+      _M_destroy() { this->_M_h()->~_Hash(); }
 
     public:
       std::size_t
@@ -1469,35 +1413,35 @@ namespace __detail
     };
 
   /// local iterators
-  template<typename _Key, typename _Value, typename _ExtractKey,
+  template<typename _Key, typename _NodePtr, typename _ExtractKey,
 	   typename _Hash, typename _RangeHash, typename _Unused,
 	   bool __constant_iterators, bool __cache>
     struct _Local_iterator
-    : public _Local_iterator_base<_Key, _Value, _ExtractKey,
+    : public _Local_iterator_base<_Key, _NodePtr, _ExtractKey,
 				  _Hash, _RangeHash, _Unused, __cache>
     {
     private:
-      using __base_type = _Local_iterator_base<_Key, _Value, _ExtractKey,
-					   _Hash, _RangeHash, _Unused, __cache>;
-      using __hash_code_base = typename __base_type::__hash_code_base;
+      using __base_type = _Local_iterator_base<_Key, _NodePtr, _ExtractKey,
+				_Hash, _RangeHash, _Unused, __cache>;
+      using __node_type = typename std::pointer_traits<_NodePtr>::element_type;
 
     public:
-      typedef _Value					value_type;
+      typedef typename __node_type::value_type		value_type;
       typedef typename std::conditional<__constant_iterators,
-					const _Value*, _Value*>::type
+					const value_type*, value_type*>::type
 							pointer;
       typedef typename std::conditional<__constant_iterators,
-					const _Value&, _Value&>::type
+					const value_type&, value_type&>::type
 							reference;
       typedef std::ptrdiff_t				difference_type;
       typedef std::forward_iterator_tag			iterator_category;
 
       _Local_iterator() = default;
 
-      _Local_iterator(const __hash_code_base& __base,
-		      _Hash_node<_Value, __cache>* __n,
+      _Local_iterator(const _Hash& __hasher,
+		      _NodePtr __n,
 		      std::size_t __bkt, std::size_t __bkt_count)
-      : __base_type(__base, __n, __bkt, __bkt_count)
+      : __base_type(__hasher, __n, __bkt, __bkt_count)
       { }
 
       reference
@@ -1525,37 +1469,37 @@ namespace __detail
     };
 
   /// local const_iterators
-  template<typename _Key, typename _Value, typename _ExtractKey,
+  template<typename _Key, typename _NodePtr, typename _ExtractKey,
 	   typename _Hash, typename _RangeHash, typename _Unused,
 	   bool __constant_iterators, bool __cache>
     struct _Local_const_iterator
-    : public _Local_iterator_base<_Key, _Value, _ExtractKey,
+    : public _Local_iterator_base<_Key, _NodePtr, _ExtractKey,
 				  _Hash, _RangeHash, _Unused, __cache>
     {
     private:
-      using __base_type = _Local_iterator_base<_Key, _Value, _ExtractKey,
-					   _Hash, _RangeHash, _Unused, __cache>;
-      using __hash_code_base = typename __base_type::__hash_code_base;
+      using __base_type = _Local_iterator_base<_Key, _NodePtr, _ExtractKey,
+				_Hash, _RangeHash, _Unused, __cache>;
+      using __node_type = typename std::pointer_traits<_NodePtr>::element_type;
 
     public:
-      typedef _Value					value_type;
-      typedef const _Value*				pointer;
-      typedef const _Value&				reference;
+      typedef typename __node_type::value_type		value_type;
+      typedef const value_type*				pointer;
+      typedef const value_type&				reference;
       typedef std::ptrdiff_t				difference_type;
       typedef std::forward_iterator_tag			iterator_category;
 
       _Local_const_iterator() = default;
 
-      _Local_const_iterator(const __hash_code_base& __base,
-			    _Hash_node<_Value, __cache>* __n,
+      _Local_const_iterator(const _Hash& __hasher,
+			    _NodePtr __n,
 			    std::size_t __bkt, std::size_t __bkt_count)
-      : __base_type(__base, __n, __bkt, __bkt_count)
+      : __base_type(__hasher, __n, __bkt, __bkt_count)
       { }
 
-      _Local_const_iterator(const _Local_iterator<_Key, _Value, _ExtractKey,
-						  _Hash, _RangeHash, _Unused,
-						  __constant_iterators,
-						  __cache>& __x)
+      _Local_const_iterator(const _Local_iterator<_Key, _NodePtr, _ExtractKey,
+						_Hash, _RangeHash, _Unused,
+						__constant_iterators,
+						__cache>& __x)
       : __base_type(__x)
       { }
 
@@ -1599,110 +1543,80 @@ namespace __detail
     struct _Hashtable_base
     : public _Hash_code_base<_Key, _Value, _ExtractKey, _Hash, _RangeHash,
 			     _Unused, _Traits::__hash_cached::value>,
-    private _Hashtable_ebo_helper<0, _Equal>
-  {
-  public:
-    typedef _Key					key_type;
-    typedef _Value					value_type;
-    typedef _Equal					key_equal;
-    typedef std::size_t					size_type;
-    typedef std::ptrdiff_t				difference_type;
-
-    using __traits_type = _Traits;
-    using __hash_cached = typename __traits_type::__hash_cached;
-    using __constant_iterators = typename __traits_type::__constant_iterators;
-    using __unique_keys = typename __traits_type::__unique_keys;
-
-    using __hash_code_base = _Hash_code_base<_Key, _Value, _ExtractKey,
-					     _Hash, _RangeHash, _Unused,
-					     __hash_cached::value>;
-
-    using __hash_code = typename __hash_code_base::__hash_code;
-    using __node_type = typename __hash_code_base::__node_type;
-
-    using iterator = _Node_iterator<value_type,
-				    __constant_iterators::value,
-				    __hash_cached::value>;
-
-    using const_iterator = _Node_const_iterator<value_type,
-						__constant_iterators::value,
-						__hash_cached::value>;
-
-    using local_iterator = _Local_iterator<key_type, value_type,
-					_ExtractKey, _Hash, _RangeHash, _Unused,
-					   __constant_iterators::value,
-					   __hash_cached::value>;
-
-    using const_local_iterator = _Local_const_iterator<key_type, value_type,
-					_ExtractKey, _Hash, _RangeHash, _Unused,
-					__constant_iterators::value,
-						       __hash_cached::value>;
-
-    using __ireturn_type = typename std::conditional<__unique_keys::value,
-						     std::pair<iterator, bool>,
-						     iterator>::type;
-  private:
-    using _EqualEBO = _Hashtable_ebo_helper<0, _Equal>;
+      private _Hashtable_ebo_helper<0, _Equal>
+    {
+    public:
+      typedef _Key					key_type;
+      typedef _Value					value_type;
+      typedef _Equal					key_equal;
+      typedef std::size_t				size_type;
+      typedef std::ptrdiff_t				difference_type;
 
-    template<typename _NodeT>
-      struct _Equal_hash_code
-      {
-	static bool
-	_S_equals(__hash_code, const _NodeT&)
-	{ return true; }
+      using __traits_type = _Traits;
+      using __hash_cached = typename __traits_type::__hash_cached;
 
-	static bool
-	_S_node_equals(const _NodeT&, const _NodeT&)
-	{ return true; }
-      };
+      using __hash_code_base = _Hash_code_base<_Key, _Value, _ExtractKey,
+					       _Hash, _RangeHash, _Unused,
+					       __hash_cached::value>;
 
-    template<typename _Ptr2>
-      struct _Equal_hash_code<_Hash_node<_Ptr2, true>>
-      {
-	static bool
-	_S_equals(__hash_code __c, const _Hash_node<_Ptr2, true>& __n)
-	{ return __c == __n._M_hash_code; }
-
-	static bool
-	_S_node_equals(const _Hash_node<_Ptr2, true>& __lhn,
-		       const _Hash_node<_Ptr2, true>& __rhn)
-	{ return __lhn._M_hash_code == __rhn._M_hash_code; }
-      };
+      using __hash_code = typename __hash_code_base::__hash_code;
 
-  protected:
-    _Hashtable_base() = default;
-    _Hashtable_base(const _Hash& __hash, const _Equal& __eq)
-    : __hash_code_base(__hash), _EqualEBO(__eq)
-    { }
+    private:
+      using _EqualEBO = _Hashtable_ebo_helper<0, _Equal>;
 
-    bool
-    _M_equals(const _Key& __k, __hash_code __c, const __node_type* __n) const
-    {
-      static_assert(__is_invocable<const _Equal&, const _Key&, const _Key&>{},
+      static bool
+      _S_equals(__hash_code, const _Hash_node_code_cache<false>&)
+      { return true; }
+
+      static bool
+      _S_node_equals(const _Hash_node_code_cache<false>&,
+		     const _Hash_node_code_cache<false>&)
+      { return true; }
+
+      static bool
+      _S_equals(__hash_code __c, const _Hash_node_code_cache<true>& __n)
+      { return __c == __n._M_hash_code; }
+
+      static bool
+      _S_node_equals(const _Hash_node_code_cache<true>& __lhn,
+		     const _Hash_node_code_cache<true>& __rhn)
+      { return __lhn._M_hash_code == __rhn._M_hash_code; }
+
+    protected:
+      _Hashtable_base() = default;
+      _Hashtable_base(const _Hash& __hash, const _Equal& __eq)
+      : __hash_code_base(__hash), _EqualEBO(__eq)
+      { }
+
+      bool
+      _M_equals(const _Key& __k, __hash_code __c,
+		const _Hash_node_value<_Value, __hash_cached::value>& __n) const
+      {
+	static_assert(__is_invocable<const _Equal&, const _Key&, const _Key&>{},
 	  "key equality predicate must be invocable with two arguments of "
 	  "key type");
-      return _Equal_hash_code<__node_type>::_S_equals(__c, *__n)
-	&& _M_eq()(__k, _ExtractKey{}(__n->_M_v()));
-    }
+	return _S_equals(__c, __n) && _M_eq()(__k, _ExtractKey{}(__n._M_v()));
+      }
 
-    bool
-    _M_node_equals(const __node_type* __lhn, const __node_type* __rhn) const
-    {
-      return _Equal_hash_code<__node_type>::_S_node_equals(*__lhn, *__rhn)
-	&& _M_eq()(_ExtractKey{}(__lhn->_M_v()),
-		   _ExtractKey{}(__rhn->_M_v()));
-    }
+      bool
+      _M_node_equals(
+	const _Hash_node_value<_Value, __hash_cached::value>& __lhn,
+	const _Hash_node_value<_Value, __hash_cached::value>& __rhn) const
+      {
+	return _S_node_equals(__lhn, __rhn)
+	  && _M_eq()(_ExtractKey{}(__lhn._M_v()), _ExtractKey{}(__rhn._M_v()));
+      }
 
-    void
-    _M_swap(_Hashtable_base& __x)
-    {
-      __hash_code_base::_M_swap(__x);
-      std::swap(_EqualEBO::_M_get(), __x._EqualEBO::_M_get());
-    }
+      void
+      _M_swap(_Hashtable_base& __x)
+      {
+	__hash_code_base::_M_swap(__x);
+	std::swap(_EqualEBO::_M_get(), __x._EqualEBO::_M_get());
+      }
 
-    const _Equal&
-    _M_eq() const { return _EqualEBO::_M_cget(); }
-  };
+      const _Equal&
+      _M_eq() const { return _EqualEBO::_M_cget(); }
+    };
 
   /**
    *  Primary class template  _Equality.
@@ -1744,27 +1658,24 @@ namespace __detail
 	      _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits, true>::
     _M_equal(const __hashtable& __other) const
     {
-      using __node_base = typename __hashtable::__node_base;
-      using __node_type = typename __hashtable::__node_type;
       const __hashtable* __this = static_cast<const __hashtable*>(this);
       if (__this->size() != __other.size())
 	return false;
 
       for (auto __itx = __this->begin(); __itx != __this->end(); ++__itx)
 	{
-	  std::size_t __ybkt = __other._M_bucket_index(__itx._M_cur);
-	  __node_base* __prev_n = __other._M_buckets[__ybkt];
+	  std::size_t __ybkt = __other._M_bucket_index(*__itx._M_cur);
+	  auto __prev_n = __other._M_buckets[__ybkt];
 	  if (!__prev_n)
 	    return false;
 
-	  for (__node_type* __n = static_cast<__node_type*>(__prev_n->_M_nxt);;
-	       __n = __n->_M_next())
+	  for (auto __n = __prev_n->_M_nxt;; __n = __n->_M_nxt)
 	    {
 	      if (__n->_M_v() == *__itx)
 		break;
 
 	      if (!__n->_M_nxt
-		  || __other._M_bucket_index(__n->_M_next()) != __ybkt)
+		  || __other._M_bucket_index(*__n->_M_nxt) != __ybkt)
 		return false;
 	    }
 	}
@@ -1797,8 +1708,6 @@ namespace __detail
 	      _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits, false>::
     _M_equal(const __hashtable& __other) const
     {
-      using __node_base = typename __hashtable::__node_base;
-      using __node_type = typename __hashtable::__node_type;
       const __hashtable* __this = static_cast<const __hashtable*>(this);
       if (__this->size() != __other.size())
 	return false;
@@ -1813,24 +1722,24 @@ namespace __detail
 	       ++__itx_end)
 	    ++__x_count;
 
-	  std::size_t __ybkt = __other._M_bucket_index(__itx._M_cur);
-	  __node_base* __y_prev_n = __other._M_buckets[__ybkt];
+	  std::size_t __ybkt = __other._M_bucket_index(*__itx._M_cur);
+	  auto __y_prev_n = __other._M_buckets[__ybkt];
 	  if (!__y_prev_n)
 	    return false;
 
-	  __node_type* __y_n = static_cast<__node_type*>(__y_prev_n->_M_nxt);
+	  auto __y_n = __y_prev_n->_M_nxt;
 	  for (;;)
 	    {
 	      if (__this->key_eq()(_ExtractKey{}(__y_n->_M_v()),
 				   _ExtractKey{}(*__itx)))
 		break;
 
-	      __node_type* __y_ref_n = __y_n;
-	      for (__y_n = __y_n->_M_next(); __y_n; __y_n = __y_n->_M_next())
-		if (!__other._M_node_equals(__y_ref_n, __y_n))
+	      auto __y_ref_n = __y_n;
+	      for (__y_n = __y_n->_M_nxt; __y_n; __y_n = __y_n->_M_nxt)
+		if (!__other._M_node_equals(*__y_ref_n, *__y_n))
 		  break;
 
-	      if (!__y_n || __other._M_bucket_index(__y_n) != __ybkt)
+	      if (!__y_n || __other._M_bucket_index(*__y_n) != __ybkt)
 		return false;
 	    }
 
@@ -1868,11 +1777,13 @@ namespace __detail
       using __value_alloc_traits = typename __node_alloc_traits::template
 	rebind_traits<typename __node_type::value_type>;
 
-      using __node_base = __detail::_Hash_node_base;
-      using __bucket_type = __node_base*;      
-      using __bucket_alloc_type =
-	__alloc_rebind<__node_alloc_type, __bucket_type>;
-      using __bucket_alloc_traits = std::allocator_traits<__bucket_alloc_type>;
+      using __node_pointer = typename __node_alloc_traits::pointer;
+      using __node_base = __detail::_Hash_node_base<__node_pointer>;
+      using __node_base_ptr = __ptr_rebind<__node_pointer, __node_base>;
+      using __buckets_alloc_type =
+	__alloc_rebind<__node_alloc_type, __node_base_ptr>;
+      using __buckets_alloc_traits = std::allocator_traits<__buckets_alloc_type>;
+      using __buckets_pointer = typename __buckets_alloc_traits::pointer;
 
       _Hashtable_alloc() = default;
       _Hashtable_alloc(const _Hashtable_alloc&) = default;
@@ -1893,27 +1804,27 @@ namespace __detail
 
       // Allocate a node and construct an element within it.
       template<typename... _Args>
-	__node_type*
+	__node_pointer
 	_M_allocate_node(_Args&&... __args);
 
       // Destroy the element within a node and deallocate the node.
       void
-      _M_deallocate_node(__node_type* __n);
+      _M_deallocate_node(__node_pointer __n);
 
       // Deallocate a node.
       void
-      _M_deallocate_node_ptr(__node_type* __n);
+      _M_deallocate_node_ptr(__node_pointer __n);
 
       // Deallocate the linked list of nodes pointed to by __n.
       // The elements within the nodes are destroyed.
       void
-      _M_deallocate_nodes(__node_type* __n);
+      _M_deallocate_nodes(__node_pointer __n);
 
-      __bucket_type*
+      __buckets_pointer
       _M_allocate_buckets(std::size_t __bkt_count);
 
       void
-      _M_deallocate_buckets(__bucket_type*, std::size_t __bkt_count);
+      _M_deallocate_buckets(__buckets_pointer, std::size_t __bkt_count);
     };
 
   // Definitions of class template _Hashtable_alloc's out-of-line member
@@ -1922,17 +1833,16 @@ namespace __detail
     template<typename... _Args>
       auto
       _Hashtable_alloc<_NodeAlloc>::_M_allocate_node(_Args&&... __args)
-      -> __node_type*
+      -> __node_pointer
       {
 	auto __nptr = __node_alloc_traits::allocate(_M_node_allocator(), 1);
-	__node_type* __n = std::__to_address(__nptr);
 	__try
 	  {
-	    ::new ((void*)__n) __node_type;
+	    ::new ((void*)std::__to_address(__nptr)) __node_type;
 	    __node_alloc_traits::construct(_M_node_allocator(),
-					   __n->_M_valptr(),
+					   __nptr->_M_valptr(),
 					   std::forward<_Args>(__args)...);
-	    return __n;
+	    return __nptr;
 	  }
 	__catch(...)
 	  {
@@ -1943,55 +1853,53 @@ namespace __detail
 
   template<typename _NodeAlloc>
     void
-    _Hashtable_alloc<_NodeAlloc>::_M_deallocate_node(__node_type* __n)
+    _Hashtable_alloc<_NodeAlloc>::_M_deallocate_node(__node_pointer __nptr)
     {
-      __node_alloc_traits::destroy(_M_node_allocator(), __n->_M_valptr());
-      _M_deallocate_node_ptr(__n);
+      __node_alloc_traits::destroy(_M_node_allocator(), __nptr->_M_valptr());
+      _M_deallocate_node_ptr(__nptr);
     }
 
   template<typename _NodeAlloc>
     void
-    _Hashtable_alloc<_NodeAlloc>::_M_deallocate_node_ptr(__node_type* __n)
+    _Hashtable_alloc<_NodeAlloc>::_M_deallocate_node_ptr(__node_pointer __nptr)
     {
-      typedef typename __node_alloc_traits::pointer _Ptr;
-      auto __ptr = std::pointer_traits<_Ptr>::pointer_to(*__n);
-      __n->~__node_type();
-      __node_alloc_traits::deallocate(_M_node_allocator(), __ptr, 1);
+      __nptr->~__node_type();
+      __node_alloc_traits::deallocate(_M_node_allocator(), __nptr, 1);
     }
 
   template<typename _NodeAlloc>
     void
-    _Hashtable_alloc<_NodeAlloc>::_M_deallocate_nodes(__node_type* __n)
+    _Hashtable_alloc<_NodeAlloc>::_M_deallocate_nodes(__node_pointer __nptr)
     {
-      while (__n)
+      while (__nptr)
 	{
-	  __node_type* __tmp = __n;
-	  __n = __n->_M_next();
+	  __node_pointer __tmp = __nptr;
+	  __nptr = __nptr->_M_nxt;
 	  _M_deallocate_node(__tmp);
 	}
     }
 
   template<typename _NodeAlloc>
-    typename _Hashtable_alloc<_NodeAlloc>::__bucket_type*
-    _Hashtable_alloc<_NodeAlloc>::_M_allocate_buckets(std::size_t __bkt_count)
+    auto
+    _Hashtable_alloc<_NodeAlloc>::
+    _M_allocate_buckets(std::size_t __bkt_count)
+    -> __buckets_pointer
     {
-      __bucket_alloc_type __alloc(_M_node_allocator());
+      __buckets_alloc_type __alloc(_M_node_allocator());
 
-      auto __ptr = __bucket_alloc_traits::allocate(__alloc, __bkt_count);
-      __bucket_type* __p = std::__to_address(__ptr);
-      __builtin_memset(__p, 0, __bkt_count * sizeof(__bucket_type));
-      return __p;
+      auto __ptr = __buckets_alloc_traits::allocate(__alloc, __bkt_count);
+      std::fill_n(__ptr, __bkt_count, nullptr);
+      return __ptr;
     }
 
   template<typename _NodeAlloc>
     void
-    _Hashtable_alloc<_NodeAlloc>::_M_deallocate_buckets(__bucket_type* __bkts,
-							std::size_t __bkt_count)
+    _Hashtable_alloc<_NodeAlloc>::
+    _M_deallocate_buckets(__buckets_pointer __bkts,
+			  std::size_t __bkt_count)
     {
-      typedef typename __bucket_alloc_traits::pointer _Ptr;
-      auto __ptr = std::pointer_traits<_Ptr>::pointer_to(*__bkts);
-      __bucket_alloc_type __alloc(_M_node_allocator());
-      __bucket_alloc_traits::deallocate(__alloc, __ptr, __bkt_count);
+      __buckets_alloc_type __alloc(_M_node_allocator());
+      __buckets_alloc_traits::deallocate(__alloc, __bkts, __bkt_count);
     }
 
  //@} hashtable-detail
diff --git a/libstdc++-v3/testsuite/23_containers/unordered_map/allocator/ext_ptr.cc b/libstdc++-v3/testsuite/23_containers/unordered_map/allocator/ext_ptr.cc
new file mode 100644
index 00000000000..e9d7ada7151
--- /dev/null
+++ b/libstdc++-v3/testsuite/23_containers/unordered_map/allocator/ext_ptr.cc
@@ -0,0 +1,57 @@
+// Copyright (C) 2020 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library.  This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING3.  If not see
+// <http://www.gnu.org/licenses/>.
+
+// { dg-do run { target c++11 } }
+
+#include <unordered_map>
+#include <memory>
+#include <testsuite_hooks.h>
+#include <testsuite_allocator.h>
+
+struct T { int i; };
+
+bool operator==(const T& l, const T& r)
+{ return l.i == r.i; }
+
+struct H
+{
+  std::size_t
+  operator()(const T& t) const noexcept
+  { return t.i; }
+};
+
+struct E : std::equal_to<T>
+{ };
+
+using __gnu_test::CustomPointerAlloc;
+
+template class std::unordered_map<T, int, H, E,
+				  CustomPointerAlloc<std::pair<const T, int>>>;
+
+void test01()
+{
+  typedef CustomPointerAlloc<std::pair<const T, int>> alloc_type;
+  typedef std::unordered_map<T, int, H, E, alloc_type> test_type;
+  test_type v;
+  v.insert({ T(), 0 });
+  VERIFY( ++v.begin() == v.end() );
+}
+
+int main()
+{
+  test01();
+}
diff --git a/libstdc++-v3/testsuite/23_containers/unordered_multimap/allocator/ext_ptr.cc b/libstdc++-v3/testsuite/23_containers/unordered_multimap/allocator/ext_ptr.cc
new file mode 100644
index 00000000000..4a895a6302c
--- /dev/null
+++ b/libstdc++-v3/testsuite/23_containers/unordered_multimap/allocator/ext_ptr.cc
@@ -0,0 +1,57 @@
+// Copyright (C) 2020 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library.  This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING3.  If not see
+// <http://www.gnu.org/licenses/>.
+
+// { dg-do run { target c++11 } }
+
+#include <unordered_map>
+#include <memory>
+#include <testsuite_hooks.h>
+#include <testsuite_allocator.h>
+
+struct T { int i; };
+
+bool operator==(const T& l, const T& r)
+{ return l.i == r.i; }
+
+struct H
+{
+  std::size_t
+  operator()(const T& t) const noexcept
+  { return t.i; }
+};
+
+struct E : std::equal_to<T>
+{ };
+
+using __gnu_test::CustomPointerAlloc;
+
+template class std::unordered_multimap<T, int, H, E,
+				       CustomPointerAlloc<std::pair<const T, int>>>;
+
+void test01()
+{
+  typedef CustomPointerAlloc<std::pair<const T, int>> alloc_type;
+  typedef std::unordered_multimap<T, int, H, E, alloc_type> test_type;
+  test_type v;
+  v.insert({ T(), 0 });
+  VERIFY( ++v.begin() == v.end() );
+}
+
+int main()
+{
+  test01();
+}
diff --git a/libstdc++-v3/testsuite/23_containers/unordered_multiset/allocator/ext_ptr.cc b/libstdc++-v3/testsuite/23_containers/unordered_multiset/allocator/ext_ptr.cc
new file mode 100644
index 00000000000..36b5e10cc7b
--- /dev/null
+++ b/libstdc++-v3/testsuite/23_containers/unordered_multiset/allocator/ext_ptr.cc
@@ -0,0 +1,56 @@
+// Copyright (C) 2020 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library.  This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING3.  If not see
+// <http://www.gnu.org/licenses/>.
+
+// { dg-do run { target c++11 } }
+
+#include <unordered_set>
+#include <memory>
+#include <testsuite_hooks.h>
+#include <testsuite_allocator.h>
+
+struct T { int i; };
+
+bool operator==(const T& l, const T& r)
+{ return l.i == r.i; }
+
+struct H
+{
+  std::size_t
+  operator()(const T& t) const noexcept
+  { return t.i; }
+};
+
+struct E : std::equal_to<T>
+{ };
+
+using __gnu_test::CustomPointerAlloc;
+
+template class std::unordered_multiset<T, H, E, CustomPointerAlloc<T>>;
+
+void test01()
+{
+  typedef CustomPointerAlloc<T> alloc_type;
+  typedef std::unordered_multiset<T, H, E, alloc_type> test_type;
+  test_type v;
+  v.insert(T());
+  VERIFY( ++v.begin() == v.end() );
+}
+
+int main()
+{
+  test01();
+}
diff --git a/libstdc++-v3/testsuite/23_containers/unordered_set/allocator/ext_ptr.cc b/libstdc++-v3/testsuite/23_containers/unordered_set/allocator/ext_ptr.cc
index f6b908ac03e..479104709fb 100644
--- a/libstdc++-v3/testsuite/23_containers/unordered_set/allocator/ext_ptr.cc
+++ b/libstdc++-v3/testsuite/23_containers/unordered_set/allocator/ext_ptr.cc
@@ -15,10 +15,7 @@
 // with this library; see the file COPYING3.  If not see
 // <http://www.gnu.org/licenses/>.
 
-// This test fails to compile since C++17 (see xfail-if below) so we can only
-// do a "run" test for C++11 and C++14, and a "compile" test for C++17 and up.
-// { dg-do run { target { c++11_only || c++14_only } } }
-// { dg-do compile { target c++17 } }
+// { dg-do run { target { c++11 } } }
 
 #include <unordered_set>
 #include <memory>
@@ -26,15 +23,22 @@
 #include <testsuite_allocator.h>
 
 struct T { int i; };
-bool operator==(const T& l, const T& r) { return l.i == r.i; }
-struct H { std::size_t operator()(const T& t) const noexcept { return t.i; }
+
+bool operator==(const T& l, const T& r)
+{ return l.i == r.i; }
+
+struct H
+{
+  std::size_t
+  operator()(const T& t) const noexcept
+  { return t.i; }
 };
-struct E : std::equal_to<T> { };
+
+struct E : std::equal_to<T>
+{ };
 
 using __gnu_test::CustomPointerAlloc;
 
-// { dg-xfail-if "node reinsertion assumes raw pointers" { c++17 } }
-// TODO when removing this xfail change the test back to "dg-do run".
 template class std::unordered_set<T, H, E, CustomPointerAlloc<T>>;
 
 void test01()

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: libstdc++ PR 57272 Fancy pointer support in Hashtable
  2020-09-28 20:37   ` François Dumont
@ 2020-10-20 11:04     ` Jonathan Wakely
  2020-10-20 17:26       ` François Dumont
  2020-11-01 21:48       ` François Dumont
  0 siblings, 2 replies; 10+ messages in thread
From: Jonathan Wakely @ 2020-10-20 11:04 UTC (permalink / raw)
  To: François Dumont; +Cc: libstdc++

On 28/09/20 22:37 +0200, François Dumont via Libstdc++ wrote:
>Following recent changes on _Hashtable I rebase the patch and 
>completely review it.
>
>I managed to integrate the allocator custom pointer type without 
>touching to _Hashtable base types like _Hash_code_base or 
>_Hashtable_base. However I cannot see how to use the custom pointer 
>type without impacting the node types like _Hash_node_base which now 
>takes a template parameter, the custom pointer type.
>
>On an abi point of view node types are different however the data 
>structure is the same. The only difference is that the _Hash_node_base 
>_M_nxt is now a _Hash_node<> custom pointer rather than a simple 
>_Hash_node_base*.
>
>Even if this patch can't go in because of the abi breaking change I am 
>going to adapt some of the code simplifications for master. Especially 
>the _Hash_code_base and _Local_iterator_base simplifications.
>
>Let me know if you can think of a way to integrate the custom pointer 
>without impacting abi. Unless impacting node types and associated 
>iterator types is fine even if I already noticed that pretty printer 
>tests are broken with those changes.

The approach I used for the other containers (which was never
completed and committed) is something like:

struct _Node_base
{
   _Node_base* _M_next;
};

template<typename _Ptr>
struct _Fancy_node_base
{
   _Ptr _M_next;
};

template<typename _Ptr>
   using node_base = conditional_t<is_pointer<_Ptr>::value,
                                   _Node_base,
                                   _Fancy_node_base<_Ptr>>;

This way all existing code that has allocators with non-fancy pointers
continues to use the same type. Code using fancy pointers (which
doesn't currently work properly anyway) changes to use the new types
that depend on the pointer type.


^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: libstdc++ PR 57272 Fancy pointer support in Hashtable
  2020-10-20 11:04     ` Jonathan Wakely
@ 2020-10-20 17:26       ` François Dumont
  2020-11-01 21:48       ` François Dumont
  1 sibling, 0 replies; 10+ messages in thread
From: François Dumont @ 2020-10-20 17:26 UTC (permalink / raw)
  To: Jonathan Wakely; +Cc: libstdc++

On 20/10/20 1:04 pm, Jonathan Wakely wrote:
> On 28/09/20 22:37 +0200, François Dumont via Libstdc++ wrote:
>> Following recent changes on _Hashtable I rebase the patch and 
>> completely review it.
>>
>> I managed to integrate the allocator custom pointer type without 
>> touching to _Hashtable base types like _Hash_code_base or 
>> _Hashtable_base. However I cannot see how to use the custom pointer 
>> type without impacting the node types like _Hash_node_base which now 
>> takes a template parameter, the custom pointer type.
>>
>> On an abi point of view node types are different however the data 
>> structure is the same. The only difference is that the 
>> _Hash_node_base _M_nxt is now a _Hash_node<> custom pointer rather 
>> than a simple _Hash_node_base*.
>>
>> Even if this patch can't go in because of the abi breaking change I 
>> am going to adapt some of the code simplifications for master. 
>> Especially the _Hash_code_base and _Local_iterator_base simplifications.
>>
>> Let me know if you can think of a way to integrate the custom pointer 
>> without impacting abi. Unless impacting node types and associated 
>> iterator types is fine even if I already noticed that pretty printer 
>> tests are broken with those changes.
>
> The approach I used for the other containers (which was never
> completed and committed) is something like:
>
> struct _Node_base
> {
>   _Node_base* _M_next;
> };
>
> template<typename _Ptr>
> struct _Fancy_node_base
> {
>   _Ptr _M_next;
> };
>
> template<typename _Ptr>
>   using node_base = conditional_t<is_pointer<_Ptr>::value,
>                                   _Node_base,
> _Fancy_node_base<_Ptr>>;
>
> This way all existing code that has allocators with non-fancy pointers
> continues to use the same type. Code using fancy pointers (which
> doesn't currently work properly anyway) changes to use the new types
> that depend on the pointer type.
>
Ok, thanks for the tips, I'll do something similar.


^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: libstdc++ PR 57272 Fancy pointer support in Hashtable
  2020-10-20 11:04     ` Jonathan Wakely
  2020-10-20 17:26       ` François Dumont
@ 2020-11-01 21:48       ` François Dumont
  2020-11-02 14:11         ` Jonathan Wakely
  1 sibling, 1 reply; 10+ messages in thread
From: François Dumont @ 2020-11-01 21:48 UTC (permalink / raw)
  To: Jonathan Wakely; +Cc: libstdc++

[-- Attachment #1: Type: text/plain, Size: 6803 bytes --]

Here is another attempt.

This time I am storing the nodes using the allocator's pointer type only in the 
singly linked list of nodes. Buckets are still __node_base* so that the 
custom pointer is not manipulated too much. Moreover, iterators also 
use raw node pointers.

As advised, I introduced new types for the custom-pointer case. I am 
also using those in the GNU versioned namespace so that I could more easily 
test this new code against all the existing test cases.

Note that since we are introducing a new ABI, I am also changing the node 
memory layout in this case. I think it is better to put the hash code 
cache before the node value rather than after: it will be closer to the 
beginning of the node and so accessible without an extra memory page fault.

To be clear the node mem layout is:
- next node pointer
- node value_type
- hash code (optional)

The new node mem layout is:
- next node pointer
- hash code (optional)
- node value_type

Here is the git commit message in case you approve the patch.

     libstdc++: Store allocator::pointer in hashtable implementation

     Use allocator pointer type in _Hashtable implementation.

             * include/bits/hashtable_policy.h
             (_ReuseOrAllocNode<>::__node_type): Remove.
             (_ReuseOrAllocNode<>::__node_pointer): New.
             (_ReuseOrAllocNode(__node_pointer, __hashtable_alloc&)): 
Adapt to use
             latter.
(_ReuseOrAllocNode<>::operator()(_Arg&&)): Return latter.
             (_AllocNode<>::__node_type): Remove.
             (_AllocNode<>::__node_pointer): New.
(_AllocNode<>::operator()<>(_Arg&&)): Return latter.
             (_Hash_node_cust_ptr_base<>): New.
             (_Hash_node_cache_value<typename _Value, bool 
_Cache_hash_code>): New.
             (_Hash_node<>::__node_base): New.
             (_Hash_node<>::__node_ptr): New.
             (_Hash_node<>::__node_type): New.
             (_Hash_node<>::__node_value_cache_type): New.
             (_Hash_node<>::_M_next_ptr()): New.
             (_Hash_cust_ptr_node<typename _Ptr, bool 
_Cache_hash_code>): New.
             (_Hashtable_iterator_base<typename _NodeType>): New.
             (_Node_iterator_base<>): Inherits from latter.
             (_Hashtable_iterator<typename _NodeType, bool 
__constant_iterators>):
             New.
             (_Hashtable_const_iterator<typename _NodeType, bool 
__constant_iterators>):
             New.
             (_Insert_base<>::__alloc_ptr): New.
             (_Insert_base<>::__node_type): New. Define conditionally to 
_Hash_node<>
             or _Hash_cust_ptr_node<> depending on __alloc_ptr being a 
raw pointer.
             (_Insert_base<>::__node_alloc_type): New.
             (_Insert_base<>::__hashtable_alloc): Remove.
             (_Insert_base<>::iterator): Define conditionally to 
_Node_iterator<>
             or _Hashtable_iterator<> depending on __alloc_ptr being a 
raw pointer.
             (_Insert_base<>::const_iterator): Define conditionally to
             _Node_const_iterator<> or _Hashtable_const_iterator<> 
depending on
             __alloc_ptr being a raw pointer.
             (_Hashtable_local_iter_base<>): New.
             (_Hash_code_base<>::_M_bucket_index(const 
_Hash_node_cache_value<>&,
             size_t)): New.
             (_Hashtable_local_iter_base<>): New.
             (_Hashtable_local_iterator<>): New.
             (_Hashtable_const_local_iterator<>): New.
             (_Hashtable_base<>::_M_equals(const _Key&, __hash_code,
             const _Hash_node_cache_value<>&): New.
             (_Hashtable_base<>::_M_node_equals(const 
_Hash_node_cache_value<>&,
             const _Hash_node_cache_value<>&)): New.
             (_Hashtable_alloc<>::__value_alloc_traits): Remove.
             (_Hashtable_alloc<>::__node_base_ptr): Remove.
             * include/bits/hashtable.h (_Hashtable<>): Adapt.
             * 
testsuite/23_containers/unordered_map/allocator/ext_ptr.cc: New test.
             * 
testsuite/23_containers/unordered_multimap/allocator/ext_ptr.cc:
             New test.
             * 
testsuite/23_containers/unordered_multiset/allocator/ext_ptr.cc:
             New test.
             * 
testsuite/23_containers/unordered_set/allocator/ext_ptr.cc: Adapt.

Tested under Linux x86_64 normal and version namespace modes.

François


On 20/10/20 1:04 pm, Jonathan Wakely wrote:
> On 28/09/20 22:37 +0200, François Dumont via Libstdc++ wrote:
>> Following recent changes on _Hashtable I rebase the patch and 
>> completely review it.
>>
>> I managed to integrate the allocator custom pointer type without 
>> touching to _Hashtable base types like _Hash_code_base or 
>> _Hashtable_base. However I cannot see how to use the custom pointer 
>> type without impacting the node types like _Hash_node_base which now 
>> takes a template parameter, the custom pointer type.
>>
>> On an abi point of view node types are different however the data 
>> structure is the same. The only difference is that the 
>> _Hash_node_base _M_nxt is now a _Hash_node<> custom pointer rather 
>> than a simple _Hash_node_base*.
>>
>> Even if this patch can't go in because of the abi breaking change I 
>> am going to adapt some of the code simplifications for master. 
>> Especially the _Hash_code_base and _Local_iterator_base simplifications.
>>
>> Let me know if you can think of a way to integrate the custom pointer 
>> without impacting abi. Unless impacting node types and associated 
>> iterator types is fine even if I already noticed that pretty printer 
>> tests are broken with those changes.
>
> The approach I used for the other containers (which was never
> completed and committed) is something like:
>
> struct _Node_base
> {
>   _Node_base* _M_next;
> };
>
> template<typename _Ptr>
> struct _Fancy_node_base
> {
>   _Ptr _M_next;
> };
>
> template<typename _Ptr>
>   using node_base = conditional_t<is_pointer<_Ptr>::value,
>                                   _Node_base,
> _Fancy_node_base<_Ptr>>;
>
> This way all existing code that has allocators with non-fancy pointers
> continues to use the same type. Code using fancy pointers (which
> doesn't currently work properly anyway) changes to use the new types
> that depend on the pointer type.
>


[-- Attachment #2: hashtable.patch --]
[-- Type: text/x-patch, Size: 67403 bytes --]

diff --git a/libstdc++-v3/include/bits/hashtable.h b/libstdc++-v3/include/bits/hashtable.h
index 6c6c5edde0b..86644d447ca 100644
--- a/libstdc++-v3/include/bits/hashtable.h
+++ b/libstdc++-v3/include/bits/hashtable.h
@@ -182,8 +182,19 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 				 _RehashPolicy, _Traits>,
       private __detail::_Hashtable_alloc<
 	__alloc_rebind<_Alloc,
-		       __detail::_Hash_node<_Value,
-					    _Traits::__hash_cached::value>>>
+#if _GLIBCXX_INLINE_VERSION
+		       __detail::_Hash_cust_ptr_node<
+			 __detail::__alloc_val_ptr<_Alloc, _Value>,
+			 _Traits::__hash_cached::value>>>
+#else
+	  typename std::conditional<
+	    std::__is_pointer<
+	      __detail::__alloc_val_ptr<_Alloc, _Value>>::__value,
+	    __detail::_Hash_node<_Value, _Traits::__hash_cached::value>,
+	    __detail::_Hash_cust_ptr_node<
+	      __detail::__alloc_val_ptr<_Alloc, _Value>,
+	      _Traits::__hash_cached::value>>::type>>
+#endif
     {
       static_assert(is_same<typename remove_cv<_Value>::type, _Value>::value,
 	  "unordered container must have a non-const, non-volatile value_type");
@@ -195,21 +206,30 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       using __traits_type = _Traits;
       using __hash_cached = typename __traits_type::__hash_cached;
       using __constant_iterators = typename __traits_type::__constant_iterators;
-      using __node_type = __detail::_Hash_node<_Value, __hash_cached::value>;
+      using __alloc_ptr = __detail::__alloc_val_ptr<_Alloc, _Value>;
+      using __node_type =
+#if _GLIBCXX_INLINE_VERSION
+	__detail::_Hash_cust_ptr_node<__alloc_ptr,
+				      _Traits::__hash_cached::value>;
+#else
+	typename std::conditional<
+	  std::__is_pointer<__alloc_ptr>::__value,
+	    __detail::_Hash_node<_Value, _Traits::__hash_cached::value>,
+	    __detail::_Hash_cust_ptr_node<__alloc_ptr,
+					  _Traits::__hash_cached::value>>::type;
+#endif
+      using __node_base = typename __node_type::__node_base;
+      using __node_value_type = typename __node_type::__node_value_cache_type;
       using __node_alloc_type = __alloc_rebind<_Alloc, __node_type>;
-
       using __hashtable_alloc = __detail::_Hashtable_alloc<__node_alloc_type>;
 
-      using __node_value_type =
-	__detail::_Hash_node_value<_Value, __hash_cached::value>;
       using __node_ptr = typename __hashtable_alloc::__node_ptr;
-      using __value_alloc_traits =
-	typename __hashtable_alloc::__value_alloc_traits;
       using __node_alloc_traits =
 	typename __hashtable_alloc::__node_alloc_traits;
-      using __node_base = typename __hashtable_alloc::__node_base;
-      using __node_base_ptr = typename __hashtable_alloc::__node_base_ptr;
+      using __value_alloc_traits =
+	typename __node_alloc_traits::template rebind_traits<_Value>;
       using __buckets_ptr = typename __hashtable_alloc::__buckets_ptr;
+      using __buckets_ptr_traits = std::pointer_traits<__buckets_ptr>;
 
       using __insert_base = __detail::_Insert<_Key, _Value, _Alloc, _ExtractKey,
 					      _Equal, _Hash,
@@ -233,15 +253,47 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 
       using const_iterator = typename __insert_base::const_iterator;
 
-      using local_iterator = __detail::_Local_iterator<key_type, _Value,
-			_ExtractKey, _Hash, _RangeHash, _Unused,
-					     __constant_iterators::value,
-					     __hash_cached::value>;
+      using local_iterator =
+#if _GLIBCXX_INLINE_VERSION
+	__detail::_Hashtable_local_iterator<key_type, __node_type,
+					    _ExtractKey, _Hash,
+					    _RangeHash, _Unused,
+					    __constant_iterators::value,
+					    __hash_cached::value>;
+#else
+	typename std::conditional<
+	  std::__is_pointer<__alloc_ptr>::__value,
+	__detail::_Local_iterator<key_type, _Value,
+				  _ExtractKey, _Hash, _RangeHash, _Unused,
+				  __constant_iterators::value,
+				  __hash_cached::value>,
+	__detail::_Hashtable_local_iterator<key_type, __node_type,
+					    _ExtractKey, _Hash,
+					    _RangeHash, _Unused,
+					    __constant_iterators::value,
+					    __hash_cached::value>>::type;
+#endif
 
-      using const_local_iterator = __detail::_Local_const_iterator<
-			key_type, _Value,
-			_ExtractKey, _Hash, _RangeHash, _Unused,
-			__constant_iterators::value, __hash_cached::value>;
+      using const_local_iterator =
+#if _GLIBCXX_INLINE_VERSION
+	__detail::_Hashtable_const_local_iterator<key_type, __node_type,
+						  _ExtractKey, _Hash,
+						  _RangeHash, _Unused,
+						  __constant_iterators::value,
+						  __hash_cached::value>;
+#else
+	typename std::conditional<
+	std::__is_pointer<__alloc_ptr>::__value,
+	__detail::_Local_const_iterator<key_type, _Value,
+				  _ExtractKey, _Hash, _RangeHash, _Unused,
+				  __constant_iterators::value,
+				  __hash_cached::value>,
+	__detail::_Hashtable_const_local_iterator<key_type, __node_type,
+						  _ExtractKey, _Hash,
+						  _RangeHash, _Unused,
+						  __constant_iterators::value,
+						  __hash_cached::value>>::type;
+#endif
 
     private:
       using __rehash_type = _RehashPolicy;
@@ -374,7 +426,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 #endif
 
     private:
-      __buckets_ptr		_M_buckets		= &_M_single_bucket;
+      __buckets_ptr		_M_buckets =
+			__buckets_ptr_traits::pointer_to(_M_single_bucket);
       size_type			_M_bucket_count		= 1;
       __node_base		_M_before_begin;
       size_type			_M_element_count	= 0;
@@ -386,13 +439,13 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       // qualified.
       // Note that we can't leave hashtable with 0 bucket without adding
       // numerous checks in the code to avoid 0 modulus.
-      __node_base_ptr		_M_single_bucket	= nullptr;
+      __node_base*		_M_single_bucket	= nullptr;
 
       void
       _M_update_bbegin()
       {
-	if (_M_begin())
-	  _M_buckets[_M_bucket_index(*_M_begin())] = &_M_before_begin;
+	if (auto __begin = _M_begin())
+	  _M_buckets[_M_bucket_index(*__begin)] = &_M_before_begin;
       }
 
       void
@@ -402,9 +455,18 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	_M_update_bbegin();
       }
 
+#if !_GLIBCXX_INLINE_VERSION
+      void
+      _M_update_bbegin(__detail::_Hash_node_base* __n)
+      { _M_update_bbegin(static_cast<__node_ptr>(__n));  }
+#endif
+
       bool
       _M_uses_single_bucket(__buckets_ptr __bkts) const
-      { return __builtin_expect(__bkts == &_M_single_bucket, false); }
+      {
+	return __builtin_expect(
+	  std::__to_address(__bkts) == &_M_single_bucket, false);
+      }
 
       bool
       _M_uses_single_bucket() const
@@ -419,7 +481,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	if (__builtin_expect(__bkt_count == 1, false))
 	  {
 	    _M_single_bucket = nullptr;
-	    return &_M_single_bucket;
+	    return __buckets_ptr_traits::pointer_to(_M_single_bucket);
 	  }
 
 	return __hashtable_alloc::_M_allocate_buckets(__bkt_count);
@@ -440,12 +502,15 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 
       // Gets bucket begin, deals with the fact that non-empty buckets contain
       // their before begin node.
-      __node_ptr
+      __node_type*
       _M_bucket_begin(size_type __bkt) const;
 
-      __node_ptr
+      __node_type*
       _M_begin() const
-      { return static_cast<__node_ptr>(_M_before_begin._M_nxt); }
+      {
+	return
+	  static_cast<__node_type*>(std::__to_address(_M_before_begin._M_nxt));
+      }
 
       // Assign *this using another _Hashtable instance. Whether elements
       // are copied or moved depends on the _Ht reference.
@@ -492,6 +557,16 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 		   const _Hash&, const _Equal&, const allocator_type&,
 		   false_type __uks);
 
+      static __node_ptr
+      _S_cast(__node_ptr __n)
+      { return __n; }
+
+#if !_GLIBCXX_INLINE_VERSION
+      static __node_ptr
+	_S_cast(__detail::_Hash_node_base* __n)
+      { return static_cast<__node_ptr>(__n); }
+#endif
+
     public:
       // Constructor, destructor, assignment, swap
       _Hashtable() = default;
@@ -568,7 +643,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       _Hashtable&
       operator=(initializer_list<value_type> __l)
       {
-	__reuse_or_alloc_node_gen_t __roan(_M_begin(), *this);
+	__reuse_or_alloc_node_gen_t __roan(_M_before_begin._M_nxt, *this);
 	_M_before_begin._M_nxt = nullptr;
 	clear();
 
@@ -736,16 +811,17 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 
       // Find and insert helper functions and types
       // Find the node before the one matching the criteria.
-      __node_base_ptr
+      __node_base*
       _M_find_before_node(size_type, const key_type&, __hash_code) const;
 
-      __node_ptr
+      __node_type*
       _M_find_node(size_type __bkt, const key_type& __key,
 		   __hash_code __c) const
       {
-	__node_base_ptr __before_n = _M_find_before_node(__bkt, __key, __c);
+	__node_base* __before_n = _M_find_before_node(__bkt, __key, __c);
 	if (__before_n)
-	  return static_cast<__node_ptr>(__before_n->_M_nxt);
+	  return static_cast<__node_type*>(
+				std::__to_address(__before_n->_M_nxt));
 	return nullptr;
       }
 
@@ -759,8 +835,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 			     size_type __next_bkt);
 
       // Get the node before __n in the bucket __bkt
-      __node_base_ptr
-      _M_get_previous_node(size_type __bkt, __node_ptr __n);
+      __node_base*
+      _M_get_previous_node(size_type __bkt, __node_type* __n);
 
       // Insert node __n with hash code __code, in bucket __bkt if no
       // rehash (assumes no element with same key already present).
@@ -772,7 +848,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       // Insert node __n with key __k and hash code __code.
       // Takes ownership of __n if insertion succeeds, throws otherwise.
       iterator
-      _M_insert_multi_node(__node_ptr __hint,
+      _M_insert_multi_node(__node_type* __hint,
 			   __hash_code __code, __node_ptr __n);
 
       template<typename... _Args>
@@ -830,7 +906,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       _M_erase(false_type __uks, const key_type&);
 
       iterator
-      _M_erase(size_type __bkt, __node_base_ptr __prev_n, __node_ptr __n);
+      _M_erase(size_type __bkt, __node_base* __prev_n, __node_ptr __n);
 
     public:
       // Emplace
@@ -890,7 +966,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	    const key_type& __k = __nh._M_key();
 	    __hash_code __code = this->_M_hash_code(__k);
 	    size_type __bkt = _M_bucket_index(__code);
-	    if (__node_ptr __n = _M_find_node(__bkt, __k, __code))
+	    if (__node_type* __n = _M_find_node(__bkt, __k, __code))
 	      {
 		__ret.node = std::move(__nh);
 		__ret.position = iterator(__n);
@@ -926,11 +1002,11 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 
     private:
       node_type
-      _M_extract_node(size_t __bkt, __node_base_ptr __prev_n)
+      _M_extract_node(size_t __bkt, __node_base* __prev_n)
       {
-	__node_ptr __n = static_cast<__node_ptr>(__prev_n->_M_nxt);
+	__node_ptr __n = _S_cast(__prev_n->_M_nxt);
 	if (__prev_n == _M_buckets[__bkt])
-	  _M_remove_bucket_begin(__bkt, __n->_M_next(),
+	  _M_remove_bucket_begin(__bkt, __n->_M_next_ptr(),
 	     __n->_M_nxt ? _M_bucket_index(*__n->_M_next()) : 0);
 	else if (__n->_M_nxt)
 	  {
@@ -962,7 +1038,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	node_type __nh;
 	__hash_code __code = this->_M_hash_code(__k);
 	std::size_t __bkt = _M_bucket_index(__code);
-	if (__node_base_ptr __prev_node = _M_find_before_node(__bkt, __k, __code))
+	if (auto __prev_node = _M_find_before_node(__bkt, __k, __code))
 	  __nh = _M_extract_node(__bkt, __prev_node);
 	return __nh;
       }
@@ -1032,10 +1108,12 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
     _M_bucket_begin(size_type __bkt) const
-    -> __node_ptr
+    -> __node_type*
     {
-      __node_base_ptr __n = _M_buckets[__bkt];
-      return __n ? static_cast<__node_ptr>(__n->_M_nxt) : nullptr;
+      __node_base* __n = _M_buckets[__bkt];
+      return __n
+	? static_cast<__node_type*>(std::__to_address(__n->_M_nxt))
+	: nullptr;
     }
 
   template<typename _Key, typename _Value, typename _Alloc,
@@ -1123,7 +1201,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	      && __this_alloc != __that_alloc)
 	    {
 	      // Replacement allocator cannot free existing storage.
-	      this->_M_deallocate_nodes(_M_begin());
+	      this->_M_deallocate_nodes(_S_cast(_M_before_begin._M_nxt));
 	      _M_before_begin._M_nxt = nullptr;
 	      _M_deallocate_buckets();
 	      _M_buckets = nullptr;
@@ -1175,15 +1253,15 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	    _M_bucket_count = __ht._M_bucket_count;
 	  }
 	else
-	  __builtin_memset(_M_buckets, 0,
-			   _M_bucket_count * sizeof(__node_base_ptr));
+	  __builtin_memset(std::__to_address(_M_buckets), 0,
+			   _M_bucket_count * sizeof(__node_base*));
 
 	__try
 	  {
 	    __hashtable_base::operator=(std::forward<_Ht>(__ht));
 	    _M_element_count = __ht._M_element_count;
 	    _M_rehash_policy = __ht._M_rehash_policy;
-	    __reuse_or_alloc_node_gen_t __roan(_M_begin(), *this);
+	    __reuse_or_alloc_node_gen_t __roan(_M_before_begin._M_nxt, *this);
 	    _M_before_begin._M_nxt = nullptr;
 	    _M_assign(std::forward<_Ht>(__ht), __roan);
 	    if (__former_buckets)
@@ -1199,8 +1277,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 		_M_buckets = __former_buckets;
 		_M_bucket_count = __former_bucket_count;
 	      }
-	    __builtin_memset(_M_buckets, 0,
-			     _M_bucket_count * sizeof(__node_base_ptr));
+	    __builtin_memset(std::__to_address(_M_buckets), 0,
+			     _M_bucket_count * sizeof(__node_base*));
 	    __throw_exception_again;
 	  }
       }
@@ -1226,14 +1304,14 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 
 	    // First deal with the special first node pointed to by
 	    // _M_before_begin.
-	    __node_ptr __ht_n = __ht._M_begin();
+	    __node_type* __ht_n = __ht._M_begin();
 	    __node_ptr __this_n
 	      = __node_gen(__fwd_value_for<_Ht>(__ht_n->_M_v()));
 	    this->_M_copy_code(*__this_n, *__ht_n);
 	    _M_update_bbegin(__this_n);
 
 	    // Then deal with other nodes.
-	    __node_ptr __prev_n = __this_n;
+	    __node_base* __prev_n = std::__to_address(__this_n);
 	    for (__ht_n = __ht_n->_M_next(); __ht_n; __ht_n = __ht_n->_M_next())
 	      {
 		__this_n = __node_gen(__fwd_value_for<_Ht>(__ht_n->_M_v()));
@@ -1242,7 +1320,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 		size_type __bkt = _M_bucket_index(*__this_n);
 		if (!_M_buckets[__bkt])
 		  _M_buckets[__bkt] = __prev_n;
-		__prev_n = __this_n;
+		__prev_n = std::__to_address(__this_n);
 	      }
 	  }
 	__catch(...)
@@ -1266,7 +1344,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       _M_rehash_policy._M_reset();
       _M_bucket_count = 1;
       _M_single_bucket = nullptr;
-      _M_buckets = &_M_single_bucket;
+      _M_buckets = __buckets_ptr_traits::pointer_to(_M_single_bucket);
       _M_before_begin._M_nxt = nullptr;
       _M_element_count = 0;
     }
@@ -1283,7 +1361,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       if (__builtin_expect(std::__addressof(__ht) == this, false))
 	return;
 
-      this->_M_deallocate_nodes(_M_begin());
+      this->_M_deallocate_nodes(_S_cast(_M_before_begin._M_nxt));
       _M_deallocate_buckets();
       __hashtable_base::operator=(std::move(__ht));
       _M_rehash_policy = __ht._M_rehash_policy;
@@ -1291,7 +1369,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	_M_buckets = __ht._M_buckets;
       else
 	{
-	  _M_buckets = &_M_single_bucket;
+	  _M_buckets = __buckets_ptr_traits::pointer_to(_M_single_bucket);
 	  _M_single_bucket = __ht._M_single_bucket;
 	}
 
@@ -1368,7 +1446,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       // Update buckets if __ht is using its single bucket.
       if (__ht._M_uses_single_bucket())
 	{
-	  _M_buckets = &_M_single_bucket;
+	  _M_buckets = __buckets_ptr_traits::pointer_to(_M_single_bucket);
 	  _M_single_bucket = __ht._M_single_bucket;
 	}
 
@@ -1419,7 +1497,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	{
 	  if (__ht._M_uses_single_bucket())
 	    {
-	      _M_buckets = &_M_single_bucket;
+	      _M_buckets = __buckets_ptr_traits::pointer_to(_M_single_bucket);
 	      _M_single_bucket = __ht._M_single_bucket;
 	    }
 	  else
@@ -1427,7 +1505,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 
 	  // Fix bucket containing the _M_before_begin pointer that can't be
 	  // moved.
-	  _M_update_bbegin(__ht._M_begin());
+	  _M_update_bbegin(__ht._M_before_begin._M_nxt);
 
 	  __ht._M_reset();
 	}
@@ -1480,13 +1558,14 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	  if (!__x._M_uses_single_bucket())
 	    {
 	      _M_buckets = __x._M_buckets;
-	      __x._M_buckets = &__x._M_single_bucket;
+	      __x._M_buckets =
+		__buckets_ptr_traits::pointer_to(__x._M_single_bucket);
 	    }
 	}
       else if (__x._M_uses_single_bucket())
 	{
 	  __x._M_buckets = _M_buckets;
-	  _M_buckets = &_M_single_bucket;
+	  _M_buckets = __buckets_ptr_traits::pointer_to(_M_single_bucket);
 	}	
       else
 	std::swap(_M_buckets, __x._M_buckets);
@@ -1626,13 +1705,14 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
     _M_find_before_node(size_type __bkt, const key_type& __k,
 			__hash_code __code) const
-    -> __node_base_ptr
+    -> __node_base*
     {
-      __node_base_ptr __prev_p = _M_buckets[__bkt];
+      __node_base* __prev_p = _M_buckets[__bkt];
       if (!__prev_p)
 	return nullptr;
 
-      for (__node_ptr __p = static_cast<__node_ptr>(__prev_p->_M_nxt);;
+      for (__node_type* __p =
+	     static_cast<__node_type*>(std::__to_address(__prev_p->_M_nxt));;
 	   __p = __p->_M_next())
 	{
 	  if (this->_M_equals(__k, __code, *__p))
@@ -1673,7 +1753,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	  if (__node->_M_nxt)
 	    // We must update former begin bucket that is pointing to
 	    // _M_before_begin.
-	    _M_buckets[_M_bucket_index(*__node->_M_next())] = __node;
+	    _M_buckets[_M_bucket_index(*__node->_M_next())] =
+	      std::__to_address(__node);
 
 	  _M_buckets[__bkt] = &_M_before_begin;
 	}
@@ -1710,12 +1791,12 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     auto
     _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
-    _M_get_previous_node(size_type __bkt, __node_ptr __n)
-    -> __node_base_ptr
+    _M_get_previous_node(size_type __bkt, __node_type* __n)
+    -> __node_base*
     {
-      __node_base_ptr __prev_n = _M_buckets[__bkt];
-      while (__prev_n->_M_nxt != __n)
-	__prev_n = __prev_n->_M_nxt;
+      __node_base* __prev_n = _M_buckets[__bkt];
+      while (std::__to_address(__prev_n->_M_nxt) != __n)
+	__prev_n = std::__to_address(__prev_n->_M_nxt);
       return __prev_n;
     }
 
@@ -1735,7 +1816,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	const key_type& __k = _ExtractKey{}(__node._M_node->_M_v());
 	__hash_code __code = this->_M_hash_code(__k);
 	size_type __bkt = _M_bucket_index(__code);
-	if (__node_ptr __p = _M_find_node(__bkt, __k, __code))
+	if (__node_type* __p = _M_find_node(__bkt, __k, __code))
 	  // There is already an equivalent node, no insertion
 	  return std::make_pair(iterator(__p), false);
 
@@ -1795,7 +1876,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       // Always insert at the beginning of the bucket.
       _M_insert_bucket_begin(__bkt, __node);
       ++_M_element_count;
-      return iterator(__node);
+      return iterator(std::__to_address(__node));
     }
 
   template<typename _Key, typename _Value, typename _Alloc,
@@ -1805,7 +1886,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     auto
     _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
-    _M_insert_multi_node(__node_ptr __hint,
+    _M_insert_multi_node(__node_type* __hint,
 			 __hash_code __code, __node_ptr __node)
     -> iterator
     {
@@ -1822,7 +1903,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 
       // Find the node before an equivalent one or use hint if it exists and
       // if it is equivalent.
-      __node_base_ptr __prev
+      __node_base* __prev
 	= __builtin_expect(__hint != nullptr, false)
 	  && this->_M_equals(__k, __code, *__hint)
 	    ? __hint
@@ -1841,7 +1922,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	      {
 		size_type __next_bkt = _M_bucket_index(*__node->_M_next());
 		if (__next_bkt != __bkt)
-		  _M_buckets[__next_bkt] = __node;
+		  _M_buckets[__next_bkt] = std::__to_address(__node);
 	      }
 	}
       else
@@ -1850,7 +1931,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	// equivalent elements' relative positions.
 	_M_insert_bucket_begin(__bkt, __node);
       ++_M_element_count;
-      return iterator(__node);
+      return iterator(std::__to_address(__node));
     }
 
   // Insert v if no element with its key is already present.
@@ -1870,8 +1951,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	__hash_code __code = this->_M_hash_code(__k);
 	size_type __bkt = _M_bucket_index(__code);
 
-	if (__node_ptr __node = _M_find_node(__bkt, __k, __code))
-	  return { iterator(__node), false };
+	if (__node_type* __n = _M_find_node(__bkt, __k, __code))
+	  return { iterator(__n), false };
 
 	_Scoped_node __node{ __node_gen(std::forward<_Arg>(__v)), this };
 	auto __pos
@@ -1916,14 +1997,14 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     erase(const_iterator __it)
     -> iterator
     {
-      __node_ptr __n = __it._M_cur;
+      __node_type* __n = __it._M_cur;
       std::size_t __bkt = _M_bucket_index(*__n);
 
       // Look for previous node to unlink it from the erased one, this
       // is why we need buckets to contain the before begin to make
       // this search fast.
-      __node_base_ptr __prev_n = _M_get_previous_node(__bkt, __n);
-      return _M_erase(__bkt, __prev_n, __n);
+      __node_base* __prev_n = _M_get_previous_node(__bkt, __n);
+      return _M_erase(__bkt, __prev_n, _S_cast(__prev_n->_M_nxt));
     }
 
   template<typename _Key, typename _Value, typename _Alloc,
@@ -1933,11 +2014,11 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     auto
     _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
-    _M_erase(size_type __bkt, __node_base_ptr __prev_n, __node_ptr __n)
+    _M_erase(size_type __bkt, __node_base* __prev_n, __node_ptr __n)
     -> iterator
     {
       if (__prev_n == _M_buckets[__bkt])
-	_M_remove_bucket_begin(__bkt, __n->_M_next(),
+	_M_remove_bucket_begin(__bkt, __n->_M_next_ptr(),
 	  __n->_M_nxt ? _M_bucket_index(*__n->_M_next()) : 0);
       else if (__n->_M_nxt)
 	{
@@ -1968,12 +2049,12 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       std::size_t __bkt = _M_bucket_index(__code);
 
       // Look for the node before the first matching node.
-      __node_base_ptr __prev_n = _M_find_before_node(__bkt, __k, __code);
+      __node_base* __prev_n = _M_find_before_node(__bkt, __k, __code);
       if (!__prev_n)
 	return 0;
 
       // We found a matching node, erase it.
-      __node_ptr __n = static_cast<__node_ptr>(__prev_n->_M_nxt);
+      __node_ptr __n = _S_cast(__prev_n->_M_nxt);
       _M_erase(__bkt, __prev_n, __n);
       return 1;
     }
@@ -1992,7 +2073,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       std::size_t __bkt = _M_bucket_index(__code);
 
       // Look for the node before the first matching node.
-      __node_base_ptr __prev_n = _M_find_before_node(__bkt, __k, __code);
+      __node_base* __prev_n = _M_find_before_node(__bkt, __k, __code);
       if (!__prev_n)
 	return 0;
 
@@ -2002,8 +2083,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       // We use one loop to find all matching nodes and another to deallocate
       // them so that the key stays valid during the first loop. It might be
       // invalidated indirectly when destroying nodes.
-      __node_ptr __n = static_cast<__node_ptr>(__prev_n->_M_nxt);
-      __node_ptr __n_last = __n->_M_next();
+      __node_ptr __n = _S_cast(__prev_n->_M_nxt);
+      __node_type* __n_last = __n->_M_next();
       while (__n_last && this->_M_node_equals(*__n, *__n_last))
 	__n_last = __n_last->_M_next();
 
@@ -2013,19 +2094,19 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       size_type __result = 0;
       do
 	{
-	  __node_ptr __p = __n->_M_next();
+	  __node_ptr __p = __n->_M_next_ptr();
 	  this->_M_deallocate_node(__n);
 	  __n = __p;
 	  ++__result;
 	}
-      while (__n != __n_last);
+      while (std::__to_address(__n) != __n_last);
 
       _M_element_count -= __result;
       if (__prev_n == _M_buckets[__bkt])
-	_M_remove_bucket_begin(__bkt, __n_last, __n_last_bkt);
+	_M_remove_bucket_begin(__bkt, __n, __n_last_bkt);
       else if (__n_last_bkt != __bkt)
 	_M_buckets[__n_last_bkt] = __prev_n;
-      __prev_n->_M_nxt = __n_last;
+      __prev_n->_M_nxt = __n;
       return __result;
     }
 
@@ -2039,41 +2120,42 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     erase(const_iterator __first, const_iterator __last)
     -> iterator
     {
-      __node_ptr __n = __first._M_cur;
-      __node_ptr __last_n = __last._M_cur;
+      __node_type* __n = __first._M_cur;
+      __node_type* __last_n = __last._M_cur;
       if (__n == __last_n)
 	return iterator(__n);
 
       std::size_t __bkt = _M_bucket_index(*__n);
 
-      __node_base_ptr __prev_n = _M_get_previous_node(__bkt, __n);
+      __node_base* __prev_n = _M_get_previous_node(__bkt, __n);
+      __node_ptr __nptr = _S_cast(__prev_n->_M_nxt);
       bool __is_bucket_begin = __n == _M_bucket_begin(__bkt);
       std::size_t __n_bkt = __bkt;
       for (;;)
 	{
 	  do
 	    {
-	      __node_ptr __tmp = __n;
-	      __n = __n->_M_next();
+	      __node_ptr __tmp = __nptr;
+	      __nptr = __nptr->_M_next_ptr();
 	      this->_M_deallocate_node(__tmp);
 	      --_M_element_count;
-	      if (!__n)
+	      if (!__nptr)
 		break;
-	      __n_bkt = _M_bucket_index(*__n);
+	      __n_bkt = _M_bucket_index(*__nptr);
 	    }
-	  while (__n != __last_n && __n_bkt == __bkt);
+	  while (std::__to_address(__nptr) != __last_n && __n_bkt == __bkt);
 	  if (__is_bucket_begin)
-	    _M_remove_bucket_begin(__bkt, __n, __n_bkt);
-	  if (__n == __last_n)
+	    _M_remove_bucket_begin(__bkt, __nptr, __n_bkt);
+	  if (std::__to_address(__nptr) == __last_n)
 	    break;
 	  __is_bucket_begin = true;
 	  __bkt = __n_bkt;
 	}
 
-      if (__n && (__n_bkt != __bkt || __is_bucket_begin))
+      if (__nptr && (__n_bkt != __bkt || __is_bucket_begin))
 	_M_buckets[__n_bkt] = __prev_n;
-      __prev_n->_M_nxt = __n;
-      return iterator(__n);
+      __prev_n->_M_nxt = __nptr;
+      return iterator(std::__to_address(__nptr));
     }
 
   template<typename _Key, typename _Value, typename _Alloc,
@@ -2085,9 +2167,9 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
     clear() noexcept
     {
-      this->_M_deallocate_nodes(_M_begin());
-      __builtin_memset(_M_buckets, 0,
-		       _M_bucket_count * sizeof(__node_base_ptr));
+      this->_M_deallocate_nodes(_S_cast(_M_before_begin._M_nxt));
+      __builtin_memset(std::__to_address(_M_buckets), 0,
+		       _M_bucket_count * sizeof(__node_base*));
       _M_element_count = 0;
       _M_before_begin._M_nxt = nullptr;
     }
@@ -2148,12 +2230,12 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     _M_rehash_aux(size_type __bkt_count, true_type /* __uks */)
     {
       __buckets_ptr __new_buckets = _M_allocate_buckets(__bkt_count);
-      __node_ptr __p = _M_begin();
+      __node_type* __p = _M_begin();
       _M_before_begin._M_nxt = nullptr;
       std::size_t __bbegin_bkt = 0;
       while (__p)
 	{
-	  __node_ptr __next = __p->_M_next();
+	  __node_type* __next = __p->_M_next();
 	  std::size_t __bkt
 	    = __hash_code_base::_M_bucket_index(*__p, __bkt_count);
 	  if (!__new_buckets[__bkt])
@@ -2191,16 +2273,16 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     _M_rehash_aux(size_type __bkt_count, false_type /* __uks */)
     {
       __buckets_ptr __new_buckets = _M_allocate_buckets(__bkt_count);
-      __node_ptr __p = _M_begin();
+      __node_type* __p = _M_begin();
       _M_before_begin._M_nxt = nullptr;
       std::size_t __bbegin_bkt = 0;
       std::size_t __prev_bkt = 0;
-      __node_ptr __prev_p = nullptr;
+      __node_type* __prev_p = nullptr;
       bool __check_bucket = false;
 
       while (__p)
 	{
-	  __node_ptr __next = __p->_M_next();
+	  __node_type* __next = __p->_M_next();
 	  std::size_t __bkt
 	    = __hash_code_base::_M_bucket_index(*__p, __bkt_count);
 
diff --git a/libstdc++-v3/include/bits/hashtable_policy.h b/libstdc++-v3/include/bits/hashtable_policy.h
index 28372979c87..131a16b1969 100644
--- a/libstdc++-v3/include/bits/hashtable_policy.h
+++ b/libstdc++-v3/include/bits/hashtable_policy.h
@@ -31,9 +31,9 @@
 #ifndef _HASHTABLE_POLICY_H
 #define _HASHTABLE_POLICY_H 1
 
-#include <tuple>		// for std::tuple, std::forward_as_tuple
-#include <bits/stl_algobase.h>	// for std::min, std::is_permutation.
-#include <ext/numeric_traits.h>	// for __gnu_cxx::__int_traits
+#include <tuple>		     // for std::tuple, std::forward_as_tuple
+#include <bits/stl_algobase.h>	     // for std::min, std::is_permutation.
+#include <ext/numeric_traits.h>	     // for __gnu_cxx::__int_traits
 
 namespace std _GLIBCXX_VISIBILITY(default)
 {
@@ -59,24 +59,29 @@ namespace __detail
 
   // Helper function: return distance(first, last) for forward
   // iterators, or 0/1 for input iterators.
-  template<class _Iterator>
+  template<typename _Iterator>
     inline typename std::iterator_traits<_Iterator>::difference_type
     __distance_fw(_Iterator __first, _Iterator __last,
 		  std::input_iterator_tag)
     { return __first != __last ? 1 : 0; }
 
-  template<class _Iterator>
+  template<typename _Iterator>
     inline typename std::iterator_traits<_Iterator>::difference_type
     __distance_fw(_Iterator __first, _Iterator __last,
 		  std::forward_iterator_tag)
     { return std::distance(__first, __last); }
 
-  template<class _Iterator>
+  template<typename _Iterator>
     inline typename std::iterator_traits<_Iterator>::difference_type
     __distance_fw(_Iterator __first, _Iterator __last)
     { return __distance_fw(__first, __last,
 			   std::__iterator_category(__first)); }
 
+  template<typename _Alloc, typename _Value>
+    using __alloc_val_ptr =
+      typename std::allocator_traits<__alloc_rebind<_Alloc,
+						    _Value>>::pointer;
+
   struct _Identity
   {
     template<typename _Tp>
@@ -94,6 +99,10 @@ namespace __detail
       { return std::get<0>(std::forward<_Tp>(__x)); }
   };
 
+#if !_GLIBCXX_INLINE_VERSION
+  struct _Hash_node_base;
+#endif
+
   template<typename _NodeAlloc>
     struct _Hashtable_alloc;
 
@@ -107,24 +116,28 @@ namespace __detail
       using __hashtable_alloc = _Hashtable_alloc<__node_alloc_type>;
       using __node_alloc_traits =
 	typename __hashtable_alloc::__node_alloc_traits;
-      using __node_type = typename __hashtable_alloc::__node_type;
+      using __node_ptr = typename __hashtable_alloc::__node_ptr;
 
     public:
-      _ReuseOrAllocNode(__node_type* __nodes, __hashtable_alloc& __h)
+      _ReuseOrAllocNode(__node_ptr __nodes, __hashtable_alloc& __h)
       : _M_nodes(__nodes), _M_h(__h) { }
+#if !_GLIBCXX_INLINE_VERSION
+      _ReuseOrAllocNode(_Hash_node_base* __nodes, __hashtable_alloc& __h)
+      : _ReuseOrAllocNode(static_cast<__node_ptr>(__nodes), __h) { }
+#endif
       _ReuseOrAllocNode(const _ReuseOrAllocNode&) = delete;
 
       ~_ReuseOrAllocNode()
       { _M_h._M_deallocate_nodes(_M_nodes); }
 
       template<typename _Arg>
-	__node_type*
+	__node_ptr
 	operator()(_Arg&& __arg) const
 	{
 	  if (_M_nodes)
 	    {
-	      __node_type* __node = _M_nodes;
-	      _M_nodes = _M_nodes->_M_next();
+	      __node_ptr __node = _M_nodes;
+	      _M_nodes = _M_nodes->_M_next_ptr();
 	      __node->_M_nxt = nullptr;
 	      auto& __a = _M_h._M_node_allocator();
 	      __node_alloc_traits::destroy(__a, __node->_M_valptr());
@@ -144,7 +157,7 @@ namespace __detail
 	}
 
     private:
-      mutable __node_type* _M_nodes;
+      mutable __node_ptr _M_nodes;
       __hashtable_alloc& _M_h;
     };
 
@@ -155,14 +168,14 @@ namespace __detail
     {
     private:
       using __hashtable_alloc = _Hashtable_alloc<_NodeAlloc>;
-      using __node_type = typename __hashtable_alloc::__node_type;
+      using __node_ptr = typename __hashtable_alloc::__node_ptr;
 
     public:
       _AllocNode(__hashtable_alloc& __h)
       : _M_h(__h) { }
 
       template<typename _Arg>
-	__node_type*
+	__node_ptr
 	operator()(_Arg&& __arg) const
 	{ return _M_h._M_allocate_node(std::forward<_Arg>(__arg)); }
 
@@ -203,6 +216,7 @@ namespace __detail
       using __unique_keys = __bool_constant<_Unique_keys>;
     };
 
+#if !_GLIBCXX_INLINE_VERSION
   /**
    *  struct _Hash_node_base
    *
@@ -219,6 +233,26 @@ namespace __detail
 
     _Hash_node_base(_Hash_node_base* __next) noexcept : _M_nxt(__next) { }
   };
+#endif
+
+  /**
+   * struct _Hash_node_cust_ptr_base
+   *
+   * Like _Hash_node_base, but used when the allocator defines a
+   * custom (non-raw) pointer type.
+   */
+  template<typename _NodePtr>
+    struct _Hash_node_cust_ptr_base
+    {
+      using __node_ptr = _NodePtr;
+
+      __node_ptr _M_nxt;
+
+      _Hash_node_cust_ptr_base() noexcept : _M_nxt() { }
+
+      _Hash_node_cust_ptr_base(__node_ptr __next) noexcept : _M_nxt(__next) { }
+    };
+
 
   /**
    *  struct _Hash_node_value_base
@@ -263,12 +297,21 @@ namespace __detail
     struct _Hash_node_code_cache<true>
     { std::size_t  _M_hash_code; };
 
+#if !_GLIBCXX_INLINE_VERSION
   template<typename _Value, bool _Cache_hash_code>
     struct _Hash_node_value
     : _Hash_node_value_base<_Value>
     , _Hash_node_code_cache<_Cache_hash_code>
     { };
+#endif
 
+  template<typename _Value, bool _Cache_hash_code>
+    struct _Hash_node_cache_value
+    : _Hash_node_code_cache<_Cache_hash_code>
+    , _Hash_node_value_base<_Value>
+    { };
+
+#if !_GLIBCXX_INLINE_VERSION
   /**
    *  Primary template struct _Hash_node.
    */
@@ -277,21 +320,63 @@ namespace __detail
     : _Hash_node_base
     , _Hash_node_value<_Value, _Cache_hash_code>
     {
+      using __node_base = _Hash_node_base;
+      using __node_ptr = _Hash_node*;
+      using __node_type = _Hash_node;
+      using __node_value_cache_type =
+	_Hash_node_value<_Value, _Cache_hash_code>;
+
       _Hash_node*
       _M_next() const noexcept
       { return static_cast<_Hash_node*>(this->_M_nxt); }
+
+      __node_ptr
+      _M_next_ptr() const noexcept
+      { return _M_next(); }
+    };
+#endif
+
+  /**
+   *  Primary template struct _Hash_cust_ptr_node.
+   */
+  template<typename _Ptr, bool _Cache_hash_code>
+    struct _Hash_cust_ptr_node
+    : _Hash_node_cust_ptr_base<__ptr_rebind<_Ptr,
+				   _Hash_cust_ptr_node<_Ptr, _Cache_hash_code>>>
+    , _Hash_node_cache_value<typename std::pointer_traits<_Ptr>::element_type,
+			     _Cache_hash_code>
+    {
+      using __node_base =
+	_Hash_node_cust_ptr_base<__ptr_rebind<_Ptr,
+				_Hash_cust_ptr_node<_Ptr, _Cache_hash_code>>>;
+      using __node_ptr = typename __node_base::__node_ptr;
+      using __node_type =
+	typename std::pointer_traits<__node_ptr>::element_type;
+      using value_type = typename __node_type::value_type;
+      using __node_value_cache_type =
+	_Hash_node_cache_value<value_type, _Cache_hash_code>;
+      typedef typename std::pointer_traits<__node_ptr>::difference_type
+							difference_type;
+
+      __node_type*
+      _M_next() const noexcept
+      { return std::__to_address(this->_M_nxt); }
+
+      __node_ptr
+      _M_next_ptr() const noexcept
+      { return this->_M_nxt; }
     };
 
   /// Base class for node iterators.
-  template<typename _Value, bool _Cache_hash_code>
-    struct _Node_iterator_base
+  template<typename _NodeType>
+    struct _Hashtable_iterator_base
     {
-      using __node_type = _Hash_node<_Value, _Cache_hash_code>;
+      using __node_type = _NodeType;
 
       __node_type* _M_cur;
 
-      _Node_iterator_base() = default;
-      _Node_iterator_base(__node_type* __p) noexcept
+      _Hashtable_iterator_base() = default;
+      _Hashtable_iterator_base(__node_type* __p) noexcept
       : _M_cur(__p) { }
 
       void
@@ -299,18 +384,33 @@ namespace __detail
       { _M_cur = _M_cur->_M_next(); }
 
       friend bool
-      operator==(const _Node_iterator_base& __x, const _Node_iterator_base& __y)
+      operator==(const _Hashtable_iterator_base& __x, const _Hashtable_iterator_base& __y)
       noexcept
       { return __x._M_cur == __y._M_cur; }
 
 #if __cpp_impl_three_way_comparison < 201907L
       friend bool
-      operator!=(const _Node_iterator_base& __x, const _Node_iterator_base& __y)
+      operator!=(const _Hashtable_iterator_base& __x, const _Hashtable_iterator_base& __y)
       noexcept
       { return __x._M_cur != __y._M_cur; }
 #endif
     };
 
+#if !_GLIBCXX_INLINE_VERSION
+  /// Base class for node iterators.
+  template<typename _Value, bool _Cache_hash_code>
+    struct _Node_iterator_base
+    : _Hashtable_iterator_base<_Hash_node<_Value, _Cache_hash_code>>
+    {
+      using __base_type =
+	_Hashtable_iterator_base<_Hash_node<_Value, _Cache_hash_code>>;
+      using __node_type = typename __base_type::__node_type;
+
+      _Node_iterator_base() = default;
+      _Node_iterator_base(__node_type* __p) noexcept
+      : __base_type(__p) { }
+    };
+
   /// Node iterators, used to iterate through all the hashtable.
   template<typename _Value, bool __constant_iterators, bool __cache>
     struct _Node_iterator
@@ -413,6 +513,111 @@ namespace __detail
 	return __tmp;
       }
     };
+#endif
+
+  /// Node iterators, used to iterate through all the hashtable.
+  template<typename _NodeType, bool __constant_iterators>
+    struct _Hashtable_iterator
+    : public _Hashtable_iterator_base<_NodeType>
+    {
+    private:
+      using __base_type = _Hashtable_iterator_base<_NodeType>;
+      using __node_type = typename __base_type::__node_type;
+
+    public:
+      typedef typename __node_type::value_type		value_type;
+      typedef typename __node_type::difference_type	difference_type;
+      typedef std::forward_iterator_tag			iterator_category;
+
+      using pointer = typename std::conditional<__constant_iterators,
+				  const value_type*, value_type*>::type;
+
+      using reference = typename std::conditional<__constant_iterators,
+				  const value_type&, value_type&>::type;
+
+      _Hashtable_iterator() noexcept
+      : __base_type(nullptr) { }
+
+      explicit
+      _Hashtable_iterator(__node_type* __p) noexcept
+      : __base_type(__p) { }
+
+      reference
+      operator*() const noexcept
+      { return this->_M_cur->_M_v(); }
+
+      pointer
+      operator->() const noexcept
+      { return this->_M_cur->_M_valptr(); }
+
+      _Hashtable_iterator&
+      operator++() noexcept
+      {
+	this->_M_incr();
+	return *this;
+      }
+
+      _Hashtable_iterator
+      operator++(int) noexcept
+      {
+	_Hashtable_iterator __tmp(*this);
+	this->_M_incr();
+	return __tmp;
+      }
+    };
+
+  /// Node const_iterators, used to iterate through all the hashtable.
+  template<typename _NodeType, bool __constant_iterators>
+    struct _Hashtable_const_iterator
+    : public _Hashtable_iterator_base<_NodeType>
+    {
+    private:
+      using __base_type = _Hashtable_iterator_base<_NodeType>;
+      using __node_type = typename __base_type::__node_type;
+
+    public:
+      typedef typename __node_type::value_type		value_type;
+      typedef typename __node_type::difference_type	difference_type;
+      typedef std::forward_iterator_tag			iterator_category;
+
+      typedef const value_type*				pointer;
+      typedef const value_type&				reference;
+
+      _Hashtable_const_iterator() noexcept
+      : __base_type(nullptr) { }
+
+      explicit
+      _Hashtable_const_iterator(__node_type* __p) noexcept
+      : __base_type(__p) { }
+
+      _Hashtable_const_iterator(
+	const _Hashtable_iterator<_NodeType,
+				  __constant_iterators>& __x) noexcept
+      : __base_type(__x._M_cur) { }
+
+      reference
+      operator*() const noexcept
+      { return this->_M_cur->_M_v(); }
+
+      pointer
+      operator->() const noexcept
+      { return this->_M_cur->_M_valptr(); }
+
+      _Hashtable_const_iterator&
+      operator++() noexcept
+      {
+	this->_M_incr();
+	return *this;
+      }
+
+      _Hashtable_const_iterator
+      operator++(int) noexcept
+      {
+	_Hashtable_const_iterator __tmp(*this);
+	this->_M_incr();
+	return __tmp;
+      }
+    };
 
   // Many of class template _Hashtable's template parameters are policy
   // classes.  These are defaults for the policies.
@@ -800,16 +1005,22 @@ namespace __detail
 
       using __hash_cached = typename _Traits::__hash_cached;
       using __constant_iterators = typename _Traits::__constant_iterators;
-
-      using __hashtable_alloc = _Hashtable_alloc<
-	__alloc_rebind<_Alloc, _Hash_node<_Value,
-					  __hash_cached::value>>>;
+      using __alloc_ptr = __alloc_val_ptr<_Alloc, _Value>;
+
+      using __node_type =
+#if _GLIBCXX_INLINE_VERSION
+	_Hash_cust_ptr_node<__alloc_ptr, __hash_cached::value>;
+#else
+	typename std::conditional<std::__is_pointer<__alloc_ptr>::__value,
+	    _Hash_node<_Value, __hash_cached::value>,
+	    _Hash_cust_ptr_node<__alloc_ptr, __hash_cached::value>>::type;
+#endif
+      using __node_alloc_type = __alloc_rebind<_Alloc, __node_type>;
 
       using value_type = typename __hashtable_base::value_type;
       using size_type = typename __hashtable_base::size_type;
 
       using __unique_keys = typename _Traits::__unique_keys;
-      using __node_alloc_type = typename __hashtable_alloc::__node_alloc_type;
       using __node_gen_type = _AllocNode<__node_alloc_type>;
 
       __hashtable&
@@ -827,11 +1038,27 @@ namespace __detail
 			const _NodeGetter&, false_type __uks);
 
     public:
-      using iterator = _Node_iterator<_Value, __constant_iterators::value,
-				      __hash_cached::value>;
+      using iterator =
+#if _GLIBCXX_INLINE_VERSION
+	_Hashtable_iterator<__node_type, __constant_iterators::value>;
+#else
+	typename std::conditional<std::__is_pointer<__alloc_ptr>::__value,
+	  _Node_iterator<_Value, __constant_iterators::value,
+			 __hash_cached::value>,
+	  _Hashtable_iterator<__node_type,
+			      __constant_iterators::value>>::type;
+#endif
 
-      using const_iterator = _Node_const_iterator<_Value, __constant_iterators::value,
-						  __hash_cached::value>;
+      using const_iterator =
+#if _GLIBCXX_INLINE_VERSION
+	_Hashtable_const_iterator<__node_type, __constant_iterators::value>;
+#else
+	typename std::conditional<std::__is_pointer<__alloc_ptr>::__value,
+	  _Node_const_iterator<_Value, __constant_iterators::value,
+			       __hash_cached::value>,
+	  _Hashtable_const_iterator<__node_type,
+				    __constant_iterators::value>>::type;
+#endif
 
       using __ireturn_type = typename std::conditional<__unique_keys::value,
 						     std::pair<iterator, bool>,
@@ -1154,6 +1381,7 @@ namespace __detail
       _Tp _M_tp;
     };
 
+#if !_GLIBCXX_INLINE_VERSION
   /**
    *  Primary class template _Local_iterator_base.
    *
@@ -1164,6 +1392,18 @@ namespace __detail
 	   typename _Hash, typename _RangeHash, typename _Unused,
 	   bool __cache_hash_code>
     struct _Local_iterator_base;
+#endif
+
+  /**
+   *  Primary class template _Hashtable_local_iter_base.
+   *
+   *  Base class for local iterators, used to iterate within a bucket
+   *  but not between buckets.
+   */
+  template<typename _Key, typename _NodeType, typename _ExtractKey,
+	   typename _Hash, typename _RangeHash, typename _Unused,
+	   bool __cache_hash_code>
+    struct _Hashtable_local_iter_base;
 
   /**
    *  Primary class template _Hash_code_base.
@@ -1192,9 +1432,11 @@ namespace __detail
     private:
       using __ebo_hash = _Hashtable_ebo_helper<1, _Hash>;
 
+#if !_GLIBCXX_INLINE_VERSION
       // Gives the local iterator implementation access to _M_bucket_index().
       friend struct _Local_iterator_base<_Key, _Value, _ExtractKey,
 					 _Hash, _RangeHash, _Unused, false>;
+#endif
 
     public:
       typedef _Hash					hasher;
@@ -1223,6 +1465,7 @@ namespace __detail
       _M_bucket_index(__hash_code __c, std::size_t __bkt_count) const
       { return _RangeHash{}(__c, __bkt_count); }
 
+#if !_GLIBCXX_INLINE_VERSION
       std::size_t
       _M_bucket_index(const _Hash_node_value<_Value, false>& __n,
 		      std::size_t __bkt_count) const
@@ -1233,13 +1476,34 @@ namespace __detail
 	return _RangeHash{}(_M_hash_code(_ExtractKey{}(__n._M_v())),
 			    __bkt_count);
       }
+#endif
+
+      std::size_t
+      _M_bucket_index(const _Hash_node_cache_value<_Value, false>& __n,
+		      std::size_t __bkt_count) const
+	noexcept( noexcept(declval<const _Hash&>()(declval<const _Key&>()))
+		  && noexcept(declval<const _RangeHash&>()((__hash_code)0,
+							   (std::size_t)0)) )
+      {
+	return _RangeHash{}(_M_hash_code(_ExtractKey{}(__n._M_v())),
+			    __bkt_count);
+      }
 
+#if !_GLIBCXX_INLINE_VERSION
       std::size_t
       _M_bucket_index(const _Hash_node_value<_Value, true>& __n,
 		      std::size_t __bkt_count) const
 	noexcept( noexcept(declval<const _RangeHash&>()((__hash_code)0,
 							(std::size_t)0)) )
       { return _RangeHash{}(__n._M_hash_code, __bkt_count); }
+#endif
+
+      std::size_t
+      _M_bucket_index(const _Hash_node_cache_value<_Value, true>& __n,
+		      std::size_t __bkt_count) const
+	noexcept( noexcept(declval<const _RangeHash&>()((__hash_code)0,
+							(std::size_t)0)) )
+      { return _RangeHash{}(__n._M_hash_code, __bkt_count); }
 
       void
       _M_store_code(_Hash_node_code_cache<false>&, __hash_code) const
@@ -1267,6 +1531,7 @@ namespace __detail
       _M_hash() const { return __ebo_hash::_M_cget(); }
     };
 
+#if !_GLIBCXX_INLINE_VERSION
   /// Partial specialization used when nodes contain a cached hash code.
   template<typename _Key, typename _Value, typename _ExtractKey,
 	   typename _Hash, typename _RangeHash, typename _Unused>
@@ -1306,6 +1571,48 @@ namespace __detail
       std::size_t
       _M_get_bucket() const { return _M_bucket; }  // for debug mode
     };
+#endif
+
+  /// Partial specialization used when nodes contain a cached hash code.
+  template<typename _Key, typename _NodeType, typename _ExtractKey,
+	   typename _Hash, typename _RangeHash, typename _Unused>
+    struct _Hashtable_local_iter_base<_Key, _NodeType, _ExtractKey,
+				      _Hash, _RangeHash, _Unused, true>
+    : public _Hashtable_iterator_base<_NodeType>
+    {
+    protected:
+      using __base_node_iter = _Hashtable_iterator_base<_NodeType>;
+      using value_type = typename _NodeType::value_type;
+      using __hash_code_base = _Hash_code_base<_Key, value_type, _ExtractKey,
+					      _Hash, _RangeHash, _Unused, true>;
+
+      _Hashtable_local_iter_base() = default;
+      _Hashtable_local_iter_base(const __hash_code_base&,
+				 _NodeType* __p,
+				 std::size_t __bkt, std::size_t __bkt_count)
+      : __base_node_iter(__p), _M_bucket(__bkt), _M_bucket_count(__bkt_count)
+      { }
+
+      void
+      _M_incr()
+      {
+	__base_node_iter::_M_incr();
+	if (this->_M_cur)
+	  {
+	    std::size_t __bkt
+	      = _RangeHash{}(this->_M_cur->_M_hash_code, _M_bucket_count);
+	    if (__bkt != _M_bucket)
+	      this->_M_cur = nullptr;
+	  }
+      }
+
+      std::size_t _M_bucket;
+      std::size_t _M_bucket_count;
+
+    public:
+      std::size_t
+      _M_get_bucket() const { return _M_bucket; }  // for debug mode
+    };
 
   // Uninitialized storage for a _Hash_code_base.
   // This type is DefaultConstructible and Assignable even if the
@@ -1338,6 +1645,7 @@ namespace __detail
       _M_h() const { return reinterpret_cast<const _Tp*>(this); }
     };
 
+#if !_GLIBCXX_INLINE_VERSION
   template<typename _Key, typename _Value, typename _ExtractKey,
 	   typename _Hash, typename _RangeHash, typename _Unused>
     using __hash_code_for_local_iter
@@ -1420,7 +1728,87 @@ namespace __detail
       std::size_t
       _M_get_bucket() const { return _M_bucket; }  // for debug mode
     };
+#endif
+
+  /// Partial specialization used when hash codes are not cached.
+  template<typename _Key, typename _NodeType, typename _ExtractKey,
+	   typename _Hash, typename _RangeHash, typename _Unused>
+    struct _Hashtable_local_iter_base<_Key, _NodeType, _ExtractKey,
+				      _Hash, _RangeHash, _Unused, false>
+    : _Hash_code_storage<_Hash>
+    , _Hashtable_iterator_base<_NodeType>
+    {
+    protected:
+      using value_type = typename _NodeType::value_type;
+      using __hash_code_base = _Hash_code_base<_Key, value_type, _ExtractKey,
+					     _Hash, _RangeHash, _Unused, false>;
+      using __node_iter_base = _Hashtable_iterator_base<_NodeType>;
+
+      _Hashtable_local_iter_base() : _M_bucket_count(-1) { }
+
+      _Hashtable_local_iter_base(const __hash_code_base& __base,
+				 _NodeType* __p,
+				 std::size_t __bkt, std::size_t __bkt_count)
+      : __node_iter_base(__p), _M_bucket(__bkt), _M_bucket_count(__bkt_count)
+      { _M_init(__base.hash_function()); }
+
+      ~_Hashtable_local_iter_base()
+      {
+	if (_M_bucket_count != -1)
+	  _M_destroy();
+      }
+
+      _Hashtable_local_iter_base(const _Hashtable_local_iter_base& __iter)
+      : __node_iter_base(__iter._M_cur), _M_bucket(__iter._M_bucket)
+      , _M_bucket_count(__iter._M_bucket_count)
+      {
+	if (_M_bucket_count != -1)
+	  _M_init(*__iter._M_h());
+      }
+
+      _Hashtable_local_iter_base&
+      operator=(const _Hashtable_local_iter_base& __iter)
+      {
+	if (_M_bucket_count != -1)
+	  _M_destroy();
+	this->_M_cur = __iter._M_cur;
+	_M_bucket = __iter._M_bucket;
+	_M_bucket_count = __iter._M_bucket_count;
+	if (_M_bucket_count != -1)
+	  _M_init(*__iter._M_h());
+	return *this;
+      }
+
+      void
+      _M_incr()
+      {
+	__node_iter_base::_M_incr();
+	if (this->_M_cur)
+	  {
+	    std::size_t __bkt =
+	      _RangeHash{}((*this->_M_h())(_ExtractKey{}(this->_M_cur->_M_v())),
+			   _M_bucket_count);
+	    if (__bkt != _M_bucket)
+	      this->_M_cur = nullptr;
+	  }
+      }
+
+      std::size_t _M_bucket;
+      std::size_t _M_bucket_count;
+
+      void
+      _M_init(const _Hash& __h)
+      { ::new(this->_M_h()) _Hash(__h); }
+
+      void
+      _M_destroy() { this->_M_h()->~_Hash(); }
+
+    public:
+      std::size_t
+      _M_get_bucket() const { return _M_bucket; }  // for debug mode
+    };
 
+#if !_GLIBCXX_INLINE_VERSION
   /// local iterators
   template<typename _Key, typename _Value, typename _ExtractKey,
 	   typename _Hash, typename _RangeHash, typename _Unused,
@@ -1535,6 +1923,127 @@ namespace __detail
 	return __tmp;
       }
     };
+#endif
+
+  /// local iterators
+  template<typename _Key, typename _NodeType, typename _ExtractKey,
+	   typename _Hash, typename _RangeHash, typename _Unused,
+	   bool __constant_iterators, bool __cache>
+    struct _Hashtable_local_iterator
+    : public _Hashtable_local_iter_base<_Key, _NodeType, _ExtractKey,
+					_Hash, _RangeHash, _Unused, __cache>
+    {
+    private:
+      using __base_type =
+	_Hashtable_local_iter_base<_Key, _NodeType, _ExtractKey,
+				   _Hash, _RangeHash, _Unused, __cache>;
+      using __hash_code_base = typename __base_type::__hash_code_base;
+      using __node_type = typename __base_type::__node_type;
+
+    public:
+      typedef typename __base_type::value_type		value_type;
+      typedef typename std::conditional<__constant_iterators,
+					const value_type*, value_type*>::type
+							pointer;
+      typedef typename std::conditional<__constant_iterators,
+					const value_type&, value_type&>::type
+							reference;
+      typedef typename __node_type::difference_type	difference_type;
+      typedef std::forward_iterator_tag			iterator_category;
+
+      _Hashtable_local_iterator() = default;
+
+      _Hashtable_local_iterator(const __hash_code_base& __base,
+				__node_type* __n,
+				std::size_t __bkt, std::size_t __bkt_count)
+      : __base_type(__base, __n, __bkt, __bkt_count)
+      { }
+
+      reference
+      operator*() const
+      { return this->_M_cur->_M_v(); }
+
+      pointer
+      operator->() const
+      { return this->_M_cur->_M_valptr(); }
+
+      _Hashtable_local_iterator&
+      operator++()
+      {
+	this->_M_incr();
+	return *this;
+      }
+
+      _Hashtable_local_iterator
+      operator++(int)
+      {
+	_Hashtable_local_iterator __tmp(*this);
+	this->_M_incr();
+	return __tmp;
+      }
+    };
+
+  /// local const_iterators
+  template<typename _Key, typename _NodeType, typename _ExtractKey,
+	   typename _Hash, typename _RangeHash, typename _Unused,
+	   bool __constant_iterators, bool __cache>
+    struct _Hashtable_const_local_iterator
+    : public _Hashtable_local_iter_base<_Key, _NodeType, _ExtractKey,
+					_Hash, _RangeHash, _Unused, __cache>
+    {
+    private:
+      using __base_type =
+	_Hashtable_local_iter_base<_Key, _NodeType, _ExtractKey,
+				   _Hash, _RangeHash, _Unused, __cache>;
+      using __hash_code_base = typename __base_type::__hash_code_base;
+      using __node_type = typename __base_type::__node_type;
+
+    public:
+      typedef typename __base_type::value_type		value_type;
+      typedef const value_type*				pointer;
+      typedef const value_type&				reference;
+      typedef typename __node_type::difference_type	difference_type;
+      typedef std::forward_iterator_tag			iterator_category;
+
+      _Hashtable_const_local_iterator() = default;
+
+      _Hashtable_const_local_iterator(const __hash_code_base& __base,
+				      __node_type* __n,
+				    std::size_t __bkt, std::size_t __bkt_count)
+      : __base_type(__base, __n, __bkt, __bkt_count)
+      { }
+
+      _Hashtable_const_local_iterator(const _Hashtable_local_iterator<
+				      _Key, _NodeType, _ExtractKey,
+				      _Hash, _RangeHash, _Unused,
+				      __constant_iterators,
+				      __cache>& __x)
+      : __base_type(__x)
+      { }
+
+      reference
+      operator*() const
+      { return this->_M_cur->_M_v(); }
+
+      pointer
+      operator->() const
+      { return this->_M_cur->_M_valptr(); }
+
+      _Hashtable_const_local_iterator&
+      operator++()
+      {
+	this->_M_incr();
+	return *this;
+      }
+
+      _Hashtable_const_local_iterator
+      operator++(int)
+      {
+	_Hashtable_const_local_iterator __tmp(*this);
+	this->_M_incr();
+	return __tmp;
+      }
+    };
 
   /**
    *  Primary class template _Hashtable_base.
@@ -1597,6 +2106,7 @@ namespace __detail
       : __hash_code_base(__hash), _EqualEBO(__eq)
       { }
 
+#if !_GLIBCXX_INLINE_VERSION
       bool
       _M_equals(const _Key& __k, __hash_code __c,
 		const _Hash_node_value<_Value, __hash_cached::value>& __n) const
@@ -1606,7 +2116,19 @@ namespace __detail
 	  "key type");
 	return _S_equals(__c, __n) && _M_eq()(__k, _ExtractKey{}(__n._M_v()));
       }
+#endif
+
+      bool
+      _M_equals(const _Key& __k, __hash_code __c,
+		const _Hash_node_cache_value<_Value, __hash_cached::value>& __n) const
+      {
+	static_assert(__is_invocable<const _Equal&, const _Key&, const _Key&>{},
+	  "key equality predicate must be invocable with two arguments of "
+	  "key type");
+	return _S_equals(__c, __n) && _M_eq()(__k, _ExtractKey{}(__n._M_v()));
+      }
 
+#if !_GLIBCXX_INLINE_VERSION
       bool
       _M_node_equals(
 	const _Hash_node_value<_Value, __hash_cached::value>& __lhn,
@@ -1615,6 +2137,16 @@ namespace __detail
 	return _S_node_equals(__lhn, __rhn)
 	  && _M_eq()(_ExtractKey{}(__lhn._M_v()), _ExtractKey{}(__rhn._M_v()));
       }
+#endif
+
+      bool
+      _M_node_equals(
+	const _Hash_node_cache_value<_Value, __hash_cached::value>& __lhn,
+	const _Hash_node_cache_value<_Value, __hash_cached::value>& __rhn) const
+      {
+	return _S_node_equals(__lhn, __rhn)
+	  && _M_eq()(_ExtractKey{}(__lhn._M_v()), _ExtractKey{}(__rhn._M_v()));
+      }
 
       void
       _M_swap(_Hashtable_base& __x)
@@ -1679,8 +2211,9 @@ namespace __detail
 	  if (!__prev_n)
 	    return false;
 
-	  for (__node_type* __n = static_cast<__node_type*>(__prev_n->_M_nxt);;
-	       __n = __n->_M_next())
+	  __node_type* __n =
+	    static_cast<__node_type*>(std::__to_address(__prev_n->_M_nxt));
+	  for (;; __n = __n->_M_next())
 	    {
 	      if (__n->_M_v() == *__itx)
 		break;
@@ -1739,7 +2272,8 @@ namespace __detail
 	  if (!__y_prev_n)
 	    return false;
 
-	  __node_type* __y_n = static_cast<__node_type*>(__y_prev_n->_M_nxt);
+	  __node_type* __y_n =
+	    static_cast<__node_type*>(std::__to_address(__y_prev_n->_M_nxt));
 	  for (;;)
 	    {
 	      if (__this->key_eq()(_ExtractKey{}(__y_n->_M_v()),
@@ -1786,16 +2320,12 @@ namespace __detail
       // Use __gnu_cxx to benefit from _S_always_equal and al.
       using __node_alloc_traits = __gnu_cxx::__alloc_traits<__node_alloc_type>;
 
-      using __value_alloc_traits = typename __node_alloc_traits::template
-	rebind_traits<typename __node_type::value_type>;
-
-      using __node_ptr = __node_type*;
-      using __node_base = _Hash_node_base;
-      using __node_base_ptr = __node_base*;
+      using __node_ptr = typename __node_alloc_traits::pointer;
+      using __node_base = typename __node_type::__node_base;
       using __buckets_alloc_type =
-	__alloc_rebind<__node_alloc_type, __node_base_ptr>;
+	__alloc_rebind<__node_alloc_type, __node_base*>;
       using __buckets_alloc_traits = std::allocator_traits<__buckets_alloc_type>;
-      using __buckets_ptr = __node_base_ptr*;
+      using __buckets_ptr = typename __buckets_alloc_traits::pointer;
 
       _Hashtable_alloc() = default;
       _Hashtable_alloc(const _Hashtable_alloc&) = default;
@@ -1848,14 +2378,13 @@ namespace __detail
       -> __node_ptr
       {
 	auto __nptr = __node_alloc_traits::allocate(_M_node_allocator(), 1);
-	__node_ptr __n = std::__to_address(__nptr);
 	__try
 	  {
-	    ::new ((void*)__n) __node_type;
+	    ::new ((void*)std::__to_address(__nptr)) __node_type;
 	    __node_alloc_traits::construct(_M_node_allocator(),
-					   __n->_M_valptr(),
+					   __nptr->_M_valptr(),
 					   std::forward<_Args>(__args)...);
-	    return __n;
+	    return __nptr;
 	  }
 	__catch(...)
 	  {
@@ -1866,20 +2395,18 @@ namespace __detail
 
   template<typename _NodeAlloc>
     void
-    _Hashtable_alloc<_NodeAlloc>::_M_deallocate_node(__node_ptr __n)
+    _Hashtable_alloc<_NodeAlloc>::_M_deallocate_node(__node_ptr __nptr)
     {
-      __node_alloc_traits::destroy(_M_node_allocator(), __n->_M_valptr());
-      _M_deallocate_node_ptr(__n);
+      __node_alloc_traits::destroy(_M_node_allocator(), __nptr->_M_valptr());
+      _M_deallocate_node_ptr(__nptr);
     }
 
   template<typename _NodeAlloc>
     void
-    _Hashtable_alloc<_NodeAlloc>::_M_deallocate_node_ptr(__node_ptr __n)
+    _Hashtable_alloc<_NodeAlloc>::_M_deallocate_node_ptr(__node_ptr __nptr)
     {
-      typedef typename __node_alloc_traits::pointer _Ptr;
-      auto __ptr = std::pointer_traits<_Ptr>::pointer_to(*__n);
-      __n->~__node_type();
-      __node_alloc_traits::deallocate(_M_node_allocator(), __ptr, 1);
+      __nptr->~__node_type();
+      __node_alloc_traits::deallocate(_M_node_allocator(), __nptr, 1);
     }
 
   template<typename _NodeAlloc>
@@ -1889,7 +2416,7 @@ namespace __detail
       while (__n)
 	{
 	  __node_ptr __tmp = __n;
-	  __n = __n->_M_next();
+	  __n = __n->_M_next_ptr();
 	  _M_deallocate_node(__tmp);
 	}
     }
@@ -1902,9 +2429,9 @@ namespace __detail
       __buckets_alloc_type __alloc(_M_node_allocator());
 
       auto __ptr = __buckets_alloc_traits::allocate(__alloc, __bkt_count);
-      __buckets_ptr __p = std::__to_address(__ptr);
-      __builtin_memset(__p, 0, __bkt_count * sizeof(__node_base_ptr));
-      return __p;
+      __builtin_memset(std::__to_address(__ptr), 0,
+		       __bkt_count * sizeof(__node_base*));
+      return __ptr;
     }
 
   template<typename _NodeAlloc>
@@ -1913,10 +2440,8 @@ namespace __detail
     _M_deallocate_buckets(__buckets_ptr __bkts,
 			  std::size_t __bkt_count)
     {
-      typedef typename __buckets_alloc_traits::pointer _Ptr;
-      auto __ptr = std::pointer_traits<_Ptr>::pointer_to(*__bkts);
       __buckets_alloc_type __alloc(_M_node_allocator());
-      __buckets_alloc_traits::deallocate(__alloc, __ptr, __bkt_count);
+      __buckets_alloc_traits::deallocate(__alloc, __bkts, __bkt_count);
     }
 
  //@} hashtable-detail
diff --git a/libstdc++-v3/testsuite/23_containers/unordered_map/allocator/ext_ptr.cc b/libstdc++-v3/testsuite/23_containers/unordered_map/allocator/ext_ptr.cc
new file mode 100644
index 00000000000..e9d7ada7151
--- /dev/null
+++ b/libstdc++-v3/testsuite/23_containers/unordered_map/allocator/ext_ptr.cc
@@ -0,0 +1,57 @@
+// Copyright (C) 2020 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library.  This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING3.  If not see
+// <http://www.gnu.org/licenses/>.
+
+// { dg-do run { target c++11 } }
+
+#include <unordered_map>
+#include <memory>
+#include <testsuite_hooks.h>
+#include <testsuite_allocator.h>
+
+struct T { int i; };
+
+bool operator==(const T& l, const T& r)
+{ return l.i == r.i; }
+
+struct H
+{
+  std::size_t
+  operator()(const T& t) const noexcept
+  { return t.i; }
+};
+
+struct E : std::equal_to<T>
+{ };
+
+using __gnu_test::CustomPointerAlloc;
+
+template class std::unordered_map<T, int, H, E,
+				  CustomPointerAlloc<std::pair<const T, int>>>;
+
+void test01()
+{
+  typedef CustomPointerAlloc<std::pair<const T, int>> alloc_type;
+  typedef std::unordered_map<T, int, H, E, alloc_type> test_type;
+  test_type v;
+  v.insert({ T(), 0 });
+  VERIFY( ++v.begin() == v.end() );
+}
+
+int main()
+{
+  test01();
+}
diff --git a/libstdc++-v3/testsuite/23_containers/unordered_multimap/allocator/ext_ptr.cc b/libstdc++-v3/testsuite/23_containers/unordered_multimap/allocator/ext_ptr.cc
new file mode 100644
index 00000000000..4a895a6302c
--- /dev/null
+++ b/libstdc++-v3/testsuite/23_containers/unordered_multimap/allocator/ext_ptr.cc
@@ -0,0 +1,57 @@
+// Copyright (C) 2020 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library.  This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING3.  If not see
+// <http://www.gnu.org/licenses/>.
+
+// { dg-do run { target c++11 } }
+
+#include <unordered_map>
+#include <memory>
+#include <testsuite_hooks.h>
+#include <testsuite_allocator.h>
+
+struct T { int i; };
+
+bool operator==(const T& l, const T& r)
+{ return l.i == r.i; }
+
+struct H
+{
+  std::size_t
+  operator()(const T& t) const noexcept
+  { return t.i; }
+};
+
+struct E : std::equal_to<T>
+{ };
+
+using __gnu_test::CustomPointerAlloc;
+
+template class std::unordered_multimap<T, int, H, E,
+				       CustomPointerAlloc<std::pair<const T, int>>>;
+
+void test01()
+{
+  typedef CustomPointerAlloc<std::pair<const T, int>> alloc_type;
+  typedef std::unordered_multimap<T, int, H, E, alloc_type> test_type;
+  test_type v;
+  v.insert({ T(), 0 });
+  VERIFY( ++v.begin() == v.end() );
+}
+
+int main()
+{
+  test01();
+}
diff --git a/libstdc++-v3/testsuite/23_containers/unordered_multiset/allocator/ext_ptr.cc b/libstdc++-v3/testsuite/23_containers/unordered_multiset/allocator/ext_ptr.cc
new file mode 100644
index 00000000000..36b5e10cc7b
--- /dev/null
+++ b/libstdc++-v3/testsuite/23_containers/unordered_multiset/allocator/ext_ptr.cc
@@ -0,0 +1,56 @@
+// Copyright (C) 2020 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library.  This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING3.  If not see
+// <http://www.gnu.org/licenses/>.
+
+// { dg-do run { target c++11 } }
+
+#include <unordered_set>
+#include <memory>
+#include <testsuite_hooks.h>
+#include <testsuite_allocator.h>
+
+struct T { int i; };
+
+bool operator==(const T& l, const T& r)
+{ return l.i == r.i; }
+
+struct H
+{
+  std::size_t
+  operator()(const T& t) const noexcept
+  { return t.i; }
+};
+
+struct E : std::equal_to<T>
+{ };
+
+using __gnu_test::CustomPointerAlloc;
+
+template class std::unordered_multiset<T, H, E, CustomPointerAlloc<T>>;
+
+void test01()
+{
+  typedef CustomPointerAlloc<T> alloc_type;
+  typedef std::unordered_multiset<T, H, E, alloc_type> test_type;
+  test_type v;
+  v.insert(T());
+  VERIFY( ++v.begin() == v.end() );
+}
+
+int main()
+{
+  test01();
+}
diff --git a/libstdc++-v3/testsuite/23_containers/unordered_set/allocator/ext_ptr.cc b/libstdc++-v3/testsuite/23_containers/unordered_set/allocator/ext_ptr.cc
index f6b908ac03e..479104709fb 100644
--- a/libstdc++-v3/testsuite/23_containers/unordered_set/allocator/ext_ptr.cc
+++ b/libstdc++-v3/testsuite/23_containers/unordered_set/allocator/ext_ptr.cc
@@ -15,10 +15,7 @@
 // with this library; see the file COPYING3.  If not see
 // <http://www.gnu.org/licenses/>.
 
-// This test fails to compile since C++17 (see xfail-if below) so we can only
-// do a "run" test for C++11 and C++14, and a "compile" test for C++17 and up.
-// { dg-do run { target { c++11_only || c++14_only } } }
-// { dg-do compile { target c++17 } }
+// { dg-do run { target { c++11 } } }
 
 #include <unordered_set>
 #include <memory>
@@ -26,15 +23,22 @@
 #include <testsuite_allocator.h>
 
 struct T { int i; };
-bool operator==(const T& l, const T& r) { return l.i == r.i; }
-struct H { std::size_t operator()(const T& t) const noexcept { return t.i; }
+
+bool operator==(const T& l, const T& r)
+{ return l.i == r.i; }
+
+struct H
+{
+  std::size_t
+  operator()(const T& t) const noexcept
+  { return t.i; }
 };
-struct E : std::equal_to<T> { };
+
+struct E : std::equal_to<T>
+{ };
 
 using __gnu_test::CustomPointerAlloc;
 
-// { dg-xfail-if "node reinsertion assumes raw pointers" { c++17 } }
-// TODO when removing this xfail change the test back to "dg-do run".
 template class std::unordered_set<T, H, E, CustomPointerAlloc<T>>;
 
 void test01()

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: libstdc++ PR 57272 Fancy pointer support in Hashtable
  2020-11-01 21:48       ` François Dumont
@ 2020-11-02 14:11         ` Jonathan Wakely
  2020-11-02 21:33           ` François Dumont
                             ` (2 more replies)
  0 siblings, 3 replies; 10+ messages in thread
From: Jonathan Wakely @ 2020-11-02 14:11 UTC (permalink / raw)
  To: François Dumont; +Cc: libstdc++

On 01/11/20 22:48 +0100, François Dumont via Libstdc++ wrote:
>Here is an other attempt.
>
>This time I am storing the node using allocator pointer just in the 
>singly linked list of nodes. Buckets are still __node_base* so that 
>the custom pointer is not manipulated too much. Moreover iterators are 
>also using node raw pointers.

There's no point doing it if you still use raw pointers.

It either has to be done completely, or it's a waste of time and
energy.

>As advised I introduced new types in case of custom pointers. But I am 
>also using those in gnu versioned namespace so that I could more 
>easily test this new code with all existing test cases.
>
>Note that as we are at introducing a new abi I am also changing node 
>memory layout in this case. I think it is better to put the hash code 
>cache before the node value rather than after. It will be closer to 
>the beginning of the node and so accessible without mem page fault.

I think that's a bad idea. if somebody is using a fancy pointer which
is just a thin wrapper around a real pointer, and it has implicit
conversions to/from the real pointer), then I think it might work OK
today. For example, __gnu_cxx::_Ext_pointer in <ext/pointer.h>. If the
size and alignment of the fancy pointer is just the same as the real
pointer, and the layout of the node classes doesn't change order, I
think that will probably Just Work.

If you reorder the node members, it definitely won't work.

Is this a realistic scenario? I don't know. It might be.

If we want to do that only for the versioned namespace, that would be
OK, but should be a separate patch.

I'm also concerned about the number of differences that depend on
_GLIBCXX_INLINE_VERSION. The code gets a lot less maintainable with so
many differences, and they only exist to support a mode nobody uses.

Wouldn't implementing https://wg21.link/P0809R0 or
https://wg21.link/P0919R3 (and https://wg21.link/P1690R1) be a better
use of time?

There are a couple more comments below, for things that I noticed
while quickly skimming over the patch.


>To be clear the node mem layout is:
>- next node pointer
>- node value_type
>- hash code (optional)
>
>The new node mem layout is:
>- next node pointer
>- hash code (optional)
>- node value_type
>
>Here is the git log in case you validate it.
>
>    libstdc++: Store allocator::pointer in hashtable implementation
>
>    Use allocator pointer type in _Hashtable implementation.
>
>            * include/bits/hashtable_policy.h
>            (_ReuseOrAllocNode<>::__node_type): Remove.
>            (_ReuseOrAllocNode<>::__node_pointer): New.
>            (_ReuseOrAllocNode(__node_pointer, __hashtable_alloc&)): 
>Adapt to use
>            latter.
>(_ReuseOrAllocNode<>::operator()(_Arg&&)): Return latter.
>            (_AllocNode<>::__node_type): Remove.
>            (_AllocNode<>::__node_pointer): New.
>(_AllocNode<>::operator()<>(_Arg&&)): Return latter.
>            (_Hash_node_cust_ptr_base<>): New.
>            (_Hash_node_cache_value<typename _Value, bool 
>_Cache_hash_code>): New.
>            (_Hash_node<>::__node_base): New.
>            (_Hash_node<>::__node_ptr): New.
>            (_Hash_node<>::__node_type): New.
>            (_Hash_node<>::__node_value_cache_type): New.
>            (_Hash_node<>::_M_next_ptr()): New.
>            (_Hash_cust_ptr_node<typename _Ptr, bool 
>_Cache_hash_code>): New.
>            (_Hashtable_iterator_base<typename _NodeType>): New.
>            (_Node_iterator_base<>): Inherits from latter.
>            (_Hashtable_iterator<typename _NodeType, bool 
>__constant_iterators>):
>            New.
>            (_Hashtable_const_iterator<typename _NodeType, bool 
>__constant_iterators>):
>            New.
>            (_Insert_base<>::__alloc_ptr): New.
>            (_Insert_base<>::__node_type): New. Define conditionally 
>to _Hash_node<>
>            or _Hash_cust_ptr_node<> depending on __alloc_ptr being a 
>raw pointer.
>            (_Insert_base<>::__node_alloc_type): New.
>            (_Insert_base<>::__hashtable_alloc): Remove.
>            (_Insert_base<>::iterator): Define conditionally to 
>_Node_iterator<>
>            or _Hashtable_iterator<> depending on __alloc_ptr being a 
>raw pointer.
>            (_Insert_base<>::const_iterator): Define conditionally to
>            _Node_const_iterator<> or _Hashtable_const_iterator<> 
>depending on
>            __alloc_ptr being a raw pointer.
>            (_Hashtable_local_iter_base<>): New.
>            (_Hash_code_base<>::_M_bucket_index(const 
>_Hash_node_cache_value<>&,
>            size_t)): New.
>            (_Hashtable_local_iter_base<>): New.
>            (_Hashtable_local_iterator<>): New.
>            (_Hashtable_const_local_iterator<>): New.
>            (_Hashtable_base<>::_M_equals(const _Key&, __hash_code,
>            const _Hash_node_cache_value<>&): New.
>            (_Hashtable_base<>::_M_node_equals(const 
>_Hash_node_cache_value<>&,
>            const _Hash_node_cache_value<>&)): New.
>            (_Hashtable_alloc<>::__value_alloc_traits): Remove.
>            (_Hashtable_alloc<>::__node_base_ptr): Remove.
>            * include/bits/hashtable.h (_Hashtable<>): Adapt.
>            * 
>testsuite/23_containers/unordered_map/allocator/ext_ptr.cc: New test.
>            * 
>testsuite/23_containers/unordered_multimap/allocator/ext_ptr.cc:
>            New test.
>            * 
>testsuite/23_containers/unordered_multiset/allocator/ext_ptr.cc:
>            New test.
>            * 
>testsuite/23_containers/unordered_set/allocator/ext_ptr.cc: Adapt.
>
>Tested under Linux x86_64 normal and version namespace modes.
>
>François
>
>
>On 20/10/20 1:04 pm, Jonathan Wakely wrote:
>>On 28/09/20 22:37 +0200, François Dumont via Libstdc++ wrote:
>>>Following recent changes on _Hashtable I rebase the patch and 
>>>completely review it.
>>>
>>>I managed to integrate the allocator custom pointer type without 
>>>touching to _Hashtable base types like _Hash_code_base or 
>>>_Hashtable_base. However I cannot see how to use the custom 
>>>pointer type without impacting the node types like _Hash_node_base 
>>>which now takes a template parameter, the custom pointer type.
>>>
>>>On an abi point of view node types are different however the data 
>>>structure is the same. The only difference is that the 
>>>_Hash_node_base _M_nxt is now a _Hash_node<> custom pointer rather 
>>>than a simple _Hash_node_base*.
>>>
>>>Even if this patch can't go in because of the abi breaking change 
>>>I am going to adapt some of the code simplifications for master. 
>>>Especially the _Hash_code_base and _Local_iterator_base 
>>>simplifications.
>>>
>>>Let me know if you can think of a way to integrate the custom 
>>>pointer without impacting abi. Unless impacting node types and 
>>>associated iterator types is fine even if I already noticed that 
>>>pretty printer tests are broken with those changes.
>>
>>The approach I used for the other containers (which was never
>>completed and committed) is something like:
>>
>>struct _Node_base
>>{
>>  _Node_base* _M_next;
>>};
>>
>>template<typename _Ptr>
>>struct _Fancy_node_base
>>{
>>  _Ptr _M_next;
>>};
>>
>>template<typename _Ptr>
>>  using node_base = conditional_t<is_pointer<_Ptr>::value,
>>                                  _Node_base,
>>_Fancy_node_base<_Ptr>>;
>>
>>This way all existing code that has allocators with non-fancy pointers
>>continues to use the same type. Code using fancy pointers (which
>>doesn't currently work properly anyway) changes to use the new types
>>that depend on the pointer type.
>>
>

>diff --git a/libstdc++-v3/include/bits/hashtable.h b/libstdc++-v3/include/bits/hashtable.h
>index 6c6c5edde0b..86644d447ca 100644
>--- a/libstdc++-v3/include/bits/hashtable.h
>+++ b/libstdc++-v3/include/bits/hashtable.h
>@@ -182,8 +182,19 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
> 				 _RehashPolicy, _Traits>,
>       private __detail::_Hashtable_alloc<
> 	__alloc_rebind<_Alloc,
>-		       __detail::_Hash_node<_Value,
>-					    _Traits::__hash_cached::value>>>
>+#if _GLIBCXX_INLINE_VERSION
>+		       __detail::_Hash_cust_ptr_node<
>+			 __detail::__alloc_val_ptr<_Alloc, _Value>,
>+			 _Traits::__hash_cached::value>>>
>+#else
>+	  typename std::conditional<
>+	    std::__is_pointer<
>+	      __detail::__alloc_val_ptr<_Alloc, _Value>>::__value,
>+	    __detail::_Hash_node<_Value, _Traits::__hash_cached::value>,
>+	    __detail::_Hash_cust_ptr_node<
>+	      __detail::__alloc_val_ptr<_Alloc, _Value>,
>+	      _Traits::__hash_cached::value>>::type>>
>+#endif

This ugliness should be hidden behind an alias template.

Use is_pointer<P>::value, not std::__is_pointer<P>::__value.
This is C++11 code, there's no need to use the C++98 traits. And you
don't need the std:: qualification.

_Hash_cust_ptr_node is not a good name if it's also used (sometimes)
for normal pointers. How about _Hash_pnode, or something like that?


>+
>+  /**
>+   *  Primary template struct _Hash_cust_ptr_node.

This comment is not useful. It shouldn't a Doxygen comment, because
this is not something we need to put in the API documentation for
end-users. I can tell it's the primary template, because I can read
C++. What is the type for? How is it different to _Hash_node? That's
what I'd like to read here.

>+   */
>+  template<typename _Ptr, bool _Cache_hash_code>
>+    struct _Hash_cust_ptr_node
>+    : _Hash_node_cust_ptr_base<__ptr_rebind<_Ptr,
>+				   _Hash_cust_ptr_node<_Ptr, _Cache_hash_code>>>
>+    , _Hash_node_cache_value<typename std::pointer_traits<_Ptr>::element_type,
>+			     _Cache_hash_code>


^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: libstdc++ PR 57272 Fancy pointer support in Hashtable
  2020-11-02 14:11         ` Jonathan Wakely
@ 2020-11-02 21:33           ` François Dumont
  2021-01-11 18:10           ` François Dumont
  2021-06-10 17:22           ` François Dumont
  2 siblings, 0 replies; 10+ messages in thread
From: François Dumont @ 2020-11-02 21:33 UTC (permalink / raw)
  To: Jonathan Wakely; +Cc: libstdc++

On 02/11/20 3:11 pm, Jonathan Wakely wrote:
> On 01/11/20 22:48 +0100, François Dumont via Libstdc++ wrote:
>> Here is an other attempt.
>>
>> This time I am storing the node using allocator pointer just in the 
>> singly linked list of nodes. Buckets are still __node_base* so that 
>> the custom pointer is not manipulated too much. Moreover iterators 
>> are also using node raw pointers.
>
> There's no point doing it if you still use raw pointers.
>
> It either has to be done completely, or it's a waste of time and
> energy.

Is there a clear Standard point I am violating in doing so ? Is there a 
chance that std::__to_address is ill formed ?

In hashtable we have somehow 2 data structures, the singly linked list and 
the buckets array. I thought it was rather a good idea to not store the 
custom pointers in both. It makes it clearer that the nodes are stored in 

>
>> As advised I introduced new types in case of custom pointers. But I 
>> am also using those in gnu versioned namespace so that I could more 
>> easily test this new code with all existing test cases.
>>
>> Note that as we are at introducing a new abi I am also changing node 
>> memory layout in this case. I think it is better to put the hash code 
>> cache before the node value rather than after. It will be closer to 
>> the beginning of the node and so accessible without mem page fault.
>
> I think that's a bad idea. if somebody is using a fancy pointer which
> is just a thin wrapper around a real pointer, and it has implicit
> conversions to/from the real pointer), then I think it might work OK
> today. For example, __gnu_cxx::_Ext_pointer in <ext/pointer.h>. If the
> size and alignment of the fancy pointer is just the same as the real
> pointer, and the layout of the node classes doesn't change order, I
> think that will probably Just Work.

Ok, good point, I'll remove this part of the patch.

> If you reorder the node members, it definitely won't work.
>
> Is this a realistic scenario? I don't know. It might be.
>
> If we want to do that only for the versioned namespace, that would be
> OK, but should be a separate patch.
>
> I'm also concerned about the number of differences that depend on
> _GLIBCXX_INLINE_VERSION. The code gets a lot less maintainable with so
> many differences, and they only exist to support a mode nobody uses.
Still, it helps to validate the code which is normally dedicated to 
custom pointers.
>
> Wouldn't implementing https://wg21.link/P0809R0 or
> https://wg21.link/P0919R3 (and https://wg21.link/P1690R1) be a better
> use of time?

I thought support of custom pointers was.

But sure, I can have a look at those too. But I won't give up on this one !

>
> There are a couple more comments below, for things that I noticed
> while quickly skimming over the patch.
>
>
>> To be clear the node mem layout is:
>> - next node pointer
>> - node value_type
>> - hash code (optional)
>>
>> The new node mem layout is:
>> - next node pointer
>> - hash code (optional)
>> - node value_type
>>
>> Here is the git log in case you validate it.
>>
>>     libstdc++: Store allocator::pointer in hashtable implementation
>>
>>     Use allocator pointer type in _Hashtable implementation.
>>
>>             * include/bits/hashtable_policy.h
>>             (_ReuseOrAllocNode<>::__node_type): Remove.
>>            (_ReuseOrAllocNode<>::__node_pointer): New.
>>             (_ReuseOrAllocNode(__node_pointer, 
>> __hashtable_alloc&)): Adapt to use
>>             latter.
>> (_ReuseOrAllocNode<>::operator()(_Arg&&)): Return latter.
>>             (_AllocNode<>::__node_type): Remove.
>>             (_AllocNode<>::__node_pointer): New.
>> (_AllocNode<>::operator()<>(_Arg&&)): Return latter.
>>             (_Hash_node_cust_ptr_base<>): New.
>>             (_Hash_node_cache_value<typename _Value, bool 
>> _Cache_hash_code>): New.
>>             (_Hash_node<>::__node_base): New.
>>             (_Hash_node<>::__node_ptr): New.
>>             (_Hash_node<>::__node_type): New.
>>            (_Hash_node<>::__node_value_cache_type): New.
>>             (_Hash_node<>::_M_next_ptr()): New.
>>             (_Hash_cust_ptr_node<typename _Ptr, bool 
>> _Cache_hash_code>): New.
>>             (_Hashtable_iterator_base<typename 
>> _NodeType>): New.
>>             (_Node_iterator_base<>): Inherits from latter.
>>             (_Hashtable_iterator<typename _NodeType, bool 
>> __constant_iterators>):
>>             New.
>>             (_Hashtable_const_iterator<typename _NodeType, 
>> bool __constant_iterators>):
>>             New.
>>             (_Insert_base<>::__alloc_ptr): New.
>>             (_Insert_base<>::__node_type): New. Define 
>> conditionally to _Hash_node<>
>>             or _Hash_cust_ptr_node<> depending on 
>> __alloc_ptr being a raw pointer.
>>            (_Insert_base<>::__node_alloc_type): New.
>>            (_Insert_base<>::__hashtable_alloc): Remove.
>>             (_Insert_base<>::iterator): Define 
>> conditionally to _Node_iterator<>
>>             or _Hashtable_iterator<> depending on 
>> __alloc_ptr being a raw pointer.
>>             (_Insert_base<>::const_iterator): Define 
>> conditionally to
>>             _Node_const_iterator<> or 
>> _Hashtable_const_iterator<> depending on
>>             __alloc_ptr being a raw pointer.
>>             (_Hashtable_local_iter_base<>): New.
>>            (_Hash_code_base<>::_M_bucket_index(const 
>> _Hash_node_cache_value<>&,
>>             size_t)): New.
>>             (_Hashtable_local_iter_base<>): New.
>>             (_Hashtable_local_iterator<>): New.
>>            (_Hashtable_const_local_iterator<>): New.
>>             (_Hashtable_base<>::_M_equals(const _Key&, 
>> __hash_code,
>>             const _Hash_node_cache_value<>&): New.
>>            (_Hashtable_base<>::_M_node_equals(const 
>> _Hash_node_cache_value<>&,
>>             const _Hash_node_cache_value<>&)): New.
>>            (_Hashtable_alloc<>::__value_alloc_traits): 
>> Remove.
>>            (_Hashtable_alloc<>::__node_base_ptr): Remove.
>>             * include/bits/hashtable.h (_Hashtable<>): Adapt.
>>             * 
>> testsuite/23_containers/unordered_map/allocator/ext_ptr.cc: New test.
>>             * 
>> testsuite/23_containers/unordered_multimap/allocator/ext_ptr.cc:
>>             New test.
>>             * 
>> testsuite/23_containers/unordered_multiset/allocator/ext_ptr.cc:
>>             New test.
>>             * 
>> testsuite/23_containers/unordered_set/allocator/ext_ptr.cc: Adapt.
>>
>> Tested under Linux x86_64 normal and version namespace modes.
>>
>> François
>>
>>
>> On 20/10/20 1:04 pm, Jonathan Wakely wrote:
>>> On 28/09/20 22:37 +0200, François Dumont via Libstdc++ wrote:
>>>> Following recent changes on _Hashtable I rebase the patch and 
>>>> completely review it.
>>>>
>>>> I managed to integrate the allocator custom pointer type without 
>>>> touching to _Hashtable base types like _Hash_code_base or 
>>>> _Hashtable_base. However I cannot see how to use the custom pointer 
>>>> type without impacting the node types like _Hash_node_base which 
>>>> now takes a template parameter, the custom pointer type.
>>>>
>>>> On an abi point of view node types are different however the data 
>>>> structure is the same. The only difference is that the 
>>>> _Hash_node_base _M_nxt is now a _Hash_node<> custom pointer rather 
>>>> than a simple _Hash_node_base*.
>>>>
>>>> Even if this patch can't go in because of the abi breaking change I 
>>>> am going to adapt some of the code simplifications for master. 
>>>> Especially the _Hash_code_base and _Local_iterator_base 
>>>> simplifications.
>>>>
>>>> Let me know if you can think of a way to integrate the custom 
>>>> pointer without impacting abi. Unless impacting node types and 
>>>> associated iterator types is fine even if I already noticed that 
>>>> pretty printer tests are broken with those changes.
>>>
>>> The approach I used for the other containers (which was never
>>> completed and committed) is something like:
>>>
>>> struct _Node_base
>>> {
>>>   _Node_base* _M_next;
>>> };
>>>
>>> template<typename _Ptr>
>>> struct _Fancy_node_base
>>> {
>>>   _Ptr _M_next;
>>> };
>>>
>>> template<typename _Ptr>
>>>   using node_base = conditional_t<is_pointer<_Ptr>::value,
>>>                                  
>>> _Node_base,
>>> _Fancy_node_base<_Ptr>>;
>>>
>>> This way all existing code that has allocators with non-fancy pointers
>>> continues to use the same type. Code using fancy pointers (which
>>> doesn't currently work properly anyway) changes to use the new types
>>> that depend on the pointer type.
>>>
>>
>
>> diff --git a/libstdc++-v3/include/bits/hashtable.h 
>> b/libstdc++-v3/include/bits/hashtable.h
>> index 6c6c5edde0b..86644d447ca 100644
>> --- a/libstdc++-v3/include/bits/hashtable.h
>> +++ b/libstdc++-v3/include/bits/hashtable.h
>> @@ -182,8 +182,19 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
>>                  _RehashPolicy, _Traits>,
>>       private __detail::_Hashtable_alloc<
>>     __alloc_rebind<_Alloc,
>> -               __detail::_Hash_node<_Value,
>> - _Traits::__hash_cached::value>>>
>> +#if _GLIBCXX_INLINE_VERSION
>> +               __detail::_Hash_cust_ptr_node<
>> +             __detail::__alloc_val_ptr<_Alloc, _Value>,
>> +             _Traits::__hash_cached::value>>>
>> +#else
>> +      typename std::conditional<
>> +        std::__is_pointer<
>> +          __detail::__alloc_val_ptr<_Alloc, _Value>>::__value,
>> +        __detail::_Hash_node<_Value, _Traits::__hash_cached::value>,
>> +        __detail::_Hash_cust_ptr_node<
>> +          __detail::__alloc_val_ptr<_Alloc, _Value>,
>> +          _Traits::__hash_cached::value>>::type>>
>> +#endif
>
> This ugliness should be hidden behind an alias template.
>
> Use is_pointer<P>::value, not std::__is_pointer<P>::__value.
> This is C++11 code, there's no need to use the C++98 traits. And you
> don't need the std:: qualification.
>
> _Hash_cust_ptr_node is not a good name if it's also used (sometimes)
> for normal pointers. How about _Hash_pnode, or something like that?
Ok.
>
>
>> +
>> +  /**
>> +   *  Primary template struct _Hash_cust_ptr_node.
>
> This comment is not useful. It shouldn't a Doxygen comment, because
> this is not something we need to put in the API documentation for
> end-users. I can tell it's the primary template, because I can read
> C++. What is the type for? How is it different to _Hash_node? That's
> what I'd like to read here.
Yes, that's a bad copy/paste.
>
>> +   */
>> +  template<typename _Ptr, bool _Cache_hash_code>
>> +    struct _Hash_cust_ptr_node
>> +    : _Hash_node_cust_ptr_base<__ptr_rebind<_Ptr,
>> +                   _Hash_cust_ptr_node<_Ptr, _Cache_hash_code>>>
>> +    , _Hash_node_cache_value<typename 
>> std::pointer_traits<_Ptr>::element_type,
>> +                 _Cache_hash_code>
>


^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: libstdc++ PR 57272 Fancy pointer support in Hashtable
  2020-11-02 14:11         ` Jonathan Wakely
  2020-11-02 21:33           ` François Dumont
@ 2021-01-11 18:10           ` François Dumont
  2021-06-10 17:22           ` François Dumont
  2 siblings, 0 replies; 10+ messages in thread
From: François Dumont @ 2021-01-11 18:10 UTC (permalink / raw)
  To: Jonathan Wakely; +Cc: libstdc++, gcc-patches

[-- Attachment #1: Type: text/plain, Size: 17596 bytes --]

Hi

     I had another look to this attempt to properly support alloc fancy 
pointers.

     I consider all your remarks appart from the big one below about all 
this being a waste of time :-)

     I do not see why we should use the alloc fancy pointer type in our 
_Hashtable implementation details. It is not noticeable from a user 
stand point unless he wants to track all dereferencements or '->' 
operator usages which would be quite odd.

     For now I just consider that we should store the fancy pointer 
coming from the allocator::allocate calls as-is and return it to the 
allocator when needed without the pointer_traits::pointer_to as we used 
to do. This should preserve any additional data the allocator might 
associate to the raw pointer in the allocator.

     Even if the Standard is saying we should extend the fancy pointer 
usage this patch is still a good 1st step which is unavoidable to 
complete the potential final picture. We could still provide this for 
now and see if users have complains about it.

     This patch is implementing a small refinement by using fancy 
pointer move semantic in a couple of situations. I see that node_handle 
is not doing this but I consider it as a potential node handle enhancement.

     I am completing execution of tests but unordered ones are OK for 
both normal and debug modes.

libstdc++: Store allocator::pointer in hashtable implementation

     In _Hashtable implementation store the allocator::pointer returned 
by the allocate
     call as-is and return it on the deallocate when necessary. This is 
true for both
     allocate nodes and buckets.

     Note that internally, as an implementation detail, we are still 
using raw pointers
     in iterators and buckets.

     libstdc++-v3/ChangeLog:

             * include/bits/hashtable_policy.h
             (__alloc_val_ptr<>): New template alias.
             (_ReuseOrAllocNode<>::__node_type): Remove.
             (_ReuseOrAllocNode<>::__node_ptr): New.
(_ReuseOrAllocNode<>::operator()(_Arg&&)): Return latter.
             (_ReuseOrAllocNode(__node_ptr, __hashtable_alloc&)): Adapt 
to use latter.
             (_ReuseOrAllocNode(_Hash_node_base*, __hashtable_alloc&)): New.
             (_AllocNode<>::__node_type): Remove.
             (_AllocNode<>::__node_ptr): New.
(_AllocNode<>::operator()<>(_Arg&&)): Return latter.
             (_Hash_pnode_base<>): New.
             (_Hash_node<>::__node_base): New.
             (_Hash_node<>::__node_ptr): New.
             (_Hash_node<>::__node_type): New.
             (_Hash_node<>::__node_value_cache_type): New.
             (_Hash_node<>::_M_next_ptr()): New.
             (_Hash_pnode<typename _Ptr, bool _Cache_hash_code>): New.
             (__get_node_type<>): New, template alias to _Hash_node<> if 
allocator pointer
type is a raw pointer, _Hash_pnode<> otherwise.
             (_Hashtable_iterator_base<typename _NodeType>): New.
             (_Node_iterator_base<>): Inherits from latter.
             (_Hashtable_iterator<typename _NodeType, bool 
__constant_iterators>):
             New.
             (_Hashtable_const_iterator<typename _NodeType, bool 
__constant_iterators>):
             New.
             (_Insert_base<>::__alloc_ptr): New.
             (_Insert_base<>::__hashtable_alloc): Remove.
             (_Insert_base<>::__node_type): New.
             (_Insert_base<>::iterator): Define conditionally to 
_Node_iterator<>
             or _Hashtable_iterator<> depending on __alloc_ptr being a 
raw pointer.
             (_Insert_base<>::const_iterator): Define conditionally to
             _Node_const_iterator<> or _Hashtable_const_iterator<> 
depending on
             __alloc_ptr being a raw pointer.
             (_Hashtable_local_iter_base<>): New.
             (_Hashtable_local_iterator<>): New.
             (_Hashtable_const_local_iterator<>): New.
             (__local_iterator<>): New template alias.
             (__const_local_iterator<>): New template alias.
             (_Hashtable_base<>::_M_equals(const _Key&, __hash_code,
             const _Hash_node_cache_value<>&): New.
             (_Hashtable_base<>::_M_node_equals(const 
_Hash_node_cache_value<>&,
             const _Hash_node_cache_value<>&)): New.
             (_Hashtable_alloc<>::__value_alloc_traits): Remove.
             (_Hashtable_alloc<>::__node_base_ptr): Remove.
             * include/bits/hashtable.h (_Hashtable<>): Adapt.
             * 
testsuite/23_containers/unordered_map/allocator/ext_ptr.cc: New test.
             * 
testsuite/23_containers/unordered_multimap/allocator/ext_ptr.cc:
             New test.
             * 
testsuite/23_containers/unordered_multiset/allocator/ext_ptr.cc:
             New test.
             * 
testsuite/23_containers/unordered_set/allocator/ext_ptr.cc: Adapt.

Ok to commit ? (even if in a few months)

François


On 02/11/20 3:11 pm, Jonathan Wakely wrote:
> On 01/11/20 22:48 +0100, François Dumont via Libstdc++ wrote:
>> Here is an other attempt.
>>
>> This time I am storing the node using allocator pointer just in the 
>> singly linked list of nodes. Buckets are still __node_base* so that 
>> the custom pointer is not manipulated too much. Moreover iterators 
>> are also using node raw pointers.
>
> There's no point doing it if you still use raw pointers.
>
> It either has to be done completely, or it's a waste of time and
> energy.
>
>> As advised I introduced new types in case of custom pointers. But I 
>> am also using those in gnu versioned namespace so that I could more 
>> easily test this new code with all existing test cases.
>>
>> Note that as we are at introducing a new abi I am also changing node 
>> memory layout in this case. I think it is better to put the hash code 
>> cache before the node value rather than after. It will be closer to 
>> the beginning of the node and so accessible without mem page fault.
>
> I think that's a bad idea. if somebody is using a fancy pointer which
> is just a thin wrapper around a real pointer, and it has implicit
> conversions to/from the real pointer), then I think it might work OK
> today. For example, __gnu_cxx::_Ext_pointer in <ext/pointer.h>. If the
> size and alignment of the fancy pointer is just the same as the real
> pointer, and the layout of the node classes doesn't change order, I
> think that will probably Just Work.
>
> If you reorder the node members, it definitely won't work.
>
> Is this a realistic scenario? I don't know. It might be.
>
> If we want to do that only for the versioned namespace, that would be
> OK, but should be a separate patch.
>
> I'm also concerned about the number of differences that depend on
> _GLIBCXX_INLINE_VERSION. The code gets a lot less maintainable with so
> many differences, and they only exist to support a mode nobody uses.
>
> Wouldn't implementing https://wg21.link/P0809R0 or
> https://wg21.link/P0919R3 (and https://wg21.link/P1690R1) be a better
> use of time?
>
> There are a couple more comments below, for things that I noticed
> while quickly skimming over the patch.
>
>
>> To be clear the node mem layout is:
>> - next node pointer
>> - node value_type
>> - hash code (optional)
>>
>> The new node mem layout is:
>> - next node pointer
>> - hash code (optional)
>> - node value_type
>>
>> Here is the git log in case you validate it.
>>
>>     libstdc++: Store allocator::pointer in hashtable implementation
>>
>>     Use allocator pointer type in _Hashtable implementation.
>>
>>             * include/bits/hashtable_policy.h
>>             (_ReuseOrAllocNode<>::__node_type): Remove.
>>            (_ReuseOrAllocNode<>::__node_pointer): New.
>>             (_ReuseOrAllocNode(__node_pointer, 
>> __hashtable_alloc&)): Adapt to use
>>             latter.
>> (_ReuseOrAllocNode<>::operator()(_Arg&&)): Return latter.
>>             (_AllocNode<>::__node_type): Remove.
>>             (_AllocNode<>::__node_pointer): New.
>> (_AllocNode<>::operator()<>(_Arg&&)): Return latter.
>>             (_Hash_node_cust_ptr_base<>): New.
>>             (_Hash_node_cache_value<typename _Value, bool 
>> _Cache_hash_code>): New.
>>             (_Hash_node<>::__node_base): New.
>>             (_Hash_node<>::__node_ptr): New.
>>             (_Hash_node<>::__node_type): New.
>>            (_Hash_node<>::__node_value_cache_type): New.
>>             (_Hash_node<>::_M_next_ptr()): New.
>>             (_Hash_cust_ptr_node<typename _Ptr, bool 
>> _Cache_hash_code>): New.
>>             (_Hashtable_iterator_base<typename 
>> _NodeType>): New.
>>             (_Node_iterator_base<>): Inherits from latter.
>>             (_Hashtable_iterator<typename _NodeType, bool 
>> __constant_iterators>):
>>             New.
>>             (_Hashtable_const_iterator<typename _NodeType, 
>> bool __constant_iterators>):
>>             New.
>>             (_Insert_base<>::__alloc_ptr): New.
>>             (_Insert_base<>::__node_type): New. Define 
>> conditionally to _Hash_node<>
>>             or _Hash_cust_ptr_node<> depending on 
>> __alloc_ptr being a raw pointer.
>>            (_Insert_base<>::__node_alloc_type): New.
>>            (_Insert_base<>::__hashtable_alloc): Remove.
>>             (_Insert_base<>::iterator): Define 
>> conditionally to _Node_iterator<>
>>             or _Hashtable_iterator<> depending on 
>> __alloc_ptr being a raw pointer.
>>             (_Insert_base<>::const_iterator): Define 
>> conditionally to
>>             _Node_const_iterator<> or 
>> _Hashtable_const_iterator<> depending on
>>             __alloc_ptr being a raw pointer.
>>             (_Hashtable_local_iter_base<>): New.
>>            (_Hash_code_base<>::_M_bucket_index(const 
>> _Hash_node_cache_value<>&,
>>             size_t)): New.
>>             (_Hashtable_local_iter_base<>): New.
>>             (_Hashtable_local_iterator<>): New.
>>            (_Hashtable_const_local_iterator<>): New.
>>             (_Hashtable_base<>::_M_equals(const _Key&, 
>> __hash_code,
>>             const _Hash_node_cache_value<>&): New.
>>            (_Hashtable_base<>::_M_node_equals(const 
>> _Hash_node_cache_value<>&,
>>             const _Hash_node_cache_value<>&)): New.
>>            (_Hashtable_alloc<>::__value_alloc_traits): 
>> Remove.
>>            (_Hashtable_alloc<>::__node_base_ptr): Remove.
>>             * include/bits/hashtable.h (_Hashtable<>): Adapt.
>>             * 
>> testsuite/23_containers/unordered_map/allocator/ext_ptr.cc: New test.
>>             * 
>> testsuite/23_containers/unordered_multimap/allocator/ext_ptr.cc:
>>             New test.
>>             * 
>> testsuite/23_containers/unordered_multiset/allocator/ext_ptr.cc:
>>             New test.
>>             * 
>> testsuite/23_containers/unordered_set/allocator/ext_ptr.cc: Adapt.
>>
>> Tested under Linux x86_64 normal and version namespace modes.
>>
>> François
>>
>>
>> On 20/10/20 1:04 pm, Jonathan Wakely wrote:
>>> On 28/09/20 22:37 +0200, François Dumont via Libstdc++ wrote:
>>>> Following recent changes on _Hashtable I rebase the patch and 
>>>> completely review it.
>>>>
>>>> I managed to integrate the allocator custom pointer type without 
>>>> touching to _Hashtable base types like _Hash_code_base or 
>>>> _Hashtable_base. However I cannot see how to use the custom pointer 
>>>> type without impacting the node types like _Hash_node_base which 
>>>> now takes a template parameter, the custom pointer type.
>>>>
>>>> On an abi point of view node types are different however the data 
>>>> structure is the same. The only difference is that the 
>>>> _Hash_node_base _M_nxt is now a _Hash_node<> custom pointer rather 
>>>> than a simple _Hash_node_base*.
>>>>
>>>> Even if this patch can't go in because of the abi breaking change I 
>>>> am going to adapt some of the code simplifications for master. 
>>>> Especially the _Hash_code_base and _Local_iterator_base 
>>>> simplifications.
>>>>
>>>> Let me know if you can think of a way to integrate the custom 
>>>> pointer without impacting abi. Unless impacting node types and 
>>>> associated iterator types is fine even if I already noticed that 
>>>> pretty printer tests are broken with those changes.
>>>
>>> The approach I used for the other containers (which was never
>>> completed and committed) is something like:
>>>
>>> struct _Node_base
>>> {
>>>   _Node_base* _M_next;
>>> };
>>>
>>> template<typename _Ptr>
>>> struct _Fancy_node_base
>>> {
>>>   _Ptr _M_next;
>>> };
>>>
>>> template<typename _Ptr>
>>>   using node_base = conditional_t<is_pointer<_Ptr>::value,
>>>                                  
>>> _Node_base,
>>> _Fancy_node_base<_Ptr>>;
>>>
>>> This way all existing code that has allocators with non-fancy pointers
>>> continues to use the same type. Code using fancy pointers (which
>>> doesn't currently work properly anyway) changes to use the new types
>>> that depend on the pointer type.
>>>
>>
>
>> diff --git a/libstdc++-v3/include/bits/hashtable.h 
>> b/libstdc++-v3/include/bits/hashtable.h
>> index 6c6c5edde0b..86644d447ca 100644
>> --- a/libstdc++-v3/include/bits/hashtable.h
>> +++ b/libstdc++-v3/include/bits/hashtable.h
>> @@ -182,8 +182,19 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
>>                  _RehashPolicy, _Traits>,
>>       private __detail::_Hashtable_alloc<
>>     __alloc_rebind<_Alloc,
>> -               __detail::_Hash_node<_Value,
>> - _Traits::__hash_cached::value>>>
>> +#if _GLIBCXX_INLINE_VERSION
>> +               __detail::_Hash_cust_ptr_node<
>> +             __detail::__alloc_val_ptr<_Alloc, _Value>,
>> +             _Traits::__hash_cached::value>>>
>> +#else
>> +      typename std::conditional<
>> +        std::__is_pointer<
>> +          __detail::__alloc_val_ptr<_Alloc, _Value>>::__value,
>> +        __detail::_Hash_node<_Value, _Traits::__hash_cached::value>,
>> +        __detail::_Hash_cust_ptr_node<
>> +          __detail::__alloc_val_ptr<_Alloc, _Value>,
>> +          _Traits::__hash_cached::value>>::type>>
>> +#endif
>
> This ugliness should be hidden behind an alias template.
>
> Use is_pointer<P>::value, not std::__is_pointer<P>::__value.
> This is C++11 code, there's no need to use the C++98 traits. And you
> don't need the std:: qualification.
>
> _Hash_cust_ptr_node is not a good name if it's also used (sometimes)
> for normal pointers. How about _Hash_pnode, or something like that?
>
>
>> +
>> +  /**
>> +   *  Primary template struct _Hash_cust_ptr_node.
>
> This comment is not useful. It shouldn't a Doxygen comment, because
> this is not something we need to put in the API documentation for
> end-users. I can tell it's the primary template, because I can read
> C++. What is the type for? How is it different to _Hash_node? That's
> what I'd like to read here.
>
>> +   */
>> +  template<typename _Ptr, bool _Cache_hash_code>
>> +    struct _Hash_cust_ptr_node
>> +    : _Hash_node_cust_ptr_base<__ptr_rebind<_Ptr,
>> +                   _Hash_cust_ptr_node<_Ptr, _Cache_hash_code>>>
>> +    , _Hash_node_cache_value<typename 
>> std::pointer_traits<_Ptr>::element_type,
>> +                 _Cache_hash_code>
>


[-- Attachment #2: hashtable.patch --]
[-- Type: text/x-patch, Size: 62376 bytes --]

diff --git a/libstdc++-v3/include/bits/hashtable.h b/libstdc++-v3/include/bits/hashtable.h
index bc7ec926155..7b9e2f3111a 100644
--- a/libstdc++-v3/include/bits/hashtable.h
+++ b/libstdc++-v3/include/bits/hashtable.h
@@ -182,8 +182,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 				 _RehashPolicy, _Traits>,
       private __detail::_Hashtable_alloc<
 	__alloc_rebind<_Alloc,
-		       __detail::_Hash_node<_Value,
-					    _Traits::__hash_cached::value>>>
+		       __detail::__get_node_type<_Alloc, _Value,
+						_Traits::__hash_cached::value>>>
     {
       static_assert(is_same<typename remove_cv<_Value>::type, _Value>::value,
 	  "unordered container must have a non-const, non-volatile value_type");
@@ -195,21 +195,21 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       using __traits_type = _Traits;
       using __hash_cached = typename __traits_type::__hash_cached;
       using __constant_iterators = typename __traits_type::__constant_iterators;
-      using __node_type = __detail::_Hash_node<_Value, __hash_cached::value>;
+      using __alloc_ptr = __detail::__alloc_val_ptr<_Alloc, _Value>;
+      using __node_type = __detail::__get_node_type<
+	_Alloc, _Value, _Traits::__hash_cached::value>;
+      using __node_base = typename __node_type::__node_base;
+      using __node_value_type = typename __node_type::__node_value_cache_type;
       using __node_alloc_type = __alloc_rebind<_Alloc, __node_type>;
-
       using __hashtable_alloc = __detail::_Hashtable_alloc<__node_alloc_type>;
 
-      using __node_value_type =
-	__detail::_Hash_node_value<_Value, __hash_cached::value>;
       using __node_ptr = typename __hashtable_alloc::__node_ptr;
-      using __value_alloc_traits =
-	typename __hashtable_alloc::__value_alloc_traits;
       using __node_alloc_traits =
 	typename __hashtable_alloc::__node_alloc_traits;
-      using __node_base = typename __hashtable_alloc::__node_base;
-      using __node_base_ptr = typename __hashtable_alloc::__node_base_ptr;
+      using __value_alloc_traits =
+	typename __node_alloc_traits::template rebind_traits<_Value>;
       using __buckets_ptr = typename __hashtable_alloc::__buckets_ptr;
+      using __buckets_ptr_traits = std::pointer_traits<__buckets_ptr>;
 
       using __insert_base = __detail::_Insert<_Key, _Value, _Alloc, _ExtractKey,
 					      _Equal, _Hash,
@@ -233,15 +233,15 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 
       using const_iterator = typename __insert_base::const_iterator;
 
-      using local_iterator = __detail::_Local_iterator<key_type, _Value,
-			_ExtractKey, _Hash, _RangeHash, _Unused,
-					     __constant_iterators::value,
-					     __hash_cached::value>;
+      using local_iterator = __detail::__local_iterator<
+	__node_ptr, key_type, value_type,
+	_ExtractKey, _Hash, _RangeHash, _Unused,
+	__constant_iterators::value, __hash_cached::value>;
 
-      using const_local_iterator = __detail::_Local_const_iterator<
-			key_type, _Value,
-			_ExtractKey, _Hash, _RangeHash, _Unused,
-			__constant_iterators::value, __hash_cached::value>;
+      using const_local_iterator = __detail::__const_local_iterator<
+	__node_ptr, key_type, value_type,
+	_ExtractKey, _Hash, _RangeHash, _Unused,
+	__constant_iterators::value, __hash_cached::value>;
 
     private:
       using __rehash_type = _RehashPolicy;
@@ -279,8 +279,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       struct _Scoped_node
       {
 	// Take ownership of a node with a constructed element.
-	_Scoped_node(__node_ptr __n, __hashtable_alloc* __h)
-	: _M_h(__h), _M_node(__n) { }
+	_Scoped_node(__node_ptr&& __n, __hashtable_alloc* __h)
+	: _M_h(__h), _M_node(std::move(__n)) { }
 
 	// Allocate a node and construct an element within it.
 	template<typename... _Args>
@@ -374,7 +374,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 #endif
 
     private:
-      __buckets_ptr		_M_buckets		= &_M_single_bucket;
+      __buckets_ptr		_M_buckets =
+			__buckets_ptr_traits::pointer_to(_M_single_bucket);
       size_type			_M_bucket_count		= 1;
       __node_base		_M_before_begin;
       size_type			_M_element_count	= 0;
@@ -386,13 +387,13 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       // qualified.
       // Note that we can't leave hashtable with 0 bucket without adding
       // numerous checks in the code to avoid 0 modulus.
-      __node_base_ptr		_M_single_bucket	= nullptr;
+      __node_base*		_M_single_bucket	= nullptr;
 
       void
       _M_update_bbegin()
       {
-	if (_M_begin())
-	  _M_buckets[_M_bucket_index(*_M_begin())] = &_M_before_begin;
+	if (auto __begin = _M_begin())
+	  _M_buckets[_M_bucket_index(*__begin)] = &_M_before_begin;
       }
 
       void
@@ -402,9 +403,16 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	_M_update_bbegin();
       }
 
+      void
+      _M_update_bbegin(__detail::_Hash_node_base* __n)
+      { _M_update_bbegin(static_cast<__node_ptr>(__n));  }
+
       bool
       _M_uses_single_bucket(__buckets_ptr __bkts) const
-      { return __builtin_expect(__bkts == &_M_single_bucket, false); }
+      {
+	return __builtin_expect(
+	  std::__to_address(__bkts) == &_M_single_bucket, false);
+      }
 
       bool
       _M_uses_single_bucket() const
@@ -419,7 +427,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	if (__builtin_expect(__bkt_count == 1, false))
 	  {
 	    _M_single_bucket = nullptr;
-	    return &_M_single_bucket;
+	    return __buckets_ptr_traits::pointer_to(_M_single_bucket);
 	  }
 
 	return __hashtable_alloc::_M_allocate_buckets(__bkt_count);
@@ -440,12 +448,15 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 
       // Gets bucket begin, deals with the fact that non-empty buckets contain
       // their before begin node.
-      __node_ptr
+      __node_type*
       _M_bucket_begin(size_type __bkt) const;
 
-      __node_ptr
+      __node_type*
       _M_begin() const
-      { return static_cast<__node_ptr>(_M_before_begin._M_nxt); }
+      {
+	return
+	  static_cast<__node_type*>(std::__to_address(_M_before_begin._M_nxt));
+      }
 
       // Assign *this using another _Hashtable instance. Whether elements
       // are copied or moved depends on the _Ht reference.
@@ -492,6 +503,14 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 		   const _Hash&, const _Equal&, const allocator_type&,
 		   false_type __uks);
 
+      static __node_ptr
+      _S_cast(__node_ptr __n)
+      { return __n; }
+
+      static __node_ptr
+	_S_cast(__detail::_Hash_node_base* __n)
+      { return static_cast<__node_ptr>(__n); }
+
     public:
       // Constructor, destructor, assignment, swap
       _Hashtable() = default;
@@ -568,7 +587,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       _Hashtable&
       operator=(initializer_list<value_type> __l)
       {
-	__reuse_or_alloc_node_gen_t __roan(_M_begin(), *this);
+	__reuse_or_alloc_node_gen_t __roan(std::move(_M_before_begin._M_nxt),
+					   *this);
 	_M_before_begin._M_nxt = nullptr;
 	clear();
 
@@ -736,16 +756,17 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 
       // Find and insert helper functions and types
       // Find the node before the one matching the criteria.
-      __node_base_ptr
+      __node_base*
       _M_find_before_node(size_type, const key_type&, __hash_code) const;
 
-      __node_ptr
+      __node_type*
       _M_find_node(size_type __bkt, const key_type& __key,
 		   __hash_code __c) const
       {
-	__node_base_ptr __before_n = _M_find_before_node(__bkt, __key, __c);
+	__node_base* __before_n = _M_find_before_node(__bkt, __key, __c);
 	if (__before_n)
-	  return static_cast<__node_ptr>(__before_n->_M_nxt);
+	  return static_cast<__node_type*>(
+				std::__to_address(__before_n->_M_nxt));
 	return nullptr;
       }
 
@@ -759,8 +780,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 			     size_type __next_bkt);
 
       // Get the node before __n in the bucket __bkt
-      __node_base_ptr
-      _M_get_previous_node(size_type __bkt, __node_ptr __n);
+      __node_base*
+      _M_get_previous_node(size_type __bkt, __node_type* __n);
 
       // Insert node __n with hash code __code, in bucket __bkt if no
       // rehash (assumes no element with same key already present).
@@ -772,7 +793,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       // Insert node __n with key __k and hash code __code.
       // Takes ownership of __n if insertion succeeds, throws otherwise.
       iterator
-      _M_insert_multi_node(__node_ptr __hint,
+      _M_insert_multi_node(__node_type* __hint,
 			   __hash_code __code, __node_ptr __n);
 
       template<typename... _Args>
@@ -830,7 +851,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       _M_erase(false_type __uks, const key_type&);
 
       iterator
-      _M_erase(size_type __bkt, __node_base_ptr __prev_n, __node_ptr __n);
+      _M_erase(size_type __bkt, __node_base* __prev_n, __node_ptr __n);
 
     public:
       // Emplace
@@ -890,7 +911,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	    const key_type& __k = __nh._M_key();
 	    __hash_code __code = this->_M_hash_code(__k);
 	    size_type __bkt = _M_bucket_index(__code);
-	    if (__node_ptr __n = _M_find_node(__bkt, __k, __code))
+	    if (__node_type* __n = _M_find_node(__bkt, __k, __code))
 	      {
 		__ret.node = std::move(__nh);
 		__ret.position = iterator(__n);
@@ -926,11 +947,11 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 
     private:
       node_type
-      _M_extract_node(size_t __bkt, __node_base_ptr __prev_n)
+      _M_extract_node(size_t __bkt, __node_base* __prev_n)
       {
-	__node_ptr __n = static_cast<__node_ptr>(__prev_n->_M_nxt);
+	__node_ptr __n = _S_cast(__prev_n->_M_nxt);
 	if (__prev_n == _M_buckets[__bkt])
-	  _M_remove_bucket_begin(__bkt, __n->_M_next(),
+	  _M_remove_bucket_begin(__bkt, __n->_M_next_ptr(),
 	     __n->_M_nxt ? _M_bucket_index(*__n->_M_next()) : 0);
 	else if (__n->_M_nxt)
 	  {
@@ -962,7 +983,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	node_type __nh;
 	__hash_code __code = this->_M_hash_code(__k);
 	std::size_t __bkt = _M_bucket_index(__code);
-	if (__node_base_ptr __prev_node = _M_find_before_node(__bkt, __k, __code))
+	if (auto __prev_node = _M_find_before_node(__bkt, __k, __code))
 	  __nh = _M_extract_node(__bkt, __prev_node);
 	return __nh;
       }
@@ -1032,10 +1053,12 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
     _M_bucket_begin(size_type __bkt) const
-    -> __node_ptr
+    -> __node_type*
     {
-      __node_base_ptr __n = _M_buckets[__bkt];
-      return __n ? static_cast<__node_ptr>(__n->_M_nxt) : nullptr;
+      __node_base* __n = _M_buckets[__bkt];
+      return __n
+	? static_cast<__node_type*>(std::__to_address(__n->_M_nxt))
+	: nullptr;
     }
 
   template<typename _Key, typename _Value, typename _Alloc,
@@ -1123,7 +1146,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	      && __this_alloc != __that_alloc)
 	    {
 	      // Replacement allocator cannot free existing storage.
-	      this->_M_deallocate_nodes(_M_begin());
+	      this->_M_deallocate_nodes(_S_cast(_M_before_begin._M_nxt));
 	      _M_before_begin._M_nxt = nullptr;
 	      _M_deallocate_buckets();
 	      _M_buckets = nullptr;
@@ -1175,15 +1198,16 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	    _M_bucket_count = __ht._M_bucket_count;
 	  }
 	else
-	  __builtin_memset(_M_buckets, 0,
-			   _M_bucket_count * sizeof(__node_base_ptr));
+	  __builtin_memset(std::__to_address(_M_buckets), 0,
+			   _M_bucket_count * sizeof(__node_base*));
 
 	__try
 	  {
 	    __hashtable_base::operator=(std::forward<_Ht>(__ht));
 	    _M_element_count = __ht._M_element_count;
 	    _M_rehash_policy = __ht._M_rehash_policy;
-	    __reuse_or_alloc_node_gen_t __roan(_M_begin(), *this);
+	    __reuse_or_alloc_node_gen_t
+	      __roan(std::move(_M_before_begin._M_nxt), *this);
 	    _M_before_begin._M_nxt = nullptr;
 	    _M_assign(std::forward<_Ht>(__ht), __roan);
 	    if (__former_buckets)
@@ -1199,8 +1223,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 		_M_buckets = __former_buckets;
 		_M_bucket_count = __former_bucket_count;
 	      }
-	    __builtin_memset(_M_buckets, 0,
-			     _M_bucket_count * sizeof(__node_base_ptr));
+	    __builtin_memset(std::__to_address(_M_buckets), 0,
+			     _M_bucket_count * sizeof(__node_base*));
 	    __throw_exception_again;
 	  }
       }
@@ -1226,14 +1250,14 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 
 	    // First deal with the special first node pointed to by
 	    // _M_before_begin.
-	    __node_ptr __ht_n = __ht._M_begin();
+	    __node_type* __ht_n = __ht._M_begin();
 	    __node_ptr __this_n
 	      = __node_gen(__fwd_value_for<_Ht>(__ht_n->_M_v()));
 	    this->_M_copy_code(*__this_n, *__ht_n);
 	    _M_update_bbegin(__this_n);
 
 	    // Then deal with other nodes.
-	    __node_ptr __prev_n = __this_n;
+	    __node_base* __prev_n = std::__to_address(__this_n);
 	    for (__ht_n = __ht_n->_M_next(); __ht_n; __ht_n = __ht_n->_M_next())
 	      {
 		__this_n = __node_gen(__fwd_value_for<_Ht>(__ht_n->_M_v()));
@@ -1242,7 +1266,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 		size_type __bkt = _M_bucket_index(*__this_n);
 		if (!_M_buckets[__bkt])
 		  _M_buckets[__bkt] = __prev_n;
-		__prev_n = __this_n;
+		__prev_n = std::__to_address(__this_n);
 	      }
 	  }
 	__catch(...)
@@ -1266,7 +1290,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       _M_rehash_policy._M_reset();
       _M_bucket_count = 1;
       _M_single_bucket = nullptr;
-      _M_buckets = &_M_single_bucket;
+      _M_buckets = __buckets_ptr_traits::pointer_to(_M_single_bucket);
       _M_before_begin._M_nxt = nullptr;
       _M_element_count = 0;
     }
@@ -1283,7 +1307,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       if (__builtin_expect(std::__addressof(__ht) == this, false))
 	return;
 
-      this->_M_deallocate_nodes(_M_begin());
+      this->_M_deallocate_nodes(_S_cast(_M_before_begin._M_nxt));
       _M_deallocate_buckets();
       __hashtable_base::operator=(std::move(__ht));
       _M_rehash_policy = __ht._M_rehash_policy;
@@ -1291,7 +1315,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	_M_buckets = __ht._M_buckets;
       else
 	{
-	  _M_buckets = &_M_single_bucket;
+	  _M_buckets = __buckets_ptr_traits::pointer_to(_M_single_bucket);
 	  _M_single_bucket = __ht._M_single_bucket;
 	}
 
@@ -1368,7 +1392,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       // Update buckets if __ht is using its single bucket.
       if (__ht._M_uses_single_bucket())
 	{
-	  _M_buckets = &_M_single_bucket;
+	  _M_buckets = __buckets_ptr_traits::pointer_to(_M_single_bucket);
 	  _M_single_bucket = __ht._M_single_bucket;
 	}
 
@@ -1419,7 +1443,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	{
 	  if (__ht._M_uses_single_bucket())
 	    {
-	      _M_buckets = &_M_single_bucket;
+	      _M_buckets = __buckets_ptr_traits::pointer_to(_M_single_bucket);
 	      _M_single_bucket = __ht._M_single_bucket;
 	    }
 	  else
@@ -1427,7 +1451,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 
 	  // Fix bucket containing the _M_before_begin pointer that can't be
 	  // moved.
-	  _M_update_bbegin(__ht._M_begin());
+	  _M_update_bbegin(__ht._M_before_begin._M_nxt);
 
 	  __ht._M_reset();
 	}
@@ -1480,13 +1504,14 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	  if (!__x._M_uses_single_bucket())
 	    {
 	      _M_buckets = __x._M_buckets;
-	      __x._M_buckets = &__x._M_single_bucket;
+	      __x._M_buckets =
+		__buckets_ptr_traits::pointer_to(__x._M_single_bucket);
 	    }
 	}
       else if (__x._M_uses_single_bucket())
 	{
 	  __x._M_buckets = _M_buckets;
-	  _M_buckets = &_M_single_bucket;
+	  _M_buckets = __buckets_ptr_traits::pointer_to(_M_single_bucket);
 	}	
       else
 	std::swap(_M_buckets, __x._M_buckets);
@@ -1626,13 +1651,14 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
     _M_find_before_node(size_type __bkt, const key_type& __k,
 			__hash_code __code) const
-    -> __node_base_ptr
+    -> __node_base*
     {
-      __node_base_ptr __prev_p = _M_buckets[__bkt];
+      __node_base* __prev_p = _M_buckets[__bkt];
       if (!__prev_p)
 	return nullptr;
 
-      for (__node_ptr __p = static_cast<__node_ptr>(__prev_p->_M_nxt);;
+      for (__node_type* __p =
+	     static_cast<__node_type*>(std::__to_address(__prev_p->_M_nxt));;
 	   __p = __p->_M_next())
 	{
 	  if (this->_M_equals(__k, __code, *__p))
@@ -1673,7 +1699,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	  if (__node->_M_nxt)
 	    // We must update former begin bucket that is pointing to
 	    // _M_before_begin.
-	    _M_buckets[_M_bucket_index(*__node->_M_next())] = __node;
+	    _M_buckets[_M_bucket_index(*__node->_M_next())] =
+	      std::__to_address(__node);
 
 	  _M_buckets[__bkt] = &_M_before_begin;
 	}
@@ -1710,12 +1737,12 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     auto
     _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
-    _M_get_previous_node(size_type __bkt, __node_ptr __n)
-    -> __node_base_ptr
+    _M_get_previous_node(size_type __bkt, __node_type* __n)
+    -> __node_base*
     {
-      __node_base_ptr __prev_n = _M_buckets[__bkt];
-      while (__prev_n->_M_nxt != __n)
-	__prev_n = __prev_n->_M_nxt;
+      __node_base* __prev_n = _M_buckets[__bkt];
+      while (std::__to_address(__prev_n->_M_nxt) != __n)
+	__prev_n = std::__to_address(__prev_n->_M_nxt);
       return __prev_n;
     }
 
@@ -1735,7 +1762,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	const key_type& __k = _ExtractKey{}(__node._M_node->_M_v());
 	__hash_code __code = this->_M_hash_code(__k);
 	size_type __bkt = _M_bucket_index(__code);
-	if (__node_ptr __p = _M_find_node(__bkt, __k, __code))
+	if (__node_type* __p = _M_find_node(__bkt, __k, __code))
 	  // There is already an equivalent node, no insertion
 	  return std::make_pair(iterator(__p), false);
 
@@ -1795,7 +1822,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       // Always insert at the beginning of the bucket.
       _M_insert_bucket_begin(__bkt, __node);
       ++_M_element_count;
-      return iterator(__node);
+      return iterator(std::__to_address(__node));
     }
 
   template<typename _Key, typename _Value, typename _Alloc,
@@ -1805,7 +1832,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     auto
     _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
-    _M_insert_multi_node(__node_ptr __hint,
+    _M_insert_multi_node(__node_type* __hint,
 			 __hash_code __code, __node_ptr __node)
     -> iterator
     {
@@ -1822,7 +1849,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 
       // Find the node before an equivalent one or use hint if it exists and
       // if it is equivalent.
-      __node_base_ptr __prev
+      __node_base* __prev
 	= __builtin_expect(__hint != nullptr, false)
 	  && this->_M_equals(__k, __code, *__hint)
 	    ? __hint
@@ -1841,7 +1868,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	      {
 		size_type __next_bkt = _M_bucket_index(*__node->_M_next());
 		if (__next_bkt != __bkt)
-		  _M_buckets[__next_bkt] = __node;
+		  _M_buckets[__next_bkt] = std::__to_address(__node);
 	      }
 	}
       else
@@ -1850,7 +1877,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	// equivalent elements' relative positions.
 	_M_insert_bucket_begin(__bkt, __node);
       ++_M_element_count;
-      return iterator(__node);
+      return iterator(std::__to_address(__node));
     }
 
   // Insert v if no element with its key is already present.
@@ -1870,8 +1897,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	__hash_code __code = this->_M_hash_code(__k);
 	size_type __bkt = _M_bucket_index(__code);
 
-	if (__node_ptr __node = _M_find_node(__bkt, __k, __code))
-	  return { iterator(__node), false };
+	if (__node_type* __n = _M_find_node(__bkt, __k, __code))
+	  return { iterator(__n), false };
 
 	_Scoped_node __node{ __node_gen(std::forward<_Arg>(__v)), this };
 	auto __pos
@@ -1916,14 +1943,14 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     erase(const_iterator __it)
     -> iterator
     {
-      __node_ptr __n = __it._M_cur;
+      __node_type* __n = __it._M_cur;
       std::size_t __bkt = _M_bucket_index(*__n);
 
       // Look for previous node to unlink it from the erased one, this
       // is why we need buckets to contain the before begin to make
       // this search fast.
-      __node_base_ptr __prev_n = _M_get_previous_node(__bkt, __n);
-      return _M_erase(__bkt, __prev_n, __n);
+      __node_base* __prev_n = _M_get_previous_node(__bkt, __n);
+      return _M_erase(__bkt, __prev_n, _S_cast(__prev_n->_M_nxt));
     }
 
   template<typename _Key, typename _Value, typename _Alloc,
@@ -1933,11 +1960,11 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     auto
     _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
-    _M_erase(size_type __bkt, __node_base_ptr __prev_n, __node_ptr __n)
+    _M_erase(size_type __bkt, __node_base* __prev_n, __node_ptr __n)
     -> iterator
     {
       if (__prev_n == _M_buckets[__bkt])
-	_M_remove_bucket_begin(__bkt, __n->_M_next(),
+	_M_remove_bucket_begin(__bkt, __n->_M_next_ptr(),
 	  __n->_M_nxt ? _M_bucket_index(*__n->_M_next()) : 0);
       else if (__n->_M_nxt)
 	{
@@ -1968,12 +1995,12 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       std::size_t __bkt = _M_bucket_index(__code);
 
       // Look for the node before the first matching node.
-      __node_base_ptr __prev_n = _M_find_before_node(__bkt, __k, __code);
+      __node_base* __prev_n = _M_find_before_node(__bkt, __k, __code);
       if (!__prev_n)
 	return 0;
 
       // We found a matching node, erase it.
-      __node_ptr __n = static_cast<__node_ptr>(__prev_n->_M_nxt);
+      __node_ptr __n = _S_cast(__prev_n->_M_nxt);
       _M_erase(__bkt, __prev_n, __n);
       return 1;
     }
@@ -1992,7 +2019,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       std::size_t __bkt = _M_bucket_index(__code);
 
       // Look for the node before the first matching node.
-      __node_base_ptr __prev_n = _M_find_before_node(__bkt, __k, __code);
+      __node_base* __prev_n = _M_find_before_node(__bkt, __k, __code);
       if (!__prev_n)
 	return 0;
 
@@ -2002,8 +2029,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       // We use one loop to find all matching nodes and another to deallocate
       // them so that the key stays valid during the first loop. It might be
       // invalidated indirectly when destroying nodes.
-      __node_ptr __n = static_cast<__node_ptr>(__prev_n->_M_nxt);
-      __node_ptr __n_last = __n->_M_next();
+      __node_ptr __n = _S_cast(__prev_n->_M_nxt);
+      __node_type* __n_last = __n->_M_next();
       while (__n_last && this->_M_node_equals(*__n, *__n_last))
 	__n_last = __n_last->_M_next();
 
@@ -2013,19 +2040,19 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       size_type __result = 0;
       do
 	{
-	  __node_ptr __p = __n->_M_next();
+	  __node_ptr __p = __n->_M_next_ptr();
 	  this->_M_deallocate_node(__n);
 	  __n = __p;
 	  ++__result;
 	}
-      while (__n != __n_last);
+      while (std::__to_address(__n) != __n_last);
 
       _M_element_count -= __result;
       if (__prev_n == _M_buckets[__bkt])
-	_M_remove_bucket_begin(__bkt, __n_last, __n_last_bkt);
+	_M_remove_bucket_begin(__bkt, __n, __n_last_bkt);
       else if (__n_last_bkt != __bkt)
 	_M_buckets[__n_last_bkt] = __prev_n;
-      __prev_n->_M_nxt = __n_last;
+      __prev_n->_M_nxt = __n;
       return __result;
     }
 
@@ -2039,41 +2066,42 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     erase(const_iterator __first, const_iterator __last)
     -> iterator
     {
-      __node_ptr __n = __first._M_cur;
-      __node_ptr __last_n = __last._M_cur;
+      __node_type* __n = __first._M_cur;
+      __node_type* __last_n = __last._M_cur;
       if (__n == __last_n)
 	return iterator(__n);
 
       std::size_t __bkt = _M_bucket_index(*__n);
 
-      __node_base_ptr __prev_n = _M_get_previous_node(__bkt, __n);
+      __node_base* __prev_n = _M_get_previous_node(__bkt, __n);
+      __node_ptr __nptr = _S_cast(__prev_n->_M_nxt);
       bool __is_bucket_begin = __n == _M_bucket_begin(__bkt);
       std::size_t __n_bkt = __bkt;
       for (;;)
 	{
 	  do
 	    {
-	      __node_ptr __tmp = __n;
-	      __n = __n->_M_next();
+	      __node_ptr __tmp = __nptr;
+	      __nptr = __nptr->_M_next_ptr();
 	      this->_M_deallocate_node(__tmp);
 	      --_M_element_count;
-	      if (!__n)
+	      if (!__nptr)
 		break;
-	      __n_bkt = _M_bucket_index(*__n);
+	      __n_bkt = _M_bucket_index(*__nptr);
 	    }
-	  while (__n != __last_n && __n_bkt == __bkt);
+	  while (std::__to_address(__nptr) != __last_n && __n_bkt == __bkt);
 	  if (__is_bucket_begin)
-	    _M_remove_bucket_begin(__bkt, __n, __n_bkt);
-	  if (__n == __last_n)
+	    _M_remove_bucket_begin(__bkt, __nptr, __n_bkt);
+	  if (std::__to_address(__nptr) == __last_n)
 	    break;
 	  __is_bucket_begin = true;
 	  __bkt = __n_bkt;
 	}
 
-      if (__n && (__n_bkt != __bkt || __is_bucket_begin))
+      if (__nptr && (__n_bkt != __bkt || __is_bucket_begin))
 	_M_buckets[__n_bkt] = __prev_n;
-      __prev_n->_M_nxt = __n;
-      return iterator(__n);
+      __prev_n->_M_nxt = __nptr;
+      return iterator(std::__to_address(__nptr));
     }
 
   template<typename _Key, typename _Value, typename _Alloc,
@@ -2085,9 +2113,9 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
     clear() noexcept
     {
-      this->_M_deallocate_nodes(_M_begin());
-      __builtin_memset(_M_buckets, 0,
-		       _M_bucket_count * sizeof(__node_base_ptr));
+      this->_M_deallocate_nodes(_S_cast(_M_before_begin._M_nxt));
+      __builtin_memset(std::__to_address(_M_buckets), 0,
+		       _M_bucket_count * sizeof(__node_base*));
       _M_element_count = 0;
       _M_before_begin._M_nxt = nullptr;
     }
@@ -2148,12 +2176,12 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     _M_rehash_aux(size_type __bkt_count, true_type /* __uks */)
     {
       __buckets_ptr __new_buckets = _M_allocate_buckets(__bkt_count);
-      __node_ptr __p = _M_begin();
+      __node_type* __p = _M_begin();
       _M_before_begin._M_nxt = nullptr;
       std::size_t __bbegin_bkt = 0;
       while (__p)
 	{
-	  __node_ptr __next = __p->_M_next();
+	  __node_type* __next = __p->_M_next();
 	  std::size_t __bkt
 	    = __hash_code_base::_M_bucket_index(*__p, __bkt_count);
 	  if (!__new_buckets[__bkt])
@@ -2191,16 +2219,16 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     _M_rehash_aux(size_type __bkt_count, false_type /* __uks */)
     {
       __buckets_ptr __new_buckets = _M_allocate_buckets(__bkt_count);
-      __node_ptr __p = _M_begin();
+      __node_type* __p = _M_begin();
       _M_before_begin._M_nxt = nullptr;
       std::size_t __bbegin_bkt = 0;
       std::size_t __prev_bkt = 0;
-      __node_ptr __prev_p = nullptr;
+      __node_type* __prev_p = nullptr;
       bool __check_bucket = false;
 
       while (__p)
 	{
-	  __node_ptr __next = __p->_M_next();
+	  __node_type* __next = __p->_M_next();
 	  std::size_t __bkt
 	    = __hash_code_base::_M_bucket_index(*__p, __bkt_count);
 
diff --git a/libstdc++-v3/include/bits/hashtable_policy.h b/libstdc++-v3/include/bits/hashtable_policy.h
index 84961849fb4..ed85d4bb440 100644
--- a/libstdc++-v3/include/bits/hashtable_policy.h
+++ b/libstdc++-v3/include/bits/hashtable_policy.h
@@ -59,24 +59,29 @@ namespace __detail
 
   // Helper function: return distance(first, last) for forward
   // iterators, or 0/1 for input iterators.
-  template<class _Iterator>
+  template<typename _Iterator>
     inline typename std::iterator_traits<_Iterator>::difference_type
     __distance_fw(_Iterator __first, _Iterator __last,
 		  std::input_iterator_tag)
     { return __first != __last ? 1 : 0; }
 
-  template<class _Iterator>
+  template<typename _Iterator>
     inline typename std::iterator_traits<_Iterator>::difference_type
     __distance_fw(_Iterator __first, _Iterator __last,
 		  std::forward_iterator_tag)
     { return std::distance(__first, __last); }
 
-  template<class _Iterator>
+  template<typename _Iterator>
     inline typename std::iterator_traits<_Iterator>::difference_type
     __distance_fw(_Iterator __first, _Iterator __last)
     { return __distance_fw(__first, __last,
 			   std::__iterator_category(__first)); }
 
+  template<typename _Alloc, typename _Value>
+    using __alloc_val_ptr =
+      typename std::allocator_traits<__alloc_rebind<_Alloc,
+						    _Value>>::pointer;
+
   struct _Identity
   {
     template<typename _Tp>
@@ -94,6 +99,8 @@ namespace __detail
       { return std::get<0>(std::forward<_Tp>(__x)); }
   };
 
+  struct _Hash_node_base;
+
   template<typename _NodeAlloc>
     struct _Hashtable_alloc;
 
@@ -107,24 +114,26 @@ namespace __detail
       using __hashtable_alloc = _Hashtable_alloc<__node_alloc_type>;
       using __node_alloc_traits =
 	typename __hashtable_alloc::__node_alloc_traits;
-      using __node_type = typename __hashtable_alloc::__node_type;
+      using __node_ptr = typename __hashtable_alloc::__node_ptr;
 
     public:
-      _ReuseOrAllocNode(__node_type* __nodes, __hashtable_alloc& __h)
-      : _M_nodes(__nodes), _M_h(__h) { }
+      _ReuseOrAllocNode(__node_ptr&& __nodes, __hashtable_alloc& __h)
+      : _M_nodes(std::move(__nodes)), _M_h(__h) { }
+      _ReuseOrAllocNode(_Hash_node_base* __nodes, __hashtable_alloc& __h)
+      : _M_nodes(static_cast<__node_ptr>(__nodes)), _M_h(__h) { }
       _ReuseOrAllocNode(const _ReuseOrAllocNode&) = delete;
 
       ~_ReuseOrAllocNode()
       { _M_h._M_deallocate_nodes(_M_nodes); }
 
       template<typename _Arg>
-	__node_type*
+	__node_ptr
 	operator()(_Arg&& __arg) const
 	{
 	  if (_M_nodes)
 	    {
-	      __node_type* __node = _M_nodes;
-	      _M_nodes = _M_nodes->_M_next();
+	      __node_ptr __node = _M_nodes;
+	      _M_nodes = _M_nodes->_M_next_ptr();
 	      __node->_M_nxt = nullptr;
 	      auto& __a = _M_h._M_node_allocator();
 	      __node_alloc_traits::destroy(__a, __node->_M_valptr());
@@ -144,7 +153,7 @@ namespace __detail
 	}
 
     private:
-      mutable __node_type* _M_nodes;
+      mutable __node_ptr _M_nodes;
       __hashtable_alloc& _M_h;
     };
 
@@ -155,14 +164,14 @@ namespace __detail
     {
     private:
       using __hashtable_alloc = _Hashtable_alloc<_NodeAlloc>;
-      using __node_type = typename __hashtable_alloc::__node_type;
+      using __node_ptr = typename __hashtable_alloc::__node_ptr;
 
     public:
       _AllocNode(__hashtable_alloc& __h)
       : _M_h(__h) { }
 
       template<typename _Arg>
-	__node_type*
+	__node_ptr
 	operator()(_Arg&& __arg) const
 	{ return _M_h._M_allocate_node(std::forward<_Arg>(__arg)); }
 
@@ -220,6 +229,25 @@ namespace __detail
     _Hash_node_base(_Hash_node_base* __next) noexcept : _M_nxt(__next) { }
   };
 
+  /**
+   * struct _Hash_pnode_base
+   *
+   * Like _Hash_node_base but used in case of a custom pointer type defined by the
+   * allocator.
+   */
+  template<typename _NodePtr>
+    struct _Hash_pnode_base
+    {
+      using __node_ptr = _NodePtr;
+
+      __node_ptr _M_nxt;
+
+      _Hash_pnode_base() noexcept : _M_nxt() { }
+
+      _Hash_pnode_base(__node_ptr __next) noexcept : _M_nxt(__next) { }
+    };
+
+
   /**
    *  struct _Hash_node_value_base
    *
@@ -251,18 +279,23 @@ namespace __detail
 
   /**
    *  Primary template struct _Hash_node_code_cache.
+   *
+   *  No cache.
    */
   template<bool _Cache_hash_code>
     struct _Hash_node_code_cache
     { };
 
   /**
-   *  Specialization for node with cache, struct _Hash_node_code_cache.
+   *  Specialization for node with cache.
    */
   template<>
     struct _Hash_node_code_cache<true>
     { std::size_t  _M_hash_code; };
 
+  /**
+   * Node with value and optionally a cache for the hash code.
+   */
   template<typename _Value, bool _Cache_hash_code>
     struct _Hash_node_value
     : _Hash_node_value_base<_Value>
@@ -270,28 +303,79 @@ namespace __detail
     { };
 
   /**
-   *  Primary template struct _Hash_node.
+   *  struct _Hash_node.
+   *
+   *  The node definition when the allocator is using raw pointers.
    */
   template<typename _Value, bool _Cache_hash_code>
     struct _Hash_node
     : _Hash_node_base
     , _Hash_node_value<_Value, _Cache_hash_code>
     {
+      using __node_base = _Hash_node_base;
+      using __node_ptr = _Hash_node*;
+      using __node_type = _Hash_node;
+      using __node_value_cache_type =
+	_Hash_node_value<_Value, _Cache_hash_code>;
+
       _Hash_node*
       _M_next() const noexcept
       { return static_cast<_Hash_node*>(this->_M_nxt); }
+
+      __node_ptr
+      _M_next_ptr() const noexcept
+      { return _M_next(); }
+    };
+
+  /**
+   *  struct _Hash_pnode.
+   *
+   *  The node definition used when the allocator defines a custom pointer type.
+   */
+  template<typename _Ptr, bool _Cache_hash_code>
+    struct _Hash_pnode
+    : _Hash_pnode_base<__ptr_rebind<_Ptr,
+				    _Hash_pnode<_Ptr, _Cache_hash_code>>>
+    , _Hash_node_value<typename std::pointer_traits<_Ptr>::element_type,
+		       _Cache_hash_code>
+    {
+      using __node_base =
+	_Hash_pnode_base<__ptr_rebind<_Ptr,
+				      _Hash_pnode<_Ptr, _Cache_hash_code>>>;
+      using __node_ptr = typename __node_base::__node_ptr;
+      using __node_type =
+	typename std::pointer_traits<__node_ptr>::element_type;
+      using value_type = typename __node_type::value_type;
+      using __node_value_cache_type =
+	_Hash_node_value<value_type, _Cache_hash_code>;
+      typedef typename std::pointer_traits<__node_ptr>::difference_type
+							difference_type;
+
+      __node_type*
+      _M_next() const noexcept
+      { return std::__to_address(this->_M_nxt); }
+
+      __node_ptr
+      _M_next_ptr() const noexcept
+      { return this->_M_nxt; }
     };
 
+  template<typename _Alloc, typename _Value, bool __hash_cached>
+    using __get_node_type = typename std::conditional<
+      std::is_pointer<__alloc_val_ptr<_Alloc, _Value>>::value,
+      _Hash_node<_Value, __hash_cached>,
+      _Hash_pnode<__alloc_val_ptr<_Alloc, _Value>, __hash_cached>>::type;
+
   /// Base class for node iterators.
-  template<typename _Value, bool _Cache_hash_code>
-    struct _Node_iterator_base
+  template<typename _NodeType>
+    struct _Hashtable_iterator_base
     {
-      using __node_type = _Hash_node<_Value, _Cache_hash_code>;
+      using __node_type = _NodeType;
 
       __node_type* _M_cur;
 
-      _Node_iterator_base() = default;
-      _Node_iterator_base(__node_type* __p) noexcept
+      _Hashtable_iterator_base() = default;
+      _Hashtable_iterator_base(__node_type* __p) noexcept
       : _M_cur(__p) { }
 
       void
@@ -299,18 +383,32 @@ namespace __detail
       { _M_cur = _M_cur->_M_next(); }
 
       friend bool
-      operator==(const _Node_iterator_base& __x, const _Node_iterator_base& __y)
-      noexcept
+      operator==(const _Hashtable_iterator_base& __x,
+		 const _Hashtable_iterator_base& __y) noexcept
       { return __x._M_cur == __y._M_cur; }
 
 #if __cpp_impl_three_way_comparison < 201907L
       friend bool
-      operator!=(const _Node_iterator_base& __x, const _Node_iterator_base& __y)
-      noexcept
+      operator!=(const _Hashtable_iterator_base& __x,
+		 const _Hashtable_iterator_base& __y) noexcept
       { return __x._M_cur != __y._M_cur; }
 #endif
     };
 
+  /// Base class for raw-pointer node iterators, a thin wrapper over _Hashtable_iterator_base.
+  template<typename _Value, bool _Cache_hash_code>
+    struct _Node_iterator_base
+    : _Hashtable_iterator_base<_Hash_node<_Value, _Cache_hash_code>>
+    {
+      using __base_type =
+	_Hashtable_iterator_base<_Hash_node<_Value, _Cache_hash_code>>;
+      using __node_type = typename __base_type::__node_type;
+
+      _Node_iterator_base() = default;
+      _Node_iterator_base(__node_type* __p) noexcept
+      : __base_type(__p) { }
+    };
+
   /// Node iterators, used to iterate through all the hashtable.
   template<typename _Value, bool __constant_iterators, bool __cache>
     struct _Node_iterator
@@ -414,6 +512,110 @@ namespace __detail
       }
     };
 
+  /// Node iterators for custom-pointer nodes, used to iterate through the whole hashtable.
+  template<typename _NodeType, bool __constant_iterators>
+    struct _Hashtable_iterator
+    : public _Hashtable_iterator_base<_NodeType>
+    {
+    private:
+      using __base_type = _Hashtable_iterator_base<_NodeType>;
+      using __node_type = typename __base_type::__node_type;
+
+    public:
+      typedef typename __node_type::value_type		value_type;
+      typedef typename __node_type::difference_type	difference_type;
+      typedef std::forward_iterator_tag			iterator_category;
+
+      using pointer = typename std::conditional<__constant_iterators,
+				  const value_type*, value_type*>::type;
+
+      using reference = typename std::conditional<__constant_iterators,
+				  const value_type&, value_type&>::type;
+
+      _Hashtable_iterator() noexcept
+      : __base_type(nullptr) { }
+
+      explicit
+      _Hashtable_iterator(__node_type* __p) noexcept
+      : __base_type(__p) { }
+
+      reference
+      operator*() const noexcept
+      { return this->_M_cur->_M_v(); }
+
+      pointer
+      operator->() const noexcept
+      { return this->_M_cur->_M_valptr(); }
+
+      _Hashtable_iterator&
+      operator++() noexcept
+      {
+	this->_M_incr();
+	return *this;
+      }
+
+      _Hashtable_iterator
+      operator++(int) noexcept
+      {
+	_Hashtable_iterator __tmp(*this);
+	this->_M_incr();
+	return __tmp;
+      }
+    };
+
+  /// Node const_iterators for custom-pointer nodes, used to iterate through the whole hashtable.
+  template<typename _NodeType, bool __constant_iterators>
+    struct _Hashtable_const_iterator
+    : public _Hashtable_iterator_base<_NodeType>
+    {
+    private:
+      using __base_type = _Hashtable_iterator_base<_NodeType>;
+      using __node_type = typename __base_type::__node_type;
+
+    public:
+      typedef typename __node_type::value_type		value_type;
+      typedef typename __node_type::difference_type	difference_type;
+      typedef std::forward_iterator_tag			iterator_category;
+
+      typedef const value_type*				pointer;
+      typedef const value_type&				reference;
+
+      _Hashtable_const_iterator() noexcept
+      : __base_type(nullptr) { }
+
+      explicit
+      _Hashtable_const_iterator(__node_type* __p) noexcept
+      : __base_type(__p) { }
+
+      _Hashtable_const_iterator(
+	const _Hashtable_iterator<_NodeType,
+				  __constant_iterators>& __x) noexcept
+      : __base_type(__x._M_cur) { }
+
+      reference
+      operator*() const noexcept
+      { return this->_M_cur->_M_v(); }
+
+      pointer
+      operator->() const noexcept
+      { return this->_M_cur->_M_valptr(); }
+
+      _Hashtable_const_iterator&
+      operator++() noexcept
+      {
+	this->_M_incr();
+	return *this;
+      }
+
+      _Hashtable_const_iterator
+      operator++(int) noexcept
+      {
+	_Hashtable_const_iterator __tmp(*this);
+	this->_M_incr();
+	return __tmp;
+      }
+    };
+
   // Many of class template _Hashtable's template parameters are policy
   // classes.  These are defaults for the policies.
 
@@ -800,16 +1002,15 @@ namespace __detail
 
       using __hash_cached = typename _Traits::__hash_cached;
       using __constant_iterators = typename _Traits::__constant_iterators;
+      using __alloc_ptr = __alloc_val_ptr<_Alloc, _Value>;
 
-      using __hashtable_alloc = _Hashtable_alloc<
-	__alloc_rebind<_Alloc, _Hash_node<_Value,
-					  __hash_cached::value>>>;
+      using __node_type = __get_node_type<_Alloc, _Value, __hash_cached::value>;
+      using __node_alloc_type = __alloc_rebind<_Alloc, __node_type>;
 
       using value_type = typename __hashtable_base::value_type;
       using size_type = typename __hashtable_base::size_type;
 
       using __unique_keys = typename _Traits::__unique_keys;
-      using __node_alloc_type = typename __hashtable_alloc::__node_alloc_type;
       using __node_gen_type = _AllocNode<__node_alloc_type>;
 
       __hashtable&
@@ -827,11 +1028,19 @@ namespace __detail
 			const _NodeGetter&, false_type __uks);
 
     public:
-      using iterator = _Node_iterator<_Value, __constant_iterators::value,
-				      __hash_cached::value>;
-
-      using const_iterator = _Node_const_iterator<_Value, __constant_iterators::value,
-						  __hash_cached::value>;
+      using iterator =
+	typename std::conditional<std::is_pointer<__alloc_ptr>::value,
+	  _Node_iterator<_Value,
+			 __constant_iterators::value, __hash_cached::value>,
+	  _Hashtable_iterator<__node_type,
+			      __constant_iterators::value>>::type;
+
+      using const_iterator =
+	typename std::conditional<std::is_pointer<__alloc_ptr>::value,
+	  _Node_const_iterator<_Value,
+			     __constant_iterators::value, __hash_cached::value>,
+	  _Hashtable_const_iterator<__node_type,
+				    __constant_iterators::value>>::type;
 
       using __ireturn_type = typename std::conditional<__unique_keys::value,
 						     std::pair<iterator, bool>,
@@ -1165,6 +1374,17 @@ namespace __detail
 	   bool __cache_hash_code>
     struct _Local_iterator_base;
 
+  /**
+   *  Primary class template _Hashtable_local_iter_base.
+   *
+   *  Base class for local iterators, used to iterate within a bucket
+   *  but not between buckets.
+   */
+  template<typename _Key, typename _NodeType, typename _ExtractKey,
+	   typename _Hash, typename _RangeHash, typename _Unused,
+	   bool __cache_hash_code>
+    struct _Hashtable_local_iter_base;
+
   /**
    *  Primary class template _Hash_code_base.
    *
@@ -1307,6 +1527,47 @@ namespace __detail
       _M_get_bucket() const { return _M_bucket; }  // for debug mode
     };
 
+  /// Partial specialization used when nodes contain a cached hash code.
+  template<typename _Key, typename _NodeType, typename _ExtractKey,
+	   typename _Hash, typename _RangeHash, typename _Unused>
+    struct _Hashtable_local_iter_base<_Key, _NodeType, _ExtractKey,
+				      _Hash, _RangeHash, _Unused, true>
+    : public _Hashtable_iterator_base<_NodeType>
+    {
+    protected:
+      using __base_node_iter = _Hashtable_iterator_base<_NodeType>;
+      using value_type = typename _NodeType::value_type;
+      using __hash_code_base = _Hash_code_base<_Key, value_type, _ExtractKey,
+					      _Hash, _RangeHash, _Unused, true>;
+
+      _Hashtable_local_iter_base() = default;
+      _Hashtable_local_iter_base(const __hash_code_base&,
+				 _NodeType* __p,
+				 std::size_t __bkt, std::size_t __bkt_count)
+      : __base_node_iter(__p), _M_bucket(__bkt), _M_bucket_count(__bkt_count)
+      { }
+
+      void
+      _M_incr()
+      {
+	__base_node_iter::_M_incr();
+	if (this->_M_cur)
+	  {
+	    std::size_t __bkt
+	      = _RangeHash{}(this->_M_cur->_M_hash_code, _M_bucket_count);
+	    if (__bkt != _M_bucket)
+	      this->_M_cur = nullptr;
+	  }
+      }
+
+      std::size_t _M_bucket;
+      std::size_t _M_bucket_count;
+
+    public:
+      std::size_t
+      _M_get_bucket() const { return _M_bucket; }  // for debug mode
+    };
+
   // Uninitialized storage for a _Hash_code_base.
   // This type is DefaultConstructible and Assignable even if the
   // _Hash_code_base type isn't, so that _Local_iterator_base<..., false>
@@ -1421,6 +1682,84 @@ namespace __detail
       _M_get_bucket() const { return _M_bucket; }  // for debug mode
     };
 
+  // Partial specialization used when hash codes are not cached
+  template<typename _Key, typename _NodeType, typename _ExtractKey,
+	   typename _Hash, typename _RangeHash, typename _Unused>
+    struct _Hashtable_local_iter_base<_Key, _NodeType, _ExtractKey,
+				      _Hash, _RangeHash, _Unused, false>
+    : _Hash_code_storage<_Hash>
+    , _Hashtable_iterator_base<_NodeType>
+    {
+    protected:
+      using value_type = typename _NodeType::value_type;
+      using __hash_code_base = _Hash_code_base<_Key, value_type, _ExtractKey,
+					     _Hash, _RangeHash, _Unused, false>;
+      using __node_iter_base = _Hashtable_iterator_base<_NodeType>;
+
+      _Hashtable_local_iter_base() : _M_bucket_count(-1) { }
+
+      _Hashtable_local_iter_base(const __hash_code_base& __base,
+				 _NodeType* __p,
+				 std::size_t __bkt, std::size_t __bkt_count)
+      : __node_iter_base(__p), _M_bucket(__bkt), _M_bucket_count(__bkt_count)
+      { _M_init(__base.hash_function()); }
+
+      ~_Hashtable_local_iter_base()
+      {
+	if (_M_bucket_count != -1)
+	  _M_destroy();
+      }
+
+      _Hashtable_local_iter_base(const _Hashtable_local_iter_base& __iter)
+      : __node_iter_base(__iter._M_cur), _M_bucket(__iter._M_bucket)
+      , _M_bucket_count(__iter._M_bucket_count)
+      {
+	if (_M_bucket_count != -1)
+	  _M_init(*__iter._M_h());
+      }
+
+      _Hashtable_local_iter_base&
+      operator=(const _Hashtable_local_iter_base& __iter)
+      {
+	if (_M_bucket_count != -1)
+	  _M_destroy();
+	this->_M_cur = __iter._M_cur;
+	_M_bucket = __iter._M_bucket;
+	_M_bucket_count = __iter._M_bucket_count;
+	if (_M_bucket_count != -1)
+	  _M_init(*__iter._M_h());
+	return *this;
+      }
+
+      void
+      _M_incr()
+      {
+	__node_iter_base::_M_incr();
+	if (this->_M_cur)
+	  {
+	    std::size_t __bkt =
+	      _RangeHash{}((*this->_M_h())(_ExtractKey{}(this->_M_cur->_M_v())),
+			   _M_bucket_count);
+	    if (__bkt != _M_bucket)
+	      this->_M_cur = nullptr;
+	  }
+      }
+
+      std::size_t _M_bucket;
+      std::size_t _M_bucket_count;
+
+      void
+      _M_init(const _Hash& __h)
+      { ::new(this->_M_h()) _Hash(__h); }
+
+      void
+      _M_destroy() { this->_M_h()->~_Hash(); }
+
+    public:
+      std::size_t
+      _M_get_bucket() const { return _M_bucket; }  // for debug mode
+    };
+
   /// local iterators
   template<typename _Key, typename _Value, typename _ExtractKey,
 	   typename _Hash, typename _RangeHash, typename _Unused,
@@ -1536,6 +1875,156 @@ namespace __detail
       }
     };
 
+  /// local iterators
+  template<typename _Key, typename _NodeType, typename _ExtractKey,
+	   typename _Hash, typename _RangeHash, typename _Unused,
+	   bool __constant_iterators, bool __cache>
+    struct _Hashtable_local_iterator
+    : public _Hashtable_local_iter_base<_Key, _NodeType, _ExtractKey,
+					_Hash, _RangeHash, _Unused, __cache>
+    {
+    private:
+      using __base_type =
+	_Hashtable_local_iter_base<_Key, _NodeType, _ExtractKey,
+				   _Hash, _RangeHash, _Unused, __cache>;
+      using __hash_code_base = typename __base_type::__hash_code_base;
+      using __node_type = typename __base_type::__node_type;
+
+    public:
+      typedef typename __base_type::value_type		value_type;
+      typedef typename std::conditional<__constant_iterators,
+					const value_type*, value_type*>::type
+							pointer;
+      typedef typename std::conditional<__constant_iterators,
+					const value_type&, value_type&>::type
+							reference;
+      typedef typename __node_type::difference_type	difference_type;
+      typedef std::forward_iterator_tag			iterator_category;
+
+      _Hashtable_local_iterator() = default;
+
+      _Hashtable_local_iterator(const __hash_code_base& __base,
+				__node_type* __n,
+				std::size_t __bkt, std::size_t __bkt_count)
+      : __base_type(__base, __n, __bkt, __bkt_count)
+      { }
+
+      reference
+      operator*() const
+      { return this->_M_cur->_M_v(); }
+
+      pointer
+      operator->() const
+      { return this->_M_cur->_M_valptr(); }
+
+      _Hashtable_local_iterator&
+      operator++()
+      {
+	this->_M_incr();
+	return *this;
+      }
+
+      _Hashtable_local_iterator
+      operator++(int)
+      {
+	_Hashtable_local_iterator __tmp(*this);
+	this->_M_incr();
+	return __tmp;
+      }
+    };
+
+  /// local const_iterators
+  template<typename _Key, typename _NodeType, typename _ExtractKey,
+	   typename _Hash, typename _RangeHash, typename _Unused,
+	   bool __constant_iterators, bool __cache>
+    struct _Hashtable_const_local_iterator
+    : public _Hashtable_local_iter_base<_Key, _NodeType, _ExtractKey,
+					_Hash, _RangeHash, _Unused, __cache>
+    {
+    private:
+      using __base_type =
+	_Hashtable_local_iter_base<_Key, _NodeType, _ExtractKey,
+				   _Hash, _RangeHash, _Unused, __cache>;
+      using __hash_code_base = typename __base_type::__hash_code_base;
+      using __node_type = typename __base_type::__node_type;
+
+    public:
+      typedef typename __base_type::value_type		value_type;
+      typedef const value_type*				pointer;
+      typedef const value_type&				reference;
+      typedef typename __node_type::difference_type	difference_type;
+      typedef std::forward_iterator_tag			iterator_category;
+
+      _Hashtable_const_local_iterator() = default;
+
+      _Hashtable_const_local_iterator(const __hash_code_base& __base,
+				      __node_type* __n,
+				    std::size_t __bkt, std::size_t __bkt_count)
+      : __base_type(__base, __n, __bkt, __bkt_count)
+      { }
+
+      _Hashtable_const_local_iterator(const _Hashtable_local_iterator<
+				      _Key, _NodeType, _ExtractKey,
+				      _Hash, _RangeHash, _Unused,
+				      __constant_iterators,
+				      __cache>& __x)
+      : __base_type(__x)
+      { }
+
+      reference
+      operator*() const
+      { return this->_M_cur->_M_v(); }
+
+      pointer
+      operator->() const
+      { return this->_M_cur->_M_valptr(); }
+
+      _Hashtable_const_local_iterator&
+      operator++()
+      {
+	this->_M_incr();
+	return *this;
+      }
+
+      _Hashtable_const_local_iterator
+      operator++(int)
+      {
+	_Hashtable_const_local_iterator __tmp(*this);
+	this->_M_incr();
+	return __tmp;
+      }
+    };
+
+  template<typename _NodePtr, typename _Key, typename _Value,
+	   typename _ExtractKey, typename _Hash,
+	   typename _RangeHash, typename _Unused,
+	   bool __constant_iterators, bool __hash_cached>
+    using __local_iterator = typename std::conditional<
+      std::is_pointer<_NodePtr>::value,
+      _Local_iterator<_Key, _Value,
+		      _ExtractKey, _Hash, _RangeHash, _Unused,
+		      __constant_iterators, __hash_cached>,
+      _Hashtable_local_iterator<_Key,
+	       typename std::pointer_traits<_NodePtr>::element_type,
+				_ExtractKey, _Hash, _RangeHash, _Unused,
+				__constant_iterators,
+				__hash_cached>>::type;
+
+  template<typename _NodePtr, typename _Key, typename _Value,
+	   typename _ExtractKey, typename _Hash,
+	   typename _RangeHash, typename _Unused,
+	   bool __constant_iterators, bool __hash_cached>
+    using __const_local_iterator = typename std::conditional<
+      std::is_pointer<_NodePtr>::value,
+      _Local_const_iterator<_Key, _Value,
+			    _ExtractKey, _Hash, _RangeHash, _Unused,
+			    __constant_iterators, __hash_cached>,
+      _Hashtable_const_local_iterator<_Key,
+	       typename std::pointer_traits<_NodePtr>::element_type,
+				      _ExtractKey, _Hash, _RangeHash, _Unused,
+				      __constant_iterators,
+				      __hash_cached>>::type;
+
   /**
    *  Primary class template _Hashtable_base.
    *
@@ -1679,8 +2168,9 @@ namespace __detail
 	  if (!__prev_n)
 	    return false;
 
-	  for (__node_type* __n = static_cast<__node_type*>(__prev_n->_M_nxt);;
-	       __n = __n->_M_next())
+	  __node_type* __n =
+	    static_cast<__node_type*>(std::__to_address(__prev_n->_M_nxt));
+	  for (;; __n = __n->_M_next())
 	    {
 	      if (__n->_M_v() == *__itx)
 		break;
@@ -1739,7 +2229,8 @@ namespace __detail
 	  if (!__y_prev_n)
 	    return false;
 
-	  __node_type* __y_n = static_cast<__node_type*>(__y_prev_n->_M_nxt);
+	  __node_type* __y_n =
+	    static_cast<__node_type*>(std::__to_address(__y_prev_n->_M_nxt));
 	  for (;;)
 	    {
 	      if (__this->key_eq()(_ExtractKey{}(__y_n->_M_v()),
@@ -1786,16 +2277,12 @@ namespace __detail
       // Use __gnu_cxx to benefit from _S_always_equal and al.
       using __node_alloc_traits = __gnu_cxx::__alloc_traits<__node_alloc_type>;
 
-      using __value_alloc_traits = typename __node_alloc_traits::template
-	rebind_traits<typename __node_type::value_type>;
-
-      using __node_ptr = __node_type*;
-      using __node_base = _Hash_node_base;
-      using __node_base_ptr = __node_base*;
+      using __node_ptr = typename __node_alloc_traits::pointer;
+      using __node_base = typename __node_type::__node_base;
       using __buckets_alloc_type =
-	__alloc_rebind<__node_alloc_type, __node_base_ptr>;
+	__alloc_rebind<__node_alloc_type, __node_base*>;
       using __buckets_alloc_traits = std::allocator_traits<__buckets_alloc_type>;
-      using __buckets_ptr = __node_base_ptr*;
+      using __buckets_ptr = typename __buckets_alloc_traits::pointer;
 
       _Hashtable_alloc() = default;
       _Hashtable_alloc(const _Hashtable_alloc&) = default;
@@ -1848,14 +2335,13 @@ namespace __detail
       -> __node_ptr
       {
 	auto __nptr = __node_alloc_traits::allocate(_M_node_allocator(), 1);
-	__node_ptr __n = std::__to_address(__nptr);
 	__try
 	  {
-	    ::new ((void*)__n) __node_type;
+	    ::new ((void*)std::__to_address(__nptr)) __node_type;
 	    __node_alloc_traits::construct(_M_node_allocator(),
-					   __n->_M_valptr(),
+					   __nptr->_M_valptr(),
 					   std::forward<_Args>(__args)...);
-	    return __n;
+	    return __nptr;
 	  }
 	__catch(...)
 	  {
@@ -1866,20 +2352,18 @@ namespace __detail
 
   template<typename _NodeAlloc>
     void
-    _Hashtable_alloc<_NodeAlloc>::_M_deallocate_node(__node_ptr __n)
+    _Hashtable_alloc<_NodeAlloc>::_M_deallocate_node(__node_ptr __nptr)
     {
-      __node_alloc_traits::destroy(_M_node_allocator(), __n->_M_valptr());
-      _M_deallocate_node_ptr(__n);
+      __node_alloc_traits::destroy(_M_node_allocator(), __nptr->_M_valptr());
+      _M_deallocate_node_ptr(__nptr);
     }
 
   template<typename _NodeAlloc>
     void
-    _Hashtable_alloc<_NodeAlloc>::_M_deallocate_node_ptr(__node_ptr __n)
+    _Hashtable_alloc<_NodeAlloc>::_M_deallocate_node_ptr(__node_ptr __nptr)
     {
-      typedef typename __node_alloc_traits::pointer _Ptr;
-      auto __ptr = std::pointer_traits<_Ptr>::pointer_to(*__n);
-      __n->~__node_type();
-      __node_alloc_traits::deallocate(_M_node_allocator(), __ptr, 1);
+      __nptr->~__node_type();
+      __node_alloc_traits::deallocate(_M_node_allocator(), __nptr, 1);
     }
 
   template<typename _NodeAlloc>
@@ -1889,7 +2373,7 @@ namespace __detail
       while (__n)
 	{
 	  __node_ptr __tmp = __n;
-	  __n = __n->_M_next();
+	  __n = __n->_M_next_ptr();
 	  _M_deallocate_node(__tmp);
 	}
     }
@@ -1902,9 +2386,9 @@ namespace __detail
       __buckets_alloc_type __alloc(_M_node_allocator());
 
       auto __ptr = __buckets_alloc_traits::allocate(__alloc, __bkt_count);
-      __buckets_ptr __p = std::__to_address(__ptr);
-      __builtin_memset(__p, 0, __bkt_count * sizeof(__node_base_ptr));
-      return __p;
+      __builtin_memset(std::__to_address(__ptr), 0,
+		       __bkt_count * sizeof(__node_base*));
+      return __ptr;
     }
 
   template<typename _NodeAlloc>
@@ -1913,10 +2397,8 @@ namespace __detail
     _M_deallocate_buckets(__buckets_ptr __bkts,
 			  std::size_t __bkt_count)
     {
-      typedef typename __buckets_alloc_traits::pointer _Ptr;
-      auto __ptr = std::pointer_traits<_Ptr>::pointer_to(*__bkts);
       __buckets_alloc_type __alloc(_M_node_allocator());
-      __buckets_alloc_traits::deallocate(__alloc, __ptr, __bkt_count);
+      __buckets_alloc_traits::deallocate(__alloc, __bkts, __bkt_count);
     }
 
  //@} hashtable-detail
diff --git a/libstdc++-v3/testsuite/23_containers/unordered_map/allocator/ext_ptr.cc b/libstdc++-v3/testsuite/23_containers/unordered_map/allocator/ext_ptr.cc
new file mode 100644
index 00000000000..5e9ff548032
--- /dev/null
+++ b/libstdc++-v3/testsuite/23_containers/unordered_map/allocator/ext_ptr.cc
@@ -0,0 +1,57 @@
+// Copyright (C) 2021 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library.  This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING3.  If not see
+// <http://www.gnu.org/licenses/>.
+
+// { dg-do run { target c++11 } }
+
+#include <unordered_map>
+#include <memory>
+#include <testsuite_hooks.h>
+#include <testsuite_allocator.h>
+
+struct T { int i; };
+
+bool operator==(const T& l, const T& r)
+{ return l.i == r.i; }
+
+struct H
+{
+  std::size_t
+  operator()(const T& t) const noexcept
+  { return t.i; }
+};
+
+struct E : std::equal_to<T>
+{ };
+
+using __gnu_test::CustomPointerAlloc;
+
+template class std::unordered_map<T, int, H, E,
+				  CustomPointerAlloc<std::pair<const T, int>>>;
+
+void test01()
+{
+  typedef CustomPointerAlloc<std::pair<const T, int>> alloc_type;
+  typedef std::unordered_map<T, int, H, E, alloc_type> test_type;
+  test_type v;
+  v.insert({ T(), 0 });
+  VERIFY( ++v.begin() == v.end() );
+}
+
+int main()
+{
+  test01();
+}
diff --git a/libstdc++-v3/testsuite/23_containers/unordered_multimap/allocator/ext_ptr.cc b/libstdc++-v3/testsuite/23_containers/unordered_multimap/allocator/ext_ptr.cc
new file mode 100644
index 00000000000..6dd62a40293
--- /dev/null
+++ b/libstdc++-v3/testsuite/23_containers/unordered_multimap/allocator/ext_ptr.cc
@@ -0,0 +1,57 @@
+// Copyright (C) 2021 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library.  This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING3.  If not see
+// <http://www.gnu.org/licenses/>.
+
+// { dg-do run { target c++11 } }
+
+#include <unordered_map>
+#include <memory>
+#include <testsuite_hooks.h>
+#include <testsuite_allocator.h>
+
+struct T { int i; };
+
+bool operator==(const T& l, const T& r)
+{ return l.i == r.i; }
+
+struct H
+{
+  std::size_t
+  operator()(const T& t) const noexcept
+  { return t.i; }
+};
+
+struct E : std::equal_to<T>
+{ };
+
+using __gnu_test::CustomPointerAlloc;
+
+template class std::unordered_multimap<T, int, H, E,
+				       CustomPointerAlloc<std::pair<const T, int>>>;
+
+void test01()
+{
+  typedef CustomPointerAlloc<std::pair<const T, int>> alloc_type;
+  typedef std::unordered_multimap<T, int, H, E, alloc_type> test_type;
+  test_type v;
+  v.insert({ T(), 0 });
+  VERIFY( ++v.begin() == v.end() );
+}
+
+int main()
+{
+  test01();
+}
diff --git a/libstdc++-v3/testsuite/23_containers/unordered_multiset/allocator/ext_ptr.cc b/libstdc++-v3/testsuite/23_containers/unordered_multiset/allocator/ext_ptr.cc
new file mode 100644
index 00000000000..dbc7b6247a2
--- /dev/null
+++ b/libstdc++-v3/testsuite/23_containers/unordered_multiset/allocator/ext_ptr.cc
@@ -0,0 +1,56 @@
+// Copyright (C) 2021 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library.  This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING3.  If not see
+// <http://www.gnu.org/licenses/>.
+
+// { dg-do run { target c++11 } }
+
+#include <unordered_set>
+#include <memory>
+#include <testsuite_hooks.h>
+#include <testsuite_allocator.h>
+
+struct T { int i; };
+
+bool operator==(const T& l, const T& r)
+{ return l.i == r.i; }
+
+struct H
+{
+  std::size_t
+  operator()(const T& t) const noexcept
+  { return t.i; }
+};
+
+struct E : std::equal_to<T>
+{ };
+
+using __gnu_test::CustomPointerAlloc;
+
+template class std::unordered_multiset<T, H, E, CustomPointerAlloc<T>>;
+
+void test01()
+{
+  typedef CustomPointerAlloc<T> alloc_type;
+  typedef std::unordered_multiset<T, H, E, alloc_type> test_type;
+  test_type v;
+  v.insert(T());
+  VERIFY( ++v.begin() == v.end() );
+}
+
+int main()
+{
+  test01();
+}
diff --git a/libstdc++-v3/testsuite/23_containers/unordered_set/allocator/ext_ptr.cc b/libstdc++-v3/testsuite/23_containers/unordered_set/allocator/ext_ptr.cc
index c0e6a1f53a2..88814b3009c 100644
--- a/libstdc++-v3/testsuite/23_containers/unordered_set/allocator/ext_ptr.cc
+++ b/libstdc++-v3/testsuite/23_containers/unordered_set/allocator/ext_ptr.cc
@@ -15,10 +15,7 @@
 // with this library; see the file COPYING3.  If not see
 // <http://www.gnu.org/licenses/>.
 
-// This test fails to compile since C++17 (see xfail-if below) so we can only
-// do a "run" test for C++11 and C++14, and a "compile" test for C++17 and up.
-// { dg-do run { target { c++11_only || c++14_only } } }
-// { dg-do compile { target c++17 } }
+// { dg-do run { target { c++11 } } }
 
 #include <unordered_set>
 #include <memory>
@@ -26,15 +23,22 @@
 #include <testsuite_allocator.h>
 
 struct T { int i; };
-bool operator==(const T& l, const T& r) { return l.i == r.i; }
-struct H { std::size_t operator()(const T& t) const noexcept { return t.i; }
+
+bool operator==(const T& l, const T& r)
+{ return l.i == r.i; }
+
+struct H
+{
+  std::size_t
+  operator()(const T& t) const noexcept
+  { return t.i; }
 };
-struct E : std::equal_to<T> { };
+
+struct E : std::equal_to<T>
+{ };
 
 using __gnu_test::CustomPointerAlloc;
 
-// { dg-xfail-if "node reinsertion assumes raw pointers" { c++17 } }
-// TODO when removing this xfail change the test back to "dg-do run".
 template class std::unordered_set<T, H, E, CustomPointerAlloc<T>>;
 
 void test01()

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: libstdc++ PR 57272 Fancy pointer support in Hashtable
  2020-11-02 14:11         ` Jonathan Wakely
  2020-11-02 21:33           ` François Dumont
  2021-01-11 18:10           ` François Dumont
@ 2021-06-10 17:22           ` François Dumont
  2 siblings, 0 replies; 10+ messages in thread
From: François Dumont @ 2021-06-10 17:22 UTC (permalink / raw)
  To: Jonathan Wakely; +Cc: libstdc++, gcc-patches

[-- Attachment #1: Type: text/plain, Size: 1155 bytes --]

I would like to renew this proposal.

I considered all your feedback except for this point:

On 02/11/20 3:11 pm, Jonathan Wakely wrote:
>
> There's no point doing it if you still use raw pointers.
>
> It either has to be done completely, or it's a waste of time and
> energy.
>

Why? Can you point to the Standard wording that explains why the 
custom pointer type must be used everywhere?

For the moment I have assumed that fancy pointer types are meant to allow 
access to special memory areas in which a simple raw pointer is not 
enough to describe an object's location. This is why this patch 
makes sure that the fancy pointer is stored and returned to the 
allocator without any loss of information.

Otherwise, for internal Hashtable purposes, simple raw pointers are still 
being used. I cannot imagine that any user expects to improve 
container performance with a hand-written pointer implementation.

For the moment I have ignored the comment in the PR about limiting the 
operations performed on the pointer (other than by simply not using it 
everywhere, of course). I will propose adding move semantics to those 
pointers if this patch is accepted.

François


[-- Attachment #2: hashtable_cust_ptr.patch --]
[-- Type: text/x-patch, Size: 62503 bytes --]

diff --git a/libstdc++-v3/include/bits/hashtable.h b/libstdc++-v3/include/bits/hashtable.h
index 4bdbe7dd9cc..c77cb50c3d9 100644
--- a/libstdc++-v3/include/bits/hashtable.h
+++ b/libstdc++-v3/include/bits/hashtable.h
@@ -182,8 +182,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 				 _RehashPolicy, _Traits>,
       private __detail::_Hashtable_alloc<
 	__alloc_rebind<_Alloc,
-		       __detail::_Hash_node<_Value,
-					    _Traits::__hash_cached::value>>>
+		       __detail::__get_node_type<_Alloc, _Value,
+						_Traits::__hash_cached::value>>>
     {
       static_assert(is_same<typename remove_cv<_Value>::type, _Value>::value,
 	  "unordered container must have a non-const, non-volatile value_type");
@@ -195,21 +195,21 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       using __traits_type = _Traits;
       using __hash_cached = typename __traits_type::__hash_cached;
       using __constant_iterators = typename __traits_type::__constant_iterators;
-      using __node_type = __detail::_Hash_node<_Value, __hash_cached::value>;
+      using __node_type = __detail::__get_node_type<
+	_Alloc, _Value, _Traits::__hash_cached::value>;
       using __node_alloc_type = __alloc_rebind<_Alloc, __node_type>;
-
       using __hashtable_alloc = __detail::_Hashtable_alloc<__node_alloc_type>;
 
       using __node_value_type =
-	__detail::_Hash_node_value<_Value, __hash_cached::value>;
+	typename __node_type::__node_value_cache_type;
       using __node_ptr = typename __hashtable_alloc::__node_ptr;
-      using __value_alloc_traits =
-	typename __hashtable_alloc::__value_alloc_traits;
       using __node_alloc_traits =
 	typename __hashtable_alloc::__node_alloc_traits;
-      using __node_base = typename __hashtable_alloc::__node_base;
-      using __node_base_ptr = typename __hashtable_alloc::__node_base_ptr;
+      using __node_base = typename __node_type::__node_base;
+      using __value_alloc_traits =
+	typename __node_alloc_traits::template rebind_traits<_Value>;
       using __buckets_ptr = typename __hashtable_alloc::__buckets_ptr;
+      using __buckets_ptr_traits = std::pointer_traits<__buckets_ptr>;
 
       using __insert_base = __detail::_Insert<_Key, _Value, _Alloc, _ExtractKey,
 					      _Equal, _Hash,
@@ -233,15 +233,15 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 
       using const_iterator = typename __insert_base::const_iterator;
 
-      using local_iterator = __detail::_Local_iterator<key_type, _Value,
-			_ExtractKey, _Hash, _RangeHash, _Unused,
-					     __constant_iterators::value,
-					     __hash_cached::value>;
+      using local_iterator = __detail::__local_iterator<
+	__node_ptr, key_type, value_type,
+	_ExtractKey, _Hash, _RangeHash, _Unused,
+	__constant_iterators::value, __hash_cached::value>;
 
-      using const_local_iterator = __detail::_Local_const_iterator<
-			key_type, _Value,
-			_ExtractKey, _Hash, _RangeHash, _Unused,
-			__constant_iterators::value, __hash_cached::value>;
+      using const_local_iterator = __detail::__const_local_iterator<
+	__node_ptr, key_type, value_type,
+	_ExtractKey, _Hash, _RangeHash, _Unused,
+	__constant_iterators::value, __hash_cached::value>;
 
     private:
       using __rehash_type = _RehashPolicy;
@@ -376,7 +376,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 #endif
 
     private:
-      __buckets_ptr		_M_buckets		= &_M_single_bucket;
+      __buckets_ptr		_M_buckets =
+			__buckets_ptr_traits::pointer_to(_M_single_bucket);
       size_type			_M_bucket_count		= 1;
       __node_base		_M_before_begin;
       size_type			_M_element_count	= 0;
@@ -388,13 +389,13 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       // qualified.
       // Note that we can't leave hashtable with 0 bucket without adding
       // numerous checks in the code to avoid 0 modulus.
-      __node_base_ptr		_M_single_bucket	= nullptr;
+      __node_base*		_M_single_bucket	= nullptr;
 
       void
       _M_update_bbegin()
       {
-	if (_M_begin())
-	  _M_buckets[_M_bucket_index(*_M_begin())] = &_M_before_begin;
+	if (auto __begin = _M_begin())
+	  _M_buckets[_M_bucket_index(*__begin)] = &_M_before_begin;
       }
 
       void
@@ -406,7 +407,10 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 
       bool
       _M_uses_single_bucket(__buckets_ptr __bkts) const
-      { return __builtin_expect(__bkts == &_M_single_bucket, false); }
+      {
+	return __builtin_expect(
+	  std::__to_address(__bkts) == &_M_single_bucket, false);
+      }
 
       bool
       _M_uses_single_bucket() const
@@ -421,7 +425,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	if (__builtin_expect(__bkt_count == 1, false))
 	  {
 	    _M_single_bucket = nullptr;
-	    return &_M_single_bucket;
+	    return __buckets_ptr_traits::pointer_to(_M_single_bucket);
 	  }
 
 	return __hashtable_alloc::_M_allocate_buckets(__bkt_count);
@@ -440,14 +444,29 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       _M_deallocate_buckets()
       { _M_deallocate_buckets(_M_buckets, _M_bucket_count); }
 
+      static __node_ptr
+      _S_cast(__node_ptr __n)
+      { return __n; }
+
+      static __node_ptr
+      _S_cast(__detail::_Hash_node_base* __n)
+      { return static_cast<__node_ptr>(__n); }
+
       // Gets bucket begin, deals with the fact that non-empty buckets contain
       // their before begin node.
-      __node_ptr
+      __node_type*
       _M_bucket_begin(size_type __bkt) const;
 
       __node_ptr
+      _M_pbegin() const
+      { return _S_cast(_M_before_begin._M_nxt); }
+
+      __node_type*
       _M_begin() const
-      { return static_cast<__node_ptr>(_M_before_begin._M_nxt); }
+      {
+	return
+	  static_cast<__node_type*>(std::__to_address(_M_before_begin._M_nxt));
+      }
 
       // Assign *this using another _Hashtable instance. Whether elements
       // are copied or moved depends on the _Ht reference.
@@ -579,7 +598,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       _Hashtable&
       operator=(initializer_list<value_type> __l)
       {
-	__reuse_or_alloc_node_gen_t __roan(_M_begin(), *this);
+	__reuse_or_alloc_node_gen_t __roan(_M_pbegin(), *this);
 	_M_before_begin._M_nxt = nullptr;
 	clear();
 
@@ -779,31 +798,33 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 
       // Find and insert helper functions and types
       // Find the node before the one matching the criteria.
-      __node_base_ptr
+      __node_base*
       _M_find_before_node(size_type, const key_type&, __hash_code) const;
 
       template<typename _Kt>
-	__node_base_ptr
+	__node_base*
 	_M_find_before_node_tr(size_type, const _Kt&, __hash_code) const;
 
-      __node_ptr
+      __node_type*
       _M_find_node(size_type __bkt, const key_type& __key,
 		   __hash_code __c) const
       {
-	__node_base_ptr __before_n = _M_find_before_node(__bkt, __key, __c);
+	__node_base* __before_n = _M_find_before_node(__bkt, __key, __c);
 	if (__before_n)
-	  return static_cast<__node_ptr>(__before_n->_M_nxt);
+	  return static_cast<__node_type*>(
+				std::__to_address(__before_n->_M_nxt));
 	return nullptr;
       }
 
       template<typename _Kt>
-	__node_ptr
+	__node_type*
 	_M_find_node_tr(size_type __bkt, const _Kt& __key,
 			__hash_code __c) const
 	{
 	  auto __before_n = _M_find_before_node_tr(__bkt, __key, __c);
 	  if (__before_n)
-	    return static_cast<__node_ptr>(__before_n->_M_nxt);
+	    return static_cast<__node_type*>(
+				std::__to_address(__before_n->_M_nxt));
 	  return nullptr;
 	}
 
@@ -817,8 +838,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 			     size_type __next_bkt);
 
       // Get the node before __n in the bucket __bkt
-      __node_base_ptr
-      _M_get_previous_node(size_type __bkt, __node_ptr __n);
+      __node_base*
+      _M_get_previous_node(size_type __bkt, __node_type* __n);
 
       // Insert node __n with hash code __code, in bucket __bkt if no
       // rehash (assumes no element with same key already present).
@@ -830,7 +851,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       // Insert node __n with key __k and hash code __code.
       // Takes ownership of __n if insertion succeeds, throws otherwise.
       iterator
-      _M_insert_multi_node(__node_ptr __hint,
+      _M_insert_multi_node(__node_type* __hint,
 			   __hash_code __code, __node_ptr __n);
 
       template<typename... _Args>
@@ -914,7 +935,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       _M_erase(false_type __uks, const key_type&);
 
       iterator
-      _M_erase(size_type __bkt, __node_base_ptr __prev_n, __node_ptr __n);
+      _M_erase(size_type __bkt, __node_base* __prev_n, __node_ptr __n);
 
     public:
       // Emplace
@@ -974,7 +995,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	    const key_type& __k = __nh._M_key();
 	    __hash_code __code = this->_M_hash_code(__k);
 	    size_type __bkt = _M_bucket_index(__code);
-	    if (__node_ptr __n = _M_find_node(__bkt, __k, __code))
+	    if (__node_type* __n = _M_find_node(__bkt, __k, __code))
 	      {
 		__ret.node = std::move(__nh);
 		__ret.position = iterator(__n);
@@ -1010,11 +1031,11 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 
     private:
       node_type
-      _M_extract_node(size_t __bkt, __node_base_ptr __prev_n)
+      _M_extract_node(size_t __bkt, __node_base* __prev_n)
       {
-	__node_ptr __n = static_cast<__node_ptr>(__prev_n->_M_nxt);
+	__node_ptr __n = _S_cast(__prev_n->_M_nxt);
 	if (__prev_n == _M_buckets[__bkt])
-	  _M_remove_bucket_begin(__bkt, __n->_M_next(),
+	  _M_remove_bucket_begin(__bkt, __n->_M_next_ptr(),
 	     __n->_M_nxt ? _M_bucket_index(*__n->_M_next()) : 0);
 	else if (__n->_M_nxt)
 	  {
@@ -1046,7 +1067,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	node_type __nh;
 	__hash_code __code = this->_M_hash_code(__k);
 	std::size_t __bkt = _M_bucket_index(__code);
-	if (__node_base_ptr __prev_node = _M_find_before_node(__bkt, __k, __code))
+	if (auto __prev_node = _M_find_before_node(__bkt, __k, __code))
 	  __nh = _M_extract_node(__bkt, __prev_node);
 	return __nh;
       }
@@ -1116,10 +1137,12 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
     _M_bucket_begin(size_type __bkt) const
-    -> __node_ptr
+    -> __node_type*
     {
-      __node_base_ptr __n = _M_buckets[__bkt];
-      return __n ? static_cast<__node_ptr>(__n->_M_nxt) : nullptr;
+      __node_base* __n = _M_buckets[__bkt];
+      return __n
+	? static_cast<__node_type*>(std::__to_address(__n->_M_nxt))
+	: nullptr;
     }
 
   template<typename _Key, typename _Value, typename _Alloc,
@@ -1207,7 +1230,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	      && __this_alloc != __that_alloc)
 	    {
 	      // Replacement allocator cannot free existing storage.
-	      this->_M_deallocate_nodes(_M_begin());
+	      this->_M_deallocate_nodes(_M_pbegin());
 	      _M_before_begin._M_nxt = nullptr;
 	      _M_deallocate_buckets();
 	      _M_buckets = nullptr;
@@ -1259,15 +1282,15 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	    _M_bucket_count = __ht._M_bucket_count;
 	  }
 	else
-	  __builtin_memset(_M_buckets, 0,
-			   _M_bucket_count * sizeof(__node_base_ptr));
+	  __builtin_memset(std::__to_address(_M_buckets), 0,
+			   _M_bucket_count * sizeof(__node_base*));
 
 	__try
 	  {
 	    __hashtable_base::operator=(std::forward<_Ht>(__ht));
 	    _M_element_count = __ht._M_element_count;
 	    _M_rehash_policy = __ht._M_rehash_policy;
-	    __reuse_or_alloc_node_gen_t __roan(_M_begin(), *this);
+	    __reuse_or_alloc_node_gen_t __roan(_M_pbegin(), *this);
 	    _M_before_begin._M_nxt = nullptr;
 	    _M_assign(std::forward<_Ht>(__ht), __roan);
 	    if (__former_buckets)
@@ -1283,8 +1306,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 		_M_buckets = __former_buckets;
 		_M_bucket_count = __former_bucket_count;
 	      }
-	    __builtin_memset(_M_buckets, 0,
-			     _M_bucket_count * sizeof(__node_base_ptr));
+	    __builtin_memset(std::__to_address(_M_buckets), 0,
+			     _M_bucket_count * sizeof(__node_base*));
 	    __throw_exception_again;
 	  }
       }
@@ -1310,14 +1333,14 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 
 	    // First deal with the special first node pointed to by
 	    // _M_before_begin.
-	    __node_ptr __ht_n = __ht._M_begin();
+	    __node_type* __ht_n = __ht._M_begin();
 	    __node_ptr __this_n
 	      = __node_gen(__fwd_value_for<_Ht>(__ht_n->_M_v()));
 	    this->_M_copy_code(*__this_n, *__ht_n);
 	    _M_update_bbegin(__this_n);
 
 	    // Then deal with other nodes.
-	    __node_ptr __prev_n = __this_n;
+	    __node_base* __prev_n = std::__to_address(__this_n);
 	    for (__ht_n = __ht_n->_M_next(); __ht_n; __ht_n = __ht_n->_M_next())
 	      {
 		__this_n = __node_gen(__fwd_value_for<_Ht>(__ht_n->_M_v()));
@@ -1326,7 +1349,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 		size_type __bkt = _M_bucket_index(*__this_n);
 		if (!_M_buckets[__bkt])
 		  _M_buckets[__bkt] = __prev_n;
-		__prev_n = __this_n;
+		__prev_n = std::__to_address(__this_n);
 	      }
 	  }
 	__catch(...)
@@ -1350,7 +1373,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       _M_rehash_policy._M_reset();
       _M_bucket_count = 1;
       _M_single_bucket = nullptr;
-      _M_buckets = &_M_single_bucket;
+      _M_buckets = __buckets_ptr_traits::pointer_to(_M_single_bucket);
       _M_before_begin._M_nxt = nullptr;
       _M_element_count = 0;
     }
@@ -1367,7 +1390,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       if (__builtin_expect(std::__addressof(__ht) == this, false))
 	return;
 
-      this->_M_deallocate_nodes(_M_begin());
+      this->_M_deallocate_nodes(_M_pbegin());
       _M_deallocate_buckets();
       __hashtable_base::operator=(std::move(__ht));
       _M_rehash_policy = __ht._M_rehash_policy;
@@ -1375,7 +1398,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	_M_buckets = __ht._M_buckets;
       else
 	{
-	  _M_buckets = &_M_single_bucket;
+	  _M_buckets = __buckets_ptr_traits::pointer_to(_M_single_bucket);
 	  _M_single_bucket = __ht._M_single_bucket;
 	}
 
@@ -1451,7 +1474,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       // Update buckets if __ht is using its single bucket.
       if (__ht._M_uses_single_bucket())
 	{
-	  _M_buckets = &_M_single_bucket;
+	  _M_buckets = __buckets_ptr_traits::pointer_to(_M_single_bucket);
 	  _M_single_bucket = __ht._M_single_bucket;
 	}
 
@@ -1502,7 +1525,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	{
 	  if (__ht._M_uses_single_bucket())
 	    {
-	      _M_buckets = &_M_single_bucket;
+	      _M_buckets = __buckets_ptr_traits::pointer_to(_M_single_bucket);
 	      _M_single_bucket = __ht._M_single_bucket;
 	    }
 	  else
@@ -1510,7 +1533,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 
 	  // Fix bucket containing the _M_before_begin pointer that can't be
 	  // moved.
-	  _M_update_bbegin(__ht._M_begin());
+	  _M_update_bbegin(__ht._M_pbegin());
 
 	  __ht._M_reset();
 	}
@@ -1563,13 +1586,14 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	  if (!__x._M_uses_single_bucket())
 	    {
 	      _M_buckets = __x._M_buckets;
-	      __x._M_buckets = &__x._M_single_bucket;
+	      __x._M_buckets =
+		__buckets_ptr_traits::pointer_to(__x._M_single_bucket);
 	    }
 	}
       else if (__x._M_uses_single_bucket())
 	{
 	  __x._M_buckets = _M_buckets;
-	  _M_buckets = &_M_single_bucket;
+	  _M_buckets = __buckets_ptr_traits::pointer_to(_M_single_bucket);
 	}	
       else
 	std::swap(_M_buckets, __x._M_buckets);
@@ -1833,13 +1857,14 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
     _M_find_before_node(size_type __bkt, const key_type& __k,
 			__hash_code __code) const
-    -> __node_base_ptr
+    -> __node_base*
     {
-      __node_base_ptr __prev_p = _M_buckets[__bkt];
+      __node_base* __prev_p = _M_buckets[__bkt];
       if (!__prev_p)
 	return nullptr;
 
-      for (__node_ptr __p = static_cast<__node_ptr>(__prev_p->_M_nxt);;
+      for (__node_type* __p =
+	     static_cast<__node_type*>(std::__to_address(__prev_p->_M_nxt));;
 	   __p = __p->_M_next())
 	{
 	  if (this->_M_equals(__k, __code, *__p))
@@ -1863,13 +1888,14 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 		 _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
       _M_find_before_node_tr(size_type __bkt, const _Kt& __k,
 			     __hash_code __code) const
-      -> __node_base_ptr
+      -> __node_base*
       {
-	__node_base_ptr __prev_p = _M_buckets[__bkt];
+	__node_base* __prev_p = _M_buckets[__bkt];
 	if (!__prev_p)
 	  return nullptr;
 
-	for (__node_ptr __p = static_cast<__node_ptr>(__prev_p->_M_nxt);;
+	for (__node_type* __p =
+	       static_cast<__node_type*>(std::__to_address(__prev_p->_M_nxt));;
 	     __p = __p->_M_next())
 	  {
 	    if (this->_M_equals_tr(__k, __code, *__p))
@@ -1910,7 +1936,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	  if (__node->_M_nxt)
 	    // We must update former begin bucket that is pointing to
 	    // _M_before_begin.
-	    _M_buckets[_M_bucket_index(*__node->_M_next())] = __node;
+	    _M_buckets[_M_bucket_index(*__node->_M_next())] =
+	      std::__to_address(__node);
 
 	  _M_buckets[__bkt] = &_M_before_begin;
 	}
@@ -1947,12 +1974,12 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     auto
     _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
-    _M_get_previous_node(size_type __bkt, __node_ptr __n)
-    -> __node_base_ptr
+    _M_get_previous_node(size_type __bkt, __node_type* __n)
+    -> __node_base*
     {
-      __node_base_ptr __prev_n = _M_buckets[__bkt];
-      while (__prev_n->_M_nxt != __n)
-	__prev_n = __prev_n->_M_nxt;
+      __node_base* __prev_n = _M_buckets[__bkt];
+      while (std::__to_address(__prev_n->_M_nxt) != __n)
+	__prev_n = std::__to_address(__prev_n->_M_nxt);
       return __prev_n;
     }
 
@@ -1972,7 +1999,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	const key_type& __k = _ExtractKey{}(__node._M_node->_M_v());
 	__hash_code __code = this->_M_hash_code(__k);
 	size_type __bkt = _M_bucket_index(__code);
-	if (__node_ptr __p = _M_find_node(__bkt, __k, __code))
+	if (__node_type* __p = _M_find_node(__bkt, __k, __code))
 	  // There is already an equivalent node, no insertion
 	  return std::make_pair(iterator(__p), false);
 
@@ -2032,7 +2059,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       // Always insert at the beginning of the bucket.
       _M_insert_bucket_begin(__bkt, __node);
       ++_M_element_count;
-      return iterator(__node);
+      return iterator(std::__to_address(__node));
     }
 
   template<typename _Key, typename _Value, typename _Alloc,
@@ -2042,7 +2069,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     auto
     _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
-    _M_insert_multi_node(__node_ptr __hint,
+    _M_insert_multi_node(__node_type* __hint,
 			 __hash_code __code, __node_ptr __node)
     -> iterator
     {
@@ -2059,7 +2086,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 
       // Find the node before an equivalent one or use hint if it exists and
       // if it is equivalent.
-      __node_base_ptr __prev
+      __node_base* __prev
 	= __builtin_expect(__hint != nullptr, false)
 	  && this->_M_equals(__k, __code, *__hint)
 	    ? __hint
@@ -2078,7 +2105,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	      {
 		size_type __next_bkt = _M_bucket_index(*__node->_M_next());
 		if (__next_bkt != __bkt)
-		  _M_buckets[__next_bkt] = __node;
+		  _M_buckets[__next_bkt] = std::__to_address(__node);
 	      }
 	}
       else
@@ -2087,7 +2114,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	// equivalent elements' relative positions.
 	_M_insert_bucket_begin(__bkt, __node);
       ++_M_element_count;
-      return iterator(__node);
+      return iterator(std::__to_address(__node));
     }
 
   // Insert v if no element with its key is already present.
@@ -2106,8 +2133,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	__hash_code __code = this->_M_hash_code_tr(__k);
 	size_type __bkt = _M_bucket_index(__code);
 
-	if (__node_ptr __node = _M_find_node_tr(__bkt, __k, __code))
-	  return { iterator(__node), false };
+	if (__node_type* __n = _M_find_node_tr(__bkt, __k, __code))
+	  return { iterator(__n), false };
 
 	_Scoped_node __node {
 	  __node_builder_t::_S_build(std::forward<_Kt>(__k),
@@ -2158,14 +2185,14 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     erase(const_iterator __it)
     -> iterator
     {
-      __node_ptr __n = __it._M_cur;
+      __node_type* __n = __it._M_cur;
       std::size_t __bkt = _M_bucket_index(*__n);
 
       // Look for previous node to unlink it from the erased one, this
       // is why we need buckets to contain the before begin to make
       // this search fast.
-      __node_base_ptr __prev_n = _M_get_previous_node(__bkt, __n);
-      return _M_erase(__bkt, __prev_n, __n);
+      __node_base* __prev_n = _M_get_previous_node(__bkt, __n);
+      return _M_erase(__bkt, __prev_n, _S_cast(__prev_n->_M_nxt));
     }
 
   template<typename _Key, typename _Value, typename _Alloc,
@@ -2175,11 +2202,11 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     auto
     _Hashtable<_Key, _Value, _Alloc, _ExtractKey, _Equal,
 	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
-    _M_erase(size_type __bkt, __node_base_ptr __prev_n, __node_ptr __n)
+    _M_erase(size_type __bkt, __node_base* __prev_n, __node_ptr __n)
     -> iterator
     {
       if (__prev_n == _M_buckets[__bkt])
-	_M_remove_bucket_begin(__bkt, __n->_M_next(),
+	_M_remove_bucket_begin(__bkt, __n->_M_next_ptr(),
 	  __n->_M_nxt ? _M_bucket_index(*__n->_M_next()) : 0);
       else if (__n->_M_nxt)
 	{
@@ -2210,12 +2237,12 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       std::size_t __bkt = _M_bucket_index(__code);
 
       // Look for the node before the first matching node.
-      __node_base_ptr __prev_n = _M_find_before_node(__bkt, __k, __code);
+      __node_base* __prev_n = _M_find_before_node(__bkt, __k, __code);
       if (!__prev_n)
 	return 0;
 
       // We found a matching node, erase it.
-      __node_ptr __n = static_cast<__node_ptr>(__prev_n->_M_nxt);
+      __node_ptr __n = _S_cast(__prev_n->_M_nxt);
       _M_erase(__bkt, __prev_n, __n);
       return 1;
     }
@@ -2234,7 +2261,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       std::size_t __bkt = _M_bucket_index(__code);
 
       // Look for the node before the first matching node.
-      __node_base_ptr __prev_n = _M_find_before_node(__bkt, __k, __code);
+      __node_base* __prev_n = _M_find_before_node(__bkt, __k, __code);
       if (!__prev_n)
 	return 0;
 
@@ -2244,8 +2271,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       // We use one loop to find all matching nodes and another to deallocate
       // them so that the key stays valid during the first loop. It might be
       // invalidated indirectly when destroying nodes.
-      __node_ptr __n = static_cast<__node_ptr>(__prev_n->_M_nxt);
-      __node_ptr __n_last = __n->_M_next();
+      __node_ptr __n = _S_cast(__prev_n->_M_nxt);
+      __node_type* __n_last = __n->_M_next();
       while (__n_last && this->_M_node_equals(*__n, *__n_last))
 	__n_last = __n_last->_M_next();
 
@@ -2255,19 +2282,19 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       size_type __result = 0;
       do
 	{
-	  __node_ptr __p = __n->_M_next();
+	  __node_ptr __p = __n->_M_next_ptr();
 	  this->_M_deallocate_node(__n);
 	  __n = __p;
 	  ++__result;
 	}
-      while (__n != __n_last);
+      while (std::__to_address(__n) != __n_last);
 
       _M_element_count -= __result;
       if (__prev_n == _M_buckets[__bkt])
-	_M_remove_bucket_begin(__bkt, __n_last, __n_last_bkt);
+	_M_remove_bucket_begin(__bkt, __n, __n_last_bkt);
       else if (__n_last_bkt != __bkt)
 	_M_buckets[__n_last_bkt] = __prev_n;
-      __prev_n->_M_nxt = __n_last;
+      __prev_n->_M_nxt = __n;
       return __result;
     }
 
@@ -2281,41 +2308,42 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     erase(const_iterator __first, const_iterator __last)
     -> iterator
     {
-      __node_ptr __n = __first._M_cur;
-      __node_ptr __last_n = __last._M_cur;
+      __node_type* __n = __first._M_cur;
+      __node_type* __last_n = __last._M_cur;
       if (__n == __last_n)
 	return iterator(__n);
 
       std::size_t __bkt = _M_bucket_index(*__n);
 
-      __node_base_ptr __prev_n = _M_get_previous_node(__bkt, __n);
+      __node_base* __prev_n = _M_get_previous_node(__bkt, __n);
+      __node_ptr __nptr = _S_cast(__prev_n->_M_nxt);
       bool __is_bucket_begin = __n == _M_bucket_begin(__bkt);
       std::size_t __n_bkt = __bkt;
       for (;;)
 	{
 	  do
 	    {
-	      __node_ptr __tmp = __n;
-	      __n = __n->_M_next();
+	      __node_ptr __tmp = __nptr;
+	      __nptr = __nptr->_M_next_ptr();
 	      this->_M_deallocate_node(__tmp);
 	      --_M_element_count;
-	      if (!__n)
+	      if (!__nptr)
 		break;
-	      __n_bkt = _M_bucket_index(*__n);
+	      __n_bkt = _M_bucket_index(*__nptr);
 	    }
-	  while (__n != __last_n && __n_bkt == __bkt);
+	  while (std::__to_address(__nptr) != __last_n && __n_bkt == __bkt);
 	  if (__is_bucket_begin)
-	    _M_remove_bucket_begin(__bkt, __n, __n_bkt);
-	  if (__n == __last_n)
+	    _M_remove_bucket_begin(__bkt, __nptr, __n_bkt);
+	  if (std::__to_address(__nptr) == __last_n)
 	    break;
 	  __is_bucket_begin = true;
 	  __bkt = __n_bkt;
 	}
 
-      if (__n && (__n_bkt != __bkt || __is_bucket_begin))
+      if (__nptr && (__n_bkt != __bkt || __is_bucket_begin))
 	_M_buckets[__n_bkt] = __prev_n;
-      __prev_n->_M_nxt = __n;
-      return iterator(__n);
+      __prev_n->_M_nxt = __nptr;
+      return iterator(std::__to_address(__nptr));
     }
 
   template<typename _Key, typename _Value, typename _Alloc,
@@ -2327,9 +2355,9 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 	       _Hash, _RangeHash, _Unused, _RehashPolicy, _Traits>::
     clear() noexcept
     {
-      this->_M_deallocate_nodes(_M_begin());
-      __builtin_memset(_M_buckets, 0,
-		       _M_bucket_count * sizeof(__node_base_ptr));
+      this->_M_deallocate_nodes(_M_pbegin());
+      __builtin_memset(std::__to_address(_M_buckets), 0,
+		       _M_bucket_count * sizeof(__node_base*));
       _M_element_count = 0;
       _M_before_begin._M_nxt = nullptr;
     }
@@ -2390,12 +2418,12 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     _M_rehash_aux(size_type __bkt_count, true_type /* __uks */)
     {
       __buckets_ptr __new_buckets = _M_allocate_buckets(__bkt_count);
-      __node_ptr __p = _M_begin();
+      __node_type* __p = _M_begin();
       _M_before_begin._M_nxt = nullptr;
       std::size_t __bbegin_bkt = 0;
       while (__p)
 	{
-	  __node_ptr __next = __p->_M_next();
+	  __node_type* __next = __p->_M_next();
 	  std::size_t __bkt
 	    = __hash_code_base::_M_bucket_index(*__p, __bkt_count);
 	  if (!__new_buckets[__bkt])
@@ -2433,16 +2461,16 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     _M_rehash_aux(size_type __bkt_count, false_type /* __uks */)
     {
       __buckets_ptr __new_buckets = _M_allocate_buckets(__bkt_count);
-      __node_ptr __p = _M_begin();
+      __node_type* __p = _M_begin();
       _M_before_begin._M_nxt = nullptr;
       std::size_t __bbegin_bkt = 0;
       std::size_t __prev_bkt = 0;
-      __node_ptr __prev_p = nullptr;
+      __node_type* __prev_p = nullptr;
       bool __check_bucket = false;
 
       while (__p)
 	{
-	  __node_ptr __next = __p->_M_next();
+	  __node_type* __next = __p->_M_next();
 	  std::size_t __bkt
 	    = __hash_code_base::_M_bucket_index(*__p, __bkt_count);
 
diff --git a/libstdc++-v3/include/bits/hashtable_policy.h b/libstdc++-v3/include/bits/hashtable_policy.h
index 2130c958262..495d5ad5ae9 100644
--- a/libstdc++-v3/include/bits/hashtable_policy.h
+++ b/libstdc++-v3/include/bits/hashtable_policy.h
@@ -59,24 +59,29 @@ namespace __detail
 
   // Helper function: return distance(first, last) for forward
   // iterators, or 0/1 for input iterators.
-  template<class _Iterator>
+  template<typename _Iterator>
     inline typename std::iterator_traits<_Iterator>::difference_type
     __distance_fw(_Iterator __first, _Iterator __last,
 		  std::input_iterator_tag)
     { return __first != __last ? 1 : 0; }
 
-  template<class _Iterator>
+  template<typename _Iterator>
     inline typename std::iterator_traits<_Iterator>::difference_type
     __distance_fw(_Iterator __first, _Iterator __last,
 		  std::forward_iterator_tag)
     { return std::distance(__first, __last); }
 
-  template<class _Iterator>
+  template<typename _Iterator>
     inline typename std::iterator_traits<_Iterator>::difference_type
     __distance_fw(_Iterator __first, _Iterator __last)
     { return __distance_fw(__first, __last,
 			   std::__iterator_category(__first)); }
 
+  template<typename _Alloc, typename _Value>
+    using __alloc_val_ptr =
+      typename std::allocator_traits<__alloc_rebind<_Alloc,
+						    _Value>>::pointer;
+
   struct _Identity
   {
     template<typename _Tp>
@@ -146,10 +151,10 @@ namespace __detail
       using __hashtable_alloc = _Hashtable_alloc<__node_alloc_type>;
       using __node_alloc_traits =
 	typename __hashtable_alloc::__node_alloc_traits;
-      using __node_type = typename __hashtable_alloc::__node_type;
+      using __node_ptr = typename __hashtable_alloc::__node_ptr;
 
     public:
-      _ReuseOrAllocNode(__node_type* __nodes, __hashtable_alloc& __h)
+      _ReuseOrAllocNode(__node_ptr __nodes, __hashtable_alloc& __h)
       : _M_nodes(__nodes), _M_h(__h) { }
       _ReuseOrAllocNode(const _ReuseOrAllocNode&) = delete;
 
@@ -157,13 +162,13 @@ namespace __detail
       { _M_h._M_deallocate_nodes(_M_nodes); }
 
       template<typename... _Args>
-	__node_type*
+	__node_ptr
 	operator()(_Args&&... __args) const
 	{
 	  if (_M_nodes)
 	    {
-	      __node_type* __node = _M_nodes;
-	      _M_nodes = _M_nodes->_M_next();
+	      __node_ptr __node = _M_nodes;
+	      _M_nodes = _M_nodes->_M_next_ptr();
 	      __node->_M_nxt = nullptr;
 	      auto& __a = _M_h._M_node_allocator();
 	      __node_alloc_traits::destroy(__a, __node->_M_valptr());
@@ -183,7 +188,7 @@ namespace __detail
 	}
 
     private:
-      mutable __node_type* _M_nodes;
+      mutable __node_ptr _M_nodes;
       __hashtable_alloc& _M_h;
     };
 
@@ -194,14 +199,14 @@ namespace __detail
     {
     private:
       using __hashtable_alloc = _Hashtable_alloc<_NodeAlloc>;
-      using __node_type = typename __hashtable_alloc::__node_type;
+      using __node_ptr = typename __hashtable_alloc::__node_ptr;
 
     public:
       _AllocNode(__hashtable_alloc& __h)
       : _M_h(__h) { }
 
       template<typename... _Args>
-	__node_type*
+	__node_ptr
 	operator()(_Args&&... __args) const
 	{ return _M_h._M_allocate_node(std::forward<_Args>(__args)...); }
 
@@ -259,6 +264,28 @@ namespace __detail
     _Hash_node_base(_Hash_node_base* __next) noexcept : _M_nxt(__next) { }
   };
 
+  /**
+   * struct _Hash_pnode_base
+   *
+   * Like _Hash_node_base, but used when the allocator defines a custom
+   * pointer type.
+   */
+  template<typename _NodePtr>
+    struct _Hash_pnode_base
+    {
+      using __node_ptr = _NodePtr;
+
+      __node_ptr _M_nxt;
+
+      _Hash_pnode_base()
+      noexcept(std::is_nothrow_default_constructible<__node_ptr>::value)
+      : _M_nxt() { }
+
+      _Hash_pnode_base(__node_ptr __next)
+      noexcept(std::is_nothrow_copy_constructible<__node_ptr>::value)
+      : _M_nxt(__next) { }
+    };
+
   /**
    *  struct _Hash_node_value_base
    *
@@ -290,18 +317,23 @@ namespace __detail
 
   /**
    *  Primary template struct _Hash_node_code_cache.
+   *
+   *  No cache.
    */
   template<bool _Cache_hash_code>
     struct _Hash_node_code_cache
     { };
 
   /**
-   *  Specialization for node with cache, struct _Hash_node_code_cache.
+   *  Specialization for node with cache.
    */
   template<>
     struct _Hash_node_code_cache<true>
     { std::size_t  _M_hash_code; };
 
+  /**
+   * Node with value and optionally a cache for the hash code.
+   */
   template<typename _Value, bool _Cache_hash_code>
     struct _Hash_node_value
     : _Hash_node_value_base<_Value>
@@ -309,28 +341,79 @@ namespace __detail
     { };
 
   /**
-   *  Primary template struct _Hash_node.
+   *  struct _Hash_node.
+   *
+   *  The node definition when the allocator is using raw pointers.
    */
   template<typename _Value, bool _Cache_hash_code>
     struct _Hash_node
     : _Hash_node_base
     , _Hash_node_value<_Value, _Cache_hash_code>
     {
+      using __node_base = _Hash_node_base;
+      using __node_ptr = _Hash_node*;
+      using __node_type = _Hash_node;
+      using __node_value_cache_type =
+	_Hash_node_value<_Value, _Cache_hash_code>;
+
       _Hash_node*
       _M_next() const noexcept
       { return static_cast<_Hash_node*>(this->_M_nxt); }
+
+      __node_ptr
+      _M_next_ptr() const noexcept
+      { return _M_next(); }
     };
 
+  /**
+   *  struct _Hash_pnode.
+   *
+   *  The node definition used when the allocator defines a custom pointer type.
+   */
+  template<typename _Ptr, bool _Cache_hash_code>
+    struct _Hash_pnode
+    : _Hash_pnode_base<__ptr_rebind<_Ptr,
+				    _Hash_pnode<_Ptr, _Cache_hash_code>>>
+    , _Hash_node_value<typename std::pointer_traits<_Ptr>::element_type,
+		       _Cache_hash_code>
+    {
+      using __node_base =
+	_Hash_pnode_base<__ptr_rebind<_Ptr,
+				      _Hash_pnode<_Ptr, _Cache_hash_code>>>;
+      using __node_ptr = typename __node_base::__node_ptr;
+      using __node_type =
+	typename std::pointer_traits<__node_ptr>::element_type;
+      using value_type = typename __node_type::value_type;
+      using __node_value_cache_type =
+	_Hash_node_value<value_type, _Cache_hash_code>;
+      typedef typename std::pointer_traits<__node_ptr>::difference_type
+							difference_type;
+
+      __node_type*
+      _M_next() const noexcept
+      { return std::__to_address(this->_M_nxt); }
+
+      __node_ptr
+      _M_next_ptr() const noexcept
+      { return this->_M_nxt; }
+    };
+
+  template<typename _Alloc, typename _Value, bool __hash_cached>
+    using __get_node_type = typename std::conditional<
+      std::is_pointer<__alloc_val_ptr<_Alloc, _Value>>::value,
+      _Hash_node<_Value, __hash_cached>,
+      _Hash_pnode<__alloc_val_ptr<_Alloc, _Value>, __hash_cached>>::type;
+
   /// Base class for node iterators.
-  template<typename _Value, bool _Cache_hash_code>
-    struct _Node_iterator_base
+  template<typename _NodeType>
+    struct _Hashtable_iterator_base
     {
-      using __node_type = _Hash_node<_Value, _Cache_hash_code>;
+      using __node_type = _NodeType;
 
       __node_type* _M_cur;
 
-      _Node_iterator_base() : _M_cur(nullptr) { }
-      _Node_iterator_base(__node_type* __p) noexcept
+      _Hashtable_iterator_base() : _M_cur(nullptr) { }
+      _Hashtable_iterator_base(__node_type* __p) noexcept
       : _M_cur(__p) { }
 
       void
@@ -338,18 +421,32 @@ namespace __detail
       { _M_cur = _M_cur->_M_next(); }
 
       friend bool
-      operator==(const _Node_iterator_base& __x, const _Node_iterator_base& __y)
-      noexcept
+      operator==(const _Hashtable_iterator_base& __x,
+		 const _Hashtable_iterator_base& __y) noexcept
       { return __x._M_cur == __y._M_cur; }
 
 #if __cpp_impl_three_way_comparison < 201907L
       friend bool
-      operator!=(const _Node_iterator_base& __x, const _Node_iterator_base& __y)
-      noexcept
+      operator!=(const _Hashtable_iterator_base& __x,
+		 const _Hashtable_iterator_base& __y) noexcept
       { return __x._M_cur != __y._M_cur; }
 #endif
     };
 
+  /// Base class for node iterators; used when the node type uses raw pointers.
+  template<typename _Value, bool _Cache_hash_code>
+    struct _Node_iterator_base
+    : _Hashtable_iterator_base<_Hash_node<_Value, _Cache_hash_code>>
+    {
+      using __base_type =
+	_Hashtable_iterator_base<_Hash_node<_Value, _Cache_hash_code>>;
+      using __node_type = typename __base_type::__node_type;
+
+      _Node_iterator_base() = default;
+      _Node_iterator_base(__node_type* __p) noexcept
+      : __base_type(__p) { }
+    };
+
   /// Node iterators, used to iterate through all the hashtable.
   template<typename _Value, bool __constant_iterators, bool __cache>
     struct _Node_iterator
@@ -451,6 +548,110 @@ namespace __detail
       }
     };
 
+  /// Node iterators, used to iterate through all the hashtable.
+  template<typename _NodeType, bool __constant_iterators>
+    struct _Hashtable_iterator
+    : public _Hashtable_iterator_base<_NodeType>
+    {
+    private:
+      using __base_type = _Hashtable_iterator_base<_NodeType>;
+      using __node_type = typename __base_type::__node_type;
+
+    public:
+      typedef typename __node_type::value_type		value_type;
+      typedef typename __node_type::difference_type	difference_type;
+      typedef std::forward_iterator_tag			iterator_category;
+
+      using pointer = typename std::conditional<__constant_iterators,
+				  const value_type*, value_type*>::type;
+
+      using reference = typename std::conditional<__constant_iterators,
+				  const value_type&, value_type&>::type;
+
+      _Hashtable_iterator() noexcept
+      : __base_type(nullptr) { }
+
+      explicit
+      _Hashtable_iterator(__node_type* __p) noexcept
+      : __base_type(__p) { }
+
+      reference
+      operator*() const noexcept
+      { return this->_M_cur->_M_v(); }
+
+      pointer
+      operator->() const noexcept
+      { return this->_M_cur->_M_valptr(); }
+
+      _Hashtable_iterator&
+      operator++() noexcept
+      {
+	this->_M_incr();
+	return *this;
+      }
+
+      _Hashtable_iterator
+      operator++(int) noexcept
+      {
+	_Hashtable_iterator __tmp(*this);
+	this->_M_incr();
+	return __tmp;
+      }
+    };
+
+  /// Node const_iterators, used to iterate through all the hashtable.
+  template<typename _NodeType, bool __constant_iterators>
+    struct _Hashtable_const_iterator
+    : public _Hashtable_iterator_base<_NodeType>
+    {
+    private:
+      using __base_type = _Hashtable_iterator_base<_NodeType>;
+      using __node_type = typename __base_type::__node_type;
+
+    public:
+      typedef typename __node_type::value_type		value_type;
+      typedef typename __node_type::difference_type	difference_type;
+      typedef std::forward_iterator_tag			iterator_category;
+
+      typedef const value_type*				pointer;
+      typedef const value_type&				reference;
+
+      _Hashtable_const_iterator() noexcept
+      : __base_type(nullptr) { }
+
+      explicit
+      _Hashtable_const_iterator(__node_type* __p) noexcept
+      : __base_type(__p) { }
+
+      _Hashtable_const_iterator(
+	const _Hashtable_iterator<_NodeType,
+				  __constant_iterators>& __x) noexcept
+      : __base_type(__x._M_cur) { }
+
+      reference
+      operator*() const noexcept
+      { return this->_M_cur->_M_v(); }
+
+      pointer
+      operator->() const noexcept
+      { return this->_M_cur->_M_valptr(); }
+
+      _Hashtable_const_iterator&
+      operator++() noexcept
+      {
+	this->_M_incr();
+	return *this;
+      }
+
+      _Hashtable_const_iterator
+      operator++(int) noexcept
+      {
+	_Hashtable_const_iterator __tmp(*this);
+	this->_M_incr();
+	return __tmp;
+      }
+    };
+
   // Many of class template _Hashtable's template parameters are policy
   // classes.  These are defaults for the policies.
 
@@ -837,16 +1038,15 @@ namespace __detail
 
       using __hash_cached = typename _Traits::__hash_cached;
       using __constant_iterators = typename _Traits::__constant_iterators;
+      using __alloc_ptr = __alloc_val_ptr<_Alloc, _Value>;
 
-      using __hashtable_alloc = _Hashtable_alloc<
-	__alloc_rebind<_Alloc, _Hash_node<_Value,
-					  __hash_cached::value>>>;
+      using __node_type = __get_node_type<_Alloc, _Value, __hash_cached::value>;
+      using __node_alloc_type = __alloc_rebind<_Alloc, __node_type>;
 
       using value_type = typename __hashtable_base::value_type;
       using size_type = typename __hashtable_base::size_type;
 
       using __unique_keys = typename _Traits::__unique_keys;
-      using __node_alloc_type = typename __hashtable_alloc::__node_alloc_type;
       using __node_gen_type = _AllocNode<__node_alloc_type>;
 
       __hashtable&
@@ -864,11 +1064,19 @@ namespace __detail
 			const _NodeGetter&, false_type __uks);
 
     public:
-      using iterator = _Node_iterator<_Value, __constant_iterators::value,
-				      __hash_cached::value>;
-
-      using const_iterator = _Node_const_iterator<_Value, __constant_iterators::value,
-						  __hash_cached::value>;
+      using iterator =
+	typename std::conditional<std::is_pointer<__alloc_ptr>::value,
+	  _Node_iterator<_Value,
+			 __constant_iterators::value, __hash_cached::value>,
+	  _Hashtable_iterator<__node_type,
+			      __constant_iterators::value>>::type;
+
+      using const_iterator =
+	typename std::conditional<std::is_pointer<__alloc_ptr>::value,
+	  _Node_const_iterator<_Value,
+			     __constant_iterators::value, __hash_cached::value>,
+	  _Hashtable_const_iterator<__node_type,
+				    __constant_iterators::value>>::type;
 
       using __ireturn_type = typename std::conditional<__unique_keys::value,
 						     std::pair<iterator, bool>,
@@ -1202,6 +1410,17 @@ namespace __detail
 	   bool __cache_hash_code>
     struct _Local_iterator_base;
 
+  /**
+   *  Primary class template _Hashtable_local_iter_base.
+   *
+   *  Base class for local iterators, used to iterate within a bucket
+   *  but not between buckets.
+   */
+  template<typename _Key, typename _NodeType, typename _ExtractKey,
+	   typename _Hash, typename _RangeHash, typename _Unused,
+	   bool __cache_hash_code>
+    struct _Hashtable_local_iter_base;
+
   /**
    *  Primary class template _Hash_code_base.
    *
@@ -1354,6 +1573,47 @@ namespace __detail
       _M_get_bucket() const { return _M_bucket; }  // for debug mode
     };
 
+  /// Partial specialization used when nodes contain a cached hash code.
+  template<typename _Key, typename _NodeType, typename _ExtractKey,
+	   typename _Hash, typename _RangeHash, typename _Unused>
+    struct _Hashtable_local_iter_base<_Key, _NodeType, _ExtractKey,
+				      _Hash, _RangeHash, _Unused, true>
+    : public _Hashtable_iterator_base<_NodeType>
+    {
+    protected:
+      using __base_node_iter = _Hashtable_iterator_base<_NodeType>;
+      using value_type = typename _NodeType::value_type;
+      using __hash_code_base = _Hash_code_base<_Key, value_type, _ExtractKey,
+					      _Hash, _RangeHash, _Unused, true>;
+
+      _Hashtable_local_iter_base() = default;
+      _Hashtable_local_iter_base(const __hash_code_base&,
+				 _NodeType* __p,
+				 std::size_t __bkt, std::size_t __bkt_count)
+      : __base_node_iter(__p), _M_bucket(__bkt), _M_bucket_count(__bkt_count)
+      { }
+
+      void
+      _M_incr()
+      {
+	__base_node_iter::_M_incr();
+	if (this->_M_cur)
+	  {
+	    std::size_t __bkt
+	      = _RangeHash{}(this->_M_cur->_M_hash_code, _M_bucket_count);
+	    if (__bkt != _M_bucket)
+	      this->_M_cur = nullptr;
+	  }
+      }
+
+      std::size_t _M_bucket;
+      std::size_t _M_bucket_count;
+
+    public:
+      std::size_t
+      _M_get_bucket() const { return _M_bucket; }  // for debug mode
+    };
+
   // Uninitialized storage for a _Hash_code_base.
   // This type is DefaultConstructible and Assignable even if the
   // _Hash_code_base type isn't, so that _Local_iterator_base<..., false>
@@ -1468,6 +1728,84 @@ namespace __detail
       _M_get_bucket() const { return _M_bucket; }  // for debug mode
     };
 
+  // Partial specialization used when hash codes are not cached
+  template<typename _Key, typename _NodeType, typename _ExtractKey,
+	   typename _Hash, typename _RangeHash, typename _Unused>
+    struct _Hashtable_local_iter_base<_Key, _NodeType, _ExtractKey,
+				      _Hash, _RangeHash, _Unused, false>
+    : _Hash_code_storage<_Hash>
+    , _Hashtable_iterator_base<_NodeType>
+    {
+    protected:
+      using value_type = typename _NodeType::value_type;
+      using __hash_code_base = _Hash_code_base<_Key, value_type, _ExtractKey,
+					     _Hash, _RangeHash, _Unused, false>;
+      using __node_iter_base = _Hashtable_iterator_base<_NodeType>;
+
+      _Hashtable_local_iter_base() : _M_bucket_count(-1) { }
+
+      _Hashtable_local_iter_base(const __hash_code_base& __base,
+				 _NodeType* __p,
+				 std::size_t __bkt, std::size_t __bkt_count)
+      : __node_iter_base(__p), _M_bucket(__bkt), _M_bucket_count(__bkt_count)
+      { _M_init(__base.hash_function()); }
+
+      ~_Hashtable_local_iter_base()
+      {
+	if (_M_bucket_count != -1)
+	  _M_destroy();
+      }
+
+      _Hashtable_local_iter_base(const _Hashtable_local_iter_base& __iter)
+      : __node_iter_base(__iter._M_cur), _M_bucket(__iter._M_bucket)
+      , _M_bucket_count(__iter._M_bucket_count)
+      {
+	if (_M_bucket_count != -1)
+	  _M_init(*__iter._M_h());
+      }
+
+      _Hashtable_local_iter_base&
+      operator=(const _Hashtable_local_iter_base& __iter)
+      {
+	if (_M_bucket_count != -1)
+	  _M_destroy();
+	this->_M_cur = __iter._M_cur;
+	_M_bucket = __iter._M_bucket;
+	_M_bucket_count = __iter._M_bucket_count;
+	if (_M_bucket_count != -1)
+	  _M_init(*__iter._M_h());
+	return *this;
+      }
+
+      void
+      _M_incr()
+      {
+	__node_iter_base::_M_incr();
+	if (this->_M_cur)
+	  {
+	    std::size_t __bkt =
+	      _RangeHash{}((*this->_M_h())(_ExtractKey{}(this->_M_cur->_M_v())),
+			   _M_bucket_count);
+	    if (__bkt != _M_bucket)
+	      this->_M_cur = nullptr;
+	  }
+      }
+
+      std::size_t _M_bucket;
+      std::size_t _M_bucket_count;
+
+      void
+      _M_init(const _Hash& __h)
+      { ::new(this->_M_h()) _Hash(__h); }
+
+      void
+      _M_destroy() { this->_M_h()->~_Hash(); }
+
+    public:
+      std::size_t
+      _M_get_bucket() const { return _M_bucket; }  // for debug mode
+    };
+
   /// local iterators
   template<typename _Key, typename _Value, typename _ExtractKey,
 	   typename _Hash, typename _RangeHash, typename _Unused,
@@ -1583,6 +1921,156 @@ namespace __detail
       }
     };
 
+  /// local iterators
+  template<typename _Key, typename _NodeType, typename _ExtractKey,
+	   typename _Hash, typename _RangeHash, typename _Unused,
+	   bool __constant_iterators, bool __cache>
+    struct _Hashtable_local_iterator
+    : public _Hashtable_local_iter_base<_Key, _NodeType, _ExtractKey,
+					_Hash, _RangeHash, _Unused, __cache>
+    {
+    private:
+      using __base_type =
+	_Hashtable_local_iter_base<_Key, _NodeType, _ExtractKey,
+				   _Hash, _RangeHash, _Unused, __cache>;
+      using __hash_code_base = typename __base_type::__hash_code_base;
+      using __node_type = typename __base_type::__node_type;
+
+    public:
+      typedef typename __base_type::value_type		value_type;
+      typedef typename std::conditional<__constant_iterators,
+					const value_type*, value_type*>::type
+							pointer;
+      typedef typename std::conditional<__constant_iterators,
+					const value_type&, value_type&>::type
+							reference;
+      typedef typename __node_type::difference_type	difference_type;
+      typedef std::forward_iterator_tag			iterator_category;
+
+      _Hashtable_local_iterator() = default;
+
+      _Hashtable_local_iterator(const __hash_code_base& __base,
+				__node_type* __n,
+				std::size_t __bkt, std::size_t __bkt_count)
+      : __base_type(__base, __n, __bkt, __bkt_count)
+      { }
+
+      reference
+      operator*() const
+      { return this->_M_cur->_M_v(); }
+
+      pointer
+      operator->() const
+      { return this->_M_cur->_M_valptr(); }
+
+      _Hashtable_local_iterator&
+      operator++()
+      {
+	this->_M_incr();
+	return *this;
+      }
+
+      _Hashtable_local_iterator
+      operator++(int)
+      {
+	_Hashtable_local_iterator __tmp(*this);
+	this->_M_incr();
+	return __tmp;
+      }
+    };
+
+  /// local const_iterators
+  template<typename _Key, typename _NodeType, typename _ExtractKey,
+	   typename _Hash, typename _RangeHash, typename _Unused,
+	   bool __constant_iterators, bool __cache>
+    struct _Hashtable_const_local_iterator
+    : public _Hashtable_local_iter_base<_Key, _NodeType, _ExtractKey,
+					_Hash, _RangeHash, _Unused, __cache>
+    {
+    private:
+      using __base_type =
+	_Hashtable_local_iter_base<_Key, _NodeType, _ExtractKey,
+				   _Hash, _RangeHash, _Unused, __cache>;
+      using __hash_code_base = typename __base_type::__hash_code_base;
+      using __node_type = typename __base_type::__node_type;
+
+    public:
+      typedef typename __base_type::value_type		value_type;
+      typedef const value_type*				pointer;
+      typedef const value_type&				reference;
+      typedef typename __node_type::difference_type	difference_type;
+      typedef std::forward_iterator_tag			iterator_category;
+
+      _Hashtable_const_local_iterator() = default;
+
+      _Hashtable_const_local_iterator(const __hash_code_base& __base,
+				      __node_type* __n,
+				    std::size_t __bkt, std::size_t __bkt_count)
+      : __base_type(__base, __n, __bkt, __bkt_count)
+      { }
+
+      _Hashtable_const_local_iterator(const _Hashtable_local_iterator<
+				      _Key, _NodeType, _ExtractKey,
+				      _Hash, _RangeHash, _Unused,
+				      __constant_iterators,
+				      __cache>& __x)
+      : __base_type(__x)
+      { }
+
+      reference
+      operator*() const
+      { return this->_M_cur->_M_v(); }
+
+      pointer
+      operator->() const
+      { return this->_M_cur->_M_valptr(); }
+
+      _Hashtable_const_local_iterator&
+      operator++()
+      {
+	this->_M_incr();
+	return *this;
+      }
+
+      _Hashtable_const_local_iterator
+      operator++(int)
+      {
+	_Hashtable_const_local_iterator __tmp(*this);
+	this->_M_incr();
+	return __tmp;
+      }
+    };
+
+  template<typename _NodePtr, typename _Key, typename _Value,
+	   typename _ExtractKey, typename _Hash,
+	   typename _RangeHash, typename _Unused,
+	   bool __constant_iterators, bool __hash_cached>
+    using __local_iterator = typename std::conditional<
+      std::is_pointer<_NodePtr>::value,
+      _Local_iterator<_Key, _Value,
+		      _ExtractKey, _Hash, _RangeHash, _Unused,
+		      __constant_iterators, __hash_cached>,
+      _Hashtable_local_iterator<_Key,
+	       typename std::pointer_traits<_NodePtr>::element_type,
+				_ExtractKey, _Hash, _RangeHash, _Unused,
+				__constant_iterators,
+				__hash_cached>>::type;
+
+  template<typename _NodePtr, typename _Key, typename _Value,
+	   typename _ExtractKey, typename _Hash,
+	   typename _RangeHash, typename _Unused,
+	   bool __constant_iterators, bool __hash_cached>
+    using __const_local_iterator = typename std::conditional<
+      std::is_pointer<_NodePtr>::value,
+      _Local_const_iterator<_Key, _Value,
+			    _ExtractKey, _Hash, _RangeHash, _Unused,
+			    __constant_iterators, __hash_cached>,
+      _Hashtable_const_local_iterator<_Key,
+	       typename std::pointer_traits<_NodePtr>::element_type,
+				      _ExtractKey, _Hash, _RangeHash, _Unused,
+				      __constant_iterators,
+				      __hash_cached>>::type;
+
   /**
    *  Primary class template _Hashtable_base.
    *
@@ -1740,8 +2228,9 @@ namespace __detail
 	  if (!__prev_n)
 	    return false;
 
-	  for (__node_type* __n = static_cast<__node_type*>(__prev_n->_M_nxt);;
-	       __n = __n->_M_next())
+	  __node_type* __n =
+	    static_cast<__node_type*>(std::__to_address(__prev_n->_M_nxt));
+	  for (;; __n = __n->_M_next())
 	    {
 	      if (__n->_M_v() == *__itx)
 		break;
@@ -1800,7 +2289,8 @@ namespace __detail
 	  if (!__y_prev_n)
 	    return false;
 
-	  __node_type* __y_n = static_cast<__node_type*>(__y_prev_n->_M_nxt);
+	  __node_type* __y_n =
+	    static_cast<__node_type*>(std::__to_address(__y_prev_n->_M_nxt));
 	  for (;;)
 	    {
 	      if (__this->key_eq()(_ExtractKey{}(__y_n->_M_v()),
@@ -1847,16 +2337,12 @@ namespace __detail
       // Use __gnu_cxx to benefit from _S_always_equal and al.
       using __node_alloc_traits = __gnu_cxx::__alloc_traits<__node_alloc_type>;
 
-      using __value_alloc_traits = typename __node_alloc_traits::template
-	rebind_traits<typename __node_type::value_type>;
-
-      using __node_ptr = __node_type*;
-      using __node_base = _Hash_node_base;
-      using __node_base_ptr = __node_base*;
+      using __node_ptr = typename __node_alloc_traits::pointer;
+      using __node_base = typename __node_type::__node_base;
       using __buckets_alloc_type =
-	__alloc_rebind<__node_alloc_type, __node_base_ptr>;
+	__alloc_rebind<__node_alloc_type, __node_base*>;
       using __buckets_alloc_traits = std::allocator_traits<__buckets_alloc_type>;
-      using __buckets_ptr = __node_base_ptr*;
+      using __buckets_ptr = typename __buckets_alloc_traits::pointer;
 
       _Hashtable_alloc() = default;
       _Hashtable_alloc(const _Hashtable_alloc&) = default;
@@ -1909,14 +2395,13 @@ namespace __detail
       -> __node_ptr
       {
 	auto __nptr = __node_alloc_traits::allocate(_M_node_allocator(), 1);
-	__node_ptr __n = std::__to_address(__nptr);
 	__try
 	  {
-	    ::new ((void*)__n) __node_type;
+	    ::new ((void*)std::__to_address(__nptr)) __node_type;
 	    __node_alloc_traits::construct(_M_node_allocator(),
-					   __n->_M_valptr(),
+					   __nptr->_M_valptr(),
 					   std::forward<_Args>(__args)...);
-	    return __n;
+	    return __nptr;
 	  }
 	__catch(...)
 	  {
@@ -1927,20 +2412,18 @@ namespace __detail
 
   template<typename _NodeAlloc>
     void
-    _Hashtable_alloc<_NodeAlloc>::_M_deallocate_node(__node_ptr __n)
+    _Hashtable_alloc<_NodeAlloc>::_M_deallocate_node(__node_ptr __nptr)
     {
-      __node_alloc_traits::destroy(_M_node_allocator(), __n->_M_valptr());
-      _M_deallocate_node_ptr(__n);
+      __node_alloc_traits::destroy(_M_node_allocator(), __nptr->_M_valptr());
+      _M_deallocate_node_ptr(__nptr);
     }
 
   template<typename _NodeAlloc>
     void
-    _Hashtable_alloc<_NodeAlloc>::_M_deallocate_node_ptr(__node_ptr __n)
+    _Hashtable_alloc<_NodeAlloc>::_M_deallocate_node_ptr(__node_ptr __nptr)
     {
-      typedef typename __node_alloc_traits::pointer _Ptr;
-      auto __ptr = std::pointer_traits<_Ptr>::pointer_to(*__n);
-      __n->~__node_type();
-      __node_alloc_traits::deallocate(_M_node_allocator(), __ptr, 1);
+      __nptr->~__node_type();
+      __node_alloc_traits::deallocate(_M_node_allocator(), __nptr, 1);
     }
 
   template<typename _NodeAlloc>
@@ -1950,7 +2433,7 @@ namespace __detail
       while (__n)
 	{
 	  __node_ptr __tmp = __n;
-	  __n = __n->_M_next();
+	  __n = __n->_M_next_ptr();
 	  _M_deallocate_node(__tmp);
 	}
     }
@@ -1963,9 +2446,9 @@ namespace __detail
       __buckets_alloc_type __alloc(_M_node_allocator());
 
       auto __ptr = __buckets_alloc_traits::allocate(__alloc, __bkt_count);
-      __buckets_ptr __p = std::__to_address(__ptr);
-      __builtin_memset(__p, 0, __bkt_count * sizeof(__node_base_ptr));
-      return __p;
+      __builtin_memset(std::__to_address(__ptr), 0,
+		       __bkt_count * sizeof(__node_base*));
+      return __ptr;
     }
 
   template<typename _NodeAlloc>
@@ -1974,10 +2457,8 @@ namespace __detail
     _M_deallocate_buckets(__buckets_ptr __bkts,
 			  std::size_t __bkt_count)
     {
-      typedef typename __buckets_alloc_traits::pointer _Ptr;
-      auto __ptr = std::pointer_traits<_Ptr>::pointer_to(*__bkts);
       __buckets_alloc_type __alloc(_M_node_allocator());
-      __buckets_alloc_traits::deallocate(__alloc, __ptr, __bkt_count);
+      __buckets_alloc_traits::deallocate(__alloc, __bkts, __bkt_count);
     }
 
  ///@} hashtable-detail
diff --git a/libstdc++-v3/testsuite/23_containers/unordered_map/allocator/ext_ptr.cc b/libstdc++-v3/testsuite/23_containers/unordered_map/allocator/ext_ptr.cc
new file mode 100644
index 00000000000..5e9ff548032
--- /dev/null
+++ b/libstdc++-v3/testsuite/23_containers/unordered_map/allocator/ext_ptr.cc
@@ -0,0 +1,57 @@
+// Copyright (C) 2021 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library.  This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING3.  If not see
+// <http://www.gnu.org/licenses/>.
+
+// { dg-do run { target c++11 } }
+
+#include <unordered_map>
+#include <memory>
+#include <testsuite_hooks.h>
+#include <testsuite_allocator.h>
+
+struct T { int i; };
+
+bool operator==(const T& l, const T& r)
+{ return l.i == r.i; }
+
+struct H
+{
+  std::size_t
+  operator()(const T& t) const noexcept
+  { return t.i; }
+};
+
+struct E : std::equal_to<T>
+{ };
+
+using __gnu_test::CustomPointerAlloc;
+
+template class std::unordered_map<T, int, H, E,
+				  CustomPointerAlloc<std::pair<const T, int>>>;
+
+void test01()
+{
+  typedef CustomPointerAlloc<std::pair<const T, int>> alloc_type;
+  typedef std::unordered_map<T, int, H, E, alloc_type> test_type;
+  test_type v;
+  v.insert({ T(), 0 });
+  VERIFY( ++v.begin() == v.end() );
+}
+
+int main()
+{
+  test01();
+}
diff --git a/libstdc++-v3/testsuite/23_containers/unordered_multimap/allocator/ext_ptr.cc b/libstdc++-v3/testsuite/23_containers/unordered_multimap/allocator/ext_ptr.cc
new file mode 100644
index 00000000000..6dd62a40293
--- /dev/null
+++ b/libstdc++-v3/testsuite/23_containers/unordered_multimap/allocator/ext_ptr.cc
@@ -0,0 +1,57 @@
+// Copyright (C) 2021 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library.  This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING3.  If not see
+// <http://www.gnu.org/licenses/>.
+
+// { dg-do run { target c++11 } }
+
+#include <unordered_map>
+#include <memory>
+#include <testsuite_hooks.h>
+#include <testsuite_allocator.h>
+
+struct T { int i; };
+
+bool operator==(const T& l, const T& r)
+{ return l.i == r.i; }
+
+struct H
+{
+  std::size_t
+  operator()(const T& t) const noexcept
+  { return t.i; }
+};
+
+struct E : std::equal_to<T>
+{ };
+
+using __gnu_test::CustomPointerAlloc;
+
+template class std::unordered_multimap<T, int, H, E,
+				       CustomPointerAlloc<std::pair<const T, int>>>;
+
+void test01()
+{
+  typedef CustomPointerAlloc<std::pair<const T, int>> alloc_type;
+  typedef std::unordered_multimap<T, int, H, E, alloc_type> test_type;
+  test_type v;
+  v.insert({ T(), 0 });
+  VERIFY( ++v.begin() == v.end() );
+}
+
+int main()
+{
+  test01();
+}
diff --git a/libstdc++-v3/testsuite/23_containers/unordered_multiset/allocator/ext_ptr.cc b/libstdc++-v3/testsuite/23_containers/unordered_multiset/allocator/ext_ptr.cc
new file mode 100644
index 00000000000..dbc7b6247a2
--- /dev/null
+++ b/libstdc++-v3/testsuite/23_containers/unordered_multiset/allocator/ext_ptr.cc
@@ -0,0 +1,56 @@
+// Copyright (C) 2021 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library.  This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License along
+// with this library; see the file COPYING3.  If not see
+// <http://www.gnu.org/licenses/>.
+
+// { dg-do run { target c++11 } }
+
+#include <unordered_set>
+#include <memory>
+#include <testsuite_hooks.h>
+#include <testsuite_allocator.h>
+
+struct T { int i; };
+
+bool operator==(const T& l, const T& r)
+{ return l.i == r.i; }
+
+struct H
+{
+  std::size_t
+  operator()(const T& t) const noexcept
+  { return t.i; }
+};
+
+struct E : std::equal_to<T>
+{ };
+
+using __gnu_test::CustomPointerAlloc;
+
+template class std::unordered_multiset<T, H, E, CustomPointerAlloc<T>>;
+
+void test01()
+{
+  typedef CustomPointerAlloc<T> alloc_type;
+  typedef std::unordered_multiset<T, H, E, alloc_type> test_type;
+  test_type v;
+  v.insert(T());
+  VERIFY( ++v.begin() == v.end() );
+}
+
+int main()
+{
+  test01();
+}
diff --git a/libstdc++-v3/testsuite/23_containers/unordered_set/allocator/ext_ptr.cc b/libstdc++-v3/testsuite/23_containers/unordered_set/allocator/ext_ptr.cc
index c0e6a1f53a2..88814b3009c 100644
--- a/libstdc++-v3/testsuite/23_containers/unordered_set/allocator/ext_ptr.cc
+++ b/libstdc++-v3/testsuite/23_containers/unordered_set/allocator/ext_ptr.cc
@@ -15,10 +15,7 @@
 // with this library; see the file COPYING3.  If not see
 // <http://www.gnu.org/licenses/>.
 
-// This test fails to compile since C++17 (see xfail-if below) so we can only
-// do a "run" test for C++11 and C++14, and a "compile" test for C++17 and up.
-// { dg-do run { target { c++11_only || c++14_only } } }
-// { dg-do compile { target c++17 } }
+// { dg-do run { target c++11 } }
 
 #include <unordered_set>
 #include <memory>
@@ -26,15 +23,22 @@
 #include <testsuite_allocator.h>
 
 struct T { int i; };
-bool operator==(const T& l, const T& r) { return l.i == r.i; }
-struct H { std::size_t operator()(const T& t) const noexcept { return t.i; }
+
+bool operator==(const T& l, const T& r)
+{ return l.i == r.i; }
+
+struct H
+{
+  std::size_t
+  operator()(const T& t) const noexcept
+  { return t.i; }
 };
-struct E : std::equal_to<T> { };
+
+struct E : std::equal_to<T>
+{ };
 
 using __gnu_test::CustomPointerAlloc;
 
-// { dg-xfail-if "node reinsertion assumes raw pointers" { c++17 } }
-// TODO when removing this xfail change the test back to "dg-do run".
 template class std::unordered_set<T, H, E, CustomPointerAlloc<T>>;
 
 void test01()

^ permalink raw reply	[flat|nested] 10+ messages in thread

end of thread, other threads:[~2021-06-10 17:22 UTC | newest]

Thread overview: 10+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-04-19 17:31 libstdc++ PR 57272 Fancy pointer support in Hashtable François Dumont
2020-05-15 21:12 ` François Dumont
2020-09-28 20:37   ` François Dumont
2020-10-20 11:04     ` Jonathan Wakely
2020-10-20 17:26       ` François Dumont
2020-11-01 21:48       ` François Dumont
2020-11-02 14:11         ` Jonathan Wakely
2020-11-02 21:33           ` François Dumont
2021-01-11 18:10           ` François Dumont
2021-06-10 17:22           ` François Dumont

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for read-only IMAP folder(s) and NNTP newsgroup(s).