public inbox for gcc-patches@gcc.gnu.org
 help / color / mirror / Atom feed
* [cxx-mem-model] C++ wrappers
@ 2011-09-10  0:02 Andrew MacLeod
  2011-09-15 21:03 ` Richard Henderson
  0 siblings, 1 reply; 8+ messages in thread
From: Andrew MacLeod @ 2011-09-10  0:02 UTC (permalink / raw)
  To: gcc-patches

[-- Attachment #1: Type: text/plain, Size: 518 bytes --]

Same as before, only a slight change.  __sync_mem_flag_test_and_set() 
and __sync_mem_flag_clear() have been removed in a different 
consolidation patch, so this changes those calls to 
__sync_mem_exchange() and __sync_mem_store() which are subsuming all the 
behaviour of those routines.

Straight up translation which now calls the new routines with a memory 
model parameter instead of the old __sync routines with various barriers.

bootstrapped on x86_64-unknown-linux-gnu and no new regressions.

OK for branch?

[-- Attachment #2: c++.diff --]
[-- Type: text/plain, Size: 20640 bytes --]


	* libstdc++-v3/include/bits/atomic_2.h (__atomic2): Use new
	__sync_mem routines.


Index: include/bits/atomic_2.h
===================================================================
*** include/bits/atomic_2.h	(revision 178710)
--- include/bits/atomic_2.h	(working copy)
*************** namespace __atomic2
*** 60,78 ****
      bool
      test_and_set(memory_order __m = memory_order_seq_cst) noexcept
      {
!       // Redundant synchronize if built-in for lock is a full barrier.
!       if (__m != memory_order_acquire && __m != memory_order_acq_rel)
! 	__sync_synchronize();
!       return __sync_lock_test_and_set(&_M_i, 1);
      }
  
      bool
      test_and_set(memory_order __m = memory_order_seq_cst) volatile noexcept
      {
!       // Redundant synchronize if built-in for lock is a full barrier.
!       if (__m != memory_order_acquire && __m != memory_order_acq_rel)
! 	__sync_synchronize();
!       return __sync_lock_test_and_set(&_M_i, 1);
      }
  
      void
--- 60,72 ----
      bool
      test_and_set(memory_order __m = memory_order_seq_cst) noexcept
      {
!       return __sync_mem_exchange (&_M_i, 1, __m);
      }
  
      bool
      test_and_set(memory_order __m = memory_order_seq_cst) volatile noexcept
      {
!       return __sync_mem_exchange (&_M_i, 1, __m);
      }
  
      void
*************** namespace __atomic2
*** 82,90 ****
        __glibcxx_assert(__m != memory_order_acquire);
        __glibcxx_assert(__m != memory_order_acq_rel);
  
!       __sync_lock_release(&_M_i);
!       if (__m != memory_order_acquire && __m != memory_order_acq_rel)
! 	__sync_synchronize();
      }
  
      void
--- 76,82 ----
        __glibcxx_assert(__m != memory_order_acquire);
        __glibcxx_assert(__m != memory_order_acq_rel);
  
!       __sync_mem_store (&_M_i, 0, __m);
      }
  
      void
*************** namespace __atomic2
*** 94,102 ****
        __glibcxx_assert(__m != memory_order_acquire);
        __glibcxx_assert(__m != memory_order_acq_rel);
  
!       __sync_lock_release(&_M_i);
!       if (__m != memory_order_acquire && __m != memory_order_acq_rel)
! 	__sync_synchronize();
      }
    };
  
--- 86,92 ----
        __glibcxx_assert(__m != memory_order_acquire);
        __glibcxx_assert(__m != memory_order_acq_rel);
  
!       __sync_mem_store (&_M_i, 0, __m);
      }
    };
  
*************** namespace __atomic2
*** 180,238 ****
  
        __int_type
        operator++() noexcept
!       { return __sync_add_and_fetch(&_M_i, 1); }
  
        __int_type
        operator++() volatile noexcept
!       { return __sync_add_and_fetch(&_M_i, 1); }
  
        __int_type
        operator--() noexcept
!       { return __sync_sub_and_fetch(&_M_i, 1); }
  
        __int_type
        operator--() volatile noexcept
!       { return __sync_sub_and_fetch(&_M_i, 1); }
  
        __int_type
        operator+=(__int_type __i) noexcept
!       { return __sync_add_and_fetch(&_M_i, __i); }
  
        __int_type
        operator+=(__int_type __i) volatile noexcept
!       { return __sync_add_and_fetch(&_M_i, __i); }
  
        __int_type
        operator-=(__int_type __i) noexcept
!       { return __sync_sub_and_fetch(&_M_i, __i); }
  
        __int_type
        operator-=(__int_type __i) volatile noexcept
!       { return __sync_sub_and_fetch(&_M_i, __i); }
  
        __int_type
        operator&=(__int_type __i) noexcept
!       { return __sync_and_and_fetch(&_M_i, __i); }
  
        __int_type
        operator&=(__int_type __i) volatile noexcept
!       { return __sync_and_and_fetch(&_M_i, __i); }
  
        __int_type
        operator|=(__int_type __i) noexcept
!       { return __sync_or_and_fetch(&_M_i, __i); }
  
        __int_type
        operator|=(__int_type __i) volatile noexcept
!       { return __sync_or_and_fetch(&_M_i, __i); }
  
        __int_type
        operator^=(__int_type __i) noexcept
!       { return __sync_xor_and_fetch(&_M_i, __i); }
  
        __int_type
        operator^=(__int_type __i) volatile noexcept
!       { return __sync_xor_and_fetch(&_M_i, __i); }
  
        bool
        is_lock_free() const noexcept
--- 170,228 ----
  
        __int_type
        operator++() noexcept
!       { return __sync_mem_add_fetch(&_M_i, 1, memory_order_seq_cst); }
  
        __int_type
        operator++() volatile noexcept
!       { return __sync_mem_add__fetch(&_M_i, 1, memory_order_seq_cst); }
  
        __int_type
        operator--() noexcept
!       { return __sync_mem_sub_fetch(&_M_i, 1, memory_order_seq_cst); }
  
        __int_type
        operator--() volatile noexcept
!       { return __sync_mem_sub_fetch(&_M_i, 1, memory_order_seq_cst); }
  
        __int_type
        operator+=(__int_type __i) noexcept
!       { return __sync_mem_add_fetch(&_M_i, __i, memory_order_seq_cst); }
  
        __int_type
        operator+=(__int_type __i) volatile noexcept
!       { return __sync_mem_add_fetch(&_M_i, __i, memory_order_seq_cst); }
  
        __int_type
        operator-=(__int_type __i) noexcept
!       { return __sync_mem_sub_fetch(&_M_i, __i, memory_order_seq_cst); }
  
        __int_type
        operator-=(__int_type __i) volatile noexcept
!       { return __sync_mem_sub_fetch(&_M_i, __i, memory_order_seq_cst); }
  
        __int_type
        operator&=(__int_type __i) noexcept
!       { return __sync_mem_and_fetch(&_M_i, __i, memory_order_seq_cst); }
  
        __int_type
        operator&=(__int_type __i) volatile noexcept
!       { return __sync_mem_and_fetch(&_M_i, __i, memory_order_seq_cst); }
  
        __int_type
        operator|=(__int_type __i) noexcept
!       { return __sync_mem_or_fetch(&_M_i, __i, memory_order_seq_cst); }
  
        __int_type
        operator|=(__int_type __i) volatile noexcept
!       { return __sync_mem_or_fetch(&_M_i, __i, memory_order_seq_cst); }
  
        __int_type
        operator^=(__int_type __i) noexcept
!       { return __sync_mem_xor_fetch(&_M_i, __i, memory_order_seq_cst); }
  
        __int_type
        operator^=(__int_type __i) volatile noexcept
!       { return __sync_mem_xor_fetch(&_M_i, __i, memory_order_seq_cst); }
  
        bool
        is_lock_free() const noexcept
*************** namespace __atomic2
*** 249,263 ****
  	__glibcxx_assert(__m != memory_order_acq_rel);
  	__glibcxx_assert(__m != memory_order_consume);
  
! 	if (__m == memory_order_relaxed)
! 	  _M_i = __i;
! 	else
! 	  {
! 	    // write_mem_barrier();
! 	    _M_i = __i;
! 	    if (__m == memory_order_seq_cst)
! 	      __sync_synchronize();
! 	  }
        }
  
        void
--- 239,245 ----
  	__glibcxx_assert(__m != memory_order_acq_rel);
  	__glibcxx_assert(__m != memory_order_consume);
  
! 	__sync_mem_store (&_M_i, __i, __m);
        }
  
        void
*************** namespace __atomic2
*** 268,282 ****
  	__glibcxx_assert(__m != memory_order_acq_rel);
  	__glibcxx_assert(__m != memory_order_consume);
  
! 	if (__m == memory_order_relaxed)
! 	  _M_i = __i;
! 	else
! 	  {
! 	    // write_mem_barrier();
! 	    _M_i = __i;
! 	    if (__m == memory_order_seq_cst)
! 	      __sync_synchronize();
! 	  }
        }
  
        __int_type
--- 250,256 ----
  	__glibcxx_assert(__m != memory_order_acq_rel);
  	__glibcxx_assert(__m != memory_order_consume);
  
! 	__sync_mem_store (&_M_i, __i, __m);
        }
  
        __int_type
*************** namespace __atomic2
*** 285,294 ****
  	__glibcxx_assert(__m != memory_order_release);
  	__glibcxx_assert(__m != memory_order_acq_rel);
  
! 	__sync_synchronize();
! 	__int_type __ret = _M_i;
! 	__sync_synchronize();
! 	return __ret;
        }
  
        __int_type
--- 259,265 ----
  	__glibcxx_assert(__m != memory_order_release);
  	__glibcxx_assert(__m != memory_order_acq_rel);
  
! 	return __sync_mem_load (&_M_i, __m);
        }
  
        __int_type
*************** namespace __atomic2
*** 297,314 ****
  	__glibcxx_assert(__m != memory_order_release);
  	__glibcxx_assert(__m != memory_order_acq_rel);
  
! 	__sync_synchronize();
! 	__int_type __ret = _M_i;
! 	__sync_synchronize();
! 	return __ret;
        }
  
        __int_type
        exchange(__int_type __i,
  	       memory_order __m = memory_order_seq_cst) noexcept
        {
! 	// XXX built-in assumes memory_order_acquire.
! 	return __sync_lock_test_and_set(&_M_i, __i);
        }
  
  
--- 268,281 ----
  	__glibcxx_assert(__m != memory_order_release);
  	__glibcxx_assert(__m != memory_order_acq_rel);
  
! 	return __sync_mem_load (&_M_i, __m);
        }
  
        __int_type
        exchange(__int_type __i,
  	       memory_order __m = memory_order_seq_cst) noexcept
        {
!         return __sync_mem_exchange (&_M_i, __i, __m);
        }
  
  
*************** namespace __atomic2
*** 316,323 ****
        exchange(__int_type __i,
  	       memory_order __m = memory_order_seq_cst) volatile noexcept
        {
! 	// XXX built-in assumes memory_order_acquire.
! 	return __sync_lock_test_and_set(&_M_i, __i);
        }
  
        bool
--- 283,289 ----
        exchange(__int_type __i,
  	       memory_order __m = memory_order_seq_cst) volatile noexcept
        {
!         return __sync_mem_exchange (&_M_i, __i, __m);
        }
  
        bool
*************** namespace __atomic2
*** 356,361 ****
--- 322,328 ----
  	__glibcxx_assert(__m2 <= __m1);
  
  	__int_type __i1o = __i1;
+ 	// Compare_and_swap is a full barrier already.
  	__int_type __i1n = __sync_val_compare_and_swap(&_M_i, __i1o, __i2);
  
  	// Assume extra stores (of same value) allowed in true case.
*************** namespace __atomic2
*** 373,378 ****
--- 340,346 ----
  	__glibcxx_assert(__m2 <= __m1);
  
  	__int_type __i1o = __i1;
+ 	// Compare_and_swap is a full barrier already.
  	__int_type __i1n = __sync_val_compare_and_swap(&_M_i, __i1o, __i2);
  
  	// Assume extra stores (of same value) allowed in true case.
*************** namespace __atomic2
*** 399,450 ****
        __int_type
        fetch_add(__int_type __i,
  		memory_order __m = memory_order_seq_cst) noexcept
!       { return __sync_fetch_and_add(&_M_i, __i); }
  
        __int_type
        fetch_add(__int_type __i,
  		memory_order __m = memory_order_seq_cst) volatile noexcept
!       { return __sync_fetch_and_add(&_M_i, __i); }
  
        __int_type
        fetch_sub(__int_type __i,
  		memory_order __m = memory_order_seq_cst) noexcept
!       { return __sync_fetch_and_sub(&_M_i, __i); }
  
        __int_type
        fetch_sub(__int_type __i,
  		memory_order __m = memory_order_seq_cst) volatile noexcept
!       { return __sync_fetch_and_sub(&_M_i, __i); }
  
        __int_type
        fetch_and(__int_type __i,
  		memory_order __m = memory_order_seq_cst) noexcept
!       { return __sync_fetch_and_and(&_M_i, __i); }
  
        __int_type
        fetch_and(__int_type __i,
  		memory_order __m = memory_order_seq_cst) volatile noexcept
!       { return __sync_fetch_and_and(&_M_i, __i); }
  
        __int_type
        fetch_or(__int_type __i,
  	       memory_order __m = memory_order_seq_cst) noexcept
!       { return __sync_fetch_and_or(&_M_i, __i); }
  
        __int_type
        fetch_or(__int_type __i,
  	       memory_order __m = memory_order_seq_cst) volatile noexcept
!       { return __sync_fetch_and_or(&_M_i, __i); }
  
        __int_type
        fetch_xor(__int_type __i,
  		memory_order __m = memory_order_seq_cst) noexcept
!       { return __sync_fetch_and_xor(&_M_i, __i); }
  
        __int_type
        fetch_xor(__int_type __i,
  		memory_order __m = memory_order_seq_cst) volatile noexcept
!       { return __sync_fetch_and_xor(&_M_i, __i); }
      };
  
  
--- 367,418 ----
        __int_type
        fetch_add(__int_type __i,
  		memory_order __m = memory_order_seq_cst) noexcept
!       { return __sync_mem_fetch_add(&_M_i, __i, __m); }
  
        __int_type
        fetch_add(__int_type __i,
  		memory_order __m = memory_order_seq_cst) volatile noexcept
!       { return __sync_mem_fetch_add(&_M_i, __i, __m); }
  
        __int_type
        fetch_sub(__int_type __i,
  		memory_order __m = memory_order_seq_cst) noexcept
!       { return __sync_mem_fetch_sub(&_M_i, __i, __m); }
  
        __int_type
        fetch_sub(__int_type __i,
  		memory_order __m = memory_order_seq_cst) volatile noexcept
!       { return __sync_mem_fetch_sub(&_M_i, __i, __m); }
  
        __int_type
        fetch_and(__int_type __i,
  		memory_order __m = memory_order_seq_cst) noexcept
!       { return __sync_mem_fetch_and(&_M_i, __i, __m); }
  
        __int_type
        fetch_and(__int_type __i,
  		memory_order __m = memory_order_seq_cst) volatile noexcept
!       { return __sync_mem_fetch_and(&_M_i, __i, __m); }
  
        __int_type
        fetch_or(__int_type __i,
  	       memory_order __m = memory_order_seq_cst) noexcept
!       { return __sync_mem_fetch_or(&_M_i, __i, __m); }
  
        __int_type
        fetch_or(__int_type __i,
  	       memory_order __m = memory_order_seq_cst) volatile noexcept
!       { return __sync_mem_fetch_or(&_M_i, __i, __m); }
  
        __int_type
        fetch_xor(__int_type __i,
  		memory_order __m = memory_order_seq_cst) noexcept
!       { return __sync_mem_fetch_xor(&_M_i, __i, __m); }
  
        __int_type
        fetch_xor(__int_type __i,
  		memory_order __m = memory_order_seq_cst) volatile noexcept
!       { return __sync_mem_fetch_xor(&_M_i, __i, __m); }
      };
  
  
*************** namespace __atomic2
*** 505,539 ****
  
        __pointer_type
        operator++() noexcept
!       { return fetch_add(1) + 1; }
  
        __pointer_type
        operator++() volatile noexcept
!       { return fetch_add(1) + 1; }
  
        __pointer_type
        operator--() noexcept
!       { return fetch_sub(1) -1; }
  
        __pointer_type
        operator--() volatile noexcept
!       { return fetch_sub(1) -1; }
  
        __pointer_type
        operator+=(ptrdiff_t __d) noexcept
!       { return fetch_add(__d) + __d; }
  
        __pointer_type
        operator+=(ptrdiff_t __d) volatile noexcept
!       { return fetch_add(__d) + __d; }
  
        __pointer_type
        operator-=(ptrdiff_t __d) noexcept
!       { return fetch_sub(__d) - __d; }
  
        __pointer_type
        operator-=(ptrdiff_t __d) volatile noexcept
!       { return fetch_sub(__d) - __d; }
  
        bool
        is_lock_free() const noexcept
--- 473,507 ----
  
        __pointer_type
        operator++() noexcept
!       { return __sync_mem_add_fetch(&_M_p, 1, memory_order_seq_cst); }
  
        __pointer_type
        operator++() volatile noexcept
!       { return __sync_mem_add_fetch(&_M_p, 1, memory_order_seq_cst); }
  
        __pointer_type
        operator--() noexcept
!       { return __sync_mem_sub_fetch(&_M_p, 1, memory_order_seq_cst); }
  
        __pointer_type
        operator--() volatile noexcept
!       { return __sync_mem_sub_fetch(&_M_p, 1, memory_order_seq_cst); }
  
        __pointer_type
        operator+=(ptrdiff_t __d) noexcept
!       { return __sync_mem_add_fetch(&_M_p, __d, memory_order_seq_cst); }
  
        __pointer_type
        operator+=(ptrdiff_t __d) volatile noexcept
!       { return __sync_mem_add_fetch(&_M_p, __d, memory_order_seq_cst); }
  
        __pointer_type
        operator-=(ptrdiff_t __d) noexcept
!       { return __sync_mem_sub_fetch(&_M_p, __d, memory_order_seq_cst); }
  
        __pointer_type
        operator-=(ptrdiff_t __d) volatile noexcept
!       { return __sync_mem_sub_fetch(&_M_p, __d, memory_order_seq_cst); }
  
        bool
        is_lock_free() const noexcept
*************** namespace __atomic2
*** 551,565 ****
  	__glibcxx_assert(__m != memory_order_acq_rel);
  	__glibcxx_assert(__m != memory_order_consume);
  
! 	if (__m == memory_order_relaxed)
! 	  _M_p = __p;
! 	else
! 	  {
! 	    // write_mem_barrier();
! 	    _M_p = __p;
! 	    if (__m == memory_order_seq_cst)
! 	      __sync_synchronize();
! 	  }
        }
  
        void
--- 519,525 ----
  	__glibcxx_assert(__m != memory_order_acq_rel);
  	__glibcxx_assert(__m != memory_order_consume);
  
! 	__sync_mem_store (&_M_p, __p, __m);
        }
  
        void
*************** namespace __atomic2
*** 570,584 ****
  	__glibcxx_assert(__m != memory_order_acq_rel);
  	__glibcxx_assert(__m != memory_order_consume);
  
! 	if (__m == memory_order_relaxed)
! 	  _M_p = __p;
! 	else
! 	  {
! 	    // write_mem_barrier();
! 	    _M_p = __p;
! 	    if (__m == memory_order_seq_cst)
! 	      __sync_synchronize();
! 	  }
        }
  
        __pointer_type
--- 530,536 ----
  	__glibcxx_assert(__m != memory_order_acq_rel);
  	__glibcxx_assert(__m != memory_order_consume);
  
! 	__sync_mem_store (&_M_p, __p, __m);
        }
  
        __pointer_type
*************** namespace __atomic2
*** 587,596 ****
  	__glibcxx_assert(__m != memory_order_release);
  	__glibcxx_assert(__m != memory_order_acq_rel);
  
! 	__sync_synchronize();
! 	__pointer_type __ret = _M_p;
! 	__sync_synchronize();
! 	return __ret;
        }
  
        __pointer_type
--- 539,545 ----
  	__glibcxx_assert(__m != memory_order_release);
  	__glibcxx_assert(__m != memory_order_acq_rel);
  
! 	return __sync_mem_load (&_M_p, __m);
        }
  
        __pointer_type
*************** namespace __atomic2
*** 599,616 ****
  	__glibcxx_assert(__m != memory_order_release);
  	__glibcxx_assert(__m != memory_order_acq_rel);
  
! 	__sync_synchronize();
! 	__pointer_type __ret = _M_p;
! 	__sync_synchronize();
! 	return __ret;
        }
  
        __pointer_type
        exchange(__pointer_type __p,
  	       memory_order __m = memory_order_seq_cst) noexcept
        {
! 	// XXX built-in assumes memory_order_acquire.
! 	return __sync_lock_test_and_set(&_M_p, __p);
        }
  
  
--- 548,561 ----
  	__glibcxx_assert(__m != memory_order_release);
  	__glibcxx_assert(__m != memory_order_acq_rel);
  
! 	return __sync_mem_load (&_M_p, __m);
        }
  
        __pointer_type
        exchange(__pointer_type __p,
  	       memory_order __m = memory_order_seq_cst) noexcept
        {
! 	return __sync_mem_exchange (&_M_p, __p, __m);
        }
  
  
*************** namespace __atomic2
*** 618,625 ****
        exchange(__pointer_type __p,
  	       memory_order __m = memory_order_seq_cst) volatile noexcept
        {
! 	// XXX built-in assumes memory_order_acquire.
! 	return __sync_lock_test_and_set(&_M_p, __p);
        }
  
        bool
--- 563,569 ----
        exchange(__pointer_type __p,
  	       memory_order __m = memory_order_seq_cst) volatile noexcept
        {
! 	return __sync_mem_exchange (&_M_p, __p, __m);
        }
  
        bool
*************** namespace __atomic2
*** 632,637 ****
--- 576,582 ----
  	__glibcxx_assert(__m2 <= __m1);
  
  	__pointer_type __p1o = __p1;
+ 	// Compare_and_swap is a full barrier already.
  	__pointer_type __p1n = __sync_val_compare_and_swap(&_M_p, __p1o, __p2);
  
  	// Assume extra stores (of same value) allowed in true case.
*************** namespace __atomic2
*** 649,654 ****
--- 594,600 ----
  	__glibcxx_assert(__m2 <= __m1);
  
  	__pointer_type __p1o = __p1;
+ 	// Compare_and_swap is a full barrier already.
  	__pointer_type __p1n = __sync_val_compare_and_swap(&_M_p, __p1o, __p2);
  
  	// Assume extra stores (of same value) allowed in true case.
*************** namespace __atomic2
*** 659,680 ****
        __pointer_type
        fetch_add(ptrdiff_t __d,
  		memory_order __m = memory_order_seq_cst) noexcept
!       { return __sync_fetch_and_add(&_M_p, __d); }
  
        __pointer_type
        fetch_add(ptrdiff_t __d,
  		memory_order __m = memory_order_seq_cst) volatile noexcept
!       { return __sync_fetch_and_add(&_M_p, __d); }
  
        __pointer_type
        fetch_sub(ptrdiff_t __d,
  		memory_order __m = memory_order_seq_cst) noexcept
!       { return __sync_fetch_and_sub(&_M_p, __d); }
  
        __pointer_type
        fetch_sub(ptrdiff_t __d,
  		memory_order __m = memory_order_seq_cst) volatile noexcept
!       { return __sync_fetch_and_sub(&_M_p, __d); }
      };
  
  } // namespace __atomic2
--- 605,626 ----
        __pointer_type
        fetch_add(ptrdiff_t __d,
  		memory_order __m = memory_order_seq_cst) noexcept
!       { return __sync_mem_fetch_add(&_M_p, __d, __m); }
  
        __pointer_type
        fetch_add(ptrdiff_t __d,
  		memory_order __m = memory_order_seq_cst) volatile noexcept
!       { return __sync_mem_fetch_add(&_M_p, __d, __m); }
  
        __pointer_type
        fetch_sub(ptrdiff_t __d,
  		memory_order __m = memory_order_seq_cst) noexcept
!       { return __sync_mem_fetch_sub(&_M_p, __d, __m); }
  
        __pointer_type
        fetch_sub(ptrdiff_t __d,
  		memory_order __m = memory_order_seq_cst) volatile noexcept
!       { return __sync_mem_fetch_sub(&_M_p, __d, __m); }
      };
  
  } // namespace __atomic2

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [cxx-mem-model] C++ wrappers
  2011-09-10  0:02 [cxx-mem-model] C++ wrappers Andrew MacLeod
@ 2011-09-15 21:03 ` Richard Henderson
  2011-09-15 21:50   ` Andrew MacLeod
  0 siblings, 1 reply; 8+ messages in thread
From: Richard Henderson @ 2011-09-15 21:03 UTC (permalink / raw)
  To: Andrew MacLeod; +Cc: gcc-patches

On 09/09/2011 01:29 PM, Andrew MacLeod wrote:
> !       { return __sync_mem_add__fetch(&_M_i, 1, memory_order_seq_cst); }

                                 ^^ typo

Which begs the question of how this didn't get caught
in the testsuite somewhere?

Otherwise it looks ok.


r~

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [cxx-mem-model] C++ wrappers
  2011-09-15 21:03 ` Richard Henderson
@ 2011-09-15 21:50   ` Andrew MacLeod
  2011-09-15 23:04     ` Richard Henderson
  0 siblings, 1 reply; 8+ messages in thread
From: Andrew MacLeod @ 2011-09-15 21:50 UTC (permalink / raw)
  To: Richard Henderson; +Cc: gcc-patches

On 09/15/2011 04:47 PM, Richard Henderson wrote:
> On 09/09/2011 01:29 PM, Andrew MacLeod wrote:
>> !       { return __sync_mem_add__fetch(&_M_i, 1, memory_order_seq_cst); }
>
>                                   ^^ typo
>
> Which begs the question of how this didn't get caught
> in the testsuite somewhere?
>

good catch :-)

a template calling that particular method was probably not invoked. The 
joys of c++ templates eh :-)

I do plan to flesh out a set of tests which try every variant, just 
haven't gotten to it yet.


Andrew

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [cxx-mem-model] C++ wrappers
  2011-09-15 21:50   ` Andrew MacLeod
@ 2011-09-15 23:04     ` Richard Henderson
  2011-09-15 23:08       ` Jason Merrill
  2011-09-15 23:08       ` Andrew MacLeod
  0 siblings, 2 replies; 8+ messages in thread
From: Richard Henderson @ 2011-09-15 23:04 UTC (permalink / raw)
  To: Andrew MacLeod; +Cc: gcc-patches, Jason Merrill

On 09/15/2011 02:03 PM, Andrew MacLeod wrote:
> On 09/15/2011 04:47 PM, Richard Henderson wrote:
>> On 09/09/2011 01:29 PM, Andrew MacLeod wrote:
>>> !       { return __sync_mem_add__fetch(&_M_i, 1, memory_order_seq_cst); }
>>
>>                                   ^^ typo
>>
>> Which begs the question of how this didn't get caught
>> in the testsuite somewhere?
>>
> 
> good catch :-)
> 
> a template calling that particular method was probably not invoked. The joys of c++ templates eh :-)

Isn't there an extension that forces an entire class template to be instantiated?


r~

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [cxx-mem-model] C++ wrappers
  2011-09-15 23:08       ` Jason Merrill
@ 2011-09-15 23:08         ` Andrew MacLeod
  2011-09-19 20:57           ` Jason Merrill
  0 siblings, 1 reply; 8+ messages in thread
From: Andrew MacLeod @ 2011-09-15 23:08 UTC (permalink / raw)
  To: Jason Merrill; +Cc: Richard Henderson, gcc-patches, Benjamin Kosnik

On 09/15/2011 05:49 PM, Jason Merrill wrote:
> On 09/15/2011 05:14 PM, Richard Henderson wrote:
>> On 09/15/2011 02:03 PM, Andrew MacLeod wrote:
>
>>> a template calling that particular method was probably not invoked.
>>> The joys of c++ templates eh :-)
>>
>> Isn't there an extension that forces an entire class template to be
>> instantiated?
>
> It's standard C++.
>
> template struct __atomic_base<bool>;
>
> etc.
>
> Do we want to start exporting these from libstdc++?
>

I don't know the ins and outs of the library...  the typedefs and 
specializations are already there, I can write:

atomic_char c;
c = 'a'

   in my program and it will invoke the required template for the class 
and call the appropriate store routine or whatever.

I'm working on the changes now which will enable lock-free versions of 
the templates for the types which are supported by the __sync routines, 
and locked versions for everything else.

do we need to explicitly export something?

Andrew

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [cxx-mem-model] C++ wrappers
  2011-09-15 23:04     ` Richard Henderson
  2011-09-15 23:08       ` Jason Merrill
@ 2011-09-15 23:08       ` Andrew MacLeod
  1 sibling, 0 replies; 8+ messages in thread
From: Andrew MacLeod @ 2011-09-15 23:08 UTC (permalink / raw)
  To: Richard Henderson; +Cc: gcc-patches, Jason Merrill

On 09/15/2011 05:14 PM, Richard Henderson wrote:
> On 09/15/2011 02:03 PM, Andrew MacLeod wrote:
>> On 09/15/2011 04:47 PM, Richard Henderson wrote:
>>> On 09/09/2011 01:29 PM, Andrew MacLeod wrote:
>>>> !       { return __sync_mem_add__fetch(&_M_i, 1, memory_order_seq_cst); }
>>>
>>>                                    ^^ typo
>>>
>>> Which begs the question of how this didn't get caught
>>> in the testsuite somewhere?
>>>
>>
>> good catch :-)
>>
>> a template calling that particular method was probably not invoked. The joys of c++ templates eh :-)
>
> Isn't there an extension that forces an entire class template to be instantiated?
>
It is instantiated, but if someone doesn't actually make a call to 
operator++() volatile  (which I think is where this mistyped call was), 
you don't end up with an unresolved function call...

At least it didn't trip up any tests cases and I did a full bootstrap & 
make check.

Andrew

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [cxx-mem-model] C++ wrappers
  2011-09-15 23:04     ` Richard Henderson
@ 2011-09-15 23:08       ` Jason Merrill
  2011-09-15 23:08         ` Andrew MacLeod
  2011-09-15 23:08       ` Andrew MacLeod
  1 sibling, 1 reply; 8+ messages in thread
From: Jason Merrill @ 2011-09-15 23:08 UTC (permalink / raw)
  To: Richard Henderson; +Cc: Andrew MacLeod, gcc-patches, Benjamin Kosnik

On 09/15/2011 05:14 PM, Richard Henderson wrote:
> On 09/15/2011 02:03 PM, Andrew MacLeod wrote:

>> a template calling that particular method was probably not invoked. The joys of c++ templates eh :-)
>
> Isn't there an extension that forces an entire class template to be instantiated?

It's standard C++.

template struct __atomic_base<bool>;

etc.

Do we want to start exporting these from libstdc++?

Jason

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [cxx-mem-model] C++ wrappers
  2011-09-15 23:08         ` Andrew MacLeod
@ 2011-09-19 20:57           ` Jason Merrill
  0 siblings, 0 replies; 8+ messages in thread
From: Jason Merrill @ 2011-09-19 20:57 UTC (permalink / raw)
  To: Andrew MacLeod; +Cc: Richard Henderson, gcc-patches, Benjamin Kosnik

On 09/15/2011 06:02 PM, Andrew MacLeod wrote:
> On 09/15/2011 05:49 PM, Jason Merrill wrote:

>> Do we want to start exporting these from libstdc++?
>
> I dont know the ins and outs of the library... the typedefs and
> specializations are already there, I can write:
>
> atomic_char c;
> c = 'a'
>
> in my program and it will invoke the required template for the class and
> call the appropriate store routine or whatever.
>
> do we need to explicitly export something?

Need, no.  I was wondering if Benjamin wanted to.

Jason

^ permalink raw reply	[flat|nested] 8+ messages in thread

end of thread, other threads:[~2011-09-19 19:15 UTC | newest]

Thread overview: 8+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2011-09-10  0:02 [cxx-mem-model] C++ wrappers Andrew MacLeod
2011-09-15 21:03 ` Richard Henderson
2011-09-15 21:50   ` Andrew MacLeod
2011-09-15 23:04     ` Richard Henderson
2011-09-15 23:08       ` Jason Merrill
2011-09-15 23:08         ` Andrew MacLeod
2011-09-19 20:57           ` Jason Merrill
2011-09-15 23:08       ` Andrew MacLeod

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for read-only IMAP folder(s) and NNTP newsgroup(s).