public inbox for gcc-patches@gcc.gnu.org
* [cxx-mem-model] Atomic C++ header file changes
@ 2011-08-17 17:54 Andrew MacLeod
  2011-08-19  4:05 ` Richard Henderson
                   ` (2 more replies)
  0 siblings, 3 replies; 11+ messages in thread
From: Andrew MacLeod @ 2011-08-17 17:54 UTC (permalink / raw)
  To: gcc-patches

[-- Attachment #1: Type: text/plain, Size: 973 bytes --]

Next step: change the C++ header files to use the new __sync builtins.
Pretty straightforward.

Mostly.

Turns out, C++ will allow you to specify the memory model as a variable
of type enum memory_order... WTF?  I would expect that to be pretty
uncommon, and to get that right we'd need a switch statement that calls
the appropriate __sync_mem* routine with the corresponding constant
parameter.
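
For illustration, a sketch of the dispatch every operation would need
if a runtime memory_order had to be honored exactly (__dispatch_load is
a hypothetical helper, not part of this patch):

  template<typename _ITp>
    _ITp
    __dispatch_load(_ITp* __ptr, memory_order __m)
    {
      // Re-map the runtime value onto compile-time constants so that
      // each __sync_mem_load call sees a constant memory model.
      switch (__m)
        {
        case memory_order_relaxed:
          return __sync_mem_load(__ptr, memory_order_relaxed);
        case memory_order_consume:
          return __sync_mem_load(__ptr, memory_order_consume);
        case memory_order_acquire:
          return __sync_mem_load(__ptr, memory_order_acquire);
        default:
          return __sync_mem_load(__ptr, memory_order_seq_cst);
        }
    }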

That would be quite ugly, and you get what you deserve if you do that.
I changed the builtins so that if you don't specify a compile-time
constant in the memory model parameter, it will simply default to
__SYNC_MEM_SEQ_CST, which will always be safe.  That is standards
compliant (verified), and if anyone is really unhappy about it, then the
C++ headers can be really uglified by adding a bunch of switch
statements to handle this twisted case.
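
Concretely, with this change (hypothetical caller code, not from the
patch):

  int __i;
  __sync_mem_load (&__i, memory_order_acquire);  // constant: honored

  memory_order __m = memory_order_relaxed;       // runtime value
  __sync_mem_load (&__i, __m);                   // non-constant: treated
                                                 // as __SYNC_MEM_SEQ_CST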

Bootstraps with no new regressions.  (In fact, it fixes one of the
atomic verification tests!)

Andrew

[-- Attachment #2: c++.patch --]
[-- Type: text/plain, Size: 21108 bytes --]


	* gcc/builtins.c (get_memmodel): Allow non-constant parameters and
	default to MEMMODEL_SEQ_CST mode for these cases.

	* libstdc++-v3/include/bits/atomic_2.h (__atomic2): Use new
	__sync_mem routines.

Index: gcc/builtins.c
===================================================================
*** gcc/builtins.c	(revision 177737)
--- gcc/builtins.c	(working copy)
*************** get_memmodel (tree exp)
*** 5225,5240 ****
  {
    rtx op;
  
    if (TREE_CODE (exp) != INTEGER_CST)
!     {
!       error ("invalid memory model argument to builtin");
!       return MEMMODEL_RELAXED;
!     }
    op = expand_normal (exp);
    if (INTVAL (op) < 0 || INTVAL (op) >= MEMMODEL_LAST)
      {
        error ("invalid memory model argument to builtin");
!       return MEMMODEL_RELAXED;
      }
    return (enum memmodel) INTVAL (op);
  }
--- 5225,5240 ----
  {
    rtx op;
  
+   /* If the parameter is not a constant, it's a run time value so we'll just
+      convert it to MEMMODEL_SEQ_CST to avoid annoying runtime checking.  */
    if (TREE_CODE (exp) != INTEGER_CST)
!     return MEMMODEL_SEQ_CST;
! 
    op = expand_normal (exp);
    if (INTVAL (op) < 0 || INTVAL (op) >= MEMMODEL_LAST)
      {
        error ("invalid memory model argument to builtin");
!       return MEMMODEL_SEQ_CST;
      }
    return (enum memmodel) INTVAL (op);
  }
Index: libstdc++-v3/include/bits/atomic_2.h
===================================================================
*** libstdc++-v3/include/bits/atomic_2.h	(revision 177737)
--- libstdc++-v3/include/bits/atomic_2.h	(working copy)
*************** namespace __atomic2
*** 60,78 ****
      bool
      test_and_set(memory_order __m = memory_order_seq_cst)
      {
!       // Redundant synchronize if built-in for lock is a full barrier.
!       if (__m != memory_order_acquire && __m != memory_order_acq_rel)
! 	__sync_synchronize();
!       return __sync_lock_test_and_set(&_M_i, 1);
      }
  
      bool
      test_and_set(memory_order __m = memory_order_seq_cst) volatile
      {
!       // Redundant synchronize if built-in for lock is a full barrier.
!       if (__m != memory_order_acquire && __m != memory_order_acq_rel)
! 	__sync_synchronize();
!       return __sync_lock_test_and_set(&_M_i, 1);
      }
  
      void
--- 60,72 ----
      bool
      test_and_set(memory_order __m = memory_order_seq_cst)
      {
!       return __sync_mem_flag_test_and_set(&_M_i, __m);
      }
  
      bool
      test_and_set(memory_order __m = memory_order_seq_cst) volatile
      {
!       return __sync_mem_flag_test_and_set(&_M_i, __m);
      }
  
      void
*************** namespace __atomic2
*** 82,90 ****
        __glibcxx_assert(__m != memory_order_acquire);
        __glibcxx_assert(__m != memory_order_acq_rel);
  
!       __sync_lock_release(&_M_i);
!       if (__m != memory_order_acquire && __m != memory_order_acq_rel)
! 	__sync_synchronize();
      }
  
      void
--- 76,82 ----
        __glibcxx_assert(__m != memory_order_acquire);
        __glibcxx_assert(__m != memory_order_acq_rel);
  
!       __sync_mem_flag_clear(&_M_i, __m);
      }
  
      void
*************** namespace __atomic2
*** 94,102 ****
        __glibcxx_assert(__m != memory_order_acquire);
        __glibcxx_assert(__m != memory_order_acq_rel);
  
!       __sync_lock_release(&_M_i);
!       if (__m != memory_order_acquire && __m != memory_order_acq_rel)
! 	__sync_synchronize();
      }
    };
  
--- 86,92 ----
        __glibcxx_assert(__m != memory_order_acquire);
        __glibcxx_assert(__m != memory_order_acq_rel);
  
!       __sync_mem_flag_clear(&_M_i, __m);
      }
    };
  
*************** namespace __atomic2
*** 180,238 ****
  
        __int_type
        operator++()
!       { return __sync_add_and_fetch(&_M_i, 1); }
  
        __int_type
        operator++() volatile
!       { return __sync_add_and_fetch(&_M_i, 1); }
  
        __int_type
        operator--()
!       { return __sync_sub_and_fetch(&_M_i, 1); }
  
        __int_type
        operator--() volatile
!       { return __sync_sub_and_fetch(&_M_i, 1); }
  
        __int_type
        operator+=(__int_type __i)
!       { return __sync_add_and_fetch(&_M_i, __i); }
  
        __int_type
        operator+=(__int_type __i) volatile
!       { return __sync_add_and_fetch(&_M_i, __i); }
  
        __int_type
        operator-=(__int_type __i)
!       { return __sync_sub_and_fetch(&_M_i, __i); }
  
        __int_type
        operator-=(__int_type __i) volatile
!       { return __sync_sub_and_fetch(&_M_i, __i); }
  
        __int_type
        operator&=(__int_type __i)
!       { return __sync_and_and_fetch(&_M_i, __i); }
  
        __int_type
        operator&=(__int_type __i) volatile
!       { return __sync_and_and_fetch(&_M_i, __i); }
  
        __int_type
        operator|=(__int_type __i)
!       { return __sync_or_and_fetch(&_M_i, __i); }
  
        __int_type
        operator|=(__int_type __i) volatile
!       { return __sync_or_and_fetch(&_M_i, __i); }
  
        __int_type
        operator^=(__int_type __i)
!       { return __sync_xor_and_fetch(&_M_i, __i); }
  
        __int_type
        operator^=(__int_type __i) volatile
!       { return __sync_xor_and_fetch(&_M_i, __i); }
  
        bool
        is_lock_free() const
--- 170,228 ----
  
        __int_type
        operator++()
!       { return __sync_mem_add_fetch(&_M_i, 1, memory_order_seq_cst); }
  
        __int_type
        operator++() volatile
!       { return __sync_mem_add_fetch(&_M_i, 1, memory_order_seq_cst); }
  
        __int_type
        operator--()
!       { return __sync_mem_sub_fetch(&_M_i, 1, memory_order_seq_cst); }
  
        __int_type
        operator--() volatile
!       { return __sync_mem_sub_fetch(&_M_i, 1, memory_order_seq_cst); }
  
        __int_type
        operator+=(__int_type __i)
!       { return __sync_mem_add_fetch(&_M_i, __i, memory_order_seq_cst); }
  
        __int_type
        operator+=(__int_type __i) volatile
!       { return __sync_mem_add_fetch(&_M_i, __i, memory_order_seq_cst); }
  
        __int_type
        operator-=(__int_type __i)
!       { return __sync_mem_sub_fetch(&_M_i, __i, memory_order_seq_cst); }
  
        __int_type
        operator-=(__int_type __i) volatile
!       { return __sync_mem_sub_fetch(&_M_i, __i, memory_order_seq_cst); }
  
        __int_type
        operator&=(__int_type __i)
!       { return __sync_mem_and_fetch(&_M_i, __i, memory_order_seq_cst); }
  
        __int_type
        operator&=(__int_type __i) volatile
!       { return __sync_mem_and_fetch(&_M_i, __i, memory_order_seq_cst); }
  
        __int_type
        operator|=(__int_type __i)
!       { return __sync_mem_or_fetch(&_M_i, __i, memory_order_seq_cst); }
  
        __int_type
        operator|=(__int_type __i) volatile
!       { return __sync_mem_or_fetch(&_M_i, __i, memory_order_seq_cst); }
  
        __int_type
        operator^=(__int_type __i)
!       { return __sync_mem_xor_fetch(&_M_i, __i, memory_order_seq_cst); }
  
        __int_type
        operator^=(__int_type __i) volatile
!       { return __sync_mem_xor_fetch(&_M_i, __i, memory_order_seq_cst); }
  
        bool
        is_lock_free() const
*************** namespace __atomic2
*** 249,263 ****
  	__glibcxx_assert(__m != memory_order_acq_rel);
  	__glibcxx_assert(__m != memory_order_consume);
  
! 	if (__m == memory_order_relaxed)
! 	  _M_i = __i;
! 	else
! 	  {
! 	    // write_mem_barrier();
! 	    _M_i = __i;
! 	    if (__m == memory_order_seq_cst)
! 	      __sync_synchronize();
! 	  }
        }
  
        void
--- 239,245 ----
  	__glibcxx_assert(__m != memory_order_acq_rel);
  	__glibcxx_assert(__m != memory_order_consume);
  
! 	__sync_mem_store (&_M_i, __i, __m);
        }
  
        void
*************** namespace __atomic2
*** 267,281 ****
  	__glibcxx_assert(__m != memory_order_acq_rel);
  	__glibcxx_assert(__m != memory_order_consume);
  
! 	if (__m == memory_order_relaxed)
! 	  _M_i = __i;
! 	else
! 	  {
! 	    // write_mem_barrier();
! 	    _M_i = __i;
! 	    if (__m == memory_order_seq_cst)
! 	      __sync_synchronize();
! 	  }
        }
  
        __int_type
--- 249,255 ----
  	__glibcxx_assert(__m != memory_order_acq_rel);
  	__glibcxx_assert(__m != memory_order_consume);
  
! 	__sync_mem_store (&_M_i, __i, __m);
        }
  
        __int_type
*************** namespace __atomic2
*** 284,293 ****
  	__glibcxx_assert(__m != memory_order_release);
  	__glibcxx_assert(__m != memory_order_acq_rel);
  
! 	__sync_synchronize();
! 	__int_type __ret = _M_i;
! 	__sync_synchronize();
! 	return __ret;
        }
  
        __int_type
--- 258,264 ----
  	__glibcxx_assert(__m != memory_order_release);
  	__glibcxx_assert(__m != memory_order_acq_rel);
  
! 	return __sync_mem_load (const_cast <__int_type *>(&_M_i), __m);
        }
  
        __int_type
*************** namespace __atomic2
*** 296,320 ****
  	__glibcxx_assert(__m != memory_order_release);
  	__glibcxx_assert(__m != memory_order_acq_rel);
  
! 	__sync_synchronize();
! 	__int_type __ret = _M_i;
! 	__sync_synchronize();
! 	return __ret;
        }
  
        __int_type
        exchange(__int_type __i, memory_order __m = memory_order_seq_cst)
        {
! 	// XXX built-in assumes memory_order_acquire.
! 	return __sync_lock_test_and_set(&_M_i, __i);
        }
  
  
        __int_type
        exchange(__int_type __i, memory_order __m = memory_order_seq_cst) volatile
        {
! 	// XXX built-in assumes memory_order_acquire.
! 	return __sync_lock_test_and_set(&_M_i, __i);
        }
  
        bool
--- 267,286 ----
  	__glibcxx_assert(__m != memory_order_release);
  	__glibcxx_assert(__m != memory_order_acq_rel);
  
! 	return __sync_mem_load (const_cast <__int_type *>(&_M_i), __m);
        }
  
        __int_type
        exchange(__int_type __i, memory_order __m = memory_order_seq_cst)
        {
! 	return __sync_mem_exchange (&_M_i, __i, __m);
        }
  
  
        __int_type
        exchange(__int_type __i, memory_order __m = memory_order_seq_cst) volatile
        {
! 	return __sync_mem_exchange (&_M_i, __i, __m);
        }
  
        bool
*************** namespace __atomic2
*** 352,357 ****
--- 318,324 ----
  	__glibcxx_assert(__m2 <= __m1);
  
  	__int_type __i1o = __i1;
+ 	// Compare_and_swap is a full barrier already.
  	__int_type __i1n = __sync_val_compare_and_swap(&_M_i, __i1o, __i2);
  
  	// Assume extra stores (of same value) allowed in true case.
*************** namespace __atomic2
*** 368,373 ****
--- 335,341 ----
  	__glibcxx_assert(__m2 <= __m1);
  
  	__int_type __i1o = __i1;
+ 	// Compare_and_swap is a full barrier already.
  	__int_type __i1n = __sync_val_compare_and_swap(&_M_i, __i1o, __i2);
  
  	// Assume extra stores (of same value) allowed in true case.
*************** namespace __atomic2
*** 393,440 ****
  
        __int_type
        fetch_add(__int_type __i, memory_order __m = memory_order_seq_cst)
!       { return __sync_fetch_and_add(&_M_i, __i); }
  
        __int_type
        fetch_add(__int_type __i,
  		memory_order __m = memory_order_seq_cst) volatile
!       { return __sync_fetch_and_add(&_M_i, __i); }
  
        __int_type
        fetch_sub(__int_type __i, memory_order __m = memory_order_seq_cst)
!       { return __sync_fetch_and_sub(&_M_i, __i); }
  
        __int_type
        fetch_sub(__int_type __i,
  		memory_order __m = memory_order_seq_cst) volatile
!       { return __sync_fetch_and_sub(&_M_i, __i); }
  
        __int_type
        fetch_and(__int_type __i, memory_order __m = memory_order_seq_cst)
!       { return __sync_fetch_and_and(&_M_i, __i); }
  
        __int_type
        fetch_and(__int_type __i,
  		memory_order __m = memory_order_seq_cst) volatile
!       { return __sync_fetch_and_and(&_M_i, __i); }
  
        __int_type
        fetch_or(__int_type __i, memory_order __m = memory_order_seq_cst)
!       { return __sync_fetch_and_or(&_M_i, __i); }
  
        __int_type
        fetch_or(__int_type __i,
  	       memory_order __m = memory_order_seq_cst) volatile
!       { return __sync_fetch_and_or(&_M_i, __i); }
  
        __int_type
        fetch_xor(__int_type __i, memory_order __m = memory_order_seq_cst)
!       { return __sync_fetch_and_xor(&_M_i, __i); }
  
        __int_type
        fetch_xor(__int_type __i,
  		memory_order __m = memory_order_seq_cst) volatile
!       { return __sync_fetch_and_xor(&_M_i, __i); }
      };
  
  
--- 361,408 ----
  
        __int_type
        fetch_add(__int_type __i, memory_order __m = memory_order_seq_cst)
!       { return __sync_mem_fetch_add(&_M_i, __i, __m); }
  
        __int_type
        fetch_add(__int_type __i,
  		memory_order __m = memory_order_seq_cst) volatile
!       { return __sync_mem_fetch_add(&_M_i, __i, __m); }
  
        __int_type
        fetch_sub(__int_type __i, memory_order __m = memory_order_seq_cst)
!       { return __sync_mem_fetch_sub(&_M_i, __i, __m); }
  
        __int_type
        fetch_sub(__int_type __i,
  		memory_order __m = memory_order_seq_cst) volatile
!       { return __sync_mem_fetch_sub(&_M_i, __i, __m); }
  
        __int_type
        fetch_and(__int_type __i, memory_order __m = memory_order_seq_cst)
!       { return __sync_mem_fetch_and(&_M_i, __i, __m); }
  
        __int_type
        fetch_and(__int_type __i,
  		memory_order __m = memory_order_seq_cst) volatile
!       { return __sync_mem_fetch_and(&_M_i, __i, __m); }
  
        __int_type
        fetch_or(__int_type __i, memory_order __m = memory_order_seq_cst)
!       { return __sync_mem_fetch_or(&_M_i, __i, __m); }
  
        __int_type
        fetch_or(__int_type __i,
  	       memory_order __m = memory_order_seq_cst) volatile
!       { return __sync_mem_fetch_or(&_M_i, __i, __m); }
  
        __int_type
        fetch_xor(__int_type __i, memory_order __m = memory_order_seq_cst)
!       { return __sync_mem_fetch_xor(&_M_i, __i, __m); }
  
        __int_type
        fetch_xor(__int_type __i,
  		memory_order __m = memory_order_seq_cst) volatile
!       { return __sync_mem_fetch_xor(&_M_i, __i, __m); }
      };
  
  
*************** namespace __atomic2
*** 495,529 ****
  
        __pointer_type
        operator++()
!       { return fetch_add(1) + 1; }
  
        __pointer_type
        operator++() volatile
!       { return fetch_add(1) + 1; }
  
        __pointer_type
        operator--()
!       { return fetch_sub(1) -1; }
  
        __pointer_type
        operator--() volatile
!       { return fetch_sub(1) -1; }
  
        __pointer_type
        operator+=(ptrdiff_t __d)
!       { return fetch_add(__d) + __d; }
  
        __pointer_type
        operator+=(ptrdiff_t __d) volatile
!       { return fetch_add(__d) + __d; }
  
        __pointer_type
        operator-=(ptrdiff_t __d)
!       { return fetch_sub(__d) - __d; }
  
        __pointer_type
        operator-=(ptrdiff_t __d) volatile
!       { return fetch_sub(__d) - __d; }
  
        bool
        is_lock_free() const
--- 463,497 ----
  
        __pointer_type
        operator++()
!       { return __sync_mem_add_fetch(&_M_p, 1, memory_order_seq_cst); }
  
        __pointer_type
        operator++() volatile
!       { return __sync_mem_add_fetch(&_M_p, 1, memory_order_seq_cst); }
  
        __pointer_type
        operator--()
!       { return __sync_mem_sub_fetch(&_M_p, 1, memory_order_seq_cst); }
  
        __pointer_type
        operator--() volatile
!       { return __sync_mem_sub_fetch(&_M_p, 1, memory_order_seq_cst); }
  
        __pointer_type
        operator+=(ptrdiff_t __d)
!       { return __sync_mem_add_fetch(&_M_p, __d, memory_order_seq_cst); }
  
        __pointer_type
        operator+=(ptrdiff_t __d) volatile
!       { return __sync_mem_add_fetch(&_M_p, __d, memory_order_seq_cst); }
  
        __pointer_type
        operator-=(ptrdiff_t __d)
!       { return __sync_mem_sub_fetch(&_M_p, __d, memory_order_seq_cst); }
  
        __pointer_type
        operator-=(ptrdiff_t __d) volatile
!       { return __sync_mem_sub_fetch(&_M_p, __d, memory_order_seq_cst); }
  
        bool
        is_lock_free() const
*************** namespace __atomic2
*** 540,554 ****
  	__glibcxx_assert(__m != memory_order_acq_rel);
  	__glibcxx_assert(__m != memory_order_consume);
  
! 	if (__m == memory_order_relaxed)
! 	  _M_p = __p;
! 	else
! 	  {
! 	    // write_mem_barrier();
! 	    _M_p = __p;
! 	    if (__m == memory_order_seq_cst)
! 	      __sync_synchronize();
! 	  }
        }
  
        void
--- 508,514 ----
  	__glibcxx_assert(__m != memory_order_acq_rel);
  	__glibcxx_assert(__m != memory_order_consume);
  
! 	__sync_mem_store (&_M_p, __p, __m);
        }
  
        void
*************** namespace __atomic2
*** 559,573 ****
  	__glibcxx_assert(__m != memory_order_acq_rel);
  	__glibcxx_assert(__m != memory_order_consume);
  
! 	if (__m == memory_order_relaxed)
! 	  _M_p = __p;
! 	else
! 	  {
! 	    // write_mem_barrier();
! 	    _M_p = __p;
! 	    if (__m == memory_order_seq_cst)
! 	      __sync_synchronize();
! 	  }
        }
  
        __pointer_type
--- 519,525 ----
  	__glibcxx_assert(__m != memory_order_acq_rel);
  	__glibcxx_assert(__m != memory_order_consume);
  
! 	__sync_mem_store (&_M_p, __p, __m);
        }
  
        __pointer_type
*************** namespace __atomic2
*** 576,585 ****
  	__glibcxx_assert(__m != memory_order_release);
  	__glibcxx_assert(__m != memory_order_acq_rel);
  
! 	__sync_synchronize();
! 	__pointer_type __ret = _M_p;
! 	__sync_synchronize();
! 	return __ret;
        }
  
        __pointer_type
--- 528,534 ----
  	__glibcxx_assert(__m != memory_order_release);
  	__glibcxx_assert(__m != memory_order_acq_rel);
  
! 	return __sync_mem_load (const_cast <__pointer_type *>(&_M_p), __m);
        }
  
        __pointer_type
*************** namespace __atomic2
*** 588,604 ****
  	__glibcxx_assert(__m != memory_order_release);
  	__glibcxx_assert(__m != memory_order_acq_rel);
  
! 	__sync_synchronize();
! 	__pointer_type __ret = _M_p;
! 	__sync_synchronize();
! 	return __ret;
        }
  
        __pointer_type
        exchange(__pointer_type __p, memory_order __m = memory_order_seq_cst)
        {
! 	// XXX built-in assumes memory_order_acquire.
! 	return __sync_lock_test_and_set(&_M_p, __p);
        }
  
  
--- 537,549 ----
  	__glibcxx_assert(__m != memory_order_release);
  	__glibcxx_assert(__m != memory_order_acq_rel);
  
! 	return __sync_mem_load (const_cast <__pointer_type *>(&_M_p), __m);
        }
  
        __pointer_type
        exchange(__pointer_type __p, memory_order __m = memory_order_seq_cst)
        {
! 	return __sync_mem_exchange (&_M_p, __p, __m);
        }
  
  
*************** namespace __atomic2
*** 606,613 ****
        exchange(__pointer_type __p,
  	       memory_order __m = memory_order_seq_cst) volatile
        {
! 	// XXX built-in assumes memory_order_acquire.
! 	return __sync_lock_test_and_set(&_M_p, __p);
        }
  
        bool
--- 551,557 ----
        exchange(__pointer_type __p,
  	       memory_order __m = memory_order_seq_cst) volatile
        {
! 	return __sync_mem_exchange (&_M_p, __p, __m);
        }
  
        bool
*************** namespace __atomic2
*** 619,624 ****
--- 563,569 ----
  	__glibcxx_assert(__m2 <= __m1);
  
  	__pointer_type __p1o = __p1;
+ 	// Compare_and_swap is a full barrier already.
  	__pointer_type __p1n = __sync_val_compare_and_swap(&_M_p, __p1o, __p2);
  
  	// Assume extra stores (of same value) allowed in true case.
*************** namespace __atomic2
*** 635,640 ****
--- 580,586 ----
  	__glibcxx_assert(__m2 <= __m1);
  
  	__pointer_type __p1o = __p1;
+ 	// Compare_and_swap is a full barrier already.
  	__pointer_type __p1n = __sync_val_compare_and_swap(&_M_p, __p1o, __p2);
  
  	// Assume extra stores (of same value) allowed in true case.
*************** namespace __atomic2
*** 644,664 ****
  
        __pointer_type
        fetch_add(ptrdiff_t __d, memory_order __m = memory_order_seq_cst)
!       { return __sync_fetch_and_add(&_M_p, __d); }
  
        __pointer_type
        fetch_add(ptrdiff_t __d,
  		memory_order __m = memory_order_seq_cst) volatile
!       { return __sync_fetch_and_add(&_M_p, __d); }
  
        __pointer_type
        fetch_sub(ptrdiff_t __d, memory_order __m = memory_order_seq_cst)
!       { return __sync_fetch_and_sub(&_M_p, __d); }
  
        __pointer_type
        fetch_sub(ptrdiff_t __d,
  		memory_order __m = memory_order_seq_cst) volatile
!       { return __sync_fetch_and_sub(&_M_p, __d); }
      };
  
  } // namespace __atomic2
--- 590,610 ----
  
        __pointer_type
        fetch_add(ptrdiff_t __d, memory_order __m = memory_order_seq_cst)
!       { return __sync_mem_fetch_add(&_M_p, __d, __m); }
  
        __pointer_type
        fetch_add(ptrdiff_t __d,
  		memory_order __m = memory_order_seq_cst) volatile
!       { return __sync_mem_fetch_add(&_M_p, __d, __m); }
  
        __pointer_type
        fetch_sub(ptrdiff_t __d, memory_order __m = memory_order_seq_cst)
!       { return __sync_mem_fetch_sub(&_M_p, __d, __m); }
  
        __pointer_type
        fetch_sub(ptrdiff_t __d,
  		memory_order __m = memory_order_seq_cst) volatile
!       { return __sync_mem_fetch_sub(&_M_p, __d, __m); }
      };
  
  } // namespace __atomic2


* Re: [cxx-mem-model] Atomic C++ header file changes
  2011-08-17 17:54 [cxx-mem-model] Atomic C++ header file changes Andrew MacLeod
@ 2011-08-19  4:05 ` Richard Henderson
  2011-08-23 23:24   ` Andrew MacLeod
  2011-08-19 10:17 ` Torvald Riegel
  2011-08-24 17:25 ` Andrew MacLeod
  2 siblings, 1 reply; 11+ messages in thread
From: Richard Henderson @ 2011-08-19  4:05 UTC (permalink / raw)
  To: Andrew MacLeod; +Cc: gcc-patches

On 08/17/2011 08:39 AM, Andrew MacLeod wrote:
> ! 	return __sync_mem_load (const_cast <__int_type *>(&_M_i), __m);

This suggests the builtin is incorrectly defined.
It ought to be const itself.
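
Concretely, something along these lines (a sketch of the intended
prototype; the real declaration is generated from sync-builtins.def):

  /* Load only reads the object, so the pointer operand should be
     const volatile qualified -- then no const_cast is needed.  */
  int __sync_mem_load_4 (const volatile void *__ptr, int __memmodel);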


r~


* Re: [cxx-mem-model] Atomic C++ header file changes
  2011-08-17 17:54 [cxx-mem-model] Atomic C++ header file changes Andrew MacLeod
  2011-08-19  4:05 ` Richard Henderson
@ 2011-08-19 10:17 ` Torvald Riegel
  2011-08-19 13:22   ` Andrew MacLeod
  2011-08-24 17:25 ` Andrew MacLeod
  2 siblings, 1 reply; 11+ messages in thread
From: Torvald Riegel @ 2011-08-19 10:17 UTC (permalink / raw)
  To: Andrew MacLeod; +Cc: gcc-patches

On Wed, 2011-08-17 at 11:39 -0400, Andrew MacLeod wrote:
> Turns out, C++ will allow you to specify the memory model as a variable
> of type enum memory_order... WTF?  I would expect that to be pretty
> uncommon, and to get that right we'd need a switch statement that calls
> the appropriate __sync_mem* routine with the corresponding constant
> parameter.
> 
> That would be quite ugly, and you get what you deserve if you do that.
> I changed the builtins so that if you don't specify a compile-time
> constant in the memory model parameter, it will simply default to
> __SYNC_MEM_SEQ_CST, which will always be safe.  That is standards
> compliant (verified), and if anyone is really unhappy about it, then the
> C++ headers can be really uglified by adding a bunch of switch
> statements to handle this twisted case.

IMHO this behavior should be documented so that users will be aware of
it, and it would be best if this raised a warning. Note that I also
cannot see any reason why a programmer might want to make barriers
runtime-configurable, but silently adding overhead (perhaps the
parameter was supposed to be a constant, but wasn't?) can lead to more
confusion than necessary.
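
For example (a hypothetical snippet):

  int __i;
  memory_order __m = memory_order_relaxed;  // perhaps meant to be a
                                            // literal constant
  __sync_mem_store (&__i, 1, __m);          // silently becomes SEQ_CST,
                                            // with no diagnostic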

Torvald


* Re: [cxx-mem-model] Atomic C++ header file changes
  2011-08-19 10:17 ` Torvald Riegel
@ 2011-08-19 13:22   ` Andrew MacLeod
  2011-08-19 19:12     ` Torvald Riegel
  0 siblings, 1 reply; 11+ messages in thread
From: Andrew MacLeod @ 2011-08-19 13:22 UTC (permalink / raw)
  To: Torvald Riegel; +Cc: gcc-patches

On 08/19/2011 04:57 AM, Torvald Riegel wrote:
> On Wed, 2011-08-17 at 11:39 -0400, Andrew MacLeod wrote:
>> That would be quite ugly, and you get what you deserve if you do that.
>> I changed the builtins so that if you don't specify a compile-time
>> constant in the memory model parameter, it will simply default to
>> __SYNC_MEM_SEQ_CST, which will always be safe.  That is standards
>> compliant (verified), and if anyone is really unhappy about it, then the
>> C++ headers can be really uglified by adding a bunch of switch
>> statements to handle this twisted case.
> IMHO this behavior should be documented so that users will be aware of
> it, and it would be best if this raised a warning. Note that I also
> cannot see any reason why a programmer might want to make barriers
> runtime-configurable, but silently adding overhead (perhaps the
> parameter was supposed to be a constant, but wasn't?) can lead to more
> confusion than necessary.
>

The problem with issuing a warning is that anytime the compiler creates
a C++ atomic class and you use a method with a memory order, it usually
leaves an externally callable method that has to take a runtime
value... so you'd see the warning on basically every compilation...
which in turn defeats the purpose of the warning.
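
A sketch of the situation (f is a hypothetical caller):

  #include <atomic>

  // Even when every call site passes a constant, the compiler still
  // emits an externally callable copy of load(), and inside that copy
  // __m arrives as a runtime value -- so the builtin would always warn.
  int f (std::atomic<int>& __a, std::memory_order __m)
  {
    return __a.load (__m);
  }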

Andrew



* Re: [cxx-mem-model] Atomic C++ header file changes
  2011-08-19 13:22   ` Andrew MacLeod
@ 2011-08-19 19:12     ` Torvald Riegel
  2011-08-19 21:11       ` Andrew MacLeod
  0 siblings, 1 reply; 11+ messages in thread
From: Torvald Riegel @ 2011-08-19 19:12 UTC (permalink / raw)
  To: Andrew MacLeod; +Cc: gcc-patches

On Fri, 2011-08-19 at 08:44 -0400, Andrew MacLeod wrote:
> On 08/19/2011 04:57 AM, Torvald Riegel wrote:
> > On Wed, 2011-08-17 at 11:39 -0400, Andrew MacLeod wrote:
> >> That would be quite ugly, and you get what you deserve if you do that.
> >> I changed the builtins so that if you don't specify a compile-time
> >> constant in the memory model parameter, it will simply default to
> >> __SYNC_MEM_SEQ_CST, which will always be safe.  That is standards
> >> compliant (verified), and if anyone is really unhappy about it, then the
> >> C++ headers can be really uglified by adding a bunch of switch
> >> statements to handle this twisted case.
> > IMHO this behavior should be documented so that users will be aware of
> > it, and it would be best if this raised a warning. Note that I also
> > cannot see any reason why a programmer might want to make barriers
> > runtime-configurable, but silently adding overhead (perhaps the
> > parameter was supposed to be a constant, but wasn't?) can lead to more
> > confusion than necessary.
> >
> 
> The problem with issuing a warning is that anytime the compiler creates
> a C++ atomic class and you use a method with a memory order, it usually
> leaves an externally callable method that has to take a runtime
> value... so you'd see the warning on basically every compilation...
> which in turn defeats the purpose of the warning.

Hmm. I would have assumed that the warning check would apply to actual
calls, not to the instantiations. But that would probably require
special handling of calls to the atomics class for all the integers and
pointers (can atomic<T*> be handled as one thing?). So, if that's too
much work, at least document the constraint somewhere?

Torvald


* Re: [cxx-mem-model] Atomic C++ header file changes
  2011-08-19 19:12     ` Torvald Riegel
@ 2011-08-19 21:11       ` Andrew MacLeod
  0 siblings, 0 replies; 11+ messages in thread
From: Andrew MacLeod @ 2011-08-19 21:11 UTC (permalink / raw)
  To: Torvald Riegel; +Cc: gcc-patches

On 08/19/2011 12:48 PM, Torvald Riegel wrote:
>
>> The problem with issuing a warning is that anytime the compiler creates
>> a C++ atomic class and you use a method with a memory order, it usually
>> leaves an externally callable method that has to take a runtime
>> value... so you'd see the warning on basically every compilation...
>> which in turn defeats the purpose of the warning.
>>      
> Hmm. I would have assumed that the warning check would apply to actual
> calls, not to the instantiations. But that would probably require
> special handling of calls to the atomics class for all the integers and
> pointers (can atomic<T*> be handled as one thing?). So, if that's too
> much work, at least document the constraint somewhere?
>
>    

I'd definitely document the constraint.

To be honest, I think it's a pretty useless thing, bordering on moronic.
The whole point of the memory model is to be able to generate more
efficient code when you don't need SEQ_CST and really know what you are
doing.

Even if you *DO* want to make that kind of call, you have to expect
the overhead of a runtime library call.  And if you are using SEQ_CST
mode, it's going to be that much slower again due to the call.  I think
inlining it as SEQ_CST will always produce smaller code, and I'd be
surprised if it *ever* became a performance issue.  And I do mean *ever*.

Andrew


* Re: [cxx-mem-model] Atomic C++ header file changes
  2011-08-19  4:05 ` Richard Henderson
@ 2011-08-23 23:24   ` Andrew MacLeod
  2011-08-24  1:07     ` Richard Henderson
  0 siblings, 1 reply; 11+ messages in thread
From: Andrew MacLeod @ 2011-08-23 23:24 UTC (permalink / raw)
  To: Richard Henderson; +Cc: gcc-patches

[-- Attachment #1: Type: text/plain, Size: 351 bytes --]

On 08/18/2011 06:33 PM, Richard Henderson wrote:
> On 08/17/2011 08:39 AM, Andrew MacLeod wrote:
>> ! 	return __sync_mem_load (const_cast<__int_type *>(&_M_i), __m);
> This suggests the builtin is incorrectly defined.
> It ought to be const itself.
>
Err, right.

This patch declares the function properly, so the casts are no longer
needed.
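
Concretely, for the integer load (both lines taken from the two
revisions of atomic_2.h):

  // before: the builtin took a plain volatile pointer, forcing a cast
  return __sync_mem_load (const_cast <__int_type *>(&_M_i), __m);

  // after: with the const volatile prototype, no cast is needed
  return __sync_mem_load (&_M_i, __m);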

Andrew

[-- Attachment #2: const.diff --]
[-- Type: text/plain, Size: 7099 bytes --]


	* builtin-types.def (BT_CONST_VOLATILE_PTR): New primitive type.
	(BT_FN_I{1,2,4,8,16}_VPTR_INT): Change prototype to be const.
	* sync-builtins.def (BUILT_IN_SYNC_MEM_LOAD_*): Change to be const.

	* fortran/types.def (BUILT_IN_SYNC_MEM_LOAD_*): Change to be const.

Index: builtin-types.def
===================================================================
*** builtin-types.def	(revision 177737)
--- builtin-types.def	(working copy)
*************** DEF_PRIMITIVE_TYPE (BT_VOLATILE_PTR,
*** 95,100 ****
--- 95,104 ----
  		    build_pointer_type
  		     (build_qualified_type (void_type_node,
  					    TYPE_QUAL_VOLATILE)))
+ DEF_PRIMITIVE_TYPE (BT_CONST_VOLATILE_PTR,
+ 		    build_pointer_type
+ 		     (build_qualified_type (void_type_node,
+ 					  TYPE_QUAL_VOLATILE|TYPE_QUAL_CONST)))
  DEF_PRIMITIVE_TYPE (BT_PTRMODE, (*lang_hooks.types.type_for_mode)(ptr_mode, 0))
  DEF_PRIMITIVE_TYPE (BT_INT_PTR, integer_ptr_type_node)
  DEF_PRIMITIVE_TYPE (BT_FLOAT_PTR, float_ptr_type_node)
*************** DEF_FUNCTION_TYPE_2 (BT_FN_BOOL_LONGPTR_
*** 315,325 ****
  		     BT_BOOL, BT_PTR_LONG, BT_PTR_LONG)
  DEF_FUNCTION_TYPE_2 (BT_FN_BOOL_ULONGLONGPTR_ULONGLONGPTR,
  		     BT_BOOL, BT_PTR_ULONGLONG, BT_PTR_ULONGLONG)
! DEF_FUNCTION_TYPE_2 (BT_FN_I1_VPTR_INT, BT_I1, BT_VOLATILE_PTR, BT_INT)
! DEF_FUNCTION_TYPE_2 (BT_FN_I2_VPTR_INT, BT_I2, BT_VOLATILE_PTR, BT_INT)
! DEF_FUNCTION_TYPE_2 (BT_FN_I4_VPTR_INT, BT_I4, BT_VOLATILE_PTR, BT_INT)
! DEF_FUNCTION_TYPE_2 (BT_FN_I8_VPTR_INT, BT_I8, BT_VOLATILE_PTR, BT_INT)
! DEF_FUNCTION_TYPE_2 (BT_FN_I16_VPTR_INT, BT_I16, BT_VOLATILE_PTR, BT_INT)
  DEF_FUNCTION_TYPE_2 (BT_FN_VOID_VPTR_INT, BT_VOID, BT_VOLATILE_PTR, BT_INT)
  DEF_FUNCTION_TYPE_2 (BT_FN_BOOL_VPTR_INT, BT_BOOL, BT_VOLATILE_PTR, BT_INT)
  
--- 319,334 ----
  		     BT_BOOL, BT_PTR_LONG, BT_PTR_LONG)
  DEF_FUNCTION_TYPE_2 (BT_FN_BOOL_ULONGLONGPTR_ULONGLONGPTR,
  		     BT_BOOL, BT_PTR_ULONGLONG, BT_PTR_ULONGLONG)
! DEF_FUNCTION_TYPE_2 (BT_FN_I1_CONST_VPTR_INT, BT_I1, BT_CONST_VOLATILE_PTR,
! 		     BT_INT)
! DEF_FUNCTION_TYPE_2 (BT_FN_I2_CONST_VPTR_INT, BT_I2, BT_CONST_VOLATILE_PTR,
! 		     BT_INT)
! DEF_FUNCTION_TYPE_2 (BT_FN_I4_CONST_VPTR_INT, BT_I4, BT_CONST_VOLATILE_PTR,
! 		     BT_INT)
! DEF_FUNCTION_TYPE_2 (BT_FN_I8_CONST_VPTR_INT, BT_I8, BT_CONST_VOLATILE_PTR,
! 		     BT_INT)
! DEF_FUNCTION_TYPE_2 (BT_FN_I16_CONST_VPTR_INT, BT_I16, BT_CONST_VOLATILE_PTR,
! 		     BT_INT)
  DEF_FUNCTION_TYPE_2 (BT_FN_VOID_VPTR_INT, BT_VOID, BT_VOLATILE_PTR, BT_INT)
  DEF_FUNCTION_TYPE_2 (BT_FN_BOOL_VPTR_INT, BT_BOOL, BT_VOLATILE_PTR, BT_INT)
  
Index: sync-builtins.def
===================================================================
*** sync-builtins.def	(revision 177737)
--- sync-builtins.def	(working copy)
*************** DEF_SYNC_BUILTIN (BUILT_IN_SYNC_MEM_LOAD
*** 283,301 ****
  		  BT_FN_VOID_VAR, ATTR_NOTHROW_LEAF_LIST)
  DEF_SYNC_BUILTIN (BUILT_IN_SYNC_MEM_LOAD_1,
  		  "__sync_mem_load_1",
! 		  BT_FN_I1_VPTR_INT, ATTR_NOTHROW_LEAF_LIST)
  DEF_SYNC_BUILTIN (BUILT_IN_SYNC_MEM_LOAD_2,
  		  "__sync_mem_load_2",
! 		  BT_FN_I2_VPTR_INT, ATTR_NOTHROW_LEAF_LIST)
  DEF_SYNC_BUILTIN (BUILT_IN_SYNC_MEM_LOAD_4,
  		  "__sync_mem_load_4",
! 		  BT_FN_I4_VPTR_INT, ATTR_NOTHROW_LEAF_LIST)
  DEF_SYNC_BUILTIN (BUILT_IN_SYNC_MEM_LOAD_8,
  		  "__sync_mem_load_8",
! 		  BT_FN_I8_VPTR_INT, ATTR_NOTHROW_LEAF_LIST)
  DEF_SYNC_BUILTIN (BUILT_IN_SYNC_MEM_LOAD_16,
  		  "__sync_mem_load_16",
! 		  BT_FN_I16_VPTR_INT, ATTR_NOTHROW_LEAF_LIST)
  
  DEF_SYNC_BUILTIN (BUILT_IN_SYNC_MEM_COMPARE_EXCHANGE_N,
  		  "__sync_mem_compare_exchange",
--- 283,301 ----
  		  BT_FN_VOID_VAR, ATTR_NOTHROW_LEAF_LIST)
  DEF_SYNC_BUILTIN (BUILT_IN_SYNC_MEM_LOAD_1,
  		  "__sync_mem_load_1",
! 		  BT_FN_I1_CONST_VPTR_INT, ATTR_NOTHROW_LEAF_LIST)
  DEF_SYNC_BUILTIN (BUILT_IN_SYNC_MEM_LOAD_2,
  		  "__sync_mem_load_2",
! 		  BT_FN_I2_CONST_VPTR_INT, ATTR_NOTHROW_LEAF_LIST)
  DEF_SYNC_BUILTIN (BUILT_IN_SYNC_MEM_LOAD_4,
  		  "__sync_mem_load_4",
! 		  BT_FN_I4_CONST_VPTR_INT, ATTR_NOTHROW_LEAF_LIST)
  DEF_SYNC_BUILTIN (BUILT_IN_SYNC_MEM_LOAD_8,
  		  "__sync_mem_load_8",
! 		  BT_FN_I8_CONST_VPTR_INT, ATTR_NOTHROW_LEAF_LIST)
  DEF_SYNC_BUILTIN (BUILT_IN_SYNC_MEM_LOAD_16,
  		  "__sync_mem_load_16",
! 		  BT_FN_I16_CONST_VPTR_INT, ATTR_NOTHROW_LEAF_LIST)
  
  DEF_SYNC_BUILTIN (BUILT_IN_SYNC_MEM_COMPARE_EXCHANGE_N,
  		  "__sync_mem_compare_exchange",
Index: fortran/types.def
===================================================================
*** fortran/types.def	(revision 177737)
--- fortran/types.def	(working copy)
*************** DEF_PRIMITIVE_TYPE (BT_VOLATILE_PTR,
*** 70,76 ****
                      build_pointer_type
                       (build_qualified_type (void_type_node,
                                              TYPE_QUAL_VOLATILE)))
! 
  DEF_POINTER_TYPE (BT_PTR_LONG, BT_LONG)
  DEF_POINTER_TYPE (BT_PTR_ULONGLONG, BT_ULONGLONG)
  DEF_POINTER_TYPE (BT_PTR_PTR, BT_PTR)
--- 70,79 ----
                      build_pointer_type
                       (build_qualified_type (void_type_node,
                                              TYPE_QUAL_VOLATILE)))
! DEF_PRIMITIVE_TYPE (BT_CONST_VOLATILE_PTR,
! 		    build_pointer_type
! 		     (build_qualified_type (void_type_node,
! 					  TYPE_QUAL_VOLATILE|TYPE_QUAL_CONST)))
  DEF_POINTER_TYPE (BT_PTR_LONG, BT_LONG)
  DEF_POINTER_TYPE (BT_PTR_ULONGLONG, BT_ULONGLONG)
  DEF_POINTER_TYPE (BT_PTR_PTR, BT_PTR)
*************** DEF_FUNCTION_TYPE_2 (BT_FN_I4_VPTR_I4, B
*** 100,110 ****
  DEF_FUNCTION_TYPE_2 (BT_FN_I8_VPTR_I8, BT_I8, BT_VOLATILE_PTR, BT_I8)
  DEF_FUNCTION_TYPE_2 (BT_FN_I16_VPTR_I16, BT_I16, BT_VOLATILE_PTR, BT_I16)
  DEF_FUNCTION_TYPE_2 (BT_FN_VOID_PTR_PTR, BT_VOID, BT_PTR, BT_PTR)
! DEF_FUNCTION_TYPE_2 (BT_FN_I1_VPTR_INT, BT_I1, BT_VOLATILE_PTR, BT_INT)
! DEF_FUNCTION_TYPE_2 (BT_FN_I2_VPTR_INT, BT_I2, BT_VOLATILE_PTR, BT_INT)
! DEF_FUNCTION_TYPE_2 (BT_FN_I4_VPTR_INT, BT_I4, BT_VOLATILE_PTR, BT_INT)
! DEF_FUNCTION_TYPE_2 (BT_FN_I8_VPTR_INT, BT_I8, BT_VOLATILE_PTR, BT_INT)
! DEF_FUNCTION_TYPE_2 (BT_FN_I16_VPTR_INT, BT_I16, BT_VOLATILE_PTR, BT_INT)
  DEF_FUNCTION_TYPE_2 (BT_FN_VOID_VPTR_INT, BT_VOID, BT_VOLATILE_PTR, BT_INT)
  DEF_FUNCTION_TYPE_2 (BT_FN_BOOL_VPTR_INT, BT_BOOL, BT_VOLATILE_PTR, BT_INT)
  
--- 103,118 ----
  DEF_FUNCTION_TYPE_2 (BT_FN_I8_VPTR_I8, BT_I8, BT_VOLATILE_PTR, BT_I8)
  DEF_FUNCTION_TYPE_2 (BT_FN_I16_VPTR_I16, BT_I16, BT_VOLATILE_PTR, BT_I16)
  DEF_FUNCTION_TYPE_2 (BT_FN_VOID_PTR_PTR, BT_VOID, BT_PTR, BT_PTR)
! DEF_FUNCTION_TYPE_2 (BT_FN_I1_CONST_VPTR_INT, BT_I1, BT_CONST_VOLATILE_PTR,
! 		     BT_INT)
! DEF_FUNCTION_TYPE_2 (BT_FN_I2_CONST_VPTR_INT, BT_I2, BT_CONST_VOLATILE_PTR,
! 		     BT_INT)
! DEF_FUNCTION_TYPE_2 (BT_FN_I4_CONST_VPTR_INT, BT_I4, BT_CONST_VOLATILE_PTR,
! 		     BT_INT)
! DEF_FUNCTION_TYPE_2 (BT_FN_I8_CONST_VPTR_INT, BT_I8, BT_CONST_VOLATILE_PTR,
! 		     BT_INT)
! DEF_FUNCTION_TYPE_2 (BT_FN_I16_CONST_VPTR_INT, BT_I16, BT_CONST_VOLATILE_PTR,
! 		     BT_INT)
  DEF_FUNCTION_TYPE_2 (BT_FN_VOID_VPTR_INT, BT_VOID, BT_VOLATILE_PTR, BT_INT)
  DEF_FUNCTION_TYPE_2 (BT_FN_BOOL_VPTR_INT, BT_BOOL, BT_VOLATILE_PTR, BT_INT)
  


* Re: [cxx-mem-model] Atomic C++ header file changes
  2011-08-23 23:24   ` Andrew MacLeod
@ 2011-08-24  1:07     ` Richard Henderson
  2011-08-24  3:09       ` Andrew MacLeod
  0 siblings, 1 reply; 11+ messages in thread
From: Richard Henderson @ 2011-08-24  1:07 UTC (permalink / raw)
  To: Andrew MacLeod; +Cc: gcc-patches

On 08/23/2011 03:13 PM, Andrew MacLeod wrote:
> ! DEF_FUNCTION_TYPE_2 (BT_FN_I1_CONST_VPTR_INT, BT_I1, BT_CONST_VOLATILE_PTR,
> ! 		     BT_INT)

Given that VPTR means "volatile PTR", I suggest you use CVPTR instead of CONST_VPTR.

Otherwise ok.


r~


* Re: [cxx-mem-model] Atomic C++ header file changes
  2011-08-24  1:07     ` Richard Henderson
@ 2011-08-24  3:09       ` Andrew MacLeod
  2011-08-24  6:41         ` Richard Henderson
  0 siblings, 1 reply; 11+ messages in thread
From: Andrew MacLeod @ 2011-08-24  3:09 UTC (permalink / raw)
  To: Richard Henderson; +Cc: gcc-patches

On 08/23/2011 06:20 PM, Richard Henderson wrote:
> On 08/23/2011 03:13 PM, Andrew MacLeod wrote:
>> ! DEF_FUNCTION_TYPE_2 (BT_FN_I1_CONST_VPTR_INT, BT_I1, BT_CONST_VOLATILE_PTR,
>> ! 		     BT_INT)
> Given that VPTR means "volatile PTR", I suggest you use CVPTR instead of CONST_VPTR.
>
Hmm, well the reason I spelled it out is that const was spelled out
*everywhere* else... i.e.:

DEF_FUNCTION_TYPE_2 (BT_FN_INT_CONST_STRING_PTR_CONST_STRING, BT_INT, 
BT_CONST_STRING, BT_PTR_CONST_STRING)

Andrew


* Re: [cxx-mem-model] Atomic C++ header file changes
  2011-08-24  3:09       ` Andrew MacLeod
@ 2011-08-24  6:41         ` Richard Henderson
  0 siblings, 0 replies; 11+ messages in thread
From: Richard Henderson @ 2011-08-24  6:41 UTC (permalink / raw)
  To: Andrew MacLeod; +Cc: gcc-patches

On 08/23/2011 03:24 PM, Andrew MacLeod wrote:
> On 08/23/2011 06:20 PM, Richard Henderson wrote:
>> On 08/23/2011 03:13 PM, Andrew MacLeod wrote:
>>> ! DEF_FUNCTION_TYPE_2 (BT_FN_I1_CONST_VPTR_INT, BT_I1, BT_CONST_VOLATILE_PTR,
>>> !              BT_INT)
>> Given that VPTR means "volatile PTR", I suggest you use CVPTR instead of CONST_VPTR.
>>
> Hmm, well the reason I spelled it out is that const was spelled out *everywhere* else... i.e.:
> 
> DEF_FUNCTION_TYPE_2 (BT_FN_INT_CONST_STRING_PTR_CONST_STRING, BT_INT, BT_CONST_STRING, BT_PTR_CONST_STRING)

Ah, ok then.


r~


* Re: [cxx-mem-model] Atomic C++ header file changes
  2011-08-17 17:54 [cxx-mem-model] Atomic C++ header file changes Andrew MacLeod
  2011-08-19  4:05 ` Richard Henderson
  2011-08-19 10:17 ` Torvald Riegel
@ 2011-08-24 17:25 ` Andrew MacLeod
  2 siblings, 0 replies; 11+ messages in thread
From: Andrew MacLeod @ 2011-08-24 17:25 UTC (permalink / raw)
  To: gcc-patches

[-- Attachment #1: Type: text/plain, Size: 716 bytes --]

On 08/17/2011 11:39 AM, Andrew MacLeod wrote:
> Next step: change the C++ header files to use the new __sync
> builtins.  Pretty straightforward.
>
> Mostly.
>

A previous patch changed and documented the runtime behaviour, which is
transparent to these template changes.  Another patch fixed the
const_cast requirement, so the builtins are all the right type now.

This is a straightforward replacement of the old __sync routines and
wrapper code with calls to the new __sync_mem routines in atomic_2.h.

At some point in the future we'll figure out what to do with weak/strong
compare and swap... until then, we leave the existing strong CAS code.
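
For reference, the retained pattern, condensed from the hunks below:

  __int_type __i1o = __i1;
  // Compare_and_swap is a full barrier already.
  __int_type __i1n = __sync_val_compare_and_swap (&_M_i, __i1o, __i2);

  // Assume extra stores (of same value) allowed in true case.
  __i1 = __i1n;            // report the value actually observed
  return __i1o == __i1n;   // true iff the swap took place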

Bootstrapped with no regressions on x86_64-unknown-linux-gnu.

Andrew



[-- Attachment #2: c++.diff --]
[-- Type: text/plain, Size: 19697 bytes --]


	* libstdc++-v3/include/bits/atomic_2.h (__atomic2): Use new
	__sync_mem routines.


Index: include/bits/atomic_2.h
===================================================================
*** include/bits/atomic_2.h	(revision 177737)
--- include/bits/atomic_2.h	(working copy)
*************** namespace __atomic2
*** 60,78 ****
      bool
      test_and_set(memory_order __m = memory_order_seq_cst)
      {
!       // Redundant synchronize if built-in for lock is a full barrier.
!       if (__m != memory_order_acquire && __m != memory_order_acq_rel)
! 	__sync_synchronize();
!       return __sync_lock_test_and_set(&_M_i, 1);
      }
  
      bool
      test_and_set(memory_order __m = memory_order_seq_cst) volatile
      {
!       // Redundant synchronize if built-in for lock is a full barrier.
!       if (__m != memory_order_acquire && __m != memory_order_acq_rel)
! 	__sync_synchronize();
!       return __sync_lock_test_and_set(&_M_i, 1);
      }
  
      void
--- 60,72 ----
      bool
      test_and_set(memory_order __m = memory_order_seq_cst)
      {
!       return __sync_mem_flag_test_and_set(&_M_i, __m);
      }
  
      bool
      test_and_set(memory_order __m = memory_order_seq_cst) volatile
      {
!       return __sync_mem_flag_test_and_set(&_M_i, __m);
      }
  
      void
*************** namespace __atomic2
*** 82,90 ****
        __glibcxx_assert(__m != memory_order_acquire);
        __glibcxx_assert(__m != memory_order_acq_rel);
  
!       __sync_lock_release(&_M_i);
!       if (__m != memory_order_acquire && __m != memory_order_acq_rel)
! 	__sync_synchronize();
      }
  
      void
--- 76,82 ----
        __glibcxx_assert(__m != memory_order_acquire);
        __glibcxx_assert(__m != memory_order_acq_rel);
  
!       __sync_mem_flag_clear(&_M_i, __m);
      }
  
      void
*************** namespace __atomic2
*** 94,102 ****
        __glibcxx_assert(__m != memory_order_acquire);
        __glibcxx_assert(__m != memory_order_acq_rel);
  
!       __sync_lock_release(&_M_i);
!       if (__m != memory_order_acquire && __m != memory_order_acq_rel)
! 	__sync_synchronize();
      }
    };
  
--- 86,92 ----
        __glibcxx_assert(__m != memory_order_acquire);
        __glibcxx_assert(__m != memory_order_acq_rel);
  
!       __sync_mem_flag_clear(&_M_i, __m);
      }
    };
  
*************** namespace __atomic2
*** 180,238 ****
  
        __int_type
        operator++()
!       { return __sync_add_and_fetch(&_M_i, 1); }
  
        __int_type
        operator++() volatile
!       { return __sync_add_and_fetch(&_M_i, 1); }
  
        __int_type
        operator--()
!       { return __sync_sub_and_fetch(&_M_i, 1); }
  
        __int_type
        operator--() volatile
!       { return __sync_sub_and_fetch(&_M_i, 1); }
  
        __int_type
        operator+=(__int_type __i)
!       { return __sync_add_and_fetch(&_M_i, __i); }
  
        __int_type
        operator+=(__int_type __i) volatile
!       { return __sync_add_and_fetch(&_M_i, __i); }
  
        __int_type
        operator-=(__int_type __i)
!       { return __sync_sub_and_fetch(&_M_i, __i); }
  
        __int_type
        operator-=(__int_type __i) volatile
!       { return __sync_sub_and_fetch(&_M_i, __i); }
  
        __int_type
        operator&=(__int_type __i)
!       { return __sync_and_and_fetch(&_M_i, __i); }
  
        __int_type
        operator&=(__int_type __i) volatile
!       { return __sync_and_and_fetch(&_M_i, __i); }
  
        __int_type
        operator|=(__int_type __i)
!       { return __sync_or_and_fetch(&_M_i, __i); }
  
        __int_type
        operator|=(__int_type __i) volatile
!       { return __sync_or_and_fetch(&_M_i, __i); }
  
        __int_type
        operator^=(__int_type __i)
!       { return __sync_xor_and_fetch(&_M_i, __i); }
  
        __int_type
        operator^=(__int_type __i) volatile
!       { return __sync_xor_and_fetch(&_M_i, __i); }
  
        bool
        is_lock_free() const
--- 170,228 ----
  
        __int_type
        operator++()
!       { return __sync_mem_add_fetch(&_M_i, 1, memory_order_seq_cst); }
  
        __int_type
        operator++() volatile
!       { return __sync_mem_add_fetch(&_M_i, 1, memory_order_seq_cst); }
  
        __int_type
        operator--()
!       { return __sync_mem_sub_fetch(&_M_i, 1, memory_order_seq_cst); }
  
        __int_type
        operator--() volatile
!       { return __sync_mem_sub_fetch(&_M_i, 1, memory_order_seq_cst); }
  
        __int_type
        operator+=(__int_type __i)
!       { return __sync_mem_add_fetch(&_M_i, __i, memory_order_seq_cst); }
  
        __int_type
        operator+=(__int_type __i) volatile
!       { return __sync_mem_add_fetch(&_M_i, __i, memory_order_seq_cst); }
  
        __int_type
        operator-=(__int_type __i)
!       { return __sync_mem_sub_fetch(&_M_i, __i, memory_order_seq_cst); }
  
        __int_type
        operator-=(__int_type __i) volatile
!       { return __sync_mem_sub_fetch(&_M_i, __i, memory_order_seq_cst); }
  
        __int_type
        operator&=(__int_type __i)
!       { return __sync_mem_and_fetch(&_M_i, __i, memory_order_seq_cst); }
  
        __int_type
        operator&=(__int_type __i) volatile
!       { return __sync_mem_and_fetch(&_M_i, __i, memory_order_seq_cst); }
  
        __int_type
        operator|=(__int_type __i)
!       { return __sync_mem_or_fetch(&_M_i, __i, memory_order_seq_cst); }
  
        __int_type
        operator|=(__int_type __i) volatile
!       { return __sync_mem_or_fetch(&_M_i, __i, memory_order_seq_cst); }
  
        __int_type
        operator^=(__int_type __i)
!       { return __sync_mem_xor_fetch(&_M_i, __i, memory_order_seq_cst); }
  
        __int_type
        operator^=(__int_type __i) volatile
!       { return __sync_mem_xor_fetch(&_M_i, __i, memory_order_seq_cst); }
  
        bool
        is_lock_free() const
*************** namespace __atomic2
*** 249,263 ****
  	__glibcxx_assert(__m != memory_order_acq_rel);
  	__glibcxx_assert(__m != memory_order_consume);
  
! 	if (__m == memory_order_relaxed)
! 	  _M_i = __i;
! 	else
! 	  {
! 	    // write_mem_barrier();
! 	    _M_i = __i;
! 	    if (__m == memory_order_seq_cst)
! 	      __sync_synchronize();
! 	  }
        }
  
        void
--- 239,245 ----
  	__glibcxx_assert(__m != memory_order_acq_rel);
  	__glibcxx_assert(__m != memory_order_consume);
  
! 	__sync_mem_store (&_M_i, __i, __m);
        }
  
        void
*************** namespace __atomic2
*** 267,281 ****
  	__glibcxx_assert(__m != memory_order_acq_rel);
  	__glibcxx_assert(__m != memory_order_consume);
  
! 	if (__m == memory_order_relaxed)
! 	  _M_i = __i;
! 	else
! 	  {
! 	    // write_mem_barrier();
! 	    _M_i = __i;
! 	    if (__m == memory_order_seq_cst)
! 	      __sync_synchronize();
! 	  }
        }
  
        __int_type
--- 249,255 ----
  	__glibcxx_assert(__m != memory_order_acq_rel);
  	__glibcxx_assert(__m != memory_order_consume);
  
! 	__sync_mem_store (&_M_i, __i, __m);
        }
  
        __int_type
*************** namespace __atomic2
*** 284,293 ****
  	__glibcxx_assert(__m != memory_order_release);
  	__glibcxx_assert(__m != memory_order_acq_rel);
  
! 	__sync_synchronize();
! 	__int_type __ret = _M_i;
! 	__sync_synchronize();
! 	return __ret;
        }
  
        __int_type
--- 258,264 ----
  	__glibcxx_assert(__m != memory_order_release);
  	__glibcxx_assert(__m != memory_order_acq_rel);
  
! 	return __sync_mem_load (&_M_i, __m);
        }
  
        __int_type
*************** namespace __atomic2
*** 296,320 ****
  	__glibcxx_assert(__m != memory_order_release);
  	__glibcxx_assert(__m != memory_order_acq_rel);
  
! 	__sync_synchronize();
! 	__int_type __ret = _M_i;
! 	__sync_synchronize();
! 	return __ret;
        }
  
        __int_type
        exchange(__int_type __i, memory_order __m = memory_order_seq_cst)
        {
! 	// XXX built-in assumes memory_order_acquire.
! 	return __sync_lock_test_and_set(&_M_i, __i);
        }
  
  
        __int_type
        exchange(__int_type __i, memory_order __m = memory_order_seq_cst) volatile
        {
! 	// XXX built-in assumes memory_order_acquire.
! 	return __sync_lock_test_and_set(&_M_i, __i);
        }
  
        bool
--- 267,286 ----
  	__glibcxx_assert(__m != memory_order_release);
  	__glibcxx_assert(__m != memory_order_acq_rel);
  
! 	return __sync_mem_load (&_M_i, __m);
        }
  
        __int_type
        exchange(__int_type __i, memory_order __m = memory_order_seq_cst)
        {
! 	return __sync_mem_exchange (&_M_i, __i, __m);
        }
  
  
        __int_type
        exchange(__int_type __i, memory_order __m = memory_order_seq_cst) volatile
        {
! 	return __sync_mem_exchange (&_M_i, __i, __m);
        }
  
        bool
*************** namespace __atomic2
*** 352,357 ****
--- 318,324 ----
  	__glibcxx_assert(__m2 <= __m1);
  
  	__int_type __i1o = __i1;
+ 	// Compare_and_swap is a full barrier already.
  	__int_type __i1n = __sync_val_compare_and_swap(&_M_i, __i1o, __i2);
  
  	// Assume extra stores (of same value) allowed in true case.
*************** namespace __atomic2
*** 368,373 ****
--- 335,341 ----
  	__glibcxx_assert(__m2 <= __m1);
  
  	__int_type __i1o = __i1;
+ 	// Compare_and_swap is a full barrier already.
  	__int_type __i1n = __sync_val_compare_and_swap(&_M_i, __i1o, __i2);
  
  	// Assume extra stores (of same value) allowed in true case.
*************** namespace __atomic2
*** 393,440 ****
  
        __int_type
        fetch_add(__int_type __i, memory_order __m = memory_order_seq_cst)
!       { return __sync_fetch_and_add(&_M_i, __i); }
  
        __int_type
        fetch_add(__int_type __i,
  		memory_order __m = memory_order_seq_cst) volatile
!       { return __sync_fetch_and_add(&_M_i, __i); }
  
        __int_type
        fetch_sub(__int_type __i, memory_order __m = memory_order_seq_cst)
!       { return __sync_fetch_and_sub(&_M_i, __i); }
  
        __int_type
        fetch_sub(__int_type __i,
  		memory_order __m = memory_order_seq_cst) volatile
!       { return __sync_fetch_and_sub(&_M_i, __i); }
  
        __int_type
        fetch_and(__int_type __i, memory_order __m = memory_order_seq_cst)
!       { return __sync_fetch_and_and(&_M_i, __i); }
  
        __int_type
        fetch_and(__int_type __i,
  		memory_order __m = memory_order_seq_cst) volatile
!       { return __sync_fetch_and_and(&_M_i, __i); }
  
        __int_type
        fetch_or(__int_type __i, memory_order __m = memory_order_seq_cst)
!       { return __sync_fetch_and_or(&_M_i, __i); }
  
        __int_type
        fetch_or(__int_type __i,
  	       memory_order __m = memory_order_seq_cst) volatile
!       { return __sync_fetch_and_or(&_M_i, __i); }
  
        __int_type
        fetch_xor(__int_type __i, memory_order __m = memory_order_seq_cst)
!       { return __sync_fetch_and_xor(&_M_i, __i); }
  
        __int_type
        fetch_xor(__int_type __i,
  		memory_order __m = memory_order_seq_cst) volatile
!       { return __sync_fetch_and_xor(&_M_i, __i); }
      };
  
  
--- 361,408 ----
  
        __int_type
        fetch_add(__int_type __i, memory_order __m = memory_order_seq_cst)
!       { return __sync_mem_fetch_add(&_M_i, __i, __m); }
  
        __int_type
        fetch_add(__int_type __i,
  		memory_order __m = memory_order_seq_cst) volatile
!       { return __sync_mem_fetch_add(&_M_i, __i, __m); }
  
        __int_type
        fetch_sub(__int_type __i, memory_order __m = memory_order_seq_cst)
!       { return __sync_mem_fetch_sub(&_M_i, __i, __m); }
  
        __int_type
        fetch_sub(__int_type __i,
  		memory_order __m = memory_order_seq_cst) volatile
!       { return __sync_mem_fetch_sub(&_M_i, __i, __m); }
  
        __int_type
        fetch_and(__int_type __i, memory_order __m = memory_order_seq_cst)
!       { return __sync_mem_fetch_and(&_M_i, __i, __m); }
  
        __int_type
        fetch_and(__int_type __i,
  		memory_order __m = memory_order_seq_cst) volatile
!       { return __sync_mem_fetch_and(&_M_i, __i, __m); }
  
        __int_type
        fetch_or(__int_type __i, memory_order __m = memory_order_seq_cst)
!       { return __sync_mem_fetch_or(&_M_i, __i, __m); }
  
        __int_type
        fetch_or(__int_type __i,
  	       memory_order __m = memory_order_seq_cst) volatile
!       { return __sync_mem_fetch_or(&_M_i, __i, __m); }
  
        __int_type
        fetch_xor(__int_type __i, memory_order __m = memory_order_seq_cst)
!       { return __sync_mem_fetch_xor(&_M_i, __i, __m); }
  
        __int_type
        fetch_xor(__int_type __i,
  		memory_order __m = memory_order_seq_cst) volatile
!       { return __sync_mem_fetch_xor(&_M_i, __i, __m); }
      };
  
  
*************** namespace __atomic2
*** 495,529 ****
  
        __pointer_type
        operator++()
!       { return fetch_add(1) + 1; }
  
        __pointer_type
        operator++() volatile
!       { return fetch_add(1) + 1; }
  
        __pointer_type
        operator--()
!       { return fetch_sub(1) -1; }
  
        __pointer_type
        operator--() volatile
!       { return fetch_sub(1) -1; }
  
        __pointer_type
        operator+=(ptrdiff_t __d)
!       { return fetch_add(__d) + __d; }
  
        __pointer_type
        operator+=(ptrdiff_t __d) volatile
!       { return fetch_add(__d) + __d; }
  
        __pointer_type
        operator-=(ptrdiff_t __d)
!       { return fetch_sub(__d) - __d; }
  
        __pointer_type
        operator-=(ptrdiff_t __d) volatile
!       { return fetch_sub(__d) - __d; }
  
        bool
        is_lock_free() const
--- 463,497 ----
  
        __pointer_type
        operator++()
!       { return __sync_mem_add_fetch(&_M_p, 1, memory_order_seq_cst); }
  
        __pointer_type
        operator++() volatile
!       { return __sync_mem_add_fetch(&_M_p, 1, memory_order_seq_cst); }
  
        __pointer_type
        operator--()
!       { return __sync_mem_sub_fetch(&_M_p, 1, memory_order_seq_cst); }
  
        __pointer_type
        operator--() volatile
!       { return __sync_mem_sub_fetch(&_M_p, 1, memory_order_seq_cst); }
  
        __pointer_type
        operator+=(ptrdiff_t __d)
!       { return __sync_mem_add_fetch(&_M_p, __d, memory_order_seq_cst); }
  
        __pointer_type
        operator+=(ptrdiff_t __d) volatile
!       { return __sync_mem_add_fetch(&_M_p, __d, memory_order_seq_cst); }
  
        __pointer_type
        operator-=(ptrdiff_t __d)
!       { return __sync_mem_sub_fetch(&_M_p, __d, memory_order_seq_cst); }
  
        __pointer_type
        operator-=(ptrdiff_t __d) volatile
!       { return __sync_mem_sub_fetch(&_M_p, __d, memory_order_seq_cst); }
  
        bool
        is_lock_free() const
*************** namespace __atomic2
*** 540,554 ****
  	__glibcxx_assert(__m != memory_order_acq_rel);
  	__glibcxx_assert(__m != memory_order_consume);
  
! 	if (__m == memory_order_relaxed)
! 	  _M_p = __p;
! 	else
! 	  {
! 	    // write_mem_barrier();
! 	    _M_p = __p;
! 	    if (__m == memory_order_seq_cst)
! 	      __sync_synchronize();
! 	  }
        }
  
        void
--- 508,514 ----
  	__glibcxx_assert(__m != memory_order_acq_rel);
  	__glibcxx_assert(__m != memory_order_consume);
  
! 	__sync_mem_store (&_M_p, __p, __m);
        }
  
        void
*************** namespace __atomic2
*** 559,573 ****
  	__glibcxx_assert(__m != memory_order_acq_rel);
  	__glibcxx_assert(__m != memory_order_consume);
  
! 	if (__m == memory_order_relaxed)
! 	  _M_p = __p;
! 	else
! 	  {
! 	    // write_mem_barrier();
! 	    _M_p = __p;
! 	    if (__m == memory_order_seq_cst)
! 	      __sync_synchronize();
! 	  }
        }
  
        __pointer_type
--- 519,525 ----
  	__glibcxx_assert(__m != memory_order_acq_rel);
  	__glibcxx_assert(__m != memory_order_consume);
  
! 	__sync_mem_store (&_M_p, __p, __m);
        }
  
        __pointer_type
*************** namespace __atomic2
*** 576,585 ****
  	__glibcxx_assert(__m != memory_order_release);
  	__glibcxx_assert(__m != memory_order_acq_rel);
  
! 	__sync_synchronize();
! 	__pointer_type __ret = _M_p;
! 	__sync_synchronize();
! 	return __ret;
        }
  
        __pointer_type
--- 528,534 ----
  	__glibcxx_assert(__m != memory_order_release);
  	__glibcxx_assert(__m != memory_order_acq_rel);
  
! 	return __sync_mem_load (&_M_p, __m);
        }
  
        __pointer_type
*************** namespace __atomic2
*** 588,604 ****
  	__glibcxx_assert(__m != memory_order_release);
  	__glibcxx_assert(__m != memory_order_acq_rel);
  
! 	__sync_synchronize();
! 	__pointer_type __ret = _M_p;
! 	__sync_synchronize();
! 	return __ret;
        }
  
        __pointer_type
        exchange(__pointer_type __p, memory_order __m = memory_order_seq_cst)
        {
! 	// XXX built-in assumes memory_order_acquire.
! 	return __sync_lock_test_and_set(&_M_p, __p);
        }
  
  
--- 537,549 ----
  	__glibcxx_assert(__m != memory_order_release);
  	__glibcxx_assert(__m != memory_order_acq_rel);
  
! 	return __sync_mem_load (&_M_p, __m);
        }
  
        __pointer_type
        exchange(__pointer_type __p, memory_order __m = memory_order_seq_cst)
        {
! 	return __sync_mem_exchange (&_M_p, __p, __m);
        }
  
  
*************** namespace __atomic2
*** 606,613 ****
        exchange(__pointer_type __p,
  	       memory_order __m = memory_order_seq_cst) volatile
        {
! 	// XXX built-in assumes memory_order_acquire.
! 	return __sync_lock_test_and_set(&_M_p, __p);
        }
  
        bool
--- 551,557 ----
        exchange(__pointer_type __p,
  	       memory_order __m = memory_order_seq_cst) volatile
        {
! 	return __sync_mem_exchange (&_M_p, __p, __m);
        }
  
        bool
*************** namespace __atomic2
*** 619,624 ****
--- 563,569 ----
  	__glibcxx_assert(__m2 <= __m1);
  
  	__pointer_type __p1o = __p1;
+ 	// Compare_and_swap is a full barrier already.
  	__pointer_type __p1n = __sync_val_compare_and_swap(&_M_p, __p1o, __p2);
  
  	// Assume extra stores (of same value) allowed in true case.
*************** namespace __atomic2
*** 635,640 ****
--- 580,586 ----
  	__glibcxx_assert(__m2 <= __m1);
  
  	__pointer_type __p1o = __p1;
+ 	// Compare_and_swap is a full barrier already.
  	__pointer_type __p1n = __sync_val_compare_and_swap(&_M_p, __p1o, __p2);
  
  	// Assume extra stores (of same value) allowed in true case.
*************** namespace __atomic2
*** 644,664 ****
  
        __pointer_type
        fetch_add(ptrdiff_t __d, memory_order __m = memory_order_seq_cst)
!       { return __sync_fetch_and_add(&_M_p, __d); }
  
        __pointer_type
        fetch_add(ptrdiff_t __d,
  		memory_order __m = memory_order_seq_cst) volatile
!       { return __sync_fetch_and_add(&_M_p, __d); }
  
        __pointer_type
        fetch_sub(ptrdiff_t __d, memory_order __m = memory_order_seq_cst)
!       { return __sync_fetch_and_sub(&_M_p, __d); }
  
        __pointer_type
        fetch_sub(ptrdiff_t __d,
  		memory_order __m = memory_order_seq_cst) volatile
!       { return __sync_fetch_and_sub(&_M_p, __d); }
      };
  
  } // namespace __atomic2
--- 590,610 ----
  
        __pointer_type
        fetch_add(ptrdiff_t __d, memory_order __m = memory_order_seq_cst)
!       { return __sync_mem_fetch_add(&_M_p, __d, __m); }
  
        __pointer_type
        fetch_add(ptrdiff_t __d,
  		memory_order __m = memory_order_seq_cst) volatile
!       { return __sync_mem_fetch_add(&_M_p, __d, __m); }
  
        __pointer_type
        fetch_sub(ptrdiff_t __d, memory_order __m = memory_order_seq_cst)
!       { return __sync_mem_fetch_sub(&_M_p, __d, __m); }
  
        __pointer_type
        fetch_sub(ptrdiff_t __d,
  		memory_order __m = memory_order_seq_cst) volatile
!       { return __sync_mem_fetch_sub(&_M_p, __d, __m); }
      };
  
  } // namespace __atomic2
