public inbox for glibc-cvs@sourceware.org
* [glibc/arm/morello/main] cheri: Implement 128-bit atomics
@ 2022-08-05 19:34 Szabolcs Nagy
  0 siblings, 0 replies; 4+ messages in thread
From: Szabolcs Nagy @ 2022-08-05 19:34 UTC (permalink / raw)
  To: glibc-cvs

https://sourceware.org/git/gitweb.cgi?p=glibc.git;h=c8f1fc9d94f9c311d382833c3393f12c1acb1991

commit c8f1fc9d94f9c311d382833c3393f12c1acb1991
Author: Szabolcs Nagy <szabolcs.nagy@arm.com>
Date:   Tue Jun 8 12:48:43 2021 +0100

    cheri: Implement 128-bit atomics
    
    Arm Morello requires 128-bit atomics.
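
Under the CHERI pure-capability ABI a pointer is a 128-bit capability, and a
capability can only be copied atomically with a single capability-sized access
(splitting it into two 64-bit accesses would drop the tag), so glibc's atomics
on pointer-typed objects (sizeof (*mem) == 16) need 128-bit operations.  The
patch below adds the 16-byte case to the generic size dispatch and size checks
in include/atomic.h and defines the corresponding _128_ macros in
sysdeps/aarch64/atomic-machine.h on top of the compiler's __atomic builtins.
A rough caller-side sketch, not taken from the patch and assuming a purecap
compiler where void * is 16 bytes:

  #include <stdbool.h>

  /* Hypothetical example: CAS on a pointer-typed slot.  Under
     __CHERI_PURE_CAPABILITY__ the slot holds a 16-byte capability, so this
     becomes a 128-bit compare-and-swap; on plain AArch64 it is the usual
     8-byte CAS.  */
  static bool
  try_publish (void **slot, void *expected, void *desired)
  {
    return __atomic_compare_exchange_n (slot, &expected, desired, 0,
                                        __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
  }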

Diff:
---
 include/atomic.h                 | 17 +++++++++++++++--
 sysdeps/aarch64/atomic-machine.h | 21 +++++++++++++++++++++
 2 files changed, 36 insertions(+), 2 deletions(-)

diff --git a/include/atomic.h b/include/atomic.h
index 2cb52c9cfd..140ef2a5a5 100644
--- a/include/atomic.h
+++ b/include/atomic.h
@@ -62,6 +62,8 @@
       __atg1_result = pre##_32_##post (mem, __VA_ARGS__);		      \
     else if (sizeof (*mem) == 8)					      \
       __atg1_result = pre##_64_##post (mem, __VA_ARGS__);		      \
+    else if (sizeof (*mem) == 16)					      \
+      __atg1_result = pre##_128_##post (mem, __VA_ARGS__);		      \
     else								      \
       abort ();								      \
     __atg1_result;							      \
@@ -77,6 +79,8 @@
       __atg2_result = pre##_32_##post (mem, __VA_ARGS__);		      \
     else if (sizeof (*mem) == 8)					      \
       __atg2_result = pre##_64_##post (mem, __VA_ARGS__);		      \
+    else if (sizeof (*mem) == 16)					      \
+      __atg2_result = pre##_128_##post (mem, __VA_ARGS__);		      \
     else								      \
       abort ();								      \
     __atg2_result;							      \
@@ -540,7 +544,11 @@
 /* We require 32b atomic operations; some archs also support 64b atomic
    operations.  */
 void __atomic_link_error (void);
-# if __HAVE_64B_ATOMICS == 1
+# if defined __CHERI_PURE_CAPABILITY__
+#  define __atomic_check_size(mem) \
+   if ((sizeof (*mem) != 4) && (sizeof (*mem) != 8) && (sizeof (*mem) != 16)) \
+     __atomic_link_error ();
+# elif __HAVE_64B_ATOMICS == 1
 #  define __atomic_check_size(mem) \
    if ((sizeof (*mem) != 4) && (sizeof (*mem) != 8))			      \
      __atomic_link_error ();
@@ -553,7 +561,12 @@ void __atomic_link_error (void);
    need other atomic operations of such sizes, and restricting the support to
    loads and stores makes this easier for archs that do not have native
    support for atomic operations to less-than-word-sized data.  */
-# if __HAVE_64B_ATOMICS == 1
+# if defined __CHERI_PURE_CAPABILITY__
+#  define __atomic_check_size_ls(mem) \
+   if ((sizeof (*mem) != 1) && (sizeof (*mem) != 2) && (sizeof (*mem) != 4)   \
+       && (sizeof (*mem) != 8) && (sizeof (*mem) != 16))		      \
+     __atomic_link_error ();
+# elif __HAVE_64B_ATOMICS == 1
 #  define __atomic_check_size_ls(mem) \
    if ((sizeof (*mem) != 1) && (sizeof (*mem) != 2) && (sizeof (*mem) != 4)   \
        && (sizeof (*mem) != 8))						      \
diff --git a/sysdeps/aarch64/atomic-machine.h b/sysdeps/aarch64/atomic-machine.h
index 52b3fb2047..14e9481392 100644
--- a/sysdeps/aarch64/atomic-machine.h
+++ b/sysdeps/aarch64/atomic-machine.h
@@ -54,6 +54,13 @@
 				  model, __ATOMIC_RELAXED);		\
   })
 
+#  define __arch_compare_and_exchange_bool_128_int(mem, newval, oldval, model) \
+  ({									\
+    typeof (*mem) __oldval = (oldval);					\
+    !__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0,	\
+				  model, __ATOMIC_RELAXED);		\
+  })
+
 # define __arch_compare_and_exchange_val_8_int(mem, newval, oldval, model) \
   ({									\
     typeof (*mem) __oldval = (oldval);					\
@@ -86,6 +93,14 @@
     __oldval;								\
   })
 
+#  define __arch_compare_and_exchange_val_128_int(mem, newval, oldval, model) \
+  ({									\
+    typeof (*mem) __oldval = (oldval);					\
+    __atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0,	\
+				 model, __ATOMIC_RELAXED);		\
+    __oldval;								\
+  })
+
 
 /* Compare and exchange with "acquire" semantics, ie barrier after.  */
 
@@ -118,6 +133,9 @@
 #  define __arch_exchange_64_int(mem, newval, model)	\
   __atomic_exchange_n (mem, newval, model)
 
+#  define __arch_exchange_128_int(mem, newval, model)	\
+  __atomic_exchange_n (mem, newval, model)
+
 # define atomic_exchange_acq(mem, value)				\
   __atomic_val_bysize (__arch_exchange, int, mem, value, __ATOMIC_ACQUIRE)
 
@@ -139,6 +157,9 @@
 #  define __arch_exchange_and_add_64_int(mem, value, model)	\
   __atomic_fetch_add (mem, value, model)
 
+#  define __arch_exchange_and_add_128_int(mem, value, model)	\
+  __atomic_fetch_add (mem, value, model)
+
 # define atomic_exchange_and_add_acq(mem, value)			\
   __atomic_val_bysize (__arch_exchange_and_add, int, mem, value,	\
 		       __ATOMIC_ACQUIRE)
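
The generic macros above dispatch by pasting the operand size into the arch
macro name (pre##_128_##post when sizeof (*mem) == 16), and the size checks
call __atomic_link_error (), which is declared but never defined, so an atomic
on an unsupported size fails at link time instead of compiling silently.  A
minimal stand-alone sketch of the same pattern, using made-up my_* names
rather than the glibc macros and built directly on the __atomic builtins:

  /* Illustrative only; these names are not glibc's.  */
  void my_atomic_link_error (void);  /* never defined: bad size => link error */

  #define MY_CHECK_SIZE(mem)                                   \
    if (sizeof (*(mem)) != 4 && sizeof (*(mem)) != 8           \
        && sizeof (*(mem)) != 16)                              \
      my_atomic_link_error ();

  #define MY_EXCHANGE_ACQ(mem, val)                            \
    ({                                                         \
      MY_CHECK_SIZE (mem);                                     \
      __atomic_exchange_n ((mem), (val), __ATOMIC_ACQUIRE);    \
    })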



* [glibc/arm/morello/main] cheri: Implement 128-bit atomics
@ 2022-11-23 14:45 Szabolcs Nagy
  0 siblings, 0 replies; 4+ messages in thread
From: Szabolcs Nagy @ 2022-11-23 14:45 UTC (permalink / raw)
  To: glibc-cvs

https://sourceware.org/git/gitweb.cgi?p=glibc.git;h=7cbc4d39feaf6ec88ffe83a8524191c98925b955

commit 7cbc4d39feaf6ec88ffe83a8524191c98925b955
Author: Szabolcs Nagy <szabolcs.nagy@arm.com>
Date:   Tue Jun 8 12:48:43 2021 +0100

    cheri: Implement 128-bit atomics
    
    Arm Morello requires 128-bit atomics.


* [glibc/arm/morello/main] cheri: Implement 128-bit atomics
@ 2022-10-27 13:55 Szabolcs Nagy
  0 siblings, 0 replies; 4+ messages in thread
From: Szabolcs Nagy @ 2022-10-27 13:55 UTC (permalink / raw)
  To: glibc-cvs

https://sourceware.org/git/gitweb.cgi?p=glibc.git;h=96ac67a64b4eb14473ea1e06aa665a903052b643

commit 96ac67a64b4eb14473ea1e06aa665a903052b643
Author: Szabolcs Nagy <szabolcs.nagy@arm.com>
Date:   Tue Jun 8 12:48:43 2021 +0100

    cheri: Implement 128-bit atomics
    
    Arm Morello requires 128-bit atomics.


* [glibc/arm/morello/main] cheri: Implement 128-bit atomics
@ 2022-10-26 15:17 Szabolcs Nagy
  0 siblings, 0 replies; 4+ messages in thread
From: Szabolcs Nagy @ 2022-10-26 15:17 UTC (permalink / raw)
  To: glibc-cvs

https://sourceware.org/git/gitweb.cgi?p=glibc.git;h=ebedf4cbaf2d2654c19be8095925ffe434c86997

commit ebedf4cbaf2d2654c19be8095925ffe434c86997
Author: Szabolcs Nagy <szabolcs.nagy@arm.com>
Date:   Tue Jun 8 12:48:43 2021 +0100

    cheri: Implement 128-bit atomics
    
    Arm Morello requires 128-bit atomics.


end of thread, other threads:[~2022-11-23 14:45 UTC | newest]

Thread overview: 4+ messages
2022-08-05 19:34 [glibc/arm/morello/main] cheri: Implement 128-bit atomics Szabolcs Nagy
2022-10-26 15:17 Szabolcs Nagy
2022-10-27 13:55 Szabolcs Nagy
2022-11-23 14:45 Szabolcs Nagy
