public inbox for gcc-cvs@sourceware.org
help / color / mirror / Atom feed
* [gcc(refs/vendors/ARM/heads/morello)] builtins: Add helper functions for sync builtins
@ 2022-05-06 14:43 Matthew Malcomson
  0 siblings, 0 replies; only message in thread
From: Matthew Malcomson @ 2022-05-06 14:43 UTC (permalink / raw)
  To: gcc-cvs

https://gcc.gnu.org/g:a6f60e441ab273f8e788904bc8ddfd93f342c78a

commit a6f60e441ab273f8e788904bc8ddfd93f342c78a
Author: Richard Sandiford <richard.sandiford@arm.com>
Date:   Fri Apr 22 08:38:40 2022 +0100

    builtins: Add helper functions for sync builtins
    
    Various pieces of code hard-wired the link between the
    BUILT_IN_FOO_N and BUILT_IN_FOO_{1,2,4,8,16,CAPABILITY} enums.
    This patch tries to consolidate all that into one place,
    so that it's easier to change the link later.
    
    The new mode functions are called builtin_sync_mode rather
    than get_builtin_sync_mode for consistency with the other
    new ones.
    
    In c-family, making sync_resolve_function do the lookup
    works out better for later patches.

Diff:
---
 gcc/ada/gcc-interface/utils2.c |   9 +--
 gcc/builtins.c                 | 148 ++++++++++++++++++++++++-----------------
 gcc/builtins.h                 |  13 ++++
 gcc/c-family/c-common.c        |  39 +++++------
 gcc/cp/decl2.c                 |   5 +-
 gcc/fortran/trans-intrinsic.c  |  16 ++---
 gcc/omp-expand.c               |  42 ++++++------
 gcc/tree-core.h                |   6 --
 8 files changed, 150 insertions(+), 128 deletions(-)

diff --git a/gcc/ada/gcc-interface/utils2.c b/gcc/ada/gcc-interface/utils2.c
index 7ba5ca25ca4..eeb30a14752 100644
--- a/gcc/ada/gcc-interface/utils2.c
+++ b/gcc/ada/gcc-interface/utils2.c
@@ -694,7 +694,6 @@ build_atomic_load (tree src, bool sync)
   tree orig_src = src;
   tree t, addr, val;
   unsigned int size;
-  int fncode;
 
   /* Remove conversions to get the address of the underlying object.  */
   src = remove_conversions (src, false);
@@ -702,8 +701,7 @@ build_atomic_load (tree src, bool sync)
   if (size == 0)
     return orig_src;
 
-  fncode = (int) BUILT_IN_ATOMIC_LOAD_N + exact_log2 (size) + 1;
-  t = builtin_decl_implicit ((enum built_in_function) fncode);
+  t = builtin_decl_implicit (builtin_sync_code (BUILT_IN_ATOMIC_LOAD_N, size));
 
   addr = build_unary_op (unqualified_addr_expr (), ptr_type, src);
   val = build_call_expr (t, 2, addr, mem_model);
@@ -730,7 +728,6 @@ build_atomic_store (tree dest, tree src, bool sync)
   tree orig_dest = dest;
   tree t, int_type, addr;
   unsigned int size;
-  int fncode;
 
   /* Remove conversions to get the address of the underlying object.  */
   dest = remove_conversions (dest, false);
@@ -738,8 +735,8 @@ build_atomic_store (tree dest, tree src, bool sync)
   if (size == 0)
     return build_binary_op (MODIFY_EXPR, NULL_TREE, orig_dest, src);
 
-  fncode = (int) BUILT_IN_ATOMIC_STORE_N + exact_log2 (size) + 1;
-  t = builtin_decl_implicit ((enum built_in_function) fncode);
+  auto fncode = builtin_sync_code (BUILT_IN_ATOMIC_STORE_N, size);
+  t = builtin_decl_implicit (fncode);
   int_type = gnat_type_for_size (BITS_PER_UNIT * size, 1);
 
   /* First convert the bits to be stored to the original type of the store,
diff --git a/gcc/builtins.c b/gcc/builtins.c
index 91f12af878a..0f6a277f8ae 100644
--- a/gcc/builtins.c
+++ b/gcc/builtins.c
@@ -6630,30 +6630,63 @@ log2_nbytes_int_mode (int nbytes_log2)
   return int_mode_for_size (BITS_PER_UNIT << nbytes_log2, 0).require ();
 }
 
-/* Reconstitute a mode for a __sync intrinsic operation.  Since the type of
-   the pointer in these functions is void*, the tree optimizers may remove
-   casts.  The mode computed in expand_builtin isn't reliable either, due
-   to __sync_bool_compare_and_swap.
+/* BASE_CODE is an overloaded BUILT_IN_*_N function defined in
+   sync-builtins.def.  Return the non-overloaded function associated
+   with sync_dsize DSIZE.
 
-   FCODE_DIFF should be fcode - base, where base is the FOO_1 code for the
-   group of builtins.  This gives us log2 of the mode size.  */
+   The type of the parameter is int rather than sync_dsize because many
+   callers can validly pass a plain byte size.  */
 
-static inline machine_mode
-get_builtin_sync_mode (int fcode_diff)
+built_in_function
+builtin_sync_code (built_in_function base_code, int dsize)
 {
-  /* Only _CAPABILITY fcodes may have a fcode_diff of CAPABILITY_BUILTIN_DIFF
-     (currently this is 5) at this point.  */
-  if (fcode_diff == CAPABILITY_BUILTIN_FCODE_DIFF)
-  {
-    opt_scalar_addr_mode opt_cap_mode = targetm.capability_mode();
-    gcc_assert (opt_cap_mode.exists());
-    scalar_addr_mode cap_mode = opt_cap_mode.require();
-    return (machine_mode) cap_mode;
-  }
-  else
-    /* The size is not negotiable, so ask not to get BLKmode in return
-       if the target indicates that a smaller size would be better.  */
-    return int_mode_for_size (BITS_PER_UNIT << fcode_diff, 0).require ();
+  gcc_assert (dsize == SYNC_I1
+	      || dsize == SYNC_I2
+	      || dsize == SYNC_I4
+	      || dsize == SYNC_I8
+	      || dsize == SYNC_I16
+	      || dsize == SYNC_ICAP);
+  /* The capability entry comes after the numerical ones.  */
+  if (dsize == SYNC_ICAP)
+    dsize = 32;
+  return (built_in_function) ((int) base_code + exact_log2 (dsize) + 1);
+}
+
+/* BASE_CODE is an overloaded BUILT_IN_*_N function defined in
+   sync-builtins.def and CODE is a non-overloaded function from
+   the same group.  Return the type of data that CODE handles.  */
+
+sync_dsize
+builtin_sync_dsize (built_in_function base_code, built_in_function code)
+{
+  int nbytes_log2 = (int) code - (int) base_code - 1;
+  gcc_assert (nbytes_log2 >= 0 && nbytes_log2 <= 5);
+  if (nbytes_log2 == 5)
+    return SYNC_ICAP;
+  return sync_dsize (1 << nbytes_log2);
+}
+
+/* Return the integer or capability mode associated with DSIZE.  */
+
+static scalar_addr_mode
+builtin_sync_mode (sync_dsize dsize)
+{
+  if (dsize == SYNC_ICAP)
+    return targetm.capability_mode ().require ();
+
+  /* The size is not negotiable, so ask not to get BLKmode in return
+     if the target indicates that a smaller size would be better.  */
+  return int_mode_for_size (int (dsize) * BITS_PER_UNIT, 0).require ();
+}
+
+/* BASE_CODE is an overloaded BUILT_IN_*_N function defined in
+   sync-builtins.def and CODE is a non-overloaded function from
+   the same group.  Return the data mode that CODE handles.  */
+
+static scalar_addr_mode
+builtin_sync_mode (built_in_function base_code, built_in_function code)
+{
+  return builtin_sync_mode (builtin_sync_dsize (base_code, code));
 }
 
 /* Expand the memory expression LOC and return the appropriate memory operand
@@ -7032,11 +7065,9 @@ expand_ifn_atomic_compare_exchange_into_call (gcall *call, machine_mode mode)
   for (z = 4; z < 6; z++)
     vec->quick_push (gimple_call_arg (call, z));
   /* At present we only have BUILT_IN_ATOMIC_COMPARE_EXCHANGE_{1,2,4,8,16}.  */
-  unsigned int bytes_log2 = exact_log2 (GET_MODE_SIZE (mode).to_constant ());
-  gcc_assert (bytes_log2 < 5);
+  unsigned int nbytes = GET_MODE_SIZE (mode).to_constant ();
   built_in_function fncode
-    = (built_in_function) ((int) BUILT_IN_ATOMIC_COMPARE_EXCHANGE_1
-			   + bytes_log2);
+    = builtin_sync_code (BUILT_IN_ATOMIC_COMPARE_EXCHANGE_N, nbytes);
   tree fndecl = builtin_decl_explicit (fncode);
   tree fn = build_addr_expr (build_pointer_type (TREE_TYPE (fndecl)), fndecl);
   tree exp = build_call_vec (boolean_type_node, fn, vec);
@@ -8608,84 +8639,84 @@ expand_builtin (tree exp, rtx target, rtx subtarget, machine_mode mode,
       break;
 
     CASE_SYNC_BUILTIN_ALL_N (BUILT_IN_SYNC_FETCH_AND_ADD):
-      mode = get_builtin_sync_mode (fcode - BUILT_IN_SYNC_FETCH_AND_ADD_1);
+      mode = builtin_sync_mode (BUILT_IN_SYNC_FETCH_AND_ADD_N, fcode);
       target = expand_builtin_sync_operation (mode, exp, PLUS, false, target);
       if (target)
 	return target;
       break;
 
     CASE_SYNC_BUILTIN_ALL_N (BUILT_IN_SYNC_FETCH_AND_SUB):
-      mode = get_builtin_sync_mode (fcode - BUILT_IN_SYNC_FETCH_AND_SUB_1);
+      mode = builtin_sync_mode (BUILT_IN_SYNC_FETCH_AND_SUB_N, fcode);
       target = expand_builtin_sync_operation (mode, exp, MINUS, false, target);
       if (target)
 	return target;
       break;
 
     CASE_SYNC_BUILTIN_ALL_N (BUILT_IN_SYNC_FETCH_AND_OR):
-      mode = get_builtin_sync_mode (fcode - BUILT_IN_SYNC_FETCH_AND_OR_1);
+      mode = builtin_sync_mode (BUILT_IN_SYNC_FETCH_AND_OR_N, fcode);
       target = expand_builtin_sync_operation (mode, exp, IOR, false, target);
       if (target)
 	return target;
       break;
 
     CASE_SYNC_BUILTIN_ALL_N (BUILT_IN_SYNC_FETCH_AND_AND):
-      mode = get_builtin_sync_mode (fcode - BUILT_IN_SYNC_FETCH_AND_AND_1);
+      mode = builtin_sync_mode (BUILT_IN_SYNC_FETCH_AND_AND_N, fcode);
       target = expand_builtin_sync_operation (mode, exp, AND, false, target);
       if (target)
 	return target;
       break;
 
     CASE_SYNC_BUILTIN_ALL_N (BUILT_IN_SYNC_FETCH_AND_XOR):
-      mode = get_builtin_sync_mode (fcode - BUILT_IN_SYNC_FETCH_AND_XOR_1);
+      mode = builtin_sync_mode (BUILT_IN_SYNC_FETCH_AND_XOR_N, fcode);
       target = expand_builtin_sync_operation (mode, exp, XOR, false, target);
       if (target)
 	return target;
       break;
 
     CASE_SYNC_BUILTIN_ALL_N (BUILT_IN_SYNC_FETCH_AND_NAND):
-      mode = get_builtin_sync_mode (fcode - BUILT_IN_SYNC_FETCH_AND_NAND_1);
+      mode = builtin_sync_mode (BUILT_IN_SYNC_FETCH_AND_NAND_N, fcode);
       target = expand_builtin_sync_operation (mode, exp, NOT, false, target);
       if (target)
 	return target;
       break;
 
     CASE_SYNC_BUILTIN_ALL_N (BUILT_IN_SYNC_ADD_AND_FETCH):
-      mode = get_builtin_sync_mode (fcode - BUILT_IN_SYNC_ADD_AND_FETCH_1);
+      mode = builtin_sync_mode (BUILT_IN_SYNC_ADD_AND_FETCH_N, fcode);
       target = expand_builtin_sync_operation (mode, exp, PLUS, true, target);
       if (target)
 	return target;
       break;
 
     CASE_SYNC_BUILTIN_ALL_N (BUILT_IN_SYNC_SUB_AND_FETCH):
-      mode = get_builtin_sync_mode (fcode - BUILT_IN_SYNC_SUB_AND_FETCH_1);
+      mode = builtin_sync_mode (BUILT_IN_SYNC_SUB_AND_FETCH_N, fcode);
       target = expand_builtin_sync_operation (mode, exp, MINUS, true, target);
       if (target)
 	return target;
       break;
 
     CASE_SYNC_BUILTIN_ALL_N (BUILT_IN_SYNC_OR_AND_FETCH):
-      mode = get_builtin_sync_mode (fcode - BUILT_IN_SYNC_OR_AND_FETCH_1);
+      mode = builtin_sync_mode (BUILT_IN_SYNC_OR_AND_FETCH_N, fcode);
       target = expand_builtin_sync_operation (mode, exp, IOR, true, target);
       if (target)
 	return target;
       break;
 
     CASE_SYNC_BUILTIN_ALL_N (BUILT_IN_SYNC_AND_AND_FETCH):
-      mode = get_builtin_sync_mode (fcode - BUILT_IN_SYNC_AND_AND_FETCH_1);
+      mode = builtin_sync_mode (BUILT_IN_SYNC_AND_AND_FETCH_N, fcode);
       target = expand_builtin_sync_operation (mode, exp, AND, true, target);
       if (target)
 	return target;
       break;
 
     CASE_SYNC_BUILTIN_ALL_N (BUILT_IN_SYNC_XOR_AND_FETCH):
-      mode = get_builtin_sync_mode (fcode - BUILT_IN_SYNC_XOR_AND_FETCH_1);
+      mode = builtin_sync_mode (BUILT_IN_SYNC_XOR_AND_FETCH_N, fcode);
       target = expand_builtin_sync_operation (mode, exp, XOR, true, target);
       if (target)
 	return target;
       break;
 
     CASE_SYNC_BUILTIN_ALL_N (BUILT_IN_SYNC_NAND_AND_FETCH):
-      mode = get_builtin_sync_mode (fcode - BUILT_IN_SYNC_NAND_AND_FETCH_1);
+      mode = builtin_sync_mode (BUILT_IN_SYNC_NAND_AND_FETCH_N, fcode);
       target = expand_builtin_sync_operation (mode, exp, NOT, true, target);
       if (target)
 	return target;
@@ -8696,30 +8727,28 @@ expand_builtin (tree exp, rtx target, rtx subtarget, machine_mode mode,
 	mode = TYPE_MODE (boolean_type_node);
       if (!target || !register_operand (target, mode))
 	target = gen_reg_rtx (mode);
-      mode = get_builtin_sync_mode 
-				(fcode - BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_1);
+      mode = builtin_sync_mode (BUILT_IN_SYNC_BOOL_COMPARE_AND_SWAP_N, fcode);
       target = expand_builtin_compare_and_swap (mode, exp, true, target);
       if (target)
 	return target;
       break;
 
     CASE_SYNC_BUILTIN_ALL_N (BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP):
-      mode = get_builtin_sync_mode 
-				(fcode - BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_1);
+      mode = builtin_sync_mode (BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_N, fcode);
       target = expand_builtin_compare_and_swap (mode, exp, false, target);
       if (target)
 	return target;
       break;
 
     CASE_SYNC_BUILTIN_ALL_N (BUILT_IN_SYNC_LOCK_TEST_AND_SET):
-      mode = get_builtin_sync_mode (fcode - BUILT_IN_SYNC_LOCK_TEST_AND_SET_1);
+      mode = builtin_sync_mode (BUILT_IN_SYNC_LOCK_TEST_AND_SET_N, fcode);
       target = expand_builtin_sync_lock_test_and_set (mode, exp, target);
       if (target)
 	return target;
       break;
 
     CASE_SYNC_BUILTIN_NONCAP_N (BUILT_IN_SYNC_LOCK_RELEASE):
-      mode = get_builtin_sync_mode (fcode - BUILT_IN_SYNC_LOCK_RELEASE_1);
+      mode = builtin_sync_mode (BUILT_IN_SYNC_LOCK_RELEASE_N, fcode);
       expand_builtin_sync_lock_release (mode, exp);
       return const0_rtx;
 
@@ -8728,7 +8757,7 @@ expand_builtin (tree exp, rtx target, rtx subtarget, machine_mode mode,
       return const0_rtx;
 
     CASE_SYNC_BUILTIN_ALL_N (BUILT_IN_ATOMIC_EXCHANGE):
-      mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_EXCHANGE_1);
+      mode = builtin_sync_mode (BUILT_IN_ATOMIC_EXCHANGE_N, fcode);
       target = expand_builtin_atomic_exchange (mode, exp, target);
       if (target)
 	return target;
@@ -8738,8 +8767,7 @@ expand_builtin (tree exp, rtx target, rtx subtarget, machine_mode mode,
       {
 	unsigned int nargs, z;
 	vec<tree, va_gc> *vec;
-	mode = get_builtin_sync_mode (fcode
-				      - BUILT_IN_ATOMIC_COMPARE_EXCHANGE_1);
+	mode = builtin_sync_mode (BUILT_IN_ATOMIC_COMPARE_EXCHANGE_N, fcode);
 	target = expand_builtin_atomic_compare_exchange (mode, exp, target);
 	if (target)
 	  return target;
@@ -8758,14 +8786,14 @@ expand_builtin (tree exp, rtx target, rtx subtarget, machine_mode mode,
       }
 
     CASE_SYNC_BUILTIN_ALL_N (BUILT_IN_ATOMIC_LOAD):
-      mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_LOAD_1);
+      mode = builtin_sync_mode (BUILT_IN_ATOMIC_LOAD_N, fcode);
       target = expand_builtin_atomic_load (mode, exp, target);
       if (target)
 	return target;
       break;
 
     CASE_SYNC_BUILTIN_ALL_N (BUILT_IN_ATOMIC_STORE):
-      mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_STORE_1);
+      mode = builtin_sync_mode (BUILT_IN_ATOMIC_STORE_N, fcode);
       target = expand_builtin_atomic_store (mode, exp);
       if (target)
 	return const0_rtx;
@@ -8773,7 +8801,7 @@ expand_builtin (tree exp, rtx target, rtx subtarget, machine_mode mode,
 
     CASE_SYNC_BUILTIN_ALL_N (BUILT_IN_ATOMIC_ADD_FETCH):
       {
-	mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_ADD_FETCH_1);
+	mode = builtin_sync_mode (BUILT_IN_ATOMIC_ADD_FETCH_N, fcode);
 	enum built_in_function lib;
 	lib = (enum built_in_function)((int)BUILT_IN_ATOMIC_FETCH_ADD_1 + 
 				       (fcode - BUILT_IN_ATOMIC_ADD_FETCH_1));
@@ -8786,7 +8814,7 @@ expand_builtin (tree exp, rtx target, rtx subtarget, machine_mode mode,
     CASE_SYNC_BUILTIN_ALL_N (BUILT_IN_ATOMIC_SUB_FETCH):
       {
 	enum built_in_function lib;
-	mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_SUB_FETCH_1);
+	mode = builtin_sync_mode (BUILT_IN_ATOMIC_SUB_FETCH_N, fcode);
 	lib = (enum built_in_function)((int)BUILT_IN_ATOMIC_FETCH_SUB_1 + 
 				       (fcode - BUILT_IN_ATOMIC_SUB_FETCH_1));
 	target = expand_builtin_atomic_fetch_op (mode, exp, target, MINUS, true,
@@ -8798,7 +8826,7 @@ expand_builtin (tree exp, rtx target, rtx subtarget, machine_mode mode,
     CASE_SYNC_BUILTIN_ALL_N (BUILT_IN_ATOMIC_AND_FETCH):
       {
 	enum built_in_function lib;
-	mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_AND_FETCH_1);
+	mode = builtin_sync_mode (BUILT_IN_ATOMIC_AND_FETCH_N, fcode);
 	lib = (enum built_in_function)((int)BUILT_IN_ATOMIC_FETCH_AND_1 + 
 				       (fcode - BUILT_IN_ATOMIC_AND_FETCH_1));
 	target = expand_builtin_atomic_fetch_op (mode, exp, target, AND, true,
@@ -8810,7 +8838,7 @@ expand_builtin (tree exp, rtx target, rtx subtarget, machine_mode mode,
     CASE_SYNC_BUILTIN_ALL_N (BUILT_IN_ATOMIC_NAND_FETCH):
       {
 	enum built_in_function lib;
-	mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_NAND_FETCH_1);
+	mode = builtin_sync_mode (BUILT_IN_ATOMIC_NAND_FETCH_N, fcode);
 	lib = (enum built_in_function)((int)BUILT_IN_ATOMIC_FETCH_NAND_1 + 
 				       (fcode - BUILT_IN_ATOMIC_NAND_FETCH_1));
 	target = expand_builtin_atomic_fetch_op (mode, exp, target, NOT, true,
@@ -8822,7 +8850,7 @@ expand_builtin (tree exp, rtx target, rtx subtarget, machine_mode mode,
     CASE_SYNC_BUILTIN_ALL_N (BUILT_IN_ATOMIC_XOR_FETCH):
       {
 	enum built_in_function lib;
-	mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_XOR_FETCH_1);
+	mode = builtin_sync_mode (BUILT_IN_ATOMIC_XOR_FETCH_N, fcode);
 	lib = (enum built_in_function)((int)BUILT_IN_ATOMIC_FETCH_XOR_1 + 
 				       (fcode - BUILT_IN_ATOMIC_XOR_FETCH_1));
 	target = expand_builtin_atomic_fetch_op (mode, exp, target, XOR, true,
@@ -8834,7 +8862,7 @@ expand_builtin (tree exp, rtx target, rtx subtarget, machine_mode mode,
     CASE_SYNC_BUILTIN_ALL_N (BUILT_IN_ATOMIC_OR_FETCH):
       {
 	enum built_in_function lib;
-	mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_OR_FETCH_1);
+	mode = builtin_sync_mode (BUILT_IN_ATOMIC_OR_FETCH_N, fcode);
 	lib = (enum built_in_function)((int)BUILT_IN_ATOMIC_FETCH_OR_1 + 
 				       (fcode - BUILT_IN_ATOMIC_OR_FETCH_1));
 	target = expand_builtin_atomic_fetch_op (mode, exp, target, IOR, true,
@@ -8844,7 +8872,7 @@ expand_builtin (tree exp, rtx target, rtx subtarget, machine_mode mode,
 	break;
       }
     CASE_SYNC_BUILTIN_ALL_N (BUILT_IN_ATOMIC_FETCH_ADD):
-      mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_FETCH_ADD_1);
+      mode = builtin_sync_mode (BUILT_IN_ATOMIC_FETCH_ADD_N, fcode);
       target = expand_builtin_atomic_fetch_op (mode, exp, target, PLUS, false,
 					       ignore, BUILT_IN_NONE);
       if (target)
@@ -8852,7 +8880,7 @@ expand_builtin (tree exp, rtx target, rtx subtarget, machine_mode mode,
       break;
 
     CASE_SYNC_BUILTIN_ALL_N (BUILT_IN_ATOMIC_FETCH_SUB):
-      mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_FETCH_SUB_1);
+      mode = builtin_sync_mode (BUILT_IN_ATOMIC_FETCH_SUB_N, fcode);
       target = expand_builtin_atomic_fetch_op (mode, exp, target, MINUS, false,
 					       ignore, BUILT_IN_NONE);
       if (target)
@@ -8860,7 +8888,7 @@ expand_builtin (tree exp, rtx target, rtx subtarget, machine_mode mode,
       break;
 
     CASE_SYNC_BUILTIN_ALL_N (BUILT_IN_ATOMIC_FETCH_AND):
-      mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_FETCH_AND_1);
+      mode = builtin_sync_mode (BUILT_IN_ATOMIC_FETCH_AND_N, fcode);
       target = expand_builtin_atomic_fetch_op (mode, exp, target, AND, false,
 					       ignore, BUILT_IN_NONE);
       if (target)
@@ -8868,7 +8896,7 @@ expand_builtin (tree exp, rtx target, rtx subtarget, machine_mode mode,
       break;
   
     CASE_SYNC_BUILTIN_ALL_N (BUILT_IN_ATOMIC_FETCH_NAND):
-      mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_FETCH_NAND_1);
+      mode = builtin_sync_mode (BUILT_IN_ATOMIC_FETCH_NAND_N, fcode);
       target = expand_builtin_atomic_fetch_op (mode, exp, target, NOT, false,
 					       ignore, BUILT_IN_NONE);
       if (target)
@@ -8876,7 +8904,7 @@ expand_builtin (tree exp, rtx target, rtx subtarget, machine_mode mode,
       break;
  
     CASE_SYNC_BUILTIN_ALL_N (BUILT_IN_ATOMIC_FETCH_XOR):
-      mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_FETCH_XOR_1);
+      mode = builtin_sync_mode (BUILT_IN_ATOMIC_FETCH_XOR_N, fcode);
       target = expand_builtin_atomic_fetch_op (mode, exp, target, XOR, false,
 					       ignore, BUILT_IN_NONE);
       if (target)
@@ -8884,7 +8912,7 @@ expand_builtin (tree exp, rtx target, rtx subtarget, machine_mode mode,
       break;
  
     CASE_SYNC_BUILTIN_ALL_N (BUILT_IN_ATOMIC_FETCH_OR):
-      mode = get_builtin_sync_mode (fcode - BUILT_IN_ATOMIC_FETCH_OR_1);
+      mode = builtin_sync_mode (BUILT_IN_ATOMIC_FETCH_OR_N, fcode);
       target = expand_builtin_atomic_fetch_op (mode, exp, target, IOR, false,
 					       ignore, BUILT_IN_NONE);
       if (target)
diff --git a/gcc/builtins.h b/gcc/builtins.h
index 8b812ceb2c4..1258d287778 100644
--- a/gcc/builtins.h
+++ b/gcc/builtins.h
@@ -187,4 +187,17 @@ struct access_data
 extern bool check_access (tree, tree, tree, tree, tree, tree, tree,
 			  bool = true, const access_data * = NULL);
 
+/* Enumerates the data-size-specific suffix of a group of sync-builtins.def
+   functions.  See the comments in that file for more details.  */
+enum sync_dsize {
+  SYNC_I1 = 1,
+  SYNC_I2 = 2,
+  SYNC_I4 = 4,
+  SYNC_I8 = 8,
+  SYNC_I16 = 16,
+  SYNC_ICAP = 0xcafe
+};
+extern built_in_function builtin_sync_code (built_in_function, int);
+extern sync_dsize builtin_sync_dsize (built_in_function, built_in_function);
+
 #endif /* GCC_BUILTINS_H */
diff --git a/gcc/c-family/c-common.c b/gcc/c-family/c-common.c
index 98ae22fd00e..7b9680e7524 100644
--- a/gcc/c-family/c-common.c
+++ b/gcc/c-family/c-common.c
@@ -51,6 +51,7 @@ along with GCC; see the file COPYING3.  If not see
 #include "c-spellcheck.h"
 #include "selftest.h"
 #include "debug.h"
+#include "builtins.h"
 
 cpp_reader *parse_in;		/* Declared in c-pragma.h.  */
 
@@ -6951,15 +6952,15 @@ speculation_safe_value_resolve_return (tree first_param, tree result)
   return result;
 }
 
-/* A helper function for resolve_overloaded_builtin in resolving the
-   overloaded __sync_ builtins.  Returns a positive power of 2 if the
-   first operand of PARAMS is a pointer to a supported data type.
-   Returns 0 if an error is encountered.
-   FETCH is true when FUNCTION is one of the _FETCH_OP_ or _OP_FETCH_
-   built-ins.  */
+/* A function to help resolve_overloaded_builtin resolve overloaded
+   __sync_ builtin ORIG_FCODE.  Return the non-overloaded function if
+   the first operand of PARAMS is a pointer to a supported data type.
+   Return END_BUILTINS if an error is encountered.  FETCH is true when
+   FUNCTION is one of the _FETCH_OP_ or _OP_FETCH_ built-ins.  */
 
-static int
-sync_resolve_size (tree function, vec<tree, va_gc> *params, bool fetch)
+static built_in_function
+sync_resolve_function (tree function, built_in_function orig_fcode,
+		       vec<tree, va_gc> *params, bool fetch)
 {
   /* Type of the argument.  */
   tree argtype;
@@ -6968,7 +6969,6 @@ sync_resolve_size (tree function, vec<tree, va_gc> *params, bool fetch)
   int size;
 
   argtype = type = TREE_TYPE ((*params)[0]);
-  gcc_assert (!capability_type_p (TREE_TYPE (type)));
 
   if (TREE_CODE (type) == ARRAY_TYPE && c_dialect_cxx ())
     {
@@ -6991,9 +6991,12 @@ sync_resolve_size (tree function, vec<tree, va_gc> *params, bool fetch)
   if (fetch && TREE_CODE (type) == BOOLEAN_TYPE)
     goto incompatible;
 
+  if (capability_type_p (type))
+    return builtin_sync_code (orig_fcode, SYNC_ICAP);
+
   size = tree_to_uhwi (TYPE_SIZE_UNIT (type));
   if (size == 1 || size == 2 || size == 4 || size == 8 || size == 16)
-    return size;
+    return builtin_sync_code (orig_fcode, size);
 
  incompatible:
   /* Issue the diagnostic only if the argument is valid, otherwise
@@ -7001,7 +7004,7 @@ sync_resolve_size (tree function, vec<tree, va_gc> *params, bool fetch)
   if (argtype != error_mark_node)
     error ("operand type %qT is incompatible with argument %d of %qE",
 	   argtype, 1, function);
-  return 0;
+  return END_BUILTINS;
 }
 
 /* A helper function for resolve_overloaded_builtin.  Adds casts to
@@ -7687,20 +7690,12 @@ resolve_atomic_fncode_n (tree function, vec<tree, va_gc> *params,
       error ("too few arguments to function %qE", function);
       return error_mark_node;
     }
-   else if ((*params)[0] == error_mark_node)
-    return error_mark_node;
-  else if (capability_type_p (TYPE_MAIN_VARIANT (TREE_TYPE
-						 (TREE_TYPE ((*params)[0])))))
-    new_code_bt = (enum built_in_function) ((int) orig_code
-						+ CAPABILITY_BUILTIN_FCODE_DIFF
-						+ 1);
   else
     {
-      int n = sync_resolve_size (function, params, fetch_op);
-      if (n == 0)
+      new_code_bt = sync_resolve_function (function, orig_code, params,
+					   fetch_op);
+      if (new_code_bt == END_BUILTINS)
 	return error_mark_node;
-      new_code_bt = (enum built_in_function)((int) orig_code
-					      + exact_log2 (n) + 1);
     }
   return builtin_decl_explicit (new_code_bt);
 }
diff --git a/gcc/cp/decl2.c b/gcc/cp/decl2.c
index 2d68a213b6c..021b1a8ca5b 100644
--- a/gcc/cp/decl2.c
+++ b/gcc/cp/decl2.c
@@ -48,6 +48,7 @@ along with GCC; see the file COPYING3.  If not see
 #include "intl.h"
 #include "c-family/c-ada-spec.h"
 #include "asan.h"
+#include "builtins.h"
 
 /* Id for dumping the raw trees.  */
 int raw_dump_id;
@@ -3326,12 +3327,10 @@ build_atomic_load_byte (tree src, HOST_WIDE_INT model)
   tree mem_model = build_int_cst (integer_type_node, model);
   tree t, addr, val;
   unsigned int size;
-  int fncode;
 
   size = tree_to_uhwi (TYPE_SIZE_UNIT (char_type_node));
 
-  fncode = BUILT_IN_ATOMIC_LOAD_N + exact_log2 (size) + 1;
-  t = builtin_decl_implicit ((enum built_in_function) fncode);
+  t = builtin_decl_implicit (builtin_sync_code (BUILT_IN_ATOMIC_LOAD_N, size));
 
   addr = build_addr_expr (ptr_type, src);
   val = build_call_expr (t, 2, addr, mem_model);
diff --git a/gcc/fortran/trans-intrinsic.c b/gcc/fortran/trans-intrinsic.c
index 2483f016d8e..2cc2b18dd37 100644
--- a/gcc/fortran/trans-intrinsic.c
+++ b/gcc/fortran/trans-intrinsic.c
@@ -40,6 +40,8 @@ along with GCC; see the file COPYING3.  If not see
 #include "trans-types.h"
 #include "trans-array.h"
 #include "dependency.h"	/* For CAF array alias analysis.  */
+#include "builtins.h"
+
 /* Only for gfc_trans_assign and gfc_trans_pointer_assign.  */
 
 /* This maps Fortran intrinsic math functions to external library or GCC
@@ -11358,9 +11360,7 @@ conv_intrinsic_atomic_op (gfc_code *code)
     }
 
   tmp = TREE_TYPE (TREE_TYPE (atom));
-  fn = (built_in_function) ((int) fn
-			    + exact_log2 (tree_to_uhwi (TYPE_SIZE_UNIT (tmp)))
-			    + 1);
+  fn = builtin_sync_code (fn, tree_to_uhwi (TYPE_SIZE_UNIT (tmp)));
   tree itype = TREE_TYPE (TREE_TYPE (atom));
   tmp = builtin_decl_explicit (fn);
 
@@ -11482,9 +11482,8 @@ conv_intrinsic_atomic_ref (gfc_code *code)
     }
 
   tmp = TREE_TYPE (TREE_TYPE (atom));
-  fn = (built_in_function) ((int) BUILT_IN_ATOMIC_LOAD_N
-			    + exact_log2 (tree_to_uhwi (TYPE_SIZE_UNIT (tmp)))
-			    + 1);
+  fn = builtin_sync_code (BUILT_IN_ATOMIC_LOAD_N,
+			  tree_to_uhwi (TYPE_SIZE_UNIT (tmp)));
   tmp = builtin_decl_explicit (fn);
   tmp = build_call_expr_loc (input_location, tmp, 2, atom,
 			     build_int_cst (integer_type_node,
@@ -11608,9 +11607,8 @@ conv_intrinsic_atomic_cas (gfc_code *code)
     }
 
   tmp = TREE_TYPE (TREE_TYPE (atom));
-  fn = (built_in_function) ((int) BUILT_IN_ATOMIC_COMPARE_EXCHANGE_N
-			    + exact_log2 (tree_to_uhwi (TYPE_SIZE_UNIT (tmp)))
-			    + 1);
+  fn = builtin_sync_code (BUILT_IN_ATOMIC_COMPARE_EXCHANGE_N,
+			  tree_to_uhwi (TYPE_SIZE_UNIT (tmp)));
   tmp = builtin_decl_explicit (fn);
 
   gfc_add_modify (&block, old, comp);
diff --git a/gcc/omp-expand.c b/gcc/omp-expand.c
index dd606665f93..b33d55b8d19 100644
--- a/gcc/omp-expand.c
+++ b/gcc/omp-expand.c
@@ -59,6 +59,7 @@ along with GCC; see the file COPYING3.  If not see
 #include "stringpool.h"
 #include "attribs.h"
 #include "tree-eh.h"
+#include "builtins.h"
 
 /* OMP region information.  Every parallel and workshare
    directive is enclosed between two markers, the OMP_* directive
@@ -8132,7 +8133,7 @@ omp_memory_order_to_memmodel (enum omp_memory_order mo)
 
 static bool
 expand_omp_atomic_load (basic_block load_bb, tree addr,
-			tree loaded_val, int index)
+			tree loaded_val, int nbytes)
 {
   enum built_in_function tmpbase;
   gimple_stmt_iterator gsi;
@@ -8150,7 +8151,7 @@ expand_omp_atomic_load (basic_block load_bb, tree addr,
      is smaller than word size, then expand_atomic_load assumes that the load
      is atomic.  We could avoid the builtin entirely in this case.  */
 
-  tmpbase = (enum built_in_function) (BUILT_IN_ATOMIC_LOAD_N + index + 1);
+  tmpbase = builtin_sync_code (BUILT_IN_ATOMIC_LOAD_N, nbytes);
   decl = builtin_decl_explicit (tmpbase);
   if (decl == NULL_TREE)
     return false;
@@ -8184,7 +8185,7 @@ expand_omp_atomic_load (basic_block load_bb, tree addr,
 
 static bool
 expand_omp_atomic_store (basic_block load_bb, tree addr,
-			 tree loaded_val, tree stored_val, int index)
+			 tree loaded_val, tree stored_val, int nbytes)
 {
   enum built_in_function tmpbase;
   gimple_stmt_iterator gsi;
@@ -8212,7 +8213,7 @@ expand_omp_atomic_store (basic_block load_bb, tree addr,
      is atomic.  We could avoid the builtin entirely in this case.  */
 
   tmpbase = (exchange ? BUILT_IN_ATOMIC_EXCHANGE_N : BUILT_IN_ATOMIC_STORE_N);
-  tmpbase = (enum built_in_function) ((int) tmpbase + index + 1);
+  tmpbase = builtin_sync_code (tmpbase, nbytes);
   decl = builtin_decl_explicit (tmpbase);
   if (decl == NULL_TREE)
     return false;
@@ -8255,14 +8256,14 @@ expand_omp_atomic_store (basic_block load_bb, tree addr,
 }
 
 /* A subroutine of expand_omp_atomic.  Attempt to implement the atomic
-   operation as a __atomic_fetch_op builtin.  INDEX is log2 of the
-   size of the data type, and thus usable to find the index of the builtin
-   decl.  Returns false if the expression is not of the proper form.  */
+   operation as a __atomic_fetch_op builtin.  NBYTES is the size of the
+   data type in bytes.  Returns false if the expression is not of the
+   proper form.  */
 
 static bool
 expand_omp_atomic_fetch_op (basic_block load_bb,
 			    tree addr, tree loaded_val,
-			    tree stored_val, int index)
+			    tree stored_val, int nbytes)
 {
   enum built_in_function oldbase, newbase, tmpbase;
   tree decl, itype, call;
@@ -8352,8 +8353,7 @@ expand_omp_atomic_fetch_op (basic_block load_bb,
   else
     return false;
 
-  tmpbase = ((enum built_in_function)
-	     ((need_new ? newbase : oldbase) + index + 1));
+  tmpbase = builtin_sync_code (need_new ? newbase : oldbase, nbytes);
   decl = builtin_decl_explicit (tmpbase);
   if (decl == NULL_TREE)
     return false;
@@ -8413,13 +8413,12 @@ expand_omp_atomic_fetch_op (basic_block load_bb,
 	if (oldval != newval)
 	  goto repeat;
 
-   INDEX is log2 of the size of the data type, and thus usable to find the
-   index of the builtin decl.  */
+   NBYTES is the size of the data type in bytes.  */
 
 static bool
 expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
 			    tree addr, tree loaded_val, tree stored_val,
-			    int index)
+			    int nbytes)
 {
   tree loadedi, storedi, initial, new_storedi, old_vali;
   tree type, itype, cmpxchg, iaddr, atype;
@@ -8431,8 +8430,7 @@ expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
 
   /* ??? We need a non-pointer interface to __atomic_compare_exchange in
      order to use the RELAXED memory model effectively.  */
-  fncode = (enum built_in_function)((int)BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_N
-				    + index + 1);
+  fncode = builtin_sync_code (BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_N, nbytes);
   cmpxchg = builtin_decl_explicit (fncode);
   if (cmpxchg == NULL_TREE)
     return false;
@@ -8474,7 +8472,7 @@ expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
       loadedi = loaded_val;
     }
 
-  fncode = (enum built_in_function) (BUILT_IN_ATOMIC_LOAD_N + index + 1);
+  fncode = builtin_sync_code (BUILT_IN_ATOMIC_LOAD_N, nbytes);
   tree loaddecl = builtin_decl_explicit (fncode);
   if (loaddecl)
     initial
@@ -8686,8 +8684,8 @@ expand_omp_atomic (struct omp_region *region)
   HOST_WIDE_INT index;
 
   /* Make sure the type is one of the supported sizes.  */
-  index = tree_to_uhwi (TYPE_SIZE_UNIT (type));
-  index = exact_log2 (index);
+  auto nbytes = tree_to_uhwi (TYPE_SIZE_UNIT (type));
+  index = exact_log2 (nbytes);
   if (index >= 0 && index <= 4)
     {
       unsigned int align = TYPE_ALIGN_UNIT (type);
@@ -8701,7 +8699,7 @@ expand_omp_atomic (struct omp_region *region)
 	      && (is_int_mode (TYPE_MODE (type), &smode)
 		  || is_float_mode (TYPE_MODE (type), &smode))
 	      && GET_MODE_BITSIZE (smode) <= BITS_PER_WORD
-	      && expand_omp_atomic_load (load_bb, addr, loaded_val, index))
+	      && expand_omp_atomic_load (load_bb, addr, loaded_val, nbytes))
 	    return;
 
 	  /* Atomic store.  */
@@ -8711,20 +8709,20 @@ expand_omp_atomic (struct omp_region *region)
 	      && store_bb == single_succ (load_bb)
 	      && first_stmt (store_bb) == store
 	      && expand_omp_atomic_store (load_bb, addr, loaded_val,
-					  stored_val, index))
+					  stored_val, nbytes))
 	    return;
 
 	  /* When possible, use specialized atomic update functions.  */
 	  if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
 	      && store_bb == single_succ (load_bb)
 	      && expand_omp_atomic_fetch_op (load_bb, addr,
-					     loaded_val, stored_val, index))
+					     loaded_val, stored_val, nbytes))
 	    return;
 
 	  /* If we don't have specialized __sync builtins, try and implement
 	     as a compare and swap loop.  */
 	  if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
-					  loaded_val, stored_val, index))
+					  loaded_val, stored_val, nbytes))
 	    return;
 	}
     }
diff --git a/gcc/tree-core.h b/gcc/tree-core.h
index 9ea47809add..cfd565a1b98 100644
--- a/gcc/tree-core.h
+++ b/gcc/tree-core.h
@@ -208,12 +208,6 @@ enum combined_fn {
   CFN_LAST
 };
 
-/* This is a constant for how many BUILT_IN definitions exist between a
-   BUILT_IN_x_1 and a BUILT_IN_x_CAPABILITY. All _CAPABILITY BUILT_INs must
-   be defined this way.  Currently this resolves to 5.  */
-#define CAPABILITY_BUILTIN_FCODE_DIFF BUILT_IN_ATOMIC_LOAD_CAPABILITY \
-				- BUILT_IN_ATOMIC_LOAD_1
-
 /* Tree code classes.  Each tree_code has an associated code class
    represented by a TREE_CODE_CLASS.  */
 enum tree_code_class {


^ permalink raw reply	[flat|nested] only message in thread

only message in thread, other threads:[~2022-05-06 14:43 UTC | newest]

Thread overview: (only message) (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2022-05-06 14:43 [gcc(refs/vendors/ARM/heads/morello)] builtins: Add helper functions for sync builtins Matthew Malcomson

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for read-only IMAP folder(s) and NNTP newsgroup(s).