* [PATCH] vect: Fix single def-use cycle for ifn reductions [PR108608]
@ 2023-01-31 13:06 Richard Sandiford
From: Richard Sandiford @ 2023-01-31 13:06 UTC
  To: gcc-patches

The patch that added support for fmin/fmax reductions didn't
handle single def-use cycles.  In some ways, this seems like
going out of our way to make things slower, but that's a
discussion for another day.
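
For background, a hand-written scalar sketch of the trade-off involved
(illustrative function names, a two-copies-per-iteration shape, and n
assumed even; this is not compiler output): a single def-use cycle keeps
one accumulator that every vector copy feeds back into, whereas the usual
scheme gives each copy its own accumulator and combines them after the
loop, which is what the remark about making things slower refers to.

/* Single def-use cycle: both "copies" extend one fmax dependence chain.  */
double
fmax_single_cycle (double m, const float *ptr, int n)
{
  for (int i = 0; i < n; i += 2)
    {
      m = __builtin_fmax (m, ptr[i]);
      m = __builtin_fmax (m, ptr[i + 1]);
    }
  return m;
}

/* Separate accumulators: two independent chains, combined at the end.  */
double
fmax_split_accumulators (double m, const float *ptr, int n)
{
  double m0 = m, m1 = m;
  for (int i = 0; i < n; i += 2)
    {
      m0 = __builtin_fmax (m0, ptr[i]);
      m1 = __builtin_fmax (m1, ptr[i + 1]);
    }
  return __builtin_fmax (m0, m1);
}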

Tested on aarch64-linux-gnu & x86_64-linux-gnu.  OK for trunk
and the GCC 12 branch?

Richard


gcc/
	PR tree-optimization/108608
	* tree-vect-loop.cc (vect_transform_reduction): Handle single
	def-use cycles that involve function calls rather than tree codes.

gcc/testsuite/
	PR tree-optimization/108608
	* gcc.dg/vect/pr108608.c: New test.
	* gcc.target/aarch64/sve/pr108608-1.c: Likewise.
---
 gcc/testsuite/gcc.dg/vect/pr108608.c          | 24 +++++++++++++++++++
 .../gcc.target/aarch64/sve/pr108608-1.c       |  9 +++++++
 gcc/tree-vect-loop.cc                         | 22 ++++++++++-------
 3 files changed, 46 insertions(+), 9 deletions(-)
 create mode 100644 gcc/testsuite/gcc.dg/vect/pr108608.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve/pr108608-1.c

diff --git a/gcc/testsuite/gcc.dg/vect/pr108608.c b/gcc/testsuite/gcc.dg/vect/pr108608.c
new file mode 100644
index 00000000000..e968141ba03
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vect/pr108608.c
@@ -0,0 +1,24 @@
+#include "tree-vect.h"
+
+double __attribute__((noipa))
+foo (double m, float *ptr)
+{
+  for (int i = 0; i < 256; i++)
+    m = __builtin_fmax (m, ptr[i]);
+  return m;
+}
+
+int
+main (void)
+{
+  check_vect ();
+  float ptr[256];
+  for (int j = 0; j < 16; ++j)
+    {
+      for (int i = 0; i < 256; ++i)
+	ptr[i] = i == 128 + j ? 2 + j : i == 161 ? 1 : 0;
+      if (foo (0, ptr) != 2 + j)
+	__builtin_abort ();
+    }
+  return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/pr108608-1.c b/gcc/testsuite/gcc.target/aarch64/sve/pr108608-1.c
new file mode 100644
index 00000000000..0a7d485e047
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/pr108608-1.c
@@ -0,0 +1,9 @@
+/* { dg-options "-O3" } */
+
+double __attribute__((noipa))
+foo (double m, float *ptr)
+{
+  for (int i = 0; i < 256; i++)
+    m = __builtin_fmax (m, ptr[i]);
+  return m;
+}
diff --git a/gcc/tree-vect-loop.cc b/gcc/tree-vect-loop.cc
index f0801c23671..f03af1efd0f 100644
--- a/gcc/tree-vect-loop.cc
+++ b/gcc/tree-vect-loop.cc
@@ -7755,8 +7755,6 @@ vect_transform_reduction (loop_vec_info loop_vinfo,
   gimple_match_op op;
   if (!gimple_extract_op (stmt_info->stmt, &op))
     gcc_unreachable ();
-  gcc_assert (op.code.is_tree_code ());
-  auto code = tree_code (op.code);
 
   /* All uses but the last are expected to be defined in the loop.
      The last use is the reduction variable.  In case of nested cycle this
@@ -7778,7 +7776,8 @@ vect_transform_reduction (loop_vec_info loop_vinfo,
       vec_num = 1;
     }
 
-  internal_fn cond_fn = get_conditional_internal_fn (code);
+  code_helper code = canonicalize_code (op.code, op.type);
+  internal_fn cond_fn = get_conditional_internal_fn (code, op.type);
   vec_loop_masks *masks = &LOOP_VINFO_MASKS (loop_vinfo);
   bool mask_by_cond_expr = use_mask_by_cond_expr_p (code, cond_fn, vectype_in);
 
@@ -7802,9 +7801,10 @@ vect_transform_reduction (loop_vec_info loop_vinfo,
   if (reduction_type == FOLD_LEFT_REDUCTION)
     {
       internal_fn reduc_fn = STMT_VINFO_REDUC_FN (reduc_info);
+      gcc_assert (code.is_tree_code ());
       return vectorize_fold_left_reduction
-	  (loop_vinfo, stmt_info, gsi, vec_stmt, slp_node, reduc_def_phi, code,
-	   reduc_fn, op.ops, vectype_in, reduc_index, masks);
+	  (loop_vinfo, stmt_info, gsi, vec_stmt, slp_node, reduc_def_phi,
+	   tree_code (code), reduc_fn, op.ops, vectype_in, reduc_index, masks);
     }
 
   bool single_defuse_cycle = STMT_VINFO_FORCE_SINGLE_CYCLE (reduc_info);
@@ -7814,7 +7814,7 @@ vect_transform_reduction (loop_vec_info loop_vinfo,
 	      || code == SAD_EXPR);
 
   /* Create the destination vector  */
-  tree scalar_dest = gimple_assign_lhs (stmt_info->stmt);
+  tree scalar_dest = gimple_get_lhs (stmt_info->stmt);
   tree vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
 
   vect_get_vec_defs (loop_vinfo, stmt_info, slp_node, ncopies,
@@ -7849,7 +7849,7 @@ vect_transform_reduction (loop_vec_info loop_vinfo,
 	  /* Make sure that the reduction accumulator is vop[0].  */
 	  if (reduc_index == 1)
 	    {
-	      gcc_assert (commutative_tree_code (code));
+	      gcc_assert (commutative_binary_op_p (code, op.type));
 	      std::swap (vop[0], vop[1]);
 	    }
 	  tree mask = vect_get_loop_mask (gsi, masks, vec_num * ncopies,
@@ -7877,11 +7877,15 @@ vect_transform_reduction (loop_vec_info loop_vinfo,
 	  if (emulated_mixed_dot_prod)
 	    new_stmt = vect_emulate_mixed_dot_prod (loop_vinfo, stmt_info, gsi,
 						    vec_dest, vop);
+	  else if (code.is_internal_fn ())
+	    new_stmt = gimple_build_call_internal (internal_fn (code),
+						   op.num_ops,
+						   vop[0], vop[1], vop[2]);
 	  else
-	    new_stmt = gimple_build_assign (vec_dest, code,
+	    new_stmt = gimple_build_assign (vec_dest, tree_code (op.code),
 					    vop[0], vop[1], vop[2]);
 	  new_temp = make_ssa_name (vec_dest, new_stmt);
-	  gimple_assign_set_lhs (new_stmt, new_temp);
+	  gimple_set_lhs (new_stmt, new_temp);
 	  vect_finish_stmt_generation (loop_vinfo, stmt_info, new_stmt, gsi);
 	}
 
-- 
2.25.1



* Re: [PATCH] vect: Fix single def-use cycle for ifn reductions [PR108608]
@ 2023-01-31 14:53 ` Richard Biener
From: Richard Biener @ 2023-01-31 14:53 UTC
  To: Richard Sandiford, gcc-patches

On Tue, Jan 31, 2023 at 2:08 PM Richard Sandiford via Gcc-patches
<gcc-patches@gcc.gnu.org> wrote:
>
> The patch that added support for fmin/fmax reductions didn't
> handle single def-use cycles.  In some ways, this seems like
> going out of our way to make things slower, but that's a
> discussion for another day.
>
> Tested on aarch64-linux-gnu & x86_64-linux-gnu.  OK for trunk
> and the GCC 12 branch?

OK.

Richard.

