[gcc r13-111] tree-optimization/104658 - avoid mixing mask & non-mask vector defs
From: Richard Biener @ 2022-05-04 13:12 UTC
  To: gcc-cvs

https://gcc.gnu.org/g:eca04dc8555f5fae462fbd16386da9aaf38a0711

commit r13-111-geca04dc8555f5fae462fbd16386da9aaf38a0711
Author: Richard Biener <rguenther@suse.de>
Date:   Tue Feb 22 16:02:27 2022 +0100

    tree-optimization/104658 - avoid mixing mask & non-mask vector defs
    
    When pattern recognition fails to sanitize all defs of a
    mask-producing operation and the respective def is external or
    constant, we end up trying to produce a VECTOR_BOOLEAN_TYPE_P
    constructor, which in turn exposes stmts like
    
      <signed-boolean:1> _135 = _49 ? -1 : 0;
    
    which are not handled well by follow-up SLP and generate awful
    code.
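    
    For reference, a minimal C shape that can lead into this situation
    (an illustrative sketch only, not the committed testcase below;
    function and parameter names are made up):
    
      /* Each store mixes a scalar _Bool mask with integer data, so SLP
         would have to build a boolean vector from external scalar defs.  */
      void
      foo (int *restrict a, int *restrict e,
           _Bool d0, _Bool d1, _Bool d2, _Bool d3)
      {
        e[0] = (d0 && a[0] != 0) ? a[0] : 0;
        e[1] = (d1 && a[1] != 0) ? a[1] : 0;
        e[2] = (d2 && a[2] != 0) ? a[2] : 0;
        e[3] = (d3 && a[3] != 0) ? a[3] : 0;
      }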
    
    We do rely heavily on pattern recognition to sanitize mask vs.
    data uses of bools, but that fails here, which means we should
    also fail vectorization.  That avoids ICEing on such stmts and
    it also avoids generating weird code that makes the
    vectorization unprofitable.
    
    The following patch simply disallows external
    VECTOR_BOOLEAN_TYPE_P defs and arranges for the
    promote-to-external code to instead promote mask uses to extern
    (that's just a short-cut here).
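    
    Condensed, the guards this adds boil down to something like the
    following (a paraphrased sketch; see the hunks in the diff below
    for the exact code and context):
    
      /* In vect_slp_convert_to_external: do not promote a mask-producing
         node to an external def; the mask use is built from scalars
         instead.  */
      if (VECTOR_BOOLEAN_TYPE_P (SLP_TREE_VECTYPE (node)))
        return false;
    
      /* In vect_maybe_update_slp_op_vectype: refuse to give an external
         def a vector mask type (vect_constant_def is still allowed).  */
      if (VECTOR_BOOLEAN_TYPE_P (vectype)
          && SLP_TREE_DEF_TYPE (op) == vect_external_def)
        return false;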
    
    I've also looked at aarch64 with SVE and a fixed vector length
    for the gcc.target/i386/pr101636.c testcase.  I see similar
    vectorization (using <signed-boolean:4>) there, but it's hard to
    decide whether the old, the new or no vectorization is better
    for this.  The code generated with traditional integer masks
    isn't as awkward, but we still get the != 0 promotion done for
    each scalar element, which doesn't look intended - this
    operation should be visible upfront.
    
    That also means some cases will now become missed optimizations
    that need to be fixed by bool pattern recognition.  But that can
    possibly be delayed to GCC 13.
    
    2022-02-22  Richard Biener  <rguenther@suse.de>
    
            PR tree-optimization/104658
            * tree-vect-slp.cc (vect_slp_convert_to_external): Do not
            create VECTOR_BOOLEAN_TYPE_P extern defs.  Reset the vector
            type on nodes we promote.
            (vectorizable_bb_reduc_epilogue): Deal with externalized
            root.
            * tree-vect-stmts.cc (vect_maybe_update_slp_op_vectype): Do
            not allow VECTOR_BOOLEAN_TYPE_P extern defs.
    
            * gcc.target/i386/pr104658.c: New testcase.

Diff:
---
 gcc/testsuite/gcc.target/i386/pr104658.c | 113 +++++++++++++++++++++++++++++++
 gcc/tree-vect-slp.cc                     |   9 ++-
 gcc/tree-vect-stmts.cc                   |   5 ++
 3 files changed, 125 insertions(+), 2 deletions(-)

diff --git a/gcc/testsuite/gcc.target/i386/pr104658.c b/gcc/testsuite/gcc.target/i386/pr104658.c
new file mode 100644
index 00000000000..2b8d02aacab
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/pr104658.c
@@ -0,0 +1,113 @@
+/* { dg-do compile } */
+/* { dg-options "-O -fgimple -ftree-slp-vectorize -mavx512f -fdump-tree-slp2" } */
+
+void __GIMPLE (ssa,guessed_local(118111600))
+bar (int * restrict a, int * restrict e,
+     _Bool d0, _Bool d1, _Bool d2, _Bool d3, _Bool d4, _Bool d5, _Bool d6, _Bool d7)
+{
+  int _1;
+  int _4;
+  int _6;
+  int _8;
+  int _10;
+  int _12;
+  int _14;
+  int _16;
+  int _27;
+  _Bool _37;
+  _Bool _39;
+  _Bool _41;
+  int _43;
+  _Bool _45;
+  _Bool _47;
+  _Bool _49;
+  _Bool _53;
+  _Bool _54;
+  _Bool _55;
+  int _56;
+  _Bool _57;
+  _Bool _58;
+  _Bool _59;
+  int _60;
+  _Bool _61;
+  _Bool _62;
+  _Bool _63;
+  int _64;
+  _Bool _65;
+  _Bool _66;
+  _Bool _67;
+  int _68;
+  _Bool _69;
+  _Bool _70;
+  _Bool _71;
+  int _72;
+  _Bool _73;
+  _Bool _74;
+  _Bool _75;
+  int _76;
+
+  __BB(2,guessed_local(118111600)):
+  _73 = d0_2(D);
+  _69 = d1_5(D);
+  _65 = d2_7(D);
+  _61 = d3_9(D);
+  _57 = d4_11(D);
+  _53 = d5_13(D);
+  _41 = d6_15(D);
+  _49 = d7_17(D);
+  a_81 = a_22(D);
+  e_82 = e_23(D);
+  _1 = __MEM <int> (a_81 + _Literal (int * restrict) 32);
+  _4 = __MEM <int> (a_81 + _Literal (int * restrict) 36);
+  _6 = __MEM <int> (a_81);
+  _8 = __MEM <int> (a_81 + _Literal (int * restrict) 4);
+  _10 = __MEM <int> (a_81 + _Literal (int * restrict) 48);
+  _12 = __MEM <int> (a_81 + _Literal (int * restrict) 52);
+  _14 = __MEM <int> (a_81 + _Literal (int * restrict) 16);
+  _16 = __MEM <int> (a_81 + _Literal (int * restrict) 60);
+  _74 = _1 != 0;
+  _75 = _73 & _74;
+  _76 = _75 ? _1 : 0;
+  __MEM <int> (e_82) = _76;
+  __MEM <int> (e_82 + _Literal (int * restrict) 4) = _76;
+  __MEM <int> (e_82 + _Literal (int * restrict) 8) = _76;
+  __MEM <int> (e_82 + _Literal (int * restrict) 12) = _76;
+  __MEM <int> (e_82 + _Literal (int * restrict) 16) = _76;
+  __MEM <int> (e_82 + _Literal (int * restrict) 20) = _76;
+  __MEM <int> (e_82 + _Literal (int * restrict) 24) = _76;
+  __MEM <int> (e_82 + _Literal (int * restrict) 28) = _76;
+  __MEM <int> (e_82 + _Literal (int * restrict) 32) = _76;
+  _70 = _4 != 0;
+  _71 = _69 & _70;
+  _72 = _71 ? _4 : 0;
+  __MEM <int> (e_82 + _Literal (int * restrict) 36) = _72;
+  _66 = _6 != 0;
+  _67 = _65 & _66;
+  _68 = _67 ? _6 : 0;
+  __MEM <int> (e_82 + _Literal (int * restrict) 40) = _68;
+  _62 = _8 != 0;
+  _63 = _61 & _62;
+  _64 = _63 ? _8 : 0;
+  __MEM <int> (e_82 + _Literal (int * restrict) 44) = _64;
+  _58 = _10 != 0;
+  _59 = _57 & _58;
+  _60 = _59 ? _10 : 0;
+  __MEM <int> (e_82 + _Literal (int * restrict) 48) = _60;
+  _54 = _12 != 0;
+  _55 = _53 & _54;
+  _56 = _55 ? _12 : 0;
+  __MEM <int> (e_82 + _Literal (int * restrict) 52) = _56;
+  _39 = _14 != 0;
+  _37 = _39 & _41;
+  _27 = _37 ? _14 : 0;
+  __MEM <int> (e_82 + _Literal (int * restrict) 56) = _27;
+  _47 = _16 != 0;
+  _45 = _47 & _49;
+  _43 = _45 ? _16 : 0;
+  __MEM <int> (e_82 + _Literal (int * restrict) 60) = _43;
+  return;
+
+}
+
+/* We do not want an AVX512 mask CTOR built from converted _Bool.  */
+/* { dg-final { scan-tree-dump-not " = \\(<signed-boolean:1>\\) " "slp2" } } */
diff --git a/gcc/tree-vect-slp.cc b/gcc/tree-vect-slp.cc
index 2685bc10347..cdfff1ab9f6 100644
--- a/gcc/tree-vect-slp.cc
+++ b/gcc/tree-vect-slp.cc
@@ -4532,7 +4532,9 @@ vect_slp_convert_to_external (vec_info *vinfo, slp_tree node,
   if (!is_a <bb_vec_info> (vinfo)
       || node == SLP_INSTANCE_TREE (node_instance)
       || !SLP_TREE_SCALAR_STMTS (node).exists ()
-      || vect_contains_pattern_stmt_p (SLP_TREE_SCALAR_STMTS (node)))
+      || vect_contains_pattern_stmt_p (SLP_TREE_SCALAR_STMTS (node))
+      /* Force the mask use to be built from scalars instead.  */
+      || VECTOR_BOOLEAN_TYPE_P (SLP_TREE_VECTYPE (node)))
     return false;
 
   if (dump_enabled_p ())
@@ -4544,6 +4546,8 @@ vect_slp_convert_to_external (vec_info *vinfo, slp_tree node,
      (need to) ignore child nodes of anything that isn't vect_internal_def.  */
   unsigned int group_size = SLP_TREE_LANES (node);
   SLP_TREE_DEF_TYPE (node) = vect_external_def;
+  /* Invariants get their vector type from the uses.  */
+  SLP_TREE_VECTYPE (node) = NULL_TREE;
   SLP_TREE_SCALAR_OPS (node).safe_grow (group_size, true);
   SLP_TREE_LOAD_PERMUTATION (node).release ();
   FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt_info)
@@ -4878,7 +4882,8 @@ vectorizable_bb_reduc_epilogue (slp_instance instance,
     reduc_code = PLUS_EXPR;
   internal_fn reduc_fn;
   tree vectype = SLP_TREE_VECTYPE (SLP_INSTANCE_TREE (instance));
-  if (!reduction_fn_for_scalar_code (reduc_code, &reduc_fn)
+  if (!vectype
+      || !reduction_fn_for_scalar_code (reduc_code, &reduc_fn)
       || reduc_fn == IFN_LAST
       || !direct_internal_fn_supported_p (reduc_fn, vectype, OPTIMIZE_FOR_BOTH)
       || !useless_type_conversion_p (TREE_TYPE (gimple_assign_lhs (stmt)),
diff --git a/gcc/tree-vect-stmts.cc b/gcc/tree-vect-stmts.cc
index d8da13e312a..8327e9d047e 100644
--- a/gcc/tree-vect-stmts.cc
+++ b/gcc/tree-vect-stmts.cc
@@ -11891,6 +11891,11 @@ vect_maybe_update_slp_op_vectype (slp_tree op, tree vectype)
     return true;
   if (SLP_TREE_VECTYPE (op))
     return types_compatible_p (SLP_TREE_VECTYPE (op), vectype);
+  /* For external defs, refuse to produce VECTOR_BOOLEAN_TYPE_P; those
+     should be handled by patterns.  Allow vect_constant_def for now.  */
+  if (VECTOR_BOOLEAN_TYPE_P (vectype)
+      && SLP_TREE_DEF_TYPE (op) == vect_external_def)
+    return false;
   SLP_TREE_VECTYPE (op) = vectype;
   return true;
 }

