Subject: [gcc r12-5490] middle-end: Fix failures with bitclear patterns on signed values
From: Tamar Christina @ 2021-11-24  6:41 UTC
  To: gcc-cvs

https://gcc.gnu.org/g:755c2e7d71cbab89b2bd1d787db46428a604efb2

commit r12-5490-g755c2e7d71cbab89b2bd1d787db46428a604efb2
Author: Tamar Christina <tamar.christina@arm.com>
Date:   Wed Nov 24 06:39:05 2021 +0000

    middle-end: Fix failures with bitclear patterns on signed values
    
    During testing after a rebase I noticed a failing testcase caused by the
    bitmask compare patch.
    
    Consider the following C++ testcase:
    
    #include <compare>
    
    #define A __attribute__((noipa))
    A bool f5 (double i, double j) { auto c = i <=> j; return c >= 0; }
    
    This turns into a comparison against chars.  On systems where chars are
    signed, the pattern inserts an unsigned conversion so that it is able to
    do the transformation.
    
    i.e.:
    
      # RANGE [-1, 2]
      # c$_M_value_22 = PHI <-1(3), 0(2), 2(5), 1(4)>
      # RANGE ~[3, 254]
      _11 = (unsigned char) c$_M_value_22;
      _19 = _11 <= 1;
      # .MEM_24 = VDEF <.MEM_6(D)>
      D.10434 ={v} {CLOBBER};
      # .MEM_14 = VDEF <.MEM_24>
      D.10407 ={v} {CLOBBER};
      # VUSE <.MEM_14>
      return _19;
    
    instead of:
    
      # RANGE [-1, 2]
      # c$_M_value_5 = PHI <-1(3), 0(2), 2(5), 1(4)>
      # RANGE [-2, 2]
      _3 = c$_M_value_5 & -2;
      _19 = _3 == 0;
      # .MEM_24 = VDEF <.MEM_6(D)>
      D.10440 ={v} {CLOBBER};
      # .MEM_14 = VDEF <.MEM_24>
      D.10413 ={v} {CLOBBER};
      # VUSE <.MEM_14>
      return _19;
    
    This causes much worse codegen under -ffast-math because phiopt no longer
    recognizes the pattern.  It turns out that phiopt's spaceship_replacement
    is looking for the exact form that was just changed.
    
    The comments seem to suggest this code only checks for (res & ~1) == 0,
    but the implementation appears to be broader.
    
    As such, I added a case that checks whether the value comparison we found
    is a type cast, strips away the cast, and continues.
    
    In match.pd the type casts are only added for signed == 0 and != 0
    comparisons, which are then rewritten into comparisons with 1, as
    illustrated below.
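
    Schematically (my reading of the dumps above, not exact match.pd
    output), the rewrite is:

      _3 = c & -2;               /* (res & ~1) == 0 on the signed type */
      _19 = _3 == 0;

    into:

      _11 = (unsigned char) c;   /* unsigned cast inserted by match.pd */
      _19 = _11 <= 1;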
    
    As such I only check for 1 with LE and GT (and equivalently 2 with LT
    and GE), which is what match.pd would have rewritten it to; the full
    mapping is listed below.
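
    Concretely, the canonicalizations performed (assuming, as the comment in
    the patch notes, that the unordered result 2 is impossible under
    -ffast-math) are:

      (unsigned) res <= 1  -->  res >= 0
      (unsigned) res <  2  -->  res >= 0
      (unsigned) res >  1  -->  res <  0
      (unsigned) res >= 2  -->  res <  0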
    
    This fixes the regression, but this is not code I understand 100%.  Since
    I don't really know the semantics of the spaceship operator, I would
    appreciate an extra look.
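
    For reference (not part of the patch): i <=> j on doubles yields a
    std::partial_ordering, which in the IR above is encoded as -1 (less),
    0 (equivalent), 1 (greater) and 2 (unordered), hence the
    PHI <-1(3), 0(2), 2(5), 1(4)>.  A minimal sketch of the four results:

      #include <compare>
      #include <cmath>

      int main ()
      {
        auto lt = 1.0 <=> 2.0;            // partial_ordering::less
        auto eq = 1.0 <=> 1.0;            // partial_ordering::equivalent
        auto gt = 2.0 <=> 1.0;            // partial_ordering::greater
        auto un = 1.0 <=> std::nan ("");  // partial_ordering::unordered
        // unordered compares false against 0 with <, == and >.
        return (lt < 0 && eq == 0 && gt > 0
                && !(un < 0) && !(un == 0) && !(un > 0)) ? 0 : 1;
      }

    Under -ffast-math NaNs are assumed away, which is why the patch can treat
    the unordered (2) result as impossible.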
    
    gcc/ChangeLog:
    
            * tree-ssa-phiopt.c (spaceship_replacement): Handle new canonical
            codegen.

Diff:
---
 gcc/tree-ssa-phiopt.c | 94 ++++++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 86 insertions(+), 8 deletions(-)

diff --git a/gcc/tree-ssa-phiopt.c b/gcc/tree-ssa-phiopt.c
index 2fa7069f824..3eac9b1ce46 100644
--- a/gcc/tree-ssa-phiopt.c
+++ b/gcc/tree-ssa-phiopt.c
@@ -2055,11 +2055,36 @@ spaceship_replacement (basic_block cond_bb, basic_block middle_bb,
   gimple *orig_use_stmt = use_stmt;
   tree orig_use_lhs = NULL_TREE;
   int prec = TYPE_PRECISION (TREE_TYPE (phires));
-  if (is_gimple_assign (use_stmt)
-      && gimple_assign_rhs_code (use_stmt) == BIT_AND_EXPR
-      && TREE_CODE (gimple_assign_rhs2 (use_stmt)) == INTEGER_CST
-      && (wi::to_wide (gimple_assign_rhs2 (use_stmt))
-	  == wi::shifted_mask (1, prec - 1, false, prec)))
+  bool is_cast = false;
+
+  /* Deal with the case when match.pd has rewritten the (res & ~1) == 0
+     into res <= 1 and has left a type-cast for signed types.  */
+  if (gimple_assign_cast_p (use_stmt))
+    {
+      orig_use_lhs = gimple_assign_lhs (use_stmt);
+      /* match.pd would have only done this for a signed type,
+	 so the conversion must be to an unsigned one.  */
+      tree ty1 = TREE_TYPE (gimple_assign_rhs1 (use_stmt));
+      tree ty2 = TREE_TYPE (orig_use_lhs);
+
+      if (!TYPE_UNSIGNED (ty2) || !INTEGRAL_TYPE_P (ty2))
+	return false;
+      if (TYPE_PRECISION (ty1) != TYPE_PRECISION (ty2))
+	return false;
+      if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (orig_use_lhs))
+	return false;
+      if (EDGE_COUNT (phi_bb->preds) != 4)
+	return false;
+      if (!single_imm_use (orig_use_lhs, &use_p, &use_stmt))
+	return false;
+
+      is_cast = true;
+    }
+  else if (is_gimple_assign (use_stmt)
+	   && gimple_assign_rhs_code (use_stmt) == BIT_AND_EXPR
+	   && TREE_CODE (gimple_assign_rhs2 (use_stmt)) == INTEGER_CST
+	   && (wi::to_wide (gimple_assign_rhs2 (use_stmt))
+	       == wi::shifted_mask (1, prec - 1, false, prec)))
     {
       /* For partial_ordering result operator>= with unspec as second
 	 argument is (res & 1) == res, folded by match.pd into
@@ -2116,7 +2141,43 @@ spaceship_replacement (basic_block cond_bb, basic_block middle_bb,
       || !tree_fits_shwi_p (rhs)
       || !IN_RANGE (tree_to_shwi (rhs), -1, 1))
     return false;
-  if (orig_use_lhs)
+
+  if (is_cast)
+    {
+      if (TREE_CODE (rhs) != INTEGER_CST)
+	return false;
+      /* As for -ffast-math we assume the 2 return to be
+	 impossible, canonicalize (unsigned) res <= 1U or
+	 (unsigned) res < 2U into res >= 0 and (unsigned) res > 1U
+	 or (unsigned) res >= 2U as res < 0.  */
+      switch (cmp)
+	{
+	case LE_EXPR:
+	  if (!integer_onep (rhs))
+	    return false;
+	  cmp = GE_EXPR;
+	  break;
+	case LT_EXPR:
+	  if (wi::ne_p (wi::to_widest (rhs), 2))
+	    return false;
+	  cmp = GE_EXPR;
+	  break;
+	case GT_EXPR:
+	  if (!integer_onep (rhs))
+	    return false;
+	  cmp = LT_EXPR;
+	  break;
+	case GE_EXPR:
+	  if (wi::ne_p (wi::to_widest (rhs), 2))
+	    return false;
+	  cmp = LT_EXPR;
+	  break;
+	default:
+	  return false;
+	}
+      rhs = build_zero_cst (TREE_TYPE (phires));
+    }
+  else if (orig_use_lhs)
     {
       if ((cmp != EQ_EXPR && cmp != NE_EXPR) || !integer_zerop (rhs))
 	return false;
@@ -2411,6 +2472,7 @@ spaceship_replacement (basic_block cond_bb, basic_block middle_bb,
       use_operand_p use_p;
       imm_use_iterator iter;
       bool has_debug_uses = false;
+      bool has_cast_debug_uses = false;
       FOR_EACH_IMM_USE_FAST (use_p, iter, phires)
 	{
 	  gimple *use_stmt = USE_STMT (use_p);
@@ -2422,12 +2484,14 @@ spaceship_replacement (basic_block cond_bb, basic_block middle_bb,
 	}
       if (orig_use_lhs)
 	{
-	  if (!has_debug_uses)
+	  if (!has_debug_uses || is_cast)
 	    FOR_EACH_IMM_USE_FAST (use_p, iter, orig_use_lhs)
 	      {
 		gimple *use_stmt = USE_STMT (use_p);
 		gcc_assert (is_gimple_debug (use_stmt));
 		has_debug_uses = true;
+		if (is_cast)
+		  has_cast_debug_uses = true;
 	      }
 	  gimple_stmt_iterator gsi = gsi_for_stmt (orig_use_stmt);
 	  tree zero = build_zero_cst (TREE_TYPE (orig_use_lhs));
@@ -2459,7 +2523,21 @@ spaceship_replacement (basic_block cond_bb, basic_block middle_bb,
 	  gsi_insert_before (&gsi, g, GSI_SAME_STMT);
 	  replace_uses_by (phires, temp2);
 	  if (orig_use_lhs)
-	    replace_uses_by (orig_use_lhs, temp2);
+	    {
+	      if (has_cast_debug_uses)
+		{
+		  tree temp3 = make_node (DEBUG_EXPR_DECL);
+		  DECL_ARTIFICIAL (temp3) = 1;
+		  TREE_TYPE (temp3) = TREE_TYPE (orig_use_lhs);
+		  SET_DECL_MODE (temp3, TYPE_MODE (type));
+		  t = fold_convert (TREE_TYPE (temp3), temp2);
+		  g = gimple_build_debug_bind (temp3, t, phi);
+		  gsi_insert_before (&gsi, g, GSI_SAME_STMT);
+		  replace_uses_by (orig_use_lhs, temp3);
+		}
+	      else
+		replace_uses_by (orig_use_lhs, temp2);
+	    }
 	}
     }

