public inbox for gcc-patches@gcc.gnu.org
* [COMMITTED] Tidy up the range normalization code.
@ 2023-06-29 16:48 Aldy Hernandez
  2023-06-29 16:48 ` [COMMITTED] Move maybe_set_nonzero_bits() to its only user Aldy Hernandez
  0 siblings, 1 reply; 2+ messages in thread
From: Aldy Hernandez @ 2023-06-29 16:48 UTC (permalink / raw)
  To: GCC patches; +Cc: Andrew MacLeod, Aldy Hernandez

There are a few spots where a range is altered in place, but we fail
to normalize the range afterwards.  This patch makes sure we always
call normalize_kind(), and that normalize_kind(), in turn, calls
verify_range to make sure everything is canonical.
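
As a quick, self-contained illustration of the convention (a toy
sketch, not the irange API -- the class, the [0, 100] domain and the
checking flag below are invented for the example): a helper that
mutates a range in place either re-canonicalizes it itself, or the
caller calls normalize_kind() afterwards, and normalize_kind() runs
verify_range() whenever checking is enabled.

  // Toy sketch of the pattern; not GCC code.
  #include <cassert>

  static const bool flag_checking = true;  // stand-in for a checking build

  struct toy_range
  {
    int lo, hi;    // pretend the full domain is [0, 100]
    bool varying;  // canonical representation of the full domain

    void verify_range () const
    {
      assert (varying || (0 <= lo && lo <= hi && hi <= 100));
    }

    // Re-establish canonical form; verify the result under checking.
    bool normalize_kind ()
    {
      bool changed = false;
      if (!varying && lo == 0 && hi == 100)
        {
          varying = true;
          changed = true;
        }
      if (flag_checking)
        verify_range ();
      return changed;
    }

    // In-place union: the range was altered, so normalize even if the
    // bounds comparison alone did not report a change.
    bool union_ (const toy_range &r)
    {
      bool changed = r.lo < lo || r.hi > hi;
      lo = r.lo < lo ? r.lo : lo;
      hi = r.hi > hi ? r.hi : hi;
      changed |= normalize_kind ();
      return changed;
    }
  };

  int main ()
  {
    toy_range a = { 0, 60, false }, b = { 40, 100, false };
    a.union_ (b);  // becomes [0, 100]; normalize_kind flips it to varying
    a.verify_range ();
    return 0;
  }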

gcc/ChangeLog:

	* value-range.cc (frange::set): Do not call verify_range.
	(frange::normalize_kind): Verify range.
	(frange::union_nans): Do not call verify_range.
	(frange::union_): Same.
	(frange::intersect): Same.
	(irange::irange_single_pair_union): Call normalize_kind if
	necessary.
	(irange::union_): Same.
	(irange::intersect): Same.
	(irange::set_range_from_nonzero_bits): Verify range.
	(irange::set_nonzero_bits): Call normalize_kind if necessary.
	(irange::get_nonzero_bits): Tweak comment.
	(irange::intersect_nonzero_bits): Call normalize_kind if
	necessary.
	(irange::union_nonzero_bits): Same.
	* value-range.h (irange::normalize_kind): Verify range.
---
 gcc/value-range.cc | 99 ++++++++++++++++++++++------------------------
 gcc/value-range.h  |  2 +
 2 files changed, 50 insertions(+), 51 deletions(-)

diff --git a/gcc/value-range.cc b/gcc/value-range.cc
index 6f46f7c9875..f5d4bf3bb4a 100644
--- a/gcc/value-range.cc
+++ b/gcc/value-range.cc
@@ -411,9 +411,6 @@ frange::set (tree type,
   gcc_checking_assert (real_compare (LE_EXPR, &min, &max));
 
   normalize_kind ();
-
-  if (flag_checking)
-    verify_range ();
 }
 
 // Setter for an frange defaulting the NAN possibility to +-NAN when
@@ -462,6 +459,8 @@ frange::normalize_kind ()
 	  m_kind = VR_RANGE;
 	  m_min = frange_val_min (m_type);
 	  m_max = frange_val_max (m_type);
+	  if (flag_checking)
+	    verify_range ();
 	  return true;
 	}
     }
@@ -524,8 +523,6 @@ frange::union_nans (const frange &r)
   m_pos_nan |= r.m_pos_nan;
   m_neg_nan |= r.m_neg_nan;
   normalize_kind ();
-  if (flag_checking)
-    verify_range ();
   return true;
 }
 
@@ -569,8 +566,6 @@ frange::union_ (const vrange &v)
     changed |= combine_zeros (r, true);
 
   changed |= normalize_kind ();
-  if (flag_checking)
-    verify_range ();
   return changed;
 }
 
@@ -648,8 +643,6 @@ frange::intersect (const vrange &v)
     changed |= combine_zeros (r, false);
 
   changed |= normalize_kind ();
-  if (flag_checking)
-    verify_range ();
   return changed;
 }
 
@@ -1197,7 +1190,12 @@ irange::irange_single_pair_union (const irange &r)
 	  m_base[3] = r.m_base[1];
 	  m_num_ranges = 2;
 	}
-      union_nonzero_bits (r);
+      // The range has been altered, so normalize it even if nothing
+      // changed in the mask.
+      if (!union_nonzero_bits (r))
+	normalize_kind ();
+      if (flag_checking)
+	verify_range ();
       return true;
     }
 
@@ -1221,7 +1219,12 @@ irange::irange_single_pair_union (const irange &r)
       m_base[3] = m_base[1];
       m_base[1] = r.m_base[1];
     }
-  union_nonzero_bits (r);
+  // The range has been altered, so normalize it even if nothing
+  // changed in the mask.
+  if (!union_nonzero_bits (r))
+    normalize_kind ();
+  if (flag_checking)
+    verify_range ();
   return true;
 }
 
@@ -1351,7 +1354,12 @@ irange::union_ (const vrange &v)
   m_num_ranges = i / 2;
 
   m_kind = VR_RANGE;
-  union_nonzero_bits (r);
+  // The range has been altered, so normalize it even if nothing
+  // changed in the mask.
+  if (!union_nonzero_bits (r))
+    normalize_kind ();
+  if (flag_checking)
+    verify_range ();
   return true;
 }
 
@@ -1518,7 +1526,12 @@ irange::intersect (const vrange &v)
     }
 
   m_kind = VR_RANGE;
-  intersect_nonzero_bits (r);
+  // The range has been altered, so normalize it even if nothing
+  // changed in the mask.
+  if (!intersect_nonzero_bits (r))
+    normalize_kind ();
+  if (flag_checking)
+    verify_range ();
   return true;
 }
 
@@ -1585,10 +1598,7 @@ irange::intersect (const wide_int& lb, const wide_int& ub)
     }
 
   m_kind = VR_RANGE;
-  // No need to call normalize_kind(), as the caller will do this
-  // while intersecting the nonzero mask.
-  if (flag_checking)
-    verify_range ();
+  normalize_kind ();
   return true;
 }
 
@@ -1758,6 +1768,8 @@ irange::set_range_from_nonzero_bits ()
 	  zero.set_zero (type ());
 	  union_ (zero);
 	}
+      if (flag_checking)
+	verify_range ();
       return true;
     }
   else if (popcount == 0)
@@ -1778,10 +1790,8 @@ irange::set_nonzero_bits (const wide_int &bits)
     m_kind = VR_RANGE;
 
   m_nonzero_mask = bits;
-  if (set_range_from_nonzero_bits ())
-    return;
-
-  normalize_kind ();
+  if (!set_range_from_nonzero_bits ())
+    normalize_kind ();
   if (flag_checking)
     verify_range ();
 }
@@ -1807,8 +1817,8 @@ irange::get_nonzero_bits () const
     return m_nonzero_mask & get_nonzero_bits_from_range ();
 }
 
-// Intersect the nonzero bits in R into THIS and normalize the range.
-// Return TRUE if the intersection changed anything.
+// Intersect the nonzero bits in R into THIS.  Return TRUE and
+// normalize the range if anything changed.
 
 bool
 irange::intersect_nonzero_bits (const irange &r)
@@ -1816,14 +1826,8 @@ irange::intersect_nonzero_bits (const irange &r)
   gcc_checking_assert (!undefined_p () && !r.undefined_p ());
 
   if (m_nonzero_mask == -1 && r.m_nonzero_mask == -1)
-    {
-      normalize_kind ();
-      if (flag_checking)
-	verify_range ();
-      return false;
-    }
+    return false;
 
-  bool changed = false;
   if (m_nonzero_mask != r.m_nonzero_mask)
     {
       wide_int nz = get_nonzero_bits () & r.get_nonzero_bits ();
@@ -1832,18 +1836,17 @@ irange::intersect_nonzero_bits (const irange &r)
 	return false;
 
       m_nonzero_mask = nz;
-      if (set_range_from_nonzero_bits ())
-	return true;
-      changed = true;
+      if (!set_range_from_nonzero_bits ())
+	normalize_kind ();
+      if (flag_checking)
+	verify_range ();
+      return true;
     }
-  normalize_kind ();
-  if (flag_checking)
-    verify_range ();
-  return changed;
+  return false;
 }
 
-// Union the nonzero bits in R into THIS and normalize the range.
-// Return TRUE if the union changed anything.
+// Union the nonzero bits in R into THIS.  Return TRUE and normalize
+// the range if anything changed.
 
 bool
 irange::union_nonzero_bits (const irange &r)
@@ -1851,28 +1854,22 @@ irange::union_nonzero_bits (const irange &r)
   gcc_checking_assert (!undefined_p () && !r.undefined_p ());
 
   if (m_nonzero_mask == -1 && r.m_nonzero_mask == -1)
-    {
-      normalize_kind ();
-      if (flag_checking)
-	verify_range ();
-      return false;
-    }
+    return false;
 
-  bool changed = false;
   if (m_nonzero_mask != r.m_nonzero_mask)
     {
       wide_int save = get_nonzero_bits ();
       m_nonzero_mask = save | r.get_nonzero_bits ();
+      if (m_nonzero_mask == save)
+	return false;
       // No need to call set_range_from_nonzero_bits, because we'll
       // never narrow the range.  Besides, it would cause endless
       // recursion because of the union_ in
       // set_range_from_nonzero_bits.
-      changed = m_nonzero_mask != save;
+      normalize_kind ();
+      return true;
     }
-  normalize_kind ();
-  if (flag_checking)
-    verify_range ();
-  return changed;
+  return false;
 }
 
 void
diff --git a/gcc/value-range.h b/gcc/value-range.h
index 9103e9c41c7..5d4eaf8b625 100644
--- a/gcc/value-range.h
+++ b/gcc/value-range.h
@@ -1002,6 +1002,8 @@ irange::normalize_kind ()
       else if (m_kind == VR_ANTI_RANGE)
 	set_undefined ();
     }
+  if (flag_checking)
+    verify_range ();
 }
 
 inline bool
-- 
2.40.1



* [COMMITTED] Move maybe_set_nonzero_bits() to its only user.
  2023-06-29 16:48 [COMMITTED] Tidy up the range normalization code Aldy Hernandez
@ 2023-06-29 16:48 ` Aldy Hernandez
  0 siblings, 0 replies; 2+ messages in thread
From: Aldy Hernandez @ 2023-06-29 16:48 UTC (permalink / raw)
  To: GCC patches; +Cc: Andrew MacLeod, Aldy Hernandez

gcc/ChangeLog:

	* tree-vrp.cc (maybe_set_nonzero_bits): Move from here...
	* tree-ssa-dom.cc (maybe_set_nonzero_bits): ...to here.
	* tree-vrp.h (maybe_set_nonzero_bits): Remove.
---
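
Not part of the patch -- a hedged, source-level sketch of the idiom
the moved function recognizes.  After the guarded
__builtin_unreachable (), DOM can record that the bits masked by the
constant are zero; for a pointer, that it is suitably aligned.  The
function name and the stdint.h usage are only for illustration.

  #include <stdint.h>

  /* Assert to the compiler that P is 32-byte aligned: if any of the
     low 5 bits were set, control would reach __builtin_unreachable ().  */
  void *
  assume_aligned_32 (void *p)
  {
    if (((uintptr_t) p & 31) != 0)
      __builtin_unreachable ();
    return p;
  }
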
 gcc/tree-ssa-dom.cc | 65 +++++++++++++++++++++++++++++++++++++++++++++
 gcc/tree-vrp.cc     | 65 ---------------------------------------------
 gcc/tree-vrp.h      |  1 -
 3 files changed, 65 insertions(+), 66 deletions(-)

diff --git a/gcc/tree-ssa-dom.cc b/gcc/tree-ssa-dom.cc
index 9f534b5a190..f7f8b730877 100644
--- a/gcc/tree-ssa-dom.cc
+++ b/gcc/tree-ssa-dom.cc
@@ -1338,6 +1338,71 @@ all_uses_feed_or_dominated_by_stmt (tree name, gimple *stmt)
   return true;
 }
 
+/* Handle
+   _4 = x_3 & 31;
+   if (_4 != 0)
+     goto <bb 6>;
+   else
+     goto <bb 7>;
+   <bb 6>:
+   __builtin_unreachable ();
+   <bb 7>:
+
+   If x_3 has no other immediate uses (checked by caller), var is the
+   x_3 var, we can clear low 5 bits from the non-zero bitmask.  */
+
+static void
+maybe_set_nonzero_bits (edge e, tree var)
+{
+  basic_block cond_bb = e->src;
+  gcond *cond = safe_dyn_cast <gcond *> (*gsi_last_bb (cond_bb));
+  tree cst;
+
+  if (cond == NULL
+      || gimple_cond_code (cond) != ((e->flags & EDGE_TRUE_VALUE)
+				     ? EQ_EXPR : NE_EXPR)
+      || TREE_CODE (gimple_cond_lhs (cond)) != SSA_NAME
+      || !integer_zerop (gimple_cond_rhs (cond)))
+    return;
+
+  gimple *stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (cond));
+  if (!is_gimple_assign (stmt)
+      || gimple_assign_rhs_code (stmt) != BIT_AND_EXPR
+      || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST)
+    return;
+  if (gimple_assign_rhs1 (stmt) != var)
+    {
+      gimple *stmt2;
+
+      if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
+	return;
+      stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
+      if (!gimple_assign_cast_p (stmt2)
+	  || gimple_assign_rhs1 (stmt2) != var
+	  || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt2))
+	  || (TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (stmt)))
+			      != TYPE_PRECISION (TREE_TYPE (var))))
+	return;
+    }
+  cst = gimple_assign_rhs2 (stmt);
+  if (POINTER_TYPE_P (TREE_TYPE (var)))
+    {
+      struct ptr_info_def *pi = SSA_NAME_PTR_INFO (var);
+      if (pi && pi->misalign)
+	return;
+      wide_int w = wi::bit_not (wi::to_wide (cst));
+      unsigned int bits = wi::ctz (w);
+      if (bits == 0 || bits >= HOST_BITS_PER_INT)
+	return;
+      unsigned int align = 1U << bits;
+      if (pi == NULL || pi->align < align)
+	set_ptr_info_alignment (get_ptr_info (var), align, 0);
+    }
+  else
+    set_nonzero_bits (var, wi::bit_and_not (get_nonzero_bits (var),
+					    wi::to_wide (cst)));
+}
+
 /* Set global ranges that can be determined from the C->M edge:
 
    <bb C>:
diff --git a/gcc/tree-vrp.cc b/gcc/tree-vrp.cc
index c52e9971faa..d61b087b730 100644
--- a/gcc/tree-vrp.cc
+++ b/gcc/tree-vrp.cc
@@ -633,71 +633,6 @@ overflow_comparison_p (tree_code code, tree name, tree val, tree *new_cst)
 				  true, new_cst);
 }
 
-/* Handle
-   _4 = x_3 & 31;
-   if (_4 != 0)
-     goto <bb 6>;
-   else
-     goto <bb 7>;
-   <bb 6>:
-   __builtin_unreachable ();
-   <bb 7>:
-
-   If x_3 has no other immediate uses (checked by caller), var is the
-   x_3 var, we can clear low 5 bits from the non-zero bitmask.  */
-
-void
-maybe_set_nonzero_bits (edge e, tree var)
-{
-  basic_block cond_bb = e->src;
-  gcond *cond = safe_dyn_cast <gcond *> (*gsi_last_bb (cond_bb));
-  tree cst;
-
-  if (cond == NULL
-      || gimple_cond_code (cond) != ((e->flags & EDGE_TRUE_VALUE)
-				     ? EQ_EXPR : NE_EXPR)
-      || TREE_CODE (gimple_cond_lhs (cond)) != SSA_NAME
-      || !integer_zerop (gimple_cond_rhs (cond)))
-    return;
-
-  gimple *stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (cond));
-  if (!is_gimple_assign (stmt)
-      || gimple_assign_rhs_code (stmt) != BIT_AND_EXPR
-      || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST)
-    return;
-  if (gimple_assign_rhs1 (stmt) != var)
-    {
-      gimple *stmt2;
-
-      if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
-	return;
-      stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
-      if (!gimple_assign_cast_p (stmt2)
-	  || gimple_assign_rhs1 (stmt2) != var
-	  || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt2))
-	  || (TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (stmt)))
-			      != TYPE_PRECISION (TREE_TYPE (var))))
-	return;
-    }
-  cst = gimple_assign_rhs2 (stmt);
-  if (POINTER_TYPE_P (TREE_TYPE (var)))
-    {
-      struct ptr_info_def *pi = SSA_NAME_PTR_INFO (var);
-      if (pi && pi->misalign)
-	return;
-      wide_int w = wi::bit_not (wi::to_wide (cst));
-      unsigned int bits = wi::ctz (w);
-      if (bits == 0 || bits >= HOST_BITS_PER_INT)
-	return;
-      unsigned int align = 1U << bits;
-      if (pi == NULL || pi->align < align)
-	set_ptr_info_alignment (get_ptr_info (var), align, 0);
-    }
-  else
-    set_nonzero_bits (var, wi::bit_and_not (get_nonzero_bits (var),
-					    wi::to_wide (cst)));
-}
-
 /* Searches the case label vector VEC for the index *IDX of the CASE_LABEL
    that includes the value VAL.  The search is restricted to the range
    [START_IDX, n - 1] where n is the size of VEC.
diff --git a/gcc/tree-vrp.h b/gcc/tree-vrp.h
index ba0a314d510..fe7ecbbe83e 100644
--- a/gcc/tree-vrp.h
+++ b/gcc/tree-vrp.h
@@ -32,6 +32,5 @@ extern bool find_case_label_range (gswitch *, tree, tree, size_t *, size_t *);
 extern tree find_case_label_range (gswitch *, const irange *vr);
 extern bool find_case_label_index (gswitch *, size_t, tree, size_t *);
 extern bool overflow_comparison_p (tree_code, tree, tree, tree *);
-extern void maybe_set_nonzero_bits (edge, tree);
 
 #endif /* GCC_TREE_VRP_H */
-- 
2.40.1

