public inbox for gcc-patches@gcc.gnu.org
 help / color / mirror / Atom feed
* [PATCH 1/5] Group tree-vrp.c by functionality.
@ 2020-11-12 15:15 Aldy Hernandez
  2020-11-12 15:15 ` [PATCH 2/5] Refactor VRP threading code into vrp_jump_threader class Aldy Hernandez
                   ` (3 more replies)
  0 siblings, 4 replies; 5+ messages in thread
From: Aldy Hernandez @ 2020-11-12 15:15 UTC (permalink / raw)
  To: GCC patches

Earlier in this cycle there was some work by Giuliano Belinassi and
myself to refactor tree-vrp.c.  A lot of functions and globals were
moved into independent classes, but the haphazard layout remained.
Assertion methods were interspersed with the propagation code, and with
the jump threading code, etc.

This series of patches moves things around so that common
functionality is geographically close.  There is no change in
behavior.

I know this is all slated to go in the next release, but finding
things in the current code base, even if just to compare with the
ranger, is difficult.

Tested on x86-64 Linux.  Aarch64 tests are still going.

Since I keep getting bit by aarch64 regressions, I'll push when the
entire patchset finishes tests on aarch64.

gcc/ChangeLog:

	* tree-vrp.c (struct assert_locus): Move.
	(class vrp_insert): Rename to vrp_asserts.
	(vrp_insert::build_assert_expr_for): Move to vrp_asserts.
	(fp_predicate): Same.
	(vrp_insert::dump): Same.
	(vrp_insert::register_new_assert_for): Same.
	(extract_code_and_val_from_cond_with_ops): Move.
	(vrp_insert::finish_register_edge_assert_for): Move to vrp_asserts.
	(maybe_set_nonzero_bits): Move.
	(vrp_insert::find_conditional_asserts): Move to vrp_asserts.
	(stmt_interesting_for_vrp): Move.
	(struct case_info): Move.
	(compare_case_labels): Move.
	(lhs_of_dominating_assert): Move.
	(find_case_label_index): Move.
	(find_case_label_range): Move.
	(class vrp_asserts): New.
	(vrp_asserts::build_assert_expr_for): Rename from vrp_insert.
	(vrp_asserts::dump): Same.
	(vrp_asserts::register_new_assert_for): Same.
	(vrp_asserts::finish_register_edge_assert_for): Same.
	(vrp_asserts::find_conditional_asserts): Same.
	(vrp_asserts::compare_case_labels): Same.
	(vrp_asserts::find_switch_asserts): Same.
	(vrp_asserts::find_assert_locations_in_bb): Same.
	(vrp_asserts::find_assert_locations): Same.
	(vrp_asserts::process_assert_insertions_for): Same.
	(vrp_asserts::compare_assert_loc): Same.
	(vrp_asserts::process_assert_insertions): Same.
	(vrp_asserts::insert_range_assertions): Same.
	(vrp_asserts::all_imm_uses_in_stmt_or_feed_cond): Same.
	(vrp_asserts::remove_range_assertions): Same.
	(class vrp_prop): Move.
	(all_imm_uses_in_stmt_or_feed_cond): Move.
	(vrp_prop::vrp_initialize): Move.
	(class vrp_folder): Move.
	(vrp_folder::fold_predicate_in): Move.
	(vrp_folder::fold_stmt): Move.
	(vrp_prop::initialize): Move.
	(vrp_prop::visit_stmt): Move.
	(enum ssa_prop_result): Move.
	(vrp_prop::visit_phi): Move.
	(vrp_prop::finalize): Move.
	(class vrp_dom_walker): Rename to...
	(class vrp_jump_threader): ...this.
	(vrp_jump_threader::before_dom_children): Rename from
	vrp_dom_walker.
	(simplify_stmt_for_jump_threading): Rename to...
	(vrp_jump_threader::simplify_stmt): ...here.
	(vrp_jump_threader::after_dom_children): Same.
	(identify_jump_threads): Move.
	(vrp_prop::vrp_finalize): Move array bounds setup code to...
	(execute_vrp): ...here.
---
 gcc/tree-vrp.c | 2127 ++++++++++++++++++++++++------------------------
 1 file changed, 1057 insertions(+), 1070 deletions(-)

diff --git a/gcc/tree-vrp.c b/gcc/tree-vrp.c
index e00c034fee3..d3816ab569e 100644
--- a/gcc/tree-vrp.c
+++ b/gcc/tree-vrp.c
@@ -161,153 +161,6 @@ live_names::live_on_block_p (tree name, basic_block bb)
 	  && bitmap_bit_p (live[bb->index], SSA_NAME_VERSION (name)));
 }
 
-
-/* Location information for ASSERT_EXPRs.  Each instance of this
-   structure describes an ASSERT_EXPR for an SSA name.  Since a single
-   SSA name may have more than one assertion associated with it, these
-   locations are kept in a linked list attached to the corresponding
-   SSA name.  */
-struct assert_locus
-{
-  /* Basic block where the assertion would be inserted.  */
-  basic_block bb;
-
-  /* Some assertions need to be inserted on an edge (e.g., assertions
-     generated by COND_EXPRs).  In those cases, BB will be NULL.  */
-  edge e;
-
-  /* Pointer to the statement that generated this assertion.  */
-  gimple_stmt_iterator si;
-
-  /* Predicate code for the ASSERT_EXPR.  Must be COMPARISON_CLASS_P.  */
-  enum tree_code comp_code;
-
-  /* Value being compared against.  */
-  tree val;
-
-  /* Expression to compare.  */
-  tree expr;
-
-  /* Next node in the linked list.  */
-  assert_locus *next;
-};
-
-class vrp_insert
-{
-public:
-  vrp_insert (struct function *fn) : fun (fn) { }
-
-  /* Traverse the flowgraph looking for conditional jumps to insert range
-     expressions.  These range expressions are meant to provide information
-     to optimizations that need to reason in terms of value ranges.  They
-     will not be expanded into RTL.  See method implementation comment
-     for example.  */
-  void insert_range_assertions ();
-
-  /* Convert range assertion expressions into the implied copies and
-     copy propagate away the copies.  */
-  void remove_range_assertions ();
-
-  /* Dump all the registered assertions for all the names to FILE.  */
-  void dump (FILE *);
-
-  /* Dump all the registered assertions for NAME to FILE.  */
-  void dump (FILE *file, tree name);
-
-  /* Dump all the registered assertions for NAME to stderr.  */
-  void debug (tree name)
-  {
-    dump (stderr, name);
-  }
-
-  /* Dump all the registered assertions for all the names to stderr.  */
-  void debug ()
-  {
-    dump (stderr);
-  }
-
-private:
-  /* Set of SSA names found live during the RPO traversal of the function
-     for still active basic-blocks.  */
-  live_names live;
-
-  /* Function to work on.  */
-  struct function *fun;
-
-  /* If bit I is present, it means that SSA name N_i has a list of
-     assertions that should be inserted in the IL.  */
-  bitmap need_assert_for;
-
-  /* Array of locations lists where to insert assertions.  ASSERTS_FOR[I]
-     holds a list of ASSERT_LOCUS_T nodes that describe where
-     ASSERT_EXPRs for SSA name N_I should be inserted.  */
-  assert_locus **asserts_for;
-
-  /* Finish found ASSERTS for E and register them at GSI.  */
-  void finish_register_edge_assert_for (edge e, gimple_stmt_iterator gsi,
-					vec<assert_info> &asserts);
-
-  /* Determine whether the outgoing edges of BB should receive an
-     ASSERT_EXPR for each of the operands of BB's LAST statement.  The
-     last statement of BB must be a SWITCH_EXPR.
-
-     If any of the sub-graphs rooted at BB have an interesting use of
-     the predicate operands, an assert location node is added to the
-     list of assertions for the corresponding operands.  */
-  void find_switch_asserts (basic_block bb, gswitch *last);
-
-  /* Do an RPO walk over the function computing SSA name liveness
-     on-the-fly and deciding on assert expressions to insert.  */
-  void find_assert_locations ();
-
-  /* Traverse all the statements in block BB looking for statements that
-     may generate useful assertions for the SSA names in their operand.
-     See method implementation comentary for more information.  */
-  void find_assert_locations_in_bb (basic_block bb);
-
-  /* Determine whether the outgoing edges of BB should receive an
-     ASSERT_EXPR for each of the operands of BB's LAST statement.
-     The last statement of BB must be a COND_EXPR.
-
-     If any of the sub-graphs rooted at BB have an interesting use of
-     the predicate operands, an assert location node is added to the
-     list of assertions for the corresponding operands.  */
-  void find_conditional_asserts (basic_block bb, gcond *last);
-
-  /* Process all the insertions registered for every name N_i registered
-     in NEED_ASSERT_FOR.  The list of assertions to be inserted are
-     found in ASSERTS_FOR[i].  */
-  void process_assert_insertions ();
-
-  /* If NAME doesn't have an ASSERT_EXPR registered for asserting
-     'EXPR COMP_CODE VAL' at a location that dominates block BB or
-     E->DEST, then register this location as a possible insertion point
-     for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.
-
-     BB, E and SI provide the exact insertion point for the new
-     ASSERT_EXPR.  If BB is NULL, then the ASSERT_EXPR is to be inserted
-     on edge E.  Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
-     BB.  If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
-     must not be NULL.  */
-  void register_new_assert_for (tree name, tree expr,
-				enum tree_code comp_code,
-				tree val, basic_block bb,
-				edge e, gimple_stmt_iterator si);
-
-  /* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V,
-     create a new SSA name N and return the assertion assignment
-     'N = ASSERT_EXPR <V, V OP W>'.  */
-  gimple *build_assert_expr_for (tree cond, tree v);
-
-  /* Create an ASSERT_EXPR for NAME and insert it in the location
-     indicated by LOC.  Return true if we made any edge insertions.  */
-  bool process_assert_insertions_for (tree name, assert_locus *loc);
-
-  /* Qsort callback for sorting assert locations.  */
-  template <bool stable> static int compare_assert_loc (const void *,
-							const void *);
-};
-
 /* Return true if the SSA name NAME is live on the edge E.  */
 
 bool
@@ -1266,48 +1119,6 @@ range_fold_unary_expr (value_range *vr,
   op->fold_range (*vr, expr_type, vr0_cst, value_range (expr_type));
 }
 
-/* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V,
-   create a new SSA name N and return the assertion assignment
-   'N = ASSERT_EXPR <V, V OP W>'.  */
-
-gimple *
-vrp_insert::build_assert_expr_for (tree cond, tree v)
-{
-  tree a;
-  gassign *assertion;
-
-  gcc_assert (TREE_CODE (v) == SSA_NAME
-	      && COMPARISON_CLASS_P (cond));
-
-  a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond);
-  assertion = gimple_build_assign (NULL_TREE, a);
-
-  /* The new ASSERT_EXPR, creates a new SSA name that replaces the
-     operand of the ASSERT_EXPR.  Create it so the new name and the old one
-     are registered in the replacement table so that we can fix the SSA web
-     after adding all the ASSERT_EXPRs.  */
-  tree new_def = create_new_def_for (v, assertion, NULL);
-  /* Make sure we preserve abnormalness throughout an ASSERT_EXPR chain
-     given we have to be able to fully propagate those out to re-create
-     valid SSA when removing the asserts.  */
-  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (v))
-    SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_def) = 1;
-
-  return assertion;
-}
-
-
-/* Return false if EXPR is a predicate expression involving floating
-   point values.  */
-
-static inline bool
-fp_predicate (gimple *stmt)
-{
-  GIMPLE_CHECK (stmt, GIMPLE_COND);
-
-  return FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)));
-}
-
 /* If the range of values taken by OP can be inferred after STMT executes,
    return the comparison code (COMP_CODE_P) and value (VAL_P) that
    describes the inferred range.  Return true if a range could be
@@ -1350,54 +1161,6 @@ infer_value_range (gimple *stmt, tree op, tree_code *comp_code_p, tree *val_p)
   return false;
 }
 
-/* Dump all the registered assertions for NAME to FILE.  */
-
-void
-vrp_insert::dump (FILE *file, tree name)
-{
-  assert_locus *loc;
-
-  fprintf (file, "Assertions to be inserted for ");
-  print_generic_expr (file, name);
-  fprintf (file, "\n");
-
-  loc = asserts_for[SSA_NAME_VERSION (name)];
-  while (loc)
-    {
-      fprintf (file, "\t");
-      print_gimple_stmt (file, gsi_stmt (loc->si), 0);
-      fprintf (file, "\n\tBB #%d", loc->bb->index);
-      if (loc->e)
-	{
-	  fprintf (file, "\n\tEDGE %d->%d", loc->e->src->index,
-	           loc->e->dest->index);
-	  dump_edge_info (file, loc->e, dump_flags, 0);
-	}
-      fprintf (file, "\n\tPREDICATE: ");
-      print_generic_expr (file, loc->expr);
-      fprintf (file, " %s ", get_tree_code_name (loc->comp_code));
-      print_generic_expr (file, loc->val);
-      fprintf (file, "\n\n");
-      loc = loc->next;
-    }
-
-  fprintf (file, "\n");
-}
-
-/* Dump all the registered assertions for all the names to FILE.  */
-
-void
-vrp_insert::dump (FILE *file)
-{
-  unsigned i;
-  bitmap_iterator bi;
-
-  fprintf (file, "\nASSERT_EXPRs to be inserted\n\n");
-  EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
-    dump (file, ssa_name (i));
-  fprintf (file, "\n");
-}
-
 /* Dump assert_info structure.  */
 
 void
@@ -1457,133 +1220,22 @@ add_assert_info (vec<assert_info> &asserts,
 		 name, expr, op_symbol_code (comp_code), val);
 }
 
-/* If NAME doesn't have an ASSERT_EXPR registered for asserting
-   'EXPR COMP_CODE VAL' at a location that dominates block BB or
-   E->DEST, then register this location as a possible insertion point
-   for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.
-
-   BB, E and SI provide the exact insertion point for the new
-   ASSERT_EXPR.  If BB is NULL, then the ASSERT_EXPR is to be inserted
-   on edge E.  Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
-   BB.  If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
-   must not be NULL.  */
+/* (COND_OP0 COND_CODE COND_OP1) is a predicate which uses NAME.
+   Extract a suitable test code and value and store them into *CODE_P and
+   *VAL_P so the predicate is normalized to NAME *CODE_P *VAL_P.
 
-void
-vrp_insert::register_new_assert_for (tree name, tree expr,
-				   enum tree_code comp_code,
-				   tree val,
-				   basic_block bb,
-				   edge e,
-				   gimple_stmt_iterator si)
-{
-  assert_locus *n, *loc, *last_loc;
-  basic_block dest_bb;
+   If no extraction was possible, return FALSE, otherwise return TRUE.
 
-  gcc_checking_assert (bb == NULL || e == NULL);
+   If INVERT is true, then we invert the result stored into *CODE_P.  */
 
-  if (e == NULL)
-    gcc_checking_assert (gimple_code (gsi_stmt (si)) != GIMPLE_COND
-			 && gimple_code (gsi_stmt (si)) != GIMPLE_SWITCH);
-
-  /* Never build an assert comparing against an integer constant with
-     TREE_OVERFLOW set.  This confuses our undefined overflow warning
-     machinery.  */
-  if (TREE_OVERFLOW_P (val))
-    val = drop_tree_overflow (val);
-
-  /* The new assertion A will be inserted at BB or E.  We need to
-     determine if the new location is dominated by a previously
-     registered location for A.  If we are doing an edge insertion,
-     assume that A will be inserted at E->DEST.  Note that this is not
-     necessarily true.
-
-     If E is a critical edge, it will be split.  But even if E is
-     split, the new block will dominate the same set of blocks that
-     E->DEST dominates.
-
-     The reverse, however, is not true, blocks dominated by E->DEST
-     will not be dominated by the new block created to split E.  So,
-     if the insertion location is on a critical edge, we will not use
-     the new location to move another assertion previously registered
-     at a block dominated by E->DEST.  */
-  dest_bb = (bb) ? bb : e->dest;
-
-  /* If NAME already has an ASSERT_EXPR registered for COMP_CODE and
-     VAL at a block dominating DEST_BB, then we don't need to insert a new
-     one.  Similarly, if the same assertion already exists at a block
-     dominated by DEST_BB and the new location is not on a critical
-     edge, then update the existing location for the assertion (i.e.,
-     move the assertion up in the dominance tree).
-
-     Note, this is implemented as a simple linked list because there
-     should not be more than a handful of assertions registered per
-     name.  If this becomes a performance problem, a table hashed by
-     COMP_CODE and VAL could be implemented.  */
-  loc = asserts_for[SSA_NAME_VERSION (name)];
-  last_loc = loc;
-  while (loc)
-    {
-      if (loc->comp_code == comp_code
-	  && (loc->val == val
-	      || operand_equal_p (loc->val, val, 0))
-	  && (loc->expr == expr
-	      || operand_equal_p (loc->expr, expr, 0)))
-	{
-	  /* If E is not a critical edge and DEST_BB
-	     dominates the existing location for the assertion, move
-	     the assertion up in the dominance tree by updating its
-	     location information.  */
-	  if ((e == NULL || !EDGE_CRITICAL_P (e))
-	      && dominated_by_p (CDI_DOMINATORS, loc->bb, dest_bb))
-	    {
-	      loc->bb = dest_bb;
-	      loc->e = e;
-	      loc->si = si;
-	      return;
-	    }
-	}
-
-      /* Update the last node of the list and move to the next one.  */
-      last_loc = loc;
-      loc = loc->next;
-    }
-
-  /* If we didn't find an assertion already registered for
-     NAME COMP_CODE VAL, add a new one at the end of the list of
-     assertions associated with NAME.  */
-  n = XNEW (struct assert_locus);
-  n->bb = dest_bb;
-  n->e = e;
-  n->si = si;
-  n->comp_code = comp_code;
-  n->val = val;
-  n->expr = expr;
-  n->next = NULL;
-
-  if (last_loc)
-    last_loc->next = n;
-  else
-    asserts_for[SSA_NAME_VERSION (name)] = n;
-
-  bitmap_set_bit (need_assert_for, SSA_NAME_VERSION (name));
-}
-
-/* (COND_OP0 COND_CODE COND_OP1) is a predicate which uses NAME.
-   Extract a suitable test code and value and store them into *CODE_P and
-   *VAL_P so the predicate is normalized to NAME *CODE_P *VAL_P.
-
-   If no extraction was possible, return FALSE, otherwise return TRUE.
-
-   If INVERT is true, then we invert the result stored into *CODE_P.  */
-
-static bool
-extract_code_and_val_from_cond_with_ops (tree name, enum tree_code cond_code,
-					 tree cond_op0, tree cond_op1,
-					 bool invert, enum tree_code *code_p,
-					 tree *val_p)
-{
-  enum tree_code comp_code;
-  tree val;
+static bool
+extract_code_and_val_from_cond_with_ops (tree name, enum tree_code cond_code,
+					 tree cond_op0, tree cond_op1,
+					 bool invert, enum tree_code *code_p,
+					 tree *val_p)
+{
+  enum tree_code comp_code;
+  tree val;
 
   /* Otherwise, we have a comparison of the form NAME COMP VAL
      or VAL COMP NAME.  */
@@ -2578,94 +2230,746 @@ register_edge_assert_for (tree name, edge e,
     }
 }
 
-/* Finish found ASSERTS for E and register them at GSI.  */
+/* Handle
+   _4 = x_3 & 31;
+   if (_4 != 0)
+     goto <bb 6>;
+   else
+     goto <bb 7>;
+   <bb 6>:
+   __builtin_unreachable ();
+   <bb 7>:
+   x_5 = ASSERT_EXPR <x_3, ...>;
+   If x_3 has no other immediate uses (checked by caller),
+   var is the x_3 var from ASSERT_EXPR, we can clear low 5 bits
+   from the non-zero bitmask.  */
 
 void
-vrp_insert::finish_register_edge_assert_for (edge e, gimple_stmt_iterator gsi,
-					   vec<assert_info> &asserts)
+maybe_set_nonzero_bits (edge e, tree var)
 {
-  for (unsigned i = 0; i < asserts.length (); ++i)
-    /* Only register an ASSERT_EXPR if NAME was found in the sub-graph
-       reachable from E.  */
-    if (live.live_on_edge_p (asserts[i].name, e))
-      register_new_assert_for (asserts[i].name, asserts[i].expr,
-			       asserts[i].comp_code, asserts[i].val,
-			       NULL, e, gsi);
-}
+  basic_block cond_bb = e->src;
+  gimple *stmt = last_stmt (cond_bb);
+  tree cst;
 
+  if (stmt == NULL
+      || gimple_code (stmt) != GIMPLE_COND
+      || gimple_cond_code (stmt) != ((e->flags & EDGE_TRUE_VALUE)
+				     ? EQ_EXPR : NE_EXPR)
+      || TREE_CODE (gimple_cond_lhs (stmt)) != SSA_NAME
+      || !integer_zerop (gimple_cond_rhs (stmt)))
+    return;
 
+  stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (stmt));
+  if (!is_gimple_assign (stmt)
+      || gimple_assign_rhs_code (stmt) != BIT_AND_EXPR
+      || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST)
+    return;
+  if (gimple_assign_rhs1 (stmt) != var)
+    {
+      gimple *stmt2;
 
-/* Determine whether the outgoing edges of BB should receive an
-   ASSERT_EXPR for each of the operands of BB's LAST statement.
-   The last statement of BB must be a COND_EXPR.
+      if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
+	return;
+      stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
+      if (!gimple_assign_cast_p (stmt2)
+	  || gimple_assign_rhs1 (stmt2) != var
+	  || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt2))
+	  || (TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (stmt)))
+			      != TYPE_PRECISION (TREE_TYPE (var))))
+	return;
+    }
+  cst = gimple_assign_rhs2 (stmt);
+  set_nonzero_bits (var, wi::bit_and_not (get_nonzero_bits (var),
+					  wi::to_wide (cst)));
+}
 
-   If any of the sub-graphs rooted at BB have an interesting use of
-   the predicate operands, an assert location node is added to the
-   list of assertions for the corresponding operands.  */
+/* Return true if STMT is interesting for VRP.  */
 
-void
-vrp_insert::find_conditional_asserts (basic_block bb, gcond *last)
+bool
+stmt_interesting_for_vrp (gimple *stmt)
 {
-  gimple_stmt_iterator bsi;
-  tree op;
-  edge_iterator ei;
-  edge e;
-  ssa_op_iter iter;
-
-  bsi = gsi_for_stmt (last);
-
-  /* Look for uses of the operands in each of the sub-graphs
-     rooted at BB.  We need to check each of the outgoing edges
-     separately, so that we know what kind of ASSERT_EXPR to
-     insert.  */
-  FOR_EACH_EDGE (e, ei, bb->succs)
+  if (gimple_code (stmt) == GIMPLE_PHI)
     {
-      if (e->dest == bb)
-	continue;
+      tree res = gimple_phi_result (stmt);
+      return (!virtual_operand_p (res)
+	      && (INTEGRAL_TYPE_P (TREE_TYPE (res))
+		  || POINTER_TYPE_P (TREE_TYPE (res))));
+    }
+  else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
+    {
+      tree lhs = gimple_get_lhs (stmt);
 
-      /* Register the necessary assertions for each operand in the
-	 conditional predicate.  */
-      auto_vec<assert_info, 8> asserts;
-      FOR_EACH_SSA_TREE_OPERAND (op, last, iter, SSA_OP_USE)
-	register_edge_assert_for (op, e,
-				  gimple_cond_code (last),
-				  gimple_cond_lhs (last),
-				  gimple_cond_rhs (last), asserts);
-      finish_register_edge_assert_for (e, bsi, asserts);
+      /* In general, assignments with virtual operands are not useful
+	 for deriving ranges, with the obvious exception of calls to
+	 builtin functions.  */
+      if (lhs && TREE_CODE (lhs) == SSA_NAME
+	  && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
+	      || POINTER_TYPE_P (TREE_TYPE (lhs)))
+	  && (is_gimple_call (stmt)
+	      || !gimple_vuse (stmt)))
+	return true;
+      else if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
+	switch (gimple_call_internal_fn (stmt))
+	  {
+	  case IFN_ADD_OVERFLOW:
+	  case IFN_SUB_OVERFLOW:
+	  case IFN_MUL_OVERFLOW:
+	  case IFN_ATOMIC_COMPARE_EXCHANGE:
+	    /* These internal calls return _Complex integer type,
+	       but are interesting to VRP nevertheless.  */
+	    if (lhs && TREE_CODE (lhs) == SSA_NAME)
+	      return true;
+	    break;
+	  default:
+	    break;
+	  }
     }
+  else if (gimple_code (stmt) == GIMPLE_COND
+	   || gimple_code (stmt) == GIMPLE_SWITCH)
+    return true;
+
+  return false;
 }
 
-struct case_info
-{
-  tree expr;
-  basic_block bb;
-};
 
-/* Compare two case labels sorting first by the destination bb index
-   and then by the case value.  */
+/* Return the LHS of any ASSERT_EXPR where OP appears as the first
+   argument to the ASSERT_EXPR and in which the ASSERT_EXPR dominates
+   BB.  If no such ASSERT_EXPR is found, return OP.  */
 
-static int
-compare_case_labels (const void *p1, const void *p2)
+static tree
+lhs_of_dominating_assert (tree op, basic_block bb, gimple *stmt)
 {
-  const struct case_info *ci1 = (const struct case_info *) p1;
-  const struct case_info *ci2 = (const struct case_info *) p2;
-  int idx1 = ci1->bb->index;
-  int idx2 = ci2->bb->index;
+  imm_use_iterator imm_iter;
+  gimple *use_stmt;
+  use_operand_p use_p;
 
-  if (idx1 < idx2)
-    return -1;
-  else if (idx1 == idx2)
+  if (TREE_CODE (op) == SSA_NAME)
     {
-      /* Make sure the default label is first in a group.  */
-      if (!CASE_LOW (ci1->expr))
-	return -1;
-      else if (!CASE_LOW (ci2->expr))
-	return 1;
-      else
-	return tree_int_cst_compare (CASE_LOW (ci1->expr),
-				     CASE_LOW (ci2->expr));
-    }
-  else
+      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, op)
+	{
+	  use_stmt = USE_STMT (use_p);
+	  if (use_stmt != stmt
+	      && gimple_assign_single_p (use_stmt)
+	      && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ASSERT_EXPR
+	      && TREE_OPERAND (gimple_assign_rhs1 (use_stmt), 0) == op
+	      && dominated_by_p (CDI_DOMINATORS, bb, gimple_bb (use_stmt)))
+	    return gimple_assign_lhs (use_stmt);
+	}
+    }
+  return op;
+}
+
+/* A hack.  */
+static class vr_values *x_vr_values;
+
+/* Searches the case label vector VEC for the index *IDX of the CASE_LABEL
+   that includes the value VAL.  The search is restricted to the range
+   [START_IDX, n - 1] where n is the size of VEC.
+
+   If there is a CASE_LABEL for VAL, its index is placed in IDX and true is
+   returned.
+
+   If there is no CASE_LABEL for VAL and there is one that is larger than VAL,
+   it is placed in IDX and false is returned.
+
+   If VAL is larger than any CASE_LABEL, n is placed on IDX and false is
+   returned. */
+
+bool
+find_case_label_index (gswitch *stmt, size_t start_idx, tree val, size_t *idx)
+{
+  size_t n = gimple_switch_num_labels (stmt);
+  size_t low, high;
+
+  /* Find case label for minimum of the value range or the next one.
+     At each iteration we are searching in [low, high - 1]. */
+
+  for (low = start_idx, high = n; high != low; )
+    {
+      tree t;
+      int cmp;
+      /* Note that i != high, so we never ask for n. */
+      size_t i = (high + low) / 2;
+      t = gimple_switch_label (stmt, i);
+
+      /* Cache the result of comparing CASE_LOW and val.  */
+      cmp = tree_int_cst_compare (CASE_LOW (t), val);
+
+      if (cmp == 0)
+	{
+	  /* Ranges cannot be empty. */
+	  *idx = i;
+	  return true;
+	}
+      else if (cmp > 0)
+        high = i;
+      else
+	{
+	  low = i + 1;
+	  if (CASE_HIGH (t) != NULL
+	      && tree_int_cst_compare (CASE_HIGH (t), val) >= 0)
+	    {
+	      *idx = i;
+	      return true;
+	    }
+        }
+    }
+
+  *idx = high;
+  return false;
+}
+
+/* Searches the case label vector VEC for the range of CASE_LABELs that is used
+   for values between MIN and MAX. The first index is placed in MIN_IDX. The
+   last index is placed in MAX_IDX. If the range of CASE_LABELs is empty
+   then MAX_IDX < MIN_IDX.
+   Returns true if the default label is not needed. */
+
+bool
+find_case_label_range (gswitch *stmt, tree min, tree max, size_t *min_idx,
+		       size_t *max_idx)
+{
+  size_t i, j;
+  bool min_take_default = !find_case_label_index (stmt, 1, min, &i);
+  bool max_take_default = !find_case_label_index (stmt, i, max, &j);
+
+  if (i == j
+      && min_take_default
+      && max_take_default)
+    {
+      /* Only the default case label reached.
+         Return an empty range. */
+      *min_idx = 1;
+      *max_idx = 0;
+      return false;
+    }
+  else
+    {
+      bool take_default = min_take_default || max_take_default;
+      tree low, high;
+      size_t k;
+
+      if (max_take_default)
+	j--;
+
+      /* If the case label range is continuous, we do not need
+	 the default case label.  Verify that.  */
+      high = CASE_LOW (gimple_switch_label (stmt, i));
+      if (CASE_HIGH (gimple_switch_label (stmt, i)))
+	high = CASE_HIGH (gimple_switch_label (stmt, i));
+      for (k = i + 1; k <= j; ++k)
+	{
+	  low = CASE_LOW (gimple_switch_label (stmt, k));
+	  if (!integer_onep (int_const_binop (MINUS_EXPR, low, high)))
+	    {
+	      take_default = true;
+	      break;
+	    }
+	  high = low;
+	  if (CASE_HIGH (gimple_switch_label (stmt, k)))
+	    high = CASE_HIGH (gimple_switch_label (stmt, k));
+	}
+
+      *min_idx = i;
+      *max_idx = j;
+      return !take_default;
+    }
+}
+
+/* Given a SWITCH_STMT, return the case label that encompasses the
+   known possible values for the switch operand.  RANGE_OF_OP is a
+   range for the known values of the switch operand.  */
+
+tree
+find_case_label_range (gswitch *switch_stmt, const irange *range_of_op)
+{
+  if (range_of_op->undefined_p ()
+      || range_of_op->varying_p ()
+      || range_of_op->symbolic_p ())
+    return NULL_TREE;
+
+  size_t i, j;
+  tree op = gimple_switch_index (switch_stmt);
+  tree type = TREE_TYPE (op);
+  tree tmin = wide_int_to_tree (type, range_of_op->lower_bound ());
+  tree tmax = wide_int_to_tree (type, range_of_op->upper_bound ());
+  find_case_label_range (switch_stmt, tmin, tmax, &i, &j);
+  if (i == j)
+    {
+      /* Look for exactly one label that encompasses the range of
+	 the operand.  */
+      tree label = gimple_switch_label (switch_stmt, i);
+      tree case_high
+	= CASE_HIGH (label) ? CASE_HIGH (label) : CASE_LOW (label);
+      int_range_max label_range (CASE_LOW (label), case_high);
+      if (!types_compatible_p (label_range.type (), range_of_op->type ()))
+	range_cast (label_range, range_of_op->type ());
+      label_range.intersect (range_of_op);
+      if (label_range == *range_of_op)
+	return label;
+    }
+  else if (i > j)
+    {
+      /* If there are no labels at all, take the default.  */
+      return gimple_switch_label (switch_stmt, 0);
+    }
+  else
+    {
+      /* Otherwise, there are various labels that can encompass
+	 the range of operand.  In which case, see if the range of
+	 the operand is entirely *outside* the bounds of all the
+	 (non-default) case labels.  If so, take the default.  */
+      unsigned n = gimple_switch_num_labels (switch_stmt);
+      tree min_label = gimple_switch_label (switch_stmt, 1);
+      tree max_label = gimple_switch_label (switch_stmt, n - 1);
+      tree case_high = CASE_HIGH (max_label);
+      if (!case_high)
+	case_high = CASE_LOW (max_label);
+      int_range_max label_range (CASE_LOW (min_label), case_high);
+      if (!types_compatible_p (label_range.type (), range_of_op->type ()))
+	range_cast (label_range, range_of_op->type ());
+      label_range.intersect (range_of_op);
+      if (label_range.undefined_p ())
+	return gimple_switch_label (switch_stmt, 0);
+    }
+  return NULL_TREE;
+}
+
+struct case_info
+{
+  tree expr;
+  basic_block bb;
+};
+
+/* Location information for ASSERT_EXPRs.  Each instance of this
+   structure describes an ASSERT_EXPR for an SSA name.  Since a single
+   SSA name may have more than one assertion associated with it, these
+   locations are kept in a linked list attached to the corresponding
+   SSA name.  */
+struct assert_locus
+{
+  /* Basic block where the assertion would be inserted.  */
+  basic_block bb;
+
+  /* Some assertions need to be inserted on an edge (e.g., assertions
+     generated by COND_EXPRs).  In those cases, BB will be NULL.  */
+  edge e;
+
+  /* Pointer to the statement that generated this assertion.  */
+  gimple_stmt_iterator si;
+
+  /* Predicate code for the ASSERT_EXPR.  Must be COMPARISON_CLASS_P.  */
+  enum tree_code comp_code;
+
+  /* Value being compared against.  */
+  tree val;
+
+  /* Expression to compare.  */
+  tree expr;
+
+  /* Next node in the linked list.  */
+  assert_locus *next;
+};
+
+/* Class to traverse the flowgraph looking for conditional jumps to
+   insert ASSERT_EXPR range expressions.  These range expressions are
+   meant to provide information to optimizations that need to reason
+   in terms of value ranges.  They will not be expanded into RTL.  */
+
+class vrp_asserts
+{
+public:
+  vrp_asserts (struct function *fn) : fun (fn) { }
+
+  void insert_range_assertions ();
+
+  /* Convert range assertion expressions into the implied copies and
+     copy propagate away the copies.  */
+  void remove_range_assertions ();
+
+  /* Dump all the registered assertions for all the names to FILE.  */
+  void dump (FILE *);
+
+  /* Dump all the registered assertions for NAME to FILE.  */
+  void dump (FILE *file, tree name);
+
+  /* Dump all the registered assertions for NAME to stderr.  */
+  void debug (tree name)
+  {
+    dump (stderr, name);
+  }
+
+  /* Dump all the registered assertions for all the names to stderr.  */
+  void debug ()
+  {
+    dump (stderr);
+  }
+
+private:
+  /* Set of SSA names found live during the RPO traversal of the function
+     for still active basic-blocks.  */
+  live_names live;
+
+  /* Function to work on.  */
+  struct function *fun;
+
+  /* If bit I is present, it means that SSA name N_i has a list of
+     assertions that should be inserted in the IL.  */
+  bitmap need_assert_for;
+
+  /* Array of locations lists where to insert assertions.  ASSERTS_FOR[I]
+     holds a list of ASSERT_LOCUS_T nodes that describe where
+     ASSERT_EXPRs for SSA name N_I should be inserted.  */
+  assert_locus **asserts_for;
+
+  /* Finish found ASSERTS for E and register them at GSI.  */
+  void finish_register_edge_assert_for (edge e, gimple_stmt_iterator gsi,
+					vec<assert_info> &asserts);
+
+  /* Determine whether the outgoing edges of BB should receive an
+     ASSERT_EXPR for each of the operands of BB's LAST statement.  The
+     last statement of BB must be a SWITCH_EXPR.
+
+     If any of the sub-graphs rooted at BB have an interesting use of
+     the predicate operands, an assert location node is added to the
+     list of assertions for the corresponding operands.  */
+  void find_switch_asserts (basic_block bb, gswitch *last);
+
+  /* Do an RPO walk over the function computing SSA name liveness
+     on-the-fly and deciding on assert expressions to insert.  */
+  void find_assert_locations ();
+
+  /* Traverse all the statements in block BB looking for statements that
+     may generate useful assertions for the SSA names in their operand.
+     See method implementation comentary for more information.  */
+  void find_assert_locations_in_bb (basic_block bb);
+
+  /* Determine whether the outgoing edges of BB should receive an
+     ASSERT_EXPR for each of the operands of BB's LAST statement.
+     The last statement of BB must be a COND_EXPR.
+
+     If any of the sub-graphs rooted at BB have an interesting use of
+     the predicate operands, an assert location node is added to the
+     list of assertions for the corresponding operands.  */
+  void find_conditional_asserts (basic_block bb, gcond *last);
+
+  /* Process all the insertions registered for every name N_i registered
+     in NEED_ASSERT_FOR.  The list of assertions to be inserted are
+     found in ASSERTS_FOR[i].  */
+  void process_assert_insertions ();
+
+  /* If NAME doesn't have an ASSERT_EXPR registered for asserting
+     'EXPR COMP_CODE VAL' at a location that dominates block BB or
+     E->DEST, then register this location as a possible insertion point
+     for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.
+
+     BB, E and SI provide the exact insertion point for the new
+     ASSERT_EXPR.  If BB is NULL, then the ASSERT_EXPR is to be inserted
+     on edge E.  Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
+     BB.  If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
+     must not be NULL.  */
+  void register_new_assert_for (tree name, tree expr,
+				enum tree_code comp_code,
+				tree val, basic_block bb,
+				edge e, gimple_stmt_iterator si);
+
+  /* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V,
+     create a new SSA name N and return the assertion assignment
+     'N = ASSERT_EXPR <V, V OP W>'.  */
+  gimple *build_assert_expr_for (tree cond, tree v);
+
+  /* Create an ASSERT_EXPR for NAME and insert it in the location
+     indicated by LOC.  Return true if we made any edge insertions.  */
+  bool process_assert_insertions_for (tree name, assert_locus *loc);
+
+  /* Qsort callback for sorting assert locations.  */
+  template <bool stable> static int compare_assert_loc (const void *,
+							const void *);
+
+  /* Return true if STMT is a conditional whose predicate involves
+     floating point values.  */
+  bool fp_predicate (gimple *stmt)
+  {
+    GIMPLE_CHECK (stmt, GIMPLE_COND);
+    return FLOAT_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)));
+  }
+
+  bool all_imm_uses_in_stmt_or_feed_cond (tree var, gimple *stmt,
+					  basic_block cond_bb);
+
+  static int compare_case_labels (const void *, const void *);
+};
+
+/* Given a COND_EXPR COND of the form 'V OP W', and an SSA name V,
+   create a new SSA name N and return the assertion assignment
+   'N = ASSERT_EXPR <V, V OP W>'.  */
+
+gimple *
+vrp_asserts::build_assert_expr_for (tree cond, tree v)
+{
+  tree a;
+  gassign *assertion;
+
+  gcc_assert (TREE_CODE (v) == SSA_NAME
+	      && COMPARISON_CLASS_P (cond));
+
+  a = build2 (ASSERT_EXPR, TREE_TYPE (v), v, cond);
+  assertion = gimple_build_assign (NULL_TREE, a);
+
+  /* The new ASSERT_EXPR creates a new SSA name that replaces the
+     operand of the ASSERT_EXPR.  Create it so the new name and the old one
+     are registered in the replacement table so that we can fix the SSA web
+     after adding all the ASSERT_EXPRs.  */
+  tree new_def = create_new_def_for (v, assertion, NULL);
+  /* Make sure we preserve abnormalness throughout an ASSERT_EXPR chain
+     given we have to be able to fully propagate those out to re-create
+     valid SSA when removing the asserts.  */
+  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (v))
+    SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_def) = 1;
+
+  return assertion;
+}
+
+/* Dump all the registered assertions for NAME to FILE.  */
+
+void
+vrp_asserts::dump (FILE *file, tree name)
+{
+  assert_locus *loc;
+
+  fprintf (file, "Assertions to be inserted for ");
+  print_generic_expr (file, name);
+  fprintf (file, "\n");
+
+  loc = asserts_for[SSA_NAME_VERSION (name)];
+  while (loc)
+    {
+      fprintf (file, "\t");
+      print_gimple_stmt (file, gsi_stmt (loc->si), 0);
+      fprintf (file, "\n\tBB #%d", loc->bb->index);
+      if (loc->e)
+	{
+	  fprintf (file, "\n\tEDGE %d->%d", loc->e->src->index,
+	           loc->e->dest->index);
+	  dump_edge_info (file, loc->e, dump_flags, 0);
+	}
+      fprintf (file, "\n\tPREDICATE: ");
+      print_generic_expr (file, loc->expr);
+      fprintf (file, " %s ", get_tree_code_name (loc->comp_code));
+      print_generic_expr (file, loc->val);
+      fprintf (file, "\n\n");
+      loc = loc->next;
+    }
+
+  fprintf (file, "\n");
+}
+
+/* Dump all the registered assertions for all the names to FILE.  */
+
+void
+vrp_asserts::dump (FILE *file)
+{
+  unsigned i;
+  bitmap_iterator bi;
+
+  fprintf (file, "\nASSERT_EXPRs to be inserted\n\n");
+  EXECUTE_IF_SET_IN_BITMAP (need_assert_for, 0, i, bi)
+    dump (file, ssa_name (i));
+  fprintf (file, "\n");
+}
+
+/* If NAME doesn't have an ASSERT_EXPR registered for asserting
+   'EXPR COMP_CODE VAL' at a location that dominates block BB or
+   E->DEST, then register this location as a possible insertion point
+   for ASSERT_EXPR <NAME, EXPR COMP_CODE VAL>.
+
+   BB, E and SI provide the exact insertion point for the new
+   ASSERT_EXPR.  If BB is NULL, then the ASSERT_EXPR is to be inserted
+   on edge E.  Otherwise, if E is NULL, the ASSERT_EXPR is inserted on
+   BB.  If SI points to a COND_EXPR or a SWITCH_EXPR statement, then E
+   must not be NULL.  */
+
+void
+vrp_asserts::register_new_assert_for (tree name, tree expr,
+				      enum tree_code comp_code,
+				      tree val,
+				      basic_block bb,
+				      edge e,
+				      gimple_stmt_iterator si)
+{
+  assert_locus *n, *loc, *last_loc;
+  basic_block dest_bb;
+
+  gcc_checking_assert (bb == NULL || e == NULL);
+
+  if (e == NULL)
+    gcc_checking_assert (gimple_code (gsi_stmt (si)) != GIMPLE_COND
+			 && gimple_code (gsi_stmt (si)) != GIMPLE_SWITCH);
+
+  /* Never build an assert comparing against an integer constant with
+     TREE_OVERFLOW set.  This confuses our undefined overflow warning
+     machinery.  */
+  if (TREE_OVERFLOW_P (val))
+    val = drop_tree_overflow (val);
+
+  /* The new assertion A will be inserted at BB or E.  We need to
+     determine if the new location is dominated by a previously
+     registered location for A.  If we are doing an edge insertion,
+     assume that A will be inserted at E->DEST.  Note that this is not
+     necessarily true.
+
+     If E is a critical edge, it will be split.  But even if E is
+     split, the new block will dominate the same set of blocks that
+     E->DEST dominates.
+
+     The reverse, however, is not true: blocks dominated by E->DEST
+     will not be dominated by the new block created to split E.  So,
+     if the insertion location is on a critical edge, we will not use
+     the new location to move another assertion previously registered
+     at a block dominated by E->DEST.  */
+  dest_bb = (bb) ? bb : e->dest;
+
+  /* If NAME already has an ASSERT_EXPR registered for COMP_CODE and
+     VAL at a block dominating DEST_BB, then we don't need to insert a new
+     one.  Similarly, if the same assertion already exists at a block
+     dominated by DEST_BB and the new location is not on a critical
+     edge, then update the existing location for the assertion (i.e.,
+     move the assertion up in the dominance tree).
+
+     Note, this is implemented as a simple linked list because there
+     should not be more than a handful of assertions registered per
+     name.  If this becomes a performance problem, a table hashed by
+     COMP_CODE and VAL could be implemented.  */
+  loc = asserts_for[SSA_NAME_VERSION (name)];
+  last_loc = loc;
+  while (loc)
+    {
+      if (loc->comp_code == comp_code
+	  && (loc->val == val
+	      || operand_equal_p (loc->val, val, 0))
+	  && (loc->expr == expr
+	      || operand_equal_p (loc->expr, expr, 0)))
+	{
+	  /* If E is not a critical edge and DEST_BB
+	     dominates the existing location for the assertion, move
+	     the assertion up in the dominance tree by updating its
+	     location information.  */
+	  if ((e == NULL || !EDGE_CRITICAL_P (e))
+	      && dominated_by_p (CDI_DOMINATORS, loc->bb, dest_bb))
+	    {
+	      loc->bb = dest_bb;
+	      loc->e = e;
+	      loc->si = si;
+	      return;
+	    }
+	}
+
+      /* Update the last node of the list and move to the next one.  */
+      last_loc = loc;
+      loc = loc->next;
+    }
+
+  /* If we didn't find an assertion already registered for
+     NAME COMP_CODE VAL, add a new one at the end of the list of
+     assertions associated with NAME.  */
+  n = XNEW (struct assert_locus);
+  n->bb = dest_bb;
+  n->e = e;
+  n->si = si;
+  n->comp_code = comp_code;
+  n->val = val;
+  n->expr = expr;
+  n->next = NULL;
+
+  if (last_loc)
+    last_loc->next = n;
+  else
+    asserts_for[SSA_NAME_VERSION (name)] = n;
+
+  bitmap_set_bit (need_assert_for, SSA_NAME_VERSION (name));
+}
+
+/* Finish found ASSERTS for E and register them at GSI.  */
+
+void
+vrp_asserts::finish_register_edge_assert_for (edge e,
+					      gimple_stmt_iterator gsi,
+					      vec<assert_info> &asserts)
+{
+  for (unsigned i = 0; i < asserts.length (); ++i)
+    /* Only register an ASSERT_EXPR if NAME was found in the sub-graph
+       reachable from E.  */
+    if (live.live_on_edge_p (asserts[i].name, e))
+      register_new_assert_for (asserts[i].name, asserts[i].expr,
+			       asserts[i].comp_code, asserts[i].val,
+			       NULL, e, gsi);
+}
+
+/* Determine whether the outgoing edges of BB should receive an
+   ASSERT_EXPR for each of the operands of BB's LAST statement.
+   The last statement of BB must be a COND_EXPR.
+
+   If any of the sub-graphs rooted at BB have an interesting use of
+   the predicate operands, an assert location node is added to the
+   list of assertions for the corresponding operands.  */
+
+void
+vrp_asserts::find_conditional_asserts (basic_block bb, gcond *last)
+{
+  gimple_stmt_iterator bsi;
+  tree op;
+  edge_iterator ei;
+  edge e;
+  ssa_op_iter iter;
+
+  bsi = gsi_for_stmt (last);
+
+  /* Look for uses of the operands in each of the sub-graphs
+     rooted at BB.  We need to check each of the outgoing edges
+     separately, so that we know what kind of ASSERT_EXPR to
+     insert.  */
+  FOR_EACH_EDGE (e, ei, bb->succs)
+    {
+      if (e->dest == bb)
+	continue;
+
+      /* Register the necessary assertions for each operand in the
+	 conditional predicate.  */
+      auto_vec<assert_info, 8> asserts;
+      FOR_EACH_SSA_TREE_OPERAND (op, last, iter, SSA_OP_USE)
+	register_edge_assert_for (op, e,
+				  gimple_cond_code (last),
+				  gimple_cond_lhs (last),
+				  gimple_cond_rhs (last), asserts);
+      finish_register_edge_assert_for (e, bsi, asserts);
+    }
+}
+
+/* Compare two case labels sorting first by the destination bb index
+   and then by the case value.  */
+
+int
+vrp_asserts::compare_case_labels (const void *p1, const void *p2)
+{
+  const struct case_info *ci1 = (const struct case_info *) p1;
+  const struct case_info *ci2 = (const struct case_info *) p2;
+  int idx1 = ci1->bb->index;
+  int idx2 = ci2->bb->index;
+
+  if (idx1 < idx2)
+    return -1;
+  else if (idx1 == idx2)
+    {
+      /* Make sure the default label is first in a group.  */
+      if (!CASE_LOW (ci1->expr))
+	return -1;
+      else if (!CASE_LOW (ci2->expr))
+	return 1;
+      else
+	return tree_int_cst_compare (CASE_LOW (ci1->expr),
+				     CASE_LOW (ci2->expr));
+    }
+  else
     return 1;
 }
 
@@ -2678,7 +2982,7 @@ compare_case_labels (const void *p1, const void *p2)
    list of assertions for the corresponding operands.  */
 
 void
-vrp_insert::find_switch_asserts (basic_block bb, gswitch *last)
+vrp_asserts::find_switch_asserts (basic_block bb, gswitch *last)
 {
   gimple_stmt_iterator bsi;
   tree op;
@@ -2827,7 +3131,6 @@ vrp_insert::find_switch_asserts (basic_block bb, gswitch *last)
     }
 }
 
-
 /* Traverse all the statements in block BB looking for statements that
    may generate useful assertions for the SSA names in their operand.
    If a statement produces a useful assertion A for name N_i, then the
@@ -2888,7 +3191,7 @@ vrp_insert::find_switch_asserts (basic_block bb, gswitch *last)
    P_4 will receive an ASSERT_EXPR.  */
 
 void
-vrp_insert::find_assert_locations_in_bb (basic_block bb)
+vrp_asserts::find_assert_locations_in_bb (basic_block bb)
 {
   gimple *last;
 
@@ -3008,7 +3311,7 @@ vrp_insert::find_assert_locations_in_bb (basic_block bb)
    on-the-fly and deciding on assert expressions to insert.  */
 
 void
-vrp_insert::find_assert_locations (void)
+vrp_asserts::find_assert_locations (void)
 {
   int *rpo = XNEWVEC (int, last_basic_block_for_fn (fun));
   int *bb_rpo = XNEWVEC (int, last_basic_block_for_fn (fun));
@@ -3088,7 +3391,7 @@ vrp_insert::find_assert_locations (void)
    indicated by LOC.  Return true if we made any edge insertions.  */
 
 bool
-vrp_insert::process_assert_insertions_for (tree name, assert_locus *loc)
+vrp_asserts::process_assert_insertions_for (tree name, assert_locus *loc)
 {
   /* Build the comparison expression NAME_i COMP_CODE VAL.  */
   gimple *stmt;
@@ -3153,7 +3456,7 @@ vrp_insert::process_assert_insertions_for (tree name, assert_locus *loc)
 
 template <bool stable>
 int
-vrp_insert::compare_assert_loc (const void *pa, const void *pb)
+vrp_asserts::compare_assert_loc (const void *pa, const void *pb)
 {
   assert_locus * const a = *(assert_locus * const *)pa;
   assert_locus * const b = *(assert_locus * const *)pb;
@@ -3221,7 +3524,7 @@ vrp_insert::compare_assert_loc (const void *pa, const void *pb)
    found in ASSERTS_FOR[i].  */
 
 void
-vrp_insert::process_assert_insertions ()
+vrp_asserts::process_assert_insertions ()
 {
   unsigned i;
   bitmap_iterator bi;
@@ -3314,7 +3617,6 @@ vrp_insert::process_assert_insertions ()
 			    num_asserts);
 }
 
-
 /* Traverse the flowgraph looking for conditional jumps to insert range
    expressions.  These range expressions are meant to provide information
    to optimizations that need to reason in terms of value ranges.  They
@@ -3348,7 +3650,7 @@ vrp_insert::process_assert_insertions ()
    definition of 'x'.  */
 
 void
-vrp_insert::insert_range_assertions (void)
+vrp_asserts::insert_range_assertions (void)
 {
   need_assert_for = BITMAP_ALLOC (NULL);
   asserts_for = XCNEWVEC (assert_locus *, num_ssa_names);
@@ -3372,117 +3674,34 @@ vrp_insert::insert_range_assertions (void)
   BITMAP_FREE (need_assert_for);
 }
 
-class vrp_prop : public ssa_propagation_engine
-{
-public:
-  enum ssa_prop_result visit_stmt (gimple *, edge *, tree *) FINAL OVERRIDE;
-  enum ssa_prop_result visit_phi (gphi *) FINAL OVERRIDE;
-
-  struct function *fun;
-
-  void vrp_initialize (struct function *);
-  void vrp_finalize (class vrp_folder *, bool);
-
-  class vr_values vr_values;
-
-private:
-  /* Temporary delegator to minimize code churn.  */
-  const value_range_equiv *get_value_range (const_tree op)
-    { return vr_values.get_value_range (op); }
-  void set_def_to_varying (const_tree def)
-    { vr_values.set_def_to_varying (def); }
-  void set_defs_to_varying (gimple *stmt)
-    { vr_values.set_defs_to_varying (stmt); }
-  void extract_range_from_stmt (gimple *stmt, edge *taken_edge_p,
-				tree *output_p, value_range_equiv *vr)
-    { vr_values.extract_range_from_stmt (stmt, taken_edge_p, output_p, vr); }
-  bool update_value_range (const_tree op, value_range_equiv *vr)
-    { return vr_values.update_value_range (op, vr); }
-  void extract_range_basic (value_range_equiv *vr, gimple *stmt)
-    { vr_values.extract_range_basic (vr, stmt); }
-  void extract_range_from_phi_node (gphi *phi, value_range_equiv *vr)
-    { vr_values.extract_range_from_phi_node (phi, vr); }
-};
-
 /* Return true if all imm uses of VAR are either in STMT, or
    feed (optionally through a chain of single imm uses) GIMPLE_COND
    in basic block COND_BB.  */
-
-static bool
-all_imm_uses_in_stmt_or_feed_cond (tree var, gimple *stmt, basic_block cond_bb)
-{
-  use_operand_p use_p, use2_p;
-  imm_use_iterator iter;
-
-  FOR_EACH_IMM_USE_FAST (use_p, iter, var)
-    if (USE_STMT (use_p) != stmt)
-      {
-	gimple *use_stmt = USE_STMT (use_p), *use_stmt2;
-	if (is_gimple_debug (use_stmt))
-	  continue;
-	while (is_gimple_assign (use_stmt)
-	       && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
-	       && single_imm_use (gimple_assign_lhs (use_stmt),
-				  &use2_p, &use_stmt2))
-	  use_stmt = use_stmt2;
-	if (gimple_code (use_stmt) != GIMPLE_COND
-	    || gimple_bb (use_stmt) != cond_bb)
-	  return false;
-      }
-  return true;
-}
-
-/* Handle
-   _4 = x_3 & 31;
-   if (_4 != 0)
-     goto <bb 6>;
-   else
-     goto <bb 7>;
-   <bb 6>:
-   __builtin_unreachable ();
-   <bb 7>:
-   x_5 = ASSERT_EXPR <x_3, ...>;
-   If x_3 has no other immediate uses (checked by caller),
-   var is the x_3 var from ASSERT_EXPR, we can clear low 5 bits
-   from the non-zero bitmask.  */
-
-void
-maybe_set_nonzero_bits (edge e, tree var)
-{
-  basic_block cond_bb = e->src;
-  gimple *stmt = last_stmt (cond_bb);
-  tree cst;
-
-  if (stmt == NULL
-      || gimple_code (stmt) != GIMPLE_COND
-      || gimple_cond_code (stmt) != ((e->flags & EDGE_TRUE_VALUE)
-				     ? EQ_EXPR : NE_EXPR)
-      || TREE_CODE (gimple_cond_lhs (stmt)) != SSA_NAME
-      || !integer_zerop (gimple_cond_rhs (stmt)))
-    return;
-
-  stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (stmt));
-  if (!is_gimple_assign (stmt)
-      || gimple_assign_rhs_code (stmt) != BIT_AND_EXPR
-      || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST)
-    return;
-  if (gimple_assign_rhs1 (stmt) != var)
-    {
-      gimple *stmt2;
-
-      if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
-	return;
-      stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
-      if (!gimple_assign_cast_p (stmt2)
-	  || gimple_assign_rhs1 (stmt2) != var
-	  || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt2))
-	  || (TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (stmt)))
-			      != TYPE_PRECISION (TREE_TYPE (var))))
-	return;
-    }
-  cst = gimple_assign_rhs2 (stmt);
-  set_nonzero_bits (var, wi::bit_and_not (get_nonzero_bits (var),
-					  wi::to_wide (cst)));
+
+bool
+vrp_asserts::all_imm_uses_in_stmt_or_feed_cond (tree var,
+						gimple *stmt,
+						basic_block cond_bb)
+{
+  use_operand_p use_p, use2_p;
+  imm_use_iterator iter;
+
+  FOR_EACH_IMM_USE_FAST (use_p, iter, var)
+    if (USE_STMT (use_p) != stmt)
+      {
+	gimple *use_stmt = USE_STMT (use_p), *use_stmt2;
+	if (is_gimple_debug (use_stmt))
+	  continue;
+	while (is_gimple_assign (use_stmt)
+	       && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
+	       && single_imm_use (gimple_assign_lhs (use_stmt),
+				  &use2_p, &use_stmt2))
+	  use_stmt = use_stmt2;
+	if (gimple_code (use_stmt) != GIMPLE_COND
+	    || gimple_bb (use_stmt) != cond_bb)
+	  return false;
+      }
+  return true;
 }
 
 /* Convert range assertion expressions into the implied copies and
@@ -3510,7 +3729,7 @@ maybe_set_nonzero_bits (edge e, tree var)
    multiple ranges to be associated with one SSA_NAME.  */
 
 void
-vrp_insert::remove_range_assertions ()
+vrp_asserts::remove_range_assertions ()
 {
   basic_block bb;
   gimple_stmt_iterator si;
@@ -3595,270 +3814,163 @@ vrp_insert::remove_range_assertions ()
       }
 }
 
-/* Return true if STMT is interesting for VRP.  */
-
-bool
-stmt_interesting_for_vrp (gimple *stmt)
-{
-  if (gimple_code (stmt) == GIMPLE_PHI)
-    {
-      tree res = gimple_phi_result (stmt);
-      return (!virtual_operand_p (res)
-	      && (INTEGRAL_TYPE_P (TREE_TYPE (res))
-		  || POINTER_TYPE_P (TREE_TYPE (res))));
-    }
-  else if (is_gimple_assign (stmt) || is_gimple_call (stmt))
-    {
-      tree lhs = gimple_get_lhs (stmt);
-
-      /* In general, assignments with virtual operands are not useful
-	 for deriving ranges, with the obvious exception of calls to
-	 builtin functions.  */
-      if (lhs && TREE_CODE (lhs) == SSA_NAME
-	  && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
-	      || POINTER_TYPE_P (TREE_TYPE (lhs)))
-	  && (is_gimple_call (stmt)
-	      || !gimple_vuse (stmt)))
-	return true;
-      else if (is_gimple_call (stmt) && gimple_call_internal_p (stmt))
-	switch (gimple_call_internal_fn (stmt))
-	  {
-	  case IFN_ADD_OVERFLOW:
-	  case IFN_SUB_OVERFLOW:
-	  case IFN_MUL_OVERFLOW:
-	  case IFN_ATOMIC_COMPARE_EXCHANGE:
-	    /* These internal calls return _Complex integer type,
-	       but are interesting to VRP nevertheless.  */
-	    if (lhs && TREE_CODE (lhs) == SSA_NAME)
-	      return true;
-	    break;
-	  default:
-	    break;
-	  }
-    }
-  else if (gimple_code (stmt) == GIMPLE_COND
-	   || gimple_code (stmt) == GIMPLE_SWITCH)
-    return true;
-
-  return false;
-}
-
-/* Initialization required by ssa_propagate engine.  */
-
-void
-vrp_prop::vrp_initialize (struct function *fn)
+class vrp_folder : public substitute_and_fold_engine
 {
-  basic_block bb;
-  fun = fn;
+ public:
+  vrp_folder (vr_values *v)
+    : substitute_and_fold_engine (/* Fold all stmts.  */ true),
+      m_vr_values (v), simplifier (v)
+    {  }
+  bool fold_stmt (gimple_stmt_iterator *) FINAL OVERRIDE;
 
-  FOR_EACH_BB_FN (bb, fun)
+  tree value_of_expr (tree name, gimple *stmt) OVERRIDE
     {
-      for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
-	   gsi_next (&si))
-	{
-	  gphi *phi = si.phi ();
-	  if (!stmt_interesting_for_vrp (phi))
-	    {
-	      tree lhs = PHI_RESULT (phi);
-	      set_def_to_varying (lhs);
-	      prop_set_simulate_again (phi, false);
-	    }
-	  else
-	    prop_set_simulate_again (phi, true);
-	}
-
-      for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
-	   gsi_next (&si))
-        {
-	  gimple *stmt = gsi_stmt (si);
-
- 	  /* If the statement is a control insn, then we do not
- 	     want to avoid simulating the statement once.  Failure
- 	     to do so means that those edges will never get added.  */
-	  if (stmt_ends_bb_p (stmt))
-	    prop_set_simulate_again (stmt, true);
-	  else if (!stmt_interesting_for_vrp (stmt))
-	    {
-	      set_defs_to_varying (stmt);
-	      prop_set_simulate_again (stmt, false);
-	    }
-	  else
-	    prop_set_simulate_again (stmt, true);
-	}
+      return m_vr_values->value_of_expr (name, stmt);
     }
-}
-
-/* Searches the case label vector VEC for the index *IDX of the CASE_LABEL
-   that includes the value VAL.  The search is restricted to the range
-   [START_IDX, n - 1] where n is the size of VEC.
-
-   If there is a CASE_LABEL for VAL, its index is placed in IDX and true is
-   returned.
-
-   If there is no CASE_LABEL for VAL and there is one that is larger than VAL,
-   it is placed in IDX and false is returned.
-
-   If VAL is larger than any CASE_LABEL, n is placed on IDX and false is
-   returned. */
-
-bool
-find_case_label_index (gswitch *stmt, size_t start_idx, tree val, size_t *idx)
-{
-  size_t n = gimple_switch_num_labels (stmt);
-  size_t low, high;
-
-  /* Find case label for minimum of the value range or the next one.
-     At each iteration we are searching in [low, high - 1]. */
-
-  for (low = start_idx, high = n; high != low; )
-    {
-      tree t;
-      int cmp;
-      /* Note that i != high, so we never ask for n. */
-      size_t i = (high + low) / 2;
-      t = gimple_switch_label (stmt, i);
-
-      /* Cache the result of comparing CASE_LOW and val.  */
-      cmp = tree_int_cst_compare (CASE_LOW (t), val);
+  class vr_values *m_vr_values;
 
-      if (cmp == 0)
-	{
-	  /* Ranges cannot be empty. */
-	  *idx = i;
-	  return true;
-	}
-      else if (cmp > 0)
-        high = i;
-      else
-	{
-	  low = i + 1;
-	  if (CASE_HIGH (t) != NULL
-	      && tree_int_cst_compare (CASE_HIGH (t), val) >= 0)
-	    {
-	      *idx = i;
-	      return true;
-	    }
-        }
-    }
+private:
+  bool fold_predicate_in (gimple_stmt_iterator *);
+  /* Delegators.  */
+  tree vrp_evaluate_conditional (tree_code code, tree op0,
+				 tree op1, gimple *stmt)
+    { return simplifier.vrp_evaluate_conditional (code, op0, op1, stmt); }
+  bool simplify_stmt_using_ranges (gimple_stmt_iterator *gsi)
+    { return simplifier.simplify (gsi); }
 
-  *idx = high;
-  return false;
-}
+  simplify_using_ranges simplifier;
+};
 
-/* Searches the case label vector VEC for the range of CASE_LABELs that is used
-   for values between MIN and MAX. The first index is placed in MIN_IDX. The
-   last index is placed in MAX_IDX. If the range of CASE_LABELs is empty
-   then MAX_IDX < MIN_IDX.
-   Returns true if the default label is not needed. */
+/* If the statement pointed by SI has a predicate whose value can be
+   computed using the value range information computed by VRP, compute
+   its value and return true.  Otherwise, return false.  */
 
 bool
-find_case_label_range (gswitch *stmt, tree min, tree max, size_t *min_idx,
-		       size_t *max_idx)
+vrp_folder::fold_predicate_in (gimple_stmt_iterator *si)
 {
-  size_t i, j;
-  bool min_take_default = !find_case_label_index (stmt, 1, min, &i);
-  bool max_take_default = !find_case_label_index (stmt, i, max, &j);
+  bool assignment_p = false;
+  tree val;
+  gimple *stmt = gsi_stmt (*si);
 
-  if (i == j
-      && min_take_default
-      && max_take_default)
+  if (is_gimple_assign (stmt)
+      && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
     {
-      /* Only the default case label reached.
-         Return an empty range. */
-      *min_idx = 1;
-      *max_idx = 0;
-      return false;
+      assignment_p = true;
+      val = vrp_evaluate_conditional (gimple_assign_rhs_code (stmt),
+				      gimple_assign_rhs1 (stmt),
+				      gimple_assign_rhs2 (stmt),
+				      stmt);
     }
+  else if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
+    val = vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
+				    gimple_cond_lhs (cond_stmt),
+				    gimple_cond_rhs (cond_stmt),
+				    stmt);
   else
+    return false;
+
+  if (val)
     {
-      bool take_default = min_take_default || max_take_default;
-      tree low, high;
-      size_t k;
+      if (assignment_p)
+        val = fold_convert (gimple_expr_type (stmt), val);
 
-      if (max_take_default)
-	j--;
+      if (dump_file)
+	{
+	  fprintf (dump_file, "Folding predicate ");
+	  print_gimple_expr (dump_file, stmt, 0);
+	  fprintf (dump_file, " to ");
+	  print_generic_expr (dump_file, val);
+	  fprintf (dump_file, "\n");
+	}
 
-      /* If the case label range is continuous, we do not need
-	 the default case label.  Verify that.  */
-      high = CASE_LOW (gimple_switch_label (stmt, i));
-      if (CASE_HIGH (gimple_switch_label (stmt, i)))
-	high = CASE_HIGH (gimple_switch_label (stmt, i));
-      for (k = i + 1; k <= j; ++k)
+      if (is_gimple_assign (stmt))
+	gimple_assign_set_rhs_from_tree (si, val);
+      else
 	{
-	  low = CASE_LOW (gimple_switch_label (stmt, k));
-	  if (!integer_onep (int_const_binop (MINUS_EXPR, low, high)))
-	    {
-	      take_default = true;
-	      break;
-	    }
-	  high = low;
-	  if (CASE_HIGH (gimple_switch_label (stmt, k)))
-	    high = CASE_HIGH (gimple_switch_label (stmt, k));
+	  gcc_assert (gimple_code (stmt) == GIMPLE_COND);
+	  gcond *cond_stmt = as_a <gcond *> (stmt);
+	  if (integer_zerop (val))
+	    gimple_cond_make_false (cond_stmt);
+	  else if (integer_onep (val))
+	    gimple_cond_make_true (cond_stmt);
+	  else
+	    gcc_unreachable ();
 	}
 
-      *min_idx = i;
-      *max_idx = j;
-      return !take_default;
+      return true;
     }
+
+  return false;
 }
 
-/* Given a SWITCH_STMT, return the case label that encompasses the
-   known possible values for the switch operand.  RANGE_OF_OP is a
-   range for the known values of the switch operand.  */
+/* Callback for substitute_and_fold folding the stmt at *SI.  */
 
-tree
-find_case_label_range (gswitch *switch_stmt, const irange *range_of_op)
+bool
+vrp_folder::fold_stmt (gimple_stmt_iterator *si)
 {
-  if (range_of_op->undefined_p ()
-      || range_of_op->varying_p ()
-      || range_of_op->symbolic_p ())
-    return NULL_TREE;
+  if (fold_predicate_in (si))
+    return true;
 
-  size_t i, j;
-  tree op = gimple_switch_index (switch_stmt);
-  tree type = TREE_TYPE (op);
-  tree tmin = wide_int_to_tree (type, range_of_op->lower_bound ());
-  tree tmax = wide_int_to_tree (type, range_of_op->upper_bound ());
-  find_case_label_range (switch_stmt, tmin, tmax, &i, &j);
-  if (i == j)
-    {
-      /* Look for exactly one label that encompasses the range of
-	 the operand.  */
-      tree label = gimple_switch_label (switch_stmt, i);
-      tree case_high
-	= CASE_HIGH (label) ? CASE_HIGH (label) : CASE_LOW (label);
-      int_range_max label_range (CASE_LOW (label), case_high);
-      if (!types_compatible_p (label_range.type (), range_of_op->type ()))
-	range_cast (label_range, range_of_op->type ());
-      label_range.intersect (range_of_op);
-      if (label_range == *range_of_op)
-	return label;
-    }
-  else if (i > j)
-    {
-      /* If there are no labels at all, take the default.  */
-      return gimple_switch_label (switch_stmt, 0);
-    }
-  else
+  return simplify_stmt_using_ranges (si);
+}
+
+class vrp_prop : public ssa_propagation_engine
+{
+public:
+  enum ssa_prop_result visit_stmt (gimple *, edge *, tree *) FINAL OVERRIDE;
+  enum ssa_prop_result visit_phi (gphi *) FINAL OVERRIDE;
+
+  struct function *fun;
+
+  void initialize (struct function *);
+  void finalize ();
+
+  class vr_values vr_values;
+};
+
+/* Initialization required by ssa_propagate engine.  */
+
+void
+vrp_prop::initialize (struct function *fn)
+{
+  basic_block bb;
+  fun = fn;
+
+  FOR_EACH_BB_FN (bb, fun)
     {
-      /* Otherwise, there are various labels that can encompass
-	 the range of operand.  In which case, see if the range of
-	 the operand is entirely *outside* the bounds of all the
-	 (non-default) case labels.  If so, take the default.  */
-      unsigned n = gimple_switch_num_labels (switch_stmt);
-      tree min_label = gimple_switch_label (switch_stmt, 1);
-      tree max_label = gimple_switch_label (switch_stmt, n - 1);
-      tree case_high = CASE_HIGH (max_label);
-      if (!case_high)
-	case_high = CASE_LOW (max_label);
-      int_range_max label_range (CASE_LOW (min_label), case_high);
-      if (!types_compatible_p (label_range.type (), range_of_op->type ()))
-	range_cast (label_range, range_of_op->type ());
-      label_range.intersect (range_of_op);
-      if (label_range.undefined_p ())
-	return gimple_switch_label (switch_stmt, 0);
+      for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
+	   gsi_next (&si))
+	{
+	  gphi *phi = si.phi ();
+	  if (!stmt_interesting_for_vrp (phi))
+	    {
+	      tree lhs = PHI_RESULT (phi);
+	      vr_values.set_def_to_varying (lhs);
+	      prop_set_simulate_again (phi, false);
+	    }
+	  else
+	    prop_set_simulate_again (phi, true);
+	}
+
+      for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
+	   gsi_next (&si))
+        {
+	  gimple *stmt = gsi_stmt (si);
+
+ 	  /* If the statement is a control insn, then we do not
+ 	     want to avoid simulating the statement once.  Failure
+ 	     to do so means that those edges will never get added.  */
+	  if (stmt_ends_bb_p (stmt))
+	    prop_set_simulate_again (stmt, true);
+	  else if (!stmt_interesting_for_vrp (stmt))
+	    {
+	      vr_values.set_defs_to_varying (stmt);
+	      prop_set_simulate_again (stmt, false);
+	    }
+	  else
+	    prop_set_simulate_again (stmt, true);
+	}
     }
-  return NULL_TREE;
 }
 
 /* Evaluate statement STMT.  If the statement produces a useful range,
@@ -3875,11 +3987,11 @@ vrp_prop::visit_stmt (gimple *stmt, edge *taken_edge_p, tree *output_p)
 {
   tree lhs = gimple_get_lhs (stmt);
   value_range_equiv vr;
-  extract_range_from_stmt (stmt, taken_edge_p, output_p, &vr);
+  vr_values.extract_range_from_stmt (stmt, taken_edge_p, output_p, &vr);
 
   if (*output_p)
     {
-      if (update_value_range (*output_p, &vr))
+      if (vr_values.update_value_range (*output_p, &vr))
 	{
 	  if (dump_file && (dump_flags & TDF_DETAILS))
 	    {
@@ -3914,7 +4026,7 @@ vrp_prop::visit_stmt (gimple *stmt, edge *taken_edge_p, tree *output_p)
 	    use_operand_p use_p;
 	    enum ssa_prop_result res = SSA_PROP_VARYING;
 
-	    set_def_to_varying (lhs);
+	    vr_values.set_def_to_varying (lhs);
 
 	    FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
 	      {
@@ -3944,8 +4056,9 @@ vrp_prop::visit_stmt (gimple *stmt, edge *taken_edge_p, tree *output_p)
 		   {REAL,IMAG}PART_EXPR uses at all,
 		   return SSA_PROP_VARYING.  */
 		value_range_equiv new_vr;
-		extract_range_basic (&new_vr, use_stmt);
-		const value_range_equiv *old_vr = get_value_range (use_lhs);
+		vr_values.extract_range_basic (&new_vr, use_stmt);
+		const value_range_equiv *old_vr
+		  = vr_values.get_value_range (use_lhs);
 		if (!old_vr->equal_p (new_vr, /*ignore_equivs=*/false))
 		  res = SSA_PROP_INTERESTING;
 		else
@@ -3963,248 +4076,88 @@ vrp_prop::visit_stmt (gimple *stmt, edge *taken_edge_p, tree *output_p)
 	break;
       default:
 	break;
-      }
-
-  /* All other statements produce nothing of interest for VRP, so mark
-     their outputs varying and prevent further simulation.  */
-  set_defs_to_varying (stmt);
-
-  return (*taken_edge_p) ? SSA_PROP_INTERESTING : SSA_PROP_VARYING;
-}
-
-/* Visit all arguments for PHI node PHI that flow through executable
-   edges.  If a valid value range can be derived from all the incoming
-   value ranges, set a new range for the LHS of PHI.  */
-
-enum ssa_prop_result
-vrp_prop::visit_phi (gphi *phi)
-{
-  tree lhs = PHI_RESULT (phi);
-  value_range_equiv vr_result;
-  extract_range_from_phi_node (phi, &vr_result);
-  if (update_value_range (lhs, &vr_result))
-    {
-      if (dump_file && (dump_flags & TDF_DETAILS))
-	{
-	  fprintf (dump_file, "Found new range for ");
-	  print_generic_expr (dump_file, lhs);
-	  fprintf (dump_file, ": ");
-	  dump_value_range (dump_file, &vr_result);
-	  fprintf (dump_file, "\n");
-	}
-
-      if (vr_result.varying_p ())
-	return SSA_PROP_VARYING;
-
-      return SSA_PROP_INTERESTING;
-    }
-
-  /* Nothing changed, don't add outgoing edges.  */
-  return SSA_PROP_NOT_INTERESTING;
-}
-
-class vrp_folder : public substitute_and_fold_engine
-{
- public:
-  vrp_folder (vr_values *v)
-    : substitute_and_fold_engine (/* Fold all stmts.  */ true),
-      m_vr_values (v), simplifier (v)
-    {  }
-  bool fold_stmt (gimple_stmt_iterator *) FINAL OVERRIDE;
-
-  tree value_of_expr (tree name, gimple *stmt) OVERRIDE
-    {
-      return m_vr_values->value_of_expr (name, stmt);
-    }
-  class vr_values *m_vr_values;
-
-private:
-  bool fold_predicate_in (gimple_stmt_iterator *);
-  /* Delegators.  */
-  tree vrp_evaluate_conditional (tree_code code, tree op0,
-				 tree op1, gimple *stmt)
-    { return simplifier.vrp_evaluate_conditional (code, op0, op1, stmt); }
-  bool simplify_stmt_using_ranges (gimple_stmt_iterator *gsi)
-    { return simplifier.simplify (gsi); }
-
-  simplify_using_ranges simplifier;
-};
-
-/* If the statement pointed by SI has a predicate whose value can be
-   computed using the value range information computed by VRP, compute
-   its value and return true.  Otherwise, return false.  */
-
-bool
-vrp_folder::fold_predicate_in (gimple_stmt_iterator *si)
-{
-  bool assignment_p = false;
-  tree val;
-  gimple *stmt = gsi_stmt (*si);
-
-  if (is_gimple_assign (stmt)
-      && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
-    {
-      assignment_p = true;
-      val = vrp_evaluate_conditional (gimple_assign_rhs_code (stmt),
-				      gimple_assign_rhs1 (stmt),
-				      gimple_assign_rhs2 (stmt),
-				      stmt);
-    }
-  else if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
-    val = vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
-				    gimple_cond_lhs (cond_stmt),
-				    gimple_cond_rhs (cond_stmt),
-				    stmt);
-  else
-    return false;
-
-  if (val)
-    {
-      if (assignment_p)
-        val = fold_convert (gimple_expr_type (stmt), val);
-
-      if (dump_file)
-	{
-	  fprintf (dump_file, "Folding predicate ");
-	  print_gimple_expr (dump_file, stmt, 0);
-	  fprintf (dump_file, " to ");
-	  print_generic_expr (dump_file, val);
-	  fprintf (dump_file, "\n");
-	}
-
-      if (is_gimple_assign (stmt))
-	gimple_assign_set_rhs_from_tree (si, val);
-      else
-	{
-	  gcc_assert (gimple_code (stmt) == GIMPLE_COND);
-	  gcond *cond_stmt = as_a <gcond *> (stmt);
-	  if (integer_zerop (val))
-	    gimple_cond_make_false (cond_stmt);
-	  else if (integer_onep (val))
-	    gimple_cond_make_true (cond_stmt);
-	  else
-	    gcc_unreachable ();
-	}
-
-      return true;
-    }
-
-  return false;
-}
-
-/* Callback for substitute_and_fold folding the stmt at *SI.  */
-
-bool
-vrp_folder::fold_stmt (gimple_stmt_iterator *si)
-{
-  if (fold_predicate_in (si))
-    return true;
+      }
 
-  return simplify_stmt_using_ranges (si);
+  /* All other statements produce nothing of interest for VRP, so mark
+     their outputs varying and prevent further simulation.  */
+  vr_values.set_defs_to_varying (stmt);
+
+  return (*taken_edge_p) ? SSA_PROP_INTERESTING : SSA_PROP_VARYING;
 }
 
-/* Return the LHS of any ASSERT_EXPR where OP appears as the first
-   argument to the ASSERT_EXPR and in which the ASSERT_EXPR dominates
-   BB.  If no such ASSERT_EXPR is found, return OP.  */
+/* Visit all arguments for PHI node PHI that flow through executable
+   edges.  If a valid value range can be derived from all the incoming
+   value ranges, set a new range for the LHS of PHI.  */
 
-static tree
-lhs_of_dominating_assert (tree op, basic_block bb, gimple *stmt)
+enum ssa_prop_result
+vrp_prop::visit_phi (gphi *phi)
 {
-  imm_use_iterator imm_iter;
-  gimple *use_stmt;
-  use_operand_p use_p;
-
-  if (TREE_CODE (op) == SSA_NAME)
+  tree lhs = PHI_RESULT (phi);
+  value_range_equiv vr_result;
+  vr_values.extract_range_from_phi_node (phi, &vr_result);
+  if (vr_values.update_value_range (lhs, &vr_result))
     {
-      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, op)
+      if (dump_file && (dump_flags & TDF_DETAILS))
 	{
-	  use_stmt = USE_STMT (use_p);
-	  if (use_stmt != stmt
-	      && gimple_assign_single_p (use_stmt)
-	      && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ASSERT_EXPR
-	      && TREE_OPERAND (gimple_assign_rhs1 (use_stmt), 0) == op
-	      && dominated_by_p (CDI_DOMINATORS, bb, gimple_bb (use_stmt)))
-	    return gimple_assign_lhs (use_stmt);
+	  fprintf (dump_file, "Found new range for ");
+	  print_generic_expr (dump_file, lhs);
+	  fprintf (dump_file, ": ");
+	  dump_value_range (dump_file, &vr_result);
+	  fprintf (dump_file, "\n");
 	}
-    }
-  return op;
-}
 
-/* A hack.  */
-static class vr_values *x_vr_values;
+      if (vr_result.varying_p ())
+	return SSA_PROP_VARYING;
 
-/* A trivial wrapper so that we can present the generic jump threading
-   code with a simple API for simplifying statements.  STMT is the
-   statement we want to simplify, WITHIN_STMT provides the location
-   for any overflow warnings.
+      return SSA_PROP_INTERESTING;
+    }
 
-   ?? This should be cleaned up.  There's a virtually identical copy
-   of this function in tree-ssa-dom.c.  */
+  /* Nothing changed, don't add outgoing edges.  */
+  return SSA_PROP_NOT_INTERESTING;
+}
 
-static tree
-simplify_stmt_for_jump_threading (gimple *stmt, gimple *within_stmt,
-    class avail_exprs_stack *avail_exprs_stack ATTRIBUTE_UNUSED,
-    basic_block bb)
-{
-  /* First see if the conditional is in the hash table.  */
-  tree cached_lhs = avail_exprs_stack->lookup_avail_expr (stmt, false, true);
-  if (cached_lhs && is_gimple_min_invariant (cached_lhs))
-    return cached_lhs;
+/* Traverse all the blocks folding conditionals with known ranges.  */
 
-  vr_values *vr_values = x_vr_values;
-  if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
-    {
-      tree op0 = gimple_cond_lhs (cond_stmt);
-      op0 = lhs_of_dominating_assert (op0, bb, stmt);
+void
+vrp_prop::finalize ()
+{
+  size_t i;
 
-      tree op1 = gimple_cond_rhs (cond_stmt);
-      op1 = lhs_of_dominating_assert (op1, bb, stmt);
+  /* We have completed propagating through the lattice.  */
+  vr_values.set_lattice_propagation_complete ();
 
-      simplify_using_ranges simplifier (vr_values);
-      return simplifier.vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
-						  op0, op1, within_stmt);
+  if (dump_file)
+    {
+      fprintf (dump_file, "\nValue ranges after VRP:\n\n");
+      vr_values.dump_all_value_ranges (dump_file);
+      fprintf (dump_file, "\n");
     }
 
-  if (gswitch *switch_stmt = dyn_cast <gswitch *> (stmt))
+  /* Set value range to non pointer SSA_NAMEs.  */
+  for (i = 0; i < num_ssa_names; i++)
     {
-      tree op = gimple_switch_index (switch_stmt);
-      if (TREE_CODE (op) != SSA_NAME)
-	return NULL_TREE;
-
-      op = lhs_of_dominating_assert (op, bb, stmt);
+      tree name = ssa_name (i);
+      if (!name)
+	continue;
 
-      const value_range_equiv *vr = vr_values->get_value_range (op);
-      return find_case_label_range (switch_stmt, vr);
-    }
+      const value_range_equiv *vr = vr_values.get_value_range (name);
+      if (!name || !vr->constant_p ())
+	continue;
 
-  if (gassign *assign_stmt = dyn_cast <gassign *> (stmt))
-    {
-      tree lhs = gimple_assign_lhs (assign_stmt);
-      if (TREE_CODE (lhs) == SSA_NAME
-	  && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
-	      || POINTER_TYPE_P (TREE_TYPE (lhs)))
-	  && stmt_interesting_for_vrp (stmt))
-	{
-	  edge dummy_e;
-	  tree dummy_tree;
-	  value_range_equiv new_vr;
-	  vr_values->extract_range_from_stmt (stmt, &dummy_e,
-					      &dummy_tree, &new_vr);
-	  tree singleton;
-	  if (new_vr.singleton_p (&singleton))
-	    return singleton;
-	}
+      if (POINTER_TYPE_P (TREE_TYPE (name))
+	  && range_includes_zero_p (vr) == 0)
+	set_ptr_nonnull (name);
+      else if (!POINTER_TYPE_P (TREE_TYPE (name)))
+	set_range_info (name, *vr);
     }
-
-  return NULL_TREE;
 }
 
-class vrp_dom_walker : public dom_walker
+class vrp_jump_threader : public dom_walker
 {
 public:
-  vrp_dom_walker (cdi_direction direction,
-		  class const_and_copies *const_and_copies,
-		  class avail_exprs_stack *avail_exprs_stack)
+  vrp_jump_threader (cdi_direction direction,
+		     class const_and_copies *const_and_copies,
+		     class avail_exprs_stack *avail_exprs_stack)
     : dom_walker (direction, REACHABLE_BLOCKS),
       m_const_and_copies (const_and_copies),
       m_avail_exprs_stack (avail_exprs_stack),
@@ -4216,11 +4169,13 @@ public:
   class vr_values *vr_values;
 
 private:
+  static tree simplify_stmt (gimple *stmt, gimple *within_stmt,
+			     avail_exprs_stack *, basic_block);
+
   class const_and_copies *m_const_and_copies;
   class avail_exprs_stack *m_avail_exprs_stack;
 
   gcond *m_dummy_cond;
-
 };
 
 /* Called before processing dominator children of BB.  We want to look
@@ -4231,7 +4186,7 @@ private:
    to significantly increase the jump threads we discover.  */
 
 edge
-vrp_dom_walker::before_dom_children (basic_block bb)
+vrp_jump_threader::before_dom_children (basic_block bb)
 {
   gimple_stmt_iterator gsi;
 
@@ -4263,10 +4218,77 @@ vrp_dom_walker::before_dom_children (basic_block bb)
   return NULL;
 }
 
+/* A trivial wrapper so that we can present the generic jump threading
+   code with a simple API for simplifying statements.  STMT is the
+   statement we want to simplify, WITHIN_STMT provides the location
+   for any overflow warnings.
+
+   ?? This should be cleaned up.  There's a virtually identical copy
+   of this function in tree-ssa-dom.c.  */
+
+tree
+vrp_jump_threader::simplify_stmt (gimple *stmt,
+				  gimple *within_stmt,
+				  avail_exprs_stack *avail_exprs_stack,
+				  basic_block bb)
+{
+  /* First see if the conditional is in the hash table.  */
+  tree cached_lhs = avail_exprs_stack->lookup_avail_expr (stmt, false, true);
+  if (cached_lhs && is_gimple_min_invariant (cached_lhs))
+    return cached_lhs;
+
+  class vr_values *vr_values = x_vr_values;
+  if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
+    {
+      tree op0 = gimple_cond_lhs (cond_stmt);
+      op0 = lhs_of_dominating_assert (op0, bb, stmt);
+
+      tree op1 = gimple_cond_rhs (cond_stmt);
+      op1 = lhs_of_dominating_assert (op1, bb, stmt);
+
+      simplify_using_ranges simplifier (vr_values);
+      return simplifier.vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
+						  op0, op1, within_stmt);
+    }
+
+  if (gswitch *switch_stmt = dyn_cast <gswitch *> (stmt))
+    {
+      tree op = gimple_switch_index (switch_stmt);
+      if (TREE_CODE (op) != SSA_NAME)
+	return NULL_TREE;
+
+      op = lhs_of_dominating_assert (op, bb, stmt);
+
+      const value_range_equiv *vr = vr_values->get_value_range (op);
+      return find_case_label_range (switch_stmt, vr);
+    }
+
+  if (gassign *assign_stmt = dyn_cast <gassign *> (stmt))
+    {
+      tree lhs = gimple_assign_lhs (assign_stmt);
+      if (TREE_CODE (lhs) == SSA_NAME
+	  && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
+	      || POINTER_TYPE_P (TREE_TYPE (lhs)))
+	  && stmt_interesting_for_vrp (stmt))
+	{
+	  edge dummy_e;
+	  tree dummy_tree;
+	  value_range_equiv new_vr;
+	  vr_values->extract_range_from_stmt (stmt, &dummy_e,
+					      &dummy_tree, &new_vr);
+	  tree singleton;
+	  if (new_vr.singleton_p (&singleton))
+	    return singleton;
+	}
+    }
+
+  return NULL_TREE;
+}
+
 /* Called after processing dominator children of BB.  This is where we
    actually call into the threader.  */
 void
-vrp_dom_walker::after_dom_children (basic_block bb)
+vrp_jump_threader::after_dom_children (basic_block bb)
 {
   if (!m_dummy_cond)
     m_dummy_cond = gimple_build_cond (NE_EXPR,
@@ -4276,7 +4298,7 @@ vrp_dom_walker::after_dom_children (basic_block bb)
   x_vr_values = vr_values;
   thread_outgoing_edges (bb, m_dummy_cond, m_const_and_copies,
 			 m_avail_exprs_stack, NULL,
-			 simplify_stmt_for_jump_threading);
+			 simplify_stmt);
   x_vr_values = NULL;
 
   m_avail_exprs_stack->pop_to_marker ();
@@ -4327,7 +4349,7 @@ identify_jump_threads (struct function *fun, class vr_values *vr_values)
   avail_exprs_stack *avail_exprs_stack
     = new class avail_exprs_stack (avail_exprs);
 
-  vrp_dom_walker walker (CDI_DOMINATORS, equiv_stack, avail_exprs_stack);
+  vrp_jump_threader walker (CDI_DOMINATORS, equiv_stack, avail_exprs_stack);
   walker.vr_values = vr_values;
   walker.walk (fun->cfg->x_entry_block_ptr);
 
@@ -4339,62 +4361,6 @@ identify_jump_threads (struct function *fun, class vr_values *vr_values)
   delete avail_exprs_stack;
 }
 
-/* Traverse all the blocks folding conditionals with known ranges.  */
-
-void
-vrp_prop::vrp_finalize (vrp_folder *folder, bool warn_array_bounds_p)
-{
-  size_t i;
-
-  /* We have completed propagating through the lattice.  */
-  vr_values.set_lattice_propagation_complete ();
-
-  if (dump_file)
-    {
-      fprintf (dump_file, "\nValue ranges after VRP:\n\n");
-      vr_values.dump_all_value_ranges (dump_file);
-      fprintf (dump_file, "\n");
-    }
-
-  /* Set value range to non pointer SSA_NAMEs.  */
-  for (i = 0; i < num_ssa_names; i++)
-    {
-      tree name = ssa_name (i);
-      if (!name)
-	continue;
-
-      const value_range_equiv *vr = get_value_range (name);
-      if (!name || !vr->constant_p ())
-	continue;
-
-      if (POINTER_TYPE_P (TREE_TYPE (name))
-	  && range_includes_zero_p (vr) == 0)
-	set_ptr_nonnull (name);
-      else if (!POINTER_TYPE_P (TREE_TYPE (name)))
-	set_range_info (name, *vr);
-    }
-
-  /* If we're checking array refs, we want to merge information on
-     the executability of each edge between vrp_folder and the
-     check_array_bounds_dom_walker: each can clear the
-     EDGE_EXECUTABLE flag on edges, in different ways.
-
-     Hence, if we're going to call check_all_array_refs, set
-     the flag on every edge now, rather than in
-     check_array_bounds_dom_walker's ctor; vrp_folder may clear
-     it from some edges.  */
-  if (warn_array_bounds && warn_array_bounds_p)
-    set_all_edges_as_executable (fun);
-
-  folder->substitute_and_fold ();
-
-  if (warn_array_bounds && warn_array_bounds_p)
-    {
-      array_bounds_checker array_checker (fun, &vr_values);
-      array_checker.check ();
-    }
-}
-
 /* STMT is a conditional at the end of a basic block.
 
    If the conditional is of the form SSA_NAME op constant and the SSA_NAME
@@ -4511,7 +4477,7 @@ execute_vrp (struct function *fun, bool warn_array_bounds_p)
   /* ???  This ends up using stale EDGE_DFS_BACK for liveness computation.
      Inserting assertions may split edges which will invalidate
      EDGE_DFS_BACK.  */
-  vrp_insert assert_engine (fun);
+  vrp_asserts assert_engine (fun);
   assert_engine.insert_range_assertions ();
 
   threadedge_initialize_values ();
@@ -4520,12 +4486,33 @@ execute_vrp (struct function *fun, bool warn_array_bounds_p)
   mark_dfs_back_edges ();
 
   class vrp_prop vrp_prop;
-  vrp_prop.vrp_initialize (fun);
+  vrp_prop.initialize (fun);
   vrp_prop.ssa_propagate ();
+
   /* Instantiate the folder here, so that edge cleanups happen at the
      end of this function.  */
   vrp_folder folder (&vrp_prop.vr_values);
-  vrp_prop.vrp_finalize (&folder, warn_array_bounds_p);
+  vrp_prop.finalize ();
+
+  /* If we're checking array refs, we want to merge information on
+     the executability of each edge between vrp_folder and the
+     check_array_bounds_dom_walker: each can clear the
+     EDGE_EXECUTABLE flag on edges, in different ways.
+
+     Hence, if we're going to call check_all_array_refs, set
+     the flag on every edge now, rather than in
+     check_array_bounds_dom_walker's ctor; vrp_folder may clear
+     it from some edges.  */
+  if (warn_array_bounds && warn_array_bounds_p)
+    set_all_edges_as_executable (fun);
+
+  folder.substitute_and_fold ();
+
+  if (warn_array_bounds && warn_array_bounds_p)
+    {
+      array_bounds_checker array_checker (fun, &vrp_prop.vr_values);
+      array_checker.check ();
+    }
 
   /* We must identify jump threading opportunities before we release
      the datastructures built by VRP.  */
-- 
2.26.2


^ permalink raw reply	[flat|nested] 5+ messages in thread

* [PATCH 2/5] Refactor VRP threading code into vrp_jump_threader class.
  2020-11-12 15:15 [PATCH 1/5] Group tree-vrp.c by functionality Aldy Hernandez
@ 2020-11-12 15:15 ` Aldy Hernandez
  2020-11-12 15:15 ` [PATCH 3/5] Move vrp_prop before vrp_folder Aldy Hernandez
                   ` (2 subsequent siblings)
  3 siblings, 0 replies; 5+ messages in thread
From: Aldy Hernandez @ 2020-11-12 15:15 UTC (permalink / raw)
  To: GCC patches

Will push pending aarch64 tests.

gcc/ChangeLog:

	* tree-vrp.c (identify_jump_threads): Refactor to...
	(vrp_jump_threader::vrp_jump_threader): ...here
	(vrp_jump_threader::~vrp_jump_threader): ...and here.
	(vrp_jump_threader::after_dom_children): Rename vr_values to
	m_vr_values.
	(execute_vrp): Use vrp_jump_threader.
---
 gcc/tree-vrp.c | 144 ++++++++++++++++++++++++-------------------------
 1 file changed, 72 insertions(+), 72 deletions(-)

diff --git a/gcc/tree-vrp.c b/gcc/tree-vrp.c
index d3816ab569e..6b77c357a8f 100644
--- a/gcc/tree-vrp.c
+++ b/gcc/tree-vrp.c
@@ -4152,32 +4152,87 @@ vrp_prop::finalize ()
     }
 }
 
+/* Blocks which have more than one predecessor and more than
+   one successor present jump threading opportunities, i.e.,
+   when the block is reached from a specific predecessor, we
+   may be able to determine which of the outgoing edges will
+   be traversed.  When this optimization applies, we are able
+   to avoid conditionals at runtime and we may expose secondary
+   optimization opportunities.
+
+   This class is effectively a driver for the generic jump
+   threading code.  It basically just presents the generic code
+   with edges that may be suitable for jump threading.
+
+   Unlike DOM, we do not iterate VRP if jump threading was successful.
+   While iterating may expose new opportunities for VRP, it is expected
+   those opportunities would be very limited and the compile time cost
+   to expose those opportunities would be significant.
+
+   As jump threading opportunities are discovered, they are registered
+   for later realization.  */
+
 class vrp_jump_threader : public dom_walker
 {
 public:
-  vrp_jump_threader (cdi_direction direction,
-		     class const_and_copies *const_and_copies,
-		     class avail_exprs_stack *avail_exprs_stack)
-    : dom_walker (direction, REACHABLE_BLOCKS),
-      m_const_and_copies (const_and_copies),
-      m_avail_exprs_stack (avail_exprs_stack),
-      m_dummy_cond (NULL) {}
-
-  virtual edge before_dom_children (basic_block);
-  virtual void after_dom_children (basic_block);
+  vrp_jump_threader (struct function *, vr_values *);
+  ~vrp_jump_threader ();
 
-  class vr_values *vr_values;
+  void thread_jumps ()
+  {
+    walk (m_fun->cfg->x_entry_block_ptr);
+  }
 
 private:
   static tree simplify_stmt (gimple *stmt, gimple *within_stmt,
 			     avail_exprs_stack *, basic_block);
+  virtual edge before_dom_children (basic_block);
+  virtual void after_dom_children (basic_block);
 
-  class const_and_copies *m_const_and_copies;
-  class avail_exprs_stack *m_avail_exprs_stack;
-
+  function *m_fun;
+  vr_values *m_vr_values;
+  const_and_copies *m_const_and_copies;
+  avail_exprs_stack *m_avail_exprs_stack;
+  hash_table<expr_elt_hasher> *m_avail_exprs;
   gcond *m_dummy_cond;
 };
 
+vrp_jump_threader::vrp_jump_threader (struct function *fun, vr_values *v)
+  : dom_walker (CDI_DOMINATORS, REACHABLE_BLOCKS)
+{
+  /* Ugh.  When substituting values earlier in this pass we can wipe
+     the dominance information.  So rebuild the dominator information
+     as we need it within the jump threading code.  */
+  calculate_dominance_info (CDI_DOMINATORS);
+
+  /* We do not allow VRP information to be used for jump threading
+     across a back edge in the CFG.  Otherwise it becomes too
+     difficult to avoid eliminating loop exit tests.  Of course
+     EDGE_DFS_BACK is not accurate at this time so we have to
+     recompute it.  */
+  mark_dfs_back_edges ();
+
+  /* Allocate our unwinder stack to unwind any temporary equivalences
+     that might be recorded.  */
+  m_const_and_copies = new const_and_copies ();
+
+  m_dummy_cond = NULL;
+  m_fun = fun;
+  m_vr_values = v;
+  m_avail_exprs = new hash_table<expr_elt_hasher> (1024);
+  m_avail_exprs_stack = new avail_exprs_stack (m_avail_exprs);
+}
+
+vrp_jump_threader::~vrp_jump_threader ()
+{
+  /* We do not actually update the CFG or SSA graphs at this point as
+     ASSERT_EXPRs are still in the IL and cfg cleanup code does not
+     yet handle ASSERT_EXPRs gracefully.  */
+  delete m_const_and_copies;
+  delete m_avail_exprs;
+  delete m_avail_exprs_stack;
+}
+
 /* Called before processing dominator children of BB.  We want to look
    at ASSERT_EXPRs and record information from them in the appropriate
    tables.
@@ -4295,7 +4350,7 @@ vrp_jump_threader::after_dom_children (basic_block bb)
 				      integer_zero_node, integer_zero_node,
 				      NULL, NULL);
 
-  x_vr_values = vr_values;
+  x_vr_values = m_vr_values;
   thread_outgoing_edges (bb, m_dummy_cond, m_const_and_copies,
 			 m_avail_exprs_stack, NULL,
 			 simplify_stmt);
@@ -4305,62 +4360,6 @@ vrp_jump_threader::after_dom_children (basic_block bb)
   m_const_and_copies->pop_to_marker ();
 }
 
-/* Blocks which have more than one predecessor and more than
-   one successor present jump threading opportunities, i.e.,
-   when the block is reached from a specific predecessor, we
-   may be able to determine which of the outgoing edges will
-   be traversed.  When this optimization applies, we are able
-   to avoid conditionals at runtime and we may expose secondary
-   optimization opportunities.
-
-   This routine is effectively a driver for the generic jump
-   threading code.  It basically just presents the generic code
-   with edges that may be suitable for jump threading.
-
-   Unlike DOM, we do not iterate VRP if jump threading was successful.
-   While iterating may expose new opportunities for VRP, it is expected
-   those opportunities would be very limited and the compile time cost
-   to expose those opportunities would be significant.
-
-   As jump threading opportunities are discovered, they are registered
-   for later realization.  */
-
-static void
-identify_jump_threads (struct function *fun, class vr_values *vr_values)
-{
-  /* Ugh.  When substituting values earlier in this pass we can
-     wipe the dominance information.  So rebuild the dominator
-     information as we need it within the jump threading code.  */
-  calculate_dominance_info (CDI_DOMINATORS);
-
-  /* We do not allow VRP information to be used for jump threading
-     across a back edge in the CFG.  Otherwise it becomes too
-     difficult to avoid eliminating loop exit tests.  Of course
-     EDGE_DFS_BACK is not accurate at this time so we have to
-     recompute it.  */
-  mark_dfs_back_edges ();
-
-  /* Allocate our unwinder stack to unwind any temporary equivalences
-     that might be recorded.  */
-  const_and_copies *equiv_stack = new const_and_copies ();
-
-  hash_table<expr_elt_hasher> *avail_exprs
-    = new hash_table<expr_elt_hasher> (1024);
-  avail_exprs_stack *avail_exprs_stack
-    = new class avail_exprs_stack (avail_exprs);
-
-  vrp_jump_threader walker (CDI_DOMINATORS, equiv_stack, avail_exprs_stack);
-  walker.vr_values = vr_values;
-  walker.walk (fun->cfg->x_entry_block_ptr);
-
-  /* We do not actually update the CFG or SSA graphs at this point as
-     ASSERT_EXPRs are still in the IL and cfg cleanup code does not yet
-     handle ASSERT_EXPRs gracefully.  */
-  delete equiv_stack;
-  delete avail_exprs;
-  delete avail_exprs_stack;
-}
-
 /* STMT is a conditional at the end of a basic block.
 
    If the conditional is of the form SSA_NAME op constant and the SSA_NAME
@@ -4516,7 +4515,8 @@ execute_vrp (struct function *fun, bool warn_array_bounds_p)
 
   /* We must identify jump threading opportunities before we release
      the datastructures built by VRP.  */
-  identify_jump_threads (fun, &vrp_prop.vr_values);
+  vrp_jump_threader threader (fun, &vrp_prop.vr_values);
+  threader.thread_jumps ();
 
   /* A comparison of an SSA_NAME against a constant where the SSA_NAME
      was set by a type conversion can often be rewritten to use the
-- 
2.26.2


^ permalink raw reply	[flat|nested] 5+ messages in thread

* [PATCH 3/5] Move vrp_prop before vrp_folder.
  2020-11-12 15:15 [PATCH 1/5] Group tree-vrp.c by functionality Aldy Hernandez
  2020-11-12 15:15 ` [PATCH 2/5] Refactor VRP threading code into vrp_jump_threader class Aldy Hernandez
@ 2020-11-12 15:15 ` Aldy Hernandez
  2020-11-12 15:15 ` [PATCH 4/5] Move vr_values out of vrp_prop into execute_vrp so it can be shared Aldy Hernandez
  2020-11-12 15:16 ` [PATCH 5/5] Inline delegators in vrp_folder Aldy Hernandez
  3 siblings, 0 replies; 5+ messages in thread
From: Aldy Hernandez @ 2020-11-12 15:15 UTC (permalink / raw)
  To: GCC patches

Will push pending aarch64 tests.

gcc/ChangeLog:

	* tree-vrp.c (class vrp_prop): Move entire class...
	(class vrp_folder): ...before here.
---
 gcc/tree-vrp.c | 200 ++++++++++++++++++++++++-------------------------
 1 file changed, 100 insertions(+), 100 deletions(-)

diff --git a/gcc/tree-vrp.c b/gcc/tree-vrp.c
index 6b77c357a8f..15267e3d878 100644
--- a/gcc/tree-vrp.c
+++ b/gcc/tree-vrp.c
@@ -3814,106 +3814,6 @@ vrp_asserts::remove_range_assertions ()
       }
 }
 
-class vrp_folder : public substitute_and_fold_engine
-{
- public:
-  vrp_folder (vr_values *v)
-    : substitute_and_fold_engine (/* Fold all stmts.  */ true),
-      m_vr_values (v), simplifier (v)
-    {  }
-  bool fold_stmt (gimple_stmt_iterator *) FINAL OVERRIDE;
-
-  tree value_of_expr (tree name, gimple *stmt) OVERRIDE
-    {
-      return m_vr_values->value_of_expr (name, stmt);
-    }
-  class vr_values *m_vr_values;
-
-private:
-  bool fold_predicate_in (gimple_stmt_iterator *);
-  /* Delegators.  */
-  tree vrp_evaluate_conditional (tree_code code, tree op0,
-				 tree op1, gimple *stmt)
-    { return simplifier.vrp_evaluate_conditional (code, op0, op1, stmt); }
-  bool simplify_stmt_using_ranges (gimple_stmt_iterator *gsi)
-    { return simplifier.simplify (gsi); }
-
-  simplify_using_ranges simplifier;
-};
-
-/* If the statement pointed by SI has a predicate whose value can be
-   computed using the value range information computed by VRP, compute
-   its value and return true.  Otherwise, return false.  */
-
-bool
-vrp_folder::fold_predicate_in (gimple_stmt_iterator *si)
-{
-  bool assignment_p = false;
-  tree val;
-  gimple *stmt = gsi_stmt (*si);
-
-  if (is_gimple_assign (stmt)
-      && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
-    {
-      assignment_p = true;
-      val = vrp_evaluate_conditional (gimple_assign_rhs_code (stmt),
-				      gimple_assign_rhs1 (stmt),
-				      gimple_assign_rhs2 (stmt),
-				      stmt);
-    }
-  else if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
-    val = vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
-				    gimple_cond_lhs (cond_stmt),
-				    gimple_cond_rhs (cond_stmt),
-				    stmt);
-  else
-    return false;
-
-  if (val)
-    {
-      if (assignment_p)
-        val = fold_convert (gimple_expr_type (stmt), val);
-
-      if (dump_file)
-	{
-	  fprintf (dump_file, "Folding predicate ");
-	  print_gimple_expr (dump_file, stmt, 0);
-	  fprintf (dump_file, " to ");
-	  print_generic_expr (dump_file, val);
-	  fprintf (dump_file, "\n");
-	}
-
-      if (is_gimple_assign (stmt))
-	gimple_assign_set_rhs_from_tree (si, val);
-      else
-	{
-	  gcc_assert (gimple_code (stmt) == GIMPLE_COND);
-	  gcond *cond_stmt = as_a <gcond *> (stmt);
-	  if (integer_zerop (val))
-	    gimple_cond_make_false (cond_stmt);
-	  else if (integer_onep (val))
-	    gimple_cond_make_true (cond_stmt);
-	  else
-	    gcc_unreachable ();
-	}
-
-      return true;
-    }
-
-  return false;
-}
-
-/* Callback for substitute_and_fold folding the stmt at *SI.  */
-
-bool
-vrp_folder::fold_stmt (gimple_stmt_iterator *si)
-{
-  if (fold_predicate_in (si))
-    return true;
-
-  return simplify_stmt_using_ranges (si);
-}
-
 class vrp_prop : public ssa_propagation_engine
 {
 public:
@@ -4152,6 +4052,106 @@ vrp_prop::finalize ()
     }
 }
 
+class vrp_folder : public substitute_and_fold_engine
+{
+ public:
+  vrp_folder (vr_values *v)
+    : substitute_and_fold_engine (/* Fold all stmts.  */ true),
+      m_vr_values (v), simplifier (v)
+    {  }
+  bool fold_stmt (gimple_stmt_iterator *) FINAL OVERRIDE;
+
+  tree value_of_expr (tree name, gimple *stmt) OVERRIDE
+    {
+      return m_vr_values->value_of_expr (name, stmt);
+    }
+  class vr_values *m_vr_values;
+
+private:
+  bool fold_predicate_in (gimple_stmt_iterator *);
+  /* Delegators.  */
+  tree vrp_evaluate_conditional (tree_code code, tree op0,
+				 tree op1, gimple *stmt)
+    { return simplifier.vrp_evaluate_conditional (code, op0, op1, stmt); }
+  bool simplify_stmt_using_ranges (gimple_stmt_iterator *gsi)
+    { return simplifier.simplify (gsi); }
+
+  simplify_using_ranges simplifier;
+};
+
+/* If the statement pointed by SI has a predicate whose value can be
+   computed using the value range information computed by VRP, compute
+   its value and return true.  Otherwise, return false.  */
+
+bool
+vrp_folder::fold_predicate_in (gimple_stmt_iterator *si)
+{
+  bool assignment_p = false;
+  tree val;
+  gimple *stmt = gsi_stmt (*si);
+
+  if (is_gimple_assign (stmt)
+      && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
+    {
+      assignment_p = true;
+      val = vrp_evaluate_conditional (gimple_assign_rhs_code (stmt),
+				      gimple_assign_rhs1 (stmt),
+				      gimple_assign_rhs2 (stmt),
+				      stmt);
+    }
+  else if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
+    val = vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
+				    gimple_cond_lhs (cond_stmt),
+				    gimple_cond_rhs (cond_stmt),
+				    stmt);
+  else
+    return false;
+
+  if (val)
+    {
+      if (assignment_p)
+        val = fold_convert (gimple_expr_type (stmt), val);
+
+      if (dump_file)
+	{
+	  fprintf (dump_file, "Folding predicate ");
+	  print_gimple_expr (dump_file, stmt, 0);
+	  fprintf (dump_file, " to ");
+	  print_generic_expr (dump_file, val);
+	  fprintf (dump_file, "\n");
+	}
+
+      if (is_gimple_assign (stmt))
+	gimple_assign_set_rhs_from_tree (si, val);
+      else
+	{
+	  gcc_assert (gimple_code (stmt) == GIMPLE_COND);
+	  gcond *cond_stmt = as_a <gcond *> (stmt);
+	  if (integer_zerop (val))
+	    gimple_cond_make_false (cond_stmt);
+	  else if (integer_onep (val))
+	    gimple_cond_make_true (cond_stmt);
+	  else
+	    gcc_unreachable ();
+	}
+
+      return true;
+    }
+
+  return false;
+}
+
+/* Callback for substitute_and_fold folding the stmt at *SI.  */
+
+bool
+vrp_folder::fold_stmt (gimple_stmt_iterator *si)
+{
+  if (fold_predicate_in (si))
+    return true;
+
+  return simplify_stmt_using_ranges (si);
+}
+
 /* Blocks which have more than one predecessor and more than
    one successor present jump threading opportunities, i.e.,
    when the block is reached from a specific predecessor, we
-- 
2.26.2


^ permalink raw reply	[flat|nested] 5+ messages in thread

* [PATCH 4/5] Move vr_values out of vrp_prop into execute_vrp so it can be shared.
  2020-11-12 15:15 [PATCH 1/5] Group tree-vrp.c by functionality Aldy Hernandez
  2020-11-12 15:15 ` [PATCH 2/5] Refactor VRP threading code into vrp_jump_threader class Aldy Hernandez
  2020-11-12 15:15 ` [PATCH 3/5] Move vrp_prop before vrp_folder Aldy Hernandez
@ 2020-11-12 15:15 ` Aldy Hernandez
  2020-11-12 15:16 ` [PATCH 5/5] Inline delegators in vrp_folder Aldy Hernandez
  3 siblings, 0 replies; 5+ messages in thread
From: Aldy Hernandez @ 2020-11-12 15:15 UTC (permalink / raw)
  To: GCC patches

vr_values is shared between the propagator and the folder.  I've
pulled it out of the propagator so that it can be passed explicitly
to each consumer, instead of being publicly accessible through the
propagator.

Will push pending aarch64 tests.

gcc/ChangeLog:

	* tree-vrp.c (class vrp_prop): Rename vr_values to m_vr_values.
	(vrp_prop::vrp_prop): New.
	(vrp_prop::initialize): Rename vr_values to m_vr_values.
	(vrp_prop::visit_stmt): Same.
	(vrp_prop::visit_phi): Same.
	(vrp_prop::finalize): Same.
	(execute_vrp): Instantiate vrp_vr_values and pass it to folder
	and propagator.
---
 gcc/tree-vrp.c | 53 +++++++++++++++++++++++++++-----------------------
 1 file changed, 29 insertions(+), 24 deletions(-)

diff --git a/gcc/tree-vrp.c b/gcc/tree-vrp.c
index 15267e3d878..81bbaefd642 100644
--- a/gcc/tree-vrp.c
+++ b/gcc/tree-vrp.c
@@ -3817,15 +3817,19 @@ vrp_asserts::remove_range_assertions ()
 class vrp_prop : public ssa_propagation_engine
 {
 public:
-  enum ssa_prop_result visit_stmt (gimple *, edge *, tree *) FINAL OVERRIDE;
-  enum ssa_prop_result visit_phi (gphi *) FINAL OVERRIDE;
-
-  struct function *fun;
+  vrp_prop (vr_values *v)
+    : ssa_propagation_engine (),
+      m_vr_values (v) { }
 
   void initialize (struct function *);
   void finalize ();
 
-  class vr_values vr_values;
+  enum ssa_prop_result visit_stmt (gimple *, edge *, tree *) FINAL OVERRIDE;
+  enum ssa_prop_result visit_phi (gphi *) FINAL OVERRIDE;
+
+private:
+  struct function *fun;
+  vr_values *m_vr_values;
 };
 
 /* Initialization required by ssa_propagate engine.  */
@@ -3845,7 +3849,7 @@ vrp_prop::initialize (struct function *fn)
 	  if (!stmt_interesting_for_vrp (phi))
 	    {
 	      tree lhs = PHI_RESULT (phi);
-	      vr_values.set_def_to_varying (lhs);
+	      m_vr_values->set_def_to_varying (lhs);
 	      prop_set_simulate_again (phi, false);
 	    }
 	  else
@@ -3864,7 +3868,7 @@ vrp_prop::initialize (struct function *fn)
 	    prop_set_simulate_again (stmt, true);
 	  else if (!stmt_interesting_for_vrp (stmt))
 	    {
-	      vr_values.set_defs_to_varying (stmt);
+	      m_vr_values->set_defs_to_varying (stmt);
 	      prop_set_simulate_again (stmt, false);
 	    }
 	  else
@@ -3887,11 +3891,11 @@ vrp_prop::visit_stmt (gimple *stmt, edge *taken_edge_p, tree *output_p)
 {
   tree lhs = gimple_get_lhs (stmt);
   value_range_equiv vr;
-  vr_values.extract_range_from_stmt (stmt, taken_edge_p, output_p, &vr);
+  m_vr_values->extract_range_from_stmt (stmt, taken_edge_p, output_p, &vr);
 
   if (*output_p)
     {
-      if (vr_values.update_value_range (*output_p, &vr))
+      if (m_vr_values->update_value_range (*output_p, &vr))
 	{
 	  if (dump_file && (dump_flags & TDF_DETAILS))
 	    {
@@ -3926,7 +3930,7 @@ vrp_prop::visit_stmt (gimple *stmt, edge *taken_edge_p, tree *output_p)
 	    use_operand_p use_p;
 	    enum ssa_prop_result res = SSA_PROP_VARYING;
 
-	    vr_values.set_def_to_varying (lhs);
+	    m_vr_values->set_def_to_varying (lhs);
 
 	    FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
 	      {
@@ -3956,9 +3960,9 @@ vrp_prop::visit_stmt (gimple *stmt, edge *taken_edge_p, tree *output_p)
 		   {REAL,IMAG}PART_EXPR uses at all,
 		   return SSA_PROP_VARYING.  */
 		value_range_equiv new_vr;
-		vr_values.extract_range_basic (&new_vr, use_stmt);
+		m_vr_values->extract_range_basic (&new_vr, use_stmt);
 		const value_range_equiv *old_vr
-		  = vr_values.get_value_range (use_lhs);
+		  = m_vr_values->get_value_range (use_lhs);
 		if (!old_vr->equal_p (new_vr, /*ignore_equivs=*/false))
 		  res = SSA_PROP_INTERESTING;
 		else
@@ -3980,7 +3984,7 @@ vrp_prop::visit_stmt (gimple *stmt, edge *taken_edge_p, tree *output_p)
 
   /* All other statements produce nothing of interest for VRP, so mark
      their outputs varying and prevent further simulation.  */
-  vr_values.set_defs_to_varying (stmt);
+  m_vr_values->set_defs_to_varying (stmt);
 
   return (*taken_edge_p) ? SSA_PROP_INTERESTING : SSA_PROP_VARYING;
 }
@@ -3994,8 +3998,8 @@ vrp_prop::visit_phi (gphi *phi)
 {
   tree lhs = PHI_RESULT (phi);
   value_range_equiv vr_result;
-  vr_values.extract_range_from_phi_node (phi, &vr_result);
-  if (vr_values.update_value_range (lhs, &vr_result))
+  m_vr_values->extract_range_from_phi_node (phi, &vr_result);
+  if (m_vr_values->update_value_range (lhs, &vr_result))
     {
       if (dump_file && (dump_flags & TDF_DETAILS))
 	{
@@ -4024,12 +4028,12 @@ vrp_prop::finalize ()
   size_t i;
 
   /* We have completed propagating through the lattice.  */
-  vr_values.set_lattice_propagation_complete ();
+  m_vr_values->set_lattice_propagation_complete ();
 
   if (dump_file)
     {
       fprintf (dump_file, "\nValue ranges after VRP:\n\n");
-      vr_values.dump_all_value_ranges (dump_file);
+      m_vr_values->dump_all_value_ranges (dump_file);
       fprintf (dump_file, "\n");
     }
 
@@ -4040,7 +4044,7 @@ vrp_prop::finalize ()
       if (!name)
 	continue;
 
-      const value_range_equiv *vr = vr_values.get_value_range (name);
+      const value_range_equiv *vr = m_vr_values->get_value_range (name);
       if (!name || !vr->constant_p ())
 	continue;
 
@@ -4468,7 +4472,6 @@ vrp_simplify_cond_using_ranges (vr_values *query, gcond *stmt)
 static unsigned int
 execute_vrp (struct function *fun, bool warn_array_bounds_p)
 {
-
   loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
   rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
   scev_initialize ();
@@ -4484,13 +4487,15 @@ execute_vrp (struct function *fun, bool warn_array_bounds_p)
   /* For visiting PHI nodes we need EDGE_DFS_BACK computed.  */
   mark_dfs_back_edges ();
 
-  class vrp_prop vrp_prop;
+  vr_values vrp_vr_values;
+
+  class vrp_prop vrp_prop (&vrp_vr_values);
   vrp_prop.initialize (fun);
   vrp_prop.ssa_propagate ();
 
   /* Instantiate the folder here, so that edge cleanups happen at the
      end of this function.  */
-  vrp_folder folder (&vrp_prop.vr_values);
+  vrp_folder folder (&vrp_vr_values);
   vrp_prop.finalize ();
 
   /* If we're checking array refs, we want to merge information on
@@ -4509,13 +4514,13 @@ execute_vrp (struct function *fun, bool warn_array_bounds_p)
 
   if (warn_array_bounds && warn_array_bounds_p)
     {
-      array_bounds_checker array_checker (fun, &vrp_prop.vr_values);
+      array_bounds_checker array_checker (fun, &vrp_vr_values);
       array_checker.check ();
     }
 
   /* We must identify jump threading opportunities before we release
      the datastructures built by VRP.  */
-  vrp_jump_threader threader (fun, &vrp_prop.vr_values);
+  vrp_jump_threader threader (fun, &vrp_vr_values);
   threader.thread_jumps ();
 
   /* A comparison of an SSA_NAME against a constant where the SSA_NAME
@@ -4530,7 +4535,7 @@ execute_vrp (struct function *fun, bool warn_array_bounds_p)
     {
       gimple *last = last_stmt (bb);
       if (last && gimple_code (last) == GIMPLE_COND)
-	vrp_simplify_cond_using_ranges (&vrp_prop.vr_values,
+	vrp_simplify_cond_using_ranges (&vrp_vr_values,
 					as_a <gcond *> (last));
     }
 
-- 
2.26.2


^ permalink raw reply	[flat|nested] 5+ messages in thread

* [PATCH 5/5] Inline delegators in vrp_folder.
  2020-11-12 15:15 [PATCH 1/5] Group tree-vrp.c by functionality Aldy Hernandez
                   ` (2 preceding siblings ...)
  2020-11-12 15:15 ` [PATCH 4/5] Move vr_values out of vrp_prop into execute_vrp so it can be shared Aldy Hernandez
@ 2020-11-12 15:16 ` Aldy Hernandez
  3 siblings, 0 replies; 5+ messages in thread
From: Aldy Hernandez @ 2020-11-12 15:16 UTC (permalink / raw)
  To: GCC patches

Will push pending aarch64 tests.

gcc/ChangeLog:

	* tree-vrp.c (class vrp_folder): Make visit_stmt, visit_phi,
	and m_vr_values private.
	(vrp_folder::vrp_evaluate_conditional): Remove.
	(vrp_folder::vrp_simplify_stmt_using_ranges): Remove.
	(vrp_folder::fold_predicate_in): Inline
	vrp_evaluate_conditional and vrp_simplify_stmt_using_ranges.
	(vrp_folder::fold_stmt): Same.
---
 gcc/tree-vrp.c | 33 +++++++++++++--------------------
 1 file changed, 13 insertions(+), 20 deletions(-)

diff --git a/gcc/tree-vrp.c b/gcc/tree-vrp.c
index 81bbaefd642..54ce017e8b2 100644
--- a/gcc/tree-vrp.c
+++ b/gcc/tree-vrp.c
@@ -3824,10 +3824,10 @@ public:
   void initialize (struct function *);
   void finalize ();
 
+private:
   enum ssa_prop_result visit_stmt (gimple *, edge *, tree *) FINAL OVERRIDE;
   enum ssa_prop_result visit_phi (gphi *) FINAL OVERRIDE;
 
-private:
   struct function *fun;
   vr_values *m_vr_values;
 };
@@ -4063,23 +4063,16 @@ class vrp_folder : public substitute_and_fold_engine
     : substitute_and_fold_engine (/* Fold all stmts.  */ true),
       m_vr_values (v), simplifier (v)
     {  }
-  bool fold_stmt (gimple_stmt_iterator *) FINAL OVERRIDE;
 
+private:
   tree value_of_expr (tree name, gimple *stmt) OVERRIDE
     {
       return m_vr_values->value_of_expr (name, stmt);
     }
-  class vr_values *m_vr_values;
-
-private:
+  bool fold_stmt (gimple_stmt_iterator *) FINAL OVERRIDE;
   bool fold_predicate_in (gimple_stmt_iterator *);
-  /* Delegators.  */
-  tree vrp_evaluate_conditional (tree_code code, tree op0,
-				 tree op1, gimple *stmt)
-    { return simplifier.vrp_evaluate_conditional (code, op0, op1, stmt); }
-  bool simplify_stmt_using_ranges (gimple_stmt_iterator *gsi)
-    { return simplifier.simplify (gsi); }
 
+  vr_values *m_vr_values;
   simplify_using_ranges simplifier;
 };
 
@@ -4098,16 +4091,16 @@ vrp_folder::fold_predicate_in (gimple_stmt_iterator *si)
       && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
     {
       assignment_p = true;
-      val = vrp_evaluate_conditional (gimple_assign_rhs_code (stmt),
-				      gimple_assign_rhs1 (stmt),
-				      gimple_assign_rhs2 (stmt),
-				      stmt);
+      val = simplifier.vrp_evaluate_conditional (gimple_assign_rhs_code (stmt),
+						 gimple_assign_rhs1 (stmt),
+						 gimple_assign_rhs2 (stmt),
+						 stmt);
     }
   else if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
-    val = vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
-				    gimple_cond_lhs (cond_stmt),
-				    gimple_cond_rhs (cond_stmt),
-				    stmt);
+    val = simplifier.vrp_evaluate_conditional (gimple_cond_code (cond_stmt),
+					       gimple_cond_lhs (cond_stmt),
+					       gimple_cond_rhs (cond_stmt),
+					       stmt);
   else
     return false;
 
@@ -4153,7 +4146,7 @@ vrp_folder::fold_stmt (gimple_stmt_iterator *si)
   if (fold_predicate_in (si))
     return true;
 
-  return simplify_stmt_using_ranges (si);
+  return simplifier.simplify (si);
 }
 
 /* Blocks which have more than one predecessor and more than
-- 
2.26.2


^ permalink raw reply	[flat|nested] 5+ messages in thread

end of thread, other threads:[~2020-11-12 15:17 UTC | newest]

Thread overview: 5+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-11-12 15:15 [PATCH 1/5] Group tree-vrp.c by functionality Aldy Hernandez
2020-11-12 15:15 ` [PATCH 2/5] Refactor VRP threading code into vrp_jump_threader class Aldy Hernandez
2020-11-12 15:15 ` [PATCH 3/5] Move vrp_prop before vrp_folder Aldy Hernandez
2020-11-12 15:15 ` [PATCH 4/5] Move vr_values out of vrp_prop into execute_vrp so it can be shared Aldy Hernandez
2020-11-12 15:16 ` [PATCH 5/5] Inline delegators in vrp_folder Aldy Hernandez

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for read-only IMAP folder(s) and NNTP newsgroup(s).