public inbox for gcc-cvs@sourceware.org
* [gcc/devel/gccgo] re PR tree-optimization/93199 (Compile time hog in sink_clobbers)
@ 2020-01-23  0:11 Ian Lance Taylor
  0 siblings, 0 replies; 4+ messages in thread
From: Ian Lance Taylor @ 2020-01-23  0:11 UTC (permalink / raw)
  To: gcc-cvs

https://gcc.gnu.org/g:734efcdda91645d6425f584b39362cf0c3af2587

commit 734efcdda91645d6425f584b39362cf0c3af2587
Author: Richard Biener <rguenther@suse.de>
Date:   Fri Jan 10 11:23:53 2020 +0000

    re PR tree-optimization/93199 (Compile time hog in sink_clobbers)
    
    2020-01-10  Richard Biener  <rguenther@suse.de>
    
    	PR middle-end/93199
    	* tree-eh.c (sink_clobbers): Move clobbers to out-of-IL
    	sequences to avoid walking them again for secondary opportunities.
    	(pass_lower_eh_dispatch::execute): Instead actually insert
    	them here.
    
    From-SVN: r280102

Diff:
---
 gcc/ChangeLog |  8 ++++++++
 gcc/tree-eh.c | 39 ++++++++++++++++++++++++++++++++-------
 2 files changed, 40 insertions(+), 7 deletions(-)

diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 8b3b780..f93e919 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,6 +1,14 @@
 2020-01-10  Richard Biener  <rguenther@suse.de>
 
 	PR middle-end/93199
+	* tree-eh.c (sink_clobbers): Move clobbers to out-of-IL
+	sequences to avoid walking them again for secondary opportunities.
+	(pass_lower_eh_dispatch::execute): Instead actually insert
+	them here.
+
+2020-01-10  Richard Biener  <rguenther@suse.de>
+
+	PR middle-end/93199
 	* tree-eh.c (redirect_eh_edge_1): Avoid some work if possible.
 	(cleanup_all_empty_eh): Walk landing pads in reverse order to
 	avoid quadraticness.
diff --git a/gcc/tree-eh.c b/gcc/tree-eh.c
index 408ff48..dc80f57 100644
--- a/gcc/tree-eh.c
+++ b/gcc/tree-eh.c
@@ -3550,11 +3550,15 @@ optimize_clobbers (basic_block bb)
 }
 
 /* Try to sink var = {v} {CLOBBER} stmts followed just by
-   internal throw to successor BB.  If FOUND_OPPORTUNITY is not NULL
-   then do not perform the optimization but set *FOUND_OPPORTUNITY to true.  */
+   internal throw to successor BB.
+   SUNK, if not NULL, is an array of sequences indexed by basic-block
+   index to sink to and to pick up sinking opportunities from.
+   If FOUND_OPPORTUNITY is not NULL then do not perform the optimization
+   but set *FOUND_OPPORTUNITY to true.  */
 
 static int
-sink_clobbers (basic_block bb, bool *found_opportunity = NULL)
+sink_clobbers (basic_block bb,
+	       gimple_seq *sunk = NULL, bool *found_opportunity = NULL)
 {
   edge e;
   edge_iterator ei;
@@ -3589,7 +3593,7 @@ sink_clobbers (basic_block bb, bool *found_opportunity = NULL)
 	return 0;
       any_clobbers = true;
     }
-  if (!any_clobbers)
+  if (!any_clobbers && (!sunk || gimple_seq_empty_p (sunk[bb->index])))
     return 0;
 
   /* If this was a dry run, tell it we found clobbers to sink.  */
@@ -3618,7 +3622,10 @@ sink_clobbers (basic_block bb, bool *found_opportunity = NULL)
 
   gimple *first_sunk = NULL;
   gimple *last_sunk = NULL;
-  dgsi = gsi_after_labels (succbb);
+  if (sunk)
+    dgsi = gsi_start (sunk[succbb->index]);
+  else
+    dgsi = gsi_after_labels (succbb);
   gsi = gsi_last_bb (bb);
   for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
     {
@@ -3653,6 +3660,15 @@ sink_clobbers (basic_block bb, bool *found_opportunity = NULL)
 	first_sunk = stmt;
       last_sunk = stmt;
     }
+  if (sunk && !gimple_seq_empty_p (sunk[bb->index]))
+    {
+      if (!first_sunk)
+	first_sunk = gsi_stmt (gsi_last (sunk[bb->index]));
+      last_sunk = gsi_stmt (gsi_start (sunk[bb->index]));
+      gsi_insert_seq_before_without_update (&dgsi,
+					    sunk[bb->index], GSI_NEW_STMT);
+      sunk[bb->index] = NULL;
+    }
   if (first_sunk)
     {
       /* Adjust virtual operands if we sunk across a virtual PHI.  */
@@ -3892,7 +3908,7 @@ pass_lower_eh_dispatch::execute (function *fun)
 	  if (stmt_can_throw_external (fun, last))
 	    optimize_clobbers (bb);
 	  else if (!any_resx_to_process)
-	    sink_clobbers (bb, &any_resx_to_process);
+	    sink_clobbers (bb, NULL, &any_resx_to_process);
 	}
     }
   if (redirected)
@@ -3908,6 +3924,7 @@ pass_lower_eh_dispatch::execute (function *fun)
 	 and unreachable block removal.  */
       int *rpo = XNEWVEC  (int, n_basic_blocks_for_fn (fun));
       int rpo_n = pre_and_rev_post_order_compute_fn (fun, NULL, rpo, false);
+      gimple_seq *sunk = XCNEWVEC (gimple_seq, last_basic_block_for_fn (fun));
       for (int i = 0; i < rpo_n; ++i)
 	{
 	  bb = BASIC_BLOCK_FOR_FN (fun, rpo[i]);
@@ -3915,9 +3932,17 @@ pass_lower_eh_dispatch::execute (function *fun)
 	  if (last
 	      && gimple_code (last) == GIMPLE_RESX
 	      && !stmt_can_throw_external (fun, last))
-	    flags |= sink_clobbers (bb);
+	    flags |= sink_clobbers (bb, sunk);
+	  /* If there were any clobbers sunk into this BB, insert them now.  */
+	  if (!gimple_seq_empty_p (sunk[bb->index]))
+	    {
+	      gimple_stmt_iterator gsi = gsi_after_labels (bb);
+	      gsi_insert_seq_before (&gsi, sunk[bb->index], GSI_NEW_STMT);
+	      sunk[bb->index] = NULL;
+	    }
 	}
       free (rpo);
+      free (sunk);
     }
 
   return flags;


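The pattern in the patch above, accumulating clobbers in per-block gimple_seq buffers that live outside the IL and splicing each buffer in exactly once, can be shown in miniature outside of GCC. Below is a self-contained C sketch of that idea; the types and names (struct stmt, pending, sink_to) are hypothetical stand-ins, not GCC internals, and the plain index loop stands in for the RPO walk:

/* Sketch only: hypothetical types, not GCC API.  */
#include <stdio.h>
#include <stdlib.h>

struct stmt { const char *text; struct stmt *next; };

#define NBLOCKS 4

/* Out-of-IL pending list per basic block, filled while sinking.  */
static struct stmt *pending[NBLOCKS];

/* Park a statement on the target block's pending list instead of
   inserting it into the block right away.  */
static void
sink_to (int bb, const char *text)
{
  struct stmt *s = malloc (sizeof *s);
  s->text = text;
  s->next = pending[bb];        /* prepended; order is illustrative */
  pending[bb] = s;
}

int
main (void)
{
  sink_to (2, "x = {CLOBBER}");
  sink_to (2, "y = {CLOBBER}");
  sink_to (3, "z = {CLOBBER}");

  /* One ordered visit per block (standing in for the RPO walk)
     splices each pending list into the IL exactly once.  */
  for (int bb = 0; bb < NBLOCKS; ++bb)
    for (struct stmt *s = pending[bb]; s; s = s->next)
      printf ("bb%d: insert %s\n", bb, s->text);
  return 0;
}

The point of the buffering is that statements parked in pending[] are never re-walked by later scans of the successor block; they enter the instruction stream only once, at flush time.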

* [gcc/devel/gccgo] re PR tree-optimization/93199 (Compile time hog in sink_clobbers)
@ 2020-01-23  0:11 Ian Lance Taylor
  0 siblings, 0 replies; 4+ messages in thread
From: Ian Lance Taylor @ 2020-01-23  0:11 UTC (permalink / raw)
  To: gcc-cvs

https://gcc.gnu.org/g:5eaf0c498f718f60591b06fa81fc51ace6a16c01

commit 5eaf0c498f718f60591b06fa81fc51ace6a16c01
Author: Richard Biener <rguenther@suse.de>
Date:   Fri Jan 10 10:49:57 2020 +0000

    re PR tree-optimization/93199 (Compile time hog in sink_clobbers)
    
    2020-01-10  Richard Biener  <rguenther@suse.de>
    
    	PR middle-end/93199
    	* tree-eh.c (redirect_eh_edge_1): Avoid some work if possible.
    	(cleanup_all_empty_eh): Walk landing pads in reverse order to
    	avoid quadraticness.
    
    From-SVN: r280101

Diff:
---
 gcc/ChangeLog |  7 +++++++
 gcc/tree-eh.c | 21 ++++++++++++++-------
 2 files changed, 21 insertions(+), 7 deletions(-)

diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 61e3ef5..8b3b780 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,10 @@
+2020-01-10  Richard Biener  <rguenther@suse.de>
+
+	PR middle-end/93199
+	* tree-eh.c (redirect_eh_edge_1): Avoid some work if possible.
+	(cleanup_all_empty_eh): Walk landing pads in reverse order to
+	avoid quadraticness.
+
 2020-01-10  Martin Jambor  <mjambor@suse.cz>
 
 	* params.opt (param_ipa_sra_max_replacements): Mark as Optimization.
diff --git a/gcc/tree-eh.c b/gcc/tree-eh.c
index f25d2de..408ff48 100644
--- a/gcc/tree-eh.c
+++ b/gcc/tree-eh.c
@@ -2310,7 +2310,7 @@ redirect_eh_edge_1 (edge edge_in, basic_block new_bb, bool change_region)
   old_lp = get_eh_landing_pad_from_number (old_lp_nr);
 
   throw_stmt = last_stmt (edge_in->src);
-  gcc_assert (lookup_stmt_eh_lp (throw_stmt) == old_lp_nr);
+  gcc_checking_assert (lookup_stmt_eh_lp (throw_stmt) == old_lp_nr);
 
   new_label = gimple_block_label (new_bb);
 
@@ -4307,9 +4307,10 @@ cleanup_empty_eh_merge_phis (basic_block new_bb, basic_block old_bb,
 	|  | EH
 	<..>
      which CFG verification would choke on.  See PR45172 and PR51089.  */
-  FOR_EACH_EDGE (e, ei, old_bb->preds)
-    if (find_edge (e->src, new_bb))
-      return false;
+  if (!single_pred_p (new_bb))
+    FOR_EACH_EDGE (e, ei, old_bb->preds)
+      if (find_edge (e->src, new_bb))
+	return false;
 
   FOR_EACH_EDGE (e, ei, old_bb->preds)
     redirect_edge_var_map_clear (e);
@@ -4698,9 +4699,15 @@ cleanup_all_empty_eh (void)
   eh_landing_pad lp;
   int i;
 
-  for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
-    if (lp)
-      changed |= cleanup_empty_eh (lp);
+  /* Ideally we'd walk the region tree and process LPs inner to outer
+     to avoid quadraticness in EH redirection.  Walking the LP array
+     in reverse seems to be an approximation of that.  */
+  for (i = vec_safe_length (cfun->eh->lp_array) - 1; i >= 1; --i)
+    {
+      lp = (*cfun->eh->lp_array)[i];
+      if (lp)
+	changed |= cleanup_empty_eh (lp);
+    }
 
   return changed;
 }


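The reverse walk in cleanup_all_empty_eh approximates inner-to-outer processing of the EH region tree. A toy C model of why the direction matters (everything below is an illustrative stand-in, not the GCC landing-pad data structures): if pad i forwards to pad i+1, resolving final targets front-to-back re-traverses the whole chain for every pad, roughly n^2/2 steps, while back-to-front each pad's target is already resolved, roughly 2n steps.

#include <stdio.h>

#define NPADS 8
static int target[NPADS];       /* pad i forwards to target[i] */
static long steps;

static void
init (void)
{
  for (int i = 0; i < NPADS; ++i)
    target[i] = (i + 1 < NPADS) ? i + 1 : i;  /* chain 0 -> 1 -> ...  */
  steps = 0;
}

/* Follow the forwarding chain to its end, counting the hops.  */
static int
resolve (int i)
{
  while (target[i] != i)
    {
      i = target[i];
      ++steps;
    }
  return i;
}

int
main (void)
{
  init ();
  for (int i = 0; i < NPADS; ++i)        /* outer-to-inner */
    target[i] = resolve (i);
  printf ("forward walk: %ld steps\n", steps);   /* 28 for n == 8 */

  init ();
  for (int i = NPADS - 1; i >= 0; --i)   /* inner-to-outer */
    target[i] = resolve (i);
  printf ("reverse walk: %ld steps\n", steps);   /* 13 for n == 8 */
  return 0;
}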

* [gcc/devel/gccgo] re PR tree-optimization/93199 (Compile time hog in sink_clobbers)
@ 2020-01-23  0:05 Ian Lance Taylor
  0 siblings, 0 replies; 4+ messages in thread
From: Ian Lance Taylor @ 2020-01-23  0:05 UTC (permalink / raw)
  To: gcc-cvs

https://gcc.gnu.org/g:fb768529d28e74ceca93efdd2e0a6ada3bb141fe

commit fb768529d28e74ceca93efdd2e0a6ada3bb141fe
Author: Richard Biener <rguenther@suse.de>
Date:   Wed Jan 8 14:30:44 2020 +0000

    re PR tree-optimization/93199 (Compile time hog in sink_clobbers)
    
    2020-01-08  Richard Biener  <rguenther@suse.de>
    
    	PR middle-end/93199
    	* tree-eh.c (sink_clobbers): Update virtual operands for
    	the first and last stmt only.  Add a dry-run capability.
    	(pass_lower_eh_dispatch::execute): Perform clobber sinking
    	after CFG manipulations and in RPO order to catch all
    	secondary opportunities reliably.
    
    From-SVN: r280006

Diff:
---
 gcc/ChangeLog |  9 +++++++
 gcc/tree-eh.c | 85 +++++++++++++++++++++++++++++++++++++++--------------------
 2 files changed, 66 insertions(+), 28 deletions(-)

diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index cc897ae..b733b56 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,12 @@
+2020-01-08  Richard Biener  <rguenther@suse.de>
+
+	PR middle-end/93199
+	* tree-eh.c (sink_clobbers): Update virtual operands for
+	the first and last stmt only.  Add a dry-run capability.
+	(pass_lower_eh_dispatch::execute): Perform clobber sinking
+	after CFG manipulations and in RPO order to catch all
+	secondary opportunities reliably.
+
 2020-01-08  Georg-Johann Lay  <avr@gjlay.de>
 
 	PR target/93182
diff --git a/gcc/tree-eh.c b/gcc/tree-eh.c
index 21ae24f..f25d2de 100644
--- a/gcc/tree-eh.c
+++ b/gcc/tree-eh.c
@@ -3550,10 +3550,11 @@ optimize_clobbers (basic_block bb)
 }
 
 /* Try to sink var = {v} {CLOBBER} stmts followed just by
-   internal throw to successor BB.  */
+   internal throw to successor BB.  If FOUND_OPPORTUNITY is not NULL
+   then do not perform the optimization but set *FOUND_OPPORTUNITY to true.  */
 
 static int
-sink_clobbers (basic_block bb)
+sink_clobbers (basic_block bb, bool *found_opportunity = NULL)
 {
   edge e;
   edge_iterator ei;
@@ -3591,13 +3592,19 @@ sink_clobbers (basic_block bb)
   if (!any_clobbers)
     return 0;
 
+  /* If this was a dry run, tell it we found clobbers to sink.  */
+  if (found_opportunity)
+    {
+      *found_opportunity = true;
+      return 0;
+    }
+
   edge succe = single_succ_edge (bb);
   succbb = succe->dest;
 
   /* See if there is a virtual PHI node to take an updated virtual
      operand from.  */
   gphi *vphi = NULL;
-  tree vuse = NULL_TREE;
   for (gphi_iterator gpi = gsi_start_phis (succbb);
        !gsi_end_p (gpi); gsi_next (&gpi))
     {
@@ -3605,11 +3612,12 @@ sink_clobbers (basic_block bb)
       if (virtual_operand_p (res))
 	{
 	  vphi = gpi.phi ();
-	  vuse = res;
 	  break;
 	}
     }
 
+  gimple *first_sunk = NULL;
+  gimple *last_sunk = NULL;
   dgsi = gsi_after_labels (succbb);
   gsi = gsi_last_bb (bb);
   for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
@@ -3641,36 +3649,37 @@ sink_clobbers (basic_block bb)
          forwarder edge we can keep virtual operands in place.  */
       gsi_remove (&gsi, false);
       gsi_insert_before (&dgsi, stmt, GSI_NEW_STMT);
-
-      /* But adjust virtual operands if we sunk across a PHI node.  */
-      if (vuse)
+      if (!first_sunk)
+	first_sunk = stmt;
+      last_sunk = stmt;
+    }
+  if (first_sunk)
+    {
+      /* Adjust virtual operands if we sunk across a virtual PHI.  */
+      if (vphi)
 	{
-	  gimple *use_stmt;
 	  imm_use_iterator iter;
 	  use_operand_p use_p;
-	  FOR_EACH_IMM_USE_STMT (use_stmt, iter, vuse)
+	  gimple *use_stmt;
+	  tree phi_def = gimple_phi_result (vphi);
+	  FOR_EACH_IMM_USE_STMT (use_stmt, iter, phi_def)
 	    FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
-	      SET_USE (use_p, gimple_vdef (stmt));
-	  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (vuse))
+              SET_USE (use_p, gimple_vdef (first_sunk));
+	  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (phi_def))
 	    {
-	      SSA_NAME_OCCURS_IN_ABNORMAL_PHI (gimple_vdef (stmt)) = 1;
-	      SSA_NAME_OCCURS_IN_ABNORMAL_PHI (vuse) = 0;
+	      SSA_NAME_OCCURS_IN_ABNORMAL_PHI (gimple_vdef (first_sunk)) = 1;
+	      SSA_NAME_OCCURS_IN_ABNORMAL_PHI (phi_def) = 0;
 	    }
-	  /* Adjust the incoming virtual operand.  */
-	  SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (vphi, succe), gimple_vuse (stmt));
-	  SET_USE (gimple_vuse_op (stmt), vuse);
+	  SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (vphi, succe),
+		   gimple_vuse (last_sunk));
+	  SET_USE (gimple_vuse_op (last_sunk), phi_def);
 	}
       /* If there isn't a single predecessor but no virtual PHI node
          arrange for virtual operands to be renamed.  */
-      else if (gimple_vuse_op (stmt) != NULL_USE_OPERAND_P
-	       && !single_pred_p (succbb))
+      else if (!single_pred_p (succbb)
+	       && TREE_CODE (gimple_vuse (last_sunk)) == SSA_NAME)
 	{
-	  /* In this case there will be no use of the VDEF of this stmt. 
-	     ???  Unless this is a secondary opportunity and we have not
-	     removed unreachable blocks yet, so we cannot assert this.  
-	     Which also means we will end up renaming too many times.  */
-	  SET_USE (gimple_vuse_op (stmt), gimple_vop (cfun));
-	  mark_virtual_operands_for_renaming (cfun);
+	  mark_virtual_operand_for_renaming (gimple_vuse (last_sunk));
 	  todo |= TODO_update_ssa_only_virtuals;
 	}
     }
@@ -3863,6 +3872,7 @@ pass_lower_eh_dispatch::execute (function *fun)
   basic_block bb;
   int flags = 0;
   bool redirected = false;
+  bool any_resx_to_process = false;
 
   assign_filter_values ();
 
@@ -3879,18 +3889,37 @@ pass_lower_eh_dispatch::execute (function *fun)
 	}
       else if (gimple_code (last) == GIMPLE_RESX)
 	{
-	  if (stmt_can_throw_external (cfun, last))
+	  if (stmt_can_throw_external (fun, last))
 	    optimize_clobbers (bb);
-	  else
-	    flags |= sink_clobbers (bb);
+	  else if (!any_resx_to_process)
+	    sink_clobbers (bb, &any_resx_to_process);
 	}
     }
-
   if (redirected)
     {
       free_dominance_info (CDI_DOMINATORS);
       delete_unreachable_blocks ();
     }
+
+  if (any_resx_to_process)
+    {
+      /* Make sure to catch all secondary sinking opportunities by processing
+	 blocks in RPO order and after all CFG modifications from lowering
+	 and unreachable block removal.  */
+      int *rpo = XNEWVEC  (int, n_basic_blocks_for_fn (fun));
+      int rpo_n = pre_and_rev_post_order_compute_fn (fun, NULL, rpo, false);
+      for (int i = 0; i < rpo_n; ++i)
+	{
+	  bb = BASIC_BLOCK_FOR_FN (fun, rpo[i]);
+	  gimple *last = last_stmt (bb);
+	  if (last
+	      && gimple_code (last) == GIMPLE_RESX
+	      && !stmt_can_throw_external (fun, last))
+	    flags |= sink_clobbers (bb);
+	}
+      free (rpo);
+    }
+
   return flags;
 }


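The new FOUND_OPPORTUNITY parameter turns sink_clobbers into an optional dry run: the cheap first scan over the function only records whether any sinking is possible, and the expensive second pass, with its RPO computation, runs at most once. A minimal C sketch of that two-phase pattern follows; the names are hypothetical, and the modulo test stands in for the real scan for sinkable clobbers:

#include <stdbool.h>
#include <stdio.h>

#define NBLOCKS 9

/* Either perform the transform, or, if FOUND is non-NULL, only record
   that an opportunity exists without touching anything.  */
static int
sink_one (int bb, bool *found)
{
  bool has_clobbers = (bb % 3 == 0);   /* stand-in for the real test */
  if (!has_clobbers)
    return 0;
  if (found)
    {
      *found = true;            /* dry run: report only */
      return 0;
    }
  printf ("sinking clobbers from bb%d\n", bb);
  return 1;                     /* "IL changed" flag */
}

int
main (void)
{
  bool any = false;
  int flags = 0;

  /* Cheap first scan: stop as soon as one opportunity is found.  */
  for (int bb = 0; bb < NBLOCKS && !any; ++bb)
    sink_one (bb, &any);

  /* Expensive ordered pass (RPO in the real code), run at most once.  */
  if (any)
    for (int bb = 0; bb < NBLOCKS; ++bb)
      flags |= sink_one (bb, NULL);

  printf ("flags = %d\n", flags);
  return 0;
}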

* [gcc/devel/gccgo] re PR tree-optimization/93199 (Compile time hog in sink_clobbers)
@ 2020-01-23  0:05 Ian Lance Taylor
  0 siblings, 0 replies; 4+ messages in thread
From: Ian Lance Taylor @ 2020-01-23  0:05 UTC (permalink / raw)
  To: gcc-cvs

https://gcc.gnu.org/g:f74c4b2c4427a4309d48bfc45bc140422a75aa6f

commit f74c4b2c4427a4309d48bfc45bc140422a75aa6f
Author: Richard Biener <rguenther@suse.de>
Date:   Wed Jan 8 12:49:14 2020 +0000

    re PR tree-optimization/93199 (Compile time hog in sink_clobbers)
    
    2020-01-08  Richard Biener  <rguenther@suse.de>
    
    	PR middle-end/93199
    	c/
    	* gimple-parser.c (c_parser_parse_gimple_body): Remove __PHI IFN
    	permanently.
    
    	* gimple-fold.c (rewrite_to_defined_overflow): Mark stmt modified.
    	* tree-ssa-loop-im.c (move_computations_worker): Properly adjust
    	virtual operand, also updating SSA use.
    	* gimple-loop-interchange.cc (loop_cand::undo_simple_reduction):
    	Update stmt after resetting virtual operand.
    	(tree_loop_interchange::move_code_to_inner_loop): Likewise.
    
    	* gimple-iterator.c (gsi_remove): When not removing the stmt
    	permanently do not delink immediate uses or mark the stmt modified.
    
    From-SVN: r280000

Diff:
---
 gcc/ChangeLog                  | 12 ++++++++++++
 gcc/c/ChangeLog                |  6 ++++++
 gcc/c/gimple-parser.c          |  2 +-
 gcc/gimple-fold.c              |  1 +
 gcc/gimple-iterator.c          |  8 +++++---
 gcc/gimple-loop-interchange.cc |  9 +++++++--
 gcc/tree-ssa-loop-im.c         |  3 ++-
 7 files changed, 34 insertions(+), 7 deletions(-)

diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index f7f7402..3defa29 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,15 @@
+2020-01-08  Richard Biener  <rguenther@suse.de>
+
+	PR middle-end/93199
+	* gimple-fold.c (rewrite_to_defined_overflow): Mark stmt modified.
+	* tree-ssa-loop-im.c (move_computations_worker): Properly adjust
+	virtual operand, also updating SSA use.
+	* gimple-loop-interchange.cc (loop_cand::undo_simple_reduction):
+	Update stmt after resetting virtual operand.
+	(tree_loop_interchange::move_code_to_inner_loop): Likewise.
+	* gimple-iterator.c (gsi_remove): When not removing the stmt
+	permanently do not delink immediate uses or mark the stmt modified.
+
 2020-01-08  Martin Liska  <mliska@suse.cz>
 
 	* ipa-fnsummary.c (dump_ipa_call_summary): Use symtab_node::dump_name.
diff --git a/gcc/c/ChangeLog b/gcc/c/ChangeLog
index a4b1980..d0b6559 100644
--- a/gcc/c/ChangeLog
+++ b/gcc/c/ChangeLog
@@ -1,3 +1,9 @@
+2020-01-08  Richard Biener  <rguenther@suse.de>
+
+	PR middle-end/93199
+	* gimple-parser.c (c_parser_parse_gimple_body): Remove __PHI IFN
+	permanently.
+
 2020-01-01  Jakub Jelinek  <jakub@redhat.com>
 
 	Update copyright years.
diff --git a/gcc/c/gimple-parser.c b/gcc/c/gimple-parser.c
index 7e4cd39..3370178 100644
--- a/gcc/c/gimple-parser.c
+++ b/gcc/c/gimple-parser.c
@@ -327,7 +327,7 @@ c_parser_parse_gimple_body (c_parser *cparser, char *gimple_pass,
 		      add_phi_arg (phi, gimple_call_arg (stmt, i + 1), e,
 				   UNKNOWN_LOCATION);
 		  }
-		gsi_remove (&gsi, false);
+		gsi_remove (&gsi, true);
 	      }
 	  /* Fill SSA name gaps, putting them on the freelist.  */
 	  for (unsigned i = 1; i < num_ssa_names; ++i)
diff --git a/gcc/gimple-fold.c b/gcc/gimple-fold.c
index 8c94572..d7c5097 100644
--- a/gcc/gimple-fold.c
+++ b/gcc/gimple-fold.c
@@ -7380,6 +7380,7 @@ rewrite_to_defined_overflow (gimple *stmt)
   gimple_assign_set_lhs (stmt, make_ssa_name (type, stmt));
   if (gimple_assign_rhs_code (stmt) == POINTER_PLUS_EXPR)
     gimple_assign_set_rhs_code (stmt, PLUS_EXPR);
+  gimple_set_modified (stmt, true);
   gimple_seq_add_stmt (&stmts, stmt);
   gimple *cvt = gimple_build_assign (lhs, NOP_EXPR, gimple_assign_lhs (stmt));
   gimple_seq_add_stmt (&stmts, cvt);
diff --git a/gcc/gimple-iterator.c b/gcc/gimple-iterator.c
index 0ccca23..d401c69 100644
--- a/gcc/gimple-iterator.c
+++ b/gcc/gimple-iterator.c
@@ -558,16 +558,18 @@ gsi_remove (gimple_stmt_iterator *i, bool remove_permanently)
   gimple *stmt = gsi_stmt (*i);
   bool require_eh_edge_purge = false;
 
+  /* ???  Do we want to do this for non-permanent operation?  */
   if (gimple_code (stmt) != GIMPLE_PHI)
     insert_debug_temps_for_defs (i);
 
-  /* Free all the data flow information for STMT.  */
   gimple_set_bb (stmt, NULL);
-  delink_stmt_imm_use (stmt);
-  gimple_set_modified (stmt, true);
 
   if (remove_permanently)
     {
+      /* Free all the data flow information for STMT.  */
+      delink_stmt_imm_use (stmt);
+      gimple_set_modified (stmt, true);
+
       if (gimple_debug_nonbind_marker_p (stmt))
 	/* We don't need this to be exact, but try to keep it at least
 	   close.  */
diff --git a/gcc/gimple-loop-interchange.cc b/gcc/gimple-loop-interchange.cc
index b3cb7700..2379848 100644
--- a/gcc/gimple-loop-interchange.cc
+++ b/gcc/gimple-loop-interchange.cc
@@ -879,6 +879,7 @@ loop_cand::undo_simple_reduction (reduction_p re, bitmap dce_seeds)
   if (re->producer != NULL)
     {
       gimple_set_vuse (re->producer, NULL_TREE);
+      update_stmt (re->producer);
       from = gsi_for_stmt (re->producer);
       gsi_remove (&from, false);
       gimple_seq_add_stmt_without_update (&stmts, re->producer);
@@ -920,6 +921,7 @@ loop_cand::undo_simple_reduction (reduction_p re, bitmap dce_seeds)
   gimple_set_vdef (re->consumer, NULL_TREE);
   gimple_set_vuse (re->consumer, NULL_TREE);
   gimple_assign_set_rhs1 (re->consumer, re->next);
+  update_stmt (re->consumer);
   from = gsi_for_stmt (re->consumer);
   to = gsi_for_stmt (SSA_NAME_DEF_STMT (re->next));
   gsi_move_after (&from, &to);
@@ -1248,14 +1250,17 @@ tree_loop_interchange::move_code_to_inner_loop (class loop *outer,
 	      continue;
 	    }
 
-	  if (gimple_vuse (stmt))
-	    gimple_set_vuse (stmt, NULL_TREE);
 	  if (gimple_vdef (stmt))
 	    {
 	      unlink_stmt_vdef (stmt);
 	      release_ssa_name (gimple_vdef (stmt));
 	      gimple_set_vdef (stmt, NULL_TREE);
 	    }
+	  if (gimple_vuse (stmt))
+	    {
+	      gimple_set_vuse (stmt, NULL_TREE);
+	      update_stmt (stmt);
+	    }
 
 	  reset_debug_uses (stmt);
 	  gsi_move_before (&gsi, &to);
diff --git a/gcc/tree-ssa-loop-im.c b/gcc/tree-ssa-loop-im.c
index dd9df25..3e64ae7 100644
--- a/gcc/tree-ssa-loop-im.c
+++ b/gcc/tree-ssa-loop-im.c
@@ -1231,7 +1231,8 @@ move_computations_worker (basic_block bb)
 	      gphi *phi = gsi2.phi ();
 	      if (virtual_operand_p (gimple_phi_result (phi)))
 		{
-		  gimple_set_vuse (stmt, PHI_ARG_DEF_FROM_EDGE (phi, e));
+		  SET_USE (gimple_vuse_op (stmt),
+			   PHI_ARG_DEF_FROM_EDGE (phi, e));
 		  break;
 		}
 	    }


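The gsi_remove change above distinguishes detaching a statement that will be re-inserted elsewhere from destroying it outright: only permanent removal now delinks immediate uses and marks the statement modified, and the companion update_stmt/SET_USE fixes keep operand caches consistent at the call sites that relied on the old behavior. A rough C analogy of the detach-versus-destroy split, using an intrusive doubly linked list and a counter standing in for the immediate-use chains (all names hypothetical, not the GCC iterator API):

#include <stdbool.h>
#include <stdio.h>

struct node
{
  int id;
  int uses_linked;              /* stand-in for immediate-use chains */
  struct node *prev, *next;
};

/* Detach N from its list; both removal flavors need this.  */
static void
unlink_node (struct node *n)
{
  if (n->prev)
    n->prev->next = n->next;
  if (n->next)
    n->next->prev = n->prev;
  n->prev = n->next = NULL;
}

/* Non-permanent removal only detaches, so a statement being moved
   keeps its operand links intact; permanent removal also drops them.  */
static void
remove_node (struct node *n, bool permanently)
{
  unlink_node (n);
  if (permanently)
    n->uses_linked = 0;
}

int
main (void)
{
  struct node a = { 1, 3, NULL, NULL };
  struct node b = { 2, 5, &a, NULL };
  a.next = &b;

  remove_node (&b, false);      /* detach to re-insert elsewhere */
  printf ("moved stmt keeps %d use links\n", b.uses_linked);

  remove_node (&a, true);       /* delete outright */
  printf ("deleted stmt keeps %d use links\n", a.uses_linked);
  return 0;
}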
