public inbox for gcc-cvs@sourceware.org
* [gcc r14-137] change inverted_post_order_compute to inverted_rev_post_order_compute
@ 2023-04-21 11:26 Richard Biener
From: Richard Biener @ 2023-04-21 11:26 UTC (permalink / raw)
  To: gcc-cvs

https://gcc.gnu.org/g:773cc925e84b248afa4ed01bf444be0935d33861

commit r14-137-g773cc925e84b248afa4ed01bf444be0935d33861
Author: Richard Biener <rguenther@suse.de>
Date:   Fri Apr 21 09:40:01 2023 +0200

    change inverted_post_order_compute to inverted_rev_post_order_compute
    
    The following changes the inverted_post_order_compute API back to
    a plain C array interface and makes it compute a reverse post order,
    since that is what is always required.  It will make massaging DF
    to use the correct iteration orders easier.  Elsewhere it requires
    replacing backward iteration over the computed order with forward
    iteration.
    
            * cfganal.h (inverted_rev_post_order_compute): Rename
            from ...
            (inverted_post_order_compute): ... this.  Add struct function
            argument, change allocation to a C array.
            * cfganal.cc (inverted_rev_post_order_compute): Likewise.
            * lcm.cc (compute_antinout_edge): Adjust.
            * lra-lives.cc (lra_create_live_ranges_1): Likewise.
            * tree-ssa-dce.cc (remove_dead_stmt): Likewise.
            * tree-ssa-pre.cc (compute_antic): Likewise.
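
For illustration only (not part of the commit): a self-contained sketch of
the convention the new interface adopts.  A reverse post order of the
inverted graph is written into a plain C array by filling it from the back
and returning the number of entries, so callers iterate forward over it, as
the lcm.cc hunk below now does.  The toy graph and all function and variable
names here are hypothetical, and the sketch omits the dead-end handling the
real cfganal.cc routine needs for blocks not reachable backward from the
exit.

  #include <cstdio>
  #include <utility>
  #include <vector>

  /* Fill REV_POST_ORDER (sized to the node count) with a reverse post
     order of the inverted graph, writing entries from the back as each
     node is finished.  Returns the number of entries written.  */

  static int
  toy_inverted_rev_post_order (const std::vector<std::vector<int>> &preds,
                               const std::vector<std::vector<int>> &succs,
                               int *rev_post_order)
  {
    int n = preds.size ();
    int num = n - 1;
    std::vector<bool> visited (n, false);
    /* DFS stack of (node, index of next unprocessed predecessor).  */
    std::vector<std::pair<int, size_t>> stack;

    /* Seed the walk with every sink, i.e. every node without successors.  */
    for (int b = 0; b < n; ++b)
      if (succs[b].empty ())
        {
          visited[b] = true;
          stack.emplace_back (b, 0);
        }

    while (!stack.empty ())
      {
        int b = stack.back ().first;
        size_t i = stack.back ().second;
        if (i < preds[b].size ())
          {
            stack.back ().second = i + 1;
            int p = preds[b][i];
            if (!visited[p])
              {
                visited[p] = true;
                stack.emplace_back (p, 0);
              }
          }
        else
          {
            /* B is finished: record it at the next free slot from the
               back, which yields a reverse post order without a separate
               reversal pass.  */
            rev_post_order[num--] = b;
            stack.pop_back ();
          }
      }
    return n;
  }

  int
  main ()
  {
    /* Diamond: 0 -> {1, 2}, 1 -> 3, 2 -> 3; node 3 is the only sink.  */
    std::vector<std::vector<int>> succs = { {1, 2}, {3}, {3}, {} };
    std::vector<std::vector<int>> preds = { {}, {0}, {0}, {1, 2} };

    int rpo[4];
    int n = toy_inverted_rev_post_order (preds, succs, rpo);

    /* Callers now iterate forward, visiting the exit-most node first.  */
    for (int i = 0; i < n; ++i)
      printf ("%d ", rpo[i]);
    printf ("\n");  /* Prints "3 2 1 0" for this graph.  */
    return 0;
  }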

Diff:
---
 gcc/cfganal.cc      | 41 ++++++++++++++++++++++-------------------
 gcc/cfganal.h       |  3 ++-
 gcc/lcm.cc          |  9 +++++----
 gcc/lra-lives.cc    | 11 ++++++-----
 gcc/tree-ssa-dce.cc | 15 ++++++++-------
 gcc/tree-ssa-pre.cc | 18 ++++++++++--------
 6 files changed, 53 insertions(+), 44 deletions(-)

diff --git a/gcc/cfganal.cc b/gcc/cfganal.cc
index ef24c5e4d15..cc858b99e64 100644
--- a/gcc/cfganal.cc
+++ b/gcc/cfganal.cc
@@ -740,7 +740,7 @@ post_order_compute (int *post_order, bool include_entry_exit,
 }
 
 
-/* Helper routine for inverted_post_order_compute
+/* Helper routine for inverted_rev_post_order_compute
    flow_dfs_compute_reverse_execute, and the reverse-CFG
    deapth first search in dominance.cc.
    BB has to belong to a region of CFG
@@ -820,12 +820,14 @@ dfs_find_deadend (basic_block bb)
    and start looking for a "dead end" from that block
    and do another inverted traversal from that block.  */
 
-void
-inverted_post_order_compute (vec<int> *post_order,
-			     sbitmap *start_points)
+int
+inverted_rev_post_order_compute (struct function *fn,
+				 int *rev_post_order,
+				 sbitmap *start_points)
 {
   basic_block bb;
-  post_order->reserve_exact (n_basic_blocks_for_fn (cfun));
+
+  int rev_post_order_num = n_basic_blocks_for_fn (fn) - 1;
 
   if (flag_checking)
     verify_no_unreachable_blocks ();
@@ -855,17 +857,17 @@ inverted_post_order_compute (vec<int> *post_order,
 	}
     }
   else
-  /* Put all blocks that have no successor into the initial work list.  */
-  FOR_ALL_BB_FN (bb, cfun)
-    if (EDGE_COUNT (bb->succs) == 0)
-      {
-        /* Push the initial edge on to the stack.  */
-        if (EDGE_COUNT (bb->preds) > 0)
-          {
-	    stack.quick_push (ei_start (bb->preds));
-            bitmap_set_bit (visited, bb->index);
-          }
-      }
+    /* Put all blocks that have no successor into the initial work list.  */
+    FOR_ALL_BB_FN (bb, cfun)
+      if (EDGE_COUNT (bb->succs) == 0)
+	{
+	  /* Push the initial edge on to the stack.  */
+	  if (EDGE_COUNT (bb->preds) > 0)
+	    {
+	      stack.quick_push (ei_start (bb->preds));
+	      bitmap_set_bit (visited, bb->index);
+	    }
+	}
 
   do
     {
@@ -893,13 +895,13 @@ inverted_post_order_compute (vec<int> *post_order,
                    time, check its predecessors.  */
 		stack.quick_push (ei_start (pred->preds));
               else
-		post_order->quick_push (pred->index);
+		rev_post_order[rev_post_order_num--] = pred->index;
             }
           else
             {
 	      if (bb != EXIT_BLOCK_PTR_FOR_FN (cfun)
 		  && ei_one_before_end_p (ei))
-		post_order->quick_push (bb->index);
+		rev_post_order[rev_post_order_num--] = bb->index;
 
               if (!ei_one_before_end_p (ei))
 		ei_next (&stack.last ());
@@ -957,7 +959,8 @@ inverted_post_order_compute (vec<int> *post_order,
   while (!stack.is_empty ());
 
   /* EXIT_BLOCK is always included.  */
-  post_order->quick_push (EXIT_BLOCK);
+  rev_post_order[rev_post_order_num--] = EXIT_BLOCK;
+  return n_basic_blocks_for_fn (fn);
 }
 
 /* Compute the depth first search order of FN and store in the array
diff --git a/gcc/cfganal.h b/gcc/cfganal.h
index 0b6c67dfab5..5af917c27dd 100644
--- a/gcc/cfganal.h
+++ b/gcc/cfganal.h
@@ -66,7 +66,8 @@ extern void add_noreturn_fake_exit_edges (void);
 extern void connect_infinite_loops_to_exit (void);
 extern int post_order_compute (int *, bool, bool);
 extern basic_block dfs_find_deadend (basic_block);
-extern void inverted_post_order_compute (vec<int> *postorder, sbitmap *start_points = 0);
+extern int inverted_rev_post_order_compute (struct function *,
+					    int *, sbitmap *start_points = 0);
 extern int pre_and_rev_post_order_compute_fn (struct function *,
 					      int *, int *, bool);
 extern int pre_and_rev_post_order_compute (int *, int *, bool);
diff --git a/gcc/lcm.cc b/gcc/lcm.cc
index 5adb4eb1a11..94a3ed43aea 100644
--- a/gcc/lcm.cc
+++ b/gcc/lcm.cc
@@ -102,17 +102,18 @@ compute_antinout_edge (sbitmap *antloc, sbitmap *transp, sbitmap *antin,
      optimistic initialization of ANTIN above.  Use reverse postorder
      on the inverted graph to make the backward dataflow problem require
      less iterations.  */
-  auto_vec<int> postorder;
-  inverted_post_order_compute (&postorder);
-  for (int i = postorder.length () - 1; i >= 0; --i)
+  int *rpo = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
+  int n = inverted_rev_post_order_compute (cfun, rpo);
+  for (int i = 0; i < n; ++i)
     {
-      bb = BASIC_BLOCK_FOR_FN (cfun, postorder[i]);
+      bb = BASIC_BLOCK_FOR_FN (cfun, rpo[i]);
       if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
 	  || bb == ENTRY_BLOCK_PTR_FOR_FN (cfun))
 	continue;
       *qin++ = bb;
       bb->aux = bb;
     }
+  free (rpo);
 
   qin = worklist;
   qend = &worklist[n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS];
diff --git a/gcc/lra-lives.cc b/gcc/lra-lives.cc
index f7a7066055a..f7a3ba8d76a 100644
--- a/gcc/lra-lives.cc
+++ b/gcc/lra-lives.cc
@@ -1405,19 +1405,20 @@ lra_create_live_ranges_1 (bool all_p, bool dead_insn_p)
   point_freq_vec.truncate (0);
   point_freq_vec.reserve_exact (new_length);
   lra_point_freq = point_freq_vec.address ();
-  auto_vec<int, 20> post_order_rev_cfg;
-  inverted_post_order_compute (&post_order_rev_cfg);
-  lra_assert (post_order_rev_cfg.length () == (unsigned) n_basic_blocks_for_fn (cfun));
+  int *rpo = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
+  int n = inverted_rev_post_order_compute (cfun, rpo);
+  lra_assert (n == n_basic_blocks_for_fn (cfun));
   bb_live_change_p = false;
-  for (i = post_order_rev_cfg.length () - 1; i >= 0; --i)
+  for (i = 0; i < n; ++i)
     {
-      bb = BASIC_BLOCK_FOR_FN (cfun, post_order_rev_cfg[i]);
+      bb = BASIC_BLOCK_FOR_FN (cfun, rpo[i]);
       if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun) || bb
 	  == ENTRY_BLOCK_PTR_FOR_FN (cfun))
 	continue;
       if (process_bb_lives (bb, curr_point, dead_insn_p))
 	bb_live_change_p = true;
     }
+  free (rpo);
   if (bb_live_change_p)
     {
       /* We need to clear pseudo live info as some pseudos can
diff --git a/gcc/tree-ssa-dce.cc b/gcc/tree-ssa-dce.cc
index bda780876f3..08876bfc1c7 100644
--- a/gcc/tree-ssa-dce.cc
+++ b/gcc/tree-ssa-dce.cc
@@ -1095,7 +1095,7 @@ remove_dead_stmt (gimple_stmt_iterator *i, basic_block bb,
      nothing to the program, then we not only remove it, but we need to update
      the CFG.  We can chose any of edges out of BB as long as we are sure to not
      close infinite loops.  This is done by always choosing the edge closer to
-     exit in inverted_post_order_compute order.  */
+     exit in inverted_rev_post_order_compute order.  */
   if (is_ctrl_stmt (stmt))
     {
       edge_iterator ei;
@@ -1111,17 +1111,18 @@ remove_dead_stmt (gimple_stmt_iterator *i, basic_block bb,
 	{
 	  if (!bb_postorder)
 	    {
-	      auto_vec<int, 20> postorder;
-		 inverted_post_order_compute (&postorder,
-					      &bb_contains_live_stmts);
+	      int *rpo = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
+	      int n = inverted_rev_post_order_compute (cfun, rpo,
+						       &bb_contains_live_stmts);
 	      bb_postorder = XNEWVEC (int, last_basic_block_for_fn (cfun));
-	      for (unsigned int i = 0; i < postorder.length (); ++i)
-		 bb_postorder[postorder[i]] = i;
+	      for (int i = 0; i < n; ++i)
+		 bb_postorder[rpo[i]] = i;
+	      free (rpo);
 	    }
           FOR_EACH_EDGE (e2, ei, bb->succs)
 	    if (!e || e2->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
 		|| bb_postorder [e->dest->index]
-		   < bb_postorder [e2->dest->index])
+		   >= bb_postorder [e2->dest->index])
 	      e = e2;
 	}
       gcc_assert (e);
diff --git a/gcc/tree-ssa-pre.cc b/gcc/tree-ssa-pre.cc
index 37cad36f2de..943936df808 100644
--- a/gcc/tree-ssa-pre.cc
+++ b/gcc/tree-ssa-pre.cc
@@ -2464,8 +2464,8 @@ compute_antic (void)
   /* For ANTIC computation we need a postorder that also guarantees that
      a block with a single successor is visited after its successor.
      RPO on the inverted CFG has this property.  */
-  auto_vec<int, 20> postorder;
-  inverted_post_order_compute (&postorder);
+  int *rpo = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
+  int n = inverted_rev_post_order_compute (cfun, rpo);
 
   auto_sbitmap worklist (last_basic_block_for_fn (cfun) + 1);
   bitmap_clear (worklist);
@@ -2481,11 +2481,11 @@ compute_antic (void)
 	 for PA ANTIC computation.  */
       num_iterations++;
       changed = false;
-      for (i = postorder.length () - 1; i >= 0; i--)
+      for (i = 0; i < n; ++i)
 	{
-	  if (bitmap_bit_p (worklist, postorder[i]))
+	  if (bitmap_bit_p (worklist, rpo[i]))
 	    {
-	      basic_block block = BASIC_BLOCK_FOR_FN (cfun, postorder[i]);
+	      basic_block block = BASIC_BLOCK_FOR_FN (cfun, rpo[i]);
 	      bitmap_clear_bit (worklist, block->index);
 	      if (compute_antic_aux (block,
 				     bitmap_bit_p (has_abnormal_preds,
@@ -2513,15 +2513,17 @@ compute_antic (void)
   if (do_partial_partial)
     {
       /* For partial antic we ignore backedges and thus we do not need
-         to perform any iteration when we process blocks in postorder.  */
-      for (i = postorder.length () - 1; i >= 0; i--)
+	 to perform any iteration when we process blocks in rpo.  */
+      for (i = 0; i < n; ++i)
 	{
-	  basic_block block = BASIC_BLOCK_FOR_FN (cfun, postorder[i]);
+	  basic_block block = BASIC_BLOCK_FOR_FN (cfun, rpo[i]);
 	  compute_partial_antic_aux (block,
 				     bitmap_bit_p (has_abnormal_preds,
 						   block->index));
 	}
     }
+
+  free (rpo);
 }
