public inbox for gcc-cvs@sourceware.org
Subject: [gcc(refs/users/aldyh/heads/threader-refactor)] class jump_thread_path
From: Aldy Hernandez @ 2020-11-25 14:51 UTC
  To: gcc-cvs

https://gcc.gnu.org/g:cdb53f7a8d79b2dcc182c753b7a7d7e31e58eb7b

commit cdb53f7a8d79b2dcc182c753b7a7d7e31e58eb7b
Author: Aldy Hernandez <aldyh@redhat.com>
Date:   Tue Nov 24 18:36:44 2020 +0100

    class jump_thread_path

Diff:
---
 gcc/tree-ssa-threadbackward.c |  8 ++---
 gcc/tree-ssa-threadedge.c     |  8 ++---
 gcc/tree-ssa-threadedge.h     |  5 +--
 gcc/tree-ssa-threadupdate.c   | 71 +++++++++++++++++++++----------------------
 gcc/tree-ssa-threadupdate.h   | 28 ++++++++---------
 5 files changed, 58 insertions(+), 62 deletions(-)

diff --git a/gcc/tree-ssa-threadbackward.c b/gcc/tree-ssa-threadbackward.c
index 0b58e4c6b9b..8d959b250e4 100644
--- a/gcc/tree-ssa-threadbackward.c
+++ b/gcc/tree-ssa-threadbackward.c
@@ -464,7 +464,7 @@ thread_jumps::profitable_jump_thread_path (basic_block bbi, tree name,
 void
 thread_jumps::convert_and_register_current_path (edge taken_edge)
 {
-  vec<jump_thread_edge *> *jump_thread_path = new vec<jump_thread_edge *> ();
+  jump_thread_path *path = new jump_thread_path ();
 
   /* Record the edges between the blocks in PATH.  */
   for (unsigned int j = 0; j + 1 < m_path.length (); j++)
@@ -475,15 +475,15 @@ thread_jumps::convert_and_register_current_path (edge taken_edge)
       edge e = find_edge (bb1, bb2);
       gcc_assert (e);
       jump_thread_edge *x = new jump_thread_edge (e, EDGE_FSM_THREAD);
-      jump_thread_path->safe_push (x);
+      path->safe_push (x);
     }
 
   /* Add the edge taken when the control variable has value ARG.  */
   jump_thread_edge *x
     = new jump_thread_edge (taken_edge, EDGE_NO_COPY_SRC_BLOCK);
-  jump_thread_path->safe_push (x);
+  path->safe_push (x);
 
-  m_registry.register_jump_thread (jump_thread_path);
+  m_registry.register_jump_thread (path);
   --m_max_threaded_paths;
 }
 
diff --git a/gcc/tree-ssa-threadedge.c b/gcc/tree-ssa-threadedge.c
index 190decd0f6c..1982f55d6ac 100644
--- a/gcc/tree-ssa-threadedge.c
+++ b/gcc/tree-ssa-threadedge.c
@@ -881,7 +881,7 @@ propagate_threaded_block_debug_into (basic_block dest, basic_block src)
 bool
 jump_threader::thread_around_empty_blocks (edge taken_edge,
 					   bitmap visited,
-					   vec<jump_thread_edge *> *path)
+					   jump_thread_path *path)
 {
   basic_block bb = taken_edge->dest;
   gimple_stmt_iterator gsi;
@@ -1002,7 +1002,7 @@ jump_threader::thread_around_empty_blocks (edge taken_edge,
 
 int
 jump_threader::thread_through_normal_block (edge e,
-					    vec<jump_thread_edge *> *path,
+					    jump_thread_path *path,
 					    bitmap visited)
 {
   /* We want to record any equivalences created by traversing E.  */
@@ -1184,7 +1184,7 @@ jump_threader::thread_across_edge (edge e)
 
   stmt_count = 0;
 
-  vec<jump_thread_edge *> *path = new vec<jump_thread_edge *> ();
+  jump_thread_path *path = new jump_thread_path ();
   bitmap_clear (visited);
   bitmap_set_bit (visited, e->src->index);
   bitmap_set_bit (visited, e->dest->index);
@@ -1282,7 +1282,7 @@ jump_threader::thread_across_edge (edge e)
 	bitmap_set_bit (visited, e->src->index);
 	bitmap_set_bit (visited, e->dest->index);
 	bitmap_set_bit (visited, taken_edge->dest->index);
-        vec<jump_thread_edge *> *path = new vec<jump_thread_edge *> ();
+	jump_thread_path *path = new jump_thread_path ();
 
 	/* Record whether or not we were able to thread through a successor
 	   of E->dest.  */
diff --git a/gcc/tree-ssa-threadedge.h b/gcc/tree-ssa-threadedge.h
index 5468a9ca2d7..0ba990b37ee 100644
--- a/gcc/tree-ssa-threadedge.h
+++ b/gcc/tree-ssa-threadedge.h
@@ -49,11 +49,12 @@ private:
 					  tree op1,
 					  unsigned limit);
 
+  // FIXME: Change order of arguments with these 2 funcs.
   bool thread_around_empty_blocks (edge,
 				   bitmap visited,
-				   vec<class jump_thread_edge *> *path);
+				   class jump_thread_path *path);
   int thread_through_normal_block (edge,
-				   vec<jump_thread_edge *> *path,
+				   jump_thread_path *path,
 				   bitmap visited);
   void thread_across_edge (edge);
   bool record_temporary_equivalences_from_phis (edge);
diff --git a/gcc/tree-ssa-threadupdate.c b/gcc/tree-ssa-threadupdate.c
index ee99c125338..7df224cbfde 100644
--- a/gcc/tree-ssa-threadupdate.c
+++ b/gcc/tree-ssa-threadupdate.c
@@ -128,8 +128,7 @@ struct redirection_data : free_ptr_hash<redirection_data>
      which they appear in the jump thread path.  */
   basic_block dup_blocks[2];
 
-  /* The jump threading path.  */
-  vec<jump_thread_edge *> *path;
+  jump_thread_path *path;
 
   /* A list of incoming edges which we want to thread to the
      same path.  */
@@ -158,7 +157,7 @@ jump_thread_path_registry::~jump_thread_path_registry ()
    edge in the path.  */
 
 static void
-dump_jump_thread_path (FILE *dump_file, vec<jump_thread_edge *> path,
+dump_jump_thread_path (FILE *dump_file, jump_thread_path path,
 		       bool registering)
 {
   fprintf (dump_file,
@@ -199,7 +198,7 @@ dump_jump_thread_path (FILE *dump_file, vec<jump_thread_edge *> path,
 inline hashval_t
 redirection_data::hash (const redirection_data *p)
 {
-  vec<jump_thread_edge *> *path = p->path;
+  jump_thread_path *path = p->path;
   return path->last ()->e->dest->index;
 }
 
@@ -208,8 +207,8 @@ redirection_data::hash (const redirection_data *p)
 inline int
 redirection_data::equal (const redirection_data *p1, const redirection_data *p2)
 {
-  vec<jump_thread_edge *> *path1 = p1->path;
-  vec<jump_thread_edge *> *path2 = p2->path;
+  jump_thread_path *path1 = p1->path;
+  jump_thread_path *path2 = p2->path;
 
   if (path1->length () != path2->length ())
     return false;
@@ -261,7 +260,7 @@ struct ssa_local_info_t
 /* When we start updating the CFG for threading, data necessary for jump
    threading is attached to the AUX field for the incoming edge.  Use these
    macros to access the underlying structure attached to the AUX field.  */
-#define THREAD_PATH(E) ((vec<jump_thread_edge *> *)(E)->aux)
+#define THREAD_PATH(E) ((jump_thread_path *)(E)->aux)
 
 /* Remove the last statement in block BB if it is a control statement
    Also remove all outgoing edges except the edge which reaches DEST_BB.
@@ -361,7 +360,7 @@ jump_thread_path_registry::lookup_redirection_data (edge e,
 {
   struct redirection_data **slot;
   struct redirection_data *elt;
-  vec<jump_thread_edge *> *path = THREAD_PATH (e);
+  jump_thread_path *path = THREAD_PATH (e);
 
   /* Build a hash table element so we can see if E is already
      in the table.  */
@@ -446,7 +445,7 @@ copy_phi_arg_into_existing_phi (edge src_e, edge tgt_e)
    Return DEF directly if either PATH or idx is ZERO.  */
 
 static tree
-get_value_locus_in_path (tree def, vec<jump_thread_edge *> *path,
+get_value_locus_in_path (tree def, jump_thread_path *path,
 			 basic_block bb, int idx, location_t *locus)
 {
   tree arg;
@@ -492,7 +491,7 @@ get_value_locus_in_path (tree def, vec<jump_thread_edge *> *path,
 
 static void
 copy_phi_args (basic_block bb, edge src_e, edge tgt_e,
-	       vec<jump_thread_edge *> *path, int idx)
+	       jump_thread_path *path, int idx)
 {
   gphi_iterator gsi;
   int src_indx = src_e->dest_idx;
@@ -521,7 +520,7 @@ copy_phi_args (basic_block bb, edge src_e, edge tgt_e,
 
 static void
 update_destination_phis (basic_block orig_bb, basic_block new_bb,
-			 vec<jump_thread_edge *> *path, int idx)
+			 jump_thread_path *path, int idx)
 {
   edge_iterator ei;
   edge e;
@@ -578,7 +577,7 @@ create_edge_and_update_destination_phis (struct redirection_data *rd,
    any additional blocks that need to be duplicated.  Otherwise,
    return FALSE.  */
 static bool
-any_remaining_duplicated_blocks (vec<jump_thread_edge *> *path,
+any_remaining_duplicated_blocks (jump_thread_path *path,
 				 unsigned int start)
 {
   for (unsigned int i = start + 1; i < path->length (); i++)
@@ -696,7 +695,7 @@ compute_path_counts (struct redirection_data *rd,
 		     profile_count *path_out_count_ptr)
 {
   edge e = rd->incoming_edges->e;
-  vec<jump_thread_edge *> *path = THREAD_PATH (e);
+  jump_thread_path *path = THREAD_PATH (e);
   edge elast = path->last ()->e;
   profile_count nonpath_count = profile_count::zero ();
   bool has_joiner = false;
@@ -729,7 +728,7 @@ compute_path_counts (struct redirection_data *rd,
   edge_iterator ei;
   FOR_EACH_EDGE (ein, ei, e->dest->preds)
     {
-      vec<jump_thread_edge *> *ein_path = THREAD_PATH (ein);
+      jump_thread_path *ein_path = THREAD_PATH (ein);
       /* Simply check the incoming edge src against the set captured above.  */
       if (ein_path
 	  && bitmap_bit_p (in_edge_srcs, (*ein_path)[0]->e->src->index))
@@ -944,7 +943,7 @@ ssa_fix_duplicate_block_edges (struct redirection_data *rd,
 {
   bool multi_incomings = (rd->incoming_edges->next != NULL);
   edge e = rd->incoming_edges->e;
-  vec<jump_thread_edge *> *path = THREAD_PATH (e);
+  jump_thread_path *path = THREAD_PATH (e);
   edge elast = path->last ()->e;
   profile_count path_in_count = profile_count::zero ();
   profile_count path_out_count = profile_count::zero ();
@@ -1097,7 +1096,7 @@ ssa_create_duplicates (struct redirection_data **slot,
      Note the search starts with the third edge on the path.  The first
      edge is the incoming edge, the second edge always has its source
      duplicated.  Thus we start our search with the third edge.  */
-  vec<jump_thread_edge *> *path = rd->path;
+  jump_thread_path *path = rd->path;
   for (unsigned int i = 2; i < path->length (); i++)
     {
       if ((*path)[i]->type == EDGE_COPY_SRC_BLOCK
@@ -1251,7 +1250,7 @@ ssa_redirect_edges (struct redirection_data **slot,
   for (el = rd->incoming_edges; el; el = next)
     {
       edge e = el->e;
-      vec<jump_thread_edge *> *path = THREAD_PATH (e);
+      jump_thread_path *path = THREAD_PATH (e);
 
       /* Go ahead and free this element from the list.  Doing this now
 	 avoids the need for another list walk when we destroy the hash
@@ -1372,7 +1371,7 @@ jump_thread_path_registry::thread_block_1 (basic_block bb,
       if (e->aux == NULL)
 	continue;
 
-      vec<jump_thread_edge *> *path = THREAD_PATH (e);
+      jump_thread_path *path = THREAD_PATH (e);
 
       if (((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK && !joiners)
 	  || ((*path)[1]->type == EDGE_COPY_SRC_BLOCK && joiners))
@@ -1703,7 +1702,7 @@ jump_thread_path_registry::thread_through_loop_header
 	      goto fail;
 	    }
 
-	  vec<jump_thread_edge *> *path = THREAD_PATH (e);
+	  jump_thread_path *path = THREAD_PATH (e);
 
 	  if ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK)
 	    goto fail;
@@ -1790,7 +1789,7 @@ fail:
   /* We failed to thread anything.  Cancel the requests.  */
   FOR_EACH_EDGE (e, ei, header->preds)
     {
-      vec<jump_thread_edge *> *path = THREAD_PATH (e);
+      jump_thread_path *path = THREAD_PATH (e);
 
       if (path)
 	{
@@ -1887,7 +1886,7 @@ jump_thread_path_registry::mark_threaded_blocks (bitmap threaded_blocks)
      joiner block.  */
   for (i = 0; i < m_paths.length (); i++)
     {
-      vec<jump_thread_edge *> *path = m_paths[i];
+      jump_thread_path *path = m_paths[i];
 
       if (path->length () > 1
 	  && (*path)[1]->type != EDGE_COPY_SRC_JOINER_BLOCK)
@@ -1908,7 +1907,7 @@ jump_thread_path_registry::mark_threaded_blocks (bitmap threaded_blocks)
      case where there is already a path for that incoming edge.  */
   for (i = 0; i < m_paths.length ();)
     {
-      vec<jump_thread_edge *> *path = m_paths[i];
+      jump_thread_path *path = m_paths[i];
 
       if (path->length () > 1
 	  && (*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK)
@@ -1937,7 +1936,7 @@ jump_thread_path_registry::mark_threaded_blocks (bitmap threaded_blocks)
      them, and either finish converting them or cancel them.  */
   for (i = 0; i < m_paths.length ();)
     {
-      vec<jump_thread_edge *> *path = m_paths[i];
+      jump_thread_path *path = m_paths[i];
       edge e = (*path)[0]->e;
 
       if (path->length () > 1
@@ -1990,7 +1989,7 @@ jump_thread_path_registry::mark_threaded_blocks (bitmap threaded_blocks)
 	  FOR_EACH_EDGE (e, ei, BASIC_BLOCK_FOR_FN (cfun, i)->preds)
 	    if (e->aux)
 	      {
-		vec<jump_thread_edge *> *path = THREAD_PATH (e);
+		jump_thread_path *path = THREAD_PATH (e);
 
 		unsigned int j;
 		for (j = 1; j < path->length (); j++)
@@ -2044,7 +2043,7 @@ jump_thread_path_registry::mark_threaded_blocks (bitmap threaded_blocks)
 	{
 	  if (e->aux)
 	    {
-	      vec<jump_thread_edge *> *path = THREAD_PATH (e);
+	      jump_thread_path *path = THREAD_PATH (e);
 	      bool have_joiner = ((*path)[1]->type == EDGE_COPY_SRC_JOINER_BLOCK);
 
 	      if (have_joiner)
@@ -2076,7 +2075,7 @@ jump_thread_path_registry::mark_threaded_blocks (bitmap threaded_blocks)
 	{
 	  if (e->aux)
 	    {
-	      vec<jump_thread_edge *> *path = THREAD_PATH (e);
+	      jump_thread_path *path = THREAD_PATH (e);
 
 	      for (unsigned int i = 0, crossed_headers = 0;
 		   i < path->length ();
@@ -2133,7 +2132,7 @@ bb_in_bbs (basic_block bb, basic_block *bbs, int n)
 void
 jump_thread_path_registry::debug_path (FILE *dump_file, int pathno)
 {
-  vec<jump_thread_edge *> *p = m_paths[pathno];
+  jump_thread_path *p = m_paths[pathno];
   fprintf (dump_file, "path: ");
   for (unsigned i = 0; i < p->length (); ++i)
     fprintf (dump_file, "%d -> %d, ",
@@ -2160,7 +2159,7 @@ bool
 jump_thread_path_registry::rewire_first_differing_edge (unsigned path_num,
 							unsigned edge_num)
 {
-  vec<jump_thread_edge *> *path = m_paths[path_num];
+  jump_thread_path *path = m_paths[path_num];
   edge &e = (*path)[edge_num]->e;
   if (dump_file && (dump_flags & TDF_DETAILS))
     fprintf (dump_file, "rewiring edge candidate: %d -> %d\n",
@@ -2206,7 +2205,7 @@ void
 jump_thread_path_registry::adjust_paths_after_duplication
 	(unsigned curr_path_num)
 {
-  vec<jump_thread_edge *> *curr_path = m_paths[curr_path_num];
+  jump_thread_path *curr_path = m_paths[curr_path_num];
   gcc_assert ((*curr_path)[0]->type == EDGE_FSM_THREAD);
 
   if (dump_file && (dump_flags & TDF_DETAILS))
@@ -2225,7 +2224,7 @@ jump_thread_path_registry::adjust_paths_after_duplication
 	}
       /* Make sure the candidate to adjust starts with the same path
 	 as the recently threaded path and is an FSM thread.  */
-      vec<jump_thread_edge *> *cand_path = m_paths[cand_path_num];
+      jump_thread_path *cand_path = m_paths[cand_path_num];
       if ((*cand_path)[0]->type != EDGE_FSM_THREAD
 	  || (*cand_path)[0]->e != (*curr_path)[0]->e)
 	{
@@ -2466,7 +2465,7 @@ jump_thread_path_registry::duplicate_thread_path (edge entry,
 /* Return true when PATH is a valid jump-thread path.  */
 
 static bool
-valid_jump_thread_path (vec<jump_thread_edge *> *path)
+valid_jump_thread_path (jump_thread_path *path)
 {
   unsigned len = path->length ();
 
@@ -2530,7 +2529,7 @@ jump_thread_path_registry::thread_through_all_blocks
     for (i = 0; i < m_paths.length (); )
       {
 	unsigned int j;
-	vec<jump_thread_edge *> *path = m_paths[i];
+	jump_thread_path *path = m_paths[i];
 
 	for (j = 0; j < path->length (); j++)
 	  {
@@ -2551,7 +2550,7 @@ jump_thread_path_registry::thread_through_all_blocks
   /* Jump-thread all FSM threads before other jump-threads.  */
   for (i = 0; i < m_paths.length ();)
     {
-      vec<jump_thread_edge *> *path = m_paths[i];
+      jump_thread_path *path = m_paths[i];
       edge entry = (*path)[0]->e;
 
       /* Only code-generate FSM jump-threads in this loop.  */
@@ -2605,7 +2604,7 @@ jump_thread_path_registry::thread_through_all_blocks
      jump-threaded.  */
   for (i = 0; i < m_paths.length ();)
     {
-      vec<jump_thread_edge *> *path = m_paths[i];
+      jump_thread_path *path = m_paths[i];
       edge entry = (*path)[0]->e;
 
       /* Do not jump-thread twice from the same block.  */
@@ -2691,7 +2690,7 @@ jump_thread_path_registry::thread_through_all_blocks
    each entry in the vector, then the container.  */
 
 void
-delete_jump_thread_path (vec<jump_thread_edge *> *path)
+delete_jump_thread_path (jump_thread_path *path)
 {
   for (unsigned int i = 0; i < path->length (); i++)
     delete (*path)[i];
@@ -2708,7 +2707,7 @@ delete_jump_thread_path (vec<jump_thread_edge *> *path)
    after fixing the SSA graph.  */
 
 void
-jump_thread_path_registry::register_jump_thread (vec<jump_thread_edge *> *path)
+jump_thread_path_registry::register_jump_thread (jump_thread_path *path)
 {
   if (!dbg_cnt (registered_jump_thread))
     {
diff --git a/gcc/tree-ssa-threadupdate.h b/gcc/tree-ssa-threadupdate.h
index 7d847ee0791..99e54ff13b9 100644
--- a/gcc/tree-ssa-threadupdate.h
+++ b/gcc/tree-ssa-threadupdate.h
@@ -30,7 +30,7 @@ class jump_thread_path_registry
 public:
   jump_thread_path_registry ();
   ~jump_thread_path_registry ();
-  void register_jump_thread (vec <class jump_thread_edge *> *);
+  void register_jump_thread (class jump_thread_path *);
   void remove_jump_threads_including (edge);
   // Perform CFG changes after all threadable candidates have been
   // registered.
@@ -53,7 +53,7 @@ private:
 				   bool may_peel_loop_headers);
   class redirection_data *lookup_redirection_data (edge e, enum insert_option);
 
-  vec<vec<jump_thread_edge *> *> m_paths;
+  vec<class jump_thread_path *> m_paths;
 
   hash_table<struct removed_edges> *m_removed_edges;
 
@@ -88,23 +88,19 @@ public:
 class jump_thread_path
 {
 public:
-  jump_thread_path () { m_path = new vec<jump_thread_edge *> (); }
-  jump_thread_edge *&operator[] (int i) { return (*m_path)[i]; }
-  jump_thread_edge *&last (void) { return m_path->last (); }
-  void safe_push (jump_thread_edge *e) { m_path->safe_push (e); }
-  unsigned length () { return m_path->length (); }
-  void release ()
+  jump_thread_path () { m_path.create (5); }
+  jump_thread_edge *&operator[] (int i) { return m_path[i]; }
+  jump_thread_edge *&last (void) { return m_path.last (); }
+  void safe_push (jump_thread_edge *e) { m_path.safe_push (e); }
+  unsigned length () { return m_path.length (); }
+  void release () { m_path.release (); }
+  void block_remove (unsigned ix, unsigned len)
   {
-    for (unsigned int i = 0; i < m_path->length (); i++)
-      delete (*m_path)[i];
-    m_path->release();
-    memset (m_path, 0x13, sizeof (*m_path));
-    delete m_path;
-    m_path = 0;
+    return m_path.block_remove (ix, len);
   }
 
 private:
-  vec<jump_thread_edge *> *m_path;
+  vec<jump_thread_edge *> m_path;
 };
 
 // Rather than search all the edges in jump thread paths each time DOM
@@ -117,7 +113,7 @@ struct removed_edges : nofree_ptr_hash<edge_def>
   static bool equal (edge e1, edge e2) { return e1 == e2; }
 };
 
-extern void delete_jump_thread_path (vec <jump_thread_edge *> *);
+extern void delete_jump_thread_path (jump_thread_path *);
 extern unsigned int estimate_threading_killed_stmts (basic_block);
 
 enum bb_dom_status
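
For readers skimming the patch, here is a minimal standalone sketch of the
pattern it establishes.  This is not GCC code: GCC's vec<> template and edge
type are modeled with std::vector and an empty struct so the example compiles
on its own, and delete_example_path and the main function are illustrative
stand-ins for delete_jump_thread_path and the call site in
convert_and_register_current_path shown in the hunks above.

/* Standalone sketch only -- not GCC code.  GCC's vec<> is modeled with
   std::vector and 'edge' with a pointer to an empty struct so this
   compiles outside the tree.  */

#include <vector>

struct edge_def {};                 /* Empty stand-in for GCC's edge_def.  */
typedef edge_def *edge;

enum jump_thread_edge_type
{
  EDGE_FSM_THREAD,
  EDGE_NO_COPY_SRC_BLOCK
};

struct jump_thread_edge
{
  jump_thread_edge (edge e_, jump_thread_edge_type type_)
    : e (e_), type (type_) {}
  edge e;
  jump_thread_edge_type type;
};

/* The shape of the wrapper the patch introduces: the path owns an
   embedded container instead of callers juggling a bare
   vec<jump_thread_edge *> *.  */

class jump_thread_path
{
public:
  jump_thread_edge *&operator[] (unsigned i) { return m_path[i]; }
  jump_thread_edge *&last () { return m_path.back (); }
  void safe_push (jump_thread_edge *e) { m_path.push_back (e); }
  unsigned length () const { return m_path.size (); }
  void release () { m_path.clear (); }

private:
  std::vector<jump_thread_edge *> m_path;
};

/* Illustrative stand-in for delete_jump_thread_path: delete each entry,
   then drop the container.  */

static void
delete_example_path (jump_thread_path *path)
{
  for (unsigned i = 0; i < path->length (); i++)
    delete (*path)[i];
  path->release ();
  delete path;
}

/* Illustrative stand-in for the call-site pattern in
   convert_and_register_current_path: build the path, push the edges,
   then hand it off (here we simply free it).  */

int
main ()
{
  edge_def a, b;
  jump_thread_path *path = new jump_thread_path ();
  path->safe_push (new jump_thread_edge (&a, EDGE_FSM_THREAD));
  path->safe_push (new jump_thread_edge (&b, EDGE_NO_COPY_SRC_BLOCK));
  /* In GCC this would be m_registry.register_jump_thread (path).  */
  delete_example_path (path);
  return 0;
}

The point of the refactor, as visible in tree-ssa-threadupdate.h, is that
jump_thread_path now embeds its vector by value (m_path.create (5)), so
callers allocate a single object instead of a bare vec<jump_thread_edge *>,
while delete_jump_thread_path still frees each entry and then the container.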

