* Stabilize inliner Fibonacci heap
@ 2023-04-21 12:03 Jan Hubicka
From: Jan Hubicka @ 2023-04-21 12:03 UTC (permalink / raw)
  To: michal, rguenther, gcc-patches

Hi,
This fixes another problem Michal noticed while working on incremental
WHOPR.  The Fibonacci heap can change its behaviour quite significantly,
for no good reason, when multiple edges with the same key occur.  This is
quite common for small functions.

This patch stabilizes the order by including the edge uid in the heap key.
Again, I think this is a good idea regardless of the incremental WPA project,
since we do not want random changes in inline decisions.
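
For illustration, here is a minimal standalone sketch of the tie-breaking
idea (not part of the patch); it uses double in place of sreal and a plain
std::sort instead of the fibonacci_heap, and the toy_badness name is made
up for the example:

  // Sketch: equal badness values are ordered by edge uid, so the final
  // order is deterministic regardless of insertion order.
  #include <algorithm>
  #include <cstdio>
  #include <vector>

  struct toy_badness
  {
    double badness;  // primary key: lower badness is inlined first
    int uid;         // tie breaker: call-graph edge uid

    bool operator< (const toy_badness &other) const
    {
      if (badness != other.badness)
        return badness < other.badness;
      return uid < other.uid;
    }
  };

  int
  main ()
  {
    // Two of the three calls have identical badness.
    std::vector<toy_badness> q = { { -2.0, 7 }, { -5.0, 3 }, { -2.0, 1 } };
    std::sort (q.begin (), q.end ());
    for (const toy_badness &b : q)
      printf ("badness %.1f uid %d\n", b.badness, b.uid);
    // Always prints uid 3, then uid 1, then uid 7.
    return 0;
  }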

Bootstrapped/regtested x86_64-linux, plan to commit it shortly.
gcc/ChangeLog:

	* ipa-inline.cc (class inline_badness): New class.
	(edge_heap_t, edge_heap_node_t): Use inline_badness for badness instead
	of sreal.
	(update_edge_key): Update.
	(lookup_recursive_calls): Likewise.
	(recursive_inlining): Likewise.
	(add_new_edges_to_heap): Likewise.
	(inline_small_functions): Likewise.

diff --git a/gcc/ipa-inline.cc b/gcc/ipa-inline.cc
index 474fbff2057..efc8df7d4e0 100644
--- a/gcc/ipa-inline.cc
+++ b/gcc/ipa-inline.cc
@@ -120,8 +120,54 @@ along with GCC; see the file COPYING3.  If not see
 #include "attribs.h"
 #include "asan.h"
 
-typedef fibonacci_heap <sreal, cgraph_edge> edge_heap_t;
-typedef fibonacci_node <sreal, cgraph_edge> edge_heap_node_t;
+/* The inliner uses a greedy algorithm to inline calls in priority order.
+   Badness is used as the key in a Fibonacci heap and roughly corresponds
+   to the negation of the benefit-to-cost ratio.
+   When multiple calls have the same priority, we use edge uids to
+   stabilize the outcome.  */
+class inline_badness
+{
+public:
+  sreal badness;
+  int uid;
+  inline_badness ()
+  : badness (sreal::min ()), uid (0)
+  {
+  }
+  inline_badness (cgraph_edge *e, sreal b)
+  : badness (b), uid (e->get_uid ())
+  {
+  }
+  bool operator<= (const inline_badness &other)
+  {
+    if (badness != other.badness)
+      return badness <= other.badness;
+    return uid <= other.uid;
+  }
+  bool operator== (const inline_badness &other)
+  {
+    return badness == other.badness && uid == other.uid;
+  }
+  bool operator!= (const inline_badness &other)
+  {
+    return badness != other.badness || uid != other.uid;
+  }
+  bool operator< (const inline_badness &other)
+  {
+    if (badness != other.badness)
+      return badness < other.badness;
+    return uid < other.uid;
+  }
+  bool operator> (const inline_badness &other)
+  {
+    if (badness != other.badness)
+      return badness > other.badness;
+    return uid > other.uid;
+  }
+};
+
+typedef fibonacci_heap <inline_badness, cgraph_edge> edge_heap_t;
+typedef fibonacci_node <inline_badness, cgraph_edge> edge_heap_node_t;
 
 /* Statistics we collect about inlining algorithm.  */
 static int overall_size;
@@ -1399,7 +1445,7 @@ update_edge_key (edge_heap_t *heap, struct cgraph_edge *edge)
 	 We do lazy increases: after extracting minimum if the key
 	 turns out to be out of date, it is re-inserted into heap
 	 with correct value.  */
-      if (badness < n->get_key ())
+      if (badness < n->get_key ().badness)
 	{
 	  if (dump_file && (dump_flags & TDF_DETAILS))
 	    {
@@ -1407,10 +1453,11 @@ update_edge_key (edge_heap_t *heap, struct cgraph_edge *edge)
 		       "  decreasing badness %s -> %s, %f to %f\n",
 		       edge->caller->dump_name (),
 		       edge->callee->dump_name (),
-		       n->get_key ().to_double (),
+		       n->get_key ().badness.to_double (),
 		       badness.to_double ());
 	    }
-	  heap->decrease_key (n, badness);
+	  inline_badness b (edge, badness);
+	  heap->decrease_key (n, b);
 	}
     }
   else
@@ -1423,7 +1470,8 @@ update_edge_key (edge_heap_t *heap, struct cgraph_edge *edge)
 		    edge->callee->dump_name (),
 		    badness.to_double ());
 	 }
-      edge->aux = heap->insert (badness, edge);
+      inline_badness b (edge, badness);
+      edge->aux = heap->insert (b, edge);
     }
 }
 
@@ -1630,7 +1678,10 @@ lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where,
     if (e->callee == node
 	|| (e->callee->ultimate_alias_target (&avail, e->caller) == node
 	    && avail > AVAIL_INTERPOSABLE))
-      heap->insert (-e->sreal_frequency (), e);
+    {
+      inline_badness b (e, -e->sreal_frequency ());
+      heap->insert (b, e);
+    }
   for (e = where->callees; e; e = e->next_callee)
     if (!e->inline_failed)
       lookup_recursive_calls (node, e->callee, heap);
@@ -1649,7 +1700,8 @@ recursive_inlining (struct cgraph_edge *edge,
 		      ? edge->caller->inlined_to : edge->caller);
   int limit = opt_for_fn (to->decl,
 			  param_max_inline_insns_recursive_auto);
-  edge_heap_t heap (sreal::min ());
+  inline_badness b (edge, sreal::min ());
+  edge_heap_t heap (b);
   struct cgraph_node *node;
   struct cgraph_edge *e;
   struct cgraph_node *master_clone = NULL, *next;
@@ -1809,7 +1861,10 @@ add_new_edges_to_heap (edge_heap_t *heap, vec<cgraph_edge *> &new_edges)
 	  && can_inline_edge_p (edge, true)
 	  && want_inline_small_function_p (edge, true)
 	  && can_inline_edge_by_limits_p (edge, true))
-        edge->aux = heap->insert (edge_badness (edge, false), edge);
+	{
+	  inline_badness b (edge, edge_badness (edge, false));
+	  edge->aux = heap->insert (b, edge);
+	}
     }
 }
 
@@ -1950,7 +2005,8 @@ inline_small_functions (void)
 {
   struct cgraph_node *node;
   struct cgraph_edge *edge;
-  edge_heap_t edge_heap (sreal::min ());
+  inline_badness b;
+  edge_heap_t edge_heap (b);
   auto_bitmap updated_nodes;
   int min_size;
   auto_vec<cgraph_edge *> new_indirect_edges;
@@ -2088,7 +2144,7 @@ inline_small_functions (void)
     {
       int old_size = overall_size;
       struct cgraph_node *where, *callee;
-      sreal badness = edge_heap.min_key ();
+      sreal badness = edge_heap.min_key ().badness;
       sreal current_badness;
       int growth;
 
@@ -2141,9 +2197,10 @@ inline_small_functions (void)
         current_badness = edge_badness (edge, false);
       if (current_badness != badness)
 	{
-	  if (edge_heap.min () && current_badness > edge_heap.min_key ())
+	  if (edge_heap.min () && current_badness > edge_heap.min_key ().badness)
 	    {
-	      edge->aux = edge_heap.insert (current_badness, edge);
+	      inline_badness b (edge, current_badness);
+	      edge->aux = edge_heap.insert (b, edge);
 	      continue;
 	    }
 	  else
