[gcc r14-275] Create a lazy ssa_cache.
From: Andrew Macleod @ 2023-04-26 19:26 UTC (permalink / raw)
  To: gcc-cvs

https://gcc.gnu.org/g:0a38f677463ff8a4fb61b049263aa596ef6471a7

commit r14-275-g0a38f677463ff8a4fb61b049263aa596ef6471a7
Author: Andrew MacLeod <amacleod@redhat.com>
Date:   Tue Mar 28 11:35:26 2023 -0400

    Create a lazy ssa_cache.
    
    Sparsely used SSA caches benefit from using a bitmap to determine
    whether a name already has an entry.  Use the new lazy cache in the
    path query, replacing the private bitmap it kept to track the same
    information, and use it in the "assume" query class as well.  (A
    standalone sketch of the idea follows the ChangeLog below.)
    
            PR tree-optimization/108697
            * gimple-range-cache.cc (ssa_cache::clear_range): Do
            not grow the vector on an out-of-range query.
            (ssa_cache::dump): Use dump_range_query instead of get_range.
            (ssa_cache::dump_range_query): New.
            (ssa_lazy_cache::dump_range_query): New.
            (ssa_lazy_cache::set_range): New.
            * gimple-range-cache.h (ssa_cache::dump_range_query): New.
            (class ssa_lazy_cache): New.
            (ssa_lazy_cache::ssa_lazy_cache): New.
            (ssa_lazy_cache::~ssa_lazy_cache): New.
            (ssa_lazy_cache::get_range): New.
            (ssa_lazy_cache::clear_range): New.
            (ssa_lazy_cache::clear): New.
            (ssa_lazy_cache::dump): New.
            * gimple-range-path.cc (path_range_query::path_range_query): Do
            not allocate an ssa_cache object or a has_cache bitmap.
            (path_range_query::~path_range_query): Do not free objects.
            (path_range_query::clear_cache): Remove.
            (path_range_query::get_cache): Adjust.
            (path_range_query::set_cache): Remove.
            (path_range_query::dump): Don't call through a pointer.
            (path_range_query::internal_range_of_expr): Set cache directly.
            (path_range_query::reset_path): Clear cache directly.
            (path_range_query::ssa_range_in_phi): Fold with globals only.
            (path_range_query::compute_ranges_in_phis): Simply set range.
            (path_range_query::compute_ranges_in_block): Call cache directly.
            * gimple-range-path.h (class path_range_query): Replace bitmap
            and cache pointer with lazy cache object.
            * gimple-range.h (class assume_query): Use ssa_lazy_cache.
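
A standalone sketch of the idea, for reference.  The pattern is to pair
the value table with an "active" bitmap so membership is decided by the
bitmap alone and the backing vector never needs to be zero-initialized.
The names below (lazy_cache, tab, active) are illustrative stand-ins,
not the GCC types (vec<vrange *>, bitmap, vrange_allocator) the patch
actually uses:

    #include <vector>
    #include <cassert>

    // Illustrative analogue of ssa_lazy_cache: a slot is valid only
    // while its bit in the active set is on, so stale table contents
    // are never observed and never need clearing.
    struct lazy_cache
    {
      std::vector<int> tab;        // stand-in for vec<vrange *> m_tab
      std::vector<bool> active;    // stand-in for the active_p bitmap

      // Mirrors ssa_lazy_cache::set_range: returns whether an entry
      // already existed for V before this call.
      bool set (unsigned v, int val)
      {
        if (v >= active.size ())
          active.resize (v + 1, false);
        bool had_entry = active[v];
        active[v] = true;
        if (v >= tab.size ())
          tab.resize (v + 1);
        tab[v] = val;
        return had_entry;
      }

      // The bitmap is consulted before the table, so an out-of-date
      // slot is unreachable.
      bool get (unsigned v, int &val) const
      {
        if (v >= active.size () || !active[v])
          return false;
        val = tab[v];
        return true;
      }

      // Clearing is a single bit flip; the table entry is left behind.
      void clear (unsigned v)
      {
        if (v < active.size ())
          active[v] = false;
      }
    };

    int
    main ()
    {
      lazy_cache c;
      int val;
      assert (!c.get (7, val));               // no entry yet
      assert (!c.set (7, 42));                // first set: no prior entry
      assert (c.get (7, val) && val == 42);
      c.clear (7);
      assert (!c.get (7, val));               // bit off, slot invisible
      return 0;
    }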

Diff:
---
 gcc/gimple-range-cache.cc | 45 +++++++++++++++++++++++++++++---
 gcc/gimple-range-cache.h  | 35 ++++++++++++++++++++++++-
 gcc/gimple-range-path.cc  | 65 ++++++++++-------------------------------------
 gcc/gimple-range-path.h   |  7 +----
 gcc/gimple-range.h        |  2 +-
 5 files changed, 92 insertions(+), 62 deletions(-)

diff --git a/gcc/gimple-range-cache.cc b/gcc/gimple-range-cache.cc
index 6de96f6b8a9..5510efba1ca 100644
--- a/gcc/gimple-range-cache.cc
+++ b/gcc/gimple-range-cache.cc
@@ -592,14 +592,14 @@ ssa_cache::set_range (tree name, const vrange &r)
   return m != NULL;
 }
 
-// Set the range for NAME to R in the global cache.
+// Clear the range for NAME in the ssa cache.
 
 void
 ssa_cache::clear_range (tree name)
 {
   unsigned v = SSA_NAME_VERSION (name);
   if (v >= m_tab.length ())
-    m_tab.safe_grow_cleared (num_ssa_names + 1);
+    return;
   m_tab[v] = NULL;
 }
 
@@ -624,7 +624,10 @@ ssa_cache::dump (FILE *f)
       if (!gimple_range_ssa_p (ssa_name (x)))
 	continue;
       Value_Range r (TREE_TYPE (ssa_name (x)));
-      if (get_range (r, ssa_name (x)) && !r.varying_p ())
+      // Invoke dump_range_query, a protected virtual version of
+      // get_range.  This avoids a performance impact on general queries
+      // while still allowing the dump routine to be shared.
+      if (dump_range_query (r, ssa_name (x)) && !r.varying_p ())
 	{
 	  if (print_header)
 	    {
@@ -646,6 +649,42 @@ ssa_cache::dump (FILE *f)
     fputc ('\n', f);
 }
 
+// Protected virtual get_range query for dumping.
+
+bool
+ssa_cache::dump_range_query (vrange &r, tree name) const
+{
+  return get_range (r, name);
+}
+
+// Protected virtual get_range query for dumping.
+
+bool
+ssa_lazy_cache::dump_range_query (vrange &r, tree name) const
+{
+  return get_range (r, name);
+}
+
+
+// Set range of NAME to R in a lazy cache.  Return FALSE if it did not already
+// have a range.
+
+bool
+ssa_lazy_cache::set_range (tree name, const vrange &r)
+{
+  unsigned v = SSA_NAME_VERSION (name);
+  if (!bitmap_set_bit (active_p, v))
+    {
+      // There is already an entry; simply set it.
+      gcc_checking_assert (v < m_tab.length ());
+      return ssa_cache::set_range (name, r);
+    }
+  if (v >= m_tab.length ())
+    m_tab.safe_grow (num_ssa_names + 1);
+  m_tab[v] = m_range_allocator->clone (r);
+  return false;
+}
+
 // --------------------------------------------------------------------------
 
 
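One detail worth noting in the new ssa_lazy_cache::set_range above:
GCC's bitmap_set_bit returns true only when the bit was previously
clear, so a single call both marks the name active and reports whether
an entry already existed.  A small standalone sketch of that
test-and-set idiom, with std::set standing in for GCC's bitmap type:

    #include <set>
    #include <cassert>

    // Mirrors bitmap_set_bit's contract: true when V was newly added.
    static bool
    test_and_set (std::set<unsigned> &bits, unsigned v)
    {
      return bits.insert (v).second;
    }

    int
    main ()
    {
      std::set<unsigned> active;
      // First set_range for a name: the bit is new, so the caller takes
      // the allocate-a-fresh-slot path and returns false (no prior range).
      assert (test_and_set (active, 7));
      // Subsequent set_range: already active, so the caller overwrites
      // the existing slot via ssa_cache::set_range instead.
      assert (!test_and_set (active, 7));
      return 0;
    }
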
diff --git a/gcc/gimple-range-cache.h b/gcc/gimple-range-cache.h
index 2d41f0c5c67..9032df9e3e3 100644
--- a/gcc/gimple-range-cache.h
+++ b/gcc/gimple-range-cache.h
@@ -63,11 +63,44 @@ public:
   void clear_range (tree name);
   void clear ();
   void dump (FILE *f = stderr);
-private:
+protected:
+  virtual bool dump_range_query (vrange &r, tree name) const;
   vec<vrange *> m_tab;
   vrange_allocator *m_range_allocator;
 };
 
+// This is the same as the ssa cache, except it maintains an active bitmap
+// rather than depending on a zeroed-out vector of pointers.  This is better
+// for sparsely/lightly used caches.
+// It could be made a fully derived class, but at this point there doesn't
+// seem to be a need to take the performance hit for it.
+
+class ssa_lazy_cache : protected ssa_cache
+{
+public:
+  inline ssa_lazy_cache () { active_p = BITMAP_ALLOC (NULL); }
+  inline ~ssa_lazy_cache () { BITMAP_FREE (active_p); }
+  bool set_range (tree name, const vrange &r);
+  inline bool get_range (vrange &r, tree name) const;
+  inline void clear_range (tree name)
+    { bitmap_clear_bit (active_p, SSA_NAME_VERSION (name)); }
+  inline void clear () { bitmap_clear (active_p); }
+  inline void dump (FILE *f = stderr) { ssa_cache::dump (f); }
+protected:
+  virtual bool dump_range_query (vrange &r, tree name) const;
+  bitmap active_p;
+};
+
+// Return TRUE if NAME has a range, and return it in R.
+
+bool
+ssa_lazy_cache::get_range (vrange &r, tree name) const
+{
+  if (!bitmap_bit_p (active_p, SSA_NAME_VERSION (name)))
+    return false;
+  return ssa_cache::get_range (r, name);
+}
+
 // This class provides all the caches a global ranger may need, and makes 
 // them available for gori-computes to query so outgoing edges can be
 // properly calculated.
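
The comment above class ssa_lazy_cache is also why the base is a
protected one rather than public: clients must not reach the ssa_cache
entry points that bypass the active-bit bookkeeping.  A minimal sketch
of that shape (cache_base and lazy are illustrative names, not the GCC
classes):

    // With a protected base, the unguarded base interface is sealed
    // off; only wrappers that maintain the bitmap remain callable.
    struct cache_base
    {
      void set (unsigned v) { /* write slot V */ (void) v; }
    };

    struct lazy : protected cache_base
    {
      void set (unsigned v)
      {
        // ...update the active bitmap first, then delegate.
        cache_base::set (v);
      }
    };

    int
    main ()
    {
      lazy c;
      c.set (3);  // OK: goes through the guarded wrapper
      // static_cast<cache_base &> (c).set (3);  // error: inaccessible base
      return 0;
    }
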
diff --git a/gcc/gimple-range-path.cc b/gcc/gimple-range-path.cc
index 07868df5e6f..7438b832b5a 100644
--- a/gcc/gimple-range-path.cc
+++ b/gcc/gimple-range-path.cc
@@ -40,8 +40,7 @@ path_range_query::path_range_query (gimple_ranger &ranger,
 				    const vec<basic_block> &path,
 				    const bitmap_head *dependencies,
 				    bool resolve)
-  : m_cache (new ssa_cache),
-    m_has_cache_entry (BITMAP_ALLOC (NULL)),
+  : m_cache (),
     m_ranger (ranger),
     m_resolve (resolve)
 {
@@ -51,8 +50,7 @@ path_range_query::path_range_query (gimple_ranger &ranger,
 }
 
 path_range_query::path_range_query (gimple_ranger &ranger, bool resolve)
-  : m_cache (new ssa_cache),
-    m_has_cache_entry (BITMAP_ALLOC (NULL)),
+  : m_cache (),
     m_ranger (ranger),
     m_resolve (resolve)
 {
@@ -62,8 +60,6 @@ path_range_query::path_range_query (gimple_ranger &ranger, bool resolve)
 path_range_query::~path_range_query ()
 {
   delete m_oracle;
-  BITMAP_FREE (m_has_cache_entry);
-  delete m_cache;
 }
 
 // Return TRUE if NAME is an exit dependency for the path.
@@ -75,15 +71,6 @@ path_range_query::exit_dependency_p (tree name)
 	  && bitmap_bit_p (m_exit_dependencies, SSA_NAME_VERSION (name)));
 }
 
-// Mark cache entry for NAME as unused.
-
-void
-path_range_query::clear_cache (tree name)
-{
-  unsigned v = SSA_NAME_VERSION (name);
-  bitmap_clear_bit (m_has_cache_entry, v);
-}
-
 // If NAME has a cache entry, return it in R, and return TRUE.
 
 inline bool
@@ -92,21 +79,7 @@ path_range_query::get_cache (vrange &r, tree name)
   if (!gimple_range_ssa_p (name))
     return get_global_range_query ()->range_of_expr (r, name);
 
-  unsigned v = SSA_NAME_VERSION (name);
-  if (bitmap_bit_p (m_has_cache_entry, v))
-    return m_cache->get_range (r, name);
-
-  return false;
-}
-
-// Set the cache entry for NAME to R.
-
-void
-path_range_query::set_cache (const vrange &r, tree name)
-{
-  unsigned v = SSA_NAME_VERSION (name);
-  bitmap_set_bit (m_has_cache_entry, v);
-  m_cache->set_range (name, r);
+  return m_cache.get_range (r, name);
 }
 
 void
@@ -130,7 +103,7 @@ path_range_query::dump (FILE *dump_file)
       fprintf (dump_file, "\n");
     }
 
-  m_cache->dump (dump_file);
+  m_cache.dump (dump_file);
 }
 
 void
@@ -174,7 +147,7 @@ path_range_query::internal_range_of_expr (vrange &r, tree name, gimple *stmt)
   if (m_resolve && defined_outside_path (name))
     {
       range_on_path_entry (r, name);
-      set_cache (r, name);
+      m_cache.set_range (name, r);
       return true;
     }
 
@@ -188,7 +161,7 @@ path_range_query::internal_range_of_expr (vrange &r, tree name, gimple *stmt)
 	  r.intersect (glob);
 	}
 
-      set_cache (r, name);
+      m_cache.set_range (name, r);
       return true;
     }
 
@@ -225,7 +198,7 @@ path_range_query::reset_path (const vec<basic_block> &path,
   m_path = path.copy ();
   m_pos = m_path.length () - 1;
   m_undefined_path = false;
-  bitmap_clear (m_has_cache_entry);
+  m_cache.clear ();
 
   compute_ranges (dependencies);
 }
@@ -255,7 +228,7 @@ path_range_query::ssa_range_in_phi (vrange &r, gphi *phi)
       if (m_resolve && m_ranger.range_of_expr (r, name, phi))
 	return;
 
-      // Try to fold the phi exclusively with global or cached values.
+      // Try to fold the phi exclusively with global values.
       // This will get things like PHI <5(99), 6(88)>.  We do this by
       // calling range_of_expr with no context.
       unsigned nargs = gimple_phi_num_args (phi);
@@ -264,7 +237,7 @@ path_range_query::ssa_range_in_phi (vrange &r, gphi *phi)
       for (size_t i = 0; i < nargs; ++i)
 	{
 	  tree arg = gimple_phi_arg_def (phi, i);
-	  if (range_of_expr (arg_range, arg, /*stmt=*/NULL))
+	  if (m_ranger.range_of_expr (arg_range, arg, /*stmt=*/NULL))
 	    r.union_ (arg_range);
 	  else
 	    {
@@ -348,8 +321,6 @@ path_range_query::range_defined_in_block (vrange &r, tree name, basic_block bb)
 void
 path_range_query::compute_ranges_in_phis (basic_block bb)
 {
-  auto_bitmap phi_set;
-
   // PHIs must be resolved simultaneously on entry to the block
   // because any dependencies must be satisfied with values on entry.
   // Thus, we calculate all PHIs first, and then update the cache at
@@ -365,16 +336,8 @@ path_range_query::compute_ranges_in_phis (basic_block bb)
 
       Value_Range r (TREE_TYPE (name));
       if (range_defined_in_block (r, name, bb))
-	{
-	  unsigned v = SSA_NAME_VERSION (name);
-	  set_cache (r, name);
-	  bitmap_set_bit (phi_set, v);
-	  // Pretend we don't have a cache entry for this name until
-	  // we're done with all PHIs.
-	  bitmap_clear_bit (m_has_cache_entry, v);
-	}
+	m_cache.set_range (name, r);
     }
-  bitmap_ior_into (m_has_cache_entry, phi_set);
 }
 
 // Return TRUE if relations may be invalidated after crossing edge E.
@@ -408,7 +371,7 @@ path_range_query::compute_ranges_in_block (basic_block bb)
     {
       tree name = ssa_name (i);
       if (ssa_defined_in_bb (name, bb))
-	clear_cache (name);
+	m_cache.clear_range (name);
     }
 
   // Solve dependencies defined in this block, starting with the PHIs...
@@ -421,7 +384,7 @@ path_range_query::compute_ranges_in_block (basic_block bb)
 
       if (gimple_code (SSA_NAME_DEF_STMT (name)) != GIMPLE_PHI
 	  && range_defined_in_block (r, name, bb))
-	set_cache (r, name);
+	m_cache.set_range (name, r);
     }
 
   if (at_exit ())
@@ -457,7 +420,7 @@ path_range_query::compute_ranges_in_block (basic_block bb)
 	  if (get_cache (cached_range, name))
 	    r.intersect (cached_range);
 
-	  set_cache (r, name);
+	  m_cache.set_range (name, r);
 	  if (DEBUG_SOLVER)
 	    {
 	      fprintf (dump_file, "outgoing_edge_range_p for ");
@@ -500,7 +463,7 @@ path_range_query::adjust_for_non_null_uses (basic_block bb)
 	r.set_varying (TREE_TYPE (name));
 
       if (m_ranger.m_cache.m_exit.maybe_adjust_range (r, name, bb))
-	set_cache (r, name);
+	m_cache.set_range (name, r);
     }
 }
 
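The PHI handling above keeps one invariant even after the cleanup: all
PHIs on entry to a block are resolved against values on entry, never
against each other's freshly computed results.  The old code enforced
that by deferring the has_cache bits; the new code gets it for free
because ssa_range_in_phi now folds arguments with global values only.
A standalone sketch of the invariant itself (the map-based environment
is illustrative, not GCC code):

    #include <map>
    #include <cassert>

    int
    main ()
    {
      // Entry state: a = 1, b = 2.  Two "PHIs" swap them: a' = b, b' = a.
      std::map<char, int> env = { { 'a', 1 }, { 'b', 2 } };

      // Compute every PHI from the entry snapshot first...
      std::map<char, int> pending;
      pending['a'] = env['b'];
      pending['b'] = env['a'];   // must read the entry value of 'a'

      // ...then publish.  Publishing eagerly would have made the second
      // PHI read the new 'a', yielding a = 2, b = 2 instead of a swap.
      for (const auto &p : pending)
        env[p.first] = p.second;

      assert (env['a'] == 2 && env['b'] == 1);
      return 0;
    }
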
diff --git a/gcc/gimple-range-path.h b/gcc/gimple-range-path.h
index 29e33c6c37b..34841e78c3d 100644
--- a/gcc/gimple-range-path.h
+++ b/gcc/gimple-range-path.h
@@ -54,9 +54,7 @@ private:
   path_oracle *get_path_oracle () { return (path_oracle *)m_oracle; }
 
   // Cache manipulation.
-  void set_cache (const vrange &r, tree name);
   bool get_cache (vrange &r, tree name);
-  void clear_cache (tree name);
 
   // Methods to compute ranges for the given path.
   bool range_defined_in_block (vrange &, tree name, basic_block bb);
@@ -83,10 +81,7 @@ private:
   void move_next ()	  { --m_pos; }
 
   // Range cache for SSA names.
-  ssa_cache *m_cache;
-
-  // Set for each SSA that has an active entry in the cache.
-  bitmap m_has_cache_entry;
+  ssa_lazy_cache m_cache;
 
   // Path being analyzed.
   auto_vec<basic_block> m_path;
diff --git a/gcc/gimple-range.h b/gcc/gimple-range.h
index 944e7692a0e..e3aa9475f5e 100644
--- a/gcc/gimple-range.h
+++ b/gcc/gimple-range.h
@@ -96,7 +96,7 @@ protected:
   void calculate_phi (gphi *phi, vrange &lhs_range, fur_source &src);
   void check_taken_edge (edge e, fur_source &src);
 
-  ssa_cache global;
+  ssa_lazy_cache global;
   gori_compute m_gori;
 };
