public inbox for gcc-patches@gcc.gnu.org
 help / color / mirror / Atom feed
From: Richard Sandiford <richard.sandiford@arm.com>
To: gcc-patches@gcc.gnu.org
Subject: [PATCH 22/23] Add rtl-ssa
Date: Fri, 13 Nov 2020 08:23:02 +0000	[thread overview]
Message-ID: <mptmtzl64yh.fsf@arm.com> (raw)
In-Reply-To: <mpth7ptad81.fsf@arm.com> (Richard Sandiford's message of "Fri, 13 Nov 2020 08:10:54 +0000")

This patch adds the RTL SSA infrastructure itself.  The following
fwprop.c patch will make use of it.

gcc/
	* configure.ac: Add rtl-ssa to the list of dependence directories.
	* configure: Regenerate.
	* Makefile.in (rtl-ssa-warn): New variable.
	(OBJS): Add the rtl-ssa object files.
	* emit-rtl.h (rtl_data::ssa): New field.
	* rtl-ssa.h: New file.
	* system.h: Include <functional> when INCLUDE_FUNCTIONAL is defined.
	* rtl-ssa: New directory.
---
 gcc/Makefile.in            |    6 +
 gcc/configure              |    2 +-
 gcc/configure.ac           |    2 +-
 gcc/emit-rtl.h             |    3 +
 gcc/rtl-ssa.h              |   71 ++
 gcc/rtl-ssa/access-utils.h |  553 +++++++++++++
 gcc/rtl-ssa/accesses.cc    | 1592 ++++++++++++++++++++++++++++++++++++
 gcc/rtl-ssa/accesses.h     | 1032 +++++++++++++++++++++++
 gcc/rtl-ssa/blocks.cc      | 1146 ++++++++++++++++++++++++++
 gcc/rtl-ssa/blocks.h       |  301 +++++++
 gcc/rtl-ssa/change-utils.h |  137 ++++
 gcc/rtl-ssa/changes.cc     | 1025 +++++++++++++++++++++++
 gcc/rtl-ssa/changes.h      |  118 +++
 gcc/rtl-ssa/functions.cc   |  325 ++++++++
 gcc/rtl-ssa/functions.h    |  433 ++++++++++
 gcc/rtl-ssa/insn-utils.h   |   46 ++
 gcc/rtl-ssa/insns.cc       |  718 ++++++++++++++++
 gcc/rtl-ssa/insns.h        |  505 ++++++++++++
 gcc/rtl-ssa/internals.inl  |  682 +++++++++++++++
 gcc/rtl-ssa/is-a.inl       |   98 +++
 gcc/rtl-ssa/member-fns.inl |  928 +++++++++++++++++++++
 gcc/rtl-ssa/movement.h     |  335 ++++++++
 gcc/system.h               |    3 +
 23 files changed, 10059 insertions(+), 2 deletions(-)
 create mode 100644 gcc/rtl-ssa.h
 create mode 100644 gcc/rtl-ssa/access-utils.h
 create mode 100644 gcc/rtl-ssa/accesses.cc
 create mode 100644 gcc/rtl-ssa/accesses.h
 create mode 100644 gcc/rtl-ssa/blocks.cc
 create mode 100644 gcc/rtl-ssa/blocks.h
 create mode 100644 gcc/rtl-ssa/change-utils.h
 create mode 100644 gcc/rtl-ssa/changes.cc
 create mode 100644 gcc/rtl-ssa/changes.h
 create mode 100644 gcc/rtl-ssa/functions.cc
 create mode 100644 gcc/rtl-ssa/functions.h
 create mode 100644 gcc/rtl-ssa/insn-utils.h
 create mode 100644 gcc/rtl-ssa/insns.cc
 create mode 100644 gcc/rtl-ssa/insns.h
 create mode 100644 gcc/rtl-ssa/internals.inl
 create mode 100644 gcc/rtl-ssa/is-a.inl
 create mode 100644 gcc/rtl-ssa/member-fns.inl
 create mode 100644 gcc/rtl-ssa/movement.h

diff --git a/gcc/Makefile.in b/gcc/Makefile.in
index 900bf11b0ba..e6907845118 100644
--- a/gcc/Makefile.in
+++ b/gcc/Makefile.in
@@ -207,6 +207,7 @@ VALGRIND_DRIVER_DEFINES = @valgrind_path_defines@
 # This is how we control whether or not the additional warnings are applied.
 .-warn = $(STRICT_WARN)
 build-warn = $(STRICT_WARN)
+rtl-ssa-warn = $(STRICT_WARN)
 GCC_WARN_CFLAGS = $(LOOSE_WARN) $(C_LOOSE_WARN) $($(@D)-warn) $(if $(filter-out $(STRICT_WARN),$($(@D)-warn)),,$(C_STRICT_WARN)) $(NOCOMMON_FLAG) $($@-warn)
 GCC_WARN_CXXFLAGS = $(LOOSE_WARN) $($(@D)-warn) $(NOCOMMON_FLAG) $($@-warn)
 
@@ -1519,6 +1520,11 @@ OBJS = \
 	reorg.o \
 	resource.o \
 	rtl-error.o \
+	rtl-ssa/accesses.o \
+	rtl-ssa/blocks.o \
+	rtl-ssa/changes.o \
+	rtl-ssa/functions.o \
+	rtl-ssa/insns.o \
 	rtl-tests.o \
 	rtl.o \
 	rtlhash.o \
diff --git a/gcc/configure b/gcc/configure
index 9d2fd0dc30b..04ba4617e0d 100755
--- a/gcc/configure
+++ b/gcc/configure
@@ -32769,7 +32769,7 @@ $as_echo "$as_me: executing $ac_file commands" >&6;}
     "depdir":C) $SHELL $ac_aux_dir/mkinstalldirs $DEPDIR ;;
     "gccdepdir":C)
   ${CONFIG_SHELL-/bin/sh} $ac_aux_dir/mkinstalldirs build/$DEPDIR
-  for lang in $subdirs c-family common analyzer
+  for lang in $subdirs c-family common analyzer rtl-ssa
   do
       ${CONFIG_SHELL-/bin/sh} $ac_aux_dir/mkinstalldirs $lang/$DEPDIR
   done ;;
diff --git a/gcc/configure.ac b/gcc/configure.ac
index 73034bb902b..716679d0096 100644
--- a/gcc/configure.ac
+++ b/gcc/configure.ac
@@ -1242,7 +1242,7 @@ AC_CHECK_HEADERS(ext/hash_map)
 ZW_CREATE_DEPDIR
 AC_CONFIG_COMMANDS([gccdepdir],[
   ${CONFIG_SHELL-/bin/sh} $ac_aux_dir/mkinstalldirs build/$DEPDIR
-  for lang in $subdirs c-family common analyzer
+  for lang in $subdirs c-family common analyzer rtl-ssa
   do
       ${CONFIG_SHELL-/bin/sh} $ac_aux_dir/mkinstalldirs $lang/$DEPDIR
   done], [subdirs="$subdirs" ac_aux_dir=$ac_aux_dir DEPDIR=$DEPDIR])
diff --git a/gcc/emit-rtl.h b/gcc/emit-rtl.h
index 4658c4aa3fd..b20cd76ea74 100644
--- a/gcc/emit-rtl.h
+++ b/gcc/emit-rtl.h
@@ -23,6 +23,7 @@ along with GCC; see the file COPYING3.  If not see
 class temp_slot;
 typedef class temp_slot *temp_slot_p;
 class predefined_function_abi;
+namespace rtl_ssa { class function_info; }
 
 /* Information mainlined about RTL representation of incoming arguments.  */
 struct GTY(()) incoming_args {
@@ -73,6 +74,8 @@ struct GTY(()) rtl_data {
      different ABIs.  */
   const predefined_function_abi *GTY((skip)) abi;
 
+  rtl_ssa::function_info *GTY((skip)) ssa;
+
   /* For function.c  */
 
   /* # of bytes of outgoing arguments.  If ACCUMULATE_OUTGOING_ARGS is
diff --git a/gcc/rtl-ssa.h b/gcc/rtl-ssa.h
new file mode 100644
index 00000000000..60cdad03dc0
--- /dev/null
+++ b/gcc/rtl-ssa.h
@@ -0,0 +1,71 @@
+// On-the-side RTL SSA representation                               -*- C++ -*-
+// Copyright (C) 2020 Free Software Foundation, Inc.
+//
+// This file is part of GCC.
+//
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+//
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+// for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3.  If not see
+// <http://www.gnu.org/licenses/>.
+
+#ifndef GCC_RTL_SSA_H
+#define GCC_RTL_SSA_H 1
+
+// This is an aggregation header file.  This means it should contain only
+// other include files.
+
+#if 0
+// Files that use this one should first have:
+#define INCLUDE_ALGORITHM
+#define INCLUDE_FUNCTIONAL
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "backend.h"
+#include "rtl.h"
+#include "df.h"
+#endif
+
+// Needed by splay-tree-utils.h and directly by rtl-ssa.
+#include "pretty-print.h"
+
+// Needed directly by recog.h.
+#include "insn-config.h"
+
+// Needed directly by rtl-ssa.
+#include "splay-tree-utils.h"
+#include "recog.h"
+#include "regs.h"
+#include "function-abi.h"
+#include "obstack-utils.h"
+#include "mux-utils.h"
+#include "rtlanal.h"
+
+// Provides the global crtl->ssa.
+#include "tm_p.h"
+#include "memmodel.h"
+#include "emit-rtl.h"
+
+// The rtl-ssa files themselves.
+#include "rtl-ssa/accesses.h"
+#include "rtl-ssa/insns.h"
+#include "rtl-ssa/blocks.h"
+#include "rtl-ssa/changes.h"
+#include "rtl-ssa/functions.h"
+#include "rtl-ssa/is-a.inl"
+#include "rtl-ssa/access-utils.h"
+#include "rtl-ssa/insn-utils.h"
+#include "rtl-ssa/movement.h"
+#include "rtl-ssa/change-utils.h"
+#include "rtl-ssa/member-fns.inl"
+
+#endif
diff --git a/gcc/rtl-ssa/access-utils.h b/gcc/rtl-ssa/access-utils.h
new file mode 100644
index 00000000000..b200e3416d7
--- /dev/null
+++ b/gcc/rtl-ssa/access-utils.h
@@ -0,0 +1,553 @@
+// Access-related utilities for RTL SSA                             -*- C++ -*-
+// Copyright (C) 2020 Free Software Foundation, Inc.
+//
+// This file is part of GCC.
+//
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+//
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+// for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3.  If not see
+// <http://www.gnu.org/licenses/>.
+
+namespace rtl_ssa {
+
+// Return a reference to the whole of register REGNO.
+inline resource_info
+full_register (unsigned int regno)
+{
+  return { reg_raw_mode[regno], regno };
+}
+
+// Return true if sorted array ACCESSES includes an access to hard registers.
+inline bool
+accesses_include_hard_registers (const access_array &accesses)
+{
+  return accesses.size () && HARD_REGISTER_NUM_P (accesses.front ()->regno ());
+}
+
+// Return true if sorted array ACCESSES includes an access to memory.
+inline bool
+accesses_include_memory (const access_array &accesses)
+{
+  return accesses.size () && accesses.back ()->is_mem ();
+}
+
+// If sorted array ACCESSES includes an access to memory, return the access,
+// otherwise return null.
+template<typename T>
+inline auto
+memory_access (T accesses) -> decltype (accesses[0])
+{
+  if (accesses.size () && accesses.back ()->is_mem ())
+    return accesses.back ();
+  return nullptr;
+}
+
+// If sorted array ACCESSES includes a reference to REGNO, return the
+// access, otherwise return null.
+template<typename T>
+inline auto
+find_access (T accesses, unsigned int regno) -> decltype (accesses[0])
+{
+  unsigned int start = 0;
+  unsigned int end = accesses.size ();
+  while (start < end)
+    {
+      unsigned int mid = (start + end) / 2;
+      unsigned int found = accesses[mid]->regno ();
+      if (found == regno)
+	return accesses[mid];
+      if (found < regno)
+	start = mid + 1;
+      else
+	end = mid;
+    }
+  return nullptr;
+}
+
+// If sorted array ACCESSES includes a reference to REGNO, return the
+// index of the access, otherwise return -1.
+inline int
+find_access_index (access_array accesses, unsigned int regno)
+{
+  unsigned int start = 0;
+  unsigned int end = accesses.size ();
+  while (start < end)
+    {
+      unsigned int mid = (start + end) / 2;
+      unsigned int found = accesses[mid]->regno ();
+      if (found == regno)
+	return mid;
+      if (found < regno)
+	start = mid + 1;
+      else
+	end = mid;
+    }
+  return -1;
+}
+
+// If ACCESS is a set whose result is used by at least one instruction,
+// return the access as a set_info, otherwise return null.
+inline const set_info *
+set_with_nondebug_insn_uses (const access_info *access)
+{
+  if (access->is_set_with_nondebug_insn_uses ())
+    // No need for as_a; this test is just as definitive.
+    return static_cast<const set_info *> (access);
+  return nullptr;
+}
+
+// A non-const version of the above.
+inline set_info *
+set_with_nondebug_insn_uses (access_info *access)
+{
+  if (access->is_set_with_nondebug_insn_uses ())
+    return static_cast<set_info *> (access);
+  return nullptr;
+}
+
+// Return true if SET is the only set of SET->resource () and if it
+// dominates all uses (excluding uses of SET->resource () at points
+// where SET->resource () is always undefined).
+inline bool
+is_single_dominating_def (const set_info *set)
+{
+  return set->is_first_def () && set->is_last_def ();
+}
+
+// SET is known to be available on entry to BB.  Return true if it is
+// also available on exit from BB.  (The value might or might not be live.)
+inline bool
+remains_available_on_exit (const set_info *set, bb_info *bb)
+{
+  return (set->is_last_def ()
+	  || *set->next_def ()->insn () > *bb->end_insn ());
+}
+
+// ACCESS is known to be associated with an instruction rather than
+// a phi node.  Return which instruction that is.
+inline insn_info *
+access_insn (const access_info *access)
+{
+  // In release builds this function reduces to a single pointer reference.
+  if (auto *def = dyn_cast<const def_info *> (access))
+    return def->insn ();
+  return as_a<const use_info *> (access)->insn ();
+}
+
+// If ACCESS records a use, return the value that it uses.  If ACCESS records
+// a set, return that set.  If ACCESS records a clobber, return null.
+inline const set_info *
+access_value (const access_info *access)
+{
+  if (!access)
+    return nullptr;
+
+  if (auto *use = dyn_cast<const use_info *> (access))
+    return use->def ();
+
+  return dyn_cast<const set_info *> (access);
+}
+
+// A non-const version of the above.
+inline set_info *
+access_value (access_info *access)
+{
+  auto *const_access = const_cast<const access_info *> (access);
+  return const_cast<set_info *> (access_value (const_access));
+}
+
+// If ACCESS is a degenerate phi, return the set_info that defines its input,
+// otherwise return ACCESS itself.
+template<typename T>
+inline const T *
+look_through_degenerate_phi (const T *access)
+{
+  if (auto *phi = dyn_cast<const phi_info *> (access))
+    if (phi->is_degenerate ())
+      return phi->input_value (0);
+  return access;
+}
+
+// A non-const version of the above.
+template<typename T>
+inline T *
+look_through_degenerate_phi (T *access)
+{
+  auto *const_access = const_cast<const T *> (access);
+  return const_cast<T *> (look_through_degenerate_phi (const_access));
+}
+
+// If CLOBBER is in a group, return the first clobber in the group,
+// otherwise return CLOBBER itself.
+inline clobber_info *
+first_clobber_in_group (clobber_info *clobber)
+{
+  if (clobber->is_in_group ())
+    return clobber->group ()->first_clobber ();
+  return clobber;
+}
+
+// If CLOBBER is in a group, return the last clobber in the group,
+// otherwise return CLOBBER itself.
+inline clobber_info *
+last_clobber_in_group (clobber_info *clobber)
+{
+  if (clobber->is_in_group ())
+    return clobber->group ()->last_clobber ();
+  return clobber;
+}
+
+// If DEF is a clobber in a group, return the containing group,
+// otherwise return DEF.
+inline def_mux
+clobber_group_or_single_def (def_info *def)
+{
+  if (auto *clobber = dyn_cast<clobber_info *> (def))
+    if (clobber->is_in_group ())
+      return clobber->group ();
+  return def;
+}
+
+// Return the first definition associated with NODE.  If NODE holds
+// a single set, the result is that set.  If NODE holds a clobber_group,
+// the result is the first clobber in the group.
+inline def_info *
+first_def (def_node *node)
+{
+  return node->first_def ();
+}
+
+// Likewise for something that is either a node or a single definition.
+inline def_info *
+first_def (def_mux mux)
+{
+  return mux.first_def ();
+}
+
+// Return the last definition associated with NODE.  If NODE holds
+// a single set, the result is that set.  If NODE holds a clobber_group,
+// the result is the last clobber in the group.
+inline def_info *
+last_def (def_node *node)
+{
+  if (auto *group = dyn_cast<clobber_group *> (node))
+    return group->last_clobber ();
+  return node->first_def ();
+}
+
+// Likewise for something that is either a node or a single definition.
+inline def_info *
+last_def (def_mux mux)
+{
+  return mux.last_def ();
+}
+
+int lookup_use (splay_tree<use_info *> &, insn_info *);
+int lookup_def (def_splay_tree &, insn_info *);
+int lookup_clobber (clobber_tree &, insn_info *);
+int lookup_call_clobbers (insn_call_clobbers_tree &, insn_info *);
+
+// Search backwards from immediately before INSN for the first instruction
+// recorded in TREE, ignoring any instruction I for which IGNORE (I) is true.
+// Return null if no such instruction exists.
+template<typename IgnorePredicate>
+insn_info *
+prev_call_clobbers_ignoring (insn_call_clobbers_tree &tree, insn_info *insn,
+			     IgnorePredicate ignore)
+{
+  if (!tree)
+    return nullptr;
+
+  int comparison = lookup_call_clobbers (tree, insn);
+  while (comparison <= 0 || ignore (tree->insn ()))
+    {
+      if (!tree.splay_prev_node ())
+	return nullptr;
+
+      comparison = 1;
+    }
+  return tree->insn ();
+}
+
+// Search forwards from immediately after INSN for the first instruction
+// recorded in TREE, ignoring any instruction I for which IGNORE (I) is true.
+// Return null if no such instruction exists.
+template<typename IgnorePredicate>
+insn_info *
+next_call_clobbers_ignoring (insn_call_clobbers_tree &tree, insn_info *insn,
+			     IgnorePredicate ignore)
+{
+  if (!tree)
+    return nullptr;
+
+  int comparison = lookup_call_clobbers (tree, insn);
+  while (comparison >= 0 || ignore (tree->insn ()))
+    {
+      if (!tree.splay_next_node ())
+	return nullptr;
+
+      comparison = -1;
+    }
+  return tree->insn ();
+}
+
+// If ACCESS is a set, return the first use of ACCESS by a nondebug insn I
+// for which IGNORE (I) is false.  Return null if ACCESS is not a set or if
+// no such use exists.
+template<typename IgnorePredicate>
+inline use_info *
+first_nondebug_insn_use_ignoring (const access_info *access,
+				  IgnorePredicate ignore)
+{
+  if (const set_info *set = set_with_nondebug_insn_uses (access))
+    {
+      // Written this way to emphasize to the compiler that first_use
+      // must be nonnull in this situation.
+      use_info *use = set->first_use ();
+      do
+	{
+	  if (!ignore (use->insn ()))
+	    return use;
+	  use = use->next_nondebug_insn_use ();
+	}
+      while (use);
+    }
+  return nullptr;
+}
+
+// If ACCESS is a set, return the last use of ACCESS by a nondebug insn I for
+// which IGNORE (I) is false.  Return null if ACCESS is not a set or if no
+// such use exists.
+template<typename IgnorePredicate>
+inline use_info *
+last_nondebug_insn_use_ignoring (const access_info *access,
+				 IgnorePredicate ignore)
+{
+  if (const set_info *set = set_with_nondebug_insn_uses (access))
+    {
+      // Written this way to emphasize to the compiler that
+      // last_nondebug_insn_use must be nonnull in this situation.
+      use_info *use = set->last_nondebug_insn_use ();
+      do
+	{
+	  if (!ignore (use->insn ()))
+	    return use;
+	  use = use->prev_use ();
+	}
+      while (use);
+    }
+  return nullptr;
+}
+
+// If DEF is null, return null.
+//
+// Otherwise, search backwards for an access to DEF->resource (), starting at
+// the end of DEF's live range.  Ignore clobbers if IGNORE_CLOBBERS_SETTING
+// is YES, otherwise treat them like any other access.  Also ignore any
+// access A for which IGNORE (access_insn (A)) is true.
+//
+// Thus if DEF is a set that is used by nondebug insns, the first access
+// that the function considers is the last such use of the set.  Otherwise,
+// the first access that the function considers is DEF itself.
+//
+// Return the access found, or null if there is no access that meets
+// the criteria.
+//
+// Note that this function does not consider separately-recorded call clobbers,
+// although such clobbers are only relevant if IGNORE_CLOBBERS_SETTING is NO.
+template<typename IgnorePredicate>
+access_info *
+last_access_ignoring (def_info *def, ignore_clobbers ignore_clobbers_setting,
+		      IgnorePredicate ignore)
+{
+  while (def)
+    {
+      auto *clobber = dyn_cast<clobber_info *> (def);
+      if (clobber && ignore_clobbers_setting == ignore_clobbers::YES)
+	def = first_clobber_in_group (clobber);
+      else
+	{
+	  if (use_info *use = last_nondebug_insn_use_ignoring (def, ignore))
+	    return use;
+
+	  insn_info *insn = def->insn ();
+	  if (!ignore (insn))
+	    return def;
+	}
+      def = def->prev_def ();
+    }
+  return nullptr;
+}
+
+// Search backwards for an access to DEF->resource (), starting
+// immediately before the point at which DEF occurs.  Ignore clobbers
+// if IGNORE_CLOBBERS_SETTING is YES, otherwise treat them like any other
+// access.  Also ignore any access A for which IGNORE (access_insn (A))
+// is true.
+//
+// Thus if DEF->insn () uses DEF->resource (), that use is the first access
+// that the function considers, since an instruction's uses occur strictly
+// before its definitions.
+//
+// Note that this function does not consider separately-recorded call clobbers,
+// although such clobbers are only relevant if IGNORE_CLOBBERS_SETTING is NO.
+template<typename IgnorePredicate>
+inline access_info *
+prev_access_ignoring (def_info *def, ignore_clobbers ignore_clobbers_setting,
+		      IgnorePredicate ignore)
+{
+  return last_access_ignoring (def->prev_def (), ignore_clobbers_setting,
+			       ignore);
+}
+
+// If DEF is null, return null.
+//
+// Otherwise, search forwards for a definition of DEF->resource (),
+// starting at DEF itself.  Ignore clobbers if IGNORE_CLOBBERS_SETTING
+// is YES, otherwise treat them like any other access.  Also ignore any
+// definition D for which IGNORE (D->insn ()) is true.
+//
+// Return the definition found, or null if there is no access that meets
+// the criteria.
+//
+// Note that this function does not consider separately-recorded call clobbers,
+// although such clobbers are only relevant if IGNORE_CLOBBERS_SETTING is NO.
+template<typename IgnorePredicate>
+def_info *
+first_def_ignoring (def_info *def, ignore_clobbers ignore_clobbers_setting,
+		    IgnorePredicate ignore)
+{
+  while (def)
+    {
+      auto *clobber = dyn_cast<clobber_info *> (def);
+      if (clobber && ignore_clobbers_setting == ignore_clobbers::YES)
+	def = last_clobber_in_group (clobber);
+      else if (!ignore (def->insn ()))
+	return def;
+
+      def = def->next_def ();
+    }
+  return nullptr;
+}
+
+// Search forwards for the next access to DEF->resource (),
+// starting immediately after DEF's instruction.  Ignore clobbers if
+// IGNORE_CLOBBERS_SETTING is YES, otherwise treat them like any other access.
+// Also ignore any access A for which IGNORE (access_insn (A)) is true;
+// in this context, ignoring a set includes ignoring all uses of the set.
+//
+// Thus if DEF is a set with uses by nondebug insns, the first access that the
+// function considers is the first such use of the set.
+//
+// Return the access found, or null if there is no access that meets the
+// criteria.
+//
+// Note that this function does not consider separately-recorded call clobbers,
+// although such clobbers are only relevant if IGNORE_CLOBBERS_SETTING is NO.
+template<typename IgnorePredicate>
+access_info *
+next_access_ignoring (def_info *def, ignore_clobbers ignore_clobbers_setting,
+		      IgnorePredicate ignore)
+{
+  if (use_info *use = first_nondebug_insn_use_ignoring (def, ignore))
+    return use;
+
+  return first_def_ignoring (def->next_def (), ignore_clobbers_setting,
+			     ignore);
+}
+
+// Return true if ACCESS1 should come before ACCESS2 in an access_array.
+inline bool
+compare_access_infos (const access_info *access1, const access_info *access2)
+{
+  gcc_checking_assert (access1 == access2
+		       || access1->regno () != access2->regno ());
+  return access1->regno () < access2->regno ();
+}
+
+// Sort [BEGIN, END) into ascending regno order.  The sequence must have
+// at most one access to a given regno.
+inline void
+sort_accesses (access_info **begin, access_info **end)
+{
+  auto count = end - begin;
+  if (count <= 1)
+    return;
+
+  if (count == 2)
+    {
+      gcc_checking_assert (begin[0]->regno () != begin[1]->regno ());
+      if (begin[0]->regno () > begin[1]->regno ())
+	std::swap (begin[0], begin[1]);
+      return;
+    }
+
+  std::sort (begin, end, compare_access_infos);
+}
+
+// Sort the accesses in CONTAINER, which contains pointers to access_infos.
+template<typename T>
+inline void
+sort_accesses (T &container)
+{
+  return sort_accesses (container.begin (), container.end ());
+}
+
+// The underlying non-template implementation of merge_access_arrays.
+access_array merge_access_arrays_base (obstack_watermark &, access_array,
+				       access_array);
+// Merge access arrays ACCESSES1 and ACCESSES2, including the allocation
+// in the area governed by WATERMARK.  Return an invalid access_array if
+// ACCESSES1 and ACCESSES2 contain conflicting accesses to the same resource.
+//
+// T can be an access_array, a def_array or a use_array.
+template<typename T>
+inline T
+merge_access_arrays (obstack_watermark &watermark, T accesses1, T accesses2)
+{
+  return T (merge_access_arrays_base (watermark, accesses1, accesses2));
+}
+
+// The underlying non-template implementation of insert_access.
+access_array insert_access_base (obstack_watermark &, access_info *,
+				 access_array);
+
+// Return a new access_array that contains the result of inserting ACCESS1
+// into sorted access array ACCESSES2.  Allocate the returned array in the
+// area governed by WATERMARK.  Return an invalid access_array if ACCESSES2
+// contains a conflicting access to the same resource as ACCESS1.
+//
+// T can be an access_array, a def_array or a use_array.
+template<typename T>
+inline T
+insert_access (obstack_watermark &watermark,
+	       typename T::value_type access1, T accesses2)
+{
+  return T (insert_access_base (watermark, access1, accesses2));
+}
+
+// The underlying non-template implementation of remove_note_accesses.
+access_array remove_note_accesses_base (obstack_watermark &, access_array);
+
+// If ACCESSES contains accesses that only occur in notes, return a new
+// array without such accesses, allocating it in the area governed by
+// WATERMARK.  Return ACCESSES itself otherwise.
+//
+// T can be an access_array, a def_array or a use_array.
+template<typename T>
+inline T
+remove_note_accesses (obstack_watermark &watermark, T accesses)
+{
+  return T (remove_note_accesses_base (watermark, accesses));
+}
+
+}
diff --git a/gcc/rtl-ssa/accesses.cc b/gcc/rtl-ssa/accesses.cc
new file mode 100644
index 00000000000..bd375177866
--- /dev/null
+++ b/gcc/rtl-ssa/accesses.cc
@@ -0,0 +1,1592 @@
+// Implementation of access-related functions for RTL SSA           -*- C++ -*-
+// Copyright (C) 2020 Free Software Foundation, Inc.
+//
+// This file is part of GCC.
+//
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+//
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+// for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3.  If not see
+// <http://www.gnu.org/licenses/>.
+
+#define INCLUDE_ALGORITHM
+#define INCLUDE_FUNCTIONAL
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "backend.h"
+#include "rtl.h"
+#include "df.h"
+#include "rtl-ssa.h"
+#include "rtl-ssa/internals.inl"
+
+using namespace rtl_ssa;
+
+// This clobber belongs to a clobber_group but m_group appears to be
+// out of date.  Update it and return the new (correct) value.
+clobber_group *
+clobber_info::recompute_group ()
+{
+  using splay_tree = clobber_info::splay_tree;
+
+  // Splay this clobber to the root of the tree while searching for a node
+  // that has the correct group.  The root always has the correct group,
+  // so the search always breaks early and does not install this clobber
+  // as the root.
+  clobber_info *cursor = m_parent;
+  auto find_group = [](clobber_info *node, unsigned int)
+    {
+      return node->m_group->has_been_superceded () ? nullptr : node->m_group;
+    };
+  clobber_group *group = splay_tree::splay_and_search (this, nullptr,
+						       find_group);
+  gcc_checking_assert (m_parent);
+
+  // If the previous splay operation did anything, this clobber is now an
+// ancestor of CURSOR, and all the nodes in between have a stale group.
+  // Since we have visited the nodes, we might as well update them too.
+  //
+  // If the previous splay operation did nothing, start the update from
+  // this clobber instead.  In that case we change at most two clobbers:
+  // this clobber and possibly its parent.
+  if (cursor == m_parent)
+    cursor = this;
+
+  // Walk up the tree from CURSOR updating clobbers that need it.
+  // This walk always includes this clobber.
+  while (cursor->m_group != group)
+    {
+      cursor->m_group = group;
+      cursor = cursor->m_parent;
+    }
+
+  gcc_checking_assert (m_group == group);
+  return group;
+}
+
+// See the comment above the declaration.
+void
+resource_info::print_identifier (pretty_printer *pp) const
+{
+  if (is_mem ())
+    pp_string (pp, "mem");
+  else
+    {
+      char tmp[3 * sizeof (regno) + 2];
+      snprintf (tmp, sizeof (tmp), "r%d", regno);
+      pp_string (pp, tmp);
+    }
+}
+
+// See the comment above the declaration.
+void
+resource_info::print_context (pretty_printer *pp) const
+{
+  if (HARD_REGISTER_NUM_P (regno))
+    {
+      if (const char *name = reg_names[regno])
+	{
+	  pp_space (pp);
+	  pp_left_paren (pp);
+	  pp_string (pp, name);
+	  if (mode != E_BLKmode)
+	    {
+	      pp_colon (pp);
+	      pp_string (pp, GET_MODE_NAME (mode));
+	    }
+	  pp_right_paren (pp);
+	}
+    }
+  else if (is_reg ())
+    {
+      pp_space (pp);
+      pp_left_paren (pp);
+      if (mode != E_BLKmode)
+	{
+	  pp_string (pp, GET_MODE_NAME (mode));
+	  pp_space (pp);
+	}
+      pp_string (pp, "pseudo");
+      pp_right_paren (pp);
+    }
+}
+
+// See the comment above the declaration.
+void
+resource_info::print (pretty_printer *pp) const
+{
+  print_identifier (pp);
+  print_context (pp);
+}
+
+// Some properties can naturally be described using adjectives that attach
+// to nouns like "use" or "definition".  Print such adjectives to PP.
+void
+access_info::print_prefix_flags (pretty_printer *pp) const
+{
+  if (m_is_temp)
+    pp_string (pp, "temporary ");
+  if (m_has_been_superceded)
+    pp_string (pp, "superceded ");
+}
+
+// Print properties not handled by print_prefix_flags to PP, putting
+// each property on a new line indented by two extra spaces.
+void
+access_info::print_properties_on_new_lines (pretty_printer *pp) const
+{
+  if (m_is_pre_post_modify)
+    {
+      pp_newline_and_indent (pp, 2);
+      pp_string (pp, "set by a pre/post-modify");
+      pp_indentation (pp) -= 2;
+    }
+  if (m_includes_address_uses)
+    {
+      pp_newline_and_indent (pp, 2);
+      pp_string (pp, "appears inside an address");
+      pp_indentation (pp) -= 2;
+    }
+  if (m_includes_read_writes)
+    {
+      pp_newline_and_indent (pp, 2);
+      pp_string (pp, "appears in a read/write context");
+      pp_indentation (pp) -= 2;
+    }
+  if (m_includes_subregs)
+    {
+      pp_newline_and_indent (pp, 2);
+      pp_string (pp, "appears inside a subreg");
+      pp_indentation (pp) -= 2;
+    }
+}
+
+// Return true if there are no known issues with the integrity of the
+// link information.
+inline bool
+use_info::check_integrity ()
+{
+  auto subsequence_id = [](use_info *use)
+    {
+      if (use->is_in_nondebug_insn ())
+	return 1;
+      if (use->is_in_debug_insn ())
+	return 2;
+      return 3;
+    };
+
+  use_info *prev = prev_use ();
+  use_info *next = next_use ();
+
+  if (prev && subsequence_id (prev) > subsequence_id (this))
+    return false;
+  if (next && subsequence_id (next) < subsequence_id (this))
+    return false;
+  if (m_is_last_nondebug_insn_use != calculate_is_last_nondebug_insn_use ())
+    return false;
+
+  if (!prev && last_use ()->next_use ())
+    return false;
+  if (!next)
+    if (use_info *use = last_nondebug_insn_use ())
+      if (!use->m_is_last_nondebug_insn_use)
+	return false;
+
+  return true;
+}
+
+// See the comment above the declaration.
+void
+use_info::print_location (pretty_printer *pp) const
+{
+  // Phi uses are located by their phi; other uses by their instruction.
+  if (!is_in_phi ())
+    insn ()->print_identifier_and_location (pp);
+  else
+    pp_access (pp, phi (), PP_ACCESS_INCLUDE_LOCATION);
+}
+
+// See the comment above the declaration.
+void
+use_info::print_def (pretty_printer *pp) const
+{
+  const set_info *set = def ();
+  if (!set)
+    {
+      // There is no known definition; just describe the resource.
+      pp_string (pp, "undefined ");
+      resource ().print (pp);
+      return;
+    }
+  pp_access (pp, set, 0);
+}
+
+// See the comment above the declaration.
+void
+use_info::print (pretty_printer *pp, unsigned int flags) const
+{
+  print_prefix_flags (pp);
+
+  // Only print the mode explicitly if it differs from the mode of
+  // the definition.
+  const set_info *set = def ();
+  if (set && set->mode () != mode ())
+    {
+      pp_string (pp, GET_MODE_NAME (mode ()));
+      pp_space (pp);
+    }
+
+  pp_string (pp, "use of ");
+  print_def (pp);
+  if (flags & PP_ACCESS_INCLUDE_LOCATION)
+    {
+      pp_string (pp, " by ");
+      print_location (pp);
+    }
+  // The defining insn can only be printed when a definition is known.
+  if (set && (flags & PP_ACCESS_INCLUDE_LINKS))
+    {
+      pp_newline_and_indent (pp, 2);
+      pp_string (pp, "defined in ");
+      set->insn ()->print_location (pp);
+      pp_indentation (pp) -= 2;
+    }
+  if (flags & PP_ACCESS_INCLUDE_PROPERTIES)
+    print_properties_on_new_lines (pp);
+}
+
+// See the comment above the declaration.
+void
+def_info::print_identifier (pretty_printer *pp) const
+{
+  // The identifier has the form <resource>:<insn>, followed by any
+  // extra context that the resource wants to add.
+  resource ().print_identifier (pp);
+  pp_colon (pp);
+  insn ()->print_identifier (pp);
+  resource ().print_context (pp);
+}
+
+// See the comment above the declaration.
+void
+def_info::print_location (pretty_printer *pp) const
+{
+  // A definition is located at its defining instruction.
+  insn ()->print_identifier_and_location (pp);
+}
+
+// See the comment above the declaration.
+void
+clobber_info::print (pretty_printer *pp, unsigned int flags) const
+{
+  print_prefix_flags (pp);
+  // Distinguish clobbers that come from call insns.
+  if (is_call_clobber ())
+    pp_string (pp, "call ");
+  pp_string (pp, "clobber ");
+  print_identifier (pp);
+  if (flags & PP_ACCESS_INCLUDE_LOCATION)
+    {
+      pp_string (pp, " in ");
+      insn ()->print_location (pp);
+    }
+  if (flags & PP_ACCESS_INCLUDE_PROPERTIES)
+    print_properties_on_new_lines (pp);
+}
+
+// See the comment above the declaration.
+void
+set_info::print_uses_on_new_lines (pretty_printer *pp) const
+{
+  for (const use_info *use : all_uses ())
+    {
+      pp_newline_and_indent (pp, 2);
+      if (use->is_live_out_use ())
+	{
+	  pp_string (pp, "live out from ");
+	  use->insn ()->print_location (pp);
+	}
+      else
+	{
+	  pp_string (pp, "used by ");
+	  use->print_location (pp);
+	}
+      pp_indentation (pp) -= 2;
+    }
+  // Also dump the use splay tree, if one has been created for this set.
+  if (m_use_tree)
+    {
+      pp_newline_and_indent (pp, 2);
+      pp_string (pp, "splay tree:");
+      pp_newline_and_indent (pp, 2);
+      auto print_use = [](pretty_printer *pp,
+			  splay_tree_node<use_info *> *node)
+	{
+	  pp_string (pp, "use by ");
+	  node->value ()->print_location (pp);
+	};
+      m_use_tree.print (pp, m_use_tree.root (), print_use);
+      // Undo both pp_newline_and_indent calls above.
+      pp_indentation (pp) -= 4;
+    }
+}
+
+// See the comment above the declaration.
+void
+set_info::print (pretty_printer *pp, unsigned int flags) const
+{
+  print_prefix_flags (pp);
+  pp_string (pp, "set ");
+  print_identifier (pp);
+  if (flags & PP_ACCESS_INCLUDE_LOCATION)
+    {
+      pp_string (pp, " in ");
+      insn ()->print_location (pp);
+    }
+  if (flags & PP_ACCESS_INCLUDE_PROPERTIES)
+    print_properties_on_new_lines (pp);
+  // Uses are part of the link information.
+  if (flags & PP_ACCESS_INCLUDE_LINKS)
+    print_uses_on_new_lines (pp);
+}
+
+// See the comment above the declaration.
+void
+phi_info::print (pretty_printer *pp, unsigned int flags) const
+{
+  print_prefix_flags (pp);
+  pp_string (pp, "phi node ");
+  print_identifier (pp);
+  if (flags & PP_ACCESS_INCLUDE_LOCATION)
+    {
+      pp_string (pp, " in ");
+      insn ()->print_location (pp);
+    }
+
+  if (flags & PP_ACCESS_INCLUDE_PROPERTIES)
+    print_properties_on_new_lines (pp);
+
+  if (flags & PP_ACCESS_INCLUDE_LINKS)
+    {
+      basic_block cfg_bb = bb ()->cfg_bb ();
+      pp_newline_and_indent (pp, 2);
+      pp_string (pp, "inputs:");
+      // The Ith input corresponds to the Ith incoming CFG edge.
+      unsigned int i = 0;
+      for (const use_info *input : inputs ())
+	{
+	  basic_block pred_cfg_bb = EDGE_PRED (cfg_bb, i)->src;
+	  pp_newline_and_indent (pp, 2);
+	  pp_string (pp, "bb");
+	  pp_decimal_int (pp, pred_cfg_bb->index);
+	  pp_colon (pp);
+	  pp_space (pp);
+	  input->print_def (pp);
+	  pp_indentation (pp) -= 2;
+	  i += 1;
+	}
+      pp_indentation (pp) -= 2;
+
+      print_uses_on_new_lines (pp);
+    }
+}
+
+// See the comment above the declaration.
+void
+set_node::print (pretty_printer *pp) const
+{
+  // A set_node wraps a single definition; print that definition.
+  pp_access (pp, first_def ());
+}
+
+// See the comment above the declaration.
+void
+clobber_group::print (pretty_printer *pp) const
+{
+  auto print_clobber = [](pretty_printer *pp, const def_info *clobber)
+    {
+      pp_access (pp, clobber);
+    };
+  pp_string (pp, "grouped clobber");
+  // First print the clobbers in list order...
+  for (const def_info *clobber : clobbers ())
+    {
+      pp_newline_and_indent (pp, 2);
+      print_clobber (pp, clobber);
+      pp_indentation (pp) -= 2;
+    }
+  // ...then dump the group's internal splay tree.
+  pp_newline_and_indent (pp, 2);
+  pp_string (pp, "splay tree");
+  pp_newline_and_indent (pp, 2);
+  m_clobber_tree.print (pp, print_clobber);
+  // Undo both pp_newline_and_indent calls above.
+  pp_indentation (pp) -= 4;
+}
+
+// Return a clobber_group for CLOBBER, creating one if CLOBBER doesn't
+// already belong to a group.
+clobber_group *
+function_info::need_clobber_group (clobber_info *clobber)
+{
+  return (clobber->is_in_group ()
+	  ? clobber->group ()
+	  : allocate<clobber_group> (clobber));
+}
+
+// Return a def_node for inserting DEF into the associated resource's
+// splay tree.  Use a clobber_group if DEF is a clobber and a set_node
+// otherwise.
+def_node *
+function_info::need_def_node (def_info *def)
+{
+  if (is_a<clobber_info *> (def))
+    return need_clobber_group (as_a<clobber_info *> (def));
+  return allocate<set_node> (as_a<set_info *> (def));
+}
+
+// LAST is the last thing to define LAST->resource (), and is where any
+// splay tree root for LAST->resource () is stored.  Require such a splay tree
+// to exist, creating a new one if necessary.  Return the root of the tree.
+//
+// The caller must call LAST->set_splay_root after it has finished with
+// the splay tree.
+def_splay_tree
+function_info::need_def_splay_tree (def_info *last)
+{
+  // Reuse an existing tree if there is one.
+  if (def_node *root = last->splay_root ())
+    return root;
+
+  // Use a left-spine rooted at the last node.  Walking the definition
+  // list backwards adds each earlier definition as the left child of
+  // the previous node.
+  def_node *root = need_def_node (last);
+  def_node *parent = root;
+  while (def_info *prev = first_def (parent)->prev_def ())
+    {
+      def_node *node = need_def_node (prev);
+      def_splay_tree::insert_child (parent, 0, node);
+      parent = node;
+    }
+  return root;
+}
+
+// Search TREE for either:
+//
+// - a set_info at INSN or
+// - a clobber_group whose range includes INSN
+//
+// If such a node exists, install it as the root of TREE and return 0.
+// Otherwise arbitrarily choose between:
+//
+// (1) Installing the closest preceding node as the root and returning 1.
+// (2) Installing the closest following node as the root and returning -1.
+//
+// Note that this routine should not be used to check whether INSN
+// itself defines a resource; that can be checked more cheaply using
+// find_access_index.
+int
+rtl_ssa::lookup_def (def_splay_tree &tree, insn_info *insn)
+{
+  // A node covers the range [first_def, last_def]; go left when INSN
+  // comes before the start of that range and right when it comes
+  // after the end.
+  auto go_left = [&](def_node *node)
+    {
+      return *insn < *first_def (node)->insn ();
+    };
+  auto go_right = [&](def_node *node)
+    {
+      return *insn > *last_def (node)->insn ();
+    };
+  return tree.lookup (go_left, go_right);
+}
+
+// Search TREE for a clobber in INSN.  If such a clobber exists, install
+// it as the root of TREE and return 0.  Otherwise arbitrarily choose between:
+//
+// (1) Installing the closest preceding clobber as the root and returning 1.
+// (2) Installing the closest following clobber as the root and returning -1.
+int
+rtl_ssa::lookup_clobber (clobber_tree &tree, insn_info *insn)
+{
+  // Order clobbers by the position of their instructions.
+  auto compare = [&](clobber_info *clobber)
+    {
+      return insn->compare_with (clobber->insn ());
+    };
+  return tree.lookup (compare);
+}
+
+// Search for a definition of RESOURCE at INSN and return the result of
+// the search as a def_lookup.  See the comment above the class for more
+// details.
+def_lookup
+function_info::find_def (resource_info resource, insn_info *insn)
+{
+  // m_defs is indexed by regno + 1.
+  def_info *first = m_defs[resource.regno + 1];
+  if (!first)
+    // There are no nodes.  The comparison result is pretty meaningless
+    // in this case.
+    return { nullptr, -1 };
+
+  // See whether the first node matches.
+  auto first_result = clobber_group_or_single_def (first);
+  if (*insn <= *last_def (first_result)->insn ())
+    {
+      int comparison = (*insn >= *first->insn () ? 0 : -1);
+      return { first_result, comparison };
+    }
+
+  // See whether the last node matches.
+  def_info *last = first->last_def ();
+  auto last_result = clobber_group_or_single_def (last);
+  if (*insn >= *first_def (last_result)->insn ())
+    {
+      int comparison = (*insn <= *last->insn () ? 0 : 1);
+      return { last_result, comparison };
+    }
+
+  // Resort to using a splay tree to search for the result.
+  // The tree root is cached in the last definition.
+  def_splay_tree tree = need_def_splay_tree (last);
+  int comparison = lookup_def (tree, insn);
+  last->set_splay_root (tree.root ());
+  return { tree.root (), comparison };
+}
+
+// Add DEF to the function's list of definitions of DEF->resource (),
+// inserting DEF immediately before BEFORE.  DEF is not currently in the list.
+void
+function_info::insert_def_before (def_info *def, def_info *before)
+{
+  gcc_checking_assert (!def->has_def_links ()
+		       && *before->insn () > *def->insn ());
+
+  // Inherit BEFORE's previous link (which for a list head encodes the
+  // last-definition pointer instead).
+  def->copy_prev_from (before);
+  if (def_info *prev = def->prev_def ())
+    {
+      gcc_checking_assert (*prev->insn () < *def->insn ());
+      prev->set_next_def (def);
+    }
+  else
+    // DEF is now the first definition of the resource.
+    m_defs[def->regno () + 1] = def;
+
+  def->set_next_def (before);
+  before->set_prev_def (def);
+}
+
+// Add DEF to the function's list of definitions of DEF->resource (),
+// inserting DEF immediately after AFTER.  DEF is not currently in the list.
+void
+function_info::insert_def_after (def_info *def, def_info *after)
+{
+  gcc_checking_assert (!def->has_def_links ()
+		       && *after->insn () < *def->insn ());
+
+  // Inherit AFTER's next link (which for a list tail encodes the
+  // last-definition pointer instead).
+  def->copy_next_from (after);
+  if (def_info *next = def->next_def ())
+    {
+      gcc_checking_assert (*next->insn () > *def->insn ());
+      next->set_prev_def (def);
+    }
+  else
+    // DEF is now the last definition; update the cached pointer in
+    // the list head.
+    m_defs[def->regno () + 1]->set_last_def (def);
+
+  def->set_prev_def (after);
+  after->set_next_def (def);
+}
+
+// Remove DEF from the function's list of definitions of DEF->resource ().
+void
+function_info::remove_def_from_list (def_info *def)
+{
+  def_info *prev = def->prev_def ();
+  def_info *next = def->next_def ();
+
+  if (next)
+    next->copy_prev_from (def);
+  else
+    // DEF was the last definition; the list head caches that pointer.
+    m_defs[def->regno () + 1]->set_last_def (prev);
+
+  if (prev)
+    prev->copy_next_from (def);
+  else
+    // DEF was the first definition.
+    m_defs[def->regno () + 1] = next;
+
+  def->clear_def_links ();
+}
+
+// Add CLOBBER to GROUP and insert it into the function's list of
+// accesses to CLOBBER->resource ().  CLOBBER is not currently part
+// of an active group and is not currently in the list.
+void
+function_info::add_clobber (clobber_info *clobber, clobber_group *group)
+{
+  // Search for either the previous or next clobber in the group.
+  // The result is less than zero if CLOBBER should come before NEIGHBOR
+  // or greater than zero if CLOBBER should come after NEIGHBOR.
+  int comparison = lookup_clobber (group->m_clobber_tree, clobber->insn ());
+  gcc_checking_assert (comparison != 0);
+  clobber_info *neighbor = group->m_clobber_tree.root ();
+
+  // Since NEIGHBOR is now the root of the splay tree, its group needs
+  // to be up-to-date.
+  neighbor->update_group (group);
+
+  // If CLOBBER comes before NEIGHBOR, insert CLOBBER to NEIGHBOR's left,
+  // otherwise insert CLOBBER to NEIGHBOR's right.
+  clobber_info::splay_tree::insert_child (neighbor, comparison > 0, clobber);
+  clobber->set_group (group);
+
+  // Insert the clobber into the function-wide list and update the
+  // bounds of the group.
+  if (comparison > 0)
+    {
+      insert_def_after (clobber, neighbor);
+      if (neighbor == group->last_clobber ())
+	group->set_last_clobber (clobber);
+    }
+  else
+    {
+      insert_def_before (clobber, neighbor);
+      if (neighbor == group->first_clobber ())
+	group->set_first_clobber (clobber);
+    }
+}
+
+// Remove CLOBBER from GROUP, given that GROUP contains other clobbers too.
+// Also remove CLOBBER from the function's list of accesses to
+// CLOBBER->resource ().
+void
+function_info::remove_clobber (clobber_info *clobber, clobber_group *group)
+{
+  // Update the group bounds first; the new extremity must have an
+  // up-to-date group pointer, since group membership is maintained
+  // lazily for interior clobbers.
+  if (clobber == group->first_clobber ())
+    {
+      auto *new_first = as_a<clobber_info *> (clobber->next_def ());
+      group->set_first_clobber (new_first);
+      new_first->update_group (group);
+    }
+  else if (clobber == group->last_clobber ())
+    {
+      auto *new_last = as_a<clobber_info *> (clobber->prev_def ());
+      group->set_last_clobber (new_last);
+      new_last->update_group (group);
+    }
+
+  // Remove CLOBBER from the group's splay tree.  The node that takes
+  // its place as root (if any) must also have an up-to-date group.
+  clobber_info *replacement = clobber_info::splay_tree::remove_node (clobber);
+  if (clobber == group->m_clobber_tree.root ())
+    {
+      group->m_clobber_tree = replacement;
+      replacement->update_group (group);
+    }
+  clobber->set_group (nullptr);
+
+  remove_def_from_list (clobber);
+}
+
+// Add CLOBBER immediately before the first clobber in GROUP, given that
+// CLOBBER is not currently part of any group.
+void
+function_info::prepend_clobber_to_group (clobber_info *clobber,
+					 clobber_group *group)
+{
+  clobber_info *old_first = group->first_clobber ();
+  clobber->set_group (group);
+  // Make CLOBBER the left child of the old first clobber, so that it
+  // precedes everything else in the group's splay tree.
+  clobber_info::splay_tree::insert_child (old_first, 0, clobber);
+  group->set_first_clobber (clobber);
+}
+
+// Add CLOBBER immediately after the last clobber in GROUP, given that
+// CLOBBER is not currently part of any group.
+void
+function_info::append_clobber_to_group (clobber_info *clobber,
+					clobber_group *group)
+{
+  clobber_info *old_last = group->last_clobber ();
+  clobber->set_group (group);
+  // Make CLOBBER the right child of the old last clobber, so that it
+  // follows everything else in the group's splay tree.
+  clobber_info::splay_tree::insert_child (old_last, 1, clobber);
+  group->set_last_clobber (clobber);
+}
+
+// Put CLOBBER1 and CLOBBER2 into the same clobber_group, given that
+// CLOBBER1 occurs immediately before CLOBBER2 and that the two clobbers
+// are not currently in the same group.  LAST is the last definition of
+// the associated resource, and is where any splay tree is stored.
+void
+function_info::merge_clobber_groups (clobber_info *clobber1,
+				     clobber_info *clobber2,
+				     def_info *last)
+{
+  if (clobber1->is_in_group () && clobber2->is_in_group ())
+    {
+      // Both clobbers already belong to groups; merge GROUP2 into GROUP1.
+      clobber_group *group1 = clobber1->group ();
+      clobber_group *group2 = clobber2->group ();
+      gcc_checking_assert (clobber1 == group1->last_clobber ()
+			   && clobber2 == group2->first_clobber ());
+
+      if (def_splay_tree tree = last->splay_root ())
+	{
+	  // Remove GROUP2 from the splay tree.
+	  int comparison = lookup_def (tree, clobber2->insn ());
+	  gcc_checking_assert (comparison == 0);
+	  tree.remove_root ();
+	  last->set_splay_root (tree.root ());
+	}
+
+      // Splice the trees together.
+      group1->m_clobber_tree.splice_next_tree (group2->m_clobber_tree);
+
+      // Bring the two extremes of GROUP2 under GROUP1.  Any other
+      // clobbers in the group are updated lazily on demand.
+      clobber2->set_group (group1);
+      group2->last_clobber ()->set_group (group1);
+      group1->set_last_clobber (group2->last_clobber ());
+
+      // Record that GROUP2 is no more.
+      group2->set_first_clobber (nullptr);
+      group2->set_last_clobber (nullptr);
+      group2->m_clobber_tree = nullptr;
+    }
+  else
+    {
+      // In this case there can be no active splay tree.
+      gcc_assert (!last->splay_root ());
+      // Add the ungrouped clobber to the other clobber's group,
+      // creating a group for the latter if necessary.
+      if (clobber2->is_in_group ())
+	prepend_clobber_to_group (clobber1, clobber2->group ());
+      else
+	append_clobber_to_group (clobber2, need_clobber_group (clobber1));
+    }
+}
+
+// GROUP spans INSN, and INSN now sets the resource that GROUP clobbers.
+// Split GROUP around INSN and return the clobber that comes immediately
+// before INSN.
+clobber_info *
+function_info::split_clobber_group (clobber_group *group, insn_info *insn)
+{
+  // Search for either the previous or next clobber in the group.
+  // The result is less than zero if INSN comes before NEIGHBOR
+  // or greater than zero if INSN comes after NEIGHBOR.
+  int comparison = lookup_clobber (group->m_clobber_tree, insn);
+  gcc_checking_assert (comparison != 0);
+  clobber_info *neighbor = group->m_clobber_tree.root ();
+
+  // Split the splay tree into TREE1 (clobbers before INSN) and
+  // TREE2 (clobbers after INSN).
+  clobber_tree tree1, tree2;
+  clobber_info *prev;
+  clobber_info *next;
+  if (comparison > 0)
+    {
+      // NEIGHBOR is the last clobber in what will become the first group.
+      tree1 = neighbor;
+      tree2 = tree1.split_after_root ();
+      prev = neighbor;
+      next = as_a<clobber_info *> (prev->next_def ());
+    }
+  else
+    {
+      // NEIGHBOR is the first clobber in what will become the second group.
+      tree2 = neighbor;
+      tree1 = tree2.split_before_root ();
+      next = neighbor;
+      prev = as_a<clobber_info *> (next->prev_def ());
+    }
+
+  // Use GROUP to hold PREV and earlier clobbers.  Create a new group for
+  // NEXT onwards.
+  clobber_info *last_clobber = group->last_clobber ();
+  clobber_group *group1 = group;
+  clobber_group *group2 = allocate<clobber_group> (next);
+
+  // Finish setting up GROUP1, making sure that the roots and extremities
+  // have a correct group pointer.  Leave the rest to be updated lazily.
+  group1->set_last_clobber (prev);
+  tree1->set_group (group1);
+  prev->set_group (group1);
+
+  // Finish setting up GROUP2, with the same approach as for GROUP1.
+  group2->set_first_clobber (next);
+  group2->set_last_clobber (last_clobber);
+  next->set_group (group2);
+  tree2->set_group (group2);
+  last_clobber->set_group (group2);
+
+  return prev;
+}
+
+// Add DEF to the end of the function's list of definitions of
+// DEF->resource ().  There is known to be no associated splay tree yet.
+void
+function_info::append_def (def_info *def)
+{
+  gcc_checking_assert (!def->has_def_links ());
+  // m_defs is indexed by regno + 1.
+  def_info **head = &m_defs[def->regno () + 1];
+  def_info *first = *head;
+  if (!first)
+    {
+      // This is the only definition of the resource.
+      def->set_last_def (def);
+      *head = def;
+      return;
+    }
+
+  def_info *prev = first->last_def ();
+  gcc_checking_assert (!prev->splay_root ());
+
+  // Maintain the invariant that two clobbers must not appear in
+  // neighboring nodes of the splay tree.
+  auto *clobber = dyn_cast<clobber_info *> (def);
+  auto *prev_clobber = dyn_cast<clobber_info *> (prev);
+  if (clobber && prev_clobber)
+    append_clobber_to_group (clobber, need_clobber_group (prev_clobber));
+
+  // Link DEF in at the end of the list and update the cached
+  // last-definition pointer in the list head.
+  prev->set_next_def (def);
+  def->set_prev_def (prev);
+  first->set_last_def (def);
+}
+
+// Add DEF to the function's list of definitions of DEF->resource ().
+// Also insert it into the associated splay tree, if there is one.
+// DEF is not currently part of the list and is not in the splay tree.
+void
+function_info::add_def (def_info *def)
+{
+  gcc_checking_assert (!def->has_def_links ()
+		       && !def->m_is_temp
+		       && !def->m_has_been_superceded);
+  // m_defs is indexed by regno + 1.
+  def_info **head = &m_defs[def->regno () + 1];
+  def_info *first = *head;
+  if (!first)
+    {
+      // This is the only definition of the resource.
+      def->set_last_def (def);
+      *head = def;
+      return;
+    }
+
+  def_info *last = first->last_def ();
+  insn_info *insn = def->insn ();
+
+  // Try the cheap end-of-list checks before resorting to a full
+  // splay-tree search.
+  int comparison;
+  def_node *root = nullptr;
+  def_info *prev = nullptr;
+  def_info *next = nullptr;
+  if (*insn > *last->insn ())
+    {
+      // This definition comes after all other definitions.
+      comparison = 1;
+      if (def_splay_tree tree = last->splay_root ())
+	{
+	  tree.splay_max_node ();
+	  root = tree.root ();
+	  last->set_splay_root (root);
+	}
+      prev = last;
+    }
+  else if (*insn < *first->insn ())
+    {
+      // This definition comes before all other definitions.
+      comparison = -1;
+      if (def_splay_tree tree = last->splay_root ())
+	{
+	  tree.splay_min_node ();
+	  root = tree.root ();
+	  last->set_splay_root (root);
+	}
+      next = first;
+    }
+  else
+    {
+      // Search the splay tree for an insertion point.
+      def_splay_tree tree = need_def_splay_tree (last);
+      comparison = lookup_def (tree, insn);
+      root = tree.root ();
+      last->set_splay_root (root);
+
+      // Deal with cases in which we found an overlapping live range.
+      if (comparison == 0)
+	{
+	  // The root must be a clobber_group that spans INSN; a set
+	  // at INSN would have been rejected by the caller.
+	  auto *group = as_a<clobber_group *> (tree.root ());
+	  if (auto *clobber = dyn_cast<clobber_info *> (def))
+	    {
+	      add_clobber (clobber, group);
+	      return;
+	    }
+	  // DEF is a set, so the group has to be split around INSN.
+	  prev = split_clobber_group (group, insn);
+	  next = prev->next_def ();
+	}
+      // COMPARISON is < 0 if DEF comes before ROOT or > 0 if DEF comes
+      // after ROOT.
+      else if (comparison < 0)
+	{
+	  next = first_def (root);
+	  prev = next->prev_def ();
+	}
+      else
+	{
+	  prev = last_def (root);
+	  next = prev->next_def ();
+	}
+    }
+
+  // See if we should merge CLOBBER with a neighboring clobber.
+  auto *clobber = dyn_cast<clobber_info *> (def);
+  auto *prev_clobber = safe_dyn_cast<clobber_info *> (prev);
+  auto *next_clobber = safe_dyn_cast<clobber_info *> (next);
+  // We shouldn't have consecutive clobber_groups.
+  gcc_checking_assert (!(clobber && prev_clobber && next_clobber));
+  if (clobber && prev_clobber)
+    append_clobber_to_group (clobber, need_clobber_group (prev_clobber));
+  else if (clobber && next_clobber)
+    prepend_clobber_to_group (clobber, need_clobber_group (next_clobber));
+  else if (root)
+    {
+      // If DEF comes before ROOT, insert DEF to ROOT's left,
+      // otherwise insert DEF to ROOT's right.
+      def_node *node = need_def_node (def);
+      def_splay_tree::insert_child (root, comparison >= 0, node);
+    }
+  if (prev)
+    insert_def_after (def, prev);
+  else
+    insert_def_before (def, next);
+}
+
+// Remove DEF from the function's list of definitions of DEF->resource ().
+// Also remove DEF from the associated splay tree, if there is one.
+void
+function_info::remove_def (def_info *def)
+{
+  // m_defs is indexed by regno + 1.
+  def_info **head = &m_defs[def->regno () + 1];
+  def_info *first = *head;
+  gcc_checking_assert (first);
+  if (first->is_last_def ())
+    {
+      // DEF is the only definition of the resource.
+      gcc_checking_assert (first == def);
+      *head = nullptr;
+      def->clear_def_links ();
+      return;
+    }
+
+  // If CLOBBER belongs to a clobber_group that contains other clobbers
+  // too, then we need to update the clobber_group and the list, but any
+  // splay tree that contains the clobber_group is unaffected.
+  if (auto *clobber = dyn_cast<clobber_info *> (def))
+    if (clobber->is_in_group ())
+      {
+	clobber_group *group = clobber->group ();
+	if (group->first_clobber () != group->last_clobber ())
+	  {
+	    remove_clobber (clobber, group);
+	    return;
+	  }
+      }
+
+  // If we've created a splay tree for this resource, remove the entry
+  // for DEF.
+  def_info *last = first->last_def ();
+  if (def_splay_tree tree = last->splay_root ())
+    {
+      int comparison = lookup_def (tree, def->insn ());
+      gcc_checking_assert (comparison == 0);
+      tree.remove_root ();
+      last->set_splay_root (tree.root ());
+    }
+
+  // If the definition came between two clobbers, merge them into a single
+  // group.  This restores the invariant that neighboring splay-tree
+  // nodes cannot both be clobbers.
+  auto *prev_clobber = safe_dyn_cast<clobber_info *> (def->prev_def ());
+  auto *next_clobber = safe_dyn_cast<clobber_info *> (def->next_def ());
+  if (prev_clobber && next_clobber)
+    merge_clobber_groups (prev_clobber, next_clobber, last);
+
+  remove_def_from_list (def);
+}
+
+// Require DEF to have a splay tree that contains all non-phi uses.
+void
+function_info::need_use_splay_tree (set_info *def)
+{
+  // Nothing to do if the tree already exists.
+  if (def->m_use_tree)
+    return;
+  // The use list is already sorted, so each use can simply be appended
+  // as the new maximum node.
+  for (use_info *use : def->all_insn_uses ())
+    def->m_use_tree.insert_max_node
+      (allocate<splay_tree_node<use_info *>> (use));
+}
+
+// Compare two instructions by their position in a use splay tree.  Return >0
+// if INSN1 comes after INSN2, <0 if INSN1 comes before INSN2, or 0 if they are
+// the same instruction.
+static inline int
+compare_use_insns (insn_info *insn1, insn_info *insn2)
+{
+  // Debug instructions go after nondebug instructions.
+  bool debug1 = insn1->is_debug_insn ();
+  bool debug2 = insn2->is_debug_insn ();
+  if (debug1 != debug2)
+    return debug1 ? 1 : -1;
+  return insn1->compare_with (insn2);
+}
+
+// Search TREE for a use in INSN.  If such a use exists, install it as
+// the root of TREE and return 0.  Otherwise arbitrarily choose between:
+//
+// (1) Installing the closest preceding use as the root and returning 1.
+// (2) Installing the closest following use as the root and returning -1.
+int
+rtl_ssa::lookup_use (splay_tree<use_info *> &tree, insn_info *insn)
+{
+  // Order uses as compare_use_insns orders their instructions.
+  auto compare = [&](splay_tree_node<use_info *> *node)
+    {
+      return compare_use_insns (insn, node->value ()->insn ());
+    };
+  return tree.lookup (compare);
+}
+
+// Add USE to USE->def ()'s list of uses, inserting USE immediately before
+// BEFORE.  USE is not currently in the list.
+//
+// This routine should not be used for inserting phi uses.
+void
+function_info::insert_use_before (use_info *use, use_info *before)
+{
+  gcc_checking_assert (!use->has_use_links () && use->is_in_any_insn ());
+
+  set_info *def = use->def ();
+
+  // Inherit BEFORE's previous link (which for a list head encodes the
+  // last-use pointer instead).
+  use->copy_prev_from (before);
+  use->set_next_use (before);
+
+  if (use_info *prev = use->prev_use ())
+    prev->set_next_use (use);
+  else
+    use->def ()->set_first_use (use);
+
+  before->set_prev_use (use);
+  // If USE ends the subsequence of nondebug insn uses, record it as
+  // the last such use.
+  if (use->is_in_nondebug_insn () && before->is_in_debug_insn_or_phi ())
+    def->last_use ()->set_last_nondebug_insn_use (use);
+
+  gcc_checking_assert (use->check_integrity () && before->check_integrity ());
+}
+
+// Add USE to USE->def ()'s list of uses, inserting USE immediately after
+// AFTER.  USE is not currently in the list.
+//
+// This routine should not be used for inserting phi uses.
+void
+function_info::insert_use_after (use_info *use, use_info *after)
+{
+  set_info *def = use->def ();
+  gcc_checking_assert (after->is_in_any_insn ()
+		       && !use->has_use_links ()
+		       && use->is_in_any_insn ());
+
+  use->set_prev_use (after);
+  // Inherit AFTER's next link (which for a list tail encodes the
+  // last-use pointer instead).
+  use->copy_next_from (after);
+
+  after->set_next_use (use);
+
+  if (use_info *next = use->next_use ())
+    {
+      // The last node doesn't change, but we might need to update its
+      // last_nondebug_insn_use record.
+      if (use->is_in_nondebug_insn () && next->is_in_debug_insn_or_phi ())
+	def->last_use ()->set_last_nondebug_insn_use (use);
+      next->set_prev_use (use);
+    }
+  else
+    {
+      // USE is now the last node.
+      if (use->is_in_nondebug_insn ())
+	use->set_last_nondebug_insn_use (use);
+      def->first_use ()->set_last_use (use);
+    }
+
+  gcc_checking_assert (use->check_integrity () && after->check_integrity ());
+}
+
+// If USE has a known definition, add USE to that definition's list of uses.
+// Also update the associated splay tree, if any.
+void
+function_info::add_use (use_info *use)
+{
+  gcc_checking_assert (!use->has_use_links ()
+		       && !use->m_is_temp
+		       && !use->m_has_been_superceded);
+
+  // Uses of unknown values are not linked into any list.
+  set_info *def = use->def ();
+  if (!def)
+    return;
+
+  use_info *first = def->first_use ();
+  if (!first)
+    {
+      // This is the only use of the definition.
+      use->set_last_use (use);
+      if (use->is_in_nondebug_insn ())
+	use->set_last_nondebug_insn_use (use);
+
+      def->set_first_use (use);
+
+      gcc_checking_assert (use->check_integrity ());
+      return;
+    }
+
+  if (use->is_in_phi ())
+    {
+      // Add USE at the end of the list, as the new first phi.
+      use_info *last = first->last_use ();
+
+      use->set_prev_use (last);
+      use->copy_next_from (last);
+
+      last->set_next_use (use);
+      first->set_last_use (use);
+
+      gcc_checking_assert (use->check_integrity ());
+      return;
+    }
+
+  // If there is currently no splay tree for this definition, see if we can
+  // get away with a pure list-based update.
+  insn_info *insn = use->insn ();
+  auto quick_path = [&]()
+    {
+      // Check if USE should come before all current uses.
+      if (first->is_in_phi () || compare_use_insns (insn, first->insn ()) < 0)
+	{
+	  insert_use_before (use, first);
+	  return true;
+	}
+
+      // Check if USE should come after all current uses in the same
+      // subsequence (i.e. the list of nondebug insn uses or the list
+      // of debug insn uses).
+      use_info *last = first->last_use ();
+      if (use->is_in_debug_insn ())
+	{
+	  if (last->is_in_phi ())
+	    return false;
+	}
+      else
+	last = last->last_nondebug_insn_use ();
+
+      if (compare_use_insns (insn, last->insn ()) > 0)
+	{
+	  insert_use_after (use, last);
+	  return true;
+	}
+
+      return false;
+    };
+  if (!def->m_use_tree && quick_path ())
+    return;
+
+  // Search the splay tree for an insertion point.  COMPARISON is less
+  // than zero if USE should come before NEIGHBOR, or greater than zero
+  // if USE should come after NEIGHBOR.
+  need_use_splay_tree (def);
+  int comparison = lookup_use (def->m_use_tree, insn);
+  gcc_checking_assert (comparison != 0);
+  splay_tree_node<use_info *> *neighbor = def->m_use_tree.root ();
+
+  // If USE comes before NEIGHBOR, insert USE to NEIGHBOR's left,
+  // otherwise insert USE to NEIGHBOR's right.
+  auto *use_node = allocate<splay_tree_node<use_info *>> (use);
+  def->m_use_tree.insert_child (neighbor, comparison > 0, use_node);
+  if (comparison > 0)
+    insert_use_after (use, neighbor->value ());
+  else
+    insert_use_before (use, neighbor->value ());
+}
+
+// If USE has a known definition, remove USE from that definition's list
+// of uses.  Also remove it from the associated splay tree, if any.
+void
+function_info::remove_use (use_info *use)
+{
+  // Uses of unknown values are not linked into any list.
+  set_info *def = use->def ();
+  if (!def)
+    return;
+
+  // Remove USE from the splay tree.  Phi uses are never entered into
+  // the tree, hence the is_in_any_insn check.
+  if (def->m_use_tree && use->is_in_any_insn ())
+    {
+      int comparison = lookup_use (def->m_use_tree, use->insn ());
+      gcc_checking_assert (comparison == 0);
+      def->m_use_tree.remove_root ();
+    }
+
+  use_info *prev = use->prev_use ();
+  use_info *next = use->next_use ();
+
+  // Update the cached last-nondebug-insn-use pointer if USE was it.
+  use_info *first = def->first_use ();
+  use_info *last = first->last_use ();
+  if (last->last_nondebug_insn_use () == use)
+    last->set_last_nondebug_insn_use (prev);
+
+  if (next)
+    next->copy_prev_from (use);
+  else
+    first->set_last_use (prev);
+
+  if (prev)
+    prev->copy_next_from (use);
+  else
+    def->set_first_use (next);
+
+  use->clear_use_links ();
+  gcc_checking_assert ((!prev || prev->check_integrity ())
+		       && (!next || next->check_integrity ()));
+}
+
+// Allocate a temporary clobber_info for register REGNO in insn INSN,
+// including it in the region of the obstack governed by WATERMARK.
+// Return a new def_array that contains OLD_DEFS and the new clobber.
+//
+// OLD_DEFS is known not to define REGNO.
+def_array
+function_info::insert_temp_clobber (obstack_watermark &watermark,
+				    insn_info *insn, unsigned int regno,
+				    def_array old_defs)
+{
+  // Temporary allocations must come from the dedicated temporary obstack.
+  gcc_checking_assert (watermark == &m_temp_obstack);
+  auto *clobber = allocate_temp<clobber_info> (insn, regno);
+  // Mark the clobber so that it is not mistaken for a committed access.
+  clobber->m_is_temp = true;
+  return insert_access (watermark, clobber, old_defs);
+}
+
+// A subroutine of make_uses_available.  Try to make USE's definition
+// available at the head of BB.  On success:
+//
+// - If the use would have the same def () as USE, return USE.
+//
+// - If BB already has a degenerate phi for the same definition,
+//   return a temporary use of that phi.
+//
+// - Otherwise, the use would need a new degenerate phi.  Allocate a
+//   temporary phi and return a temporary use of it.
+//
+// Return null on failure.
+use_info *
+function_info::make_use_available (use_info *use, bb_info *bb)
+{
+  set_info *def = use->def ();
+  // An undefined value is trivially available everywhere.
+  if (!def)
+    return use;
+
+  if (is_single_dominating_def (def))
+    return use;
+
+  // FIXME: Deliberately limited for fwprop compatibility testing.
+  basic_block cfg_bb = bb->cfg_bb ();
+  bb_info *use_bb = use->bb ();
+  // Only handle the case in which BB's single predecessor is USE's
+  // block and the definition survives to the end of that block.
+  if (single_pred_p (cfg_bb)
+      && single_pred (cfg_bb) == use_bb->cfg_bb ()
+      && remains_available_on_exit (def, use_bb))
+    {
+      // Within a single EBB no phi is needed.
+      if (def->ebb () == bb->ebb ())
+	return use;
+
+      resource_info resource = use->resource ();
+      set_info *ultimate_def = look_through_degenerate_phi (def);
+
+      // See if there is already a (degenerate) phi for DEF.
+      insn_info *phi_insn = bb->ebb ()->phi_insn ();
+      phi_info *phi;
+      def_lookup dl = find_def (resource, phi_insn);
+      if (set_info *set = dl.matching_set ())
+	{
+	  // There is an existing phi.
+	  phi = as_a<phi_info *> (set);
+	  gcc_checking_assert (phi->input_value (0) == ultimate_def);
+	}
+      else
+	{
+	  // Create a temporary placeholder phi.  This will become
+	  // permanent if the change is later committed.
+	  phi = allocate_temp<phi_info> (phi_insn, resource, 0);
+	  auto *input = allocate<use_info> (phi, resource, ultimate_def);
+	  input->m_is_temp = true;
+	  phi->m_is_temp = true;
+	  phi->make_degenerate (input);
+	  // Splice the phi into the definition list at the position
+	  // that the lookup above identified.
+	  phi->set_prev_def (dl.prev_def ());
+	  phi->set_next_def (dl.next_def ());
+	}
+
+      // Create a temporary use of the phi at the head of the first
+      // block, since we know for sure that it's available there.
+      insn_info *use_insn = bb->ebb ()->first_bb ()->head_insn ();
+      auto *new_use = allocate_temp<use_info> (use_insn, resource, phi);
+      new_use->m_is_temp = true;
+      return new_use;
+    }
+  return nullptr;
+}
+
+// See the comment above the declaration.
+use_array
+function_info::make_uses_available (obstack_watermark &watermark,
+				    use_array uses, bb_info *bb)
+{
+  unsigned int count = uses.size ();
+  if (count == 0)
+    return uses;
+
+  // Build a parallel array in which every use has been replaced by
+  // one that is known to be available at the head of BB.
+  auto **copies = XOBNEWVEC (watermark, access_info *, count);
+  for (unsigned int index = 0; index < count; ++index)
+    {
+      use_info *available = make_use_available (uses[index], bb);
+      // Propagate failure from make_use_available.
+      if (!available)
+	return use_array (access_array::invalid ());
+      copies[index] = available;
+    }
+  return use_array (copies, count);
+}
+
+// Return true if ACCESS1 can represent ACCESS2 and if ACCESS2 can
+// represent ACCESS1.
+static bool
+can_merge_accesses (access_info *access1, access_info *access2)
+{
+  // Identical accesses trivially represent each other.
+  if (access1 == access2)
+    return true;
+
+  // Otherwise the accesses are interchangeable only if both are uses
+  // of the same definition.
+  auto *use1 = dyn_cast<use_info *> (access1);
+  if (!use1)
+    return false;
+  auto *use2 = dyn_cast<use_info *> (access2);
+  return use2 && use1->def () == use2->def ();
+}
+
+// See the comment above the declaration.
+access_array
+rtl_ssa::merge_access_arrays_base (obstack_watermark &watermark,
+				   access_array accesses1,
+				   access_array accesses2)
+{
+  // Reusing one input unchanged avoids building a fresh array.
+  if (accesses1.empty ())
+    return accesses2;
+  if (accesses2.empty ())
+    return accesses1;
+
+  access_array_builder builder (watermark);
+  builder.reserve (accesses1.size () + accesses2.size ());
+
+  auto it1 = accesses1.begin ();
+  auto it2 = accesses2.begin ();
+  auto limit1 = accesses1.end ();
+  auto limit2 = accesses2.end ();
+
+  // Standard merge of two arrays ordered by register number, failing
+  // if the same resource is accessed in ways that cannot be merged.
+  while (it1 != limit1 && it2 != limit2)
+    {
+      access_info *entry1 = *it1;
+      access_info *entry2 = *it2;
+
+      unsigned int regno1 = entry1->regno ();
+      unsigned int regno2 = entry2->regno ();
+      if (regno1 < regno2)
+	{
+	  builder.quick_push (entry1);
+	  ++it1;
+	}
+      else if (regno2 < regno1)
+	{
+	  builder.quick_push (entry2);
+	  ++it2;
+	}
+      else
+	{
+	  if (!can_merge_accesses (entry1, entry2))
+	    return access_array::invalid ();
+
+	  builder.quick_push (entry1);
+	  ++it1;
+	  ++it2;
+	}
+    }
+
+  // Push whichever tail remains.
+  while (it1 != limit1)
+    builder.quick_push (*it1++);
+  while (it2 != limit2)
+    builder.quick_push (*it2++);
+
+  return builder.finish ();
+}
+
+// See the comment above the declaration.
+access_array
+rtl_ssa::insert_access_base (obstack_watermark &watermark,
+			     access_info *access1, access_array accesses2)
+{
+  access_array_builder builder (watermark);
+  builder.reserve (1 + accesses2.size ());
+
+  // Copy ACCESSES2, inserting ACCESS1 at the point determined by its
+  // register number.
+  unsigned int regno1 = access1->regno ();
+  auto it = accesses2.begin ();
+  auto limit = accesses2.end ();
+  bool inserted1 = false;
+  for (; it != limit; ++it)
+    {
+      access_info *access2 = *it;
+
+      unsigned int regno2 = access2->regno ();
+      if (regno2 < regno1)
+	{
+	  builder.quick_push (access2);
+	  continue;
+	}
+      if (regno2 == regno1)
+	{
+	  // ACCESS1 replaces an access to the same resource, provided
+	  // that the two are interchangeable.
+	  if (!can_merge_accesses (access1, access2))
+	    return access_array::invalid ();
+	  ++it;
+	}
+      builder.quick_push (access1);
+      inserted1 = true;
+      break;
+    }
+  // ACCESS1 sorts after everything in ACCESSES2.
+  if (!inserted1)
+    builder.quick_push (access1);
+  for (; it != limit; ++it)
+    builder.quick_push (*it);
+
+  return builder.finish ();
+}
+
+// See the comment above the declaration.
+access_array
+rtl_ssa::remove_note_accesses_base (obstack_watermark &watermark,
+				    access_array accesses)
+{
+  // First check whether there is anything to remove, so that the
+  // common case allocates nothing and returns ACCESSES unchanged.
+  bool found_note_access = false;
+  for (access_info *access : accesses)
+    if (access->only_occurs_in_notes ())
+      {
+	found_note_access = true;
+	break;
+      }
+  if (!found_note_access)
+    return accesses;
+
+  // Rebuild the array without the note-only accesses.
+  access_array_builder builder (watermark);
+  builder.reserve (accesses.size ());
+  for (access_info *access : accesses)
+    if (!access->only_occurs_in_notes ())
+      builder.quick_push (access);
+  return builder.finish ();
+}
+
+// Print RESOURCE to PP.
+void
+rtl_ssa::pp_resource (pretty_printer *pp, resource_info resource)
+{
+  // resource_info::print handles both register and memory resources.
+  resource.print (pp);
+}
+
+// Print ACCESS to PP.  FLAGS is a bitmask of PP_ACCESS_* flags.
+void
+rtl_ssa::pp_access (pretty_printer *pp, const access_info *access,
+		    unsigned int flags)
+{
+  if (!access)
+    pp_string (pp, "<null>");
+  // Test for phi_info before set_info, since phis are a subcategory
+  // of sets (see access_kind).
+  else if (auto *phi = dyn_cast<const phi_info *> (access))
+    phi->print (pp, flags);
+  else if (auto *set = dyn_cast<const set_info *> (access))
+    set->print (pp, flags);
+  else if (auto *clobber = dyn_cast<const clobber_info *> (access))
+    clobber->print (pp, flags);
+  else if (auto *use = dyn_cast<const use_info *> (access))
+    use->print (pp, flags);
+  else
+    pp_string (pp, "??? Unknown access");
+}
+
+// Print ACCESSES to PP.  FLAGS is a bitmask of PP_ACCESS_* flags.
+void
+rtl_ssa::pp_accesses (pretty_printer *pp, access_array accesses,
+		      unsigned int flags)
+{
+  if (accesses.empty ())
+    {
+      pp_string (pp, "none");
+      return;
+    }
+
+  // Print one access per line, with no trailing newline.
+  bool first = true;
+  for (access_info *access : accesses)
+    {
+      if (!first)
+	pp_newline_and_indent (pp, 0);
+      first = false;
+      pp_access (pp, access, flags);
+    }
+}
+
+// Print NODE to PP.
+void
+rtl_ssa::pp_def_node (pretty_printer *pp, const def_node *node)
+{
+  if (!node)
+    pp_string (pp, "<null>");
+  // Dispatch on the concrete kind of def_node.
+  else if (auto *group = dyn_cast<const clobber_group *> (node))
+    group->print (pp);
+  else if (auto *set = dyn_cast<const set_node *> (node))
+    set->print (pp);
+  else
+    pp_string (pp, "??? Unknown def node");
+}
+
+// Print MUX to PP.  A def_mux holds either a def_node or a def_info.
+void
+rtl_ssa::pp_def_mux (pretty_printer *pp, def_mux mux)
+{
+  if (auto *node = mux.dyn_cast<def_node *> ())
+    pp_def_node (pp, node);
+  else
+    pp_access (pp, mux.as_a<def_info *> ());
+}
+
+// Print DL to PP: the comparison result followed by the node or
+// access that the result applies to.
+void
+rtl_ssa::pp_def_lookup (pretty_printer *pp, def_lookup dl)
+{
+  pp_string (pp, "comparison result of ");
+  pp_decimal_int (pp, dl.comparison);
+  pp_string (pp, " for ");
+  pp_newline_and_indent (pp, 0);
+  pp_def_mux (pp, dl.mux);
+}
+
+// Dump RESOURCE to FILE.
+void
+dump (FILE *file, resource_info resource)
+{
+  // dump_using adapts the pretty-printer routine to a FILE stream.
+  dump_using (file, pp_resource, resource);
+}
+
+// Dump ACCESS to FILE.  FLAGS is a bitmask of PP_ACCESS_* flags.
+void
+dump (FILE *file, const access_info *access, unsigned int flags)
+{
+  // Forward to pp_access via the dump_using adapter.
+  dump_using (file, pp_access, access, flags);
+}
+
+// Dump ACCESSES to FILE.  FLAGS is a bitmask of PP_ACCESS_* flags.
+void
+dump (FILE *file, access_array accesses, unsigned int flags)
+{
+  // Forward to pp_accesses via the dump_using adapter.
+  dump_using (file, pp_accesses, accesses, flags);
+}
+
+// Dump NODE to FILE.
+void
+dump (FILE *file, const def_node *node)
+{
+  // Forward to pp_def_node via the dump_using adapter.
+  dump_using (file, pp_def_node, node);
+}
+
+// Dump MUX to FILE.
+void
+dump (FILE *file, def_mux mux)
+{
+  // Forward to pp_def_mux via the dump_using adapter.
+  dump_using (file, pp_def_mux, mux);
+}
+
+// Dump RESULT to FILE.
+void
+dump (FILE *file, def_lookup result)
+{
+  // Forward to pp_def_lookup via the dump_using adapter.
+  dump_using (file, pp_def_lookup, result);
+}
+
+// Debug interfaces to the dump routines above: convenience wrappers
+// that print to stderr.
+void debug (const resource_info &x) { dump (stderr, x); }
+void debug (const access_info *x) { dump (stderr, x); }
+void debug (const access_array &x) { dump (stderr, x); }
+void debug (const def_node *x) { dump (stderr, x); }
+void debug (const def_mux &x) { dump (stderr, x); }
+void debug (const def_lookup &x) { dump (stderr, x); }
diff --git a/gcc/rtl-ssa/accesses.h b/gcc/rtl-ssa/accesses.h
new file mode 100644
index 00000000000..fdb4a646183
--- /dev/null
+++ b/gcc/rtl-ssa/accesses.h
@@ -0,0 +1,1032 @@
+// Access-related classes for RTL SSA                               -*- C++ -*-
+// Copyright (C) 2020 Free Software Foundation, Inc.
+//
+// This file is part of GCC.
+//
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+//
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+// for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3.  If not see
+// <http://www.gnu.org/licenses/>.
+
+namespace rtl_ssa {
+
+// Forward declarations.
+class bb_info;
+class clobber_group;
+class def_node;
+class ebb_info;
+class insn_info;
+class phi_info;
+class set_info;
+
+// Used as a boolean argument to certain routines.
+enum class ignore_clobbers { NO, YES };
+
+// Represents something that the SSA form tracks: either a register
+// or memory.
+class resource_info
+{
+public:
+  // Return true if this resource represents memory.
+  bool is_mem () const { return regno == MEM_REGNO; }
+
+  // Return true if this resource represents a register.
+  bool is_reg () const { return regno != MEM_REGNO; }
+
+  // Print the name of the resource to PP.
+  void print_identifier (pretty_printer *pp) const;
+
+  // Possibly print additional information about the resource to PP.
+  void print_context (pretty_printer *pp) const;
+
+  // A combination of print_identifier and print_context.
+  void print (pretty_printer *pp) const;
+
+  // NOTE: the field order (mode, then regno) is relied on by brace
+  // initializers such as the "memory" constant below.
+
+  // The mode with which the resource is being defined or used.  This is
+  // always BLKmode for memory.  It can also be BLKmode for registers if
+  // we don't yet know the real mode, or if the mode is not relevant for
+  // some reason.
+  machine_mode mode;
+
+  // The pseudo register or single hard register that the resource represents,
+  // or MEM_REGNO for memory.
+  unsigned int regno;
+};
+
+// For simplicity, we treat memory as a single unified entity.
+const resource_info memory = { E_BLKmode, MEM_REGNO };
+
+// Flags used when printing access_infos.
+//
+// Print the location at which the access occurs.  This is redundant
+// when the access is being printed as part of the instruction or phi node
+// that contains the access.
+const unsigned int PP_ACCESS_INCLUDE_LOCATION = 1U << 0;
+//
+// Print links to other accesses: the definition that defines a use,
+// the uses of a definition, and the inputs of a phi node.
+const unsigned int PP_ACCESS_INCLUDE_LINKS = 1U << 1;
+//
+// Print additional properties about the access.
+const unsigned int PP_ACCESS_INCLUDE_PROPERTIES = 1U << 2;
+//
+// The usual flags when printing an access in isolation.
+const unsigned int PP_ACCESS_DEFAULT = (PP_ACCESS_INCLUDE_LOCATION
+					| PP_ACCESS_INCLUDE_LINKS
+					| PP_ACCESS_INCLUDE_PROPERTIES);
+//
+// The usual flags when printing a def_info from its defining instruction.
+const unsigned int PP_ACCESS_SETTER = (PP_ACCESS_INCLUDE_LINKS
+				       | PP_ACCESS_INCLUDE_PROPERTIES);
+//
+// The usual flags when printing a use_info from its user.
+const unsigned int PP_ACCESS_USER = PP_ACCESS_INCLUDE_PROPERTIES;
+
+// The various ways of accessing a resource.  The two range checks that
+// we need to perform are [SET, PHI] (for set_info) and [SET, CLOBBER]
+// (for def_info), so the ordering tries to make those tests as
+// efficient as possible.
+enum class access_kind : uint8_t
+{
+  // Set the resource to a useful value.
+  SET,
+
+  // A form of SET that collects the possible incoming values of the
+  // resource using a phi node; the resource does not actually change value.
+  PHI,
+
+  // Set the resource to a value that is both unknown and not useful.
+  CLOBBER,
+
+  // Use the current value of the resource.
+  USE
+};
+
+// A base class that represents an access to a resource.
+class access_info
+{
+  // Size: 1 LP64 word
+  friend class function_info;
+
+public:
+  // Return the resource that is being accessed.
+  resource_info resource () const { return { m_mode, m_regno }; }
+
+  // Return true if the access is to memory.
+  bool is_mem () const { return m_regno == MEM_REGNO; }
+
+  // Return true if the access is to a register.
+  bool is_reg () const { return m_regno != MEM_REGNO; }
+
+  // If the access is to a register, return the register number,
+  // otherwise return MEM_REGNO.
+  unsigned int regno () const { return m_regno; }
+
+  // For sets, return the mode of the value to which the resource is being set.
+  // For uses, return the mode in which the resource is being used (which for
+  // hard registers might be different from the mode in which the resource
+  // was set).
+  //
+  // When accessing memory, the mode is always BLKmode.  When accessing
+  // pseudo registers, the mode is always the mode of the pseudo register
+  // (and so doesn't, for example, take subregs into account).
+  machine_mode mode () const { return m_mode; }
+
+  // Return the kind of access that this is.
+  access_kind kind () const { return m_kind; }
+
+  // Return true if the access occurs in a phi node or an "artificial"
+  // instruction (see insn_info), false if it occurs in a real instruction.
+  bool is_artificial () const { return m_is_artificial; }
+
+  // Return the opposite of is_artificial.
+  bool is_real () const { return !m_is_artificial; }
+
+  // Return true if this access is a set_info whose result is used by at least
+  // one nondebug instruction.
+  bool is_set_with_nondebug_insn_uses () const;
+
+  // Return true if the access describes a set_info and if the value
+  // is defined by an RTX_AUTOINC rtx.
+  bool is_pre_post_modify () const { return m_is_pre_post_modify; }
+
+  // Return true if the access is a clobber_info that describes the effect
+  // of a called function.  This kind of clobber is added for -fipa-ra
+  // functions that clobber only a strict subset of the normal ABI set.
+  bool is_call_clobber () const { return m_is_call_clobber; }
+
+  // Return true if the access is a use_info that simply marks a point in
+  // the live range of a set_info at which the value is live out from
+  // the containing EBB.
+  bool is_live_out_use () const { return m_is_live_out_use; }
+
+  // Return true if the access is a use_info for an instruction and if
+  // at least some of the uses occur within a MEM address.
+  //
+  // There shouldn't be a need to check whether *all* uses occur within
+  // a MEM address, since in principle:
+  //
+  // A: (set (reg:SI R1) (mem:SI (post_inc:SI (reg:SI R2))))
+  //
+  // should be semantically equivalent to:
+  //
+  // B: (parallel [(set (reg:SI R1) (mem:SI (reg:SI R2)))
+  //               (set (reg:SI R2) (plus:SI (reg:SI R2) (const_int 4)))])
+  //
+  // even though R2 occurs only in MEMs for A but occurs outside MEMs for B.
+  bool includes_address_uses () const { return m_includes_address_uses; }
+
+  // Return true if the access occurs in an instruction and if at least
+  // some accesses to resource () occur in a read-modify-write context.
+  // This is equivalent to the DF_REF_READ_WRITE flag.
+  bool includes_read_writes () const { return m_includes_read_writes; }
+
+  // Return true if the access occurs in an instruction and if at least
+  // some accesses to resource () occur in a subreg context.
+  bool includes_subregs () const { return m_includes_subregs; }
+
+  // Return true if the access occurs in an instruction and if at least
+  // some accesses to resource () occur in a multi-register REG.
+  // This implies that resource () is a hard register.
+  bool includes_multiregs () const { return m_includes_multiregs; }
+
+  // Return true if the access occurs in a real nondebug instruction
+  // and if all accesses to resource () occur in notes, rather than
+  // in the main instruction pattern.
+  bool only_occurs_in_notes () const { return m_only_occurs_in_notes; }
+
+protected:
+  access_info (resource_info, access_kind);
+
+  void print_prefix_flags (pretty_printer *) const;
+  void print_properties_on_new_lines (pretty_printer *) const;
+
+private:
+  void set_mode (machine_mode mode) { m_mode = mode; }
+
+  // The values returned by the accessors above.
+  unsigned int m_regno;
+  access_kind m_kind : 8;
+
+protected:
+  // The value returned by the accessors above.
+  unsigned int m_is_artificial : 1;
+  unsigned int m_is_set_with_nondebug_insn_uses : 1;
+  unsigned int m_is_pre_post_modify : 1;
+  unsigned int m_is_call_clobber : 1;
+  unsigned int m_is_live_out_use : 1;
+  unsigned int m_includes_address_uses : 1;
+  unsigned int m_includes_read_writes : 1;
+  unsigned int m_includes_subregs : 1;
+  unsigned int m_includes_multiregs : 1;
+  unsigned int m_only_occurs_in_notes : 1;
+
+  // True if this access is a use_insn that occurs in a nondebug instruction,
+  // and if there are no following uses by nondebug instructions.  The next use
+  // is null, a use_info for a debug instruction, or a use_info for a phi node.
+  //
+  // Providing this helps to optimize use_info::next_nondebug_insn_use.
+  unsigned int m_is_last_nondebug_insn_use : 1;
+
+  // True if this access is a use_info for a debug instruction or
+  // a phi node.
+  unsigned int m_is_in_debug_insn_or_phi : 1;
+
+private:
+  // Used as a flag during various update routines; has no long-lasting
+  // meaning.
+  unsigned int m_has_been_superceded : 1;
+
+  // Indicates that this access has been allocated on the function_info's
+  // temporary obstack and so is not (yet) part of the proper SSA form.
+  unsigned int m_is_temp : 1;
+
+  // Bits for future expansion.
+  unsigned int m_spare : 2;
+
+  // The value returned by the accessor above.
+  machine_mode m_mode : 8;
+};
+
+// A contiguous array of access_info pointers.  Used to represent a
+// (mostly small) number of definitions and/or uses.
+using access_array = array_slice<access_info *const>;
+
+// A class for building an access_array on an obstack.  It automatically
+// frees any in-progress array if the build attempt fails before finish ()
+// has been called.
+class access_array_builder : public obstack_watermark
+{
+public:
+  using obstack_watermark::obstack_watermark;
+
+  // Make sure that the array has enough for NUM_ACCESSES accesses.
+  void reserve (unsigned int num_accesses);
+
+  // Add ACCESS to the end of the array that we're building, given that
+  // reserve () has already made room.
+  void quick_push (access_info *access);
+
+  // Finish and return the new array.  The array survives the destruction
+  // of the builder.
+  array_slice<access_info *> finish ();
+};
+
+// An access_info that represents the use of a resource in either a phi node
+// or an instruction.  It records which set_info (if any) provides the
+// resource's value.
+class use_info : public access_info
+{
+  // Overall size: 5 LP64 words.
+  friend class set_info;
+  friend class function_info;
+
+public:
+  // Return true if the access occurs in an instruction rather than a phi node.
+  // The instruction might be a debug instruction or a nondebug instruction.
+  bool is_in_any_insn () const { return m_insn_or_phi.is_first (); }
+
+  // Return true if the access occurs in a nondebug instruction,
+  // false if it occurs in a debug instruction or a phi node.
+  bool is_in_nondebug_insn () const { return !m_is_in_debug_insn_or_phi; }
+
+  // Return true if the use occurs in a debug instruction.
+  bool is_in_debug_insn () const;
+
+  // Return true if the access occurs in a phi node rather than in an
+  // instruction.
+  bool is_in_phi () const { return m_insn_or_phi.is_second (); }
+
+  // Return true if the access occurs in a debug instruction or a phi node,
+  // false if it occurs in a nondebug instruction.
+  bool is_in_debug_insn_or_phi () const { return m_is_in_debug_insn_or_phi; }
+
+  // Return the instruction that uses the resource.  Only valid if
+  // is_in_any_insn ().
+  insn_info *insn () const { return m_insn_or_phi.known_first (); }
+
+  // Return the phi node that uses the resource.  Only valid if is_in_phi ().
+  phi_info *phi () const { return m_insn_or_phi.known_second (); }
+
+  // Return the basic block that contains the access.
+  bb_info *bb () const;
+
+  // Return the extended basic block that contains the access.
+  ebb_info *ebb () const;
+
+  // Return the set_info whose result the access uses, or null if the
+  // value of the resource is completely undefined.
+  //
+  // The value is undefined if the use is completely upwards exposed
+  // (i.e. has no preceding definition) or if the preceding definition
+  // is a clobber rather than a set.
+  //
+  // The mode of the definition can be different from the mode of the use;
+  // for example, a hard register might be set in DImode and used in SImode.
+  set_info *def () const { return m_def; }
+
+  // Return the previous and next uses of the definition.  See set_info
+  // for details about the ordering.
+  //
+  // These routines are only meaningful when def () is nonnull.
+  use_info *prev_use () const;
+  use_info *next_use () const;
+
+  // Return the next use by a nondebug instruction, or null if none.
+  //
+  // This is only valid if is_in_nondebug_insn ().  It is equivalent to,
+  // but more efficient than:
+  //
+  //    next_use () && next_use ()->is_in_nondebug_insn ()
+  //    ? next_use () : nullptr
+  use_info *next_nondebug_insn_use () const;
+
+  // Return the next use by an instruction, or null if none.  The use might
+  // be by a debug instruction or a nondebug instruction.
+  //
+  // This is only valid if is_in_any_insn ().  It is equivalent to:
+  //
+  //    next_use () && next_use ()->is_in_any_insn () ? next_use () : nullptr
+  use_info *next_any_insn_use () const;
+
+  // Return the previous use by a phi node in the list, or null if none.
+  //
+  // This is only valid if is_in_phi ().  It is equivalent to:
+  //
+  //    prev_use () && prev_use ()->is_in_phi () ? prev_use () : nullptr
+  use_info *prev_phi_use () const;
+
+  // Return true if this is the first use of the definition.  See set_info
+  // for details about the ordering.
+  //
+  // This routine is only meaningful when def () is nonnull.
+  bool is_first_use () const;
+
+  // Return true if this is the last use of the definition.  See set_info
+  // for details about the ordering.
+  //
+  // This routine is only meaningful when def () is nonnull.
+  bool is_last_use () const;
+
+  // Print a description of def () to PP.
+  void print_def (pretty_printer *pp) const;
+
+  // Print a description of the location of the use to PP.
+  void print_location (pretty_printer *pp) const;
+
+  // Print a description of the use to PP under the control of
+  // PP_ACCESS_* flags FLAGS.
+  void print (pretty_printer *pp,
+	      unsigned int flags = PP_ACCESS_DEFAULT) const;
+
+private:
+  // If we only create a set_info splay tree for sets that are used by
+  // three instructions or more, then only about 16% of uses need to be in
+  // a splay tree.  It is therefore more memory-efficient to use separate
+  // nodes for the splay tree, instead of storing the child nodes
+  // directly in the use_info.
+
+  // Make insn_info the first (and thus directly-encoded) choice since
+  // insn () is read much more often than phi ().
+  using insn_or_phi = pointer_mux<insn_info, phi_info>;
+
+  // The use belongs to a list that is partitioned into three sections:
+  //
+  // (1) all uses in nondebug instructions, in reverse postorder
+  //
+  // (2) all uses in debug instructions, in reverse postorder
+  //
+  // (3) all phi nodes, in no particular order.
+  //
+  // In order to preserve memory:
+  //
+  // - The set_info just has a pointer to the first use.
+  //
+  // - The first use's "prev" pointer points to the last use.
+  //
+  // - The last use's "next" pointer points to the last use in a nondebug
+  //   instruction, or null if there are no such uses.
+  using last_use_or_prev_use = pointer_mux<use_info>;
+  using last_nondebug_insn_use_or_next_use = pointer_mux<use_info>;
+
+  use_info (insn_or_phi, resource_info, set_info *);
+
+  use_info *last_use () const;
+  use_info *last_nondebug_insn_use () const;
+  bool calculate_is_last_nondebug_insn_use () const;
+
+  void record_reference (rtx_obj_reference, bool);
+  void set_insn (insn_info *);
+  void set_def (set_info *set) { m_def = set; }
+  void set_is_live_out_use (bool value) { m_is_live_out_use = value; }
+  void copy_prev_from (use_info *);
+  void copy_next_from (use_info *);
+  void set_last_use (use_info *);
+  void set_prev_use (use_info *);
+  void set_last_nondebug_insn_use (use_info *);
+  void set_next_use (use_info *);
+  void clear_use_links ();
+  bool has_use_links ();
+  bool check_integrity ();
+
+  // The location of the use.
+  insn_or_phi m_insn_or_phi;
+
+  // The overloaded "prev" and "next" pointers, as described above.
+  last_use_or_prev_use m_last_use_or_prev_use;
+  last_nondebug_insn_use_or_next_use m_last_nondebug_insn_use_or_next_use;
+
+  // The value of def ().
+  set_info *m_def;
+};
+
+// Iterators for lists of uses.
+using use_iterator = list_iterator<use_info, &use_info::next_use>;
+using reverse_use_iterator = list_iterator<use_info, &use_info::prev_use>;
+
+// Like use_iterator, but specifically for uses by nondebug instructions,
+// uses by any kind of instruction, and uses by phi nodes respectively.
+// These iterators allow a nullptr end point even if there are other types
+// of use in the same definition.
+using nondebug_insn_use_iterator
+  = list_iterator<use_info, &use_info::next_nondebug_insn_use>;
+using any_insn_use_iterator
+  = list_iterator<use_info, &use_info::next_any_insn_use>;
+using phi_use_iterator = list_iterator<use_info, &use_info::prev_phi_use>;
+
+// A view of an access_array in which every entry is known to be a use_info.
+using use_array = const_derived_container<use_info *, access_array>;
+
+// An access_info that describes a definition of a resource.  The definition
+// can be a set or a clobber; the difference is that a set provides a known
+// and potentially useful value, while a clobber provides an unknown and
+// unusable value.
+//
+// Every definition is associated with an insn_info.  All definitions of
+// a given resource are stored in a linked list, maintained in reverse
+// postorder.
+class def_info : public access_info
+{
+  // Overall size: 4 LP64 words
+  friend class function_info;
+  friend class clobber_group;
+
+public:
+  // Return the instruction that contains the definition.
+  insn_info *insn () const { return m_insn; }
+
+  // Return the basic block that contains the definition.
+  bb_info *bb () const;
+
+  // Return the extended basic block that contains the access.
+  ebb_info *ebb () const;
+
+  // Return the previous and next definitions of the same resource,
+  // in reverse postorder, or null if no such definition exists.
+  def_info *prev_def () const;
+  def_info *next_def () const;
+
+  // Return true if this is the first definition in the list.
+  bool is_first_def () const;
+
+  // Return true if this is the last definition in the list.
+  bool is_last_def () const;
+
+  // Print the location of the definition to PP.
+  void print_location (pretty_printer *pp) const;
+
+  // Print a unique identifier for this definition to PP.  The identifier has
+  // the form <resource>:<insn uid>.
+  void print_identifier (pretty_printer *pp) const;
+
+protected:
+  def_info (insn_info *insn, resource_info resource, access_kind kind);
+
+private:
+  // In order to preserve memory, the list head only points to the first
+  // definition in the list.  The "prev" entry of the first definition
+  // then points to the last definition.
+  using last_def_or_prev_def = pointer_mux<def_info>;
+
+  // For similar memory-saving reasons, if we want to create a splay tree
+  // of accesses to a resource, we hang the root off the "next" entry of
+  // the last definition in the list.
+  using splay_root_or_next_def = pointer_mux<def_node, def_info>;
+
+  void set_insn (insn_info *insn) { m_insn = insn; }
+
+  def_info *last_def () const;
+  def_node *splay_root () const;
+
+  void record_reference (rtx_obj_reference, bool);
+  void copy_prev_from (def_info *);
+  void copy_next_from (def_info *);
+  void set_last_def (def_info *);
+  void set_prev_def (def_info *);
+  void set_splay_root (def_node *);
+  void set_next_def (def_info *);
+  void clear_def_links ();
+  bool has_def_links ();
+
+  // The location of the definition.
+  insn_info *m_insn;
+
+  // The overloaded "prev" and "next" pointers, as described above.
+  last_def_or_prev_def m_last_def_or_prev_def;
+  splay_root_or_next_def m_splay_root_or_next_def;
+};
+
+// Iterators for lists of definitions.
+using def_iterator = list_iterator<def_info, &def_info::next_def>;
+using reverse_def_iterator = list_iterator<def_info, &def_info::prev_def>;
+
+// A view of an access_array in which every entry is known to be a
+// def_info.
+using def_array = const_derived_container<def_info *, access_array>;
+
+// A def_info that sets the resource to a value that is both
+// unknown and not useful.  This is only ever used for registers,
+// since memory always has some useful contents.
+//
+// Neighboring clobbers are grouped into clobber_groups, so that it's
+// possible to skip over all neighboring clobbers in a single step.
+class clobber_info : public def_info
+{
+  // Overall size: 8 LP64 words
+  friend class default_splay_tree_accessors<clobber_info *>;
+  friend class default_splay_tree_accessors_with_parent<clobber_info *>;
+  friend class function_info;
+  friend class clobber_group;
+
+public:
+  using splay_tree = default_rootless_splay_tree<clobber_info *>;
+
+  // Return true if the clobber belongs to a clobber_group, false if it
+  // is standalone.
+  bool is_in_group () const { return m_group; }
+
+  // Return the group that the clobber is in, or null if none.
+  //
+  // Complexity: amortized O(1), worst case O(N), where N is the number
+  // of clobbers in the containing clobber_group.
+  clobber_group *group () const;
+
+  // Print a description of the clobber to PP under the control of
+  // PP_ACCESS_* flags FLAGS.
+  void print (pretty_printer *pp,
+	      unsigned int flags = PP_ACCESS_DEFAULT) const;
+
+private:
+  // Once normal call clobbers are taken out of the equation by
+  // insn_call_clobbers_notes, clobber_infos account for roughly 6% of all
+  // def_infos, with the rest being set_infos.  clobber_infos are
+  // therefore much less size-sensitive than set_infos are.
+  //
+  // As noted above, we want to group neighboring clobbers together so that
+  // we can quickly step over them to find the previous or next "real" set.
+  // We also want to be able to split the group in sublinear time,
+  // for example when inserting a set/use pair between two clobbers
+  // in a group.
+  //
+  // So:
+  //
+  // - Clobbers need to have ready access to their group, so that we
+  //   can cheaply skip over the whole group.  This means that they
+  //   need a group pointer.
+  //
+  // - We need to be able to update the group pointer lazily, so that
+  //   the cost of updating it is counted against accesses to the clobbers
+  //   that need updating.
+  //
+  // We also want to be able to insert clobbers into a group in
+  // amortized logarithmic time.
+  //
+  // We therefore use a splay tree to represent the clobbers in a group,
+  // with the nodes storing their parent node.  It is then possible to
+  // perform splay operations without first getting hold of the root.
+  // The root of the splay tree always has a valid, up-to-date group,
+  // so lazy group updates can get the new group from there.
+  //
+  // Roughly 90% of clobbers have a neighboring definition in the same
+  // block, which means that most need to be stored in a splay tree.
+  // We therefore store the splay tree fields directly in the clobber_info
+  // rather than using a separate node object.
+
+  clobber_info (insn_info *, unsigned int);
+
+  void set_group (clobber_group *group) { m_group = group; }
+  void update_group (clobber_group *);
+  clobber_group *recompute_group ();
+
+  // The child and parent nodes in the splay tree.
+  clobber_info *m_children[2];
+  clobber_info *m_parent;
+
+  // The last known value of group (), which might now be out of date.
+  clobber_group *m_group;
+};
+
+using clobber_tree = clobber_info::splay_tree::rooted;
+
+// A def_info that sets the resource to a useful value.  It records
+// all uses of the value in a linked list.  The list is partitioned
+// into three sections:
+//
+// (1) all uses by nondebug instructions, in reverse postorder, followed by
+// (2) all uses by debug instructions, in reverse postorder, followed by
+// (3) all uses by phi nodes, in no particular order.
+//
+// There are two cases:
+//
+// - If we know in advance that there is a single definition of a resource R
+//   and therefore decide not to use phi nodes for R, (1) and (2) contain
+//   all uses of R, regardless of which blocks contain the uses.  (3) is
+//   then empty.
+//
+// - Otherwise, (1) only contains uses in the same extended basic block
+//   as the definition, and it is terminated by a use that marks the end
+//   of the live range for the EBB.  In other words, if the resource dies
+//   in the EBB, the last use by a nondebug instruction marks the point at
+//   which it dies, otherwise there is a fake live-out use at the end of
+//   the EBB.
+//
+// Since debug instructions should not affect codegen, they opportunistically
+// attach to the same set_info as nondebug instructions where possible.
+// If a nondebug instruction would attach to a degenerate phi and if no
+// such phi exists, debug instructions instead attach to whichever set_info
+// provides the value, regardless of where that set_info is.
+class set_info : public def_info
+{
+  // Overall size: 6 LP64 words.
+  friend class function_info;
+  using use_splay_tree = splay_tree<use_info *>;
+
+public:
+  // Return the first and last uses of the set, or null if the list is empty.
+  // See the comment above for details about the order.
+  use_info *first_use () const { return m_first_use; }
+  use_info *last_use () const;
+
+  // Return the first and last uses of the set by nondebug instructions,
+  // or null if there are no such uses.  The uses are in reverse postorder.
+  use_info *first_nondebug_insn_use () const;
+  use_info *last_nondebug_insn_use () const;
+
+  // Return the first use of the set by any kind of instruction, or null
+  // if there are no such uses.  The uses are in the order described above.
+  use_info *first_any_insn_use () const;
+
+  // Return the last use of the set by phi inputs, or null if there are no
+  // such uses.  The phi input uses are in no particular order.
+  use_info *last_phi_use () const;
+
+  // Return true if at least one nondebug instruction or phi node uses
+  // the set's result.  This is equivalent to testing whether the set is
+  // ever live.
+  bool has_nondebug_uses () const;
+
+  // Return true if anything uses the set's result.  Note that this includes
+  // uses by debug instructions, so it should not be used for optimization
+  // decisions.
+  bool has_any_uses () const { return m_first_use; }
+
+  // Return true if at least one nondebug instruction uses the set's result.
+  bool has_nondebug_insn_uses () const;
+
+  // Return true if at least one phi node uses the set's result.
+  bool has_phi_uses () const;
+
+  // Return true if the set and its uses are contained within a single
+  // extended basic block, with the set coming first.  This implies
+  // that all uses are by instructions rather than phi nodes.
+  bool is_local_to_ebb () const;
+
+  // List all the uses of the set, in the order described above.
+  iterator_range<use_iterator> all_uses () const;
+
+  // Return all_uses () in reverse order.
+  iterator_range<reverse_use_iterator> reverse_all_uses () const;
+
+  // List the uses of the set by nondebug instructions, in reverse postorder.
+  iterator_range<nondebug_insn_use_iterator> nondebug_insn_uses () const;
+
+  // Return nondebug_insn_uses () in reverse order.
+  iterator_range<reverse_use_iterator> reverse_nondebug_insn_uses () const;
+
+  // List the uses of the set by any kind of instruction.  The list follows
+  // the order described above.
+  iterator_range<any_insn_use_iterator> all_insn_uses () const;
+
+  // List the uses of the set by phi nodes, in no particular order.
+  // There is therefore no reversed equivalent of this list.
+  iterator_range<phi_use_iterator> phi_uses () const;
+
+  // Print a description of the set to PP under the control of
+  // PP_ACCESS_* flags FLAGS.
+  void print (pretty_printer *pp,
+	      unsigned int flags = PP_ACCESS_DEFAULT) const;
+
+protected:
+  set_info (insn_info *, resource_info, access_kind);
+
+  // Print information about uses () to PP, continuing information printed
+  // about the set itself.
+  void print_uses_on_new_lines (pretty_printer *pp) const;
+
+private:
+  // Sets (including phis) account for about 94% of all definitions.
+
+  set_info (insn_info *, resource_info);
+
+  void set_first_use (use_info *);
+
+  // The first use in the list.
+  use_info *m_first_use;
+
+  // The root of a splay tree of all uses, built lazily when we first
+  // think it's needed.
+  use_splay_tree m_use_tree;
+};
+
+// A set_info for an on-the-side phi node.  The phi node is attached
+// to an extended basic block EBB and has one input for each incoming edge.
+// The inputs are represented as an array of use_infos, with input I
+// corresponding to EDGE_PRED (EBB->first_bb ()->cfg_bb (), I).
+//
+// Each phi node has a densely-allocated unique identifier, which is intended
+// to be suitable for bitmaps or sbitmaps.
+//
+// All the phi nodes in an extended basic block are chained together
+// into a linked list.  The list has no particular order.
+class phi_info : public set_info
+{
+  // Overall size: 8 LP64 words
+  friend class function_info;
+
+public:
+  // Return the previous and next phi nodes in the extended basic block's list,
+  // or null if none.
+  phi_info *prev_phi () const { return m_prev_phi; }
+  phi_info *next_phi () const { return m_next_phi; }
+
+  // Return the number of phi inputs.  This is 1 for degenerate phis,
+  // otherwise it is equal to the number of incoming edges.
+  unsigned int num_inputs () const { return m_num_inputs; }
+
+  // Return true if the phi node is degenerate, i.e. if it has only a
+  // single input.
+  bool is_degenerate () const { return m_num_inputs == 1; }
+
+  // Return the phi node's unique identifier.
+  unsigned int uid () const { return m_uid; }
+
+  // Return the array of inputs.  For degenerate phi nodes, this array contains
+  // a single element, otherwise it has one input per incoming edge,
+  // with element E corresponding to incoming edge E.
+  use_array inputs () const;
+
+  // Return the use_info that describes the phi input for incoming edge E.
+  use_info *input_use (unsigned int e) const;
+
+  // Return the value of resource () on incoming edge E, or null if the
+  // value is completely undefined for that edge.
+  set_info *input_value (unsigned int e) const;
+
+  // Print a description of the phi node to PP under the control of
+  // PP_ACCESS_* flags FLAGS.
+  void print (pretty_printer *pp,
+	      unsigned int flags = PP_ACCESS_DEFAULT) const;
+
+private:
+  phi_info (insn_info *insn, resource_info resource, unsigned int uid);
+
+  void make_degenerate (use_info *);
+  void set_inputs (use_array inputs);
+  void set_prev_phi (phi_info *prev_phi) { m_prev_phi = prev_phi; }
+  void set_next_phi (phi_info *next_phi) { m_next_phi = next_phi; }
+  void clear_phi_links () { m_prev_phi = m_next_phi = nullptr; }
+  bool has_phi_links () { return m_prev_phi || m_next_phi; }
+
+  // The values returned by the accessors above.
+  unsigned int m_uid;
+  unsigned int m_num_inputs;
+  union
+  {
+    access_info *const *m_inputs;
+    access_info *m_single_input;
+  };
+  phi_info *m_prev_phi;
+  phi_info *m_next_phi;
+};
+
+// An iterator for lists of phi nodes.
+using phi_iterator = list_iterator<phi_info, &phi_info::next_phi>;
+
+// One node in a splay tree of definitions.  This base class represents
+// a single def_info, but it is structured to allow derived classes
+// to add a range.
+class def_node
+{
+  // Size: 3 LP64 words.
+  friend class function_info;
+  friend class default_splay_tree_accessors<def_node *>;
+
+public:
+  // Return the first definition that the node represents.
+  def_info *first_def () const;
+
+  // Return which type of access first_def () is.
+  bool contains_clobber () const { return m_clobber_or_set.is_first (); }
+  bool contains_set () const { return m_clobber_or_set.is_second (); }
+
+protected:
+  // More nodes are clobbers rather than sets, so put clobbers first.
+  // Neither choice can be null.
+  using clobber_or_set = pointer_mux<clobber_info, set_info>;
+
+  // Construct a node that represents FIRST_DEF (and possibly later
+  // definitions too, if called from a derived class).
+  def_node (clobber_or_set first_def);
+
+  // The first definition in the node.
+  clobber_or_set m_clobber_or_set;
+
+private:
+  // The splay tree child nodes.
+  def_node *m_children[2];
+};
+
+// One node in a splay tree of def_infos, representing a single set_info.
+class set_node : public def_node
+{
+  // Overall size: 3 LP64 words.
+  friend class function_info;
+
+public:
+  // Return the set that the node contains.
+  set_info *set () const { return m_clobber_or_set.known_second (); }
+
+  // Print a description of the node to PP.
+  void print (pretty_printer *pp) const;
+
+private:
+  // Construct a node for SET.
+  set_node (set_info *set) : def_node (set) {}
+};
+
+// One node in a splay tree of def_infos.  This class represents
+// a list of contiguous clobber_infos, in execution order.
+class clobber_group : public def_node
+{
+  // Overall size: 5 LP64 words.
+  friend class function_info;
+
+public:
+  // Return the first and last clobbers in the group.  The results are
+  // always nonnull.
+  clobber_info *first_clobber () const;
+  clobber_info *last_clobber () const { return m_last_clobber; }
+
+  // Return true if this group has been replaced by new clobber_groups.
+  bool has_been_superceded () const { return !m_last_clobber; }
+
+  // Return a list of the clobbers in the group, in execution order.
+  iterator_range<def_iterator> clobbers () const;
+
+  // Print a description of the group to PP.
+  void print (pretty_printer *pp) const;
+
+private:
+  clobber_group (clobber_info *clobber);
+
+  // Set the values of first_clobber () and last_clobber ().
+  void set_first_clobber (clobber_info *c) { m_clobber_or_set = c; }
+  void set_last_clobber (clobber_info *c) { m_last_clobber = c; }
+
+  // The value returned by last_clobber ().
+  clobber_info *m_last_clobber;
+
+  // A splay tree that contains all the clobbers in the group.
+  // The root of the splay tree always has an up-to-date group
+  // pointer, but the other clobbers in the tree might not.
+  clobber_tree m_clobber_tree;
+};
+
+// A splay tree in which one node represents a standalone set_info or a
+// range of consecutive clobber_infos.  The nodes follow execution order
+// and maintain the invariant that no two groups of clobber_infos appear
+// next to each other (instead, the groups are merged).
+using def_splay_tree = default_splay_tree<def_node *>;
+
+// This type represents a choice between:
+//
+// (1) a single definition of a resource
+// (2) a node in a def_splay_tree that represents either a single
+//     set or a group of clobbers.
+class def_mux : public pointer_mux<def_info, def_node>
+{
+  using parent = pointer_mux<def_info, def_node>;
+
+  // Provide the same constructors as the pointer_mux.
+  using parent::parent;
+
+public:
+  // Return the first definition associated with this mux.  If the mux holds
+  // a single definition, the result is that definition.  If the mux holds
+  // a clobber_group, the result is the first clobber in the group.
+  def_info *first_def () const;
+
+  // Return the last definition associated with this mux.  If the mux holds
+  // a single definition, the result is that definition.  If the mux holds
+  // a clobber_group, the result is the last clobber in the group.
+  def_info *last_def () const;
+
+  // If the pointer represents a set_info, return that set_info,
+  // otherwise return null.
+  set_info *set () const;
+};
+
+// This class represents the result of looking up the definition of a
+// resource at a particular point, here referred to as point P.
+// There are four states:
+//
+// - MUX is null if there were no definitions to search.
+//
+// - Otherwise, COMPARISON is 0 if we found a definition at P or a
+//   clobber_group that spans P.  MUX then contains this definition
+//   or clobber_group.
+//
+// - Otherwise, COMPARISON is less than 0 if we found the definition
+//   that precedes P or the group of clobbers that precedes P.  MUX then
+//   contains this definition or clobber_group.
+//
+// - Otherwise, COMPARISON is greater than zero and we found the
+//   definition that follows P, or the group of clobbers that follows P.
+//   MUX then contains this definition or clobber_group.
+class def_lookup
+{
+public:
+  // If we found a clobber_group that spans P, return the definition
+  // that precedes the start of the group, or null if none.
+  //
+  // Otherwise, return the last definition that occurs before P,
+  // or null if none.
+  def_info *prev_def () const;
+
+  // If we found a clobber_group that spans P, return the definition
+  // that follows the end of the group, or null if none.
+  //
+  // Otherwise, return the first definition that occurs after P,
+  // or null if none.
+  def_info *next_def () const;
+
+  // If we found a set_info at P, return that set_info, otherwise return null.
+  set_info *matching_set () const;
+
+  // If we found a set_info at P, return that set_info, otherwise return
+  // prev_def ().
+  def_info *matching_or_prev_def () const;
+
+  // If we found a set_info at P, return that set_info, otherwise return
+  // next_def ().
+  def_info *matching_or_next_def () const;
+
+  def_mux mux;
+  int comparison;
+};
+
+void pp_resource (pretty_printer *, resource_info);
+void pp_access (pretty_printer *, const access_info *,
+		unsigned int flags = PP_ACCESS_DEFAULT);
+void pp_accesses (pretty_printer *, access_array,
+		  unsigned int flags = PP_ACCESS_DEFAULT);
+void pp_def_node (pretty_printer *, const def_node *);
+void pp_def_mux (pretty_printer *, def_mux);
+void pp_def_lookup (pretty_printer *, def_lookup);
+
+}
+
+void dump (FILE *, rtl_ssa::resource_info);
+void dump (FILE *, const rtl_ssa::access_info *,
+	   unsigned int flags = rtl_ssa::PP_ACCESS_DEFAULT);
+void dump (FILE *, rtl_ssa::access_array,
+	   unsigned int flags = rtl_ssa::PP_ACCESS_DEFAULT);
+void dump (FILE *, const rtl_ssa::def_node *);
+void dump (FILE *, rtl_ssa::def_mux);
+void dump (FILE *, rtl_ssa::def_lookup);
+
+void DEBUG_FUNCTION debug (const rtl_ssa::resource_info *);
+void DEBUG_FUNCTION debug (const rtl_ssa::access_info *);
+void DEBUG_FUNCTION debug (const rtl_ssa::access_array);
+void DEBUG_FUNCTION debug (const rtl_ssa::def_node *);
+void DEBUG_FUNCTION debug (const rtl_ssa::def_mux &);
+void DEBUG_FUNCTION debug (const rtl_ssa::def_lookup &);
diff --git a/gcc/rtl-ssa/blocks.cc b/gcc/rtl-ssa/blocks.cc
new file mode 100644
index 00000000000..5436305b11c
--- /dev/null
+++ b/gcc/rtl-ssa/blocks.cc
@@ -0,0 +1,1146 @@
+// Implementation of basic-block-related functions for RTL SSA      -*- C++ -*-
+// Copyright (C) 2020 Free Software Foundation, Inc.
+//
+// This file is part of GCC.
+//
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+//
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+// for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3.  If not see
+// <http://www.gnu.org/licenses/>.
+
+#define INCLUDE_ALGORITHM
+#define INCLUDE_FUNCTIONAL
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "backend.h"
+#include "rtl.h"
+#include "df.h"
+#include "rtl-ssa.h"
+#include "rtl-ssa/internals.inl"
+#include "cfganal.h"
+#include "cfgrtl.h"
+#include "predict.h"
+
+using namespace rtl_ssa;
+
+// See the comment above the declaration.
+void
+bb_info::print_identifier (pretty_printer *pp) const
+{
+  char tmp[3 * sizeof (index ()) + 3];
+  snprintf (tmp, sizeof (tmp), "bb%d", index ());
+  pp_string (pp, tmp);
+  if (ebb_info *ebb = this->ebb ())
+    {
+      pp_space (pp);
+      pp_left_bracket (pp);
+      ebb->print_identifier (pp);
+      pp_right_bracket (pp);
+    }
+}
+
+// See the comment above the declaration.
+void
+bb_info::print_full (pretty_printer *pp) const
+{
+  pp_string (pp, "basic block ");
+  print_identifier (pp);
+  pp_colon (pp);
+
+  auto print_insn = [pp](const char *header, const insn_info *insn)
+    {
+      pp_newline_and_indent (pp, 2);
+      pp_string (pp, header);
+      pp_newline_and_indent (pp, 2);
+      if (insn)
+	pp_insn (pp, insn);
+      else
+	pp_string (pp, "<uninitialized>");
+      pp_indentation (pp) -= 4;
+    };
+
+  print_insn ("head:", head_insn ());
+
+  pp_newline (pp);
+  pp_newline_and_indent (pp, 2);
+  pp_string (pp, "contents:");
+  if (!head_insn ())
+    {
+      pp_newline_and_indent (pp, 2);
+      pp_string (pp, "<uninitialized>");
+      pp_indentation (pp) -= 2;
+    }
+  else if (auto insns = real_insns ())
+    {
+      bool is_first = true;
+      for (const insn_info *insn : insns)
+	{
+	  if (is_first)
+	    is_first = false;
+	  else
+	    pp_newline (pp);
+	  pp_newline_and_indent (pp, 2);
+	  pp_insn (pp, insn);
+	  pp_indentation (pp) -= 2;
+	}
+    }
+  else
+    {
+      pp_newline_and_indent (pp, 2);
+      pp_string (pp, "none");
+      pp_indentation (pp) -= 2;
+    }
+  pp_indentation (pp) -= 2;
+
+  pp_newline (pp);
+  print_insn ("end:", end_insn ());
+}
+
+// See the comment above the declaration.
+void
+ebb_call_clobbers_info::print_summary (pretty_printer *pp) const
+{
+  pp_string (pp, "call clobbers for ABI ");
+  if (m_abi)
+    pp_decimal_int (pp, m_abi->id ());
+  else
+    pp_string (pp, "<null>");
+}
+
+// See the comment above the declaration.
+void
+ebb_call_clobbers_info::print_full (pretty_printer *pp) const
+{
+  print_summary (pp);
+  pp_colon (pp);
+  pp_newline_and_indent (pp, 2);
+  auto print_node = [](pretty_printer *pp,
+		       const insn_call_clobbers_note *note)
+    {
+      if (insn_info *insn = note->insn ())
+	insn->print_identifier_and_location (pp);
+      else
+	pp_string (pp, "<null>");
+    };
+  print (pp, root (), print_node);
+  pp_indentation (pp) -= 2;
+}
+
+// See the comment above the declaration.
+void
+ebb_info::print_identifier (pretty_printer *pp) const
+{
+  // first_bb is populated by the constructor and so should always
+  // be nonnull.
+  auto index = first_bb ()->index ();
+  char tmp[3 * sizeof (index) + 4];
+  snprintf (tmp, sizeof (tmp), "ebb%d", index);
+  pp_string (pp, tmp);
+}
+
+// See the comment above the declaration.
+void
+ebb_info::print_full (pretty_printer *pp) const
+{
+  pp_string (pp, "extended basic block ");
+  print_identifier (pp);
+  pp_colon (pp);
+
+  pp_newline_and_indent (pp, 2);
+  if (insn_info *phi_insn = this->phi_insn ())
+    {
+      phi_insn->print_identifier_and_location (pp);
+      pp_colon (pp);
+      if (auto phis = this->phis ())
+	{
+	  bool is_first = true;
+	  for (const phi_info *phi : phis)
+	    {
+	      if (is_first)
+		is_first = false;
+	      else
+		pp_newline (pp);
+	      pp_newline_and_indent (pp, 2);
+	      pp_access (pp, phi, PP_ACCESS_SETTER);
+	      pp_indentation (pp) -= 2;
+	    }
+	}
+      else
+	{
+	  pp_newline_and_indent (pp, 2);
+	  pp_string (pp, "no phi nodes");
+	  pp_indentation (pp) -= 2;
+	}
+    }
+  else
+    pp_string (pp, "no phi insn");
+  pp_indentation (pp) -= 2;
+
+  for (const bb_info *bb : bbs ())
+    {
+      pp_newline (pp);
+      pp_newline_and_indent (pp, 2);
+      pp_bb (pp, bb);
+      pp_indentation (pp) -= 2;
+    }
+
+  for (ebb_call_clobbers_info *ecc : call_clobbers ())
+    {
+      pp_newline (pp);
+      pp_newline_and_indent (pp, 2);
+      pp_ebb_call_clobbers (pp, ecc);
+      pp_indentation (pp) -= 2;
+    }
+}
+
+// Add a dummy use to mark that DEF is live out of BB's EBB at the end of BB.
+void
+function_info::add_live_out_use (bb_info *bb, set_info *def)
+{
+  // There is nothing to do if DEF is an artificial definition at the end
+  // of BB.  In that case the definition is rooted at the end of the block
+  // and we wouldn't gain anything by inserting a use immediately after it.
+  // If we did want to insert a use, we'd need to associate it with a new
+  // instruction that comes after bb->end_insn ().
+  if (def->insn () == bb->end_insn ())
+    return;
+
+  // If the end of the block already has an artificial use, that use
+  // acts to make DEF live at the appropriate point.
+  unsigned int regno = def->regno ();
+  if (find_access (bb->end_insn ()->uses (), regno))
+    return;
+
+  // Currently there is no need to maintain a backward link from the end
+  // instruction to the list of live-out uses.  Such a list would be
+  // expensive to update if it was represented using the usual insn_info
+  // access arrays.
+  use_info *use = allocate<use_info> (bb->end_insn (), def->resource (), def);
+  use->set_is_live_out_use (true);
+  add_use (use);
+}
+
+// Return true if all nondebug uses of DEF are live-out uses.
+static bool
+all_uses_are_live_out_uses (set_info *def)
+{
+  for (use_info *use : def->all_uses ())
+    if (!use->is_in_debug_insn () && !use->is_live_out_use ())
+      return false;
+  return true;
+}
+
+// SET, if nonnull, is a definition of something that is live out from BB.
+// Return the live-out value itself.
+set_info *
+function_info::live_out_value (bb_info *bb, set_info *set)
+{
+  // Degenerate phis only exist to provide a definition for uses in the
+  // same EBB.  The live-out value is the same as the live-in value.
+  if (auto *phi = safe_dyn_cast<phi_info *> (set))
+    if (phi->is_degenerate ())
+      {
+	set = phi->input_value (0);
+
+	// Remove the phi if it turned out to be useless.  This is
+	// mainly useful for memory, because we don't know ahead of time
+	// whether a block will use memory or not.
+	if (bb == bb->ebb ()->last_bb () && all_uses_are_live_out_uses (phi))
+	  replace_phi (phi, set);
+      }
+
+  return set;
+}
+
+// Add PHI to EBB and enter it into the function's hash table.
+void
+function_info::append_phi (ebb_info *ebb, phi_info *phi)
+{
+  phi_info *first_phi = ebb->first_phi ();
+  if (first_phi)
+    first_phi->set_prev_phi (phi);
+  phi->set_next_phi (first_phi);
+  ebb->set_first_phi (phi);
+  add_def (phi);
+}
+
+// Remove PHI from its current position in the SSA graph.
+void
+function_info::remove_phi (phi_info *phi)
+{
+  phi_info *next = phi->next_phi ();
+  phi_info *prev = phi->prev_phi ();
+
+  if (next)
+    next->set_prev_phi (prev);
+
+  if (prev)
+    prev->set_next_phi (next);
+  else
+    phi->ebb ()->set_first_phi (next);
+
+  remove_def (phi);
+  phi->clear_phi_links ();
+}
+
+// Remove PHI from the SSA graph and free its memory.
+void
+function_info::delete_phi (phi_info *phi)
+{
+  gcc_assert (!phi->has_any_uses ());
+
+  // Remove the inputs to the phi.
+  for (use_info *input : phi->inputs ())
+    remove_use (input);
+
+  remove_phi (phi);
+
+  phi->set_next_phi (m_free_phis);
+  m_free_phis = phi;
+}
+
+// If possible, remove PHI and replace all uses with NEW_VALUE.
+void
+function_info::replace_phi (phi_info *phi, set_info *new_value)
+{
+  auto update_use = [&](use_info *use)
+    {
+      remove_use (use);
+      use->set_def (new_value);
+      add_use (use);
+    };
+
+  if (new_value)
+    for (use_info *use : phi->nondebug_insn_uses ())
+      if (!use->is_live_out_use ())
+	{
+	  // We need to keep the phi around for its local uses.
+	  // Turn it into a degenerate phi, if it isn't already.
+	  use_info *use = phi->input_use (0);
+	  if (use->def () != new_value)
+	    update_use (use);
+
+	  if (phi->is_degenerate ())
+	    return;
+
+	  phi->make_degenerate (use);
+
+	  // Redirect all phi users to NEW_VALUE.
+	  while (use_info *phi_use = phi->last_phi_use ())
+	    update_use (phi_use);
+
+	  return;
+	}
+
+  // Replace the uses.  We can discard uses that only existed for the
+  // sake of marking live-out values, since the resource is now transparent
+  // in the phi's EBB.
+  while (use_info *use = phi->last_use ())
+    if (use->is_live_out_use ())
+      remove_use (use);
+    else
+      update_use (use);
+
+  delete_phi (phi);
+}
+
+// Create and return a phi node for EBB.  RESOURCE is the resource that
+// the phi node sets (and thus that all the inputs set too).  NUM_INPUTS
+// is the number of inputs, which is 1 for a degenerate phi.  INPUTS[I]
+// is a set_info that gives the value of input I, or null if the value
+// is either unknown or uninitialized.  If NUM_INPUTS > 1, this array
+// is allocated on the main obstack and can be reused for the use array.
+//
+// Add the created phi node to its basic block and enter it into the
+// function's hash table.
+phi_info *
+function_info::create_phi (ebb_info *ebb, resource_info resource,
+			   access_info **inputs, unsigned int num_inputs)
+{
+  phi_info *phi = m_free_phis;
+  if (phi)
+    {
+      m_free_phis = phi->next_phi ();
+      *phi = phi_info (ebb->phi_insn (), resource, phi->uid ());
+    }
+  else
+    {
+      phi = allocate<phi_info> (ebb->phi_insn (), resource, m_next_phi_uid);
+      m_next_phi_uid += 1;
+    }
+
+  // Convert the array of set_infos into an array of use_infos.  Also work
+  // out what mode the phi should have.
+  machine_mode new_mode = resource.mode;
+  for (unsigned int i = 0; i < num_inputs; ++i)
+    {
+      auto *input = safe_as_a<set_info *> (inputs[i]);
+      auto *use = allocate<use_info> (phi, resource, input);
+      add_use (use);
+      inputs[i] = use;
+      if (input)
+	new_mode = combine_modes (new_mode, input->mode ());
+    }
+
+  phi->set_inputs (use_array (inputs, num_inputs));
+  phi->set_mode (new_mode);
+
+  append_phi (ebb, phi);
+
+  return phi;
+}
+
+// Create and return a degenerate phi for EBB whose input comes from DEF.
+// This is used in cases where DEF is known to be available on entry to
+// EBB but was not previously used within it.  If DEF is for a register,
+// there are two cases:
+//
+// (1) DEF was already live on entry to EBB but was previously transparent
+//     within it.
+//
+// (2) DEF was not previously live on entry to EBB and is being made live
+//     by this update.
+//
+// At the moment, this function only handles the case in which EBB has a
+// single predecessor block and DEF is defined in that block's EBB.
+phi_info *
+function_info::create_degenerate_phi (ebb_info *ebb, set_info *def)
+{
+  access_info *input = def;
+  phi_info *phi = create_phi (ebb, def->resource (), &input, 1);
+  if (def->is_reg ())
+    {
+      unsigned int regno = def->regno ();
+
+      // Find the single predecessor mentioned above.
+      basic_block pred_cfg_bb = single_pred (ebb->first_bb ()->cfg_bb ());
+      bb_info *pred_bb = this->bb (pred_cfg_bb);
+
+      if (!bitmap_set_bit (DF_LR_IN (ebb->first_bb ()->cfg_bb ()), regno))
+	{
+	  // The register was not previously live on entry to EBB and
+	  // might not have been live on exit from PRED_BB either.
+	  if (bitmap_set_bit (DF_LR_OUT (pred_cfg_bb), regno))
+	    add_live_out_use (pred_bb, def);
+	}
+      else
+	{
+	  // The register was previously live in to EBB.  Add live-out uses
+	  // at the appropriate points.
+	  insn_info *next_insn = nullptr;
+	  if (def_info *next_def = phi->next_def ())
+	    next_insn = next_def->insn ();
+	  for (bb_info *bb : ebb->bbs ())
+	    {
+	      if ((next_insn && *next_insn <= *bb->end_insn ())
+		  || !bitmap_bit_p (DF_LR_OUT (bb->cfg_bb ()), regno))
+		break;
+	      add_live_out_use (bb, def);
+	    }
+	}
+    }
+  return phi;
+}
+
+// Create a bb_info for CFG_BB, given that no such structure currently exists.
+bb_info *
+function_info::create_bb_info (basic_block cfg_bb)
+{
+  bb_info *bb = allocate<bb_info> (cfg_bb);
+  gcc_checking_assert (!m_bbs[cfg_bb->index]);
+  m_bbs[cfg_bb->index] = bb;
+  return bb;
+}
+
+// Add BB to the end of the list of blocks.
+void
+function_info::append_bb (bb_info *bb)
+{
+  if (m_last_bb)
+    m_last_bb->set_next_bb (bb);
+  else
+    m_first_bb = bb;
+  bb->set_prev_bb (m_last_bb);
+  m_last_bb = bb;
+}
+
+// Called while building SSA form using BI, with BI.current_bb being
+// the entry block.
+//
+// Create the entry block instructions and their definitions.  The only
+// useful instruction is the end instruction, which carries definitions
+// for the values that are live on entry to the function.  However, it
+// seems simpler to create a head instruction too, rather than force all
+// users of the block information to treat the entry block as a special case.
+void
+function_info::add_entry_block_defs (build_info &bi)
+{
+  bb_info *bb = bi.current_bb;
+  basic_block cfg_bb = bi.current_bb->cfg_bb ();
+  auto *lr_info = DF_LR_BB_INFO (cfg_bb);
+
+  // Create the head and end instructions.  INSN (the end instruction)
+  // is the one that carries the definitions below.
+  bb->set_head_insn (append_artificial_insn (bb));
+  insn_info *insn = append_artificial_insn (bb);
+  bb->set_end_insn (insn);
+
+  // Collect the new definitions in m_temp_defs before committing
+  // them to INSN.
+  start_insn_accesses ();
+
+  // Using LR to derive the liveness information means that we create an
+  // entry block definition for upwards exposed registers.  These registers
+  // are sometimes genuinely uninitialized.  However, some targets also
+  // create a pseudo PIC base register and only initialize it later.
+  // Handling that case correctly seems more important than optimizing
+  // uninitialized uses.
+  unsigned int regno;
+  bitmap_iterator in_bi;
+  EXECUTE_IF_SET_IN_BITMAP (&lr_info->out, 0, regno, in_bi)
+    {
+      auto *set = allocate<set_info> (insn, full_register (regno));
+      append_def (set);
+      m_temp_defs.safe_push (set);
+      bi.record_reg_def (regno, set);
+    }
+
+  // Create a definition that reflects the state of memory on entry to
+  // the function.
+  auto *set = allocate<set_info> (insn, memory);
+  append_def (set);
+  m_temp_defs.safe_push (set);
+  bi.record_mem_def (set);
+
+  // Attach the collected definitions to INSN and clear the temporary
+  // arrays.
+  finish_insn_accesses (insn);
+}
+
+// Called while building SSA form using BI.  Create phi nodes for the
+// current EBB, leaving backedge inputs to be filled in later.  Set
+// bi.last_access to the values that are live on entry to the EBB,
+// regardless of whether or not they are phi nodes.
+void
+function_info::add_phi_nodes (build_info &bi)
+{
+  ebb_info *ebb = bi.current_ebb;
+  basic_block cfg_bb = ebb->first_bb ()->cfg_bb ();
+  auto *lr_info = DF_LR_BB_INFO (cfg_bb);
+
+  // Get a local cache of the predecessor blocks' live out values.
+  // A null entry means that the live-out values are not yet known
+  // (i.e. the edge is a backedge in this traversal).
+  unsigned int num_preds = EDGE_COUNT (cfg_bb->preds);
+  auto_vec<const bb_live_out_info *, 16> pred_live_outs (num_preds);
+  bool has_backedge = false;
+  bool has_eh_edge = false;
+  edge e;
+  edge_iterator ei;
+  FOR_EACH_EDGE (e, ei, cfg_bb->preds)
+    {
+      bb_info *pred_bb = this->bb (e->src);
+      const bb_live_out_info *live_out = &bi.bb_live_out[e->src->index];
+
+      // In LR (but not LIVE), the registers live on entry to a block must
+      // normally be a subset of the registers live on exit from any
+      // given predecessor block.  The exceptions are EH edges, which
+      // implicitly clobber all registers in eh_edge_abi.full_reg_clobbers ().
+      // Thus if a register is upwards exposed in an EH handler, it won't
+      // be propagated across the EH edge.
+      //
+      // Excluding that special case, all registers live on entry to
+      // EBB are also live on exit from PRED_BB and were (or will be)
+      // considered when creating LIVE_OUT.
+      gcc_checking_assert ((e->flags & EDGE_EH)
+			   || !bitmap_intersect_compl_p (&lr_info->in,
+							 DF_LR_OUT (e->src)));
+      if (!pred_bb || !pred_bb->head_insn ())
+	{
+	  has_backedge = true;
+	  live_out = nullptr;
+	}
+      has_eh_edge |= (e->flags & EDGE_EH);
+      pred_live_outs.quick_push (live_out);
+    }
+
+  // PRED_REG_INDICES[I] tracks the index into PRED_LIVE_OUTS[I]->reg_values
+  // of the first unused entry.
+  auto_vec<unsigned int, 16> pred_reg_indices (num_preds);
+  pred_reg_indices.quick_grow_cleared (num_preds);
+
+  // Use this array to build up the list of inputs to each phi.
+  m_temp_defs.safe_grow (num_preds);
+
+  // Return true if the current phi is degenerate, i.e. if all its inputs
+  // are the same.
+  auto is_degenerate_phi = [&]()
+    {
+      // A backedge input is unknown at this point, so conservatively
+      // treat the phi as non-degenerate.
+      if (has_backedge)
+	return false;
+
+      for (unsigned int i = 1; i < num_preds; ++i)
+	if (m_temp_defs[i] != m_temp_defs[0])
+	  return false;
+
+      return true;
+    };
+
+  // Finish calculating the live-in value for RESOURCE.  Decide how to
+  // represent the value of RESOURCE on entry to EBB and return its definition.
+  auto finish_phi = [&](resource_info resource) -> set_info *
+    {
+      access_info **inputs;
+      unsigned int num_inputs;
+      if (is_degenerate_phi ())
+	{
+	  auto *input = safe_as_a<set_info *> (m_temp_defs[0]);
+	  if (!input)
+	    // The live-in value is completely uninitialized.
+	    return nullptr;
+
+	  unsigned int regno = input->regno ();
+	  if (input->is_reg () && !bitmap_bit_p (bi.ebb_use, regno))
+	    // The live-in value comes from a single source and there
+	    // are no uses of it within the EBB itself.  We therefore
+	    // don't need a phi node.
+	    return input;
+
+	  // The live-in value comes from a single source and might be
+	  // used by the EBB itself.  Create a degenerate phi for it.
+	  inputs = m_temp_defs.begin ();
+	  num_inputs = 1;
+	}
+      else
+	{
+	  // Copy the phi inputs to permanent storage.
+	  obstack_grow (&m_obstack, m_temp_defs.address (),
+			num_preds * sizeof (access_info *));
+	  inputs = static_cast<access_info **> (obstack_finish (&m_obstack));
+	  num_inputs = num_preds;
+	}
+      return create_phi (ebb, resource, inputs, num_inputs);
+    };
+
+  if (bi.ebb_live_in_for_debug)
+    bitmap_clear (bi.ebb_live_in_for_debug);
+
+  // Get the definition of each live input register, excluding registers
+  // that are known to have a single definition that dominates all uses.
+  unsigned int regno;
+  bitmap_iterator in_bi;
+  EXECUTE_IF_AND_IN_BITMAP (&lr_info->in, m_potential_phi_regs,
+			    0, regno, in_bi)
+    {
+      for (unsigned int pred_i = 0; pred_i < num_preds; ++pred_i)
+	{
+	  set_info *input = nullptr;
+	  if (const bb_live_out_info *pred_live_out = pred_live_outs[pred_i])
+	    {
+	      // Skip over registers that aren't live on entry to this block.
+	      unsigned int reg_i = pred_reg_indices[pred_i];
+	      while (reg_i < pred_live_out->num_reg_values
+		     && pred_live_out->reg_values[reg_i]->regno () < regno)
+		reg_i += 1;
+
+	      // As we asserted above, REGNO is live out from the predecessor
+	      // block, at least by the LR reckoning.  But there are three
+	      // cases:
+	      //
+	      // (1) The live-out value is well-defined (the normal case),
+	      //     with the definition coming either from the block itself
+	      //     or from a predecessor block.  In this case reg_values
+	      //     has a set_info entry for the register.
+	      //
+	      // (2) The live-out value was not modified by the predecessor
+	      //     EBB and did not have a defined value on input to that
+	      //     EBB either.  In this case reg_values has no entry for
+	      //     the register.
+	      //
+	      // (3) The live-out value was modified by the predecessor EBB,
+	      //     but the final modification was a clobber rather than
+	      //     a set.  In this case reg_values again has no entry for
+	      //     the register.
+	      //
+	      // The phi input for (2) and (3) is undefined, which we
+	      // represent as a null set_info.
+	      if (reg_i < pred_live_out->num_reg_values)
+		{
+		  set_info *set = pred_live_out->reg_values[reg_i];
+		  if (set->regno () == regno)
+		    {
+		      input = set;
+		      reg_i += 1;
+		    }
+		}
+
+	      // Fully call-clobbered values do not survive across EH edges.
+	      // In particular, if a call that normally sets a result register
+	      // throws an exception, the set of the result register should
+	      // not be treated as live on entry to the EH handler.
+	      if (has_eh_edge
+		  && HARD_REGISTER_NUM_P (regno)
+		  && eh_edge_abi.clobbers_full_reg_p (regno)
+		  && (EDGE_PRED (cfg_bb, pred_i)->flags & EDGE_EH))
+		input = nullptr;
+
+	      pred_reg_indices[pred_i] = reg_i;
+	    }
+	  m_temp_defs[pred_i] = input;
+	}
+      // Later code works out the correct mode of the phi.  Use BLKmode
+      // as a placeholder for now.
+      bi.record_reg_def (regno, finish_phi ({ E_BLKmode, regno }));
+      if (bi.ebb_live_in_for_debug)
+	bitmap_set_bit (bi.ebb_live_in_for_debug, regno);
+    }
+
+  // Repeat the process above for memory.
+  for (unsigned int pred_i = 0; pred_i < num_preds; ++pred_i)
+    {
+      set_info *input = nullptr;
+      if (const bb_live_out_info *pred_live_out = pred_live_outs[pred_i])
+	input = pred_live_out->mem_value;
+      m_temp_defs[pred_i] = input;
+    }
+  bi.record_mem_def (finish_phi (memory));
+
+  // Leave the temporary phi-input array empty for the next user.
+  m_temp_defs.truncate (0);
+}
+
+// Called while building SSA form using BI.
+//
+// If FLAGS is DF_REF_AT_TOP, create the head insn for BI.current_bb
+// and populate its uses and definitions.  If FLAGS is 0, do the same
+// for the end insn.
+void
+function_info::add_artificial_accesses (build_info &bi, df_ref_flags flags)
+{
+  bb_info *bb = bi.current_bb;
+  basic_block cfg_bb = bb->cfg_bb ();
+  auto *lr_info = DF_LR_BB_INFO (cfg_bb);
+  df_ref ref;
+
+  insn_info *insn;
+  if (flags == DF_REF_AT_TOP)
+    {
+      // The exit block has no block head note to attach the insn to.
+      if (cfg_bb->index == EXIT_BLOCK)
+	insn = append_artificial_insn (bb);
+      else
+	insn = append_artificial_insn (bb, bb_note (cfg_bb));
+      bb->set_head_insn (insn);
+    }
+  else
+    {
+      insn = append_artificial_insn (bb);
+      bb->set_end_insn (insn);
+    }
+
+  // Collect the new accesses in m_temp_uses and m_temp_defs before
+  // committing them to INSN.
+  start_insn_accesses ();
+
+  FOR_EACH_ARTIFICIAL_USE (ref, cfg_bb->index)
+    if ((DF_REF_FLAGS (ref) & DF_REF_AT_TOP) == flags)
+      {
+	unsigned int regno = DF_REF_REGNO (ref);
+	machine_mode mode = GET_MODE (DF_REF_REAL_REG (ref));
+	resource_info resource { mode, regno };
+
+	// A definition must be available.
+	gcc_checking_assert (bitmap_bit_p (&lr_info->in, regno)
+			     || (flags != DF_REF_AT_TOP
+				 && bitmap_bit_p (&lr_info->def, regno)));
+	set_info *def = bi.current_reg_value (regno);
+	auto *use = allocate<use_info> (insn, resource, def);
+	add_use (use);
+	m_temp_uses.safe_push (use);
+      }
+
+  // Track the return value of memory by adding an artificial use of
+  // memory at the end of the exit block.
+  if (flags == 0 && cfg_bb->index == EXIT_BLOCK)
+    {
+      auto *use = allocate<use_info> (insn, memory, bi.current_mem_value ());
+      add_use (use);
+      m_temp_uses.safe_push (use);
+    }
+
+  FOR_EACH_ARTIFICIAL_DEF (ref, cfg_bb->index)
+    if ((DF_REF_FLAGS (ref) & DF_REF_AT_TOP) == flags)
+      {
+	unsigned int regno = DF_REF_REGNO (ref);
+	machine_mode mode = GET_MODE (DF_REF_REAL_REG (ref));
+	resource_info resource { mode, regno };
+
+	// If the value isn't used later in the block and isn't live
+	// on exit, we could instead represent the definition as a
+	// clobber_info.  However, that case should be relatively
+	// rare and set_info is in any case more compact than clobber_info.
+	set_info *def = allocate<set_info> (insn, resource);
+	append_def (def);
+	m_temp_defs.safe_push (def);
+	bi.record_reg_def (regno, def);
+      }
+
+  // Model the effect of a memory clobber on an incoming edge by adding
+  // a fake definition of memory at the start of the block.  We don't need
+  // to add a use of the phi node because memory is implicitly always live.
+  if (flags == DF_REF_AT_TOP && has_abnormal_call_or_eh_pred_edge_p (cfg_bb))
+    {
+      set_info *def = allocate<set_info> (insn, memory);
+      append_def (def);
+      m_temp_defs.safe_push (def);
+      bi.record_mem_def (def);
+    }
+
+  // Attach the collected uses and definitions to INSN and clear the
+  // temporary arrays.
+  finish_insn_accesses (insn);
+}
+
+// Called while building SSA form using BI.  Create insn_infos for all
+// relevant instructions in BI.current_bb.
+void
+function_info::add_block_contents (build_info &bi)
+{
+  basic_block cfg_bb = bi.current_bb->cfg_bb ();
+  rtx_insn *rtl;
+  FOR_BB_INSNS (cfg_bb, rtl)
+    {
+      // Only "real" instructions (those with an RTL pattern) are
+      // of interest.
+      if (!INSN_P (rtl))
+	continue;
+      add_insn_to_block (bi, rtl);
+    }
+}
+
+// Called while building SSA form using BI.  Use BI.bb_live_out to record
+// the values that are live out from BI.current_bb.
+void
+function_info::record_block_live_out (build_info &bi)
+{
+  bb_info *bb = bi.current_bb;
+  ebb_info *ebb = bi.current_ebb;
+  basic_block cfg_bb = bb->cfg_bb ();
+  bb_live_out_info *live_out = &bi.bb_live_out[bb->index ()];
+  auto *lr_info = DF_LR_BB_INFO (bb->cfg_bb ());
+
+  // Calculate which subset of m_potential_phi_regs is live out from EBB
+  // at the end of BB.
+  auto_bitmap live_out_from_ebb;
+  edge e;
+  edge_iterator ei;
+  FOR_EACH_EDGE (e, ei, cfg_bb->succs)
+    {
+      // Successors inside the same EBB do not end the EBB's live range.
+      bb_info *dest_bb = this->bb (e->dest);
+      if (!dest_bb || dest_bb->ebb () != ebb)
+	bitmap_ior_and_into (live_out_from_ebb, DF_LR_IN (e->dest),
+			     m_potential_phi_regs);
+    }
+
+  // Record the live-out register values.  The bitmap iteration pushes
+  // the values onto m_temp_obstack in order of increasing regno, which
+  // is the order that add_phi_nodes and populate_backedge_phis expect.
+  unsigned int regno;
+  bitmap_iterator out_bi;
+  EXECUTE_IF_AND_IN_BITMAP (&lr_info->out, m_potential_phi_regs,
+			    0, regno, out_bi)
+    if (set_info *value = live_out_value (bb, bi.current_reg_value (regno)))
+      {
+	if (value->ebb () == ebb && bitmap_bit_p (live_out_from_ebb, regno))
+	  add_live_out_use (bb, value);
+	obstack_ptr_grow (&m_temp_obstack, value);
+      }
+
+  // Close off the obstack object and make it the block's reg_values array.
+  live_out->num_reg_values = (obstack_object_size (&m_temp_obstack)
+			      / sizeof (set_info *));
+  auto *data = obstack_finish (&m_temp_obstack);
+  live_out->reg_values = static_cast<set_info **> (data);
+
+  live_out->mem_value = live_out_value (bb, bi.current_mem_value ());
+}
+
+// Called while building SSA form using BI.  Check if BI.current_bb has
+// any outgoing backedges.  If so, use the up-to-date contents of
+// BI.bb_live_out to populate the associated inputs of any phi nodes.
+void
+function_info::populate_backedge_phis (build_info &bi)
+{
+  bb_info *bb = bi.current_bb;
+  basic_block cfg_bb = bb->cfg_bb ();
+  const bb_live_out_info *live_out = &bi.bb_live_out[bb->index ()];
+
+  edge e;
+  edge_iterator ei;
+  FOR_EACH_EDGE (e, ei, cfg_bb->succs)
+    {
+      // Check if this edge counts as a backedge in the current traversal.
+      // A successor whose head insn already exists has been processed,
+      // so an edge to it must be a backedge.
+      bb_info *succ_bb = this->bb (e->dest);
+      if (!succ_bb || !succ_bb->head_insn ())
+	continue;
+
+      // Although the phis do not keep a defined order long-term, they are
+      // still in reverse regno order at this point.  We can therefore use
+      // a merge operation on the phis and the live-out values, scanning
+      // reg_values downwards (REG_I) as the phi regnos decrease.
+      unsigned int input_i = e->dest_idx;
+      int reg_i = live_out->num_reg_values - 1;
+      for (phi_info *phi : succ_bb->ebb ()->phis ())
+	{
+	  set_info *input = nullptr;
+	  if (phi->is_mem ())
+	    input = live_out->mem_value;
+	  else
+	    {
+	      // Skip over any intervening live-out values.
+	      unsigned int regno = phi->regno ();
+	      while (reg_i >= 0)
+		{
+		  set_info *reg_value = live_out->reg_values[reg_i];
+		  if (reg_value->regno () < regno)
+		    break;
+		  reg_i -= 1;
+		  if (reg_value->regno () == regno)
+		    {
+		      input = reg_value;
+		      break;
+		    }
+		}
+	    }
+	  if (input)
+	    {
+	      // The backedge input should not have been set yet.
+	      use_info *use = phi->input_use (input_i);
+	      gcc_assert (!use->def ());
+	      use->set_def (input);
+	      add_use (use);
+	    }
+	}
+    }
+}
+
+// Return true if it would be better to continue an EBB across NEW_EDGE
+// rather than across OLD_EDGE, given that both edges are viable candidates.
+// This is not a total ordering.
+static bool
+better_ebb_edge_p (edge new_edge, edge old_edge)
+{
+  // If both probabilities are known and differ, pick the likelier edge.
+  auto new_prob = new_edge->probability;
+  auto old_prob = old_edge->probability;
+  if (new_prob.initialized_p ()
+      && old_prob.initialized_p ()
+      && !(old_prob == new_prob))
+    return old_prob < new_prob;
+
+  // If both edges are equally likely, prefer NEW_EDGE if it is a
+  // fallthru edge; in all other cases just stick with OLD_EDGE.
+  return (new_edge->flags & EDGE_FALLTHRU) != 0;
+}
+
+// Pick and return the next basic block in an EBB that currently ends with BB.
+// Return null if the EBB must end with BB.
+static basic_block
+choose_next_block_in_ebb (basic_block bb)
+{
+  // Although there's nothing in principle wrong with having an EBB that
+  // starts with the entry block and includes later blocks, there's not
+  // really much point either.  Keeping the entry block separate means
+  // that uses of arguments consistently occur through phi nodes, rather
+  // than the arguments sometimes appearing to come from an EBB-local
+  // definition instead.
+  if (bb->index == ENTRY_BLOCK)
+    return nullptr;
+
+  bool optimize_for_speed_p = optimize_bb_for_speed_p (bb);
+  edge best_edge = nullptr;
+  edge e;
+  edge_iterator ei;
+  FOR_EACH_EDGE (e, ei, bb->succs)
+    {
+      // Filter out edges that cannot extend the EBB: complex edges,
+      // edges to the exit block, edges whose destination has other
+      // predecessors, and edges that cross a speed/size boundary.
+      if (e->flags & EDGE_COMPLEX)
+	continue;
+      if (e->dest->index == EXIT_BLOCK)
+	continue;
+      if (!single_pred_p (e->dest))
+	continue;
+      if (optimize_for_speed_p != optimize_bb_for_speed_p (e->dest))
+	continue;
+      if (!best_edge || better_ebb_edge_p (e, best_edge))
+	best_edge = e;
+    }
+
+  return best_edge ? best_edge->dest : nullptr;
+}
+
+// Partition the function's blocks into EBBs and build SSA form for all
+// EBBs in the function.
+void
+function_info::process_all_blocks ()
+{
+  auto temps = temp_watermark ();
+  unsigned int num_bb_indices = last_basic_block_for_fn (m_fn);
+
+  // Compute the starting reverse postorder.  We tweak this later to try
+  // to get better EBB assignments.
+  auto *postorder = new int[n_basic_blocks_for_fn (m_fn)];
+  unsigned int postorder_num
+    = pre_and_rev_post_order_compute (nullptr, postorder, true);
+  gcc_assert (int (postorder_num) <= n_basic_blocks_for_fn (m_fn));
+
+  // Construct the working state for this function and its subroutines.
+  // NOTE(review): the array has m_num_regs + 1 entries; presumably the
+  // extra entry is for memory — confirm against build_info's accessors.
+  build_info bi;
+  bi.last_access = XOBNEWVEC (&m_temp_obstack, access_info *, m_num_regs + 1);
+  memset (bi.last_access, 0, (m_num_regs + 1) * sizeof (set_info *));
+
+  // The bb_live_out array shouldn't need to be initialized, since we'll
+  // always write to an entry before reading from it.  But poison the
+  // contents when checking, just to make sure we don't accidentally use
+  // an uninitialized value.
+  bi.bb_live_out = XOBNEWVEC (&m_temp_obstack, bb_live_out_info,
+			      num_bb_indices);
+  if (flag_checking)
+    memset (bi.bb_live_out, 0xaf,
+	    num_bb_indices * sizeof (bb_live_out_info));
+
+  // Only pay the overhead of recording a separate live-in bitmap if
+  // there are debug instructions that might need it.
+  auto_bitmap ebb_live_in;
+  if (MAY_HAVE_DEBUG_INSNS)
+    {
+      bi.ebb_live_in_for_debug = ebb_live_in;
+      // The bitmap is tested using individual bit operations, so optimize
+      // for that case.
+      bitmap_tree_view (ebb_live_in);
+    }
+  else
+    bi.ebb_live_in_for_debug = nullptr;
+
+  // Iterate over the blocks in reverse postorder.  In cases where
+  // multiple possible orders exist, prefer orders that chain blocks
+  // together into EBBs.  If multiple possible EBBs exist, try to pick
+  // the ones that are most likely to be profitable.
+  auto_vec<bb_info *, 16> ebb;
+  auto_bitmap ebb_use_tmp;
+  auto_bitmap ebb_def_tmp;
+  for (unsigned int i = 0; i < postorder_num; ++i)
+    if (!m_bbs[postorder[i]])
+      {
+	// Choose and create the blocks that should form the next EBB,
+	// and calculate the set of registers that the EBB uses and defines.
+	// Only do actual bitmap operations if the EBB contains multiple
+	// blocks.
+	basic_block cfg_bb = BASIC_BLOCK_FOR_FN (m_fn, postorder[i]);
+	bi.ebb_use = &DF_LR_BB_INFO (cfg_bb)->use;
+	bi.ebb_def = &DF_LR_BB_INFO (cfg_bb)->def;
+	ebb.safe_push (create_bb_info (cfg_bb));
+	cfg_bb = choose_next_block_in_ebb (cfg_bb);
+	if (cfg_bb)
+	  {
+	    // An EBB with two blocks.
+	    bitmap_ior (ebb_use_tmp, bi.ebb_use, &DF_LR_BB_INFO (cfg_bb)->use);
+	    bitmap_ior (ebb_def_tmp, bi.ebb_def, &DF_LR_BB_INFO (cfg_bb)->def);
+	    bi.ebb_use = ebb_use_tmp;
+	    bi.ebb_def = ebb_def_tmp;
+	    ebb.safe_push (create_bb_info (cfg_bb));
+	    cfg_bb = choose_next_block_in_ebb (cfg_bb);
+	    while (cfg_bb)
+	      {
+		// An EBB with three or more blocks.
+		bitmap_ior_into (bi.ebb_use, &DF_LR_BB_INFO (cfg_bb)->use);
+		bitmap_ior_into (bi.ebb_def, &DF_LR_BB_INFO (cfg_bb)->def);
+		ebb.safe_push (create_bb_info (cfg_bb));
+		cfg_bb = choose_next_block_in_ebb (cfg_bb);
+	      }
+	  }
+
+	// Create the EBB itself.
+	bi.current_ebb = allocate<ebb_info> (ebb[0], ebb.last ());
+	for (bb_info *bb : ebb)
+	  {
+	    bb->set_ebb (bi.current_ebb);
+	    append_bb (bb);
+	  }
+
+	// Populate the contents of the EBB.
+	bi.current_ebb->set_phi_insn (append_artificial_insn (ebb[0]));
+	if (ebb[0]->index () == ENTRY_BLOCK)
+	  {
+	    gcc_assert (ebb.length () == 1);
+	    bi.current_bb = ebb[0];
+	    add_entry_block_defs (bi);
+	    record_block_live_out (bi);
+	  }
+	else if (EDGE_COUNT (ebb[0]->cfg_bb ()->preds) == 0)
+	  // Leave unreachable blocks empty, since there is no useful
+	  // liveness information for them, and anything they do will
+	  // be wasted work.  In a cleaned-up cfg, the only unreachable
+	  // block we should see is the exit block of a noreturn function.
+	  for (bb_info *bb : ebb)
+	    {
+	      bb->set_head_insn (append_artificial_insn (bb));
+	      bb->set_end_insn (append_artificial_insn (bb));
+	    }
+	else
+	  {
+	    add_phi_nodes (bi);
+	    for (bb_info *bb : ebb)
+	      {
+		bi.current_bb = bb;
+		add_artificial_accesses (bi, DF_REF_AT_TOP);
+		if (bb->index () != EXIT_BLOCK)
+		  add_block_contents (bi);
+		add_artificial_accesses (bi, df_ref_flags ());
+		record_block_live_out (bi);
+		populate_backedge_phis (bi);
+	      }
+	  }
+	ebb.truncate (0);
+      }
+
+  delete[] postorder;
+}
+
+// Print a description of CALL_CLOBBERS to PP.
+void
+rtl_ssa::pp_ebb_call_clobbers (pretty_printer *pp,
+			       const ebb_call_clobbers_info *call_clobbers)
+{
+  if (call_clobbers)
+    call_clobbers->print_full (pp);
+  else
+    pp_string (pp, "<null>");
+}
+
+// Print a description of BB to PP.
+void
+rtl_ssa::pp_bb (pretty_printer *pp, const bb_info *bb)
+{
+  if (bb)
+    bb->print_full (pp);
+  else
+    pp_string (pp, "<null>");
+}
+
+// Print a description of EBB to PP.
+void
+rtl_ssa::pp_ebb (pretty_printer *pp, const ebb_info *ebb)
+{
+  if (ebb)
+    ebb->print_full (pp);
+  else
+    pp_string (pp, "<null>");
+}
+
+// Print a description of CALL_CLOBBERS to FILE, via pp_ebb_call_clobbers.
+void
+dump (FILE *file, const ebb_call_clobbers_info *call_clobbers)
+{
+  dump_using (file, pp_ebb_call_clobbers, call_clobbers);
+}
+
+// Print a description of BB to FILE, via pp_bb.
+void
+dump (FILE *file, const bb_info *bb)
+{
+  dump_using (file, pp_bb, bb);
+}
+
+// Print a description of EBB to FILE, via pp_ebb.
+void
+dump (FILE *file, const ebb_info *ebb)
+{
+  dump_using (file, pp_ebb, ebb);
+}
+
+// Debug interfaces to the dump routines above; they print to stderr
+// for convenient use from a debugger.
+void debug (const ebb_call_clobbers_info *x) { dump (stderr, x); }
+void debug (const bb_info *x) { dump (stderr, x); }
+void debug (const ebb_info *x) { dump (stderr, x); }
diff --git a/gcc/rtl-ssa/blocks.h b/gcc/rtl-ssa/blocks.h
new file mode 100644
index 00000000000..f173e6ff8da
--- /dev/null
+++ b/gcc/rtl-ssa/blocks.h
@@ -0,0 +1,301 @@
+// Basic-block-related classes for RTL SSA                          -*- C++ -*-
+// Copyright (C) 2020 Free Software Foundation, Inc.
+//
+// This file is part of GCC.
+//
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+//
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+// for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3.  If not see
+// <http://www.gnu.org/licenses/>.
+
+namespace rtl_ssa {
+
+// SSA-related information about a basic block.  Each block contains
+// the following, which are conceptually executed in order:
+//
+// - an artificial "head" insn_info that holds artificial uses and definitions
+//   for the start of the block.
+//
+// - one insn_info for each "real" instruction in the block
+//   (i.e. those that have an RTL pattern).
+//
+// - an artificial "end" insn_info that holds artificial uses and definitions
+//   for the end of the block.
+//
+// Blocks are grouped together into extended basic blocks.  In cases where
+// multiple EBBs exist (such as in a full diamond), we try to pick the one
+// that's most frequently executed.
+//
+// Blocks are chained together in reverse postorder.  (Rather than use a
+// list, we could instead have stored the index of the block in the overall
+// postorder.  However, using lists should make it cheaper to update the
+// information after trivial CFG manipulations.)
+class bb_info
+{
+  // Size: 6 LP64 words.
+  // function_info is responsible for building and maintaining these objects.
+  friend class function_info;
+
+public:
+  // Return the previous basic block in reverse postorder, or null if this
+  // is the entry block.
+  bb_info *prev_bb () const { return m_prev_bb; }
+
+  // Return the next basic block in reverse postorder, or null if this
+  // is the exit block.
+  bb_info *next_bb () const { return m_next_bb; }
+
+  // Return true if this block is the function's entry block.
+  bool is_entry_block () const { return !m_prev_bb; }
+
+  // Return true if this block is the function's exit block.
+  bool is_exit_block () const { return !m_next_bb; }
+
+  // Return the underlying basic_block structure.
+  basic_block cfg_bb () const { return m_cfg_bb; }
+
+  // Return the unique identifier of the underlying basic_block.  These uids
+  // do not follow any particular order.
+  unsigned int index () const { return m_cfg_bb->index; }
+
+  // Return the EBB that contains this block.
+  ebb_info *ebb () const { return m_ebb; }
+
+  // Return a list of all the instructions in the block, in execution order.
+  // The list includes the head and end instructions described above.
+  //
+  // Iterations over the list will pick up any new instructions that are
+  // inserted after the iterator's current instruction.
+  iterator_range<any_insn_iterator> all_insns () const;
+
+  // Like all_insns (), except that the instructions are in reverse order.
+  //
+  // Iterations over the list will pick up any new instructions that are
+  // inserted before the iterator's current instruction.
+  iterator_range<reverse_any_insn_iterator> reverse_all_insns () const;
+
+  // Like all_insns (), but without the debug instructions.
+  iterator_range<nondebug_insn_iterator> nondebug_insns () const;
+
+  // Like reverse_all_insns (), but without the debug instructions.
+  iterator_range<reverse_nondebug_insn_iterator>
+    reverse_nondebug_insns () const;
+
+  // Like all_insns (), but without the artificial instructions.
+  iterator_range<any_insn_iterator> real_insns () const;
+
+  // Like reverse_all_insns (), but without the artificial instructions.
+  iterator_range<reverse_any_insn_iterator> reverse_real_insns () const;
+
+  // Like real_insns (), but without the debug instructions.
+  iterator_range<nondebug_insn_iterator> real_nondebug_insns () const;
+
+  // Like reverse_real_insns (), but without the debug instructions.
+  iterator_range<reverse_nondebug_insn_iterator>
+    reverse_real_nondebug_insns () const;
+
+  // Return the instruction that holds the artificial uses and
+  // definitions at the head of the block.  The associated RTL insn
+  // is the block head note.
+  //
+  // This instruction always exists, even if it has no uses and definitions.
+  insn_info *head_insn () const { return m_head_insn; }
+
+  // Return the instruction that holds the artificial uses and definitions
+  // at the end of the block.  There is no associated RTL insn.
+  //
+  // This instruction always exists, even if it has no uses and definitions.
+  insn_info *end_insn () const { return m_end_insn; }
+
+  // Print "bb" + index () to PP.
+  void print_identifier (pretty_printer *pp) const;
+
+  // Print a full description of the block to PP.
+  void print_full (pretty_printer *) const;
+
+private:
+  // Construct an object for CFG_BB; used only by function_info.
+  bb_info (basic_block);
+
+  // Mutators used by function_info while building and updating SSA form.
+  void set_prev_bb (bb_info *bb) { m_prev_bb = bb; }
+  void set_next_bb (bb_info *bb) { m_next_bb = bb; }
+  void set_cfg_bb (basic_block cfg_bb) { m_cfg_bb = cfg_bb; }
+  void set_ebb (ebb_info *ebb) { m_ebb = ebb; }
+  void set_head_insn (insn_info *insn) { m_head_insn = insn; }
+  void set_end_insn (insn_info *insn) { m_end_insn = insn; }
+
+  // The values returned by the functions above.
+  bb_info *m_prev_bb;
+  bb_info *m_next_bb;
+  basic_block m_cfg_bb;
+  ebb_info *m_ebb;
+  insn_info *m_head_insn;
+  insn_info *m_end_insn;
+};
+
+// Iterators for lists of basic blocks.  bb_iterator follows next_bb ()
+// links (reverse postorder); reverse_bb_iterator follows prev_bb () links.
+using bb_iterator = list_iterator<bb_info, &bb_info::next_bb>;
+using reverse_bb_iterator = list_iterator<bb_info, &bb_info::prev_bb>;
+
+// This class collects together instructions for which has_call_clobbers ()
+// is true, storing them in a splay tree that follows reverse postorder.
+// Instances of the class form a singly-linked list, with one instance
+// per predefined_function_abi.
+class ebb_call_clobbers_info : public insn_call_clobbers_tree
+{
+  // Size 3 LP64 words.
+  // function_info is responsible for building and maintaining these objects.
+  friend class function_info;
+
+public:
+  // Return the next group in the list.
+  ebb_call_clobbers_info *next () const { return m_next; }
+
+  // Return the function abi used by all the calls in the group.
+  const predefined_function_abi *abi () const { return m_abi; }
+
+  // Return true if at least one call in the group should conservatively
+  // be assumed to clobber RESOURCE.
+  bool clobbers (resource_info) const;
+
+  // Print a summary of what the class describes to PP, without printing
+  // the actual instructions.
+  void print_summary (pretty_printer *pp) const;
+
+  // Print a full description of the object to PP, including the
+  // instructions it contains.
+  void print_full (pretty_printer *) const;
+
+private:
+  // Construct a group for the given ABI; used only by function_info.
+  ebb_call_clobbers_info (const predefined_function_abi *);
+
+  // The values returned by the accessors above.
+  ebb_call_clobbers_info *m_next;
+  const predefined_function_abi *m_abi;
+};
+
+// A list of ebb_call_clobbers_infos, linked via their next () pointers.
+using ebb_call_clobbers_iterator
+  = list_iterator<ebb_call_clobbers_info, &ebb_call_clobbers_info::next>;
+
+// Information about an extended basic block.
+//
+// Each EBB has a list of phi nodes and starts with an artificial phi
+// instruction that conceptually "executes" the phi nodes.  The phi
+// nodes are independent of one another and so can be executed in any
+// order.  The order of the phi nodes in the list is not significant.
+//
+// Each EBB also maintains a list of ebb_call_clobbers_info structures
+// that describe all instructions for which has_call_clobbers () is true.
+// See the comment above that class for details.
+class ebb_info
+{
+  // Size: 5 LP64 words.
+  // function_info is responsible for building and maintaining these objects.
+  friend class function_info;
+
+public:
+  // Return the previous EBB in reverse postorder, or null if this EBB
+  // contains the entry block.
+  ebb_info *prev_ebb () const;
+
+  // Return the next EBB in reverse postorder, or null if this EBB contains
+  // the exit block.
+  ebb_info *next_ebb () const;
+
+  // Return the instruction that holds the EBB's phi nodes (and does
+  // nothing else).  There is no associated RTL insn.
+  //
+  // This instruction always exists, even if the EBB does not currently
+  // need any phi nodes.
+  insn_info *phi_insn () const { return m_phi_insn; }
+
+  // Return the first and last blocks in the EBB.
+  bb_info *first_bb () const { return m_first_bb; }
+  bb_info *last_bb () const { return m_last_bb; }
+
+  // Return the first of the EBB's phi nodes.
+  phi_info *first_phi () const { return m_first_phi; }
+
+  // Return the head of the list of ebb_call_clobbers_infos.
+  ebb_call_clobbers_info *first_call_clobbers () const;
+
+  // Return the list of ebb_call_clobbers_infos.
+  iterator_range<ebb_call_clobbers_iterator> call_clobbers () const;
+
+  // Return a list of the EBB's phi nodes, in arbitrary order.
+  iterator_range<phi_iterator> phis () const;
+
+  // Return a list of the blocks in the EBB, in execution order.
+  iterator_range<bb_iterator> bbs () const;
+
+  // Return a list of the blocks in the EBB, in reverse execution order.
+  iterator_range<reverse_bb_iterator> reverse_bbs () const;
+
+  // Return a list of all the instructions in the EBB, in execution order.
+  // The list includes phi_insn (), the head and end of each block,
+  // and the real instructions in each block.
+  //
+  // Iterations over the list will pick up any new instructions that are
+  // inserted after the iterator's current instruction.
+  iterator_range<any_insn_iterator> all_insns () const;
+
+  // Like all_insns (), except that the instructions are in reverse order.
+  //
+  // Iterations over the list will pick up any new instructions that are
+  // inserted before the iterator's current instruction.
+  iterator_range<reverse_any_insn_iterator> reverse_all_insns () const;
+
+  // Like all_insns (), but without the debug instructions.
+  iterator_range<nondebug_insn_iterator> nondebug_insns () const;
+
+  // Like reverse_all_insns (), but without the debug instructions.
+  iterator_range<reverse_nondebug_insn_iterator>
+    reverse_nondebug_insns () const;
+
+  // Return an insn_range that covers the same instructions as all_insns ().
+  insn_range_info insn_range () const;
+
+  // Print "ebb" + first_bb ()->index () to PP.
+  void print_identifier (pretty_printer *pp) const;
+
+  // Print a full description of the EBB to PP.
+  void print_full (pretty_printer *pp) const;
+
+private:
+  // Construct an EBB that spans the given first and last blocks;
+  // used only by function_info.
+  ebb_info (bb_info *, bb_info *);
+
+  // Mutators used by function_info while building and updating SSA form.
+  void set_first_phi (phi_info *phi) { m_first_phi = phi; }
+  void set_phi_insn (insn_info *insn) { m_phi_insn = insn; }
+  void set_first_call_clobbers (ebb_call_clobbers_info *);
+
+  // The values returned by the functions above.
+  phi_info *m_first_phi;
+  insn_info *m_phi_insn;
+  bb_info *m_first_bb;
+  bb_info *m_last_bb;
+  ebb_call_clobbers_info *m_first_call_clobbers;
+};
+
+// Iterators for lists of extended basic blocks.  ebb_iterator follows
+// next_ebb () links; reverse_ebb_iterator follows prev_ebb () links.
+using ebb_iterator = list_iterator<ebb_info, &ebb_info::next_ebb>;
+using reverse_ebb_iterator = list_iterator<ebb_info, &ebb_info::prev_ebb>;
+
+// Pretty-print routines for the classes above; defined in blocks.cc.
+void pp_bb (pretty_printer *, const bb_info *);
+void pp_ebb_call_clobbers (pretty_printer *, const ebb_call_clobbers_info *);
+void pp_ebb (pretty_printer *, const ebb_info *);
+
+}
+
+// Print descriptions of the given objects to a file.
+void dump (FILE *, const rtl_ssa::bb_info *);
+void dump (FILE *, const rtl_ssa::ebb_call_clobbers_info *);
+void dump (FILE *, const rtl_ssa::ebb_info *);
+
+// Debugger entry points; the definitions dump to stderr.
+void DEBUG_FUNCTION debug (const rtl_ssa::bb_info *);
+void DEBUG_FUNCTION debug (const rtl_ssa::ebb_call_clobbers_info *);
+void DEBUG_FUNCTION debug (const rtl_ssa::ebb_info *);
diff --git a/gcc/rtl-ssa/change-utils.h b/gcc/rtl-ssa/change-utils.h
new file mode 100644
index 00000000000..824533076e9
--- /dev/null
+++ b/gcc/rtl-ssa/change-utils.h
@@ -0,0 +1,137 @@
+// RTL SSA utility functions for changing instructions              -*- C++ -*-
+// Copyright (C) 2020 Free Software Foundation, Inc.
+//
+// This file is part of GCC.
+//
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+//
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+// for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3.  If not see
+// <http://www.gnu.org/licenses/>.
+
+namespace rtl_ssa {
+
+// Return true if INSN is one of the instructions being changed by CHANGES.
+inline bool
+insn_is_changing (array_slice<insn_change *const> changes,
+		  const insn_info *insn)
+{
+  for (unsigned int i = 0; i < changes.size (); ++i)
+    if (changes[i]->insn () == insn)
+      return true;
+  return false;
+}
+
+// Return a closure object that tests whether an instruction belongs
+// to CHANGES; see the overload above.  A named predicate class is used
+// instead of a local lambda so that template instantiations can be
+// shared between callers.
+//
+// std::bind is avoided because it would introduce an indirect function
+// call in relatively performance-critical code.
+inline insn_is_changing_closure
+insn_is_changing (array_slice<insn_change *const> changes)
+{
+  insn_is_changing_closure closure (changes);
+  return closure;
+}
+
+// Restrict CHANGE.move_range so that the changed instruction can perform
+// all its definitions and uses.  Assume that if:
+//
+// - CHANGE contains an access A1 of resource R;
+// - an instruction I2 contains another access A2 to R; and
+// - IGNORE (I2) is true
+//
+// then either:
+//
+// - A2 will be removed; or
+// - something will ensure that A1 and A2 maintain their current order,
+//   without this having to be enforced by CHANGE's move range.
+//
+// IGNORE should return true for CHANGE.insn ().
+//
+// Return true on success, otherwise leave CHANGE.move_range in an invalid
+// state.
+//
+// This function only works correctly for instructions that remain within
+// the same extended basic block.
+template<typename IgnorePredicate>
+bool
+restrict_movement_ignoring (insn_change &change, IgnorePredicate ignore)
+{
+  // Narrow the range for the uses first, since that tends to rule out
+  // invalid positions more quickly than the check for the definitions.
+  if (!restrict_movement_for_uses_ignoring (change.move_range,
+					    change.new_uses, ignore))
+    return false;
+  if (!restrict_movement_for_defs_ignoring (change.move_range,
+					    change.new_defs, ignore))
+    return false;
+  return canonicalize_move_range (change.move_range, change.insn ());
+}
+
+// Like restrict_movement_ignoring, but ignore only the instruction
+// that is being changed.
+inline bool
+restrict_movement (insn_change &change)
+{
+  auto ignore_self = insn_is (change.insn ());
+  return restrict_movement_ignoring (change, ignore_self);
+}
+
+using add_regno_clobber_fn = std::function<bool (insn_change &,
+						 unsigned int)>;
+bool recog_internal (insn_change &, add_regno_clobber_fn);
+
+// Try to recognize the new instruction pattern for CHANGE, potentially
+// tweaking the pattern or adding extra clobbers in order to make it match.
+//
+// When adding an extra clobber for register R, restrict CHANGE.move_range
+// to a range of instructions for which R is not live.  When determining
+// whether R is live, ignore accesses made by an instruction I if
+// IGNORE (I) is true.  The caller then assumes the responsibility
+// of ensuring that CHANGE and I are placed in a valid order.
+//
+// IGNORE should return true for CHANGE.insn ().
+//
+// Return true on success.  Leave CHANGE unmodified on failure.
+template<typename IgnorePredicate>
+inline bool
+recog_ignoring (obstack_watermark &watermark, insn_change &change,
+		IgnorePredicate ignore)
+{
+  // Bundle the watermark and predicate into the callback through which
+  // recog_internal requests extra register clobbers.
+  auto try_add_clobber = [&](insn_change &c, unsigned int regno)
+    {
+      return crtl->ssa->add_regno_clobber (watermark, c, regno, ignore);
+    };
+  return recog_internal (change, try_add_clobber);
+}
+
+// As for recog_ignoring, but ignore only the instruction that is being
+// changed.
+inline bool
+recog (obstack_watermark &watermark, insn_change &change)
+{
+  auto ignore_self = insn_is (change.insn ());
+  return recog_ignoring (watermark, change, ignore_self);
+}
+
+// Check whether insn costs indicate that the net effect of the changes
+// in CHANGES is worthwhile.  Require a strict improvement if STRICT_P,
+// otherwise allow the new instructions to be the same cost as the old
+// instructions.
+bool changes_are_worthwhile (array_slice<insn_change *const> changes,
+			     bool strict_p = false);
+
+// Like changes_are_worthwhile, but for a single change.
+inline bool
+change_is_worthwhile (insn_change &change, bool strict_p = false)
+{
+  insn_change *single_change[] = { &change };
+  return changes_are_worthwhile (single_change, strict_p);
+}
+
+}
diff --git a/gcc/rtl-ssa/changes.cc b/gcc/rtl-ssa/changes.cc
new file mode 100644
index 00000000000..1885a80e2e9
--- /dev/null
+++ b/gcc/rtl-ssa/changes.cc
@@ -0,0 +1,1025 @@
+// RTL SSA routines for changing instructions                       -*- C++ -*-
+// Copyright (C) 2020 Free Software Foundation, Inc.
+//
+// This file is part of GCC.
+//
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+//
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+// for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3.  If not see
+// <http://www.gnu.org/licenses/>.
+
+#define INCLUDE_ALGORITHM
+#define INCLUDE_FUNCTIONAL
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "backend.h"
+#include "rtl.h"
+#include "df.h"
+#include "rtl-ssa.h"
+#include "rtl-ssa/internals.inl"
+#include "target.h"
+#include "predict.h"
+#include "memmodel.h" // Needed by emit-rtl.h
+#include "emit-rtl.h"
+#include "cfghooks.h"
+#include "cfgrtl.h"
+
+using namespace rtl_ssa;
+
+// See the comment above the declaration.
+void
+insn_change::print (pretty_printer *pp) const
+{
+  if (m_is_deletion)
+    {
+      pp_string (pp, "deletion of ");
+      pp_insn (pp, m_insn);
+    }
+  else
+    {
+      pp_string (pp, "change to ");
+      pp_insn (pp, m_insn);
+      pp_newline_and_indent (pp, 2);
+      pp_string (pp, "~~~~~~~");
+
+      // The cost recorded for the new instruction pattern.
+      pp_newline_and_indent (pp, 0);
+      pp_string (pp, "new cost: ");
+      pp_decimal_int (pp, new_cost);
+
+      // The new list of uses, indented below its heading.
+      pp_newline_and_indent (pp, 0);
+      pp_string (pp, "new uses:");
+      pp_newline_and_indent (pp, 2);
+      pp_accesses (pp, new_uses);
+      pp_indentation (pp) -= 2;
+
+      // Likewise the new list of definitions.
+      pp_newline_and_indent (pp, 0);
+      pp_string (pp, "new defs:");
+      pp_newline_and_indent (pp, 2);
+      pp_accesses (pp, new_defs);
+      pp_indentation (pp) -= 2;
+
+      // The move range, described by the first and last instructions
+      // after which the changed instruction could be placed.
+      pp_newline_and_indent (pp, 0);
+      pp_string (pp, "first insert-after candidate: ");
+      move_range.first->print_identifier_and_location (pp);
+
+      pp_newline_and_indent (pp, 0);
+      pp_string (pp, "last insert-after candidate: ");
+      move_range.last->print_identifier_and_location (pp);
+    }
+}
+
+// Return a copy of access_array ACCESSES, allocating it on the
+// temporary obstack.
+access_array
+function_info::temp_access_array (access_array accesses)
+{
+  // An empty array needs no storage and can be returned directly.
+  if (accesses.empty ())
+    return accesses;
+
+  // The copy must be the only object under construction on the obstack;
+  // obstack_finish would otherwise include unrelated growing data.
+  gcc_assert (obstack_object_size (&m_temp_obstack) == 0);
+  obstack_grow (&m_temp_obstack, accesses.begin (), accesses.size_bytes ());
+  return { static_cast<access_info **> (obstack_finish (&m_temp_obstack)),
+	   accesses.size () };
+}
+
+// See the comment above the declaration.
+//
+// This checks two global properties of CHANGES:
+//
+// - that at least one assignment of program points to the changed
+//   instructions honors every move range; and
+//
+// - that hard-register clobbers added by recog do not conflict with
+//   other new definitions and uses of the same registers.
+bool
+function_info::verify_insn_changes (array_slice<insn_change *const> changes)
+{
+  // Hard registers given new values by earlier changes, and hard
+  // registers clobbered by recog-added clobbers, respectively.
+  HARD_REG_SET defined_hard_regs, clobbered_hard_regs;
+  CLEAR_HARD_REG_SET (defined_hard_regs);
+  CLEAR_HARD_REG_SET (clobbered_hard_regs);
+
+  insn_info *min_insn = m_first_insn;
+  for (insn_change *change : changes)
+    if (!change->is_deletion ())
+      {
+	// Make sure that the changes can be kept in their current order
+	// while honoring all of the move ranges.
+	min_insn = later_insn (min_insn, change->move_range.first);
+	while (min_insn != change->insn () && !can_insert_after (min_insn))
+	  min_insn = min_insn->next_nondebug_insn ();
+	if (*min_insn > *change->move_range.last)
+	  {
+	    if (dump_file && (dump_flags & TDF_DETAILS))
+	      fprintf (dump_file, "no viable insn position assignment\n");
+	    return false;
+	  }
+
+	// If recog introduced new clobbers of a register as part of
+	// the matching process, make sure that they don't conflict
+	// with any other new definitions or uses of the register.
+	// (We have already checked that they don't conflict with
+	// unchanging definitions and uses.)
+	for (use_info *use : change->new_uses)
+	  {
+	    unsigned int regno = use->regno ();
+	    if (HARD_REGISTER_NUM_P (regno)
+		&& TEST_HARD_REG_BIT (clobbered_hard_regs, regno))
+	      {
+		if (dump_file && (dump_flags & TDF_DETAILS))
+		  fprintf (dump_file, "register %u would be clobbered"
+			   " while it is still live\n", regno);
+		return false;
+	      }
+	  }
+	for (def_info *def : change->new_defs)
+	  {
+	    unsigned int regno = def->regno ();
+	    if (HARD_REGISTER_NUM_P (regno))
+	      {
+		if (def->m_is_temp)
+		  {
+		    // This is a clobber introduced by recog.
+		    gcc_checking_assert (is_a<clobber_info *> (def));
+		    if (TEST_HARD_REG_BIT (defined_hard_regs, regno))
+		      {
+			if (dump_file && (dump_flags & TDF_DETAILS))
+			  fprintf (dump_file, "conflicting definitions of"
+				   " register %u\n", regno);
+			return false;
+		      }
+		    SET_HARD_REG_BIT (clobbered_hard_regs, regno);
+		  }
+		else if (is_a<set_info *> (def))
+		  {
+		    // REGNO now has a defined value.
+		    SET_HARD_REG_BIT (defined_hard_regs, regno);
+		    CLEAR_HARD_REG_BIT (clobbered_hard_regs, regno);
+		  }
+	      }
+	  }
+      }
+  return true;
+}
+
+// See the comment above the declaration.
+bool
+rtl_ssa::changes_are_worthwhile (array_slice<insn_change *const> changes,
+				 bool strict_p)
+{
+  // Sum the costs of the original instructions and of their replacements.
+  // Deleted instructions contribute only to the original cost.
+  unsigned int old_cost = 0;
+  unsigned int new_cost = 0;
+  for (insn_change *change : changes)
+    {
+      old_cost += change->old_cost ();
+      if (!change->is_deletion ())
+	{
+	  basic_block cfg_bb = change->bb ()->cfg_bb ();
+	  change->new_cost = insn_cost (change->rtl (),
+					optimize_bb_for_speed_p (cfg_bb));
+	  new_cost += change->new_cost;
+	}
+    }
+  bool ok_p = (strict_p ? new_cost < old_cost : new_cost <= old_cost);
+  if (dump_file && (dump_flags & TDF_DETAILS))
+    {
+      fprintf (dump_file, "original cost");
+      char sep = '=';
+      for (const insn_change *change : changes)
+	{
+	  fprintf (dump_file, " %c %d", sep, change->old_cost ());
+	  sep = '+';
+	}
+      fprintf (dump_file, ", replacement cost");
+      sep = '=';
+      for (const insn_change *change : changes)
+	if (!change->is_deletion ())
+	  {
+	    fprintf (dump_file, " %c %d", sep, change->new_cost);
+	    sep = '+';
+	  }
+      fprintf (dump_file, "; %s\n",
+	       ok_p ? "keeping replacement" : "rejecting replacement");
+    }
+  return ok_p;
+}
+
+// Update the REG_NOTES of INSN, whose pattern has just been changed.
+static void
+update_notes (rtx_insn *insn)
+{
+  // Walk the note list through a pointer to the previous link, so that
+  // a note can be unlinked in place without a separate "prev" variable.
+  for (rtx *note_ptr = &REG_NOTES (insn); *note_ptr; )
+    {
+      rtx note = *note_ptr;
+      bool keep_p = true;
+      switch (REG_NOTE_KIND (note))
+	{
+	case REG_EQUAL:
+	case REG_EQUIV:
+	case REG_NOALIAS:
+	  // These notes only make sense for a single_set instruction;
+	  // the new pattern might no longer have that form.
+	  keep_p = (single_set (insn) != nullptr);
+	  break;
+
+	case REG_UNUSED:
+	case REG_DEAD:
+	  // These notes are stale.  We'll recompute REG_UNUSED notes
+	  // after the update.
+	  keep_p = false;
+	  break;
+
+	default:
+	  break;
+	}
+      if (keep_p)
+	note_ptr = &XEXP (*note_ptr, 1);
+      else
+	{
+	  // Unlink the note and return it to the expression-list free list.
+	  *note_ptr = XEXP (*note_ptr, 1);
+	  free_EXPR_LIST_node (note);
+	}
+    }
+}
+
+// Pick a location for CHANGE's instruction and return the instruction
+// after which it should be placed.
+static insn_info *
+choose_insn_placement (insn_change &change)
+{
+  gcc_checking_assert (change.move_range);
+
+  insn_info *insn = change.insn ();
+  insn_info *first = change.move_range.first;
+  insn_info *last = change.move_range.last;
+
+  // Quick(ish) exit if there is only one possible choice.
+  if (first == last)
+    return first;
+  // If both candidates leave INSN where it already is, keep it there.
+  if (first == insn->prev_nondebug_insn () && last == insn)
+    return insn;
+
+  // For now just use the closest valid choice to the original instruction.
+  // If the register usage has changed significantly, it might instead be
+  // better to try to take register pressure into account.
+  insn_info *closest = change.move_range.clamp_insn_to_range (insn);
+  while (closest != insn && !can_insert_after (closest))
+    closest = closest->next_nondebug_insn ();
+  return closest;
+}
+
+// Record any changes related to CHANGE that need to be queued for later.
+void
+function_info::possibly_queue_changes (insn_change &change)
+{
+  insn_info *insn = change.insn ();
+  rtx_insn *rtl = insn->rtl ();
+
+  // If the instruction could previously throw, we eventually need to call
+  // purge_dead_edges to check whether things have changed.
+  if (find_reg_note (rtl, REG_EH_REGION, nullptr))
+    bitmap_set_bit (m_need_to_purge_dead_edges, insn->bb ()->index ());
+
+  auto needs_pending_update = [&]()
+    {
+      // If an instruction became a no-op without the pass explicitly
+      // deleting it, queue the deletion for later.  Removing the
+      // instruction on the fly would require an update to all instructions
+      // that use the result of the move, which would be a potential source
+      // of quadraticness.  Also, definitions shouldn't disappear under
+      // the pass's feet.
+      if (INSN_CODE (rtl) == NOOP_MOVE_INSN_CODE)
+	return true;
+
+      // If any jumps got turned into unconditional jumps or nops, we need
+      // to update the CFG accordingly.
+      if (JUMP_P (rtl)
+	  && (returnjump_p (rtl) || any_uncondjump_p (rtl))
+	  && !single_succ_p (insn->bb ()->cfg_bb ()))
+	return true;
+
+      // If a previously conditional trap now always fires, execution
+      // terminates at that point.
+      rtx pattern = PATTERN (rtl);
+      if (GET_CODE (pattern) == TRAP_IF
+	  && XEXP (pattern, 0) == const1_rtx)
+	return true;
+
+      return false;
+    };
+
+  // bitmap_set_bit returns true only when the bit was newly set, so each
+  // instruction is pushed onto the queue at most once.
+  if (needs_pending_update ()
+      && bitmap_set_bit (m_queued_insn_update_uids, insn->uid ()))
+    {
+      gcc_assert (!change.is_deletion ());
+      m_queued_insn_updates.safe_push (insn);
+    }
+}
+
+// Remove the instruction described by CHANGE from the underlying RTL
+// and from the insn_info list.
+static void
+delete_insn (insn_change &change)
+{
+  if (dump_file && (dump_flags & TDF_DETAILS))
+    fprintf (dump_file, "deleting insn %d\n", change.insn ()->uid ());
+  set_insn_deleted (change.rtl ());
+}
+
+// Move the RTL instruction associated with CHANGE so that it comes
+// immediately after AFTER.
+static void
+move_insn (insn_change &change, insn_info *after)
+{
+  rtx_insn *rtl = change.rtl ();
+  rtx_insn *after_rtl = after->rtl ();
+  if (dump_file && (dump_flags & TDF_DETAILS))
+    fprintf (dump_file, "moving insn %d after insn %d\n",
+	     INSN_UID (rtl), INSN_UID (after_rtl));
+
+  // At the moment we don't support moving instructions between EBBs,
+  // but this would be worth adding if it's useful.
+  insn_info *insn = change.insn ();
+  gcc_assert (after->ebb () == insn->ebb ());
+  bb_info *bb = after->bb ();
+  basic_block cfg_bb = bb->cfg_bb ();
+
+  if (insn->bb () != bb)
+    // Force DF to mark the old block as dirty.
+    df_insn_delete (rtl);
+  // Unlink the instruction from its current RTL position and relink it
+  // immediately after AFTER_RTL, in AFTER's block.
+  ::remove_insn (rtl);
+  ::add_insn_after (rtl, after_rtl, cfg_bb);
+}
+
+// The instruction associated with CHANGE is being changed in-place.
+// Update the DF information for its new pattern.
+static void
+update_insn_in_place (insn_change &change)
+{
+  if (dump_file && (dump_flags & TDF_DETAILS))
+    fprintf (dump_file, "updating insn %d in-place\n",
+	     change.insn ()->uid ());
+  df_insn_rescan (change.rtl ());
+}
+
+// Finalize the new list of definitions and uses in CHANGE, removing
+// any uses and definitions that are no longer needed, and converting
+// pending clobbers into actual definitions.
+void
+function_info::finalize_new_accesses (insn_change &change)
+{
+  insn_info *insn = change.insn ();
+
+  // Get a list of all the things that the instruction now references.
+  vec_rtx_properties properties;
+  properties.add_insn (insn->rtl (), true);
+
+  // Build up the new list of definitions.
+  for (rtx_obj_reference ref : properties.refs ())
+    if (ref.is_write ())
+      {
+	def_info *def = find_access (change.new_defs, ref.regno);
+	gcc_assert (def);
+	if (def->m_is_temp)
+	  {
+	    // At present, the only temporary instruction definitions we
+	    // create are clobbers, such as those added during recog.
+	    gcc_assert (is_a<clobber_info *> (def));
+	    def = allocate<clobber_info> (change.insn (), ref.regno);
+	  }
+	else if (!def->m_has_been_superceded)
+	  {
+	    // This is a second or subsequent definition.
+	    // See function_info::record_def for a discussion of when
+	    // this can happen.
+	    def->record_reference (ref, false);
+	    continue;
+	  }
+	else
+	  {
+	    def->m_has_been_superceded = false;
+
+	    // Clobbers can move around, so remove them from their current
+	    // position and add them back in their final position.
+	    //
+	    // At the moment, we don't allow sets to move relative to other
+	    // definitions of the same resource, so we can leave those where
+	    // they are.  It might be useful to relax this in future.
+	    // The main complication is that removing a set would potentially
+	    // fuse two adjoining clobber_groups, and adding the set back
+	    // would require the group to be split again.
+	    if (is_a<clobber_info *> (def))
+	      remove_def (def);
+	    else if (ref.is_reg ())
+	      def->set_mode (ref.mode);
+	    def->set_insn (insn);
+	  }
+	def->record_reference (ref, true);
+	m_temp_defs.safe_push (def);
+      }
+
+  // Also keep any explicitly-recorded call clobbers, which are deliberately
+  // excluded from the vec_rtx_properties.
+  for (def_info *def : change.new_defs)
+    if (def->m_has_been_superceded && def->is_call_clobber ())
+      {
+	def->m_has_been_superceded = false;
+	def->set_insn (insn);
+	m_temp_defs.safe_push (def);
+      }
+
+  // Install the new list of definitions in CHANGE.
+  sort_accesses (m_temp_defs);
+  access_array accesses = temp_access_array (m_temp_defs);
+  change.new_defs = def_array (accesses);
+  m_temp_defs.truncate (0);
+
+  // Create temporary copies of use_infos that are already attached to
+  // other insns, which could happen if the uses come from unchanging
+  // insns or if they have been used by earlier changes.  Doing this
+  // makes it easier to detect multiple reads below.
+  auto *unshared_uses_base = XOBNEWVEC (&m_temp_obstack, access_info *,
+					change.new_uses.size ());
+  unsigned int i = 0;
+  for (use_info *use : change.new_uses)
+    {
+      if (!use->m_has_been_superceded)
+	{
+	  use = allocate_temp<use_info> (insn, use->resource (), use->def ());
+	  use->m_has_been_superceded = true;
+	  use->m_is_temp = true;
+	}
+      unshared_uses_base[i++] = use;
+    }
+  auto unshared_uses = use_array (unshared_uses_base, change.new_uses.size ());
+
+  // Add (possibly temporary) uses to m_temp_uses for each resource.
+  // If there are multiple references to the same resource, aggregate
+  // information in the modes and flags.
+  for (rtx_obj_reference ref : properties.refs ())
+    if (ref.is_read ())
+      {
+	unsigned int regno = ref.regno;
+	// Memory references have no meaningful register mode; use BLKmode.
+	machine_mode mode = ref.is_reg () ? ref.mode : BLKmode;
+	use_info *use = find_access (unshared_uses, ref.regno);
+	gcc_assert (use);
+	if (use->m_has_been_superceded)
+	  {
+	    // This is the first reference to the resource.
+	    bool is_temp = use->m_is_temp;
+	    *use = use_info (insn, resource_info { mode, regno }, use->def ());
+	    use->m_is_temp = is_temp;
+	    use->record_reference (ref, true);
+	    m_temp_uses.safe_push (use);
+	  }
+	else
+	  {
+	    // Record the mode of the largest use.  The choice is arbitrary if
+	    // the instruction (unusually) references the same register in two
+	    // different but equal-sized modes.
+	    if (HARD_REGISTER_NUM_P (regno)
+		&& partial_subreg_p (use->mode (), mode))
+	      use->set_mode (mode);
+	    use->record_reference (ref, false);
+	  }
+      }
+
+  // Replace any temporary uses and definitions with real ones.
+  for (unsigned int i = 0; i < m_temp_uses.length (); ++i)
+    {
+      auto *use = as_a<use_info *> (m_temp_uses[i]);
+      if (use->m_is_temp)
+	{
+	  m_temp_uses[i] = use = allocate<use_info> (*use);
+	  use->m_is_temp = false;
+	  set_info *def = use->def ();
+	  // Handle cases in which the value was previously not used
+	  // within the block.
+	  if (def && def->m_is_temp)
+	    {
+	      phi_info *phi = as_a<phi_info *> (def);
+	      gcc_assert (phi->is_degenerate ());
+	      phi = create_degenerate_phi (phi->ebb (), phi->input_value (0));
+	      use->set_def (phi);
+	    }
+	}
+    }
+
+  // Install the new list of uses in CHANGE.
+  sort_accesses (m_temp_uses);
+  change.new_uses = use_array (temp_access_array (m_temp_uses));
+  m_temp_uses.truncate (0);
+
+  // Record the new instruction-wide properties.
+  insn->set_properties (properties);
+}
+
+// Copy information from CHANGE to its underlying insn_info, given that
+// the insn_info has already been placed appropriately.
+void
+function_info::apply_changes_to_insn (insn_change &change)
+{
+  insn_info *insn = change.insn ();
+  if (change.is_deletion ())
+    {
+      // A deleted instruction has no accesses.
+      insn->set_accesses (nullptr, 0, 0);
+      return;
+    }
+
+  // Copy the cost.
+  insn->set_cost (change.new_cost);
+
+  // Add all clobbers.  Sets never moved relative to other definitions,
+  // so are OK as-is.
+  for (def_info *def : change.new_defs)
+    if (is_a<clobber_info *> (def))
+      add_def (def);
+
+  // Add all uses, now that their position is final.
+  for (use_info *use : change.new_uses)
+    add_use (use);
+
+  // Copy the uses and definitions.  Reuse the insn's existing access
+  // storage when the new lists fit; otherwise build a fresh array.
+  unsigned int num_defs = change.new_defs.size ();
+  unsigned int num_uses = change.new_uses.size ();
+  if (num_defs + num_uses <= insn->num_defs () + insn->num_uses ())
+    insn->copy_accesses (change.new_defs, change.new_uses);
+  else
+    {
+      access_array_builder builder (&m_obstack);
+      builder.reserve (num_defs + num_uses);
+
+      // Definitions come before uses in an insn's access array.
+      for (def_info *def : change.new_defs)
+	builder.quick_push (def);
+      for (use_info *use : change.new_uses)
+	builder.quick_push (use);
+
+      insn->set_accesses (builder.finish ().begin (), num_defs, num_uses);
+    }
+
+  add_reg_unused_notes (insn);
+}
+
+// Add a temporary placeholder instruction after AFTER.
+insn_info *
+function_info::add_placeholder_after (insn_info *after)
+{
+  // The placeholder has no associated rtl and is allocated on the
+  // temporary obstack, so it does not outlive the current change group.
+  // NOTE(review): the -1 appears to be a placeholder uid — confirm
+  // against the insn_info constructor.
+  insn_info *insn = allocate_temp<insn_info> (after->bb (), nullptr, -1);
+  add_insn_after (insn, after);
+  return insn;
+}
+
+// See the comment above the declaration.
+void
+function_info::change_insns (array_slice<insn_change *> changes)
+{
+  // NOTE(review): presumably releases temporary obstack allocations when
+  // it goes out of scope — confirm against temp_watermark's definition.
+  auto watermark = temp_watermark ();
+
+  // Phase 1: mark old accesses as superceded and tighten move ranges so
+  // that the changes can keep their current relative order.
+  insn_info *min_insn = m_first_insn;
+  for (insn_change *change : changes)
+    {
+      // Tentatively mark all the old uses and definitions for deletion.
+      for (use_info *use : change->old_uses ())
+	{
+	  use->m_has_been_superceded = true;
+	  remove_use (use);
+	}
+      for (def_info *def : change->old_defs ())
+	def->m_has_been_superceded = true;
+
+      if (!change->is_deletion ())
+	{
+	  // Remove any notes that are no longer relevant.
+	  update_notes (change->rtl ());
+
+	  // Make sure that the placement of this instruction would still
+	  // leave room for previous instructions.
+	  change->move_range = move_later_than (change->move_range, min_insn);
+	  if (!canonicalize_move_range (change->move_range, change->insn ()))
+	    // verify_insn_changes is supposed to make sure that this holds.
+	    gcc_unreachable ();
+	  min_insn = later_insn (min_insn, change->move_range.first);
+	}
+    }
+
+  // Phase 2: walk backwards through the changes, allocating specific
+  // positions to each one.  Update the underlying RTL and its associated
+  // DF information.
+  insn_info *following_insn = nullptr;
+  auto_vec<insn_info *, 16> placeholders;
+  placeholders.safe_grow_cleared (changes.size ());
+  for (unsigned int i = changes.size (); i-- > 0;)
+    {
+      insn_change &change = *changes[i];
+      insn_info *placeholder = nullptr;
+      possibly_queue_changes (change);
+      if (change.is_deletion ())
+	delete_insn (change);
+      else
+	{
+	  // Make sure that this instruction comes before later ones.
+	  if (following_insn)
+	    {
+	      change.move_range = move_earlier_than (change.move_range,
+						     following_insn);
+	      if (!canonicalize_move_range (change.move_range,
+					    change.insn ()))
+		// verify_insn_changes is supposed to make sure that this
+		// holds.
+		gcc_unreachable ();
+	    }
+
+	  // Decide which instruction INSN should go after.
+	  insn_info *after = choose_insn_placement (change);
+
+	  // If INSN is moving, insert a placeholder insn_info at the
+	  // new location.  We can't move INSN itself yet because it
+	  // might still be referenced by earlier move ranges.
+	  insn_info *insn = change.insn ();
+	  if (after == insn || after == insn->prev_nondebug_insn ())
+	    {
+	      update_insn_in_place (change);
+	      following_insn = insn;
+	    }
+	  else
+	    {
+	      move_insn (change, after);
+	      placeholder = add_placeholder_after (after);
+	      following_insn = placeholder;
+	    }
+
+	  // Finalize the new list of accesses for the change.  Don't install
+	  // them yet, so that we still have access to the old lists below.
+	  finalize_new_accesses (change);
+	}
+      placeholders[i] = placeholder;
+    }
+
+  // Phase 3: remove all definitions that are no longer needed.  After
+  // the above, such definitions should no longer have any registered users.
+  //
+  // In particular, this means that consumers must handle debug
+  // instructions before removing a set.
+  for (insn_change *change : changes)
+    for (def_info *def : change->old_defs ())
+      if (def->m_has_been_superceded)
+	{
+	  auto *set = dyn_cast<set_info *> (def);
+	  gcc_assert (!set || !set->has_any_uses ());
+	  remove_def (def);
+	}
+
+  // Phase 4: move the insn_infos to their new locations.
+  for (unsigned int i = 0; i < changes.size (); ++i)
+    {
+      insn_change &change = *changes[i];
+      insn_info *insn = change.insn ();
+      if (change.is_deletion ())
+	remove_insn (insn);
+      else if (insn_info *placeholder = placeholders[i])
+	{
+	  // Check if earlier movements turned a move into a no-op.
+	  if (placeholder->prev_nondebug_insn () == insn
+	      || placeholder->next_nondebug_insn () == insn)
+	    {
+	      remove_insn (placeholder);
+	      placeholders[i] = nullptr;
+	    }
+	  else
+	    {
+	      // Remove the placeholder first so that we have a wider range of
+	      // program points when inserting INSN.
+	      insn_info *after = placeholder->prev_any_insn ();
+	      remove_insn (insn);
+	      remove_insn (placeholder);
+	      insn->set_bb (after->bb ());
+	      add_insn_after (insn, after);
+	    }
+	}
+    }
+
+  // Phase 5: apply the changes to the underlying insn_infos.
+  for (insn_change *change : changes)
+    apply_changes_to_insn (*change);
+}
+
+// See the comment above the declaration.
+void
+function_info::change_insn (insn_change &change)
+{
+  insn_change *changes[] = { &change };
+  change_insns (changes);
+}
+
+// Try to adjust CHANGE so that its pattern can include clobber rtx CLOBBER.
+// Return true on success.
+//
+// ADD_REGNO_CLOBBER is a specialization of function_info::add_regno_clobber
+// for a specific caller-provided predicate.
+static bool
+add_clobber (insn_change &change, add_regno_clobber_fn add_regno_clobber,
+	     rtx clobber)
+{
+  rtx pat = PATTERN (change.rtl ());
+  gcc_assert (GET_CODE (clobber) == CLOBBER);
+  rtx dest = XEXP (clobber, 0);
+  if (GET_CODE (dest) == SCRATCH)
+    {
+      // A clobbered scratch needs a free register only after reload;
+      // before reload, the register allocator will assign one later.
+      if (reload_completed)
+	{
+	  if (dump_file && (dump_flags & TDF_DETAILS))
+	    {
+	      // ??? Maybe we could try to do some RA here?
+	      fprintf (dump_file, "instruction requires a scratch"
+		       " after reload:\n");
+	      print_rtl_single (dump_file, pat);
+	    }
+	  return false;
+	}
+      return true;
+    }
+
+  // Otherwise the clobber names a specific (possibly multi-word) register;
+  // every constituent hard register must be clobberable at this point.
+  gcc_assert (REG_P (dest));
+  for (unsigned int regno = REGNO (dest); regno != END_REGNO (dest); ++regno)
+    if (!add_regno_clobber (change, regno))
+      {
+	if (dump_file && (dump_flags & TDF_DETAILS))
+	  {
+	    fprintf (dump_file, "cannot clobber live register %u in:\n",
+		     regno);
+	    print_rtl_single (dump_file, pat);
+	  }
+	return false;
+      }
+  return true;
+}
+
+// Try to recognize the new form of the insn associated with CHANGE,
+// adding any clobbers that are necessary to make the instruction match
+// an .md pattern.  Return true on success.
+//
+// ADD_REGNO_CLOBBER is a specialization of function_info::add_regno_clobber
+// for a specific caller-provided predicate.
+static bool
+recog_level2 (insn_change &change, add_regno_clobber_fn add_regno_clobber)
+{
+  // NOTE(review): appears to roll back queued rtl changes on destruction
+  // unless keep () is called on the success paths below — confirm against
+  // insn_change_watermark's definition.
+  insn_change_watermark insn_watermark;
+  rtx_insn *rtl = change.rtl ();
+  rtx pat = PATTERN (rtl);
+  int num_clobbers = 0;
+  int icode = -1;
+  bool asm_p = asm_noperands (pat) >= 0;
+  if (asm_p)
+    {
+      // Asms have no .md pattern to match; just validate their operands.
+      if (!check_asm_operands (pat))
+	{
+	  if (dump_file && (dump_flags & TDF_DETAILS))
+	    {
+	      fprintf (dump_file, "failed to match this asm instruction:\n");
+	      print_rtl_single (dump_file, pat);
+	    }
+	  return false;
+	}
+    }
+  else if (noop_move_p (rtl))
+    {
+      // No-op moves are accepted directly; possibly_queue_changes queues
+      // them for deletion later.
+      INSN_CODE (rtl) = NOOP_MOVE_INSN_CODE;
+      if (dump_file && (dump_flags & TDF_DETAILS))
+	{
+	  fprintf (dump_file, "instruction becomes a no-op:\n");
+	  print_rtl_single (dump_file, pat);
+	}
+      insn_watermark.keep ();
+      return true;
+    }
+  else
+    {
+      icode = ::recog (pat, rtl, &num_clobbers);
+      if (icode < 0)
+	{
+	  if (dump_file && (dump_flags & TDF_DETAILS))
+	    {
+	      fprintf (dump_file, "failed to match this instruction:\n");
+	      print_rtl_single (dump_file, pat);
+	    }
+	  return false;
+	}
+    }
+
+  // Save state that must be restored if adding the clobbers fails.
+  auto prev_new_defs = change.new_defs;
+  auto prev_move_range = change.move_range;
+  if (num_clobbers > 0)
+    {
+      // ??? It would be good to have a way of recycling the rtxes on failure,
+      // but any attempt to cache old PARALLELs would at best be a half
+      // measure, since add_clobbers would still generate fresh clobbers
+      // each time.  It would be better to have a more general recycling
+      // mechanism that all rtx passes can use.
+      rtvec newvec;
+      int oldlen;
+      if (GET_CODE (pat) == PARALLEL)
+	{
+	  oldlen = XVECLEN (pat, 0);
+	  newvec = rtvec_alloc (num_clobbers + oldlen);
+	  for (int i = 0; i < oldlen; ++i)
+	    RTVEC_ELT (newvec, i) = XVECEXP (pat, 0, i);
+	}
+      else
+	{
+	  oldlen = 1;
+	  newvec = rtvec_alloc (num_clobbers + oldlen);
+	  RTVEC_ELT (newvec, 0) = pat;
+	}
+      rtx newpat = gen_rtx_PARALLEL (VOIDmode, newvec);
+      add_clobbers (newpat, icode);
+      validate_change (rtl, &PATTERN (rtl), newpat, true);
+      for (int i = 0; i < num_clobbers; ++i)
+	if (!add_clobber (change, add_regno_clobber,
+			  XVECEXP (newpat, 0, oldlen + i)))
+	  {
+	    change.new_defs = prev_new_defs;
+	    change.move_range = prev_move_range;
+	    return false;
+	  }
+
+      pat = newpat;
+    }
+
+  // After reload the operands must also satisfy their constraints,
+  // not just their predicates.
+  INSN_CODE (rtl) = icode;
+  if (reload_completed)
+    {
+      extract_insn (rtl);
+      if (!constrain_operands (1, get_preferred_alternatives (rtl)))
+	{
+	  if (dump_file && (dump_flags & TDF_DETAILS))
+	    {
+	      if (asm_p)
+		fprintf (dump_file, "asm does not match its constraints:\n");
+	      else if (const char *name = get_insn_name (icode))
+		fprintf (dump_file, "instruction does not match the"
+			 " constraints for %s:\n", name);
+	      else
+		fprintf (dump_file, "instruction does not match its"
+			 " constraints:\n");
+	      print_rtl_single (dump_file, pat);
+	    }
+	  change.new_defs = prev_new_defs;
+	  change.move_range = prev_move_range;
+	  return false;
+	}
+    }
+
+  if (dump_file && (dump_flags & TDF_DETAILS))
+    {
+      const char *name;
+      if (!asm_p && (name = get_insn_name (icode)))
+	fprintf (dump_file, "successfully matched this instruction "
+		 "to %s:\n", name);
+      else
+	fprintf (dump_file, "successfully matched this instruction:\n");
+      print_rtl_single (dump_file, pat);
+    }
+
+  insn_watermark.keep ();
+  return true;
+}
+
+// Try to recognize the new form of the insn associated with CHANGE,
+// adding and removing clobbers as necessary to make the instruction
+// match an .md pattern.  Return true on success, otherwise leave
+// CHANGE as it was on entry.
+//
+// ADD_REGNO_CLOBBER is a specialization of function_info::add_regno_clobber
+// for a specific caller-provided predicate.
+bool
+rtl_ssa::recog_internal (insn_change &change,
+			 add_regno_clobber_fn add_regno_clobber)
+{
+  // Accept all changes to debug instructions.
+  insn_info *insn = change.insn ();
+  if (insn->is_debug_insn ())
+    return true;
+
+  rtx_insn *rtl = insn->rtl ();
+  rtx pat = PATTERN (rtl);
+  if (GET_CODE (pat) == PARALLEL && asm_noperands (pat) < 0)
+    {
+      // Try to remove trailing (clobber (scratch)) rtxes, since the new form
+      // of the instruction might not need those scratches.  recog will add
+      // back any that are needed.
+      int len = XVECLEN (pat, 0);
+      int new_len = len;
+      while (new_len > 0
+	     && GET_CODE (XVECEXP (pat, 0, new_len - 1)) == CLOBBER
+	     && GET_CODE (XEXP (XVECEXP (pat, 0, new_len - 1), 0)) == SCRATCH)
+	new_len -= 1;
+
+      // Record how many changes were already pending so that a failed
+      // attempt below can be rolled back without disturbing them.
+      int old_num_changes = num_validated_changes ();
+      validate_change_xveclen (rtl, &PATTERN (rtl), new_len, true);
+      if (recog_level2 (change, add_regno_clobber))
+	return true;
+      cancel_changes (old_num_changes);
+
+      // Try to remove all trailing clobbers.  For example, a pattern that
+      // used to clobber the flags might no longer need to do so.
+      // Note that PAT still refers to the original (full) PARALLEL here.
+      int prev_len = new_len;
+      while (new_len > 0
+	     && GET_CODE (XVECEXP (pat, 0, new_len - 1)) == CLOBBER)
+	new_len -= 1;
+      if (new_len != prev_len)
+	{
+	  validate_change_xveclen (rtl, &PATTERN (rtl), new_len, true);
+	  if (recog_level2 (change, add_regno_clobber))
+	    return true;
+	  cancel_changes (old_num_changes);
+	}
+      return false;
+    }
+
+  // Non-PARALLEL patterns (and asms) have no trailing clobbers to strip.
+  return recog_level2 (change, add_regno_clobber);
+}
+
+// See the comment above the declaration.
+bool
+function_info::perform_pending_updates ()
+{
+  bool changed_cfg = false;
+  bool changed_jumps = false;
+  for (insn_info *insn : m_queued_insn_updates)
+    {
+      rtx_insn *rtl = insn->rtl ();
+      if (JUMP_P (rtl))
+	{
+	  if (INSN_CODE (rtl) == NOOP_MOVE_INSN_CODE)
+	    {
+	      // The jump became a no-op move.  Deleting it can leave
+	      // dead edges behind, so queue its block for purging below.
+	      ::delete_insn (rtl);
+	      bitmap_set_bit (m_need_to_purge_dead_edges,
+			      insn->bb ()->index ());
+	    }
+	  else if (returnjump_p (rtl) || any_uncondjump_p (rtl))
+	    {
+	      mark_jump_label (PATTERN (rtl), rtl, 0);
+	      update_cfg_for_uncondjump (rtl);
+	      changed_cfg = true;
+	      changed_jumps = true;
+	    }
+	}
+      else if (INSN_CODE (rtl) == NOOP_MOVE_INSN_CODE)
+	::delete_insn (rtl);
+      else
+	{
+	  rtx pattern = PATTERN (rtl);
+	  if (GET_CODE (pattern) == TRAP_IF
+	      && XEXP (pattern, 0) == const1_rtx)
+	    {
+	      // An unconditional trap ends the block: split off what
+	      // follows it and remove the now-dead fallthrough edge.
+	      remove_edge (split_block (BLOCK_FOR_INSN (rtl), rtl));
+	      emit_barrier_after_bb (BLOCK_FOR_INSN (rtl));
+	      changed_cfg = true;
+	    }
+	}
+    }
+
+  // Purge dead edges from every block in which we deleted a no-op jump.
+  unsigned int index;
+  bitmap_iterator bi;
+  EXECUTE_IF_SET_IN_BITMAP (m_need_to_purge_dead_edges, 0, index, bi)
+    if (purge_dead_edges (BASIC_BLOCK_FOR_FN (m_fn, index)))
+      changed_cfg = true;
+
+  if (changed_jumps)
+    // This uses its own timevar internally, so we don't need to push
+    // one ourselves.
+    rebuild_jump_labels (get_insns ());
+
+  // Reset the queues ready for the next batch of changes.
+  bitmap_clear (m_need_to_purge_dead_edges);
+  bitmap_clear (m_queued_insn_update_uids);
+  m_queued_insn_updates.truncate (0);
+
+  if (changed_cfg)
+    {
+      // Dominance information (in both directions) is now out of date.
+      free_dominance_info (CDI_DOMINATORS);
+      free_dominance_info (CDI_POST_DOMINATORS);
+    }
+
+  return changed_cfg;
+}
+
+// Pretty-printer entry point: describe CHANGE on PP.
+void
+rtl_ssa::pp_insn_change (pretty_printer *pp, const insn_change &change)
+{
+  // Defer to the change's own print routine.
+  change.print (pp);
+}
+
+// Write a description of CHANGE to the stdio stream FILE.
+void
+dump (FILE *file, const insn_change &change)
+{
+  // Route through the pretty-printer-based implementation above.
+  dump_using (file, pp_insn_change, change);
+}
+
+// Convenience wrapper so that X can be dumped from a debugger.
+void
+debug (const insn_change &x)
+{
+  dump (stderr, x);
+}
diff --git a/gcc/rtl-ssa/changes.h b/gcc/rtl-ssa/changes.h
new file mode 100644
index 00000000000..308c5edc409
--- /dev/null
+++ b/gcc/rtl-ssa/changes.h
@@ -0,0 +1,118 @@
+// RTL SSA classes related to changing instructions                 -*- C++ -*-
+// Copyright (C) 2020 Free Software Foundation, Inc.
+//
+// This file is part of GCC.
+//
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+//
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+// for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3.  If not see
+// <http://www.gnu.org/licenses/>.
+
+namespace rtl_ssa {
+
+// A class that describes a change that we're considering making to an
+// instruction.  There are three choices:
+//
+// (1) delete the instruction
+// (2) replace the instruction with a new instruction in-place
+// (3) replace the instruction with a new instruction at a different location
+//
+// Anything related to the "new instruction" is irrelevant for (1).
+//
+// The class doesn't actually change anything itself, it simply records
+// something that we might do.
+class insn_change
+{
+public:
+  // Tag type used to select the deletion constructor below.
+  enum delete_action { DELETE };
+
+  // Construct a possible change to INSN.
+  insn_change (insn_info *insn);
+
+  // Construct a possible deletion of INSN.
+  insn_change (insn_info *insn, delete_action);
+
+  // The instruction that we would change.
+  insn_info *insn () const { return m_insn; }
+
+  // The rtx_insn of the instruction that we would change.
+  rtx_insn *rtl () const { return m_insn->rtl (); }
+
+  // The basic block that contains insn ().
+  bb_info *bb () const { return m_insn->bb (); }
+
+  // The extended basic block that contains insn ().
+  ebb_info *ebb () const { return m_insn->ebb (); }
+
+  // The uid of the instruction that we would change.
+  unsigned int insn_uid () const { return m_insn->uid (); }
+
+  // The list of things that the original instruction defined and used.
+  def_array old_defs () const { return m_insn->defs (); }
+  use_array old_uses () const { return m_insn->uses (); }
+
+  // The cost of the original instruction, as calculated by the target.
+  unsigned int old_cost () const { return m_insn->cost (); }
+
+  // Return true if the original instruction would simply be deleted,
+  // rather than being replaced by a new instruction.
+  bool is_deletion () const { return m_is_deletion; }
+
+  // Print a description of the change to PP.
+  void print (pretty_printer *pp) const;
+
+  // Return an insn_change for deleting INSN.
+  static insn_change delete_insn (insn_info *insn) { return { insn, DELETE }; }
+
+private:
+  // The value returned by insn ().
+  insn_info *m_insn;
+
+public:
+  // The fields below are deliberately public data members: users of the
+  // class fill them in to describe the proposed new instruction.
+
+  // The list of things that the new instruction would define and use.
+  def_array new_defs;
+  use_array new_uses;
+
+  // The range of instructions after which the instruction could be placed.
+  // The range can include INSN itself: placing the instruction after either
+  // INSN or INSN->prev_nondebug_insn () is equivalent to not moving the
+  // instruction.
+  insn_range_info move_range;
+
+  // The cost that the new instruction would have, as calculated by the target.
+  unsigned int new_cost;
+
+private:
+  // The value returned by is_deletion ().
+  bool m_is_deletion;
+};
+
+// A class that represents a closure of the two-argument form of
+// insn_is_changing.  See the comment above the one-argument form
+// for details.
+class insn_is_changing_closure
+{
+public:
+  // Capture CHANGES; the slice must outlive the closure.
+  insn_is_changing_closure (array_slice<insn_change *const> changes);
+  // Predicate entry point; defined in changes.cc.
+  bool operator() (const insn_info *) const;
+
+private:
+  // The set of changes being considered.
+  array_slice<insn_change *const> m_changes;
+};
+
+void pp_insn_change (pretty_printer *, const insn_change &);
+
+}
+
+void dump (FILE *, const rtl_ssa::insn_change &);
+
+void DEBUG_FUNCTION debug (const rtl_ssa::insn_change &);
diff --git a/gcc/rtl-ssa/functions.cc b/gcc/rtl-ssa/functions.cc
new file mode 100644
index 00000000000..50595ac8ed6
--- /dev/null
+++ b/gcc/rtl-ssa/functions.cc
@@ -0,0 +1,325 @@
+// Implementation of function-related RTL SSA functions             -*- C++ -*-
+// Copyright (C) 2020 Free Software Foundation, Inc.
+//
+// This file is part of GCC.
+//
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+//
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+// for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3.  If not see
+// <http://www.gnu.org/licenses/>.
+
+#define INCLUDE_ALGORITHM
+#define INCLUDE_FUNCTIONAL
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "backend.h"
+#include "rtl.h"
+#include "df.h"
+#include "rtl-ssa.h"
+#include "rtl-ssa/internals.inl"
+
+using namespace rtl_ssa;
+
+// Construct the SSA form for function FN.
+function_info::function_info (function *fn)
+  : m_fn (fn)
+{
+  // Force the alignment to be obstack_alignment.  Everything else is normal.
+  obstack_specify_allocation (&m_obstack, OBSTACK_CHUNK_SIZE,
+			      obstack_alignment, obstack_chunk_alloc,
+			      obstack_chunk_free);
+  obstack_specify_allocation (&m_temp_obstack, OBSTACK_CHUNK_SIZE,
+			      obstack_alignment, obstack_chunk_alloc,
+			      obstack_chunk_free);
+
+  // Record the start of the obstacks.
+  m_obstack_start = XOBNEWVAR (&m_obstack, char, 0);
+  m_temp_obstack_start = XOBNEWVAR (&m_temp_obstack, char, 0);
+
+  // Build the SSA information: reset all members, walk the blocks to
+  // create the instruction and access records, then clean up the phis.
+  init_function_data ();
+  process_all_blocks ();
+  simplify_phis ();
+}
+
+function_info::~function_info ()
+{
+  // Anything using the temporary obstack should free it afterwards,
+  // preferably via temp_watermark ().
+  gcc_assert (XOBNEWVAR (&m_temp_obstack, char, 0) == m_temp_obstack_start);
+
+  // A null argument frees everything allocated in the obstack.
+  obstack_free (&m_temp_obstack, nullptr);
+  obstack_free (&m_obstack, nullptr);
+}
+
+// See the comment above the declaration.
+void
+function_info::print (pretty_printer *pp) const
+{
+  pp_string (pp, "Function: ");
+  pp_string (pp, function_name (m_fn));
+  // Print each extended basic block in turn, separated by a blank line.
+  for (ebb_info *ebb : ebbs ())
+    {
+      pp_newline (pp);
+      pp_newline_and_indent (pp, 0);
+      pp_ebb (pp, ebb);
+    }
+}
+
+// Calculate m_potential_phi_regs.
+void
+function_info::calculate_potential_phi_regs ()
+{
+  auto *lr_info = DF_LR_BB_INFO (ENTRY_BLOCK_PTR_FOR_FN (m_fn));
+  // Registers with regno >= DF_REG_SIZE (DF) have no DF register info,
+  // so they are conservatively treated as potential phi registers.
+  // Checking this first also stops DF_REG_DEF_COUNT from being queried
+  // out of range.
+  for (unsigned int regno = 0; regno < m_num_regs; ++regno)
+    if (regno >= DF_REG_SIZE (DF)
+	// Exclude registers that have a single definition that dominates
+	// all uses.  If the definition does not dominate all uses,
+	// the register will be exposed upwards to the entry block but
+	// will not be defined by the entry block.
+	|| DF_REG_DEF_COUNT (regno) > 1
+	|| (!bitmap_bit_p (&lr_info->def, regno)
+	    && bitmap_bit_p (&lr_info->out, regno)))
+      bitmap_set_bit (m_potential_phi_regs, regno);
+}
+
+// Initialize all member variables in preparation for (re)building
+// SSA form from scratch.
+void
+function_info::init_function_data ()
+{
+  // Uids for artificial instructions start at -1; phi uids start at 0.
+  m_next_artificial_uid = -1;
+  m_next_phi_uid = 0;
+  m_num_regs = max_reg_num ();
+  // One entry per register plus one extra slot — presumably for memory,
+  // matching the index-0-is-memory convention used by the access arrays.
+  m_defs.safe_grow_cleared (m_num_regs + 1);
+  m_bbs.safe_grow_cleared (last_basic_block_for_fn (m_fn));
+  m_first_bb = nullptr;
+  m_last_bb = nullptr;
+  m_first_insn = nullptr;
+  m_last_insn = nullptr;
+  m_last_nondebug_insn = nullptr;
+  m_free_phis = nullptr;
+
+  calculate_potential_phi_regs ();
+}
+
+// The initial phase of the phi simplification process.  The cumulative
+// effect of the initial phase is to set up ASSUMED_VALUES such that,
+// for a phi P with uid ID:
+//
+// - if we think all inputs to P have the same value, ASSUMED_VALUES[ID]
+//   is that value
+//
+// - otherwise, ASSUMED_VALUES[ID] is P.
+//
+// This has already been done for phis with a lower uid than PHI,
+// initially making optimistic assumptions about backedge inputs.
+// Now do the same for PHI.  If this might invalidate any assumptions
+// made for earlier phis, add the uids of those phis to WORKLIST.
+void
+function_info::simplify_phi_setup (phi_info *phi, set_info **assumed_values,
+				   bitmap worklist)
+{
+  // If all non-backedge inputs have the same value, set NEW_VALUE
+  // to that value.  Otherwise set NEW_VALUE to PHI, to indicate
+  // that PHI cannot be simplified.
+  unsigned int phi_uid = phi->uid ();
+  bool is_first_input = true;
+  set_info *new_value = nullptr;
+  machine_mode phi_mode = phi->mode ();
+  for (use_info *input : phi->inputs ())
+    {
+      set_info *def = input->def ();
+
+      if (auto *input_phi = safe_dyn_cast<phi_info *> (def))
+	{
+	  // Ignore backedges for now.  Inputs from phis with a higher
+	  // uid have not been processed yet.
+	  unsigned int input_phi_uid = input_phi->uid ();
+	  if (phi_uid <= input_phi_uid)
+	    continue;
+
+	  // Use the value that we've assumed for the input phi rather
+	  // than the phi itself.
+	  def = assumed_values[input_phi_uid];
+	}
+
+      // Compare this definition with previous ones.
+      if (is_first_input)
+	{
+	  new_value = def;
+	  is_first_input = false;
+	}
+      else if (new_value != def)
+	new_value = phi;
+
+      // If the input has a known mode (i.e. not BLKmode), make sure
+      // that the phi's mode is at least as large.
+      if (def)
+	phi_mode = combine_modes (phi_mode, def->mode ());
+    }
+  if (phi->mode () != phi_mode)
+    phi->set_mode (phi_mode);
+
+  // Since we use a reverse postorder traversal, no phi can consist
+  // entirely of backedges.
+  gcc_checking_assert (!is_first_input);
+  assumed_values[phi_uid] = new_value;
+
+  // See whether any assumptions for earlier phis are now invalid.
+  simplify_phi_propagate (phi, assumed_values, nullptr, worklist);
+}
+
+// The propagation phase of the phi simplification process, with
+// ASSUMED_VALUES as described above simplify_phi_setup.  Iteratively
+// update the phis that use PHI based on PHI's entry in ASSUMED_VALUES.
+// If CURR_WORKLIST is null, consider only phi uses with a lower uid
+// than PHI, otherwise consider all phi uses.
+//
+// If a phi with a higher uid than PHI needs updating, add its uid to
+// CURR_WORKLIST; if a phi with a lower uid than PHI needs updating,
+// add its uid to NEXT_WORKLIST.
+void
+function_info::simplify_phi_propagate (phi_info *phi,
+				       set_info **assumed_values,
+				       bitmap curr_worklist,
+				       bitmap next_worklist)
+{
+  // Go through each phi user of PHI to see whether it needs updating.
+  unsigned int phi_uid = phi->uid ();
+  machine_mode phi_mode = phi->mode ();
+  set_info *phi_value = assumed_values[phi_uid];
+  for (use_info *use : phi->phi_uses ())
+    {
+      phi_info *user_phi = use->phi ();
+
+      // Propagate the phi's new mode to all phi users.  Insn uses should
+      // not be updated, since their modes reflect a property of the insns
+      // rather than the phi.
+      if (use->mode () != phi_mode)
+	use->set_mode (phi_mode);
+
+      // A phi that uses its own result needs no value update for
+      // that self-referential use.
+      if (user_phi == phi)
+	continue;
+
+      // If this is a phi we should be looking at, see whether it needs
+      // an update.
+      unsigned int user_phi_uid = user_phi->uid ();
+      if (user_phi_uid < phi_uid || curr_worklist)
+	{
+	  bool needs_update = false;
+
+	  // Make sure that USER_PHI's mode is at least as big as PHI_MODE.
+	  machine_mode user_phi_mode = user_phi->mode ();
+	  machine_mode new_mode = combine_modes (user_phi_mode, phi_mode);
+	  if (user_phi_mode != new_mode)
+	    {
+	      user_phi->set_mode (new_mode);
+	      needs_update = true;
+	    }
+
+	  // If USER_PHI optimistically assumed an incorrect value,
+	  // adjust it now: the phi can no longer be simplified.
+	  if (assumed_values[user_phi_uid] != user_phi
+	      && assumed_values[user_phi_uid] != phi_value)
+	    {
+	      assumed_values[user_phi_uid] = user_phi;
+	      needs_update = true;
+	    }
+
+	  if (needs_update)
+	    {
+	      if (user_phi_uid < phi_uid)
+		bitmap_set_bit (next_worklist, user_phi_uid);
+	      else
+		bitmap_set_bit (curr_worklist, user_phi_uid);
+	    }
+	}
+    }
+}
+
+// Update the modes of all phis so that they are at least as big as
+// all inputs.  Remove any non-degenerate phis whose inputs are all equal.
+void
+function_info::simplify_phis ()
+{
+  auto temps = temp_watermark ();
+
+  // See the comment above simplify_phi_setup for details about this array.
+  // Entries are filled in by the set-up loop below; they are only read
+  // for phis with lower uids, which have already been processed.
+  auto *assumed_values = XOBNEWVEC (&m_temp_obstack, set_info *,
+				    m_next_phi_uid);
+
+  // An array of all phis, indexed by uid.
+  auto *phis = XOBNEWVEC (&m_temp_obstack, phi_info *, m_next_phi_uid);
+
+  // Which phi uids are actually in use.
+  auto_sbitmap valid_phi_uids (m_next_phi_uid);
+  bitmap_clear (valid_phi_uids);
+
+  // Bitmaps used for the main double-queue propagation phase.
+  auto_bitmap worklist1;
+  auto_bitmap worklist2;
+  bitmap curr_worklist = worklist1;
+  bitmap next_worklist = worklist2;
+
+  // Perform the set-up phase; see simplify_phi_setup for details.
+  for (ebb_info *ebb : ebbs ())
+    for (phi_info *phi : ebb->phis ())
+      {
+	bitmap_set_bit (valid_phi_uids, phi->uid ());
+	phis[phi->uid ()] = phi;
+	simplify_phi_setup (phi, assumed_values, curr_worklist);
+      }
+
+  // Iteratively process any phis that need updating; see
+  // simplify_phi_propagate for details.  Using a double queue
+  // should reduce the number of times that any given phi node
+  // needs to be revisited.
+  while (!bitmap_empty_p (curr_worklist))
+    {
+      do
+	{
+	  unsigned int uid = bitmap_first_set_bit (curr_worklist);
+	  bitmap_clear_bit (curr_worklist, uid);
+	  simplify_phi_propagate (phis[uid], assumed_values,
+				  curr_worklist, next_worklist);
+	}
+      while (!bitmap_empty_p (curr_worklist));
+      // Drain the current queue completely before swapping in the
+      // queue of lower-uid phis collected along the way.
+      std::swap (next_worklist, curr_worklist);
+    }
+
+  // Make sure that assumed_values is a transitive closure.  This ensures
+  // that each use_info is only updated once.
+  if (flag_checking)
+    for (unsigned int i = 0; i < m_next_phi_uid; ++i)
+      if (bitmap_bit_p (valid_phi_uids, i))
+	if (auto *new_phi = safe_dyn_cast<phi_info *> (assumed_values[i]))
+	  gcc_assert (assumed_values[new_phi->uid ()] == new_phi);
+
+  // Update any phis that turned out to be equivalent to a single input.
+  for (unsigned int i = 0; i < m_next_phi_uid; ++i)
+    if (bitmap_bit_p (valid_phi_uids, i) && phis[i] != assumed_values[i])
+      replace_phi (phis[i], assumed_values[i]);
+}
+
+// Pretty-printer entry point: describe FUNCTION on PP.
+void
+rtl_ssa::pp_function (pretty_printer *pp, const function_info *function)
+{
+  // Defer to the function_info's own print routine.
+  function->print (pp);
+}
+
+// Write a description of FUNCTION to the stdio stream FILE.
+void
+dump (FILE *file, const function_info *function)
+{
+  // Route through the pretty-printer-based implementation above.
+  dump_using (file, pp_function, function);
+}
+
+// Convenience wrapper so that X can be dumped from a debugger.
+void
+debug (const function_info *x)
+{
+  dump (stderr, x);
+}
diff --git a/gcc/rtl-ssa/functions.h b/gcc/rtl-ssa/functions.h
new file mode 100644
index 00000000000..b09d50e86b0
--- /dev/null
+++ b/gcc/rtl-ssa/functions.h
@@ -0,0 +1,433 @@
+// Function-related RTL SSA classes                                 -*- C++ -*-
+// Copyright (C) 2020 Free Software Foundation, Inc.
+//
+// This file is part of GCC.
+//
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+//
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+// for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3.  If not see
+// <http://www.gnu.org/licenses/>.
+
+namespace rtl_ssa {
+
+// SSA-related information about a function.  It contains three levels
+// of information, each in reverse postorder:
+//
+// - a list of extended basic blocks
+// - a list of basic blocks
+// - a list of instructions
+//
+// It also maintains a list of definitions of memory, and a list of
+// definitions of each register.
+//
+// See doc/rtl.texi for more details about the way this information
+// is organized and how changes to it are made.
+class function_info
+{
+  // The default obstack alignment takes long double into account.
+  // Since we have no use for that here, and since we allocate many
+  // relatively small objects, it's better to specify an alignment
+  // explicitly.  The allocation routines assert that the alignment
+  // is enough for the objects being allocated.
+  //
+  // Because various structures use pointer_mux, we need at least 2 bytes
+  // of alignment.
+  static const size_t obstack_alignment = sizeof (void *);
+
+public:
+  // Construct SSA form for function FN.
+  function_info (function *fn);
+  ~function_info ();
+
+  // Return a list of all the extended basic blocks in the function, in reverse
+  // postorder.  The list includes the entry and exit blocks.
+  iterator_range<ebb_iterator> ebbs () const;
+
+  // Like ebbs (), but in the reverse order.
+  iterator_range<reverse_ebb_iterator> reverse_ebbs () const;
+
+  // Return a list of all the basic blocks in the function, in reverse
+  // postorder.  The list includes the entry and exit blocks.
+  iterator_range<bb_iterator> bbs () const;
+
+  // Like bbs (), but in the reverse order.
+  iterator_range<reverse_bb_iterator> reverse_bbs () const;
+
+  // Return the SSA information for the basic block with index INDEX.
+  bb_info *bb (unsigned int index) const { return m_bbs[index]; }
+
+  // Return the SSA information for CFG_BB.
+  bb_info *bb (basic_block cfg_bb) const { return m_bbs[cfg_bb->index]; }
+
+  // Return a list of all the instructions in the function, in reverse
+  // postorder.  The list includes both real and artificial instructions.
+  //
+  // Iterations over the list will pick up any new instructions that are
+  // inserted after the iterator's current instruction.
+  iterator_range<any_insn_iterator> all_insns () const;
+
+  // Like all_insns (), but in the reverse order.
+  //
+  // Iterations over the list will pick up any new instructions that are
+  // inserted before the iterator's current instruction.
+  iterator_range<reverse_any_insn_iterator> reverse_all_insns () const;
+
+  // Like all_insns (), but without the debug instructions.
+  iterator_range<nondebug_insn_iterator> nondebug_insns () const;
+
+  // Like reverse_all_insns (), but without the debug instructions.
+  iterator_range<reverse_nondebug_insn_iterator>
+    reverse_nondebug_insns () const;
+
+  // Return the first and last instructions in insns ().
+  insn_info *first_insn () const { return m_first_insn; }
+  insn_info *last_insn () const { return m_last_insn; }
+
+  // Return a list of all definitions of memory, in reverse postorder.
+  // This includes both real stores by instructions and artificial
+  // definitions by things like phi nodes.
+  iterator_range<def_iterator> mem_defs () const;
+
+  // Return a list of all definitions of register REGNO, in reverse postorder.
+  // This includes both real stores by instructions and artificial
+  // definitions by things like phi nodes.
+  iterator_range<def_iterator> ref_defs (unsigned int regno) const;
+
+  // Check if all uses of register REGNO are either unconditionally undefined
+  // or use the same single dominating definition.  Return the definition
+  // if so, otherwise return null.
+  set_info *single_dominating_def (unsigned int regno) const;
+
+  // Look for a definition of RESOURCE at INSN.  Return the result of the
+  // search as a def_lookup; see the comments there for more details.
+  def_lookup find_def (resource_info resource, insn_info *insn);
+
+  // Return an RAII object that owns all temporary RTL SSA memory
+  // allocated during a change attempt.  The object should remain in
+  // scope until the change has been aborted or successfully completed.
+  obstack_watermark new_change_attempt () { return &m_temp_obstack; }
+
+  // Make a best attempt to check whether the values used by USES are
+  // available on entry to BB, without solving a full dataflow problem.
+  // If all the values are already live on entry to BB or can be made
+  // available there, return a use_array that describes the uses as
+  // if they occurred at the start of BB.  These uses are purely temporary,
+  // and will not become permanent unless applied using change_insns.
+  //
+  // If the operation fails, return an invalid use_array.
+  //
+  // WATERMARK is a watermark returned by new_change_attempt ().
+  use_array make_uses_available (obstack_watermark &watermark,
+				 use_array uses, bb_info *bb);
+
+  // If CHANGE doesn't already clobber REGNO, try to add such a clobber,
+  // limiting the movement range in order to make the clobber valid.
+  // When determining whether REGNO is live, ignore accesses made by an
+  // instruction I if IGNORE (I) is true.  The caller then assumes the
+  // responsibility of ensuring that CHANGE and I are placed in a valid order.
+  //
+  // Return true on success.  Leave CHANGE unmodified when returning false.
+  //
+  // WATERMARK is a watermark returned by new_change_attempt ().
+  template<typename IgnorePredicate>
+  bool add_regno_clobber (obstack_watermark &watermark, insn_change &change,
+			  unsigned int regno, IgnorePredicate ignore);
+
+  // Return true if change_insns will be able to perform the changes
+  // described by CHANGES.
+  bool verify_insn_changes (array_slice<insn_change *const> changes);
+
+  // Perform all the changes in CHANGES, keeping the instructions in the
+  // order specified by the CHANGES array.  On return, the SSA information
+  // remains up-to-date.  The same is true for instruction-level DF
+  // information, although the block-level DF information might be
+  // marked dirty.
+  void change_insns (array_slice<insn_change *> changes);
+
+  // Like change_insns, but for a single change CHANGE.
+  void change_insn (insn_change &change);
+
+  // If the changes that have been made to instructions require updates
+  // to the CFG, perform those updates now.  Return true if something changed.
+  // If it did:
+  //
+  // - The SSA information is now invalid and needs to be recomputed.
+  //
+  // - Dominance information is no longer available (in either direction).
+  //
+  // - The caller will need to call cleanup_cfg at some point.
+  //
+  // ??? We could probably update the SSA information for simple updates,
+  // but currently nothing would benefit.  These late CFG changes are
+  // relatively rare anyway, since gimple optimisers should remove most
+  // unnecessary control flow.
+  bool perform_pending_updates ();
+
+  // Print the contents of the function to PP.
+  void print (pretty_printer *pp) const;
+
+private:
+  // Information about the values that are live on exit from a basic block.
+  // This class is only used when constructing the SSA form, it isn't
+  // designed for being kept up-to-date.
+  class bb_live_out_info
+  {
+  public:
+    // REG_VALUES contains all the registers that live out from the block,
+    // in order of increasing register number.  There are NUM_REG_VALUES
+    // in total.  Registers do not appear here if their values are known
+    // to be completely undefined; in that sense, the information is
+    // closer to DF_LIVE than to DF_LR.
+    unsigned int num_reg_values;
+    set_info **reg_values;
+
+    // The memory value that is live on exit from the block.
+    set_info *mem_value;
+  };
+
+  // Information used while constructing the SSA form and discarded
+  // afterwards.
+  class build_info
+  {
+  public:
+    set_info *current_reg_value (unsigned int) const;
+    set_info *current_mem_value () const;
+
+    void record_reg_def (unsigned int, def_info *);
+    void record_mem_def (def_info *);
+
+    // The block that we're currently processing.
+    bb_info *current_bb;
+
+    // The EBB that contains CURRENT_BB.
+    ebb_info *current_ebb;
+
+    // Except for the local exception noted below:
+    //
+    // - If register R has been defined in the current EBB, LAST_ACCESS[R + 1]
+    //   is the last definition of R in the EBB.
+    //
+    // - If register R is currently live but has not yet been defined
+    //   in the EBB, LAST_ACCESS[R + 1] is the current value of R,
+    //   or null if the register's value is completely undefined.
+    //
+    // - The contents are not meaningful for other registers.
+    //
+    // Similarly:
+    //
+    // - If the current EBB has defined memory, LAST_ACCESS[0] is the last
+    //   definition of memory in the EBB.
+    //
+    // - Otherwise LAST_ACCESS[0] is the value of memory that is live on
+    //   entry to the EBB.
+    //
+    // The exception is that while building instructions, LAST_ACCESS[I]
+    // can temporarily be the use of regno I - 1 by that instruction.
+    access_info **last_access;
+
+    // A bitmap of registers that are live on entry to this EBB, with a tree
+    // view for quick lookup.  Only used if MAY_HAVE_DEBUG_INSNS.
+    bitmap ebb_live_in_for_debug;
+
+    // A conservative superset of the registers that are used by
+    // instructions in CURRENT_EBB.  That is, all used registers
+    // are in the set, but some unused registers might be too.
+    bitmap ebb_use;
+
+    // A similarly conservative superset of the registers that are defined
+    // by instructions in CURRENT_EBB.
+    bitmap ebb_def;
+
+    // BB_LIVE_OUT[BI] gives the live-out values for the basic block
+    // with index BI.
+    bb_live_out_info *bb_live_out;
+  };
+
+  // Return an RAII object that owns all objects allocated by
+  // allocate_temp during its lifetime.
+  obstack_watermark temp_watermark () { return &m_temp_obstack; }
+
+  template<typename T, typename... Ts>
+  T *allocate (Ts... args);
+
+  template<typename T, typename... Ts>
+  T *allocate_temp (Ts... args);
+
+  access_array temp_access_array (access_array accesses);
+
+  clobber_group *need_clobber_group (clobber_info *);
+  def_node *need_def_node (def_info *);
+  def_splay_tree need_def_splay_tree (def_info *);
+
+  use_info *make_use_available (use_info *, bb_info *);
+  def_array insert_temp_clobber (obstack_watermark &, insn_info *,
+				 unsigned int, def_array);
+
+  void insert_def_before (def_info *, def_info *);
+  void insert_def_after (def_info *, def_info *);
+  void remove_def_from_list (def_info *);
+
+  void add_clobber (clobber_info *, clobber_group *);
+  void remove_clobber (clobber_info *, clobber_group *);
+  void prepend_clobber_to_group (clobber_info *, clobber_group *);
+  void append_clobber_to_group (clobber_info *, clobber_group *);
+  void merge_clobber_groups (clobber_info *, clobber_info *,
+			     def_info *);
+  clobber_info *split_clobber_group (clobber_group *, insn_info *);
+
+  void append_def (def_info *);
+  void add_def (def_info *);
+  void remove_def (def_info *);
+
+  void need_use_splay_tree (set_info *);
+
+  static void insert_use_before (use_info *, use_info *);
+  static void insert_use_after (use_info *, use_info *);
+
+  void add_use (use_info *);
+  void remove_use (use_info *);
+
+  insn_info::order_node *need_order_node (insn_info *);
+
+  void add_insn_after (insn_info *, insn_info *);
+  void append_insn (insn_info *);
+  void remove_insn (insn_info *);
+
+  insn_info *append_artificial_insn (bb_info *, rtx_insn * = nullptr);
+
+  void start_insn_accesses ();
+  void finish_insn_accesses (insn_info *);
+
+  void record_use (build_info &, insn_info *, rtx_obj_reference);
+  void record_call_clobbers (build_info &, insn_info *, rtx_call_insn *);
+  void record_def (build_info &, insn_info *, rtx_obj_reference);
+  void add_insn_to_block (build_info &, rtx_insn *);
+
+  void add_reg_unused_notes (insn_info *);
+
+  void add_live_out_use (bb_info *, set_info *);
+  set_info *live_out_value (bb_info *, set_info *);
+
+  void append_phi (ebb_info *, phi_info *);
+  void remove_phi (phi_info *);
+  void delete_phi (phi_info *);
+  void replace_phi (phi_info *, set_info *);
+  phi_info *create_phi (ebb_info *, resource_info, access_info **,
+			unsigned int);
+  phi_info *create_degenerate_phi (ebb_info *, set_info *);
+
+  bb_info *create_bb_info (basic_block);
+  void append_bb (bb_info *);
+  void calculate_potential_phi_regs ();
+
+  insn_info *add_placeholder_after (insn_info *);
+  void possibly_queue_changes (insn_change &);
+  void finalize_new_accesses (insn_change &);
+  void apply_changes_to_insn (insn_change &);
+
+  void init_function_data ();
+  void add_entry_block_defs (build_info &);
+  void add_phi_nodes (build_info &);
+  void add_artificial_accesses (build_info &, df_ref_flags);
+  void add_block_contents (build_info &);
+  void record_block_live_out (build_info &);
+  void populate_backedge_phis (build_info &);
+  void process_all_blocks ();
+
+  void simplify_phi_setup (phi_info *, set_info **, bitmap);
+  void simplify_phi_propagate (phi_info *, set_info **, bitmap, bitmap);
+  void simplify_phis ();
+
+  // The function that this object describes.
+  function *m_fn;
+
+  // The lowest (negative) in-use artificial insn uid minus one.
+  int m_next_artificial_uid;
+
+  // The highest in-use phi uid plus one.
+  unsigned int m_next_phi_uid;
+
+  // The highest in-use register number plus one.
+  unsigned int m_num_regs;
+
+  // M_DEFS[R] is the first definition of register R - 1 in a reverse
+  // postorder traversal of the function, or null if the function has
+  // no definition of R - 1.  Applying last () gives the last definition
+  // of R - 1.
+  //
+  // M_DEFS[0] is for memory; MEM_REGNO + 1 == 0.
+  auto_vec<def_info *> m_defs;
+
+  // M_BBS[BI] gives the SSA information about the block with index BI.
+  auto_vec<bb_info *> m_bbs;
+
+  // An obstack used to allocate the main RTL SSA information.
+  obstack m_obstack;
+
+  // An obstack used for temporary work, such as while building up a list
+  // of possible instruction changes.
+  obstack m_temp_obstack;
+
+  // The start of each obstack, so that all memory in them can be freed.
+  char *m_obstack_start;
+  char *m_temp_obstack_start;
+
+  // The entry and exit blocks.
+  bb_info *m_first_bb;
+  bb_info *m_last_bb;
+
+  // The first and last instructions in a reverse postorder traversal
+  // of the function.
+  insn_info *m_first_insn;
+  insn_info *m_last_insn;
+
+  // The last nondebug instruction in the list of instructions.
+  // This is only different from m_last_insn when building the initial
+  // SSA information; after that, the last instruction is always a
+  // BB end instruction.
+  insn_info *m_last_nondebug_insn;
+
+  // Temporary working state when building up lists of definitions and uses.
+  // Keeping them around should reduce the number of unnecessary reallocations.
+  auto_vec<access_info *> m_temp_defs;
+  auto_vec<access_info *> m_temp_uses;
+
+  // The set of registers that might need to have phis associated with them.
+  // Registers outside this set are known to have a single definition that
+  // dominates all uses.
+  //
+  // Before RA, about 5% of registers are typically in the set.
+  auto_bitmap m_potential_phi_regs;
+
+  // A list of phis that are no longer in use.  Their uids are still unique
+  // and so can be recycled.
+  phi_info *m_free_phis;
+
+  // A list of instructions that have been changed in ways that need
+  // further processing later, such as removing dead instructions or
+  // altering the CFG.
+  auto_vec<insn_info *> m_queued_insn_updates;
+
+  // The INSN_UIDs of all instructions in M_QUEUED_INSN_UPDATES.
+  auto_bitmap m_queued_insn_update_uids;
+
+  // A basic_block is in this bitmap if we need to call purge_dead_edges
+  // on it.  As with M_QUEUED_INSN_UPDATES, these updates are queued until
+  // a convenient point.
+  auto_bitmap m_need_to_purge_dead_edges;
+};
+
+void pp_function (pretty_printer *, const function_info *);
+}
+
+void dump (FILE *, const rtl_ssa::function_info *);
+
+void DEBUG_FUNCTION debug (const rtl_ssa::function_info *);
diff --git a/gcc/rtl-ssa/insn-utils.h b/gcc/rtl-ssa/insn-utils.h
new file mode 100644
index 00000000000..d7705e96ac8
--- /dev/null
+++ b/gcc/rtl-ssa/insn-utils.h
@@ -0,0 +1,46 @@
+// Instruction-related utilities for RTL SSA                        -*- C++ -*-
+// Copyright (C) 2020 Free Software Foundation, Inc.
+//
+// This file is part of GCC.
+//
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+//
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+// for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3.  If not see
+// <http://www.gnu.org/licenses/>.
+
+namespace rtl_ssa {
+
+// Return whichever of INSN1 and INSN2 comes first in the function's
+// reverse postorder.
+inline insn_info *
+earlier_insn (insn_info *insn1, insn_info *insn2)
+{
+  if (*insn1 < *insn2)
+    return insn1;
+  return insn2;
+}
+
+// Return whichever of INSN1 and INSN2 comes later in the function's
+// reverse postorder.
+inline insn_info *
+later_insn (insn_info *insn1, insn_info *insn2)
+{
+  if (*insn1 < *insn2)
+    return insn2;
+  return insn1;
+}
+
+// Return a closure of operator== for INSN, i.e. an object that tests
+// whether another instruction is INSN itself.  See insn_is_changing for
+// the rationale for defining the function this way.
+inline insn_is_closure
+insn_is (const insn_info *insn)
+{
+  return insn_is_closure (insn);
+}
+
+}
diff --git a/gcc/rtl-ssa/insns.cc b/gcc/rtl-ssa/insns.cc
new file mode 100644
index 00000000000..e8300e036f1
--- /dev/null
+++ b/gcc/rtl-ssa/insns.cc
@@ -0,0 +1,718 @@
+// Implementation of instruction-related RTL SSA functions          -*- C++ -*-
+// Copyright (C) 2020 Free Software Foundation, Inc.
+//
+// This file is part of GCC.
+//
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+//
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+// for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3.  If not see
+// <http://www.gnu.org/licenses/>.
+
+#define INCLUDE_ALGORITHM
+#define INCLUDE_FUNCTIONAL
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "backend.h"
+#include "rtl.h"
+#include "df.h"
+#include "rtl-ssa.h"
+#include "rtl-ssa/internals.inl"
+#include "predict.h"
+#include "print-rtl.h"
+#include "rtl-iter.h"
+
+using namespace rtl_ssa;
+
+// The gap to leave between program points when building up the list
+// of instructions for the first time.  Using 2 allows an instruction
+// to be inserted between two others without resorting to splay tree
+// ordering.  Using 0 is useful as a debugging aid to stress the
+// splay tree code.
+static const unsigned int POINT_INCREASE = 2;
+
+// Calculate and record the cost of the instruction, based on the
+// form it had before any in-progress changes were made.
+void
+insn_info::calculate_cost () const
+{
+  basic_block cfg_bb = BLOCK_FOR_INSN (m_rtl);
+  // Cost the original form of the instruction: undo any in-progress
+  // changes before calling insn_cost and reapply them afterwards.
+  temporarily_undo_changes (0);
+  m_cost_or_uid = insn_cost (m_rtl, optimize_bb_for_speed_p (cfg_bb));
+  redo_changes (0);
+}
+
+// Add NOTE to the instruction's list of notes.
+void
+insn_info::add_note (insn_note *note)
+{
+  insn_note **link = &m_first_note;
+  // An order node always stays at the head of the list, since it's the
+  // note that is likely to be accessed most often; insert other notes
+  // after it.
+  insn_note *head = *link;
+  if (head && head->kind () == insn_note_kind::ORDER_NODE)
+    link = &head->m_next_note;
+  note->m_next_note = *link;
+  *link = note;
+}
+
+// Implement compare_with for the case in which this insn and OTHER
+// have the same program point.  In that case both instructions have
+// order nodes (see add_insn_after), and the splay tree records their
+// relative order.
+int
+insn_info::slow_compare_with (const insn_info &other) const
+{
+  return order_splay_tree::compare_nodes (get_known_order_node (),
+					  other.get_known_order_node ());
+}
+
+// Print insn uid UID to PP, where UID has the same form as insn_info::uid.
+// Artificial instructions (negative uids) are printed as "aN" and real
+// RTL instructions as "iN".
+void
+insn_info::print_uid (pretty_printer *pp, int uid)
+{
+  // Enough room for a prefix character, the digits, and a terminator.
+  char buffer[3 * sizeof (uid) + 2];
+  const char *format = uid < 0 ? "a%d" : "i%d";
+  snprintf (buffer, sizeof (buffer), format, uid < 0 ? -uid : uid);
+  pp_string (pp, buffer);
+}
+
+// Print the instruction's identifier ("iN" or "aN") to PP.
+// See comment above declaration.
+void
+insn_info::print_identifier (pretty_printer *pp) const
+{
+  print_uid (pp, uid ());
+}
+
+// Print the instruction's location to PP: the containing EBB for phi
+// nodes, otherwise the containing basic block, followed by the
+// instruction's program point.  See comment above declaration.
+void
+insn_info::print_location (pretty_printer *pp) const
+{
+  if (bb_info *bb = this->bb ())
+    {
+      ebb_info *ebb = bb->ebb ();
+      // Identify phis by their EBB rather than by a specific block.
+      if (ebb && is_phi ())
+	ebb->print_identifier (pp);
+      else
+	bb->print_identifier (pp);
+      pp_string (pp, " at point ");
+      pp_decimal_int (pp, m_point);
+    }
+  else
+    pp_string (pp, "<unknown location>");
+}
+
+// Print a one-line description of the instruction to PP: any "asm" or
+// "debug" qualifiers, then its identifier and location.
+// See comment above declaration.
+void
+insn_info::print_identifier_and_location (pretty_printer *pp) const
+{
+  if (m_is_asm)
+    pp_string (pp, "asm ");
+  if (m_is_debug_insn)
+    pp_string (pp, "debug ");
+  pp_string (pp, "insn ");
+  print_identifier (pp);
+  pp_string (pp, " in ");
+  print_location (pp);
+}
+
+// Print a full description of the instruction to PP: its identifier and
+// location, the RTL pattern (boxed between separator lines), the cost
+// and pre/post-modify and volatile flags, its uses and definitions, any
+// call clobber information, and its position in the insn order splay
+// tree.  See comment above declaration.
+void
+insn_info::print_full (pretty_printer *pp) const
+{
+  print_identifier_and_location (pp);
+  pp_colon (pp);
+  if (is_real ())
+    {
+      pp_newline_and_indent (pp, 2);
+      if (has_been_deleted ())
+	pp_string (pp, "deleted");
+      else
+	{
+	  // Print the insn pattern to a temporary printer.
+	  pretty_printer sub_pp;
+	  print_insn_with_notes (&sub_pp, rtl ());
+	  const char *text = pp_formatted_text (&sub_pp);
+
+	  // Calculate the length of the maximum line in the pattern.
+	  unsigned int max_len = 0;
+	  const char *start = text;
+	  while (const char *end = strchr (start, '\n'))
+	    {
+	      max_len = MAX (max_len, end - start);
+	      start = end + 1;
+	    }
+
+	  // Print a separator before or after the pattern.
+	  auto print_top_bottom = [&]()
+	    {
+	      pp_character (pp, '+');
+	      for (unsigned int i = 0; i < max_len + 2; ++i)
+		pp_character (pp, '-');
+	    };
+
+	  print_top_bottom ();
+	  start = text;
+	  while (const char *end = strchr (start, '\n'))
+	    {
+	      pp_newline_and_indent (pp, 0);
+	      pp_character (pp, '|');
+	      // Each line of the pattern already starts with a space,
+	      // so we don't need to add another one here.
+	      pp_append_text (pp, start, end);
+	      start = end + 1;
+	    }
+	  pp_newline_and_indent (pp, 0);
+	  print_top_bottom ();
+
+	  if (m_cost_or_uid != UNKNOWN_COST)
+	    {
+	      pp_newline_and_indent (pp, 0);
+	      pp_string (pp, "cost: ");
+	      pp_decimal_int (pp, m_cost_or_uid);
+	    }
+	  if (m_has_pre_post_modify)
+	    {
+	      pp_newline_and_indent (pp, 0);
+	      pp_string (pp, "has pre/post-modify operations");
+	    }
+	  if (m_has_volatile_refs)
+	    {
+	      pp_newline_and_indent (pp, 0);
+	      pp_string (pp, "has volatile refs");
+	    }
+	}
+      pp_indentation (pp) -= 2;
+    }
+
+  // Print HEADING followed by an indented list of ACCESSES.
+  auto print_accesses = [&](const char *heading, access_array accesses,
+			    unsigned int flags)
+    {
+      if (!accesses.empty ())
+	{
+	  pp_newline_and_indent (pp, 2);
+	  pp_string (pp, heading);
+	  pp_newline_and_indent (pp, 2);
+	  pp_accesses (pp, accesses, flags);
+	  pp_indentation (pp) -= 4;
+	}
+    };
+
+  print_accesses ("uses:", uses (), PP_ACCESS_USER);
+  auto *call_clobbers_note = find_note<insn_call_clobbers_note> ();
+  if (call_clobbers_note)
+    {
+      pp_newline_and_indent (pp, 2);
+      pp_string (pp, "has call clobbers for ABI ");
+      pp_decimal_int (pp, call_clobbers_note->abi_id ());
+      pp_indentation (pp) -= 2;
+    }
+  print_accesses ("defines:", defs (), PP_ACCESS_SETTER);
+  if (num_uses () == 0 && !call_clobbers_note && num_defs () == 0)
+    {
+      pp_newline_and_indent (pp, 2);
+      pp_string (pp, "has no uses or defs");
+      pp_indentation (pp) -= 2;
+    }
+
+  if (order_node *node = get_order_node ())
+    {
+      // Walk up to the root of the insn order splay tree, then print
+      // the whole tree that contains this instruction.
+      while (node->m_parent)
+	node = node->m_parent;
+
+      pp_newline_and_indent (pp, 2);
+      pp_string (pp, "insn order: ");
+      pp_newline_and_indent (pp, 2);
+      auto print_order = [](pretty_printer *pp, order_node *node)
+	{
+	  print_uid (pp, node->uid ());
+	};
+      order_splay_tree::print (pp, node, print_order);
+      pp_indentation (pp) -= 4;
+    }
+}
+
+// Return the insn_info::order_node for INSN, creating one if INSN
+// doesn't already have one.
+insn_info::order_node *
+function_info::need_order_node (insn_info *insn)
+{
+  if (insn_info::order_node *order = insn->get_order_node ())
+    return order;
+
+  auto *order = allocate<insn_info::order_node> (insn->uid ());
+  insn->add_note (order);
+  return order;
+}
+
+// Add instruction INSN immediately after AFTER in the reverse postorder list.
+// INSN is not currently in the list.
+//
+// As well as linking INSN into the lists, give it a program point
+// between AFTER's and the following instruction's, falling back to the
+// insn order splay tree when no distinct point number is available.
+void
+function_info::add_insn_after (insn_info *insn, insn_info *after)
+{
+  gcc_checking_assert (!insn->has_insn_links ());
+
+  insn->copy_next_from (after);
+  after->set_next_any_insn (insn);
+
+  // The prev link is easy if AFTER and INSN are the same type.
+  // Handle the other cases below.
+  if (after->is_debug_insn () == insn->is_debug_insn ())
+    insn->set_prev_sametype_insn (after);
+
+  if (insn_info *next = insn->next_any_insn ())
+    {
+      if (insn->is_debug_insn () == next->is_debug_insn ())
+	{
+	  // INSN might now be the start of the subsequence of debug insns,
+	  // and so its prev pointer might point to the end of the subsequence
+	  // instead of AFTER.
+	  insn->copy_prev_from (next);
+	  next->set_prev_sametype_insn (insn);
+	}
+      else if (insn->is_debug_insn ()) // && !next->is_debug_insn ()
+	{
+	  // INSN ends a subsequence of debug instructions.  Find the
+	  // first debug instruction in the subsequence, which might
+	  // be INSN itself.  (If it isn't, then AFTER is also a debug
+	  // instruction and we updated INSN's prev link above.)
+	  insn_info *first = next->prev_nondebug_insn ()->next_any_insn ();
+	  first->set_last_debug_insn (insn);
+	}
+      else // !insn->is_debug_insn () && next->is_debug_insn ()
+	// At present we don't (need to) support inserting a nondebug
+	// instruction between two existing debug instructions.
+	gcc_assert (!after->is_debug_insn ());
+
+      // If AFTER and NEXT are separated by at least two points, we can
+      // use a unique point number for INSN.  Otherwise INSN will have
+      // the same point number as AFTER.
+      insn->set_point ((next->point () + after->point ()) / 2);
+    }
+  else
+    {
+      // INSN is the new last instruction in the function.
+      if (!insn->is_debug_insn ())
+	{
+	  insn->set_prev_sametype_insn (m_last_nondebug_insn);
+	  m_last_nondebug_insn = insn;
+	}
+      else
+	// There is now at least one debug instruction after
+	// m_last_nondebug_insn: either INSN itself, or the start of
+	// a longer subsequence of debug insns that now ends with AFTER
+	// followed by INSN.
+	m_last_nondebug_insn->next_any_insn ()->set_last_debug_insn (insn);
+      m_last_insn = insn;
+
+      insn->set_point (after->point () + POINT_INCREASE);
+    }
+
+  // If INSN's program point is the same as AFTER's, we need to use the
+  // splay tree to record their relative order.
+  if (insn->point () == after->point ())
+    {
+      insn_info::order_node *after_node = need_order_node (after);
+      insn_info::order_node *insn_node = need_order_node (insn);
+      insn_info::order_splay_tree::insert_child (after_node, 1, insn_node);
+    }
+}
+
+// Remove INSN from the function's list of instructions.
+void
+function_info::remove_insn (insn_info *insn)
+{
+  // Detach INSN from the insn order splay tree, if it is in one.
+  if (insn_info::order_node *order = insn->get_order_node ())
+    insn_info::order_splay_tree::remove_node (order);
+
+  // If INSN has an associated call clobbers note, remove the note from
+  // the EBB's tree for the relevant ABI.  A comparison result of 0 from
+  // lookup_call_clobbers means that the note is now at the root of the
+  // tree, so it can be removed with remove_root.
+  if (auto *note = insn->find_note<insn_call_clobbers_note> ())
+    {
+      ebb_call_clobbers_info *ecc = insn->ebb ()->first_call_clobbers ();
+      while (ecc->abi ()->id () != note->abi_id ())
+	ecc = ecc->next ();
+      int comparison = lookup_call_clobbers (*ecc, insn);
+      gcc_assert (comparison == 0);
+      ecc->remove_root ();
+    }
+
+  insn_info *prev = insn->prev_any_insn ();
+  insn_info *next = insn->next_any_insn ();
+  insn_info *prev_nondebug = insn->prev_nondebug_insn ();
+  insn_info *next_nondebug = insn->next_nondebug_insn ();
+
+  // We should never remove the entry or exit block's instructions.
+  // At present we also don't remove entire blocks, so should never
+  // remove debug instructions.
+  gcc_checking_assert (prev_nondebug
+		       && next_nondebug
+		       && !insn->is_debug_insn ());
+
+  if (prev->is_debug_insn () && next->is_debug_insn ())
+    {
+      // We need to stitch together two subsequences of debug insns.
+      insn_info *last = next->last_debug_insn ();
+      next->set_prev_sametype_insn (prev);
+      prev_nondebug->next_any_insn ()->set_last_debug_insn (last);
+    }
+  prev->set_next_any_insn (next);
+  next_nondebug->set_prev_sametype_insn (prev_nondebug);
+
+  insn->clear_insn_links ();
+}
+
+// Create an artificial instruction for BB, associating it with RTL (which can
+// be null).  Add the new instruction to the end of the function's list and
+// return the new instruction.
+insn_info *
+function_info::append_artificial_insn (bb_info *bb, rtx_insn *rtl)
+{
+  // Artificial instructions use negative uids, allocated downwards.
+  int uid = m_next_artificial_uid--;
+  insn_info *insn = allocate<insn_info> (bb, rtl, uid);
+  append_insn (insn);
+  return insn;
+}
+
+// Finish building a new list of uses and definitions for instruction INSN.
+//
+// The accesses are stored in a single obstack-allocated array, with the
+// (sorted) definitions first and the (sorted) uses afterwards.
+void
+function_info::finish_insn_accesses (insn_info *insn)
+{
+  unsigned int num_defs = m_temp_defs.length ();
+  unsigned int num_uses = m_temp_uses.length ();
+  obstack_make_room (&m_obstack, num_defs + num_uses);
+  if (num_defs)
+    {
+      sort_accesses (m_temp_defs);
+      obstack_grow (&m_obstack, m_temp_defs.address (),
+		    num_defs * sizeof (access_info *));
+      m_temp_defs.truncate (0);
+    }
+  if (num_uses)
+    {
+      sort_accesses (m_temp_uses);
+      obstack_grow (&m_obstack, m_temp_uses.address (),
+		    num_uses * sizeof (access_info *));
+      m_temp_uses.truncate (0);
+    }
+  // Close off the growing object and hand the array to INSN.
+  void *addr = obstack_finish (&m_obstack);
+  insn->set_accesses (static_cast<access_info **> (addr), num_defs, num_uses);
+}
+
+// Called while building SSA form using BI.  Record that INSN contains
+// read reference REF.  If this requires new entries to be added to
+// INSN->uses (), add those entries to the list we're building in
+// m_temp_uses.
+void
+function_info::record_use (build_info &bi, insn_info *insn,
+			   rtx_obj_reference ref)
+{
+  unsigned int regno = ref.regno;
+  machine_mode mode = ref.is_reg () ? ref.mode : BLKmode;
+  access_info *access = bi.last_access[ref.regno + 1];
+  use_info *use = safe_dyn_cast<use_info *> (access);
+  if (!use)
+    {
+      set_info *value = safe_dyn_cast<set_info *> (access);
+      // In order to ensure that -g doesn't affect codegen, uses in debug
+      // instructions do not affect liveness, either in DF or here.
+      // This means that there might be no correct definition of the resource
+      // available (e.g. if it would require a phi node that the nondebug
+      // code doesn't need).  Perhaps we could have "debug phi nodes" as
+      // well as "debug instructions", but that would require a method
+      // of building phi nodes that didn't depend on DF liveness information,
+      // and so might be significantly more expensive.
+      //
+      // Therefore, the only value we try to attach to a use by a debug
+      // instruction is VALUE itself (as we would for nondebug instructions).
+      // We then need to make a conservative check for whether VALUE is
+      // actually correct.
+      auto value_is_valid = [&]()
+	{
+	  // Memory always has a valid definition.
+	  if (ref.is_mem ())
+	    return true;
+
+	  // If VALUE would lead to an uninitialized use anyway, there's
+	  // nothing to check.
+	  if (!value)
+	    return false;
+
+	  // If the previous definition occurs in the same EBB then it
+	  // is certainly correct.
+	  if (value->ebb () == bi.current_ebb)
+	    return true;
+
+	  // If the register is live on entry to the EBB but not used
+	  // within it, VALUE is the correct live-in value.
+	  if (bitmap_bit_p (bi.ebb_live_in_for_debug, regno))
+	    return true;
+
+	  // Check if VALUE is the function's only definition of REGNO
+	  // and if it dominates the use.
+	  if (regno != MEM_REGNO
+	      && regno < DF_REG_SIZE (DF)
+	      && DF_REG_DEF_COUNT (regno) == 1
+	      && dominated_by_p (CDI_DOMINATORS, insn->bb ()->cfg_bb (),
+				 value->bb ()->cfg_bb ()))
+	    return true;
+
+	  // Punt for other cases.
+	  return false;
+	};
+      if (insn->is_debug_insn () && !value_is_valid ())
+	value = nullptr;
+
+      use = allocate<use_info> (insn, resource_info { mode, regno }, value);
+      add_use (use);
+      m_temp_uses.safe_push (use);
+      // Cache the use so that later references to the same register by
+      // this instruction reuse it; see the comment above
+      // build_info::last_access.
+      bi.last_access[ref.regno + 1] = use;
+      use->record_reference (ref, true);
+    }
+  else
+    {
+      // Record the mode of the largest use.  The choice is arbitrary if
+      // the instruction (unusually) references the same register in two
+      // different but equal-sized modes.
+      gcc_checking_assert (use->insn () == insn);
+      if (HARD_REGISTER_NUM_P (regno)
+	  && partial_subreg_p (use->mode (), mode))
+	use->set_mode (mode);
+      use->record_reference (ref, false);
+    }
+}
+
+// Called while building SSA form for INSN using BI.  Record the effect
+// of call clobbers in RTL.  We have already added the explicit sets and
+// clobbers for RTL, which have priority over any call clobbers.
+void
+function_info::record_call_clobbers (build_info &bi, insn_info *insn,
+				     rtx_call_insn *rtl)
+{
+  // See whether we should record this call in the EBB's list of
+  // call clobbers.  Three things affect this choice:
+  //
+  // (1) The list is the only way we have of recording partial clobbers.
+  //     All calls that only partially clobber registers must therefore
+  //     be in the list.
+  //
+  // (2) Adding calls to the list is much more memory-efficient than
+  //     creating a long list of clobber_infos.
+  //
+  // (3) Adding calls to the list limits the ability to move definitions
+  //     of registers that are normally fully or partially clobbered
+  //     by the associated predefined ABI.  So adding calls to the list
+  //     can hamper optimization if (thanks to -fipa-ra) the number of
+  //     clobbers is much smaller than the usual set.
+  //
+  // The trade-off that we currently take is to use the list if there
+  // are some registers that the call only partially clobbers or if
+  // the set of clobbers is the standard set.
+  function_abi abi = insn_callee_abi (rtl);
+  if (abi.base_abi ().full_reg_clobbers () == abi.full_reg_clobbers ()
+      || abi.full_and_partial_reg_clobbers () != abi.full_reg_clobbers ())
+    {
+      // Find an entry for this predefined ABI, creating one if necessary.
+      ebb_call_clobbers_info *ecc = bi.current_ebb->first_call_clobbers ();
+      while (ecc && ecc->abi () != &abi.base_abi ())
+	ecc = ecc->next ();
+      if (!ecc)
+	{
+	  ecc = allocate<ebb_call_clobbers_info> (&abi.base_abi ());
+	  ecc->m_next = bi.current_ebb->first_call_clobbers ();
+	  bi.current_ebb->set_first_call_clobbers (ecc);
+	}
+
+      // Attach a note to INSN and record the insn in the ABI's tree.
+      auto abi_id = abi.base_abi ().id ();
+      auto *insn_clobbers = allocate<insn_call_clobbers_note> (abi_id, insn);
+      insn->add_note (insn_clobbers);
+
+      ecc->insert_max_node (insn_clobbers);
+    }
+  else
+    // Represent each fully-clobbered register as an individual
+    // clobber_info, skipping registers that INSN already defines
+    // explicitly.
+    for (unsigned int regno = 0; regno < FIRST_PSEUDO_REGISTER; ++regno)
+      if (TEST_HARD_REG_BIT (abi.full_reg_clobbers (), regno))
+	{
+	  def_info *def = m_defs[regno + 1];
+	  if (!def || def->last_def ()->insn () != insn)
+	    {
+	      def = allocate<clobber_info> (insn, regno);
+	      def->m_is_call_clobber = true;
+	      append_def (def);
+	      m_temp_defs.safe_push (def);
+	      bi.last_access[regno + 1] = def;
+	    }
+	}
+}
+
+// Called while building SSA form using BI.  Record that INSN contains
+// write reference REF.  Add associated def_infos to the list of accesses
+// that we're building in m_temp_defs.  Record the register's new live
+// value in BI.
+void
+function_info::record_def (build_info &bi, insn_info *insn,
+			   rtx_obj_reference ref)
+{
+  // Punt if we see multiple definitions of the same resource.
+  // This can happen for several reasons:
+  //
+  // - An instruction might store two values to memory at once, giving two
+  //   distinct memory references.
+  //
+  // - An instruction might assign to multiple pieces of a wide pseudo
+  //   register.  For example, on 32-bit targets, an instruction might
+  //   assign to both the upper and lower halves of a 64-bit pseudo register.
+  //
+  // - It's possible for the same register to be clobbered by the
+  //   CALL_INSN_FUNCTION_USAGE and to be set by the main instruction
+  //   pattern as well.  In that case, the clobber conceptually happens
+  //   before the set and can essentially be ignored.
+  //
+  // - Similarly, global registers are implicitly set by a call but can
+  //   be explicitly set or clobbered as well.  In that situation, the sets
+  //   are listed first and should win over a clobber.
+  unsigned int regno = ref.regno;
+  machine_mode mode = ref.is_reg () ? ref.mode : BLKmode;
+  def_info *def = safe_dyn_cast<def_info *> (bi.last_access[ref.regno + 1]);
+  if (def && def->insn () == insn)
+    {
+      if (!ref.is_clobber ())
+	{
+	  gcc_checking_assert (!is_a<clobber_info *> (def));
+	  def->record_reference (ref, false);
+	}
+      return;
+    }
+
+  // Memory is always well-defined, so only use clobber_infos for registers.
+  if (ref.is_reg () && ref.is_clobber ())
+    def = allocate<clobber_info> (insn, regno);
+  else
+    def = allocate<set_info> (insn, resource_info { mode, regno });
+  def->record_reference (ref, true);
+  append_def (def);
+  m_temp_defs.safe_push (def);
+  // The new definition becomes the current live value of the resource.
+  bi.last_access[ref.regno + 1] = def;
+}
+
+// Called while building SSA form using BI.  Add an insn_info for RTL
+// to the block that we're currently building.
+void
+function_info::add_insn_to_block (build_info &bi, rtx_insn *rtl)
+{
+  insn_info *insn = allocate<insn_info> (bi.current_bb, rtl, UNKNOWN_COST);
+  append_insn (insn);
+
+  // Scan RTL once and use the result for both the use and the def walk.
+  vec_rtx_properties properties;
+  properties.add_insn (rtl, true);
+  insn->set_properties (properties);
+
+  start_insn_accesses ();
+
+  // Record the uses.
+  for (rtx_obj_reference ref : properties.refs ())
+    if (ref.is_read ())
+      record_use (bi, insn, ref);
+
+  // Restore the contents of bi.last_access, which we used as a cache
+  // when assembling the uses.
+  for (access_info *access : m_temp_uses)
+    {
+      unsigned int regno = access->regno ();
+      gcc_checking_assert (bi.last_access[regno + 1] == access);
+      bi.last_access[regno + 1] = as_a<use_info *> (access)->def ();
+    }
+
+  // Record the definitions.
+  for (rtx_obj_reference ref : properties.refs ())
+    if (ref.is_write ())
+      record_def (bi, insn, ref);
+
+  // Logically these happen before the explicit definitions, but if the
+  // explicit definitions and call clobbers reference the same register,
+  // the explicit definition should win.
+  if (auto *call_rtl = dyn_cast<rtx_call_insn *> (rtl))
+    record_call_clobbers (bi, insn, call_rtl);
+
+  finish_insn_accesses (insn);
+}
+
+// Check whether INSN sets any registers that are never subsequently used.
+// If so, add REG_UNUSED notes for them.  The caller has already removed
+// any previous REG_UNUSED notes.
+void
+function_info::add_reg_unused_notes (insn_info *insn)
+{
+  rtx_insn *rtl = insn->rtl ();
+
+  auto handle_potential_set = [&](rtx pattern)
+    {
+      if (GET_CODE (pattern) != SET)
+	return;
+
+      rtx dest = SET_DEST (pattern);
+      if (!REG_P (dest))
+	return;
+
+      // For a multi-register destination, every constituent register
+      // must be unused before the value as a whole counts as unused.
+      def_array defs = insn->defs ();
+      unsigned int index = find_access_index (defs, REGNO (dest));
+      for (unsigned int i = 0; i < REG_NREGS (dest); ++i)
+	{
+	  def_info *def = defs[index + i];
+	  gcc_checking_assert (def->regno () == REGNO (dest) + i);
+	  set_info *set = dyn_cast<set_info *> (def);
+	  if (set && set->has_nondebug_uses ())
+	    return;
+	}
+      add_reg_note (rtl, REG_UNUSED, dest);
+    };
+
+  // Handle both single sets and sets inside a PARALLEL.
+  rtx pattern = PATTERN (rtl);
+  if (GET_CODE (pattern) == PARALLEL)
+    for (int i = 0; i < XVECLEN (pattern, 0); ++i)
+      handle_potential_set (XVECEXP (pattern, 0, i));
+  else
+    handle_potential_set (pattern);
+}
+
+// Search TREE for call clobbers at INSN.  Return:
+//
+// - less than zero if INSN occurs before the root of TREE
+// - 0 if INSN is the root of TREE
+// - greater than zero if INSN occurs after the root of TREE
+int
+rtl_ssa::lookup_call_clobbers (insn_call_clobbers_tree &tree, insn_info *insn)
+{
+  // Compare INSN against each note's instruction during the lookup.
+  return tree.lookup ([&](insn_call_clobbers_note *clobbers)
+		      { return insn->compare_with (clobbers->insn ()); });
+}
+
+// Print a description of INSN to PP, printing "<null>" for a null pointer.
+void
+rtl_ssa::pp_insn (pretty_printer *pp, const insn_info *insn)
+{
+  if (insn)
+    insn->print_full (pp);
+  else
+    pp_string (pp, "<null>");
+}
+
+// Print a description of INSN to FILE, in the same format as pp_insn.
+void
+dump (FILE *file, const insn_info *insn)
+{
+  dump_using (file, pp_insn, insn);
+}
+
+// Debug interface to the dump routine above, for calling from a debugger.
+void debug (const insn_info *x) { dump (stderr, x); }
diff --git a/gcc/rtl-ssa/insns.h b/gcc/rtl-ssa/insns.h
new file mode 100644
index 00000000000..a663103c1d9
--- /dev/null
+++ b/gcc/rtl-ssa/insns.h
@@ -0,0 +1,505 @@
+// Instruction-related RTL SSA classes                              -*- C++ -*-
+// Copyright (C) 2020 Free Software Foundation, Inc.
+//
+// This file is part of GCC.
+//
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+//
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+// for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3.  If not see
+// <http://www.gnu.org/licenses/>.
+
+namespace rtl_ssa {
+
+// A fake cost for instructions that we haven't costed yet.
+const int UNKNOWN_COST = INT_MAX;
+
+// Enumerates the kinds of note that can be added to an instruction.
+// See the comment above insn_info for details.
+enum class insn_note_kind : uint8_t
+{
+  ORDER_NODE,
+  CALL_CLOBBERS
+};
+
+// The base class for notes that can be added to an instruction.
+// See the comment above insn_info for details.
+class insn_note
+{
+  // Size: 2 LP64 words.
+  friend class insn_info;
+  friend class function_info;
+
+public:
+  // Return what kind of note this is.
+  insn_note_kind kind () const { return m_kind; }
+
+  // Return the next note in the list, or null if none.
+  insn_note *next_note () const { return m_next_note; }
+
+  // Used with T = Derived *, where Derived is derived from insn_note.
+  // Convert the note to Derived, asserting that it has the right kind.
+  template<typename T>
+  T as_a ();
+
+  // Used with T = Derived *, where Derived is derived from insn_note.
+  // If the note is a Derived note, return it in that form, otherwise
+  // return null.
+  template<typename T>
+  T dyn_cast ();
+
+protected:
+  // Construct a note with the given kind.
+  insn_note (insn_note_kind);
+
+private:
+  // The next note in the list, or null if none.
+  insn_note *m_next_note;
+
+  // The kind of note this is.
+  insn_note_kind m_kind : 8;
+
+protected:
+  // Fill in the remaining LP64 word with data that derived classes can use.
+  unsigned int m_data8 : 8;
+  unsigned int m_data16 : 16;
+  unsigned int m_data32 : 32;
+};
+
+// Instructions have one of these notes if insn_info::has_call_clobbers ()
+// is true.  All such instructions in an EBB are first grouped together
+// by the predefined_function_abis of the functions that they call.
+// Then, for each such predefined ABI, the call_clobbers notes are put
+// into a splay tree whose nodes follow execution order.
+class insn_call_clobbers_note : public insn_note
+{
+  friend class function_info;
+  friend class default_splay_tree_accessors<insn_call_clobbers_note *>;
+
+public:
+  static const insn_note_kind kind = insn_note_kind::CALL_CLOBBERS;
+
+  // Return the identifier of the predefined_function_abi.
+  unsigned int abi_id () const { return m_data32; }
+
+  // Return the instruction to which the note is attached.
+  insn_info *insn () const { return m_insn; }
+
+protected:
+  insn_call_clobbers_note (unsigned int abi_id, insn_info *insn);
+
+  // The splay tree pointers.
+  insn_call_clobbers_note *m_children[2];
+
+  // The value returned by insn ().
+  insn_info *m_insn;
+};
+
+// A splay tree of insn_call_clobbers_notes.
+using insn_call_clobbers_tree = default_splay_tree<insn_call_clobbers_note *>;
+
+// SSA-related information about an instruction.  It also represents
+// artificial instructions that are added to make the dataflow correct;
+// these artificial instructions fall into three categories:
+//
+// - Instructions that hold the phi nodes for an extended basic block (is_phi).
+//
+// - Instructions that represent the head of a basic block and that hold
+//   all the associated artificial uses and definitions.
+//
+// - Instructions that represent the end of a basic block and that again
+//   hold all the associated artificial uses and definitions.
+//
+// Dataflow-wise, each instruction goes through three stages:
+//
+// (1) Use all the values in uses ().
+//
+// (2) If has_call_clobbers (), clobber the registers indicated by
+//     insn_callee_abi.
+//
+// (3) Define all the values in defs ().
+//
+// Having stage (2) is a trade-off: it makes processing the instructions
+// more complicated, but it saves having to allocate memory for every
+// individual call clobber.  Without it, clobbers for calls would often
+// make up a large proportion of the total definitions in a function.
+//
+// All the instructions in a function are chained together in a list
+// that follows a reverse postorder traversal of the CFG.  The list
+// contains both debug and nondebug instructions, but it is possible
+// to hop from one nondebug instruction to the next with constant complexity.
+//
+// Instructions can have supplemental information attached in the form
+// of "notes", a bit like REG_NOTES for the underlying RTL insns.
+class insn_info
+{
+  // Size: 8 LP64 words.
+  friend class ebb_info;
+  friend class function_info;
+
+public:
+  // Compare instructions by their positions in the function list described
+  // above.  Thus for two instructions in the same basic block, I1 < I2 if
+  // I1 comes before I2 in the block.
+  bool operator< (const insn_info &) const;
+  bool operator<= (const insn_info &) const;
+  bool operator>= (const insn_info &) const;
+  bool operator> (const insn_info &) const;
+
+  // Return -1 if this instruction comes before INSN in the reverse
+  // postorder, 0 if this instruction is INSN, or 1 if this instruction
+  // comes after INSN in the reverse postorder.
+  int compare_with (const insn_info *insn) const;
+
+  // Return the previous and next instructions in the list described above,
+  // or null if there are no such instructions.
+  insn_info *prev_any_insn () const;
+  insn_info *next_any_insn () const;
+
+  // Only valid if !is_debug_insn ().  Return the previous and next
+  // nondebug instructions in the list described above, skipping over
+  // any intervening debug instructions.  These are constant-time operations.
+  insn_info *prev_nondebug_insn () const;
+  insn_info *next_nondebug_insn () const;
+
+  // Return the underlying RTL insn.  This instruction is null if is_phi ()
+  // or is_bb_end () are true.  The instruction is a basic block note if
+  // is_bb_head () is true.
+  rtx_insn *rtl () const { return m_rtl; }
+
+  // Return true if the instruction is a real insn with an rtl pattern.
+  // Return false if it is an artificial instruction that represents the
+  // phi nodes in an extended basic block or the head or end of a basic block.
+  bool is_real () const { return m_cost_or_uid >= 0; }
+
+  // Return the opposite of is_real ().
+  bool is_artificial () const { return m_cost_or_uid < 0; }
+
+  // Return true if the instruction was a real instruction but has now
+  // been deleted.  In this case the instruction is no longer part of
+  // the SSA information.
+  bool has_been_deleted () const { return m_rtl && !INSN_P (m_rtl); }
+
+  // Return true if the instruction is a debug instruction (and thus
+  // also a real instruction).
+  bool is_debug_insn () const { return m_is_debug_insn; }
+
+  // Return true if the instruction is something that we can optimize.
+  // This implies that it is a real instruction that contains an asm
+  // or that contains something that matches an .md define_insn pattern.
+  bool can_be_optimized () const { return m_can_be_optimized; }
+
+  // Return true if the instruction is a call instruction.
+  //
+  // ??? We could cache this information, but since most callers would
+  // go on to access PATTERN (rtl ()), a cache might not be helpful and
+  // could even be counterproductive.
+  bool is_call () const { return CALL_P (m_rtl); }
+
+  // Return true if the instruction is a jump instruction.
+  //
+  // ??? See is_call for the reason we don't cache this.
+  bool is_jump () const { return JUMP_P (m_rtl); }
+
+  // Return true if the instruction is real and contains an inline asm.
+  bool is_asm () const { return m_is_asm; }
+
+  // Return true if the instruction is real and includes an RTX_AUTOINC
+  // operation.
+  bool has_pre_post_modify () const { return m_has_pre_post_modify; }
+
+  // Return true if the instruction is real and has volatile references,
+  // in the sense of volatile_refs_p.  This includes volatile memory,
+  // volatile asms and UNSPEC_VOLATILEs.
+  bool has_volatile_refs () const { return m_has_volatile_refs; }
+
+  // Return true if the instruction is artificial and if its (sole)
+  // purpose is to hold the phi nodes in an extended basic block.
+  bool is_phi () const;
+
+  // Return true if the instruction is artificial and if it represents
+  // the head of a basic block.  If so, the instruction conceptually
+  // executes before the real instructions in the block.  The uses
+  // and definitions represent the df_get_artificial_uses and
+  // df_get_artificial_defs entries for the head of the block.
+  bool is_bb_head () const;
+
+  // Return true if the instruction is artificial and if it represents
+  // the end of a basic block.  The uses and definitions represent the
+  // df_get_artificial_uses and df_get_artificial_defs entries for
+  // the end of the block.
+  bool is_bb_end () const;
+
+  // Return the basic block that the instruction is in.
+  bb_info *bb () const { return m_bb; }
+
+  // Return the extended basic block that the instruction is in;
+  // see bb_info for details.
+  ebb_info *ebb () const;
+
+  // If the instruction is real, return the unique identifier of the
+  // underlying RTL insn.  If the instruction is artificial, return
+  // a unique negative identifier for the instructions.
+  //
+  // Note that the identifiers are not linear: it can be the case that
+  // an instruction with a higher uid comes earlier in a block than an
+  // instruction with a lower uid.  The identifiers are however persistent;
+  // the identifier remains the same after the instruction has been moved
+  // or changed.
+  int uid () const;
+
+  // Return the list of things that this instruction uses.  Registers
+  // come first, in register number order, followed by memory.
+  use_array uses () const;
+
+  // Return true if the instruction is a call and if the clobbers
+  // described by insn_callee_abi have been omitted from the list
+  // of definitions.
+  bool has_call_clobbers () const;
+
+  // Return the list of things that this instruction sets or clobbers.
+  // Registers come first, in register number order, followed by memory.
+  //
+  // If has_call_clobbers () is true, the list omits both the full and
+  // partial register clobbers described by insn_callee_abi.
+  def_array defs () const;
+
+  // The number of entries in uses ().
+  unsigned int num_uses () const { return m_num_uses; }
+
+  // The number of entries in defs ().
+  unsigned int num_defs () const { return m_num_defs; }
+
+  // Return the cost of the instruction, as calculated by the target.
+  // For performance reasons, the cost is evaluated lazily on first use.
+  //
+  // Artificial instructions have a cost of 0.
+  unsigned int cost () const;
+
+  // Return the first insn_note attached to the instruction, or null
+  // if none.
+  insn_note *first_note () const { return m_first_note; }
+
+  // See if a note of type T is attached to the instruction.  Return it
+  // if so, otherwise return null.
+  template<typename T>
+  const T *find_note () const;
+
+  // Print "i" + uid () for real instructions and "a" + -uid () for
+  // artificial instructions.
+  void print_identifier (pretty_printer *) const;
+
+  // Print a short(ish) description of where the instruction is.
+  void print_location (pretty_printer *) const;
+
+  // Combine print_identifier and print_location.
+  void print_identifier_and_location (pretty_printer *) const;
+
+  // Print a full description of the instruction.
+  void print_full (pretty_printer *) const;
+
+private:
+  // The first-order way of representing the order between instructions
+  // is to assign "program points", with higher point numbers coming
+  // later in the reverse postorder than lower point numbers.  However,
+  // after a sequence of instruction movements, we may end up in a situation
+  // that adjacent instructions have the same program point.
+  //
+  // When that happens, we put the instructions into a splay tree that
+  // records their relative order.  Each node of the splay tree is an
+  // order_node note that is attached to its respective instruction.
+  // The root of the splay tree is not stored, since the only thing
+  // we need the tree for is to compare two nodes.
+  class order_node : public insn_note
+  {
+  public:
+    static const insn_note_kind kind = insn_note_kind::ORDER_NODE;
+
+    order_node (int uid);
+
+    // Return the uid of the instruction that this node describes.
+    int uid () const { return m_data32; }
+
+    // The splay tree pointers.
+    order_node *m_children[2];
+    order_node *m_parent;
+  };
+  using order_splay_tree = default_rootless_splay_tree<order_node *>;
+
+  // prev_insn_or_last_debug_insn represents a choice between two things:
+  //
+  // (1) A pointer to the previous instruction in the list that has the
+  //     same is_debug_insn () value, or null if no such instruction exists.
+  //
+  // (2) A pointer to the end of a sublist of debug instructions.
+  //
+  // (2) is used if this instruction is a debug instruction and the
+  // previous instruction is not.  (1) is used otherwise.
+  //
+  // next_nondebug_or_debug_insn points to the next instruction but also
+  // records whether that next instruction is a debug instruction or a
+  // nondebug instruction.
+  //
+  // Thus the list is chained as follows:
+  //
+  //         ---->        ---->     ---->     ---->     ---->
+  // NONDEBUG     NONDEBUG     DEBUG     DEBUG     DEBUG     NONDEBUG ...
+  //         <----    ^     +--     <----     <----  ^    +--
+  //                  |     |                        |    |
+  //                  |     +------------------------+    |
+  //                  |                                   |
+  //                  +-----------------------------------+
+  using prev_insn_or_last_debug_insn = pointer_mux<insn_info>;
+  using next_nondebug_or_debug_insn = pointer_mux<insn_info>;
+
+  insn_info (bb_info *bb, rtx_insn *rtl, int cost_or_uid);
+
+  static void print_uid (pretty_printer *, int);
+
+  void calculate_cost () const;
+  void set_properties (const rtx_properties &);
+  void set_accesses (access_info **, unsigned int, unsigned int);
+  void copy_accesses (access_array, access_array);
+  void set_cost (unsigned int cost) { m_cost_or_uid = cost; }
+  void set_bb (bb_info *bb) { m_bb = bb; }
+
+  void add_note (insn_note *note);
+
+  order_node *get_order_node () const;
+  order_node *get_known_order_node () const;
+  int slow_compare_with (const insn_info &) const;
+
+  insn_info *last_debug_insn () const;
+
+  unsigned int point () const { return m_point; }
+  void copy_prev_from (insn_info *);
+  void copy_next_from (insn_info *);
+  void set_prev_sametype_insn (insn_info *);
+  void set_last_debug_insn (insn_info *);
+  void set_next_any_insn (insn_info *);
+  void set_point (unsigned int point) { m_point = point; }
+  void clear_insn_links ();
+  bool has_insn_links ();
+
+  // The values returned by the accessors above.
+  prev_insn_or_last_debug_insn m_prev_insn_or_last_debug_insn;
+  next_nondebug_or_debug_insn m_next_nondebug_or_debug_insn;
+  bb_info *m_bb;
+  rtx_insn *m_rtl;
+
+  // The list of definitions followed by the list of uses.
+  access_info **m_accesses;
+
+  // The number of definitions and the number of uses.
+  // FIRST_PSEUDO_REGISTER + 1
+  // is the maximum number of accesses to hard registers and memory, and
+  // MAX_RECOG_OPERANDS is the maximum number of pseudos that can be
+  // defined by an instruction, so the number of definitions should fit
+  // easily in 16 bits.
+  unsigned int m_num_uses;
+  unsigned int m_num_defs : 16;
+
+  // Flags returned by the accessors above.
+  unsigned int m_is_debug_insn : 1;
+  unsigned int m_can_be_optimized : 1;
+  unsigned int m_is_asm : 1;
+  unsigned int m_has_pre_post_modify : 1;
+  unsigned int m_has_volatile_refs : 1;
+
+  // For future expansion.
+  unsigned int m_spare : 11;
+
+  // The program point at which the instruction occurs.
+  //
+  // Note that the values of the program points are influenced by -g
+  // and so should not be used to make codegen decisions.
+  unsigned int m_point;
+
+  // Negative if the instruction is artificial, nonnegative if it is real.
+  //
+  // For real instructions: the cost of the instruction, or UNKNOWN_COST
+  // if we haven't measured it yet.
+  //
+  // For artificial instructions: the (negative) unique identifier of the
+  // instruction.
+  mutable int m_cost_or_uid;
+
+  // The list of notes that have been attached to the instruction.
+  insn_note *m_first_note;
+};
+
+// Iterators for unfiltered lists of instructions.
+using any_insn_iterator = list_iterator<insn_info, &insn_info::next_any_insn>;
+using reverse_any_insn_iterator
+  = list_iterator<insn_info, &insn_info::prev_any_insn>;
+
+// Iterators for nondebug instructions only.
+using nondebug_insn_iterator
+  = list_iterator<insn_info, &insn_info::next_nondebug_insn>;
+using reverse_nondebug_insn_iterator
+  = list_iterator<insn_info, &insn_info::prev_nondebug_insn>;
+
+// A class that describes an inclusive range of instructions.
+class insn_range_info
+{
+public:
+  insn_range_info () = default;
+
+  // Create a range that contains a singleton instruction.
+  insn_range_info (insn_info *insn) : first (insn), last (insn) {}
+
+  // Create a range [FIRST, LAST], given that *FIRST <= *LAST.
+  insn_range_info (insn_info *first, insn_info *last);
+
+  // Return true if the range contains at least one instruction.
+  // NOTE: this dereferences FIRST and LAST, so both must be nonnull;
+  // a default-constructed range must not be queried this way.
+  explicit operator bool () const { return *first <= *last; }
+
+  bool operator== (const insn_range_info &) const;
+  bool operator!= (const insn_range_info &) const;
+
+  // If the range contains a single instruction, return that instruction,
+  // otherwise return null.
+  insn_info *singleton () const;
+
+  // Return true if the range includes INSN.
+  bool includes (insn_info *insn) const;
+
+  // If INSN is inside the range, return INSN, otherwise return the
+  // nearest in-range instruction.
+  insn_info *clamp_insn_to_range (insn_info *insn) const;
+
+  // Return true if this range is a subrange of OTHER, i.e. if OTHER
+  // includes every instruction that this range does.
+  bool is_subrange_of (const insn_range_info &other) const;
+
+  // The lower and upper bounds of the range.
+  insn_info *first;
+  insn_info *last;
+};
+
+// A class that represents a closure of operator== for instructions.
+// This is used by insn_is; see there for details.
+class insn_is_closure
+{
+public:
+  insn_is_closure (const insn_info *insn) : m_insn (insn) {}
+  bool operator() (const insn_info *other) const { return m_insn == other; }
+
+private:
+  const insn_info *m_insn;
+};
+
+void pp_insn (pretty_printer *, const insn_info *);
+
+}
+
+void dump (FILE *, const rtl_ssa::insn_info *);
+
+void DEBUG_FUNCTION debug (const rtl_ssa::insn_info *);
diff --git a/gcc/rtl-ssa/internals.inl b/gcc/rtl-ssa/internals.inl
new file mode 100644
index 00000000000..00ad6422ee8
--- /dev/null
+++ b/gcc/rtl-ssa/internals.inl
@@ -0,0 +1,682 @@
+// Implementation of private inline member functions for RTL SSA    -*- C++ -*-
+// Copyright (C) 2020 Free Software Foundation, Inc.
+//
+// This file is part of GCC.
+//
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+//
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+// for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3.  If not see
+// <http://www.gnu.org/licenses/>.
+
+namespace rtl_ssa {
+
+// Construct a new access with the given resource () and kind () values.
+inline access_info::access_info (resource_info resource, access_kind kind)
+  : m_regno (resource.regno),
+    m_kind (kind),
+    m_is_artificial (false),
+    m_is_set_with_nondebug_insn_uses (false),
+    m_is_pre_post_modify (false),
+    m_is_call_clobber (false),
+    m_is_live_out_use (false),
+    m_includes_address_uses (false),
+    m_includes_read_writes (false),
+    m_includes_subregs (false),
+    m_includes_multiregs (false),
+    m_only_occurs_in_notes (false),
+    m_is_last_nondebug_insn_use (false),
+    m_is_in_debug_insn_or_phi (false),
+    m_has_been_superceded (false),
+    m_is_temp (false),
+    m_spare (0),
+    m_mode (resource.mode)
+{
+}
+
+// Construct a use of RESOURCE in LOCATION.  The resource's value is provided
+// by DEF, or is completely undefined if DEF is null.
+inline use_info::use_info (insn_or_phi location, resource_info resource,
+			   set_info *definition)
+  : access_info (resource, access_kind::USE),
+    m_insn_or_phi (location),
+    m_last_use_or_prev_use (nullptr),
+    m_last_nondebug_insn_use_or_next_use (nullptr),
+    m_def (definition)
+{
+  if (m_insn_or_phi.is_second ())
+    {
+      m_is_in_debug_insn_or_phi = true;
+      m_is_artificial = true;
+    }
+  else
+    {
+      insn_info *insn = m_insn_or_phi.known_first ();
+      m_is_in_debug_insn_or_phi = insn->is_debug_insn ();
+      m_is_artificial = insn->is_artificial ();
+    }
+}
+
+// Return the correct (uncached) value of m_is_last_nondebug_insn_use.
+inline bool
+use_info::calculate_is_last_nondebug_insn_use () const
+{
+  use_info *next = next_use ();
+  return is_in_nondebug_insn () && (!next || next->is_in_debug_insn_or_phi ());
+}
+
+// Accumulate any properties about REF that are also stored in use_infos.
+// IS_FIRST is true if REF is the first access to resource () that we have
+// recorded in this way, false if we have already recorded previous
+// references.
+inline void
+use_info::record_reference (rtx_obj_reference ref, bool is_first)
+{
+  if (is_first)
+    {
+      m_includes_address_uses = ref.in_address ();
+      m_includes_read_writes = ref.is_write ();
+      m_includes_subregs = ref.in_subreg ();
+      m_includes_multiregs = ref.is_multireg ();
+      m_only_occurs_in_notes = ref.in_note ();
+    }
+  else
+    {
+      m_includes_address_uses |= ref.in_address ();
+      m_includes_read_writes |= ref.is_write ();
+      m_includes_subregs |= ref.in_subreg ();
+      m_includes_multiregs |= ref.is_multireg ();
+      m_only_occurs_in_notes &= ref.in_note ();
+    }
+}
+
+// Change the value of insn () to INSN.
+inline void
+use_info::set_insn (insn_info *insn)
+{
+  m_insn_or_phi = insn;
+  m_is_artificial = insn->is_artificial ();
+}
+
+// Copy the overloaded prev link from OTHER.
+inline void
+use_info::copy_prev_from (use_info *other)
+{
+  m_last_use_or_prev_use = other->m_last_use_or_prev_use;
+}
+
+// Copy the overloaded next link from OTHER.
+inline void
+use_info::copy_next_from (use_info *other)
+{
+  m_last_nondebug_insn_use_or_next_use
+    = other->m_last_nondebug_insn_use_or_next_use;
+  m_is_last_nondebug_insn_use = calculate_is_last_nondebug_insn_use ();
+}
+
+// Record that this use is the first in the list and that the last use is LAST.
+inline void
+use_info::set_last_use (use_info *last_use)
+{
+  m_last_use_or_prev_use.set_first (last_use);
+}
+
+// Record that this use is not the first in the list and that the previous
+// use is PREV.
+inline void
+use_info::set_prev_use (use_info *prev_use)
+{
+  m_last_use_or_prev_use.set_second (prev_use);
+}
+
+// Record that this use is the last use in the list.  If USE is nonnull,
+// record that USE is the last use in the list by a nondebug instruction,
+// otherwise record that there are no uses by nondebug instructions
+// in the list.
+inline void
+use_info::set_last_nondebug_insn_use (use_info *use)
+{
+  m_last_nondebug_insn_use_or_next_use.set_first (use);
+  m_is_last_nondebug_insn_use = (use == this);
+}
+
+// Record that this use is not the last in the list and that the next
+// use is NEXT_USE.
+inline void
+use_info::set_next_use (use_info *next_use)
+{
+  m_last_nondebug_insn_use_or_next_use.set_second (next_use);
+  m_is_last_nondebug_insn_use = calculate_is_last_nondebug_insn_use ();
+}
+
+// Clear any information relating to the position of the use in its
+// definition's list.
+inline void
+use_info::clear_use_links ()
+{
+  m_last_use_or_prev_use = nullptr;
+  m_last_nondebug_insn_use_or_next_use = nullptr;
+  m_is_last_nondebug_insn_use = false;
+}
+
+// Return true if the use has any links to other uses.  This is mostly
+// for assert checking.
+inline bool
+use_info::has_use_links ()
+{
+  return (m_last_use_or_prev_use
+	  || m_last_nondebug_insn_use_or_next_use
+	  || m_is_last_nondebug_insn_use);
+}
+
+// Construct a definition of RESOURCE in INSN, giving it kind KIND.
+inline def_info::def_info (insn_info *insn, resource_info resource,
+			   access_kind kind)
+  : access_info (resource, kind),
+    m_insn (insn),
+    m_last_def_or_prev_def (nullptr),
+    m_splay_root_or_next_def (nullptr)
+{
+  m_is_artificial = insn->is_artificial ();
+}
+
+// Record any properties about REF that are also stored in def_infos.
+// IS_FIRST is true if REF is the first access to resource () that we have
+// recorded in this way, false if we have already recorded previous
+// references.
+inline void
+def_info::record_reference (rtx_obj_reference ref, bool is_first)
+{
+  if (is_first)
+    {
+      m_is_pre_post_modify = ref.is_pre_post_modify ();
+      m_includes_read_writes = ref.is_read ();
+      m_includes_subregs = ref.in_subreg ();
+      m_includes_multiregs = ref.is_multireg ();
+    }
+  else
+    {
+      m_is_pre_post_modify |= ref.is_pre_post_modify ();
+      m_includes_read_writes |= ref.is_read ();
+      m_includes_subregs |= ref.in_subreg ();
+      m_includes_multiregs |= ref.is_multireg ();
+    }
+}
+
+// Return the last definition in the list.  Only valid when is_first ()
+// is true.
+inline def_info *
+def_info::last_def () const
+{
+  return m_last_def_or_prev_def.known_first ();
+}
+
+// Return the root of the splay tree of definitions of resource (),
+// or null if no splay tree has been created for this resource.
+// Only valid when is_last () is true.
+inline def_node *
+def_info::splay_root () const
+{
+  return m_splay_root_or_next_def.known_first ();
+}
+
+// Copy the overloaded prev link from OTHER.
+inline void
+def_info::copy_prev_from (def_info *other)
+{
+  m_last_def_or_prev_def
+    = other->m_last_def_or_prev_def;
+}
+
+// Copy the overloaded next link from OTHER.
+inline void
+def_info::copy_next_from (def_info *other)
+{
+  m_splay_root_or_next_def = other->m_splay_root_or_next_def;
+}
+
+// Record that this definition is the first in the list and that the last
+// definition is LAST.
+inline void
+def_info::set_last_def (def_info *last_def)
+{
+  m_last_def_or_prev_def.set_first (last_def);
+}
+
+// Record that this definition is not the first in the list and that the
+// previous definition is PREV.
+inline void
+def_info::set_prev_def (def_info *prev_def)
+{
+  m_last_def_or_prev_def.set_second (prev_def);
+}
+
+// Record that this definition is the last in the list and that the root
+// of the splay tree associated with resource () is ROOT.
+inline void
+def_info::set_splay_root (def_node *root)
+{
+  m_splay_root_or_next_def = root;
+}
+
+// Record that this definition is not the last in the list and that the
+// next definition is NEXT.
+inline void
+def_info::set_next_def (def_info *next_def)
+{
+  m_splay_root_or_next_def = next_def;
+}
+
+// Clear any information relating to the position of the definition
+// in its resource's list.
+inline void
+def_info::clear_def_links ()
+{
+  m_last_def_or_prev_def = nullptr;
+  m_splay_root_or_next_def = nullptr;
+}
+
+// Return true if the definition has any links to other definitions.
+// This is mostly for assert checking.
+inline bool
+def_info::has_def_links ()
+{
+  return m_last_def_or_prev_def || m_splay_root_or_next_def;
+}
+
+// Construct a clobber of register REGNO in insn INSN.
+inline clobber_info::clobber_info (insn_info *insn, unsigned int regno)
+  : def_info (insn, { E_BLKmode, regno }, access_kind::CLOBBER),
+    m_children (),
+    m_parent (nullptr),
+    m_group (nullptr)
+{
+}
+
+// Set the containing group to GROUP, if it isn't already.  The main
+// use of this function is to update the new root of GROUP's splay tree.
+// The __builtin_expect marks the pointers as usually being equal
+// already; presumably the guard exists to avoid redundant stores on
+// the common path -- TODO confirm.
+inline void
+clobber_info::update_group (clobber_group *group)
+{
+  if (__builtin_expect (m_group != group, 0))
+    m_group = group;
+}
+
+// Construct a set_info for a store to RESOURCE in INSN, giving it
+// kind KIND.
+inline set_info::set_info (insn_info *insn, resource_info resource,
+			   access_kind kind)
+  : def_info (insn, resource, kind),
+    m_first_use (nullptr)
+{
+}
+
+// Construct a set_info for a store to RESOURCE in INSN.
+inline set_info::set_info (insn_info *insn, resource_info resource)
+  : set_info (insn, resource, access_kind::SET)
+{
+}
+
+// Record that USE is the first use of this definition.
+inline void
+set_info::set_first_use (use_info *first_use)
+{
+  m_first_use = first_use;
+  m_is_set_with_nondebug_insn_uses
+    = (first_use && first_use->is_in_nondebug_insn ());
+}
+
+// Construct a phi for RESOURCE in INSN, giving it identifier UID.
+inline phi_info::phi_info (insn_info *insn, resource_info resource,
+			   unsigned int uid)
+  : set_info (insn, resource, access_kind::PHI),
+    m_uid (uid),
+    m_num_inputs (0),
+    m_prev_phi (nullptr),
+    m_next_phi (nullptr)
+{
+}
+
+// Turn the phi into a degenerate phi, with INPUT representing the
+// value of the resource on all incoming edges.
+inline void
+phi_info::make_degenerate (use_info *input)
+{
+  m_num_inputs = 1;
+  m_single_input = input;
+}
+
+// Set the inputs of the phi to INPUTS.
+inline void
+phi_info::set_inputs (use_array inputs)
+{
+  m_num_inputs = inputs.size ();
+  if (inputs.size () == 1)
+    m_single_input = inputs[0];
+  else
+    m_inputs = access_array (inputs).begin ();
+}
+
+// Construct a definition splay tree node for FIRST_DEF, which is either
+// the first clobber_info in a group or a standalone set_info.
+inline def_node::def_node (clobber_or_set first_def)
+  : m_clobber_or_set (first_def),
+    m_children ()
+{
+}
+
+// Construct a new group of clobber_infos that initially contains just CLOBBER.
+inline clobber_group::clobber_group (clobber_info *clobber)
+  : def_node (clobber),
+    m_last_clobber (clobber),
+    m_clobber_tree (clobber)
+{
+  clobber->m_group = this;
+}
+
+// Construct a node for the instruction with uid UID.
+inline insn_info::order_node::order_node (int uid)
+  : insn_note (kind),
+    m_children (),
+    m_parent (nullptr)
+{
+  // The note's spare 32-bit field holds the instruction uid.
+  m_data32 = uid;
+}
+
+// Construct a note for instruction INSN, giving it abi_id () value ABI_ID.
+inline insn_call_clobbers_note::insn_call_clobbers_note (unsigned int abi_id,
+							 insn_info *insn)
+  : insn_note (kind),
+    m_children (),
+    m_insn (insn)
+{
+  // The note's spare 32-bit field holds the ABI identifier.
+  m_data32 = abi_id;
+}
+
+// Construct an instruction with the given bb () and rtl () values.
+// If the instruction is real, COST_OR_UID is the value of cost (),
+// otherwise it is the value of uid ().
+inline insn_info::insn_info (bb_info *bb, rtx_insn *rtl, int cost_or_uid)
+  : m_prev_insn_or_last_debug_insn (nullptr),
+    m_next_nondebug_or_debug_insn (nullptr),
+    m_bb (bb),
+    m_rtl (rtl),
+    m_accesses (nullptr),
+    m_num_uses (0),
+    m_num_defs (0),
+    m_is_debug_insn (rtl && DEBUG_INSN_P (rtl)),
+    m_can_be_optimized (false),
+    m_is_asm (false),
+    m_has_pre_post_modify (false),
+    m_has_volatile_refs (false),
+    m_spare (0),
+    m_point (0),
+    m_cost_or_uid (cost_or_uid),
+    m_first_note (nullptr)
+{
+}
+
+// Copy any insn properties from PROPERTIES that are also stored in an
+// insn_info.
+inline void
+insn_info::set_properties (const rtx_properties &properties)
+{
+  m_is_asm = properties.has_asm;
+  m_has_pre_post_modify = properties.has_pre_post_modify;
+  m_has_volatile_refs = properties.has_volatile_refs;
+  // Not strictly related to the properties we've been given, but it's
+  // a convenient location to do this.
+  // Note the deliberate bitwise & on the parenthesized (0/1) comparison
+  // results; the value feeds a single-bit bitfield.
+  m_can_be_optimized = (NONDEBUG_INSN_P (m_rtl)
+			& (GET_CODE (PATTERN (m_rtl)) != USE)
+			& (GET_CODE (PATTERN (m_rtl)) != CLOBBER));
+}
+
+// Change the list of instruction accesses to ACCESSES, which contains
+// NUM_DEFS definitions followed by NUM_USES uses.
+inline void
+insn_info::set_accesses (access_info **accesses,
+			 unsigned int num_defs, unsigned int num_uses)
+{
+  m_accesses = accesses;
+  m_num_defs = num_defs;
+  // m_num_defs is a narrow bitfield; check that NUM_DEFS was not
+  // truncated by the assignment above.
+  gcc_assert (num_defs == m_num_defs);
+  m_num_uses = num_uses;
+}
+
+// Change defs () and uses () to DEFS and USES respectively, given that
+// the existing m_accesses array has enough room for them.
+inline void
+insn_info::copy_accesses (access_array defs, access_array uses)
+{
+  gcc_assert (defs.size () + uses.size () <= m_num_defs + m_num_uses);
+  memcpy (m_accesses, defs.begin (), defs.size_bytes ());
+  memcpy (m_accesses + defs.size (), uses.begin (), uses.size_bytes ());
+  m_num_defs = defs.size ();
+  // As in set_accesses, check that the bitfield assignment did not
+  // truncate the definition count.
+  gcc_assert (m_num_defs == defs.size ());
+  m_num_uses = uses.size ();
+}
+
+// If the instruction has an insn_info::order_node, return the node,
+// otherwise return null.
+inline insn_info::order_node *
+insn_info::get_order_node () const
+{
+  // The order_node always comes first.
+  if (insn_note *note = first_note ())
+    return note->dyn_cast<insn_info::order_node *> ();
+  return nullptr;
+}
+
+// Like get_order_node (), but the node is known to exist.
+inline insn_info::order_node *
+insn_info::get_known_order_node () const
+{
+  // The order_node always comes first.
+  return first_note ()->as_a<insn_info::order_node *> ();
+}
+
+// Copy the overloaded prev link from OTHER.
+inline void
+insn_info::copy_prev_from (insn_info *other)
+{
+  m_prev_insn_or_last_debug_insn = other->m_prev_insn_or_last_debug_insn;
+}
+
+// Copy the overloaded next link from OTHER.
+inline void
+insn_info::copy_next_from (insn_info *other)
+{
+  m_next_nondebug_or_debug_insn = other->m_next_nondebug_or_debug_insn;
+}
+
+// If this is a nondebug instruction, record that the previous nondebug
+// instruction is PREV.  (There might be intervening debug instructions.)
+//
+// If this is a debug instruction, record that the previous instruction
+// is debug instruction PREV.
+inline void
+insn_info::set_prev_sametype_insn (insn_info *prev)
+{
+  m_prev_insn_or_last_debug_insn.set_first (prev);
+}
+
+// Only valid for debug instructions.  Record that this instruction starts
+// a subsequence of debug instructions that ends with LAST.
+inline void
+insn_info::set_last_debug_insn (insn_info *last)
+{
+  m_prev_insn_or_last_debug_insn.set_second (last);
+}
+
+// Record that the next instruction of any kind is NEXT.
+inline void
+insn_info::set_next_any_insn (insn_info *next)
+{
+  // The pointer's tag records whether the next instruction is a
+  // debug instruction.
+  if (next && next->is_debug_insn ())
+    m_next_nondebug_or_debug_insn.set_second (next);
+  else
+    m_next_nondebug_or_debug_insn.set_first (next);
+}
+
+// Clear the list links and point number for this instruction.
+inline void
+insn_info::clear_insn_links ()
+{
+  m_prev_insn_or_last_debug_insn = nullptr;
+  m_next_nondebug_or_debug_insn = nullptr;
+  m_point = 0;
+}
+
+// Return true if the instruction contains any list information.
+// This is used by assert checking.
+inline bool
+insn_info::has_insn_links ()
+{
+  return (m_prev_insn_or_last_debug_insn
+	  || m_next_nondebug_or_debug_insn
+	  || m_point);
+}
+
+// Construct a representation of basic block CFG_BB.
+inline bb_info::bb_info (basic_block cfg_bb)
+  : m_prev_bb (nullptr),
+    m_next_bb (nullptr),
+    m_cfg_bb (cfg_bb),
+    m_ebb (nullptr),
+    m_head_insn (nullptr),
+    m_end_insn (nullptr)
+{
+}
+
+// Construct a tree of call clobbers for the given ABI.
+inline ebb_call_clobbers_info::
+ebb_call_clobbers_info (const predefined_function_abi *abi)
+  : m_next (nullptr),
+    m_abi (abi)
+{
+}
+
+// Construct an EBB whose first block is FIRST_BB and whose last block
+// is LAST_BB.
+inline ebb_info::ebb_info (bb_info *first_bb, bb_info *last_bb)
+  : m_first_phi (nullptr),
+    m_phi_insn (nullptr),
+    m_first_bb (first_bb),
+    m_last_bb (last_bb),
+    m_first_call_clobbers (nullptr)
+{
+}
+
+// Set the contents of last_access for register REGNO to DEF.
+inline void
+function_info::build_info::record_reg_def (unsigned int regno, def_info *def)
+{
+  // Slot 0 of last_access is reserved for memory, so register entries
+  // are offset by 1.
+  last_access[regno + 1] = def;
+}
+
+// Set the contents of last_access for memory to DEF.
+inline void
+function_info::build_info::record_mem_def (def_info *def)
+{
+  last_access[0] = def;
+}
+
+// Return the current value of live register REGNO, or null if the register's
+// value is completely undefined.
+inline set_info *
+function_info::build_info::current_reg_value (unsigned int regno) const
+{
+  return safe_dyn_cast<set_info *> (last_access[regno + 1]);
+}
+
+// Return the current value of memory.
+inline set_info *
+function_info::build_info::current_mem_value () const
+{
+  // Unlike the register case, a checked cast is used here, so the entry
+  // is expected to be a set_info rather than null.
+  return as_a<set_info *> (last_access[0]);
+}
+
+// Allocate a T on the function's main obstack, passing ARGS
+// to its constructor.
+template<typename T, typename... Ts>
+inline T *
+function_info::allocate (Ts... args)
+{
+  // Obstack storage is freed wholesale, so non-trivial destructors
+  // would never run; reject such types at compile time.
+  static_assert (std::is_trivially_destructible<T>::value,
+		 "destructor won't be called");
+  static_assert (alignof (T) <= obstack_alignment,
+		 "too much alignment required");
+  void *addr = obstack_alloc (&m_obstack, sizeof (T));
+  return new (addr) T (std::forward<Ts> (args)...);
+}
+
+// Allocate a T on the function's temporary obstack, passing ARGS
+// to its constructor.
+template<typename T, typename... Ts>
+inline T *
+function_info::allocate_temp (Ts... args)
+{
+  static_assert (std::is_trivially_destructible<T>::value,
+		 "destructor won't be called");
+  static_assert (alignof (T) <= obstack_alignment,
+		 "too much alignment required");
+  void *addr = obstack_alloc (&m_temp_obstack, sizeof (T));
+  return new (addr) T (std::forward<Ts> (args)...);
+}
+
+// Add INSN to the end of the function's list of instructions.
+inline void
+function_info::append_insn (insn_info *insn)
+{
+  gcc_checking_assert (!insn->has_insn_links ());
+  if (insn_info *after = m_last_insn)
+    add_insn_after (insn, after);
+  else
+    // The first instruction is for the entry block and is always a nondebug
+    // insn.
+    m_first_insn = m_last_insn = m_last_nondebug_insn = insn;
+}
+
+// Start building a new list of uses and definitions for an instruction.
+inline void
+function_info::start_insn_accesses ()
+{
+  // The temporary vectors must have been flushed by the previous user.
+  gcc_checking_assert (m_temp_defs.is_empty ()
+		       && m_temp_uses.is_empty ());
+}
+
+// Return a mode that encapsulates two distinct references to a register,
+// one with mode MODE1 and one with mode MODE2.  Treat BLKmode as a
+// "don't know" wildcard.
+inline machine_mode
+combine_modes (machine_mode mode1, machine_mode mode2)
+{
+  // A BLKmode wildcard defers entirely to the other mode.
+  if (mode1 == E_BLKmode)
+    return mode2;
+
+  if (mode2 == E_BLKmode)
+    return mode1;
+
+  // Otherwise pick the wider of the two modes.
+  return wider_subreg_mode (mode1, mode2);
+}
+
+// PRINTER (PP, ARGS...) prints ARGS... to a pretty_printer PP.  Use it
+// to print ARGS... to FILE.
+template<typename Printer, typename... Args>
+inline void
+dump_using (FILE *file, Printer printer, Args... args)
+{
+  pretty_printer pp;
+  printer (&pp, args...);
+  pp_newline (&pp);
+  fprintf (file, "%s", pp_formatted_text (&pp));
+}
+
+}
diff --git a/gcc/rtl-ssa/is-a.inl b/gcc/rtl-ssa/is-a.inl
new file mode 100644
index 00000000000..14e1316b6c0
--- /dev/null
+++ b/gcc/rtl-ssa/is-a.inl
@@ -0,0 +1,98 @@
+// is_a<> support for RTL SSA classes                               -*- C++ -*-
+// Copyright (C) 2020 Free Software Foundation, Inc.
+//
+// This file is part of GCC.
+//
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+//
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+// for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3.  If not see
+// <http://www.gnu.org/licenses/>.
+
+// A def_info is any access that defines a resource: a SET, PHI or CLOBBER.
+template<>
+struct is_a_helper<rtl_ssa::def_info *>
+  : static_is_a_helper<rtl_ssa::def_info *>
+{
+  static inline bool
+  test (const rtl_ssa::access_info *ref)
+  {
+    return (ref->kind () == rtl_ssa::access_kind::SET
+	    || ref->kind () == rtl_ssa::access_kind::PHI
+	    || ref->kind () == rtl_ssa::access_kind::CLOBBER);
+  }
+};
+
+// Only CLOBBER accesses are clobber_infos.
+template<>
+struct is_a_helper<rtl_ssa::clobber_info *>
+  : static_is_a_helper<rtl_ssa::clobber_info *>
+{
+  static inline bool
+  test (const rtl_ssa::access_info *ref)
+  {
+    return ref->kind () == rtl_ssa::access_kind::CLOBBER;
+  }
+};
+
+// Only USE accesses are use_infos.
+template<>
+struct is_a_helper<rtl_ssa::use_info *>
+  : static_is_a_helper<rtl_ssa::use_info *>
+{
+  static inline bool
+  test (const rtl_ssa::access_info *ref)
+  {
+    return ref->kind () == rtl_ssa::access_kind::USE;
+  }
+};
+
+// Both SETs and PHIs count as set_infos (a phi is a kind of set).
+template<>
+struct is_a_helper<rtl_ssa::set_info *>
+  : static_is_a_helper<rtl_ssa::set_info *>
+{
+  static inline bool
+  test (const rtl_ssa::access_info *ref)
+  {
+    return (ref->kind () == rtl_ssa::access_kind::SET
+	    || ref->kind () == rtl_ssa::access_kind::PHI);
+  }
+};
+
+// Only PHI accesses are phi_infos.
+template<>
+struct is_a_helper<rtl_ssa::phi_info *>
+  : static_is_a_helper<rtl_ssa::phi_info *>
+{
+  static inline bool
+  test (const rtl_ssa::access_info *ref)
+  {
+    return ref->kind () == rtl_ssa::access_kind::PHI;
+  }
+};
+
+// A def_node that contains a set is a set_node.
+template<>
+struct is_a_helper<rtl_ssa::set_node *>
+  : static_is_a_helper<rtl_ssa::set_node *>
+{
+  static inline bool
+  test (const rtl_ssa::def_node *node)
+  {
+    return node->contains_set ();
+  }
+};
+
+// A def_node that contains clobbers is a clobber_group.
+template<>
+struct is_a_helper<rtl_ssa::clobber_group *>
+  : static_is_a_helper<rtl_ssa::clobber_group *>
+{
+  static inline bool
+  test (const rtl_ssa::def_node *node)
+  {
+    return node->contains_clobber ();
+  }
+};
diff --git a/gcc/rtl-ssa/member-fns.inl b/gcc/rtl-ssa/member-fns.inl
new file mode 100644
index 00000000000..4f3bb2e1518
--- /dev/null
+++ b/gcc/rtl-ssa/member-fns.inl
@@ -0,0 +1,928 @@
+// Implementation of public inline member functions for RTL SSA     -*- C++ -*-
+// Copyright (C) 2020 Free Software Foundation, Inc.
+//
+// This file is part of GCC.
+//
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+//
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+// for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3.  If not see
+// <http://www.gnu.org/licenses/>.
+
+// This file contains inline implementations of public member functions that
+// are too large to be written in the class definition.  It also contains
+// some non-inline template definitions of public member functions.
+// See the comments above the function declarations for details.
+//
+// The file also contains the bare minimum of private and protected inline
+// member functions that are needed to make the public functions compile.
+namespace rtl_ssa {
+
+// Make sure the obstack has room for NUM_ACCESSES access pointers.
+inline void
+access_array_builder::reserve (unsigned int num_accesses)
+{
+  obstack_make_room (m_obstack, num_accesses * sizeof (access_info *));
+}
+
+// Add ACCESS to the array being built.  Space must have been reserved
+// beforehand (see reserve above).
+inline void
+access_array_builder::quick_push (access_info *access)
+{
+  obstack_ptr_grow_fast (m_obstack, access);
+}
+
+// Finish and return the array of accesses built so far.
+inline array_slice<access_info *>
+access_array_builder::finish ()
+{
+  auto num_accesses = obstack_object_size (m_obstack) / sizeof (access_info *);
+  if (num_accesses == 0)
+    return {};
+
+  auto **base = static_cast<access_info **> (obstack_finish (m_obstack));
+  keep ();
+  return { base, num_accesses };
+}
+
+// Return true if this access is a set whose result is used by at least
+// one nondebug instruction.
+inline bool
+access_info::is_set_with_nondebug_insn_uses () const
+{
+  return m_is_set_with_nondebug_insn_uses;
+}
+
+// Return true if this use occurs in a debug instruction.
+inline bool
+use_info::is_in_debug_insn () const
+{
+  return m_insn_or_phi.is_first () && m_is_in_debug_insn_or_phi;
+}
+
+// Return the basic block that contains the use.
+inline bb_info *
+use_info::bb () const
+{
+  // The containing block is reached through either the insn or the phi,
+  // depending on which kind of use this is.
+  if (m_insn_or_phi.is_first ())
+    return m_insn_or_phi.known_first ()->bb ();
+  return m_insn_or_phi.known_second ()->bb ();
+}
+
+// Return the extended basic block that contains the use.
+inline ebb_info *
+use_info::ebb () const
+{
+  return bb ()->ebb ();
+}
+
+// Return the previous use in the definition's list of uses, or null if none.
+inline use_info *
+use_info::prev_use () const
+{
+  return m_last_use_or_prev_use.second_or_null ();
+}
+
+// Return the next use in the definition's list of uses, or null if none.
+inline use_info *
+use_info::next_use () const
+{
+  return m_last_nondebug_insn_use_or_next_use.second_or_null ();
+}
+
+// Return true if this is the first use in the definition's list of uses.
+inline bool
+use_info::is_first_use () const
+{
+  return m_last_use_or_prev_use.is_first ();
+}
+
+// Return true if this is the last use in the definition's list of uses.
+inline bool
+use_info::is_last_use () const
+{
+  return m_last_nondebug_insn_use_or_next_use.is_first ();
+}
+
+// Return the next use by a nondebug instruction, or null if none.
+inline use_info *
+use_info::next_nondebug_insn_use () const
+{
+  if (m_is_last_nondebug_insn_use)
+    return nullptr;
+  return m_last_nondebug_insn_use_or_next_use.known_second ();
+}
+
+// Return the next use by an instruction (debug or nondebug), or null
+// if none.
+inline use_info *
+use_info::next_any_insn_use () const
+{
+  // This is used less often than next_nondebug_insn_use, so it doesn't
+  // seem worth having an m_is_last_nondebug_insn_use-style end marker.
+  if (use_info *use = next_use ())
+    if (use->is_in_any_insn ())
+      return use;
+  return nullptr;
+}
+
+// Return the previous use by a phi, or null if none.
+inline use_info *
+use_info::prev_phi_use () const
+{
+  // This is used less often than next_nondebug_insn_use, so it doesn't
+  // seem worth having an m_is_last_nondebug_insn_use-style end marker.
+  if (use_info *use = prev_use ())
+    if (use->is_in_phi ())
+      return use;
+  return nullptr;
+}
+
+// Return the last use of any kind in the list.  Only valid when is_first ()
+// is true.
+inline use_info *
+use_info::last_use () const
+{
+  return m_last_use_or_prev_use.known_first ();
+}
+
+// Return the last nondebug insn use in the list, or null if none.  Only valid
+// when is_last_use () is true.
+inline use_info *
+use_info::last_nondebug_insn_use () const
+{
+  return m_last_nondebug_insn_use_or_next_use.known_first ();
+}
+}
+
+// Return the previous definition of the resource, or null if none.
+inline def_info *
+def_info::prev_def () const
+{
+  return m_last_def_or_prev_def.second_or_null ();
+}
+
+// Return the next definition of the resource, or null if none.
+inline def_info *
+def_info::next_def () const
+{
+  return m_splay_root_or_next_def.second_or_null ();
+}
+
+// Return true if this is the first definition in the resource's list.
+inline bool
+def_info::is_first_def () const
+{
+  return m_last_def_or_prev_def.is_first ();
+}
+
+// Return true if this is the last definition in the resource's list.
+inline bool
+def_info::is_last_def () const
+{
+  return m_splay_root_or_next_def.is_first ();
+}
+
+// Return the basic block that contains the definition.
+inline bb_info *
+def_info::bb () const
+{
+  return m_insn->bb ();
+}
+
+// Return the extended basic block that contains the definition.
+inline ebb_info *
+def_info::ebb () const
+{
+  return m_insn->ebb ();
+}
+
+// Return the clobber_group that contains this clobber, computing it
+// lazily if the cached group has been superseded.
+inline clobber_group *
+clobber_info::group () const
+{
+  if (!m_group || !m_group->has_been_superceded ())
+    return m_group;
+  return const_cast<clobber_info *> (this)->recompute_group ();
+}
+
+// Return the last use of the set, or null if the set has no uses.
+inline use_info *
+set_info::last_use () const
+{
+  return m_first_use ? m_first_use->last_use () : nullptr;
+}
+
+// Return the first use of the set by a nondebug instruction, or null
+// if there is no such use.
+inline use_info *
+set_info::first_nondebug_insn_use () const
+{
+  // Nondebug insn uses come first in the list, so if any exist,
+  // m_first_use is one of them.
+  if (m_is_set_with_nondebug_insn_uses)
+    return m_first_use;
+  return nullptr;
+}
+
+// Return the last use of the set by a nondebug instruction, or null
+// if there is no such use.
+inline use_info *
+set_info::last_nondebug_insn_use () const
+{
+  if (m_is_set_with_nondebug_insn_uses)
+    return m_first_use->last_use ()->last_nondebug_insn_use ();
+  return nullptr;
+}
+
+// Return the first use of the set by any kind of instruction (debug or
+// nondebug), or null if there is no such use.
+inline use_info *
+set_info::first_any_insn_use () const
+{
+  // m_first_use can be null when the set has no uses at all; guard the
+  // dereference, as last_use and last_phi_use do.  Insn uses come before
+  // phi uses, so if any insn use exists it is m_first_use.
+  if (m_first_use && m_first_use->is_in_any_insn ())
+    return m_first_use;
+  return nullptr;
+}
+
+// Return the last use of the set by a phi, or null if there is no such use.
+inline use_info *
+set_info::last_phi_use () const
+{
+  // Phi uses come last in the list, so if any exist, the final use
+  // is one of them.
+  if (m_first_use)
+    {
+      use_info *last = m_first_use->last_use ();
+      if (last->is_in_phi ())
+	return last;
+    }
+  return nullptr;
+}
+
+// Return true if the set is used by anything other than debug instructions.
+inline bool
+set_info::has_nondebug_uses () const
+{
+  return has_nondebug_insn_uses () || has_phi_uses ();
+}
+
+// Return true if the set is used by at least one nondebug instruction.
+inline bool
+set_info::has_nondebug_insn_uses () const
+{
+  return m_is_set_with_nondebug_insn_uses;
+}
+
+// Return true if the set is used by at least one phi.
+inline bool
+set_info::has_phi_uses () const
+{
+  return m_first_use && m_first_use->last_use ()->is_in_phi ();
+}
+
+// Return true if all uses of the set occur in the same extended basic
+// block as the set itself.
+inline bool
+set_info::is_local_to_ebb () const
+{
+  if (!m_first_use)
+    return true;
+
+  // A phi use implies the value is live across EBBs.
+  use_info *last = m_first_use->last_use ();
+  if (last->is_in_phi ())
+    return false;
+
+  last = last->last_nondebug_insn_use ();
+  return !last || last->ebb () == ebb ();
+}
+
+// Return an iterator range over all uses of the set.
+inline iterator_range<use_iterator>
+set_info::all_uses () const
+{
+  return { m_first_use, nullptr };
+}
+
+// Like all_uses, but in reverse order.
+inline iterator_range<reverse_use_iterator>
+set_info::reverse_all_uses () const
+{
+  return { last_use (), nullptr };
+}
+
+// Return an iterator range over the uses by nondebug instructions.
+inline iterator_range<nondebug_insn_use_iterator>
+set_info::nondebug_insn_uses () const
+{
+  return { first_nondebug_insn_use (), nullptr };
+}
+
+// Like nondebug_insn_uses, but in reverse order.
+inline iterator_range<reverse_use_iterator>
+set_info::reverse_nondebug_insn_uses () const
+{
+  return { last_nondebug_insn_use (), nullptr };
+}
+
+// Return an iterator range over the uses by instructions of any kind.
+inline iterator_range<any_insn_use_iterator>
+set_info::all_insn_uses () const
+{
+  return { first_any_insn_use (), nullptr };
+}
+
+// Return an iterator range over the uses by phis.
+inline iterator_range<phi_use_iterator>
+set_info::phi_uses () const
+{
+  return { last_phi_use (), nullptr };
+}
+
+// Return the array of the phi's inputs.
+inline use_array
+phi_info::inputs () const
+{
+  // A single input is stored inline; see phi_info::set_inputs.
+  if (m_num_inputs == 1)
+    return use_array (&m_single_input, 1);
+  return use_array (m_inputs, m_num_inputs);
+}
+
+// Return the use_info for input I of the phi.
+inline use_info *
+phi_info::input_use (unsigned int i) const
+{
+  if (m_num_inputs == 1)
+    return as_a<use_info *> (m_single_input);
+  return as_a<use_info *> (m_inputs[i]);
+}
+
+// Return the value of input I of the phi.
+inline set_info *
+phi_info::input_value (unsigned int i) const
+{
+  return input_use (i)->def ();
+}
+
+// Return the first definition in the node, regardless of which kind of
+// node this is.
+inline def_info *
+def_node::first_def () const
+{
+  // This should get optimized into an AND with -2.
+  if (m_clobber_or_set.is_first ())
+    return m_clobber_or_set.known_first ();
+  return m_clobber_or_set.known_second ();
+}
+
+// Return the first clobber in the group.
+inline clobber_info *
+clobber_group::first_clobber () const
+{
+  return m_clobber_or_set.known_first ();
+}
+
+// Return an iterator range over the clobbers in the group.
+inline iterator_range<def_iterator>
+clobber_group::clobbers () const
+{
+  return { first_clobber (), m_last_clobber->next_def () };
+}
+
+// Return the first definition covered by the mux.
+inline def_info *
+def_mux::first_def () const
+{
+  if (is_first ())
+    return known_first ();
+  return known_second ()->first_def ();
+}
+
+// Return the last definition covered by the mux.
+inline def_info *
+def_mux::last_def () const
+{
+  if (is_first ())
+    return known_first ();
+
+  def_node *node = known_second ();
+  if (auto *clobber = ::dyn_cast<clobber_group *> (node))
+    return clobber->last_clobber ();
+
+  return node->first_def ();
+}
+
+// Return the set_info covered by the mux, if any.
+inline set_info *
+def_mux::set () const
+{
+  if (is_first ())
+    return ::safe_dyn_cast<set_info *> (known_first ());
+  return ::dyn_cast<set_info *> (known_second ()->first_def ());
+}
+
+// Return the definition that comes before the search point, or null
+// if none.
+inline def_info *
+def_lookup::prev_def () const
+{
+  if (!mux)
+    return nullptr;
+
+  // A positive comparison means the search point comes after MUX.
+  if (comparison > 0)
+    return mux.last_def ();
+
+  return mux.first_def ()->prev_def ();
+}
+
+// Return the definition that comes after the search point, or null
+// if none.
+inline def_info *
+def_lookup::next_def () const
+{
+  if (!mux)
+    return nullptr;
+
+  // A negative comparison means the search point comes before MUX.
+  if (comparison < 0)
+    return mux.first_def ();
+
+  return mux.last_def ()->next_def ();
+}
+
+// Return the set at the searched-for position, or null if there is none.
+inline set_info *
+def_lookup::matching_set () const
+{
+  if (comparison == 0)
+    return mux.set ();
+  return nullptr;
+}
+
+// Return the matching set if there is one, otherwise the previous
+// definition.
+inline def_info *
+def_lookup::matching_or_prev_def () const
+{
+  if (set_info *set = matching_set ())
+    return set;
+  return prev_def ();
+}
+
+// Return the matching set if there is one, otherwise the next definition.
+inline def_info *
+def_lookup::matching_or_next_def () const
+{
+  if (set_info *set = matching_set ())
+    return set;
+  return next_def ();
+}
+
+// Construct a note of the given kind, with all spare data fields zeroed.
+inline insn_note::insn_note (insn_note_kind kind)
+  : m_next_note (nullptr),
+    m_kind (kind),
+    m_data8 (0),
+    m_data16 (0),
+    m_data32 (0)
+{
+}
+
+// Cast the note to derived-note pointer type T, asserting that the
+// note's kind matches T's.
+template<typename T>
+inline T
+insn_note::as_a ()
+{
+  using deref_type = decltype (*std::declval<T> ());
+  using derived = typename std::remove_reference<deref_type>::type;
+  gcc_checking_assert (m_kind == derived::kind);
+  return static_cast<T> (this);
+}
+
+// Cast the note to derived-note pointer type T if the note's kind
+// matches T's, otherwise return null.
+template<typename T>
+inline T
+insn_note::dyn_cast ()
+{
+  using deref_type = decltype (*std::declval<T> ());
+  using derived = typename std::remove_reference<deref_type>::type;
+  if (m_kind == derived::kind)
+    return static_cast<T> (this);
+  return nullptr;
+}
+
+// Return true if this instruction comes before OTHER in program order.
+inline bool
+insn_info::operator< (const insn_info &other) const
+{
+  if (this == &other)
+    return false;
+
+  // Instructions usually have distinct point numbers; equal points
+  // require the slower order_node-based comparison.
+  if (__builtin_expect (m_point != other.m_point, 1))
+    return m_point < other.m_point;
+
+  return slow_compare_with (other) < 0;
+}
+
+inline bool
+insn_info::operator> (const insn_info &other) const
+{
+  return other < *this;
+}
+
+inline bool
+insn_info::operator<= (const insn_info &other) const
+{
+  return !(other < *this);
+}
+
+inline bool
+insn_info::operator>= (const insn_info &other) const
+{
+  return !(*this < other);
+}
+
+// Return <0, 0 or >0 if this instruction comes before, is, or comes after
+// OTHER in program order.
+inline int
+insn_info::compare_with (const insn_info *other) const
+{
+  if (this == other)
+    return 0;
+
+  if (__builtin_expect (m_point != other->m_point, 1))
+    // Assume that points remain in [0, INT_MAX].
+    return m_point - other->m_point;
+
+  return slow_compare_with (*other);
+}
+
+// Return the previous nondebug instruction.  Only valid for nondebug
+// instructions.
+inline insn_info *
+insn_info::prev_nondebug_insn () const
+{
+  gcc_checking_assert (!is_debug_insn ());
+  return m_prev_insn_or_last_debug_insn.known_first ();
+}
+
+// Return the next nondebug instruction.  Only valid for nondebug
+// instructions.
+inline insn_info *
+insn_info::next_nondebug_insn () const
+{
+  gcc_checking_assert (!is_debug_insn ());
+  const insn_info *from = this;
+  // Skip over any run of intervening debug instructions.
+  if (insn_info *first_debug = m_next_nondebug_or_debug_insn.second_or_null ())
+    from = first_debug->last_debug_insn ();
+  return from->m_next_nondebug_or_debug_insn.known_first ();
+}
+
+// Return the previous instruction of any kind.
+inline insn_info *
+insn_info::prev_any_insn () const
+{
+  const insn_info *from = this;
+  if (insn_info *last_debug = m_prev_insn_or_last_debug_insn.second_or_null ())
+    // This instruction is the first in a subsequence of debug instructions.
+    // Move to the following nondebug instruction.
+    from = last_debug->m_next_nondebug_or_debug_insn.known_first ();
+  return from->m_prev_insn_or_last_debug_insn.known_first ();
+}
+
+// Return the next instruction of any kind.
+inline insn_info *
+insn_info::next_any_insn () const
+{
+  // This should get optimized into an AND with -2.
+  if (m_next_nondebug_or_debug_insn.is_first ())
+    return m_next_nondebug_or_debug_insn.known_first ();
+  return m_next_nondebug_or_debug_insn.known_second ();
+}
+
+// Return true if this is the artificial phi instruction of its EBB.
+inline bool
+insn_info::is_phi () const
+{
+  return this == ebb ()->phi_insn ();
+}
+
+// Return true if this is the artificial head instruction of its block.
+inline bool
+insn_info::is_bb_head () const
+{
+  return this == m_bb->head_insn ();
+}
+
+// Return true if this is the artificial end instruction of its block.
+inline bool
+insn_info::is_bb_end () const
+{
+  return this == m_bb->end_insn ();
+}
+
+// Return the extended basic block that contains the instruction.
+inline ebb_info *
+insn_info::ebb () const
+{
+  return m_bb->ebb ();
+}
+
+// Return the instruction's unique identifier.
+inline int
+insn_info::uid () const
+{
+  // Artificial instructions store a negative uid directly; real
+  // instructions take their uid from the underlying rtl.
+  return m_cost_or_uid < 0 ? m_cost_or_uid : INSN_UID (m_rtl);
+}
+
+// Return the list of things that the instruction uses.
+inline use_array
+insn_info::uses () const
+{
+  // Uses are stored after the definitions in m_accesses.
+  return use_array (m_accesses + m_num_defs, m_num_uses);
+}
+
+// Return true if the instruction is a call that clobbers registers.
+inline bool
+insn_info::has_call_clobbers () const
+{
+  return find_note<insn_call_clobbers_note> ();
+}
+
+// Return the list of things that the instruction defines.
+inline def_array
+insn_info::defs () const
+{
+  return def_array (m_accesses, m_num_defs);
+}
+
+// Return the cost of the instruction, computing it lazily on first use.
+inline unsigned int
+insn_info::cost () const
+{
+  // Artificial instructions (negative m_cost_or_uid) have no cost.
+  if (m_cost_or_uid < 0)
+    return 0;
+  if (m_cost_or_uid == UNKNOWN_COST)
+    calculate_cost ();
+  return m_cost_or_uid;
+}
+
+// Return the note of type T attached to the instruction, or null if none.
+template<typename T>
+inline const T *
+insn_info::find_note () const
+{
+  // We could break if the note kind is > T::kind, but since the number
+  // of notes should be very small, the check is unlikely to pay for itself.
+  for (const insn_note *note = first_note (); note; note = note->next_note ())
+    if (note->kind () == T::kind)
+      return static_cast<const T *> (note);
+  return nullptr;
+}
+
+// Only valid for debug instructions that come after a nondebug instruction,
+// and so start a subsequence of debug instructions.  Return the last debug
+// instruction in the subsequence.
+inline insn_info *
+insn_info::last_debug_insn () const
+{
+  return m_prev_insn_or_last_debug_insn.known_second ();
+}
+
+// Construct a range that contains the instructions from FIRST to LAST
+// inclusive.
+inline insn_range_info::insn_range_info (insn_info *first, insn_info *last)
+  : first (first), last (last)
+{
+}
+
+inline bool
+insn_range_info::operator== (const insn_range_info &other) const
+{
+  return first == other.first && last == other.last;
+}
+
+inline bool
+insn_range_info::operator!= (const insn_range_info &other) const
+{
+  return first != other.first || last != other.last;
+}
+
+// If the range contains a single instruction, return it, otherwise
+// return null.
+inline insn_info *
+insn_range_info::singleton () const
+{
+  return first == last ? last : nullptr;
+}
+
+// Return true if the range includes INSN.
+inline bool
+insn_range_info::includes (insn_info *insn) const
+{
+  return *insn >= *first && *insn <= *last;
+}
+
+// Return the member of the range that is closest to INSN in program order.
+inline insn_info *
+insn_range_info::clamp_insn_to_range (insn_info *insn) const
+{
+  if (*first > *insn)
+    return first;
+  if (*last < *insn)
+    return last;
+  return insn;
+}
+
+// Return true if this range is contained within OTHER.
+inline bool
+insn_range_info::is_subrange_of (const insn_range_info &other) const
+{
+  return *first >= *other.first && *last <= *other.last;
+}
+
+// Return an iterator range over all instructions in the block, including
+// the artificial head and end instructions.
+inline iterator_range<any_insn_iterator>
+bb_info::all_insns () const
+{
+  return { m_head_insn, m_end_insn->next_any_insn () };
+}
+
+// Like all_insns, but in reverse order.
+inline iterator_range<reverse_any_insn_iterator>
+bb_info::reverse_all_insns () const
+{
+  return { m_end_insn, m_head_insn->prev_any_insn () };
+}
+
+// Like all_insns, but restricted to nondebug instructions.
+inline iterator_range<nondebug_insn_iterator>
+bb_info::nondebug_insns () const
+{
+  return { m_head_insn, m_end_insn->next_nondebug_insn () };
+}
+
+// Like nondebug_insns, but in reverse order.
+inline iterator_range<reverse_nondebug_insn_iterator>
+bb_info::reverse_nondebug_insns () const
+{
+  return { m_end_insn, m_head_insn->prev_nondebug_insn () };
+}
+
+// Return an iterator range over the block's real (non-artificial)
+// instructions.
+inline iterator_range<any_insn_iterator>
+bb_info::real_insns () const
+{
+  return { m_head_insn->next_any_insn (), m_end_insn };
+}
+
+// Like real_insns, but in reverse order.
+inline iterator_range<reverse_any_insn_iterator>
+bb_info::reverse_real_insns () const
+{
+  return { m_end_insn->prev_any_insn (), m_head_insn };
+}
+
+// Like real_insns, but restricted to nondebug instructions.
+inline iterator_range<nondebug_insn_iterator>
+bb_info::real_nondebug_insns () const
+{
+  return { m_head_insn->next_nondebug_insn (), m_end_insn };
+}
+
+// Like real_nondebug_insns, but in reverse order.
+inline iterator_range<reverse_nondebug_insn_iterator>
+bb_info::reverse_real_nondebug_insns () const
+{
+  return { m_end_insn->prev_nondebug_insn (), m_head_insn };
+}
+
+// Return true if a call with the associated ABI clobbers RESOURCE.
+inline bool
+ebb_call_clobbers_info::clobbers (resource_info resource) const
+{
+  // Only register clobbers are tracked this way.  Other clobbers are
+  // recorded explicitly.
+  return (resource.is_reg ()
+	  && m_abi->clobbers_reg_p (resource.mode, resource.regno));
+}
+
+// Return the previous extended basic block, or null if none.
+inline ebb_info *
+ebb_info::prev_ebb () const
+{
+  if (bb_info *prev_bb = m_first_bb->prev_bb ())
+    return prev_bb->ebb ();
+  return nullptr;
+}
+
+// Return the next extended basic block, or null if none.
+inline ebb_info *
+ebb_info::next_ebb () const
+{
+  if (bb_info *next_bb = m_last_bb->next_bb ())
+    return next_bb->ebb ();
+  return nullptr;
+}
+
+// Return an iterator range over the EBB's phi nodes.
+inline iterator_range<phi_iterator>
+ebb_info::phis () const
+{
+  return { m_first_phi, nullptr };
+}
+
+// Return an iterator range over the blocks in the EBB.
+inline iterator_range<bb_iterator>
+ebb_info::bbs () const
+{
+  return { m_first_bb, m_last_bb->next_bb () };
+}
+
+// Like bbs, but in reverse order.
+inline iterator_range<reverse_bb_iterator>
+ebb_info::reverse_bbs () const
+{
+  return { m_last_bb, m_first_bb->prev_bb () };
+}
+
+// Return an iterator range over all instructions in the EBB, starting
+// with its artificial phi instruction.
+inline iterator_range<any_insn_iterator>
+ebb_info::all_insns () const
+{
+  return { m_phi_insn, m_last_bb->end_insn ()->next_any_insn () };
+}
+
+// Like all_insns, but in reverse order.
+inline iterator_range<reverse_any_insn_iterator>
+ebb_info::reverse_all_insns () const
+{
+  return { m_last_bb->end_insn (), m_phi_insn->prev_any_insn () };
+}
+
+// Like all_insns, but restricted to nondebug instructions.
+inline iterator_range<nondebug_insn_iterator>
+ebb_info::nondebug_insns () const
+{
+  return { m_phi_insn, m_last_bb->end_insn ()->next_nondebug_insn () };
+}
+
+// Like nondebug_insns, but in reverse order.
+inline iterator_range<reverse_nondebug_insn_iterator>
+ebb_info::reverse_nondebug_insns () const
+{
+  return { m_last_bb->end_insn (), m_phi_insn->prev_nondebug_insn () };
+}
+
+// Return the range of instructions that belong to the EBB.
+inline insn_range_info
+ebb_info::insn_range () const
+{
+  return { m_phi_insn, m_last_bb->end_insn () };
+}
+
+// Set the head of the EBB's list of call clobbers to CALL_CLOBBERS.
+inline void
+ebb_info::set_first_call_clobbers (ebb_call_clobbers_info *call_clobbers)
+{
+  m_first_call_clobbers = call_clobbers;
+}
+
+// Return the head of the EBB's list of call clobbers, or null if none.
+inline ebb_call_clobbers_info *
+ebb_info::first_call_clobbers () const
+{
+  return m_first_call_clobbers;
+}
+
+// Return an iterator range over the EBB's call clobbers.
+inline iterator_range<ebb_call_clobbers_iterator>
+ebb_info::call_clobbers () const
+{
+  return { m_first_call_clobbers, nullptr };
+}
+
+// Construct a possible change to INSN, initially with no changes to the
+// instruction's defs, uses or position.
+inline insn_change::insn_change (insn_info *insn)
+  : m_insn (insn),
+    new_defs (insn->defs ()),
+    new_uses (insn->uses ()),
+    move_range (insn),
+    new_cost (UNKNOWN_COST),
+    m_is_deletion (false)
+{
+}
+
+// Construct a change that deletes INSN: no defs, no uses and zero cost.
+inline insn_change::insn_change (insn_info *insn, delete_action)
+  : m_insn (insn),
+    new_defs (),
+    new_uses (),
+    move_range (insn),
+    new_cost (0),
+    m_is_deletion (true)
+{
+}
+
+// Construct a predicate closure for the given group of changes.
+inline insn_is_changing_closure::
+insn_is_changing_closure (array_slice<insn_change *const> changes)
+  : m_changes (changes)
+{
+}
+
+// Return true if INSN is one of the instructions being changed.
+inline bool
+insn_is_changing_closure::operator() (const insn_info *insn) const
+{
+  for (const insn_change *change : m_changes)
+    if (change->insn () == insn)
+      return true;
+  return false;
+}
+
+// Return an iterator range over the function's basic blocks.
+inline iterator_range<bb_iterator>
+function_info::bbs () const
+{
+  return { m_first_bb, nullptr };
+}
+
+// Like bbs, but in reverse order.
+inline iterator_range<reverse_bb_iterator>
+function_info::reverse_bbs () const
+{
+  return { m_last_bb, nullptr };
+}
+
+// Return an iterator range over the function's extended basic blocks.
+inline iterator_range<ebb_iterator>
+function_info::ebbs () const
+{
+  return { m_first_bb->ebb (), nullptr };
+}
+
+// Like ebbs, but in reverse order.
+inline iterator_range<reverse_ebb_iterator>
+function_info::reverse_ebbs () const
+{
+  return { m_last_bb->ebb (), nullptr };
+}
+
+// Return an iterator range over all of the function's instructions.
+inline iterator_range<any_insn_iterator>
+function_info::all_insns () const
+{
+  return { m_first_insn, nullptr };
+}
+
+// Like all_insns, but in reverse order.
+inline iterator_range<reverse_any_insn_iterator>
+function_info::reverse_all_insns () const
+{
+  return { m_last_insn, nullptr };
+}
+
+// Like all_insns, but restricted to nondebug instructions.  The first
+// instruction is always a nondebug insn (see append_insn).
+inline iterator_range<nondebug_insn_iterator>
+function_info::nondebug_insns () const
+{
+  return { m_first_insn, nullptr };
+}
+
+// Like nondebug_insns, but in reverse order.
+inline iterator_range<reverse_nondebug_insn_iterator>
+function_info::reverse_nondebug_insns () const
+{
+  return { m_last_insn, nullptr };
+}
+
+// Return an iterator range over the definitions of memory.  Slot 0 of
+// m_defs is reserved for memory.
+inline iterator_range<def_iterator>
+function_info::mem_defs () const
+{
+  return { m_defs[0], nullptr };
+}
+
+// Return an iterator range over the definitions of register REGNO.
+inline iterator_range<def_iterator>
+function_info::ref_defs (unsigned int regno) const
+{
+  return { m_defs[regno + 1], nullptr };
+}
+
+// If REGNO has a single definition that dominates all its uses, return
+// that definition, otherwise return null.
+inline set_info *
+function_info::single_dominating_def (unsigned int regno) const
+{
+  if (set_info *set = safe_dyn_cast<set_info *> (m_defs[regno + 1]))
+    if (is_single_dominating_def (set))
+      return set;
+  return nullptr;
+}
+
+// Try to add a clobber of register REGNO to CHANGE, restricting the
+// change's move range as necessary.  IGNORE is a predicate that says
+// which instructions can be ignored during the dead-range search.
+// Return true on success.
+template<typename IgnorePredicate>
+bool
+function_info::add_regno_clobber (obstack_watermark &watermark,
+				  insn_change &change, unsigned int regno,
+				  IgnorePredicate ignore)
+{
+  // Check whether CHANGE already clobbers REGNO.
+  if (find_access (change.new_defs, regno))
+    return true;
+
+  // Get the closest position to INSN at which the new instruction
+  // could be placed.
+  insn_info *insn = change.move_range.clamp_insn_to_range (change.insn ());
+  def_array new_defs = insert_temp_clobber (watermark, insn, regno,
+					    change.new_defs);
+  if (!new_defs.is_valid ())
+    return false;
+
+  // Find a definition at or neighboring INSN.
+  insn_range_info move_range = change.move_range;
+  if (!restrict_movement_for_dead_range (move_range, regno, insn, ignore))
+    return false;
+
+  // Commit both the new def list and the restricted move range.
+  change.new_defs = new_defs;
+  change.move_range = move_range;
+  return true;
+}
+
+}
diff --git a/gcc/rtl-ssa/movement.h b/gcc/rtl-ssa/movement.h
new file mode 100644
index 00000000000..3b0cbf9d411
--- /dev/null
+++ b/gcc/rtl-ssa/movement.h
@@ -0,0 +1,335 @@
+// RTL SSA utilities relating to instruction movement               -*- C++ -*-
+// Copyright (C) 2020 Free Software Foundation, Inc.
+//
+// This file is part of GCC.
+//
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+//
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+// for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3.  If not see
+// <http://www.gnu.org/licenses/>.
+
+namespace rtl_ssa {
+
+// Restrict movement range RANGE so that the instruction is placed later
+// than INSN.  (The movement range is the range of instructions after which
+// an instruction can be placed.)
+inline insn_range_info
+move_later_than (insn_range_info range, insn_info *insn)
+{
+  return { later_insn (range.first, insn), range.last };
+}
+
+// Restrict movement range RANGE so that the instruction is placed no earlier
+// than INSN.  (The movement range is the range of instructions after which
+// an instruction can be placed.)
+inline insn_range_info
+move_no_earlier_than (insn_range_info range, insn_info *insn)
+{
+  insn_info *first = later_insn (range.first, insn->prev_nondebug_insn ());
+  return { first, range.last };
+}
+
+// Restrict movement range RANGE so that the instruction is placed no later
+// than INSN.  (The movement range is the range of instructions after which
+// an instruction can be placed.)
+inline insn_range_info
+move_no_later_than (insn_range_info range, insn_info *insn)
+{
+  return { range.first, earlier_insn (range.last, insn) };
+}
+
+// Restrict movement range RANGE so that the instruction is placed earlier
+// than INSN.  (The movement range is the range of instructions after which
+// an instruction can be placed.)
+inline insn_range_info
+move_earlier_than (insn_range_info range, insn_info *insn)
+{
+  insn_info *last = earlier_insn (range.last, insn->prev_nondebug_insn ());
+  return { range.first, last };
+}
+
+// Return true if it is possible to insert a new instruction after INSN.
+inline bool
+can_insert_after (insn_info *insn)
+{
+  return insn->is_bb_head () || (insn->is_real () && !insn->is_jump ());
+}
+
+// Try to restrict move range MOVE_RANGE so that it is possible to
+// insert INSN after both of the end points.  Return true on success,
+// otherwise leave MOVE_RANGE in an invalid state.
+inline bool
+canonicalize_move_range (insn_range_info &move_range, insn_info *insn)
+{
+  while (move_range.first != insn && !can_insert_after (move_range.first))
+    move_range.first = move_range.first->next_nondebug_insn ();
+  while (move_range.last != insn && !can_insert_after (move_range.last))
+    move_range.last = move_range.last->prev_nondebug_insn ();
+  return bool (move_range);
+}
+
+// Try to restrict movement range MOVE_RANGE of INSN so that it can set
+// or clobber REGNO.  Assume that if:
+//
+// - an instruction I2 contains another access A to REGNO; and
+// - IGNORE (I2) is true
+//
+// then either:
+//
+// - A will be removed; or
+// - something will ensure that the new definition of REGNO does not
+//   interfere with A, without this having to be enforced by I1's move range.
+//
+// Return true on success, otherwise leave MOVE_RANGE in an invalid state.
+//
+// This function only works correctly for instructions that remain within
+// the same extended basic block.
+template<typename IgnorePredicate>
+bool
+restrict_movement_for_dead_range (insn_range_info &move_range,
+				  unsigned int regno, insn_info *insn,
+				  IgnorePredicate ignore)
+{
+  // Find a definition at or neighboring INSN.
+  resource_info resource = full_register (regno);
+  def_lookup dl = crtl->ssa->find_def (resource, insn);
+
+  def_info *prev = dl.prev_def ();
+  ebb_info *ebb = insn->ebb ();
+  if (!prev || prev->ebb () != ebb)
+    {
+      // REGNO is not defined or used in EBB before INSN, but it
+      // might be live on entry.  To keep complexity under control,
+      // handle only these cases:
+      //
+      // - If the register is not live on entry to EBB, the register is
+      //   free from the start of EBB to the first definition in EBB.
+      //
+      // - Otherwise, if the register is live on entry to BB, refuse
+      //   to allocate the register.  We could in principle try to move
+      //   the instruction to later blocks in the EBB, but it's rarely
+      //   worth the effort, and could lead to linear complexity.
+      //
+      // - Otherwise, don't allow INSN to move earlier than its current
+      //   block.  Again, we could in principle look backwards to find where
+      //   REGNO dies, but it's rarely worth the effort.
+      bb_info *bb = insn->bb ();
+      insn_info *limit;
+      if (!bitmap_bit_p (DF_LR_IN (ebb->first_bb ()->cfg_bb ()), regno))
+	limit = ebb->phi_insn ();
+      else if (bitmap_bit_p (DF_LR_IN (bb->cfg_bb ()), regno))
+	return false;
+      else
+	limit = bb->head_insn ();
+      move_range = move_later_than (move_range, limit);
+    }
+  else
+    {
+      // Stop the instruction moving beyond the previous relevant access
+      // to REGNO.
+      access_info *prev_access
+	= last_access_ignoring (prev, ignore_clobbers::YES, ignore);
+      if (prev_access)
+	move_range = move_later_than (move_range, access_insn (prev_access));
+    }
+
+  // Stop the instruction moving beyond the next relevant definition of REGNO.
+  def_info *next = first_def_ignoring (dl.matching_or_next_def (),
+				       ignore_clobbers::YES, ignore);
+  if (next)
+    move_range = move_earlier_than (move_range, next->insn ());
+
+  return canonicalize_move_range (move_range, insn);
+}
+
+// Try to restrict movement range MOVE_RANGE so that it is possible for the
+// instruction being moved ("instruction I1") to perform all the definitions
+// in DEFS while still preserving dependencies between those definitions
+// and surrounding instructions.  Assume that if:
+//
+// - DEFS contains a definition D of resource R;
+// - an instruction I2 contains another access A to R; and
+// - IGNORE (I2) is true
+//
+// then either:
+//
+// - A will be removed; or
+// - something will ensure that D and A maintain their current order,
+//   without this having to be enforced by I1's move range.
+//
+// Return true on success, otherwise leave MOVE_RANGE in an invalid state.
+//
+// This function only works correctly for instructions that remain within
+// the same extended basic block.
+template<typename IgnorePredicate>
+bool
+restrict_movement_for_defs_ignoring (insn_range_info &move_range,
+				     def_array defs, IgnorePredicate ignore)
+{
+  for (def_info *def : defs)
+    {
+      // If the definition is a clobber, we can move it with respect
+      // to other clobbers.
+      //
+      // ??? We could also do this if a definition and all its uses
+      // are being moved at once.
+      bool is_clobber = is_a<clobber_info *> (def);
+
+      // Search back for the first unfiltered use or definition of the
+      // same resource.
+      access_info *access;
+      access = prev_access_ignoring (def, ignore_clobbers (is_clobber),
+				     ignore);
+      if (access)
+	move_range = move_later_than (move_range, access_insn (access));
+
+      // Search forward for the first unfiltered use of DEF,
+      // or the first unfiltered definition that follows DEF.
+      //
+      // We don't need to consider uses of following definitions, since
+      // if IGNORE (D->insn ()) is true for some definition D, the caller
+      // guarantees that either
+      //
+      // - D will be removed, and thus its uses will be removed; or
+      // - D will occur after DEF, and thus D's uses will also occur
+      //   after DEF.
+      //
+      // This is purely a simplification: we could also process D's uses,
+      // but we don't need to.
+      access = next_access_ignoring (def, ignore_clobbers (is_clobber),
+				     ignore);
+      if (access)
+	move_range = move_earlier_than (move_range, access_insn (access));
+
+      // If DEF sets a hard register, take any call clobbers
+      // into account.
+      unsigned int regno = def->regno ();
+      if (!HARD_REGISTER_NUM_P (regno) || is_clobber)
+	continue;
+
+      ebb_info *ebb = def->ebb ();
+      for (ebb_call_clobbers_info *call_group : ebb->call_clobbers ())
+	{
+	  if (!call_group->clobbers (def->resource ()))
+	    continue;
+
+	  // Exit now if we've already failed, and if the splay accesses
+	  // below would be wasted work.
+	  if (!move_range)
+	    return false;
+
+	  insn_info *insn;
+	  insn = prev_call_clobbers_ignoring (*call_group, def->insn (),
+					      ignore);
+	  if (insn)
+	    move_range = move_later_than (move_range, insn);
+
+	  insn = next_call_clobbers_ignoring (*call_group, def->insn (),
+					      ignore);
+	  if (insn)
+	    move_range = move_earlier_than (move_range, insn);
+	}
+    }
+
+  // Make sure that we don't move stores between basic blocks, since we
+  // don't have enough information to tell whether it's safe.
+  if (def_info *def = memory_access (defs))
+    {
+      move_range = move_later_than (move_range, def->bb ()->head_insn ());
+      move_range = move_earlier_than (move_range, def->bb ()->end_insn ());
+    }
+
+  return bool (move_range);
+}
+
+// Like restrict_movement_for_defs_ignoring, but for the uses in USES.
+template<typename IgnorePredicate>
+bool
+restrict_movement_for_uses_ignoring (insn_range_info &move_range,
+				     use_array uses, IgnorePredicate ignore)
+{
+  for (const use_info *use : uses)
+    {
+      // Ignore uses of undefined values.
+      set_info *set = use->def ();
+      if (!set)
+	continue;
+
+      // Ignore uses by debug instructions.  Debug instructions are
+      // never supposed to move, and uses by debug instructions are
+      // never supposed to be transferred elsewhere, so we know that
+      // the caller must be changing the uses on the debug instruction
+      // and checking whether all new uses are available at the debug
+      // instruction's original location.
+      if (use->is_in_debug_insn ())
+	continue;
+
+      // If the used value is defined by an instruction I2 for which
+      // IGNORE (I2) is true, the caller guarantees that I2 will occur
+      // before change.insn ().  Otherwise, make sure that the use occurs
+      // after the definition.
+      insn_info *insn = set->insn ();
+      if (!ignore (insn))
+	move_range = move_later_than (move_range, insn);
+
+      // Search forward for the first unfiltered definition that follows SET.
+      //
+      // We don't need to consider the uses of these definitions, since
+      // if IGNORE (D->insn ()) is true for some definition D, the caller
+      // guarantees that either
+      //
+      // - D will be removed, and thus its uses will be removed; or
+      // - D will occur after USE, and thus D's uses will also occur
+      //   after USE.
+      //
+      // This is purely a simplification: we could also process D's uses,
+      // but we don't need to.
+      def_info *def;
+      def = first_def_ignoring (set->next_def (), ignore_clobbers::NO,
+				ignore);
+      if (def)
+	move_range = move_earlier_than (move_range, def->insn ());
+
+      // If USE uses a hard register, take any call clobbers into account too.
+      // SET will necessarily occur after any previous call clobber, so we
+      // only need to check for later clobbers.
+      unsigned int regno = use->regno ();
+      if (!HARD_REGISTER_NUM_P (regno))
+	continue;
+
+      ebb_info *ebb = use->ebb ();
+      for (ebb_call_clobbers_info *call_group : ebb->call_clobbers ())
+	{
+	  if (!call_group->clobbers (use->resource ()))
+	    continue;
+
+	  if (!move_range)
+	    return false;
+
+	  insn_info *insn = next_call_clobbers_ignoring (*call_group,
+							 use->insn (), ignore);
+	  if (insn)
+	    move_range = move_earlier_than (move_range, insn);
+	}
+    }
+
+  // Make sure that we don't move loads into an earlier basic block.
+  //
+  // ??? It would be good to relax this for loads that can be safely
+  // speculated.
+  if (use_info *use = memory_access (uses))
+    move_range = move_later_than (move_range, use->bb ()->head_insn ());
+
+  return bool (move_range);
+}
+
+}
diff --git a/gcc/system.h b/gcc/system.h
index d4e29e53ee3..5470a21fb05 100644
--- a/gcc/system.h
+++ b/gcc/system.h
@@ -235,6 +235,9 @@ extern int errno;
 #ifdef INCLUDE_ARRAY
 # include <array>
 #endif
+#ifdef INCLUDE_FUNCTIONAL
+# include <functional>
+#endif
 # include <cstring>
 # include <new>
 # include <utility>
-- 
2.17.1


  parent reply	other threads:[~2020-11-13  8:23 UTC|newest]

Thread overview: 88+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2020-11-13  8:10 [00/23] Make fwprop use an on-the-side RTL SSA representation Richard Sandiford
2020-11-13  8:11 ` [01/23] vec: Silence clang warning Richard Sandiford
2020-11-25 19:58   ` Jeff Law
2020-11-13  8:12 ` [02/23] rtlanal: Remove noop_move_p REG_EQUAL condition Richard Sandiford
2020-11-25 20:00   ` Jeff Law
2020-11-13  8:12 ` [03/23] reginfo: Add a global_reg_set Richard Sandiford
2020-11-25 20:01   ` Jeff Law
2020-11-13  8:13 ` [04/23] Move iterator_range to a new iterator-utils.h file Richard Sandiford
2020-11-25 20:02   ` Jeff Law
2020-11-13  8:13 ` [05/23] Add more iterator utilities Richard Sandiford
2020-11-25 20:12   ` Jeff Law
2020-11-13  8:14 ` [06/23] Add an RAII class for managing obstacks Richard Sandiford
2020-11-25 20:15   ` Jeff Law
2020-11-13  8:14 ` [07/23] Add a class that multiplexes two pointer types Richard Sandiford
2020-11-25 20:23   ` Jeff Law
2020-11-26 16:15     ` Richard Sandiford
2020-11-30  1:28       ` Jeff Law
2020-11-25 23:33   ` Martin Sebor
2020-11-26 17:06     ` Richard Sandiford
2020-11-27 18:12       ` Richard Sandiford
2020-11-28  0:17       ` Martin Sebor
2020-12-17  0:17         ` Richard Sandiford
2020-12-17 14:21           ` Tom Tromey
2020-12-17 15:38             ` Richard Sandiford
2020-12-17 15:44               ` Nathan Sidwell
2021-01-04 15:32                 ` Jeff Law
2020-11-13  8:15 ` [08/23] Add an alternative splay tree implementation Richard Sandiford
2020-12-02 20:36   ` Jeff Law
2020-12-17  0:29     ` Richard Sandiford
2021-01-04 15:27       ` Jeff Law
2021-01-01  8:25   ` Andreas Schwab
2021-01-04 14:53     ` Richard Sandiford
2021-01-04 15:02       ` Andreas Schwab
2021-01-04 15:42         ` Richard Sandiford
2021-01-05 12:13           ` Richard Biener
2020-11-13  8:15 ` [09/23] Add a cut-down version of std::span (array_slice) Richard Sandiford
2020-11-30 19:56   ` Jeff Law
2022-08-03 15:13   ` Martin Jambor
2022-08-03 15:31     ` Richard Sandiford
2022-08-10 16:03   ` Martin Jambor
2022-08-11  6:58     ` Richard Biener
2022-08-16  7:59       ` Richard Sandiford
2020-11-13  8:16 ` [10/23] Tweak the way that is_a is implemented Richard Sandiford
2020-12-02  5:15   ` Jeff Law
2020-11-13  8:16 ` [11/23] Split update_cfg_for_uncondjump out of combine Richard Sandiford
2020-11-30  6:14   ` Jeff Law
2020-11-13  8:17 ` [12/23] Export print-rtl.c:print_insn_with_notes Richard Sandiford
2020-11-25 20:24   ` Jeff Law
2020-11-13  8:18 ` [13/23] recog: Split out a register_asm_p function Richard Sandiford
2020-11-25 20:24   ` Jeff Law
2020-11-13  8:18 ` [14/23] simplify-rtx: Put simplify routines into a class Richard Sandiford
2020-11-30 19:54   ` Jeff Law
2020-11-13  8:19 ` [15/23] recog: Add a validate_change_xveclen function Richard Sandiford
2020-11-30 20:03   ` Jeff Law
2020-11-13  8:19 ` [16/23] recog: Add a way of temporarily undoing changes Richard Sandiford
2020-11-25 20:27   ` Jeff Law
2020-12-17  0:22     ` Richard Sandiford
2020-11-13  8:20 ` [17/23] recog: Add a class for propagating into insns Richard Sandiford
2020-12-03 22:32   ` Jeff Law
2020-11-13  8:20 ` [18/23] recog: Add an RAII class for undoing insn changes Richard Sandiford
2020-11-25 20:27   ` Jeff Law
2020-11-13  8:20 ` [19/23] rtlanal: Add some new helper classes Richard Sandiford
2020-12-13 17:30   ` Jeff Law
2020-12-14 16:37     ` Richard Sandiford
2020-12-14 20:02       ` Jeff Law
2020-11-13  8:21 ` [20/23] rtlanal: Add simple_regno_set Richard Sandiford
2020-11-25 20:31   ` Jeff Law
2020-12-17  0:47     ` Richard Sandiford
2021-01-04 15:28       ` Jeff Law
2020-11-13  8:22 ` [21/23] doc: Add documentation for rtl-ssa Richard Sandiford
2020-11-30  6:26   ` Jeff Law
2020-11-13  8:23 ` Richard Sandiford [this message]
2020-12-16  3:31   ` [PATCH 22/23] Add rtl-ssa Jeff Law
2020-12-17  0:33     ` Richard Sandiford
2020-12-19 20:01       ` Jeff Law
2020-11-13  8:24 ` [PATCH 23/23] fwprop: Rewrite to use RTL SSA Richard Sandiford
2020-12-16  3:52   ` Jeff Law
2020-12-17  0:34     ` Richard Sandiford
2020-11-25 19:58 ` [00/23] Make fwprop use an on-the-side RTL SSA representation Jeff Law
2020-11-26 16:03   ` Richard Sandiford
2020-11-27 15:56     ` Michael Matz
2020-11-27 16:31       ` Richard Sandiford
2020-11-30 21:13         ` Jeff Law
2020-12-01  0:03           ` Michael Matz
2020-12-01 10:15             ` Richard Sandiford
2020-12-02  0:25             ` Jeff Law
2020-11-30  6:45     ` Jeff Law
2020-11-30 14:12       ` Richard Sandiford

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=mptmtzl64yh.fsf@arm.com \
    --to=richard.sandiford@arm.com \
    --cc=gcc-patches@gcc.gnu.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for read-only IMAP folder(s) and NNTP newsgroup(s).