public inbox for gcc-cvs@sourceware.org
help / color / mirror / Atom feed
From: Alexandre Oliva <aoliva@gcc.gnu.org>
To: gcc-cvs@gcc.gnu.org
Subject: [gcc(refs/users/aoliva/heads/strub)] strub with function and variable attributes
Date: Sun, 25 Jul 2021 01:43:14 +0000 (GMT)	[thread overview]
Message-ID: <20210725014314.7784B3945C08@sourceware.org> (raw)

https://gcc.gnu.org/g:e2b5b936e3fd1fab3cb0a7cdb580a677712300ea

commit e2b5b936e3fd1fab3cb0a7cdb580a677712300ea
Author: Alexandre Oliva <oliva@adacore.com>
Date:   Thu Jul 22 05:47:05 2021 -0300

    strub with function and variable attributes
    
    still missing verification of calls, documentation, tests,
    builtin expansion

Diff:
---
 gcc/Makefile.in                        |    1 +
 gcc/builtins.c                         |   12 +
 gcc/builtins.def                       |    4 +
 gcc/c-family/c-attribs.c               |   39 +
 gcc/common.opt                         |   16 +
 gcc/doc/extend.texi                    |    4 +
 gcc/ipa-inline.c                       |    6 +
 gcc/ipa-strub.c                        | 2308 ++++++++++++++++++++++++++++++++
 gcc/ipa-strub.h                        |   25 +
 gcc/passes.def                         |    2 +
 gcc/testsuite/g++.dg/wrappers/strub1.C |   18 +
 gcc/testsuite/g++.dg/wrappers/strub2.C |   22 +
 gcc/testsuite/g++.dg/wrappers/strub3.C |   22 +
 gcc/testsuite/g++.dg/wrappers/strub4.C |   18 +
 gcc/tree-cfg.c                         |    1 +
 gcc/tree-pass.h                        |    5 +-
 libgcc/Makefile.in                     |    2 +-
 libgcc/libgcc2.h                       |    4 +
 libgcc/strub.c                         |  112 ++
 19 files changed, 2618 insertions(+), 3 deletions(-)

diff --git a/gcc/Makefile.in b/gcc/Makefile.in
index 1666ef84d6a..163fd1a856c 100644
--- a/gcc/Makefile.in
+++ b/gcc/Makefile.in
@@ -1468,6 +1468,7 @@ OBJS = \
 	ipa-reference.o \
 	ipa-ref.o \
 	ipa-utils.o \
+	ipa-strub.o \
 	ipa.o \
 	ira.o \
 	ira-build.o \
diff --git a/gcc/builtins.c b/gcc/builtins.c
index 170d776c410..84539d73c8c 100644
--- a/gcc/builtins.c
+++ b/gcc/builtins.c
@@ -153,6 +153,7 @@ static rtx expand_builtin_strnlen (tree, rtx, machine_mode);
 static rtx expand_builtin_alloca (tree);
 static rtx expand_builtin_unop (machine_mode, tree, rtx, rtx, optab);
 static rtx expand_builtin_frame_address (tree, tree);
+static rtx expand_builtin_stack_address ();
 static tree stabilize_va_list_loc (location_t, tree, int);
 static rtx expand_builtin_expect (tree, rtx);
 static rtx expand_builtin_expect_with_probability (tree, rtx);
@@ -7893,6 +7894,14 @@ expand_builtin_frame_address (tree fndecl, tree exp)
     }
 }
 
+/* Expand a call to the builtin function __builtin_stack_address.  */
+
+static rtx
+expand_builtin_stack_address ()
+{
+  return copy_addr_to_reg (stack_pointer_rtx);
+}
+
 /* Expand EXP, a call to the alloca builtin.  Return NULL_RTX if we
    failed and the caller should emit a normal call.  */
 
@@ -10151,6 +10160,9 @@ expand_builtin (tree exp, rtx target, rtx subtarget, machine_mode mode,
     case BUILT_IN_RETURN_ADDRESS:
       return expand_builtin_frame_address (fndecl, exp);
 
+    case BUILT_IN_STACK_ADDRESS:
+      return expand_builtin_stack_address ();
+
     /* Returns the address of the area where the structure is returned.
        0 otherwise.  */
     case BUILT_IN_AGGREGATE_INCOMING_ADDRESS:
diff --git a/gcc/builtins.def b/gcc/builtins.def
index ec556df4f66..4545033e664 100644
--- a/gcc/builtins.def
+++ b/gcc/builtins.def
@@ -878,6 +878,10 @@ DEF_EXT_LIB_BUILTIN    (BUILT_IN_FFSL, "ffsl", BT_FN_INT_LONG, ATTR_CONST_NOTHRO
 DEF_EXT_LIB_BUILTIN    (BUILT_IN_FFSLL, "ffsll", BT_FN_INT_LONGLONG, ATTR_CONST_NOTHROW_LEAF_LIST)
 DEF_EXT_LIB_BUILTIN        (BUILT_IN_FORK, "fork", BT_FN_PID, ATTR_NOTHROW_LIST)
 DEF_GCC_BUILTIN        (BUILT_IN_FRAME_ADDRESS, "frame_address", BT_FN_PTR_UINT, ATTR_NULL)
+DEF_GCC_BUILTIN        (BUILT_IN_STACK_ADDRESS, "stack_address", BT_FN_PTR, ATTR_NULL)
+DEF_BUILTIN_STUB       (BUILT_IN___STRUB_ENTER, "__builtin___strub_enter")
+DEF_BUILTIN_STUB       (BUILT_IN___STRUB_UPDATE, "__builtin___strub_update")
+DEF_BUILTIN_STUB       (BUILT_IN___STRUB_LEAVE, "__builtin___strub_leave")
 /* [trans-mem]: Adjust BUILT_IN_TM_FREE if BUILT_IN_FREE is changed.  */
 DEF_LIB_BUILTIN        (BUILT_IN_FREE, "free", BT_FN_VOID_PTR, ATTR_NOTHROW_LEAF_LIST)
 DEF_GCC_BUILTIN        (BUILT_IN_FROB_RETURN_ADDR, "frob_return_addr", BT_FN_PTR_PTR, ATTR_NULL)
diff --git a/gcc/c-family/c-attribs.c b/gcc/c-family/c-attribs.c
index e60fb31d8c8..f596e3f7237 100644
--- a/gcc/c-family/c-attribs.c
+++ b/gcc/c-family/c-attribs.c
@@ -69,6 +69,7 @@ static tree handle_asan_odr_indicator_attribute (tree *, tree, tree, int,
 static tree handle_stack_protect_attribute (tree *, tree, tree, int, bool *);
 static tree handle_no_stack_protector_function_attribute (tree *, tree,
 							tree, int, bool *);
+static tree handle_strub_attribute (tree *, tree, tree, int, bool *);
 static tree handle_noinline_attribute (tree *, tree, tree, int, bool *);
 static tree handle_noclone_attribute (tree *, tree, tree, int, bool *);
 static tree handle_nocf_check_attribute (tree *, tree, tree, int, bool *);
@@ -306,6 +307,8 @@ const struct attribute_spec c_common_attribute_table[] =
   { "no_stack_protector",     0, 0, true, false, false, false,
 			      handle_no_stack_protector_function_attribute,
 			      attr_stack_protect_exclusions },
+  { "strub",		      0, 1, false, true, false, true,
+			      handle_strub_attribute, NULL },
   { "noinline",               0, 0, true,  false, false, false,
 			      handle_noinline_attribute,
 	                      attr_noinline_exclusions },
@@ -1290,6 +1293,42 @@ handle_noipa_attribute (tree *node, tree name, tree, int, bool *no_add_attrs)
   return NULL_TREE;
 }
 
+/* Handle a "strub" attribute; arguments as in
+   struct attribute_spec.handler.  */
+
+static tree
+handle_strub_attribute (tree *node, tree name,
+			tree args,
+			int ARG_UNUSED (flags), bool *no_add_attrs)
+{
+  if (args
+      && (TREE_CODE (*node) == FUNCTION_TYPE
+	  || TREE_CODE (*node) == METHOD_TYPE))
+    {
+      /* Check that the supplied arg is acceptable.  */
+      if (TREE_CODE (TREE_VALUE (args)) != INTEGER_CST
+	  || !tree_fits_shwi_p (TREE_VALUE (args))
+	  /* Do not allow explicit -1 (STRUB_WRAPPED).  */
+	  || tree_to_shwi (TREE_VALUE (args)) < 0
+	  || tree_to_shwi (TREE_VALUE (args)) > 3)
+	{
+	  warning (OPT_Wattributes,
+		   "%qE attribute ignored because of argument %qE",
+		   name, TREE_VALUE (args));
+	  *no_add_attrs = true;
+	}
+
+      args = TREE_CHAIN (args);
+    }
+
+  if (args)
+    warning (OPT_Wattributes,
+	     "ignoring excess %qE attribute arguments starting at %qE",
+	     name, TREE_VALUE (args));
+
+  return NULL_TREE;
+}
+
 /* Handle a "noinline" attribute; arguments as in
    struct attribute_spec.handler.  */
 
diff --git a/gcc/common.opt b/gcc/common.opt
index d9da1131eda..72a0b9e7a0c 100644
--- a/gcc/common.opt
+++ b/gcc/common.opt
@@ -2687,6 +2687,22 @@ fstrict-overflow
 Common
 Treat signed overflow as undefined.  Negated as -fwrapv -fwrapv-pointer.
 
+fstrub
+Common Var(flag_strub, 3) Init(-2)
+Enable (or disable) stack scrubbing for all viable functions
+
+fstrub-default
+Common Var(flag_strub, -1)
+Enable stack scrub as requested through attributes
+
+fstrub-at-calls
+Common RejectNegative Var(flag_strub, 1)
+Enable at-calls stack scrubbing for all viable functions
+
+fstrub-internal
+Common RejectNegative Var(flag_strub, 2)
+Enable internal stack scrubbing for all viable functions
+
 fsync-libcalls
 Common Var(flag_sync_libcalls) Init(1)
 Implement __atomic operations via libcalls to legacy __sync functions.
diff --git a/gcc/doc/extend.texi b/gcc/doc/extend.texi
index b83cd4919bb..4e4206b2f35 100644
--- a/gcc/doc/extend.texi
+++ b/gcc/doc/extend.texi
@@ -11670,6 +11670,10 @@ option is in effect.  Such calls should only be made in debugging
 situations.
 @end deftypefn
 
+@deftypefn {Built-in Function} {void *} __builtin_stack_address ()
+This function returns the value of the stack pointer register.
+@end deftypefn
+
 @node Vector Extensions
 @section Using Vector Instructions through Built-in Functions
 
diff --git a/gcc/ipa-inline.c b/gcc/ipa-inline.c
index 413446bcc46..7f4bc44d2bb 100644
--- a/gcc/ipa-inline.c
+++ b/gcc/ipa-inline.c
@@ -119,6 +119,7 @@ along with GCC; see the file COPYING3.  If not see
 #include "stringpool.h"
 #include "attribs.h"
 #include "asan.h"
+#include "ipa-strub.h"
 
 typedef fibonacci_heap <sreal, cgraph_edge> edge_heap_t;
 typedef fibonacci_node <sreal, cgraph_edge> edge_heap_node_t;
@@ -396,6 +397,11 @@ can_inline_edge_p (struct cgraph_edge *e, bool report,
       e->inline_failed = CIF_SANITIZE_ATTRIBUTE_MISMATCH;
       inlinable = false;
     }
+  if (!strub_inlinable_p (callee, caller))
+    {
+      e->inline_failed = CIF_UNSPECIFIED;
+      inlinable = false;
+    }
   if (!inlinable && report)
     report_inline_failed_reason (e);
   return inlinable;
diff --git a/gcc/ipa-strub.c b/gcc/ipa-strub.c
new file mode 100644
index 00000000000..f9a3c92a115
--- /dev/null
+++ b/gcc/ipa-strub.c
@@ -0,0 +1,2308 @@
+/* strub (stack scrubbing) support.
+   Copyright (C) 2021 Free Software Foundation, Inc.
+   Contributed by Alexandre Oliva <oliva@adacore.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3.  If not see
+<http://www.gnu.org/licenses/>.  */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "backend.h"
+#include "tree.h"
+#include "gimple.h"
+#include "gimplify.h"
+#include "tree-pass.h"
+#include "ssa.h"
+#include "gimple-iterator.h"
+#include "gimplify-me.h"
+#include "tree-into-ssa.h"
+#include "tree-ssa.h"
+#include "tree-cfg.h"
+#include "cfghooks.h"
+#include "cfgloop.h"
+#include "cfgcleanup.h"
+#include "tree-eh.h"
+#include "except.h"
+#include "builtins.h"
+#include "attribs.h"
+#include "tree-inline.h"
+#include "cgraph.h"
+#include "alloc-pool.h"
+#include "symbol-summary.h"
+#include "ipa-prop.h"
+#include "ipa-fnsummary.h"
+#include "symtab-thunks.h"
+#include "gimple-fold.h"
+#include "fold-const.h"
+#include "gimple-walk.h"
+#include "tree-dfa.h"
+#include "langhooks.h"
+#include "calls.h"
+#include "vec.h"
+#include "stor-layout.h"
+#include "varasm.h"
+#include "alias.h"
+#include "diagnostic.h"
+#include "intl.h"
+#include "ipa-strub.h"
+#include "attr-fnspec.h"
+
+enum strub_mode {
+  /* This mode denotes a regular function, that does not require stack
+     scrubbing (strubbing).  It may call any other functions, but if
+     it calls AT_CALLS (or WRAPPED) ones, strubbing logic is
+     automatically introduced around those calls (the latter, by
+     inlining INTERNAL wrappers).  */
+  STRUB_DISABLED = 0,
+
+  /* This denotes a function whose signature is (to be) modified to
+     take an extra parameter, for stack use annotation, and its
+     callers must initialize and pass that argument, and perform the
+     strubbing.  Functions that are explicitly marked with attribute
+     strub must have the mark visible wherever the function is,
+     including aliases, and overriders and overriding methods.
+     Functions that are implicitly marked for strubbing, for accessing
+     variables explicitly marked as such, will only select this
+     strubbing method if they are internal to a translation unit.  It
+     can only be inlined into other strubbing functions, i.e.,
+     STRUB_AT_CALLS or STRUB_WRAPPED.  */
+  STRUB_AT_CALLS = 1,
+
+  /* This denotes a function that is to perform strubbing internally,
+     without any changes to its interface (the function is turned into
+     a strubbing wrapper, and its original body is moved to a separate
+     STRUB_WRAPPED function, with a modified interface).  Functions
+     may be explicitly marked with attribute strub(2), and the
+     attribute must be visible at the point of definition.  Functions
+     that are explicitly marked for strubbing, for accessing variables
+     explicitly marked as such, may select this strubbing mode if
+     their interface cannot change, e.g. because its interface is
+     visible to other translation units, directly, by indirection
+     (having its address taken), inheritance, etc.  Functions that use
+     this method must not have the noclone attribute, nor the noipa
+     one.  Functions marked as always_inline may select this mode, but
+     they are NOT wrapped, they remain unchanged, and are only inlined
+     into strubbed contexts.  Once non-always_inline functions are
+     wrapped, the wrapper becomes STRUB_WRAPPER, and the wrapped becomes
+     STRUB_WRAPPED.  */
+  STRUB_INTERNAL = 2,
+
+  /* This denotes a function whose stack is not strubbed, but that is
+     nevertheless explicitly or implicitly marked as callable from strubbing
+     functions.  Normally, only STRUB_AT_CALLS (and STRUB_INTERNAL ->
+     STRUB_WRAPPED) functions can be called from strubbing contexts (bodies of
+     STRUB_AT_CALLS, STRUB_INTERNAL and STRUB_WRAPPED functions), but attribute
+     strub(3) enables other functions to be (indirectly) called from these
+     contexts.  Some builtins and internal functions may be implicitly marked as
+     STRUB_CALLABLE.  */
+  STRUB_CALLABLE = 3,
+
+  /* This denotes the function that took over the body of a
+     STRUB_INTERNAL function.  At first, it's only called by its
+     wrapper, but the wrapper may be inlined.  The wrapped function,
+     in turn, can only be inlined into other functions whose stack
+     frames are strubbed, i.e., that are STRUB_WRAPPED or
+     STRUB_AT_CALLS.  */
+  STRUB_WRAPPED = -1,
+
+  /* This denotes the wrapper function that replaced the STRUB_INTERNAL
+     function.  This mode overrides the STRUB_INTERNAL mode at the time the
+     internal to-be-wrapped function becomes a wrapper, so that inlining logic
+     can tell one from the other.  */
+  STRUB_WRAPPER = -2,
+
+  /* This denotes an always_inline function that requires strubbing.  It can
+     only be called from, and inlined into, other strubbing contexts.  */
+  STRUB_INLINABLE = -3,
+};
+
+static tree
+get_strub_attr_from_type (tree type)
+{
+  return lookup_attribute ("strub", TYPE_ATTRIBUTES (type));
+}
+
+static tree
+get_strub_attr_from_decl (tree decl)
+{
+  tree ret = lookup_attribute ("strub", DECL_ATTRIBUTES (decl));
+  if (ret)
+    return ret;
+  return get_strub_attr_from_type (TREE_TYPE (decl));
+}
+
+tree
+get_strub_mode_attr_value (enum strub_mode mode)
+{
+  return tree_cons (NULL_TREE,
+		    build_int_cst (integer_type_node, (int)mode),
+		    NULL_TREE);
+
+#if 0 /* ??? use symbolic mode names with interned strings?  */
+  char *s = NULL;
+
+  switch (strub_mode)
+    {
+      
+    }
+#endif
+}
+
+static enum strub_mode
+get_strub_mode_from_attr (tree strub_attr)
+{
+  enum strub_mode mode = STRUB_DISABLED;
+
+  if (strub_attr)
+    {
+      if (!TREE_VALUE (strub_attr))
+	mode = STRUB_AT_CALLS;
+      else if (TREE_CODE (TREE_VALUE (TREE_VALUE (strub_attr))) == INTEGER_CST)
+	mode = (enum strub_mode) tree_to_shwi (TREE_VALUE
+					       (TREE_VALUE (strub_attr)));
+      else /* ??? Support symbolic mode names?  */
+	gcc_unreachable ();
+    }
+
+  return mode;
+}
+
+static enum strub_mode
+get_strub_mode (cgraph_node *node)
+{
+  return get_strub_mode_from_attr (get_strub_attr_from_decl (node->decl));
+}
+
+static bool
+calls_builtin_va_start_p (cgraph_node *node)
+{
+  bool result = false;
+
+  for (cgraph_edge *e = node->callees; e; e = e->next_callee)
+    {
+      tree cdecl = e->callee->decl;
+      if (fndecl_built_in_p (cdecl, BUILT_IN_VA_START))
+	return true;
+    }
+
+  return result;
+}
+
+static bool
+calls_builtin_apply_args_p (cgraph_node *node, bool report = false)
+{
+  bool result = false;
+
+  for (cgraph_edge *e = node->callees; e; e = e->next_callee)
+    {
+      tree cdecl = e->callee->decl;
+      if (!fndecl_built_in_p (cdecl, BUILT_IN_APPLY_ARGS))
+	continue;
+
+      result = true;
+
+      if (!report)
+	break;
+
+      sorry_at (gimple_location (e->call_stmt),
+		"at-calls strub does not support call to %qD",
+		cdecl);
+    }
+
+  return result;
+}
+
+static bool
+can_strub_at_calls_p (cgraph_node *node, bool report = false)
+{
+  return !calls_builtin_apply_args_p (node, report);
+}
+
+#define STRUB_INTERNAL_MAX_EXTRA_ARGS 3
+
+/* We can't perform internal strubbing if the function body involves certain
+   features:
+
+   - a non-default __builtin_va_start (e.g. x86's __builtin_ms_va_start) is
+   currently unsupported because we can't discover the corresponding va_copy and
+   va_end decls in the wrapper, and we don't convey the alternate variable
+   arguments ABI to the modified wrapped function.  The default
+   __builtin_va_start is supported by calling va_start/va_end at the wrapper,
+   that takes variable arguments, passing a pointer to the va_list object to the
+   wrapped function, that runs va_copy from it where the original function ran
+   va_start.
+
+   __builtin_next_arg is currently unsupported because the wrapped function
+   won't be a variable argument function.  We could process it in the wrapper,
+   that remains a variable argument function, and replace calls in the wrapped
+   body, but we currently don't.
+
+   __builtin_return_address is rejected because it's generally used when the
+   actual caller matters, and introducing a wrapper breaks such uses as those in
+   the unwinder.  */
+
+static bool
+can_strub_internally_p (cgraph_node *node, bool report = false)
+{
+  bool result = true;
+
+  for (cgraph_edge *e = node->callees; e; e = e->next_callee)
+    {
+      tree cdecl = e->callee->decl;
+      if (!((fndecl_built_in_p (cdecl, BUILT_IN_VA_START)
+	     && cdecl != builtin_decl_explicit (BUILT_IN_VA_START))
+	    || fndecl_built_in_p (cdecl, BUILT_IN_NEXT_ARG)
+	    || fndecl_built_in_p (cdecl, BUILT_IN_RETURN_ADDRESS)))
+	continue;
+
+      result = false;
+
+      if (!report)
+	return result;
+
+      sorry_at (gimple_location (e->call_stmt),
+		"internal strub does not support call to %qD",
+		cdecl);
+    }
+
+  basic_block bb;
+  if (node->has_gimple_body_p ())
+    {
+      /* Label values referenced are not preserved when copying.  If referenced
+	 in nested functions, as in 920415-1.c and 920721-4.c their decls get
+	 remapped independently.  That might be too broad, in that we might be
+	 able to support correctly cases in which the labels are only used
+	 internally in a function, but disconnecting user labels from their
+	 original declarations is undesirable in general, and it probably
+	 doesn't matter, since explicitly-requested strub likely uses
+	 STRUB_AT_CALLS mode anyway.  */
+
+      FOR_EACH_BB_FN (bb, DECL_STRUCT_FUNCTION (node->decl))
+	for (gimple_stmt_iterator gsi = gsi_start_bb (bb);
+	     !gsi_end_p (gsi); gsi_next (&gsi))
+	  {
+	    glabel *label_stmt = dyn_cast <glabel *> (gsi_stmt (gsi));
+	    tree target;
+
+	    if (!label_stmt)
+	      break;
+
+	    target = gimple_label_label (label_stmt);
+
+	    /* Make an edge to every label block that has been marked as a
+	       potential target for a computed goto or a non-local goto.  */
+	    if (!FORCED_LABEL (target))
+	      continue;
+
+	    result = false;
+
+	    if (!report)
+	      return result;
+
+	    sorry_at (gimple_location (label_stmt),
+		      "internal strub does not support user labels");
+	  }
+    }
+
+  if (list_length (TYPE_ARG_TYPES (TREE_TYPE (node->decl)))
+      >= (((HOST_WIDE_INT) 1 << IPA_PARAM_MAX_INDEX_BITS)
+	  - STRUB_INTERNAL_MAX_EXTRA_ARGS))
+    {
+      result = false;
+
+      if (!report)
+	return result;
+
+      sorry_at (DECL_SOURCE_LOCATION (node->decl),
+		"%qD has too many arguments for internal strub",
+		node->decl);
+    }
+
+  return result;
+}
+
+static bool
+strub_from_body_p (cgraph_node *node)
+{
+  if (!node->has_gimple_body_p ())
+    return false;
+
+  /* If any local variable is marked for strub...  */
+  unsigned i;
+  tree var;
+  FOR_EACH_LOCAL_DECL (DECL_STRUCT_FUNCTION (node->decl),
+		       i, var)
+    if (get_strub_attr_from_decl (var))
+      return true;
+
+  /* Now scan the body for loads with strub types.  */
+  basic_block bb;
+  FOR_EACH_BB_FN (bb, DECL_STRUCT_FUNCTION (node->decl))
+    for (gimple_stmt_iterator gsi = gsi_start_bb (bb);
+	 !gsi_end_p (gsi); gsi_next (&gsi))
+      {
+	gimple *stmt = gsi_stmt (gsi);
+
+	if (!gimple_assign_load_p (stmt))
+	  continue;
+
+	tree rhs = gimple_assign_rhs1 (stmt);
+	if (get_strub_attr_from_type (TREE_TYPE (rhs)))
+	  return true;
+      }
+
+  return false;
+}
+
+static enum strub_mode
+compute_strub_mode (cgraph_node *node, tree strub_attr)
+{
+  enum strub_mode req_mode = get_strub_mode_from_attr (strub_attr);
+
+  gcc_checking_assert (flag_strub >= -1 && flag_strub <= 3);
+
+  /* Symbolic encodings of the -fstrub-* flags.  */
+  /* Enable strub when explicitly requested through attributes to functions or
+     variables, reporting errors if the requests cannot be satisfied.  */
+  const bool strub_flag_auto = flag_strub < 0;
+  /* Disable strub altogether, ignore attributes entirely.  */
+  const bool strub_flag_disabled = flag_strub == 0;
+  /* On top of _auto, also enable strub implicitly for functions that can
+     safely undergo at-calls strubbing.  Internal mode will still be used in
+     functions that request it explicitly with attribute strub(2), or when the
+     function body requires strubbing and at-calls strubbing is not viable.  */
+  const bool strub_flag_at_calls = flag_strub == 1;
+  /* On top of default, also enable strub implicitly for functions that can
+     safely undergo internal strubbing.  At-calls mode will still be used in
+     functions that request it explicitly with attribute strub() or strub(1),
+     or when the function body requires strubbing and internal strubbing is not
+     viable.  */
+  const bool strub_flag_internal = flag_strub == 2;
+  /* On top of default, also enable strub implicitly for functions that can
+     safely undergo strubbing in either mode.  When both modes are viable,
+     at-calls is preferred.  */
+  const bool strub_flag_either = flag_strub == 3;
+  /* Besides the default behavior, enable strub implicitly for all viable
+     functions.  */
+  const bool strub_flag_viable = flag_strub > 0;
+
+  /* The consider_* variables should be true if selecting the corresponding
+     strub modes would be consistent with requests from attributes and command
+     line flags.  Attributes associated with functions pretty much mandate a
+     selection, and should report an error if not satisfied; strub_flag_auto
+     implicitly enables some viable strub mode if that's required by references
+     to variables marked for strub; strub_flag_viable enables strub if viable
+     (even when favoring one mode, body-requested strub can still be satisfied
+     by either mode), and falls back to callable, silently unless variables
+     require strubbing.  */
+
+  const bool consider_at_calls
+    = (!strub_flag_disabled
+       && (strub_attr
+	   ? req_mode == STRUB_AT_CALLS
+	   : true));
+  const bool consider_internal
+    = (!strub_flag_disabled
+       && (strub_attr
+	   ? req_mode == STRUB_INTERNAL
+	   : true));
+
+  const bool consider_callable
+    = (!strub_flag_disabled
+       && (strub_attr
+	   ? req_mode == STRUB_CALLABLE
+	   : strub_flag_viable));
+
+  /* This is a shorthand for either strub-enabled mode.  */
+  const bool consider_strub
+    = (consider_at_calls || consider_internal);
+
+  /* We can cope with always_inline functions even with noipa and noclone,
+     because we just leave them alone.  */
+  const bool is_always_inline
+    = (lookup_attribute ("always_inline",
+			 DECL_ATTRIBUTES (node->decl)));
+
+  /* Strubbing in general, and each specific strub mode, may have its own set of
+     requirements.  We require noipa for strubbing, either because of cloning
+     required for internal strub, or because of caller enumeration required for
+     at-calls strub.  We don't consider the at-calls mode eligible if it's not
+     even considered, it has no further requirements.  Internal mode requires
+     cloning and the absence of certain features in the body and, like at-calls,
+     it's not eligible if it's not even under consideration.
+
+     ??? Do we need target hooks for further constraints?  E.g., x86's
+     "interrupt" attribute breaks internal strubbing because the wrapped clone
+     carries the attribute and thus isn't callable; in this case, we could use a
+     target hook to adjust the clone instead.  */
+  const bool strub_eligible
+    = (consider_strub
+       && (is_always_inline
+	   || !lookup_attribute ("noipa",
+				 DECL_ATTRIBUTES (node->decl))));
+  const bool at_calls_eligible
+    = (consider_at_calls && strub_eligible
+       && can_strub_at_calls_p (node));
+  const bool internal_eligible
+    = (consider_internal && strub_eligible
+       && (is_always_inline
+	   || (!lookup_attribute ("noclone", DECL_ATTRIBUTES (node->decl))
+	       && can_strub_internally_p (node))));
+
+  /* In addition to the strict eligibility requirements, some additional
+     constraints are placed on implicit selection of certain modes.  These do
+     not prevent the selection of a mode if explicitly specified as part of a
+     function interface (the strub attribute), but they may prevent modes from
+     being selected by the command line or by function bodies.  The only actual
+     constraint is on at-calls mode: since we change the function's exposed
+     signature, we won't do it implicitly if the function can possibly be used
+     in ways that do not expect the signature change, e.g., if the function is
+     available to or interposable by other units, if its address is taken,
+     etc.  */
+  const bool at_calls_viable
+    = (at_calls_eligible
+       && (strub_attr
+	   || (node->has_gimple_body_p ()
+	       && node->get_availability () > AVAIL_INTERPOSABLE
+	       && ((!node->externally_visible
+#if 0
+		    /* We wish to bypass the test below for functions that are
+		       not externally visible, but that's a little too broad: we
+		       do not wish to skip them for e.g. gnu_inline
+		       functions.  */
+		    && !TREE_PUBLIC (node->decl)
+		    && !DECL_EXTERNAL (node->decl)
+#endif
+		    )
+		   || (node->binds_to_current_def_p ()
+		       && node->can_be_local_p ()))
+	       && node->only_called_directly_p ())));
+  const bool internal_viable
+    = (internal_eligible);
+
+  /* Shorthand.  */
+  const bool strub_viable
+    = (at_calls_viable || internal_viable);
+
+  /* We wish to analyze the body, to look for implicit requests for strub, both
+     to implicitly enable it when the body calls for it, and to report errors if
+     the body calls for it but neither mode is viable (even if that follows from
+     non-eligibility because of the explicit specification of some non-strubbing
+     mode).  We can refrain from scanning the body only in rare circumstances:
+     when strub is enabled by a function attribute (scanning might be redundant
+     in telling us to also enable it), and when we are enabling strub implicitly
+     but there are non-viable modes: we want to know whether strubbing is
+     required, to fallback to another mode, even if we're only enabling a
+     certain mode, or, when either mode would do, to report an error if neither
+     happens to be viable.  */
+  const bool analyze_body
+    = (strub_attr
+       ? !consider_strub
+       : (strub_flag_auto
+	  || (strub_flag_viable && (!at_calls_viable && !internal_viable))
+	  || (strub_flag_either && !strub_viable)));
+
+  /* Cases in which strubbing is enabled or disabled by strub_flag_auto.
+     Unsatisfiable requests ought to be reported.  */
+  const bool strub_required
+    = ((strub_attr && consider_strub)
+       || (analyze_body && strub_from_body_p (node)));
+
+  /* Besides the required cases, we want to abide by the requests to enabling on
+     an if-viable basis.  */
+  const bool strub_enable
+    = (strub_required
+       || (strub_flag_at_calls && at_calls_viable)
+       || (strub_flag_internal && internal_viable)
+       || (strub_flag_either && strub_viable));
+
+  /* And now we're finally ready to select a mode that abides by the viability
+     and eligibility constraints, and that satisfies the strubbing requirements
+     and requests, subject to the constraints.  If both modes are viable and
+     strub is to be enabled, pick STRUB_AT_CALLS unless STRUB_INTERNAL was named
+     as preferred.  */
+  const enum strub_mode mode
+    = ((strub_enable && is_always_inline)
+       ? (strub_required ? STRUB_INLINABLE : STRUB_CALLABLE)
+#if 0
+       : (!strub_enable && strub_required && strub_attr)
+       ? req_mode
+#endif
+       : (strub_enable && internal_viable
+	  && (strub_flag_internal || !at_calls_viable))
+       ? STRUB_INTERNAL
+       : (strub_enable && at_calls_viable)
+       ? STRUB_AT_CALLS
+       : consider_callable
+       ? STRUB_CALLABLE
+       : STRUB_DISABLED);
+
+  switch (mode)
+    {
+    case STRUB_CALLABLE:
+      if (is_always_inline)
+	break;
+      /* Fall through.  */
+
+    case STRUB_DISABLED:
+      if (strub_enable && !strub_attr)
+	{
+	  gcc_checking_assert (analyze_body);
+	  error_at (DECL_SOURCE_LOCATION (node->decl),
+		    "%qD requires strub, but no viable strub mode was found",
+		    node->decl);
+	  break;
+	}
+      /* Fall through.  */
+
+    case STRUB_AT_CALLS:
+    case STRUB_INTERNAL:
+    case STRUB_INLINABLE:
+      /* Differences from a mode requested through a function attribute are
+	 reported in set_strub_mode_to.  */
+      break;
+
+    case STRUB_WRAPPED:
+    case STRUB_WRAPPER:
+    default:
+      gcc_unreachable ();
+    }
+
+  return mode;
+}
+
+/* Record strub MODE for NODE, adjusting the "strub" attribute chain on
+   its decl accordingly.  If a pre-existing "strub" attribute requested
+   a mode incompatible with MODE, report the conflict and, for
+   explicitly-requested at-calls or internal strub, let the can_strub_*
+   predicates explain why the request could not be honored.  Leading
+   "strub" attributes that don't match MODE are dropped; a new one
+   carrying MODE is prepended unless a matching one is found.  */
+static void
+set_strub_mode_to (cgraph_node *node, enum strub_mode mode)
+{
+  tree attr = get_strub_attr_from_decl (node->decl);
+  enum strub_mode req_mode = get_strub_mode_from_attr (attr);
+
+  if (attr)
+    {
+      /* Check for and report incompatible mode changes.  Splitting a
+	 requested internal strub into wrapper/wrapped, and turning a
+	 requested internal, at-calls or callable strub into inlinable,
+	 are not incompatibilities.  */
+      if (mode != req_mode
+	  && !(req_mode == STRUB_INTERNAL
+	       && (mode == STRUB_WRAPPED
+		   || mode == STRUB_WRAPPER))
+	  && !((req_mode == STRUB_INTERNAL
+		|| req_mode == STRUB_AT_CALLS
+		|| req_mode == STRUB_CALLABLE)
+	       && mode == STRUB_INLINABLE))
+	{
+	  error_at (DECL_SOURCE_LOCATION (node->decl),
+		    "strub mode %i selected for %qD, when %i was requested",
+		    (int) mode, node->decl,
+		    (int) get_strub_mode_from_attr (attr));
+	  /* For an alias, the mode was taken from the target, so point
+	     at the target as well.  */
+	  if (node->alias)
+	    {
+	      cgraph_node *target = node->ultimate_alias_target ();
+	      error_at (DECL_SOURCE_LOCATION (target->decl),
+			"the incompatible selection was determined"
+			" by ultimate alias target %qD",
+			target->decl);
+	    }
+
+	  /* Report any incompatibilities with explicitly-requested strub.  */
+	  switch (req_mode)
+	    {
+	    case STRUB_AT_CALLS:
+	      can_strub_at_calls_p (node, true);
+	      break;
+
+	    case STRUB_INTERNAL:
+	      can_strub_internally_p (node, true);
+	      break;
+
+	    default:
+	      break;
+	    }
+	}
+
+      /* Drop any incompatible strub attributes leading the decl attribute
+	 chain.  Return if we find one with the mode we need.  */
+      for (;;)
+	{
+	  if (mode == req_mode)
+	    return;
+
+	  if (DECL_ATTRIBUTES (node->decl) != attr)
+	    break;
+
+	  DECL_ATTRIBUTES (node->decl) = TREE_CHAIN (attr);
+	  attr = get_strub_attr_from_decl (node->decl);
+	  if (!attr)
+	    break;
+
+	  req_mode = get_strub_mode_from_attr (attr);
+	}
+    }
+  /* Without any attribute, REQ_MODE is the mode implied by its absence;
+     don't add a redundant attribute in that case.  */
+  else if (mode == req_mode)
+    return;
+
+  /* Prepend a "strub" attribute carrying MODE.  */
+  DECL_ATTRIBUTES (node->decl) = tree_cons (get_identifier ("strub"),
+					    get_strub_mode_attr_value (mode),
+					    DECL_ATTRIBUTES (node->decl));
+}
+
+/* Determine the strub mode for NODE -- an alias takes its ultimate
+   alias target's mode, any other node gets it from compute_strub_mode
+   -- then record it on the node with set_strub_mode_to and return
+   it.  */
+static enum strub_mode
+set_strub_mode (cgraph_node *node)
+{
+  tree attr = get_strub_attr_from_decl (node->decl);
+
+  enum strub_mode mode = (node->alias
+			  ? get_strub_mode (node->ultimate_alias_target ())
+			  : compute_strub_mode (node, attr));
+
+  set_strub_mode_to (node, mode);
+
+  return mode;
+}
+
+#if 0
+/* Non-strub functions shouldn't be called from strub functions,
+   except through callable ones.  Return TRUE iff CALLEE may be called
+   from CALLER.  */
+
+static bool
+strub_callable_from_p (cgraph_node *callee, cgraph_node *caller)
+{
+  strub_mode caller_mode = get_strub_mode (caller);
+
+  /* A caller that is not itself a strub context may call anything.  */
+  switch (caller_mode)
+    {
+    case STRUB_WRAPPED:
+    case STRUB_AT_CALLS:
+    case STRUB_INTERNAL:
+    case STRUB_INLINABLE:
+      break;
+
+    case STRUB_WRAPPER:
+    case STRUB_DISABLED:
+    case STRUB_CALLABLE:
+      return true;
+
+    default:
+      gcc_unreachable ();
+    }
+
+  strub_mode callee_mode = get_strub_mode (callee);
+
+  /* The caller is a strub context: only strubbed callees, or callees
+     explicitly marked callable, may be called from it.  (The original
+     draft switched on CALLER_MODE again here, leaving CALLEE_MODE
+     unused, so the callee was never actually checked.)  */
+  switch (callee_mode)
+    {
+    case STRUB_WRAPPED:
+    case STRUB_AT_CALLS:
+    case STRUB_INTERNAL:
+    case STRUB_INLINABLE:
+      break;
+
+    case STRUB_WRAPPER:
+    case STRUB_DISABLED:
+      return false;
+
+    case STRUB_CALLABLE:
+      /* Callable functions are the sanctioned escape hatch.  */
+      break;
+
+    default:
+      gcc_unreachable ();
+    }
+
+  return true;
+}
+#endif
+
+/* We wish to avoid inlining WRAPPED functions back into their
+   WRAPPERs.  More generally, we wish to avoid inlining
+   strubbed functions into non-strubbed ones.  Return TRUE iff
+   inlining CALLEE into CALLER is acceptable strub-wise.  */
+
+bool
+strub_inlinable_p (cgraph_node *callee, cgraph_node *caller)
+{
+  strub_mode callee_mode = get_strub_mode (callee);
+
+  switch (callee_mode)
+    {
+    case STRUB_WRAPPED:
+    case STRUB_AT_CALLS:
+    case STRUB_INTERNAL:
+    case STRUB_INLINABLE:
+      break;
+
+    /* A callee that is not itself strubbed may be inlined anywhere.  */
+    case STRUB_WRAPPER:
+    case STRUB_DISABLED:
+    case STRUB_CALLABLE:
+      return true;
+
+    default:
+      gcc_unreachable ();
+    }
+
+  strub_mode caller_mode = get_strub_mode (caller);
+
+  switch (caller_mode)
+    {
+    /* A strubbed callee may only be inlined into strubbed callers.  */
+    case STRUB_WRAPPED:
+    case STRUB_AT_CALLS:
+    case STRUB_INTERNAL:
+    case STRUB_INLINABLE:
+      return true;
+
+    case STRUB_WRAPPER:
+    case STRUB_DISABLED:
+    case STRUB_CALLABLE:
+      break;
+
+    default:
+      gcc_unreachable ();
+    }
+
+  return false;
+}
+
+namespace {
+
+/* Pass data for the strub mode computation pass.  */
+const pass_data pass_data_ipa_strub_mode = {
+  SIMPLE_IPA_PASS,
+  "strubm",
+  OPTGROUP_NONE,
+  TV_NONE,
+  PROP_cfg, // properties_required
+  0,	    // properties_provided
+  0,	    // properties_destroyed
+  0,	    // todo_flags_start
+  0,	    // todo_flags_finish
+};
+
+/* Simple IPA pass that computes and records the strub mode of every
+   function; gated on flag_strub being nonzero.  */
+class pass_ipa_strub_mode : public simple_ipa_opt_pass
+{
+public:
+  pass_ipa_strub_mode (gcc::context *ctxt)
+    : simple_ipa_opt_pass (pass_data_ipa_strub_mode, ctxt)
+  {}
+  opt_pass *clone () { return new pass_ipa_strub_mode (m_ctxt); }
+  virtual bool gate (function *) { return flag_strub; }
+  virtual unsigned int execute (function *);
+};
+
+/* Pass data for the strub transformation pass proper; after it runs,
+   request SSA update, CFG cleanup, cgraph edge rebuilding and IL
+   verification.  */
+const pass_data pass_data_ipa_strub = {
+  SIMPLE_IPA_PASS,
+  "strub",
+  OPTGROUP_NONE,
+  TV_NONE,
+  PROP_cfg, // properties_required
+  0,	    // properties_provided
+  0,	    // properties_destroyed
+  0,	    // todo_flags_start
+  TODO_update_ssa
+  | TODO_cleanup_cfg
+  | TODO_rebuild_cgraph_edges
+  | TODO_verify_il, // todo_flags_finish
+};
+
+/* The strub transformation pass proper, gated on flag_strub.  Besides
+   the pass entry points, it hosts lazily-created builtins, identifiers
+   and types shared by the transformation.  */
+class pass_ipa_strub : public simple_ipa_opt_pass
+{
+public:
+  pass_ipa_strub (gcc::context *ctxt)
+    : simple_ipa_opt_pass (pass_data_ipa_strub, ctxt)
+  {}
+  opt_pass *clone () { return new pass_ipa_strub (m_ctxt); }
+  virtual bool gate (function *) { return flag_strub; }
+  virtual unsigned int execute (function *);
+
+  /* Define a static getter for a regular builtin: reuse the decl
+     registered for CODE if there is one, otherwise build it with the
+     given FNTYPELIST, mark it nothrow and strub-callable, and register
+     it as the decl for CODE.  */
+#define DEF_NM_BUILTIN(NAME, CODE, FNTYPELIST)			\
+  static tree get_ ## NAME () {					\
+    tree decl = builtin_decl_explicit (CODE);			\
+    if (!decl)							\
+      {								\
+	tree type = build_function_type_list FNTYPELIST;	\
+	decl = add_builtin_function				\
+	  ("__builtin_" #NAME,					\
+	   type, CODE, BUILT_IN_NORMAL,				\
+	   NULL, NULL);						\
+	TREE_NOTHROW (decl) = true;				\
+	set_builtin_decl ((CODE), decl, true);			\
+	set_strub_mode_to (cgraph_node::get_create (decl),	\
+			   STRUB_CALLABLE);			\
+      }								\
+    return decl;						\
+  }
+
+  DEF_NM_BUILTIN (stack_address,
+		  BUILT_IN_STACK_ADDRESS,
+		  (ptr_type_node, NULL))
+
+#undef DEF_NM_BUILTIN
+
+  /* Likewise, for the __strub_* runtime support functions, declared at
+     external scope, with an optional "fn spec" attribute built from
+     FNSPEC.  */
+#define DEF_SS_BUILTIN(NAME, FNSPEC, CODE, FNTYPELIST)		\
+  static tree get_ ## NAME () {					\
+    tree decl = builtin_decl_explicit (CODE);			\
+    if (!decl)							\
+      {								\
+	tree type = build_function_type_list FNTYPELIST;	\
+	tree attrs = NULL;					\
+	if (FNSPEC)						\
+	  attrs = tree_cons (get_identifier ("fn spec"),	\
+			     build_tree_list			\
+			     (NULL_TREE,			\
+			      build_string (strlen (FNSPEC),	\
+					    (FNSPEC))),		\
+			     attrs);				\
+	decl = add_builtin_function_ext_scope			\
+	  ("__builtin___strub_" #NAME,				\
+	   type, CODE, BUILT_IN_NORMAL,				\
+	   "__strub_" #NAME, attrs);				\
+	TREE_NOTHROW (decl) = true;				\
+	set_builtin_decl ((CODE), decl, true);			\
+	set_strub_mode_to (cgraph_node::get_create (decl),	\
+			   STRUB_CALLABLE);			\
+      }								\
+    return decl;						\
+  }
+
+  DEF_SS_BUILTIN (enter, ". Ot",
+		  BUILT_IN___STRUB_ENTER,
+		  (void_type_node, get_pptr (), NULL))
+  DEF_SS_BUILTIN (update, ". Wt",
+		  BUILT_IN___STRUB_UPDATE,
+		  (void_type_node, get_pptr (), NULL))
+  DEF_SS_BUILTIN (leave, ". w ",
+		  BUILT_IN___STRUB_LEAVE,
+		  (void_type_node, get_pptr (), NULL))
+
+#undef DEF_SS_BUILTIN
+
+  /* Lazily-created ".strub."-prefixed identifiers for the artificial
+     parms the pass introduces.  */
+#define DEF_IDENT(NAME)					\
+  static inline tree get_ ## NAME () {			\
+    static tree identifier = NULL_TREE;			\
+    if (!identifier)					\
+      identifier = get_identifier (".strub." #NAME);	\
+    return identifier;					\
+  }
+
+  DEF_IDENT (watermark_ptr)
+  DEF_IDENT (va_list_ptr)
+  DEF_IDENT (apply_args_ptr)
+
+#undef DEF_IDENT
+
+  /* Lazily-created types used by the strub builtins and parms.  */
+#define DEF_TYPE(NAME, INIT)			\
+  static inline tree get_ ## NAME () {		\
+    static tree type = NULL_TREE;		\
+    if (!type)					\
+      type = (INIT);				\
+    return type;				\
+  }
+
+  /* Pointer to pointer (the type of the watermark argument).  */
+  DEF_TYPE (pptr, build_pointer_type (ptr_type_node))
+
+  /* Const restrict-qualified variant of the above, for parms.  */
+  DEF_TYPE (qpptr,
+	    build_qualified_type (get_pptr (),
+				  TYPE_QUAL_RESTRICT
+				  | TYPE_QUAL_CONST))
+
+  /* Const restrict-qualified pointer to va_list.  */
+  DEF_TYPE (qpvalst,
+	    build_qualified_type (build_pointer_type
+				  (va_list_type_node),
+				  TYPE_QUAL_RESTRICT
+				  | TYPE_QUAL_CONST))
+
+#undef DEF_TYPE
+
+  /* Append to SEQ (and return the extended sequence) a call to the
+     watermark-update builtin passing WMPTR; when NODE is given and call
+     graph edges are not implicit, also add an edge from NODE to the
+     builtin with profile COUNT.  */
+  static inline gimple_seq
+  call_update_watermark (tree wmptr, cgraph_node *node, profile_count count,
+			 gimple_seq seq = NULL)
+    {
+      tree uwm = get_update ();
+      gcall *update = gimple_build_call (uwm, 1, wmptr);
+      gimple_seq_add_stmt (&seq, update);
+      if (node)
+#if !IMPLICIT_CGRAPH_EDGES
+	node->create_edge (cgraph_node::get_create (uwm), update, count, false);
+#else
+	(void)count;
+#endif
+      return seq;
+    }
+
+};
+
+} // anon namespace
+
+#if 0
+/* Return true if the call STMT may throw: exceptions are enabled and
+   the call is not marked nothrow.  */
+static bool
+may_throw_p (gcall *stmt)
+{
+  return flag_exceptions && !gimple_call_nothrow_p (stmt);
+}
+
+/* Classify the call STMT for strubbing purposes: return FALSE for
+   internal calls, calls with no outgoing path (noreturn and nothrow),
+   tail calls, and a list of builtins that need no strubbing around
+   them; return TRUE for alloca-like calls, calls to unknown functions,
+   and non-normal or unrecognized builtins.  */
+static bool
+strub_this_call_p (gcall *stmt)
+{
+  if (gimple_call_internal_p (stmt))
+    return false;
+
+  /* If there's no outgoing path in which to do the scrubbing, don't
+     bother.  */
+  if (gimple_call_noreturn_p (stmt) && !may_throw_p (stmt))
+    return false;
+
+  /* ??? Maybe non-mandatory tail calls should be disabled for
+     scrubbing.  Or maybe it won't matter, as long as both tail-caller
+     and callee are scrubbing-capable.  */
+  if (gimple_call_must_tail_p (stmt) || gimple_call_tail_p (stmt))
+    return false;
+
+  /* Alloca-like calls grow the stack frame.  */
+  if (gimple_alloca_call_p (stmt))
+    return true;
+
+  /* An indirect or otherwise unknown callee: assume the worst.  */
+  tree fndecl = gimple_call_fndecl (stmt);
+  if (!fndecl)
+    return true;
+
+  if (DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_NORMAL)
+    return true;
+
+  enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl);
+
+  switch (fcode)
+    {
+    CASE_BUILT_IN_ALLOCA:
+      return true;
+
+    case BUILT_IN_NONE:
+      return true;
+
+    /* Strub's own runtime calls need no further strubbing.  */
+    case BUILT_IN___STRUB_ENTER:
+    case BUILT_IN___STRUB_UPDATE:
+    case BUILT_IN___STRUB_LEAVE:
+      return false;
+
+    case BUILT_IN_CLASSIFY_TYPE:
+    case BUILT_IN_CONSTANT_P:
+      return false;
+
+    case BUILT_IN_RETURN_ADDRESS:
+    case BUILT_IN_FRAME_ADDRESS:
+    case BUILT_IN_STACK_ADDRESS:
+    case BUILT_IN_AGGREGATE_INCOMING_ADDRESS:
+      return false;
+
+    case BUILT_IN_STACK_SAVE:
+    case BUILT_IN_STACK_RESTORE:
+    case BUILT_IN_ASAN_ALLOCAS_UNPOISON:
+      return false;
+
+    case BUILT_IN_SETJMP_SETUP:
+    case BUILT_IN_SETJMP_RECEIVER:
+    case BUILT_IN_LONGJMP:
+    case BUILT_IN_NONLOCAL_GOTO:
+    case BUILT_IN_UPDATE_SETJMP_BUF:
+    case BUILT_IN_TRAP:
+    case BUILT_IN_UNREACHABLE:
+      return false;
+
+    case BUILT_IN_UNWIND_INIT:
+    case BUILT_IN_DWARF_CFA:
+#ifdef DWARF2_UNWIND_INFO
+    case BUILT_IN_DWARF_SP_COLUMN:
+    case BUILT_IN_INIT_DWARF_REG_SIZES:
+#endif
+    case BUILT_IN_FROB_RETURN_ADDR:
+    case BUILT_IN_EXTRACT_RETURN_ADDR:
+    case BUILT_IN_EH_RETURN:
+    case BUILT_IN_EH_RETURN_DATA_REGNO:
+    case BUILT_IN_EXTEND_POINTER:
+    case BUILT_IN_EH_POINTER:
+    case BUILT_IN_EH_FILTER:
+    case BUILT_IN_EH_COPY_VALUES:
+      return false;
+
+    case BUILT_IN_VA_START:
+    case BUILT_IN_VA_END:
+    case BUILT_IN_VA_COPY:
+    case BUILT_IN_EXPECT:
+    case BUILT_IN_EXPECT_WITH_PROBABILITY:
+    case BUILT_IN_ASSUME_ALIGNED:
+    case BUILT_IN_PREFETCH:
+      return false;
+
+    case BUILT_IN_ATOMIC_ALWAYS_LOCK_FREE:
+    case BUILT_IN_OBJECT_SIZE:
+    case BUILT_IN_THREAD_POINTER:
+    case BUILT_IN_SET_THREAD_POINTER:
+      return false;
+
+    default:
+      return true;
+    }
+}
+#endif
+
+typedef hash_set<tree> indirect_parms_t;
+
+/* If OP refers to a PARM_DECL registered in INDIRECT_PARMS (a parm that
+   was turned into a reference), return its replacement: for the DECL
+   itself, a dereference of it; for the ADDR_EXPR of such a DECL, the
+   (now pointer-typed) DECL.  Clear *REC in both DECL and ADDR_EXPR
+   cases so the tree walk doesn't descend into them.  Return NULL_TREE
+   when no replacement is called for.  */
+static tree
+maybe_make_indirect (indirect_parms_t &indirect_parms, tree op, int *rec)
+{
+  if (DECL_P (op))
+    {
+      *rec = 0;
+      if (indirect_parms.contains (op))
+	{
+	  /* Prefer a folded dereference, falling back to a plain
+	     MEM_REF of the parm.  */
+	  tree ret = gimple_fold_indirect_ref (op);
+	  if (!ret)
+	    ret = build2 (MEM_REF,
+			  TREE_TYPE (TREE_TYPE (op)),
+			  op,
+			  build_int_cst (TREE_TYPE (op), 0));
+	  return ret;
+	}
+    }
+  else if (TREE_CODE (op) == ADDR_EXPR
+	   && DECL_P (TREE_OPERAND (op, 0)))
+    {
+      *rec = 0;
+      if (indirect_parms.contains (TREE_OPERAND (op, 0)))
+	return TREE_OPERAND (op, 0);
+    }
+
+  return NULL_TREE;
+}
+
+/* Tree-walk callback that rewrites references to parms turned
+   indirect.  ARG is a walk_stmt_info whose info field points at the
+   indirect_parms_t set.  Skips null operands and types; otherwise
+   replaces *OP via maybe_make_indirect, setting wi->changed when a
+   replacement took place.  Always returns NULL_TREE (never cuts the
+   walk short).  */
+static tree
+walk_make_indirect (tree *op, int *rec, void *arg)
+{
+  walk_stmt_info *wi = (walk_stmt_info *)arg;
+  indirect_parms_t &indirect_parms = *(indirect_parms_t *)wi->info;
+
+  if (!*op || TYPE_P (*op))
+    {
+      *rec = 0;
+      return NULL_TREE;
+    }
+
+  if (tree repl = maybe_make_indirect (indirect_parms, *op, rec))
+    {
+      *op = repl;
+      wi->changed = true;
+    }
+
+  return NULL_TREE;
+}
+
+/* Tree-walk callback that re-gimplifies ADDR_EXPR operands that are
+   not valid gimple values, forcing them into temporaries inserted
+   before the statement at the iterator carried in ARG's
+   (walk_stmt_info) info field, and setting wi->changed accordingly.
+   Never descends into operands; always returns NULL_TREE.  */
+static tree
+walk_regimplify_addr_expr (tree *op, int *rec, void *arg)
+{
+  walk_stmt_info *wi = (walk_stmt_info *)arg;
+  gimple_stmt_iterator &gsi = *(gimple_stmt_iterator *)wi->info;
+
+  *rec = 0;
+
+  if (!*op || TREE_CODE (*op) != ADDR_EXPR)
+    return NULL_TREE;
+
+  if (!is_gimple_val (*op))
+    {
+      tree ret = force_gimple_operand_gsi (&gsi, *op, true,
+					   NULL_TREE, true, GSI_SAME_STMT);
+      gcc_assert (ret != *op);
+      *op = ret;
+      wi->changed = true;
+    }
+
+  return NULL_TREE;
+}
+
+/* Return the type to be used for passing PARM (a PARM_DECL) by
+   reference.  When NONALIASED (the default), give the reference type a
+   fresh alias set that has the original reference type's alias set as
+   a subset, so the compiler can tell the pointed-to object is not
+   otherwise aliased.  */
+static tree
+build_ref_type_for (tree parm, bool nonaliased = true)
+{
+  gcc_checking_assert (TREE_CODE (parm) == PARM_DECL);
+
+  tree ref_type = build_reference_type (TREE_TYPE (parm));
+
+  if (!nonaliased)
+    return ref_type;
+
+  /* Each PARM turned indirect still points to the distinct memory area at the
+     wrapper, and the reference in unchanging, so we might qualify it, but...
+     const is not really important, since we're only using default defs for the
+     reference parm anyway, and not introducing any defs, and restrict seems to
+     cause trouble.  E.g., libgnat/s-concat3.adb:str_concat_3 has memmoves that,
+     if it's wrapped, the memmoves are deleted in dse1.  Using a distinct alias
+     set seems to not run afoul of this problem, and it hopefully enables the
+     compiler to tell the pointers do point to objects that are not otherwise
+     aliased.  */
+#if 1
+  tree qref_type = build_variant_type_copy (ref_type);
+
+  TYPE_ALIAS_SET (qref_type) = new_alias_set ();
+  record_alias_subset (TYPE_ALIAS_SET (qref_type), get_alias_set (ref_type));
+
+  return qref_type;
+#else
+  /* Disabled alternative: const restrict qualification, which runs into
+     the dse1 problem described above.  */
+  tree qref_type = build_qualified_type (ref_type,
+					 TYPE_QUAL_RESTRICT
+					 | TYPE_QUAL_CONST);
+
+  return qref_type;
+#endif
+}
+
+/* Add cgraph edges from current_function_decl to callees in SEQ with frequency
+   COUNT, assuming all calls in SEQ are direct.  */
+static void
+add_call_edges_for_seq (gimple_seq seq, profile_count count)
+{
+  /* Nothing to do when call graph edges are maintained implicitly.  */
+#if IMPLICIT_CGRAPH_EDGES
+  return;
+#endif
+
+  cgraph_node *node = cgraph_node::get_create (current_function_decl);
+
+  for (gimple_stmt_iterator gsi = gsi_start (seq);
+       !gsi_end_p (gsi); gsi_next (&gsi))
+    {
+      gimple *stmt = gsi_stmt (gsi);
+
+      if (!is_a <gcall *> (stmt))
+	continue;
+
+      gcall *call = as_a <gcall *> (stmt);
+      tree callee = gimple_call_fndecl (call);
+      /* Direct calls only, per the function's contract.  */
+      gcc_checking_assert (callee);
+      node->create_edge (cgraph_node::get_create (callee), call, count, false);
+    }
+}
+
+/* Arrange for SEQ to run after the (presumed call) statement at GSI on
+   every outgoing path, as if the statement were wrapped in a
+   try/finally: insert SEQ (or a copy) after the statement or on its
+   fall-through edge, and, unless the statement cannot throw, also on
+   its EH path -- creating a fresh EH cleanup region, landing pad and
+   resx when the statement is not in any EH region yet.  Does nothing
+   when there is no outgoing path at all (noreturn and nothrow).  */
+static void
+gsi_insert_finally_seq_after_call (gimple_stmt_iterator gsi, gimple_seq seq)
+{
+  gimple *stmt = gsi_stmt (gsi);
+
+  gcall *call = is_a <gcall *> (stmt) ? as_a <gcall *> (stmt) : NULL;
+  bool noreturn_p = call && gimple_call_noreturn_p (call);
+  int eh_lp = lookup_stmt_eh_lp (stmt);
+  bool must_not_throw_p = eh_lp < 0;
+  bool nothrow_p = (must_not_throw_p
+		    || (call && gimple_call_nothrow_p (call))
+		    || (eh_lp <= 0
+			&& (TREE_NOTHROW (cfun->decl)
+			    || !flag_exceptions)));
+
+  /* No path out of the statement: nowhere to insert SEQ.  */
+  if (noreturn_p && nothrow_p)
+    return;
+
+  /* Don't expect an EH edge if we're not to throw, or if we're not in an EH
+     region yet.  */
+  bool no_eh_edge_p = (nothrow_p || !eh_lp);
+  bool must_end_bb = stmt_ends_bb_p (stmt);
+
+  /* Locate the fall-through and EH successor edges, if the statement
+     ends its block and any such edges are expected.  */
+  edge eft = NULL, eeh = NULL;
+  if (must_end_bb && !(noreturn_p && no_eh_edge_p))
+    {
+      gcc_checking_assert (gsi_one_before_end_p (gsi));
+
+      edge e;
+      edge_iterator ei;
+      FOR_EACH_EDGE (e, ei, gsi_bb (gsi)->succs)
+	{
+	  if ((e->flags & EDGE_EH))
+	    {
+	      gcc_checking_assert (!eeh);
+	      eeh = e;
+#if !CHECKING_P
+	      if (eft || noreturn_p)
+		break;
+#endif
+	    }
+	  if ((e->flags & EDGE_FALLTHRU))
+	    {
+	      gcc_checking_assert (!eft);
+	      eft = e;
+#if !CHECKING_P
+	      if (eeh || no_eh_edge_p)
+		break;
+#endif
+	    }
+	}
+
+      gcc_checking_assert (!(eft && (eft->flags & EDGE_FALLTHRU))
+			   == noreturn_p);
+      gcc_checking_assert (!(eeh && (eeh->flags & EDGE_EH))
+			   == no_eh_edge_p);
+      gcc_checking_assert (eft != eeh);
+    }
+
+  /* Insert SEQ on the fall-through path; keep a copy for the EH path
+     when one is needed too.  */
+  if (!noreturn_p)
+    {
+      gimple_seq nseq = nothrow_p ? seq : gimple_seq_copy (seq);
+
+      if (must_end_bb)
+	{
+	  gcc_checking_assert (gsi_one_before_end_p (gsi));
+	  add_call_edges_for_seq (nseq, eft->count ());
+	  gsi_insert_seq_on_edge_immediate (eft, nseq);
+	}
+      else
+	{
+	  add_call_edges_for_seq (nseq, gsi_bb (gsi)->count);
+	  gsi_insert_seq_after (&gsi, nseq, GSI_SAME_STMT);
+	}
+    }
+
+  if (nothrow_p)
+    return;
+
+  /* Already in an EH region: put SEQ on the existing EH edge.  */
+  if (eh_lp)
+    {
+      add_call_edges_for_seq (seq, eeh->count ());
+      gsi_insert_seq_on_edge_immediate (eeh, seq);
+      return;
+    }
+
+  /* A throwing call may appear within a basic block in a function that doesn't
+     have any EH regions.  We're going to add a cleanup if so, therefore the
+     block will have to be split.  */
+  basic_block bb = gsi_bb (gsi);
+  if (!gsi_one_before_end_p (gsi))
+    split_block (bb, stmt);
+
+  /* Create a new block for the EH cleanup.  */
+  basic_block bb_eh_cleanup = create_empty_bb (bb);
+  if (dom_info_available_p (CDI_DOMINATORS))
+    set_immediate_dominator (CDI_DOMINATORS, bb_eh_cleanup, bb);
+  if (current_loops)
+    add_bb_to_loop (bb_eh_cleanup, current_loops->tree_root);
+
+  /* Make the new block an EH cleanup for the call.  */
+  eh_region new_r = gen_eh_region_cleanup (NULL);
+  eh_landing_pad lp = gen_eh_landing_pad (new_r);
+  tree label = gimple_block_label (bb_eh_cleanup);
+  lp->post_landing_pad = label;
+  EH_LANDING_PAD_NR (label) = lp->index;
+  add_stmt_to_eh_lp (stmt, lp->index);
+
+  /* Add the cleanup code to the EH cleanup block.  */
+  gsi = gsi_after_labels (bb_eh_cleanup);
+  gsi_insert_seq_before (&gsi, seq, GSI_SAME_STMT);
+
+  /* And then propagate the exception further.  */
+  gresx *resx = gimple_build_resx (new_r->index);
+  gsi_insert_before (&gsi, resx, GSI_SAME_STMT);
+
+  /* Finally, wire the EH cleanup block into the CFG.  */
+  make_eh_edges (stmt);
+  add_call_edges_for_seq (seq, single_pred_edge (bb_eh_cleanup)->count ());
+}
+
+/* Copy the attribute list at *ATTRS, minus any NAME attributes, leaving
+   shareable trailing nodes alone.  */
+
+static inline void
+remove_named_attribute_unsharing (const char *name, tree *attrs)
+{
+  while (tree found = lookup_attribute (name, *attrs))
+    {
+      /* Copy nodes up to the next NAME attribute.  */
+      while (*attrs != found)
+	{
+	  *attrs = tree_cons (TREE_PURPOSE (*attrs),
+			      TREE_VALUE (*attrs),
+			      TREE_CHAIN (*attrs));
+	  attrs = &TREE_CHAIN (*attrs);
+	}
+      /* Then drop it.  ATTRS now points at the link holding FOUND;
+	 everything before it has been copied, so unlinking here cannot
+	 affect attribute lists shared with other decls.  */
+      gcc_checking_assert (*attrs == found);
+      *attrs = TREE_CHAIN (*attrs);
+    }
+}
+
+/* Compute and record the strub mode of every function.  Non-alias
+   nodes are processed before aliases, so that a target's mode is
+   already set when an alias takes it over.  If no function ends up
+   requiring strubbing, clear flag_strub so the strub transformation
+   pass gates off.  */
+unsigned int
+pass_ipa_strub_mode::execute (function *)
+{
+  cgraph_node *onode;
+
+  /* If no strub flag was given in the command line,
+     set the actual default.  */
+  if (flag_strub == -2)
+    flag_strub = -1;
+
+  bool any_strub = false;
+
+  for (int aliases = 0; aliases <= 1; aliases++)
+    FOR_EACH_FUNCTION (onode)
+    {
+      /* First pass: non-aliases only; second pass: aliases only.  */
+      if (!onode->alias != !aliases)
+	continue;
+
+      enum strub_mode mode = set_strub_mode (onode);
+
+      if (mode == STRUB_AT_CALLS || mode == STRUB_INTERNAL)
+	any_strub = true;
+    }
+
+  if (!any_strub)
+    flag_strub = 0;
+
+  return 0;
+}
+
+/* Instantiate the strub mode computation pass.  */
+simple_ipa_opt_pass *
+make_pass_ipa_strub_mode (gcc::context *ctxt)
+{
+  return new pass_ipa_strub_mode (ctxt);
+}
+
+unsigned int
+pass_ipa_strub::execute (function *)
+{
+  cgraph_node *onode;
+
+  FOR_EACH_FUNCTION (onode)
+  {
+    enum strub_mode mode = get_strub_mode (onode);
+
+    if (mode == STRUB_AT_CALLS)
+      {
+	int named_args = 0;
+
+	/* Adjust the signature, and all callers.  Add the new argument after all
+	   named arguments, so as to not mess with attr_fnspec or any other
+	   attributes that reference parameters.  */
+	TREE_TYPE (onode->decl) = build_distinct_type_copy (TREE_TYPE
+							    (onode->decl));
+
+	tree *pargs = &DECL_ARGUMENTS (onode->decl);
+
+	/* A noninterposable_alias reuses the same parm decl chain, don't add
+	   the parm twice.  We still have to adjust the type.  */
+	bool aliased_parms = (onode->alias && *pargs
+			      && DECL_CONTEXT (*pargs) != onode->decl);
+
+	if (TYPE_ARG_TYPES (TREE_TYPE (onode->decl)))
+	  {
+	    tree *tlist = &TYPE_ARG_TYPES (TREE_TYPE (onode->decl));
+	    while (*pargs)
+	      {
+		named_args++;
+		*tlist = tree_cons (TREE_PURPOSE (*tlist),
+				    TREE_VALUE (*tlist),
+				    TREE_CHAIN (*tlist));
+		tlist = &TREE_CHAIN (*tlist);
+		pargs = &DECL_CHAIN (*pargs);
+	      }
+	    *tlist = tree_cons (NULL_TREE, get_qpptr (), *tlist);
+	  }
+
+	if (aliased_parms)
+	  continue;
+
+	tree wmptr = build_decl (DECL_SOURCE_LOCATION (onode->decl),
+				 PARM_DECL,
+				 get_watermark_ptr (),
+				 get_qpptr ());
+	DECL_ARTIFICIAL (wmptr) = 1;
+	DECL_ARG_TYPE (wmptr) = get_qpptr ();
+	DECL_CONTEXT (wmptr) = onode->decl;
+	TREE_USED (wmptr) = 1;
+	DECL_CHAIN (wmptr) = *pargs;
+	*pargs = wmptr;
+
+	if (onode->alias)
+	  continue;
+
+	unsigned c;
+	cgraph_edge *e;
+	FOR_EACH_VEC_ELT (onode->collect_callers (), c, e)
+	  {
+	    push_cfun (DECL_STRUCT_FUNCTION (e->caller->decl));
+
+	    gcall *ocall = e->call_stmt;
+	    gimple_stmt_iterator gsi = gsi_for_stmt (ocall);
+
+	    /* Initialize the watermark before the call.  */
+	    tree swm = create_tmp_var (ptr_type_node, ".strub.watermark");
+	    TREE_ADDRESSABLE (swm) = true;
+	    tree swmp = build1 (ADDR_EXPR, get_pptr (), swm);
+
+	    tree enter = get_enter ();
+	    gcall *stptr = gimple_build_call (enter, 1,
+					      unshare_expr (swmp));
+	    gsi_insert_before (&gsi, stptr, GSI_SAME_STMT);
+#if !IMPLICIT_CGRAPH_EDGES
+	    e->caller->create_edge (cgraph_node::get_create (enter),
+				    stptr, gsi_bb (gsi)->count, false);
+#endif
+
+	    /* Replace the call with one that passes the swmp argument first.  */
+	    gcall *wrcall;
+	    { gcall *stmt = ocall;
+	      // Mostly copied from gimple_call_copy_skip_args.
+	      int i = 0;
+	      int nargs = gimple_call_num_args (stmt);
+	      auto_vec<tree> vargs (nargs + 1);
+	      gcall *new_stmt;
+
+	      /* pr71109.c calls a prototypeless function, then defines it with
+		 additional arguments.  It's ill-formed, but after it's inlined,
+		 it somehow works out.  */
+	      for (; i < named_args && i < nargs; i++)
+		vargs.quick_push (gimple_call_arg (stmt, i));
+	      for (; i < named_args; i++)
+		vargs.quick_push (null_pointer_node);
+
+	      vargs.quick_push (unshare_expr (swmp));
+
+	      for (; i < nargs; i++)
+#if 0
+		if (!bitmap_bit_p (args_to_skip, i))
+#endif
+		  vargs.quick_push (gimple_call_arg (stmt, i));
+
+	      if (gimple_call_internal_p (stmt))
+#if 0
+		/*
+		  new_stmt = gimple_build_call_internal_vec (gimple_call_internal_fn (stmt),
+		  vargs);
+		*/
+#endif
+		gcc_unreachable ();
+	      else
+		new_stmt = gimple_build_call_vec (gimple_call_fn (stmt), vargs);
+
+	      if (gimple_call_lhs (stmt))
+		gimple_call_set_lhs (new_stmt, gimple_call_lhs (stmt));
+
+#if 0
+	      gimple_set_vuse (new_stmt, gimple_vuse (stmt));
+	      gimple_set_vdef (new_stmt, gimple_vdef (stmt));
+#else
+	      gimple_move_vops (new_stmt, stmt);
+#endif
+
+	      if (gimple_has_location (stmt))
+		gimple_set_location (new_stmt, gimple_location (stmt));
+	      gimple_call_copy_flags (new_stmt, stmt);
+	      gimple_call_set_chain (new_stmt, gimple_call_chain (stmt));
+
+	      gimple_set_modified (new_stmt, true);
+
+	      wrcall = new_stmt;
+	    }
+
+	    update_stmt (wrcall);
+	    gsi_replace (&gsi, wrcall, true);
+	    cgraph_edge::set_call_stmt (e, wrcall, false);
+
+	    /* Insert the strub code after the call.  */
+	    gimple_seq seq = NULL;
+
+	    {
+#if 0
+	      tree lswm = create_tmp_var (ptr_type_node, ".L.strub.watermark");
+	      gassign *load = gimple_build_assign (lswm, swm);
+	      gimple_seq_add_stmt (&seq, load);
+#else
+	      tree lswm = unshare_expr (swmp);
+#endif
+
+	      gcall *sleave = gimple_build_call (get_leave (), 1, lswm);
+	      gimple_seq_add_stmt (&seq, sleave);
+
+	      gassign *clobber = gimple_build_assign (swm,
+						      build_clobber
+						      (TREE_TYPE (swm)));
+	      gimple_seq_add_stmt (&seq, clobber);
+	    }
+
+	    gsi_insert_finally_seq_after_call (gsi, seq);
+
+	    pop_cfun ();
+	  }
+
+	if (!onode->has_gimple_body_p ())
+	  continue;
+
+	cgraph_node *nnode = onode;
+	push_cfun (DECL_STRUCT_FUNCTION (nnode->decl));
+
+	{
+	  edge e = single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun));
+	  gimple_seq seq = call_update_watermark (wmptr, nnode, e->src->count);
+	  gsi_insert_seq_on_edge_immediate (e, seq);
+	}
+
+	if (DECL_STRUCT_FUNCTION (nnode->decl)->calls_alloca)
+	  {
+	    basic_block bb;
+	    FOR_EACH_BB_FN (bb, cfun)
+	      for (gimple_stmt_iterator gsi = gsi_start_bb (bb);
+		   !gsi_end_p (gsi); gsi_next (&gsi))
+		{
+		  gimple *stmt = gsi_stmt (gsi);
+
+		  if (!is_gimple_call (stmt))
+		    continue;
+
+		  gcall *call = as_a <gcall *> (stmt);
+
+		  if (gimple_alloca_call_p (call))
+		    {
+		      /* Capture stack growth.  */
+		      gimple_seq seq = call_update_watermark (wmptr, NULL,
+							      gsi_bb (gsi)
+							      ->count);
+		      gsi_insert_finally_seq_after_call (gsi, seq);
+		    }
+		}
+	  }
+
+	pop_cfun ();
+
+#if 0
+	compute_fn_summary (onode, true);
+#endif
+	continue;
+      }
+
+    if (mode != STRUB_INTERNAL
+	|| !onode->has_gimple_body_p ())
+      continue;
+
+#if 0
+    /* Hmm, this is an i386-specific attribute.  Do we need machine-specific
+       logic?  */
+    remove_named_attribute_unsharing ("interrupt",
+				      &DECL_ATTRIBUTES (onode->decl));
+#endif
+
+    if (!DECL_STRUCT_FUNCTION (onode->decl))
+      {
+	inform (DECL_SOURCE_LOCATION (onode->decl),
+		"not splitting struct-less function %qD for stack scrubbing",
+		onode->decl);
+	continue;
+      }
+
+    if (!onode->lowered)
+      {
+	inform (DECL_SOURCE_LOCATION (onode->decl),
+		"not splitting non-lowered function %qD for stack scrubbing",
+		onode->decl);
+	continue;
+      }
+
+    /* Since we're not changing the function identity proper, just
+       moving its full implementation, we *could* disable
+       fun->cannot_be_copied_reason and/or temporarily drop a noclone
+       attribute.  FIXME.  */
+    if (!tree_versionable_function_p (onode->decl))
+      {
+	inform (DECL_SOURCE_LOCATION (onode->decl),
+		"%qD cannot be split for stack scrubbing",
+		onode->decl);
+	continue;
+      }
+
+    bool is_stdarg = calls_builtin_va_start_p (onode);;
+    bool apply_args = calls_builtin_apply_args_p (onode);
+
+    vec<ipa_adjusted_param, va_gc> *nparms = NULL;
+    unsigned j = 0;
+    {
+      // The following loop copied from ipa-split.c:split_function.
+      for (tree parm = DECL_ARGUMENTS (onode->decl);
+	   parm; parm = DECL_CHAIN (parm), j++)
+	{
+	  ipa_adjusted_param adj = {};
+	  adj.op = IPA_PARAM_OP_COPY;
+	  adj.base_index = j;
+	  adj.prev_clone_index = j;
+	  vec_safe_push (nparms, adj);
+	}
+
+      if (apply_args)
+	{
+	  ipa_adjusted_param aaadj = {};
+	  aaadj.op = IPA_PARAM_OP_NEW;
+	  aaadj.type = get_qpptr ();
+	  vec_safe_push (nparms, aaadj);
+	}
+
+      if (is_stdarg)
+	{
+	  ipa_adjusted_param vladj = {};
+	  vladj.op = IPA_PARAM_OP_NEW;
+	  vladj.type = get_qpvalst ();
+	  vec_safe_push (nparms, vladj);
+	}
+
+      ipa_adjusted_param wmadj = {};
+      wmadj.op = IPA_PARAM_OP_NEW;
+      wmadj.type = get_qpptr ();
+      vec_safe_push (nparms, wmadj);
+    }
+    ipa_param_adjustments adj (nparms, -1, false);
+
+    cgraph_node *nnode = onode->create_version_clone_with_body
+      (auto_vec<cgraph_edge *> (0),
+       NULL, &adj, NULL, NULL, "strub", NULL);
+
+    if (!nnode)
+      {
+	error_at (DECL_SOURCE_LOCATION (onode->decl),
+		  "failed to split %qD for stack scrubbing",
+		  onode->decl);
+	continue;
+      }
+
+    onode->split_part = true;
+    if (onode->calls_comdat_local)
+      nnode->add_to_same_comdat_group (onode);
+
+    gcc_checking_assert (!DECL_STRUCT_FUNCTION (nnode->decl)->stdarg);
+
+    set_strub_mode_to (onode, STRUB_WRAPPER);
+    set_strub_mode_to (nnode, STRUB_WRAPPED);
+
+    /* Decide which of the wrapped function's parms we want to turn into
+       references to the argument passed to the wrapper.  In general, we want to
+       copy small arguments, and avoid copying large ones.  Variable-sized array
+       lengths given by other arguments, as in 20020210-1.c, would lead to
+       problems if passed by value, after resetting the original function and
+       dropping the length computation; passing them by reference works.
+       DECL_BY_REFERENCE is *not* a substitute for this: it involves copying
+       anyway, but performed at the caller.  */
+    indirect_parms_t indirect_nparms (3, false);
+    unsigned adjust_ftype = 0;
+    for (tree parm = DECL_ARGUMENTS (onode->decl),
+	   nparm = DECL_ARGUMENTS (nnode->decl),
+	   nparmt = TYPE_ARG_TYPES (TREE_TYPE (nnode->decl));
+	 parm;
+	 parm = DECL_CHAIN (parm),
+	   nparm = DECL_CHAIN (nparm),
+	   nparmt = nparmt ? TREE_CHAIN (nparmt) : NULL_TREE)
+      if (!(0 /* DECL_BY_REFERENCE (narg) */
+	    || is_gimple_reg_type (TREE_TYPE (nparm))
+	    || VECTOR_TYPE_P (TREE_TYPE (nparm))
+	    || TREE_CODE (TREE_TYPE (nparm)) == COMPLEX_TYPE
+	    || (tree_fits_uhwi_p (TYPE_SIZE_UNIT (TREE_TYPE (nparm)))
+		&& (tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (nparm)))
+		    <= 4 * UNITS_PER_WORD))))
+	{
+	  indirect_nparms.add (nparm);
+
+	  /* ??? Is there any case in which it is not safe to suggest the parms
+	     turned indirect don't alias anything else?  They are distinct,
+	     unaliased memory in the wrapper, and the wrapped can't possibly
+	     take pointers into them because none of the pointers passed to the
+	     wrapper can alias other incoming parameters passed by value, even
+	     if with transparent reference, and the wrapper doesn't take any
+	     extra parms that could point into wrapper's parms.  So we can
+	     probably drop the TREE_ADDRESSABLE and keep the true.  */
+	  tree ref_type = build_ref_type_for (nparm,
+					      true
+					      || !TREE_ADDRESSABLE (parm));
+
+	  DECL_ARG_TYPE (nparm) = TREE_TYPE (nparm) = ref_type;
+	  relayout_decl (nparm);
+	  TREE_ADDRESSABLE (nparm) = 0;
+	  DECL_BY_REFERENCE (nparm) = 0;
+	  DECL_NOT_GIMPLE_REG_P (nparm) = 0;
+	  /* ??? This avoids mismatches in debug info bind stmts in
+	     e.g. a-chahan .  */
+	  DECL_ABSTRACT_ORIGIN (nparm) = NULL;
+
+	  if (nparmt)
+	    adjust_ftype++;
+	}
+
+    /* Also adjust the wrapped function type, if needed.  */
+    if (adjust_ftype)
+      {
+	tree nftype = TREE_TYPE (nnode->decl);
+
+	/* We always add at least one argument at the end of the signature, when
+	   cloning the function, so we don't expect to need to duplicate the
+	   type here.  */
+	gcc_checking_assert (TYPE_ARG_TYPES (nftype)
+			     != TYPE_ARG_TYPES (TREE_TYPE (onode->decl)));
+
+	/* Check that fnspec still works for the modified function signature,
+	   and drop it otherwise.  */
+	bool drop_fnspec = false;
+	tree fnspec = lookup_attribute ("fn spec", TYPE_ATTRIBUTES (nftype));
+	attr_fnspec spec = fnspec ? attr_fnspec (fnspec) : attr_fnspec ("");
+
+	unsigned retcopy;
+	if (!(fnspec && spec.returns_arg (&retcopy)))
+	  retcopy = (unsigned) -1;
+
+	unsigned i = 0;
+	for (tree nparm = DECL_ARGUMENTS (nnode->decl),
+	       nparmt = TYPE_ARG_TYPES (nftype);
+	     adjust_ftype > 0;
+	     nparm = DECL_CHAIN (nparm), nparmt = TREE_CHAIN (nparmt), i++)
+	  if (indirect_nparms.contains (nparm))
+	    {
+	      TREE_VALUE (nparmt) = TREE_TYPE (nparm);
+	      adjust_ftype--;
+
+	      if (fnspec && !drop_fnspec)
+		{
+		  if (i == retcopy)
+		    drop_fnspec = true;
+		  else if (spec.arg_specified_p (i))
+		    {
+		      /* Properties that apply to pointers only must not be
+			 present, because we don't make pointers further
+			 indirect.  */
+		      gcc_checking_assert
+			(!spec.arg_max_access_size_given_by_arg_p (i, NULL));
+		      gcc_checking_assert (!spec.arg_copied_to_arg_p (i, NULL));
+
+		      /* Any claim of direct access only is invalidated by
+			 adding an indirection level.  */
+		      if (spec.arg_direct_p (i))
+			drop_fnspec = true;
+
+		      /* If there's a claim the argument is not read from, the
+			 added indirection invalidates it: if the argument is
+			 used at all, then the pointer will necessarily be
+			 read.  */
+		      if (!spec.arg_maybe_read_p (i)
+			  && spec.arg_used_p (i))
+			drop_fnspec = true;
+		    }
+		}
+	    }
+
+	/* ??? Maybe we could adjust it instead.  */
+	if (drop_fnspec)
+	  remove_named_attribute_unsharing ("fn spec",
+					    &TYPE_ATTRIBUTES (nftype));
+
+	TREE_TYPE (nnode->decl) = nftype;
+      }
+
+    {
+      tree decl = onode->decl;
+      cgraph_node *target = nnode;
+
+      { // copied from create_wrapper
+
+	/* Preserve DECL_RESULT so we get right by reference flag.  */
+	tree decl_result = DECL_RESULT (decl);
+
+	/* Remove the function's body but keep arguments to be reused
+	   for thunk.  */
+	onode->release_body (true);
+	onode->reset ();
+
+	DECL_UNINLINABLE (decl) = false;
+	DECL_RESULT (decl) = decl_result;
+	DECL_INITIAL (decl) = NULL;
+	allocate_struct_function (decl, false);
+	set_cfun (NULL);
+
+	/* Turn alias into thunk and expand it into GIMPLE representation.  */
+	onode->definition = true;
+
+	thunk_info::get_create (onode);
+	onode->thunk = true;
+#if !IMPLICIT_CGRAPH_EDGES
+	onode->create_edge (target, NULL, onode->count);
+#endif
+	onode->callees->can_throw_external = !TREE_NOTHROW (target->decl);
+
+	tree arguments = DECL_ARGUMENTS (decl);
+
+	while (arguments)
+	  {
+	    TREE_ADDRESSABLE (arguments) = false;
+	    arguments = TREE_CHAIN (arguments);
+	  }
+
+	{
+	  tree alias = onode->callees->callee->decl;
+	  tree thunk_fndecl = decl;
+	  tree a;
+
+	  int nxargs = 1 + is_stdarg + apply_args;
+
+	  { // Simplified from expand_thunk.
+	    tree restype;
+	    basic_block bb, then_bb, else_bb, return_bb;
+	    gimple_stmt_iterator bsi;
+	    int nargs = 0;
+	    tree arg;
+	    int i;
+	    tree resdecl;
+	    tree restmp = NULL;
+
+	    gcall *call;
+	    greturn *ret;
+	    bool alias_is_noreturn = TREE_THIS_VOLATILE (alias);
+
+	    a = DECL_ARGUMENTS (thunk_fndecl);
+
+	    current_function_decl = thunk_fndecl;
+
+	    /* Ensure thunks are emitted in their correct sections.  */
+	    resolve_unique_section (thunk_fndecl, 0,
+				    flag_function_sections);
+
+	    bitmap_obstack_initialize (NULL);
+
+	    /* Build the return declaration for the function.  */
+	    restype = TREE_TYPE (TREE_TYPE (thunk_fndecl));
+	    if (DECL_RESULT (thunk_fndecl) == NULL_TREE)
+	      {
+		resdecl = build_decl (input_location, RESULT_DECL, 0, restype);
+		DECL_ARTIFICIAL (resdecl) = 1;
+		DECL_IGNORED_P (resdecl) = 1;
+		DECL_CONTEXT (resdecl) = thunk_fndecl;
+		DECL_RESULT (thunk_fndecl) = resdecl;
+	      }
+	    else
+	      resdecl = DECL_RESULT (thunk_fndecl);
+
+	    profile_count cfg_count = onode->count;
+	    if (!cfg_count.initialized_p ())
+	      cfg_count = profile_count::from_gcov_type (BB_FREQ_MAX).guessed_local ();
+
+	    bb = then_bb = else_bb = return_bb
+	      = init_lowered_empty_function (thunk_fndecl, true, cfg_count);
+
+	    bsi = gsi_start_bb (bb);
+
+	    /* Build call to the function being thunked.  */
+	    if (!VOID_TYPE_P (restype)
+		&& (!alias_is_noreturn
+		    || TREE_ADDRESSABLE (restype)
+		    || TREE_CODE (TYPE_SIZE_UNIT (restype)) != INTEGER_CST))
+	      {
+		if (DECL_BY_REFERENCE (resdecl))
+		  {
+		    restmp = gimple_fold_indirect_ref (resdecl);
+		    if (!restmp)
+		      restmp = build2 (MEM_REF,
+				       TREE_TYPE (TREE_TYPE (resdecl)),
+				       resdecl,
+				       build_int_cst (TREE_TYPE (resdecl), 0));
+		  }
+		else if (!is_gimple_reg_type (restype))
+		  {
+		    if (aggregate_value_p (resdecl, TREE_TYPE (thunk_fndecl)))
+		      {
+			restmp = resdecl;
+
+			if (VAR_P (restmp))
+			  {
+			    add_local_decl (cfun, restmp);
+			    BLOCK_VARS (DECL_INITIAL (current_function_decl))
+			      = restmp;
+			  }
+		      }
+		    else
+		      restmp = create_tmp_var (restype, "retval");
+		  }
+		else
+		  restmp = create_tmp_reg (restype, "retval");
+	      }
+
+	    for (arg = a; arg; arg = DECL_CHAIN (arg))
+	      nargs++;
+	    auto_vec<tree> vargs (nargs + nxargs);
+	    i = 0;
+	    arg = a;
+
+	    if (nargs)
+	      for (tree nparm = DECL_ARGUMENTS (nnode->decl);
+		   i < nargs;
+		   i++, arg = DECL_CHAIN (arg), nparm = DECL_CHAIN (nparm))
+		{
+		  tree save_arg = arg;
+		  tree tmp = arg;
+
+		  /* Arrange to pass indirectly the parms, if we decided to do
+		     so, and revert its type in the wrapper.  */
+		  if (indirect_nparms.contains (nparm))
+		    {
+		      tree ref_type = TREE_TYPE (nparm);
+		      TREE_ADDRESSABLE (arg) = true;
+		      tree addr = build1 (ADDR_EXPR, ref_type, arg);
+		      tmp = arg = addr;
+		    }
+		  else
+		    DECL_NOT_GIMPLE_REG_P (arg) = 0;
+
+		  /* Convert the argument back to the type used by the calling
+		     conventions, e.g. a non-prototyped float type is passed as
+		     double, as in 930603-1.c, and needs to be converted back to
+		     double to be passed on unchanged to the wrapped
+		     function.  */
+		  if (TREE_TYPE (nparm) != DECL_ARG_TYPE (nparm))
+		    arg = fold_convert (DECL_ARG_TYPE (nparm), arg);
+
+		  if (!is_gimple_val (arg))
+		    {
+		      tmp = create_tmp_reg (TYPE_MAIN_VARIANT
+					    (TREE_TYPE (arg)), "arg");
+		      gimple *stmt = gimple_build_assign (tmp, arg);
+		      gsi_insert_after (&bsi, stmt, GSI_NEW_STMT);
+		    }
+		  vargs.quick_push (tmp);
+		  arg = save_arg;
+		}
+	    /* These strub arguments are adjusted later.  */
+	    if (apply_args)
+	      vargs.quick_push (null_pointer_node);
+	    if (is_stdarg)
+	      vargs.quick_push (null_pointer_node);
+	    vargs.quick_push (null_pointer_node);
+	    call = gimple_build_call_vec (build_fold_addr_expr_loc (0, alias),
+					  vargs);
+	    onode->callees->call_stmt = call;
+	    // gimple_call_set_from_thunk (call, true);
+	    if (DECL_STATIC_CHAIN (alias))
+	      {
+		tree p = DECL_STRUCT_FUNCTION (alias)->static_chain_decl;
+		tree type = TREE_TYPE (p);
+		tree decl = build_decl (DECL_SOURCE_LOCATION (thunk_fndecl),
+					PARM_DECL, create_tmp_var_name ("CHAIN"),
+					type);
+		DECL_ARTIFICIAL (decl) = 1;
+		DECL_IGNORED_P (decl) = 1;
+		TREE_USED (decl) = 1;
+		DECL_CONTEXT (decl) = thunk_fndecl;
+		DECL_ARG_TYPE (decl) = type;
+		TREE_READONLY (decl) = 1;
+
+		struct function *sf = DECL_STRUCT_FUNCTION (thunk_fndecl);
+		sf->static_chain_decl = decl;
+
+		gimple_call_set_chain (call, decl);
+	      }
+
+	    /* Return slot optimization is always possible and in fact required to
+	       return values with DECL_BY_REFERENCE.  */
+	    if (aggregate_value_p (resdecl, TREE_TYPE (thunk_fndecl))
+		&& (!is_gimple_reg_type (TREE_TYPE (resdecl))
+		    || DECL_BY_REFERENCE (resdecl)))
+	      gimple_call_set_return_slot_opt (call, true);
+
+	    if (restmp)
+	      {
+		gimple_call_set_lhs (call, restmp);
+		gcc_assert (useless_type_conversion_p (TREE_TYPE (restmp),
+						       TREE_TYPE (TREE_TYPE (alias))));
+	      }
+	    gsi_insert_after (&bsi, call, GSI_NEW_STMT);
+	    if (!alias_is_noreturn)
+	      {
+		/* Build return value.  */
+		if (!DECL_BY_REFERENCE (resdecl))
+		  ret = gimple_build_return (restmp);
+		else
+		  ret = gimple_build_return (resdecl);
+
+		gsi_insert_after (&bsi, ret, GSI_NEW_STMT);
+	      }
+	    else
+	      {
+		remove_edge (single_succ_edge (bb));
+	      }
+
+	    cfun->gimple_df->in_ssa_p = true;
+	    update_max_bb_count ();
+	    profile_status_for_fn (cfun)
+	      = cfg_count.initialized_p () && cfg_count.ipa_p ()
+	      ? PROFILE_READ : PROFILE_GUESSED;
+	    /* FIXME: C++ FE should stop setting TREE_ASM_WRITTEN on thunks.  */
+	    // TREE_ASM_WRITTEN (thunk_fndecl) = false;
+	    delete_unreachable_blocks ();
+	    update_ssa (TODO_update_ssa);
+	    checking_verify_flow_info ();
+	    free_dominance_info (CDI_DOMINATORS);
+
+	    /* Since we want to emit the thunk, we explicitly mark its name as
+	       referenced.  */
+	    onode->thunk = false;
+	    onode->lowered = true;
+	    bitmap_obstack_release (NULL);
+	  }
+	  current_function_decl = NULL;
+	  set_cfun (NULL);
+	}
+
+	thunk_info::remove (onode);
+
+	// some more of create_wrapper at the end of the next block.
+      }
+    }
+
+    {
+      tree aaptr = NULL_TREE;
+      tree vaptr = NULL_TREE;
+      tree wmptr = NULL_TREE;
+      for (tree arg = DECL_ARGUMENTS (nnode->decl); arg; arg = DECL_CHAIN (arg))
+	{
+	  aaptr = vaptr;
+	  vaptr = wmptr;
+	  wmptr = arg;
+	}
+
+      if (!apply_args)
+	aaptr = NULL_TREE;
+      /* The trailing args are [apply_args], [va_list_ptr], and
+	 watermark.  If we don't have a va_list_ptr, the penultimate
+	 argument is apply_args.
+       */
+      else if (!is_stdarg)
+	aaptr = vaptr;
+
+      if (!is_stdarg)
+	vaptr = NULL_TREE;
+
+      DECL_NAME (wmptr) = get_watermark_ptr ();
+      DECL_ARTIFICIAL (wmptr) = 1;
+      DECL_IGNORED_P (wmptr) = 1;
+      TREE_USED (wmptr) = 1;
+
+      if (is_stdarg)
+	{
+	  DECL_NAME (vaptr) = get_va_list_ptr ();
+	  DECL_ARTIFICIAL (vaptr) = 1;
+	  DECL_IGNORED_P (vaptr) = 1;
+	  TREE_USED (vaptr) = 1;
+	}
+
+      if (apply_args)
+	{
+	  DECL_NAME (aaptr) = get_apply_args_ptr ();
+	  DECL_ARTIFICIAL (aaptr) = 1;
+	  DECL_IGNORED_P (aaptr) = 1;
+	  TREE_USED (aaptr) = 1;
+	}
+
+      push_cfun (DECL_STRUCT_FUNCTION (nnode->decl));
+
+      {
+	edge e = single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun));
+	gimple_seq seq = call_update_watermark (wmptr, nnode, e->src->count);
+	gsi_insert_seq_on_edge_immediate (e, seq);
+      }
+
+      bool any_indirect = !indirect_nparms.is_empty ();
+
+      if (any_indirect)
+	{
+	  basic_block bb;
+	  FOR_EACH_BB_FN (bb, cfun)
+	    for (gimple_stmt_iterator gsi = gsi_start_bb (bb);
+		 !gsi_end_p (gsi); gsi_next (&gsi))
+	      {
+		gimple *stmt = gsi_stmt (gsi);
+
+		walk_stmt_info wi = {};
+		wi.info = &indirect_nparms;
+		walk_gimple_op (stmt, walk_make_indirect, &wi);
+		if (wi.changed)
+		  {
+		    if (!is_gimple_debug (gsi_stmt (gsi)))
+		      {
+			wi.info = &gsi;
+			walk_gimple_op (stmt, walk_regimplify_addr_expr,
+					&wi);
+		      }
+		    update_stmt (stmt);
+		  }
+	      }
+	}
+
+      if (DECL_STRUCT_FUNCTION (nnode->decl)->calls_alloca
+	  || is_stdarg || apply_args)
+	for (cgraph_edge *e = nnode->callees, *enext; e; e = enext)
+	  {
+	    gcall *call = e->call_stmt;
+	    gimple_stmt_iterator gsi = gsi_for_stmt (call);
+	    tree fndecl = e->callee->decl;
+
+	    enext = e->next_callee;
+
+	    if (gimple_alloca_call_p (call))
+	      {
+		gimple_seq seq = call_update_watermark (wmptr, NULL,
+							gsi_bb (gsi)->count);
+		gsi_insert_finally_seq_after_call (gsi, seq);
+	      }
+	    else if (fndecl && is_stdarg
+		     && fndecl_built_in_p (fndecl, BUILT_IN_VA_START))
+	      {
+		if (builtin_decl_explicit (BUILT_IN_VA_START) != fndecl)
+		  sorry_at (gimple_location (call),
+			    "nonstandard stdarg conventions");
+		tree bvacopy = builtin_decl_explicit (BUILT_IN_VA_COPY);
+		gimple_call_set_fndecl (call, bvacopy);
+		tree arg = vaptr;
+		/* The va_copy source must be dereferenced, unless it's an array
+		   type, that would have decayed to a pointer.  */
+		if (TREE_CODE (TREE_TYPE (TREE_TYPE (vaptr))) != ARRAY_TYPE)
+		  {
+		    arg = gimple_fold_indirect_ref (vaptr);
+		    if (!arg)
+		      arg = build2 (MEM_REF,
+				    TREE_TYPE (TREE_TYPE (vaptr)),
+				    vaptr,
+				    build_int_cst (TREE_TYPE (vaptr), 0));
+		  }
+		gimple_call_set_arg (call, 1, arg);
+		update_stmt (call);
+		e->redirect_callee (cgraph_node::get_create (bvacopy));
+	      }
+	    else if (fndecl && apply_args
+		     && fndecl_built_in_p (fndecl, BUILT_IN_APPLY_ARGS))
+	      {
+		tree lhs = gimple_call_lhs (call);
+		gassign *assign = gimple_build_assign (lhs, aaptr);
+		gsi_replace (&gsi, assign, true);
+		cgraph_edge::remove (e);
+	      }
+	  }
+
+      { // a little more copied from create_wrapper
+
+	/* Inline summary set-up.  */
+	nnode->analyze ();
+	// inline_analyze_function (nnode);
+      }
+
+      pop_cfun ();
+    }
+
+    {
+      push_cfun (DECL_STRUCT_FUNCTION (onode->decl));
+      gimple_stmt_iterator gsi
+	= gsi_after_labels (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
+      while (!is_gimple_call (gsi_stmt (gsi)))
+	gsi_next (&gsi);
+
+      gcall *wrcall = as_a <gcall *> (gsi_stmt (gsi));
+
+      tree swm = create_tmp_var (ptr_type_node, ".strub.watermark");
+      TREE_ADDRESSABLE (swm) = true;
+      tree swmp = build1 (ADDR_EXPR, get_pptr (), swm);
+
+      tree enter = get_enter ();
+      gcall *stptr = gimple_build_call (enter, 1, unshare_expr (swmp));
+      gsi_insert_before (&gsi, stptr, GSI_SAME_STMT);
+#if !IMPLICIT_CGRAPH_EDGES
+      onode->create_edge (cgraph_node::get_create (enter),
+			  stptr, gsi_bb (gsi)->count, false);
+#endif
+
+      int nargs = gimple_call_num_args (wrcall);
+
+      gimple_seq seq = NULL;
+
+      if (apply_args)
+	{
+	  tree aalst = create_tmp_var (ptr_type_node, ".strub.appargs");
+	  tree bappargs = builtin_decl_explicit (BUILT_IN_APPLY_ARGS);
+	  gcall *appargs = gimple_build_call (bappargs, 0);
+	  gimple_call_set_lhs (appargs, aalst);
+	  gsi_insert_before (&gsi, appargs, GSI_SAME_STMT);
+	  gimple_call_set_arg (wrcall, nargs - 2 - is_stdarg, aalst);
+#if !IMPLICIT_CGRAPH_EDGES
+	  onode->create_edge (cgraph_node::get_create (bappargs),
+			      appargs, gsi_bb (gsi)->count, false);
+#endif
+	}
+
+      if (is_stdarg)
+	{
+	  tree valst = create_tmp_var (va_list_type_node, ".strub.va_list");
+	  TREE_ADDRESSABLE (valst) = true;
+	  tree vaptr = build1 (ADDR_EXPR,
+			       build_pointer_type (va_list_type_node),
+			       valst);
+	  gimple_call_set_arg (wrcall, nargs - 2, unshare_expr (vaptr));
+
+	  tree bvastart = builtin_decl_explicit (BUILT_IN_VA_START);
+	  gcall *vastart = gimple_build_call (bvastart, 2,
+					      unshare_expr (vaptr),
+					      integer_zero_node);
+	  gsi_insert_before (&gsi, vastart, GSI_SAME_STMT);
+#if !IMPLICIT_CGRAPH_EDGES
+	  onode->create_edge (cgraph_node::get_create (bvastart),
+			      vastart, gsi_bb (gsi)->count, false);
+#endif
+
+	  tree bvaend = builtin_decl_explicit (BUILT_IN_VA_END);
+	  gcall *vaend = gimple_build_call (bvaend, 1, unshare_expr (vaptr));
+	  gimple_seq_add_stmt (&seq, vaend);
+	}
+
+      gimple_call_set_arg (wrcall, nargs - 1, unshare_expr (swmp));
+      // gimple_call_set_tail (wrcall, false);
+      update_stmt (wrcall);
+
+      {
+#if 0
+	tree lswm = create_tmp_var (ptr_type_node, ".L.strub.watermark");
+	gassign *load = gimple_build_assign (lswm, swm);
+	gimple_seq_add_stmt (&seq, load);
+#else
+	tree lswm = unshare_expr (swmp);
+#endif
+
+	gcall *sleave = gimple_build_call (get_leave (), 1, lswm);
+	gimple_seq_add_stmt (&seq, sleave);
+
+	gassign *clobber = gimple_build_assign (swm,
+						build_clobber
+						(TREE_TYPE (swm)));
+	gimple_seq_add_stmt (&seq, clobber);
+      }
+
+      gsi_insert_finally_seq_after_call (gsi, seq);
+
+      /* For nnode, we don't rebuild edges because we wish to retain
+	 any redirections copied to it from earlier passes, so we add
+	 call graph edges explicitly there, but for onode, we create a
+	 fresh function, so we may as well just issue the calls and
+	 then rebuild all cgraph edges.  */
+      // cgraph_edge::rebuild_edges ();
+      onode->analyze ();
+      // inline_analyze_function (onode);
+
+      pop_cfun ();
+    }
+
+#if 0
+    compute_fn_summary (onode, true);
+    compute_fn_summary (nnode, true);
+#endif
+  }
+
+  /* ??? Check that strub functions don't call non-strub functions, and that
+     always_inline strub functions are only called by strub functions.  */
+
+  return 0;
+}
+
+/* Instantiate the IPA strub pass for pass-manager context CTXT, for
+   registration via passes.def and tree-pass.h.  */
+simple_ipa_opt_pass *
+make_pass_ipa_strub (gcc::context *ctxt)
+{
+  return new pass_ipa_strub (ctxt);
+}
diff --git a/gcc/ipa-strub.h b/gcc/ipa-strub.h
new file mode 100644
index 00000000000..cadbca5002a
--- /dev/null
+++ b/gcc/ipa-strub.h
@@ -0,0 +1,25 @@
+/* strub (stack scrubbing) infrastructure.
+   Copyright (C) 2021 Free Software Foundation, Inc.
+   Contributed by Alexandre Oliva <oliva@adacore.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3.  If not see
+<http://www.gnu.org/licenses/>.  */
+
+/* Return TRUE if the first function can be inlined into the second,
+   as far as stack scrubbing constraints are concerned.  CALLEE
+   doesn't have to be called directly by CALLER, but the returned
+   value says nothing about intervening functions.  */
+extern bool strub_inlinable_p (cgraph_node *callee, cgraph_node *caller);
diff --git a/gcc/passes.def b/gcc/passes.def
index f5d88a61b0e..677c000e80b 100644
--- a/gcc/passes.def
+++ b/gcc/passes.def
@@ -53,6 +53,7 @@ along with GCC; see the file COPYING3.  If not see
   INSERT_PASSES_AFTER (all_small_ipa_passes)
   NEXT_PASS (pass_ipa_free_lang_data);
   NEXT_PASS (pass_ipa_function_and_variable_visibility);
+  NEXT_PASS (pass_ipa_strub_mode);
   NEXT_PASS (pass_build_ssa_passes);
   PUSH_INSERT_PASSES_WITHIN (pass_build_ssa_passes)
       NEXT_PASS (pass_fixup_cfg);
@@ -111,6 +112,7 @@ along with GCC; see the file COPYING3.  If not see
   POP_INSERT_PASSES ()
 
   NEXT_PASS (pass_ipa_remove_symbols);
+  NEXT_PASS (pass_ipa_strub);
   NEXT_PASS (pass_ipa_oacc);
   PUSH_INSERT_PASSES_WITHIN (pass_ipa_oacc)
       NEXT_PASS (pass_ipa_pta);
diff --git a/gcc/testsuite/g++.dg/wrappers/strub1.C b/gcc/testsuite/g++.dg/wrappers/strub1.C
new file mode 100644
index 00000000000..a474a929649
--- /dev/null
+++ b/gcc/testsuite/g++.dg/wrappers/strub1.C
@@ -0,0 +1,18 @@
+// { dg-do run }
+
+// Check that we don't get extra copies.
+
+// A type that records its own address at construction time: SELF must
+// always refer to the containing object, so any bitwise copy or move
+// of a T to a different address is detected by check () and aborts.
+// Used to verify that no extra copies are introduced by wrapping.
+struct T {
+  T &self;
+  void check () const { if (&self != this) __builtin_abort (); }
+  T() : self (*this) { check (); }
+  T(const T& ck) : self (*this) { ck.check (); check (); }
+  ~T() { check (); }
+};
+
+// Pass a T by value through two levels of calls; every construction,
+// copy, and destruction self-checks via T::check ().
+T foo (T) { return T(); }
+T bar (T p) { return foo (p); }
+
+int main () {
+  bar (T{});
+}
diff --git a/gcc/testsuite/g++.dg/wrappers/strub2.C b/gcc/testsuite/g++.dg/wrappers/strub2.C
new file mode 100644
index 00000000000..25a62166448
--- /dev/null
+++ b/gcc/testsuite/g++.dg/wrappers/strub2.C
@@ -0,0 +1,22 @@
+// { dg-do run }
+
+// This doesn't really test anything yet.  We should mark the
+// variables as requiring strubbing, and somehow check that the
+// wrapped functions take the parameter by reference.
+
+// An aggregate large enough (32 pointers' worth of bytes) that it is
+// normally passed and returned in memory rather than in registers.
+struct T {
+  char d[32 * sizeof(void*)];
+};
+
+// The empty asm makes Q observable in memory, so the copy cannot be
+// optimized away.
+T foo (T q) { asm ("" : : "m"(q)); return q; }
+T bar (T p) { return foo (p); }
+
+T tmp;
+T tmp2;
+
+// Round-trip a recognizable bit pattern through bar/foo and verify it
+// arrives unchanged.
+int main () {
+  __builtin_memset (&tmp, 0x55, sizeof (tmp));
+  tmp2 = bar (tmp);
+  if (__builtin_memcmp (&tmp, &tmp2, sizeof (tmp)))
+    __builtin_abort ();
+}
diff --git a/gcc/testsuite/g++.dg/wrappers/strub3.C b/gcc/testsuite/g++.dg/wrappers/strub3.C
new file mode 100644
index 00000000000..e1b51cd0399
--- /dev/null
+++ b/gcc/testsuite/g++.dg/wrappers/strub3.C
@@ -0,0 +1,22 @@
+// { dg-do run }
+
+// This doesn't really test anything yet.  We should mark the
+// variables as requiring strubbing, and somehow check that the
+// wrapped functions take the parameter by reference.
+
+// An aggregate large enough (32 pointers' worth of bytes) that it is
+// normally passed and returned in memory rather than in registers.
+struct T {
+  char d[32 * sizeof(void*)];
+};
+
+// Same as strub2.C, but with internal-linkage (static) functions, so
+// the compiler is free to change their calling conventions.
+static T foo (T q) { asm ("" : : "m"(q)); return q; }
+static T bar (T p) { return foo (p); }
+
+T tmp;
+T tmp2;
+
+// Round-trip a recognizable bit pattern through bar/foo and verify it
+// arrives unchanged.
+int main () {
+  __builtin_memset (&tmp, 0x55, sizeof (tmp));
+  tmp2 = bar (tmp);
+  if (__builtin_memcmp (&tmp, &tmp2, sizeof (tmp)))
+    __builtin_abort ();
+}
diff --git a/gcc/testsuite/g++.dg/wrappers/strub4.C b/gcc/testsuite/g++.dg/wrappers/strub4.C
new file mode 100644
index 00000000000..d021fca88e4
--- /dev/null
+++ b/gcc/testsuite/g++.dg/wrappers/strub4.C
@@ -0,0 +1,18 @@
+// { dg-do run }
+
+// An anonymous-namespace class with an out-of-line constructor, and a
+// namespace-scope object of it: BAR requires dynamic (static
+// constructor) initialization at program startup.
+namespace
+{
+  class foo
+  {
+  public:
+    foo();
+  };
+
+  foo::foo() {}
+
+  foo bar;
+}
+
+// Nothing to do at run time; the interesting code is the static
+// initializer emitted for BAR.
+int main()
+{
+}
diff --git a/gcc/tree-cfg.c b/gcc/tree-cfg.c
index 38269a27b79..b5e0af8b3a1 100644
--- a/gcc/tree-cfg.c
+++ b/gcc/tree-cfg.c
@@ -5617,6 +5617,7 @@ gimple_verify_flow_info (void)
 	{
 	  gimple *stmt = gsi_stmt (gsi);
 
+	  /* Do NOT disregard debug stmts after found_ctrl_stmt.  */
 	  if (found_ctrl_stmt)
 	    {
 	      error ("control flow in the middle of basic block %d",
diff --git a/gcc/tree-pass.h b/gcc/tree-pass.h
index aa9757a2fe9..0210f84bc0c 100644
--- a/gcc/tree-pass.h
+++ b/gcc/tree-pass.h
@@ -494,8 +494,9 @@ extern gimple_opt_pass *make_pass_adjust_alignment (gcc::context *ctxt);
 
 /* IPA Passes */
 extern simple_ipa_opt_pass *make_pass_ipa_lower_emutls (gcc::context *ctxt);
-extern simple_ipa_opt_pass
-							      *make_pass_ipa_function_and_variable_visibility (gcc::context *ctxt);
+extern simple_ipa_opt_pass *make_pass_ipa_function_and_variable_visibility (gcc::context *ctxt);
+extern simple_ipa_opt_pass *make_pass_ipa_strub_mode (gcc::context *ctxt);
+extern simple_ipa_opt_pass *make_pass_ipa_strub (gcc::context *ctxt);
 extern simple_ipa_opt_pass *make_pass_ipa_tree_profile (gcc::context *ctxt);
 extern simple_ipa_opt_pass *make_pass_ipa_auto_profile (gcc::context *ctxt);
 
diff --git a/libgcc/Makefile.in b/libgcc/Makefile.in
index 2c8be561eb5..6b7b12493f5 100644
--- a/libgcc/Makefile.in
+++ b/libgcc/Makefile.in
@@ -428,7 +428,7 @@ ifneq ($(enable_shared),yes)
 iterator = $(patsubst %,$(srcdir)/static-object.mk,$(iter-items))
 endif
 
-LIB2ADD += enable-execute-stack.c
+LIB2ADD += enable-execute-stack.c $(srcdir)/strub.c
 
 # While emutls.c has nothing to do with EH, it is in LIB2ADDEH*
 # instead of LIB2ADD because that's the way to be sure on some targets
diff --git a/libgcc/libgcc2.h b/libgcc/libgcc2.h
index 1819ff3ac3d..857091e65c8 100644
--- a/libgcc/libgcc2.h
+++ b/libgcc/libgcc2.h
@@ -532,6 +532,10 @@ extern int __parityDI2 (UDWtype);
 
 extern void __enable_execute_stack (void *);
 
+extern void __strub_enter (void **);
+extern void __strub_update (void**);
+extern void __strub_leave (void **);
+
 #ifndef HIDE_EXPORTS
 #pragma GCC visibility pop
 #endif
diff --git a/libgcc/strub.c b/libgcc/strub.c
new file mode 100644
index 00000000000..fd6e27556e4
--- /dev/null
+++ b/libgcc/strub.c
@@ -0,0 +1,112 @@
+/* Stack scrubbing infrastructure
+   Copyright (C) 2021 Free Software Foundation, Inc.
+   Contributed by Alexandre Oliva <oliva@adacore.com>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
+<http://www.gnu.org/licenses/>.  */
+
+#include "tconfig.h"
+#include "tsystem.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "libgcc_tm.h"
+#include "libgcc2.h"
+
+/* A TOPS B holds when stack address A is closer to the top of the
+   stack than B, i.e., further along in the direction of stack
+   growth.  */
+#ifndef STACK_GROWS_DOWNWARD
+# define TOPS >
+#else
+# define TOPS <
+#endif
+
+/* Mark the strub runtime entry points with strub mode 3.
+   NOTE(review): presumably this is the "callable" mode, allowing these
+   functions to be called from strub contexts without being wrapped
+   themselves — confirm against the compiler's strub attribute
+   handling.  */
+#define ATTRIBUTE_STRUB_CALLABLE __attribute__ ((__strub__ (3)))
+
+/* Enter a stack scrubbing context, initializing the watermark to the caller's
+   stack address.  NOTE(review): __builtin_frame_address (0) is this
+   function's own frame, which adjoins the caller's frame, making it a
+   conservative initial watermark — confirm this is the intent.  */
+void ATTRIBUTE_STRUB_CALLABLE
+__strub_enter (void **watermark)
+{
+  *watermark = __builtin_frame_address (0);
+}
+
+/* Update the watermark within a stack scrubbing context with the current stack
+   pointer.  */
+void ATTRIBUTE_STRUB_CALLABLE
+__strub_update (void **watermark)
+{
+  void *sp = __builtin_frame_address (0);
+
+  /* TOPS compares in the direction of stack growth, so the watermark
+     only ever advances toward the top of the stack, extending the
+     range to be scrubbed.  */
+  if (sp TOPS *watermark)
+    *watermark = sp;
+}
+
+#ifndef TARGET_STRUB_USE_DYNAMIC_ARRAY
+# define TARGET_STRUB_DONT_USE_DYNAMIC_ARRAY 1
+#endif
+
+#ifndef TARGET_STRUB_DONT_USE_DYNAMIC_ARRAY
+# ifdef TARGET_STRUB_MAY_USE_MEMSET
+#  define TARGET_STRUB_DONT_USE_DYNAMIC_ARRAY 1
+# else
+#  define TARGET_STRUB_MAY_USE_MEMSET 1
+# endif
+#endif
+
+/* Leave a stack scrubbing context, clearing the stack between the
+   watermark recorded in *MARK and the current stack address.  (The
+   earlier mention of "restoring and updating SAVED" did not match this
+   implementation: MARK is only read.)  */
+void ATTRIBUTE_STRUB_CALLABLE
+#if ! TARGET_STRUB_MAY_USE_MEMSET
+__attribute__ ((__optimize__ ("-fno-tree-loop-distribute-patterns")))
+#endif
+__strub_leave (void **mark)
+{
+  void *sp = __builtin_stack_address ();
+
+  /* Normalize the range so BASE < END regardless of the direction of
+     stack growth.  */
+  void **base, **end;
+#ifndef STACK_GROWS_DOWNWARD
+  base = sp;
+  end = *mark;
+#else
+  base = *mark;
+  end = sp;
+#endif
+
+  /* Nothing to scrub if the watermark never moved past our own top.  */
+  ptrdiff_t len = end - base;
+  if (len <= 0)
+    return;
+
+#if ! TARGET_STRUB_DONT_USE_DYNAMIC_ARRAY
+  /* Allocate a dynamically-sized array covering the desired range, so that we
+     can safely call memset on it.  */
+  void *ptr[len];
+  base = &ptr[0];
+  end = &ptr[len];
+#else
+  void **ptr = end;
+#endif /* TARGET_STRUB_DONT_USE_DYNAMIC_ARRAY */
+
+  /* ldist turns this into a memset.  Without the dynamic array above, that call
+     is likely unsafe: possibly tail-called, and likely scribbling over its own
+     stack frame.  */
+  while (base < end)
+    *base++ = 0;
+
+  /* Pretend to read PTR so the clearing loop above is not eliminated
+     as dead stores.  */
+  asm ("" : : "m" (ptr));
+}


             reply	other threads:[~2021-07-25  1:43 UTC|newest]

Thread overview: 12+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2021-07-25  1:43 Alexandre Oliva [this message]
2021-07-25 15:58 Alexandre Oliva
2021-07-26 13:31 Alexandre Oliva
2021-07-28  6:33 Alexandre Oliva
2021-07-28  7:06 Alexandre Oliva
2021-07-28  9:35 Alexandre Oliva
2021-08-04  8:55 Alexandre Oliva
2021-08-19  2:58 Alexandre Oliva
2021-08-20  4:20 Alexandre Oliva
2021-08-20  4:21 Alexandre Oliva
2021-08-28  4:13 Alexandre Oliva
2021-08-28  6:30 Alexandre Oliva

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20210725014314.7784B3945C08@sourceware.org \
    --to=aoliva@gcc.gnu.org \
    --cc=gcc-cvs@gcc.gnu.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for read-only IMAP folder(s) and NNTP newsgroup(s).