From: Alexandre Oliva <oliva@adacore.com>
To: gcc-patches@gcc.gnu.org
Cc: Jeremy Bennett <jeremy.bennett@embecosm.com>,
	Craig Blackmore <craig.blackmore@embecosm.com>,
	Graham Markall <graham.markall@embecosm.com>,
	Martin Jambor <mjambor@suse.cz>, Jan Hubicka <hubicka@ucw.cz>,
	Richard Biener <richard.guenther@gmail.com>,
	Jim Wilson <wilson@tuliptree.org>
Subject: [PATCH v2 10/10] Introduce strub: strub pass
Date: Fri, 29 Jul 2022 03:34:32 -0300	[thread overview]
Message-ID: <orv8rgl953.fsf_-_@lxoliva.fsfla.org> (raw)
In-Reply-To: <or35eko33q.fsf_-_@lxoliva.fsfla.org> (Alexandre Oliva's message of "Fri, 29 Jul 2022 03:16:41 -0300")


This final fragment of ipa-strub.cc adds the strub pass, which
implements the needed function interface changes and adds calls to the
strub builtins.
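
As an overview, here is a sketch of the transformation in source-level
terms.  This is only an illustration, not code from the patch: the
__builtin___strub_* names and the trailing watermark parameter match
the implementation, the rest is made up for exposition.

  /* at-calls: int f (int) is effectively retyped as  */
  int f (int x, void **watermark);

  /* and a call from outside a strub context is rewritten roughly as  */
  void *wm;
  __builtin___strub_enter (&wm);  /* record the current stack level  */
  int r = f (42, &wm);            /* f updates *watermark as it runs  */
  __builtin___strub_leave (&wm);  /* scrub stack used past the mark  */

Within strub contexts, __builtin___strub_update is called on entry and
after alloca calls, so that the watermark tracks the deepest stack use;
internal strub instead splits the function into a wrapper and a wrapped
clone that takes the watermark (and, if needed, va_list and apply_args
pointers) as trailing arguments.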

+/* Define a pass to introduce strub transformations.  */
+const pass_data pass_data_ipa_strub = {
+  SIMPLE_IPA_PASS,
+  "strub",
+  OPTGROUP_NONE,
+  TV_NONE,
+  PROP_cfg | PROP_ssa, // properties_required
+  0,	    // properties_provided
+  0,	    // properties_destroyed
+  0,	    // todo_flags_start
+  TODO_update_ssa
+  | TODO_cleanup_cfg
+  | TODO_rebuild_cgraph_edges
+  | TODO_verify_il, // todo_flags_finish
+};
+
+class pass_ipa_strub : public simple_ipa_opt_pass
+{
+public:
+  pass_ipa_strub (gcc::context *ctxt)
+    : simple_ipa_opt_pass (pass_data_ipa_strub, ctxt)
+  {}
+  opt_pass *clone () { return new pass_ipa_strub (m_ctxt); }
+  virtual bool gate (function *) { return flag_strub && !seen_error (); }
+  virtual unsigned int execute (function *);
+
+  /* Define on demand and cache some types we use often.  */
+#define DEF_TYPE(NAME, INIT)			\
+  static inline tree get_ ## NAME () {		\
+    static tree type = NULL_TREE;		\
+    if (!type)					\
+      type = (INIT);				\
+    return type;				\
+  }
+
+  /* Use a distinct ptr_type_node to denote the watermark, so that we can
+     recognize it in arg lists and avoid modifying types twice.  */
+  DEF_TYPE (wmt, build_variant_type_copy (ptr_type_node))
+
+  DEF_TYPE (pwmt, build_reference_type (get_wmt ()))
+
+  DEF_TYPE (qpwmt,
+	    build_qualified_type (get_pwmt (),
+				  TYPE_QUAL_RESTRICT
+				  /* | TYPE_QUAL_CONST */))
+
+  DEF_TYPE (qptr,
+	    build_qualified_type (ptr_type_node,
+				  TYPE_QUAL_RESTRICT
+				  | TYPE_QUAL_CONST))
+
+  DEF_TYPE (qpvalst,
+	    build_qualified_type (build_reference_type
+				  (va_list_type_node),
+				  TYPE_QUAL_RESTRICT
+				  /* | TYPE_QUAL_CONST */))
+
+#undef DEF_TYPE
+
+  /* Define non-strub builtins on demand.  */
+#define DEF_NM_BUILTIN(NAME, CODE, FNTYPELIST)			\
+  static tree get_ ## NAME () {					\
+    tree decl = builtin_decl_explicit (CODE);			\
+    if (!decl)							\
+      {								\
+	tree type = build_function_type_list FNTYPELIST;	\
+	decl = add_builtin_function				\
+	  ("__builtin_" #NAME,					\
+	   type, CODE, BUILT_IN_NORMAL,				\
+	   NULL, NULL);						\
+	TREE_NOTHROW (decl) = true;				\
+	set_builtin_decl ((CODE), decl, true);			\
+      }								\
+    return decl;						\
+  }
+
+  DEF_NM_BUILTIN (stack_address,
+		  BUILT_IN_STACK_ADDRESS,
+		  (ptr_type_node, NULL))
+
+#undef DEF_NM_BUILTIN
+
+  /* Define strub builtins on demand.  */
+#define DEF_SS_BUILTIN(NAME, FNSPEC, CODE, FNTYPELIST)		\
+  static tree get_ ## NAME () {					\
+    tree decl = builtin_decl_explicit (CODE);			\
+    if (!decl)							\
+      {								\
+	tree type = build_function_type_list FNTYPELIST;	\
+	tree attrs = NULL;					\
+	if (FNSPEC && HAVE_ATTR_FNSPEC)				\
+	  attrs = tree_cons (get_identifier ("fn spec"),	\
+			     build_tree_list			\
+			     (NULL_TREE,			\
+			      build_string (strlen (FNSPEC),	\
+					    (FNSPEC))),		\
+			     attrs);				\
+	decl = add_builtin_function_ext_scope			\
+	  ("__builtin___strub_" #NAME,				\
+	   type, CODE, BUILT_IN_NORMAL,				\
+	   "__strub_" #NAME, attrs);				\
+	TREE_NOTHROW (decl) = true;				\
+	set_builtin_decl ((CODE), decl, true);			\
+      }								\
+    return decl;						\
+  }
+
+  DEF_SS_BUILTIN (enter, ". Ot",
+		  BUILT_IN___STRUB_ENTER,
+		  (void_type_node, get_qpwmt (), NULL))
+  DEF_SS_BUILTIN (update, ". Wt",
+		  BUILT_IN___STRUB_UPDATE,
+		  (void_type_node, get_qpwmt (), NULL))
+  DEF_SS_BUILTIN (leave, ". w ",
+		  BUILT_IN___STRUB_LEAVE,
+		  (void_type_node, get_qpwmt (), NULL))
+
+#undef DEF_SS_BUILTIN
+
+  /* Define strub identifiers on demand.  */
+#define DEF_IDENT(NAME)					\
+  static inline tree get_ ## NAME () {			\
+    static tree identifier = NULL_TREE;			\
+    if (!identifier)					\
+      identifier = get_identifier (".strub." #NAME);	\
+    return identifier;					\
+  }
+
+  DEF_IDENT (watermark_ptr)
+  DEF_IDENT (va_list_ptr)
+  DEF_IDENT (apply_args)
+
+#undef DEF_IDENT
+
+  static inline int adjust_at_calls_type (tree);
+  static inline void adjust_at_calls_call (cgraph_edge *, int, tree);
+  static inline void adjust_at_calls_calls (cgraph_node *);
+
+  /* Add to SEQ a call to the strub watermark update builtin, taking NODE's
+     location if given.  Optionally add the corresponding edge from NODE, with
+     execution frequency COUNT.  Return the modified SEQ.  */
+
+  static inline gimple_seq
+  call_update_watermark (tree wmptr, cgraph_node *node, profile_count count,
+			 gimple_seq seq = NULL)
+    {
+      tree uwm = get_update ();
+      gcall *update = gimple_build_call (uwm, 1, wmptr);
+      if (node)
+	gimple_set_location (update, DECL_SOURCE_LOCATION (node->decl));
+      gimple_seq_add_stmt (&seq, update);
+      if (node)
+#if !IMPLICIT_CGRAPH_EDGES
+	node->create_edge (cgraph_node::get_create (uwm), update, count, false);
+#else
+	(void)count;
+#endif
+      return seq;
+    }
+
+};
+
+} // anon namespace
+
+/* Use this type to gather the collection of parameters that we're turning
+   into explicit references.  */
+
+typedef hash_set<tree> indirect_parms_t;
+
+/* Dereference OP's incoming turned-into-reference parm if OP is in
+   INDIRECT_PARMS, or is an ADDR_EXPR of such a parm.  Set *REC and return
+   according to gimple-walking expectations.  */
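+
+/* E.g. (illustratively), once PARM is in INDIRECT_PARMS, a use of PARM
+   is rewritten to *PARM (a MEM_REF), whereas &PARM becomes plain PARM,
+   since the parm itself now holds the address.  */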
+
+static tree
+maybe_make_indirect (indirect_parms_t &indirect_parms, tree op, int *rec)
+{
+  if (DECL_P (op))
+    {
+      *rec = 0;
+      if (indirect_parms.contains (op))
+	{
+	  tree ret = gimple_fold_indirect_ref (op);
+	  if (!ret)
+	    ret = build2 (MEM_REF,
+			  TREE_TYPE (TREE_TYPE (op)),
+			  op,
+			  build_int_cst (TREE_TYPE (op), 0));
+	  return ret;
+	}
+    }
+  else if (TREE_CODE (op) == ADDR_EXPR
+	   && DECL_P (TREE_OPERAND (op, 0)))
+    {
+      *rec = 0;
+      if (indirect_parms.contains (TREE_OPERAND (op, 0)))
+	{
+	  op = TREE_OPERAND (op, 0);
+	  return op;
+	}
+    }
+
+  return NULL_TREE;
+}
+
+/* A gimple-walking function that adds dereferencing to indirect parms.  */
+
+static tree
+walk_make_indirect (tree *op, int *rec, void *arg)
+{
+  walk_stmt_info *wi = (walk_stmt_info *)arg;
+  indirect_parms_t &indirect_parms = *(indirect_parms_t *)wi->info;
+
+  if (!*op || TYPE_P (*op))
+    {
+      *rec = 0;
+      return NULL_TREE;
+    }
+
+  if (tree repl = maybe_make_indirect (indirect_parms, *op, rec))
+    {
+      *op = repl;
+      wi->changed = true;
+    }
+
+  return NULL_TREE;
+}
+
+/* A gimple-walking function that turns any non-gimple-val ADDR_EXPRs into a
+   separate SSA name.  Though addresses of e.g. parameters, and of members
+   thereof, are gimple vals, once parameters are turned into references, the
+   extra layer of indirection, and thus the explicit dereferencing, makes
+   such addresses no longer gimple vals, so they need to be
+   regimplified.  */
+
+static tree
+walk_regimplify_addr_expr (tree *op, int *rec, void *arg)
+{
+  walk_stmt_info *wi = (walk_stmt_info *)arg;
+  gimple_stmt_iterator &gsi = *(gimple_stmt_iterator *)wi->info;
+
+  *rec = 0;
+
+  if (!*op || TREE_CODE (*op) != ADDR_EXPR)
+    return NULL_TREE;
+
+  if (!is_gimple_val (*op))
+    {
+      tree ret = force_gimple_operand_gsi (&gsi, *op, true,
+					   NULL_TREE, true, GSI_SAME_STMT);
+      gcc_assert (ret != *op);
+      *op = ret;
+      wi->changed = true;
+    }
+
+  return NULL_TREE;
+}
+
+/* Turn STMT's PHI arg defs into separate SSA defs if they've become
+   non-gimple_val.  Return TRUE if any edge insertions need to be committed.  */
+
+static bool
+walk_regimplify_phi (gphi *stmt)
+{
+  bool needs_commit = false;
+
+  for (unsigned i = 0, n = gimple_phi_num_args (stmt); i < n; i++)
+    {
+      tree op = gimple_phi_arg_def (stmt, i);
+      if ((TREE_CODE (op) == ADDR_EXPR
+	   && !is_gimple_val (op))
+	  /* ??? A PARM_DECL that was addressable in the original function and
+	     had its address in PHI nodes, but that became a reference in the
+	     wrapped clone would NOT be updated by update_ssa in PHI nodes.
+	     Alas, if we were to create a default def for it now, update_ssa
+	     would complain that the symbol that needed rewriting already has
+	     SSA names associated with it.  OTOH, leaving the PARM_DECL alone,
+	     it eventually causes errors because it remains unchanged in PHI
+	     nodes, but it gets rewritten as expected if it appears in other
+	     stmts.  So we cheat a little here, and force the PARM_DECL out of
+	     the PHI node and into an assignment.  It's a little expensive,
+	     because we insert it at the edge, which introduces a basic block
+	     that's entirely unnecessary, but it works, and the block will be
+	     removed as the default def gets propagated back into the PHI node,
+	     so the final optimized code looks just as expected.  */
+	  || (TREE_CODE (op) == PARM_DECL
+	      && !TREE_ADDRESSABLE (op)))
+	{
+	  tree temp = make_ssa_name (TREE_TYPE (op), stmt);
+	  if (TREE_CODE (op) == PARM_DECL)
+	    SET_SSA_NAME_VAR_OR_IDENTIFIER (temp, DECL_NAME (op));
+	  SET_PHI_ARG_DEF (stmt, i, temp);
+
+	  gimple *assign = gimple_build_assign (temp, op);
+	  if (gimple_phi_arg_has_location (stmt, i))
+	    gimple_set_location (assign, gimple_phi_arg_location (stmt, i));
+	  gsi_insert_on_edge (gimple_phi_arg_edge (stmt, i), assign);
+	  needs_commit = true;
+	}
+    }
+
+  return needs_commit;
+}
+
+/* Create a reference type to use for PARM when turning it into a reference.
+   NONALIASED causes the reference type to gain its own separate alias set,
+   so that accessing the indirectly-passed parm won't add aliasing
+   noise.  */
+
+static tree
+build_ref_type_for (tree parm, bool nonaliased = true)
+{
+  gcc_checking_assert (TREE_CODE (parm) == PARM_DECL);
+
+  tree ref_type = build_reference_type (TREE_TYPE (parm));
+
+  if (!nonaliased)
+    return ref_type;
+
+  /* Each PARM turned indirect still points to the distinct memory area at the
+     wrapper, and the reference is unchanging, so we might qualify it, but...
+     const is not really important, since we're only using default defs for the
+     reference parm anyway, and not introducing any defs, and restrict seems to
+     cause trouble.  E.g., libgnat/s-concat3.adb:str_concat_3 has memmoves that
+     get deleted in dse1 if it's wrapped.  Using a distinct alias set seems
+     to not run afoul of this problem, and it hopefully enables the compiler
+     to tell that the pointers do point to objects that are not otherwise
+     aliased.  */
+#if 1
+  tree qref_type = build_variant_type_copy (ref_type);
+
+  TYPE_ALIAS_SET (qref_type) = new_alias_set ();
+  record_alias_subset (TYPE_ALIAS_SET (qref_type), get_alias_set (ref_type));
+
+  return qref_type;
+#else
+  tree qref_type = build_qualified_type (ref_type,
+					 TYPE_QUAL_RESTRICT
+					 | TYPE_QUAL_CONST);
+
+  return qref_type;
+#endif
+}
+
+/* Add cgraph edges from current_function_decl to callees in SEQ with frequency
+   COUNT, assuming all calls in SEQ are direct.  */
+
+static void
+add_call_edges_for_seq (gimple_seq seq, profile_count count)
+{
+#if IMPLICIT_CGRAPH_EDGES
+  return;
+#endif
+
+  cgraph_node *node = cgraph_node::get_create (current_function_decl);
+
+  for (gimple_stmt_iterator gsi = gsi_start (seq);
+       !gsi_end_p (gsi); gsi_next (&gsi))
+    {
+      gimple *stmt = gsi_stmt (gsi);
+
+      gcall *call = dyn_cast <gcall *> (stmt);
+      if (!call)
+	continue;
+
+      tree callee = gimple_call_fndecl (call);
+      gcc_checking_assert (callee);
+      node->create_edge (cgraph_node::get_create (callee), call, count, false);
+    }
+}
+
+/* Insert SEQ after the call at GSI, as if the call was in a try block with SEQ
+   as finally, i.e., SEQ will run after the call whether it returns or
+   propagates an exception.  This handles block splitting, EH edge and block
+   creation, noreturn and nothrow optimizations, and even throwing calls without
+   preexisting local handlers.  */
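+
+/* Illustratively, the intent is (pseudo-code, not from this patch):
+
+     before:  lhs = call (args);
+     after:   try { lhs = call (args); } finally { SEQ }
+
+   realized at the GIMPLE level with explicit blocks, EH regions and
+   edges.  */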
+
+static void
+gsi_insert_finally_seq_after_call (gimple_stmt_iterator gsi, gimple_seq seq)
+{
+  if (!seq)
+    return;
+
+  gimple *stmt = gsi_stmt (gsi);
+
+  if (gimple_has_location (stmt))
+    annotate_all_with_location (seq, gimple_location (stmt));
+
+  gcall *call = dyn_cast <gcall *> (stmt);
+  bool noreturn_p = call && gimple_call_noreturn_p (call);
+  int eh_lp = lookup_stmt_eh_lp (stmt);
+  bool must_not_throw_p = eh_lp < 0;
+  bool nothrow_p = (must_not_throw_p
+		    || (call && gimple_call_nothrow_p (call))
+		    || (eh_lp <= 0
+			&& (TREE_NOTHROW (cfun->decl)
+			    || !flag_exceptions)));
+
+  if (noreturn_p && nothrow_p)
+    return;
+
+  /* Don't expect an EH edge if we're not to throw, or if we're not in an EH
+     region yet.  */
+  bool no_eh_edge_p = (nothrow_p || !eh_lp);
+  bool must_end_bb = stmt_ends_bb_p (stmt);
+
+  edge eft = NULL, eeh = NULL;
+  if (must_end_bb && !(noreturn_p && no_eh_edge_p))
+    {
+      gcc_checking_assert (gsi_one_before_end_p (gsi));
+
+      edge e;
+      edge_iterator ei;
+      FOR_EACH_EDGE (e, ei, gsi_bb (gsi)->succs)
+	{
+	  if ((e->flags & EDGE_EH))
+	    {
+	      gcc_checking_assert (!eeh);
+	      eeh = e;
+#if !CHECKING_P
+	      if (eft || noreturn_p)
+		break;
+#endif
+	    }
+	  if ((e->flags & EDGE_FALLTHRU))
+	    {
+	      gcc_checking_assert (!eft);
+	      eft = e;
+#if !CHECKING_P
+	      if (eeh || no_eh_edge_p)
+		break;
+#endif
+	    }
+	}
+
+      gcc_checking_assert (!(eft && (eft->flags & EDGE_FALLTHRU))
+			   == noreturn_p);
+      gcc_checking_assert (!(eeh && (eeh->flags & EDGE_EH))
+			   == no_eh_edge_p);
+      gcc_checking_assert (eft != eeh);
+    }
+
+  if (!noreturn_p)
+    {
+      gimple_seq nseq = nothrow_p ? seq : gimple_seq_copy (seq);
+
+      if (must_end_bb)
+	{
+	  gcc_checking_assert (gsi_one_before_end_p (gsi));
+	  add_call_edges_for_seq (nseq, eft->count ());
+	  gsi_insert_seq_on_edge_immediate (eft, nseq);
+	}
+      else
+	{
+	  add_call_edges_for_seq (nseq, gsi_bb (gsi)->count);
+	  gsi_insert_seq_after (&gsi, nseq, GSI_SAME_STMT);
+	}
+    }
+
+  if (nothrow_p)
+    return;
+
+  if (eh_lp)
+    {
+      add_call_edges_for_seq (seq, eeh->count ());
+      gsi_insert_seq_on_edge_immediate (eeh, seq);
+      return;
+    }
+
+  /* A throwing call may appear within a basic block in a function that doesn't
+     have any EH regions.  We're about to add a cleanup for it, so the block
+     will have to be split.  */
+  basic_block bb = gsi_bb (gsi);
+  if (!gsi_one_before_end_p (gsi))
+    split_block (bb, stmt);
+
+  /* Create a new block for the EH cleanup.  */
+  basic_block bb_eh_cleanup = create_empty_bb (bb);
+  if (dom_info_available_p (CDI_DOMINATORS))
+    set_immediate_dominator (CDI_DOMINATORS, bb_eh_cleanup, bb);
+  if (current_loops)
+    add_bb_to_loop (bb_eh_cleanup, current_loops->tree_root);
+
+  /* Make the new block an EH cleanup for the call.  */
+  eh_region new_r = gen_eh_region_cleanup (NULL);
+  eh_landing_pad lp = gen_eh_landing_pad (new_r);
+  tree label = gimple_block_label (bb_eh_cleanup);
+  lp->post_landing_pad = label;
+  EH_LANDING_PAD_NR (label) = lp->index;
+  add_stmt_to_eh_lp (stmt, lp->index);
+
+  /* Add the cleanup code to the EH cleanup block.  */
+  gsi = gsi_after_labels (bb_eh_cleanup);
+  gsi_insert_seq_before (&gsi, seq, GSI_SAME_STMT);
+
+  /* And then propagate the exception further.  */
+  gresx *resx = gimple_build_resx (new_r->index);
+  if (gimple_has_location (stmt))
+    gimple_set_location (resx, gimple_location (stmt));
+  gsi_insert_before (&gsi, resx, GSI_SAME_STMT);
+
+  /* Finally, wire the EH cleanup block into the CFG.  */
+  make_eh_edges (stmt);
+  add_call_edges_for_seq (seq, single_pred_edge (bb_eh_cleanup)->count ());
+}
+
+/* Copy the attribute list at *ATTRS, minus any NAME attributes, leaving
+   shareable trailing nodes alone.  */
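+
+/* E.g. (illustratively), with *ATTRS = A -> NAME -> B -> NAME -> C,
+   the result is fresh copies of A and B chained to the original C,
+   which remains shared with any other users of the list.  */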
+
+static inline void
+remove_named_attribute_unsharing (const char *name, tree *attrs)
+{
+  while (tree found = lookup_attribute (name, *attrs))
+    {
+      /* Copy nodes up to the next NAME attribute.  */
+      while (*attrs != found)
+	{
+	  *attrs = tree_cons (TREE_PURPOSE (*attrs),
+			      TREE_VALUE (*attrs),
+			      TREE_CHAIN (*attrs));
+	  attrs = &TREE_CHAIN (*attrs);
+	}
+      /* Then drop it.  */
+      gcc_checking_assert (*attrs == found);
+      *attrs = TREE_CHAIN (*attrs);
+    }
+}
+
+/* Record the order of the last cgraph entry whose mode we've already set, so
+   that we can perform mode setting incrementally without duplication.  */
+static int last_cgraph_order;
+
+/* Set strub modes for functions introduced since the last call.  */
+
+static void
+ipa_strub_set_mode_for_new_functions ()
+{
+  if (symtab->order == last_cgraph_order)
+    return;
+
+  cgraph_node *node;
+
+  /* Go through the functions twice, once over non-aliases, and then over
+     aliases, so that aliases can reuse the mode computation of their ultimate
+     targets.  */
+  for (int aliases = 0; aliases <= 1; aliases++)
+    FOR_EACH_FUNCTION (node)
+    {
+      if (!node->alias != !aliases)
+	continue;
+
+      /*  Already done.  */
+      if (node->order < last_cgraph_order)
+	continue;
+
+      set_strub_mode (node);
+    }
+
+  last_cgraph_order = symtab->order;
+}
+
+/* Return FALSE if NODE is a strub context, and TRUE otherwise.  */
+
+bool
+strub_splittable_p (cgraph_node *node)
+{
+  switch (get_strub_mode (node))
+    {
+    case STRUB_WRAPPED:
+    case STRUB_AT_CALLS:
+    case STRUB_AT_CALLS_OPT:
+    case STRUB_INLINABLE:
+    case STRUB_INTERNAL:
+    case STRUB_WRAPPER:
+      return false;
+
+    case STRUB_CALLABLE:
+    case STRUB_DISABLED:
+      break;
+
+    default:
+      gcc_unreachable ();
+    }
+
+  return true;
+}
+
+/* Return the PARM_DECL of the incoming watermark pointer, if there is one.  */
+
+tree
+strub_watermark_parm (tree fndecl)
+{
+  switch (get_strub_mode_from_fndecl (fndecl))
+    {
+    case STRUB_WRAPPED:
+    case STRUB_AT_CALLS:
+    case STRUB_AT_CALLS_OPT:
+      break;
+
+    case STRUB_INTERNAL:
+    case STRUB_WRAPPER:
+    case STRUB_CALLABLE:
+    case STRUB_DISABLED:
+    case STRUB_INLINABLE:
+      return NULL_TREE;
+
+    default:
+      gcc_unreachable ();
+    }
+
+  for (tree parm = DECL_ARGUMENTS (fndecl); parm; parm = DECL_CHAIN (parm))
+    /* The type (variant) compare finds the parameter even in a just-created
+       clone, before we set its name, but the type-based compare doesn't work
+       during builtin expansion within the lto compiler, because we'll have
+       created a separate variant in that run.  */
+    if (TREE_TYPE (parm) == pass_ipa_strub::get_qpwmt ()
+	|| DECL_NAME (parm) == pass_ipa_strub::get_watermark_ptr ())
+      return parm;
+
+  gcc_unreachable ();
+}
+
+/* Adjust a STRUB_AT_CALLS function TYPE, adding a watermark pointer if it
+   hasn't been added yet.  Return the named argument count.  */
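+
+/* E.g. (illustratively), int f (int, char) becomes
+   int f (int, char, wmptr), and variadic int f (int, ...) becomes
+   int f (int, wmptr, ...): the watermark pointer is appended after all
+   named arguments.  */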
+
+int
+pass_ipa_strub::adjust_at_calls_type (tree type)
+{
+  int named_args = 0;
+
+  gcc_checking_assert (same_strub_mode_in_variants_p (type));
+
+  if (!TYPE_ARG_TYPES (type))
+    return named_args;
+
+  tree *tlist = &TYPE_ARG_TYPES (type);
+  tree qpwmptrt = get_qpwmt ();
+  while (*tlist && TREE_VALUE (*tlist) != void_type_node)
+    {
+      /* The type has already been adjusted.  */
+      if (TREE_VALUE (*tlist) == qpwmptrt)
+	return named_args;
+      named_args++;
+      *tlist = tree_cons (TREE_PURPOSE (*tlist),
+			  TREE_VALUE (*tlist),
+			  TREE_CHAIN (*tlist));
+      tlist = &TREE_CHAIN (*tlist);
+    }
+
+  /* Add the new argument after all named arguments, so as to not mess with
+     attributes that reference parameters.  */
+  *tlist = tree_cons (NULL_TREE, get_qpwmt (), *tlist);
+
+#if ATTR_FNSPEC_DECONST_WATERMARK
+  if (!type_already_adjusted)
+    {
+      int flags = flags_from_decl_or_type (type);
+      tree fnspec = lookup_attribute ("fn spec", type);
+
+      if ((flags & (ECF_CONST | ECF_PURE | ECF_NOVOPS)) || fnspec)
+	{
+	  size_t xargs = 1;
+	  size_t curlen = 0, tgtlen = 2 + 2 * (named_args + xargs);
+	  auto_vec<char> nspecv (tgtlen);
+	  char *nspec = &nspecv[0]; /* It will *not* be NUL-terminated!  */
+	  if (fnspec)
+	    {
+	      tree fnspecstr = TREE_VALUE (TREE_VALUE (fnspec));
+	      curlen = TREE_STRING_LENGTH (fnspecstr);
+	      memcpy (nspec, TREE_STRING_POINTER (fnspecstr), curlen);
+	    }
+	  if (!curlen)
+	    {
+	      nspec[curlen++] = '.';
+	      nspec[curlen++] = ((flags & ECF_CONST)
+				 ? 'c'
+				 : (flags & ECF_PURE)
+				 ? 'p'
+				 : ' ');
+	    }
+	  while (curlen < tgtlen - 2 * xargs)
+	    {
+	      nspec[curlen++] = '.';
+	      nspec[curlen++] = ' ';
+	    }
+	  nspec[curlen++] = 'W';
+	  nspec[curlen++] = 't';
+
+	  /* The type has already been copied, if needed, before adding
+	     parameters.  */
+	  TYPE_ATTRIBUTES (type)
+	    = tree_cons (get_identifier ("fn spec"),
+			 build_tree_list (NULL_TREE,
+					  build_string (tgtlen, nspec)),
+			 TYPE_ATTRIBUTES (type));
+	}
+    }
+#endif
+
+  return named_args;
+}
+
+/* Adjust a call to an at-calls call target.  Create a watermark local variable
+   if needed, initialize it before the call, pass it to the callee according to
+   the modified at-calls interface, and release the callee's stack space after
+   the call, unless that release is deferred.  If the call is const or pure,
+   arrange for the watermark to not be assumed unused or unchanged.  */
+
+void
+pass_ipa_strub::adjust_at_calls_call (cgraph_edge *e, int named_args,
+				      tree callee_fntype)
+{
+  gcall *ocall = e->call_stmt;
+  gimple_stmt_iterator gsi = gsi_for_stmt (ocall);
+
+  /* Make sure we haven't modified this call yet.  */
+  gcc_checking_assert (!(int (gimple_call_num_args (ocall)) > named_args
+			 && (TREE_TYPE (gimple_call_arg (ocall, named_args))
+			     == get_pwmt ())));
+
+  /* If we're already within a strub context, pass on the incoming watermark
+     pointer, and omit the enter and leave calls around the modified call, as an
+     optimization, or as a means to satisfy a tail-call requirement.  */
+  tree swmp = ((optimize_size || optimize > 2
+		|| gimple_call_must_tail_p (ocall)
+		|| (optimize == 2 && gimple_call_tail_p (ocall)))
+	       ? strub_watermark_parm (e->caller->decl)
+	       : NULL_TREE);
+  bool omit_own_watermark = swmp;
+  tree swm = NULL_TREE;
+  if (!omit_own_watermark)
+    {
+      swm = create_tmp_var (get_wmt (), ".strub.watermark");
+      TREE_ADDRESSABLE (swm) = true;
+      swmp = build1 (ADDR_EXPR, get_pwmt (), swm);
+
+      /* Initialize the watermark before the call.  */
+      tree enter = get_enter ();
+      gcall *stptr = gimple_build_call (enter, 1,
+					unshare_expr (swmp));
+      if (gimple_has_location (ocall))
+	gimple_set_location (stptr, gimple_location (ocall));
+      gsi_insert_before (&gsi, stptr, GSI_SAME_STMT);
+#if !IMPLICIT_CGRAPH_EDGES
+      e->caller->create_edge (cgraph_node::get_create (enter),
+			      stptr, gsi_bb (gsi)->count, false);
+#endif
+    }
+
+  /* Replace the call with one that passes the swmp argument first.  */
+  gcall *wrcall;
+  { gcall *stmt = ocall;
+    // Mostly copied from gimple_call_copy_skip_args.
+    int i = 0;
+    int nargs = gimple_call_num_args (stmt);
+    auto_vec<tree> vargs (MAX (nargs, named_args) + 1);
+    gcall *new_stmt;
+
+    /* pr71109.c calls a prototypeless function, then defines it with
+       additional arguments.  It's ill-formed, but after it's inlined,
+       it somehow works out.  */
+    for (; i < named_args && i < nargs; i++)
+      vargs.quick_push (gimple_call_arg (stmt, i));
+    for (; i < named_args; i++)
+      vargs.quick_push (null_pointer_node);
+
+    vargs.quick_push (unshare_expr (swmp));
+
+    for (; i < nargs; i++)
+#if 0
+      if (!bitmap_bit_p (args_to_skip, i))
+#endif
+	vargs.quick_push (gimple_call_arg (stmt, i));
+
+    if (gimple_call_internal_p (stmt))
+#if 0
+      new_stmt = gimple_build_call_internal_vec (gimple_call_internal_fn (stmt),
+						 vargs);
+#endif
+      gcc_unreachable ();
+    else
+      new_stmt = gimple_build_call_vec (gimple_call_fn (stmt), vargs);
+    gimple_call_set_fntype (new_stmt, callee_fntype);
+
+    if (gimple_call_lhs (stmt))
+      gimple_call_set_lhs (new_stmt, gimple_call_lhs (stmt));
+
+#if 0
+    gimple_set_vuse (new_stmt, gimple_vuse (stmt));
+    gimple_set_vdef (new_stmt, gimple_vdef (stmt));
+#else
+    gimple_move_vops (new_stmt, stmt);
+#endif
+
+    if (gimple_has_location (stmt))
+      gimple_set_location (new_stmt, gimple_location (stmt));
+    gimple_call_copy_flags (new_stmt, stmt);
+    gimple_call_set_chain (new_stmt, gimple_call_chain (stmt));
+
+    gimple_set_modified (new_stmt, true);
+
+    wrcall = new_stmt;
+  }
+
+  update_stmt (wrcall);
+  gsi_replace (&gsi, wrcall, true);
+  cgraph_edge::set_call_stmt (e, wrcall, false);
+
+  /* Insert the strub code after the call.  */
+  gimple_seq seq = NULL;
+
+#if !ATTR_FNSPEC_DECONST_WATERMARK
+  /* If the call will be assumed to not modify or even read the
+     watermark, make it read and modified ourselves.  */
+  if ((gimple_call_flags (wrcall)
+       & (ECF_CONST | ECF_PURE | ECF_NOVOPS)))
+    {
+      if (!swm)
+	swm = build2 (MEM_REF,
+		      TREE_TYPE (TREE_TYPE (swmp)),
+		      swmp,
+		      build_int_cst (TREE_TYPE (swmp), 0));
+
+      vec<tree, va_gc> *inputs = NULL;
+      vec<tree, va_gc> *outputs = NULL;
+      vec_safe_push (outputs,
+		     build_tree_list
+		     (build_tree_list
+		      (NULL_TREE, build_string (2, "=m")),
+		      unshare_expr (swm)));
+      vec_safe_push (inputs,
+		     build_tree_list
+		     (build_tree_list
+		      (NULL_TREE, build_string (1, "m")),
+		      unshare_expr (swm)));
+      gasm *forcemod = gimple_build_asm_vec ("", inputs, outputs,
+					     NULL, NULL);
+      gimple_seq_add_stmt (&seq, forcemod);
+
+      /* If the call will be assumed to not even read the watermark,
+	 make sure it is already in memory before the call.  */
+      if ((gimple_call_flags (wrcall) & ECF_CONST))
+	{
+	  vec<tree, va_gc> *inputs = NULL;
+	  vec_safe_push (inputs,
+			 build_tree_list
+			 (build_tree_list
+			  (NULL_TREE, build_string (1, "m")),
+			  unshare_expr (swm)));
+	  gasm *force_store = gimple_build_asm_vec ("", inputs, NULL,
+						    NULL, NULL);
+	  if (gimple_has_location (wrcall))
+	    gimple_set_location (force_store, gimple_location (wrcall));
+	  gsi_insert_before (&gsi, force_store, GSI_SAME_STMT);
+	}
+    }
+#endif
+
+  if (!omit_own_watermark)
+    {
+      gcall *sleave = gimple_build_call (get_leave (), 1,
+					 unshare_expr (swmp));
+      gimple_seq_add_stmt (&seq, sleave);
+
+      gassign *clobber = gimple_build_assign (swm,
+					      build_clobber
+					      (TREE_TYPE (swm)));
+      gimple_seq_add_stmt (&seq, clobber);
+    }
+
+  gsi_insert_finally_seq_after_call (gsi, seq);
+}
+
+/* Adjust all at-calls calls in NODE.  */
+
+void
+pass_ipa_strub::adjust_at_calls_calls (cgraph_node *node)
+{
+  /* Adjust unknown-callee indirect calls with STRUB_AT_CALLS types within
+     NODE.  */
+  if (node->indirect_calls)
+    {
+      push_cfun (DECL_STRUCT_FUNCTION (node->decl));
+      for (cgraph_edge *e = node->indirect_calls; e; e = e->next_callee)
+	{
+	  gcc_checking_assert (e->indirect_unknown_callee);
+
+	  tree callee_fntype;
+	  enum strub_mode callee_mode
+	    = effective_strub_mode_for_call (e->call_stmt, &callee_fntype);
+
+	  if (callee_mode != STRUB_AT_CALLS
+	      && callee_mode != STRUB_AT_CALLS_OPT)
+	    continue;
+
+	  int named_args = adjust_at_calls_type (callee_fntype);
+
+	  adjust_at_calls_call (e, named_args, callee_fntype);
+	}
+      pop_cfun ();
+    }
+
+  if (node->callees)
+    {
+      push_cfun (DECL_STRUCT_FUNCTION (node->decl));
+      for (cgraph_edge *e = node->callees; e; e = e->next_callee)
+	{
+	  gcc_checking_assert (!e->indirect_unknown_callee);
+
+	  tree callee_fntype;
+	  enum strub_mode callee_mode
+	    = effective_strub_mode_for_call (e->call_stmt, &callee_fntype);
+
+	  if (callee_mode != STRUB_AT_CALLS
+	      && callee_mode != STRUB_AT_CALLS_OPT)
+	    continue;
+
+	  int named_args = adjust_at_calls_type (callee_fntype);
+
+	  adjust_at_calls_call (e, named_args, callee_fntype);
+	}
+      pop_cfun ();
+    }
+}
+
+/* The strubm (strub mode) pass computes a strub mode for each function in the
+   call graph, and checks, before any inlining, that strub callability
+   requirements in effect are satisfied.  */
+
+unsigned int
+pass_ipa_strub_mode::execute (function *)
+{
+  last_cgraph_order = 0;
+  ipa_strub_set_mode_for_new_functions ();
+
+  /* Verify before any inlining or other transformations.  */
+  verify_strub ();
+
+  return 0;
+}
+
+/* Create a strub mode pass.  */
+
+simple_ipa_opt_pass *
+make_pass_ipa_strub_mode (gcc::context *ctxt)
+{
+  return new pass_ipa_strub_mode (ctxt);
+}
+
+/* The strub pass proper adjusts types, signatures, and at-calls calls, and
+   splits internal-strub functions.  */
+
+unsigned int
+pass_ipa_strub::execute (function *)
+{
+  cgraph_node *onode;
+
+  ipa_strub_set_mode_for_new_functions ();
+
+  /* First, adjust the signature of at-calls functions.  We adjust their
+     types first, so that we don't modify types in place unless strub is
+     explicitly requested.  */
+  FOR_EACH_FUNCTION (onode)
+  {
+    enum strub_mode mode = get_strub_mode (onode);
+
+    if (mode == STRUB_AT_CALLS
+	|| mode == STRUB_AT_CALLS_OPT)
+      {
+	/* Create a type variant if strubbing was not explicitly requested in
+	   the function type.  */
+	if (get_strub_mode_from_type (TREE_TYPE (onode->decl)) != mode)
+	  distinctify_node_type (onode);
+
+	int named_args = adjust_at_calls_type (TREE_TYPE (onode->decl));
+
+	/* An external function explicitly declared with strub won't have a
+	   body.  Even with implicit at-calls strub, a function may have had its
+	   body removed after we selected the mode, and then we have nothing
+	   further to do.  */
+	if (!onode->has_gimple_body_p ())
+	  continue;
+
+	tree *pargs = &DECL_ARGUMENTS (onode->decl);
+
+	/* A noninterposable_alias reuses the same parm decl chain; don't add
+	   the parm twice.  */
+	bool aliased_parms = (onode->alias && *pargs
+			      && DECL_CONTEXT (*pargs) != onode->decl);
+
+	if (aliased_parms)
+	  continue;
+
+	for (int i = 0; i < named_args; i++)
+	  pargs = &DECL_CHAIN (*pargs);
+
+	tree wmptr = build_decl (DECL_SOURCE_LOCATION (onode->decl),
+				 PARM_DECL,
+				 get_watermark_ptr (),
+				 get_qpwmt ());
+	DECL_ARTIFICIAL (wmptr) = 1;
+	DECL_ARG_TYPE (wmptr) = get_qpwmt ();
+	DECL_CONTEXT (wmptr) = onode->decl;
+	TREE_USED (wmptr) = 1;
+	DECL_CHAIN (wmptr) = *pargs;
+	*pargs = wmptr;
+
+	if (onode->alias)
+	  continue;
+
+	cgraph_node *nnode = onode;
+	push_cfun (DECL_STRUCT_FUNCTION (nnode->decl));
+
+	{
+	  edge e = single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun));
+	  gimple_seq seq = call_update_watermark (wmptr, nnode, e->src->count);
+	  gsi_insert_seq_on_edge_immediate (e, seq);
+	}
+
+	if (DECL_STRUCT_FUNCTION (nnode->decl)->calls_alloca)
+	  {
+	    basic_block bb;
+	    FOR_EACH_BB_FN (bb, cfun)
+	      for (gimple_stmt_iterator gsi = gsi_start_bb (bb);
+		   !gsi_end_p (gsi); gsi_next (&gsi))
+		{
+		  gimple *stmt = gsi_stmt (gsi);
+
+		  gcall *call = dyn_cast <gcall *> (stmt);
+
+		  if (!call)
+		    continue;
+
+		  if (gimple_alloca_call_p (call))
+		    {
+		      /* Capture stack growth.  */
+		      gimple_seq seq = call_update_watermark (wmptr, NULL,
+							      gsi_bb (gsi)
+							      ->count);
+		      gsi_insert_finally_seq_after_call (gsi, seq);
+		    }
+		}
+	  }
+
+	pop_cfun ();
+      }
+  }
+
+  FOR_EACH_FUNCTION (onode)
+  {
+    if (!onode->has_gimple_body_p ())
+      continue;
+
+    enum strub_mode mode = get_strub_mode (onode);
+
+    if (mode != STRUB_INTERNAL)
+      {
+	adjust_at_calls_calls (onode);
+	continue;
+      }
+
+    bool is_stdarg = calls_builtin_va_start_p (onode);
+    bool apply_args = calls_builtin_apply_args_p (onode);
+
+    vec<ipa_adjusted_param, va_gc> *nparms = NULL;
+    unsigned j = 0;
+    {
+      // The following loop copied from ipa-split.c:split_function.
+      for (tree parm = DECL_ARGUMENTS (onode->decl);
+	   parm; parm = DECL_CHAIN (parm), j++)
+	{
+	  ipa_adjusted_param adj = {};
+	  adj.op = IPA_PARAM_OP_COPY;
+	  adj.base_index = j;
+	  adj.prev_clone_index = j;
+	  vec_safe_push (nparms, adj);
+	}
+
+      if (apply_args)
+	{
+	  ipa_adjusted_param aaadj = {};
+	  aaadj.op = IPA_PARAM_OP_NEW;
+	  aaadj.type = get_qptr ();
+	  vec_safe_push (nparms, aaadj);
+	}
+
+      if (is_stdarg)
+	{
+	  ipa_adjusted_param vladj = {};
+	  vladj.op = IPA_PARAM_OP_NEW;
+	  vladj.type = get_qpvalst ();
+	  vec_safe_push (nparms, vladj);
+	}
+
+      ipa_adjusted_param wmadj = {};
+      wmadj.op = IPA_PARAM_OP_NEW;
+      wmadj.type = get_qpwmt ();
+      vec_safe_push (nparms, wmadj);
+    }
+    ipa_param_adjustments adj (nparms, -1, false);
+
+    cgraph_node *nnode = onode->create_version_clone_with_body
+      (auto_vec<cgraph_edge *> (0),
+       NULL, &adj, NULL, NULL, "strub", NULL);
+
+    if (!nnode)
+      {
+	error_at (DECL_SOURCE_LOCATION (onode->decl),
+		  "failed to split %qD for %<strub%>",
+		  onode->decl);
+	continue;
+      }
+
+    onode->split_part = true;
+    if (onode->calls_comdat_local)
+      nnode->add_to_same_comdat_group (onode);
+
+    set_strub_mode_to (onode, STRUB_WRAPPER);
+    set_strub_mode_to (nnode, STRUB_WRAPPED);
+
+    adjust_at_calls_calls (nnode);
+
+    /* Decide which of the wrapped function's parms we want to turn into
+       references to the argument passed to the wrapper.  In general, we want to
+       copy small arguments, and avoid copying large ones.  Variable-sized array
+       lengths given by other arguments, as in 20020210-1.c, would lead to
+       problems if passed by value, after resetting the original function and
+       dropping the length computation; passing them by reference works.
+       DECL_BY_REFERENCE is *not* a substitute for this: it involves copying
+       anyway, but performed at the caller.  */
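+    /* E.g. (illustratively), an int or double parm remains passed by
+       copy, whereas a parm larger than 4 words, or of variable size,
+       is turned into a reference into the wrapper's frame.  */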
+    indirect_parms_t indirect_nparms (3, false);
+    unsigned adjust_ftype = 0;
+    unsigned named_args = 0;
+    for (tree parm = DECL_ARGUMENTS (onode->decl),
+	   nparm = DECL_ARGUMENTS (nnode->decl),
+	   nparmt = TYPE_ARG_TYPES (TREE_TYPE (nnode->decl));
+	 parm;
+	 named_args++,
+	   parm = DECL_CHAIN (parm),
+	   nparm = DECL_CHAIN (nparm),
+	   nparmt = nparmt ? TREE_CHAIN (nparmt) : NULL_TREE)
+      if (!(0 /* DECL_BY_REFERENCE (narg) */
+	    || is_gimple_reg_type (TREE_TYPE (nparm))
+	    || VECTOR_TYPE_P (TREE_TYPE (nparm))
+	    || TREE_CODE (TREE_TYPE (nparm)) == COMPLEX_TYPE
+	    || (tree_fits_uhwi_p (TYPE_SIZE_UNIT (TREE_TYPE (nparm)))
+		&& (tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (nparm)))
+		    <= 4 * UNITS_PER_WORD))))
+	{
+	  indirect_nparms.add (nparm);
+
+	  /* ??? Is there any case in which it is not safe to suggest that the
+	     parms turned indirect don't alias anything else?  They are
+	     distinct, unaliased memory in the wrapper, and the wrapped
+	     function can't possibly take pointers into them, because none of
+	     the pointers passed to the wrapper can alias other incoming
+	     parameters passed by value, even if with transparent reference,
+	     and the wrapper doesn't take any extra parms that could point into
+	     the wrapper's parms.  So we can probably drop the TREE_ADDRESSABLE
+	     and keep the TRUE.  */
+	  tree ref_type = build_ref_type_for (nparm,
+					      true
+					      || !TREE_ADDRESSABLE (parm));
+
+	  DECL_ARG_TYPE (nparm) = TREE_TYPE (nparm) = ref_type;
+	  relayout_decl (nparm);
+	  TREE_ADDRESSABLE (nparm) = 0;
+	  DECL_BY_REFERENCE (nparm) = 0;
+#if FOR_GCC_11P
+	  DECL_NOT_GIMPLE_REG_P (nparm) = 0;
+#else
+	  DECL_GIMPLE_REG_P (nparm) = 1;
+#endif
+	  /* ??? This avoids mismatches in debug info bind stmts in
+	     e.g. a-chahan.  */
+	  DECL_ABSTRACT_ORIGIN (nparm) = NULL;
+
+	  if (nparmt)
+	    adjust_ftype++;
+	}
+
+    /* Also adjust the wrapped function type, if needed.  */
+    if (adjust_ftype)
+      {
+	tree nftype = TREE_TYPE (nnode->decl);
+
+	/* We always add at least one argument at the end of the signature, when
+	   cloning the function, so we don't expect to need to duplicate the
+	   type here.  */
+	gcc_checking_assert (TYPE_ARG_TYPES (nftype)
+			     != TYPE_ARG_TYPES (TREE_TYPE (onode->decl)));
+
+#if HAVE_ATTR_FNSPEC
+	/* Check that fnspec still works for the modified function signature,
+	   and drop it otherwise.  */
+	bool drop_fnspec = false;
+	tree fnspec = lookup_attribute ("fn spec", TYPE_ATTRIBUTES (nftype));
+	attr_fnspec spec = fnspec ? attr_fnspec (fnspec) : attr_fnspec ("");
+
+	unsigned retcopy;
+	if (!(fnspec && spec.returns_arg (&retcopy)))
+	  retcopy = (unsigned) -1;
+
+	unsigned i = 0;
+#endif
+	for (tree nparm = DECL_ARGUMENTS (nnode->decl),
+	       nparmt = TYPE_ARG_TYPES (nftype);
+	     adjust_ftype > 0;
+#if HAVE_ATTR_FNSPEC
+	     i++,
+#endif
+	       nparm = DECL_CHAIN (nparm), nparmt = TREE_CHAIN (nparmt))
+	  if (indirect_nparms.contains (nparm))
+	    {
+	      TREE_VALUE (nparmt) = TREE_TYPE (nparm);
+	      adjust_ftype--;
+
+#if HAVE_ATTR_FNSPEC
+	      if (fnspec && !drop_fnspec)
+		{
+		  if (i == retcopy)
+		    drop_fnspec = true;
+		  else if (spec.arg_specified_p (i))
+		    {
+		      /* Properties that apply to pointers only must not be
+			 present, because we don't make pointers further
+			 indirect.  */
+		      gcc_checking_assert
+			(!spec.arg_max_access_size_given_by_arg_p (i, NULL));
+		      gcc_checking_assert (!spec.arg_copied_to_arg_p (i, NULL));
+
+		      /* Any claim of direct access only is invalidated by
+			 adding an indirection level.  */
+		      if (spec.arg_direct_p (i))
+			drop_fnspec = true;
+
+		      /* If there's a claim the argument is not read from, the
+			 added indirection invalidates it: if the argument is
+			 used at all, then the pointer will necessarily be
+			 read.  */
+		      if (!spec.arg_maybe_read_p (i)
+			  && spec.arg_used_p (i))
+			drop_fnspec = true;
+		    }
+		}
+#endif
+	    }
+
+#if HAVE_ATTR_FNSPEC
+	/* ??? Maybe we could adjust it instead.  */
+	if (drop_fnspec)
+	  remove_named_attribute_unsharing ("fn spec",
+					    &TYPE_ATTRIBUTES (nftype));
+#endif
+
+	TREE_TYPE (nnode->decl) = nftype;
+      }
+
+#if ATTR_FNSPEC_DECONST_WATERMARK
+    {
+      int flags = flags_from_decl_or_type (nnode->decl);
+      tree fnspec = lookup_attribute ("fn spec", TREE_TYPE (nnode->decl));
+
+      if ((flags & (ECF_CONST | ECF_PURE | ECF_NOVOPS)) || fnspec)
+	{
+	  size_t xargs = 1 + int (is_stdarg) + int (apply_args);
+	  size_t curlen = 0, tgtlen = 2 + 2 * (named_args + xargs);
+	  auto_vec<char> nspecv (tgtlen);
+	  char *nspec = &nspecv[0]; /* It will *not* be NUL-terminated!  */
+	  bool no_writes_p = true;
+	  if (fnspec)
+	    {
+	      tree fnspecstr = TREE_VALUE (TREE_VALUE (fnspec));
+	      curlen = TREE_STRING_LENGTH (fnspecstr);
+	      memcpy (nspec, TREE_STRING_POINTER (fnspecstr), curlen);
+	      if (!(flags & (ECF_CONST | ECF_PURE | ECF_NOVOPS))
+		  && curlen >= 2
+		  && nspec[1] != 'c' && nspec[1] != 'C'
+		  && nspec[1] != 'p' && nspec[1] != 'P')
+		no_writes_p = false;
+	    }
+	  if (!curlen)
+	    {
+	      nspec[curlen++] = '.';
+	      nspec[curlen++] = ((flags & ECF_CONST)
+				 ? 'c'
+				 : (flags & ECF_PURE)
+				 ? 'p'
+				 : ' ');
+	    }
+	  while (curlen < tgtlen - 2 * xargs)
+	    {
+	      nspec[curlen++] = '.';
+	      nspec[curlen++] = ' ';
+	    }
+
+	  /* These extra args are unlikely to be present in const or pure
+	     functions.  It's conceivable that a function that takes variable
+	     arguments, or that passes its arguments on to another function,
+	     could be const or pure, but it would not modify the arguments, and,
+	     being pure or const, it couldn't possibly modify or even access
+	     memory referenced by them.  But it can read from these internal
+	     data structures created by the wrapper, and from any
+	     argument-passing memory referenced by them, so we denote the
+	     possibility of reading from multiple levels of indirection, but
+	     only of reading because const/pure.  */
+	  if (apply_args)
+	    {
+	      nspec[curlen++] = 'r';
+	      nspec[curlen++] = ' ';
+	    }
+	  if (is_stdarg)
+	    {
+	      nspec[curlen++] = (no_writes_p ? 'r' : '.');
+	      nspec[curlen++] = (no_writes_p ? 't' : ' ');
+	    }
+
+	  nspec[curlen++] = 'W';
+	  nspec[curlen++] = 't';
+
+	  /* The type has already been copied before adding parameters.  */
+	  gcc_checking_assert (TYPE_ARG_TYPES (TREE_TYPE (nnode->decl))
+			       != TYPE_ARG_TYPES (TREE_TYPE (onode->decl)));
+	  TYPE_ATTRIBUTES (TREE_TYPE (nnode->decl))
+	    = tree_cons (get_identifier ("fn spec"),
+			 build_tree_list (NULL_TREE,
+					  build_string (tgtlen, nspec)),
+			 TYPE_ATTRIBUTES (TREE_TYPE (nnode->decl)));
+	}
+    }
+#endif
+
+    {
+      tree decl = onode->decl;
+      cgraph_node *target = nnode;
+
+      { // copied from create_wrapper
+
+	/* Preserve DECL_RESULT so we get right by reference flag.  */
+	tree decl_result = DECL_RESULT (decl);
+
+	/* Remove the function's body but keep arguments to be reused
+	   for thunk.  */
+	onode->release_body (true);
+	onode->reset ();
+
+	DECL_UNINLINABLE (decl) = false;
+	DECL_RESULT (decl) = decl_result;
+	DECL_INITIAL (decl) = NULL;
+	allocate_struct_function (decl, false);
+	set_cfun (NULL);
+
+	/* Turn alias into thunk and expand it into GIMPLE representation.  */
+	onode->definition = true;
+
+#if FOR_GCC_11P
+	thunk_info::get_create (onode);
+	onode->thunk = true;
+#else
+	memset (&onode->thunk, 0, sizeof (cgraph_thunk_info));
+	onode->thunk.thunk_p = true;
+	onode->thunk.alias = target->decl;
+#endif
+#if !IMPLICIT_CGRAPH_EDGES
+	onode->create_edge (target, NULL, onode->count);
+#endif
+	onode->callees->can_throw_external = !TREE_NOTHROW (target->decl);
+
+	tree arguments = DECL_ARGUMENTS (decl);
+
+	while (arguments)
+	  {
+	    TREE_ADDRESSABLE (arguments) = false;
+	    arguments = TREE_CHAIN (arguments);
+	  }
+
+	{
+	  tree alias = onode->callees->callee->decl;
+	  tree thunk_fndecl = decl;
+	  tree a;
+
+	  int nxargs = 1 + is_stdarg + apply_args;
+
+	  { // Simplified from expand_thunk.
+	    tree restype;
+	    basic_block bb, then_bb, else_bb, return_bb;
+	    gimple_stmt_iterator bsi;
+	    int nargs = 0;
+	    tree arg;
+	    int i;
+	    tree resdecl;
+	    tree restmp = NULL;
+
+	    gcall *call;
+	    greturn *ret;
+	    bool alias_is_noreturn = TREE_THIS_VOLATILE (alias);
+
+	    a = DECL_ARGUMENTS (thunk_fndecl);
+
+	    current_function_decl = thunk_fndecl;
+
+#if FOR_GCC_11P
+	    /* Ensure thunks are emitted in their correct sections.  */
+	    resolve_unique_section (thunk_fndecl, 0,
+				    flag_function_sections);
+#endif
+
+	    bitmap_obstack_initialize (NULL);
+
+	    /* Build the return declaration for the function.  */
+	    restype = TREE_TYPE (TREE_TYPE (thunk_fndecl));
+	    if (DECL_RESULT (thunk_fndecl) == NULL_TREE)
+	      {
+		resdecl = build_decl (input_location, RESULT_DECL, 0, restype);
+		DECL_ARTIFICIAL (resdecl) = 1;
+		DECL_IGNORED_P (resdecl) = 1;
+		DECL_CONTEXT (resdecl) = thunk_fndecl;
+		DECL_RESULT (thunk_fndecl) = resdecl;
+	      }
+	    else
+	      resdecl = DECL_RESULT (thunk_fndecl);
+
+	    profile_count cfg_count = onode->count;
+	    if (!cfg_count.initialized_p ())
+	      cfg_count = profile_count::from_gcov_type (BB_FREQ_MAX).guessed_local ();
+
+	    bb = then_bb = else_bb = return_bb
+	      = init_lowered_empty_function (thunk_fndecl, true, cfg_count);
+
+	    bsi = gsi_start_bb (bb);
+
+	    /* Build call to the function being thunked.  */
+	    if (!VOID_TYPE_P (restype)
+		&& (!alias_is_noreturn
+		    || TREE_ADDRESSABLE (restype)
+		    || TREE_CODE (TYPE_SIZE_UNIT (restype)) != INTEGER_CST))
+	      {
+		if (DECL_BY_REFERENCE (resdecl))
+		  {
+		    restmp = gimple_fold_indirect_ref (resdecl);
+		    if (!restmp)
+		      restmp = build2 (MEM_REF,
+				       TREE_TYPE (TREE_TYPE (resdecl)),
+				       resdecl,
+				       build_int_cst (TREE_TYPE (resdecl), 0));
+		  }
+		else if (!is_gimple_reg_type (restype))
+		  {
+		    if (aggregate_value_p (resdecl, TREE_TYPE (thunk_fndecl)))
+		      {
+			restmp = resdecl;
+
+			if (VAR_P (restmp))
+			  {
+			    add_local_decl (cfun, restmp);
+			    BLOCK_VARS (DECL_INITIAL (current_function_decl))
+			      = restmp;
+			  }
+		      }
+		    else
+		      restmp = create_tmp_var (restype, "retval");
+		  }
+		else
+		  restmp = create_tmp_reg (restype, "retval");
+	      }
+
+	    for (arg = a; arg; arg = DECL_CHAIN (arg))
+	      nargs++;
+	    auto_vec<tree> vargs (nargs + nxargs);
+	    i = 0;
+	    arg = a;
+
+	    if (nargs)
+	      for (tree nparm = DECL_ARGUMENTS (nnode->decl);
+		   i < nargs;
+		   i++, arg = DECL_CHAIN (arg), nparm = DECL_CHAIN (nparm))
+		{
+		  tree save_arg = arg;
+		  tree tmp = arg;
+
+		  /* Arrange to pass indirectly the parms, if we decided to do
+		     so, and revert its type in the wrapper.  */
+		  if (indirect_nparms.contains (nparm))
+		    {
+		      tree ref_type = TREE_TYPE (nparm);
+		      TREE_ADDRESSABLE (arg) = true;
+		      tree addr = build1 (ADDR_EXPR, ref_type, arg);
+		      tmp = arg = addr;
+		    }
+#if ! FOR_GCC_11P
+		  else if (VECTOR_TYPE_P (TREE_TYPE (arg))
+			   || TREE_CODE (TREE_TYPE (arg)) == COMPLEX_TYPE)
+		    DECL_GIMPLE_REG_P (arg) = 1;
+#else
+		  else
+		    DECL_NOT_GIMPLE_REG_P (arg) = 0;
+#endif
+
+		  /* Convert the argument back to the type used by the calling
+		     conventions, e.g. a non-prototyped float type is passed as
+		     double, as in 930603-1.c, and needs to be converted back to
+		     double to be passed on unchanged to the wrapped
+		     function.  */
+		  if (TREE_TYPE (nparm) != DECL_ARG_TYPE (nparm))
+		    arg = fold_convert (DECL_ARG_TYPE (nparm), arg);
+
+		  if (!is_gimple_val (arg))
+		    {
+		      tmp = create_tmp_reg (TYPE_MAIN_VARIANT
+					    (TREE_TYPE (arg)), "arg");
+		      gimple *stmt = gimple_build_assign (tmp, arg);
+		      gsi_insert_after (&bsi, stmt, GSI_NEW_STMT);
+		    }
+		  vargs.quick_push (tmp);
+		  arg = save_arg;
+		}
+	    /* These strub arguments are adjusted later.  */
+	    if (apply_args)
+	      vargs.quick_push (null_pointer_node);
+	    if (is_stdarg)
+	      vargs.quick_push (null_pointer_node);
+	    vargs.quick_push (null_pointer_node);
+	    call = gimple_build_call_vec (build_fold_addr_expr_loc (0, alias),
+					  vargs);
+	    onode->callees->call_stmt = call;
+	    // gimple_call_set_from_thunk (call, true);
+	    if (DECL_STATIC_CHAIN (alias))
+	      {
+		tree p = DECL_STRUCT_FUNCTION (alias)->static_chain_decl;
+		tree type = TREE_TYPE (p);
+		tree decl = build_decl (DECL_SOURCE_LOCATION (thunk_fndecl),
+					PARM_DECL, create_tmp_var_name ("CHAIN"),
+					type);
+		DECL_ARTIFICIAL (decl) = 1;
+		DECL_IGNORED_P (decl) = 1;
+		TREE_USED (decl) = 1;
+		DECL_CONTEXT (decl) = thunk_fndecl;
+		DECL_ARG_TYPE (decl) = type;
+		TREE_READONLY (decl) = 1;
+
+		struct function *sf = DECL_STRUCT_FUNCTION (thunk_fndecl);
+		sf->static_chain_decl = decl;
+
+		gimple_call_set_chain (call, decl);
+	      }
+
+	    /* Return slot optimization is always possible and in fact required to
+	       return values with DECL_BY_REFERENCE.  */
+	    if (aggregate_value_p (resdecl, TREE_TYPE (thunk_fndecl))
+		&& (!is_gimple_reg_type (TREE_TYPE (resdecl))
+		    || DECL_BY_REFERENCE (resdecl)))
+	      gimple_call_set_return_slot_opt (call, true);
+
+	    if (restmp)
+	      {
+		gimple_call_set_lhs (call, restmp);
+		gcc_assert (useless_type_conversion_p (TREE_TYPE (restmp),
+						       TREE_TYPE (TREE_TYPE (alias))));
+	      }
+	    gsi_insert_after (&bsi, call, GSI_NEW_STMT);
+	    if (!alias_is_noreturn)
+	      {
+		/* Build return value.  */
+		if (!DECL_BY_REFERENCE (resdecl))
+		  ret = gimple_build_return (restmp);
+		else
+		  ret = gimple_build_return (resdecl);
+
+		gsi_insert_after (&bsi, ret, GSI_NEW_STMT);
+	      }
+	    else
+	      {
+		remove_edge (single_succ_edge (bb));
+	      }
+
+	    cfun->gimple_df->in_ssa_p = true;
+	    update_max_bb_count ();
+	    profile_status_for_fn (cfun)
+	      = cfg_count.initialized_p () && cfg_count.ipa_p ()
+	      ? PROFILE_READ : PROFILE_GUESSED;
+#if FOR_GCC_11P
+	    /* FIXME: C++ FE should stop setting TREE_ASM_WRITTEN on thunks.  */
+	    // TREE_ASM_WRITTEN (thunk_fndecl) = false;
+#endif
+	    delete_unreachable_blocks ();
+	    update_ssa (TODO_update_ssa);
+	    checking_verify_flow_info ();
+	    free_dominance_info (CDI_DOMINATORS);
+
+	    /* Since we want to emit the thunk, we explicitly mark its name as
+	       referenced.  */
+#if FOR_GCC_11P
+	    onode->thunk = false;
+#else
+	    onode->thunk.thunk_p = false;
+#endif
+	    onode->lowered = true;
+	    bitmap_obstack_release (NULL);
+	  }
+	  current_function_decl = NULL;
+	  set_cfun (NULL);
+	}
+
+#if FOR_GCC_11P
+	thunk_info::remove (onode);
+#endif
+
+	// some more of create_wrapper at the end of the next block.
+      }
+    }
+
+    {
+      tree aaval = NULL_TREE;
+      tree vaptr = NULL_TREE;
+      tree wmptr = NULL_TREE;
+      for (tree arg = DECL_ARGUMENTS (nnode->decl); arg; arg = DECL_CHAIN (arg))
+	{
+	  aaval = vaptr;
+	  vaptr = wmptr;
+	  wmptr = arg;
+	}
+
+      if (!apply_args)
+	aaval = NULL_TREE;
+      /* The trailing args are [apply_args], [va_list_ptr], and watermark.
+	 If we don't have a va_list_ptr, the penultimate argument is
+	 apply_args.  */
+      else if (!is_stdarg)
+	aaval = vaptr;
+
+      if (!is_stdarg)
+	vaptr = NULL_TREE;
+
+      DECL_NAME (wmptr) = get_watermark_ptr ();
+      DECL_ARTIFICIAL (wmptr) = 1;
+      DECL_IGNORED_P (wmptr) = 1;
+      TREE_USED (wmptr) = 1;
+
+      if (is_stdarg)
+	{
+	  DECL_NAME (vaptr) = get_va_list_ptr ();
+	  DECL_ARTIFICIAL (vaptr) = 1;
+	  DECL_IGNORED_P (vaptr) = 1;
+	  TREE_USED (vaptr) = 1;
+	}
+
+      if (apply_args)
+	{
+	  DECL_NAME (aaval) = get_apply_args ();
+	  DECL_ARTIFICIAL (aaval) = 1;
+	  DECL_IGNORED_P (aaval) = 1;
+	  TREE_USED (aaval) = 1;
+	}
+
+      push_cfun (DECL_STRUCT_FUNCTION (nnode->decl));
+
+      {
+	edge e = single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun));
+	gimple_seq seq = call_update_watermark (wmptr, nnode, e->src->count);
+	gsi_insert_seq_on_edge_immediate (e, seq);
+      }
+
+      bool any_indirect = !indirect_nparms.is_empty ();
+
+      if (any_indirect)
+	{
+	  basic_block bb;
+	  bool needs_commit = false;
+	  FOR_EACH_BB_FN (bb, cfun)
+	    {
+	      for (gphi_iterator gsi = gsi_start_nonvirtual_phis (bb);
+		   !gsi_end_p (gsi);
+		   gsi_next_nonvirtual_phi (&gsi))
+		{
+		  gphi *stmt = gsi.phi ();
+
+		  walk_stmt_info wi = {};
+		  wi.info = &indirect_nparms;
+		  walk_gimple_op (stmt, walk_make_indirect, &wi);
+		  if (wi.changed && !is_gimple_debug (gsi_stmt (gsi)))
+		    if (walk_regimplify_phi (stmt))
+		      needs_commit = true;
+		}
+
+	      for (gimple_stmt_iterator gsi = gsi_start_bb (bb);
+		   !gsi_end_p (gsi); gsi_next (&gsi))
+		{
+		  gimple *stmt = gsi_stmt (gsi);
+
+		  walk_stmt_info wi = {};
+		  wi.info = &indirect_nparms;
+		  walk_gimple_op (stmt, walk_make_indirect, &wi);
+		  if (wi.changed)
+		    {
+		      if (!is_gimple_debug (stmt))
+			{
+			  wi.info = &gsi;
+			  walk_gimple_op (stmt, walk_regimplify_addr_expr,
+					  &wi);
+			}
+		      update_stmt (stmt);
+		    }
+		}
+	    }
+	  if (needs_commit)
+	    gsi_commit_edge_inserts ();
+	}
+
+      if (DECL_STRUCT_FUNCTION (nnode->decl)->calls_alloca
+	  || is_stdarg || apply_args)
+	for (cgraph_edge *e = nnode->callees, *enext; e; e = enext)
+	  {
+	    gcall *call = e->call_stmt;
+	    gimple_stmt_iterator gsi = gsi_for_stmt (call);
+	    tree fndecl = e->callee->decl;
+
+	    enext = e->next_callee;
+
+	    if (gimple_alloca_call_p (call))
+	      {
+		gimple_seq seq = call_update_watermark (wmptr, NULL,
+							gsi_bb (gsi)->count);
+		gsi_insert_finally_seq_after_call (gsi, seq);
+	      }
+	    else if (fndecl && is_stdarg
+		     && fndecl_built_in_p (fndecl, BUILT_IN_VA_START))
+	      {
+		/* Using a non-default stdarg ABI makes the function ineligible
+		   for internal strub.  */
+		gcc_checking_assert (builtin_decl_explicit (BUILT_IN_VA_START)
+				     == fndecl);
+		tree bvacopy = builtin_decl_explicit (BUILT_IN_VA_COPY);
+		gimple_call_set_fndecl (call, bvacopy);
+		tree arg = vaptr;
+		/* The va_copy source must be dereferenced, unless it's an array
+		   type, which would have decayed to a pointer.  */
+		if (TREE_CODE (TREE_TYPE (TREE_TYPE (vaptr))) != ARRAY_TYPE)
+		  {
+		    arg = gimple_fold_indirect_ref (vaptr);
+		    if (!arg)
+		      arg = build2 (MEM_REF,
+				    TREE_TYPE (TREE_TYPE (vaptr)),
+				    vaptr,
+				    build_int_cst (TREE_TYPE (vaptr), 0));
+		  }
+		gimple_call_set_arg (call, 1, arg);
+		update_stmt (call);
+		e->redirect_callee (cgraph_node::get_create (bvacopy));
+	      }
+	    else if (fndecl && apply_args
+		     && fndecl_built_in_p (fndecl, BUILT_IN_APPLY_ARGS))
+	      {
+		tree lhs = gimple_call_lhs (call);
+		gimple *assign = (lhs
+				  ? gimple_build_assign (lhs, aaval)
+				  : gimple_build_nop ());
+		gsi_replace (&gsi, assign, true);
+		cgraph_edge::remove (e);
+	      }
+	  }
+
+      { // a little more copied from create_wrapper
+
+	/* Inline summary set-up.  */
+	nnode->analyze ();
+	// inline_analyze_function (nnode);
+      }
+
+      pop_cfun ();
+    }
+
+    {
+      push_cfun (DECL_STRUCT_FUNCTION (onode->decl));
+      gimple_stmt_iterator gsi
+	= gsi_after_labels (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
+
+      gcall *wrcall;
+      while (!(wrcall = dyn_cast <gcall *> (gsi_stmt (gsi))))
+	gsi_next (&gsi);
+
+      tree swm = create_tmp_var (get_wmt (), ".strub.watermark");
+      TREE_ADDRESSABLE (swm) = true;
+      tree swmp = build1 (ADDR_EXPR, get_pwmt (), swm);
+
+      tree enter = get_enter ();
+      gcall *stptr = gimple_build_call (enter, 1, unshare_expr (swmp));
+      gimple_set_location (stptr, gimple_location (wrcall));
+      gsi_insert_before (&gsi, stptr, GSI_SAME_STMT);
+#if !IMPLICIT_CGRAPH_EDGES
+      onode->create_edge (cgraph_node::get_create (enter),
+			  stptr, gsi_bb (gsi)->count, false);
+#endif
+
+      int nargs = gimple_call_num_args (wrcall);
+
+      gimple_seq seq = NULL;
+
+      if (apply_args)
+	{
+	  tree aalst = create_tmp_var (ptr_type_node, ".strub.apply_args");
+	  tree bappargs = builtin_decl_explicit (BUILT_IN_APPLY_ARGS);
+	  gcall *appargs = gimple_build_call (bappargs, 0);
+	  gimple_call_set_lhs (appargs, aalst);
+	  gimple_set_location (appargs, gimple_location (wrcall));
+	  gsi_insert_before (&gsi, appargs, GSI_SAME_STMT);
+	  gimple_call_set_arg (wrcall, nargs - 2 - is_stdarg, aalst);
+#if !IMPLICIT_CGRAPH_EDGES
+	  onode->create_edge (cgraph_node::get_create (bappargs),
+			      appargs, gsi_bb (gsi)->count, false);
+#endif
+	}
+
+      if (is_stdarg)
+	{
+	  tree valst = create_tmp_var (va_list_type_node, ".strub.va_list");
+	  TREE_ADDRESSABLE (valst) = true;
+	  tree vaptr = build1 (ADDR_EXPR,
+			       build_pointer_type (va_list_type_node),
+			       valst);
+	  gimple_call_set_arg (wrcall, nargs - 2, unshare_expr (vaptr));
+
+	  tree bvastart = builtin_decl_explicit (BUILT_IN_VA_START);
+	  gcall *vastart = gimple_build_call (bvastart, 2,
+					      unshare_expr (vaptr),
+					      integer_zero_node);
+	  gimple_set_location (vastart, gimple_location (wrcall));
+	  gsi_insert_before (&gsi, vastart, GSI_SAME_STMT);
+#if !IMPLICIT_CGRAPH_EDGES
+	  onode->create_edge (cgraph_node::get_create (bvastart),
+			      vastart, gsi_bb (gsi)->count, false);
+#endif
+
+	  tree bvaend = builtin_decl_explicit (BUILT_IN_VA_END);
+	  gcall *vaend = gimple_build_call (bvaend, 1, unshare_expr (vaptr));
+	  gimple_set_location (vaend, gimple_location (wrcall));
+	  gimple_seq_add_stmt (&seq, vaend);
+	}
+
+      gimple_call_set_arg (wrcall, nargs - 1, unshare_expr (swmp));
+      // gimple_call_set_tail (wrcall, false);
+      update_stmt (wrcall);
+
+      {
+#if !ATTR_FNSPEC_DECONST_WATERMARK
+	/* If the call will be assumed not to modify or even read the
+	   watermark, read and modify it ourselves, with empty inline
+	   asms, so that the builtins' effects on it are not optimized
+	   away.  */

+	if ((gimple_call_flags (wrcall)
+	     & (ECF_CONST | ECF_PURE | ECF_NOVOPS)))
+	  {
+	    vec<tree, va_gc> *inputs = NULL;
+	    vec<tree, va_gc> *outputs = NULL;
+	    vec_safe_push (outputs,
+			   build_tree_list
+			   (build_tree_list
+			    (NULL_TREE, build_string (2, "=m")),
+			    swm));
+	    vec_safe_push (inputs,
+			   build_tree_list
+			   (build_tree_list
+			    (NULL_TREE, build_string (1, "m")),
+			    swm));
+	    gasm *forcemod = gimple_build_asm_vec ("", inputs, outputs,
+						   NULL, NULL);
+	    gimple_seq_add_stmt (&seq, forcemod);
+
+	    /* If the call will be assumed not to even read the
+	       watermark, force it out to memory before the call.  */
+	    if ((gimple_call_flags (wrcall) & ECF_CONST))
+	      {
+		vec<tree, va_gc> *inputs = NULL;
+		vec_safe_push (inputs,
+			       build_tree_list
+			       (build_tree_list
+				(NULL_TREE, build_string (1, "m")),
+				swm));
+		gasm *force_store = gimple_build_asm_vec ("", inputs, NULL,
+							  NULL, NULL);
+		gimple_set_location (force_store, gimple_location (wrcall));
+		gsi_insert_before (&gsi, force_store, GSI_SAME_STMT);
+	      }
+	  }
+#endif
+
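+	/* Clarifying note: after the call, have __strub_leave scrub the
+	   stack used by the wrapped body, delimited by the watermark,
+	   then clobber the watermark variable itself.  */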
+	gcall *sleave = gimple_build_call (get_leave (), 1,
+					   unshare_expr (swmp));
+	gimple_seq_add_stmt (&seq, sleave);
+
+	gassign *clobber = gimple_build_assign (swm,
+						build_clobber
+						(TREE_TYPE (swm)));
+	gimple_seq_add_stmt (&seq, clobber);
+      }
+
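+      /* Clarifying note: insert the sequence built above -- va_end, the
+	 asm statements, if any, __strub_leave and the watermark clobber
+	 -- so that it runs whether the call returns normally or
+	 propagates an exception.  */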
+      gsi_insert_finally_seq_after_call (gsi, seq);
+
+      /* For nnode, we don't rebuild edges, because we wish to retain
+	 any redirections copied to it from earlier passes, so we add
+	 call graph edges explicitly there.  For onode, however, we
+	 create a fresh function, so we may as well just issue the
+	 calls and then rebuild all cgraph edges.  */
+      // cgraph_edge::rebuild_edges ();
+      onode->analyze ();
+      // inline_analyze_function (onode);
+
+      pop_cfun ();
+    }
+  }
+
+  return 0;
+}
+
+simple_ipa_opt_pass *
+make_pass_ipa_strub (gcc::context *ctxt)
+{
+  return new pass_ipa_strub (ctxt);
+}
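
For reference, here is a rough C-level sketch of the net effect of the
wrapping performed above on an internal-strub function f.  This is
hand-written pseudo-C rather than actual compiler output: the clone and
variable names (f.strub.0, watermark) are illustrative, the apply_args
and stdarg extras are omitted, and GIMPLE's EH-protected finally
sequence is rendered as try/finally:

  static int
  f.strub.0 (int i, void **watermark_ptr)  /* the wrapped clone */
  {
    /* ... original body ... */
  }

  int
  f (int i)  /* the original decl, now a wrapper */
  {
    void *watermark;
    int ret;

    __builtin___strub_enter (&watermark);
    try
      {
	ret = f.strub.0 (i, &watermark);
      }
    finally  /* runs on both normal and exceptional exits */
      {
	__builtin___strub_leave (&watermark);
	watermark = {CLOBBER};
      }
    return ret;
  }

make_pass_ipa_strub above is the instantiation point through which the
pass is scheduled in passes.def, adjusted earlier in this series.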

-- 
Alexandre Oliva, happy hacker                https://FSFLA.org/blogs/lxo/
   Free Software Activist                       GNU Toolchain Engineer
Disinformation flourishes because many people care deeply about injustice
but very few check the facts.  Ask me about <https://stallmansupport.org>
