public inbox for gcc-patches@gcc.gnu.org
From: Alexandre Oliva <oliva@adacore.com>
To: gcc-patches@gcc.gnu.org
Cc: Jeremy Bennett <jeremy.bennett@embecosm.com>,
	Craig Blackmore <craig.blackmore@embecosm.com>,
	Graham Markall <graham.markall@embecosm.com>,
	Martin Jambor <mjambor@suse.cz>, Jan Hubicka <hubicka@ucw.cz>,
	Richard Biener <richard.guenther@gmail.com>,
	Jim Wilson <wilson@tuliptree.org>
Subject: [PATCH v2 05/10] Introduce strub: builtins and runtime
Date: Fri, 29 Jul 2022 03:26:38 -0300	[thread overview]
Message-ID: <orh730mo2p.fsf_-_@lxoliva.fsfla.org> (raw)
In-Reply-To: <or35eko33q.fsf_-_@lxoliva.fsfla.org> (Alexandre Oliva's message of "Fri, 29 Jul 2022 03:16:41 -0300")


for  gcc/ChangeLog

	* builtins.def (BUILT_IN_STACK_ADDRESS): New.
	(BUILT_IN___STRUB_ENTER): New.
	(BUILT_IN___STRUB_UPDATE): New.
	(BUILT_IN___STRUB_LEAVE): New.
	* builtins.cc: Include ipa-strub.h.
	(STACK_TOPS, STACK_UNSIGNED): Define.
	(expand_builtin_stack_address): New.
	(expand_builtin_strub_enter): New.
	(expand_builtin_strub_update): New.
	(expand_builtin_strub_leave): New.
	(expand_builtin): Call them.

for  libgcc/ChangeLog

	* Makefile.in (LIB2ADD): Add strub.c.
	* libgcc2.h (__strub_enter, __strub_update, __strub_leave):
	Declare.
	* strub.c: New.
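
For context, the three __builtin___strub_* builtins pair up with the
libgcc entry points added below (__strub_enter, __strub_update,
__strub_leave): enter and leave run in the caller, update runs within
the strub context, and the watermark's address is passed to the callee.
Roughly, the instrumentation that the strub passes (introduced later in
this series) arrange for looks like the caller-side sketch below; the
function name `sensitive' and the exact argument passing are
illustrative only, not taken from the patch.

  /* Conceptual sketch only: the compiler inserts these calls; user
     code is not expected to write them by hand.  */
  void
  caller (void)
  {
    void *watermark;

    /* Record the caller's stack address in the watermark.  */
    __strub_enter (&watermark);
    /* The strub context calls __strub_update internally to push the
       watermark to its deepest stack use.  */
    sensitive (&watermark);
    /* Clear the stack between the watermark and the caller's frame.  */
    __strub_leave (&watermark);
  }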

diff --git a/gcc/builtins.cc b/gcc/builtins.cc
index b08b4365da36b..656186c308997 100644
--- a/gcc/builtins.cc
+++ b/gcc/builtins.cc
@@ -71,6 +71,7 @@ along with GCC; see the file COPYING3.  If not see
 #include "gimple-fold.h"
 #include "intl.h"
 #include "file-prefix-map.h" /* remap_macro_filename()  */
+#include "ipa-strub.h" /* strub_watermark_parm()  */
 #include "gomp-constants.h"
 #include "omp-general.h"
 #include "tree-dfa.h"
@@ -151,6 +152,7 @@ static rtx expand_builtin_strnlen (tree, rtx, machine_mode);
 static rtx expand_builtin_alloca (tree);
 static rtx expand_builtin_unop (machine_mode, tree, rtx, rtx, optab);
 static rtx expand_builtin_frame_address (tree, tree);
+static rtx expand_builtin_stack_address ();
 static tree stabilize_va_list_loc (location_t, tree, int);
 static rtx expand_builtin_expect (tree, rtx);
 static rtx expand_builtin_expect_with_probability (tree, rtx);
@@ -4968,6 +4970,256 @@ expand_builtin_frame_address (tree fndecl, tree exp)
     }
 }
 
+#ifndef STACK_GROWS_DOWNWARD
+# define STACK_TOPS GT
+#else
+# define STACK_TOPS LT
+#endif
+
+#ifdef POINTERS_EXTEND_UNSIGNED
+# define STACK_UNSIGNED POINTERS_EXTEND_UNSIGNED
+#else
+# define STACK_UNSIGNED true
+#endif
+
+/* Expand a call to builtin function __builtin_stack_address.  */
+
+static rtx
+expand_builtin_stack_address ()
+{
+  return convert_to_mode (ptr_mode, copy_to_reg (stack_pointer_rtx),
+			  STACK_UNSIGNED);
+}
+
+/* Expand a call to builtin function __builtin_strub_enter.  */
+
+static rtx
+expand_builtin_strub_enter (tree exp)
+{
+  if (!validate_arglist (exp, POINTER_TYPE, VOID_TYPE))
+    return NULL_RTX;
+
+  if (optimize < 1 || flag_no_inline)
+    return NULL_RTX;
+
+  rtx stktop = NULL_RTX;
+
+#if 1 || defined RED_ZONE_SIZE
+  if (tree wmptr = (optimize
+		    ? strub_watermark_parm (current_function_decl)
+		    : NULL_TREE))
+    {
+      tree wmtype = TREE_TYPE (TREE_TYPE (wmptr));
+      tree wmtree = fold_build2 (MEM_REF, wmtype, wmptr,
+				 build_int_cst (TREE_TYPE (wmptr), 0));
+      rtx wmark = expand_expr (wmtree, NULL_RTX, ptr_mode, EXPAND_MEMORY);
+      stktop = force_reg (ptr_mode, wmark);
+    }
+#endif
+
+  if (!stktop)
+    stktop = expand_builtin_stack_address ();
+
+  tree wmptr = CALL_EXPR_ARG (exp, 0);
+  tree wmtype = TREE_TYPE (TREE_TYPE (wmptr));
+  tree wmtree = fold_build2 (MEM_REF, wmtype, wmptr,
+			     build_int_cst (TREE_TYPE (wmptr), 0));
+  rtx wmark = expand_expr (wmtree, NULL_RTX, ptr_mode, EXPAND_MEMORY);
+
+  emit_move_insn (wmark, stktop);
+
+  return const0_rtx;
+}
+
+/* Expand a call to builtin function __builtin_strub_update.  */
+
+static rtx
+expand_builtin_strub_update (tree exp)
+{
+  if (!validate_arglist (exp, POINTER_TYPE, VOID_TYPE))
+    return NULL_RTX;
+
+  if (optimize < 2 || flag_no_inline)
+    return NULL_RTX;
+
+  rtx stktop = expand_builtin_stack_address ();
+
+#ifdef RED_ZONE_SIZE
+  /* Here's how the strub enter, update and leave functions deal with red zones.
+
+     If it weren't for red zones, update, called from within a strub context,
+     would bump the watermark to the top of the stack.  Enter and leave, running
+     in the caller, would use the caller's top of stack address both to
+     initialize the watermark passed to the callee, and to start strubbing the
+     stack afterwards.
+
+     Ideally, we'd update the watermark so as to cover the used amount of red
+     zone, and strub starting at the caller's other end of the (presumably
+     unused) red zone.  Normally, only leaf functions use the red zone, but at
+     this point we can't tell whether a function is a leaf, nor can we tell how
+     much of the red zone it uses.  Furthermore, some strub contexts may have
+     been inlined so that update and leave are called from the same stack frame,
+     and the strub builtins may all have been inlined, turning a strub function
+     into a leaf.
+
+     So cleaning the range from the caller's stack pointer (one end of the red
+     zone) to the (potentially inlined) callee's (other end of the) red zone
+     could scribble over the caller's own red zone.
+
+     We avoid this possibility by arranging for callers that are strub contexts
+     to use their own watermark as the strub starting point.  So, if A calls B,
+     and B calls C, B will tell A to strub up to the end of B's red zone, and
+     will strub itself only the part of C's stack frame and red zone that
+     doesn't overlap with B's.  With that, we don't need to know who's leaf and
+     who isn't: inlined calls will shrink their strub window to zero, each
+     remaining call will strub some portion of the stack, and eventually the
+     strub context will return to a caller that isn't a strub context itself,
+     that will therefore use its own stack pointer as the strub starting point.
+     It's not a leaf, because strub contexts can't be inlined into non-strub
+     contexts, so it doesn't use the red zone, and it will therefore correctly
+     strub up the callee's stack frame up to the end of the callee's red zone.
+     Neat!  */
+  if (true /* (flags_from_decl_or_type (current_function_decl) & ECF_LEAF) */)
+    {
+      poly_int64 red_zone_size = RED_ZONE_SIZE;
+#if STACK_GROWS_DOWNWARD
+      red_zone_size = -red_zone_size;
+#endif
+      stktop = plus_constant (ptr_mode, stktop, red_zone_size);
+      stktop = force_reg (ptr_mode, stktop);
+    }
+#endif
+
+  tree wmptr = CALL_EXPR_ARG (exp, 0);
+  tree wmtype = TREE_TYPE (TREE_TYPE (wmptr));
+  tree wmtree = fold_build2 (MEM_REF, wmtype, wmptr,
+			     build_int_cst (TREE_TYPE (wmptr), 0));
+  rtx wmark = expand_expr (wmtree, NULL_RTX, ptr_mode, EXPAND_MEMORY);
+
+  rtx wmarkr = force_reg (ptr_mode, wmark);
+
+  rtx_code_label *lab = gen_label_rtx ();
+  do_compare_rtx_and_jump (stktop, wmarkr, STACK_TOPS, STACK_UNSIGNED,
+			   ptr_mode, NULL_RTX, lab, NULL,
+			   profile_probability::very_likely ());
+  emit_move_insn (wmark, stktop);
+
+#if 1 || defined RED_ZONE_SIZE
+  /* If this is an inlined strub function, also bump the watermark for the
+     enclosing function.  This avoids a problem with the following scenario: A
+     calls B and B calls C, and both B and C get inlined into A.  B allocates
+     temporary stack space before calling C.  If we don't update A's watermark,
+     we may use an outdated baseline for the post-C strub_leave, erasing B's
+     temporary stack allocation.  We only need this if we're fully expanding
+     strub_leave inline.  */
+  tree xwmptr = (optimize > 2
+		 ? strub_watermark_parm (current_function_decl)
+		 : wmptr);
+  if (wmptr != xwmptr)
+    {
+      wmptr = xwmptr;
+      wmtype = TREE_TYPE (TREE_TYPE (wmptr));
+      wmtree = fold_build2 (MEM_REF, wmtype, wmptr,
+			    build_int_cst (TREE_TYPE (wmptr), 0));
+      wmark = expand_expr (wmtree, NULL_RTX, ptr_mode, EXPAND_MEMORY);
+      wmarkr = force_reg (ptr_mode, wmark);
+
+      do_compare_rtx_and_jump (stktop, wmarkr, STACK_TOPS, STACK_UNSIGNED,
+			       ptr_mode, NULL_RTX, lab, NULL,
+			       profile_probability::very_likely ());
+      emit_move_insn (wmark, stktop);
+    }
+#endif
+
+  emit_label (lab);
+
+  return const0_rtx;
+}
+
+
+/* Expand a call to builtin function __builtin_strub_leave.  */
+
+static rtx
+expand_builtin_strub_leave (tree exp)
+{
+  if (!validate_arglist (exp, POINTER_TYPE, VOID_TYPE))
+    return NULL_RTX;
+
+  if (optimize < 2 || optimize_size || flag_no_inline)
+    return NULL_RTX;
+
+  rtx stktop = NULL_RTX;
+
+#if 1 || defined RED_ZONE_SIZE
+  if (tree wmptr = (optimize
+		    ? strub_watermark_parm (current_function_decl)
+		    : NULL_TREE))
+    {
+      tree wmtype = TREE_TYPE (TREE_TYPE (wmptr));
+      tree wmtree = fold_build2 (MEM_REF, wmtype, wmptr,
+				 build_int_cst (TREE_TYPE (wmptr), 0));
+      rtx wmark = expand_expr (wmtree, NULL_RTX, ptr_mode, EXPAND_MEMORY);
+      stktop = force_reg (ptr_mode, wmark);
+    }
+#endif
+
+  if (!stktop)
+    stktop = expand_builtin_stack_address ();
+
+  tree wmptr = CALL_EXPR_ARG (exp, 0);
+  tree wmtype = TREE_TYPE (TREE_TYPE (wmptr));
+  tree wmtree = fold_build2 (MEM_REF, wmtype, wmptr,
+			     build_int_cst (TREE_TYPE (wmptr), 0));
+  rtx wmark = expand_expr (wmtree, NULL_RTX, ptr_mode, EXPAND_MEMORY);
+
+  rtx wmarkr = force_reg (ptr_mode, wmark);
+
+#ifndef STACK_GROWS_DOWNWARD
+  rtx base = stktop;
+  rtx end = wmarkr;
+#else
+  rtx base = wmarkr;
+  rtx end = stktop;
+#endif
+
+  /* We're going to modify it, so make sure it's not e.g. the stack pointer.  */
+  base = copy_to_reg (base);
+
+  rtx_code_label *done = gen_label_rtx ();
+  do_compare_rtx_and_jump (base, end, LT, STACK_UNSIGNED,
+			   ptr_mode, NULL_RTX, done, NULL,
+			   profile_probability::very_likely ());
+
+  if (optimize < 3)
+    expand_call (exp, NULL_RTX, true);
+  else
+    {
+      /* Ok, now we've determined we want to copy the block, so convert the
+	 addresses to Pmode, as needed to dereference them to access ptr_mode
+	 memory locations, so that we don't have to convert anything within the
+	 loop.  */
+      base = memory_address (ptr_mode, base);
+      end = memory_address (ptr_mode, end);
+
+      rtx zero = force_operand (const0_rtx, NULL_RTX);
+      int ulen = GET_MODE_SIZE (ptr_mode);
+      rtx incr = plus_constant (Pmode, base, ulen);
+      rtx dstm = gen_rtx_MEM (ptr_mode, base);
+
+      rtx_code_label *loop = gen_label_rtx ();
+      emit_label (loop);
+      emit_move_insn (dstm, zero);
+      emit_move_insn (base, force_operand (incr, NULL_RTX));
+      do_compare_rtx_and_jump (base, end, LT, STACK_UNSIGNED,
+			       Pmode, NULL_RTX, NULL, loop,
+			       profile_probability::very_likely ());
+    }
+
+  emit_label (done);
+
+  return const0_rtx;
+}
+
 /* Expand EXP, a call to the alloca builtin.  Return NULL_RTX if we
    failed and the caller should emit a normal call.  */
 
@@ -7263,6 +7515,27 @@ expand_builtin (tree exp, rtx target, rtx subtarget, machine_mode mode,
     case BUILT_IN_RETURN_ADDRESS:
       return expand_builtin_frame_address (fndecl, exp);
 
+    case BUILT_IN_STACK_ADDRESS:
+      return expand_builtin_stack_address ();
+
+    case BUILT_IN___STRUB_ENTER:
+      target = expand_builtin_strub_enter (exp);
+      if (target)
+	return target;
+      break;
+
+    case BUILT_IN___STRUB_UPDATE:
+      target = expand_builtin_strub_update (exp);
+      if (target)
+	return target;
+      break;
+
+    case BUILT_IN___STRUB_LEAVE:
+      target = expand_builtin_strub_leave (exp);
+      if (target)
+	return target;
+      break;
+
     /* Returns the address of the area where the structure is returned.
        0 otherwise.  */
     case BUILT_IN_AGGREGATE_INCOMING_ADDRESS:
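
(For reference, a rough C rendition of what expand_builtin_strub_update
emits inline at -O2 and above, assuming a downward-growing stack with a
red zone; the actual output is RTL, and when the update is inlined into
an enclosing strub function, -O3 emits a second, identical
compare-and-store for that function's watermark as well.)

  void *top = (char *) __builtin_stack_address () - RED_ZONE_SIZE;
  if (top < *watermark)   /* STACK_TOPS is LT when the stack grows down  */
    *watermark = top;     /* only ever move the watermark deeper  */
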
diff --git a/gcc/builtins.def b/gcc/builtins.def
index 005976f34e913..98763df73da8c 100644
--- a/gcc/builtins.def
+++ b/gcc/builtins.def
@@ -874,6 +874,10 @@ DEF_EXT_LIB_BUILTIN    (BUILT_IN_FFSL, "ffsl", BT_FN_INT_LONG, ATTR_CONST_NOTHRO
 DEF_EXT_LIB_BUILTIN    (BUILT_IN_FFSLL, "ffsll", BT_FN_INT_LONGLONG, ATTR_CONST_NOTHROW_LEAF_LIST)
 DEF_EXT_LIB_BUILTIN        (BUILT_IN_FORK, "fork", BT_FN_PID, ATTR_NOTHROW_LIST)
 DEF_GCC_BUILTIN        (BUILT_IN_FRAME_ADDRESS, "frame_address", BT_FN_PTR_UINT, ATTR_NULL)
+DEF_GCC_BUILTIN        (BUILT_IN_STACK_ADDRESS, "stack_address", BT_FN_PTR, ATTR_NULL)
+DEF_BUILTIN_STUB       (BUILT_IN___STRUB_ENTER, "__builtin___strub_enter")
+DEF_BUILTIN_STUB       (BUILT_IN___STRUB_UPDATE, "__builtin___strub_update")
+DEF_BUILTIN_STUB       (BUILT_IN___STRUB_LEAVE, "__builtin___strub_leave")
 /* [trans-mem]: Adjust BUILT_IN_TM_FREE if BUILT_IN_FREE is changed.  */
 DEF_LIB_BUILTIN        (BUILT_IN_FREE, "free", BT_FN_VOID_PTR, ATTR_NOTHROW_LEAF_LIST)
 DEF_GCC_BUILTIN        (BUILT_IN_FROB_RETURN_ADDR, "frob_return_addr", BT_FN_PTR_PTR, ATTR_NULL)
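
(__builtin_stack_address takes no arguments and returns the current
stack pointer as a void *.  A hypothetical illustration, not part of
the patch: readings taken in nested frames approximate stack depth on
a downward-growing stack.)

  #include <stddef.h>

  static void *outer_sp;
  static ptrdiff_t depth;

  /* noinline keeps the two readings in distinct frames.  */
  __attribute__ ((noinline)) void
  inner (void)
  {
    /* Approximates the stack used between outer and inner.  */
    depth = (char *) outer_sp - (char *) __builtin_stack_address ();
  }

  void
  outer (void)
  {
    outer_sp = __builtin_stack_address ();
    inner ();
  }
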
diff --git a/libgcc/Makefile.in b/libgcc/Makefile.in
index 1fe708a93f731..114076ad0a3f3 100644
--- a/libgcc/Makefile.in
+++ b/libgcc/Makefile.in
@@ -430,6 +430,9 @@ endif
 
 LIB2ADD += enable-execute-stack.c
 
+# Stack scrubbing infrastructure.
+LIB2ADD += $(srcdir)/strub.c
+
 # While emutls.c has nothing to do with EH, it is in LIB2ADDEH*
 # instead of LIB2ADD because that's the way to be sure on some targets
 # (e.g. *-*-darwin*) only one copy of it is linked.
diff --git a/libgcc/libgcc2.h b/libgcc/libgcc2.h
index fc24ac34502bc..c45973f18d23b 100644
--- a/libgcc/libgcc2.h
+++ b/libgcc/libgcc2.h
@@ -532,6 +532,10 @@ extern int __parityDI2 (UDWtype);
 
 extern void __enable_execute_stack (void *);
 
+extern void __strub_enter (void **);
+extern void __strub_update (void**);
+extern void __strub_leave (void **);
+
 #ifndef HIDE_EXPORTS
 #pragma GCC visibility pop
 #endif
diff --git a/libgcc/strub.c b/libgcc/strub.c
new file mode 100644
index 0000000000000..90d3e82067b2f
--- /dev/null
+++ b/libgcc/strub.c
@@ -0,0 +1,112 @@
+/* Stack scrubbing infrastructure
+   Copyright (C) 2021-2022 Free Software Foundation, Inc.
+   Contributed by Alexandre Oliva <oliva@adacore.com>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
+<http://www.gnu.org/licenses/>.  */
+
+#include "tconfig.h"
+#include "tsystem.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "libgcc_tm.h"
+#include "libgcc2.h"
+
+#ifndef STACK_GROWS_DOWNWARD
+# define TOPS >
+#else
+# define TOPS <
+#endif
+
+#define ATTRIBUTE_STRUB_CALLABLE __attribute__ ((__strub__ ("callable")))
+
+/* Enter a stack scrubbing context, initializing the watermark to the caller's
+   stack address.  */
+void ATTRIBUTE_STRUB_CALLABLE
+__strub_enter (void **watermark)
+{
+  *watermark = __builtin_frame_address (0);
+}
+
+/* Update the watermark within a stack scrubbing context with the current stack
+   pointer.  */
+void ATTRIBUTE_STRUB_CALLABLE
+__strub_update (void **watermark)
+{
+  void *sp = __builtin_frame_address (0);
+
+  if (sp TOPS *watermark)
+    *watermark = sp;
+}
+
+#ifndef TARGET_STRUB_USE_DYNAMIC_ARRAY
+# define TARGET_STRUB_DONT_USE_DYNAMIC_ARRAY 1
+#endif
+
+#ifndef TARGET_STRUB_DONT_USE_DYNAMIC_ARRAY
+# ifdef TARGET_STRUB_MAY_USE_MEMSET
+#  define TARGET_STRUB_DONT_USE_DYNAMIC_ARRAY 1
+# else
+#  define TARGET_STRUB_MAY_USE_MEMSET 1
+# endif
+#endif
+
+/* Leave a stack scrubbing context, clearing the stack between the
+   watermark in *MARK and the current stack address.  */
+void ATTRIBUTE_STRUB_CALLABLE
+#if ! TARGET_STRUB_MAY_USE_MEMSET
+__attribute__ ((__optimize__ ("-fno-tree-loop-distribute-patterns")))
+#endif
+__strub_leave (void **mark)
+{
+  void *sp = __builtin_stack_address ();
+
+  void **base, **end;
+#ifndef STACK_GROWS_DOWNWARD
+  base = sp;
+  end = *mark;
+#else
+  base = *mark;
+  end = sp;
+#endif
+
+  ptrdiff_t len = end - base;
+  if (len <= 0)
+    return;
+
+#if ! TARGET_STRUB_DONT_USE_DYNAMIC_ARRAY
+  /* Allocate a dynamically-sized array covering the desired range, so that we
+     can safely call memset on it.  */
+  void *ptr[len];
+  base = &ptr[0];
+  end = &ptr[len];
+#else
+  void **ptr = end;
+#endif /* TARGET_STRUB_DONT_USE_DYNAMIC_ARRAY */
+
+  /* ldist turns this into a memset.  Without the dynamic array above, that call
+     is likely unsafe: possibly tail-called, and likely scribbling over its own
+     stack frame.  */
+  while (base < end)
+    *base++ = 0;
+
+  asm ("" : : "m" (ptr));
+}
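
For reference, the target macros above select among three variants:
with no TARGET_STRUB_* macro defined (the default), loop distribution
into memset is disabled and the range is cleared by a plain store loop;
with TARGET_STRUB_USE_DYNAMIC_ARRAY, the loop runs over a VLA covering
the range, so a memset generated by ldist targets that array; with both
TARGET_STRUB_USE_DYNAMIC_ARRAY and TARGET_STRUB_MAY_USE_MEMSET, the
loop runs directly over the range and may become a memset call.  A
derived sketch of the default case (preprocessor conditionals resolved,
downward-growing stack assumed, strub attribute omitted):

  void
  __strub_leave (void **mark)
  {
    /* Compiled with -fno-tree-loop-distribute-patterns.  */
    void **base = *mark;                      /* deepest point reached  */
    void **end = __builtin_stack_address ();  /* this frame's stack pointer  */

    if (end - base <= 0)
      return;

    void **ptr = end;
    while (base < end)
      *base++ = 0;                 /* plain store loop, never a memset  */

    __asm__ ("" : : "m" (ptr));    /* barrier, as in the original  */
  }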

-- 
Alexandre Oliva, happy hacker                https://FSFLA.org/blogs/lxo/
   Free Software Activist                       GNU Toolchain Engineer
Disinformation flourishes because many people care deeply about injustice
but very few check the facts.  Ask me about <https://stallmansupport.org>

Thread overview: 59+ messages
     [not found] <ormtqpsbuc.fsf@lxoliva.fsfla.org>
2021-09-09  7:11 ` [PATCH] strub: machine-independent stack scrubbing Alexandre Oliva
2022-07-29  6:16   ` [PATCH v2 00/10] Introduce " Alexandre Oliva
2022-07-29  6:24     ` [PATCH v2 01/10] Introduce strub: documentation, and new command-line options Alexandre Oliva
2022-07-29  6:25     ` [PATCH v2 02/10] Introduce strub: torture tests for C and C++ Alexandre Oliva
2022-08-09 13:34       ` Alexandre Oliva
2022-07-29  6:25     ` [PATCH v2 03/10] Introduce strub: non-torture " Alexandre Oliva
2022-07-29  6:26     ` [PATCH v2 04/10] Introduce strub: tests for C++ and Ada Alexandre Oliva
2022-07-29  6:26     ` Alexandre Oliva [this message]
2022-07-29  6:27     ` [PATCH v2 06/10] Introduce strub: attributes Alexandre Oliva
2022-07-29  6:28     ` [PATCH v2 07/10] Introduce strub: infrastructure interfaces and adjustments Alexandre Oliva
2022-07-29  6:28     ` [PATCH v2 08/10] Introduce strub: strub modes Alexandre Oliva
2022-07-29  6:30     ` [PATCH v2 09/10] Introduce strub: strubm (mode assignment) pass Alexandre Oliva
2022-07-29  6:34     ` [PATCH v2 10/10] Introduce strub: strub pass Alexandre Oliva
2022-07-29  6:36     ` [PATCH v2 00/10] Introduce strub: machine-independent stack scrubbing Alexandre Oliva
2022-10-10  8:48       ` Richard Biener
2022-10-11 11:57         ` Alexandre Oliva
2022-10-11 11:59           ` Richard Biener
2022-10-11 13:33             ` Alexandre Oliva
2022-10-13 11:38               ` Richard Biener
2022-10-13 13:15                 ` Alexandre Oliva
2023-06-16  6:09     ` [PATCH v3] " Alexandre Oliva
2023-06-27 21:28       ` Qing Zhao
2023-06-28  8:20         ` Alexandre Oliva
2023-10-20  6:03       ` [PATCH v4] " Alexandre Oliva
2023-10-26  6:15         ` Alexandre Oliva
2023-11-20 12:40           ` Alexandre Oliva
2023-11-22 14:14             ` Richard Biener
2023-11-23 10:56               ` Alexandre Oliva
2023-11-23 12:05                 ` Richard Biener
2023-11-29  8:53                   ` Alexandre Oliva
2023-11-29 12:48                     ` Richard Biener
2023-11-30  4:13                       ` Alexandre Oliva
2023-11-30 12:00                         ` Richard Biener
2023-12-02 17:56                           ` [PATCH v5] " Alexandre Oliva
2023-12-05  6:25                             ` Alexandre Oliva
2023-12-06  1:04                               ` Alexandre Oliva
2023-12-05  9:01                             ` Richard Biener
2023-12-06  8:36                             ` Causes to nvptx bootstrap fail: " Tobias Burnus
2023-12-06 11:32                               ` Thomas Schwinge
2023-12-06 22:12                                 ` Alexandre Oliva
2023-12-07  3:33                                   ` [PATCH] strub: enable conditional support Alexandre Oliva
2023-12-07  7:24                                     ` Richard Biener
2023-12-07 16:44                                     ` Thomas Schwinge
2023-12-07 17:52                                       ` [PATCH] Alexandre Oliva
2023-12-08  6:46                                         ` [PATCH] Richard Biener
2023-12-08  9:33                                         ` [PATCH] strub: skip emutls after strubm errors Thomas Schwinge
2023-12-10  9:16                                           ` FX Coudert
2023-12-07  7:21                                   ` Causes to nvptx bootstrap fail: [PATCH v5] Introduce strub: machine-independent stack scrubbing Richard Biener
2023-12-06 10:22                             ` Jan Hubicka
2023-12-07 21:19                               ` Alexandre Oliva
2023-12-07 21:39                               ` Alexandre Oliva
2023-12-09  2:08                                 ` [PATCH] strub: add note on attribute access Alexandre Oliva
2023-12-11  7:26                                   ` Richard Biener
2023-12-12 14:21                                   ` Jan Hubicka
2023-12-11  8:40                             ` [PATCH] testsuite: Disable -fstack-protector* for some strub tests Jakub Jelinek
2023-12-11  8:59                               ` Richard Biener
2023-12-20  8:15                           ` [PATCH FYI] www: new AdaCore-contributed hardening features in gcc 13 and 14 Alexandre Oliva
2023-11-30  5:04                       ` [PATCH v4] Introduce strub: machine-independent stack scrubbing Alexandre Oliva
2023-11-30 11:56                         ` Richard Biener
