From: Richard Sandiford <richard.sandiford@arm.com>
To: gcc-patches@gcc.gnu.org
Subject: [47/67] Make subroutines of nonzero_bits operate on scalar_int_mode
Date: Fri, 09 Dec 2016 13:24:00 -0000
Message-ID: <87twadgrqt.fsf@e105548-lin.cambridge.arm.com>
In-Reply-To: <87h96dp8u6.fsf@e105548-lin.cambridge.arm.com> (Richard Sandiford's message of "Fri, 09 Dec 2016 12:48:01 +0000")

nonzero_bits1 assumed that all bits of a floating-point or vector mode
were needed.  It seems likely that fixed-point modes should have been
handled in the same way.  After excluding those, the only remaining
modes that are likely to be useful are scalar integer ones.

This patch moves the mode class check to nonzero_bits itself, along
with the handling of mode == VOIDmode.  The subroutines of nonzero_bits
can then take scalar_int_modes.
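
For reference, the net effect is that all of the mode checks now happen
once, up front.  Restating the first hunk of the patch in plain form
(this is the post-patch nonzero_bits, not additional code):

unsigned HOST_WIDE_INT
nonzero_bits (const_rtx x, machine_mode mode)
{
  /* VOIDmode means "use the mode of X itself", as before.  */
  if (mode == VOIDmode)
    mode = GET_MODE (x);
  /* Non-scalar-integer modes get the conservative answer: every bit
     in the mode might be nonzero.  */
  scalar_int_mode int_mode;
  if (!is_a <scalar_int_mode> (mode, &int_mode))
    return GET_MODE_MASK (mode);
  /* From here on, the subroutines can assume a scalar_int_mode.  */
  return cached_nonzero_bits (x, int_mode, NULL_RTX, VOIDmode, 0);
}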

gcc/
2016-11-24  Richard Sandiford  <richard.sandiford@arm.com>
	    Alan Hayward  <alan.hayward@arm.com>
	    David Sherwood  <david.sherwood@arm.com>

	* rtlanal.c (nonzero_bits): Handle VOIDmode here rather than
	in subroutines.  Return the mode mask for non-integer modes.
	(cached_nonzero_bits): Change the type of the mode parameter
	to scalar_int_mode.
	(nonzero_bits1): Likewise.  Remove early exit for other mode
	classes.  Handle CONST_INT_P first and then check whether X
	also has a scalar integer mode.

diff --git a/gcc/rtlanal.c b/gcc/rtlanal.c
index a7fb5cf..84e19e4 100644
--- a/gcc/rtlanal.c
+++ b/gcc/rtlanal.c
@@ -43,10 +43,10 @@ static bool covers_regno_no_parallel_p (const_rtx, unsigned int);
 static int computed_jump_p_1 (const_rtx);
 static void parms_set (rtx, const_rtx, void *);
 
-static unsigned HOST_WIDE_INT cached_nonzero_bits (const_rtx, machine_mode,
+static unsigned HOST_WIDE_INT cached_nonzero_bits (const_rtx, scalar_int_mode,
                                                    const_rtx, machine_mode,
                                                    unsigned HOST_WIDE_INT);
-static unsigned HOST_WIDE_INT nonzero_bits1 (const_rtx, machine_mode,
+static unsigned HOST_WIDE_INT nonzero_bits1 (const_rtx, scalar_int_mode,
 					     const_rtx, machine_mode,
                                              unsigned HOST_WIDE_INT);
 static unsigned int cached_num_sign_bit_copies (const_rtx, machine_mode, const_rtx,
@@ -4197,7 +4197,12 @@ default_address_cost (rtx x, machine_mode, addr_space_t, bool speed)
 unsigned HOST_WIDE_INT
 nonzero_bits (const_rtx x, machine_mode mode)
 {
-  return cached_nonzero_bits (x, mode, NULL_RTX, VOIDmode, 0);
+  if (mode == VOIDmode)
+    mode = GET_MODE (x);
+  scalar_int_mode int_mode;
+  if (!is_a <scalar_int_mode> (mode, &int_mode))
+    return GET_MODE_MASK (mode);
+  return cached_nonzero_bits (x, int_mode, NULL_RTX, VOIDmode, 0);
 }
 
 unsigned int
@@ -4241,7 +4246,7 @@ nonzero_bits_binary_arith_p (const_rtx x)
    identical subexpressions on the first or the second level.  */
 
 static unsigned HOST_WIDE_INT
-cached_nonzero_bits (const_rtx x, machine_mode mode, const_rtx known_x,
+cached_nonzero_bits (const_rtx x, scalar_int_mode mode, const_rtx known_x,
 		     machine_mode known_mode,
 		     unsigned HOST_WIDE_INT known_ret)
 {
@@ -4294,7 +4299,7 @@ cached_nonzero_bits (const_rtx x, machine_mode mode, const_rtx known_x,
    an arithmetic operation, we can do better.  */
 
 static unsigned HOST_WIDE_INT
-nonzero_bits1 (const_rtx x, machine_mode mode, const_rtx known_x,
+nonzero_bits1 (const_rtx x, scalar_int_mode mode, const_rtx known_x,
 	       machine_mode known_mode,
 	       unsigned HOST_WIDE_INT known_ret)
 {
@@ -4302,19 +4307,31 @@ nonzero_bits1 (const_rtx x, machine_mode mode, const_rtx known_x,
   unsigned HOST_WIDE_INT inner_nz;
   enum rtx_code code;
   machine_mode inner_mode;
+  scalar_int_mode xmode;
+
   unsigned int mode_width = GET_MODE_PRECISION (mode);
 
-  /* For floating-point and vector values, assume all bits are needed.  */
-  if (FLOAT_MODE_P (GET_MODE (x)) || FLOAT_MODE_P (mode)
-      || VECTOR_MODE_P (GET_MODE (x)) || VECTOR_MODE_P (mode))
+  if (CONST_INT_P (x))
+    {
+      if (SHORT_IMMEDIATES_SIGN_EXTEND
+	  && INTVAL (x) > 0
+	  && mode_width < BITS_PER_WORD
+	  && (UINTVAL (x) & (HOST_WIDE_INT_1U << (mode_width - 1))) != 0)
+	return UINTVAL (x) | (HOST_WIDE_INT_M1U << mode_width);
+
+      return UINTVAL (x);
+    }
+
+  if (!is_a <scalar_int_mode> (GET_MODE (x), &xmode))
     return nonzero;
+  unsigned int xmode_width = GET_MODE_PRECISION (xmode);
 
   /* If X is wider than MODE, use its mode instead.  */
-  if (GET_MODE_PRECISION (GET_MODE (x)) > mode_width)
+  if (xmode_width > mode_width)
     {
-      mode = GET_MODE (x);
+      mode = xmode;
       nonzero = GET_MODE_MASK (mode);
-      mode_width = GET_MODE_PRECISION (mode);
+      mode_width = xmode_width;
     }
 
   if (mode_width > HOST_BITS_PER_WIDE_INT)
@@ -4330,15 +4347,13 @@ nonzero_bits1 (const_rtx x, machine_mode mode, const_rtx known_x,
      not known to be zero.  */
 
   if (!WORD_REGISTER_OPERATIONS
-      && GET_MODE (x) != VOIDmode
-      && GET_MODE (x) != mode
-      && GET_MODE_PRECISION (GET_MODE (x)) <= BITS_PER_WORD
-      && GET_MODE_PRECISION (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
-      && GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (GET_MODE (x)))
+      && mode_width > xmode_width
+      && xmode_width <= BITS_PER_WORD
+      && xmode_width <= HOST_BITS_PER_WIDE_INT)
     {
-      nonzero &= cached_nonzero_bits (x, GET_MODE (x),
+      nonzero &= cached_nonzero_bits (x, xmode,
 				      known_x, known_mode, known_ret);
-      nonzero |= GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x));
+      nonzero |= GET_MODE_MASK (mode) & ~GET_MODE_MASK (xmode);
       return nonzero;
     }
 
@@ -4355,7 +4370,8 @@ nonzero_bits1 (const_rtx x, machine_mode mode, const_rtx known_x,
 	 we can do this only if the target does not support different pointer
 	 or address modes depending on the address space.  */
       if (target_default_pointer_address_modes_p ()
-	  && POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
+	  && POINTERS_EXTEND_UNSIGNED
+	  && xmode == Pmode
 	  && REG_POINTER (x)
 	  && !targetm.have_ptr_extend ())
 	nonzero &= GET_MODE_MASK (ptr_mode);
@@ -4398,22 +4414,12 @@ nonzero_bits1 (const_rtx x, machine_mode mode, const_rtx known_x,
 	return nonzero_for_hook;
       }
 
-    case CONST_INT:
-      /* If X is negative in MODE, sign-extend the value.  */
-      if (SHORT_IMMEDIATES_SIGN_EXTEND && INTVAL (x) > 0
-	  && mode_width < BITS_PER_WORD
-	  && (UINTVAL (x) & (HOST_WIDE_INT_1U << (mode_width - 1)))
-	     != 0)
-	return UINTVAL (x) | (HOST_WIDE_INT_M1U << mode_width);
-
-      return UINTVAL (x);
-
     case MEM:
       /* In many, if not most, RISC machines, reading a byte from memory
 	 zeros the rest of the register.  Noticing that fact saves a lot
 	 of extra zero-extends.  */
-      if (load_extend_op (GET_MODE (x)) == ZERO_EXTEND)
-	nonzero &= GET_MODE_MASK (GET_MODE (x));
+      if (load_extend_op (xmode) == ZERO_EXTEND)
+	nonzero &= GET_MODE_MASK (xmode);
       break;
 
     case EQ:  case NE:
@@ -4430,7 +4436,7 @@ nonzero_bits1 (const_rtx x, machine_mode mode, const_rtx known_x,
 	 operation in, and not the actual operation mode.  We can wind
 	 up with (subreg:DI (gt:V4HI x y)), and we don't have anything
 	 that describes the results of a vector compare.  */
-      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
+      if (GET_MODE_CLASS (xmode) == MODE_INT
 	  && mode_width <= HOST_BITS_PER_WIDE_INT)
 	nonzero = STORE_FLAG_VALUE;
       break;
@@ -4439,21 +4445,19 @@ nonzero_bits1 (const_rtx x, machine_mode mode, const_rtx known_x,
 #if 0
       /* Disabled to avoid exponential mutual recursion between nonzero_bits
 	 and num_sign_bit_copies.  */
-      if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
-	  == GET_MODE_PRECISION (GET_MODE (x)))
+      if (num_sign_bit_copies (XEXP (x, 0), xmode) == xmode_width)
 	nonzero = 1;
 #endif
 
-      if (GET_MODE_PRECISION (GET_MODE (x)) < mode_width)
-	nonzero |= (GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x)));
+      if (xmode_width < mode_width)
+	nonzero |= (GET_MODE_MASK (mode) & ~GET_MODE_MASK (xmode));
       break;
 
     case ABS:
 #if 0
       /* Disabled to avoid exponential mutual recursion between nonzero_bits
 	 and num_sign_bit_copies.  */
-      if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
-	  == GET_MODE_PRECISION (GET_MODE (x)))
+      if (num_sign_bit_copies (XEXP (x, 0), xmode) == xmode_width)
 	nonzero = 1;
 #endif
       break;
@@ -4526,7 +4530,7 @@ nonzero_bits1 (const_rtx x, machine_mode mode, const_rtx known_x,
 	unsigned HOST_WIDE_INT nz1
 	  = cached_nonzero_bits (XEXP (x, 1), mode,
 				 known_x, known_mode, known_ret);
-	int sign_index = GET_MODE_PRECISION (GET_MODE (x)) - 1;
+	int sign_index = xmode_width - 1;
 	int width0 = floor_log2 (nz0) + 1;
 	int width1 = floor_log2 (nz1) + 1;
 	int low0 = ctz_or_zero (nz0);
@@ -4598,8 +4602,8 @@ nonzero_bits1 (const_rtx x, machine_mode mode, const_rtx known_x,
 	 been zero-extended, we know that at least the high-order bits
 	 are zero, though others might be too.  */
       if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_UNSIGNED_P (x))
-	nonzero = GET_MODE_MASK (GET_MODE (x))
-		  & cached_nonzero_bits (SUBREG_REG (x), GET_MODE (x),
+	nonzero = GET_MODE_MASK (xmode)
+		  & cached_nonzero_bits (SUBREG_REG (x), xmode,
 					 known_x, known_mode, known_ret);
 
       /* If the inner mode is a single word for both the host and target
@@ -4623,10 +4627,8 @@ nonzero_bits1 (const_rtx x, machine_mode mode, const_rtx known_x,
 		   ? val_signbit_known_set_p (inner_mode, nonzero)
 		   : extend_op != ZERO_EXTEND)
 	       || (!MEM_P (SUBREG_REG (x)) && !REG_P (SUBREG_REG (x))))
-	      && GET_MODE_PRECISION (GET_MODE (x))
-		  > GET_MODE_PRECISION (inner_mode))
-	    nonzero
-	      |= (GET_MODE_MASK (GET_MODE (x)) & ~GET_MODE_MASK (inner_mode));
+	      && xmode_width > GET_MODE_PRECISION (inner_mode))
+	    nonzero |= (GET_MODE_MASK (xmode) & ~GET_MODE_MASK (inner_mode));
 	}
       break;
 
@@ -4635,7 +4637,7 @@ nonzero_bits1 (const_rtx x, machine_mode mode, const_rtx known_x,
     case ASHIFT:
     case ROTATE:
       /* The nonzero bits are in two classes: any bits within MODE
-	 that aren't in GET_MODE (x) are always significant.  The rest of the
+	 that aren't in xmode are always significant.  The rest of the
 	 nonzero bits are those that are significant in the operand of
 	 the shift when shifted the appropriate number of bits.  This
 	 shows that high-order bits are cleared by the right shift and
@@ -4643,19 +4645,17 @@ nonzero_bits1 (const_rtx x, machine_mode mode, const_rtx known_x,
       if (CONST_INT_P (XEXP (x, 1))
 	  && INTVAL (XEXP (x, 1)) >= 0
 	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
-	  && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (GET_MODE (x)))
+	  && INTVAL (XEXP (x, 1)) < xmode_width)
 	{
-	  machine_mode inner_mode = GET_MODE (x);
-	  unsigned int width = GET_MODE_PRECISION (inner_mode);
 	  int count = INTVAL (XEXP (x, 1));
-	  unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (inner_mode);
+	  unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (xmode);
 	  unsigned HOST_WIDE_INT op_nonzero
 	    = cached_nonzero_bits (XEXP (x, 0), mode,
 				   known_x, known_mode, known_ret);
 	  unsigned HOST_WIDE_INT inner = op_nonzero & mode_mask;
 	  unsigned HOST_WIDE_INT outer = 0;
 
-	  if (mode_width > width)
+	  if (mode_width > xmode_width)
 	    outer = (op_nonzero & nonzero & ~mode_mask);
 
 	  if (code == LSHIFTRT)
@@ -4667,15 +4667,16 @@ nonzero_bits1 (const_rtx x, machine_mode mode, const_rtx known_x,
 	      /* If the sign bit may have been nonzero before the shift, we
 		 need to mark all the places it could have been copied to
 		 by the shift as possibly nonzero.  */
-	      if (inner & (HOST_WIDE_INT_1U << (width - 1 - count)))
-		inner |= ((HOST_WIDE_INT_1U << count) - 1)
-			   << (width - count);
+	      if (inner & (HOST_WIDE_INT_1U << (xmode_width - 1 - count)))
+		inner |= (((HOST_WIDE_INT_1U << count) - 1)
+			  << (xmode_width - count));
 	    }
 	  else if (code == ASHIFT)
 	    inner <<= count;
 	  else
-	    inner = ((inner << (count % width)
-		      | (inner >> (width - (count % width)))) & mode_mask);
+	    inner = ((inner << (count % xmode_width)
+		      | (inner >> (xmode_width - (count % xmode_width))))
+		     & mode_mask);
 
 	  nonzero &= (outer | inner);
 	}

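
As a standalone illustration of the relocated CONST_INT handling (not
part of the patch; plain C, assuming a 64-bit HOST_WIDE_INT and a
target that defines SHORT_IMMEDIATES_SIGN_EXTEND):

#include <stdio.h>
#include <stdint.h>

int
main (void)
{
  uint64_t val = 0x80;          /* INTVAL (x) == 128, i.e. > 0.  */
  unsigned int mode_width = 8;  /* e.g. QImode, < BITS_PER_WORD.  */

  /* If the mode's sign bit is set in the value, treat the immediate
     as sign-extended, matching how such targets load short
     immediates into registers.  */
  if (val & ((uint64_t) 1 << (mode_width - 1)))
    val |= (uint64_t) -1 << mode_width;

  /* Prints ffffffffffffff80: every bit above the mode is considered
     potentially nonzero.  */
  printf ("%llx\n", (unsigned long long) val);
  return 0;
}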