From: Richard Sandiford <richard.sandiford@arm.com>
To: gcc-patches@gcc.gnu.org
Subject: [42/67] Use scalar_int_mode in simplify_shift_const_1
Date: Fri, 09 Dec 2016 13:20:00 -0000
Message-ID: <87fulxi6hd.fsf@e105548-lin.cambridge.arm.com>
In-Reply-To: <87h96dp8u6.fsf@e105548-lin.cambridge.arm.com> (Richard Sandiford's message of "Fri, 09 Dec 2016 12:48:01 +0000")

This patch makes simplify_shift_const_1 use scalar_int_modes
for all code that is specific to scalars rather than vectors.
This includes situations in which the new shift mode is different
from the original one, since the function never changes the mode
of vector shifts.  That in turn makes it more natural to test
for equal modes in simplify_shift_const_1 rather than in
try_widen_shift_mode (which only applies to scalars).
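
As a rough standalone sketch of the resulting structure (the types and
helpers below are simplified stand-ins invented for illustration, not
GCC's real machine_mode and scalar_int_mode classes, so this shows only
the shape of the change):

/* Illustration only: simplified stand-ins for GCC's mode classes.  */
#include <cassert>
#include <iostream>

enum mode_class { MODE_INT, MODE_VECTOR_INT };

struct machine_mode_t
{
  mode_class cls;
  unsigned precision;
  bool operator!= (const machine_mode_t &other) const
  { return cls != other.cls || precision != other.precision; }
};

struct scalar_int_mode_t { unsigned precision; };

/* Stand-in for as_a <scalar_int_mode>: assert the property, then convert.  */
static scalar_int_mode_t
as_scalar_int (machine_mode_t m)
{
  assert (m.cls == MODE_INT);
  return { m.precision };
}

/* Stand-in for a scalar-only helper such as try_widen_shift_mode, which
   after the patch is only ever passed scalar integer modes.  */
static scalar_int_mode_t
widen_for_shift (scalar_int_mode_t orig, scalar_int_mode_t wider)
{
  assert (wider.precision > orig.precision);
  return wider;
}

/* Stand-in for the new shape of simplify_shift_const_1: the equal-modes
   check lives here in the caller, and the conversions to the scalar type
   happen only on the branch where the modes can differ.  Vector shifts
   always keep SHIFT_MODE == MODE, so they never reach the conversions.  */
static machine_mode_t
choose_shift_mode (machine_mode_t result_mode, machine_mode_t mode)
{
  machine_mode_t shift_mode = result_mode;
  if (shift_mode != mode)
    {
      scalar_int_mode_t int_result_mode = as_scalar_int (result_mode);
      scalar_int_mode_t int_mode = as_scalar_int (mode);
      shift_mode = { MODE_INT,
		     widen_for_shift (int_result_mode, int_mode).precision };
    }
  return shift_mode;
}

int
main ()
{
  machine_mode_t qi = { MODE_INT, 8 };
  machine_mode_t si = { MODE_INT, 32 };
  /* The 8-bit/32-bit pair takes the scalar-only widening path; prints 32.  */
  std::cout << choose_shift_mode (qi, si).precision << "\n";
  return 0;
}

The point of the patch below is the same: the as_a <scalar_int_mode>
conversions sit only on paths that the surrounding checks already
guarantee are scalar.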

gcc/
2016-11-24  Richard Sandiford  <richard.sandiford@arm.com>
	    Alan Hayward  <alan.hayward@arm.com>
	    David Sherwood  <david.sherwood@arm.com>

	* combine.c (try_widen_shift_mode): Move check for equal modes to...
	(simplify_shift_const_1): ...here.  Use scalar_int_mode for
	shift_unit_mode and for modes involved in scalar shifts.

diff --git a/gcc/combine.c b/gcc/combine.c
index 0fcd428..3cb92a4 100644
--- a/gcc/combine.c
+++ b/gcc/combine.c
@@ -10296,8 +10296,6 @@ try_widen_shift_mode (enum rtx_code code, rtx op, int count,
 		      machine_mode orig_mode, machine_mode mode,
 		      enum rtx_code outer_code, HOST_WIDE_INT outer_const)
 {
-  if (orig_mode == mode)
-    return mode;
   gcc_assert (GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (orig_mode));
 
   /* In general we can't perform in wider mode for right shift and rotate.  */
@@ -10358,7 +10356,7 @@ simplify_shift_const_1 (enum rtx_code code, machine_mode result_mode,
   int count;
   machine_mode mode = result_mode;
   machine_mode shift_mode;
-  scalar_int_mode tmode, inner_mode;
+  scalar_int_mode tmode, inner_mode, int_mode, int_varop_mode, int_result_mode;
   unsigned int mode_words
     = (GET_MODE_SIZE (mode) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD;
   /* We form (outer_op (code varop count) (outer_const)).  */
@@ -10398,9 +10396,19 @@ simplify_shift_const_1 (enum rtx_code code, machine_mode result_mode,
 	  count = bitsize - count;
 	}
 
-      shift_mode = try_widen_shift_mode (code, varop, count, result_mode,
-					 mode, outer_op, outer_const);
-      machine_mode shift_unit_mode = GET_MODE_INNER (shift_mode);
+      shift_mode = result_mode;
+      if (shift_mode != mode)
+	{
+	  /* We only change the modes of scalar shifts.  */
+	  int_mode = as_a <scalar_int_mode> (mode);
+	  int_result_mode = as_a <scalar_int_mode> (result_mode);
+	  shift_mode = try_widen_shift_mode (code, varop, count,
+					     int_result_mode, int_mode,
+					     outer_op, outer_const);
+	}
+
+      scalar_int_mode shift_unit_mode
+	= as_a <scalar_int_mode> (GET_MODE_INNER (shift_mode));
 
       /* Handle cases where the count is greater than the size of the mode
 	 minus 1.  For ASHIFT, use the size minus one as the count (this can
@@ -10495,6 +10503,7 @@ simplify_shift_const_1 (enum rtx_code code, machine_mode result_mode,
 	  /* The following rules apply only to scalars.  */
 	  if (shift_mode != shift_unit_mode)
 	    break;
+	  int_mode = as_a <scalar_int_mode> (mode);
 
 	  /* If we have (xshiftrt (mem ...) C) and C is MODE_WIDTH
 	     minus the width of a smaller mode, we can do this with a
@@ -10503,15 +10512,15 @@ simplify_shift_const_1 (enum rtx_code code, machine_mode result_mode,
 	      && ! mode_dependent_address_p (XEXP (varop, 0),
 					     MEM_ADDR_SPACE (varop))
 	      && ! MEM_VOLATILE_P (varop)
-	      && (int_mode_for_size (GET_MODE_BITSIZE (mode) - count, 1)
+	      && (int_mode_for_size (GET_MODE_BITSIZE (int_mode) - count, 1)
 		  .exists (&tmode)))
 	    {
 	      new_rtx = adjust_address_nv (varop, tmode,
-				       BYTES_BIG_ENDIAN ? 0
-				       : count / BITS_PER_UNIT);
+					   BYTES_BIG_ENDIAN ? 0
+					   : count / BITS_PER_UNIT);
 
 	      varop = gen_rtx_fmt_e (code == ASHIFTRT ? SIGN_EXTEND
-				     : ZERO_EXTEND, mode, new_rtx);
+				     : ZERO_EXTEND, int_mode, new_rtx);
 	      count = 0;
 	      continue;
 	    }
@@ -10521,20 +10530,22 @@ simplify_shift_const_1 (enum rtx_code code, machine_mode result_mode,
 	  /* The following rules apply only to scalars.  */
 	  if (shift_mode != shift_unit_mode)
 	    break;
+	  int_mode = as_a <scalar_int_mode> (mode);
+	  int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
 
 	  /* If VAROP is a SUBREG, strip it as long as the inner operand has
 	     the same number of words as what we've seen so far.  Then store
 	     the widest mode in MODE.  */
 	  if (subreg_lowpart_p (varop)
 	      && is_int_mode (GET_MODE (SUBREG_REG (varop)), &inner_mode)
-	      && GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (GET_MODE (varop))
+	      && GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (int_varop_mode)
 	      && (unsigned int) ((GET_MODE_SIZE (inner_mode)
 				  + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
 		 == mode_words
-	      && GET_MODE_CLASS (GET_MODE (varop)) == MODE_INT)
+	      && GET_MODE_CLASS (int_varop_mode) == MODE_INT)
 	    {
 	      varop = SUBREG_REG (varop);
-	      if (GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (mode))
+	      if (GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (int_mode))
 		mode = inner_mode;
 	      continue;
 	    }
@@ -10593,14 +10604,17 @@ simplify_shift_const_1 (enum rtx_code code, machine_mode result_mode,
 	  /* The following rules apply only to scalars.  */
 	  if (shift_mode != shift_unit_mode)
 	    break;
+	  int_mode = as_a <scalar_int_mode> (mode);
+	  int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
+	  int_result_mode = as_a <scalar_int_mode> (result_mode);
 
 	  /* Here we have two nested shifts.  The result is usually the
 	     AND of a new shift with a mask.  We compute the result below.  */
 	  if (CONST_INT_P (XEXP (varop, 1))
 	      && INTVAL (XEXP (varop, 1)) >= 0
-	      && INTVAL (XEXP (varop, 1)) < GET_MODE_PRECISION (GET_MODE (varop))
-	      && HWI_COMPUTABLE_MODE_P (result_mode)
-	      && HWI_COMPUTABLE_MODE_P (mode))
+	      && INTVAL (XEXP (varop, 1)) < GET_MODE_PRECISION (int_varop_mode)
+	      && HWI_COMPUTABLE_MODE_P (int_result_mode)
+	      && HWI_COMPUTABLE_MODE_P (int_mode))
 	    {
 	      enum rtx_code first_code = GET_CODE (varop);
 	      unsigned int first_count = INTVAL (XEXP (varop, 1));
@@ -10615,18 +10629,18 @@ simplify_shift_const_1 (enum rtx_code code, machine_mode result_mode,
 		 (ashiftrt:M1 (ashift:M1 (and:M1 (subreg:M1 FOO 0) C3) C2) C1).
 		 This simplifies certain SIGN_EXTEND operations.  */
 	      if (code == ASHIFT && first_code == ASHIFTRT
-		  && count == (GET_MODE_PRECISION (result_mode)
-			       - GET_MODE_PRECISION (GET_MODE (varop))))
+		  && count == (GET_MODE_PRECISION (int_result_mode)
+			       - GET_MODE_PRECISION (int_varop_mode)))
 		{
 		  /* C3 has the low-order C1 bits zero.  */
 
-		  mask = GET_MODE_MASK (mode)
+		  mask = GET_MODE_MASK (int_mode)
 			 & ~((HOST_WIDE_INT_1U << first_count) - 1);
 
-		  varop = simplify_and_const_int (NULL_RTX, result_mode,
+		  varop = simplify_and_const_int (NULL_RTX, int_result_mode,
 						  XEXP (varop, 0), mask);
-		  varop = simplify_shift_const (NULL_RTX, ASHIFT, result_mode,
-						varop, count);
+		  varop = simplify_shift_const (NULL_RTX, ASHIFT,
+						int_result_mode, varop, count);
 		  count = first_count;
 		  code = ASHIFTRT;
 		  continue;
@@ -10637,11 +10651,11 @@ simplify_shift_const_1 (enum rtx_code code, machine_mode result_mode,
 		 this to either an ASHIFT or an ASHIFTRT depending on the
 		 two counts.
 
-		 We cannot do this if VAROP's mode is not SHIFT_MODE.  */
+		 We cannot do this if VAROP's mode is not SHIFT_UNIT_MODE.  */
 
 	      if (code == ASHIFTRT && first_code == ASHIFT
-		  && GET_MODE (varop) == shift_mode
-		  && (num_sign_bit_copies (XEXP (varop, 0), shift_mode)
+		  && int_varop_mode == shift_unit_mode
+		  && (num_sign_bit_copies (XEXP (varop, 0), shift_unit_mode)
 		      > first_count))
 		{
 		  varop = XEXP (varop, 0);
@@ -10672,7 +10686,7 @@ simplify_shift_const_1 (enum rtx_code code, machine_mode result_mode,
 
 	      if (code == first_code)
 		{
-		  if (GET_MODE (varop) != result_mode
+		  if (int_varop_mode != int_result_mode
 		      && (code == ASHIFTRT || code == LSHIFTRT
 			  || code == ROTATE))
 		    break;
@@ -10684,8 +10698,8 @@ simplify_shift_const_1 (enum rtx_code code, machine_mode result_mode,
 
 	      if (code == ASHIFTRT
 		  || (code == ROTATE && first_code == ASHIFTRT)
-		  || GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT
-		  || (GET_MODE (varop) != result_mode
+		  || GET_MODE_PRECISION (int_mode) > HOST_BITS_PER_WIDE_INT
+		  || (int_varop_mode != int_result_mode
 		      && (first_code == ASHIFTRT || first_code == LSHIFTRT
 			  || first_code == ROTATE
 			  || code == ROTATE)))
@@ -10695,19 +10709,19 @@ simplify_shift_const_1 (enum rtx_code code, machine_mode result_mode,
 		 nonzero bits of the inner shift the same way the
 		 outer shift will.  */
 
-	      mask_rtx = gen_int_mode (nonzero_bits (varop, GET_MODE (varop)),
-				       result_mode);
+	      mask_rtx = gen_int_mode (nonzero_bits (varop, int_varop_mode),
+				       int_result_mode);
 
 	      mask_rtx
-		= simplify_const_binary_operation (code, result_mode, mask_rtx,
-						   GEN_INT (count));
+		= simplify_const_binary_operation (code, int_result_mode,
+						   mask_rtx, GEN_INT (count));
 
 	      /* Give up if we can't compute an outer operation to use.  */
 	      if (mask_rtx == 0
 		  || !CONST_INT_P (mask_rtx)
 		  || ! merge_outer_ops (&outer_op, &outer_const, AND,
 					INTVAL (mask_rtx),
-					result_mode, &complement_p))
+					int_result_mode, &complement_p))
 		break;
 
 	      /* If the shifts are in the same direction, we add the
@@ -10744,22 +10758,22 @@ simplify_shift_const_1 (enum rtx_code code, machine_mode result_mode,
 	      /* For ((unsigned) (cstULL >> count)) >> cst2 we have to make
 		 sure the result will be masked.  See PR70222.  */
 	      if (code == LSHIFTRT
-		  && mode != result_mode
+		  && int_mode != int_result_mode
 		  && !merge_outer_ops (&outer_op, &outer_const, AND,
-				       GET_MODE_MASK (result_mode)
-				       >> orig_count, result_mode,
+				       GET_MODE_MASK (int_result_mode)
+				       >> orig_count, int_result_mode,
 				       &complement_p))
 		break;
 	      /* For ((int) (cstLL >> count)) >> cst2 just give up.  Queuing
 		 up outer sign extension (often left and right shift) is
 		 hardly more efficient than the original.  See PR70429.  */
-	      if (code == ASHIFTRT && mode != result_mode)
+	      if (code == ASHIFTRT && int_mode != int_result_mode)
 		break;
 
-	      rtx new_rtx = simplify_const_binary_operation (code, mode,
+	      rtx new_rtx = simplify_const_binary_operation (code, int_mode,
 							     XEXP (varop, 0),
 							     GEN_INT (count));
-	      varop = gen_rtx_fmt_ee (code, mode, new_rtx, XEXP (varop, 1));
+	      varop = gen_rtx_fmt_ee (code, int_mode, new_rtx, XEXP (varop, 1));
 	      count = 0;
 	      continue;
 	    }
@@ -10780,6 +10794,8 @@ simplify_shift_const_1 (enum rtx_code code, machine_mode result_mode,
 	  /* The following rules apply only to scalars.  */
 	  if (shift_mode != shift_unit_mode)
 	    break;
+	  int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
+	  int_result_mode = as_a <scalar_int_mode> (result_mode);
 
 	  /* If we have (xshiftrt (ior (plus X (const_int -1)) X) C)
 	     with C the size of VAROP - 1 and the shift is logical if
@@ -10792,15 +10808,15 @@ simplify_shift_const_1 (enum rtx_code code, machine_mode result_mode,
 	      && XEXP (XEXP (varop, 0), 1) == constm1_rtx
 	      && (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
 	      && (code == LSHIFTRT || code == ASHIFTRT)
-	      && count == (GET_MODE_PRECISION (GET_MODE (varop)) - 1)
+	      && count == (GET_MODE_PRECISION (int_varop_mode) - 1)
 	      && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
 	    {
 	      count = 0;
-	      varop = gen_rtx_LE (GET_MODE (varop), XEXP (varop, 1),
+	      varop = gen_rtx_LE (int_varop_mode, XEXP (varop, 1),
 				  const0_rtx);
 
 	      if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
-		varop = gen_rtx_NEG (GET_MODE (varop), varop);
+		varop = gen_rtx_NEG (int_varop_mode, varop);
 
 	      continue;
 	    }
@@ -10813,19 +10829,20 @@ simplify_shift_const_1 (enum rtx_code code, machine_mode result_mode,
 
 	  if (CONST_INT_P (XEXP (varop, 1))
 	      /* We can't do this if we have (ashiftrt (xor))  and the
-		 constant has its sign bit set in shift_mode with shift_mode
-		 wider than result_mode.  */
+		 constant has its sign bit set in shift_unit_mode with
+		 shift_unit_mode wider than result_mode.  */
 	      && !(code == ASHIFTRT && GET_CODE (varop) == XOR
-		   && result_mode != shift_mode
+		   && int_result_mode != shift_unit_mode
 		   && 0 > trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
-					      shift_mode))
+					      shift_unit_mode))
 	      && (new_rtx = simplify_const_binary_operation
-		  (code, result_mode,
-		   gen_int_mode (INTVAL (XEXP (varop, 1)), result_mode),
+		  (code, int_result_mode,
+		   gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode),
 		   GEN_INT (count))) != 0
 	      && CONST_INT_P (new_rtx)
 	      && merge_outer_ops (&outer_op, &outer_const, GET_CODE (varop),
-				  INTVAL (new_rtx), result_mode, &complement_p))
+				  INTVAL (new_rtx), int_result_mode,
+				  &complement_p))
 	    {
 	      varop = XEXP (varop, 0);
 	      continue;
@@ -10838,16 +10855,16 @@ simplify_shift_const_1 (enum rtx_code code, machine_mode result_mode,
 	     changes the sign bit.  */
 	  if (CONST_INT_P (XEXP (varop, 1))
 	     && !(code == ASHIFTRT && GET_CODE (varop) == XOR
-		  && result_mode != shift_mode
+		  && int_result_mode != shift_unit_mode
 		  && 0 > trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
-					     shift_mode)))
+					     shift_unit_mode)))
 	    {
-	      rtx lhs = simplify_shift_const (NULL_RTX, code, shift_mode,
+	      rtx lhs = simplify_shift_const (NULL_RTX, code, shift_unit_mode,
 					      XEXP (varop, 0), count);
-	      rtx rhs = simplify_shift_const (NULL_RTX, code, shift_mode,
+	      rtx rhs = simplify_shift_const (NULL_RTX, code, shift_unit_mode,
 					      XEXP (varop, 1), count);
 
-	      varop = simplify_gen_binary (GET_CODE (varop), shift_mode,
+	      varop = simplify_gen_binary (GET_CODE (varop), shift_unit_mode,
 					   lhs, rhs);
 	      varop = apply_distributive_law (varop);
 
@@ -10860,6 +10877,7 @@ simplify_shift_const_1 (enum rtx_code code, machine_mode result_mode,
 	  /* The following rules apply only to scalars.  */
 	  if (shift_mode != shift_unit_mode)
 	    break;
+	  int_result_mode = as_a <scalar_int_mode> (result_mode);
 
 	  /* Convert (lshiftrt (eq FOO 0) C) to (xor FOO 1) if STORE_FLAG_VALUE
 	     says that the sign bit can be tested, FOO has mode MODE, C is
@@ -10867,13 +10885,13 @@ simplify_shift_const_1 (enum rtx_code code, machine_mode result_mode,
 	     that may be nonzero.  */
 	  if (code == LSHIFTRT
 	      && XEXP (varop, 1) == const0_rtx
-	      && GET_MODE (XEXP (varop, 0)) == result_mode
-	      && count == (GET_MODE_PRECISION (result_mode) - 1)
-	      && HWI_COMPUTABLE_MODE_P (result_mode)
+	      && GET_MODE (XEXP (varop, 0)) == int_result_mode
+	      && count == (GET_MODE_PRECISION (int_result_mode) - 1)
+	      && HWI_COMPUTABLE_MODE_P (int_result_mode)
 	      && STORE_FLAG_VALUE == -1
-	      && nonzero_bits (XEXP (varop, 0), result_mode) == 1
-	      && merge_outer_ops (&outer_op, &outer_const, XOR, 1, result_mode,
-				  &complement_p))
+	      && nonzero_bits (XEXP (varop, 0), int_result_mode) == 1
+	      && merge_outer_ops (&outer_op, &outer_const, XOR, 1,
+				  int_result_mode, &complement_p))
 	    {
 	      varop = XEXP (varop, 0);
 	      count = 0;
@@ -10885,12 +10903,13 @@ simplify_shift_const_1 (enum rtx_code code, machine_mode result_mode,
 	  /* The following rules apply only to scalars.  */
 	  if (shift_mode != shift_unit_mode)
 	    break;
+	  int_result_mode = as_a <scalar_int_mode> (result_mode);
 
 	  /* (lshiftrt (neg A) C) where A is either 0 or 1 and C is one less
 	     than the number of bits in the mode is equivalent to A.  */
 	  if (code == LSHIFTRT
-	      && count == (GET_MODE_PRECISION (result_mode) - 1)
-	      && nonzero_bits (XEXP (varop, 0), result_mode) == 1)
+	      && count == (GET_MODE_PRECISION (int_result_mode) - 1)
+	      && nonzero_bits (XEXP (varop, 0), int_result_mode) == 1)
 	    {
 	      varop = XEXP (varop, 0);
 	      count = 0;
@@ -10900,8 +10919,8 @@ simplify_shift_const_1 (enum rtx_code code, machine_mode result_mode,
 	  /* NEG commutes with ASHIFT since it is multiplication.  Move the
 	     NEG outside to allow shifts to combine.  */
 	  if (code == ASHIFT
-	      && merge_outer_ops (&outer_op, &outer_const, NEG, 0, result_mode,
-				  &complement_p))
+	      && merge_outer_ops (&outer_op, &outer_const, NEG, 0,
+				  int_result_mode, &complement_p))
 	    {
 	      varop = XEXP (varop, 0);
 	      continue;
@@ -10912,16 +10931,17 @@ simplify_shift_const_1 (enum rtx_code code, machine_mode result_mode,
 	  /* The following rules apply only to scalars.  */
 	  if (shift_mode != shift_unit_mode)
 	    break;
+	  int_result_mode = as_a <scalar_int_mode> (result_mode);
 
 	  /* (lshiftrt (plus A -1) C) where A is either 0 or 1 and C
 	     is one less than the number of bits in the mode is
 	     equivalent to (xor A 1).  */
 	  if (code == LSHIFTRT
-	      && count == (GET_MODE_PRECISION (result_mode) - 1)
+	      && count == (GET_MODE_PRECISION (int_result_mode) - 1)
 	      && XEXP (varop, 1) == constm1_rtx
-	      && nonzero_bits (XEXP (varop, 0), result_mode) == 1
-	      && merge_outer_ops (&outer_op, &outer_const, XOR, 1, result_mode,
-				  &complement_p))
+	      && nonzero_bits (XEXP (varop, 0), int_result_mode) == 1
+	      && merge_outer_ops (&outer_op, &outer_const, XOR, 1,
+				  int_result_mode, &complement_p))
 	    {
 	      count = 0;
 	      varop = XEXP (varop, 0);
@@ -10936,21 +10956,20 @@ simplify_shift_const_1 (enum rtx_code code, machine_mode result_mode,
 
 	  if ((code == ASHIFTRT || code == LSHIFTRT)
 	      && count < HOST_BITS_PER_WIDE_INT
-	      && nonzero_bits (XEXP (varop, 1), result_mode) >> count == 0
-	      && (nonzero_bits (XEXP (varop, 1), result_mode)
-		  & nonzero_bits (XEXP (varop, 0), result_mode)) == 0)
+	      && nonzero_bits (XEXP (varop, 1), int_result_mode) >> count == 0
+	      && (nonzero_bits (XEXP (varop, 1), int_result_mode)
+		  & nonzero_bits (XEXP (varop, 0), int_result_mode)) == 0)
 	    {
 	      varop = XEXP (varop, 0);
 	      continue;
 	    }
 	  else if ((code == ASHIFTRT || code == LSHIFTRT)
 		   && count < HOST_BITS_PER_WIDE_INT
-		   && HWI_COMPUTABLE_MODE_P (result_mode)
-		   && 0 == (nonzero_bits (XEXP (varop, 0), result_mode)
+		   && HWI_COMPUTABLE_MODE_P (int_result_mode)
+		   && 0 == (nonzero_bits (XEXP (varop, 0), int_result_mode)
 			    >> count)
-		   && 0 == (nonzero_bits (XEXP (varop, 0), result_mode)
-			    & nonzero_bits (XEXP (varop, 1),
-						 result_mode)))
+		   && 0 == (nonzero_bits (XEXP (varop, 0), int_result_mode)
+			    & nonzero_bits (XEXP (varop, 1), int_result_mode)))
 	    {
 	      varop = XEXP (varop, 1);
 	      continue;
@@ -10960,12 +10979,13 @@ simplify_shift_const_1 (enum rtx_code code, machine_mode result_mode,
 	  if (code == ASHIFT
 	      && CONST_INT_P (XEXP (varop, 1))
 	      && (new_rtx = simplify_const_binary_operation
-		  (ASHIFT, result_mode,
-		   gen_int_mode (INTVAL (XEXP (varop, 1)), result_mode),
+		  (ASHIFT, int_result_mode,
+		   gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode),
 		   GEN_INT (count))) != 0
 	      && CONST_INT_P (new_rtx)
 	      && merge_outer_ops (&outer_op, &outer_const, PLUS,
-				  INTVAL (new_rtx), result_mode, &complement_p))
+				  INTVAL (new_rtx), int_result_mode,
+				  &complement_p))
 	    {
 	      varop = XEXP (varop, 0);
 	      continue;
@@ -10978,14 +10998,15 @@ simplify_shift_const_1 (enum rtx_code code, machine_mode result_mode,
 	     for reasoning in doing so.  */
 	  if (code == LSHIFTRT
 	      && CONST_INT_P (XEXP (varop, 1))
-	      && mode_signbit_p (result_mode, XEXP (varop, 1))
+	      && mode_signbit_p (int_result_mode, XEXP (varop, 1))
 	      && (new_rtx = simplify_const_binary_operation
-		  (code, result_mode,
-		   gen_int_mode (INTVAL (XEXP (varop, 1)), result_mode),
+		  (code, int_result_mode,
+		   gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode),
 		   GEN_INT (count))) != 0
 	      && CONST_INT_P (new_rtx)
 	      && merge_outer_ops (&outer_op, &outer_const, XOR,
-				  INTVAL (new_rtx), result_mode, &complement_p))
+				  INTVAL (new_rtx), int_result_mode,
+				  &complement_p))
 	    {
 	      varop = XEXP (varop, 0);
 	      continue;
@@ -10997,6 +11018,7 @@ simplify_shift_const_1 (enum rtx_code code, machine_mode result_mode,
 	  /* The following rules apply only to scalars.  */
 	  if (shift_mode != shift_unit_mode)
 	    break;
+	  int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
 
 	  /* If we have (xshiftrt (minus (ashiftrt X C)) X) C)
 	     with C the size of VAROP - 1 and the shift is logical if
@@ -11007,18 +11029,18 @@ simplify_shift_const_1 (enum rtx_code code, machine_mode result_mode,
 
 	  if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
 	      && GET_CODE (XEXP (varop, 0)) == ASHIFTRT
-	      && count == (GET_MODE_PRECISION (GET_MODE (varop)) - 1)
+	      && count == (GET_MODE_PRECISION (int_varop_mode) - 1)
 	      && (code == LSHIFTRT || code == ASHIFTRT)
 	      && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
 	      && INTVAL (XEXP (XEXP (varop, 0), 1)) == count
 	      && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
 	    {
 	      count = 0;
-	      varop = gen_rtx_GT (GET_MODE (varop), XEXP (varop, 1),
+	      varop = gen_rtx_GT (int_varop_mode, XEXP (varop, 1),
 				  const0_rtx);
 
 	      if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
-		varop = gen_rtx_NEG (GET_MODE (varop), varop);
+		varop = gen_rtx_NEG (int_varop_mode, varop);
 
 	      continue;
 	    }
@@ -11054,8 +11076,15 @@ simplify_shift_const_1 (enum rtx_code code, machine_mode result_mode,
       break;
     }
 
-  shift_mode = try_widen_shift_mode (code, varop, count, result_mode, mode,
-				     outer_op, outer_const);
+  shift_mode = result_mode;
+  if (shift_mode != mode)
+    {
+      /* We only change the modes of scalar shifts.  */
+      int_mode = as_a <scalar_int_mode> (mode);
+      int_result_mode = as_a <scalar_int_mode> (result_mode);
+      shift_mode = try_widen_shift_mode (code, varop, count, int_result_mode,
+					 int_mode, outer_op, outer_const);
+    }
 
   /* We have now finished analyzing the shift.  The result should be
      a shift of type CODE with SHIFT_MODE shifting VAROP COUNT places.  If
@@ -11090,8 +11119,9 @@ simplify_shift_const_1 (enum rtx_code code, machine_mode result_mode,
   /* If we were doing an LSHIFTRT in a wider mode than it was originally,
      turn off all the bits that the shift would have turned off.  */
   if (orig_code == LSHIFTRT && result_mode != shift_mode)
-    x = simplify_and_const_int (NULL_RTX, shift_mode, x,
-				GET_MODE_MASK (result_mode) >> orig_count);
+    /* We only change the modes of scalar shifts.  */
+    x = simplify_and_const_int (NULL_RTX, as_a <scalar_int_mode> (shift_mode),
+				x, GET_MODE_MASK (result_mode) >> orig_count);
 
   /* Do the remainder of the processing in RESULT_MODE.  */
   x = gen_lowpart_or_truncate (result_mode, x);
@@ -11103,12 +11133,14 @@ simplify_shift_const_1 (enum rtx_code code, machine_mode result_mode,
 
   if (outer_op != UNKNOWN)
     {
+      int_result_mode = as_a <scalar_int_mode> (result_mode);
+
       if (GET_RTX_CLASS (outer_op) != RTX_UNARY
-	  && GET_MODE_PRECISION (result_mode) < HOST_BITS_PER_WIDE_INT)
-	outer_const = trunc_int_for_mode (outer_const, result_mode);
+	  && GET_MODE_PRECISION (int_result_mode) < HOST_BITS_PER_WIDE_INT)
+	outer_const = trunc_int_for_mode (outer_const, int_result_mode);
 
       if (outer_op == AND)
-	x = simplify_and_const_int (NULL_RTX, result_mode, x, outer_const);
+	x = simplify_and_const_int (NULL_RTX, int_result_mode, x, outer_const);
       else if (outer_op == SET)
 	{
 	  /* This means that we have determined that the result is
@@ -11117,9 +11149,9 @@ simplify_shift_const_1 (enum rtx_code code, machine_mode result_mode,
 	    x = GEN_INT (outer_const);
 	}
       else if (GET_RTX_CLASS (outer_op) == RTX_UNARY)
-	x = simplify_gen_unary (outer_op, result_mode, x, result_mode);
+	x = simplify_gen_unary (outer_op, int_result_mode, x, int_result_mode);
       else
-	x = simplify_gen_binary (outer_op, result_mode, x,
+	x = simplify_gen_binary (outer_op, int_result_mode, x,
 				 GEN_INT (outer_const));
     }
 
