public inbox for gcc-patches@gcc.gnu.org
* [PATCH]AArch64 Add special patterns for creating DI scalar and vector constant 1 << 63 [PR109154]
@ 2023-09-27  0:52 Tamar Christina
  2023-09-27 10:32 ` Richard Sandiford
  0 siblings, 1 reply; 4+ messages in thread
From: Tamar Christina @ 2023-09-27  0:52 UTC (permalink / raw)
  To: gcc-patches
  Cc: nd, Richard.Earnshaw, Marcus.Shawcroft, Kyrylo.Tkachov,
	richard.sandiford


Hi All,

This adds a way to generate special sequences for the creation of constants
for which we don't have single-instruction sequences, and which would
otherwise have led to a GP -> FP transfer or a literal load.

The patch starts out by adding support for creating 1 << 63 using fneg (mov 0).
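
As an aside, the trick works because negating +0.0 flips only the IEEE 754
sign bit, so the bit pattern of -0.0 is exactly 1 << 63.  A standalone C
sketch of the identity (not part of the patch; the function name is made up):

  #include <stdint.h>
  #include <string.h>

  /* Return 1ULL << 63 without an integer immediate: negating a
     floating-point zero sets only the sign bit, and on AArch64 the
     negation can stay on the FP side as a single fneg.  */
  uint64_t
  top_bit_via_fneg (double zero)	/* call with 0.0 */
  {
    double neg = -zero;
    uint64_t bits;
    memcpy (&bits, &neg, sizeof bits);
    return bits;			/* 0x8000000000000000 */
  }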

Bootstrapped and regtested on aarch64-none-linux-gnu with no issues.

Ok for master?

Thanks,
Tamar

gcc/ChangeLog:

	PR tree-optimization/109154
	* config/aarch64/aarch64-protos.h (aarch64_simd_special_constant_p):
	New.
	* config/aarch64/aarch64-simd.md (*aarch64_simd_mov<VQMOV:mode>): Add
	new codegen for special constants.
	* config/aarch64/aarch64.cc (aarch64_extract_vec_duplicate_wide_int):
	Take optional mode.
	(aarch64_simd_special_constant_p): New.
	* config/aarch64/aarch64.md (*movdi_aarch64): Add new codegen for
	special constants.
	* config/aarch64/constraints.md (Dx): New.

gcc/testsuite/ChangeLog:

	PR tree-optimization/109154
	* gcc.target/aarch64/fneg-abs_1.c: Updated.
	* gcc.target/aarch64/fneg-abs_2.c: Updated.
	* gcc.target/aarch64/fneg-abs_4.c: Updated.

--- inline copy of patch -- 
diff --git a/gcc/config/aarch64/aarch64-protos.h b/gcc/config/aarch64/aarch64-protos.h
index 70303d6fd953e0c397b9138ede8858c2db2e53db..2af9f6a774c20268bf90756c17064bbff8f8ff87 100644
--- a/gcc/config/aarch64/aarch64-protos.h
+++ b/gcc/config/aarch64/aarch64-protos.h
@@ -827,6 +827,7 @@ bool aarch64_sve_ptrue_svpattern_p (rtx, struct simd_immediate_info *);
 bool aarch64_simd_valid_immediate (rtx, struct simd_immediate_info *,
 			enum simd_immediate_check w = AARCH64_CHECK_MOV);
 rtx aarch64_check_zero_based_sve_index_immediate (rtx);
+bool aarch64_simd_special_constant_p (rtx, rtx, machine_mode);
 bool aarch64_sve_index_immediate_p (rtx);
 bool aarch64_sve_arith_immediate_p (machine_mode, rtx, bool);
 bool aarch64_sve_sqadd_sqsub_immediate_p (machine_mode, rtx, bool);
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index 7b4d5a37a9795fefda785aaacc246918826ed0a2..63c802d942a186b5a94c66d2e83828a82a88ffa8 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -181,17 +181,28 @@ (define_insn_and_split "*aarch64_simd_mov<VQMOV:mode>"
      [?r , r ; multiple           , *   , 8] #
      [w  , Dn; neon_move<q>       , simd, 4] << aarch64_output_simd_mov_immediate (operands[1], 128);
      [w  , Dz; fmov               , *   , 4] fmov\t%d0, xzr
+     [w  , Dx; neon_move          , simd, 8] #
   }
   "&& reload_completed
-   && !(FP_REGNUM_P (REGNO (operands[0]))
-	&& FP_REGNUM_P (REGNO (operands[1])))"
+   && (!(FP_REGNUM_P (REGNO (operands[0]))
+	 && FP_REGNUM_P (REGNO (operands[1])))
+       || (aarch64_simd_special_constant_p (operands[1], NULL_RTX, <MODE>mode)
+	   && FP_REGNUM_P (REGNO (operands[0]))))"
   [(const_int 0)]
   {
     if (GP_REGNUM_P (REGNO (operands[0]))
 	&& GP_REGNUM_P (REGNO (operands[1])))
       aarch64_simd_emit_reg_reg_move (operands, DImode, 2);
     else
-      aarch64_split_simd_move (operands[0], operands[1]);
+      {
+	if (FP_REGNUM_P (REGNO (operands[0]))
+	    && <MODE>mode == V2DImode
+	    && aarch64_simd_special_constant_p (operands[1], operands[0],
+						<MODE>mode))
+	  ;
+	else
+	  aarch64_split_simd_move (operands[0], operands[1]);
+      }
     DONE;
   }
 )
diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
index 3739a44bfd909b69a76529cc6b0ae2f01d6fb36e..6e7ee446f1b31ee8bcf121c97c1c6fa87725bf42 100644
--- a/gcc/config/aarch64/aarch64.cc
+++ b/gcc/config/aarch64/aarch64.cc
@@ -11799,16 +11799,18 @@ aarch64_get_condition_code_1 (machine_mode mode, enum rtx_code comp_code)
 /* Return true if X is a CONST_INT, CONST_WIDE_INT or a constant vector
    duplicate of such constants.  If so, store in RET_WI the wide_int
    representation of the constant paired with the inner mode of the vector mode
-   or TImode for scalar X constants.  */
+   or SMODE for scalar X constants.  If SMODE is not provided then TImode is
+   used.  */
 
 static bool
-aarch64_extract_vec_duplicate_wide_int (rtx x, wide_int *ret_wi)
+aarch64_extract_vec_duplicate_wide_int (rtx x, wide_int *ret_wi,
+					scalar_mode mode = TImode)
 {
   rtx elt = unwrap_const_vec_duplicate (x);
   if (!CONST_SCALAR_INT_P (elt))
     return false;
   scalar_mode smode
-    = CONST_SCALAR_INT_P (x) ? TImode : GET_MODE_INNER (GET_MODE (x));
+    = CONST_SCALAR_INT_P (x) ? mode : GET_MODE_INNER (GET_MODE (x));
   *ret_wi = rtx_mode_t (elt, smode);
   return true;
 }
@@ -11857,6 +11859,43 @@ aarch64_const_vec_all_same_in_range_p (rtx x,
 	  && IN_RANGE (INTVAL (elt), minval, maxval));
 }
 
+/* Some constants can't be made using normal mov instructions in Advanced SIMD
+   but we can still create them in various ways.  If the constant in VAL can be
+   created using alternate methods then if TARGET then return true and set
+   TARGET to the rtx for the sequence, otherwise return false if sequence is
+   not possible.  */
+
+bool
+aarch64_simd_special_constant_p (rtx val, rtx target, machine_mode mode)
+{
+  wide_int wval;
+  machine_mode tmode = GET_MODE (val);
+  auto smode = GET_MODE_INNER (tmode != VOIDmode ? tmode : mode);
+  if (!aarch64_extract_vec_duplicate_wide_int (val, &wval, smode))
+    return false;
+
+  /* For Advanced SIMD we can create an integer with only the top bit set
+     using fneg (0.0f).  */
+  if (TARGET_SIMD
+      && !TARGET_SVE
+      && smode == DImode
+      && wi::only_sign_bit_p (wval))
+    {
+      if (!target)
+	return true;
+
+      /* Use the same base type as aarch64_gen_shareable_zero.  */
+      rtx zero = CONST0_RTX (V4SImode);
+      emit_move_insn (target, lowpart_subreg (mode, zero, V4SImode));
+      rtx neg = lowpart_subreg (V2DFmode, target, mode);
+      emit_insn (gen_negv2df2 (neg, lowpart_subreg (V2DFmode, target, mode)));
+      emit_move_insn (target, lowpart_subreg (mode, neg, V2DFmode));
+      return true;
+    }
+
+  return false;
+}
+
 bool
 aarch64_const_vec_all_same_int_p (rtx x, HOST_WIDE_INT val)
 {
diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
index 634cfd33b41d0f945ca00d8efc9eff1ede490544..b51f979dba12b726bff0c1109b75c6d2c7ae41ab 100644
--- a/gcc/config/aarch64/aarch64.md
+++ b/gcc/config/aarch64/aarch64.md
@@ -1340,13 +1340,21 @@ (define_insn_and_split "*movdi_aarch64"
      [r, w  ; f_mrc    , fp  , 4] fmov\t%x0, %d1
      [w, w  ; fmov     , fp  , 4] fmov\t%d0, %d1
      [w, Dd ; neon_move, simd, 4] << aarch64_output_scalar_simd_mov_immediate (operands[1], DImode);
+     [w, Dx ; neon_move, simd, 8] #
   }
-  "CONST_INT_P (operands[1]) && !aarch64_move_imm (INTVAL (operands[1]), DImode)
-   && REG_P (operands[0]) && GP_REGNUM_P (REGNO (operands[0]))"
+  "CONST_INT_P (operands[1])
+   && REG_P (operands[0])
+   && ((!aarch64_move_imm (INTVAL (operands[1]), DImode)
+	&& GP_REGNUM_P (REGNO (operands[0])))
+       || (aarch64_simd_special_constant_p (operands[1], NULL_RTX, DImode)
+	   && FP_REGNUM_P (REGNO (operands[0]))))"
   [(const_int 0)]
   {
+    if (GP_REGNUM_P (REGNO (operands[0])))
       aarch64_expand_mov_immediate (operands[0], operands[1]);
-      DONE;
+    else
+      aarch64_simd_special_constant_p (operands[1], operands[0], DImode);
+    DONE;
   }
 )
 
diff --git a/gcc/config/aarch64/constraints.md b/gcc/config/aarch64/constraints.md
index 371a00827d84d8ea4a06ba2b00a761d3b179ae90..11cf5a0d16b3364a7a4d0b2a2e5bb33063151479 100644
--- a/gcc/config/aarch64/constraints.md
+++ b/gcc/config/aarch64/constraints.md
@@ -488,6 +488,14 @@ (define_constraint "Dr"
  (and (match_code "const,const_vector")
       (match_test "aarch64_simd_shift_imm_p (op, GET_MODE (op),
 						 false)")))
+
+(define_constraint "Dx"
+  "@internal
+ A constraint that matches a vector of 64-bit immediates which we don't have a
+ single instruction to create but that we can create in creative ways."
+ (and (match_code "const_int,const,const_vector")
+      (match_test "aarch64_simd_special_constant_p (op, NULL_RTX, DImode)")))
+
 (define_constraint "Dz"
   "@internal
  A constraint that matches a vector of immediate zero."
diff --git a/gcc/testsuite/gcc.target/aarch64/fneg-abs_1.c b/gcc/testsuite/gcc.target/aarch64/fneg-abs_1.c
index f823013c3ddf6b3a266c3abfcbf2642fc2a75fa6..43c37e21b50e13c09b8d6850686e88465cd8482a 100644
--- a/gcc/testsuite/gcc.target/aarch64/fneg-abs_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/fneg-abs_1.c
@@ -28,8 +28,8 @@ float32x4_t t2 (float32x4_t a)
 
 /*
 ** t3:
-**	adrp	x0, .LC[0-9]+
-**	ldr	q[0-9]+, \[x0, #:lo12:.LC0\]
+**	movi	v[0-9]+.4s, 0
+**	fneg	v[0-9]+.2d, v[0-9]+.2d
 **	orr	v[0-9]+.16b, v[0-9]+.16b, v[0-9]+.16b
 **	ret
 */
diff --git a/gcc/testsuite/gcc.target/aarch64/fneg-abs_2.c b/gcc/testsuite/gcc.target/aarch64/fneg-abs_2.c
index 141121176b309e4b2aa413dc55271a6e3c93d5e1..fb14ec3e2210e0feeff80f2410d777d3046a9f78 100644
--- a/gcc/testsuite/gcc.target/aarch64/fneg-abs_2.c
+++ b/gcc/testsuite/gcc.target/aarch64/fneg-abs_2.c
@@ -20,8 +20,8 @@ float32_t f1 (float32_t a)
 
 /*
 ** f2:
-**	mov	x0, -9223372036854775808
-**	fmov	d[0-9]+, x0
+**	fmov	d[0-9]+, xzr
+**	fneg	v[0-9]+.2d, v[0-9]+.2d
 **	orr	v[0-9]+.8b, v[0-9]+.8b, v[0-9]+.8b
 **	ret
 */
@@ -29,3 +29,4 @@ float64_t f2 (float64_t a)
 {
   return -fabs (a);
 }
+
diff --git a/gcc/testsuite/gcc.target/aarch64/fneg-abs_4.c b/gcc/testsuite/gcc.target/aarch64/fneg-abs_4.c
index 10879dea74462d34b26160eeb0bd54ead063166b..4ea0105f6c0a9756070bcc60d34f142f53d8242c 100644
--- a/gcc/testsuite/gcc.target/aarch64/fneg-abs_4.c
+++ b/gcc/testsuite/gcc.target/aarch64/fneg-abs_4.c
@@ -8,8 +8,8 @@
 
 /*
 ** negabs:
-**	mov	x0, -9223372036854775808
-**	fmov	d[0-9]+, x0
+**	fmov	d[0-9]+, xzr
+**	fneg	v[0-9]+.2d, v[0-9]+.2d
 **	orr	v[0-9]+.8b, v[0-9]+.8b, v[0-9]+.8b
 **	ret
 */




* Re: [PATCH]AArch64 Add special patterns for creating DI scalar and vector constant 1 << 63 [PR109154]
  2023-09-27  0:52 [PATCH]AArch64 Add special patterns for creating DI scalar and vector constant 1 << 63 [PR109154] Tamar Christina
@ 2023-09-27 10:32 ` Richard Sandiford
  2023-10-05 18:18   ` Tamar Christina
  0 siblings, 1 reply; 4+ messages in thread
From: Richard Sandiford @ 2023-09-27 10:32 UTC (permalink / raw)
  To: Tamar Christina
  Cc: gcc-patches, nd, Richard.Earnshaw, Marcus.Shawcroft, Kyrylo.Tkachov

Tamar Christina <tamar.christina@arm.com> writes:
> Hi All,
>
> This adds a way to generate special sequences for the creation of constants
> for which we don't have single-instruction sequences, and which would
> otherwise have led to a GP -> FP transfer or a literal load.
>
> The patch starts out by adding support for creating 1 << 63 using fneg (mov 0).
>
> Bootstrapped and regtested on aarch64-none-linux-gnu with no issues.
>
> Ok for master?
>
> Thanks,
> Tamar
>
> gcc/ChangeLog:
>
> 	PR tree-optimization/109154
> 	* config/aarch64/aarch64-protos.h (aarch64_simd_special_constant_p):
> 	New.
> 	* config/aarch64/aarch64-simd.md (*aarch64_simd_mov<VQMOV:mode>): Add
> 	new codegen for special constants.
> 	* config/aarch64/aarch64.cc (aarch64_extract_vec_duplicate_wide_int):
> 	Take optional mode.
> 	(aarch64_simd_special_constant_p): New.
> 	* config/aarch64/aarch64.md (*movdi_aarch64): Add new codegen for
> 	special constants.
> 	* config/aarch64/constraints.md (Dx): New.
>
> gcc/testsuite/ChangeLog:
>
> 	PR tree-optimization/109154
> 	* gcc.target/aarch64/fneg-abs_1.c: Updated.
> 	* gcc.target/aarch64/fneg-abs_2.c: Updated.
> 	* gcc.target/aarch64/fneg-abs_4.c: Updated.
>
> --- inline copy of patch -- 
> diff --git a/gcc/config/aarch64/aarch64-protos.h b/gcc/config/aarch64/aarch64-protos.h
> index 70303d6fd953e0c397b9138ede8858c2db2e53db..2af9f6a774c20268bf90756c17064bbff8f8ff87 100644
> --- a/gcc/config/aarch64/aarch64-protos.h
> +++ b/gcc/config/aarch64/aarch64-protos.h
> @@ -827,6 +827,7 @@ bool aarch64_sve_ptrue_svpattern_p (rtx, struct simd_immediate_info *);
>  bool aarch64_simd_valid_immediate (rtx, struct simd_immediate_info *,
>  			enum simd_immediate_check w = AARCH64_CHECK_MOV);
>  rtx aarch64_check_zero_based_sve_index_immediate (rtx);
> +bool aarch64_simd_special_constant_p (rtx, rtx, machine_mode);
>  bool aarch64_sve_index_immediate_p (rtx);
>  bool aarch64_sve_arith_immediate_p (machine_mode, rtx, bool);
>  bool aarch64_sve_sqadd_sqsub_immediate_p (machine_mode, rtx, bool);
> diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
> index 7b4d5a37a9795fefda785aaacc246918826ed0a2..63c802d942a186b5a94c66d2e83828a82a88ffa8 100644
> --- a/gcc/config/aarch64/aarch64-simd.md
> +++ b/gcc/config/aarch64/aarch64-simd.md
> @@ -181,17 +181,28 @@ (define_insn_and_split "*aarch64_simd_mov<VQMOV:mode>"
>       [?r , r ; multiple           , *   , 8] #
>       [w  , Dn; neon_move<q>       , simd, 4] << aarch64_output_simd_mov_immediate (operands[1], 128);
>       [w  , Dz; fmov               , *   , 4] fmov\t%d0, xzr
> +     [w  , Dx; neon_move          , simd, 8] #
>    }
>    "&& reload_completed
> -   && !(FP_REGNUM_P (REGNO (operands[0]))
> -	&& FP_REGNUM_P (REGNO (operands[1])))"
> +   && (!(FP_REGNUM_P (REGNO (operands[0]))
> +	 && FP_REGNUM_P (REGNO (operands[1])))
> +       || (aarch64_simd_special_constant_p (operands[1], NULL_RTX, <MODE>mode)
> +	   && FP_REGNUM_P (REGNO (operands[0]))))"

Unless I'm missing something, the new test is already covered by the:

  !(FP_REGNUM_P (REGNO (operands[0]))
    && FP_REGNUM_P (REGNO (operands[1]))

>    [(const_int 0)]
>    {
>      if (GP_REGNUM_P (REGNO (operands[0]))
>  	&& GP_REGNUM_P (REGNO (operands[1])))
>        aarch64_simd_emit_reg_reg_move (operands, DImode, 2);
>      else
> -      aarch64_split_simd_move (operands[0], operands[1]);
> +      {
> +	if (FP_REGNUM_P (REGNO (operands[0]))
> +	    && <MODE>mode == V2DImode
> +	    && aarch64_simd_special_constant_p (operands[1], operands[0],
> +						<MODE>mode))
> +	  ;

This looked odd at first, since _p functions don't normally have
side effects.  So it looked like this case was expanding to nothing.

How about renaming aarch64_simd_special_constant_p to
aarch64_maybe_generate_simd_constant, and then making
aarch64_simd_special_constant_p a wrapper that passes the NULL_RTX?
Minor nit, but most other functions put the destination first.
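
I.e., something like this shape (just a sketch of the suggestion):

  bool
  aarch64_simd_special_constant_p (rtx val, machine_mode mode)
  {
    return aarch64_maybe_generate_simd_constant (NULL_RTX, val, mode);
  }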

> +	else
> +	  aarch64_split_simd_move (operands[0], operands[1]);
> +      }
>      DONE;
>    }
>  )
> diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
> index 3739a44bfd909b69a76529cc6b0ae2f01d6fb36e..6e7ee446f1b31ee8bcf121c97c1c6fa87725bf42 100644
> --- a/gcc/config/aarch64/aarch64.cc
> +++ b/gcc/config/aarch64/aarch64.cc
> @@ -11799,16 +11799,18 @@ aarch64_get_condition_code_1 (machine_mode mode, enum rtx_code comp_code)
>  /* Return true if X is a CONST_INT, CONST_WIDE_INT or a constant vector
>     duplicate of such constants.  If so, store in RET_WI the wide_int
>     representation of the constant paired with the inner mode of the vector mode
> -   or TImode for scalar X constants.  */
> +   or SMODE for scalar X constants.  If SMODE is not provided then TImode is
> +   used.  */

s/SMODE/MODE/, based on the code.

>  
>  static bool
> -aarch64_extract_vec_duplicate_wide_int (rtx x, wide_int *ret_wi)
> +aarch64_extract_vec_duplicate_wide_int (rtx x, wide_int *ret_wi,
> +					scalar_mode mode = TImode)
>  {
>    rtx elt = unwrap_const_vec_duplicate (x);
>    if (!CONST_SCALAR_INT_P (elt))
>      return false;
>    scalar_mode smode
> -    = CONST_SCALAR_INT_P (x) ? TImode : GET_MODE_INNER (GET_MODE (x));
> +    = CONST_SCALAR_INT_P (x) ? mode : GET_MODE_INNER (GET_MODE (x));
>    *ret_wi = rtx_mode_t (elt, smode);
>    return true;
>  }
> @@ -11857,6 +11859,43 @@ aarch64_const_vec_all_same_in_range_p (rtx x,
>  	  && IN_RANGE (INTVAL (elt), minval, maxval));
>  }
>  
> +/* Some constants can't be made using normal mov instructions in Advanced SIMD
> +   but we can still create them in various ways.  If the constant in VAL can be
> +   created using alternate methods then if TARGET then return true and set
> +   TARGET to the rtx for the sequence, otherwise return false if sequence is
> +   not possible.  */

The return true bit applies regardless of TARGET.

> +
> +bool
> +aarch64_simd_special_constant_p (rtx val, rtx target, machine_mode mode)
> +{
> +  wide_int wval;
> +  machine_mode tmode = GET_MODE (val);
> +  auto smode = GET_MODE_INNER (tmode != VOIDmode ? tmode : mode);

Can we not use "mode" unconditionally?

> +  if (!aarch64_extract_vec_duplicate_wide_int (val, &wval, smode))
> +    return false;
> +
> +  /* For Advanced SIMD we can create an integer with only the top bit set
> +     using fneg (0.0f).  */
> +  if (TARGET_SIMD
> +      && !TARGET_SVE
> +      && smode == DImode
> +      && wi::only_sign_bit_p (wval))
> +    {
> +      if (!target)
> +	return true;
> +
> +      /* Use the same base type as aarch64_gen_shareable_zero.  */
> +      rtx zero = CONST0_RTX (V4SImode);
> +      emit_move_insn (target, lowpart_subreg (mode, zero, V4SImode));

The lowpart_subreg should simplify this back into CONST0_RTX (mode),
making it no different from:

    emit_move_insn (target, CONST0_RTX (mode));

If the intention is to share zeros between modes (sounds good!),
then I think the subreg needs to be on the lhs instead.
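
I.e., a sketch of the two forms, with the subreg moved to the destination:

    /* Original form: the subreg of the constant zero simplifies away,
       leaving a plain zero move in MODE.  */
    emit_move_insn (target, lowpart_subreg (mode, zero, V4SImode));

    /* Suggested form: write the shared V4SImode zero through a subreg
       of TARGET instead.  */
    emit_move_insn (lowpart_subreg (V4SImode, target, mode), zero);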

> +      rtx neg = lowpart_subreg (V2DFmode, target, mode);
> +      emit_insn (gen_negv2df2 (neg, lowpart_subreg (V2DFmode, target, mode)));

The rhs seems simpler as copy_rtx (neg).  (Even the copy_rtx shouldn't
be needed after RA, but it's probably more future-proof to keep it.)
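
I.e. (sketch):

    emit_insn (gen_negv2df2 (neg, copy_rtx (neg)));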

> +      emit_move_insn (target, lowpart_subreg (mode, neg, V2DFmode));

This shouldn't be needed, since neg is already a reference to target.

Overall, looks like a nice change/framework.

Thanks,
Richard

> +      return true;
> +    }
> +
> +  return false;
> +}
> +
>  bool
>  aarch64_const_vec_all_same_int_p (rtx x, HOST_WIDE_INT val)
>  {
> diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
> index 634cfd33b41d0f945ca00d8efc9eff1ede490544..b51f979dba12b726bff0c1109b75c6d2c7ae41ab 100644
> --- a/gcc/config/aarch64/aarch64.md
> +++ b/gcc/config/aarch64/aarch64.md
> @@ -1340,13 +1340,21 @@ (define_insn_and_split "*movdi_aarch64"
>       [r, w  ; f_mrc    , fp  , 4] fmov\t%x0, %d1
>       [w, w  ; fmov     , fp  , 4] fmov\t%d0, %d1
>       [w, Dd ; neon_move, simd, 4] << aarch64_output_scalar_simd_mov_immediate (operands[1], DImode);
> +     [w, Dx ; neon_move, simd, 8] #
>    }
> -  "CONST_INT_P (operands[1]) && !aarch64_move_imm (INTVAL (operands[1]), DImode)
> -   && REG_P (operands[0]) && GP_REGNUM_P (REGNO (operands[0]))"
> +  "CONST_INT_P (operands[1])
> +   && REG_P (operands[0])
> +   && ((!aarch64_move_imm (INTVAL (operands[1]), DImode)
> +	&& GP_REGNUM_P (REGNO (operands[0])))
> +       || (aarch64_simd_special_constant_p (operands[1], NULL_RTX, DImode)
> +	   && FP_REGNUM_P (REGNO (operands[0]))))"
>    [(const_int 0)]
>    {
> +    if (GP_REGNUM_P (REGNO (operands[0])))
>        aarch64_expand_mov_immediate (operands[0], operands[1]);
> -      DONE;
> +    else
> +      aarch64_simd_special_constant_p (operands[1], operands[0], DImode);
> +    DONE;
>    }
>  )
>  
> diff --git a/gcc/config/aarch64/constraints.md b/gcc/config/aarch64/constraints.md
> index 371a00827d84d8ea4a06ba2b00a761d3b179ae90..11cf5a0d16b3364a7a4d0b2a2e5bb33063151479 100644
> --- a/gcc/config/aarch64/constraints.md
> +++ b/gcc/config/aarch64/constraints.md
> @@ -488,6 +488,14 @@ (define_constraint "Dr"
>   (and (match_code "const,const_vector")
>        (match_test "aarch64_simd_shift_imm_p (op, GET_MODE (op),
>  						 false)")))
> +
> +(define_constraint "Dx"
> +  "@internal
> + A constraint that matches a vector of 64-bit immediates which we don't have a
> + single instruction to create but that we can create in creative ways."
> + (and (match_code "const_int,const,const_vector")
> +      (match_test "aarch64_simd_special_constant_p (op, NULL_RTX, DImode)")))
> +
>  (define_constraint "Dz"
>    "@internal
>   A constraint that matches a vector of immediate zero."
> diff --git a/gcc/testsuite/gcc.target/aarch64/fneg-abs_1.c b/gcc/testsuite/gcc.target/aarch64/fneg-abs_1.c
> index f823013c3ddf6b3a266c3abfcbf2642fc2a75fa6..43c37e21b50e13c09b8d6850686e88465cd8482a 100644
> --- a/gcc/testsuite/gcc.target/aarch64/fneg-abs_1.c
> +++ b/gcc/testsuite/gcc.target/aarch64/fneg-abs_1.c
> @@ -28,8 +28,8 @@ float32x4_t t2 (float32x4_t a)
>  
>  /*
>  ** t3:
> -**	adrp	x0, .LC[0-9]+
> -**	ldr	q[0-9]+, \[x0, #:lo12:.LC0\]
> +**	movi	v[0-9]+.4s, 0
> +**	fneg	v[0-9]+.2d, v[0-9]+.2d
>  **	orr	v[0-9]+.16b, v[0-9]+.16b, v[0-9]+.16b
>  **	ret
>  */
> diff --git a/gcc/testsuite/gcc.target/aarch64/fneg-abs_2.c b/gcc/testsuite/gcc.target/aarch64/fneg-abs_2.c
> index 141121176b309e4b2aa413dc55271a6e3c93d5e1..fb14ec3e2210e0feeff80f2410d777d3046a9f78 100644
> --- a/gcc/testsuite/gcc.target/aarch64/fneg-abs_2.c
> +++ b/gcc/testsuite/gcc.target/aarch64/fneg-abs_2.c
> @@ -20,8 +20,8 @@ float32_t f1 (float32_t a)
>  
>  /*
>  ** f2:
> -**	mov	x0, -9223372036854775808
> -**	fmov	d[0-9]+, x0
> +**	fmov	d[0-9]+, xzr
> +**	fneg	v[0-9]+.2d, v[0-9]+.2d
>  **	orr	v[0-9]+.8b, v[0-9]+.8b, v[0-9]+.8b
>  **	ret
>  */
> @@ -29,3 +29,4 @@ float64_t f2 (float64_t a)
>  {
>    return -fabs (a);
>  }
> +
> diff --git a/gcc/testsuite/gcc.target/aarch64/fneg-abs_4.c b/gcc/testsuite/gcc.target/aarch64/fneg-abs_4.c
> index 10879dea74462d34b26160eeb0bd54ead063166b..4ea0105f6c0a9756070bcc60d34f142f53d8242c 100644
> --- a/gcc/testsuite/gcc.target/aarch64/fneg-abs_4.c
> +++ b/gcc/testsuite/gcc.target/aarch64/fneg-abs_4.c
> @@ -8,8 +8,8 @@
>  
>  /*
>  ** negabs:
> -**	mov	x0, -9223372036854775808
> -**	fmov	d[0-9]+, x0
> +**	fmov	d[0-9]+, xzr
> +**	fneg	v[0-9]+.2d, v[0-9]+.2d
>  **	orr	v[0-9]+.8b, v[0-9]+.8b, v[0-9]+.8b
>  **	ret
>  */


* RE: [PATCH]AArch64 Add special patterns for creating DI scalar and vector constant 1 << 63 [PR109154]
  2023-09-27 10:32 ` Richard Sandiford
@ 2023-10-05 18:18   ` Tamar Christina
  2023-10-05 19:47     ` Richard Sandiford
  0 siblings, 1 reply; 4+ messages in thread
From: Tamar Christina @ 2023-10-05 18:18 UTC (permalink / raw)
  To: Richard Sandiford
  Cc: gcc-patches, nd, Richard Earnshaw, Marcus Shawcroft, Kyrylo Tkachov


Hi,

> The lowpart_subreg should simplify this back into CONST0_RTX (mode),
> making it no different from:
> 
>     emit_move_insn (target, CONST0_RTX (mode));
> 
> If the intention is to share zeros between modes (sounds good!), then I think
> the subreg needs to be on the lhs instead.
> 
> > +      rtx neg = lowpart_subreg (V2DFmode, target, mode);
> > +      emit_insn (gen_negv2df2 (neg, lowpart_subreg (V2DFmode, target,
> > + mode)));
> 
> The rhs seems simpler as copy_rtx (neg).  (Even the copy_rtx shouldn't be
> needed after RA, but it's probably more future-proof to keep it.)
> 
> > +      emit_move_insn (target, lowpart_subreg (mode, neg, V2DFmode));
> 
> This shouldn't be needed, since neg is already a reference to target.
> 
> Overall, looks like a nice change/framework.

Updated the patch, and in the process also realized this can be used for the
vector variants:

Hi All,

This adds a way to generate special sequences for the creation of constants
for which we don't have single-instruction sequences, and which would
otherwise have led to a GP -> FP transfer or a literal load.

The patch starts out by adding support for creating 1 << 63 using fneg (mov 0).

Bootstrapped and regtested on aarch64-none-linux-gnu with no issues.

Ok for master?

Thanks,
Tamar

gcc/ChangeLog:

	PR tree-optimization/109154
	* config/aarch64/aarch64-protos.h (aarch64_simd_special_constant_p,
	aarch64_maybe_generate_simd_constant): New.
	* config/aarch64/aarch64-simd.md (*aarch64_simd_mov<VQMOV:mode>,
	*aarch64_simd_mov<VDMOV:mode>): Add new codegen for special constants.
	* config/aarch64/aarch64.cc (aarch64_extract_vec_duplicate_wide_int):
	Take optional mode.
	(aarch64_simd_special_constant_p,
	aarch64_maybe_generate_simd_constant): New.
	* config/aarch64/aarch64.md (*movdi_aarch64): Add new codegen for
	special constants.
	* config/aarch64/constraints.md (Dx): New.

gcc/testsuite/ChangeLog:

	PR tree-optimization/109154
	* gcc.target/aarch64/fneg-abs_1.c: Updated.
	* gcc.target/aarch64/fneg-abs_2.c: Updated.
	* gcc.target/aarch64/fneg-abs_4.c: Updated.
	* gcc.target/aarch64/dbl_mov_immediate_1.c: Updated.

--- inline copy of patch ---

diff --git a/gcc/config/aarch64/aarch64-protos.h b/gcc/config/aarch64/aarch64-protos.h
index 60a55f4bc1956786ea687fc7cad7ec9e4a84e1f0..36d6c688bc888a51a9de174bd3665aebe891b8b1 100644
--- a/gcc/config/aarch64/aarch64-protos.h
+++ b/gcc/config/aarch64/aarch64-protos.h
@@ -831,6 +831,8 @@ bool aarch64_sve_ptrue_svpattern_p (rtx, struct simd_immediate_info *);
 bool aarch64_simd_valid_immediate (rtx, struct simd_immediate_info *,
 			enum simd_immediate_check w = AARCH64_CHECK_MOV);
 rtx aarch64_check_zero_based_sve_index_immediate (rtx);
+bool aarch64_maybe_generate_simd_constant (rtx, rtx, machine_mode);
+bool aarch64_simd_special_constant_p (rtx, machine_mode);
 bool aarch64_sve_index_immediate_p (rtx);
 bool aarch64_sve_arith_immediate_p (machine_mode, rtx, bool);
 bool aarch64_sve_sqadd_sqsub_immediate_p (machine_mode, rtx, bool);
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index 81ff5bad03d598fa0d48df93d172a28bc0d1d92e..33eceb436584ff73c7271f93639f2246d1af19e0 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -142,26 +142,35 @@ (define_insn "aarch64_dup_lane_<vswap_width_name><mode>"
   [(set_attr "type" "neon_dup<q>")]
 )
 
-(define_insn "*aarch64_simd_mov<VDMOV:mode>"
+(define_insn_and_split "*aarch64_simd_mov<VDMOV:mode>"
   [(set (match_operand:VDMOV 0 "nonimmediate_operand")
 	(match_operand:VDMOV 1 "general_operand"))]
   "TARGET_FLOAT
    && (register_operand (operands[0], <MODE>mode)
        || aarch64_simd_reg_or_zero (operands[1], <MODE>mode))"
-  {@ [cons: =0, 1; attrs: type, arch]
-     [w , m ; neon_load1_1reg<q> , *   ] ldr\t%d0, %1
-     [r , m ; load_8             , *   ] ldr\t%x0, %1
-     [m , Dz; store_8            , *   ] str\txzr, %0
-     [m , w ; neon_store1_1reg<q>, *   ] str\t%d1, %0
-     [m , r ; store_8            , *   ] str\t%x1, %0
-     [w , w ; neon_logic<q>      , simd] mov\t%0.<Vbtype>, %1.<Vbtype>
-     [w , w ; neon_logic<q>      , *   ] fmov\t%d0, %d1
-     [?r, w ; neon_to_gp<q>      , simd] umov\t%0, %1.d[0]
-     [?r, w ; neon_to_gp<q>      , *   ] fmov\t%x0, %d1
-     [?w, r ; f_mcr              , *   ] fmov\t%d0, %1
-     [?r, r ; mov_reg            , *   ] mov\t%0, %1
-     [w , Dn; neon_move<q>       , simd] << aarch64_output_simd_mov_immediate (operands[1], 64);
-     [w , Dz; f_mcr              , *   ] fmov\t%d0, xzr
+  {@ [cons: =0, 1; attrs: type, arch, length]
+     [w , m ; neon_load1_1reg<q> , *   , *] ldr\t%d0, %1
+     [r , m ; load_8             , *   , *] ldr\t%x0, %1
+     [m , Dz; store_8            , *   , *] str\txzr, %0
+     [m , w ; neon_store1_1reg<q>, *   , *] str\t%d1, %0
+     [m , r ; store_8            , *   , *] str\t%x1, %0
+     [w , w ; neon_logic<q>      , simd, *] mov\t%0.<Vbtype>, %1.<Vbtype>
+     [w , w ; neon_logic<q>      , *   , *] fmov\t%d0, %d1
+     [?r, w ; neon_to_gp<q>      , simd, *] umov\t%0, %1.d[0]
+     [?r, w ; neon_to_gp<q>      , *   , *] fmov\t%x0, %d1
+     [?w, r ; f_mcr              , *   , *] fmov\t%d0, %1
+     [?r, r ; mov_reg            , *   , *] mov\t%0, %1
+     [w , Dn; neon_move<q>       , simd, *] << aarch64_output_simd_mov_immediate (operands[1], 64);
+     [w , Dz; f_mcr              , *   , *] fmov\t%d0, xzr
+     [w , Dx; neon_move          , simd, 8] #
+  }
+  "CONST_INT_P (operands[1])
+   && aarch64_simd_special_constant_p (operands[1], <MODE>mode)
+   && FP_REGNUM_P (REGNO (operands[0]))"
+  [(const_int 0)]
+  {
+    aarch64_maybe_generate_simd_constant (operands[0], operands[1], <MODE>mode);
+    DONE;
   }
 )
 
@@ -181,19 +190,30 @@ (define_insn_and_split "*aarch64_simd_mov<VQMOV:mode>"
      [?r , r ; multiple           , *   , 8] #
      [w  , Dn; neon_move<q>       , simd, 4] << aarch64_output_simd_mov_immediate (operands[1], 128);
      [w  , Dz; fmov               , *   , 4] fmov\t%d0, xzr
+     [w  , Dx; neon_move          , simd, 8] #
   }
   "&& reload_completed
-   && (REG_P (operands[0])
+   && ((REG_P (operands[0])
 	&& REG_P (operands[1])
 	&& !(FP_REGNUM_P (REGNO (operands[0]))
-	     && FP_REGNUM_P (REGNO (operands[1]))))"
+	     && FP_REGNUM_P (REGNO (operands[1]))))
+       || (aarch64_simd_special_constant_p (operands[1], <MODE>mode)
+	   && FP_REGNUM_P (REGNO (operands[0]))))"
   [(const_int 0)]
   {
     if (GP_REGNUM_P (REGNO (operands[0]))
 	&& GP_REGNUM_P (REGNO (operands[1])))
       aarch64_simd_emit_reg_reg_move (operands, DImode, 2);
     else
-      aarch64_split_simd_move (operands[0], operands[1]);
+      {
+	if (FP_REGNUM_P (REGNO (operands[0]))
+	    && <MODE>mode == V2DImode
+	    && aarch64_maybe_generate_simd_constant (operands[0], operands[1],
+						     <MODE>mode))
+	  ;
+	else
+	  aarch64_split_simd_move (operands[0], operands[1]);
+      }
     DONE;
   }
 )
diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
index 9fbfc548a891f5d11940c6fd3c49a14bfbdec886..c5cf42f7801b291754840dcc5b304577e8e0d391 100644
--- a/gcc/config/aarch64/aarch64.cc
+++ b/gcc/config/aarch64/aarch64.cc
@@ -11873,16 +11873,18 @@ aarch64_get_condition_code_1 (machine_mode mode, enum rtx_code comp_code)
 /* Return true if X is a CONST_INT, CONST_WIDE_INT or a constant vector
    duplicate of such constants.  If so, store in RET_WI the wide_int
    representation of the constant paired with the inner mode of the vector mode
-   or TImode for scalar X constants.  */
+   or MODE for scalar X constants.  If MODE is not provided then TImode is
+   used.  */
 
 static bool
-aarch64_extract_vec_duplicate_wide_int (rtx x, wide_int *ret_wi)
+aarch64_extract_vec_duplicate_wide_int (rtx x, wide_int *ret_wi,
+					scalar_mode mode = TImode)
 {
   rtx elt = unwrap_const_vec_duplicate (x);
   if (!CONST_SCALAR_INT_P (elt))
     return false;
   scalar_mode smode
-    = CONST_SCALAR_INT_P (x) ? TImode : GET_MODE_INNER (GET_MODE (x));
+    = CONST_SCALAR_INT_P (x) ? mode : GET_MODE_INNER (GET_MODE (x));
   *ret_wi = rtx_mode_t (elt, smode);
   return true;
 }
@@ -11931,6 +11933,49 @@ aarch64_const_vec_all_same_in_range_p (rtx x,
 	  && IN_RANGE (INTVAL (elt), minval, maxval));
 }
 
+/* Some constants can't be made using normal mov instructions in Advanced SIMD
+   but we can still create them in various ways.  If the constant in VAL can
+   be created using alternate methods then return true, and additionally set
+   TARGET to the rtx for the sequence if TARGET is not NULL.  Otherwise
+   return false if the sequence is not possible.  */
+
+bool
+aarch64_maybe_generate_simd_constant (rtx target, rtx val, machine_mode mode)
+{
+  wide_int wval;
+  auto smode = GET_MODE_INNER (mode);
+  if (!aarch64_extract_vec_duplicate_wide_int (val, &wval, smode))
+    return false;
+
+  /* For Advanced SIMD we can create an integer with only the top bit set
+     using fneg (0.0f).  */
+  if (TARGET_SIMD
+      && !TARGET_SVE
+      && smode == DImode
+      && wi::only_sign_bit_p (wval))
+    {
+      if (!target)
+	return true;
+
+      /* Use the same base type as aarch64_gen_shareable_zero.  */
+      rtx zero = CONST0_RTX (V4SImode);
+      emit_move_insn (lowpart_subreg (V4SImode, target, mode), zero);
+      rtx neg = lowpart_subreg (V2DFmode, target, mode);
+      emit_insn (gen_negv2df2 (neg, copy_rtx (neg)));
+      return true;
+    }
+
+  return false;
+}
+
+/* Check if the value in VAL with mode MODE can be created using special
+   instruction sequences.  */
+
+bool aarch64_simd_special_constant_p (rtx val, machine_mode mode)
+{
+  return aarch64_maybe_generate_simd_constant (NULL_RTX, val, mode);
+}
+
 bool
 aarch64_const_vec_all_same_int_p (rtx x, HOST_WIDE_INT val)
 {
diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
index 32c7adc89281b249b52ecedf2f1678749c289d18..6f7a6cd1830e5b7cdb3eab76f3143964278a8561 100644
--- a/gcc/config/aarch64/aarch64.md
+++ b/gcc/config/aarch64/aarch64.md
@@ -1341,13 +1341,21 @@ (define_insn_and_split "*movdi_aarch64"
      [r, w  ; f_mrc    , fp  , 4] fmov\t%x0, %d1
      [w, w  ; fmov     , fp  , 4] fmov\t%d0, %d1
      [w, Dd ; neon_move, simd, 4] << aarch64_output_scalar_simd_mov_immediate (operands[1], DImode);
-  }
-  "CONST_INT_P (operands[1]) && !aarch64_move_imm (INTVAL (operands[1]), DImode)
-   && REG_P (operands[0]) && GP_REGNUM_P (REGNO (operands[0]))"
+     [w, Dx ; neon_move, simd, 8] #
+  }
+  "CONST_INT_P (operands[1])
+   && REG_P (operands[0])
+   && ((!aarch64_move_imm (INTVAL (operands[1]), DImode)
+	&& GP_REGNUM_P (REGNO (operands[0])))
+       || (aarch64_simd_special_constant_p (operands[1], DImode)
+	   && FP_REGNUM_P (REGNO (operands[0]))))"
   [(const_int 0)]
   {
+    if (GP_REGNUM_P (REGNO (operands[0])))
       aarch64_expand_mov_immediate (operands[0], operands[1]);
-      DONE;
+    else
+      aarch64_maybe_generate_simd_constant (operands[0], operands[1], DImode);
+    DONE;
   }
 )
 
diff --git a/gcc/config/aarch64/constraints.md b/gcc/config/aarch64/constraints.md
index 371a00827d84d8ea4a06ba2b00a761d3b179ae90..b3922bcb9a8b362c995c96c6d1c6eef034990251 100644
--- a/gcc/config/aarch64/constraints.md
+++ b/gcc/config/aarch64/constraints.md
@@ -488,6 +488,14 @@ (define_constraint "Dr"
  (and (match_code "const,const_vector")
       (match_test "aarch64_simd_shift_imm_p (op, GET_MODE (op),
 						 false)")))
+
+(define_constraint "Dx"
+  "@internal
+ A constraint that matches a vector of 64-bit immediates which we don't have a
+ single instruction to create but that we can create in creative ways."
+ (and (match_code "const_int,const,const_vector")
+      (match_test "aarch64_simd_special_constant_p (op, DImode)")))
+
 (define_constraint "Dz"
   "@internal
  A constraint that matches a vector of immediate zero."
diff --git a/gcc/testsuite/gcc.target/aarch64/dbl_mov_immediate_1.c b/gcc/testsuite/gcc.target/aarch64/dbl_mov_immediate_1.c
index ba6a230457ba7a86f1939665fe9177ecdb45f935..fb9088e9d2849c0ea10a8741795181a0543c3cb2 100644
--- a/gcc/testsuite/gcc.target/aarch64/dbl_mov_immediate_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/dbl_mov_immediate_1.c
@@ -48,6 +48,8 @@ double d4(void)
 
 /* { dg-final { scan-assembler-times "mov\tx\[0-9\]+, 25838523252736"       1 } } */
 /* { dg-final { scan-assembler-times "movk\tx\[0-9\]+, 0x40fe, lsl 48"      1 } } */
-/* { dg-final { scan-assembler-times "mov\tx\[0-9\]+, -9223372036854775808" 1 } } */
-/* { dg-final { scan-assembler-times "fmov\td\[0-9\]+, x\[0-9\]+"           2 } } */
+/* { dg-final { scan-assembler-times "mov\tx\[0-9\]+, -9223372036854775808" 0 } } */
+/* { dg-final { scan-assembler-times {movi\tv[0-9]+.2d, #0} 1 } } */
+/* { dg-final { scan-assembler-times {fneg\tv[0-9]+.2d, v[0-9]+.2d} 1 } } */
+/* { dg-final { scan-assembler-times "fmov\td\[0-9\]+, x\[0-9\]+"           1 } } */
 
diff --git a/gcc/testsuite/gcc.target/aarch64/fneg-abs_1.c b/gcc/testsuite/gcc.target/aarch64/fneg-abs_1.c
index f823013c3ddf6b3a266c3abfcbf2642fc2a75fa6..43c37e21b50e13c09b8d6850686e88465cd8482a 100644
--- a/gcc/testsuite/gcc.target/aarch64/fneg-abs_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/fneg-abs_1.c
@@ -28,8 +28,8 @@ float32x4_t t2 (float32x4_t a)
 
 /*
 ** t3:
-**	adrp	x0, .LC[0-9]+
-**	ldr	q[0-9]+, \[x0, #:lo12:.LC0\]
+**	movi	v[0-9]+.4s, 0
+**	fneg	v[0-9]+.2d, v[0-9]+.2d
 **	orr	v[0-9]+.16b, v[0-9]+.16b, v[0-9]+.16b
 **	ret
 */
diff --git a/gcc/testsuite/gcc.target/aarch64/fneg-abs_2.c b/gcc/testsuite/gcc.target/aarch64/fneg-abs_2.c
index 141121176b309e4b2aa413dc55271a6e3c93d5e1..fb14ec3e2210e0feeff80f2410d777d3046a9f78 100644
--- a/gcc/testsuite/gcc.target/aarch64/fneg-abs_2.c
+++ b/gcc/testsuite/gcc.target/aarch64/fneg-abs_2.c
@@ -20,8 +20,8 @@ float32_t f1 (float32_t a)
 
 /*
 ** f2:
-**	mov	x0, -9223372036854775808
-**	fmov	d[0-9]+, x0
+**	fmov	d[0-9]+, xzr
+**	fneg	v[0-9]+.2d, v[0-9]+.2d
 **	orr	v[0-9]+.8b, v[0-9]+.8b, v[0-9]+.8b
 **	ret
 */
@@ -29,3 +29,4 @@ float64_t f2 (float64_t a)
 {
   return -fabs (a);
 }
+
diff --git a/gcc/testsuite/gcc.target/aarch64/fneg-abs_4.c b/gcc/testsuite/gcc.target/aarch64/fneg-abs_4.c
index 10879dea74462d34b26160eeb0bd54ead063166b..4ea0105f6c0a9756070bcc60d34f142f53d8242c 100644
--- a/gcc/testsuite/gcc.target/aarch64/fneg-abs_4.c
+++ b/gcc/testsuite/gcc.target/aarch64/fneg-abs_4.c
@@ -8,8 +8,8 @@
 
 /*
 ** negabs:
-**	mov	x0, -9223372036854775808
-**	fmov	d[0-9]+, x0
+**	fmov	d[0-9]+, xzr
+**	fneg	v[0-9]+.2d, v[0-9]+.2d
 **	orr	v[0-9]+.8b, v[0-9]+.8b, v[0-9]+.8b
 **	ret
 */

[-- Attachment #2: rb17722.patch --]
[-- Type: application/octet-stream, Size: 12402 bytes --]

diff --git a/gcc/config/aarch64/aarch64-protos.h b/gcc/config/aarch64/aarch64-protos.h
index 60a55f4bc1956786ea687fc7cad7ec9e4a84e1f0..36d6c688bc888a51a9de174bd3665aebe891b8b1 100644
--- a/gcc/config/aarch64/aarch64-protos.h
+++ b/gcc/config/aarch64/aarch64-protos.h
@@ -831,6 +831,8 @@ bool aarch64_sve_ptrue_svpattern_p (rtx, struct simd_immediate_info *);
 bool aarch64_simd_valid_immediate (rtx, struct simd_immediate_info *,
 			enum simd_immediate_check w = AARCH64_CHECK_MOV);
 rtx aarch64_check_zero_based_sve_index_immediate (rtx);
+bool aarch64_maybe_generate_simd_constant (rtx, rtx, machine_mode);
+bool aarch64_simd_special_constant_p (rtx, machine_mode);
 bool aarch64_sve_index_immediate_p (rtx);
 bool aarch64_sve_arith_immediate_p (machine_mode, rtx, bool);
 bool aarch64_sve_sqadd_sqsub_immediate_p (machine_mode, rtx, bool);
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index 81ff5bad03d598fa0d48df93d172a28bc0d1d92e..33eceb436584ff73c7271f93639f2246d1af19e0 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -142,26 +142,35 @@ (define_insn "aarch64_dup_lane_<vswap_width_name><mode>"
   [(set_attr "type" "neon_dup<q>")]
 )
 
-(define_insn "*aarch64_simd_mov<VDMOV:mode>"
+(define_insn_and_split "*aarch64_simd_mov<VDMOV:mode>"
   [(set (match_operand:VDMOV 0 "nonimmediate_operand")
 	(match_operand:VDMOV 1 "general_operand"))]
   "TARGET_FLOAT
    && (register_operand (operands[0], <MODE>mode)
        || aarch64_simd_reg_or_zero (operands[1], <MODE>mode))"
-  {@ [cons: =0, 1; attrs: type, arch]
-     [w , m ; neon_load1_1reg<q> , *   ] ldr\t%d0, %1
-     [r , m ; load_8             , *   ] ldr\t%x0, %1
-     [m , Dz; store_8            , *   ] str\txzr, %0
-     [m , w ; neon_store1_1reg<q>, *   ] str\t%d1, %0
-     [m , r ; store_8            , *   ] str\t%x1, %0
-     [w , w ; neon_logic<q>      , simd] mov\t%0.<Vbtype>, %1.<Vbtype>
-     [w , w ; neon_logic<q>      , *   ] fmov\t%d0, %d1
-     [?r, w ; neon_to_gp<q>      , simd] umov\t%0, %1.d[0]
-     [?r, w ; neon_to_gp<q>      , *   ] fmov\t%x0, %d1
-     [?w, r ; f_mcr              , *   ] fmov\t%d0, %1
-     [?r, r ; mov_reg            , *   ] mov\t%0, %1
-     [w , Dn; neon_move<q>       , simd] << aarch64_output_simd_mov_immediate (operands[1], 64);
-     [w , Dz; f_mcr              , *   ] fmov\t%d0, xzr
+  {@ [cons: =0, 1; attrs: type, arch, length]
+     [w , m ; neon_load1_1reg<q> , *   , *] ldr\t%d0, %1
+     [r , m ; load_8             , *   , *] ldr\t%x0, %1
+     [m , Dz; store_8            , *   , *] str\txzr, %0
+     [m , w ; neon_store1_1reg<q>, *   , *] str\t%d1, %0
+     [m , r ; store_8            , *   , *] str\t%x1, %0
+     [w , w ; neon_logic<q>      , simd, *] mov\t%0.<Vbtype>, %1.<Vbtype>
+     [w , w ; neon_logic<q>      , *   , *] fmov\t%d0, %d1
+     [?r, w ; neon_to_gp<q>      , simd, *] umov\t%0, %1.d[0]
+     [?r, w ; neon_to_gp<q>      , *   , *] fmov\t%x0, %d1
+     [?w, r ; f_mcr              , *   , *] fmov\t%d0, %1
+     [?r, r ; mov_reg            , *   , *] mov\t%0, %1
+     [w , Dn; neon_move<q>       , simd, *] << aarch64_output_simd_mov_immediate (operands[1], 64);
+     [w , Dz; f_mcr              , *   , *] fmov\t%d0, xzr
+     [w , Dx; neon_move          , simd, 8] #
+  }
+  "CONST_INT_P (operands[1])
+   && aarch64_simd_special_constant_p (operands[1], <MODE>mode)
+   && FP_REGNUM_P (REGNO (operands[0]))"
+  [(const_int 0)]
+  {
+    aarch64_maybe_generate_simd_constant (operands[0], operands[1], <MODE>mode);
+    DONE;
   }
 )
 
@@ -181,19 +190,30 @@ (define_insn_and_split "*aarch64_simd_mov<VQMOV:mode>"
      [?r , r ; multiple           , *   , 8] #
      [w  , Dn; neon_move<q>       , simd, 4] << aarch64_output_simd_mov_immediate (operands[1], 128);
      [w  , Dz; fmov               , *   , 4] fmov\t%d0, xzr
+     [w  , Dx; neon_move          , simd, 8] #
   }
   "&& reload_completed
-   && (REG_P (operands[0])
+   && ((REG_P (operands[0])
 	&& REG_P (operands[1])
 	&& !(FP_REGNUM_P (REGNO (operands[0]))
-	     && FP_REGNUM_P (REGNO (operands[1]))))"
+	     && FP_REGNUM_P (REGNO (operands[1]))))
+       || (aarch64_simd_special_constant_p (operands[1], <MODE>mode)
+	   && FP_REGNUM_P (REGNO (operands[0]))))"
   [(const_int 0)]
   {
     if (GP_REGNUM_P (REGNO (operands[0]))
 	&& GP_REGNUM_P (REGNO (operands[1])))
       aarch64_simd_emit_reg_reg_move (operands, DImode, 2);
     else
-      aarch64_split_simd_move (operands[0], operands[1]);
+      {
+	if (FP_REGNUM_P (REGNO (operands[0]))
+	    && <MODE>mode == V2DImode
+	    && aarch64_maybe_generate_simd_constant (operands[0], operands[1],
+						     <MODE>mode))
+	  ;
+	else
+	  aarch64_split_simd_move (operands[0], operands[1]);
+      }
     DONE;
   }
 )
diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
index 9fbfc548a891f5d11940c6fd3c49a14bfbdec886..c5cf42f7801b291754840dcc5b304577e8e0d391 100644
--- a/gcc/config/aarch64/aarch64.cc
+++ b/gcc/config/aarch64/aarch64.cc
@@ -11873,16 +11873,18 @@ aarch64_get_condition_code_1 (machine_mode mode, enum rtx_code comp_code)
 /* Return true if X is a CONST_INT, CONST_WIDE_INT or a constant vector
    duplicate of such constants.  If so, store in RET_WI the wide_int
    representation of the constant paired with the inner mode of the vector mode
-   or TImode for scalar X constants.  */
+   or MODE for scalar X constants.  If MODE is not provided then TImode is
+   used.  */
 
 static bool
-aarch64_extract_vec_duplicate_wide_int (rtx x, wide_int *ret_wi)
+aarch64_extract_vec_duplicate_wide_int (rtx x, wide_int *ret_wi,
+					scalar_mode mode = TImode)
 {
   rtx elt = unwrap_const_vec_duplicate (x);
   if (!CONST_SCALAR_INT_P (elt))
     return false;
   scalar_mode smode
-    = CONST_SCALAR_INT_P (x) ? TImode : GET_MODE_INNER (GET_MODE (x));
+    = CONST_SCALAR_INT_P (x) ? mode : GET_MODE_INNER (GET_MODE (x));
   *ret_wi = rtx_mode_t (elt, smode);
   return true;
 }
@@ -11931,6 +11933,49 @@ aarch64_const_vec_all_same_in_range_p (rtx x,
 	  && IN_RANGE (INTVAL (elt), minval, maxval));
 }
 
+/* Some constants can't be made using normal mov instructions in Advanced SIMD
+   but we can still create them in various ways.  If the constant in VAL can be
+   created using alternate methods then if possible then return true and
+   additionally set TARGET to the rtx for the sequence if TARGET is not NULL.
+   Otherwise return false if sequence is not possible.  */
+
+bool
+aarch64_maybe_generate_simd_constant (rtx target, rtx val, machine_mode mode)
+{
+  wide_int wval;
+  auto smode = GET_MODE_INNER (mode);
+  if (!aarch64_extract_vec_duplicate_wide_int (val, &wval, smode))
+    return false;
+
+  /* For Advanced SIMD we can create an integer with only the top bit set
+     using fneg (0.0f).  */
+  if (TARGET_SIMD
+      && !TARGET_SVE
+      && smode == DImode
+      && wi::only_sign_bit_p (wval))
+    {
+      if (!target)
+	return true;
+
+      /* Use the same base type as aarch64_gen_shareable_zero.  */
+      rtx zero = CONST0_RTX (V4SImode);
+      emit_move_insn (lowpart_subreg (V4SImode, target, mode), zero);
+      rtx neg = lowpart_subreg (V2DFmode, target, mode);
+      emit_insn (gen_negv2df2 (neg, copy_rtx (neg)));
+      return true;
+    }
+
+  return false;
+}
+
+/* Check if the value in VAL with mode MODE can be created using special
+   instruction sequences.  */
+
+bool aarch64_simd_special_constant_p (rtx val, machine_mode mode)
+{
+  return aarch64_maybe_generate_simd_constant (NULL_RTX, val, mode);
+}
+
 bool
 aarch64_const_vec_all_same_int_p (rtx x, HOST_WIDE_INT val)
 {
diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
index 32c7adc89281b249b52ecedf2f1678749c289d18..6f7a6cd1830e5b7cdb3eab76f3143964278a8561 100644
--- a/gcc/config/aarch64/aarch64.md
+++ b/gcc/config/aarch64/aarch64.md
@@ -1341,13 +1341,21 @@ (define_insn_and_split "*movdi_aarch64"
      [r, w  ; f_mrc    , fp  , 4] fmov\t%x0, %d1
      [w, w  ; fmov     , fp  , 4] fmov\t%d0, %d1
      [w, Dd ; neon_move, simd, 4] << aarch64_output_scalar_simd_mov_immediate (operands[1], DImode);
-  }
-  "CONST_INT_P (operands[1]) && !aarch64_move_imm (INTVAL (operands[1]), DImode)
-   && REG_P (operands[0]) && GP_REGNUM_P (REGNO (operands[0]))"
+     [w, Dx ; neon_move, simd, 8] #
+  }
+  "CONST_INT_P (operands[1])
+   && REG_P (operands[0])
+   && ((!aarch64_move_imm (INTVAL (operands[1]), DImode)
+	&& GP_REGNUM_P (REGNO (operands[0])))
+       || (aarch64_simd_special_constant_p (operands[1], DImode)
+	   && FP_REGNUM_P (REGNO (operands[0]))))"
   [(const_int 0)]
   {
+    if (GP_REGNUM_P (REGNO (operands[0])))
       aarch64_expand_mov_immediate (operands[0], operands[1]);
-      DONE;
+    else
+      aarch64_maybe_generate_simd_constant (operands[0], operands[1], DImode);
+    DONE;
   }
 )
 
diff --git a/gcc/config/aarch64/constraints.md b/gcc/config/aarch64/constraints.md
index 371a00827d84d8ea4a06ba2b00a761d3b179ae90..b3922bcb9a8b362c995c96c6d1c6eef034990251 100644
--- a/gcc/config/aarch64/constraints.md
+++ b/gcc/config/aarch64/constraints.md
@@ -488,6 +488,14 @@ (define_constraint "Dr"
  (and (match_code "const,const_vector")
       (match_test "aarch64_simd_shift_imm_p (op, GET_MODE (op),
 						 false)")))
+
+(define_constraint "Dx"
+  "@internal
+ A constraint that matches a vector of 64-bit immediates which we don't have a
+ single instruction to create but that we can create in creative ways."
+ (and (match_code "const_int,const,const_vector")
+      (match_test "aarch64_simd_special_constant_p (op, DImode)")))
+
 (define_constraint "Dz"
   "@internal
  A constraint that matches a vector of immediate zero."
diff --git a/gcc/testsuite/gcc.target/aarch64/dbl_mov_immediate_1.c b/gcc/testsuite/gcc.target/aarch64/dbl_mov_immediate_1.c
index ba6a230457ba7a86f1939665fe9177ecdb45f935..fb9088e9d2849c0ea10a8741795181a0543c3cb2 100644
--- a/gcc/testsuite/gcc.target/aarch64/dbl_mov_immediate_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/dbl_mov_immediate_1.c
@@ -48,6 +48,8 @@ double d4(void)
 
 /* { dg-final { scan-assembler-times "mov\tx\[0-9\]+, 25838523252736"       1 } } */
 /* { dg-final { scan-assembler-times "movk\tx\[0-9\]+, 0x40fe, lsl 48"      1 } } */
-/* { dg-final { scan-assembler-times "mov\tx\[0-9\]+, -9223372036854775808" 1 } } */
-/* { dg-final { scan-assembler-times "fmov\td\[0-9\]+, x\[0-9\]+"           2 } } */
+/* { dg-final { scan-assembler-times "mov\tx\[0-9\]+, -9223372036854775808" 0 } } */
+/* { dg-final { scan-assembler-times {movi\tv[0-9]+.2d, #0} 1 } } */
+/* { dg-final { scan-assembler-times {fneg\tv[0-9]+.2d, v[0-9]+.2d} 1 } } */
+/* { dg-final { scan-assembler-times "fmov\td\[0-9\]+, x\[0-9\]+"           1 } } */
 
diff --git a/gcc/testsuite/gcc.target/aarch64/fneg-abs_1.c b/gcc/testsuite/gcc.target/aarch64/fneg-abs_1.c
index f823013c3ddf6b3a266c3abfcbf2642fc2a75fa6..43c37e21b50e13c09b8d6850686e88465cd8482a 100644
--- a/gcc/testsuite/gcc.target/aarch64/fneg-abs_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/fneg-abs_1.c
@@ -28,8 +28,8 @@ float32x4_t t2 (float32x4_t a)
 
 /*
 ** t3:
-**	adrp	x0, .LC[0-9]+
-**	ldr	q[0-9]+, \[x0, #:lo12:.LC0\]
+**	movi	v[0-9]+.4s, 0
+**	fneg	v[0-9]+.2d, v[0-9]+.2d
 **	orr	v[0-9]+.16b, v[0-9]+.16b, v[0-9]+.16b
 **	ret
 */
diff --git a/gcc/testsuite/gcc.target/aarch64/fneg-abs_2.c b/gcc/testsuite/gcc.target/aarch64/fneg-abs_2.c
index 141121176b309e4b2aa413dc55271a6e3c93d5e1..fb14ec3e2210e0feeff80f2410d777d3046a9f78 100644
--- a/gcc/testsuite/gcc.target/aarch64/fneg-abs_2.c
+++ b/gcc/testsuite/gcc.target/aarch64/fneg-abs_2.c
@@ -20,8 +20,8 @@ float32_t f1 (float32_t a)
 
 /*
 ** f2:
-**	mov	x0, -9223372036854775808
-**	fmov	d[0-9]+, x0
+**	fmov	d[0-9]+, xzr
+**	fneg	v[0-9]+.2d, v[0-9]+.2d
 **	orr	v[0-9]+.8b, v[0-9]+.8b, v[0-9]+.8b
 **	ret
 */
@@ -29,3 +29,4 @@ float64_t f2 (float64_t a)
 {
   return -fabs (a);
 }
+
diff --git a/gcc/testsuite/gcc.target/aarch64/fneg-abs_4.c b/gcc/testsuite/gcc.target/aarch64/fneg-abs_4.c
index 10879dea74462d34b26160eeb0bd54ead063166b..4ea0105f6c0a9756070bcc60d34f142f53d8242c 100644
--- a/gcc/testsuite/gcc.target/aarch64/fneg-abs_4.c
+++ b/gcc/testsuite/gcc.target/aarch64/fneg-abs_4.c
@@ -8,8 +8,8 @@
 
 /*
 ** negabs:
-**	mov	x0, -9223372036854775808
-**	fmov	d[0-9]+, x0
+**	fmov	d[0-9]+, xzr
+**	fneg	v[0-9]+.2d, v[0-9]+.2d
 **	orr	v[0-9]+.8b, v[0-9]+.8b, v[0-9]+.8b
 **	ret
 */


* Re: [PATCH]AArch64 Add special patterns for creating DI scalar and vector constant 1 << 63 [PR109154]
  2023-10-05 18:18   ` Tamar Christina
@ 2023-10-05 19:47     ` Richard Sandiford
  0 siblings, 0 replies; 4+ messages in thread
From: Richard Sandiford @ 2023-10-05 19:47 UTC (permalink / raw)
  To: Tamar Christina
  Cc: gcc-patches, nd, Richard Earnshaw, Marcus Shawcroft, Kyrylo Tkachov

Tamar Christina <Tamar.Christina@arm.com> writes:
> Hi,
>
>> The lowpart_subreg should simplify this back into CONST0_RTX (mode),
>> making it no different from:
>> 
>> 	emit_move_insn (target, CONST0_RTX (mode));
>> 
>> If the intention is to share zeros between modes (sounds good!), then I think
>> the subreg needs to be on the lhs instead.
>> 
>> > +      rtx neg = lowpart_subreg (V2DFmode, target, mode);
>> > +      emit_insn (gen_negv2df2 (neg, lowpart_subreg (V2DFmode, target,
>> > + mode)));
>> 
>> The rhs seems simpler as copy_rtx (neg).  (Even the copy_rtx shouldn't be
>> needed after RA, but it's probably more future-proof to keep it.)
>> 
>> > +      emit_move_insn (target, lowpart_subreg (mode, neg, V2DFmode));
>> 
>> This shouldn't be needed, since neg is already a reference to target.
>> 
>> Overall, looks like a nice change/framework.
>
> Updated the patch, and in the process also realized this can be used for the
> vector variants:
>
> Hi All,
>
> This adds a way to generate special sequences for the creation of constants for
> which we don't have single-instruction sequences, and which would normally have
> led to a GP -> FP transfer or a literal load.
>
> The patch starts out by adding support for creating 1 << 63 using fneg (mov 0).
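>
> For illustration, the constant with only bit 63 set is then built entirely
> on the SIMD side (the register number here is just for illustration)
> instead of needing a literal load or a GP -> FP transfer:
>
> 	movi	v31.4s, #0
> 	fneg	v31.2d, v31.2d
>
> fneg on +0.0 flips only the sign bit, leaving 1 << 63 in each 64-bit lane.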
>
> Bootstrapped Regtested on aarch64-none-linux-gnu and no issues.
>
> Ok for master?
>
> Thanks,
> Tamar
>
> gcc/ChangeLog:
>
> 	PR tree-optimization/109154
> 	* config/aarch64/aarch64-protos.h (aarch64_simd_special_constant_p,
> 	aarch64_maybe_generate_simd_constant): New.
> 	* config/aarch64/aarch64-simd.md (*aarch64_simd_mov<VQMOV:mode>,
> 	*aarch64_simd_mov<VDMOV:mode>): Add new codegen for special constants.
> 	* config/aarch64/aarch64.cc (aarch64_extract_vec_duplicate_wide_int):
> 	Take optional mode.
> 	(aarch64_simd_special_constant_p,
> 	aarch64_maybe_generate_simd_constant): New.
> 	* config/aarch64/aarch64.md (*movdi_aarch64): Add new codegen for
> 	special constants.
> 	* config/aarch64/constraints.md (Dx): New.
>
> gcc/testsuite/ChangeLog:
>
> 	PR tree-optimization/109154
> 	* gcc.target/aarch64/fneg-abs_1.c: Updated.
> 	* gcc.target/aarch64/fneg-abs_2.c: Updated.
> 	* gcc.target/aarch64/fneg-abs_4.c: Updated.
> 	* gcc.target/aarch64/dbl_mov_immediate_1.c: Updated.
>
> --- inline copy of patch ---
>
> diff --git a/gcc/config/aarch64/aarch64-protos.h b/gcc/config/aarch64/aarch64-protos.h
> index 60a55f4bc1956786ea687fc7cad7ec9e4a84e1f0..36d6c688bc888a51a9de174bd3665aebe891b8b1 100644
> --- a/gcc/config/aarch64/aarch64-protos.h
> +++ b/gcc/config/aarch64/aarch64-protos.h
> @@ -831,6 +831,8 @@ bool aarch64_sve_ptrue_svpattern_p (rtx, struct simd_immediate_info *);
>  bool aarch64_simd_valid_immediate (rtx, struct simd_immediate_info *,
>  			enum simd_immediate_check w = AARCH64_CHECK_MOV);
>  rtx aarch64_check_zero_based_sve_index_immediate (rtx);
> +bool aarch64_maybe_generate_simd_constant (rtx, rtx, machine_mode);
> +bool aarch64_simd_special_constant_p (rtx, machine_mode);
>  bool aarch64_sve_index_immediate_p (rtx);
>  bool aarch64_sve_arith_immediate_p (machine_mode, rtx, bool);
>  bool aarch64_sve_sqadd_sqsub_immediate_p (machine_mode, rtx, bool);
> diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
> index 81ff5bad03d598fa0d48df93d172a28bc0d1d92e..33eceb436584ff73c7271f93639f2246d1af19e0 100644
> --- a/gcc/config/aarch64/aarch64-simd.md
> +++ b/gcc/config/aarch64/aarch64-simd.md
> @@ -142,26 +142,35 @@ (define_insn "aarch64_dup_lane_<vswap_width_name><mode>"
>    [(set_attr "type" "neon_dup<q>")]
>  )
>  
> -(define_insn "*aarch64_simd_mov<VDMOV:mode>"
> +(define_insn_and_split "*aarch64_simd_mov<VDMOV:mode>"
>    [(set (match_operand:VDMOV 0 "nonimmediate_operand")
>  	(match_operand:VDMOV 1 "general_operand"))]
>    "TARGET_FLOAT
>     && (register_operand (operands[0], <MODE>mode)
>         || aarch64_simd_reg_or_zero (operands[1], <MODE>mode))"
> -  {@ [cons: =0, 1; attrs: type, arch]
> -     [w , m ; neon_load1_1reg<q> , *   ] ldr\t%d0, %1
> -     [r , m ; load_8             , *   ] ldr\t%x0, %1
> -     [m , Dz; store_8            , *   ] str\txzr, %0
> -     [m , w ; neon_store1_1reg<q>, *   ] str\t%d1, %0
> -     [m , r ; store_8            , *   ] str\t%x1, %0
> -     [w , w ; neon_logic<q>      , simd] mov\t%0.<Vbtype>, %1.<Vbtype>
> -     [w , w ; neon_logic<q>      , *   ] fmov\t%d0, %d1
> -     [?r, w ; neon_to_gp<q>      , simd] umov\t%0, %1.d[0]
> -     [?r, w ; neon_to_gp<q>      , *   ] fmov\t%x0, %d1
> -     [?w, r ; f_mcr              , *   ] fmov\t%d0, %1
> -     [?r, r ; mov_reg            , *   ] mov\t%0, %1
> -     [w , Dn; neon_move<q>       , simd] << aarch64_output_simd_mov_immediate (operands[1], 64);
> -     [w , Dz; f_mcr              , *   ] fmov\t%d0, xzr
> +  {@ [cons: =0, 1; attrs: type, arch, length]
> +     [w , m ; neon_load1_1reg<q> , *   , *] ldr\t%d0, %1
> +     [r , m ; load_8             , *   , *] ldr\t%x0, %1
> +     [m , Dz; store_8            , *   , *] str\txzr, %0
> +     [m , w ; neon_store1_1reg<q>, *   , *] str\t%d1, %0
> +     [m , r ; store_8            , *   , *] str\t%x1, %0
> +     [w , w ; neon_logic<q>      , simd, *] mov\t%0.<Vbtype>, %1.<Vbtype>
> +     [w , w ; neon_logic<q>      , *   , *] fmov\t%d0, %d1
> +     [?r, w ; neon_to_gp<q>      , simd, *] umov\t%0, %1.d[0]
> +     [?r, w ; neon_to_gp<q>      , *   , *] fmov\t%x0, %d1
> +     [?w, r ; f_mcr              , *   , *] fmov\t%d0, %1
> +     [?r, r ; mov_reg            , *   , *] mov\t%0, %1
> +     [w , Dn; neon_move<q>       , simd, *] << aarch64_output_simd_mov_immediate (operands[1], 64);
> +     [w , Dz; f_mcr              , *   , *] fmov\t%d0, xzr
> +     [w , Dx; neon_move          , simd, 8] #
> +  }
> +  "CONST_INT_P (operands[1])
> +   && aarch64_simd_special_constant_p (operands[1], <MODE>mode)
> +   && FP_REGNUM_P (REGNO (operands[0]))"
> +  [(const_int 0)]
> +  {
> +    aarch64_maybe_generate_simd_constant (operands[0], operands[1], <MODE>mode);
> +    DONE;
>    }
>  )
>  
> @@ -181,19 +190,30 @@ (define_insn_and_split "*aarch64_simd_mov<VQMOV:mode>"
>       [?r , r ; multiple           , *   , 8] #
>       [w  , Dn; neon_move<q>       , simd, 4] << aarch64_output_simd_mov_immediate (operands[1], 128);
>       [w  , Dz; fmov               , *   , 4] fmov\t%d0, xzr
> +     [w  , Dx; neon_move          , simd, 8] #
>    }
>    "&& reload_completed
> -   && (REG_P (operands[0])
> +   && ((REG_P (operands[0])
>  	&& REG_P (operands[1])
>  	&& !(FP_REGNUM_P (REGNO (operands[0]))
> -	     && FP_REGNUM_P (REGNO (operands[1]))))"
> +	     && FP_REGNUM_P (REGNO (operands[1]))))
> +       || (aarch64_simd_special_constant_p (operands[1], <MODE>mode)
> +	   && FP_REGNUM_P (REGNO (operands[0]))))"
>    [(const_int 0)]
>    {
>      if (GP_REGNUM_P (REGNO (operands[0]))
>  	&& GP_REGNUM_P (REGNO (operands[1])))
>        aarch64_simd_emit_reg_reg_move (operands, DImode, 2);
>      else
> -      aarch64_split_simd_move (operands[0], operands[1]);
> +      {
> +	if (FP_REGNUM_P (REGNO (operands[0]))
> +	    && <MODE>mode == V2DImode
> +	    && aarch64_maybe_generate_simd_constant (operands[0], operands[1],
> +						     <MODE>mode))
> +	  ;
> +	else
> +	  aarch64_split_simd_move (operands[0], operands[1]);
> +      }
>      DONE;
>    }
>  )
> diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
> index 9fbfc548a891f5d11940c6fd3c49a14bfbdec886..c5cf42f7801b291754840dcc5b304577e8e0d391 100644
> --- a/gcc/config/aarch64/aarch64.cc
> +++ b/gcc/config/aarch64/aarch64.cc
> @@ -11873,16 +11873,18 @@ aarch64_get_condition_code_1 (machine_mode mode, enum rtx_code comp_code)
>  /* Return true if X is a CONST_INT, CONST_WIDE_INT or a constant vector
>     duplicate of such constants.  If so, store in RET_WI the wide_int
>     representation of the constant paired with the inner mode of the vector mode
> -   or TImode for scalar X constants.  */
> +   or MODE for scalar X constants.  If MODE is not provided then TImode is
> +   used.  */
>  
>  static bool
> -aarch64_extract_vec_duplicate_wide_int (rtx x, wide_int *ret_wi)
> +aarch64_extract_vec_duplicate_wide_int (rtx x, wide_int *ret_wi,
> +					scalar_mode mode = TImode)
>  {
>    rtx elt = unwrap_const_vec_duplicate (x);
>    if (!CONST_SCALAR_INT_P (elt))
>      return false;
>    scalar_mode smode
> -    = CONST_SCALAR_INT_P (x) ? TImode : GET_MODE_INNER (GET_MODE (x));
> +    = CONST_SCALAR_INT_P (x) ? mode : GET_MODE_INNER (GET_MODE (x));
>    *ret_wi = rtx_mode_t (elt, smode);
>    return true;
>  }
> @@ -11931,6 +11933,49 @@ aarch64_const_vec_all_same_in_range_p (rtx x,
>  	  && IN_RANGE (INTVAL (elt), minval, maxval));
>  }
>  
> +/* Some constants can't be made using normal mov instructions in Advanced SIMD
> +   but we can still create them in various ways.  If the constant in VAL can be
> +   created using an alternate method, return true, and additionally set TARGET
> +   to the rtx of such a sequence if TARGET is not NULL.  Otherwise return
> +   false.  */
> +
> +bool
> +aarch64_maybe_generate_simd_constant (rtx target, rtx val, machine_mode mode)
> +{
> +  wide_int wval;
> +  auto smode = GET_MODE_INNER (mode);
> +  if (!aarch64_extract_vec_duplicate_wide_int (val, &wval, smode))
> +    return false;
> +
> +  /* For Advanced SIMD we can create an integer with only the top bit set
> +     using fneg (0.0f).  */
> +  if (TARGET_SIMD
> +      && !TARGET_SVE
> +      && smode == DImode
> +      && wi::only_sign_bit_p (wval))
> +    {
> +      if (!target)
> +	return true;
> +
> +      /* Use the same base type as aarch64_gen_shareable_zero.  */
> +      rtx zero = CONST0_RTX (V4SImode);
> +      emit_move_insn (lowpart_subreg (V4SImode, target, mode), zero);
> +      rtx neg = lowpart_subreg (V2DFmode, target, mode);
> +      emit_insn (gen_negv2df2 (neg, copy_rtx (neg)));
> +      return true;
> +    }
> +
> +  return false;
> +}
> +
> +/* Check if the value in VAL with mode MODE can be created using special
> +   instruction sequences.  */
> +
> +bool aarch64_simd_special_constant_p (rtx val, machine_mode mode)

Nit: should be line break after "bool".
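
I.e., matching the formatting used elsewhere in the file (no functional
change):

bool
aarch64_simd_special_constant_p (rtx val, machine_mode mode)
{
  return aarch64_maybe_generate_simd_constant (NULL_RTX, val, mode);
}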

> +{
> +  return aarch64_maybe_generate_simd_constant (NULL_RTX, val, mode);
> +}
> +
>  bool
>  aarch64_const_vec_all_same_int_p (rtx x, HOST_WIDE_INT val)
>  {
> diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
> index 32c7adc89281b249b52ecedf2f1678749c289d18..6f7a6cd1830e5b7cdb3eab76f3143964278a8561 100644
> --- a/gcc/config/aarch64/aarch64.md
> +++ b/gcc/config/aarch64/aarch64.md
> @@ -1341,13 +1341,21 @@ (define_insn_and_split "*movdi_aarch64"
>       [r, w  ; f_mrc    , fp  , 4] fmov\t%x0, %d1
>       [w, w  ; fmov     , fp  , 4] fmov\t%d0, %d1
>       [w, Dd ; neon_move, simd, 4] << aarch64_output_scalar_simd_mov_immediate (operands[1], DImode);
> -  }
> -  "CONST_INT_P (operands[1]) && !aarch64_move_imm (INTVAL (operands[1]), DImode)
> -   && REG_P (operands[0]) && GP_REGNUM_P (REGNO (operands[0]))"
> +     [w, Dx ; neon_move, simd, 8] #
> +  }
> +  "CONST_INT_P (operands[1])
> +   && REG_P (operands[0])
> +   && ((!aarch64_move_imm (INTVAL (operands[1]), DImode)
> +	&& GP_REGNUM_P (REGNO (operands[0])))
> +       || (aarch64_simd_special_constant_p (operands[1], DImode)
> +	   && FP_REGNUM_P (REGNO (operands[0]))))"
>    [(const_int 0)]
>    {
> +    if (GP_REGNUM_P (REGNO (operands[0])))
>        aarch64_expand_mov_immediate (operands[0], operands[1]);
> -      DONE;
> +    else
> +      aarch64_maybe_generate_simd_constant (operands[0], operands[1], DImode);
> +    DONE;
>    }
>  )
>  
> diff --git a/gcc/config/aarch64/constraints.md b/gcc/config/aarch64/constraints.md
> index 371a00827d84d8ea4a06ba2b00a761d3b179ae90..b3922bcb9a8b362c995c96c6d1c6eef034990251 100644
> --- a/gcc/config/aarch64/constraints.md
> +++ b/gcc/config/aarch64/constraints.md
> @@ -488,6 +488,14 @@ (define_constraint "Dr"
>   (and (match_code "const,const_vector")
>        (match_test "aarch64_simd_shift_imm_p (op, GET_MODE (op),
>  						 false)")))
> +
> +(define_constraint "Dx"
> +  "@internal
> + A constraint that matches a vector of 64-bit immediates which we don't have a
> + single instruction to create but that we can create in creative ways."
> + (and (match_code "const_int,const,const_vector")
> +      (match_test "aarch64_simd_special_constant_p (op, DImode)")))

Since this is used by vector iterators that span multiple modes,
I suppose we should test the mode too:

 (and (match_code "const_int,const,const_vector")
      (match_test "GET_MODE_INNER (GET_MODE (op)) == DImode
		   || GET_MODE (op) == VOIDmode")
      (match_test "aarch64_simd_special_constant_p (op, DImode)")))

Alternatively, and I think this is probably better than adding the
test above, we could have a separate constraint for vectors and
keep Dx for DImode scalars:

(define_constraint "Dx"
  "@internal
 A constraint that matches a scalar 64-bit immediate which we don't have a
 single instruction to create but that we can create in creative ways."
 (and (match_code "const_int")
      (match_test "aarch64_simd_special_constant_p (op, DImode)")))

(define_constraint "Dy"
  "@internal
 Like Dx, but for a vector of immediates (of any mode)."
 (and (match_code "const_vector")
      (match_test "aarch64_simd_special_constant_p
                    (op, GET_MODE_INNER (GET_MODE (op)))")))

(No need for the "const", that's legacy from before the VLA const_vector
encoding.)

I think we would need a split like that if we ever wanted to extend
this to SImode scalars.
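
For the record, a hypothetical sketch of what such a split could look like
(the "Dw" name is invented here, and it assumes the helper were taught to
handle SImode):

(define_constraint "Dw"
  "@internal
 Like Dx, but for a scalar 32-bit immediate."
 (and (match_code "const_int")
      (match_test "aarch64_simd_special_constant_p (op, SImode)")))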

OK with those changes, thanks.

Richard

> +
>  (define_constraint "Dz"
>    "@internal
>   A constraint that matches a vector of immediate zero."
> diff --git a/gcc/testsuite/gcc.target/aarch64/dbl_mov_immediate_1.c b/gcc/testsuite/gcc.target/aarch64/dbl_mov_immediate_1.c
> index ba6a230457ba7a86f1939665fe9177ecdb45f935..fb9088e9d2849c0ea10a8741795181a0543c3cb2 100644
> --- a/gcc/testsuite/gcc.target/aarch64/dbl_mov_immediate_1.c
> +++ b/gcc/testsuite/gcc.target/aarch64/dbl_mov_immediate_1.c
> @@ -48,6 +48,8 @@ double d4(void)
>  
>  /* { dg-final { scan-assembler-times "mov\tx\[0-9\]+, 25838523252736"       1 } } */
>  /* { dg-final { scan-assembler-times "movk\tx\[0-9\]+, 0x40fe, lsl 48"      1 } } */
> -/* { dg-final { scan-assembler-times "mov\tx\[0-9\]+, -9223372036854775808" 1 } } */
> -/* { dg-final { scan-assembler-times "fmov\td\[0-9\]+, x\[0-9\]+"           2 } } */
> +/* { dg-final { scan-assembler-times "mov\tx\[0-9\]+, -9223372036854775808" 0 } } */
> +/* { dg-final { scan-assembler-times {movi\tv[0-9]+.2d, #0} 1 } } */
> +/* { dg-final { scan-assembler-times {fneg\tv[0-9]+.2d, v[0-9]+.2d} 1 } } */
> +/* { dg-final { scan-assembler-times "fmov\td\[0-9\]+, x\[0-9\]+"           1 } } */
>  
> diff --git a/gcc/testsuite/gcc.target/aarch64/fneg-abs_1.c b/gcc/testsuite/gcc.target/aarch64/fneg-abs_1.c
> index f823013c3ddf6b3a266c3abfcbf2642fc2a75fa6..43c37e21b50e13c09b8d6850686e88465cd8482a 100644
> --- a/gcc/testsuite/gcc.target/aarch64/fneg-abs_1.c
> +++ b/gcc/testsuite/gcc.target/aarch64/fneg-abs_1.c
> @@ -28,8 +28,8 @@ float32x4_t t2 (float32x4_t a)
>  
>  /*
>  ** t3:
> -**	adrp	x0, .LC[0-9]+
> -**	ldr	q[0-9]+, \[x0, #:lo12:.LC0\]
> +**	movi	v[0-9]+.4s, 0
> +**	fneg	v[0-9]+.2d, v[0-9]+.2d
>  **	orr	v[0-9]+.16b, v[0-9]+.16b, v[0-9]+.16b
>  **	ret
>  */
> diff --git a/gcc/testsuite/gcc.target/aarch64/fneg-abs_2.c b/gcc/testsuite/gcc.target/aarch64/fneg-abs_2.c
> index 141121176b309e4b2aa413dc55271a6e3c93d5e1..fb14ec3e2210e0feeff80f2410d777d3046a9f78 100644
> --- a/gcc/testsuite/gcc.target/aarch64/fneg-abs_2.c
> +++ b/gcc/testsuite/gcc.target/aarch64/fneg-abs_2.c
> @@ -20,8 +20,8 @@ float32_t f1 (float32_t a)
>  
>  /*
>  ** f2:
> -**	mov	x0, -9223372036854775808
> -**	fmov	d[0-9]+, x0
> +**	fmov	d[0-9]+, xzr
> +**	fneg	v[0-9]+.2d, v[0-9]+.2d
>  **	orr	v[0-9]+.8b, v[0-9]+.8b, v[0-9]+.8b
>  **	ret
>  */
> @@ -29,3 +29,4 @@ float64_t f2 (float64_t a)
>  {
>    return -fabs (a);
>  }
> +
> diff --git a/gcc/testsuite/gcc.target/aarch64/fneg-abs_4.c b/gcc/testsuite/gcc.target/aarch64/fneg-abs_4.c
> index 10879dea74462d34b26160eeb0bd54ead063166b..4ea0105f6c0a9756070bcc60d34f142f53d8242c 100644
> --- a/gcc/testsuite/gcc.target/aarch64/fneg-abs_4.c
> +++ b/gcc/testsuite/gcc.target/aarch64/fneg-abs_4.c
> @@ -8,8 +8,8 @@
>  
>  /*
>  ** negabs:
> -**	mov	x0, -9223372036854775808
> -**	fmov	d[0-9]+, x0
> +**	fmov	d[0-9]+, xzr
> +**	fneg	v[0-9]+.2d, v[0-9]+.2d
>  **	orr	v[0-9]+.8b, v[0-9]+.8b, v[0-9]+.8b
>  **	ret
>  */


Thread overview: 4+ messages
2023-09-27  0:52 [PATCH]AArch64 Add special patterns for creating DI scalar and vector constant 1 << 63 [PR109154] Tamar Christina
2023-09-27 10:32 ` Richard Sandiford
2023-10-05 18:18   ` Tamar Christina
2023-10-05 19:47     ` Richard Sandiford
