public inbox for gcc-patches@gcc.gnu.org
 help / color / mirror / Atom feed
* [PATCH][committed] aarch64: Clean up some rounding immediate predicates
@ 2023-06-26 16:54 Kyrylo Tkachov
  0 siblings, 0 replies; only message in thread
From: Kyrylo Tkachov @ 2023-06-26 16:54 UTC (permalink / raw)
  To: gcc-patches

[-- Attachment #1: Type: text/plain, Size: 1281 bytes --]

Hi all,

aarch64_simd_rsra_rnd_imm_vec is now used for more than just RSRA
and accepts more than just vectors, so rename it to make its name more
truthful.
The aarch64_simd_rshrn_imm_vec predicate is now unused and can be deleted.
No behavioural change intended.

Bootstrapped and tested on aarch64-none-linux-gnu.
Pushing to trunk.
Thanks,
Kyrill

gcc/ChangeLog:

	* config/aarch64/aarch64-protos.h (aarch64_const_vec_rsra_rnd_imm_p):
	Rename to...
	(aarch64_rnd_imm_p): ... This.
	* config/aarch64/predicates.md (aarch64_simd_rsra_rnd_imm_vec):
	Rename to...
	(aarch64_int_rnd_operand): ... This.
	(aarch64_simd_rshrn_imm_vec): Delete.
	* config/aarch64/aarch64-simd.md (aarch64_<sra_op>rsra_n<mode>_insn):
	Adjust for the above.
	(aarch64_<sra_op>rshr_n<mode><vczle><vczbe>_insn): Likewise.
	(*aarch64_<shrn_op>rshrn_n<mode>_insn): Likewise.
	(*aarch64_sqrshrun_n<mode>_insn<vczle><vczbe>): Likewise.
	(aarch64_sqrshrun_n<mode>_insn): Likewise.
	(aarch64_<shrn_op>rshrn2_n<mode>_insn_le): Likewise.
	(aarch64_<shrn_op>rshrn2_n<mode>_insn_be): Likewise.
	(aarch64_sqrshrun2_n<mode>_insn_le): Likewise.
	(aarch64_sqrshrun2_n<mode>_insn_be): Likewise.
	* config/aarch64/aarch64.cc (aarch64_const_vec_rsra_rnd_imm_p):
	Rename to...
	(aarch64_rnd_imm_p): ... This.

[-- Attachment #2: rnd-imm.patch --]
[-- Type: application/octet-stream, Size: 7910 bytes --]

diff --git a/gcc/config/aarch64/aarch64-protos.h b/gcc/config/aarch64/aarch64-protos.h
index a20a20ce15f77c58a25b8badd73b53e50bb5be8d..70303d6fd953e0c397b9138ede8858c2db2e53db 100644
--- a/gcc/config/aarch64/aarch64-protos.h
+++ b/gcc/config/aarch64/aarch64-protos.h
@@ -759,7 +759,7 @@ bool aarch64_const_vec_all_same_int_p (rtx, HOST_WIDE_INT);
 bool aarch64_const_vec_all_same_in_range_p (rtx, HOST_WIDE_INT,
 					    HOST_WIDE_INT);
 bool aarch64_const_vec_rnd_cst_p (rtx, rtx);
-bool aarch64_const_vec_rsra_rnd_imm_p (rtx);
+bool aarch64_rnd_imm_p (rtx);
 bool aarch64_constant_address_p (rtx);
 bool aarch64_emit_approx_div (rtx, rtx, rtx);
 bool aarch64_emit_approx_sqrt (rtx, rtx, bool);
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index f5c7d931577ca9e85d48c58878a373125e27c67d..3fcaeb68efb0d895ea82493a1a7a8593ac3f72cd 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -1323,7 +1323,7 @@ (define_insn "aarch64_<sra_op>rsra_n<mode>_insn"
 	      (plus:<V2XWIDE>
 		(<SHIFTEXTEND>:<V2XWIDE>
 		  (match_operand:VSDQ_I_DI 2 "register_operand" "w"))
-		(match_operand:<V2XWIDE> 4 "aarch64_simd_rsra_rnd_imm_vec"))
+		(match_operand:<V2XWIDE> 4 "aarch64_int_rnd_operand"))
 	      (match_operand:VSDQ_I_DI 3 "aarch64_simd_shift_imm_<vec_or_offset>_<Vel>")))
 	  (match_operand:VSDQ_I_DI 1 "register_operand" "0")))]
   "TARGET_SIMD
@@ -6449,7 +6449,7 @@ (define_insn "aarch64_<sra_op>rshr_n<mode><vczle><vczbe>_insn"
 	    (plus:<V2XWIDE>
 	      (<SHIFTEXTEND>:<V2XWIDE>
 		(match_operand:VSDQ_I_DI 1 "register_operand" "w"))
-	      (match_operand:<V2XWIDE> 3 "aarch64_simd_rsra_rnd_imm_vec"))
+	      (match_operand:<V2XWIDE> 3 "aarch64_int_rnd_operand"))
 	    (match_operand:VSDQ_I_DI 2 "aarch64_simd_shift_imm_<vec_or_offset>_<Vel>"))))]
   "TARGET_SIMD
    && aarch64_const_vec_rnd_cst_p (operands[3], operands[2])"
@@ -6569,7 +6569,7 @@ (define_insn "*aarch64_<shrn_op>rshrn_n<mode>_insn<vczle><vczbe>"
 	    (plus:<V2XWIDE>
 	      (<TRUNCEXTEND>:<V2XWIDE>
 	        (match_operand:VQN 1 "register_operand" "w"))
-	      (match_operand:<V2XWIDE> 3 "aarch64_simd_rsra_rnd_imm_vec"))
+	      (match_operand:<V2XWIDE> 3 "aarch64_int_rnd_operand"))
 	    (match_operand:VQN 2 "aarch64_simd_shift_imm_vec_<vn_mode>"))))]
   "TARGET_SIMD
    && aarch64_const_vec_rnd_cst_p (operands[3], operands[2])"
@@ -6584,7 +6584,7 @@ (define_insn "*aarch64_<shrn_op>rshrn_n<mode>_insn"
 	    (plus:<DWI>
 	      (<TRUNCEXTEND>:<DWI>
 	        (match_operand:SD_HSDI 1 "register_operand" "w"))
-	      (match_operand:<DWI> 3 "aarch64_simd_rsra_rnd_imm_vec"))
+	      (match_operand:<DWI> 3 "aarch64_int_rnd_operand"))
 	    (match_operand:SI 2 "aarch64_simd_shift_imm_offset_<ve_mode>"))))]
   "TARGET_SIMD
    && aarch64_const_vec_rnd_cst_p (operands[3], operands[2])"
@@ -6714,7 +6714,7 @@ (define_insn "*aarch64_sqrshrun_n<mode>_insn<vczle><vczbe>"
 		(plus:<V2XWIDE>
 		  (sign_extend:<V2XWIDE>
 		    (match_operand:VQN 1 "register_operand" "w"))
-		  (match_operand:<V2XWIDE> 3 "aarch64_simd_rsra_rnd_imm_vec"))
+		  (match_operand:<V2XWIDE> 3 "aarch64_int_rnd_operand"))
 		(match_operand:VQN 2 "aarch64_simd_shift_imm_vec_<vn_mode>"))
 	      (match_operand:<V2XWIDE> 4 "aarch64_simd_imm_zero"))
 	    (match_operand:<V2XWIDE> 5 "aarch64_simd_umax_quarter_mode"))))]
@@ -6732,7 +6732,7 @@ (define_insn "aarch64_sqrshrun_n<mode>_insn"
 	      (plus:<V2XWIDE>
 		(sign_extend:<V2XWIDE>
 		  (match_operand:SD_HSDI 1 "register_operand" "w"))
-		(match_operand:<V2XWIDE> 3 "aarch64_simd_rsra_rnd_imm_vec"))
+		(match_operand:<V2XWIDE> 3 "aarch64_int_rnd_operand"))
 	      (match_operand:SI 2 "aarch64_simd_shift_imm_offset_<ve_mode>"))
 	    (const_int 0))
 	  (const_int <half_mask>)))]
@@ -6843,7 +6843,7 @@ (define_insn "aarch64_<shrn_op>rshrn2_n<mode>_insn_le"
 	      (plus:<V2XWIDE>
 		(<TRUNCEXTEND>:<V2XWIDE>
 		  (match_operand:VQN 2 "register_operand" "w"))
-		(match_operand:<V2XWIDE> 4 "aarch64_simd_rsra_rnd_imm_vec"))
+		(match_operand:<V2XWIDE> 4 "aarch64_int_rnd_operand"))
 	      (match_operand:VQN 3 "aarch64_simd_shift_imm_vec_<vn_mode>")))))]
   "TARGET_SIMD && !BYTES_BIG_ENDIAN
    && aarch64_const_vec_rnd_cst_p (operands[4], operands[3])"
@@ -6859,7 +6859,7 @@ (define_insn "aarch64_<shrn_op>rshrn2_n<mode>_insn_be"
 	      (plus:<V2XWIDE>
 		(<TRUNCEXTEND>:<V2XWIDE>
 		  (match_operand:VQN 2 "register_operand" "w"))
-		(match_operand:<V2XWIDE> 4 "aarch64_simd_rsra_rnd_imm_vec"))
+		(match_operand:<V2XWIDE> 4 "aarch64_int_rnd_operand"))
 	      (match_operand:VQN 3 "aarch64_simd_shift_imm_vec_<vn_mode>")))
 	  (match_operand:<VNARROWQ> 1 "register_operand" "0")))]
   "TARGET_SIMD && BYTES_BIG_ENDIAN
@@ -6977,7 +6977,7 @@ (define_insn "aarch64_sqrshrun2_n<mode>_insn_le"
 		  (plus:<V2XWIDE>
 		    (sign_extend:<V2XWIDE>
 		      (match_operand:VQN 2 "register_operand" "w"))
-		    (match_operand:<V2XWIDE> 4 "aarch64_simd_rsra_rnd_imm_vec"))
+		    (match_operand:<V2XWIDE> 4 "aarch64_int_rnd_operand"))
 		  (match_operand:VQN 3 "aarch64_simd_shift_imm_vec_<vn_mode>"))
 		(match_operand:<V2XWIDE> 5 "aarch64_simd_imm_zero"))
 	      (match_operand:<V2XWIDE> 6 "aarch64_simd_umax_quarter_mode")))))]
@@ -6997,7 +6997,7 @@ (define_insn "aarch64_sqrshrun2_n<mode>_insn_be"
 		  (plus:<V2XWIDE>
 		    (sign_extend:<V2XWIDE>
 		      (match_operand:VQN 2 "register_operand" "w"))
-		    (match_operand:<V2XWIDE> 4 "aarch64_simd_rsra_rnd_imm_vec"))
+		    (match_operand:<V2XWIDE> 4 "aarch64_int_rnd_operand"))
 		  (match_operand:VQN 3 "aarch64_simd_shift_imm_vec_<vn_mode>"))
 		(match_operand:<V2XWIDE> 5 "aarch64_simd_imm_zero"))
 	      (match_operand:<V2XWIDE> 6 "aarch64_simd_umax_quarter_mode")))
diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
index df37bde6a78c1651111cc82404eaf26bd703d948..edef4d1be49827ea0b1c0ef49131ef7a51c5662f 100644
--- a/gcc/config/aarch64/aarch64.cc
+++ b/gcc/config/aarch64/aarch64.cc
@@ -11751,14 +11751,14 @@ aarch64_extract_vec_duplicate_wide_int (rtx x, wide_int *ret_wi)
   return true;
 }
 
-/* Return true if X is a TImode constant or a constant vector of integer
-   immediates that represent the rounding constant used in the RSRA
-   instructions.
-   The accepted form of the constant is (1 << (C - 1)) where C is within
+/* Return true if X is a scalar or a constant vector of integer
+   immediates that represent the rounding constant used in the fixed-point
+   arithmetic instructions.
+   The accepted form of the constant is (1 << (C - 1)) where C is in the range
    [1, MODE_WIDTH/2].  */
 
 bool
-aarch64_const_vec_rsra_rnd_imm_p (rtx x)
+aarch64_rnd_imm_p (rtx x)
 {
   wide_int rnd_cst;
   if (!aarch64_extract_vec_duplicate_wide_int (x, &rnd_cst))
diff --git a/gcc/config/aarch64/predicates.md b/gcc/config/aarch64/predicates.md
index c3e1ee794c1a53179274578dfd7f22f173ab4e49..b19fde3c0386512f2b41691fd145a780131e5b74 100644
--- a/gcc/config/aarch64/predicates.md
+++ b/gcc/config/aarch64/predicates.md
@@ -626,15 +626,11 @@ (define_predicate "aarch64_simd_shift_imm_vec_di"
   (and (match_code "const_vector")
        (match_test "aarch64_const_vec_all_same_in_range_p (op, 1, 64)")))
 
-(define_predicate "aarch64_simd_rsra_rnd_imm_vec"
+;; A constant or vector of constants that represents an integer rounding
+;; constant added during fixed-point arithmetic calculations
+(define_predicate "aarch64_int_rnd_operand"
   (and (match_code "const_vector,const_int,const_wide_int")
-       (match_test "aarch64_const_vec_rsra_rnd_imm_p (op)")))
-
-(define_predicate "aarch64_simd_rshrn_imm_vec"
-  (and (match_code "const_vector")
-       (match_test "aarch64_const_vec_all_same_in_range_p (op, 1,
-				HOST_WIDE_INT_1U
-				<< (GET_MODE_UNIT_BITSIZE  (mode) - 1))")))
+       (match_test "aarch64_rnd_imm_p (op)")))
 
 (define_predicate "aarch64_simd_raddsubhn_imm_vec"
   (and (match_code "const_vector")

^ permalink raw reply	[flat|nested] only message in thread

only message in thread, other threads:[~2023-06-26 16:54 UTC | newest]

Thread overview: (only message) (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2023-06-26 16:54 [PATCH][committed] aarch64: Clean up some rounding immediate predicates Kyrylo Tkachov

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for read-only IMAP folder(s) and NNTP newsgroup(s).