[gcc r12-6762] arm: Consistently use crypto_mode attribute in crypto patterns
From: Richard Earnshaw @ 2022-01-20 11:29 UTC (permalink / raw)
  To: gcc-cvs

https://gcc.gnu.org/g:c471ee0f05d8de576c195996cc3c8ae3ca73d978

commit r12-6762-gc471ee0f05d8de576c195996cc3c8ae3ca73d978
Author: Richard Earnshaw <rearnsha@arm.com>
Date:   Thu Oct 21 12:19:32 2021 +0100

    arm: Consistently use crypto_mode attribute in crypto patterns
    
    A couple of patterns in the crypto support code hard-coded the
    mode rather than using the <crypto_mode> attribute provided by the
    iterators.  While not incorrect, this was slightly confusing, so
    adapt those patterns to the style of the rest of the file.
    
    Also fix some white space issues.
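    
    As a sketch of the idiom (illustrative only, with hypothetical
    names; the real definitions live in config/arm/iterators.md), an
    int iterator enumerates the unspecs and an int attribute maps each
    one to the mode a pattern should use:
    
        ;; Hypothetical iterator/attribute pair: iterating a pattern
        ;; over CRYPTO_EXAMPLE generates one insn per unspec, and
        ;; <crypto_mode_ex> substitutes the matching mode name.
        (define_int_iterator CRYPTO_EXAMPLE [UNSPEC_AESE UNSPEC_SHA1H])
        (define_int_attr crypto_mode_ex [(UNSPEC_AESE "V16QI")
                                         (UNSPEC_SHA1H "V4SI")])
    
    With this in place, writing (unspec:<crypto_mode_ex> ...) inside a
    define_insn keeps the mode in one table instead of repeating it in
    each pattern, which is the style this patch restores for the AES
    patterns.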
    
    gcc/ChangeLog:
    
            * config/arm/crypto.md (crypto_<CRYPTO_AES:crypto_pattern>): Use
            <crypto_mode> rather than hard-coding the mode.
            (crypto_<CRYPTO_AESMC:crypto_pattern>): Fix white space.
            (crypto_<CRYPTO_AES:crypto_pattern>): Likewise.
            (*aarch32_crypto_aese_fused): Likewise.
            (*aarch32_crypto_aesd_fused): Likewise.
            (crypto_<CRYPTO_BINARY:crypto_pattern>): Likewise.
            (crypto_<CRYPTO_TERNARY:crypto_pattern>): Likewise.
            (crypto_sha1h_lb): Likewise.
            (crypto_vmullp64): Likewise.
            (crypto_<CRYPTO_SELECTING:crypto_pattern>): Likewise.
            (crypto_<CRYPTO_SELECTING:crypto_pattern>_lb): Likewise.

Diff:
---
 gcc/config/arm/crypto.md | 94 ++++++++++++++++++++++++------------------------
 1 file changed, 47 insertions(+), 47 deletions(-)

diff --git a/gcc/config/arm/crypto.md b/gcc/config/arm/crypto.md
index 6071ea17eac..020dfba7dcf 100644
--- a/gcc/config/arm/crypto.md
+++ b/gcc/config/arm/crypto.md
@@ -22,7 +22,7 @@
 (define_insn "crypto_<CRYPTO_AESMC:crypto_pattern>"
   [(set (match_operand:<crypto_mode> 0 "register_operand" "=w")
 	(unspec:<crypto_mode>
-		[(match_operand:<crypto_mode> 1 "register_operand" "w")]
+	 [(match_operand:<crypto_mode> 1 "register_operand" "w")]
 	 CRYPTO_AESMC))]
   "TARGET_CRYPTO"
   "<crypto_pattern>.<crypto_size_sfx>\\t%q0, %q1"
@@ -30,12 +30,12 @@
 )
 
 (define_insn "crypto_<CRYPTO_AES:crypto_pattern>"
-  [(set (match_operand:V16QI 0 "register_operand" "=w")
-	(unspec:V16QI
-		[(xor:V16QI
-		     (match_operand:V16QI 1 "register_operand" "%0")
-		     (match_operand:V16QI 2 "register_operand" "w"))]
-	CRYPTO_AES))]
+  [(set (match_operand:<crypto_mode> 0 "register_operand" "=w")
+	(unspec:<crypto_mode>
+	 [(xor:<crypto_mode>
+	   (match_operand:<crypto_mode> 1 "register_operand" "%0")
+	   (match_operand:<crypto_mode> 2 "register_operand" "w"))]
+	 CRYPTO_AES))]
   "TARGET_CRYPTO"
   "<crypto_pattern>.<crypto_size_sfx>\\t%q0, %q2"
   [(set_attr "type" "<crypto_type>")]
@@ -44,17 +44,16 @@
 ;; When AESE/AESMC fusion is enabled we really want to keep the two together
 ;; and enforce the register dependency without scheduling or register
 ;; allocation messing up the order or introducing moves inbetween.
-;;  Mash the two together during combine.
+;; Mash the two together during combine.
 
 (define_insn "*aarch32_crypto_aese_fused"
   [(set (match_operand:V16QI 0 "register_operand" "=w")
 	(unspec:V16QI
-		[(unspec:V16QI
-		    [(xor:V16QI
-			(match_operand:V16QI 1 "register_operand" "%0")
-			(match_operand:V16QI 2 "register_operand" "w"))]
-		UNSPEC_AESE)]
-	UNSPEC_AESMC))]
+	 [(unspec:V16QI [(xor:V16QI
+			  (match_operand:V16QI 1 "register_operand" "%0")
+			  (match_operand:V16QI 2 "register_operand" "w"))]
+	   UNSPEC_AESE)]
+	 UNSPEC_AESMC))]
   "TARGET_CRYPTO
    && arm_fusion_enabled_p (tune_params::FUSE_AES_AESMC)"
   "aese.8\\t%q0, %q2\;aesmc.8\\t%q0, %q0"
@@ -65,17 +64,16 @@
 ;; When AESD/AESIMC fusion is enabled we really want to keep the two together
 ;; and enforce the register dependency without scheduling or register
 ;; allocation messing up the order or introducing moves inbetween.
-;;  Mash the two together during combine.
+;; Mash the two together during combine.
 
 (define_insn "*aarch32_crypto_aesd_fused"
   [(set (match_operand:V16QI 0 "register_operand" "=w")
 	(unspec:V16QI
-		[(unspec:V16QI
-		    [(xor:V16QI
-			(match_operand:V16QI 1 "register_operand" "%0")
-			(match_operand:V16QI 2 "register_operand" "w"))]
-		UNSPEC_AESD)]
-	UNSPEC_AESIMC))]
+	 [(unspec:V16QI [(xor:V16QI
+			  (match_operand:V16QI 1 "register_operand" "%0")
+			  (match_operand:V16QI 2 "register_operand" "w"))]
+	   UNSPEC_AESD)]
+	 UNSPEC_AESIMC))]
   "TARGET_CRYPTO
    && arm_fusion_enabled_p (tune_params::FUSE_AES_AESMC)"
   "aesd.8\\t%q0, %q2\;aesimc.8\\t%q0, %q0"
@@ -86,9 +84,9 @@
 (define_insn "crypto_<CRYPTO_BINARY:crypto_pattern>"
   [(set (match_operand:<crypto_mode> 0 "register_operand" "=w")
 	(unspec:<crypto_mode>
-		[(match_operand:<crypto_mode> 1 "register_operand" "0")
-		(match_operand:<crypto_mode> 2 "register_operand" "w")]
-	CRYPTO_BINARY))]
+	 [(match_operand:<crypto_mode> 1 "register_operand" "0")
+	  (match_operand:<crypto_mode> 2 "register_operand" "w")]
+	 CRYPTO_BINARY))]
   "TARGET_CRYPTO"
   "<crypto_pattern>.<crypto_size_sfx>\\t%q0, %q2"
   [(set_attr "type" "<crypto_type>")]
@@ -96,18 +94,20 @@
 
 (define_insn "crypto_<CRYPTO_TERNARY:crypto_pattern>"
   [(set (match_operand:<crypto_mode> 0 "register_operand" "=w")
-        (unspec:<crypto_mode> [(match_operand:<crypto_mode> 1 "register_operand" "0")
-                      (match_operand:<crypto_mode> 2 "register_operand" "w")
-                      (match_operand:<crypto_mode> 3 "register_operand" "w")]
-         CRYPTO_TERNARY))]
+	(unspec:<crypto_mode>
+	 [(match_operand:<crypto_mode> 1 "register_operand" "0")
+	  (match_operand:<crypto_mode> 2 "register_operand" "w")
+	  (match_operand:<crypto_mode> 3 "register_operand" "w")]
+	 CRYPTO_TERNARY))]
   "TARGET_CRYPTO"
   "<crypto_pattern>.<crypto_size_sfx>\\t%q0, %q2, %q3"
   [(set_attr "type" "<crypto_type>")]
 )
 
-/* The vec_select operation always selects index 0 from the lower V2SI subreg
-   of the V4SI, adjusted for endianness. Required due to neon_vget_lane and
-   neon_set_lane that change the element ordering in memory for big-endian.  */
+;; The vec_select operation always selects index 0 from the lower V2SI
+;; subreg of the V4SI, adjusted for endianness. Required due to
+;; neon_vget_lane and neon_set_lane that change the element ordering
+;; in memory for big-endian.
 
 (define_expand "crypto_sha1h"
   [(set (match_operand:V4SI 0 "register_operand")
@@ -122,10 +122,10 @@
 (define_insn "crypto_sha1h_lb"
   [(set (match_operand:V4SI 0 "register_operand" "=w")
 	(unspec:V4SI
-	  [(vec_select:SI
+	 [(vec_select:SI
 	   (match_operand:V4SI 1 "register_operand" "w")
 	   (parallel [(match_operand:SI 2 "immediate_operand" "i")]))]
-	UNSPEC_SHA1H))]
+	 UNSPEC_SHA1H))]
   "TARGET_CRYPTO && INTVAL (operands[2]) == NEON_ENDIAN_LANE_N (V2SImode, 0)"
   "sha1h.32\\t%q0, %q1"
   [(set_attr "type" "crypto_sha1_fast")]
@@ -133,9 +133,9 @@
 
 (define_insn "crypto_vmullp64"
   [(set (match_operand:TI 0 "register_operand" "=w")
-        (unspec:TI [(match_operand:DI 1 "register_operand" "w")
-                    (match_operand:DI 2 "register_operand" "w")]
-         UNSPEC_VMULLP64))]
+	(unspec:TI [(match_operand:DI 1 "register_operand" "w")
+		    (match_operand:DI 2 "register_operand" "w")]
+	 UNSPEC_VMULLP64))]
   "TARGET_CRYPTO"
   "vmull.p64\\t%q0, %P1, %P2"
   [(set_attr "type" "crypto_pmull")]
@@ -148,10 +148,10 @@
 (define_expand "crypto_<CRYPTO_SELECTING:crypto_pattern>"
   [(set (match_operand:V4SI 0 "register_operand")
 	(unspec:<crypto_mode>
-		[(match_operand:<crypto_mode> 1 "register_operand")
-		 (match_operand:<crypto_mode> 2 "register_operand")
-		 (match_operand:<crypto_mode> 3 "register_operand")]
-	CRYPTO_SELECTING))]
+	 [(match_operand:<crypto_mode> 1 "register_operand")
+	  (match_operand:<crypto_mode> 2 "register_operand")
+	  (match_operand:<crypto_mode> 3 "register_operand")]
+	 CRYPTO_SELECTING))]
   "TARGET_CRYPTO"
 {
   rtx op4 = GEN_INT (NEON_ENDIAN_LANE_N (V2SImode, 0));
@@ -162,13 +162,13 @@
 
 (define_insn "crypto_<CRYPTO_SELECTING:crypto_pattern>_lb"
   [(set (match_operand:V4SI 0 "register_operand" "=w")
-        (unspec:<crypto_mode>
-                     [(match_operand:<crypto_mode> 1 "register_operand" "0")
-                      (vec_select:SI
-                        (match_operand:<crypto_mode> 2 "register_operand" "w")
-                        (parallel [(match_operand:SI 4 "immediate_operand" "i")]))
-                      (match_operand:<crypto_mode> 3 "register_operand" "w")]
-         CRYPTO_SELECTING))]
+	(unspec:<crypto_mode>
+	 [(match_operand:<crypto_mode> 1 "register_operand" "0")
+	  (vec_select:SI
+	   (match_operand:<crypto_mode> 2 "register_operand" "w")
+	   (parallel [(match_operand:SI 4 "immediate_operand" "i")]))
+	  (match_operand:<crypto_mode> 3 "register_operand" "w")]
+	 CRYPTO_SELECTING))]
   "TARGET_CRYPTO && INTVAL (operands[4]) == NEON_ENDIAN_LANE_N (V2SImode, 0)"
   "<crypto_pattern>.<crypto_size_sfx>\\t%q0, %q2, %q3"
   [(set_attr "type" "<crypto_type>")]
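
For reference, a sketch of what the reworked AES pattern amounts to
once the iterator is instantiated for UNSPEC_AESE (assuming the usual
iterators.md mappings: crypto_pattern -> "aese", crypto_mode -> V16QI,
crypto_size_sfx -> "8"; the "type" value shown is hypothetical):

  ;; Conceptual expansion of crypto_<CRYPTO_AES:crypto_pattern> for
  ;; UNSPEC_AESE -- identical to what the old hard-coded V16QI
  ;; version produced, so the change is behaviour-neutral.
  (define_insn "crypto_aese"
    [(set (match_operand:V16QI 0 "register_operand" "=w")
	  (unspec:V16QI
	   [(xor:V16QI
	     (match_operand:V16QI 1 "register_operand" "%0")
	     (match_operand:V16QI 2 "register_operand" "w"))]
	   UNSPEC_AESE))]
    "TARGET_CRYPTO"
    "aese.8\\t%q0, %q2"
    [(set_attr "type" "crypto_aese")]
  )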

