From: Christophe Lyon <christophe.lyon@arm.com>
To: <gcc-patches@gcc.gnu.org>, <kyrylo.tkachov@arm.com>,
	<richard.earnshaw@arm.com>, <richard.sandiford@arm.com>
Cc: Christophe Lyon <christophe.lyon@arm.com>
Subject: [PATCH 07/22] arm: [MVE intrinsics] factorize vaddq vsubq vmulq
Date: Tue, 18 Apr 2023 15:45:53 +0200	[thread overview]
Message-ID: <20230418134608.244751-8-christophe.lyon@arm.com> (raw)
In-Reply-To: <20230418134608.244751-1-christophe.lyon@arm.com>

In order to avoid using a huge switch when generating all the
intrinsics (e.g. mve_vaddq_n_sv4si, ...), we want to generate a single
function taking the builtin code as a parameter (e.g.
mve_q_n (VADDQ_S, ...)).
This is achieved by using the new mve_insn iterator.

Having done that, it becomes easier to share similar patterns and to
avoid useless, error-prone code duplication.
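
As an illustrative sketch (not part of the patch): the '@' prefix on
the new pattern names relies on GCC's "Parameterized Names" support,
so genemit emits helpers that take the iterator values as arguments,
and the C++ intrinsics framework can pick the insn from the builtin's
unspec code and vector mode instead of switching over every
mve_vaddq_n_sv4si-style generator.  Roughly, assuming the helper names
follow the convention above; VADDQ_N_S and V4SImode are just example
values, and target/op1/op2 stand for the operand rtxes:

  /* Map (unspec code, vector mode) to an insn code; for these example
     values this should resolve to what used to be the
     mve_vaddq_n_sv4si pattern.  */
  insn_code icode = code_for_mve_q_n (VADDQ_N_S, V4SImode);

  /* Or generate the instruction directly; the operand rtxes follow
     the iterator values.  */
  rtx pat = gen_mve_q_n (VADDQ_N_S, V4SImode, target, op1, op2);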

2022-09-08  Christophe Lyon  <christophe.lyon@arm.com>

gcc/ChangeLog:

	* config/arm/iterators.md (MVE_INT_BINARY_RTX, MVE_INT_M_BINARY)
	(MVE_INT_M_N_BINARY, MVE_INT_N_BINARY, MVE_FP_M_BINARY)
	(MVE_FP_M_N_BINARY, MVE_FP_N_BINARY, mve_addsubmul, mve_insn): New
	iterators.
	* config/arm/mve.md
	(mve_vsubq_n_f<mode>, mve_vaddq_n_f<mode>, mve_vmulq_n_f<mode>):
	Factorize into ...
	(@mve_<mve_insn>q_n_f<mode>): ... this.
	(mve_vaddq_n_<supf><mode>, mve_vmulq_n_<supf><mode>)
	(mve_vsubq_n_<supf><mode>): Factorize into ...
	(@mve_<mve_insn>q_n_<supf><mode>): ... this.
	(mve_vaddq<mode>, mve_vmulq<mode>, mve_vsubq<mode>): Factorize
	into ...
	(mve_<mve_addsubmul>q<mode>): ... this.
	(mve_vaddq_f<mode>, mve_vmulq_f<mode>, mve_vsubq_f<mode>):
	Factorize into ...
	(mve_<mve_addsubmul>q_f<mode>): ... this.
	(mve_vaddq_m_<supf><mode>, mve_vmulq_m_<supf><mode>)
	(mve_vsubq_m_<supf><mode>): Factorize into ...
	(@mve_<mve_insn>q_m_<supf><mode>): ... this.
	(mve_vaddq_m_n_<supf><mode>, mve_vmulq_m_n_<supf><mode>)
	(mve_vsubq_m_n_<supf><mode>): Factorize into ...
	(@mve_<mve_insn>q_m_n_<supf><mode>): ... this.
	(mve_vaddq_m_f<mode>, mve_vmulq_m_f<mode>, mve_vsubq_m_f<mode>):
	Factorize into ...
	(@mve_<mve_insn>q_m_f<mode>): ... this.
	(mve_vaddq_m_n_f<mode>, mve_vmulq_m_n_f<mode>)
	(mve_vsubq_m_n_f<mode>): Factorize into ...
	(@mve_<mve_insn>q_m_n_f<mode>): ... this.
---
 gcc/config/arm/iterators.md |  57 +++++++
 gcc/config/arm/mve.md       | 317 +++++-------------------------------
 2 files changed, 99 insertions(+), 275 deletions(-)

diff --git a/gcc/config/arm/iterators.md b/gcc/config/arm/iterators.md
index 39895ad62aa..d3bef594775 100644
--- a/gcc/config/arm/iterators.md
+++ b/gcc/config/arm/iterators.md
@@ -330,6 +330,63 @@ (define_code_iterator FCVT [unsigned_float float])
 ;; Saturating addition, subtraction
 (define_code_iterator SSPLUSMINUS [ss_plus ss_minus])
 
+;; MVE integer binary operations.
+(define_code_iterator MVE_INT_BINARY_RTX [plus minus mult])
+
+(define_int_iterator MVE_INT_M_BINARY   [
+		     VADDQ_M_S VADDQ_M_U
+		     VMULQ_M_S VMULQ_M_U
+		     VSUBQ_M_S VSUBQ_M_U
+		     ])
+
+(define_int_iterator MVE_INT_M_N_BINARY [
+		     VADDQ_M_N_S VADDQ_M_N_U
+		     VMULQ_M_N_S VMULQ_M_N_U
+		     VSUBQ_M_N_S VSUBQ_M_N_U
+		     ])
+
+(define_int_iterator MVE_INT_N_BINARY   [
+		     VADDQ_N_S VADDQ_N_U
+		     VMULQ_N_S VMULQ_N_U
+		     VSUBQ_N_S VSUBQ_N_U
+		     ])
+
+(define_int_iterator MVE_FP_M_BINARY   [
+		     VADDQ_M_F
+		     VMULQ_M_F
+		     VSUBQ_M_F
+		     ])
+
+(define_int_iterator MVE_FP_M_N_BINARY [
+		     VADDQ_M_N_F
+		     VMULQ_M_N_F
+		     VSUBQ_M_N_F
+		     ])
+
+(define_int_iterator MVE_FP_N_BINARY   [
+		     VADDQ_N_F
+		     VMULQ_N_F
+		     VSUBQ_N_F
+		     ])
+
+(define_code_attr mve_addsubmul [
+		 (minus "vsub")
+		 (mult "vmul")
+		 (plus "vadd")
+		 ])
+
+(define_int_attr mve_insn [
+		 (VADDQ_M_N_S "vadd") (VADDQ_M_N_U "vadd") (VADDQ_M_N_F "vadd")
+		 (VADDQ_M_S "vadd") (VADDQ_M_U "vadd") (VADDQ_M_F "vadd")
+		 (VADDQ_N_S "vadd") (VADDQ_N_U "vadd") (VADDQ_N_F "vadd")
+		 (VMULQ_M_N_S "vmul") (VMULQ_M_N_U "vmul") (VMULQ_M_N_F "vmul")
+		 (VMULQ_M_S "vmul") (VMULQ_M_U "vmul") (VMULQ_M_F "vmul")
+		 (VMULQ_N_S "vmul") (VMULQ_N_U "vmul") (VMULQ_N_F "vmul")
+		 (VSUBQ_M_N_S "vsub") (VSUBQ_M_N_U "vsub") (VSUBQ_M_N_F "vsub")
+		 (VSUBQ_M_S "vsub") (VSUBQ_M_U "vsub") (VSUBQ_M_F "vsub")
+		 (VSUBQ_N_S "vsub") (VSUBQ_N_U "vsub") (VSUBQ_N_F "vsub")
+		 ])
+
 ;; plus and minus are the only SHIFTABLE_OPS for which Thumb2 allows
 ;; a stack pointer operand.  The minus operation is a candidate for an rsub
 ;; and hence only plus is supported.
diff --git a/gcc/config/arm/mve.md b/gcc/config/arm/mve.md
index ab688396f97..5167fbc6add 100644
--- a/gcc/config/arm/mve.md
+++ b/gcc/config/arm/mve.md
@@ -668,21 +668,6 @@ (define_insn "mve_vpnotv16bi"
   [(set_attr "type" "mve_move")
 ])
 
-;;
-;; [vsubq_n_f])
-;;
-(define_insn "mve_vsubq_n_f<mode>"
-  [
-   (set (match_operand:MVE_0 0 "s_register_operand" "=w")
-	(unspec:MVE_0 [(match_operand:MVE_0 1 "s_register_operand" "w")
-		       (match_operand:<V_elem> 2 "s_register_operand" "r")]
-	 VSUBQ_N_F))
-  ]
-  "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
-  "vsub.f<V_sz_elem>\t%q0, %q1, %2"
-  [(set_attr "type" "mve_move")
-])
-
 ;;
 ;; [vbrsrq_n_f])
 ;;
@@ -871,16 +856,18 @@ (define_insn "mve_vabdq_<supf><mode>"
 
 ;;
 ;; [vaddq_n_s, vaddq_n_u])
+;; [vsubq_n_s, vsubq_n_u])
+;; [vmulq_n_s, vmulq_n_u])
 ;;
-(define_insn "mve_vaddq_n_<supf><mode>"
+(define_insn "@mve_<mve_insn>q_n_<supf><mode>"
   [
    (set (match_operand:MVE_2 0 "s_register_operand" "=w")
 	(unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "w")
 		       (match_operand:<V_elem> 2 "s_register_operand" "r")]
-	 VADDQ_N))
+	 MVE_INT_N_BINARY))
   ]
   "TARGET_HAVE_MVE"
-  "vadd.i%#<V_sz_elem>\t%q0, %q1, %2"
+  "<mve_insn>.i%#<V_sz_elem>\t%q0, %q1, %2"
   [(set_attr "type" "mve_move")
 ])
 
@@ -1362,26 +1349,13 @@ (define_insn "mve_vmulltq_int_<supf><mode>"
 ])
 
 ;;
-;; [vmulq_n_u, vmulq_n_s])
-;;
-(define_insn "mve_vmulq_n_<supf><mode>"
-  [
-   (set (match_operand:MVE_2 0 "s_register_operand" "=w")
-	(unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "w")
-		       (match_operand:<V_elem> 2 "s_register_operand" "r")]
-	 VMULQ_N))
-  ]
-  "TARGET_HAVE_MVE"
-  "vmul.i%#<V_sz_elem>\t%q0, %q1, %2"
-  [(set_attr "type" "mve_move")
-])
-
-;;
+;; [vaddq_s, vaddq_u])
 ;; [vmulq_u, vmulq_s])
+;; [vsubq_s, vsubq_u])
 ;;
 (define_insn "mve_vmulq_<supf><mode>"
   [
-   (set (match_operand:MVE_2 0 "s_register_operand" "=w")
+    (set (match_operand:MVE_2 0 "s_register_operand" "=w")
 	(unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "w")
 		       (match_operand:MVE_2 2 "s_register_operand" "w")]
 	 VMULQ))
@@ -1391,14 +1365,14 @@ (define_insn "mve_vmulq_<supf><mode>"
   [(set_attr "type" "mve_move")
 ])
 
-(define_insn "mve_vmulq<mode>"
+(define_insn "mve_<mve_addsubmul>q<mode>"
   [
    (set (match_operand:MVE_2 0 "s_register_operand" "=w")
-	(mult:MVE_2 (match_operand:MVE_2 1 "s_register_operand" "w")
-		    (match_operand:MVE_2 2 "s_register_operand" "w")))
+	(MVE_INT_BINARY_RTX:MVE_2 (match_operand:MVE_2 1 "s_register_operand" "w")
+			      (match_operand:MVE_2 2 "s_register_operand" "w")))
   ]
   "TARGET_HAVE_MVE"
-  "vmul.i%#<V_sz_elem>\t%q0, %q1, %q2"
+  "<mve_addsubmul>.i%#<V_sz_elem>\t%q0, %q1, %q2"
   [(set_attr "type" "mve_move")
 ])
 
@@ -1768,21 +1742,6 @@ (define_insn "mve_vshlq_r_<supf><mode>"
   [(set_attr "type" "mve_move")
 ])
 
-;;
-;; [vsubq_n_s, vsubq_n_u])
-;;
-(define_insn "mve_vsubq_n_<supf><mode>"
-  [
-   (set (match_operand:MVE_2 0 "s_register_operand" "=w")
-	(unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "w")
-		       (match_operand:<V_elem> 2 "s_register_operand" "r")]
-	 VSUBQ_N))
-  ]
-  "TARGET_HAVE_MVE"
-  "vsub.i%#<V_sz_elem>\t%q0, %q1, %2"
-  [(set_attr "type" "mve_move")
-])
-
 ;;
 ;; [vsubq_s, vsubq_u])
 ;;
@@ -1798,17 +1757,6 @@ (define_insn "mve_vsubq_<supf><mode>"
   [(set_attr "type" "mve_move")
 ])
 
-(define_insn "mve_vsubq<mode>"
-  [
-   (set (match_operand:MVE_2 0 "s_register_operand" "=w")
-	(minus:MVE_2 (match_operand:MVE_2 1 "s_register_operand" "w")
-		     (match_operand:MVE_2 2 "s_register_operand" "w")))
-  ]
-  "TARGET_HAVE_MVE"
-  "vsub.i%#<V_sz_elem>\t%q0, %q1, %q2"
-  [(set_attr "type" "mve_move")
-])
-
 ;;
 ;; [vabdq_f])
 ;;
@@ -1841,16 +1789,18 @@ (define_insn "mve_vaddlvaq_<supf>v4si"
 
 ;;
 ;; [vaddq_n_f])
+;; [vsubq_n_f])
+;; [vmulq_n_f])
 ;;
-(define_insn "mve_vaddq_n_f<mode>"
+(define_insn "@mve_<mve_insn>q_n_f<mode>"
   [
    (set (match_operand:MVE_0 0 "s_register_operand" "=w")
 	(unspec:MVE_0 [(match_operand:MVE_0 1 "s_register_operand" "w")
 		       (match_operand:<V_elem> 2 "s_register_operand" "r")]
-	 VADDQ_N_F))
+	 MVE_FP_N_BINARY))
   ]
   "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
-  "vadd.f%#<V_sz_elem>\t%q0, %q1, %2"
+  "<mve_insn>.f%#<V_sz_elem>\t%q0, %q1, %2"
   [(set_attr "type" "mve_move")
 ])
 
@@ -2224,31 +2174,18 @@ (define_insn "mve_vmovntq_<supf><mode>"
 ])
 
 ;;
+;; [vaddq_f])
 ;; [vmulq_f])
+;; [vsubq_f])
 ;;
-(define_insn "mve_vmulq_f<mode>"
+(define_insn "mve_<mve_addsubmul>q_f<mode>"
   [
    (set (match_operand:MVE_0 0 "s_register_operand" "=w")
-	(mult:MVE_0 (match_operand:MVE_0 1 "s_register_operand" "w")
+	(MVE_INT_BINARY_RTX:MVE_0 (match_operand:MVE_0 1 "s_register_operand" "w")
 		    (match_operand:MVE_0 2 "s_register_operand" "w")))
   ]
   "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
-  "vmul.f%#<V_sz_elem>	%q0, %q1, %q2"
-  [(set_attr "type" "mve_move")
-])
-
-;;
-;; [vmulq_n_f])
-;;
-(define_insn "mve_vmulq_n_f<mode>"
-  [
-   (set (match_operand:MVE_0 0 "s_register_operand" "=w")
-	(unspec:MVE_0 [(match_operand:MVE_0 1 "s_register_operand" "w")
-		       (match_operand:<V_elem> 2 "s_register_operand" "r")]
-	 VMULQ_N_F))
-  ]
-  "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
-  "vmul.f%#<V_sz_elem>	%q0, %q1, %2"
+  "<mve_addsubmul>.f%#<V_sz_elem>\t%q0, %q1, %q2"
   [(set_attr "type" "mve_move")
 ])
 
@@ -2490,20 +2427,6 @@ (define_insn "mve_vshlltq_n_<supf><mode>"
   [(set_attr "type" "mve_move")
 ])
 
-;;
-;; [vsubq_f])
-;;
-(define_insn "mve_vsubq_f<mode>"
-  [
-   (set (match_operand:MVE_0 0 "s_register_operand" "=w")
-	(minus:MVE_0 (match_operand:MVE_0 1 "s_register_operand" "w")
-		     (match_operand:MVE_0 2 "s_register_operand" "w")))
-  ]
-  "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
-  "vsub.f%#<V_sz_elem>\t%q0, %q1, %q2"
-  [(set_attr "type" "mve_move")
-])
-
 ;;
 ;; [vmulltq_poly_p])
 ;;
@@ -5032,23 +4955,6 @@ (define_insn "mve_vsriq_m_n_<supf><mode>"
   [(set_attr "type" "mve_move")
    (set_attr "length" "8")])
 
-;;
-;; [vsubq_m_u, vsubq_m_s])
-;;
-(define_insn "mve_vsubq_m_<supf><mode>"
-  [
-   (set (match_operand:MVE_2 0 "s_register_operand" "=w")
-	(unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "0")
-		       (match_operand:MVE_2 2 "s_register_operand" "w")
-		       (match_operand:MVE_2 3 "s_register_operand" "w")
-		       (match_operand:<MVE_VPRED> 4 "vpr_register_operand" "Up")]
-	 VSUBQ_M))
-  ]
-  "TARGET_HAVE_MVE"
-  "vpst\;vsubt.i%#<V_sz_elem>\t%q0, %q2, %q3"
-  [(set_attr "type" "mve_move")
-   (set_attr "length" "8")])
-
 ;;
 ;; [vcvtq_m_n_to_f_u, vcvtq_m_n_to_f_s])
 ;;
@@ -5084,35 +4990,39 @@ (define_insn "mve_vabdq_m_<supf><mode>"
 
 ;;
 ;; [vaddq_m_n_s, vaddq_m_n_u])
+;; [vsubq_m_n_s, vsubq_m_n_u])
+;; [vmulq_m_n_s, vmulq_m_n_u])
 ;;
-(define_insn "mve_vaddq_m_n_<supf><mode>"
+(define_insn "@mve_<mve_insn>q_m_n_<supf><mode>"
   [
    (set (match_operand:MVE_2 0 "s_register_operand" "=w")
 	(unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "0")
 		       (match_operand:MVE_2 2 "s_register_operand" "w")
 		       (match_operand:<V_elem> 3 "s_register_operand" "r")
 		       (match_operand:<MVE_VPRED> 4 "vpr_register_operand" "Up")]
-	 VADDQ_M_N))
+	 MVE_INT_M_N_BINARY))
   ]
   "TARGET_HAVE_MVE"
-  "vpst\;vaddt.i%#<V_sz_elem>	%q0, %q2, %3"
+  "vpst\;<mve_insn>t.i%#<V_sz_elem>	%q0, %q2, %3"
   [(set_attr "type" "mve_move")
    (set_attr "length""8")])
 
 ;;
 ;; [vaddq_m_u, vaddq_m_s])
+;; [vsubq_m_u, vsubq_m_s])
+;; [vmulq_m_u, vmulq_m_s])
 ;;
-(define_insn "mve_vaddq_m_<supf><mode>"
+(define_insn "@mve_<mve_insn>q_m_<supf><mode>"
   [
    (set (match_operand:MVE_2 0 "s_register_operand" "=w")
 	(unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "0")
 		       (match_operand:MVE_2 2 "s_register_operand" "w")
 		       (match_operand:MVE_2 3 "s_register_operand" "w")
 		       (match_operand:<MVE_VPRED> 4 "vpr_register_operand" "Up")]
-	 VADDQ_M))
+	 MVE_INT_M_BINARY))
   ]
   "TARGET_HAVE_MVE"
-  "vpst\;vaddt.i%#<V_sz_elem>	%q0, %q2, %q3"
+  "vpst\;<mve_insn>t.i%#<V_sz_elem>	%q0, %q2, %q3"
   [(set_attr "type" "mve_move")
    (set_attr "length""8")])
 
@@ -5422,40 +5332,6 @@ (define_insn "mve_vmulltq_int_m_<supf><mode>"
   [(set_attr "type" "mve_move")
    (set_attr "length""8")])
 
-;;
-;; [vmulq_m_n_u, vmulq_m_n_s])
-;;
-(define_insn "mve_vmulq_m_n_<supf><mode>"
-  [
-   (set (match_operand:MVE_2 0 "s_register_operand" "=w")
-	(unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "0")
-		       (match_operand:MVE_2 2 "s_register_operand" "w")
-		       (match_operand:<V_elem> 3 "s_register_operand" "r")
-		       (match_operand:<MVE_VPRED> 4 "vpr_register_operand" "Up")]
-	 VMULQ_M_N))
-  ]
-  "TARGET_HAVE_MVE"
-  "vpst\;vmult.i%#<V_sz_elem>	%q0, %q2, %3"
-  [(set_attr "type" "mve_move")
-   (set_attr "length""8")])
-
-;;
-;; [vmulq_m_s, vmulq_m_u])
-;;
-(define_insn "mve_vmulq_m_<supf><mode>"
-  [
-   (set (match_operand:MVE_2 0 "s_register_operand" "=w")
-	(unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "0")
-		       (match_operand:MVE_2 2 "s_register_operand" "w")
-		       (match_operand:MVE_2 3 "s_register_operand" "w")
-		       (match_operand:<MVE_VPRED> 4 "vpr_register_operand" "Up")]
-	 VMULQ_M))
-  ]
-  "TARGET_HAVE_MVE"
-  "vpst\;vmult.i%#<V_sz_elem>	%q0, %q2, %q3"
-  [(set_attr "type" "mve_move")
-   (set_attr "length""8")])
-
 ;;
 ;; [vornq_m_u, vornq_m_s])
 ;;
@@ -5796,23 +5672,6 @@ (define_insn "mve_vsliq_m_n_<supf><mode>"
   [(set_attr "type" "mve_move")
    (set_attr "length""8")])
 
-;;
-;; [vsubq_m_n_s, vsubq_m_n_u])
-;;
-(define_insn "mve_vsubq_m_n_<supf><mode>"
-  [
-   (set (match_operand:MVE_2 0 "s_register_operand" "=w")
-	(unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "0")
-		       (match_operand:MVE_2 2 "s_register_operand" "w")
-		       (match_operand:<V_elem> 3 "s_register_operand" "r")
-		       (match_operand:<MVE_VPRED> 4 "vpr_register_operand" "Up")]
-	 VSUBQ_M_N))
-  ]
-  "TARGET_HAVE_MVE"
-  "vpst\;vsubt.i%#<V_sz_elem>\t%q0, %q2, %3"
-  [(set_attr "type" "mve_move")
-   (set_attr "length""8")])
-
 ;;
 ;; [vhcaddq_rot270_m_s])
 ;;
@@ -6613,35 +6472,39 @@ (define_insn "mve_vabdq_m_f<mode>"
 
 ;;
 ;; [vaddq_m_f])
+;; [vsubq_m_f])
+;; [vmulq_m_f])
 ;;
-(define_insn "mve_vaddq_m_f<mode>"
+(define_insn "@mve_<mve_insn>q_m_f<mode>"
   [
    (set (match_operand:MVE_0 0 "s_register_operand" "=w")
 	(unspec:MVE_0 [(match_operand:MVE_0 1 "s_register_operand" "0")
 		       (match_operand:MVE_0 2 "s_register_operand" "w")
 		       (match_operand:MVE_0 3 "s_register_operand" "w")
 		       (match_operand:<MVE_VPRED> 4 "vpr_register_operand" "Up")]
-	 VADDQ_M_F))
+	 MVE_FP_M_BINARY))
   ]
   "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
-  "vpst\;vaddt.f%#<V_sz_elem>	%q0, %q2, %q3"
+  "vpst\;<mve_insn>t.f%#<V_sz_elem>	%q0, %q2, %q3"
   [(set_attr "type" "mve_move")
    (set_attr "length""8")])
 
 ;;
 ;; [vaddq_m_n_f])
+;; [vsubq_m_n_f])
+;; [vmulq_m_n_f])
 ;;
-(define_insn "mve_vaddq_m_n_f<mode>"
+(define_insn "@mve_<mve_insn>q_m_n_f<mode>"
   [
    (set (match_operand:MVE_0 0 "s_register_operand" "=w")
 	(unspec:MVE_0 [(match_operand:MVE_0 1 "s_register_operand" "0")
 		       (match_operand:MVE_0 2 "s_register_operand" "w")
 		       (match_operand:<V_elem> 3 "s_register_operand" "r")
 		       (match_operand:<MVE_VPRED> 4 "vpr_register_operand" "Up")]
-	 VADDQ_M_N_F))
+	 MVE_FP_M_N_BINARY))
   ]
   "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
-  "vpst\;vaddt.f%#<V_sz_elem>	%q0, %q2, %3"
+  "vpst\;<mve_insn>t.f%#<V_sz_elem>	%q0, %q2, %3"
   [(set_attr "type" "mve_move")
    (set_attr "length""8")])
 
@@ -6985,40 +6848,6 @@ (define_insn "mve_vminnmq_m_f<mode>"
   [(set_attr "type" "mve_move")
    (set_attr "length""8")])
 
-;;
-;; [vmulq_m_f])
-;;
-(define_insn "mve_vmulq_m_f<mode>"
-  [
-   (set (match_operand:MVE_0 0 "s_register_operand" "=w")
-	(unspec:MVE_0 [(match_operand:MVE_0 1 "s_register_operand" "0")
-		       (match_operand:MVE_0 2 "s_register_operand" "w")
-		       (match_operand:MVE_0 3 "s_register_operand" "w")
-		       (match_operand:<MVE_VPRED> 4 "vpr_register_operand" "Up")]
-	 VMULQ_M_F))
-  ]
-  "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
-  "vpst\;vmult.f%#<V_sz_elem>	%q0, %q2, %q3"
-  [(set_attr "type" "mve_move")
-   (set_attr "length""8")])
-
-;;
-;; [vmulq_m_n_f])
-;;
-(define_insn "mve_vmulq_m_n_f<mode>"
-  [
-   (set (match_operand:MVE_0 0 "s_register_operand" "=w")
-	(unspec:MVE_0 [(match_operand:MVE_0 1 "s_register_operand" "0")
-		       (match_operand:MVE_0 2 "s_register_operand" "w")
-		       (match_operand:<V_elem> 3 "s_register_operand" "r")
-		       (match_operand:<MVE_VPRED> 4 "vpr_register_operand" "Up")]
-	 VMULQ_M_N_F))
-  ]
-  "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
-  "vpst\;vmult.f%#<V_sz_elem>	%q0, %q2, %3"
-  [(set_attr "type" "mve_move")
-   (set_attr "length""8")])
-
 ;;
 ;; [vornq_m_f])
 ;;
@@ -7053,40 +6882,6 @@ (define_insn "mve_vorrq_m_f<mode>"
   [(set_attr "type" "mve_move")
    (set_attr "length""8")])
 
-;;
-;; [vsubq_m_f])
-;;
-(define_insn "mve_vsubq_m_f<mode>"
-  [
-   (set (match_operand:MVE_0 0 "s_register_operand" "=w")
-	(unspec:MVE_0 [(match_operand:MVE_0 1 "s_register_operand" "0")
-		       (match_operand:MVE_0 2 "s_register_operand" "w")
-		       (match_operand:MVE_0 3 "s_register_operand" "w")
-		       (match_operand:<MVE_VPRED> 4 "vpr_register_operand" "Up")]
-	 VSUBQ_M_F))
-  ]
-  "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
-  "vpst\;vsubt.f%#<V_sz_elem>\t%q0, %q2, %q3"
-  [(set_attr "type" "mve_move")
-   (set_attr "length""8")])
-
-;;
-;; [vsubq_m_n_f])
-;;
-(define_insn "mve_vsubq_m_n_f<mode>"
-  [
-   (set (match_operand:MVE_0 0 "s_register_operand" "=w")
-	(unspec:MVE_0 [(match_operand:MVE_0 1 "s_register_operand" "0")
-		       (match_operand:MVE_0 2 "s_register_operand" "w")
-		       (match_operand:<V_elem> 3 "s_register_operand" "r")
-		       (match_operand:<MVE_VPRED> 4 "vpr_register_operand" "Up")]
-	 VSUBQ_M_N_F))
-  ]
-  "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
-  "vpst\;vsubt.f%#<V_sz_elem>\t%q0, %q2, %3"
-  [(set_attr "type" "mve_move")
-   (set_attr "length""8")])
-
 ;;
 ;; [vstrbq_s vstrbq_u]
 ;;
@@ -8927,34 +8722,6 @@ (define_insn "mve_vstrwq_scatter_shifted_offset_<supf>v4si_insn"
   "vstrw.32\t%q2, [%0, %q1, uxtw #2]"
   [(set_attr "length" "4")])
 
-;;
-;; [vaddq_s, vaddq_u])
-;;
-(define_insn "mve_vaddq<mode>"
-  [
-   (set (match_operand:MVE_2 0 "s_register_operand" "=w")
-	(plus:MVE_2 (match_operand:MVE_2 1 "s_register_operand" "w")
-		    (match_operand:MVE_2 2 "s_register_operand" "w")))
-  ]
-  "TARGET_HAVE_MVE"
-  "vadd.i%#<V_sz_elem>\t%q0, %q1, %q2"
-  [(set_attr "type" "mve_move")
-])
-
-;;
-;; [vaddq_f])
-;;
-(define_insn "mve_vaddq_f<mode>"
-  [
-   (set (match_operand:MVE_0 0 "s_register_operand" "=w")
-	(plus:MVE_0 (match_operand:MVE_0 1 "s_register_operand" "w")
-		    (match_operand:MVE_0 2 "s_register_operand" "w")))
-  ]
-  "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
-  "vadd.f%#<V_sz_elem>\t%q0, %q1, %q2"
-  [(set_attr "type" "mve_move")
-])
-
 ;;
 ;; [vidupq_n_u])
 ;;
-- 
2.34.1

