* [PATCH 10/20] aarch64: Use RTL builtins for FP ml[as]_n intrinsics
From: Jonathan Wright @ 2021-04-28 14:31 UTC
  To: gcc-patches


Hi,

As per the subject, this patch rewrites the floating-point vml[as][q]_n Neon
intrinsics to use RTL builtins rather than inline assembly code. Because the
compiler cannot analyze or reorder an asm block, the builtins allow better
instruction scheduling and optimization.
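
As a usage sketch (a hypothetical helper, not part of the patch):
vmla_n_f32 (a, b, c) multiplies each lane of b by the scalar c and
accumulates the products into a; vmls_n_f32 subtracts them instead.

  #include <arm_neon.h>

  /* acc + x * { scale, scale }, lane-wise.  */
  float32x2_t
  scale_accum (float32x2_t acc, float32x2_t x, float32_t scale)
  {
    return vmla_n_f32 (acc, x, scale);
  }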

Regression tested and bootstrapped on aarch64-none-linux-gnu - no
issues.

Ok for master?

Thanks,
Jonathan

---

gcc/ChangeLog:

2021-01-18  Jonathan Wright  <jonathan.wright@arm.com>

	* config/aarch64/aarch64-simd-builtins.def: Add
	float_ml[as]_n builtin generator macros.
	* config/aarch64/aarch64-simd.md (mul_n<mode>3): Define.
	(aarch64_float_mla_n<mode>): Define.
	(aarch64_float_mls_n<mode>): Define.
	* config/aarch64/arm_neon.h (vmla_n_f32): Use RTL builtin
	instead of inline asm.
	(vmlaq_n_f32): Likewise.
	(vmls_n_f32): Likewise.
	(vmlsq_n_f32): Likewise.

[-- Attachment #2: rb14042.patch --]
[-- Type: application/octet-stream, Size: 5227 bytes --]

diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
index 0f44ed84ff9d08d808b1b2dfe528db5208b134f5..547509474c23daf6882ed2f8407ddb5caf1d1b91 100644
--- a/gcc/config/aarch64/aarch64-simd-builtins.def
+++ b/gcc/config/aarch64/aarch64-simd-builtins.def
@@ -664,6 +664,9 @@
   BUILTIN_VHSDF (TERNOP, fnma, 4, FP)
   VAR1 (TERNOP, fnma, 4, FP, hf)
 
+  BUILTIN_VDQSF (TERNOP, float_mla_n, 0, FP)
+  BUILTIN_VDQSF (TERNOP, float_mls_n, 0, FP)
+
   /* Implemented by aarch64_simd_bsl<mode>.  */
   BUILTIN_VDQQH (BSL_P, simd_bsl, 0, NONE)
   VAR2 (BSL_P, simd_bsl,0, NONE, di, v2di)
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index 5f701dd2775290156634ef8c6feccecd359e9ec9..d016970a2c278405b270a0ac745221e69f0f625e 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -2614,6 +2614,17 @@
   [(set_attr "type" "neon_fp_mul_<stype><q>")]
 )
 
+(define_insn "mul_n<mode>3"
+ [(set (match_operand:VHSDF 0 "register_operand" "=w")
+	(mult:VHSDF
+	  (vec_duplicate:VHSDF
+	    (match_operand:<VEL> 2 "register_operand" "w"))
+	  (match_operand:VHSDF 1 "register_operand" "w")))]
+ "TARGET_SIMD"
+ "fmul\\t%0.<Vtype>, %1.<Vtype>, %2.<Vetype>[0]"
+  [(set_attr "type" "neon_fp_mul_<stype><q>")]
+)
+
 (define_expand "div<mode>3"
  [(set (match_operand:VHSDF 0 "register_operand")
        (div:VHSDF (match_operand:VHSDF 1 "register_operand")
@@ -2651,6 +2662,40 @@
   [(set_attr "type" "neon_fp_abs_<stype><q>")]
 )
 
+(define_expand "aarch64_float_mla_n<mode>"
+  [(set (match_operand:VDQSF 0 "register_operand")
+	(plus:VDQSF
+	  (mult:VDQSF
+	    (vec_duplicate:VDQSF
+	      (match_operand:<VEL> 3 "register_operand"))
+	    (match_operand:VDQSF 2 "register_operand"))
+	  (match_operand:VDQSF 1 "register_operand")))]
+  "TARGET_SIMD"
+  {
+    rtx scratch = gen_reg_rtx (<MODE>mode);
+    emit_insn (gen_mul_n<mode>3 (scratch, operands[2], operands[3]));
+    emit_insn (gen_add<mode>3 (operands[0], operands[1], scratch));
+    DONE;
+  }
+)
+
+(define_expand "aarch64_float_mls_n<mode>"
+  [(set (match_operand:VDQSF 0 "register_operand")
+	(minus:VDQSF
+	  (match_operand:VDQSF 1 "register_operand")
+	  (mult:VDQSF
+	    (vec_duplicate:VDQSF
+	      (match_operand:<VEL> 3 "register_operand"))
+	    (match_operand:VDQSF 2 "register_operand"))))]
+  "TARGET_SIMD"
+  {
+    rtx scratch = gen_reg_rtx (<MODE>mode);
+    emit_insn (gen_mul_n<mode>3 (scratch, operands[2], operands[3]));
+    emit_insn (gen_sub<mode>3 (operands[0], operands[1], scratch));
+    DONE;
+  }
+)
+
 (define_insn "fma<mode>4"
   [(set (match_operand:VHSDF 0 "register_operand" "=w")
        (fma:VHSDF (match_operand:VHSDF 1 "register_operand" "w")
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index 1c48c166b5b9aaf052761f95121c26845221dae9..c0399c4dc428fe63c07fce0d12bb1580ead1542f 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -7050,13 +7050,7 @@ __extension__ extern __inline float32x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmla_n_f32 (float32x2_t __a, float32x2_t __b, float32_t __c)
 {
-  float32x2_t __result;
-  float32x2_t __t1;
-  __asm__ ("fmul %1.2s, %3.2s, %4.s[0]; fadd %0.2s, %0.2s, %1.2s"
-           : "=w"(__result), "=w"(__t1)
-           : "0"(__a), "w"(__b), "w"(__c)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_float_mla_nv2sf (__a, __b, __c);
 }
 
 __extension__ extern __inline int16x4_t
@@ -7403,13 +7397,7 @@ __extension__ extern __inline float32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmlaq_n_f32 (float32x4_t __a, float32x4_t __b, float32_t __c)
 {
-  float32x4_t __result;
-  float32x4_t __t1;
-  __asm__ ("fmul %1.4s, %3.4s, %4.s[0]; fadd %0.4s, %0.4s, %1.4s"
-           : "=w"(__result), "=w"(__t1)
-           : "0"(__a), "w"(__b), "w"(__c)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_float_mla_nv4sf (__a, __b, __c);
 }
 
 __extension__ extern __inline int16x8_t
@@ -7496,13 +7484,7 @@ __extension__ extern __inline float32x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmls_n_f32 (float32x2_t __a, float32x2_t __b, float32_t __c)
 {
-  float32x2_t __result;
-  float32x2_t __t1;
-  __asm__ ("fmul %1.2s, %3.2s, %4.s[0]; fsub %0.2s, %0.2s, %1.2s"
-           : "=w"(__result), "=w"(__t1)
-           : "0"(__a), "w"(__b), "w"(__c)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_float_mls_nv2sf (__a, __b, __c);
 }
 
 __extension__ extern __inline int16x4_t
@@ -7853,13 +7835,7 @@ __extension__ extern __inline float32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmlsq_n_f32 (float32x4_t __a, float32x4_t __b, float32_t __c)
 {
-  float32x4_t __result;
-  float32x4_t __t1;
-  __asm__ ("fmul %1.4s, %3.4s, %4.s[0]; fsub %0.4s, %0.4s, %1.4s"
-           : "=w"(__result), "=w"(__t1)
-           : "0"(__a), "w"(__b), "w"(__c)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_float_mls_nv4sf (__a, __b, __c);
 }
 
 __extension__ extern __inline int16x8_t


* Re: [PATCH 10/20] aarch64: Use RTL builtins for FP ml[as]_n intrinsics
From: Richard Sandiford @ 2021-04-28 15:11 UTC
  To: Jonathan Wright via Gcc-patches

Jonathan Wright via Gcc-patches <gcc-patches@gcc.gnu.org> writes:
> [...]
> diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
> index 5f701dd2775290156634ef8c6feccecd359e9ec9..d016970a2c278405b270a0ac745221e69f0f625e 100644
> --- a/gcc/config/aarch64/aarch64-simd.md
> +++ b/gcc/config/aarch64/aarch64-simd.md
> @@ -2614,6 +2614,17 @@
>    [(set_attr "type" "neon_fp_mul_<stype><q>")]
>  )
>  
> +(define_insn "mul_n<mode>3"
> + [(set (match_operand:VHSDF 0 "register_operand" "=w")
> +	(mult:VHSDF
> +	  (vec_duplicate:VHSDF
> +	    (match_operand:<VEL> 2 "register_operand" "w"))
> +	  (match_operand:VHSDF 1 "register_operand" "w")))]
> + "TARGET_SIMD"
> + "fmul\\t%0.<Vtype>, %1.<Vtype>, %2.<Vetype>[0]"

This functionality should already be provided by:

(define_insn "*aarch64_mul3_elt_from_dup<mode>"
 [(set (match_operand:VMUL 0 "register_operand" "=w")
    (mult:VMUL
      (vec_duplicate:VMUL
	    (match_operand:<VEL> 1 "register_operand" "<h_con>"))
      (match_operand:VMUL 2 "register_operand" "w")))]
  "TARGET_SIMD"
  "<f>mul\t%0.<Vtype>, %2.<Vtype>, %1.<Vetype>[0]";
  [(set_attr "type" "neon<fp>_mul_<stype>_scalar<q>")]
)

so I think we should instead rename that to mul_n<mode>3 and reorder
its operands.
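
Concretely, a sketch of the renamed pattern with the operands reordered so
that operand 1 is the vector and operand 2 is the duplicated scalar (the
updated patch below does essentially this):

(define_insn "mul_n<mode>3"
 [(set (match_operand:VMUL 0 "register_operand" "=w")
    (mult:VMUL
      (vec_duplicate:VMUL
        (match_operand:<VEL> 2 "register_operand" "<h_con>"))
      (match_operand:VMUL 1 "register_operand" "w")))]
  "TARGET_SIMD"
  "<f>mul\t%0.<Vtype>, %1.<Vtype>, %2.<Vetype>[0]"
  [(set_attr "type" "neon<fp>_mul_<stype>_scalar<q>")]
)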

Thanks,
Richard


* Re: [PATCH 10/20] aarch64: Use RTL builtins for FP ml[as]_n intrinsics
From: Jonathan Wright @ 2021-04-30 14:34 UTC
  To: Richard Sandiford, Jonathan Wright via Gcc-patches


Patch updated as per your suggestion.

Tested and bootstrapped on aarch64-none-linux-gnu - no issues.

Ok for master?

Thanks,
Jonathan
________________________________
From: Richard Sandiford <richard.sandiford@arm.com>
Sent: 28 April 2021 16:11
To: Jonathan Wright via Gcc-patches <gcc-patches@gcc.gnu.org>
Cc: Jonathan Wright <Jonathan.Wright@arm.com>
Subject: Re: [PATCH 10/20] aarch64: Use RTL builtins for FP ml[as]_n intrinsics

[...]
[-- Attachment #2: rb14042.patch --]
[-- Type: application/octet-stream, Size: 5371 bytes --]

diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
index 5d4c01f32e7e911cc53afb2fa5f0580039f77300..3b5e88443a8fbb0705956de55fab866534232f0c 100644
--- a/gcc/config/aarch64/aarch64-simd-builtins.def
+++ b/gcc/config/aarch64/aarch64-simd-builtins.def
@@ -668,6 +668,9 @@
   BUILTIN_VHSDF (TERNOP, fnma, 4, FP)
   VAR1 (TERNOP, fnma, 4, FP, hf)
 
+  BUILTIN_VDQSF (TERNOP, float_mla_n, 0, FP)
+  BUILTIN_VDQSF (TERNOP, float_mls_n, 0, FP)
+
   /* Implemented by aarch64_simd_bsl<mode>.  */
   BUILTIN_VDQQH (BSL_P, simd_bsl, 0, NONE)
   VAR2 (BSL_P, simd_bsl,0, NONE, di, v2di)
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index 65e63900e075722ebd93e433f3cc1fb449e02c7d..6edfd2d637ef5a76d027e90053359e6da607ab84 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -750,14 +750,14 @@
   [(set_attr "type" "neon<fp>_mul_<Vetype>_scalar<q>")]
 )
 
-(define_insn "*aarch64_mul3_elt_from_dup<mode>"
+(define_insn "mul_n<mode>3"
  [(set (match_operand:VMUL 0 "register_operand" "=w")
-    (mult:VMUL
-      (vec_duplicate:VMUL
-	    (match_operand:<VEL> 1 "register_operand" "<h_con>"))
-      (match_operand:VMUL 2 "register_operand" "w")))]
+       (mult:VMUL
+	 (vec_duplicate:VMUL
+	   (match_operand:<VEL> 2 "register_operand" "<h_con>"))
+	 (match_operand:VMUL 1 "register_operand" "w")))]
   "TARGET_SIMD"
-  "<f>mul\t%0.<Vtype>, %2.<Vtype>, %1.<Vetype>[0]";
+  "<f>mul\t%0.<Vtype>, %1.<Vtype>, %2.<Vetype>[0]";
   [(set_attr "type" "neon<fp>_mul_<stype>_scalar<q>")]
 )
 
@@ -2636,6 +2636,40 @@
   [(set_attr "type" "neon_fp_abs_<stype><q>")]
 )
 
+(define_expand "aarch64_float_mla_n<mode>"
+  [(set (match_operand:VDQSF 0 "register_operand")
+	(plus:VDQSF
+	  (mult:VDQSF
+	    (vec_duplicate:VDQSF
+	      (match_operand:<VEL> 3 "register_operand"))
+	    (match_operand:VDQSF 2 "register_operand"))
+	  (match_operand:VDQSF 1 "register_operand")))]
+  "TARGET_SIMD"
+  {
+    rtx scratch = gen_reg_rtx (<MODE>mode);
+    emit_insn (gen_mul_n<mode>3 (scratch, operands[2], operands[3]));
+    emit_insn (gen_add<mode>3 (operands[0], operands[1], scratch));
+    DONE;
+  }
+)
+
+(define_expand "aarch64_float_mls_n<mode>"
+  [(set (match_operand:VDQSF 0 "register_operand")
+	(minus:VDQSF
+	  (match_operand:VDQSF 1 "register_operand")
+	  (mult:VDQSF
+	    (vec_duplicate:VDQSF
+	      (match_operand:<VEL> 3 "register_operand"))
+	    (match_operand:VDQSF 2 "register_operand"))))]
+  "TARGET_SIMD"
+  {
+    rtx scratch = gen_reg_rtx (<MODE>mode);
+    emit_insn (gen_mul_n<mode>3 (scratch, operands[2], operands[3]));
+    emit_insn (gen_sub<mode>3 (operands[0], operands[1], scratch));
+    DONE;
+  }
+)
+
 (define_insn "fma<mode>4"
   [(set (match_operand:VHSDF 0 "register_operand" "=w")
        (fma:VHSDF (match_operand:VHSDF 1 "register_operand" "w")
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index bde2d17fbd92f9d2a0ae2f47f2c92c622c365642..f1e1e0ee79144c553fe207a51ba211c6dfc522ac 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -7035,13 +7035,7 @@ __extension__ extern __inline float32x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmla_n_f32 (float32x2_t __a, float32x2_t __b, float32_t __c)
 {
-  float32x2_t __result;
-  float32x2_t __t1;
-  __asm__ ("fmul %1.2s, %3.2s, %4.s[0]; fadd %0.2s, %0.2s, %1.2s"
-           : "=w"(__result), "=w"(__t1)
-           : "0"(__a), "w"(__b), "w"(__c)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_float_mla_nv2sf (__a, __b, __c);
 }
 
 __extension__ extern __inline int16x4_t
@@ -7388,13 +7382,7 @@ __extension__ extern __inline float32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmlaq_n_f32 (float32x4_t __a, float32x4_t __b, float32_t __c)
 {
-  float32x4_t __result;
-  float32x4_t __t1;
-  __asm__ ("fmul %1.4s, %3.4s, %4.s[0]; fadd %0.4s, %0.4s, %1.4s"
-           : "=w"(__result), "=w"(__t1)
-           : "0"(__a), "w"(__b), "w"(__c)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_float_mla_nv4sf (__a, __b, __c);
 }
 
 __extension__ extern __inline int16x8_t
@@ -7481,13 +7469,7 @@ __extension__ extern __inline float32x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmls_n_f32 (float32x2_t __a, float32x2_t __b, float32_t __c)
 {
-  float32x2_t __result;
-  float32x2_t __t1;
-  __asm__ ("fmul %1.2s, %3.2s, %4.s[0]; fsub %0.2s, %0.2s, %1.2s"
-           : "=w"(__result), "=w"(__t1)
-           : "0"(__a), "w"(__b), "w"(__c)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_float_mls_nv2sf (__a, __b, __c);
 }
 
 __extension__ extern __inline int16x4_t
@@ -7838,13 +7820,7 @@ __extension__ extern __inline float32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmlsq_n_f32 (float32x4_t __a, float32x4_t __b, float32_t __c)
 {
-  float32x4_t __result;
-  float32x4_t __t1;
-  __asm__ ("fmul %1.4s, %3.4s, %4.s[0]; fsub %0.4s, %0.4s, %1.4s"
-           : "=w"(__result), "=w"(__t1)
-           : "0"(__a), "w"(__b), "w"(__c)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_float_mls_nv4sf (__a, __b, __c);
 }
 
 __extension__ extern __inline int16x8_t
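
As a quick sanity check (hypothetical test case, not part of the patch),
the builtin should expand to the same fmul/fadd pair the old asm template
hard-coded, but the compiler is now free to schedule or combine the two
instructions with surrounding code:

  float32x4_t
  test_vmlaq_n_f32 (float32x4_t a, float32x4_t b, float32_t c)
  {
    return vmlaq_n_f32 (a, b, c);
  }

  /* Expected (register allocation may vary):
       fmul  v1.4s, v1.4s, v2.s[0]
       fadd  v0.4s, v0.4s, v1.4s  */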


* Re: [PATCH 10/20] aarch64: Use RTL builtins for FP ml[as]_n intrinsics
From: Richard Sandiford @ 2021-04-30 15:40 UTC
  To: Jonathan Wright; +Cc: Jonathan Wright via Gcc-patches

Jonathan Wright <Jonathan.Wright@arm.com> writes:
> Patch updated as per your suggestion.
>
> Tested and bootstrapped on aarch64-none-linux-gnu - no issues.
>
> Ok for master?

OK, thanks.

Richard

