* [PATCH 12/20] aarch64: Use RTL builtins for FP ml[as][q]_lane intrinsics
@ 2021-04-28 14:39 Jonathan Wright
From: Jonathan Wright @ 2021-04-28 14:39 UTC
To: gcc-patches
Hi,
As subject, this patch rewrites the floating-point vml[as][q]_lane Neon
intrinsics to use RTL builtins rather than relying on the GCC vector
extensions. Using RTL builtins allows control over the emission of
fmla/fmls instructions (which we don't want here).
With this commit, the code generated by these intrinsics changes from
a fused multiply-add/subtract instruction to an fmul followed by an
fadd/fsub instruction. If the programmer really wants fmla/fmls
instructions, they can use the vfm[as] intrinsics.
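As a rough illustration (this test case is not part of the patch, and
the register allocation shown in the comments is only a sketch), the
difference in generated code looks like:

#include <arm_neon.h>

float32x2_t
mla_lane (float32x2_t acc, float32x2_t x, float32x2_t y)
{
  /* After this patch: fmul v1.2s, v1.2s, v2.s[1]
                       fadd v0.2s, v0.2s, v1.2s  */
  return vmla_lane_f32 (acc, x, y, 1);
}

float32x2_t
fma_lane (float32x2_t acc, float32x2_t x, float32x2_t y)
{
  /* Still fused:      fmla v0.2s, v1.2s, v2.s[1]  */
  return vfma_lane_f32 (acc, x, y, 1);
}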
Regression tested and bootstrapped on aarch64-none-linux-gnu - no
issues.
Ok for master?
Thanks,
Jonathan
---
gcc/ChangeLog:
2021-02-16 Jonathan Wright <jonathan.wright@arm.com>
* config/aarch64/aarch64-simd-builtins.def: Add
float_ml[as]_lane builtin generator macros.
* config/aarch64/aarch64-simd.md (mul_lane<mode>3): Define.
(aarch64_float_mla_lane<mode>): Define.
(aarch64_float_mls_lane<mode>): Define.
* config/aarch64/arm_neon.h (vmla_lane_f32): Use RTL builtin
instead of GCC vector extensions.
(vmlaq_lane_f32): Likewise.
(vmls_lane_f32): Likewise.
(vmlsq_lane_f32): Likewise.
[-- Attachment #2: rb14212.patch --]
diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
index 55a5682baeb13041053ef9e6eaa831182ea8b10c..b702493e1351478272bb7d26991a5673943d61ec 100644
--- a/gcc/config/aarch64/aarch64-simd-builtins.def
+++ b/gcc/config/aarch64/aarch64-simd-builtins.def
@@ -668,6 +668,8 @@
BUILTIN_VDQF_DF (TERNOP, float_mls, 0, FP)
BUILTIN_VDQSF (TERNOP, float_mla_n, 0, FP)
BUILTIN_VDQSF (TERNOP, float_mls_n, 0, FP)
+ BUILTIN_VDQSF (QUADOP_LANE, float_mla_lane, 0, FP)
+ BUILTIN_VDQSF (QUADOP_LANE, float_mls_lane, 0, FP)
/* Implemented by aarch64_simd_bsl<mode>. */
BUILTIN_VDQQH (BSL_P, simd_bsl, 0, NONE)
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index 95363d7b5ad11f775aa03f24bbcb0b66d20abb7c..abc8b1708b86bcee2e5082cc4659a197c5821985 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -2625,6 +2625,22 @@
[(set_attr "type" "neon_fp_mul_<stype><q>")]
)
+(define_insn "mul_lane<mode>3"
+ [(set (match_operand:VDQSF 0 "register_operand" "=w")
+ (mult:VDQSF
+ (vec_duplicate:VDQSF
+ (vec_select:<VEL>
+ (match_operand:V2SF 2 "register_operand" "w")
+ (parallel [(match_operand:SI 3 "immediate_operand" "i")])))
+ (match_operand:VDQSF 1 "register_operand" "w")))]
+ "TARGET_SIMD"
+ {
+ operands[3] = aarch64_endian_lane_rtx (V2SFmode, INTVAL (operands[3]));
+ return "fmul\\t%0.<Vtype>, %1.<Vtype>, %2.<Vetype>[%3]";
+ }
+ [(set_attr "type" "neon_fp_mul_s_scalar<q>")]
+)
+
(define_expand "div<mode>3"
[(set (match_operand:VHSDF 0 "register_operand")
(div:VHSDF (match_operand:VHSDF 1 "register_operand")
@@ -2728,6 +2744,46 @@
}
)
+(define_expand "aarch64_float_mla_lane<mode>"
+ [(set (match_operand:VDQSF 0 "register_operand")
+ (plus:VDQSF
+ (mult:VDQSF
+ (vec_duplicate:VDQSF
+ (vec_select:<VEL>
+ (match_operand:V2SF 3 "register_operand")
+ (parallel [(match_operand:SI 4 "immediate_operand")])))
+ (match_operand:VDQSF 2 "register_operand"))
+ (match_operand:VDQSF 1 "register_operand")))]
+ "TARGET_SIMD"
+ {
+ rtx scratch = gen_reg_rtx (<MODE>mode);
+ emit_insn (gen_mul_lane<mode>3 (scratch, operands[2],
+ operands[3], operands[4]));
+ emit_insn (gen_add<mode>3 (operands[0], operands[1], scratch));
+ DONE;
+ }
+)
+
+(define_expand "aarch64_float_mls_lane<mode>"
+ [(set (match_operand:VDQSF 0 "register_operand")
+ (minus:VDQSF
+ (match_operand:VDQSF 1 "register_operand")
+ (mult:VDQSF
+ (vec_duplicate:VDQSF
+ (vec_select:<VEL>
+ (match_operand:V2SF 3 "register_operand")
+ (parallel [(match_operand:SI 4 "immediate_operand")])))
+ (match_operand:VDQSF 2 "register_operand"))))]
+ "TARGET_SIMD"
+ {
+ rtx scratch = gen_reg_rtx (<MODE>mode);
+ emit_insn (gen_mul_lane<mode>3 (scratch, operands[2],
+ operands[3], operands[4]));
+ emit_insn (gen_sub<mode>3 (operands[0], operands[1], scratch));
+ DONE;
+ }
+)
+
(define_insn "fma<mode>4"
[(set (match_operand:VHSDF 0 "register_operand" "=w")
(fma:VHSDF (match_operand:VHSDF 1 "register_operand" "w")
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index d4ed47249e3e39f8c88274657c809293e20bec9d..082409fe523cee6ae78f02574762b92d47885c42 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -20393,7 +20393,7 @@ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmla_lane_f32 (float32x2_t __a, float32x2_t __b,
float32x2_t __c, const int __lane)
{
- return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
+ return __builtin_aarch64_float_mla_lanev2sf (__a, __b, __c, __lane);
}
__extension__ extern __inline int16x4_t
@@ -20477,7 +20477,7 @@ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlaq_lane_f32 (float32x4_t __a, float32x4_t __b,
float32x2_t __c, const int __lane)
{
- return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
+ return __builtin_aarch64_float_mla_lanev4sf (__a, __b, __c, __lane);
}
__extension__ extern __inline int16x8_t
@@ -20591,7 +20591,7 @@ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmls_lane_f32 (float32x2_t __a, float32x2_t __b,
float32x2_t __c, const int __lane)
{
- return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
+ return __builtin_aarch64_float_mls_lanev2sf (__a, __b, __c, __lane);
}
__extension__ extern __inline int16x4_t
@@ -20675,7 +20675,7 @@ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsq_lane_f32 (float32x4_t __a, float32x4_t __b,
float32x2_t __c, const int __lane)
{
- return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
+ return __builtin_aarch64_float_mls_lanev4sf (__a, __b, __c, __lane);
}
__extension__ extern __inline int16x8_t
* Re: [PATCH 12/20] aarch64: Use RTL builtins for FP ml[as][q]_lane intrinsics
@ 2021-04-28 15:37 Richard Sandiford
From: Richard Sandiford @ 2021-04-28 15:37 UTC
To: Jonathan Wright via Gcc-patches
Jonathan Wright via Gcc-patches <gcc-patches@gcc.gnu.org> writes:
> Hi,
>
> As subject, this patch rewrites the floating-point vml[as][q]_lane Neon
> intrinsics to use RTL builtins rather than relying on the GCC vector
> extensions. Using RTL builtins allows control over the emission of
> fmla/fmls instructions (which we don't want here).
>
> With this commit, the code generated by these intrinsics changes from
> a fused multiply-add/subtract instruction to an fmul followed by an
> fadd/fsub instruction. If the programmer really wants fmla/fmls
> instructions, they can use the vfm[as] intrinsics.
>
> Regression tested and bootstrapped on aarch64-none-linux-gnu - no
> issues.
>
> Ok for master?
>
> Thanks,
> Jonathan
>
> ---
>
> gcc/ChangeLog:
>
> 2021-02-16 Jonathan Wright <jonathan.wright@arm.com>
>
> * config/aarch64/aarch64-simd-builtins.def: Add
> float_ml[as]_lane builtin generator macros.
> * config/aarch64/aarch64-simd.md (mul_lane<mode>3): Define.
> (aarch64_float_mla_lane<mode>): Define.
> (aarch64_float_mls_lane<mode>): Define.
> * config/aarch64/arm_neon.h (vmla_lane_f32): Use RTL builtin
> instead of GCC vector extensions.
> (vmlaq_lane_f32): Likewise.
> (vmls_lane_f32): Likewise.
> (vmlsq_lane_f32): Likewise.
>
> diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
> index 55a5682baeb13041053ef9e6eaa831182ea8b10c..b702493e1351478272bb7d26991a5673943d61ec 100644
> --- a/gcc/config/aarch64/aarch64-simd-builtins.def
> +++ b/gcc/config/aarch64/aarch64-simd-builtins.def
> @@ -668,6 +668,8 @@
> BUILTIN_VDQF_DF (TERNOP, float_mls, 0, FP)
> BUILTIN_VDQSF (TERNOP, float_mla_n, 0, FP)
> BUILTIN_VDQSF (TERNOP, float_mls_n, 0, FP)
> + BUILTIN_VDQSF (QUADOP_LANE, float_mla_lane, 0, FP)
> + BUILTIN_VDQSF (QUADOP_LANE, float_mls_lane, 0, FP)
>
> /* Implemented by aarch64_simd_bsl<mode>. */
> BUILTIN_VDQQH (BSL_P, simd_bsl, 0, NONE)
> diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
> index 95363d7b5ad11f775aa03f24bbcb0b66d20abb7c..abc8b1708b86bcee2e5082cc4659a197c5821985 100644
> --- a/gcc/config/aarch64/aarch64-simd.md
> +++ b/gcc/config/aarch64/aarch64-simd.md
> @@ -2625,6 +2625,22 @@
> [(set_attr "type" "neon_fp_mul_<stype><q>")]
> )
>
> +(define_insn "mul_lane<mode>3"
> + [(set (match_operand:VDQSF 0 "register_operand" "=w")
> + (mult:VDQSF
> + (vec_duplicate:VDQSF
> + (vec_select:<VEL>
> + (match_operand:V2SF 2 "register_operand" "w")
> + (parallel [(match_operand:SI 3 "immediate_operand" "i")])))
> + (match_operand:VDQSF 1 "register_operand" "w")))]
> + "TARGET_SIMD"
> + {
> + operands[3] = aarch64_endian_lane_rtx (V2SFmode, INTVAL (operands[3]));
> + return "fmul\\t%0.<Vtype>, %1.<Vtype>, %2.<Vetype>[%3]";
> + }
> + [(set_attr "type" "neon_fp_mul_s_scalar<q>")]
> +)
> +
Similarly to the 10/20 patch (IIRC), we can instead reuse:
(define_insn "*aarch64_mul3_elt<mode>"
[(set (match_operand:VMUL 0 "register_operand" "=w")
(mult:VMUL
(vec_duplicate:VMUL
(vec_select:<VEL>
(match_operand:VMUL 1 "register_operand" "<h_con>")
(parallel [(match_operand:SI 2 "immediate_operand")])))
(match_operand:VMUL 3 "register_operand" "w")))]
"TARGET_SIMD"
{
operands[2] = aarch64_endian_lane_rtx (<MODE>mode, INTVAL (operands[2]));
return "<f>mul\\t%0.<Vtype>, %3.<Vtype>, %1.<Vetype>[%2]";
}
[(set_attr "type" "neon<fp>_mul_<stype>_scalar<q>")]
)
Thanks,
Richard
> (define_expand "div<mode>3"
> [(set (match_operand:VHSDF 0 "register_operand")
> (div:VHSDF (match_operand:VHSDF 1 "register_operand")
> @@ -2728,6 +2744,46 @@
> }
> )
>
> +(define_expand "aarch64_float_mla_lane<mode>"
> + [(set (match_operand:VDQSF 0 "register_operand")
> + (plus:VDQSF
> + (mult:VDQSF
> + (vec_duplicate:VDQSF
> + (vec_select:<VEL>
> + (match_operand:V2SF 3 "register_operand")
> + (parallel [(match_operand:SI 4 "immediate_operand")])))
> + (match_operand:VDQSF 2 "register_operand"))
> + (match_operand:VDQSF 1 "register_operand")))]
> + "TARGET_SIMD"
> + {
> + rtx scratch = gen_reg_rtx (<MODE>mode);
> + emit_insn (gen_mul_lane<mode>3 (scratch, operands[2],
> + operands[3], operands[4]));
> + emit_insn (gen_add<mode>3 (operands[0], operands[1], scratch));
> + DONE;
> + }
> +)
> +
> +(define_expand "aarch64_float_mls_lane<mode>"
> + [(set (match_operand:VDQSF 0 "register_operand")
> + (minus:VDQSF
> + (match_operand:VDQSF 1 "register_operand")
> + (mult:VDQSF
> + (vec_duplicate:VDQSF
> + (vec_select:<VEL>
> + (match_operand:V2SF 3 "register_operand")
> + (parallel [(match_operand:SI 4 "immediate_operand")])))
> + (match_operand:VDQSF 2 "register_operand"))))]
> + "TARGET_SIMD"
> + {
> + rtx scratch = gen_reg_rtx (<MODE>mode);
> + emit_insn (gen_mul_lane<mode>3 (scratch, operands[2],
> + operands[3], operands[4]));
> + emit_insn (gen_sub<mode>3 (operands[0], operands[1], scratch));
> + DONE;
> + }
> +)
> +
> (define_insn "fma<mode>4"
> [(set (match_operand:VHSDF 0 "register_operand" "=w")
> (fma:VHSDF (match_operand:VHSDF 1 "register_operand" "w")
> diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
> index d4ed47249e3e39f8c88274657c809293e20bec9d..082409fe523cee6ae78f02574762b92d47885c42 100644
> --- a/gcc/config/aarch64/arm_neon.h
> +++ b/gcc/config/aarch64/arm_neon.h
> @@ -20393,7 +20393,7 @@ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> vmla_lane_f32 (float32x2_t __a, float32x2_t __b,
> float32x2_t __c, const int __lane)
> {
> - return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
> + return __builtin_aarch64_float_mla_lanev2sf (__a, __b, __c, __lane);
> }
>
> __extension__ extern __inline int16x4_t
> @@ -20477,7 +20477,7 @@ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> vmlaq_lane_f32 (float32x4_t __a, float32x4_t __b,
> float32x2_t __c, const int __lane)
> {
> - return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
> + return __builtin_aarch64_float_mla_lanev4sf (__a, __b, __c, __lane);
> }
>
> __extension__ extern __inline int16x8_t
> @@ -20591,7 +20591,7 @@ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> vmls_lane_f32 (float32x2_t __a, float32x2_t __b,
> float32x2_t __c, const int __lane)
> {
> - return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
> + return __builtin_aarch64_float_mls_lanev2sf (__a, __b, __c, __lane);
> }
>
> __extension__ extern __inline int16x4_t
> @@ -20675,7 +20675,7 @@ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> vmlsq_lane_f32 (float32x4_t __a, float32x4_t __b,
> float32x2_t __c, const int __lane)
> {
> - return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
> + return __builtin_aarch64_float_mls_lanev4sf (__a, __b, __c, __lane);
> }
>
> __extension__ extern __inline int16x8_t
* Re: [PATCH 12/20] aarch64: Use RTL builtins for FP ml[as][q]_lane intrinsics
@ 2021-04-30 14:51 Jonathan Wright
From: Jonathan Wright @ 2021-04-30 14:51 UTC
To: Richard Sandiford, Jonathan Wright via Gcc-patches
Patch updated as per suggestion (similar to patch 10/20).
Tested and bootstrapped on aarch64-none-linux-gnu - no issues.
Ok for master?
Thanks,
Jonathan
From: Richard Sandiford <richard.sandiford@arm.com>
Sent: 28 April 2021 16:37
To: Jonathan Wright via Gcc-patches <gcc-patches@gcc.gnu.org>
Cc: Jonathan Wright <Jonathan.Wright@arm.com>
Subject: Re: [PATCH 12/20] aarch64: Use RTL builtins for FP ml[as][q]_lane intrinsics
Jonathan Wright via Gcc-patches <gcc-patches@gcc.gnu.org> writes:
> Hi,
>
> As subject, this patch rewrites the floating-point vml[as][q]_lane Neon
> intrinsics to use RTL builtins rather than relying on the GCC vector
> extensions. Using RTL builtins allows control over the emission of
> fmla/fmls instructions (which we don't want here).
>
> With this commit, the code generated by these intrinsics changes from
> a fused multiply-add/subtract instruction to an fmul followed by an
> fadd/fsub instruction. If the programmer really wants fmla/fmls
> instructions, they can use the vfm[as] intrinsics.
>
> Regression tested and bootstrapped on aarch64-none-linux-gnu - no
> issues.
>
> Ok for master?
>
> Thanks,
> Jonathan
>
> ---
>
> gcc/ChangeLog:
>
> 2021-02-16 Jonathan Wright <jonathan.wright@arm.com>
>
> * config/aarch64/aarch64-simd-builtins.def: Add
> float_ml[as]_lane builtin generator macros.
> * config/aarch64/aarch64-simd.md (mul_lane<mode>3): Define.
> (aarch64_float_mla_lane<mode>): Define.
> (aarch64_float_mls_lane<mode>): Define.
> * config/aarch64/arm_neon.h (vmla_lane_f32): Use RTL builtin
> instead of GCC vector extensions.
> (vmlaq_lane_f32): Likewise.
> (vmls_lane_f32): Likewise.
> (vmlsq_lane_f32): Likewise.
>
> diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
> index 55a5682baeb13041053ef9e6eaa831182ea8b10c..b702493e1351478272bb7d26991a5673943d61ec 100644
> --- a/gcc/config/aarch64/aarch64-simd-builtins.def
> +++ b/gcc/config/aarch64/aarch64-simd-builtins.def
> @@ -668,6 +668,8 @@
> BUILTIN_VDQF_DF (TERNOP, float_mls, 0, FP)
> BUILTIN_VDQSF (TERNOP, float_mla_n, 0, FP)
> BUILTIN_VDQSF (TERNOP, float_mls_n, 0, FP)
> + BUILTIN_VDQSF (QUADOP_LANE, float_mla_lane, 0, FP)
> + BUILTIN_VDQSF (QUADOP_LANE, float_mls_lane, 0, FP)
>
> /* Implemented by aarch64_simd_bsl<mode>. */
> BUILTIN_VDQQH (BSL_P, simd_bsl, 0, NONE)
> diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
> index 95363d7b5ad11f775aa03f24bbcb0b66d20abb7c..abc8b1708b86bcee2e5082cc4659a197c5821985 100644
> --- a/gcc/config/aarch64/aarch64-simd.md
> +++ b/gcc/config/aarch64/aarch64-simd.md
> @@ -2625,6 +2625,22 @@
> [(set_attr "type" "neon_fp_mul_<stype><q>")]
> )
>
> +(define_insn "mul_lane<mode>3"
> + [(set (match_operand:VDQSF 0 "register_operand" "=w")
> + (mult:VDQSF
> + (vec_duplicate:VDQSF
> + (vec_select:<VEL>
> + (match_operand:V2SF 2 "register_operand" "w")
> + (parallel [(match_operand:SI 3 "immediate_operand" "i")])))
> + (match_operand:VDQSF 1 "register_operand" "w")))]
> + "TARGET_SIMD"
> + {
> + operands[3] = aarch64_endian_lane_rtx (V2SFmode, INTVAL (operands[3]));
> + return "fmul\\t%0.<Vtype>, %1.<Vtype>, %2.<Vetype>[%3]";
> + }
> + [(set_attr "type" "neon_fp_mul_s_scalar<q>")]
> +)
> +
Similarly to the 10/20 patch (IIRC), we can instead reuse:
(define_insn "*aarch64_mul3_elt<mode>"
[(set (match_operand:VMUL 0 "register_operand" "=w")
(mult:VMUL
(vec_duplicate:VMUL
(vec_select:<VEL>
(match_operand:VMUL 1 "register_operand" "<h_con>")
(parallel [(match_operand:SI 2 "immediate_operand")])))
(match_operand:VMUL 3 "register_operand" "w")))]
"TARGET_SIMD"
{
operands[2] = aarch64_endian_lane_rtx (<MODE>mode, INTVAL (operands[2]));
return "<f>mul\\t%0.<Vtype>, %3.<Vtype>, %1.<Vetype>[%2]";
}
[(set_attr "type" "neon<fp>_mul_<stype>_scalar<q>")]
)
Thanks,
Richard
> (define_expand "div<mode>3"
> [(set (match_operand:VHSDF 0 "register_operand")
> (div:VHSDF (match_operand:VHSDF 1 "register_operand")
> @@ -2728,6 +2744,46 @@
> }
> )
>
> +(define_expand "aarch64_float_mla_lane<mode>"
> + [(set (match_operand:VDQSF 0 "register_operand")
> + (plus:VDQSF
> + (mult:VDQSF
> + (vec_duplicate:VDQSF
> + (vec_select:<VEL>
> + (match_operand:V2SF 3 "register_operand")
> + (parallel [(match_operand:SI 4 "immediate_operand")])))
> + (match_operand:VDQSF 2 "register_operand"))
> + (match_operand:VDQSF 1 "register_operand")))]
> + "TARGET_SIMD"
> + {
> + rtx scratch = gen_reg_rtx (<MODE>mode);
> + emit_insn (gen_mul_lane<mode>3 (scratch, operands[2],
> + operands[3], operands[4]));
> + emit_insn (gen_add<mode>3 (operands[0], operands[1], scratch));
> + DONE;
> + }
> +)
> +
> +(define_expand "aarch64_float_mls_lane<mode>"
> + [(set (match_operand:VDQSF 0 "register_operand")
> + (minus:VDQSF
> + (match_operand:VDQSF 1 "register_operand")
> + (mult:VDQSF
> + (vec_duplicate:VDQSF
> + (vec_select:<VEL>
> + (match_operand:V2SF 3 "register_operand")
> + (parallel [(match_operand:SI 4 "immediate_operand")])))
> + (match_operand:VDQSF 2 "register_operand"))))]
> + "TARGET_SIMD"
> + {
> + rtx scratch = gen_reg_rtx (<MODE>mode);
> + emit_insn (gen_mul_lane<mode>3 (scratch, operands[2],
> + operands[3], operands[4]));
> + emit_insn (gen_sub<mode>3 (operands[0], operands[1], scratch));
> + DONE;
> + }
> +)
> +
> (define_insn "fma<mode>4"
> [(set (match_operand:VHSDF 0 "register_operand" "=w")
> (fma:VHSDF (match_operand:VHSDF 1 "register_operand" "w")
> diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
> index d4ed47249e3e39f8c88274657c809293e20bec9d..082409fe523cee6ae78f02574762b92d47885c42 100644
> --- a/gcc/config/aarch64/arm_neon.h
> +++ b/gcc/config/aarch64/arm_neon.h
> @@ -20393,7 +20393,7 @@ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> vmla_lane_f32 (float32x2_t __a, float32x2_t __b,
> float32x2_t __c, const int __lane)
> {
> - return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
> + return __builtin_aarch64_float_mla_lanev2sf (__a, __b, __c, __lane);
> }
>
> __extension__ extern __inline int16x4_t
> @@ -20477,7 +20477,7 @@ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> vmlaq_lane_f32 (float32x4_t __a, float32x4_t __b,
> float32x2_t __c, const int __lane)
> {
> - return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
> + return __builtin_aarch64_float_mla_lanev4sf (__a, __b, __c, __lane);
> }
>
> __extension__ extern __inline int16x8_t
> @@ -20591,7 +20591,7 @@ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> vmls_lane_f32 (float32x2_t __a, float32x2_t __b,
> float32x2_t __c, const int __lane)
> {
> - return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
> + return __builtin_aarch64_float_mls_lanev2sf (__a, __b, __c, __lane);
> }
>
> __extension__ extern __inline int16x4_t
> @@ -20675,7 +20675,7 @@ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> vmlsq_lane_f32 (float32x4_t __a, float32x4_t __b,
> float32x2_t __c, const int __lane)
> {
> - return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
> + return __builtin_aarch64_float_mls_lanev4sf (__a, __b, __c, __lane);
> }
>
> __extension__ extern __inline int16x8_t
[-- Attachment #2: rb14212.patch --]
diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
index 2a2fc2076b11a83c1de0b9a7847488df73d312be..8e4b4edc8a46ffba777a42058f06ce7204152824 100644
--- a/gcc/config/aarch64/aarch64-simd-builtins.def
+++ b/gcc/config/aarch64/aarch64-simd-builtins.def
@@ -672,6 +672,8 @@
BUILTIN_VDQF_DF (TERNOP, float_mls, 0, FP)
BUILTIN_VDQSF (TERNOP, float_mla_n, 0, FP)
BUILTIN_VDQSF (TERNOP, float_mls_n, 0, FP)
+ BUILTIN_VDQSF (QUADOP_LANE, float_mla_lane, 0, FP)
+ BUILTIN_VDQSF (QUADOP_LANE, float_mls_lane, 0, FP)
/* Implemented by aarch64_simd_bsl<mode>. */
BUILTIN_VDQQH (BSL_P, simd_bsl, 0, NONE)
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index 0f96cd0bd512eb8437b6f16f45618f29e1d1526c..bdee49f74f4725409d33af733bb55be290b3f0e7 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -718,18 +718,18 @@
}
)
-(define_insn "*aarch64_mul3_elt<mode>"
+(define_insn "mul_lane<mode>3"
[(set (match_operand:VMUL 0 "register_operand" "=w")
- (mult:VMUL
- (vec_duplicate:VMUL
- (vec_select:<VEL>
- (match_operand:VMUL 1 "register_operand" "<h_con>")
- (parallel [(match_operand:SI 2 "immediate_operand")])))
- (match_operand:VMUL 3 "register_operand" "w")))]
+ (mult:VMUL
+ (vec_duplicate:VMUL
+ (vec_select:<VEL>
+ (match_operand:VMUL 2 "register_operand" "<h_con>")
+ (parallel [(match_operand:SI 3 "immediate_operand" "i")])))
+ (match_operand:VMUL 1 "register_operand" "w")))]
"TARGET_SIMD"
{
- operands[2] = aarch64_endian_lane_rtx (<MODE>mode, INTVAL (operands[2]));
- return "<f>mul\\t%0.<Vtype>, %3.<Vtype>, %1.<Vetype>[%2]";
+ operands[3] = aarch64_endian_lane_rtx (<MODE>mode, INTVAL (operands[3]));
+ return "<f>mul\\t%0.<Vtype>, %1.<Vtype>, %2.<Vetype>[%3]";
}
[(set_attr "type" "neon<fp>_mul_<stype>_scalar<q>")]
)
@@ -2702,6 +2702,46 @@
}
)
+(define_expand "aarch64_float_mla_lane<mode>"
+ [(set (match_operand:VDQSF 0 "register_operand")
+ (plus:VDQSF
+ (mult:VDQSF
+ (vec_duplicate:VDQSF
+ (vec_select:<VEL>
+ (match_operand:V2SF 3 "register_operand")
+ (parallel [(match_operand:SI 4 "immediate_operand")])))
+ (match_operand:VDQSF 2 "register_operand"))
+ (match_operand:VDQSF 1 "register_operand")))]
+ "TARGET_SIMD"
+ {
+ rtx scratch = gen_reg_rtx (<MODE>mode);
+ emit_insn (gen_mul_lane<mode>3 (scratch, operands[2],
+ operands[3], operands[4]));
+ emit_insn (gen_add<mode>3 (operands[0], operands[1], scratch));
+ DONE;
+ }
+)
+
+(define_expand "aarch64_float_mls_lane<mode>"
+ [(set (match_operand:VDQSF 0 "register_operand")
+ (minus:VDQSF
+ (match_operand:VDQSF 1 "register_operand")
+ (mult:VDQSF
+ (vec_duplicate:VDQSF
+ (vec_select:<VEL>
+ (match_operand:V2SF 3 "register_operand")
+ (parallel [(match_operand:SI 4 "immediate_operand")])))
+ (match_operand:VDQSF 2 "register_operand"))))]
+ "TARGET_SIMD"
+ {
+ rtx scratch = gen_reg_rtx (<MODE>mode);
+ emit_insn (gen_mul_lane<mode>3 (scratch, operands[2],
+ operands[3], operands[4]));
+ emit_insn (gen_sub<mode>3 (operands[0], operands[1], scratch));
+ DONE;
+ }
+)
+
(define_insn "fma<mode>4"
[(set (match_operand:VHSDF 0 "register_operand" "=w")
(fma:VHSDF (match_operand:VHSDF 1 "register_operand" "w")
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index 0227cadb7e869ee23dddd7abb71f169093f3cd05..5328d447a424fdf4ce1941abf3c1218d4fe8f42a 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -20378,7 +20378,7 @@ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmla_lane_f32 (float32x2_t __a, float32x2_t __b,
float32x2_t __c, const int __lane)
{
- return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
+ return __builtin_aarch64_float_mla_lanev2sf (__a, __b, __c, __lane);
}
__extension__ extern __inline int16x4_t
@@ -20462,7 +20462,7 @@ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlaq_lane_f32 (float32x4_t __a, float32x4_t __b,
float32x2_t __c, const int __lane)
{
- return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
+ return __builtin_aarch64_float_mla_lanev4sf (__a, __b, __c, __lane);
}
__extension__ extern __inline int16x8_t
@@ -20576,7 +20576,7 @@ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmls_lane_f32 (float32x2_t __a, float32x2_t __b,
float32x2_t __c, const int __lane)
{
- return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
+ return __builtin_aarch64_float_mls_lanev2sf (__a, __b, __c, __lane);
}
__extension__ extern __inline int16x4_t
@@ -20660,7 +20660,7 @@ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsq_lane_f32 (float32x4_t __a, float32x4_t __b,
float32x2_t __c, const int __lane)
{
- return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
+ return __builtin_aarch64_float_mls_lanev4sf (__a, __b, __c, __lane);
}
__extension__ extern __inline int16x8_t
* Re: [PATCH 12/20] aarch64: Use RTL builtins for FP ml[as][q]_lane intrinsics
@ 2021-04-30 15:42 Richard Sandiford
From: Richard Sandiford @ 2021-04-30 15:42 UTC
To: Jonathan Wright; +Cc: Jonathan Wright via Gcc-patches
Jonathan Wright <Jonathan.Wright@arm.com> writes:
> Patch updated as per suggestion (similar to patch 10/20.)
>
> Tested and bootstrapped on aarch64-none-linux-gnu - no issues.
>
> Ok for master?
OK, thanks.
Richard
> Thanks,
> Jonathan
> From: Richard Sandiford <richard.sandiford@arm.com>
> Sent: 28 April 2021 16:37
> To: Jonathan Wright via Gcc-patches <gcc-patches@gcc.gnu.org>
> Cc: Jonathan Wright <Jonathan.Wright@arm.com>
> Subject: Re: [PATCH 12/20] aarch64: Use RTL builtins for FP ml[as][q]_lane intrinsics
>
> Jonathan Wright via Gcc-patches <gcc-patches@gcc.gnu.org> writes:
>> Hi,
>>
>> As subject, this patch rewrites the floating-point vml[as][q]_lane Neon
>> intrinsics to use RTL builtins rather than relying on the GCC vector
>> extensions. Using RTL builtins allows control over the emission of
>> fmla/fmls instructions (which we don't want here).
>>
>> With this commit, the code generated by these intrinsics changes from
>> a fused multiply-add/subtract instruction to an fmul followed by an
>> fadd/fsub instruction. If the programmer really wants fmla/fmls
>> instructions, they can use the vfm[as] intrinsics.
>>
>> Regression tested and bootstrapped on aarch64-none-linux-gnu - no
>> issues.
>>
>> Ok for master?
>>
>> Thanks,
>> Jonathan
>>
>> ---
>>
>> gcc/ChangeLog:
>>
>> 2021-02-16 Jonathan Wright <jonathan.wright@arm.com>
>>
>> * config/aarch64/aarch64-simd-builtins.def: Add
>> float_ml[as]_lane builtin generator macros.
>> * config/aarch64/aarch64-simd.md (mul_lane<mode>3): Define.
>> (aarch64_float_mla_lane<mode>): Define.
>> (aarch64_float_mls_lane<mode>): Define.
>> * config/aarch64/arm_neon.h (vmla_lane_f32): Use RTL builtin
>> instead of GCC vector extensions.
>> (vmlaq_lane_f32): Likewise.
>> (vmls_lane_f32): Likewise.
>> (vmlsq_lane_f32): Likewise.
>>
>> diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
>> index 55a5682baeb13041053ef9e6eaa831182ea8b10c..b702493e1351478272bb7d26991a5673943d61ec 100644
>> --- a/gcc/config/aarch64/aarch64-simd-builtins.def
>> +++ b/gcc/config/aarch64/aarch64-simd-builtins.def
>> @@ -668,6 +668,8 @@
>> BUILTIN_VDQF_DF (TERNOP, float_mls, 0, FP)
>> BUILTIN_VDQSF (TERNOP, float_mla_n, 0, FP)
>> BUILTIN_VDQSF (TERNOP, float_mls_n, 0, FP)
>> + BUILTIN_VDQSF (QUADOP_LANE, float_mla_lane, 0, FP)
>> + BUILTIN_VDQSF (QUADOP_LANE, float_mls_lane, 0, FP)
>>
>> /* Implemented by aarch64_simd_bsl<mode>. */
>> BUILTIN_VDQQH (BSL_P, simd_bsl, 0, NONE)
>> diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
>> index 95363d7b5ad11f775aa03f24bbcb0b66d20abb7c..abc8b1708b86bcee2e5082cc4659a197c5821985 100644
>> --- a/gcc/config/aarch64/aarch64-simd.md
>> +++ b/gcc/config/aarch64/aarch64-simd.md
>> @@ -2625,6 +2625,22 @@
>> [(set_attr "type" "neon_fp_mul_<stype><q>")]
>> )
>>
>> +(define_insn "mul_lane<mode>3"
>> + [(set (match_operand:VDQSF 0 "register_operand" "=w")
>> + (mult:VDQSF
>> + (vec_duplicate:VDQSF
>> + (vec_select:<VEL>
>> + (match_operand:V2SF 2 "register_operand" "w")
>> + (parallel [(match_operand:SI 3 "immediate_operand" "i")])))
>> + (match_operand:VDQSF 1 "register_operand" "w")))]
>> + "TARGET_SIMD"
>> + {
>> + operands[3] = aarch64_endian_lane_rtx (V2SFmode, INTVAL (operands[3]));
>> + return "fmul\\t%0.<Vtype>, %1.<Vtype>, %2.<Vetype>[%3]";
>> + }
>> + [(set_attr "type" "neon_fp_mul_s_scalar<q>")]
>> +)
>> +
>
> Similarly to the 10/20 patch (IIRC), we can instead reuse:
>
> (define_insn "*aarch64_mul3_elt<mode>"
> [(set (match_operand:VMUL 0 "register_operand" "=w")
> (mult:VMUL
> (vec_duplicate:VMUL
> (vec_select:<VEL>
> (match_operand:VMUL 1 "register_operand" "<h_con>")
> (parallel [(match_operand:SI 2 "immediate_operand")])))
> (match_operand:VMUL 3 "register_operand" "w")))]
> "TARGET_SIMD"
> {
> operands[2] = aarch64_endian_lane_rtx (<MODE>mode, INTVAL (operands[2]));
> return "<f>mul\\t%0.<Vtype>, %3.<Vtype>, %1.<Vetype>[%2]";
> }
> [(set_attr "type" "neon<fp>_mul_<stype>_scalar<q>")]
> )
>
> Thanks,
> Richard
>
>> (define_expand "div<mode>3"
>> [(set (match_operand:VHSDF 0 "register_operand")
>> (div:VHSDF (match_operand:VHSDF 1 "register_operand")
>> @@ -2728,6 +2744,46 @@
>> }
>> )
>>
>> +(define_expand "aarch64_float_mla_lane<mode>"
>> + [(set (match_operand:VDQSF 0 "register_operand")
>> + (plus:VDQSF
>> + (mult:VDQSF
>> + (vec_duplicate:VDQSF
>> + (vec_select:<VEL>
>> + (match_operand:V2SF 3 "register_operand")
>> + (parallel [(match_operand:SI 4 "immediate_operand")])))
>> + (match_operand:VDQSF 2 "register_operand"))
>> + (match_operand:VDQSF 1 "register_operand")))]
>> + "TARGET_SIMD"
>> + {
>> + rtx scratch = gen_reg_rtx (<MODE>mode);
>> + emit_insn (gen_mul_lane<mode>3 (scratch, operands[2],
>> + operands[3], operands[4]));
>> + emit_insn (gen_add<mode>3 (operands[0], operands[1], scratch));
>> + DONE;
>> + }
>> +)
>> +
>> +(define_expand "aarch64_float_mls_lane<mode>"
>> + [(set (match_operand:VDQSF 0 "register_operand")
>> + (minus:VDQSF
>> + (match_operand:VDQSF 1 "register_operand")
>> + (mult:VDQSF
>> + (vec_duplicate:VDQSF
>> + (vec_select:<VEL>
>> + (match_operand:V2SF 3 "register_operand")
>> + (parallel [(match_operand:SI 4 "immediate_operand")])))
>> + (match_operand:VDQSF 2 "register_operand"))))]
>> + "TARGET_SIMD"
>> + {
>> + rtx scratch = gen_reg_rtx (<MODE>mode);
>> + emit_insn (gen_mul_lane<mode>3 (scratch, operands[2],
>> + operands[3], operands[4]));
>> + emit_insn (gen_sub<mode>3 (operands[0], operands[1], scratch));
>> + DONE;
>> + }
>> +)
>> +
>> (define_insn "fma<mode>4"
>> [(set (match_operand:VHSDF 0 "register_operand" "=w")
>> (fma:VHSDF (match_operand:VHSDF 1 "register_operand" "w")
>> diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
>> index d4ed47249e3e39f8c88274657c809293e20bec9d..082409fe523cee6ae78f02574762b92d47885c42 100644
>> --- a/gcc/config/aarch64/arm_neon.h
>> +++ b/gcc/config/aarch64/arm_neon.h
>> @@ -20393,7 +20393,7 @@ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>> vmla_lane_f32 (float32x2_t __a, float32x2_t __b,
>> float32x2_t __c, const int __lane)
>> {
>> - return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
>> + return __builtin_aarch64_float_mla_lanev2sf (__a, __b, __c, __lane);
>> }
>>
>> __extension__ extern __inline int16x4_t
>> @@ -20477,7 +20477,7 @@ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>> vmlaq_lane_f32 (float32x4_t __a, float32x4_t __b,
>> float32x2_t __c, const int __lane)
>> {
>> - return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
>> + return __builtin_aarch64_float_mla_lanev4sf (__a, __b, __c, __lane);
>> }
>>
>> __extension__ extern __inline int16x8_t
>> @@ -20591,7 +20591,7 @@ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>> vmls_lane_f32 (float32x2_t __a, float32x2_t __b,
>> float32x2_t __c, const int __lane)
>> {
>> - return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
>> + return __builtin_aarch64_float_mls_lanev2sf (__a, __b, __c, __lane);
>> }
>>
>> __extension__ extern __inline int16x4_t
>> @@ -20675,7 +20675,7 @@ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>> vmlsq_lane_f32 (float32x4_t __a, float32x4_t __b,
>> float32x2_t __c, const int __lane)
>> {
>> - return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
>> + return __builtin_aarch64_float_mls_lanev4sf (__a, __b, __c, __lane);
>> }
>>
>> __extension__ extern __inline int16x8_t
>
> diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
> index 2a2fc2076b11a83c1de0b9a7847488df73d312be..8e4b4edc8a46ffba777a42058f06ce7204152824 100644
> --- a/gcc/config/aarch64/aarch64-simd-builtins.def
> +++ b/gcc/config/aarch64/aarch64-simd-builtins.def
> @@ -672,6 +672,8 @@
> BUILTIN_VDQF_DF (TERNOP, float_mls, 0, FP)
> BUILTIN_VDQSF (TERNOP, float_mla_n, 0, FP)
> BUILTIN_VDQSF (TERNOP, float_mls_n, 0, FP)
> + BUILTIN_VDQSF (QUADOP_LANE, float_mla_lane, 0, FP)
> + BUILTIN_VDQSF (QUADOP_LANE, float_mls_lane, 0, FP)
>
> /* Implemented by aarch64_simd_bsl<mode>. */
> BUILTIN_VDQQH (BSL_P, simd_bsl, 0, NONE)
> diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
> index 0f96cd0bd512eb8437b6f16f45618f29e1d1526c..bdee49f74f4725409d33af733bb55be290b3f0e7 100644
> --- a/gcc/config/aarch64/aarch64-simd.md
> +++ b/gcc/config/aarch64/aarch64-simd.md
> @@ -718,18 +718,18 @@
> }
> )
>
> -(define_insn "*aarch64_mul3_elt<mode>"
> +(define_insn "mul_lane<mode>3"
> [(set (match_operand:VMUL 0 "register_operand" "=w")
> - (mult:VMUL
> - (vec_duplicate:VMUL
> - (vec_select:<VEL>
> - (match_operand:VMUL 1 "register_operand" "<h_con>")
> - (parallel [(match_operand:SI 2 "immediate_operand")])))
> - (match_operand:VMUL 3 "register_operand" "w")))]
> + (mult:VMUL
> + (vec_duplicate:VMUL
> + (vec_select:<VEL>
> + (match_operand:VMUL 2 "register_operand" "<h_con>")
> + (parallel [(match_operand:SI 3 "immediate_operand" "i")])))
> + (match_operand:VMUL 1 "register_operand" "w")))]
> "TARGET_SIMD"
> {
> - operands[2] = aarch64_endian_lane_rtx (<MODE>mode, INTVAL (operands[2]));
> - return "<f>mul\\t%0.<Vtype>, %3.<Vtype>, %1.<Vetype>[%2]";
> + operands[3] = aarch64_endian_lane_rtx (<MODE>mode, INTVAL (operands[3]));
> + return "<f>mul\\t%0.<Vtype>, %1.<Vtype>, %2.<Vetype>[%3]";
> }
> [(set_attr "type" "neon<fp>_mul_<stype>_scalar<q>")]
> )
> @@ -2702,6 +2702,46 @@
> }
> )
>
> +(define_expand "aarch64_float_mla_lane<mode>"
> + [(set (match_operand:VDQSF 0 "register_operand")
> + (plus:VDQSF
> + (mult:VDQSF
> + (vec_duplicate:VDQSF
> + (vec_select:<VEL>
> + (match_operand:V2SF 3 "register_operand")
> + (parallel [(match_operand:SI 4 "immediate_operand")])))
> + (match_operand:VDQSF 2 "register_operand"))
> + (match_operand:VDQSF 1 "register_operand")))]
> + "TARGET_SIMD"
> + {
> + rtx scratch = gen_reg_rtx (<MODE>mode);
> + emit_insn (gen_mul_lane<mode>3 (scratch, operands[2],
> + operands[3], operands[4]));
> + emit_insn (gen_add<mode>3 (operands[0], operands[1], scratch));
> + DONE;
> + }
> +)
> +
> +(define_expand "aarch64_float_mls_lane<mode>"
> + [(set (match_operand:VDQSF 0 "register_operand")
> + (minus:VDQSF
> + (match_operand:VDQSF 1 "register_operand")
> + (mult:VDQSF
> + (vec_duplicate:VDQSF
> + (vec_select:<VEL>
> + (match_operand:V2SF 3 "register_operand")
> + (parallel [(match_operand:SI 4 "immediate_operand")])))
> + (match_operand:VDQSF 2 "register_operand"))))]
> + "TARGET_SIMD"
> + {
> + rtx scratch = gen_reg_rtx (<MODE>mode);
> + emit_insn (gen_mul_lane<mode>3 (scratch, operands[2],
> + operands[3], operands[4]));
> + emit_insn (gen_sub<mode>3 (operands[0], operands[1], scratch));
> + DONE;
> + }
> +)
> +
> (define_insn "fma<mode>4"
> [(set (match_operand:VHSDF 0 "register_operand" "=w")
> (fma:VHSDF (match_operand:VHSDF 1 "register_operand" "w")
> diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
> index 0227cadb7e869ee23dddd7abb71f169093f3cd05..5328d447a424fdf4ce1941abf3c1218d4fe8f42a 100644
> --- a/gcc/config/aarch64/arm_neon.h
> +++ b/gcc/config/aarch64/arm_neon.h
> @@ -20378,7 +20378,7 @@ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> vmla_lane_f32 (float32x2_t __a, float32x2_t __b,
> float32x2_t __c, const int __lane)
> {
> - return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
> + return __builtin_aarch64_float_mla_lanev2sf (__a, __b, __c, __lane);
> }
>
> __extension__ extern __inline int16x4_t
> @@ -20462,7 +20462,7 @@ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> vmlaq_lane_f32 (float32x4_t __a, float32x4_t __b,
> float32x2_t __c, const int __lane)
> {
> - return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
> + return __builtin_aarch64_float_mla_lanev4sf (__a, __b, __c, __lane);
> }
>
> __extension__ extern __inline int16x8_t
> @@ -20576,7 +20576,7 @@ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> vmls_lane_f32 (float32x2_t __a, float32x2_t __b,
> float32x2_t __c, const int __lane)
> {
> - return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
> + return __builtin_aarch64_float_mls_lanev2sf (__a, __b, __c, __lane);
> }
>
> __extension__ extern __inline int16x4_t
> @@ -20660,7 +20660,7 @@ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> vmlsq_lane_f32 (float32x4_t __a, float32x4_t __b,
> float32x2_t __c, const int __lane)
> {
> - return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
> + return __builtin_aarch64_float_mls_lanev4sf (__a, __b, __c, __lane);
> }
>
> __extension__ extern __inline int16x8_t