* [PATCH 13/20] aarch64: Use RTL builtins for FP ml[as][q]_laneq intrinsics
From: Jonathan Wright @ 2021-04-28 14:42 UTC
To: gcc-patches
Hi,
As subject, this patch rewrites the floating-point vml[as][q]_laneq Neon
intrinsics to use RTL builtins rather than relying on the GCC vector
extensions. Using RTL builtins gives us control over the emission of
fmla/fmls instructions (which we don't want here).
With this commit, the code generated by these intrinsics changes from
a fused multiply-add/subtract instruction to an fmul followed by an
fadd/fsub instruction. If the programmer really wants fmla/fmls
instructions, they can use the vfm[as] intrinsics.
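For example, with a constant lane index (an illustrative sketch, not
taken from the patch - the exact register allocation will vary):

  #include <arm_neon.h>

  float32x2_t
  foo (float32x2_t acc, float32x2_t x, float32x4_t y)
  {
    return vmla_laneq_f32 (acc, x, y, 0);
  }

  /* Before this patch (contracted):
         fmla    v0.2s, v1.2s, v2.s[0]
     After this patch:
         fmul    v1.2s, v1.2s, v2.s[0]
         fadd    v0.2s, v0.2s, v1.2s  */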
Regression tested and bootstrapped on aarch64-none-linux-gnu - no
issues.
Ok for master?
Thanks,
Jonathan
---
gcc/ChangeLog:
2021-02-17 Jonathan Wright <jonathan.wright@arm.com>
* config/aarch64/aarch64-simd-builtins.def: Add
float_ml[as][q]_laneq builtin generator macros.
* config/aarch64/aarch64-simd.md (mul_laneq<mode>3): Define.
(aarch64_float_mla_laneq<mode>): Define.
(aarch64_float_mls_laneq<mode>): Define.
* config/aarch64/arm_neon.h (vmla_laneq_f32): Use RTL builtin
instead of GCC vector extensions.
(vmlaq_laneq_f32): Likewise.
(vmls_laneq_f32): Likewise.
(vmlsq_laneq_f32): Likewise.
[-- Attachment #2: rb14213.patch --]
[-- Type: application/octet-stream, Size: 4912 bytes --]
diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
index b702493e1351478272bb7d26991a5673943d61ec..52ae398858db1ec506a97376e7ccc1153aa210c5 100644
--- a/gcc/config/aarch64/aarch64-simd-builtins.def
+++ b/gcc/config/aarch64/aarch64-simd-builtins.def
@@ -670,6 +670,8 @@
BUILTIN_VDQSF (TERNOP, float_mls_n, 0, FP)
BUILTIN_VDQSF (QUADOP_LANE, float_mla_lane, 0, FP)
BUILTIN_VDQSF (QUADOP_LANE, float_mls_lane, 0, FP)
+ BUILTIN_VDQSF (QUADOP_LANE, float_mla_laneq, 0, FP)
+ BUILTIN_VDQSF (QUADOP_LANE, float_mls_laneq, 0, FP)
/* Implemented by aarch64_simd_bsl<mode>. */
BUILTIN_VDQQH (BSL_P, simd_bsl, 0, NONE)
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index abc8b1708b86bcee2e5082cc4659a197c5821985..207d644487e77cd66d933dc7860a59e57fee523d 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -2641,6 +2641,22 @@
[(set_attr "type" "neon_fp_mul_s_scalar<q>")]
)
+(define_insn "mul_laneq<mode>3"
+ [(set (match_operand:VDQSF 0 "register_operand" "=w")
+ (mult:VDQSF
+ (vec_duplicate:VDQSF
+ (vec_select:<VEL>
+ (match_operand:V4SF 2 "register_operand" "w")
+ (parallel [(match_operand:SI 3 "immediate_operand" "i")])))
+ (match_operand:VDQSF 1 "register_operand" "w")))]
+ "TARGET_SIMD"
+ {
+ operands[3] = aarch64_endian_lane_rtx (V4SFmode, INTVAL (operands[3]));
+ return "fmul\\t%0.<Vtype>, %1.<Vtype>, %2.<Vetype>[%3]";
+ }
+ [(set_attr "type" "neon_fp_mul_s_scalar<q>")]
+)
+
(define_expand "div<mode>3"
[(set (match_operand:VHSDF 0 "register_operand")
(div:VHSDF (match_operand:VHSDF 1 "register_operand")
@@ -2784,6 +2800,46 @@
}
)
+(define_expand "aarch64_float_mla_laneq<mode>"
+ [(set (match_operand:VDQSF 0 "register_operand")
+ (plus:VDQSF
+ (mult:VDQSF
+ (vec_duplicate:VDQSF
+ (vec_select:<VEL>
+ (match_operand:V4SF 3 "register_operand")
+ (parallel [(match_operand:SI 4 "immediate_operand")])))
+ (match_operand:VDQSF 2 "register_operand"))
+ (match_operand:VDQSF 1 "register_operand")))]
+ "TARGET_SIMD"
+ {
+ rtx scratch = gen_reg_rtx (<MODE>mode);
+ emit_insn (gen_mul_laneq<mode>3 (scratch, operands[2],
+ operands[3], operands[4]));
+ emit_insn (gen_add<mode>3 (operands[0], operands[1], scratch));
+ DONE;
+ }
+)
+
+(define_expand "aarch64_float_mls_laneq<mode>"
+ [(set (match_operand:VDQSF 0 "register_operand")
+ (minus:VDQSF
+ (match_operand:VDQSF 1 "register_operand")
+ (mult:VDQSF
+ (vec_duplicate:VDQSF
+ (vec_select:<VEL>
+ (match_operand:V4SF 3 "register_operand")
+ (parallel [(match_operand:SI 4 "immediate_operand")])))
+ (match_operand:VDQSF 2 "register_operand"))))]
+ "TARGET_SIMD"
+ {
+ rtx scratch = gen_reg_rtx (<MODE>mode);
+ emit_insn (gen_mul_laneq<mode>3 (scratch, operands[2],
+ operands[3], operands[4]));
+ emit_insn (gen_sub<mode>3 (operands[0], operands[1], scratch));
+ DONE;
+ }
+)
+
(define_insn "fma<mode>4"
[(set (match_operand:VHSDF 0 "register_operand" "=w")
(fma:VHSDF (match_operand:VHSDF 1 "register_operand" "w")
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index 082409fe523cee6ae78f02574762b92d47885c42..5ff11e7ea4a9a722c66a37ee65319125313df436 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -20435,7 +20435,7 @@ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmla_laneq_f32 (float32x2_t __a, float32x2_t __b,
float32x4_t __c, const int __lane)
{
- return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
+ return __builtin_aarch64_float_mla_laneqv2sf (__a, __b, __c, __lane);
}
__extension__ extern __inline int16x4_t
@@ -20519,7 +20519,7 @@ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlaq_laneq_f32 (float32x4_t __a, float32x4_t __b,
float32x4_t __c, const int __lane)
{
- return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
+ return __builtin_aarch64_float_mla_laneqv4sf (__a, __b, __c, __lane);
}
__extension__ extern __inline int16x8_t
@@ -20633,7 +20633,7 @@ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmls_laneq_f32 (float32x2_t __a, float32x2_t __b,
float32x4_t __c, const int __lane)
{
- return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
+ return __builtin_aarch64_float_mls_laneqv2sf (__a, __b, __c, __lane);
}
__extension__ extern __inline int16x4_t
@@ -20717,7 +20717,7 @@ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsq_laneq_f32 (float32x4_t __a, float32x4_t __b,
float32x4_t __c, const int __lane)
{
- return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
+ return __builtin_aarch64_float_mls_laneqv4sf (__a, __b, __c, __lane);
}
__extension__ extern __inline int16x8_t
* Re: [PATCH 13/20] aarch64: Use RTL builtins for FP ml[as][q]_laneq intrinsics
From: Jonathan Wright @ 2021-04-30 16:29 UTC
To: gcc-patches, Richard Sandiford
Updated the patch to be more consistent with the others in the series.
Tested and bootstrapped on aarch64-none-linux-gnu - no issues.
Ok for master?
Thanks,
Jonathan
________________________________
From: Gcc-patches <gcc-patches-bounces@gcc.gnu.org> on behalf of Jonathan Wright via Gcc-patches <gcc-patches@gcc.gnu.org>
Sent: 28 April 2021 15:42
To: gcc-patches@gcc.gnu.org
Subject: [PATCH 13/20] aarch64: Use RTL builtins for FP ml[as][q]_laneq intrinsics

[original message quoted in full - trimmed]
[-- Attachment #2: rb14213.patch --]
[-- Type: application/octet-stream, Size: 4935 bytes --]
diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
index 8e4b4edc8a46ffba777a42058f06ce7204152824..1e81bb53287e9797f3539c2c64ed11c6c26d6e4e 100644
--- a/gcc/config/aarch64/aarch64-simd-builtins.def
+++ b/gcc/config/aarch64/aarch64-simd-builtins.def
@@ -674,6 +674,8 @@
BUILTIN_VDQSF (TERNOP, float_mls_n, 0, FP)
BUILTIN_VDQSF (QUADOP_LANE, float_mla_lane, 0, FP)
BUILTIN_VDQSF (QUADOP_LANE, float_mls_lane, 0, FP)
+ BUILTIN_VDQSF (QUADOP_LANE, float_mla_laneq, 0, FP)
+ BUILTIN_VDQSF (QUADOP_LANE, float_mls_laneq, 0, FP)
/* Implemented by aarch64_simd_bsl<mode>. */
BUILTIN_VDQQH (BSL_P, simd_bsl, 0, NONE)
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index bdee49f74f4725409d33af733bb55be290b3f0e7..234762960bd6df057394f753072ef65a6628a43d 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -734,6 +734,22 @@
[(set_attr "type" "neon<fp>_mul_<stype>_scalar<q>")]
)
+(define_insn "mul_laneq<mode>3"
+ [(set (match_operand:VDQSF 0 "register_operand" "=w")
+ (mult:VDQSF
+ (vec_duplicate:VDQSF
+ (vec_select:<VEL>
+ (match_operand:V4SF 2 "register_operand" "w")
+ (parallel [(match_operand:SI 3 "immediate_operand" "i")])))
+ (match_operand:VDQSF 1 "register_operand" "w")))]
+ "TARGET_SIMD"
+ {
+ operands[3] = aarch64_endian_lane_rtx (V4SFmode, INTVAL (operands[3]));
+ return "fmul\\t%0.<Vtype>, %1.<Vtype>, %2.<Vetype>[%3]";
+ }
+ [(set_attr "type" "neon_fp_mul_s_scalar<q>")]
+)
+
(define_insn "*aarch64_mul3_elt_<vswap_width_name><mode>"
[(set (match_operand:VMUL_CHANGE_NLANES 0 "register_operand" "=w")
(mult:VMUL_CHANGE_NLANES
@@ -2742,6 +2758,46 @@
}
)
+(define_expand "aarch64_float_mla_laneq<mode>"
+ [(set (match_operand:VDQSF 0 "register_operand")
+ (plus:VDQSF
+ (mult:VDQSF
+ (vec_duplicate:VDQSF
+ (vec_select:<VEL>
+ (match_operand:V4SF 3 "register_operand")
+ (parallel [(match_operand:SI 4 "immediate_operand")])))
+ (match_operand:VDQSF 2 "register_operand"))
+ (match_operand:VDQSF 1 "register_operand")))]
+ "TARGET_SIMD"
+ {
+ rtx scratch = gen_reg_rtx (<MODE>mode);
+ emit_insn (gen_mul_laneq<mode>3 (scratch, operands[2],
+ operands[3], operands[4]));
+ emit_insn (gen_add<mode>3 (operands[0], operands[1], scratch));
+ DONE;
+ }
+)
+
+(define_expand "aarch64_float_mls_laneq<mode>"
+ [(set (match_operand:VDQSF 0 "register_operand")
+ (minus:VDQSF
+ (match_operand:VDQSF 1 "register_operand")
+ (mult:VDQSF
+ (vec_duplicate:VDQSF
+ (vec_select:<VEL>
+ (match_operand:V4SF 3 "register_operand")
+ (parallel [(match_operand:SI 4 "immediate_operand")])))
+ (match_operand:VDQSF 2 "register_operand"))))]
+ "TARGET_SIMD"
+ {
+ rtx scratch = gen_reg_rtx (<MODE>mode);
+ emit_insn (gen_mul_laneq<mode>3 (scratch, operands[2],
+ operands[3], operands[4]));
+ emit_insn (gen_sub<mode>3 (operands[0], operands[1], scratch));
+ DONE;
+ }
+)
+
(define_insn "fma<mode>4"
[(set (match_operand:VHSDF 0 "register_operand" "=w")
(fma:VHSDF (match_operand:VHSDF 1 "register_operand" "w")
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index 5328d447a424fdf4ce1941abf3c1218d4fe8f42a..17e059efb80fa86a8a32127ace4fc7f43e2040a8 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -20420,7 +20420,7 @@ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmla_laneq_f32 (float32x2_t __a, float32x2_t __b,
float32x4_t __c, const int __lane)
{
- return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
+ return __builtin_aarch64_float_mla_laneqv2sf (__a, __b, __c, __lane);
}
__extension__ extern __inline int16x4_t
@@ -20504,7 +20504,7 @@ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlaq_laneq_f32 (float32x4_t __a, float32x4_t __b,
float32x4_t __c, const int __lane)
{
- return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
+ return __builtin_aarch64_float_mla_laneqv4sf (__a, __b, __c, __lane);
}
__extension__ extern __inline int16x8_t
@@ -20618,7 +20618,7 @@ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmls_laneq_f32 (float32x2_t __a, float32x2_t __b,
float32x4_t __c, const int __lane)
{
- return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
+ return __builtin_aarch64_float_mls_laneqv2sf (__a, __b, __c, __lane);
}
__extension__ extern __inline int16x4_t
@@ -20702,7 +20702,7 @@ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsq_laneq_f32 (float32x4_t __a, float32x4_t __b,
float32x4_t __c, const int __lane)
{
- return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
+ return __builtin_aarch64_float_mls_laneqv4sf (__a, __b, __c, __lane);
}
__extension__ extern __inline int16x8_t
* Re: [PATCH 13/20] aarch64: Use RTL builtins for FP ml[as][q]_laneq intrinsics
From: Richard Sandiford @ 2021-04-30 16:38 UTC
To: Jonathan Wright; +Cc: gcc-patches
Jonathan Wright <Jonathan.Wright@arm.com> writes:
> Updated the patch to be more consistent with the others in the series.
>
> Tested and bootstrapped on aarch64-none-linux-gnu - no issues.
>
> Ok for master?
OK, thanks.
Richard
> [remainder of the message and the v2 patch quoted in full above - trimmed]
* Re: [PATCH 13/20] aarch64: Use RTL builtins for FP ml[as][q]_laneq intrinsics
From: Richard Sandiford @ 2021-04-30 18:18 UTC
To: Jonathan Wright; +Cc: gcc-patches
Richard Sandiford via Gcc-patches <gcc-patches@gcc.gnu.org> writes:
> Jonathan Wright <Jonathan.Wright@arm.com> writes:
>> diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
>> index bdee49f74f4725409d33af733bb55be290b3f0e7..234762960bd6df057394f753072ef65a6628a43d 100644
>> --- a/gcc/config/aarch64/aarch64-simd.md
>> +++ b/gcc/config/aarch64/aarch64-simd.md
>> @@ -734,6 +734,22 @@
>> [(set_attr "type" "neon<fp>_mul_<stype>_scalar<q>")]
>> )
>>
>> +(define_insn "mul_laneq<mode>3"
>> + [(set (match_operand:VDQSF 0 "register_operand" "=w")
>> + (mult:VDQSF
>> + (vec_duplicate:VDQSF
>> + (vec_select:<VEL>
>> + (match_operand:V4SF 2 "register_operand" "w")
>> + (parallel [(match_operand:SI 3 "immediate_operand" "i")])))
>> + (match_operand:VDQSF 1 "register_operand" "w")))]
>> + "TARGET_SIMD"
>> + {
>> + operands[3] = aarch64_endian_lane_rtx (V4SFmode, INTVAL (operands[3]));
>> + return "fmul\\t%0.<Vtype>, %1.<Vtype>, %2.<Vetype>[%3]";
>> + }
>> + [(set_attr "type" "neon_fp_mul_s_scalar<q>")]
>> +)
>> +
Oops, sorry, I just realised that this pattern does already exist as:
(define_insn "*aarch64_mul3_elt<mode>"
[(set (match_operand:VMUL 0 "register_operand" "=w")
(mult:VMUL
(vec_duplicate:VMUL
(vec_select:<VEL>
(match_operand:VMUL 1 "register_operand" "<h_con>")
(parallel [(match_operand:SI 2 "immediate_operand")])))
(match_operand:VMUL 3 "register_operand" "w")))]
"TARGET_SIMD"
{
operands[2] = aarch64_endian_lane_rtx (<MODE>mode, INTVAL (operands[2]));
return "<f>mul\\t%0.<Vtype>, %3.<Vtype>, %1.<Vetype>[%2]";
}
[(set_attr "type" "neon<fp>_mul_<stype>_scalar<q>")]
)
Thanks,
Richard
* Re: [PATCH 13/20] aarch64: Use RTL builtins for FP ml[as][q]_laneq intrinsics
From: Jonathan Wright @ 2021-05-04 12:36 UTC
To: Richard Sandiford; +Cc: gcc-patches
Hi Richard,
I think you may be referencing an older checkout as we refactored this
pattern in a previous change to:
(define_insn "mul_lane<mode>3"
[(set (match_operand:VMUL 0 "register_operand" "=w")
(mult:VMUL
(vec_duplicate:VMUL
(vec_select:<VEL>
(match_operand:VMUL 2 "register_operand" "<h_con>")
(parallel [(match_operand:SI 3 "immediate_operand" "i")])))
(match_operand:VMUL 1 "register_operand" "w")))]
"TARGET_SIMD"
{
operands[3] = aarch64_endian_lane_rtx (<MODE>mode, INTVAL (operands[3]));
return "<f>mul\\t%0.<Vtype>, %1.<Vtype>, %2.<Vetype>[%3]";
}
[(set_attr "type" "neon<fp>_mul_<stype>_scalar<q>")]
)
which doesn't help us with the 'laneq' intrinsics, where the machine
mode of operands 0 and 1 is narrower than the machine mode of the
indexed operand 2.
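For example, the V2SF variant has this signature in arm_neon.h (shown
here without the body):

  float32x2_t vmla_laneq_f32 (float32x2_t __a, float32x2_t __b,
                              float32x4_t __c, const int __lane);

__a and __b are V2SF values while the indexed operand __c is V4SF, so a
single-mode iterator such as VMUL cannot describe the pattern.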
Thanks,
Jonathan
________________________________
From: Richard Sandiford <rdsandiford@googlemail.com>
Sent: 30 April 2021 19:18

[message quoted in full above - trimmed]
* Re: [PATCH 13/20] aarch64: Use RTL builtins for FP ml[as][q]_laneq intrinsics
From: Richard Sandiford @ 2021-05-04 16:40 UTC
To: Jonathan Wright via Gcc-patches
Jonathan Wright via Gcc-patches <gcc-patches@gcc.gnu.org> writes:
> Hi Richard,
>
> I think you may be referencing an older checkout as we refactored this
> pattern in a previous change to:
>
> [mul_lane<mode>3 pattern quoted above - trimmed]
>
> which doesn't help us with the 'laneq' intrinsics, where the machine
> mode of operands 0 and 1 is narrower than the machine mode of the
> indexed operand 2.
Gah, I copied the wrong one, sorry. The one I meant was:
(define_insn "*aarch64_mul3_elt_<vswap_width_name><mode>"
[(set (match_operand:VMUL_CHANGE_NLANES 0 "register_operand" "=w")
(mult:VMUL_CHANGE_NLANES
(vec_duplicate:VMUL_CHANGE_NLANES
(vec_select:<VEL>
(match_operand:<VSWAP_WIDTH> 1 "register_operand" "<h_con>")
(parallel [(match_operand:SI 2 "immediate_operand")])))
(match_operand:VMUL_CHANGE_NLANES 3 "register_operand" "w")))]
"TARGET_SIMD"
{
operands[2] = aarch64_endian_lane_rtx (<VSWAP_WIDTH>mode, INTVAL (operands[2]));
return "<f>mul\\t%0.<Vtype>, %3.<Vtype>, %1.<Vetype>[%2]";
}
[(set_attr "type" "neon<fp>_mul_<Vetype>_scalar<q>")]
)
This already provides patterns in which the indexed operand is
wider than the other operands.
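Concretely, instantiating the iterators for the V2SF case (my reading of
the mode attributes: <VSWAP_WIDTH> maps V2SF to V4SF and <VEL> to SF):

  (set (match_operand:V2SF 0 "register_operand" "=w")
       (mult:V2SF
         (vec_duplicate:V2SF
           (vec_select:SF
             (match_operand:V4SF 1 "register_operand")  ; wider indexed operand
             (parallel [(match_operand:SI 2 "immediate_operand")])))
         (match_operand:V2SF 3 "register_operand" "w")))

which has the same shape as the laneq case the new mul_laneq<mode>3
pattern was added to handle.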
Thanks,
Richard