* [PATCH 1/20] aarch64: Use RTL builtin for vmull[_high]_p8 intrinsics
From: Jonathan Wright @ 2021-04-28 13:33 UTC
To: gcc-patches
Hi,
As subject, this patch rewrites the vmull[_high]_p8 Neon intrinsics to use RTL
builtins rather than inline assembly code, allowing for better scheduling and
optimization.
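For context, vmull_p8 performs eight carry-less (polynomial) 8-bit
multiplications, widening each product to 16 bits. A minimal usage
sketch in C (illustrative only, not part of the patch):

#include <arm_neon.h>

poly16x8_t
widen_pmul (poly8x8_t a, poly8x8_t b)
{
  /* Eight polynomial 8x8 -> 16-bit products.  With the intrinsic
     mapping to an RTL builtin rather than an opaque inline asm block,
     the compiler can schedule and combine this like any other insn.  */
  return vmull_p8 (a, b);
}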
Regression tested and bootstrapped on aarch64-none-linux-gnu and
aarch64_be-none-elf - no issues.
Ok for master?
Thanks,
Jonathan
----
gcc/ChangeLog:
2021-02-05 Jonathan Wright <jonathan.wright@arm.com>
* config/aarch64/aarch64-simd-builtins.def: Add pmull[2]
builtin generator macros.
* config/aarch64/aarch64-simd.md (aarch64_pmullv8qi): Define.
(aarch64_pmull_hiv16qi_insn): Define.
(aarch64_pmull_hiv16qi): Define.
* config/aarch64/arm_neon.h (vmull_high_p8): Use RTL builtin
instead of inline asm.
(vmull_p8): Likewise.
diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
index 29a7bbc24a7370fc077ab6c66f3de551f6926b7e..aa4090848baef984124e63497cd4138da113b394 100644
--- a/gcc/config/aarch64/aarch64-simd-builtins.def
+++ b/gcc/config/aarch64/aarch64-simd-builtins.def
@@ -46,6 +46,8 @@
BUILTIN_VDC (COMBINE, combine, 0, AUTO_FP)
VAR1 (COMBINEP, combine, 0, NONE, di)
BUILTIN_VB (BINOP, pmul, 0, NONE)
+ VAR1 (BINOP, pmull, 0, NONE, v8qi)
+ VAR1 (BINOP, pmull_hi, 0, NONE, v16qi)
BUILTIN_VHSDF_HSDF (BINOP, fmulx, 0, FP)
BUILTIN_VHSDF_DF (UNOP, sqrt, 2, FP)
BUILTIN_VD_BHSI (BINOP, addp, 0, NONE)
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index 60eeddcc946daac87869e11cf138d837c7f0ea6f..86cf656e1b5601cb3dc1a7b0558277ed98a17749 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -4401,6 +4401,44 @@
[(set_attr "type" "neon_mul_<Vetype><q>")]
)
+(define_insn "aarch64_pmullv8qi"
+ [(set (match_operand:V8HI 0 "register_operand" "=w")
+ (unspec:V8HI [(match_operand:V8QI 1 "register_operand" "w")
+ (match_operand:V8QI 2 "register_operand" "w")]
+ UNSPEC_PMULL))]
+ "TARGET_SIMD"
+ "pmull\\t%0.8h, %1.8b, %2.8b"
+ [(set_attr "type" "neon_mul_b_long")]
+)
+
+(define_insn "aarch64_pmull_hiv16qi_insn"
+ [(set (match_operand:V8HI 0 "register_operand" "=w")
+ (unspec:V8HI
+ [(vec_select:V8QI
+ (match_operand:V16QI 1 "register_operand" "w")
+ (match_operand:V16QI 3 "vect_par_cnst_hi_half" ""))
+ (vec_select:V8QI
+ (match_operand:V16QI 2 "register_operand" "w")
+ (match_dup 3))]
+ UNSPEC_PMULL2))]
+ "TARGET_SIMD"
+ "pmull2\\t%0.8h, %1.16b, %2.16b"
+ [(set_attr "type" "neon_mul_b_long")]
+)
+
+(define_expand "aarch64_pmull_hiv16qi"
+ [(match_operand:V8HI 0 "register_operand")
+ (match_operand:V16QI 1 "register_operand")
+ (match_operand:V16QI 2 "register_operand")]
+ "TARGET_SIMD"
+ {
+ rtx p = aarch64_simd_vect_par_cnst_half (V16QImode, 16, true);
+ emit_insn (gen_aarch64_pmull_hiv16qi_insn (operands[0], operands[1],
+ operands[2], p));
+ DONE;
+ }
+)
+
;; fmulx.
(define_insn "aarch64_fmulx<mode>"
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index 2d776ef7ef4ed7fad166dd00c4b4eb8bcaf75fc8..9d59e621e4029e3f57ef85649ab452949f15a52c 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -8256,12 +8256,8 @@ __extension__ extern __inline poly16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmull_high_p8 (poly8x16_t __a, poly8x16_t __b)
{
- poly16x8_t __result;
- __asm__ ("pmull2 %0.8h,%1.16b,%2.16b"
- : "=w"(__result)
- : "w"(__a), "w"(__b)
- : /* No clobbers */);
- return __result;
+ return (poly16x8_t) __builtin_aarch64_pmull_hiv16qi ((int8x16_t) __a,
+ (int8x16_t) __b);
}
__extension__ extern __inline int16x8_t
@@ -8394,12 +8390,8 @@ __extension__ extern __inline poly16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmull_p8 (poly8x8_t __a, poly8x8_t __b)
{
- poly16x8_t __result;
- __asm__ ("pmull %0.8h, %1.8b, %2.8b"
- : "=w"(__result)
- : "w"(__a), "w"(__b)
- : /* No clobbers */);
- return __result;
+ return (poly16x8_t) __builtin_aarch64_pmullv8qi ((int8x8_t) __a,
+ (int8x8_t) __b);
}
__extension__ extern __inline int16x8_t
* Re: [PATCH 1/20] aarch64: Use RTL builtin for vmull[_high]_p8 intrinsics
From: Richard Sandiford @ 2021-04-28 14:11 UTC
To: Jonathan Wright via Gcc-patches
Jonathan Wright via Gcc-patches <gcc-patches@gcc.gnu.org> writes:
> Hi,
>
> As subject, this patch rewrites the vmull[_high]_p8 Neon intrinsics to use RTL
> builtins rather than inline assembly code, allowing for better scheduling and
> optimization.
>
> Regression tested and bootstrapped on aarch64-none-linux-gnu and
> aarch64_be-none-elf - no issues.
Thanks for doing this. Mostly LGTM, but one comment about the patterns:
> […]
> +(define_insn "aarch64_pmull_hiv16qi_insn"
> + [(set (match_operand:V8HI 0 "register_operand" "=w")
> + (unspec:V8HI
> + [(vec_select:V8QI
> + (match_operand:V16QI 1 "register_operand" "w")
> + (match_operand:V16QI 3 "vect_par_cnst_hi_half" ""))
> + (vec_select:V8QI
> + (match_operand:V16QI 2 "register_operand" "w")
> + (match_dup 3))]
> + UNSPEC_PMULL2))]
> + "TARGET_SIMD"
> + "pmull2\\t%0.8h, %1.16b, %2.16b"
> + [(set_attr "type" "neon_mul_b_long")]
> +)
As things stand, UNSPEC_PMULL2 has the vec_select “built in”:
(define_insn "aarch64_crypto_pmullv2di"
[(set (match_operand:TI 0 "register_operand" "=w")
(unspec:TI [(match_operand:V2DI 1 "register_operand" "w")
(match_operand:V2DI 2 "register_operand" "w")]
UNSPEC_PMULL2))]
"TARGET_SIMD && TARGET_AES"
"pmull2\\t%0.1q, %1.2d, %2.2d"
[(set_attr "type" "crypto_pmull")]
)
So I think it would be more consistent to do one of the following:
(1) Keep the vec_selects in the new pattern, but use UNSPEC_PMULL
for the operation instead of UNSPEC_PMULL2.
(2) Remove the vec_selects and keep the UNSPEC_PMULL2.
(1) in principle allows more combination opportunities than (2),
although I don't know how likely it is to help in practice.
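As a hypothetical illustration of the kind of combination (1) could
enable (the function name is mine, not from the thread): with the
vec_selects explicit and both patterns using UNSPEC_PMULL, combine can
in principle fold the half-extractions into the pmull2 pattern:

#include <arm_neon.h>

poly16x8_t
mull_high_halves (poly8x16_t a, poly8x16_t b)
{
  /* vget_high_p8 expands to a vec_select of the high half, so this
     pmull of two high-half vec_selects could match the pmull2 insn
     pattern directly, avoiding separate extractions.  */
  return vmull_p8 (vget_high_p8 (a), vget_high_p8 (b));
}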
Thanks,
Richard
* Re: [PATCH 1/20] aarch64: Use RTL builtin for vmull[_high]_p8 intrinsics
From: Jonathan Wright @ 2021-04-30 14:31 UTC
To: Richard Sandiford, Jonathan Wright via Gcc-patches
Thanks for the review; I've updated the patch as per option (1).
Tested and bootstrapped on aarch64-none-linux-gnu with no issues.
Ok for master?
Thanks,
Jonathan
________________________________
From: Richard Sandiford <richard.sandiford@arm.com>
Sent: 28 April 2021 15:11
To: Jonathan Wright via Gcc-patches <gcc-patches@gcc.gnu.org>
Cc: Jonathan Wright <Jonathan.Wright@arm.com>
Subject: Re: [PATCH 1/20] aarch64: Use RTL builtin for vmull[_high]_p8 intrinsics
Jonathan Wright via Gcc-patches <gcc-patches@gcc.gnu.org> writes:
> Hi,
>
> As subject, this patch rewrites the vmull[_high]_p8 Neon intrinsics to use RTL
> builtins rather than inline assembly code, allowing for better scheduling and
> optimization.
>
> Regression tested and bootstrapped on aarch64-none-linux-gnu and
> aarch64_be-none-elf - no issues.
Thanks for doing this. Mostly LGTM, but one comment about the patterns:
> […]
> +(define_insn "aarch64_pmull_hiv16qi_insn"
> + [(set (match_operand:V8HI 0 "register_operand" "=w")
> + (unspec:V8HI
> + [(vec_select:V8QI
> + (match_operand:V16QI 1 "register_operand" "w")
> + (match_operand:V16QI 3 "vect_par_cnst_hi_half" ""))
> + (vec_select:V8QI
> + (match_operand:V16QI 2 "register_operand" "w")
> + (match_dup 3))]
> + UNSPEC_PMULL2))]
> + "TARGET_SIMD"
> + "pmull2\\t%0.8h, %1.16b, %2.16b"
> + [(set_attr "type" "neon_mul_b_long")]
> +)
> As things stand, UNSPEC_PMULL2 has the vec_select “built in”:
(define_insn "aarch64_crypto_pmullv2di"
[(set (match_operand:TI 0 "register_operand" "=w")
(unspec:TI [(match_operand:V2DI 1 "register_operand" "w")
(match_operand:V2DI 2 "register_operand" "w")]
UNSPEC_PMULL2))]
"TARGET_SIMD && TARGET_AES"
"pmull2\\t%0.1q, %1.2d, %2.2d"
[(set_attr "type" "crypto_pmull")]
)
So I think it would be more consistent to do one of the following:
(1) Keep the vec_selects in the new pattern, but use UNSPEC_PMULL
for the operation instead of UNSPEC_PMULL2.
(2) Remove the vec_selects and keep the UNSPEC_PMULL2.
(1) in principle allows more combination opportunities than (2),
although I don't know how likely it is to help in practice.
Thanks,
Richard
diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
index 337ec8d1f108b1a9f8e23ff85fb9a14dea0840c2..5d4c01f32e7e911cc53afb2fa5f0580039f77300 100644
--- a/gcc/config/aarch64/aarch64-simd-builtins.def
+++ b/gcc/config/aarch64/aarch64-simd-builtins.def
@@ -46,6 +46,8 @@
BUILTIN_VDC (COMBINE, combine, 0, AUTO_FP)
VAR1 (COMBINEP, combine, 0, NONE, di)
BUILTIN_VB (BINOP, pmul, 0, NONE)
+ VAR1 (BINOP, pmull, 0, NONE, v8qi)
+ VAR1 (BINOP, pmull_hi, 0, NONE, v16qi)
BUILTIN_VHSDF_HSDF (BINOP, fmulx, 0, FP)
BUILTIN_VHSDF_DF (UNOP, sqrt, 2, FP)
BUILTIN_VDQ_I (BINOP, addp, 0, NONE)
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index fbfed334e97db07157878a6fb06b01faa5c03937..65e63900e075722ebd93e433f3cc1fb449e02c7d 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -4471,6 +4471,44 @@
[(set_attr "type" "neon_mul_<Vetype><q>")]
)
+(define_insn "aarch64_pmullv8qi"
+ [(set (match_operand:V8HI 0 "register_operand" "=w")
+ (unspec:V8HI [(match_operand:V8QI 1 "register_operand" "w")
+ (match_operand:V8QI 2 "register_operand" "w")]
+ UNSPEC_PMULL))]
+ "TARGET_SIMD"
+ "pmull\\t%0.8h, %1.8b, %2.8b"
+ [(set_attr "type" "neon_mul_b_long")]
+)
+
+(define_insn "aarch64_pmull_hiv16qi_insn"
+ [(set (match_operand:V8HI 0 "register_operand" "=w")
+ (unspec:V8HI
+ [(vec_select:V8QI
+ (match_operand:V16QI 1 "register_operand" "w")
+ (match_operand:V16QI 3 "vect_par_cnst_hi_half" ""))
+ (vec_select:V8QI
+ (match_operand:V16QI 2 "register_operand" "w")
+ (match_dup 3))]
+ UNSPEC_PMULL))]
+ "TARGET_SIMD"
+ "pmull2\\t%0.8h, %1.16b, %2.16b"
+ [(set_attr "type" "neon_mul_b_long")]
+)
+
+(define_expand "aarch64_pmull_hiv16qi"
+ [(match_operand:V8HI 0 "register_operand")
+ (match_operand:V16QI 1 "register_operand")
+ (match_operand:V16QI 2 "register_operand")]
+ "TARGET_SIMD"
+ {
+ rtx p = aarch64_simd_vect_par_cnst_half (V16QImode, 16, true);
+ emit_insn (gen_aarch64_pmull_hiv16qi_insn (operands[0], operands[1],
+ operands[2], p));
+ DONE;
+ }
+)
+
;; fmulx.
(define_insn "aarch64_fmulx<mode>"
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index 4b8ec529f19826f28800741014de0c2ccff44e52..bde2d17fbd92f9d2a0ae2f47f2c92c622c365642 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -8228,12 +8228,8 @@ __extension__ extern __inline poly16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmull_high_p8 (poly8x16_t __a, poly8x16_t __b)
{
- poly16x8_t __result;
- __asm__ ("pmull2 %0.8h,%1.16b,%2.16b"
- : "=w"(__result)
- : "w"(__a), "w"(__b)
- : /* No clobbers */);
- return __result;
+ return (poly16x8_t) __builtin_aarch64_pmull_hiv16qi ((int8x16_t) __a,
+ (int8x16_t) __b);
}
__extension__ extern __inline int16x8_t
@@ -8366,12 +8362,8 @@ __extension__ extern __inline poly16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmull_p8 (poly8x8_t __a, poly8x8_t __b)
{
- poly16x8_t __result;
- __asm__ ("pmull %0.8h, %1.8b, %2.8b"
- : "=w"(__result)
- : "w"(__a), "w"(__b)
- : /* No clobbers */);
- return __result;
+ return (poly16x8_t) __builtin_aarch64_pmullv8qi ((int8x8_t) __a,
+ (int8x8_t) __b);
}
__extension__ extern __inline int16x8_t
* Re: [PATCH 1/20] aarch64: Use RTL builtin for vmull[_high]_p8 intrinsics
From: Richard Sandiford @ 2021-04-30 15:38 UTC
To: Jonathan Wright; +Cc: Jonathan Wright via Gcc-patches
Jonathan Wright <Jonathan.Wright@arm.com> writes:
> Thanks for the review, I've updated the patch as per option 1.
>
> Tested and bootstrapped on aarch64-none-linux-gnu with no issues.
>
> Ok for master?
OK, thanks,
Richard
> Thanks,
> Jonathan
> -------------------------------------------------------------------------------
> From: Richard Sandiford <richard.sandiford@arm.com>
> Sent: 28 April 2021 15:11
> To: Jonathan Wright via Gcc-patches <gcc-patches@gcc.gnu.org>
> Cc: Jonathan Wright <Jonathan.Wright@arm.com>
> Subject: Re: [PATCH 1/20] aarch64: Use RTL builtin for vmull[_high]_p8
> intrinsics
>
> Jonathan Wright via Gcc-patches <gcc-patches@gcc.gnu.org> writes:
>> Hi,
>>
>> As subject, this patch rewrites the vmull[_high]_p8 Neon intrinsics to use
> RTL
>> builtins rather than inline assembly code, allowing for better scheduling and
>> optimization.
>>
>> Regression tested and bootstrapped on aarch64-none-linux-gnu and
>> aarch64_be-none-elf - no issues.
>
> Thanks for doing this. Mostly LGTM, but one comment about the patterns:
>
>> […]
>> +(define_insn "aarch64_pmull_hiv16qi_insn"
>> + [(set (match_operand:V8HI 0 "register_operand" "=w")
>> + (unspec:V8HI
>> + [(vec_select:V8QI
>> + (match_operand:V16QI 1 "register_operand" "w")
>> + (match_operand:V16QI 3 "vect_par_cnst_hi_half" ""))
>> + (vec_select:V8QI
>> + (match_operand:V16QI 2 "register_operand" "w")
>> + (match_dup 3))]
>> + UNSPEC_PMULL2))]
>> + "TARGET_SIMD"
>> + "pmull2\\t%0.8h, %1.16b, %2.16b"
>> + [(set_attr "type" "neon_mul_b_long")]
>> +)
>
> As things stand, UNSPEC_PMULL2 has the vec_select “built in”:
>
> (define_insn "aarch64_crypto_pmullv2di"
> [(set (match_operand:TI 0 "register_operand" "=w")
> (unspec:TI [(match_operand:V2DI 1 "register_operand" "w")
> (match_operand:V2DI 2 "register_operand" "w")]
> UNSPEC_PMULL2))]
> "TARGET_SIMD && TARGET_AES"
> "pmull2\\t%0.1q, %1.2d, %2.2d"
> [(set_attr "type" "crypto_pmull")]
> )
>
> So I think it would be more consistent to do one of the following:
>
> (1) Keep the vec_selects in the new pattern, but use UNSPEC_PMULL
> for the operation instead of UNSPEC_PMULL2.
> (2) Remove the vec_selects and keep the UNSPEC_PMULL2.
>
> (1) in principle allows more combination opportunities than (2),
> although I don't know how likely it is to help in practice.
>
> Thanks,
> Richard
>
> diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
> index 337ec8d1f108b1a9f8e23ff85fb9a14dea0840c2..5d4c01f32e7e911cc53afb2fa5f0580039f77300 100644
> --- a/gcc/config/aarch64/aarch64-simd-builtins.def
> +++ b/gcc/config/aarch64/aarch64-simd-builtins.def
> @@ -46,6 +46,8 @@
> BUILTIN_VDC (COMBINE, combine, 0, AUTO_FP)
> VAR1 (COMBINEP, combine, 0, NONE, di)
> BUILTIN_VB (BINOP, pmul, 0, NONE)
> + VAR1 (BINOP, pmull, 0, NONE, v8qi)
> + VAR1 (BINOP, pmull_hi, 0, NONE, v16qi)
> BUILTIN_VHSDF_HSDF (BINOP, fmulx, 0, FP)
> BUILTIN_VHSDF_DF (UNOP, sqrt, 2, FP)
> BUILTIN_VDQ_I (BINOP, addp, 0, NONE)
> diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
> index fbfed334e97db07157878a6fb06b01faa5c03937..65e63900e075722ebd93e433f3cc1fb449e02c7d 100644
> --- a/gcc/config/aarch64/aarch64-simd.md
> +++ b/gcc/config/aarch64/aarch64-simd.md
> @@ -4471,6 +4471,44 @@
> [(set_attr "type" "neon_mul_<Vetype><q>")]
> )
>
> +(define_insn "aarch64_pmullv8qi"
> + [(set (match_operand:V8HI 0 "register_operand" "=w")
> + (unspec:V8HI [(match_operand:V8QI 1 "register_operand" "w")
> + (match_operand:V8QI 2 "register_operand" "w")]
> + UNSPEC_PMULL))]
> + "TARGET_SIMD"
> + "pmull\\t%0.8h, %1.8b, %2.8b"
> + [(set_attr "type" "neon_mul_b_long")]
> +)
> +
> +(define_insn "aarch64_pmull_hiv16qi_insn"
> + [(set (match_operand:V8HI 0 "register_operand" "=w")
> + (unspec:V8HI
> + [(vec_select:V8QI
> + (match_operand:V16QI 1 "register_operand" "w")
> + (match_operand:V16QI 3 "vect_par_cnst_hi_half" ""))
> + (vec_select:V8QI
> + (match_operand:V16QI 2 "register_operand" "w")
> + (match_dup 3))]
> + UNSPEC_PMULL))]
> + "TARGET_SIMD"
> + "pmull2\\t%0.8h, %1.16b, %2.16b"
> + [(set_attr "type" "neon_mul_b_long")]
> +)
> +
> +(define_expand "aarch64_pmull_hiv16qi"
> + [(match_operand:V8HI 0 "register_operand")
> + (match_operand:V16QI 1 "register_operand")
> + (match_operand:V16QI 2 "register_operand")]
> + "TARGET_SIMD"
> + {
> + rtx p = aarch64_simd_vect_par_cnst_half (V16QImode, 16, true);
> + emit_insn (gen_aarch64_pmull_hiv16qi_insn (operands[0], operands[1],
> + operands[2], p));
> + DONE;
> + }
> +)
> +
> ;; fmulx.
>
> (define_insn "aarch64_fmulx<mode>"
> diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
> index 4b8ec529f19826f28800741014de0c2ccff44e52..bde2d17fbd92f9d2a0ae2f47f2c92c622c365642 100644
> --- a/gcc/config/aarch64/arm_neon.h
> +++ b/gcc/config/aarch64/arm_neon.h
> @@ -8228,12 +8228,8 @@ __extension__ extern __inline poly16x8_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> vmull_high_p8 (poly8x16_t __a, poly8x16_t __b)
> {
> - poly16x8_t __result;
> - __asm__ ("pmull2 %0.8h,%1.16b,%2.16b"
> - : "=w"(__result)
> - : "w"(__a), "w"(__b)
> - : /* No clobbers */);
> - return __result;
> + return (poly16x8_t) __builtin_aarch64_pmull_hiv16qi ((int8x16_t) __a,
> + (int8x16_t) __b);
> }
>
> __extension__ extern __inline int16x8_t
> @@ -8366,12 +8362,8 @@ __extension__ extern __inline poly16x8_t
> __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
> vmull_p8 (poly8x8_t __a, poly8x8_t __b)
> {
> - poly16x8_t __result;
> - __asm__ ("pmull %0.8h, %1.8b, %2.8b"
> - : "=w"(__result)
> - : "w"(__a), "w"(__b)
> - : /* No clobbers */);
> - return __result;
> + return (poly16x8_t) __builtin_aarch64_pmullv8qi ((int8x8_t) __a,
> + (int8x8_t) __b);
> }
>
> __extension__ extern __inline int16x8_t