public inbox for gcc-patches@gcc.gnu.org
* [PATCH 4/20] aarch64: Use RTL builtins for [su]paddl[q] intrinsics
@ 2021-04-28 13:51 Jonathan Wright
  2021-04-28 14:30 ` Richard Sandiford
  0 siblings, 1 reply; 2+ messages in thread
From: Jonathan Wright @ 2021-04-28 13:51 UTC (permalink / raw)
  To: gcc-patches

[-- Attachment #1: Type: text/plain, Size: 1002 bytes --]

Hi,

As subject, this patch rewrites the [su]paddl[q] Neon intrinsics to use
RTL builtins rather than inline assembly code, allowing for better
scheduling and optimization.
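
For reference, these intrinsics perform a pairwise add-long: each pair
of adjacent lanes is summed into a single lane of twice the width.  A
minimal usage sketch (not part of the patch; the wrapper function is
illustrative):

  #include <arm_neon.h>

  /* vpaddl_s8: eight int8 lanes in, four widened int16 lanes out,
     mapping to a single SADDLP instruction.  */
  int16x4_t
  sum_adjacent_pairs (int8x8_t a)
  {
    /* {1, 2, 3, 4, 5, 6, 7, 8} -> {3, 7, 11, 15}.  */
    return vpaddl_s8 (a);
  }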

Regression tested and bootstrapped on aarch64-none-linux-gnu - no
issues.

Ok for master?

Thanks,
Jonathan

---

gcc/ChangeLog:

2021-02-08  Jonathan Wright  <jonathan.wright@arm.com>

	* config/aarch64/aarch64-simd-builtins.def: Add [su]addlp
	builtin generator macros.
	* config/aarch64/aarch64-simd.md (aarch64_<su>addlp<mode>):
	Define.
	* config/aarch64/arm_neon.h (vpaddl_s8): Use RTL builtin
	instead of inline asm.
	(vpaddl_s16): Likewise.
	(vpaddl_s32): Likewise.
	(vpaddl_u8): Likewise.
	(vpaddl_u16): Likewise.
	(vpaddl_u32): Likewise.
	(vpaddlq_s8): Likewise.
	(vpaddlq_s16): Likewise.
	(vpaddlq_s32): Likewise.
	(vpaddlq_u8): Likewise.
	(vpaddlq_u16): Likewise.
	(vpaddlq_u32): Likewise.
	* config/aarch64/iterators.md: Define [SU]ADDLP unspecs with
	appropriate attributes.

[-- Attachment #2: rb14137.patch --]
[-- Type: application/octet-stream, Size: 7358 bytes --]

diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
index dc7b5d22b654d5bcca0152907c1c6967755e9548..3430f627d09a99470dd9480c517be8a41c96ddf1 100644
--- a/gcc/config/aarch64/aarch64-simd-builtins.def
+++ b/gcc/config/aarch64/aarch64-simd-builtins.def
@@ -156,6 +156,10 @@
   BUILTIN_VDQ_BHSI (BINOP, srhadd, 0, NONE)
   BUILTIN_VDQ_BHSI (BINOP, urhadd, 0, NONE)
 
+  /* Implemented by aarch64_<su>addlp<mode>.  */
+  BUILTIN_VDQV_L (UNOP, saddlp, 0, NONE)
+  BUILTIN_VDQV_L (UNOPU, uaddlp, 0, NONE)
+
   /* Implemented by aarch64_<su>addlv<mode>.  */
   BUILTIN_VDQV_L (UNOP, saddlv, 0, NONE)
   BUILTIN_VDQV_L (UNOPU, uaddlv, 0, NONE)
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index 6fc472c19493d6d10fb1c5d0686e519d53973692..dd97253f4f393ca37e608f391949a568729d452c 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -3164,6 +3164,15 @@
   [(set_attr "type" "neon_reduc_add<q>")]
 )
 
+(define_insn "aarch64_<su>addlp<mode>"
+ [(set (match_operand:<VDBLW> 0 "register_operand" "=w")
+       (unspec:<VDBLW> [(match_operand:VDQV_L 1 "register_operand" "w")]
+		    USADDLP))]
+ "TARGET_SIMD"
+ "<su>addlp\\t%0.<Vwhalf>, %1.<Vtype>"
+  [(set_attr "type" "neon_reduc_add<q>")]
+)
+
 ;; ADDV with result zero-extended to SI/DImode (for popcount).
 (define_insn "aarch64_zero_extend<GPI:mode>_reduc_plus_<VDQV_E:mode>"
  [(set (match_operand:GPI 0 "register_operand" "=w")
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index b8de77bcc02dfddf73980442919ec1990e28ee72..703070cd257e46bd041a64d49f1d64da321285ff 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -8521,144 +8521,84 @@ __extension__ extern __inline int16x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vpaddl_s8 (int8x8_t __a)
 {
-  int16x4_t __result;
-  __asm__ ("saddlp %0.4h,%1.8b"
-           : "=w"(__result)
-           : "w"(__a)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_saddlpv8qi (__a);
 }
 
 __extension__ extern __inline int32x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vpaddl_s16 (int16x4_t __a)
 {
-  int32x2_t __result;
-  __asm__ ("saddlp %0.2s,%1.4h"
-           : "=w"(__result)
-           : "w"(__a)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_saddlpv4hi (__a);
 }
 
 __extension__ extern __inline int64x1_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vpaddl_s32 (int32x2_t __a)
 {
-  int64x1_t __result;
-  __asm__ ("saddlp %0.1d,%1.2s"
-           : "=w"(__result)
-           : "w"(__a)
-           : /* No clobbers */);
-  return __result;
+  return (int64x1_t) __builtin_aarch64_saddlpv2si (__a);
 }
 
 __extension__ extern __inline uint16x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vpaddl_u8 (uint8x8_t __a)
 {
-  uint16x4_t __result;
-  __asm__ ("uaddlp %0.4h,%1.8b"
-           : "=w"(__result)
-           : "w"(__a)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_uaddlpv8qi_uu (__a);
 }
 
 __extension__ extern __inline uint32x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vpaddl_u16 (uint16x4_t __a)
 {
-  uint32x2_t __result;
-  __asm__ ("uaddlp %0.2s,%1.4h"
-           : "=w"(__result)
-           : "w"(__a)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_uaddlpv4hi_uu (__a);
 }
 
 __extension__ extern __inline uint64x1_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vpaddl_u32 (uint32x2_t __a)
 {
-  uint64x1_t __result;
-  __asm__ ("uaddlp %0.1d,%1.2s"
-           : "=w"(__result)
-           : "w"(__a)
-           : /* No clobbers */);
-  return __result;
+  return (uint64x1_t) __builtin_aarch64_uaddlpv2si_uu (__a);
 }
 
 __extension__ extern __inline int16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vpaddlq_s8 (int8x16_t __a)
 {
-  int16x8_t __result;
-  __asm__ ("saddlp %0.8h,%1.16b"
-           : "=w"(__result)
-           : "w"(__a)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_saddlpv16qi (__a);
 }
 
 __extension__ extern __inline int32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vpaddlq_s16 (int16x8_t __a)
 {
-  int32x4_t __result;
-  __asm__ ("saddlp %0.4s,%1.8h"
-           : "=w"(__result)
-           : "w"(__a)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_saddlpv8hi (__a);
 }
 
 __extension__ extern __inline int64x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vpaddlq_s32 (int32x4_t __a)
 {
-  int64x2_t __result;
-  __asm__ ("saddlp %0.2d,%1.4s"
-           : "=w"(__result)
-           : "w"(__a)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_saddlpv4si (__a);
 }
 
 __extension__ extern __inline uint16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vpaddlq_u8 (uint8x16_t __a)
 {
-  uint16x8_t __result;
-  __asm__ ("uaddlp %0.8h,%1.16b"
-           : "=w"(__result)
-           : "w"(__a)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_uaddlpv16qi_uu (__a);
 }
 
 __extension__ extern __inline uint32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vpaddlq_u16 (uint16x8_t __a)
 {
-  uint32x4_t __result;
-  __asm__ ("uaddlp %0.4s,%1.8h"
-           : "=w"(__result)
-           : "w"(__a)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_uaddlpv8hi_uu (__a);
 }
 
 __extension__ extern __inline uint64x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vpaddlq_u32 (uint32x4_t __a)
 {
-  uint64x2_t __result;
-  __asm__ ("uaddlp %0.2d,%1.4s"
-           : "=w"(__result)
-           : "w"(__a)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_uaddlpv4si_uu (__a);
 }
 
 __extension__ extern __inline int8x16_t
diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md
index fb6e228651eae6a2db8c1ac755885ae7ad9225d6..20ef49f011510349da5e8c6330a32cc4c5c92a4c 100644
--- a/gcc/config/aarch64/iterators.md
+++ b/gcc/config/aarch64/iterators.md
@@ -553,6 +553,8 @@
     UNSPEC_SSHLL	; Used in aarch64-simd.md.
     UNSPEC_USHLL	; Used in aarch64-simd.md.
     UNSPEC_ADDP		; Used in aarch64-simd.md.
+    UNSPEC_SADDLP	; Used in aarch64-simd.md.
+    UNSPEC_UADDLP	; Used in aarch64-simd.md.
     UNSPEC_TBL		; Used in vector permute patterns.
     UNSPEC_TBX		; Used in vector permute patterns.
     UNSPEC_CONCAT	; Used in vector permute patterns.
@@ -2210,6 +2212,8 @@
 
 (define_int_iterator SVE_INT_ADDV [UNSPEC_SADDV UNSPEC_UADDV])
 
+(define_int_iterator USADDLP [UNSPEC_SADDLP UNSPEC_UADDLP])
+
 (define_int_iterator USADDLV [UNSPEC_SADDLV UNSPEC_UADDLV])
 
 (define_int_iterator LOGICALF [UNSPEC_ANDF UNSPEC_IORF UNSPEC_XORF])
@@ -2962,6 +2966,8 @@
 ;; "s" for signed operations and "u" for unsigned ones.
 (define_int_attr su [(UNSPEC_SADDV "s")
 		     (UNSPEC_UADDV "u")
+		     (UNSPEC_SADDLP "s")
+		     (UNSPEC_UADDLP "u")
 		     (UNSPEC_SADDLV "s")
 		     (UNSPEC_UADDLV "u")
 		     (UNSPEC_UNPACKSHI "s")


* Re: [PATCH 4/20] aarch64: Use RTL builtins for [su]paddl[q] intrinsics
  2021-04-28 13:51 [PATCH 4/20] aarch64: Use RTL builtins for [su]paddl[q] intrinsics Jonathan Wright
@ 2021-04-28 14:30 ` Richard Sandiford
  0 siblings, 0 replies; 2+ messages in thread
From: Richard Sandiford @ 2021-04-28 14:30 UTC (permalink / raw)
  To: Jonathan Wright via Gcc-patches

Jonathan Wright via Gcc-patches <gcc-patches@gcc.gnu.org> writes:
> Hi,
>
> As subject, this patch rewrites the [su]paddl[q] Neon intrinsics to use
> RTL builtins rather than inline assembly code, allowing for better
> scheduling and optimization.
>
> Regression tested and bootstrapped on aarch64-none-linux-gnu - no
> issues.
>
> Ok for master?

OK, thanks.  For the record…

>  __extension__ extern __inline uint64x1_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vpaddl_u32 (uint32x2_t __a)
>  {
> -  uint64x1_t __result;
> -  __asm__ ("uaddlp %0.1d,%1.2s"
> -           : "=w"(__result)
> -           : "w"(__a)
> -           : /* No clobbers */);
> -  return __result;
> +  return (uint64x1_t) __builtin_aarch64_uaddlpv2si_uu (__a);
>  }

…I wasn't sure for this whether it would be better to use (uint64x1_t) {…}
instead of a scalar-to-vector conversion, since that seems to be the more
common style in the rest of arm_neon.h.  But there are already instances
of this kind of conversion too, and if anything it should be more
efficient than creating a distinct vector object.
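
For comparison, the two styles side by side (an illustrative sketch
assuming the builtin added by this patch; the wrapper names are made
up):

  #include <arm_neon.h>

  /* The patch's style: the builtin returns a scalar DImode value,
     which is converted directly to the single-lane vector type.  */
  uint64x1_t
  via_conversion (uint32x2_t a)
  {
    return (uint64x1_t) __builtin_aarch64_uaddlpv2si_uu (a);
  }

  /* The alternative: wrap the scalar result in a one-element
     compound literal of the vector type.  */
  uint64x1_t
  via_literal (uint32x2_t a)
  {
    return (uint64x1_t) {__builtin_aarch64_uaddlpv2si_uu (a)};
  }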

Richard
