public inbox for gcc-patches@gcc.gnu.org
* [PATCH 9/20] aarch64: Use RTL builtins for v[q]tbx intrinsics
@ 2021-04-28 14:24 Jonathan Wright
From: Jonathan Wright @ 2021-04-28 14:24 UTC (permalink / raw)
  To: gcc-patches

[-- Attachment #1: Type: text/plain, Size: 809 bytes --]

Hi,

As subject, this patch rewrites the v[q]tbx Neon intrinsics to use RTL
builtins rather than inline assembly code, allowing for better scheduling
and optimization.

Regression tested and bootstrapped on aarch64-none-linux-gnu - no
issues.

Ok for master?

Thanks,
Jonathan

---

gcc/ChangeLog:

2021-02-12  Jonathan Wright  <jonathan.wright@arm.com>

	* config/aarch64/aarch64-simd-builtins.def: Add tbx1 builtin
	generator macros.
	* config/aarch64/aarch64-simd.md (aarch64_tbx1<mode>):
	Define.
	* config/aarch64/arm_neon.h (vqtbx1_s8): Use RTL builtin
	instead of inline asm.
	(vqtbx1_u8): Likewise.
	(vqtbx1_p8): Likewise.
	(vqtbx1q_s8): Likewise.
	(vqtbx1q_u8): Likewise.
	(vqtbx1q_p8): Likewise.
	(vtbx2_s8): Likewise.
	(vtbx2_u8): Likewise.
	(vtbx2_p8): Likewise.

[-- Attachment #2: rb14188.patch --]
[-- Type: application/octet-stream, Size: 6075 bytes --]

diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
index a5cfb6754456a1e8f0fca57c68b009a53e09789e..0f44ed84ff9d08d808b1b2dfe528db5208b134f5 100644
--- a/gcc/config/aarch64/aarch64-simd-builtins.def
+++ b/gcc/config/aarch64/aarch64-simd-builtins.def
@@ -710,6 +710,10 @@
   VAR1 (BINOP, qtbl4, 0, NONE, v8qi)
   VAR1 (BINOP, qtbl4, 0, NONE, v16qi)
 
+  /* Implemented by aarch64_tbx1<mode>.  */
+  VAR2 (TERNOP, tbx1, 0, NONE, v8qi, v16qi)
+  VAR2 (TERNOPU, tbx1, 0, NONE, v8qi, v16qi)
+
   /* Implemented by aarch64_tbx4<mode>.  */
   VAR1 (TERNOP, tbx4, 0, NONE, v8qi)
   VAR1 (TERNOP, tbx4, 0, NONE, v16qi)
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index 939c281d533261bb84dc451586da707953018fb8..5f701dd2775290156634ef8c6feccecd359e9ec9 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -6905,6 +6905,17 @@
   [(set_attr "type" "neon_tbl1<q>")]
 )
 
+(define_insn "aarch64_tbx1<mode>"
+  [(set (match_operand:VB 0 "register_operand" "=w")
+	(unspec:VB [(match_operand:VB 1 "register_operand" "0")
+		    (match_operand:V16QI 2 "register_operand" "w")
+		    (match_operand:VB 3 "register_operand" "w")]
+		   UNSPEC_TBX))]
+  "TARGET_SIMD"
+  "tbx\\t%0.<Vtype>, {%2.16b}, %3.<Vtype>"
+  [(set_attr "type" "neon_tbl1<q>")]
+)
+
 ;; Two source registers.
 
 (define_insn "aarch64_tbl2v16qi"
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index 46f919fb254b98f887db4748d3b410b7d18e8a4e..1c48c166b5b9aaf052761f95121c26845221dae9 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -9617,72 +9617,46 @@ __extension__ extern __inline int8x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vqtbx1_s8 (int8x8_t __r, int8x16_t __tab, uint8x8_t __idx)
 {
-  int8x8_t __result = __r;
-  __asm__ ("tbx %0.8b,{%1.16b},%2.8b"
-           : "+w"(__result)
-           : "w"(__tab), "w"(__idx)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_tbx1v8qi (__r, __tab, (int8x8_t) __idx);
 }
 
 __extension__ extern __inline uint8x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vqtbx1_u8 (uint8x8_t __r, uint8x16_t __tab, uint8x8_t __idx)
 {
-  uint8x8_t __result = __r;
-  __asm__ ("tbx %0.8b,{%1.16b},%2.8b"
-           : "+w"(__result)
-           : "w"(__tab), "w"(__idx)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_tbx1v8qi_uuuu (__r, __tab, __idx);
 }
 
 __extension__ extern __inline poly8x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vqtbx1_p8 (poly8x8_t __r, poly8x16_t __tab, uint8x8_t __idx)
 {
-  poly8x8_t __result = __r;
-  __asm__ ("tbx %0.8b,{%1.16b},%2.8b"
-           : "+w"(__result)
-           : "w"(__tab), "w"(__idx)
-           : /* No clobbers */);
-  return __result;
+  return (poly8x8_t) __builtin_aarch64_tbx1v8qi ((int8x8_t) __r,
+						 (int8x16_t) __tab,
+						 (int8x8_t) __idx);
 }
 
 __extension__ extern __inline int8x16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vqtbx1q_s8 (int8x16_t __r, int8x16_t __tab, uint8x16_t __idx)
 {
-  int8x16_t __result = __r;
-  __asm__ ("tbx %0.16b,{%1.16b},%2.16b"
-           : "+w"(__result)
-           : "w"(__tab), "w"(__idx)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_tbx1v16qi (__r, __tab, (int8x16_t) __idx);
 }
 
 __extension__ extern __inline uint8x16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vqtbx1q_u8 (uint8x16_t __r, uint8x16_t __tab, uint8x16_t __idx)
 {
-  uint8x16_t __result = __r;
-  __asm__ ("tbx %0.16b,{%1.16b},%2.16b"
-           : "+w"(__result)
-           : "w"(__tab), "w"(__idx)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_tbx1v16qi_uuuu (__r, __tab, __idx);
 }
 
 __extension__ extern __inline poly8x16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vqtbx1q_p8 (poly8x16_t __r, poly8x16_t __tab, uint8x16_t __idx)
 {
-  poly8x16_t __result = __r;
-  __asm__ ("tbx %0.16b,{%1.16b},%2.16b"
-           : "+w"(__result)
-           : "w"(__tab), "w"(__idx)
-           : /* No clobbers */);
-  return __result;
+  return (poly8x16_t) __builtin_aarch64_tbx1v16qi ((int8x16_t) __r,
+						   (int8x16_t) __tab,
+						   (int8x16_t) __idx);
 }
 
 /* V7 legacy table intrinsics.  */
@@ -9846,39 +9820,26 @@ __extension__ extern __inline int8x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vtbx2_s8 (int8x8_t __r, int8x8x2_t __tab, int8x8_t __idx)
 {
-  int8x8_t __result = __r;
   int8x16_t __temp = vcombine_s8 (__tab.val[0], __tab.val[1]);
-  __asm__ ("tbx %0.8b, {%1.16b}, %2.8b"
-           : "+w"(__result)
-           : "w"(__temp), "w"(__idx)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_tbx1v8qi (__r, __temp, __idx);
 }
 
 __extension__ extern __inline uint8x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vtbx2_u8 (uint8x8_t __r, uint8x8x2_t __tab, uint8x8_t __idx)
 {
-  uint8x8_t __result = __r;
   uint8x16_t __temp = vcombine_u8 (__tab.val[0], __tab.val[1]);
-  __asm__ ("tbx %0.8b, {%1.16b}, %2.8b"
-           : "+w"(__result)
-           : "w"(__temp), "w"(__idx)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_tbx1v8qi_uuuu (__r, __temp, __idx);
 }
 
 __extension__ extern __inline poly8x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vtbx2_p8 (poly8x8_t __r, poly8x8x2_t __tab, uint8x8_t __idx)
 {
-  poly8x8_t __result = __r;
   poly8x16_t __temp = vcombine_p8 (__tab.val[0], __tab.val[1]);
-  __asm__ ("tbx %0.8b, {%1.16b}, %2.8b"
-           : "+w"(__result)
-           : "w"(__temp), "w"(__idx)
-           : /* No clobbers */);
-  return __result;
+  return (poly8x8_t) __builtin_aarch64_tbx1v8qi ((int8x8_t) __r,
+						 (int8x16_t) __temp,
+						 (int8x8_t) __idx);
 }
 
 /* End of temporary inline asm.  */
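
As a reference for the semantics the new pattern implements, here is a
scalar sketch of TBX (illustrative only, not part of the patch): lanes
with an in-range index take the selected table byte, while out-of-range
lanes keep the existing destination value (TBL would zero them
instead).  This is also why the vtbx2 intrinsics can reuse the tbx1
builtin: once the two 8-byte table halves are combined into one 16-byte
table, the v7 out-of-range rule coincides exactly with TBX's.

#include <stdint.h>

/* Scalar model of tbx with a single 16-byte table register.
   LANES is 8 for the D-register forms and 16 for the Q-register
   forms.  */
static void
tbx1_model (uint8_t r[], const uint8_t tab[16], const uint8_t idx[],
	    int lanes)
{
  for (int i = 0; i < lanes; i++)
    if (idx[i] < 16)
      r[i] = tab[idx[i]];
}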


* Re: [PATCH 9/20] aarch64: Use RTL builtins for v[q]tbx intrinsics
From: Richard Sandiford @ 2021-04-28 15:02 UTC (permalink / raw)
  To: Jonathan Wright via Gcc-patches

Jonathan Wright via Gcc-patches <gcc-patches@gcc.gnu.org> writes:
> Hi,
>
> As subject, this patch rewrites the v[q]tbx Neon intrinsics to use RTL
> builtins rather than inline assembly code, allowing for better scheduling
> and optimization.
>
> Regression tested and bootstrapped on aarch64-none-linux-gnu - no
> issues.
>
> Ok for master?

OK, thanks.  I see arm also reuses tbl1 for the scheduling type, which
makes sense.  We should only add a separate type if something actually
needs it.

Richard
