From: Richard Sandiford <richard.sandiford@arm.com>
To: Jonathan Wright via Gcc-patches <gcc-patches@gcc.gnu.org>
Subject: Re: [PATCH 9/20] aarch64: Use RTL builtins for v[q]tbx intrinsics
Date: Wed, 28 Apr 2021 16:02:49 +0100

Jonathan Wright via Gcc-patches writes:
> Hi,
>
> As subject, this patch rewrites the v[q]tbx Neon intrinsics to use RTL
> builtins rather than inline assembly code, allowing for better scheduling
> and optimization.
>
> Regression tested and bootstrapped on aarch64-none-linux-gnu - no
> issues.
>
> Ok for master?

OK, thanks.  I see arm also reuses tbl1 for the scheduling type,
which makes sense.  We should only add a separate type if something
actually needs it.

Richard

>
> Thanks,
> Jonathan
>
> ---
>
> gcc/ChangeLog:
>
> 2021-02-12  Jonathan Wright
>
>	* config/aarch64/aarch64-simd-builtins.def: Add tbx1 builtin
>	generator macros.
>	* config/aarch64/aarch64-simd.md (aarch64_tbx1<mode>):
>	Define.
>	* config/aarch64/arm_neon.h (vqtbx1_s8): Use RTL builtin
>	instead of inline asm.
>	(vqtbx1_u8): Likewise.
>	(vqtbx1_p8): Likewise.
>	(vqtbx1q_s8): Likewise.
>	(vqtbx1q_u8): Likewise.
>	(vqtbx1q_p8): Likewise.
>	(vtbx2_s8): Likewise.
>	(vtbx2_u8): Likewise.
>	(vtbx2_p8): Likewise.
>
> diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
> index a5cfb6754456a1e8f0fca57c68b009a53e09789e..0f44ed84ff9d08d808b1b2dfe528db5208b134f5 100644
> --- a/gcc/config/aarch64/aarch64-simd-builtins.def
> +++ b/gcc/config/aarch64/aarch64-simd-builtins.def
> @@ -710,6 +710,10 @@
>    VAR1 (BINOP, qtbl4, 0, NONE, v8qi)
>    VAR1 (BINOP, qtbl4, 0, NONE, v16qi)
>
> +  /* Implemented by aarch64_tbx1<mode>.  */
> +  VAR2 (TERNOP, tbx1, 0, NONE, v8qi, v16qi)
> +  VAR2 (TERNOPU, tbx1, 0, NONE, v8qi, v16qi)
> +
>    /* Implemented by aarch64_tbx4<mode>.  */
>    VAR1 (TERNOP, tbx4, 0, NONE, v8qi)
>    VAR1 (TERNOP, tbx4, 0, NONE, v16qi)
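
For reference, since the macro expansion is not shown in the patch: each
VAR2 line instantiates the builtin for both the v8qi and v16qi modes, so
the hunk above yields the signed builtins (from TERNOP) and the unsigned
"_uuuu" variants (from TERNOPU) that the arm_neon.h hunk below calls.  A
sketch of the signed prototypes, inferred from those call sites - the real
declarations are generated internally by the builtin machinery, so treat
these as illustrative only:

  int8x8_t  __builtin_aarch64_tbx1v8qi  (int8x8_t __r, int8x16_t __tab,
					 int8x8_t __idx);
  int8x16_t __builtin_aarch64_tbx1v16qi (int8x16_t __r, int8x16_t __tab,
					 int8x16_t __idx);
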
> diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
> index 939c281d533261bb84dc451586da707953018fb8..5f701dd2775290156634ef8c6feccecd359e9ec9 100644
> --- a/gcc/config/aarch64/aarch64-simd.md
> +++ b/gcc/config/aarch64/aarch64-simd.md
> @@ -6905,6 +6905,17 @@
>    [(set_attr "type" "neon_tbl1")]
>  )
>
> +(define_insn "aarch64_tbx1<mode>"
> +  [(set (match_operand:VB 0 "register_operand" "=w")
> +	(unspec:VB [(match_operand:VB 1 "register_operand" "0")
> +		    (match_operand:V16QI 2 "register_operand" "w")
> +		    (match_operand:VB 3 "register_operand" "w")]
> +		   UNSPEC_TBX))]
> +  "TARGET_SIMD"
> +  "tbx\\t%0.<Vbtype>, {%2.16b}, %3.<Vbtype>"
> +  [(set_attr "type" "neon_tbl1")]
> +)
> +
>  ;; Two source registers.
>
>  (define_insn "aarch64_tbl2v16qi"
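
A note on the pattern before the arm_neon.h changes: the "0" matching
constraint on operand 1 is what models TBX's read-modify-write of its
destination - out-of-range indices leave the destination bytes untouched,
so the insn has to see the old value of operand 0.  A minimal sketch of
the codegen this should allow (the function name is made up for
illustration; exact register allocation and scheduling will vary):

  #include <arm_neon.h>

  uint8x8_t
  lookup_or_default (uint8x8_t __def, uint8x16_t __tab, uint8x8_t __idx)
  {
    /* With the builtin, GCC sees the data flow through the TBX
       destination instead of an opaque asm, so it can emit something
       like:
	   tbx	v0.8b, {v1.16b}, v2.8b
	   ret
       with __def arriving and returning in v0.  */
    return vqtbx1_u8 (__def, __tab, __idx);
  }
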
> diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
> index 46f919fb254b98f887db4748d3b410b7d18e8a4e..1c48c166b5b9aaf052761f95121c26845221dae9 100644
> --- a/gcc/config/aarch64/arm_neon.h
> +++ b/gcc/config/aarch64/arm_neon.h
> @@ -9617,72 +9617,46 @@ __extension__ extern __inline int8x8_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vqtbx1_s8 (int8x8_t __r, int8x16_t __tab, uint8x8_t __idx)
>  {
> -  int8x8_t __result = __r;
> -  __asm__ ("tbx %0.8b,{%1.16b},%2.8b"
> -           : "+w"(__result)
> -           : "w"(__tab), "w"(__idx)
> -           : /* No clobbers */);
> -  return __result;
> +  return __builtin_aarch64_tbx1v8qi (__r, __tab, (int8x8_t) __idx);
>  }
>
>  __extension__ extern __inline uint8x8_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vqtbx1_u8 (uint8x8_t __r, uint8x16_t __tab, uint8x8_t __idx)
>  {
> -  uint8x8_t __result = __r;
> -  __asm__ ("tbx %0.8b,{%1.16b},%2.8b"
> -           : "+w"(__result)
> -           : "w"(__tab), "w"(__idx)
> -           : /* No clobbers */);
> -  return __result;
> +  return __builtin_aarch64_tbx1v8qi_uuuu (__r, __tab, __idx);
>  }
>
>  __extension__ extern __inline poly8x8_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vqtbx1_p8 (poly8x8_t __r, poly8x16_t __tab, uint8x8_t __idx)
>  {
> -  poly8x8_t __result = __r;
> -  __asm__ ("tbx %0.8b,{%1.16b},%2.8b"
> -           : "+w"(__result)
> -           : "w"(__tab), "w"(__idx)
> -           : /* No clobbers */);
> -  return __result;
> +  return (poly8x8_t) __builtin_aarch64_tbx1v8qi ((int8x8_t) __r,
> +						 (int8x16_t) __tab,
> +						 (int8x8_t) __idx);
>  }
>
>  __extension__ extern __inline int8x16_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vqtbx1q_s8 (int8x16_t __r, int8x16_t __tab, uint8x16_t __idx)
>  {
> -  int8x16_t __result = __r;
> -  __asm__ ("tbx %0.16b,{%1.16b},%2.16b"
> -           : "+w"(__result)
> -           : "w"(__tab), "w"(__idx)
> -           : /* No clobbers */);
> -  return __result;
> +  return __builtin_aarch64_tbx1v16qi (__r, __tab, (int8x16_t) __idx);
>  }
>
>  __extension__ extern __inline uint8x16_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vqtbx1q_u8 (uint8x16_t __r, uint8x16_t __tab, uint8x16_t __idx)
>  {
> -  uint8x16_t __result = __r;
> -  __asm__ ("tbx %0.16b,{%1.16b},%2.16b"
> -           : "+w"(__result)
> -           : "w"(__tab), "w"(__idx)
> -           : /* No clobbers */);
> -  return __result;
> +  return __builtin_aarch64_tbx1v16qi_uuuu (__r, __tab, __idx);
>  }
>
>  __extension__ extern __inline poly8x16_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vqtbx1q_p8 (poly8x16_t __r, poly8x16_t __tab, uint8x16_t __idx)
>  {
> -  poly8x16_t __result = __r;
> -  __asm__ ("tbx %0.16b,{%1.16b},%2.16b"
> -           : "+w"(__result)
> -           : "w"(__tab), "w"(__idx)
> -           : /* No clobbers */);
> -  return __result;
> +  return (poly8x16_t) __builtin_aarch64_tbx1v16qi ((int8x16_t) __r,
> +						   (int8x16_t) __tab,
> +						   (int8x16_t) __idx);
>  }
>
>  /* V7 legacy table intrinsics. */
> @@ -9846,39 +9820,26 @@ __extension__ extern __inline int8x8_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vtbx2_s8 (int8x8_t __r, int8x8x2_t __tab, int8x8_t __idx)
>  {
> -  int8x8_t __result = __r;
>    int8x16_t __temp = vcombine_s8 (__tab.val[0], __tab.val[1]);
> -  __asm__ ("tbx %0.8b, {%1.16b}, %2.8b"
> -           : "+w"(__result)
> -           : "w"(__temp), "w"(__idx)
> -           : /* No clobbers */);
> -  return __result;
> +  return __builtin_aarch64_tbx1v8qi (__r, __temp, __idx);
>  }
>
>  __extension__ extern __inline uint8x8_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vtbx2_u8 (uint8x8_t __r, uint8x8x2_t __tab, uint8x8_t __idx)
>  {
> -  uint8x8_t __result = __r;
>    uint8x16_t __temp = vcombine_u8 (__tab.val[0], __tab.val[1]);
> -  __asm__ ("tbx %0.8b, {%1.16b}, %2.8b"
> -           : "+w"(__result)
> -           : "w"(__temp), "w"(__idx)
> -           : /* No clobbers */);
> -  return __result;
> +  return __builtin_aarch64_tbx1v8qi_uuuu (__r, __temp, __idx);
>  }
>
>  __extension__ extern __inline poly8x8_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vtbx2_p8 (poly8x8_t __r, poly8x8x2_t __tab, uint8x8_t __idx)
>  {
> -  poly8x8_t __result = __r;
>    poly8x16_t __temp = vcombine_p8 (__tab.val[0], __tab.val[1]);
> -  __asm__ ("tbx %0.8b, {%1.16b}, %2.8b"
> -           : "+w"(__result)
> -           : "w"(__temp), "w"(__idx)
> -           : /* No clobbers */);
> -  return __result;
> +  return (poly8x8_t) __builtin_aarch64_tbx1v8qi ((int8x8_t) __r,
> +						 (int8x16_t) __temp,
> +						 (int8x8_t) __idx);
>  }
>
>  /* End of temporary inline asm. */
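
For anyone wanting to sanity-check the semantics the new builtins must
preserve: TBX with a single 16-byte table reads the table for indices
0-15 and leaves the destination byte unchanged for out-of-range indices,
which is also why vtbx2 above can be implemented exactly as vcombine plus
a single tbx1 (two 8-byte tables likewise make indices 0-15 valid).  A
small self-contained check, assuming an aarch64 toolchain such as
aarch64-none-linux-gnu-gcc:

  #include <arm_neon.h>
  #include <stdio.h>

  int
  main (void)
  {
    uint8_t tab_b[16], r_b[8], idx_b[8];
    for (int i = 0; i < 16; i++)
      tab_b[i] = 0xa0 + i;		/* Table holds 0xa0..0xaf.  */
    for (int i = 0; i < 8; i++)
      {
	r_b[i] = 0xff;			/* Fallback bytes.  */
	idx_b[i] = i < 4 ? i : 0x20;	/* Last four indices out of range.  */
      }

    uint8x16_t tab = vld1q_u8 (tab_b);
    /* In-range lanes read the table; out-of-range lanes keep r's bytes.  */
    uint8x8_t res = vqtbx1_u8 (vld1_u8 (r_b), tab, vld1_u8 (idx_b));

    uint8_t out[8];
    vst1_u8 (out, res);
    for (int i = 0; i < 8; i++)
      printf ("%#x ", out[i]);	/* Expect: 0xa0 0xa1 0xa2 0xa3 0xff 0xff 0xff 0xff */
    printf ("\n");
    return 0;
  }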