From mboxrd@z Thu Jan 1 00:00:00 1970
Return-Path: 
Received: by sourceware.org (Postfix, from userid 1816)
	id B8BBE3858D33; Thu, 28 Jan 2021 11:44:11 +0000 (GMT)
DKIM-Filter: OpenDKIM Filter v2.11.0 sourceware.org B8BBE3858D33
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Content-Type: text/plain; charset="utf-8"
From: Kyrylo Tkachov
To: gcc-cvs@gcc.gnu.org
Subject: [gcc r11-6952] aarch64: Reimplement vshrn_n* intrinsics using builtins
X-Act-Checkin: gcc
X-Git-Author: Kyrylo Tkachov
X-Git-Refname: refs/heads/master
X-Git-Oldrev: f7a6d314e7f7eeb6240a4f62511c189c90ef300c
X-Git-Newrev: fdb904a1822c38db5d69a50878b21041c476f045
Message-Id: <20210128114411.B8BBE3858D33@sourceware.org>
Date: Thu, 28 Jan 2021 11:44:11 +0000 (GMT)
X-BeenThere: gcc-cvs@gcc.gnu.org
X-Mailman-Version: 2.1.29
Precedence: list
List-Id: Gcc-cvs mailing list
List-Unsubscribe: 
List-Archive: 
List-Help: 
List-Subscribe: 
X-List-Received-Date: Thu, 28 Jan 2021 11:44:11 -0000

https://gcc.gnu.org/g:fdb904a1822c38db5d69a50878b21041c476f045

commit r11-6952-gfdb904a1822c38db5d69a50878b21041c476f045
Author: Kyrylo Tkachov
Date:   Fri Jan 22 14:16:30 2021 +0000

    aarch64: Reimplement vshrn_n* intrinsics using builtins

    This patch reimplements the vshrn_n* intrinsics to use RTL builtins.
    These perform a narrowing right shift.

    Although the intrinsic generates the half-width mode (e.g. V8HI ->
    V8QI), the new pattern generates a full 128-bit mode (V8HI -> V16QI)
    by representing the fill-with-zeroes semantics of the SHRN instruction.
    The narrower (V8QI) result is extracted with a lowpart subreg.

    I found this allows the RTL optimisers to do a better job at optimising
    away redundant moves in frequently-occurring SHRN+SHRN2 pairs, like in:

    uint8x16_t foo (uint16x8_t in1, uint16x8_t in2)
    {
      uint8x8_t tmp = vshrn_n_u16 (in2, 7);
      uint8x16_t tmp2 = vshrn_high_n_u16 (tmp, in1, 4);
      return tmp2;
    }

    gcc/ChangeLog:

            * config/aarch64/aarch64-simd-builtins.def (shrn): Define builtin.
            * config/aarch64/aarch64-simd.md (aarch64_shrn_insn_le): Define.
            (aarch64_shrn_insn_be): Likewise.
            (aarch64_shrn): Likewise.
            * config/aarch64/arm_neon.h (vshrn_n_s16): Reimplement using builtins.
            (vshrn_n_s32): Likewise.
            (vshrn_n_s64): Likewise.
            (vshrn_n_u16): Likewise.
            (vshrn_n_u32): Likewise.
            (vshrn_n_u64): Likewise.
            * config/aarch64/iterators.md (vn_mode): New mode attribute.

Diff:
---
 gcc/config/aarch64/aarch64-simd-builtins.def |   3 +
 gcc/config/aarch64/aarch64-simd.md           |  50 ++++++++++++
 gcc/config/aarch64/arm_neon.h                | 113 ++++++++++-----------------
 gcc/config/aarch64/iterators.md              |   3 +
 4 files changed, 97 insertions(+), 72 deletions(-)

diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
index a71ae4d7241..13bc6928d4d 100644
--- a/gcc/config/aarch64/aarch64-simd-builtins.def
+++ b/gcc/config/aarch64/aarch64-simd-builtins.def
@@ -188,6 +188,9 @@
   /* Implemented by aarch64_mls_n.  */
   BUILTIN_VDQHS (TERNOP, mls_n, 0, NONE)
 
+  /* Implemented by aarch64_shrn".  */
+  BUILTIN_VQN (SHIFTIMM, shrn, 0, NONE)
+
   /* Implemented by aarch64_mlsl.  */
   BUILTIN_VD_BHSI (TERNOP, smlsl, 0, NONE)
   BUILTIN_VD_BHSI (TERNOPU, umlsl, 0, NONE)
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index db56b61baf2..872aa83fc92 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -1679,6 +1679,56 @@
   DONE;
 })
 
+(define_insn "aarch64_shrn_insn_le"
+  [(set (match_operand: 0 "register_operand" "=w")
+	(vec_concat:
+	  (truncate:
+	    (lshiftrt:VQN (match_operand:VQN 1 "register_operand" "w")
+	      (match_operand:VQN 2 "aarch64_simd_rshift_imm")))
+	  (match_operand: 3 "aarch64_simd_or_scalar_imm_zero")))]
+  "TARGET_SIMD && !BYTES_BIG_ENDIAN"
+  "shrn\\t%0., %1., %2"
+  [(set_attr "type" "neon_shift_imm_narrow_q")]
+)
+
+(define_insn "aarch64_shrn_insn_be"
+  [(set (match_operand: 0 "register_operand" "=w")
+	(vec_concat:
+	  (match_operand: 3 "aarch64_simd_or_scalar_imm_zero")
+	  (truncate:
+	    (lshiftrt:VQN (match_operand:VQN 1 "register_operand" "w")
+	      (match_operand:VQN 2 "aarch64_simd_rshift_imm")))))]
+  "TARGET_SIMD && BYTES_BIG_ENDIAN"
+  "shrn\\t%0., %1., %2"
+  [(set_attr "type" "neon_shift_imm_narrow_q")]
+)
+
+(define_expand "aarch64_shrn"
+  [(set (match_operand: 0 "register_operand")
+	(truncate:
+	  (lshiftrt:VQN (match_operand:VQN 1 "register_operand")
+	    (match_operand:SI 2 "aarch64_simd_shift_imm_offset_"))))]
+  "TARGET_SIMD"
+  {
+    operands[2] = aarch64_simd_gen_const_vector_dup (mode,
+						     INTVAL (operands[2]));
+    rtx tmp = gen_reg_rtx (mode);
+    if (BYTES_BIG_ENDIAN)
+      emit_insn (gen_aarch64_shrn_insn_be (tmp, operands[1],
+				operands[2], CONST0_RTX (mode)));
+    else
+      emit_insn (gen_aarch64_shrn_insn_le (tmp, operands[1],
+				operands[2], CONST0_RTX (mode)));
+
+    /* The intrinsic expects a narrow result, so emit a subreg that will get
+       optimized away as appropriate.  */
+    emit_move_insn (operands[0], lowpart_subreg (mode, tmp,
+						 mode));
+    DONE;
+  }
+)
+
+
 ;; For quads.
(define_insn "vec_pack_trunc_" diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h index b5c1f062bbc..80d75555a71 100644 --- a/gcc/config/aarch64/arm_neon.h +++ b/gcc/config/aarch64/arm_neon.h @@ -8584,6 +8584,47 @@ vmovn_u64 (uint64x2_t __a) return (uint32x2_t) __builtin_aarch64_xtnv2di ((int64x2_t) __a); } +__extension__ extern __inline int8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vshrn_n_s16 (int16x8_t __a, const int __b) +{ + return __builtin_aarch64_shrnv8hi (__a, __b); +} + +__extension__ extern __inline int16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vshrn_n_s32 (int32x4_t __a, const int __b) +{ + return __builtin_aarch64_shrnv4si (__a, __b); +} + +__extension__ extern __inline int32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vshrn_n_s64 (int64x2_t __a, const int __b) +{ + return __builtin_aarch64_shrnv2di (__a, __b); +} + +__extension__ extern __inline uint8x8_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vshrn_n_u16 (uint16x8_t __a, const int __b) +{ + return (uint8x8_t)__builtin_aarch64_shrnv8hi ((int16x8_t)__a, __b); +} + +__extension__ extern __inline uint16x4_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vshrn_n_u32 (uint32x4_t __a, const int __b) +{ + return (uint16x4_t)__builtin_aarch64_shrnv4si ((int32x4_t)__a, __b); +} + +__extension__ extern __inline uint32x2_t +__attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) +vshrn_n_u64 (uint64x2_t __a, const int __b) +{ + return (uint32x2_t)__builtin_aarch64_shrnv2di ((int64x2_t)__a, __b); +} #define vmull_high_lane_s16(a, b, c) \ __extension__ \ ({ \ @@ -9858,78 +9899,6 @@ vrsqrteq_u32 (uint32x4_t __a) result; \ }) -#define vshrn_n_s16(a, b) \ - __extension__ \ - ({ \ - int16x8_t a_ = (a); \ - int8x8_t result; \ - __asm__ ("shrn %0.8b,%1.8h,%2" \ - : "=w"(result) \ - : "w"(a_), "i"(b) \ - : /* No clobbers */); \ - result; \ - }) - -#define vshrn_n_s32(a, b) \ - __extension__ \ - ({ \ - int32x4_t a_ = (a); \ - int16x4_t result; \ - __asm__ ("shrn %0.4h,%1.4s,%2" \ - : "=w"(result) \ - : "w"(a_), "i"(b) \ - : /* No clobbers */); \ - result; \ - }) - -#define vshrn_n_s64(a, b) \ - __extension__ \ - ({ \ - int64x2_t a_ = (a); \ - int32x2_t result; \ - __asm__ ("shrn %0.2s,%1.2d,%2" \ - : "=w"(result) \ - : "w"(a_), "i"(b) \ - : /* No clobbers */); \ - result; \ - }) - -#define vshrn_n_u16(a, b) \ - __extension__ \ - ({ \ - uint16x8_t a_ = (a); \ - uint8x8_t result; \ - __asm__ ("shrn %0.8b,%1.8h,%2" \ - : "=w"(result) \ - : "w"(a_), "i"(b) \ - : /* No clobbers */); \ - result; \ - }) - -#define vshrn_n_u32(a, b) \ - __extension__ \ - ({ \ - uint32x4_t a_ = (a); \ - uint16x4_t result; \ - __asm__ ("shrn %0.4h,%1.4s,%2" \ - : "=w"(result) \ - : "w"(a_), "i"(b) \ - : /* No clobbers */); \ - result; \ - }) - -#define vshrn_n_u64(a, b) \ - __extension__ \ - ({ \ - uint64x2_t a_ = (a); \ - uint32x2_t result; \ - __asm__ ("shrn %0.2s,%1.2d,%2" \ - : "=w"(result) \ - : "w"(a_), "i"(b) \ - : /* No clobbers */); \ - result; \ - }) - #define vsli_n_p8(a, b, c) \ __extension__ \ ({ \ diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md index b64d77037af..7db343e1c99 100644 --- a/gcc/config/aarch64/iterators.md +++ b/gcc/config/aarch64/iterators.md @@ -1458,6 +1458,9 @@ (QI "qi") (HI "hi") (SI "si")]) +;; Like ve_mode but for the half-width modes. 
+(define_mode_attr vn_mode [(V8HI "qi") (V4SI "hi") (V2DI "si")])
+
 ;; Vm for lane instructions is restricted to FP_LO_REGS.
 (define_mode_attr vwx [(V4HI "x") (V8HI "x") (HI "x")
 		       (V2SI "w") (V4SI "w") (SI "w")])
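
[Editor's note, not part of the commit] A minimal, self-contained usage sketch of the SHRN+SHRN2 pairing the
commit message describes.  The function name narrow_pair and the test values are illustrative only; the scalar
expressions in the comments model what vshrn_n_u16 computes (each 16-bit lane is shifted right by the immediate
and truncated to 8 bits), which is what the new builtin-based implementation must preserve.

#include <arm_neon.h>
#include <stdint.h>
#include <stdio.h>

/* Same shape as the example in the commit message: the low half of the
   result comes from a narrowing shift of in2 (SHRN), the high half from a
   narrowing shift of in1 (SHRN2).  */
uint8x16_t
narrow_pair (uint16x8_t in1, uint16x8_t in2)
{
  uint8x8_t lo = vshrn_n_u16 (in2, 7);
  return vshrn_high_n_u16 (lo, in1, 4);
}

int
main (void)
{
  uint16_t a[8], b[8];
  uint8_t out[16];
  for (int i = 0; i < 8; i++)
    {
      a[i] = (uint16_t) (i * 1000 + 123);
      b[i] = (uint16_t) (i * 500 + 77);
    }
  vst1q_u8 (out, narrow_pair (vld1q_u16 (a), vld1q_u16 (b)));
  /* Scalar reference: lanes 0-7 hold (uint8_t) (b[i] >> 7),
     lanes 8-15 hold (uint8_t) (a[i] >> 4).  */
  for (int i = 0; i < 8; i++)
    printf ("%u %u\n", (unsigned) out[i], (unsigned) (uint8_t) (b[i] >> 7));
  for (int i = 0; i < 8; i++)
    printf ("%u %u\n", (unsigned) out[8 + i], (unsigned) (uint8_t) (a[i] >> 4));
  return 0;
}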
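[Editor's note, not part of the commit] A conceptual model, written with existing Advanced SIMD intrinsics, of
the representation the new define_expand relies on: SHRN writes the narrowed lanes to the low 64 bits of the
destination and zeroes the upper 64 bits, so the insn pattern describes the full 128-bit value (narrowed lanes
concatenated with zeroes) and the intrinsic's 64-bit result is just its low half, taken via a lowpart subreg.
The function name below is illustrative only.

#include <arm_neon.h>

/* Model of vshrn_n_u16 (__a, 7): first the wide value the new pattern
   describes, then the lowpart extraction the expander performs.  */
uint8x8_t
model_vshrn_n_u16 (uint16x8_t __a)
{
  /* Full 128-bit result: narrowed lanes in the low half, zeroes above.  */
  uint8x16_t wide = vcombine_u8 (vshrn_n_u16 (__a, 7), vdup_n_u8 (0));
  /* The intrinsic's narrow result is the low half of that wide value.  */
  return vget_low_u8 (wide);
}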