public inbox for gcc-cvs@sourceware.org
Subject: [gcc r11-6952] aarch64: Reimplement vshrn_n* intrinsics using builtins
From: Kyrylo Tkachov @ 2021-01-28 11:44 UTC
To: gcc-cvs
https://gcc.gnu.org/g:fdb904a1822c38db5d69a50878b21041c476f045
commit r11-6952-gfdb904a1822c38db5d69a50878b21041c476f045
Author: Kyrylo Tkachov <kyrylo.tkachov@arm.com>
Date: Fri Jan 22 14:16:30 2021 +0000
aarch64: Reimplement vshrn_n* intrinsics using builtins
This patch reimplements the vshrn_n* intrinsics to use RTL builtins.
These perform a narrowing right shift.
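For illustration, the lane-wise operation can be modelled in scalar C
(a sketch added for this write-up, not part of the patch): each lane
of the wide input is shifted right by the immediate and truncated to
half width.

#include <stdint.h>

/* Scalar model of vshrn_n_u16: shift each 16-bit lane right by n
   (1 <= n <= 8) and truncate the result to 8 bits.  */
static void
shrn_u16_model (uint8_t out[8], const uint16_t in[8], int n)
{
  for (int i = 0; i < 8; i++)
    out[i] = (uint8_t) (in[i] >> n);
}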
Although the intrinsic generates the half-width mode (e.g. V8HI ->
V8QI), the new pattern generates a full 128-bit mode (V8HI -> V16QI)
by representing the fill-with-zeroes semantics of the SHRN
instruction. The narrower (V8QI) result is extracted with a lowpart
subreg.
I found this allows the RTL optimisers to do a better job of
optimising away redundant moves in frequently occurring SHRN+SHRN2
pairs, like in:
uint8x16_t
foo (uint16x8_t in1, uint16x8_t in2)
{
  uint8x8_t tmp = vshrn_n_u16 (in2, 7);
  uint8x16_t tmp2 = vshrn_high_n_u16 (tmp, in1, 4);
  return tmp2;
}
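On an AArch64 target the intrinsic can be spot-checked against the
lane-wise model above (again only an illustrative sketch, not part of
the patch; it assumes arm_neon.h is available):

#include <arm_neon.h>
#include <assert.h>
#include <stdint.h>

int
main (void)
{
  uint16_t in[8] = { 0x0180, 0x0240, 0x0303, 0x7fff,
                     0x8000, 0xffff, 0x0001, 0x1234 };
  uint8_t got[8];

  /* Narrowing right shift by 7: each lane becomes (uint8_t) (lane >> 7).  */
  vst1_u8 (got, vshrn_n_u16 (vld1q_u16 (in), 7));
  for (int i = 0; i < 8; i++)
    assert (got[i] == (uint8_t) (in[i] >> 7));
  return 0;
}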
gcc/ChangeLog:

        * config/aarch64/aarch64-simd-builtins.def (shrn): Define
        builtin.
        * config/aarch64/aarch64-simd.md (aarch64_shrn<mode>_insn_le):
        Define.
        (aarch64_shrn<mode>_insn_be): Likewise.
        (aarch64_shrn<mode>): Likewise.
        * config/aarch64/arm_neon.h (vshrn_n_s16): Reimplement using
        builtins.
        (vshrn_n_s32): Likewise.
        (vshrn_n_s64): Likewise.
        (vshrn_n_u16): Likewise.
        (vshrn_n_u32): Likewise.
        (vshrn_n_u64): Likewise.
        * config/aarch64/iterators.md (vn_mode): New mode attribute.
Diff:
---
 gcc/config/aarch64/aarch64-simd-builtins.def |   3 +
 gcc/config/aarch64/aarch64-simd.md           |  50 ++++++++++++
 gcc/config/aarch64/arm_neon.h                | 113 ++++++++++-----------------
 gcc/config/aarch64/iterators.md              |   3 +
 4 files changed, 97 insertions(+), 72 deletions(-)
diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
index a71ae4d7241..13bc6928d4d 100644
--- a/gcc/config/aarch64/aarch64-simd-builtins.def
+++ b/gcc/config/aarch64/aarch64-simd-builtins.def
@@ -188,6 +188,9 @@
   /* Implemented by aarch64_mls_n<mode>. */
   BUILTIN_VDQHS (TERNOP, mls_n, 0, NONE)
 
+  /* Implemented by aarch64_shrn<mode>. */
+  BUILTIN_VQN (SHIFTIMM, shrn, 0, NONE)
+
   /* Implemented by aarch64_<su>mlsl<mode>. */
   BUILTIN_VD_BHSI (TERNOP, smlsl, 0, NONE)
   BUILTIN_VD_BHSI (TERNOPU, umlsl, 0, NONE)
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index db56b61baf2..872aa83fc92 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -1679,6 +1679,56 @@
   DONE;
 })
 
+(define_insn "aarch64_shrn<mode>_insn_le"
+  [(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w")
+        (vec_concat:<VNARROWQ2>
+          (truncate:<VNARROWQ>
+            (lshiftrt:VQN (match_operand:VQN 1 "register_operand" "w")
+              (match_operand:VQN 2 "aarch64_simd_rshift_imm")))
+          (match_operand:<VNARROWQ> 3 "aarch64_simd_or_scalar_imm_zero")))]
+  "TARGET_SIMD && !BYTES_BIG_ENDIAN"
+  "shrn\\t%0.<Vntype>, %1.<Vtype>, %2"
+  [(set_attr "type" "neon_shift_imm_narrow_q")]
+)
+
+(define_insn "aarch64_shrn<mode>_insn_be"
+  [(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w")
+        (vec_concat:<VNARROWQ2>
+          (match_operand:<VNARROWQ> 3 "aarch64_simd_or_scalar_imm_zero")
+          (truncate:<VNARROWQ>
+            (lshiftrt:VQN (match_operand:VQN 1 "register_operand" "w")
+              (match_operand:VQN 2 "aarch64_simd_rshift_imm")))))]
+  "TARGET_SIMD && BYTES_BIG_ENDIAN"
+  "shrn\\t%0.<Vntype>, %1.<Vtype>, %2"
+  [(set_attr "type" "neon_shift_imm_narrow_q")]
+)
+
+(define_expand "aarch64_shrn<mode>"
+  [(set (match_operand:<VNARROWQ> 0 "register_operand")
+        (truncate:<VNARROWQ>
+          (lshiftrt:VQN (match_operand:VQN 1 "register_operand")
+            (match_operand:SI 2 "aarch64_simd_shift_imm_offset_<vn_mode>"))))]
+  "TARGET_SIMD"
+  {
+    operands[2] = aarch64_simd_gen_const_vector_dup (<MODE>mode,
+                                                     INTVAL (operands[2]));
+    rtx tmp = gen_reg_rtx (<VNARROWQ2>mode);
+    if (BYTES_BIG_ENDIAN)
+      emit_insn (gen_aarch64_shrn<mode>_insn_be (tmp, operands[1],
+                   operands[2], CONST0_RTX (<VNARROWQ>mode)));
+    else
+      emit_insn (gen_aarch64_shrn<mode>_insn_le (tmp, operands[1],
+                   operands[2], CONST0_RTX (<VNARROWQ>mode)));
+
+    /* The intrinsic expects a narrow result, so emit a subreg that will get
+       optimized away as appropriate.  */
+    emit_move_insn (operands[0], lowpart_subreg (<VNARROWQ>mode, tmp,
+                                                 <VNARROWQ2>mode));
+    DONE;
+  }
+)
+
+
 ;; For quads.
 
 (define_insn "vec_pack_trunc_<mode>"
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index b5c1f062bbc..80d75555a71 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -8584,6 +8584,47 @@ vmovn_u64 (uint64x2_t __a)
   return (uint32x2_t) __builtin_aarch64_xtnv2di ((int64x2_t) __a);
 }
 
+__extension__ extern __inline int8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshrn_n_s16 (int16x8_t __a, const int __b)
+{
+  return __builtin_aarch64_shrnv8hi (__a, __b);
+}
+
+__extension__ extern __inline int16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshrn_n_s32 (int32x4_t __a, const int __b)
+{
+  return __builtin_aarch64_shrnv4si (__a, __b);
+}
+
+__extension__ extern __inline int32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshrn_n_s64 (int64x2_t __a, const int __b)
+{
+  return __builtin_aarch64_shrnv2di (__a, __b);
+}
+
+__extension__ extern __inline uint8x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshrn_n_u16 (uint16x8_t __a, const int __b)
+{
+  return (uint8x8_t)__builtin_aarch64_shrnv8hi ((int16x8_t)__a, __b);
+}
+
+__extension__ extern __inline uint16x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshrn_n_u32 (uint32x4_t __a, const int __b)
+{
+  return (uint16x4_t)__builtin_aarch64_shrnv4si ((int32x4_t)__a, __b);
+}
+
+__extension__ extern __inline uint32x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vshrn_n_u64 (uint64x2_t __a, const int __b)
+{
+  return (uint32x2_t)__builtin_aarch64_shrnv2di ((int64x2_t)__a, __b);
+}
+
 #define vmull_high_lane_s16(a, b, c)                                    \
   __extension__                                                         \
   ({                                                                    \
@@ -9858,78 +9899,6 @@ vrsqrteq_u32 (uint32x4_t __a)
   result;                                                               \
 })
 
-#define vshrn_n_s16(a, b)                                               \
-  __extension__                                                         \
-    ({                                                                  \
-       int16x8_t a_ = (a);                                              \
-       int8x8_t result;                                                 \
-       __asm__ ("shrn %0.8b,%1.8h,%2"                                   \
-                : "=w"(result)                                          \
-                : "w"(a_), "i"(b)                                       \
-                : /* No clobbers */);                                   \
-       result;                                                          \
-     })
-
-#define vshrn_n_s32(a, b)                                               \
-  __extension__                                                         \
-    ({                                                                  \
-       int32x4_t a_ = (a);                                              \
-       int16x4_t result;                                                \
-       __asm__ ("shrn %0.4h,%1.4s,%2"                                   \
-                : "=w"(result)                                          \
-                : "w"(a_), "i"(b)                                       \
-                : /* No clobbers */);                                   \
-       result;                                                          \
-     })
-
-#define vshrn_n_s64(a, b)                                               \
-  __extension__                                                         \
-    ({                                                                  \
-       int64x2_t a_ = (a);                                              \
-       int32x2_t result;                                                \
-       __asm__ ("shrn %0.2s,%1.2d,%2"                                   \
-                : "=w"(result)                                          \
-                : "w"(a_), "i"(b)                                       \
-                : /* No clobbers */);                                   \
-       result;                                                          \
-     })
-
-#define vshrn_n_u16(a, b)                                               \
-  __extension__                                                         \
-    ({                                                                  \
-       uint16x8_t a_ = (a);                                             \
-       uint8x8_t result;                                                \
-       __asm__ ("shrn %0.8b,%1.8h,%2"                                   \
-                : "=w"(result)                                          \
-                : "w"(a_), "i"(b)                                       \
-                : /* No clobbers */);                                   \
-       result;                                                          \
-     })
-
-#define vshrn_n_u32(a, b)                                               \
-  __extension__                                                         \
-    ({                                                                  \
-       uint32x4_t a_ = (a);                                             \
-       uint16x4_t result;                                               \
-       __asm__ ("shrn %0.4h,%1.4s,%2"                                   \
-                : "=w"(result)                                          \
-                : "w"(a_), "i"(b)                                       \
-                : /* No clobbers */);                                   \
-       result;                                                          \
-     })
-
-#define vshrn_n_u64(a, b)                                               \
-  __extension__                                                         \
-    ({                                                                  \
-       uint64x2_t a_ = (a);                                             \
-       uint32x2_t result;                                               \
-       __asm__ ("shrn %0.2s,%1.2d,%2"                                   \
-                : "=w"(result)                                          \
-                : "w"(a_), "i"(b)                                       \
-                : /* No clobbers */);                                   \
-       result;                                                          \
-     })
-
 #define vsli_n_p8(a, b, c)                                              \
   __extension__                                                         \
   ({                                                                    \
diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md
index b64d77037af..7db343e1c99 100644
--- a/gcc/config/aarch64/iterators.md
+++ b/gcc/config/aarch64/iterators.md
@@ -1458,6 +1458,9 @@
(QI "qi") (HI "hi")
(SI "si")])
+;; Like ve_mode but for the half-width modes.
+(define_mode_attr vn_mode [(V8HI "qi") (V4SI "hi") (V2DI "si")])
+
;; Vm for lane instructions is restricted to FP_LO_REGS.
(define_mode_attr vwx [(V4HI "x") (V8HI "x") (HI "x")
(V2SI "w") (V4SI "w") (SI "w")])