* [gcc(refs/users/marxin/heads/marxin-gcc-benchmark-branch)] [ARM][GCC][14x]: MVE ACLE whole vector left shift with carry intrinsics.
@ 2020-03-30 11:03 Martin Liska
From: Martin Liska @ 2020-03-30 11:03 UTC
  To: gcc-cvs

https://gcc.gnu.org/g:88c9a831f3a54a17e9722e15cb99459e21bccaad

commit 88c9a831f3a54a17e9722e15cb99459e21bccaad
Author: Srinath Parvathaneni <srinath.parvathaneni@arm.com>
Date:   Mon Mar 23 18:29:17 2020 +0000

    [ARM][GCC][14x]: MVE ACLE whole vector left shift with carry intrinsics.
    
    This patch supports the following MVE ACLE whole vector left shift with carry intrinsics:
    
    vshlcq_m_s8, vshlcq_m_s16, vshlcq_m_s32, vshlcq_m_u8, vshlcq_m_u16, vshlcq_m_u32.
    
    Please refer to the M-profile Vector Extension (MVE) intrinsics documentation [1] for more details.
    [1] https://developer.arm.com/architectures/instruction-sets/simd-isas/helium/mve-intrinsics
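
    For illustration only (not part of this patch): a minimal usage sketch of
    the new intrinsics through the polymorphic vshlcq_m form.  The function
    name and the shift count of 4 are arbitrary choices; the carry behaviour
    follows the __arm_vshlcq_m_u16 wrapper added to arm_mve.h, which reads the
    carry word through the pointer and writes the updated carry back.

      #include "arm_mve.h"

      uint16x8_t
      shift_whole_vector (uint16x8_t value, uint32_t *carry, mve_pred16_t p)
      {
        /* Predicated whole-vector left shift with carry: *carry is read as
           the incoming carry and updated with the outgoing carry.  The
           _Generic dispatch added to arm_mve.h resolves this call to
           __arm_vshlcq_m_u16.  */
        return vshlcq_m (value, carry, 4, p);
      }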
    
    2020-03-23  Srinath Parvathaneni  <srinath.parvathaneni@arm.com>
                Andre Vieira  <andre.simoesdiasvieira@arm.com>
                Mihail Ionescu  <mihail.ionescu@arm.com>
    
            * config/arm/arm_mve.h (vshlcq_m_s8): Define macro.
            (vshlcq_m_u8): Likewise.
            (vshlcq_m_s16): Likewise.
            (vshlcq_m_u16): Likewise.
            (vshlcq_m_s32): Likewise.
            (vshlcq_m_u32): Likewise.
            (__arm_vshlcq_m_s8): Define intrinsic.
            (__arm_vshlcq_m_u8): Likewise.
            (__arm_vshlcq_m_s16): Likewise.
            (__arm_vshlcq_m_u16): Likewise.
            (__arm_vshlcq_m_s32): Likewise.
            (__arm_vshlcq_m_u32): Likewise.
            (vshlcq_m): Define polymorphic variant.
            * config/arm/arm_mve_builtins.def (QUADOP_NONE_NONE_UNONE_IMM_UNONE):
            Use builtin qualifier.
            (QUADOP_UNONE_UNONE_UNONE_IMM_UNONE): Likewise.
            * config/arm/mve.md (mve_vshlcq_m_vec_<supf><mode>): Define RTL pattern.
            (mve_vshlcq_m_carry_<supf><mode>): Likewise.
            (mve_vshlcq_m_<supf><mode>): Likewise.
    
    gcc/testsuite/ChangeLog:
    
    2020-03-23  Srinath Parvathaneni  <srinath.parvathaneni@arm.com>
                Andre Vieira  <andre.simoesdiasvieira@arm.com>
                Mihail Ionescu  <mihail.ionescu@arm.com>
    
            * gcc.target/arm/mve/intrinsics/vshlcq_m_s16.c: New test.
            * gcc.target/arm/mve/intrinsics/vshlcq_m_s32.c: Likewise.
            * gcc.target/arm/mve/intrinsics/vshlcq_m_s8.c: Likewise.
            * gcc.target/arm/mve/intrinsics/vshlcq_m_u16.c: Likewise.
            * gcc.target/arm/mve/intrinsics/vshlcq_m_u32.c: Likewise.
            * gcc.target/arm/mve/intrinsics/vshlcq_m_u8.c: Likewise.

Diff:
---
 gcc/ChangeLog                                      | 24 ++++++++
 gcc/config/arm/arm_mve.h                           | 70 ++++++++++++++++++++++
 gcc/config/arm/arm_mve_builtins.def                |  4 ++
 gcc/config/arm/mve.md                              | 62 ++++++++++++++++++-
 gcc/testsuite/ChangeLog                            | 11 ++++
 .../gcc.target/arm/mve/intrinsics/vshlcq_m_s16.c   | 23 +++++++
 .../gcc.target/arm/mve/intrinsics/vshlcq_m_s32.c   | 23 +++++++
 .../gcc.target/arm/mve/intrinsics/vshlcq_m_s8.c    | 23 +++++++
 .../gcc.target/arm/mve/intrinsics/vshlcq_m_u16.c   | 23 +++++++
 .../gcc.target/arm/mve/intrinsics/vshlcq_m_u32.c   | 23 +++++++
 .../gcc.target/arm/mve/intrinsics/vshlcq_m_u8.c    | 23 +++++++
 11 files changed, 306 insertions(+), 3 deletions(-)

diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 3825df67385..852773d2f5f 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,27 @@
+2020-03-23  Srinath Parvathaneni  <srinath.parvathaneni@arm.com>
+            Andre Vieira  <andre.simoesdiasvieira@arm.com>
+            Mihail Ionescu  <mihail.ionescu@arm.com>
+
+	* config/arm/arm_mve.h (vshlcq_m_s8): Define macro.
+	(vshlcq_m_u8): Likewise.
+	(vshlcq_m_s16): Likewise.
+	(vshlcq_m_u16): Likewise.
+	(vshlcq_m_s32): Likewise.
+	(vshlcq_m_u32): Likewise.
+	(__arm_vshlcq_m_s8): Define intrinsic.
+	(__arm_vshlcq_m_u8): Likewise.
+	(__arm_vshlcq_m_s16): Likewise.
+	(__arm_vshlcq_m_u16): Likewise.
+	(__arm_vshlcq_m_s32): Likewise.
+	(__arm_vshlcq_m_u32): Likewise.
+	(vshlcq_m): Define polymorphic variant.
+	* config/arm/arm_mve_builtins.def (QUADOP_NONE_NONE_UNONE_IMM_UNONE):
+	Use builtin qualifier.
+	(QUADOP_UNONE_UNONE_UNONE_IMM_UNONE): Likewise.
+	* config/arm/mve.md (mve_vshlcq_m_vec_<supf><mode>): Define RTL pattern.
+	(mve_vshlcq_m_carry_<supf><mode>): Likewise.
+	(mve_vshlcq_m_<supf><mode>): Likewise.
+
 2020-03-23  Srinath Parvathaneni  <srinath.parvathaneni@arm.com>
 
 	* config/arm/arm-builtins.c (LSLL_QUALIFIERS): Define builtin qualifier.
diff --git a/gcc/config/arm/arm_mve.h b/gcc/config/arm/arm_mve.h
index f2d80ee6360..14b6ec857bf 100644
--- a/gcc/config/arm/arm_mve.h
+++ b/gcc/config/arm/arm_mve.h
@@ -2546,6 +2546,12 @@ typedef struct { uint8x16_t val[4]; } uint8x16x4_t;
 #define urshrl(__p0, __p1) __arm_urshrl(__p0, __p1)
 #define lsll(__p0, __p1) __arm_lsll(__p0, __p1)
 #define asrl(__p0, __p1) __arm_asrl(__p0, __p1)
+#define vshlcq_m_s8(__a,  __b,  __imm, __p) __arm_vshlcq_m_s8(__a,  __b,  __imm, __p)
+#define vshlcq_m_u8(__a,  __b,  __imm, __p) __arm_vshlcq_m_u8(__a,  __b,  __imm, __p)
+#define vshlcq_m_s16(__a,  __b,  __imm, __p) __arm_vshlcq_m_s16(__a,  __b,  __imm, __p)
+#define vshlcq_m_u16(__a,  __b,  __imm, __p) __arm_vshlcq_m_u16(__a,  __b,  __imm, __p)
+#define vshlcq_m_s32(__a,  __b,  __imm, __p) __arm_vshlcq_m_s32(__a,  __b,  __imm, __p)
+#define vshlcq_m_u32(__a,  __b,  __imm, __p) __arm_vshlcq_m_u32(__a,  __b,  __imm, __p)
 #endif
 
 /* For big-endian, GCC's vector indices are reversed within each 64 bits
@@ -16671,6 +16677,60 @@ __arm_srshr (int32_t value, const int shift)
   return __builtin_mve_srshr_si (value, shift);
 }
 
+__extension__ extern __inline int8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlcq_m_s8 (int8x16_t __a, uint32_t * __b, const int __imm, mve_pred16_t __p)
+{
+  int8x16_t __res = __builtin_mve_vshlcq_m_vec_sv16qi (__a, *__b, __imm, __p);
+  *__b = __builtin_mve_vshlcq_m_carry_sv16qi (__a, *__b, __imm, __p);
+  return __res;
+}
+
+__extension__ extern __inline uint8x16_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlcq_m_u8 (uint8x16_t __a, uint32_t * __b, const int __imm, mve_pred16_t __p)
+{
+  uint8x16_t __res = __builtin_mve_vshlcq_m_vec_uv16qi (__a, *__b, __imm, __p);
+  *__b = __builtin_mve_vshlcq_m_carry_uv16qi (__a, *__b, __imm, __p);
+  return __res;
+}
+
+__extension__ extern __inline int16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlcq_m_s16 (int16x8_t __a, uint32_t * __b, const int __imm, mve_pred16_t __p)
+{
+  int16x8_t __res = __builtin_mve_vshlcq_m_vec_sv8hi (__a, *__b, __imm, __p);
+  *__b = __builtin_mve_vshlcq_m_carry_sv8hi (__a, *__b, __imm, __p);
+  return __res;
+}
+
+__extension__ extern __inline uint16x8_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlcq_m_u16 (uint16x8_t __a, uint32_t * __b, const int __imm, mve_pred16_t __p)
+{
+  uint16x8_t __res = __builtin_mve_vshlcq_m_vec_uv8hi (__a, *__b, __imm, __p);
+  *__b = __builtin_mve_vshlcq_m_carry_uv8hi (__a, *__b, __imm, __p);
+  return __res;
+}
+
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlcq_m_s32 (int32x4_t __a, uint32_t * __b, const int __imm, mve_pred16_t __p)
+{
+  int32x4_t __res = __builtin_mve_vshlcq_m_vec_sv4si (__a, *__b, __imm, __p);
+  *__b = __builtin_mve_vshlcq_m_carry_sv4si (__a, *__b, __imm, __p);
+  return __res;
+}
+
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+__arm_vshlcq_m_u32 (uint32x4_t __a, uint32_t * __b, const int __imm, mve_pred16_t __p)
+{
+  uint32x4_t __res = __builtin_mve_vshlcq_m_vec_uv4si (__a, *__b, __imm, __p);
+  *__b = __builtin_mve_vshlcq_m_carry_uv4si (__a, *__b, __imm, __p);
+  return __res;
+}
+
 #if (__ARM_FEATURE_MVE & 2) /* MVE Floating point.  */
 
 __extension__ extern __inline void
@@ -27485,6 +27545,16 @@ extern void *__ARM_undef;
   int (*)[__ARM_mve_type_uint32_t]: __arm_vdwdupq_n_u8 (__ARM_mve_coerce(__p0, uint32_t), p1, p2), \
   int (*)[__ARM_mve_type_uint32_t_ptr]: __arm_vdwdupq_wb_u8 (__ARM_mve_coerce(__p0, uint32_t *), p1, p2));})
 
+#define vshlcq_m(p0,p1,p2,p3) __arm_vshlcq_m(p0,p1,p2,p3)
+#define __arm_vshlcq_m(p0,p1,p2,p3) ({ __typeof(p0) __p0 = (p0); \
+  _Generic( (int (*)[__ARM_mve_typeid(__p0)])0, \
+  int (*)[__ARM_mve_type_int8x16_t]: __arm_vshlcq_m_s8 (__ARM_mve_coerce(__p0, int8x16_t), p1, p2, p3), \
+  int (*)[__ARM_mve_type_int16x8_t]: __arm_vshlcq_m_s16 (__ARM_mve_coerce(__p0, int16x8_t), p1, p2, p3), \
+  int (*)[__ARM_mve_type_int32x4_t]: __arm_vshlcq_m_s32 (__ARM_mve_coerce(__p0, int32x4_t), p1, p2, p3), \
+  int (*)[__ARM_mve_type_uint8x16_t]: __arm_vshlcq_m_u8 (__ARM_mve_coerce(__p0, uint8x16_t), p1, p2, p3), \
+  int (*)[__ARM_mve_type_uint16x8_t]: __arm_vshlcq_m_u16 (__ARM_mve_coerce(__p0, uint16x8_t), p1, p2, p3), \
+  int (*)[__ARM_mve_type_uint32x4_t]: __arm_vshlcq_m_u32 (__ARM_mve_coerce(__p0, uint32x4_t), p1, p2, p3));})
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/gcc/config/arm/arm_mve_builtins.def b/gcc/config/arm/arm_mve_builtins.def
index 9379927ae50..2fb975944b9 100644
--- a/gcc/config/arm/arm_mve_builtins.def
+++ b/gcc/config/arm/arm_mve_builtins.def
@@ -890,3 +890,7 @@ VAR1 (UQSHL, urshr_, si)
 VAR1 (UQSHL, urshrl_, di)
 VAR1 (UQSHL, uqshl_, si)
 VAR1 (UQSHL, uqshll_, di)
+VAR3 (QUADOP_NONE_NONE_UNONE_IMM_UNONE, vshlcq_m_vec_s, v16qi, v8hi, v4si)
+VAR3 (QUADOP_NONE_NONE_UNONE_IMM_UNONE, vshlcq_m_carry_s, v16qi, v8hi, v4si)
+VAR3 (QUADOP_UNONE_UNONE_UNONE_IMM_UNONE, vshlcq_m_vec_u, v16qi, v8hi, v4si)
+VAR3 (QUADOP_UNONE_UNONE_UNONE_IMM_UNONE, vshlcq_m_carry_u, v16qi, v8hi, v4si)
diff --git a/gcc/config/arm/mve.md b/gcc/config/arm/mve.md
index 6522dc89fbd..df602b07840 100644
--- a/gcc/config/arm/mve.md
+++ b/gcc/config/arm/mve.md
@@ -215,8 +215,8 @@
 			 VADCQ_M_S VSBCIQ_U VSBCIQ_S VSBCIQ_M_U VSBCIQ_M_S
 			 VSBCQ_U VSBCQ_S VSBCQ_M_U VSBCQ_M_S VADCIQ_U VADCIQ_M_U
 			 VADCIQ_S VADCIQ_M_S VLD2Q VLD4Q VST2Q SRSHRL SRSHR
-			 URSHR URSHRL SQRSHR UQRSHL UQRSHLL_64
-			 UQRSHLL_48 SQRSHRL_64 SQRSHRL_48])
+			 URSHR URSHRL SQRSHR UQRSHL UQRSHLL_64 VSHLCQ_M_U
+			 UQRSHLL_48 SQRSHRL_64 SQRSHRL_48 VSHLCQ_M_S])
 
 (define_mode_attr MVE_CNVT [(V8HI "V8HF") (V4SI "V4SF") (V8HF "V8HI")
 			    (V4SF "V4SI")])
@@ -394,7 +394,8 @@
 		       (VADCQ_U "u")  (VADCQ_M_U "u") (VADCQ_S "s")
 		       (VADCIQ_U "u") (VADCIQ_M_U "u") (VADCIQ_S "s")
 		       (VADCIQ_M_S "s") (SQRSHRL_64 "64") (SQRSHRL_48 "48")
-		       (UQRSHLL_64 "64") (UQRSHLL_48 "48")])
+		       (UQRSHLL_64 "64") (UQRSHLL_48 "48") (VSHLCQ_M_S "s")
+		       (VSHLCQ_M_U "u")])
 
 (define_int_attr mode1 [(VCTP8Q "8") (VCTP16Q "16") (VCTP32Q "32")
 			(VCTP64Q "64") (VCTP8Q_M "8") (VCTP16Q_M "16")
@@ -662,6 +663,7 @@
 (define_int_iterator VADCQ_M [VADCQ_M_U VADCQ_M_S])
 (define_int_iterator UQRSHLLQ [UQRSHLL_64 UQRSHLL_48])
 (define_int_iterator SQRSHRLQ [SQRSHRL_64 SQRSHRL_48])
+(define_int_iterator VSHLCQ_M [VSHLCQ_M_S VSHLCQ_M_U])
 
 (define_insn "*mve_mov<mode>"
   [(set (match_operand:MVE_types 0 "nonimmediate_operand" "=w,w,r,w,w,r,w,Us")
@@ -11152,3 +11154,57 @@
   "TARGET_HAVE_MVE"
   "sqshll%?\\t%Q1, %R1, %2"
   [(set_attr "predicable" "yes")])
+
+;;
+;; [vshlcq_m_u vshlcq_m_s]
+;;
+(define_expand "mve_vshlcq_m_vec_<supf><mode>"
+ [(match_operand:MVE_2 0 "s_register_operand")
+  (match_operand:MVE_2 1 "s_register_operand")
+  (match_operand:SI 2 "s_register_operand")
+  (match_operand:SI 3 "mve_imm_32")
+  (match_operand:HI 4 "vpr_register_operand")
+  (unspec:MVE_2 [(const_int 0)] VSHLCQ_M)]
+ "TARGET_HAVE_MVE"
+{
+  rtx ignore_wb = gen_reg_rtx (SImode);
+  emit_insn (gen_mve_vshlcq_m_<supf><mode> (operands[0], ignore_wb, operands[1],
+					    operands[2], operands[3],
+					    operands[4]));
+  DONE;
+})
+
+(define_expand "mve_vshlcq_m_carry_<supf><mode>"
+ [(match_operand:SI 0 "s_register_operand")
+  (match_operand:MVE_2 1 "s_register_operand")
+  (match_operand:SI 2 "s_register_operand")
+  (match_operand:SI 3 "mve_imm_32")
+  (match_operand:HI 4 "vpr_register_operand")
+  (unspec:MVE_2 [(const_int 0)] VSHLCQ_M)]
+ "TARGET_HAVE_MVE"
+{
+  rtx ignore_vec = gen_reg_rtx (<MODE>mode);
+  emit_insn (gen_mve_vshlcq_m_<supf><mode> (ignore_vec, operands[0],
+					    operands[1], operands[2],
+					    operands[3], operands[4]));
+  DONE;
+})
+
+(define_insn "mve_vshlcq_m_<supf><mode>"
+ [(set (match_operand:MVE_2 0 "s_register_operand" "=w")
+       (unspec:MVE_2 [(match_operand:MVE_2 2 "s_register_operand" "0")
+		      (match_operand:SI 3 "s_register_operand" "1")
+		      (match_operand:SI 4 "mve_imm_32" "Rf")
+		      (match_operand:HI 5 "vpr_register_operand" "Up")]
+	VSHLCQ_M))
+  (set (match_operand:SI  1 "s_register_operand" "=r")
+       (unspec:SI [(match_dup 2)
+		   (match_dup 3)
+		   (match_dup 4)
+		   (match_dup 5)]
+	VSHLCQ_M))
+ ]
+ "TARGET_HAVE_MVE"
+ "vpst\;vshlct\t%q0, %1, %4"
+ [(set_attr "type" "mve_move")
+  (set_attr "length" "8")])
diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
index c13710b3764..c1ab2bc79bb 100644
--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog
@@ -1,3 +1,14 @@
+2020-03-23  Srinath Parvathaneni  <srinath.parvathaneni@arm.com>
+            Andre Vieira  <andre.simoesdiasvieira@arm.com>
+            Mihail Ionescu  <mihail.ionescu@arm.com>
+
+	* gcc.target/arm/mve/intrinsics/vshlcq_m_s16.c: New test.
+	* gcc.target/arm/mve/intrinsics/vshlcq_m_s32.c: Likewise.
+	* gcc.target/arm/mve/intrinsics/vshlcq_m_s8.c: Likewise.
+	* gcc.target/arm/mve/intrinsics/vshlcq_m_u16.c: Likewise.
+	* gcc.target/arm/mve/intrinsics/vshlcq_m_u32.c: Likewise.
+	* gcc.target/arm/mve/intrinsics/vshlcq_m_u8.c: Likewise.
+
 2020-03-23  Srinath Parvathaneni  <srinath.parvathaneni@arm.com>
 
 	* gcc.target/arm/mve/intrinsics/asrl.c: New test.
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlcq_m_s16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlcq_m_s16.c
new file mode 100644
index 00000000000..c4c77f2559c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlcq_m_s16.c
@@ -0,0 +1,23 @@
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int16x8_t
+foo (int16x8_t a, uint32_t * b, mve_pred16_t p)
+{
+  return vshlcq_m_s16 (a, b, 32, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vshlct"  }  } */
+
+int16x8_t
+foo1 (int16x8_t a, uint32_t * b, mve_pred16_t p)
+{
+  return vshlcq_m (a, b, 32, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vshlct"  }  } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlcq_m_s32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlcq_m_s32.c
new file mode 100644
index 00000000000..20cfd09c82d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlcq_m_s32.c
@@ -0,0 +1,23 @@
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int32x4_t
+foo (int32x4_t a, uint32_t * b, mve_pred16_t p)
+{
+  return vshlcq_m_s32 (a, b, 1, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vshlct"  }  } */
+
+int32x4_t
+foo1 (int32x4_t a, uint32_t * b, mve_pred16_t p)
+{
+  return vshlcq_m (a, b, 1, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vshlct"  }  } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlcq_m_s8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlcq_m_s8.c
new file mode 100644
index 00000000000..33dde10e4a8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlcq_m_s8.c
@@ -0,0 +1,23 @@
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+int8x16_t
+foo (int8x16_t a, uint32_t * b, mve_pred16_t p)
+{
+  return vshlcq_m_s8 (a, b, 1, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vshlct"  }  } */
+
+int8x16_t
+foo1 (int8x16_t a, uint32_t * b, mve_pred16_t p)
+{
+  return vshlcq_m (a, b, 1, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vshlct"  }  } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlcq_m_u16.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlcq_m_u16.c
new file mode 100644
index 00000000000..2bf69f0c465
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlcq_m_u16.c
@@ -0,0 +1,23 @@
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint16x8_t
+foo (uint16x8_t a, uint32_t * b, mve_pred16_t p)
+{
+  return vshlcq_m_u16 (a, b, 1, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vshlct"  }  } */
+
+uint16x8_t
+foo1 (uint16x8_t a, uint32_t * b, mve_pred16_t p)
+{
+  return vshlcq_m (a, b, 1, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vshlct"  }  } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlcq_m_u32.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlcq_m_u32.c
new file mode 100644
index 00000000000..e6650302ea7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlcq_m_u32.c
@@ -0,0 +1,23 @@
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint32x4_t
+foo (uint32x4_t a, uint32_t * b, mve_pred16_t p)
+{
+  return vshlcq_m_u32 (a, b, 1, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vshlct"  }  } */
+
+uint32x4_t
+foo1 (uint32x4_t a, uint32_t * b, mve_pred16_t p)
+{
+  return vshlcq_m (a, b, 1, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vshlct"  }  } */
diff --git a/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlcq_m_u8.c b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlcq_m_u8.c
new file mode 100644
index 00000000000..95857f09371
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/mve/intrinsics/vshlcq_m_u8.c
@@ -0,0 +1,23 @@
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O2" } */
+
+#include "arm_mve.h"
+
+uint8x16_t
+foo (uint8x16_t a, uint32_t * b, mve_pred16_t p)
+{
+  return vshlcq_m_u8 (a, b, 1, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vshlct"  }  } */
+
+uint8x16_t
+foo1 (uint8x16_t a, uint32_t * b, mve_pred16_t p)
+{
+  return vshlcq_m (a, b, 1, p);
+}
+
+/* { dg-final { scan-assembler "vpst" } } */
+/* { dg-final { scan-assembler "vshlct"  }  } */

