From: Christophe Lyon <christophe.lyon@linaro.org>
To: gcc-patches@gcc.gnu.org
Subject: [Patch ARM-AArch64/testsuite v2 04/21] Add comparison operators: vceq, vcge, vcgt, vcle and vclt.
Date: Tue, 01 Jul 2014 10:07:00 -0000 [thread overview]
Message-ID: <1404209174-25364-5-git-send-email-christophe.lyon@linaro.org> (raw)
In-Reply-To: <1404209174-25364-1-git-send-email-christophe.lyon@linaro.org>
diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
index 73709c6..7af7fd0 100644
--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog
@@ -1,5 +1,14 @@
2014-06-30 Christophe Lyon <christophe.lyon@linaro.org>
+ * gcc.target/aarch64/neon-intrinsics/cmp_op.inc: New file.
+ * gcc.target/aarch64/neon-intrinsics/vceq.c: Likewise.
+ * gcc.target/aarch64/neon-intrinsics/vcge.c: Likewise.
+ * gcc.target/aarch64/neon-intrinsics/vcgt.c: Likewise.
+ * gcc.target/aarch64/neon-intrinsics/vcle.c: Likewise.
+ * gcc.target/aarch64/neon-intrinsics/vclt.c: Likewise.
+
+2014-06-30 Christophe Lyon <christophe.lyon@linaro.org>
+
* gcc.target/aarch64/neon-intrinsics/binary_op.inc: New file.
* gcc.target/aarch64/neon-intrinsics/vadd.c: Likewise.
* gcc.target/aarch64/neon-intrinsics/vand.c: Likewise.
diff --git a/gcc/testsuite/gcc.target/aarch64/neon-intrinsics/cmp_op.inc b/gcc/testsuite/gcc.target/aarch64/neon-intrinsics/cmp_op.inc
new file mode 100644
index 0000000..a09c5f5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/neon-intrinsics/cmp_op.inc
@@ -0,0 +1,224 @@
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+#include <math.h>
+
+/* Additional expected results declaration, they are initialized in
+ each test file. */
+extern ARRAY(expected_uint, uint, 8, 8);
+extern ARRAY(expected_uint, uint, 16, 4);
+extern ARRAY(expected_uint, uint, 32, 2);
+extern ARRAY(expected_q_uint, uint, 8, 16);
+extern ARRAY(expected_q_uint, uint, 16, 8);
+extern ARRAY(expected_q_uint, uint, 32, 4);
+extern ARRAY(expected_float, uint, 32, 2);
+extern ARRAY(expected_q_float, uint, 32, 4);
+extern ARRAY(expected_uint2, uint, 32, 2);
+extern ARRAY(expected_uint3, uint, 32, 2);
+extern ARRAY(expected_uint4, uint, 32, 2);
+extern ARRAY(expected_nan, uint, 32, 2);
+extern ARRAY(expected_mnan, uint, 32, 2);
+extern ARRAY(expected_nan2, uint, 32, 2);
+extern ARRAY(expected_inf, uint, 32, 2);
+extern ARRAY(expected_minf, uint, 32, 2);
+extern ARRAY(expected_inf2, uint, 32, 2);
+extern ARRAY(expected_mzero, uint, 32, 2);
+extern ARRAY(expected_p8, uint, 8, 8);
+extern ARRAY(expected_q_p8, uint, 8, 16);
+
+#define FNNAME1(NAME) exec_ ## NAME
+#define FNNAME(NAME) FNNAME1(NAME)
+
+void FNNAME (INSN_NAME) (void)
+{
+ /* Basic test: y=vcomp(x1,x2), then store the result. */
+#define TEST_VCOMP1(INSN, Q, T1, T2, T3, W, N) \
+ VECT_VAR(vector_res, T3, W, N) = \
+ INSN##Q##_##T2##W(VECT_VAR(vector, T1, W, N), \
+ VECT_VAR(vector2, T1, W, N)); \
+ vst1##Q##_u##W(VECT_VAR(result, T3, W, N), VECT_VAR(vector_res, T3, W, N))
+
+#define TEST_VCOMP(INSN, Q, T1, T2, T3, W, N) \
+ TEST_VCOMP1(INSN, Q, T1, T2, T3, W, N)
+
+ /* No need for 64 bits elements. */
+ DECL_VARIABLE(vector, int, 8, 8);
+ DECL_VARIABLE(vector, int, 16, 4);
+ DECL_VARIABLE(vector, int, 32, 2);
+ DECL_VARIABLE(vector, uint, 8, 8);
+ DECL_VARIABLE(vector, uint, 16, 4);
+ DECL_VARIABLE(vector, uint, 32, 2);
+ DECL_VARIABLE(vector, float, 32, 2);
+ DECL_VARIABLE(vector, int, 8, 16);
+ DECL_VARIABLE(vector, int, 16, 8);
+ DECL_VARIABLE(vector, int, 32, 4);
+ DECL_VARIABLE(vector, uint, 8, 16);
+ DECL_VARIABLE(vector, uint, 16, 8);
+ DECL_VARIABLE(vector, uint, 32, 4);
+ DECL_VARIABLE(vector, float, 32, 4);
+
+ DECL_VARIABLE(vector2, int, 8, 8);
+ DECL_VARIABLE(vector2, int, 16, 4);
+ DECL_VARIABLE(vector2, int, 32, 2);
+ DECL_VARIABLE(vector2, uint, 8, 8);
+ DECL_VARIABLE(vector2, uint, 16, 4);
+ DECL_VARIABLE(vector2, uint, 32, 2);
+ DECL_VARIABLE(vector2, float, 32, 2);
+ DECL_VARIABLE(vector2, int, 8, 16);
+ DECL_VARIABLE(vector2, int, 16, 8);
+ DECL_VARIABLE(vector2, int, 32, 4);
+ DECL_VARIABLE(vector2, uint, 8, 16);
+ DECL_VARIABLE(vector2, uint, 16, 8);
+ DECL_VARIABLE(vector2, uint, 32, 4);
+ DECL_VARIABLE(vector2, float, 32, 4);
+
+ DECL_VARIABLE(vector_res, uint, 8, 8);
+ DECL_VARIABLE(vector_res, uint, 16, 4);
+ DECL_VARIABLE(vector_res, uint, 32, 2);
+ DECL_VARIABLE(vector_res, uint, 8, 16);
+ DECL_VARIABLE(vector_res, uint, 16, 8);
+ DECL_VARIABLE(vector_res, uint, 32, 4);
+
+ clean_results ();
+
+ /* There is no 64 bits variant, don't use the generic initializer. */
+ VLOAD(vector, buffer, , int, s, 8, 8);
+ VLOAD(vector, buffer, , int, s, 16, 4);
+ VLOAD(vector, buffer, , int, s, 32, 2);
+ VLOAD(vector, buffer, , uint, u, 8, 8);
+ VLOAD(vector, buffer, , uint, u, 16, 4);
+ VLOAD(vector, buffer, , uint, u, 32, 2);
+ VLOAD(vector, buffer, , float, f, 32, 2);
+
+ VLOAD(vector, buffer, q, int, s, 8, 16);
+ VLOAD(vector, buffer, q, int, s, 16, 8);
+ VLOAD(vector, buffer, q, int, s, 32, 4);
+ VLOAD(vector, buffer, q, uint, u, 8, 16);
+ VLOAD(vector, buffer, q, uint, u, 16, 8);
+ VLOAD(vector, buffer, q, uint, u, 32, 4);
+ VLOAD(vector, buffer, q, float, f, 32, 4);
+
+ /* Choose init value arbitrarily, will be used for vector
+ comparison. */
+ VDUP(vector2, , int, s, 8, 8, -10);
+ VDUP(vector2, , int, s, 16, 4, -14);
+ VDUP(vector2, , int, s, 32, 2, -16);
+ VDUP(vector2, , uint, u, 8, 8, 0xF3);
+ VDUP(vector2, , uint, u, 16, 4, 0xFFF2);
+ VDUP(vector2, , uint, u, 32, 2, 0xFFFFFFF1);
+ VDUP(vector2, , float, f, 32, 2, -15.0f);
+
+ VDUP(vector2, q, int, s, 8, 16, -4);
+ VDUP(vector2, q, int, s, 16, 8, -10);
+ VDUP(vector2, q, int, s, 32, 4, -14);
+ VDUP(vector2, q, uint, u, 8, 16, 0xF4);
+ VDUP(vector2, q, uint, u, 16, 8, 0xFFF6);
+ VDUP(vector2, q, uint, u, 32, 4, 0xFFFFFFF2);
+ VDUP(vector2, q, float, f, 32, 4, -14.0f);
+
+ /* The comparison operators produce only unsigned results, which
+ means that our tests with uint* inputs write their results in the
+ same vectors as the int* variants. As a consequence, we have to
+ execute and test the int* first, then the uint* ones.
+ Same thing for float and poly8.
+ */
+
+ /* Apply operator named INSN_NAME. */
+ TEST_VCOMP(INSN_NAME, , int, s, uint, 8, 8);
+ TEST_VCOMP(INSN_NAME, , int, s, uint, 16, 4);
+ TEST_VCOMP(INSN_NAME, , int, s, uint, 32, 2);
+ TEST_VCOMP(INSN_NAME, q, int, s, uint, 8, 16);
+ TEST_VCOMP(INSN_NAME, q, int, s, uint, 16, 8);
+ TEST_VCOMP(INSN_NAME, q, int, s, uint, 32, 4);
+
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected, "");
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected, "");
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected, "");
+ CHECK(TEST_MSG, uint, 8, 16, PRIx8, expected, "");
+ CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected, "");
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected, "");
+
+ /* Now the uint* variants. */
+ TEST_VCOMP(INSN_NAME, , uint, u, uint, 8, 8);
+ TEST_VCOMP(INSN_NAME, , uint, u, uint, 16, 4);
+ TEST_VCOMP(INSN_NAME, , uint, u, uint, 32, 2);
+ TEST_VCOMP(INSN_NAME, q, uint, u, uint, 8, 16);
+ TEST_VCOMP(INSN_NAME, q, uint, u, uint, 16, 8);
+ TEST_VCOMP(INSN_NAME, q, uint, u, uint, 32, 4);
+
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_uint, "");
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_uint, "");
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_uint, "");
+ CHECK(TEST_MSG, uint, 8, 16, PRIx8, expected_q_uint, "");
+ CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected_q_uint, "");
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_q_uint, "");
+
+ /* The float variants. */
+ TEST_VCOMP(INSN_NAME, , float, f, uint, 32, 2);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_float, "");
+
+ TEST_VCOMP(INSN_NAME, q, float, f, uint, 32, 4);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_q_float, "");
+
+ /* Some "special" input values to test some corner cases. */
+ /* Extra tests to have 100% coverage on all the variants. */
+ VDUP(vector2, , uint, u, 32, 2, 0xFFFFFFF0);
+ TEST_VCOMP(INSN_NAME, , uint, u, uint, 32, 2);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_uint2, "uint 0xfffffff0");
+
+ VDUP(vector2, , int, s, 32, 2, -15);
+ TEST_VCOMP(INSN_NAME, , int, s, uint, 32, 2);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_uint3, "int -15");
+
+ VDUP(vector2, , float, f, 32, 2, -16.0f);
+ TEST_VCOMP(INSN_NAME, , float, f, uint, 32, 2);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_uint4, "float -16.0f");
+
+
+ /* Extra FP tests with special values (NaN, ....). */
+ VDUP(vector, , float, f, 32, 2, 1.0);
+ VDUP(vector2, , float, f, 32, 2, NAN);
+ TEST_VCOMP(INSN_NAME, , float, f, uint, 32, 2);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_nan, "FP special (NaN)");
+
+ VDUP(vector, , float, f, 32, 2, 1.0);
+ VDUP(vector2, , float, f, 32, 2, -NAN);
+ TEST_VCOMP(INSN_NAME, , float, f, uint, 32, 2);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_mnan, " FP special (-NaN)");
+
+ VDUP(vector, , float, f, 32, 2, NAN);
+ VDUP(vector2, , float, f, 32, 2, 1.0);
+ TEST_VCOMP(INSN_NAME, , float, f, uint, 32, 2);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_nan2, " FP special (NaN)");
+
+ VDUP(vector, , float, f, 32, 2, 1.0);
+ VDUP(vector2, , float, f, 32, 2, HUGE_VALF);
+ TEST_VCOMP(INSN_NAME, , float, f, uint, 32, 2);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_inf, " FP special (inf)");
+
+ VDUP(vector, , float, f, 32, 2, 1.0);
+ VDUP(vector2, , float, f, 32, 2, -HUGE_VALF);
+ TEST_VCOMP(INSN_NAME, , float, f, uint, 32, 2);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_minf, " FP special (-inf)");
+
+ VDUP(vector, , float, f, 32, 2, HUGE_VALF);
+ VDUP(vector2, , float, f, 32, 2, 1.0);
+ TEST_VCOMP(INSN_NAME, , float, f, uint, 32, 2);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_inf2, " FP special (inf)");
+
+ VDUP(vector, , float, f, 32, 2, -0.0);
+ VDUP(vector2, , float, f, 32, 2, 0.0);
+ TEST_VCOMP(INSN_NAME, , float, f, uint, 32, 2);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_mzero, " FP special (-0.0)");
+
+#ifdef EXTRA_TESTS
+ EXTRA_TESTS();
+#endif
+}
+
+int main (void)
+{
+ FNNAME (INSN_NAME) ();
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/neon-intrinsics/vceq.c b/gcc/testsuite/gcc.target/aarch64/neon-intrinsics/vceq.c
new file mode 100644
index 0000000..aa095df
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/neon-intrinsics/vceq.c
@@ -0,0 +1,113 @@
+#define INSN_NAME vceq
+#define TEST_MSG "VCEQ/VCEQQ"
+
+/* Extra tests for _p8 variants, which exist only for vceq. */
+void exec_vceq_p8(void);
+#define EXTRA_TESTS exec_vceq_p8
+
+#include "cmp_op.inc"
+
+/* Expected results. */
+VECT_VAR_DECL(expected,int,8,8) [] = { 0x33, 0x33, 0x33, 0x33,
+ 0x33, 0x33, 0x33, 0x33 };
+VECT_VAR_DECL(expected,int,16,4) [] = { 0x3333, 0x3333, 0x3333, 0x3333 };
+VECT_VAR_DECL(expected,int,32,2) [] = { 0x33333333, 0x33333333 };
+VECT_VAR_DECL(expected,int,64,1) [] = { 0x3333333333333333 };
+VECT_VAR_DECL(expected,uint,8,8) [] = { 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0 };
+VECT_VAR_DECL(expected,uint,16,4) [] = { 0x0, 0x0, 0xffff, 0x0 };
+VECT_VAR_DECL(expected,uint,32,2) [] = { 0xffffffff, 0x0 };
+VECT_VAR_DECL(expected,uint,64,1) [] = { 0x3333333333333333 };
+VECT_VAR_DECL(expected,poly,8,8) [] = { 0x33, 0x33, 0x33, 0x33,
+ 0x33, 0x33, 0x33, 0x33 };
+VECT_VAR_DECL(expected,poly,16,4) [] = { 0x3333, 0x3333, 0x3333, 0x3333 };
+VECT_VAR_DECL(expected,hfloat,32,2) [] = { 0x33333333, 0x33333333 };
+VECT_VAR_DECL(expected,int,8,16) [] = { 0x33, 0x33, 0x33, 0x33,
+ 0x33, 0x33, 0x33, 0x33,
+ 0x33, 0x33, 0x33, 0x33,
+ 0x33, 0x33, 0x33, 0x33 };
+VECT_VAR_DECL(expected,int,16,8) [] = { 0x3333, 0x3333, 0x3333, 0x3333,
+ 0x3333, 0x3333, 0x3333, 0x3333 };
+VECT_VAR_DECL(expected,int,32,4) [] = { 0x33333333, 0x33333333,
+ 0x33333333, 0x33333333 };
+VECT_VAR_DECL(expected,int,64,2) [] = { 0x3333333333333333,
+ 0x3333333333333333 };
+VECT_VAR_DECL(expected,uint,8,16) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0,
+ 0xff, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected,uint,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0xffff, 0x0 };
+VECT_VAR_DECL(expected,uint,32,4) [] = { 0x0, 0x0, 0xffffffff, 0x0 };
+VECT_VAR_DECL(expected,uint,64,2) [] = { 0x3333333333333333,
+ 0x3333333333333333 };
+VECT_VAR_DECL(expected,poly,8,16) [] = { 0x33, 0x33, 0x33, 0x33,
+ 0x33, 0x33, 0x33, 0x33,
+ 0x33, 0x33, 0x33, 0x33,
+ 0x33, 0x33, 0x33, 0x33 };
+VECT_VAR_DECL(expected,poly,16,8) [] = { 0x3333, 0x3333, 0x3333, 0x3333,
+ 0x3333, 0x3333, 0x3333, 0x3333 };
+VECT_VAR_DECL(expected,hfloat,32,4) [] = { 0x33333333, 0x33333333,
+ 0x33333333, 0x33333333 };
+
+VECT_VAR_DECL(expected_uint,uint,8,8) [] = { 0x0, 0x0, 0x0, 0xff,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_uint,uint,16,4) [] = { 0x0, 0x0, 0xffff, 0x0 };
+VECT_VAR_DECL(expected_uint,uint,32,2) [] = { 0x0, 0xffffffff };
+
+VECT_VAR_DECL(expected_q_uint,uint,8,16) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0xff, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_q_uint,uint,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0xffff, 0x0 };
+VECT_VAR_DECL(expected_q_uint,uint,32,4) [] = { 0x0, 0x0, 0xffffffff, 0x0 };
+
+VECT_VAR_DECL(expected_float,uint,32,2) [] = { 0x0, 0xffffffff };
+VECT_VAR_DECL(expected_q_float,uint,32,4) [] = { 0x0, 0x0, 0xffffffff, 0x0 };
+
+VECT_VAR_DECL(expected_uint2,uint,32,2) [] = { 0xffffffff, 0x0 };
+VECT_VAR_DECL(expected_uint3,uint,32,2) [] = { 0x0, 0xffffffff };
+VECT_VAR_DECL(expected_uint4,uint,32,2) [] = { 0xffffffff, 0x0 };
+
+VECT_VAR_DECL(expected_nan,uint,32,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_mnan,uint,32,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_nan2,uint,32,2) [] = { 0x0, 0x0 };
+
+VECT_VAR_DECL(expected_inf,uint,32,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_minf,uint,32,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_inf2,uint,32,2) [] = { 0x0, 0x0 };
+
+VECT_VAR_DECL(expected_mzero,uint,32,2) [] = { 0xffffffff, 0xffffffff };
+
+VECT_VAR_DECL(expected_p8,uint,8,8) [] = { 0x0, 0x0, 0x0, 0xff,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_q_p8,uint,8,16) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0xff, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+
+void exec_vceq_p8(void)
+{
+ DECL_VARIABLE(vector, poly, 8, 8);
+ DECL_VARIABLE(vector, poly, 8, 16);
+
+ DECL_VARIABLE(vector2, poly, 8, 8);
+ DECL_VARIABLE(vector2, poly, 8, 16);
+
+ DECL_VARIABLE(vector_res, uint, 8, 8);
+ DECL_VARIABLE(vector_res, uint, 8, 16);
+
+ clean_results ();
+
+ VLOAD(vector, buffer, , poly, p, 8, 8);
+ VLOAD(vector, buffer, q, poly, p, 8, 16);
+
+ VDUP(vector2, , poly, p, 8, 8, 0xF3);
+ VDUP(vector2, q, poly, p, 8, 16, 0xF4);
+
+ TEST_VCOMP(INSN_NAME, , poly, p, uint, 8, 8);
+ TEST_VCOMP(INSN_NAME, q, poly, p, uint, 8, 16);
+
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_p8, "p8");
+ CHECK(TEST_MSG, uint, 8, 16, PRIx8, expected_q_p8, "p8");
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/neon-intrinsics/vcge.c b/gcc/testsuite/gcc.target/aarch64/neon-intrinsics/vcge.c
new file mode 100644
index 0000000..236fd82
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/neon-intrinsics/vcge.c
@@ -0,0 +1,76 @@
+#define INSN_NAME vcge
+#define TEST_MSG "VCGE/VCGEQ"
+
+#include "cmp_op.inc"
+
+/* Expected results. */
+VECT_VAR_DECL(expected,int,8,8) [] = { 0x33, 0x33, 0x33, 0x33,
+ 0x33, 0x33, 0x33, 0x33 };
+VECT_VAR_DECL(expected,int,16,4) [] = { 0x3333, 0x3333, 0x3333, 0x3333 };
+VECT_VAR_DECL(expected,int,32,2) [] = { 0x33333333, 0x33333333 };
+VECT_VAR_DECL(expected,int,64,1) [] = { 0x3333333333333333 };
+VECT_VAR_DECL(expected,uint,8,8) [] = { 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff };
+VECT_VAR_DECL(expected,uint,16,4) [] = { 0x0, 0x0, 0xffff, 0xffff };
+VECT_VAR_DECL(expected,uint,32,2) [] = { 0xffffffff, 0xffffffff };
+VECT_VAR_DECL(expected,uint,64,1) [] = { 0x3333333333333333 };
+VECT_VAR_DECL(expected,poly,8,8) [] = { 0x33, 0x33, 0x33, 0x33,
+ 0x33, 0x33, 0x33, 0x33 };
+VECT_VAR_DECL(expected,poly,16,4) [] = { 0x3333, 0x3333, 0x3333, 0x3333 };
+VECT_VAR_DECL(expected,hfloat,32,2) [] = { 0x33333333, 0x33333333 };
+VECT_VAR_DECL(expected,int,8,16) [] = { 0x33, 0x33, 0x33, 0x33,
+ 0x33, 0x33, 0x33, 0x33,
+ 0x33, 0x33, 0x33, 0x33,
+ 0x33, 0x33, 0x33, 0x33 };
+VECT_VAR_DECL(expected,int,16,8) [] = { 0x3333, 0x3333, 0x3333, 0x3333,
+ 0x3333, 0x3333, 0x3333, 0x3333 };
+VECT_VAR_DECL(expected,int,32,4) [] = { 0x33333333, 0x33333333,
+ 0x33333333, 0x33333333 };
+VECT_VAR_DECL(expected,int,64,2) [] = { 0x3333333333333333,
+ 0x3333333333333333 };
+VECT_VAR_DECL(expected,uint,8,16) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0,
+ 0xff, 0xff, 0xff, 0xff };
+VECT_VAR_DECL(expected,uint,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0xffff, 0xffff };
+VECT_VAR_DECL(expected,uint,32,4) [] = { 0x0, 0x0, 0xffffffff, 0xffffffff };
+VECT_VAR_DECL(expected,uint,64,2) [] = { 0x3333333333333333,
+ 0x3333333333333333 };
+VECT_VAR_DECL(expected,poly,8,16) [] = { 0x33, 0x33, 0x33, 0x33,
+ 0x33, 0x33, 0x33, 0x33,
+ 0x33, 0x33, 0x33, 0x33,
+ 0x33, 0x33, 0x33, 0x33 };
+VECT_VAR_DECL(expected,poly,16,8) [] = { 0x3333, 0x3333, 0x3333, 0x3333,
+ 0x3333, 0x3333, 0x3333, 0x3333 };
+VECT_VAR_DECL(expected,hfloat,32,4) [] = { 0x33333333, 0x33333333,
+ 0x33333333, 0x33333333 };
+
+VECT_VAR_DECL(expected_uint,uint,8,8) [] = { 0x0, 0x0, 0x0, 0xff,
+ 0xff, 0xff, 0xff, 0xff };
+VECT_VAR_DECL(expected_uint,uint,16,4) [] = { 0x0, 0x0, 0xffff, 0xffff };
+VECT_VAR_DECL(expected_uint,uint,32,2) [] = { 0x0, 0xffffffff };
+
+VECT_VAR_DECL(expected_q_uint,uint,8,16) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff };
+VECT_VAR_DECL(expected_q_uint,uint,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0xffff, 0xffff };
+VECT_VAR_DECL(expected_q_uint,uint,32,4) [] = { 0x0, 0x0, 0xffffffff, 0xffffffff };
+
+VECT_VAR_DECL(expected_float,uint,32,2) [] = { 0x0, 0xffffffff };
+VECT_VAR_DECL(expected_q_float,uint,32,4) [] = { 0x0, 0x0, 0xffffffff, 0xffffffff };
+
+VECT_VAR_DECL(expected_uint2,uint,32,2) [] = { 0xffffffff, 0xffffffff };
+VECT_VAR_DECL(expected_uint3,uint,32,2) [] = { 0x0, 0xffffffff };
+VECT_VAR_DECL(expected_uint4,uint,32,2) [] = { 0xffffffff, 0xffffffff };
+
+VECT_VAR_DECL(expected_nan,uint,32,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_mnan,uint,32,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_nan2,uint,32,2) [] = { 0x0, 0x0 };
+
+VECT_VAR_DECL(expected_inf,uint,32,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_minf,uint,32,2) [] = { 0xffffffff, 0xffffffff };
+VECT_VAR_DECL(expected_inf2,uint,32,2) [] = { 0xffffffff, 0xffffffff };
+
+VECT_VAR_DECL(expected_mzero,uint,32,2) [] = { 0xffffffff, 0xffffffff };
diff --git a/gcc/testsuite/gcc.target/aarch64/neon-intrinsics/vcgt.c b/gcc/testsuite/gcc.target/aarch64/neon-intrinsics/vcgt.c
new file mode 100644
index 0000000..23aaa01
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/neon-intrinsics/vcgt.c
@@ -0,0 +1,76 @@
+#define INSN_NAME vcgt
+#define TEST_MSG "VCGT/VCGTQ"
+
+#include "cmp_op.inc"
+
+/* Expected results. */
+VECT_VAR_DECL(expected,int,8,8) [] = { 0x33, 0x33, 0x33, 0x33,
+ 0x33, 0x33, 0x33, 0x33 };
+VECT_VAR_DECL(expected,int,16,4) [] = { 0x3333, 0x3333, 0x3333, 0x3333 };
+VECT_VAR_DECL(expected,int,32,2) [] = { 0x33333333, 0x33333333 };
+VECT_VAR_DECL(expected,int,64,1) [] = { 0x3333333333333333 };
+VECT_VAR_DECL(expected,uint,8,8) [] = { 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff };
+VECT_VAR_DECL(expected,uint,16,4) [] = { 0x0, 0x0, 0x0, 0xffff };
+VECT_VAR_DECL(expected,uint,32,2) [] = { 0x0, 0xffffffff };
+VECT_VAR_DECL(expected,uint,64,1) [] = { 0x3333333333333333 };
+VECT_VAR_DECL(expected,poly,8,8) [] = { 0x33, 0x33, 0x33, 0x33,
+ 0x33, 0x33, 0x33, 0x33 };
+VECT_VAR_DECL(expected,poly,16,4) [] = { 0x3333, 0x3333, 0x3333, 0x3333 };
+VECT_VAR_DECL(expected,hfloat,32,2) [] = { 0x33333333, 0x33333333 };
+VECT_VAR_DECL(expected,int,8,16) [] = { 0x33, 0x33, 0x33, 0x33,
+ 0x33, 0x33, 0x33, 0x33,
+ 0x33, 0x33, 0x33, 0x33,
+ 0x33, 0x33, 0x33, 0x33 };
+VECT_VAR_DECL(expected,int,16,8) [] = { 0x3333, 0x3333, 0x3333, 0x3333,
+ 0x3333, 0x3333, 0x3333, 0x3333 };
+VECT_VAR_DECL(expected,int,32,4) [] = { 0x33333333, 0x33333333,
+ 0x33333333, 0x33333333 };
+VECT_VAR_DECL(expected,int,64,2) [] = { 0x3333333333333333,
+ 0x3333333333333333 };
+VECT_VAR_DECL(expected,uint,8,16) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0xff, 0xff, 0xff };
+VECT_VAR_DECL(expected,uint,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0xffff };
+VECT_VAR_DECL(expected,uint,32,4) [] = { 0x0, 0x0, 0x0, 0xffffffff };
+VECT_VAR_DECL(expected,uint,64,2) [] = { 0x3333333333333333,
+ 0x3333333333333333 };
+VECT_VAR_DECL(expected,poly,8,16) [] = { 0x33, 0x33, 0x33, 0x33,
+ 0x33, 0x33, 0x33, 0x33,
+ 0x33, 0x33, 0x33, 0x33,
+ 0x33, 0x33, 0x33, 0x33 };
+VECT_VAR_DECL(expected,poly,16,8) [] = { 0x3333, 0x3333, 0x3333, 0x3333,
+ 0x3333, 0x3333, 0x3333, 0x3333 };
+VECT_VAR_DECL(expected,hfloat,32,4) [] = { 0x33333333, 0x33333333,
+ 0x33333333, 0x33333333 };
+
+VECT_VAR_DECL(expected_uint,uint,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0xff, 0xff, 0xff, 0xff };
+VECT_VAR_DECL(expected_uint,uint,16,4) [] = { 0x0, 0x0, 0x0, 0xffff };
+VECT_VAR_DECL(expected_uint,uint,32,2) [] = { 0x0, 0x0 };
+
+VECT_VAR_DECL(expected_q_uint,uint,8,16) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff };
+VECT_VAR_DECL(expected_q_uint,uint,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0xffff };
+VECT_VAR_DECL(expected_q_uint,uint,32,4) [] = { 0x0, 0x0, 0x0, 0xffffffff };
+
+VECT_VAR_DECL(expected_float,uint,32,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_q_float,uint,32,4) [] = { 0x0, 0x0, 0x0, 0xffffffff };
+
+VECT_VAR_DECL(expected_uint2,uint,32,2) [] = { 0x0, 0xffffffff };
+VECT_VAR_DECL(expected_uint3,uint,32,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_uint4,uint,32,2) [] = { 0x0, 0xffffffff };
+
+VECT_VAR_DECL(expected_nan,uint,32,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_mnan,uint,32,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_nan2,uint,32,2) [] = { 0x0, 0x0 };
+
+VECT_VAR_DECL(expected_inf,uint,32,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_minf,uint,32,2) [] = { 0xffffffff, 0xffffffff };
+VECT_VAR_DECL(expected_inf2,uint,32,2) [] = { 0xffffffff, 0xffffffff };
+
+VECT_VAR_DECL(expected_mzero,uint,32,2) [] = { 0x0, 0x0 };
diff --git a/gcc/testsuite/gcc.target/aarch64/neon-intrinsics/vcle.c b/gcc/testsuite/gcc.target/aarch64/neon-intrinsics/vcle.c
new file mode 100644
index 0000000..e4cad0c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/neon-intrinsics/vcle.c
@@ -0,0 +1,80 @@
+#define INSN_NAME vcle
+#define TEST_MSG "VCLE/VCLEQ"
+
+#include "cmp_op.inc"
+
+/* Expected results. */
+VECT_VAR_DECL(expected,int,8,8) [] = { 0x33, 0x33, 0x33, 0x33,
+ 0x33, 0x33, 0x33, 0x33 };
+VECT_VAR_DECL(expected,int,16,4) [] = { 0x3333, 0x3333, 0x3333, 0x3333 };
+VECT_VAR_DECL(expected,int,32,2) [] = { 0x33333333, 0x33333333 };
+VECT_VAR_DECL(expected,int,64,1) [] = { 0x3333333333333333 };
+VECT_VAR_DECL(expected,uint,8,8) [] = { 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0x0 };
+VECT_VAR_DECL(expected,uint,16,4) [] = { 0xffff, 0xffff, 0xffff, 0x0 };
+VECT_VAR_DECL(expected,uint,32,2) [] = { 0xffffffff, 0x0 };
+VECT_VAR_DECL(expected,uint,64,1) [] = { 0x3333333333333333 };
+VECT_VAR_DECL(expected,poly,8,8) [] = { 0x33, 0x33, 0x33, 0x33,
+ 0x33, 0x33, 0x33, 0x33 };
+VECT_VAR_DECL(expected,poly,16,4) [] = { 0x3333, 0x3333, 0x3333, 0x3333 };
+VECT_VAR_DECL(expected,hfloat,32,2) [] = { 0x33333333, 0x33333333 };
+VECT_VAR_DECL(expected,int,8,16) [] = { 0x33, 0x33, 0x33, 0x33,
+ 0x33, 0x33, 0x33, 0x33,
+ 0x33, 0x33, 0x33, 0x33,
+ 0x33, 0x33, 0x33, 0x33 };
+VECT_VAR_DECL(expected,int,16,8) [] = { 0x3333, 0x3333, 0x3333, 0x3333,
+ 0x3333, 0x3333, 0x3333, 0x3333 };
+VECT_VAR_DECL(expected,int,32,4) [] = { 0x33333333, 0x33333333,
+ 0x33333333, 0x33333333 };
+VECT_VAR_DECL(expected,int,64,2) [] = { 0x3333333333333333,
+ 0x3333333333333333 };
+VECT_VAR_DECL(expected,uint,8,16) [] = { 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected,uint,16,8) [] = { 0xffff, 0xffff, 0xffff, 0xffff,
+ 0xffff, 0xffff, 0xffff, 0x0 };
+VECT_VAR_DECL(expected,uint,32,4) [] = { 0xffffffff, 0xffffffff,
+ 0xffffffff, 0x0 };
+VECT_VAR_DECL(expected,uint,64,2) [] = { 0x3333333333333333,
+ 0x3333333333333333 };
+VECT_VAR_DECL(expected,poly,8,16) [] = { 0x33, 0x33, 0x33, 0x33,
+ 0x33, 0x33, 0x33, 0x33,
+ 0x33, 0x33, 0x33, 0x33,
+ 0x33, 0x33, 0x33, 0x33 };
+VECT_VAR_DECL(expected,poly,16,8) [] = { 0x3333, 0x3333, 0x3333, 0x3333,
+ 0x3333, 0x3333, 0x3333, 0x3333 };
+VECT_VAR_DECL(expected,hfloat,32,4) [] = { 0x33333333, 0x33333333,
+ 0x33333333, 0x33333333 };
+
+VECT_VAR_DECL(expected_uint,uint,8,8) [] = { 0xff, 0xff, 0xff, 0xff,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_uint,uint,16,4) [] = { 0xffff, 0xffff, 0xffff, 0x0 };
+VECT_VAR_DECL(expected_uint,uint,32,2) [] = { 0xffffffff, 0xffffffff };
+
+VECT_VAR_DECL(expected_q_uint,uint,8,16) [] = { 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_q_uint,uint,16,8) [] = { 0xffff, 0xffff, 0xffff, 0xffff,
+ 0xffff, 0xffff, 0xffff, 0x0 };
+VECT_VAR_DECL(expected_q_uint,uint,32,4) [] = { 0xffffffff, 0xffffffff,
+ 0xffffffff, 0x0 };
+
+VECT_VAR_DECL(expected_float,uint,32,2) [] = { 0xffffffff, 0xffffffff };
+VECT_VAR_DECL(expected_q_float,uint,32,4) [] = { 0xffffffff, 0xffffffff,
+ 0xffffffff, 0x0 };
+
+VECT_VAR_DECL(expected_uint2,uint,32,2) [] = { 0xffffffff, 0x0 };
+VECT_VAR_DECL(expected_uint3,uint,32,2) [] = { 0xffffffff, 0xffffffff };
+VECT_VAR_DECL(expected_uint4,uint,32,2) [] = { 0xffffffff, 0x0 };
+
+VECT_VAR_DECL(expected_nan,uint,32,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_mnan,uint,32,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_nan2,uint,32,2) [] = { 0x0, 0x0 };
+
+VECT_VAR_DECL(expected_inf,uint,32,2) [] = { 0xffffffff, 0xffffffff };
+VECT_VAR_DECL(expected_minf,uint,32,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_inf2,uint,32,2) [] = { 0x0, 0x0 };
+
+VECT_VAR_DECL(expected_mzero,uint,32,2) [] = { 0xffffffff, 0xffffffff };
diff --git a/gcc/testsuite/gcc.target/aarch64/neon-intrinsics/vclt.c b/gcc/testsuite/gcc.target/aarch64/neon-intrinsics/vclt.c
new file mode 100644
index 0000000..d437eae
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/neon-intrinsics/vclt.c
@@ -0,0 +1,79 @@
+#define INSN_NAME vclt
+#define TEST_MSG "VCLT/VCLTQ"
+
+#include "cmp_op.inc"
+
+/* Expected results. */
+VECT_VAR_DECL(expected,int,8,8) [] = { 0x33, 0x33, 0x33, 0x33,
+ 0x33, 0x33, 0x33, 0x33 };
+VECT_VAR_DECL(expected,int,16,4) [] = { 0x3333, 0x3333, 0x3333, 0x3333 };
+VECT_VAR_DECL(expected,int,32,2) [] = { 0x33333333, 0x33333333 };
+VECT_VAR_DECL(expected,int,64,1) [] = { 0x3333333333333333 };
+VECT_VAR_DECL(expected,uint,8,8) [] = { 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0x0, 0x0 };
+VECT_VAR_DECL(expected,uint,16,4) [] = { 0xffff, 0xffff, 0x0, 0x0 };
+VECT_VAR_DECL(expected,uint,32,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected,uint,64,1) [] = { 0x3333333333333333 };
+VECT_VAR_DECL(expected,poly,8,8) [] = { 0x33, 0x33, 0x33, 0x33,
+ 0x33, 0x33, 0x33, 0x33 };
+VECT_VAR_DECL(expected,poly,16,4) [] = { 0x3333, 0x3333, 0x3333, 0x3333 };
+VECT_VAR_DECL(expected,hfloat,32,2) [] = { 0x33333333, 0x33333333 };
+VECT_VAR_DECL(expected,int,8,16) [] = { 0x33, 0x33, 0x33, 0x33,
+ 0x33, 0x33, 0x33, 0x33,
+ 0x33, 0x33, 0x33, 0x33,
+ 0x33, 0x33, 0x33, 0x33 };
+VECT_VAR_DECL(expected,int,16,8) [] = { 0x3333, 0x3333, 0x3333, 0x3333,
+ 0x3333, 0x3333, 0x3333, 0x3333 };
+VECT_VAR_DECL(expected,int,32,4) [] = { 0x33333333, 0x33333333,
+ 0x33333333, 0x33333333 };
+VECT_VAR_DECL(expected,int,64,2) [] = { 0x3333333333333333,
+ 0x3333333333333333 };
+VECT_VAR_DECL(expected,uint,8,16) [] = { 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected,uint,16,8) [] = { 0xffff, 0xffff, 0xffff, 0xffff,
+ 0xffff, 0xffff, 0x0, 0x0 };
+VECT_VAR_DECL(expected,uint,32,4) [] = { 0xffffffff, 0xffffffff, 0x0, 0x0 };
+VECT_VAR_DECL(expected,uint,64,2) [] = { 0x3333333333333333,
+ 0x3333333333333333 };
+VECT_VAR_DECL(expected,poly,8,16) [] = { 0x33, 0x33, 0x33, 0x33,
+ 0x33, 0x33, 0x33, 0x33,
+ 0x33, 0x33, 0x33, 0x33,
+ 0x33, 0x33, 0x33, 0x33 };
+VECT_VAR_DECL(expected,poly,16,8) [] = { 0x3333, 0x3333, 0x3333, 0x3333,
+ 0x3333, 0x3333, 0x3333, 0x3333 };
+VECT_VAR_DECL(expected,hfloat,32,4) [] = { 0x33333333, 0x33333333,
+ 0x33333333, 0x33333333 };
+
+VECT_VAR_DECL(expected_uint,uint,8,8) [] = { 0xff, 0xff, 0xff, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_uint,uint,16,4) [] = { 0xffff, 0xffff, 0x0, 0x0 };
+VECT_VAR_DECL(expected_uint,uint,32,2) [] = { 0xffffffff, 0x0 };
+
+VECT_VAR_DECL(expected_q_uint,uint,8,16) [] = { 0xff, 0xff, 0xff, 0xff,
+ 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_q_uint,uint,16,8) [] = { 0xffff, 0xffff, 0xffff, 0xffff,
+ 0xffff, 0xffff, 0x0, 0x0 };
+VECT_VAR_DECL(expected_q_uint,uint,32,4) [] = { 0xffffffff, 0xffffffff,
+ 0x0, 0x0 };
+
+VECT_VAR_DECL(expected_float,uint,32,2) [] = { 0xffffffff, 0x0 };
+VECT_VAR_DECL(expected_q_float,uint,32,4) [] = { 0xffffffff, 0xffffffff,
+ 0x0, 0x0 };
+
+VECT_VAR_DECL(expected_uint2,uint,32,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_uint3,uint,32,2) [] = { 0xffffffff, 0x0 };
+VECT_VAR_DECL(expected_uint4,uint,32,2) [] = { 0x0, 0x0 };
+
+VECT_VAR_DECL(expected_nan,uint,32,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_mnan,uint,32,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_nan2,uint,32,2) [] = { 0x0, 0x0 };
+
+VECT_VAR_DECL(expected_inf,uint,32,2) [] = { 0xffffffff, 0xffffffff };
+VECT_VAR_DECL(expected_minf,uint,32,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_inf2,uint,32,2) [] = { 0x0, 0x0 };
+
+VECT_VAR_DECL(expected_mzero,uint,32,2) [] = { 0x0, 0x0 };
--
1.8.3.2
next prev parent reply other threads:[~2014-07-01 10:07 UTC|newest]
Thread overview: 32+ messages / expand[flat|nested] mbox.gz Atom feed top
2014-07-01 10:07 [Patch ARM-AArch64/testsuite v2 00/21] Neon intrinsics executable tests Christophe Lyon
2014-07-01 10:07 ` [Patch ARM-AArch64/testsuite v2 21/21] Add vuzp and vzip tests Christophe Lyon
2014-07-01 10:07 ` [Patch ARM-AArch64/testsuite v2 14/21] Add vbsl tests Christophe Lyon
2014-07-01 10:07 ` [Patch ARM-AArch64/testsuite v2 19/21] Add vld2_lane, vld3_lane and vld4_lane Christophe Lyon
2014-07-01 10:07 ` [Patch ARM-AArch64/testsuite v2 13/21] Add vaddw tests Christophe Lyon
2014-07-01 10:07 ` [Patch ARM-AArch64/testsuite v2 12/21] Add vaddl tests Christophe Lyon
2014-07-01 10:07 ` Christophe Lyon [this message]
2014-07-01 10:07 ` [Patch ARM-AArch64/testsuite v2 20/21] Add vmul tests Christophe Lyon
2014-07-01 10:07 ` [Patch ARM-AArch64/testsuite v2 15/21] Add vclz tests Christophe Lyon
2014-07-01 10:07 ` [Patch ARM-AArch64/testsuite v2 01/21] Neon intrinsics execution tests initial framework Christophe Lyon
2014-07-03 11:06 ` Ramana Radhakrishnan
2014-07-03 21:04 ` Christophe Lyon
2014-07-10 10:12 ` Marcus Shawcroft
2014-07-11 10:41 ` Richard Earnshaw
2014-09-30 14:27 ` Christophe Lyon
2014-10-01 15:11 ` Marcus Shawcroft
2014-10-07 13:33 ` Christophe Lyon
2014-10-08 12:16 ` Ramana Radhakrishnan
2014-07-01 10:07 ` [Patch ARM-AArch64/testsuite v2 18/21] Add vld2/vld3/vld4 tests Christophe Lyon
2014-07-01 10:07 ` [Patch ARM-AArch64/testsuite v2 16/21] Add vdup and vmov tests Christophe Lyon
2014-07-01 10:07 ` [Patch ARM-AArch64/testsuite v2 11/21] Add vaddhn tests Christophe Lyon
2014-07-01 10:07 ` [Patch ARM-AArch64/testsuite v2 06/21] Add unary saturating operators: vqabs and vqneg Christophe Lyon
2014-07-01 10:07 ` [Patch ARM-AArch64/testsuite v2 10/21] Add vabdl tests Christophe Lyon
2014-07-01 10:07 ` [Patch ARM-AArch64/testsuite v2 17/21] Add vld1_dup tests Christophe Lyon
2014-07-01 10:07 ` [Patch ARM-AArch64/testsuite v2 03/21] Add binary operators: vadd, vand, vbic, veor, vorn, vorr, vsub Christophe Lyon
2014-07-03 11:08 ` Ramana Radhakrishnan
2014-07-01 10:07 ` [Patch ARM-AArch64/testsuite v2 08/21] Add vabal tests Christophe Lyon
2014-07-01 10:07 ` [Patch ARM-AArch64/testsuite v2 02/21] Add unary operators: vabs and vneg Christophe Lyon
2014-07-03 11:07 ` Ramana Radhakrishnan
2014-07-01 10:07 ` [Patch ARM-AArch64/testsuite v2 05/21] Add comparison operators with floating-point operands: vcage, vcagt, vcale and cvalt Christophe Lyon
2014-07-01 10:07 ` [Patch ARM-AArch64/testsuite v2 07/21] Add binary saturating operators: vqadd, vqsub Christophe Lyon
2014-07-01 10:07 ` [Patch ARM-AArch64/testsuite v2 09/21] Add vabd tests Christophe Lyon
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1404209174-25364-5-git-send-email-christophe.lyon@linaro.org \
--to=christophe.lyon@linaro.org \
--cc=gcc-patches@gcc.gnu.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for read-only IMAP folder(s) and NNTP newsgroup(s).