From: Christophe Lyon <christophe.lyon@linaro.org>
To: gcc-patches@gcc.gnu.org
Subject: [Patch ARM-AArch64/testsuite Neon intrinsics 18/20] Add vstX_lane tests.
Date: Wed, 27 May 2015 20:17:00 -0000 [thread overview]
Message-ID: <1432757747-4891-19-git-send-email-christophe.lyon@linaro.org> (raw)
In-Reply-To: <1432757747-4891-1-git-send-email-christophe.lyon@linaro.org>
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vstX_lane.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vstX_lane.c
new file mode 100644
index 0000000..26644ef
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vstX_lane.c
@@ -0,0 +1,578 @@
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+/* Expected results for vst2, chunk 0. */
+VECT_VAR_DECL(expected_st2_0,int,8,8) [] = { 0xf0, 0xf1, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st2_0,int,16,4) [] = { 0xfff0, 0xfff1, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st2_0,int,32,2) [] = { 0xfffffff0, 0xfffffff1 };
+VECT_VAR_DECL(expected_st2_0,uint,8,8) [] = { 0xf0, 0xf1, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st2_0,uint,16,4) [] = { 0xfff0, 0xfff1, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st2_0,uint,32,2) [] = { 0xfffffff0, 0xfffffff1 };
+VECT_VAR_DECL(expected_st2_0,poly,8,8) [] = { 0xf0, 0xf1, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st2_0,poly,16,4) [] = { 0xfff0, 0xfff1, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st2_0,hfloat,32,2) [] = { 0xc1800000, 0xc1700000 };
+VECT_VAR_DECL(expected_st2_0,int,16,8) [] = { 0xfff0, 0xfff1, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st2_0,int,32,4) [] = { 0xfffffff0, 0xfffffff1, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st2_0,uint,16,8) [] = { 0xfff0, 0xfff1, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st2_0,uint,32,4) [] = { 0xfffffff0, 0xfffffff1,
+ 0x0, 0x0 };
+VECT_VAR_DECL(expected_st2_0,poly,16,8) [] = { 0xfff0, 0xfff1, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st2_0,hfloat,32,4) [] = { 0xc1800000, 0xc1700000,
+ 0x0, 0x0 };
+
+/* Expected results for vst2, chunk 1. */
+VECT_VAR_DECL(expected_st2_1,int,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st2_1,int,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st2_1,int,32,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_st2_1,uint,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st2_1,uint,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st2_1,uint,32,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_st2_1,poly,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st2_1,poly,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st2_1,hfloat,32,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_st2_1,int,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st2_1,int,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st2_1,uint,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st2_1,uint,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st2_1,poly,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st2_1,hfloat,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+
+/* Expected results for vst3, chunk 0. */
+VECT_VAR_DECL(expected_st3_0,int,8,8) [] = { 0xf0, 0xf1, 0xf2, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st3_0,int,16,4) [] = { 0xfff0, 0xfff1, 0xfff2, 0x0 };
+VECT_VAR_DECL(expected_st3_0,int,32,2) [] = { 0xfffffff0, 0xfffffff1 };
+VECT_VAR_DECL(expected_st3_0,uint,8,8) [] = { 0xf0, 0xf1, 0xf2, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st3_0,uint,16,4) [] = { 0xfff0, 0xfff1, 0xfff2, 0x0 };
+VECT_VAR_DECL(expected_st3_0,uint,32,2) [] = { 0xfffffff0, 0xfffffff1 };
+VECT_VAR_DECL(expected_st3_0,poly,8,8) [] = { 0xf0, 0xf1, 0xf2, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st3_0,poly,16,4) [] = { 0xfff0, 0xfff1, 0xfff2, 0x0 };
+VECT_VAR_DECL(expected_st3_0,hfloat,32,2) [] = { 0xc1800000, 0xc1700000 };
+VECT_VAR_DECL(expected_st3_0,int,16,8) [] = { 0xfff0, 0xfff1, 0xfff2, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st3_0,int,32,4) [] = { 0xfffffff0, 0xfffffff1,
+ 0xfffffff2, 0x0 };
+VECT_VAR_DECL(expected_st3_0,uint,16,8) [] = { 0xfff0, 0xfff1, 0xfff2, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st3_0,uint,32,4) [] = { 0xfffffff0, 0xfffffff1,
+ 0xfffffff2, 0x0 };
+VECT_VAR_DECL(expected_st3_0,poly,16,8) [] = { 0xfff0, 0xfff1, 0xfff2, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st3_0,hfloat,32,4) [] = { 0xc1800000, 0xc1700000,
+ 0xc1600000, 0x0 };
+
+/* Expected results for vst3, chunk 1. */
+VECT_VAR_DECL(expected_st3_1,int,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st3_1,int,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st3_1,int,32,2) [] = { 0xfffffff2, 0x0 };
+VECT_VAR_DECL(expected_st3_1,uint,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st3_1,uint,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st3_1,uint,32,2) [] = { 0xfffffff2, 0x0 };
+VECT_VAR_DECL(expected_st3_1,poly,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st3_1,poly,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st3_1,hfloat,32,2) [] = { 0xc1600000, 0x0 };
+VECT_VAR_DECL(expected_st3_1,int,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st3_1,int,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st3_1,uint,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st3_1,uint,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st3_1,poly,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st3_1,hfloat,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+
+/* Expected results for vst3, chunk 2. */
+VECT_VAR_DECL(expected_st3_2,int,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st3_2,int,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st3_2,int,32,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_st3_2,uint,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st3_2,uint,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st3_2,uint,32,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_st3_2,poly,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st3_2,poly,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st3_2,hfloat,32,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_st3_2,int,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st3_2,int,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st3_2,uint,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st3_2,uint,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st3_2,poly,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st3_2,hfloat,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+
+/* Expected results for vst4, chunk 0. */
+VECT_VAR_DECL(expected_st4_0,int,8,8) [] = { 0xf0, 0xf1, 0xf2, 0xf3,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_0,int,16,4) [] = { 0xfff0, 0xfff1, 0xfff2, 0xfff3 };
+VECT_VAR_DECL(expected_st4_0,int,32,2) [] = { 0xfffffff0, 0xfffffff1 };
+VECT_VAR_DECL(expected_st4_0,uint,8,8) [] = { 0xf0, 0xf1, 0xf2, 0xf3,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_0,uint,16,4) [] = { 0xfff0, 0xfff1, 0xfff2, 0xfff3 };
+VECT_VAR_DECL(expected_st4_0,uint,32,2) [] = { 0xfffffff0, 0xfffffff1 };
+VECT_VAR_DECL(expected_st4_0,poly,8,8) [] = { 0xf0, 0xf1, 0xf2, 0xf3,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_0,poly,16,4) [] = { 0xfff0, 0xfff1, 0xfff2, 0xfff3 };
+VECT_VAR_DECL(expected_st4_0,hfloat,32,2) [] = { 0xc1800000, 0xc1700000 };
+VECT_VAR_DECL(expected_st4_0,int,16,8) [] = { 0xfff0, 0xfff1, 0xfff2, 0xfff3,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_0,int,32,4) [] = { 0xfffffff0, 0xfffffff1,
+ 0xfffffff2, 0xfffffff3 };
+VECT_VAR_DECL(expected_st4_0,uint,16,8) [] = { 0xfff0, 0xfff1, 0xfff2, 0xfff3,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_0,uint,32,4) [] = { 0xfffffff0, 0xfffffff1,
+ 0xfffffff2, 0xfffffff3 };
+VECT_VAR_DECL(expected_st4_0,poly,16,8) [] = { 0xfff0, 0xfff1, 0xfff2, 0xfff3,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_0,hfloat,32,4) [] = { 0xc1800000, 0xc1700000,
+ 0xc1600000, 0xc1500000 };
+
+/* Expected results for vst4, chunk 1. */
+VECT_VAR_DECL(expected_st4_1,int,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_1,int,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_1,int,32,2) [] = { 0xfffffff2, 0xfffffff3 };
+VECT_VAR_DECL(expected_st4_1,uint,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_1,uint,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_1,uint,32,2) [] = { 0xfffffff2, 0xfffffff3 };
+VECT_VAR_DECL(expected_st4_1,poly,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_1,poly,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_1,hfloat,32,2) [] = { 0xc1600000, 0xc1500000 };
+VECT_VAR_DECL(expected_st4_1,int,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_1,int,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_1,uint,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_1,uint,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_1,poly,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_1,hfloat,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+
+/* Expected results for vst4, chunk 2. */
+VECT_VAR_DECL(expected_st4_2,int,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_2,int,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_2,int,32,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_2,uint,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_2,uint,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_2,uint,32,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_2,poly,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_2,poly,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_2,hfloat,32,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_2,int,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_2,int,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_2,uint,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_2,uint,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_2,poly,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_2,hfloat,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+
+/* Expected results for vst4, chunk 3. */
+VECT_VAR_DECL(expected_st4_3,int,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_3,int,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_3,int,32,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_3,uint,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_3,uint,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_3,uint,32,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_3,poly,8,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_3,poly,16,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_3,hfloat,32,2) [] = { 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_3,int,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_3,int,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_3,uint,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_3,uint,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_3,poly,16,8) [] = { 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0 };
+VECT_VAR_DECL(expected_st4_3,hfloat,32,4) [] = { 0x0, 0x0, 0x0, 0x0 };
+
+/* Declare additional input buffers as needed. */
+/* Input buffers for vld2_lane. */
+VECT_VAR_DECL_INIT(buffer_vld2_lane, int, 8, 2);
+VECT_VAR_DECL_INIT(buffer_vld2_lane, int, 16, 2);
+VECT_VAR_DECL_INIT(buffer_vld2_lane, int, 32, 2);
+VECT_VAR_DECL_INIT(buffer_vld2_lane, int, 64, 2);
+VECT_VAR_DECL_INIT(buffer_vld2_lane, uint, 8, 2);
+VECT_VAR_DECL_INIT(buffer_vld2_lane, uint, 16, 2);
+VECT_VAR_DECL_INIT(buffer_vld2_lane, uint, 32, 2);
+VECT_VAR_DECL_INIT(buffer_vld2_lane, uint, 64, 2);
+VECT_VAR_DECL_INIT(buffer_vld2_lane, poly, 8, 2);
+VECT_VAR_DECL_INIT(buffer_vld2_lane, poly, 16, 2);
+VECT_VAR_DECL_INIT(buffer_vld2_lane, float, 32, 2);
+
+/* Input buffers for vld3_lane. */
+VECT_VAR_DECL_INIT(buffer_vld3_lane, int, 8, 3);
+VECT_VAR_DECL_INIT(buffer_vld3_lane, int, 16, 3);
+VECT_VAR_DECL_INIT(buffer_vld3_lane, int, 32, 3);
+VECT_VAR_DECL_INIT(buffer_vld3_lane, int, 64, 3);
+VECT_VAR_DECL_INIT(buffer_vld3_lane, uint, 8, 3);
+VECT_VAR_DECL_INIT(buffer_vld3_lane, uint, 16, 3);
+VECT_VAR_DECL_INIT(buffer_vld3_lane, uint, 32, 3);
+VECT_VAR_DECL_INIT(buffer_vld3_lane, uint, 64, 3);
+VECT_VAR_DECL_INIT(buffer_vld3_lane, poly, 8, 3);
+VECT_VAR_DECL_INIT(buffer_vld3_lane, poly, 16, 3);
+VECT_VAR_DECL_INIT(buffer_vld3_lane, float, 32, 3);
+
+/* Input buffers for vld4_lane. */
+VECT_VAR_DECL_INIT(buffer_vld4_lane, int, 8, 4);
+VECT_VAR_DECL_INIT(buffer_vld4_lane, int, 16, 4);
+VECT_VAR_DECL_INIT(buffer_vld4_lane, int, 32, 4);
+VECT_VAR_DECL_INIT(buffer_vld4_lane, int, 64, 4);
+VECT_VAR_DECL_INIT(buffer_vld4_lane, uint, 8, 4);
+VECT_VAR_DECL_INIT(buffer_vld4_lane, uint, 16, 4);
+VECT_VAR_DECL_INIT(buffer_vld4_lane, uint, 32, 4);
+VECT_VAR_DECL_INIT(buffer_vld4_lane, uint, 64, 4);
+VECT_VAR_DECL_INIT(buffer_vld4_lane, poly, 8, 4);
+VECT_VAR_DECL_INIT(buffer_vld4_lane, poly, 16, 4);
+VECT_VAR_DECL_INIT(buffer_vld4_lane, float, 32, 4);
+
+void exec_vstX_lane (void)
+{
+ /* In this case, input variables are arrays of vectors. */
+#define DECL_VSTX_LANE(T1, W, N, X) \
+ VECT_ARRAY_TYPE(T1, W, N, X) VECT_ARRAY_VAR(vector, T1, W, N, X); \
+ VECT_ARRAY_TYPE(T1, W, N, X) VECT_ARRAY_VAR(vector_src, T1, W, N, X); \
+ VECT_VAR_DECL(result_bis_##X, T1, W, N)[X * N]
+
+ /* We need to use a temporary result buffer (result_bis), because
+ the one used for other tests is not large enough. A subset of the
+ result data is moved from result_bis to result, and it is this
+ subset which is used to check the actual behaviour. The next
+ macro enables to move another chunk of data from result_bis to
+ result. */
+ /* We also use another extra input buffer (buffer_src), which we
+   fill with 0xAA, and which is used to load a vector from which we
+ read a given lane. */
+#define TEST_VSTX_LANE(Q, T1, T2, W, N, X, L) \
+ memset (VECT_VAR(buffer_src, T1, W, N), 0xAA, \
+ sizeof(VECT_VAR(buffer_src, T1, W, N))); \
+ memset (VECT_VAR(result_bis_##X, T1, W, N), 0, \
+ sizeof(VECT_VAR(result_bis_##X, T1, W, N))); \
+ \
+ VECT_ARRAY_VAR(vector_src, T1, W, N, X) = \
+ vld##X##Q##_##T2##W(VECT_VAR(buffer_src, T1, W, N)); \
+ \
+ VECT_ARRAY_VAR(vector, T1, W, N, X) = \
+ /* Use dedicated init buffer, of size X. */ \
+ vld##X##Q##_lane_##T2##W(VECT_VAR(buffer_vld##X##_lane, T1, W, X), \
+ VECT_ARRAY_VAR(vector_src, T1, W, N, X), \
+ L); \
+ vst##X##Q##_lane_##T2##W(VECT_VAR(result_bis_##X, T1, W, N), \
+ VECT_ARRAY_VAR(vector, T1, W, N, X), \
+ L); \
+ memcpy(VECT_VAR(result, T1, W, N), VECT_VAR(result_bis_##X, T1, W, N), \
+ sizeof(VECT_VAR(result, T1, W, N)));
+
+ /* Overwrite "result" with the contents of "result_bis"[Y]. */
+#define TEST_EXTRA_CHUNK(T1, W, N, X, Y) \
+ memcpy(VECT_VAR(result, T1, W, N), \
+ &(VECT_VAR(result_bis_##X, T1, W, N)[Y*N]), \
+ sizeof(VECT_VAR(result, T1, W, N)));
+
+ /* We need all variants in 64 bits, but there is no 64x2 variant,
+ nor 128 bits vectors of int8/uint8/poly8. */
+#define DECL_ALL_VSTX_LANE(X) \
+ DECL_VSTX_LANE(int, 8, 8, X); \
+ DECL_VSTX_LANE(int, 16, 4, X); \
+ DECL_VSTX_LANE(int, 32, 2, X); \
+ DECL_VSTX_LANE(uint, 8, 8, X); \
+ DECL_VSTX_LANE(uint, 16, 4, X); \
+ DECL_VSTX_LANE(uint, 32, 2, X); \
+ DECL_VSTX_LANE(poly, 8, 8, X); \
+ DECL_VSTX_LANE(poly, 16, 4, X); \
+ DECL_VSTX_LANE(float, 32, 2, X); \
+ DECL_VSTX_LANE(int, 16, 8, X); \
+ DECL_VSTX_LANE(int, 32, 4, X); \
+ DECL_VSTX_LANE(uint, 16, 8, X); \
+ DECL_VSTX_LANE(uint, 32, 4, X); \
+ DECL_VSTX_LANE(poly, 16, 8, X); \
+ DECL_VSTX_LANE(float, 32, 4, X)
+
+#define DUMMY_ARRAY(V, T, W, N, L) VECT_VAR_DECL(V,T,W,N)[N*L]
+
+ /* Use the same lanes regardless of the size of the array (X), for
+ simplicity. */
+#define TEST_ALL_VSTX_LANE(X) \
+ TEST_VSTX_LANE(, int, s, 8, 8, X, 7); \
+ TEST_VSTX_LANE(, int, s, 16, 4, X, 2); \
+ TEST_VSTX_LANE(, int, s, 32, 2, X, 0); \
+ TEST_VSTX_LANE(, float, f, 32, 2, X, 0); \
+ TEST_VSTX_LANE(, uint, u, 8, 8, X, 4); \
+ TEST_VSTX_LANE(, uint, u, 16, 4, X, 3); \
+ TEST_VSTX_LANE(, uint, u, 32, 2, X, 1); \
+ TEST_VSTX_LANE(, poly, p, 8, 8, X, 4); \
+ TEST_VSTX_LANE(, poly, p, 16, 4, X, 3); \
+ TEST_VSTX_LANE(q, int, s, 16, 8, X, 6); \
+ TEST_VSTX_LANE(q, int, s, 32, 4, X, 2); \
+ TEST_VSTX_LANE(q, uint, u, 16, 8, X, 5); \
+ TEST_VSTX_LANE(q, uint, u, 32, 4, X, 0); \
+ TEST_VSTX_LANE(q, poly, p, 16, 8, X, 5); \
+ TEST_VSTX_LANE(q, float, f, 32, 4, X, 2)
+
+#define TEST_ALL_EXTRA_CHUNKS(X, Y) \
+ TEST_EXTRA_CHUNK(int, 8, 8, X, Y); \
+ TEST_EXTRA_CHUNK(int, 16, 4, X, Y); \
+ TEST_EXTRA_CHUNK(int, 32, 2, X, Y); \
+ TEST_EXTRA_CHUNK(uint, 8, 8, X, Y); \
+ TEST_EXTRA_CHUNK(uint, 16, 4, X, Y); \
+ TEST_EXTRA_CHUNK(uint, 32, 2, X, Y); \
+ TEST_EXTRA_CHUNK(poly, 8, 8, X, Y); \
+ TEST_EXTRA_CHUNK(poly, 16, 4, X, Y); \
+ TEST_EXTRA_CHUNK(float, 32, 2, X, Y); \
+ TEST_EXTRA_CHUNK(int, 16, 8, X, Y); \
+ TEST_EXTRA_CHUNK(int, 32, 4, X, Y); \
+ TEST_EXTRA_CHUNK(uint, 16, 8, X, Y); \
+ TEST_EXTRA_CHUNK(uint, 32, 4, X, Y); \
+ TEST_EXTRA_CHUNK(poly, 16, 8, X, Y); \
+ TEST_EXTRA_CHUNK(float, 32, 4, X, Y)
+
+ /* Declare the temporary buffers / variables. */
+ DECL_ALL_VSTX_LANE(2);
+ DECL_ALL_VSTX_LANE(3);
+ DECL_ALL_VSTX_LANE(4);
+
+ /* Define dummy input arrays, large enough for x4 vectors. */
+ DUMMY_ARRAY(buffer_src, int, 8, 8, 4);
+ DUMMY_ARRAY(buffer_src, int, 16, 4, 4);
+ DUMMY_ARRAY(buffer_src, int, 32, 2, 4);
+ DUMMY_ARRAY(buffer_src, uint, 8, 8, 4);
+ DUMMY_ARRAY(buffer_src, uint, 16, 4, 4);
+ DUMMY_ARRAY(buffer_src, uint, 32, 2, 4);
+ DUMMY_ARRAY(buffer_src, poly, 8, 8, 4);
+ DUMMY_ARRAY(buffer_src, poly, 16, 4, 4);
+ DUMMY_ARRAY(buffer_src, float, 32, 2, 4);
+ DUMMY_ARRAY(buffer_src, int, 16, 8, 4);
+ DUMMY_ARRAY(buffer_src, int, 32, 4, 4);
+ DUMMY_ARRAY(buffer_src, uint, 16, 8, 4);
+ DUMMY_ARRAY(buffer_src, uint, 32, 4, 4);
+ DUMMY_ARRAY(buffer_src, poly, 16, 8, 4);
+ DUMMY_ARRAY(buffer_src, float, 32, 4, 4);
+
+ /* Check vst2_lane/vst2q_lane. */
+ clean_results ();
+#define TEST_MSG "VST2_LANE/VST2Q_LANE"
+ TEST_ALL_VSTX_LANE(2);
+
+#define CMT " (chunk 0)"
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_st2_0, CMT);
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_st2_0, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected_st2_0, CMT);
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_st2_0, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_st2_0, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_st2_0, CMT);
+ CHECK(TEST_MSG, poly, 8, 8, PRIx8, expected_st2_0, CMT);
+ CHECK(TEST_MSG, poly, 16, 4, PRIx16, expected_st2_0, CMT);
+ CHECK_FP(TEST_MSG, float, 32, 2, PRIx32, expected_st2_0, CMT);
+ CHECK(TEST_MSG, int, 16, 8, PRIx16, expected_st2_0, CMT);
+ CHECK(TEST_MSG, int, 32, 4, PRIx32, expected_st2_0, CMT);
+ CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected_st2_0, CMT);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_st2_0, CMT);
+ CHECK(TEST_MSG, poly, 16, 8, PRIx16, expected_st2_0, CMT);
+ CHECK_FP(TEST_MSG, float, 32, 4, PRIx32, expected_st2_0, CMT);
+
+ TEST_ALL_EXTRA_CHUNKS(2, 1);
+#undef CMT
+#define CMT " (chunk 1)"
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_st2_1, CMT);
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_st2_1, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected_st2_1, CMT);
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_st2_1, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_st2_1, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_st2_1, CMT);
+ CHECK(TEST_MSG, poly, 8, 8, PRIx8, expected_st2_1, CMT);
+ CHECK(TEST_MSG, poly, 16, 4, PRIx16, expected_st2_1, CMT);
+ CHECK_FP(TEST_MSG, float, 32, 2, PRIx32, expected_st2_1, CMT);
+ CHECK(TEST_MSG, int, 16, 8, PRIx16, expected_st2_1, CMT);
+ CHECK(TEST_MSG, int, 32, 4, PRIx32, expected_st2_1, CMT);
+ CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected_st2_1, CMT);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_st2_1, CMT);
+ CHECK(TEST_MSG, poly, 16, 8, PRIx16, expected_st2_1, CMT);
+ CHECK_FP(TEST_MSG, float, 32, 4, PRIx32, expected_st2_1, CMT);
+
+
+ /* Check vst3_lane/vst3q_lane. */
+ clean_results ();
+#undef TEST_MSG
+#define TEST_MSG "VST3_LANE/VST3Q_LANE"
+ TEST_ALL_VSTX_LANE(3);
+
+#undef CMT
+#define CMT " (chunk 0)"
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_st3_0, CMT);
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_st3_0, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected_st3_0, CMT);
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_st3_0, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_st3_0, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_st3_0, CMT);
+ CHECK(TEST_MSG, poly, 8, 8, PRIx8, expected_st3_0, CMT);
+ CHECK(TEST_MSG, poly, 16, 4, PRIx16, expected_st3_0, CMT);
+ CHECK_FP(TEST_MSG, float, 32, 2, PRIx32, expected_st3_0, CMT);
+ CHECK(TEST_MSG, int, 16, 8, PRIx16, expected_st3_0, CMT);
+ CHECK(TEST_MSG, int, 32, 4, PRIx32, expected_st3_0, CMT);
+ CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected_st3_0, CMT);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_st3_0, CMT);
+ CHECK(TEST_MSG, poly, 16, 8, PRIx16, expected_st3_0, CMT);
+ CHECK_FP(TEST_MSG, float, 32, 4, PRIx32, expected_st3_0, CMT);
+
+ TEST_ALL_EXTRA_CHUNKS(3, 1);
+
+#undef CMT
+#define CMT " (chunk 1)"
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_st3_1, CMT);
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_st3_1, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected_st3_1, CMT);
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_st3_1, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_st3_1, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_st3_1, CMT);
+ CHECK(TEST_MSG, poly, 8, 8, PRIx8, expected_st3_1, CMT);
+ CHECK(TEST_MSG, poly, 16, 4, PRIx16, expected_st3_1, CMT);
+ CHECK_FP(TEST_MSG, float, 32, 2, PRIx32, expected_st3_1, CMT);
+ CHECK(TEST_MSG, int, 16, 8, PRIx16, expected_st3_1, CMT);
+ CHECK(TEST_MSG, int, 32, 4, PRIx32, expected_st3_1, CMT);
+ CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected_st3_1, CMT);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_st3_1, CMT);
+ CHECK(TEST_MSG, poly, 16, 8, PRIx16, expected_st3_1, CMT);
+ CHECK_FP(TEST_MSG, float, 32, 4, PRIx32, expected_st3_1, CMT);
+
+ TEST_ALL_EXTRA_CHUNKS(3, 2);
+
+#undef CMT
+#define CMT " (chunk 2)"
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_st3_2, CMT);
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_st3_2, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected_st3_2, CMT);
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_st3_2, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_st3_2, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_st3_2, CMT);
+ CHECK(TEST_MSG, poly, 8, 8, PRIx8, expected_st3_2, CMT);
+ CHECK(TEST_MSG, poly, 16, 4, PRIx16, expected_st3_2, CMT);
+ CHECK_FP(TEST_MSG, float, 32, 2, PRIx32, expected_st3_2, CMT);
+ CHECK(TEST_MSG, int, 16, 8, PRIx16, expected_st3_2, CMT);
+ CHECK(TEST_MSG, int, 32, 4, PRIx32, expected_st3_2, CMT);
+ CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected_st3_2, CMT);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_st3_2, CMT);
+ CHECK(TEST_MSG, poly, 16, 8, PRIx16, expected_st3_2, CMT);
+ CHECK_FP(TEST_MSG, float, 32, 4, PRIx32, expected_st3_2, CMT);
+
+
+ /* Check vst4_lane/vst4q_lane. */
+ clean_results ();
+#undef TEST_MSG
+#define TEST_MSG "VST4_LANE/VST4Q_LANE"
+ TEST_ALL_VSTX_LANE(4);
+
+#undef CMT
+#define CMT " (chunk 0)"
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_st4_0, CMT);
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_st4_0, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected_st4_0, CMT);
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_st4_0, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_st4_0, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_st4_0, CMT);
+ CHECK(TEST_MSG, poly, 8, 8, PRIx8, expected_st4_0, CMT);
+ CHECK(TEST_MSG, poly, 16, 4, PRIx16, expected_st4_0, CMT);
+ CHECK_FP(TEST_MSG, float, 32, 2, PRIx32, expected_st4_0, CMT);
+ CHECK(TEST_MSG, int, 16, 8, PRIx16, expected_st4_0, CMT);
+ CHECK(TEST_MSG, int, 32, 4, PRIx32, expected_st4_0, CMT);
+ CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected_st4_0, CMT);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_st4_0, CMT);
+ CHECK(TEST_MSG, poly, 16, 8, PRIx16, expected_st4_0, CMT);
+ CHECK_FP(TEST_MSG, float, 32, 4, PRIx32, expected_st4_0, CMT);
+
+ TEST_ALL_EXTRA_CHUNKS(4, 1);
+
+#undef CMT
+#define CMT " (chunk 1)"
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_st4_1, CMT);
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_st4_1, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected_st4_1, CMT);
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_st4_1, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_st4_1, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_st4_1, CMT);
+ CHECK(TEST_MSG, poly, 8, 8, PRIx8, expected_st4_1, CMT);
+ CHECK(TEST_MSG, poly, 16, 4, PRIx16, expected_st4_1, CMT);
+ CHECK_FP(TEST_MSG, float, 32, 2, PRIx32, expected_st4_1, CMT);
+ CHECK(TEST_MSG, int, 16, 8, PRIx16, expected_st4_1, CMT);
+ CHECK(TEST_MSG, int, 32, 4, PRIx32, expected_st4_1, CMT);
+ CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected_st4_1, CMT);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_st4_1, CMT);
+ CHECK(TEST_MSG, poly, 16, 8, PRIx16, expected_st4_1, CMT);
+ CHECK_FP(TEST_MSG, float, 32, 4, PRIx32, expected_st4_1, CMT);
+
+ TEST_ALL_EXTRA_CHUNKS(4, 2);
+
+#undef CMT
+#define CMT " (chunk 2)"
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_st4_2, CMT);
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_st4_2, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected_st4_2, CMT);
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_st4_2, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_st4_2, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_st4_2, CMT);
+ CHECK(TEST_MSG, poly, 8, 8, PRIx8, expected_st4_2, CMT);
+ CHECK(TEST_MSG, poly, 16, 4, PRIx16, expected_st4_2, CMT);
+ CHECK_FP(TEST_MSG, float, 32, 2, PRIx32, expected_st4_2, CMT);
+ CHECK(TEST_MSG, int, 16, 8, PRIx16, expected_st4_2, CMT);
+ CHECK(TEST_MSG, int, 32, 4, PRIx32, expected_st4_2, CMT);
+ CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected_st4_2, CMT);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_st4_2, CMT);
+ CHECK(TEST_MSG, poly, 16, 8, PRIx16, expected_st4_2, CMT);
+ CHECK_FP(TEST_MSG, float, 32, 4, PRIx32, expected_st4_2, CMT);
+
+ TEST_ALL_EXTRA_CHUNKS(4, 3);
+
+#undef CMT
+#define CMT " (chunk 3)"
+ CHECK(TEST_MSG, int, 8, 8, PRIx8, expected_st4_3, CMT);
+ CHECK(TEST_MSG, int, 16, 4, PRIx16, expected_st4_3, CMT);
+ CHECK(TEST_MSG, int, 32, 2, PRIx32, expected_st4_3, CMT);
+ CHECK(TEST_MSG, uint, 8, 8, PRIx8, expected_st4_3, CMT);
+ CHECK(TEST_MSG, uint, 16, 4, PRIx16, expected_st4_3, CMT);
+ CHECK(TEST_MSG, uint, 32, 2, PRIx32, expected_st4_3, CMT);
+ CHECK(TEST_MSG, poly, 8, 8, PRIx8, expected_st4_3, CMT);
+ CHECK(TEST_MSG, poly, 16, 4, PRIx16, expected_st4_3, CMT);
+ CHECK_FP(TEST_MSG, float, 32, 2, PRIx32, expected_st4_3, CMT);
+ CHECK(TEST_MSG, int, 16, 8, PRIx16, expected_st4_3, CMT);
+ CHECK(TEST_MSG, int, 32, 4, PRIx32, expected_st4_3, CMT);
+ CHECK(TEST_MSG, uint, 16, 8, PRIx16, expected_st4_3, CMT);
+ CHECK(TEST_MSG, uint, 32, 4, PRIx32, expected_st4_3, CMT);
+ CHECK(TEST_MSG, poly, 16, 8, PRIx16, expected_st4_3, CMT);
+ CHECK_FP(TEST_MSG, float, 32, 4, PRIx32, expected_st4_3, CMT);
+}
+
+int main (void)
+{
+ exec_vstX_lane ();
+ return 0;
+}
--
2.1.4
next prev parent reply other threads:[~2015-05-27 20:17 UTC|newest]
Thread overview: 27+ messages / expand[flat|nested] mbox.gz Atom feed top
2015-05-27 20:16 [Patch ARM-AArch64/testsuite Neon intrinsics 00/20] Executable tests Christophe Lyon
2015-05-27 20:16 ` [Patch ARM-AArch64/testsuite Neon intrinsics 08/20] Add vrshrn_n tests Christophe Lyon
2015-05-27 20:17 ` [Patch ARM-AArch64/testsuite Neon intrinsics 15/20] Add vshrn_n tests Christophe Lyon
2015-05-27 20:17 ` [Patch ARM-AArch64/testsuite Neon intrinsics 11/20] Add vrsra_n tests Christophe Lyon
2015-05-27 20:17 ` Christophe Lyon [this message]
2015-05-27 20:17 ` [Patch ARM-AArch64/testsuite Neon intrinsics 19/20] Add vtbX tests Christophe Lyon
2015-05-27 20:17 ` [Patch ARM-AArch64/testsuite Neon intrinsics 12/20] Add vset_lane tests Christophe Lyon
2015-05-27 20:17 ` [Patch ARM-AArch64/testsuite Neon intrinsics 01/20] Add vrecpe tests Christophe Lyon
2015-05-27 20:17 ` [Patch ARM-AArch64/testsuite Neon intrinsics 05/20] Add vrshl tests Christophe Lyon
2015-05-27 20:17 ` [Patch ARM-AArch64/testsuite Neon intrinsics 02/20] Add vrecps tests Christophe Lyon
2015-05-27 20:17 ` [Patch ARM-AArch64/testsuite Neon intrinsics 06/20] Add vshr_n tests Christophe Lyon
2015-05-27 20:17 ` [Patch ARM-AArch64/testsuite Neon intrinsics 17/20] Add vst1_lane tests Christophe Lyon
2015-05-27 20:17 ` [Patch ARM-AArch64/testsuite Neon intrinsics 10/20] Add vrsqrts tests Christophe Lyon
2015-05-27 20:17 ` [Patch ARM-AArch64/testsuite Neon intrinsics 13/20] Add vshll_n tests Christophe Lyon
2015-05-27 20:17 ` [Patch ARM-AArch64/testsuite Neon intrinsics 09/20] Add vrsqrte tests Christophe Lyon
2015-05-27 20:17 ` [Patch ARM-AArch64/testsuite Neon intrinsics 03/20] Add vreinterpret tests Christophe Lyon
2015-05-27 20:17 ` [Patch ARM-AArch64/testsuite Neon intrinsics 14/20] Add vshl_n tests Christophe Lyon
2015-05-27 20:26 ` [Patch ARM-AArch64/testsuite Neon intrinsics 16/20] Add vsra_n tests Christophe Lyon
2015-05-27 20:30 ` [Patch ARM-AArch64/testsuite Neon intrinsics 20/20] Add vtst tests Christophe Lyon
2015-05-27 20:40 ` [Patch ARM-AArch64/testsuite Neon intrinsics 07/20] Add vrshr_n tests Christophe Lyon
2015-05-27 20:51 ` [Patch ARM-AArch64/testsuite Neon intrinsics 04/20] Add vrev tests Christophe Lyon
2015-06-15 22:15 ` [Patch ARM-AArch64/testsuite Neon intrinsics 00/20] Executable tests Christophe Lyon
2015-06-16 10:10 ` James Greenhalgh
2015-06-16 13:26 ` Christophe Lyon
2015-11-02 14:20 ` Jiong Wang
2015-11-02 14:38 ` Christophe Lyon
2015-11-02 14:47 ` Jiong Wang
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1432757747-4891-19-git-send-email-christophe.lyon@linaro.org \
--to=christophe.lyon@linaro.org \
--cc=gcc-patches@gcc.gnu.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for read-only IMAP folder(s) and NNTP newsgroup(s).