public inbox for gcc-patches@gcc.gnu.org
* [AArch64, 3/4] Reimplement multiply by element to get rid of inline assembly
       [not found] ` <57398D5E.6070503@foss.arm.com>
       [not found]   ` <57398D8B.8060902@foss.arm.com>
@ 2016-05-16  9:09   ` Jiong Wang
  2016-05-17 12:37     ` James Greenhalgh
  1 sibling, 1 reply; 10+ messages in thread
From: Jiong Wang @ 2016-05-16  9:09 UTC (permalink / raw)
  To: GCC Patches

[-- Attachment #1: Type: text/plain, Size: 683 bytes --]

This patch reimplements vector multiply by element on top of the
existing vmul_lane* intrinsics instead of inline assembly.

There is no code generation change from this patch.
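
A hedged usage sketch (hypothetical, not part of the patch or the
testsuite) of what a caller of the reimplemented intrinsic is expected
to compile to, unchanged from the inline-assembly version:

#include <arm_neon.h>

/* With vmul_n_f32 now written as "__a * __b", GCC at -O2 is still
   expected to emit a single "fmul v0.2s, v0.2s, v1.s[0]" here, the
   same code the old inline-assembly version produced.  */
float32x2_t
scale_f32 (float32x2_t v, float32_t s)
{
  return vmul_n_f32 (v, s);
}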

OK for trunk?

2016-05-16  Jiong Wang  <jiong.wang@arm.com>

gcc/
   * config/aarch64/arm_neon.h (vmul_n_f32): Remove inline assembly.
   Use builtin.
   (vmul_n_s16): Likewise.
   (vmul_n_s32): Likewise.
   (vmul_n_u16): Likewise.
   (vmul_n_u32): Likewise.
   (vmulq_n_f32): Likewise.
   (vmulq_n_f64): Likewise.
   (vmulq_n_s16): Likewise.
   (vmulq_n_s32): Likewise.
   (vmulq_n_u16): Likewise.
   (vmulq_n_u32): Likewise.

gcc/testsuite/
   * gcc.target/aarch64/simd/vmul_elem_1.c: Use intrinsics.


[-- Attachment #2: 3.patch --]
[-- Type: text/x-patch, Size: 17843 bytes --]

diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index ca7ace5aa656163826569d046fcbf02f9f7d4d6c..84931aeec2d885f8552197fe8a72500f127e2bbb 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -7938,61 +7938,6 @@ vmovn_u64 (uint64x2_t a)
   return result;
 }
 
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vmul_n_f32 (float32x2_t a, float32_t b)
-{
-  float32x2_t result;
-  __asm__ ("fmul %0.2s,%1.2s,%2.s[0]"
-           : "=w"(result)
-           : "w"(a), "w"(b)
-           : /* No clobbers */);
-  return result;
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vmul_n_s16 (int16x4_t a, int16_t b)
-{
-  int16x4_t result;
-  __asm__ ("mul %0.4h,%1.4h,%2.h[0]"
-           : "=w"(result)
-           : "w"(a), "x"(b)
-           : /* No clobbers */);
-  return result;
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vmul_n_s32 (int32x2_t a, int32_t b)
-{
-  int32x2_t result;
-  __asm__ ("mul %0.2s,%1.2s,%2.s[0]"
-           : "=w"(result)
-           : "w"(a), "w"(b)
-           : /* No clobbers */);
-  return result;
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vmul_n_u16 (uint16x4_t a, uint16_t b)
-{
-  uint16x4_t result;
-  __asm__ ("mul %0.4h,%1.4h,%2.h[0]"
-           : "=w"(result)
-           : "w"(a), "x"(b)
-           : /* No clobbers */);
-  return result;
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vmul_n_u32 (uint32x2_t a, uint32_t b)
-{
-  uint32x2_t result;
-  __asm__ ("mul %0.2s,%1.2s,%2.s[0]"
-           : "=w"(result)
-           : "w"(a), "w"(b)
-           : /* No clobbers */);
-  return result;
-}
-
 #define vmull_high_lane_s16(a, b, c)                                    \
   __extension__                                                         \
     ({                                                                  \
@@ -8443,72 +8388,6 @@ vmull_u32 (uint32x2_t a, uint32x2_t b)
   return result;
 }
 
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vmulq_n_f32 (float32x4_t a, float32_t b)
-{
-  float32x4_t result;
-  __asm__ ("fmul %0.4s,%1.4s,%2.s[0]"
-           : "=w"(result)
-           : "w"(a), "w"(b)
-           : /* No clobbers */);
-  return result;
-}
-
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
-vmulq_n_f64 (float64x2_t a, float64_t b)
-{
-  float64x2_t result;
-  __asm__ ("fmul %0.2d,%1.2d,%2.d[0]"
-           : "=w"(result)
-           : "w"(a), "w"(b)
-           : /* No clobbers */);
-  return result;
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vmulq_n_s16 (int16x8_t a, int16_t b)
-{
-  int16x8_t result;
-  __asm__ ("mul %0.8h,%1.8h,%2.h[0]"
-           : "=w"(result)
-           : "w"(a), "x"(b)
-           : /* No clobbers */);
-  return result;
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vmulq_n_s32 (int32x4_t a, int32_t b)
-{
-  int32x4_t result;
-  __asm__ ("mul %0.4s,%1.4s,%2.s[0]"
-           : "=w"(result)
-           : "w"(a), "w"(b)
-           : /* No clobbers */);
-  return result;
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vmulq_n_u16 (uint16x8_t a, uint16_t b)
-{
-  uint16x8_t result;
-  __asm__ ("mul %0.8h,%1.8h,%2.h[0]"
-           : "=w"(result)
-           : "w"(a), "x"(b)
-           : /* No clobbers */);
-  return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vmulq_n_u32 (uint32x4_t a, uint32_t b)
-{
-  uint32x4_t result;
-  __asm__ ("mul %0.4s,%1.4s,%2.s[0]"
-           : "=w"(result)
-           : "w"(a), "w"(b)
-           : /* No clobbers */);
-  return result;
-}
-
 __extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
 vmvn_p8 (poly8x8_t a)
 {
@@ -18924,6 +18803,74 @@ vmulq_laneq_u32 (uint32x4_t __a, uint32x4_t __b, const int __lane)
   return __a * __aarch64_vget_lane_any (__b, __lane);
 }
 
+/* vmul_n.  */
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vmul_n_f32 (float32x2_t __a, float32_t __b)
+{
+  return __a * __b;
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vmulq_n_f32 (float32x4_t __a, float32_t __b)
+{
+  return __a * __b;
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vmulq_n_f64 (float64x2_t __a, float64_t __b)
+{
+  return __a * __b;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmul_n_s16 (int16x4_t __a, int16_t __b)
+{
+  return __a * __b;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmulq_n_s16 (int16x8_t __a, int16_t __b)
+{
+  return __a * __b;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmul_n_s32 (int32x2_t __a, int32_t __b)
+{
+  return __a * __b;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmulq_n_s32 (int32x4_t __a, int32_t __b)
+{
+  return __a * __b;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmul_n_u16 (uint16x4_t __a, uint16_t __b)
+{
+  return __a * __b;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmulq_n_u16 (uint16x8_t __a, uint16_t __b)
+{
+  return __a * __b;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmul_n_u32 (uint32x2_t __a, uint32_t __b)
+{
+  return __a * __b;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmulq_n_u32 (uint32x4_t __a, uint32_t __b)
+{
+  return __a * __b;
+}
+
 /* vneg  */
 
 __extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vmul_elem_1.c b/gcc/testsuite/gcc.target/aarch64/simd/vmul_elem_1.c
index 290a4e9adbc5d9ce1335ca28120e437293776f30..155cac3b4a5579318244533c3ab590250c150dd6 100644
--- a/gcc/testsuite/gcc.target/aarch64/simd/vmul_elem_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vmul_elem_1.c
@@ -142,13 +142,13 @@ check_v2sf (float32_t elemA, float32_t elemB)
   int32_t indx;
   const float32_t vec32x2_buf[2] = {A, B};
   float32x2_t vec32x2_src = vld1_f32 (vec32x2_buf);
-  float32x2_t vec32x2_res = vec32x2_src * elemA;
+  float32x2_t vec32x2_res = vmul_n_f32 (vec32x2_src, elemA);
 
   for (indx = 0; indx < 2; indx++)
     if (* (uint32_t *) &vec32x2_res[indx] != * (uint32_t *) &expected2_1[indx])
       abort ();
 
-  vec32x2_res = vec32x2_src * elemB;
+  vec32x2_res = vmul_n_f32 (vec32x2_src, elemB);
 
   for (indx = 0; indx < 2; indx++)
     if (* (uint32_t *) &vec32x2_res[indx] != * (uint32_t *) &expected2_2[indx])
@@ -163,25 +163,25 @@ check_v4sf (float32_t elemA, float32_t elemB, float32_t elemC, float32_t elemD)
   int32_t indx;
   const float32_t vec32x4_buf[4] = {A, B, C, D};
   float32x4_t vec32x4_src = vld1q_f32 (vec32x4_buf);
-  float32x4_t vec32x4_res = vec32x4_src * elemA;
+  float32x4_t vec32x4_res = vmulq_n_f32 (vec32x4_src, elemA);
 
   for (indx = 0; indx < 4; indx++)
     if (* (uint32_t *) &vec32x4_res[indx] != * (uint32_t *) &expected4_1[indx])
       abort ();
 
-  vec32x4_res = vec32x4_src * elemB;
+  vec32x4_res = vmulq_n_f32 (vec32x4_src, elemB);
 
   for (indx = 0; indx < 4; indx++)
     if (* (uint32_t *) &vec32x4_res[indx] != * (uint32_t *) &expected4_2[indx])
       abort ();
 
-  vec32x4_res = vec32x4_src * elemC;
+  vec32x4_res = vmulq_n_f32 (vec32x4_src, elemC);
 
   for (indx = 0; indx < 4; indx++)
     if (* (uint32_t *) &vec32x4_res[indx] != * (uint32_t *) &expected4_3[indx])
       abort ();
 
-  vec32x4_res = vec32x4_src * elemD;
+  vec32x4_res = vmulq_n_f32 (vec32x4_src, elemD);
 
   for (indx = 0; indx < 4; indx++)
     if (* (uint32_t *) &vec32x4_res[indx] != * (uint32_t *) &expected4_4[indx])
@@ -196,13 +196,13 @@ check_v2df (float64_t elemdC, float64_t elemdD)
   int32_t indx;
   const float64_t vec64x2_buf[2] = {AD, BD};
   float64x2_t vec64x2_src = vld1q_f64 (vec64x2_buf);
-  float64x2_t vec64x2_res = vec64x2_src * elemdC;
+  float64x2_t vec64x2_res = vmulq_n_f64 (vec64x2_src, elemdC);
 
   for (indx = 0; indx < 2; indx++)
     if (* (uint64_t *) &vec64x2_res[indx] != * (uint64_t *) &expectedd2_1[indx])
       abort ();
 
-  vec64x2_res = vec64x2_src * elemdD;
+  vec64x2_res = vmulq_n_f64 (vec64x2_src, elemdD);
 
   for (indx = 0; indx < 2; indx++)
     if (* (uint64_t *) &vec64x2_res[indx] != * (uint64_t *) &expectedd2_2[indx])
@@ -217,13 +217,13 @@ check_v2si (int32_t elemsA, int32_t elemsB)
   int32_t indx;
   const int32_t vecs32x2_buf[2] = {AS, BS};
   int32x2_t vecs32x2_src = vld1_s32 (vecs32x2_buf);
-  int32x2_t vecs32x2_res = vecs32x2_src * elemsA;
+  int32x2_t vecs32x2_res = vmul_n_s32 (vecs32x2_src, elemsA);
 
   for (indx = 0; indx < 2; indx++)
     if (vecs32x2_res[indx] != expecteds2_1[indx])
       abort ();
 
-  vecs32x2_res = vecs32x2_src * elemsB;
+  vecs32x2_res = vmul_n_s32 (vecs32x2_src, elemsB);
 
   for (indx = 0; indx < 2; indx++)
     if (vecs32x2_res[indx] != expecteds2_2[indx])
@@ -236,13 +236,13 @@ check_v2si_unsigned (uint32_t elemusA, uint32_t elemusB)
   int indx;
   const uint32_t vecus32x2_buf[2] = {AUS, BUS};
   uint32x2_t vecus32x2_src = vld1_u32 (vecus32x2_buf);
-  uint32x2_t vecus32x2_res = vecus32x2_src * elemusA;
+  uint32x2_t vecus32x2_res = vmul_n_u32 (vecus32x2_src, elemusA);
 
   for (indx = 0; indx < 2; indx++)
     if (vecus32x2_res[indx] != expectedus2_1[indx])
       abort ();
 
-  vecus32x2_res = vecus32x2_src * elemusB;
+  vecus32x2_res = vmul_n_u32 (vecus32x2_src, elemusB);
 
   for (indx = 0; indx < 2; indx++)
     if (vecus32x2_res[indx] != expectedus2_2[indx])
@@ -257,25 +257,25 @@ check_v4si (int32_t elemsA, int32_t elemsB, int32_t elemsC, int32_t elemsD)
   int32_t indx;
   const int32_t vecs32x4_buf[4] = {AS, BS, CS, DS};
   int32x4_t vecs32x4_src = vld1q_s32 (vecs32x4_buf);
-  int32x4_t vecs32x4_res = vecs32x4_src * elemsA;
+  int32x4_t vecs32x4_res = vmulq_n_s32 (vecs32x4_src, elemsA);
 
   for (indx = 0; indx < 4; indx++)
     if (vecs32x4_res[indx] != expecteds4_1[indx])
       abort ();
 
-  vecs32x4_res = vecs32x4_src * elemsB;
+  vecs32x4_res = vmulq_n_s32 (vecs32x4_src, elemsB);
 
   for (indx = 0; indx < 4; indx++)
     if (vecs32x4_res[indx] != expecteds4_2[indx])
       abort ();
 
-  vecs32x4_res = vecs32x4_src * elemsC;
+  vecs32x4_res = vmulq_n_s32 (vecs32x4_src, elemsC);
 
   for (indx = 0; indx < 4; indx++)
     if (vecs32x4_res[indx] != expecteds4_3[indx])
       abort ();
 
-  vecs32x4_res = vecs32x4_src * elemsD;
+  vecs32x4_res = vmulq_n_s32 (vecs32x4_src, elemsD);
 
   for (indx = 0; indx < 4; indx++)
     if (vecs32x4_res[indx] != expecteds4_4[indx])
@@ -289,25 +289,25 @@ check_v4si_unsigned (uint32_t elemusA, uint32_t elemusB, uint32_t elemusC,
   int indx;
   const uint32_t vecus32x4_buf[4] = {AUS, BUS, CUS, DUS};
   uint32x4_t vecus32x4_src = vld1q_u32 (vecus32x4_buf);
-  uint32x4_t vecus32x4_res = vecus32x4_src * elemusA;
+  uint32x4_t vecus32x4_res = vmulq_n_u32 (vecus32x4_src, elemusA);
 
   for (indx = 0; indx < 4; indx++)
     if (vecus32x4_res[indx] != expectedus4_1[indx])
       abort ();
 
-  vecus32x4_res = vecus32x4_src * elemusB;
+  vecus32x4_res = vmulq_n_u32 (vecus32x4_src, elemusB);
 
   for (indx = 0; indx < 4; indx++)
     if (vecus32x4_res[indx] != expectedus4_2[indx])
       abort ();
 
-  vecus32x4_res = vecus32x4_src * elemusC;
+  vecus32x4_res = vmulq_n_u32 (vecus32x4_src, elemusC);
 
   for (indx = 0; indx < 4; indx++)
     if (vecus32x4_res[indx] != expectedus4_3[indx])
       abort ();
 
-  vecus32x4_res = vecus32x4_src * elemusD;
+  vecus32x4_res = vmulq_n_u32 (vecus32x4_src, elemusD);
 
   for (indx = 0; indx < 4; indx++)
     if (vecus32x4_res[indx] != expectedus4_4[indx])
@@ -323,25 +323,25 @@ check_v4hi (int16_t elemhA, int16_t elemhB, int16_t elemhC, int16_t elemhD)
   int32_t indx;
   const int16_t vech16x4_buf[4] = {AH, BH, CH, DH};
   int16x4_t vech16x4_src = vld1_s16 (vech16x4_buf);
-  int16x4_t vech16x4_res = vech16x4_src * elemhA;
+  int16x4_t vech16x4_res = vmul_n_s16 (vech16x4_src, elemhA);
 
   for (indx = 0; indx < 4; indx++)
     if (vech16x4_res[indx] != expectedh4_1[indx])
       abort ();
 
-  vech16x4_res = vech16x4_src * elemhB;
+  vech16x4_res = vmul_n_s16 (vech16x4_src, elemhB);
 
   for (indx = 0; indx < 4; indx++)
     if (vech16x4_res[indx] != expectedh4_2[indx])
       abort ();
 
-  vech16x4_res = vech16x4_src * elemhC;
+  vech16x4_res = vmul_n_s16 (vech16x4_src, elemhC);
 
   for (indx = 0; indx < 4; indx++)
     if (vech16x4_res[indx] != expectedh4_3[indx])
       abort ();
 
-  vech16x4_res = vech16x4_src * elemhD;
+  vech16x4_res = vmul_n_s16 (vech16x4_src, elemhD);
 
   for (indx = 0; indx < 4; indx++)
     if (vech16x4_res[indx] != expectedh4_4[indx])
@@ -355,25 +355,25 @@ check_v4hi_unsigned (uint16_t elemuhA, uint16_t elemuhB, uint16_t elemuhC,
   int indx;
   const uint16_t vecuh16x4_buf[4] = {AUH, BUH, CUH, DUH};
   uint16x4_t vecuh16x4_src = vld1_u16 (vecuh16x4_buf);
-  uint16x4_t vecuh16x4_res = vecuh16x4_src * elemuhA;
+  uint16x4_t vecuh16x4_res = vmul_n_u16 (vecuh16x4_src, elemuhA);
 
   for (indx = 0; indx < 4; indx++)
     if (vecuh16x4_res[indx] != expecteduh4_1[indx])
       abort ();
 
-  vecuh16x4_res = vecuh16x4_src * elemuhB;
+  vecuh16x4_res = vmul_n_u16 (vecuh16x4_src, elemuhB);
 
   for (indx = 0; indx < 4; indx++)
     if (vecuh16x4_res[indx] != expecteduh4_2[indx])
       abort ();
 
-  vecuh16x4_res = vecuh16x4_src * elemuhC;
+  vecuh16x4_res = vmul_n_u16 (vecuh16x4_src, elemuhC);
 
   for (indx = 0; indx < 4; indx++)
     if (vecuh16x4_res[indx] != expecteduh4_3[indx])
       abort ();
 
-  vecuh16x4_res = vecuh16x4_src * elemuhD;
+  vecuh16x4_res = vmul_n_u16 (vecuh16x4_src, elemuhD);
 
   for (indx = 0; indx < 4; indx++)
     if (vecuh16x4_res[indx] != expecteduh4_4[indx])
@@ -389,49 +389,49 @@ check_v8hi (int16_t elemhA, int16_t elemhB, int16_t elemhC, int16_t elemhD,
   int32_t indx;
   const int16_t vech16x8_buf[8] = {AH, BH, CH, DH, EH, FH, GH, HH};
   int16x8_t vech16x8_src = vld1q_s16 (vech16x8_buf);
-  int16x8_t vech16x8_res = vech16x8_src * elemhA;
+  int16x8_t vech16x8_res = vmulq_n_s16 (vech16x8_src, elemhA);
 
   for (indx = 0; indx < 8; indx++)
     if (vech16x8_res[indx] != expectedh8_1[indx])
       abort ();
 
-  vech16x8_res = vech16x8_src * elemhB;
+  vech16x8_res = vmulq_n_s16 (vech16x8_src, elemhB);
 
   for (indx = 0; indx < 8; indx++)
     if (vech16x8_res[indx] != expectedh8_2[indx])
       abort ();
 
-  vech16x8_res = vech16x8_src * elemhC;
+  vech16x8_res = vmulq_n_s16 (vech16x8_src, elemhC);
 
   for (indx = 0; indx < 8; indx++)
     if (vech16x8_res[indx] != expectedh8_3[indx])
       abort ();
 
-  vech16x8_res = vech16x8_src * elemhD;
+  vech16x8_res = vmulq_n_s16 (vech16x8_src, elemhD);
 
   for (indx = 0; indx < 8; indx++)
     if (vech16x8_res[indx] != expectedh8_4[indx])
       abort ();
 
-  vech16x8_res = vech16x8_src * elemhE;
+  vech16x8_res = vmulq_n_s16 (vech16x8_src, elemhE);
 
   for (indx = 0; indx < 8; indx++)
     if (vech16x8_res[indx] != expectedh8_5[indx])
       abort ();
 
-  vech16x8_res = vech16x8_src * elemhF;
+  vech16x8_res = vmulq_n_s16 (vech16x8_src, elemhF);
 
   for (indx = 0; indx < 8; indx++)
     if (vech16x8_res[indx] != expectedh8_6[indx])
       abort ();
 
-  vech16x8_res = vech16x8_src * elemhG;
+  vech16x8_res = vmulq_n_s16 (vech16x8_src, elemhG);
 
   for (indx = 0; indx < 8; indx++)
     if (vech16x8_res[indx] != expectedh8_7[indx])
       abort ();
 
-  vech16x8_res = vech16x8_src * elemhH;
+  vech16x8_res = vmulq_n_s16 (vech16x8_src, elemhH);
 
   for (indx = 0; indx < 8; indx++)
     if (vech16x8_res[indx] != expectedh8_8[indx])
@@ -446,49 +446,49 @@ check_v8hi_unsigned (uint16_t elemuhA, uint16_t elemuhB, uint16_t elemuhC,
   int indx;
   const uint16_t vecuh16x8_buf[8] = {AUH, BUH, CUH, DUH, EUH, FUH, GUH, HUH};
   uint16x8_t vecuh16x8_src = vld1q_u16 (vecuh16x8_buf);
-  uint16x8_t vecuh16x8_res = vecuh16x8_src * elemuhA;
+  uint16x8_t vecuh16x8_res = vmulq_n_u16 (vecuh16x8_src, elemuhA);
 
   for (indx = 0; indx < 8; indx++)
     if (vecuh16x8_res[indx] != expecteduh8_1[indx])
       abort ();
 
-  vecuh16x8_res = vecuh16x8_src * elemuhB;
+  vecuh16x8_res = vmulq_n_u16 (vecuh16x8_src, elemuhB);
 
   for (indx = 0; indx < 8; indx++)
     if (vecuh16x8_res[indx] != expecteduh8_2[indx])
       abort ();
 
-  vecuh16x8_res = vecuh16x8_src * elemuhC;
+  vecuh16x8_res = vmulq_n_u16 (vecuh16x8_src, elemuhC);
 
   for (indx = 0; indx < 8; indx++)
     if (vecuh16x8_res[indx] != expecteduh8_3[indx])
       abort ();
 
-  vecuh16x8_res = vecuh16x8_src * elemuhD;
+  vecuh16x8_res = vmulq_n_u16 (vecuh16x8_src, elemuhD);
 
   for (indx = 0; indx < 8; indx++)
     if (vecuh16x8_res[indx] != expecteduh8_4[indx])
       abort ();
 
-  vecuh16x8_res = vecuh16x8_src * elemuhE;
+  vecuh16x8_res = vmulq_n_u16 (vecuh16x8_src, elemuhE);
 
   for (indx = 0; indx < 8; indx++)
     if (vecuh16x8_res[indx] != expecteduh8_5[indx])
       abort ();
 
-  vecuh16x8_res = vecuh16x8_src * elemuhF;
+  vecuh16x8_res = vmulq_n_u16 (vecuh16x8_src, elemuhF);
 
   for (indx = 0; indx < 8; indx++)
     if (vecuh16x8_res[indx] != expecteduh8_6[indx])
       abort ();
 
-  vecuh16x8_res = vecuh16x8_src * elemuhG;
+  vecuh16x8_res = vmulq_n_u16 (vecuh16x8_src, elemuhG);
 
   for (indx = 0; indx < 8; indx++)
     if (vecuh16x8_res[indx] != expecteduh8_7[indx])
       abort ();
 
-  vecuh16x8_res = vecuh16x8_src * elemuhH;
+  vecuh16x8_res = vmulq_n_u16 (vecuh16x8_src, elemuhH);
 
   for (indx = 0; indx < 8; indx++)
     if (vecuh16x8_res[indx] != expecteduh8_8[indx])



* [AArch64, 2/4] Extend vector multiply by element to all supported modes
       [not found] <57398D3D.1040806@foss.arm.com>
       [not found] ` <57398D5E.6070503@foss.arm.com>
@ 2016-05-16  9:09 ` Jiong Wang
  2016-05-17 12:28   ` James Greenhalgh
  1 sibling, 1 reply; 10+ messages in thread
From: Jiong Wang @ 2016-05-16  9:09 UTC (permalink / raw)
  To: GCC Patches

[-- Attachment #1: Type: text/plain, Size: 1265 bytes --]

AArch64 supports vector multiply by element for V2DF, V2SF, V4SF, V2SI,
V4SI, V4HI and V8HI.

All of the above are already handled by the "*aarch64_mul3_elt<mode>"
pattern, and by "*aarch64_mul3_elt_<vswap_width_name><mode>" when there
is a lane size change.

Those patterns match "(mul (vec_dup (vec_select)))", which is a genuine
vector multiply by element.

However, vector multiply by element can also come from "(mul (vec_dup
(scalar)))", where the scalar value is already sitting in a vector
register and is then duplicated to the other lanes, with no lane size
change.

We already have "*aarch64_mul3_elt_to_128df" to match this, but it is
restricted to V2DF.  This patch extends the support to more modes, for
example to the vector integer operations.
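
A minimal source sketch of the "(mul (vec_dup (scalar)))" form
(hypothetical example; the function name and element index are made up,
mirroring the vector-times-scalar statements in the testcase):

#include <arm_neon.h>

/* p[40] is a scalar load; the value gets duplicated across the lanes
   and multiplied with v.  With the extended pattern this is expected
   to become "ldr s1, [x0, 160]" plus "mul v0.2s, v0.2s, v1.s[0]"
   instead of a separate dup and mul.  */
int32x2_t
scale_s32 (int32x2_t v, int32_t *p)
{
  return v * p[40];
}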

For the included testcase, the following codegen change happens:


-       ldr     w0, [x3, 160]
-       dup     v1.2s, w0
-       mul     v1.2s, v1.2s, v2.2s
+       ldr     s1, [x3, 160]
+       mul     v1.2s, v0.2s, v1.s[0]

OK for trunk?

2016-05-16  Jiong Wang<jiong.wang@arm.com>

gcc/
   * config/aarch64/aarch64-simd.md (*aarch64_mul3_elt_to_128df): Extend to all
   supported modes.  Rename to "*aarch64_mul3_elt_from_dup".

gcc/testsuite/
   * /gcc.target/aarch64/simd/vmul_elem_1.c: New.

   
-- 
Regards,
Jiong


[-- Attachment #2: 2.patch --]
[-- Type: text/x-patch, Size: 17640 bytes --]

diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index eb18defef15c24bf2334045e92bf7c34b989136d..7f338ff78fabccee868a4befbffed54c3e842dc9 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -371,15 +371,15 @@
   [(set_attr "type" "neon<fp>_mul_<Vetype>_scalar<q>")]
 )
 
-(define_insn "*aarch64_mul3_elt_to_128df"
-  [(set (match_operand:V2DF 0 "register_operand" "=w")
-     (mult:V2DF
-       (vec_duplicate:V2DF
-	 (match_operand:DF 2 "register_operand" "w"))
-      (match_operand:V2DF 1 "register_operand" "w")))]
+(define_insn "*aarch64_mul3_elt_from_dup<mode>"
+ [(set (match_operand:VMUL 0 "register_operand" "=w")
+    (mult:VMUL
+      (vec_duplicate:VMUL
+	    (match_operand:<VEL> 1 "register_operand" "<h_con>"))
+      (match_operand:VMUL 2 "register_operand" "w")))]
   "TARGET_SIMD"
-  "fmul\\t%0.2d, %1.2d, %2.d[0]"
-  [(set_attr "type" "neon_fp_mul_d_scalar_q")]
+  "<f>mul\t%0.<Vtype>, %2.<Vtype>, %1.<Vetype>[0]";
+  [(set_attr "type" "neon<fp>_mul_<Vetype>_scalar<q>")]
 )
 
 (define_insn "aarch64_rsqrte_<mode>2"
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vmul_elem_1.c b/gcc/testsuite/gcc.target/aarch64/simd/vmul_elem_1.c
new file mode 100644
index 0000000000000000000000000000000000000000..290a4e9adbc5d9ce1335ca28120e437293776f30
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vmul_elem_1.c
@@ -0,0 +1,519 @@
+/* Test the vmul_n_f64 AArch64 SIMD intrinsic.  */
+
+/* { dg-do run } */
+/* { dg-options "-O2 --save-temps" } */
+
+#include "arm_neon.h"
+
+extern void abort (void);
+
+#define A (132.4f)
+#define B (-0.0f)
+#define C (-34.8f)
+#define D (289.34f)
+float32_t expected2_1[2] = {A * A, B * A};
+float32_t expected2_2[2] = {A * B, B * B};
+float32_t expected4_1[4] = {A * A, B * A, C * A, D * A};
+float32_t expected4_2[4] = {A * B, B * B, C * B, D * B};
+float32_t expected4_3[4] = {A * C, B * C, C * C, D * C};
+float32_t expected4_4[4] = {A * D, B * D, C * D, D * D};
+float32_t _elemA = A;
+float32_t _elemB = B;
+float32_t _elemC = C;
+float32_t _elemD = D;
+
+#define AD (1234.5)
+#define BD (-0.0)
+#define CD (71.3)
+#define DD (-1024.4)
+float64_t expectedd2_1[2] = {AD * CD, BD * CD};
+float64_t expectedd2_2[2] = {AD * DD, BD * DD};
+float64_t _elemdC = CD;
+float64_t _elemdD = DD;
+
+
+#define AS (1024)
+#define BS (-31)
+#define CS (0)
+#define DS (655)
+int32_t expecteds2_1[2] = {AS * AS, BS * AS};
+int32_t expecteds2_2[2] = {AS * BS, BS * BS};
+int32_t expecteds4_1[4] = {AS * AS, BS * AS, CS * AS, DS * AS};
+int32_t expecteds4_2[4] = {AS * BS, BS * BS, CS * BS, DS * BS};
+int32_t expecteds4_3[4] = {AS * CS, BS * CS, CS * CS, DS * CS};
+int32_t expecteds4_4[4] = {AS * DS, BS * DS, CS * DS, DS * DS};
+int32_t _elemsA = AS;
+int32_t _elemsB = BS;
+int32_t _elemsC = CS;
+int32_t _elemsD = DS;
+
+#define AH ((int16_t) 0)
+#define BH ((int16_t) -32)
+#define CH ((int16_t) 102)
+#define DH ((int16_t) -51)
+#define EH ((int16_t) 71)
+#define FH ((int16_t) -91)
+#define GH ((int16_t) 48)
+#define HH ((int16_t) 255)
+int16_t expectedh4_1[4] = {AH * AH, BH * AH, CH * AH, DH * AH};
+int16_t expectedh4_2[4] = {AH * BH, BH * BH, CH * BH, DH * BH};
+int16_t expectedh4_3[4] = {AH * CH, BH * CH, CH * CH, DH * CH};
+int16_t expectedh4_4[4] = {AH * DH, BH * DH, CH * DH, DH * DH};
+int16_t expectedh8_1[8] = {AH * AH, BH * AH, CH * AH, DH * AH,
+			   EH * AH, FH * AH, GH * AH, HH * AH};
+int16_t expectedh8_2[8] = {AH * BH, BH * BH, CH * BH, DH * BH,
+			   EH * BH, FH * BH, GH * BH, HH * BH};
+int16_t expectedh8_3[8] = {AH * CH, BH * CH, CH * CH, DH * CH,
+			   EH * CH, FH * CH, GH * CH, HH * CH};
+int16_t expectedh8_4[8] = {AH * DH, BH * DH, CH * DH, DH * DH,
+			   EH * DH, FH * DH, GH * DH, HH * DH};
+int16_t expectedh8_5[8] = {AH * EH, BH * EH, CH * EH, DH * EH,
+			   EH * EH, FH * EH, GH * EH, HH * EH};
+int16_t expectedh8_6[8] = {AH * FH, BH * FH, CH * FH, DH * FH,
+			   EH * FH, FH * FH, GH * FH, HH * FH};
+int16_t expectedh8_7[8] = {AH * GH, BH * GH, CH * GH, DH * GH,
+			   EH * GH, FH * GH, GH * GH, HH * GH};
+int16_t expectedh8_8[8] = {AH * HH, BH * HH, CH * HH, DH * HH,
+			   EH * HH, FH * HH, GH * HH, HH * HH};
+int16_t _elemhA = AH;
+int16_t _elemhB = BH;
+int16_t _elemhC = CH;
+int16_t _elemhD = DH;
+int16_t _elemhE = EH;
+int16_t _elemhF = FH;
+int16_t _elemhG = GH;
+int16_t _elemhH = HH;
+
+#define AUS (1024)
+#define BUS (31)
+#define CUS (0)
+#define DUS (655)
+uint32_t expectedus2_1[2] = {AUS * AUS, BUS * AUS};
+uint32_t expectedus2_2[2] = {AUS * BUS, BUS * BUS};
+uint32_t expectedus4_1[4] = {AUS * AUS, BUS * AUS, CUS * AUS, DUS * AUS};
+uint32_t expectedus4_2[4] = {AUS * BUS, BUS * BUS, CUS * BUS, DUS * BUS};
+uint32_t expectedus4_3[4] = {AUS * CUS, BUS * CUS, CUS * CUS, DUS * CUS};
+uint32_t expectedus4_4[4] = {AUS * DUS, BUS * DUS, CUS * DUS, DUS * DUS};
+uint32_t _elemusA = AUS;
+uint32_t _elemusB = BUS;
+uint32_t _elemusC = CUS;
+uint32_t _elemusD = DUS;
+
+#define AUH ((uint16_t) 0)
+#define BUH ((uint16_t) 32)
+#define CUH ((uint16_t) 102)
+#define DUH ((uint16_t) 51)
+#define EUH ((uint16_t) 71)
+#define FUH ((uint16_t) 91)
+#define GUH ((uint16_t) 48)
+#define HUH ((uint16_t) 255)
+uint16_t expecteduh4_1[4] = {AUH * AUH, BUH * AUH, CUH * AUH, DUH * AUH};
+uint16_t expecteduh4_2[4] = {AUH * BUH, BUH * BUH, CUH * BUH, DUH * BUH};
+uint16_t expecteduh4_3[4] = {AUH * CUH, BUH * CUH, CUH * CUH, DUH * CUH};
+uint16_t expecteduh4_4[4] = {AUH * DUH, BUH * DUH, CUH * DUH, DUH * DUH};
+uint16_t expecteduh8_1[8] = {AUH * AUH, BUH * AUH, CUH * AUH, DUH * AUH,
+			     EUH * AUH, FUH * AUH, GUH * AUH, HUH * AUH};
+uint16_t expecteduh8_2[8] = {AUH * BUH, BUH * BUH, CUH * BUH, DUH * BUH,
+			     EUH * BUH, FUH * BUH, GUH * BUH, HUH * BUH};
+uint16_t expecteduh8_3[8] = {AUH * CUH, BUH * CUH, CUH * CUH, DUH * CUH,
+			     EUH * CUH, FUH * CUH, GUH * CUH, HUH * CUH};
+uint16_t expecteduh8_4[8] = {AUH * DUH, BUH * DUH, CUH * DUH, DUH * DUH,
+			     EUH * DUH, FUH * DUH, GUH * DUH, HUH * DUH};
+uint16_t expecteduh8_5[8] = {AUH * EUH, BUH * EUH, CUH * EUH, DUH * EUH,
+			     EUH * EUH, FUH * EUH, GUH * EUH, HUH * EUH};
+uint16_t expecteduh8_6[8] = {AUH * FUH, BUH * FUH, CUH * FUH, DUH * FUH,
+			     EUH * FUH, FUH * FUH, GUH * FUH, HUH * FUH};
+uint16_t expecteduh8_7[8] = {AUH * GUH, BUH * GUH, CUH * GUH, DUH * GUH,
+			     EUH * GUH, FUH * GUH, GUH * GUH, HUH * GUH};
+uint16_t expecteduh8_8[8] = {AUH * HUH, BUH * HUH, CUH * HUH, DUH * HUH,
+			     EUH * HUH, FUH * HUH, GUH * HUH, HUH * HUH};
+uint16_t _elemuhA = AUH;
+uint16_t _elemuhB = BUH;
+uint16_t _elemuhC = CUH;
+uint16_t _elemuhD = DUH;
+uint16_t _elemuhE = EUH;
+uint16_t _elemuhF = FUH;
+uint16_t _elemuhG = GUH;
+uint16_t _elemuhH = HUH;
+
+void
+check_v2sf (float32_t elemA, float32_t elemB)
+{
+  int32_t indx;
+  const float32_t vec32x2_buf[2] = {A, B};
+  float32x2_t vec32x2_src = vld1_f32 (vec32x2_buf);
+  float32x2_t vec32x2_res = vec32x2_src * elemA;
+
+  for (indx = 0; indx < 2; indx++)
+    if (* (uint32_t *) &vec32x2_res[indx] != * (uint32_t *) &expected2_1[indx])
+      abort ();
+
+  vec32x2_res = vec32x2_src * elemB;
+
+  for (indx = 0; indx < 2; indx++)
+    if (* (uint32_t *) &vec32x2_res[indx] != * (uint32_t *) &expected2_2[indx])
+      abort ();
+
+/* { dg-final { scan-assembler-times "fmul\tv\[0-9\]+\.2s, v\[0-9\]+\.2s, v\[0-9\]+\.s\\\[0\\\]" 2 } } */
+}
+
+void
+check_v4sf (float32_t elemA, float32_t elemB, float32_t elemC, float32_t elemD)
+{
+  int32_t indx;
+  const float32_t vec32x4_buf[4] = {A, B, C, D};
+  float32x4_t vec32x4_src = vld1q_f32 (vec32x4_buf);
+  float32x4_t vec32x4_res = vec32x4_src * elemA;
+
+  for (indx = 0; indx < 4; indx++)
+    if (* (uint32_t *) &vec32x4_res[indx] != * (uint32_t *) &expected4_1[indx])
+      abort ();
+
+  vec32x4_res = vec32x4_src * elemB;
+
+  for (indx = 0; indx < 4; indx++)
+    if (* (uint32_t *) &vec32x4_res[indx] != * (uint32_t *) &expected4_2[indx])
+      abort ();
+
+  vec32x4_res = vec32x4_src * elemC;
+
+  for (indx = 0; indx < 4; indx++)
+    if (* (uint32_t *) &vec32x4_res[indx] != * (uint32_t *) &expected4_3[indx])
+      abort ();
+
+  vec32x4_res = vec32x4_src * elemD;
+
+  for (indx = 0; indx < 4; indx++)
+    if (* (uint32_t *) &vec32x4_res[indx] != * (uint32_t *) &expected4_4[indx])
+      abort ();
+
+/* { dg-final { scan-assembler-times "fmul\tv\[0-9\]+\.4s, v\[0-9\]+\.4s, v\[0-9\]+\.s\\\[0\\\]" 4 } } */
+}
+
+void
+check_v2df (float64_t elemdC, float64_t elemdD)
+{
+  int32_t indx;
+  const float64_t vec64x2_buf[2] = {AD, BD};
+  float64x2_t vec64x2_src = vld1q_f64 (vec64x2_buf);
+  float64x2_t vec64x2_res = vec64x2_src * elemdC;
+
+  for (indx = 0; indx < 2; indx++)
+    if (* (uint64_t *) &vec64x2_res[indx] != * (uint64_t *) &expectedd2_1[indx])
+      abort ();
+
+  vec64x2_res = vec64x2_src * elemdD;
+
+  for (indx = 0; indx < 2; indx++)
+    if (* (uint64_t *) &vec64x2_res[indx] != * (uint64_t *) &expectedd2_2[indx])
+      abort ();
+
+/* { dg-final { scan-assembler-times "fmul\tv\[0-9\]+\.2d, v\[0-9\]+\.2d, v\[0-9\]+\.d\\\[0\\\]" 2 } } */
+}
+
+void
+check_v2si (int32_t elemsA, int32_t elemsB)
+{
+  int32_t indx;
+  const int32_t vecs32x2_buf[2] = {AS, BS};
+  int32x2_t vecs32x2_src = vld1_s32 (vecs32x2_buf);
+  int32x2_t vecs32x2_res = vecs32x2_src * elemsA;
+
+  for (indx = 0; indx < 2; indx++)
+    if (vecs32x2_res[indx] != expecteds2_1[indx])
+      abort ();
+
+  vecs32x2_res = vecs32x2_src * elemsB;
+
+  for (indx = 0; indx < 2; indx++)
+    if (vecs32x2_res[indx] != expecteds2_2[indx])
+      abort ();
+}
+
+void
+check_v2si_unsigned (uint32_t elemusA, uint32_t elemusB)
+{
+  int indx;
+  const uint32_t vecus32x2_buf[2] = {AUS, BUS};
+  uint32x2_t vecus32x2_src = vld1_u32 (vecus32x2_buf);
+  uint32x2_t vecus32x2_res = vecus32x2_src * elemusA;
+
+  for (indx = 0; indx < 2; indx++)
+    if (vecus32x2_res[indx] != expectedus2_1[indx])
+      abort ();
+
+  vecus32x2_res = vecus32x2_src * elemusB;
+
+  for (indx = 0; indx < 2; indx++)
+    if (vecus32x2_res[indx] != expectedus2_2[indx])
+      abort ();
+
+/* { dg-final { scan-assembler-times "\tmul\tv\[0-9\]+\.2s, v\[0-9\]+\.2s, v\[0-9\]+\.s\\\[0\\\]" 4 } } */
+}
+
+void
+check_v4si (int32_t elemsA, int32_t elemsB, int32_t elemsC, int32_t elemsD)
+{
+  int32_t indx;
+  const int32_t vecs32x4_buf[4] = {AS, BS, CS, DS};
+  int32x4_t vecs32x4_src = vld1q_s32 (vecs32x4_buf);
+  int32x4_t vecs32x4_res = vecs32x4_src * elemsA;
+
+  for (indx = 0; indx < 4; indx++)
+    if (vecs32x4_res[indx] != expecteds4_1[indx])
+      abort ();
+
+  vecs32x4_res = vecs32x4_src * elemsB;
+
+  for (indx = 0; indx < 4; indx++)
+    if (vecs32x4_res[indx] != expecteds4_2[indx])
+      abort ();
+
+  vecs32x4_res = vecs32x4_src * elemsC;
+
+  for (indx = 0; indx < 4; indx++)
+    if (vecs32x4_res[indx] != expecteds4_3[indx])
+      abort ();
+
+  vecs32x4_res = vecs32x4_src * elemsD;
+
+  for (indx = 0; indx < 4; indx++)
+    if (vecs32x4_res[indx] != expecteds4_4[indx])
+      abort ();
+}
+
+void
+check_v4si_unsigned (uint32_t elemusA, uint32_t elemusB, uint32_t elemusC,
+		     uint32_t elemusD)
+{
+  int indx;
+  const uint32_t vecus32x4_buf[4] = {AUS, BUS, CUS, DUS};
+  uint32x4_t vecus32x4_src = vld1q_u32 (vecus32x4_buf);
+  uint32x4_t vecus32x4_res = vecus32x4_src * elemusA;
+
+  for (indx = 0; indx < 4; indx++)
+    if (vecus32x4_res[indx] != expectedus4_1[indx])
+      abort ();
+
+  vecus32x4_res = vecus32x4_src * elemusB;
+
+  for (indx = 0; indx < 4; indx++)
+    if (vecus32x4_res[indx] != expectedus4_2[indx])
+      abort ();
+
+  vecus32x4_res = vecus32x4_src * elemusC;
+
+  for (indx = 0; indx < 4; indx++)
+    if (vecus32x4_res[indx] != expectedus4_3[indx])
+      abort ();
+
+  vecus32x4_res = vecus32x4_src * elemusD;
+
+  for (indx = 0; indx < 4; indx++)
+    if (vecus32x4_res[indx] != expectedus4_4[indx])
+      abort ();
+
+/* { dg-final { scan-assembler-times "\tmul\tv\[0-9\]+\.4s, v\[0-9\]+\.4s, v\[0-9\]+\.s\\\[0\\\]" 8 } } */
+}
+
+
+void
+check_v4hi (int16_t elemhA, int16_t elemhB, int16_t elemhC, int16_t elemhD)
+{
+  int32_t indx;
+  const int16_t vech16x4_buf[4] = {AH, BH, CH, DH};
+  int16x4_t vech16x4_src = vld1_s16 (vech16x4_buf);
+  int16x4_t vech16x4_res = vech16x4_src * elemhA;
+
+  for (indx = 0; indx < 4; indx++)
+    if (vech16x4_res[indx] != expectedh4_1[indx])
+      abort ();
+
+  vech16x4_res = vech16x4_src * elemhB;
+
+  for (indx = 0; indx < 4; indx++)
+    if (vech16x4_res[indx] != expectedh4_2[indx])
+      abort ();
+
+  vech16x4_res = vech16x4_src * elemhC;
+
+  for (indx = 0; indx < 4; indx++)
+    if (vech16x4_res[indx] != expectedh4_3[indx])
+      abort ();
+
+  vech16x4_res = vech16x4_src * elemhD;
+
+  for (indx = 0; indx < 4; indx++)
+    if (vech16x4_res[indx] != expectedh4_4[indx])
+      abort ();
+}
+
+void
+check_v4hi_unsigned (uint16_t elemuhA, uint16_t elemuhB, uint16_t elemuhC,
+		     uint16_t elemuhD)
+{
+  int indx;
+  const uint16_t vecuh16x4_buf[4] = {AUH, BUH, CUH, DUH};
+  uint16x4_t vecuh16x4_src = vld1_u16 (vecuh16x4_buf);
+  uint16x4_t vecuh16x4_res = vecuh16x4_src * elemuhA;
+
+  for (indx = 0; indx < 4; indx++)
+    if (vecuh16x4_res[indx] != expecteduh4_1[indx])
+      abort ();
+
+  vecuh16x4_res = vecuh16x4_src * elemuhB;
+
+  for (indx = 0; indx < 4; indx++)
+    if (vecuh16x4_res[indx] != expecteduh4_2[indx])
+      abort ();
+
+  vecuh16x4_res = vecuh16x4_src * elemuhC;
+
+  for (indx = 0; indx < 4; indx++)
+    if (vecuh16x4_res[indx] != expecteduh4_3[indx])
+      abort ();
+
+  vecuh16x4_res = vecuh16x4_src * elemuhD;
+
+  for (indx = 0; indx < 4; indx++)
+    if (vecuh16x4_res[indx] != expecteduh4_4[indx])
+      abort ();
+
+/* { dg-final { scan-assembler-times "mul\tv\[0-9\]+\.4h, v\[0-9\]+\.4h, v\[0-9\]+\.h\\\[0\\\]" 8 } } */
+}
+
+void
+check_v8hi (int16_t elemhA, int16_t elemhB, int16_t elemhC, int16_t elemhD,
+	    int16_t elemhE, int16_t elemhF, int16_t elemhG, int16_t elemhH)
+{
+  int32_t indx;
+  const int16_t vech16x8_buf[8] = {AH, BH, CH, DH, EH, FH, GH, HH};
+  int16x8_t vech16x8_src = vld1q_s16 (vech16x8_buf);
+  int16x8_t vech16x8_res = vech16x8_src * elemhA;
+
+  for (indx = 0; indx < 8; indx++)
+    if (vech16x8_res[indx] != expectedh8_1[indx])
+      abort ();
+
+  vech16x8_res = vech16x8_src * elemhB;
+
+  for (indx = 0; indx < 8; indx++)
+    if (vech16x8_res[indx] != expectedh8_2[indx])
+      abort ();
+
+  vech16x8_res = vech16x8_src * elemhC;
+
+  for (indx = 0; indx < 8; indx++)
+    if (vech16x8_res[indx] != expectedh8_3[indx])
+      abort ();
+
+  vech16x8_res = vech16x8_src * elemhD;
+
+  for (indx = 0; indx < 8; indx++)
+    if (vech16x8_res[indx] != expectedh8_4[indx])
+      abort ();
+
+  vech16x8_res = vech16x8_src * elemhE;
+
+  for (indx = 0; indx < 8; indx++)
+    if (vech16x8_res[indx] != expectedh8_5[indx])
+      abort ();
+
+  vech16x8_res = vech16x8_src * elemhF;
+
+  for (indx = 0; indx < 8; indx++)
+    if (vech16x8_res[indx] != expectedh8_6[indx])
+      abort ();
+
+  vech16x8_res = vech16x8_src * elemhG;
+
+  for (indx = 0; indx < 8; indx++)
+    if (vech16x8_res[indx] != expectedh8_7[indx])
+      abort ();
+
+  vech16x8_res = vech16x8_src * elemhH;
+
+  for (indx = 0; indx < 8; indx++)
+    if (vech16x8_res[indx] != expectedh8_8[indx])
+      abort ();
+}
+
+void
+check_v8hi_unsigned (uint16_t elemuhA, uint16_t elemuhB, uint16_t elemuhC,
+		     uint16_t elemuhD, uint16_t elemuhE, uint16_t elemuhF,
+		     uint16_t elemuhG, uint16_t elemuhH)
+{
+  int indx;
+  const uint16_t vecuh16x8_buf[8] = {AUH, BUH, CUH, DUH, EUH, FUH, GUH, HUH};
+  uint16x8_t vecuh16x8_src = vld1q_u16 (vecuh16x8_buf);
+  uint16x8_t vecuh16x8_res = vecuh16x8_src * elemuhA;
+
+  for (indx = 0; indx < 8; indx++)
+    if (vecuh16x8_res[indx] != expecteduh8_1[indx])
+      abort ();
+
+  vecuh16x8_res = vecuh16x8_src * elemuhB;
+
+  for (indx = 0; indx < 8; indx++)
+    if (vecuh16x8_res[indx] != expecteduh8_2[indx])
+      abort ();
+
+  vecuh16x8_res = vecuh16x8_src * elemuhC;
+
+  for (indx = 0; indx < 8; indx++)
+    if (vecuh16x8_res[indx] != expecteduh8_3[indx])
+      abort ();
+
+  vecuh16x8_res = vecuh16x8_src * elemuhD;
+
+  for (indx = 0; indx < 8; indx++)
+    if (vecuh16x8_res[indx] != expecteduh8_4[indx])
+      abort ();
+
+  vecuh16x8_res = vecuh16x8_src * elemuhE;
+
+  for (indx = 0; indx < 8; indx++)
+    if (vecuh16x8_res[indx] != expecteduh8_5[indx])
+      abort ();
+
+  vecuh16x8_res = vecuh16x8_src * elemuhF;
+
+  for (indx = 0; indx < 8; indx++)
+    if (vecuh16x8_res[indx] != expecteduh8_6[indx])
+      abort ();
+
+  vecuh16x8_res = vecuh16x8_src * elemuhG;
+
+  for (indx = 0; indx < 8; indx++)
+    if (vecuh16x8_res[indx] != expecteduh8_7[indx])
+      abort ();
+
+  vecuh16x8_res = vecuh16x8_src * elemuhH;
+
+  for (indx = 0; indx < 8; indx++)
+    if (vecuh16x8_res[indx] != expecteduh8_8[indx])
+      abort ();
+
+/* { dg-final { scan-assembler-times "mul\tv\[0-9\]+\.8h, v\[0-9\]+\.8h, v\[0-9\]+\.h\\\[0\\\]" 16 } } */
+}
+
+int
+main (void)
+{
+  check_v2sf (_elemA, _elemB);
+  check_v4sf (_elemA, _elemB, _elemC, _elemD);
+  check_v2df (_elemdC, _elemdD);
+  check_v2si (_elemsA, _elemsB);
+  check_v4si (_elemsA, _elemsB, _elemsC, _elemsD);
+  check_v4hi (_elemhA, _elemhB, _elemhC, _elemhD);
+  check_v8hi (_elemhA, _elemhB, _elemhC, _elemhD,
+	      _elemhE, _elemhF, _elemhG, _elemhH);
+  check_v2si_unsigned (_elemusA, _elemusB);
+  check_v4si_unsigned (_elemusA, _elemusB, _elemusC, _elemusD);
+  check_v4hi_unsigned (_elemuhA, _elemuhB, _elemuhC, _elemuhD);
+  check_v8hi_unsigned (_elemuhA, _elemuhB, _elemuhC, _elemuhD,
+		       _elemuhE, _elemuhF, _elemuhG, _elemuhH);
+
+  return 0;
+}
+



* [AArch64, 4/4] Reimplement vmvn* intrinsics, remove inline assembly
       [not found]   ` <57398D8B.8060902@foss.arm.com>
@ 2016-05-16  9:09     ` Jiong Wang
  2016-05-17 12:38       ` James Greenhalgh
  0 siblings, 1 reply; 10+ messages in thread
From: Jiong Wang @ 2016-05-16  9:09 UTC (permalink / raw)
  To: GCC Patches

[-- Attachment #1: Type: text/plain, Size: 809 bytes --]

This patch removes the inline assembly and reimplements all vmvn/vmvnq
vector integer intrinsics on top of the standard "one_cmpl<mode>2"
pattern.  That pattern was only introduced after the initial
implementation of these intrinsics, which is why inline assembly was
used historically.
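
A hedged usage sketch (hypothetical, not part of the patch): the C
operator expands through "one_cmpl<mode>2", so a plain call should
still compile to a single MVN, the same code as the old inline
assembly:

#include <arm_neon.h>

/* vmvn_s8 is now "return ~__a"; this is expected to emit
   "mvn v0.8b, v0.8b" at -O2.  */
int8x8_t
invert_s8 (int8x8_t a)
{
  return vmvn_s8 (a);
}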

No regressions on the existing advsimd-intrinsics/vmvn.c test.

OK for trunk?

2016-05-16  Jiong Wang  <jiong.wang@arm.com>

gcc/
   * config/aarch64/arm_neon.h (vmvn_s8): Reimplement using C operator.
   Remove inline assembly.
   (vmvn_s16): Likewise.
   (vmvn_s32): Likewise.
   (vmvn_u8): Likewise.
   (vmvn_u16): Likewise.
   (vmvn_u32): Likewise.
   (vmvnq_s8): Likewise.
   (vmvnq_s16): Likewise.
   (vmvnq_s32): Likewise.
   (vmvnq_u8): Likewise.
   (vmvnq_u16): Likewise.
   (vmvnq_u32): Likewise.
   (vmvn_p8): Likewise.
   (vmvnq_p16): Likewise.


[-- Attachment #2: 4.patch --]
[-- Type: text/x-patch, Size: 6163 bytes --]

diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index 432a1fad9a6df6fef844896df5e8ad29cc31f548..ae4c429a87822a8807f2d2ec054d3194b39ef6ac 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -8093,161 +8093,6 @@ vmull_u32 (uint32x2_t a, uint32x2_t b)
   return result;
 }
 
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vmvn_p8 (poly8x8_t a)
-{
-  poly8x8_t result;
-  __asm__ ("mvn %0.8b,%1.8b"
-           : "=w"(result)
-           : "w"(a)
-           : /* No clobbers */);
-  return result;
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vmvn_s8 (int8x8_t a)
-{
-  int8x8_t result;
-  __asm__ ("mvn %0.8b,%1.8b"
-           : "=w"(result)
-           : "w"(a)
-           : /* No clobbers */);
-  return result;
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vmvn_s16 (int16x4_t a)
-{
-  int16x4_t result;
-  __asm__ ("mvn %0.8b,%1.8b"
-           : "=w"(result)
-           : "w"(a)
-           : /* No clobbers */);
-  return result;
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vmvn_s32 (int32x2_t a)
-{
-  int32x2_t result;
-  __asm__ ("mvn %0.8b,%1.8b"
-           : "=w"(result)
-           : "w"(a)
-           : /* No clobbers */);
-  return result;
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vmvn_u8 (uint8x8_t a)
-{
-  uint8x8_t result;
-  __asm__ ("mvn %0.8b,%1.8b"
-           : "=w"(result)
-           : "w"(a)
-           : /* No clobbers */);
-  return result;
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vmvn_u16 (uint16x4_t a)
-{
-  uint16x4_t result;
-  __asm__ ("mvn %0.8b,%1.8b"
-           : "=w"(result)
-           : "w"(a)
-           : /* No clobbers */);
-  return result;
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vmvn_u32 (uint32x2_t a)
-{
-  uint32x2_t result;
-  __asm__ ("mvn %0.8b,%1.8b"
-           : "=w"(result)
-           : "w"(a)
-           : /* No clobbers */);
-  return result;
-}
-
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
-vmvnq_p8 (poly8x16_t a)
-{
-  poly8x16_t result;
-  __asm__ ("mvn %0.16b,%1.16b"
-           : "=w"(result)
-           : "w"(a)
-           : /* No clobbers */);
-  return result;
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vmvnq_s8 (int8x16_t a)
-{
-  int8x16_t result;
-  __asm__ ("mvn %0.16b,%1.16b"
-           : "=w"(result)
-           : "w"(a)
-           : /* No clobbers */);
-  return result;
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vmvnq_s16 (int16x8_t a)
-{
-  int16x8_t result;
-  __asm__ ("mvn %0.16b,%1.16b"
-           : "=w"(result)
-           : "w"(a)
-           : /* No clobbers */);
-  return result;
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vmvnq_s32 (int32x4_t a)
-{
-  int32x4_t result;
-  __asm__ ("mvn %0.16b,%1.16b"
-           : "=w"(result)
-           : "w"(a)
-           : /* No clobbers */);
-  return result;
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vmvnq_u8 (uint8x16_t a)
-{
-  uint8x16_t result;
-  __asm__ ("mvn %0.16b,%1.16b"
-           : "=w"(result)
-           : "w"(a)
-           : /* No clobbers */);
-  return result;
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vmvnq_u16 (uint16x8_t a)
-{
-  uint16x8_t result;
-  __asm__ ("mvn %0.16b,%1.16b"
-           : "=w"(result)
-           : "w"(a)
-           : /* No clobbers */);
-  return result;
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vmvnq_u32 (uint32x4_t a)
-{
-  uint32x4_t result;
-  __asm__ ("mvn %0.16b,%1.16b"
-           : "=w"(result)
-           : "w"(a)
-           : /* No clobbers */);
-  return result;
-}
-
-
 __extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
 vpadal_s8 (int16x4_t a, int8x8_t b)
 {
@@ -18622,6 +18467,92 @@ vmulq_n_u32 (uint32x4_t __a, uint32_t __b)
   return __a * __b;
 }
 
+/* vmvn  */
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vmvn_p8 (poly8x8_t __a)
+{
+  return (poly8x8_t) ~((int8x8_t) __a);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vmvn_s8 (int8x8_t __a)
+{
+  return ~__a;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vmvn_s16 (int16x4_t __a)
+{
+  return ~__a;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vmvn_s32 (int32x2_t __a)
+{
+  return ~__a;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vmvn_u8 (uint8x8_t __a)
+{
+  return ~__a;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vmvn_u16 (uint16x4_t __a)
+{
+  return ~__a;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vmvn_u32 (uint32x2_t __a)
+{
+  return ~__a;
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vmvnq_p8 (poly8x16_t __a)
+{
+  return (poly8x16_t) ~((int8x16_t) __a);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vmvnq_s8 (int8x16_t __a)
+{
+  return ~__a;
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vmvnq_s16 (int16x8_t __a)
+{
+  return ~__a;
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vmvnq_s32 (int32x4_t __a)
+{
+  return ~__a;
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vmvnq_u8 (uint8x16_t __a)
+{
+  return ~__a;
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vmvnq_u16 (uint16x8_t __a)
+{
+  return ~__a;
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vmvnq_u32 (uint32x4_t __a)
+{
+  return ~__a;
+}
+
 /* vneg  */
 
 __extension__ static __inline float32x2_t __attribute__ ((__always_inline__))



* Re: [AArch64, 2/4] Extend vector multiply by element to all supported modes
  2016-05-16  9:09 ` [AArch64, 2/4] Extend vector multiply by element to all supported modes Jiong Wang
@ 2016-05-17 12:28   ` James Greenhalgh
  2016-05-18  8:17     ` Christophe Lyon
  0 siblings, 1 reply; 10+ messages in thread
From: James Greenhalgh @ 2016-05-17 12:28 UTC (permalink / raw)
  To: Jiong Wang; +Cc: GCC Patches, nd

On Mon, May 16, 2016 at 10:09:31AM +0100, Jiong Wang wrote:
> AArch64 support vector multiply by element for V2DF, V2SF, V4SF, V2SI,
> V4SI, V4HI, V8HI.
> 
> All above are well supported by "*aarch64_mul3_elt<mode>" pattern and
> "*aarch64_mul3_elt_<vswap_width_name><mode>" if there is lane size
> change.
> 
> Above patterns are trying to match "(mul (vec_dup (vec_select)))"
> which is genuinely vector multiply by element.
> 
> While vector multiply by element can also comes from "(mul (vec_dup
> (scalar" where the scalar value is already sitting in vector register
> then duplicated to other lanes, and there is no lane size change.
> 
> We have "*aarch64_mul3_elt_to_128df" to match this already, but it's
> restricted for V2DF while this patch extends this support to more modes,
> for example vector integer operations.
> 
> For the testcase included, the following codegen change will happen:
> 
> 
> -       ldr     w0, [x3, 160]
> -       dup     v1.2s, w0
> -       mul     v1.2s, v1.2s, v2.2s
> +       ldr     s1, [x3, 160]
> +       mul     v1.2s, v0.2s, v1.s[0]
> 
> OK for trunk?
> 
> 2016-05-16  Jiong Wang<jiong.wang@arm.com>
> 
> gcc/
>   * config/aarch64/aarch64-simd.md (*aarch64_mul3_elt_to_128df): Extend to all
>   supported modes.  Rename to "*aarch64_mul3_elt_from_dup".
> 
> gcc/testsuite/
>   * /gcc.target/aarch64/simd/vmul_elem_1.c: New.


This ChangeLog formatting is incorrect. It should look like:

gcc/

2016-05-17  Jiong Wang  <jiong.wang@arm.com>

	* config/aarch64/aarch64-simd.md (*aarch64_mul3_elt_to_128df): Extend
	to all supported modes.  Rename to...
	(*aarch64_mul3_elt_from_dup): ...this.

gcc/testsuite/

2016-05-17  Jiong Wang  <jiong.wang@arm.com>

	* gcc.target/aarch64/simd/vmul_elem_1.c: New.

Otherwise, this patch is OK.

Thanks,
James

> diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
> index eb18defef15c24bf2334045e92bf7c34b989136d..7f338ff78fabccee868a4befbffed54c3e842dc9 100644
> --- a/gcc/config/aarch64/aarch64-simd.md
> +++ b/gcc/config/aarch64/aarch64-simd.md
> @@ -371,15 +371,15 @@
>    [(set_attr "type" "neon<fp>_mul_<Vetype>_scalar<q>")]
>  )
>  
> -(define_insn "*aarch64_mul3_elt_to_128df"
> -  [(set (match_operand:V2DF 0 "register_operand" "=w")
> -     (mult:V2DF
> -       (vec_duplicate:V2DF
> -	 (match_operand:DF 2 "register_operand" "w"))
> -      (match_operand:V2DF 1 "register_operand" "w")))]
> +(define_insn "*aarch64_mul3_elt_from_dup<mode>"
> + [(set (match_operand:VMUL 0 "register_operand" "=w")
> +    (mult:VMUL
> +      (vec_duplicate:VMUL
> +	    (match_operand:<VEL> 1 "register_operand" "<h_con>"))
> +      (match_operand:VMUL 2 "register_operand" "w")))]
>    "TARGET_SIMD"
> -  "fmul\\t%0.2d, %1.2d, %2.d[0]"
> -  [(set_attr "type" "neon_fp_mul_d_scalar_q")]
> +  "<f>mul\t%0.<Vtype>, %2.<Vtype>, %1.<Vetype>[0]";
> +  [(set_attr "type" "neon<fp>_mul_<Vetype>_scalar<q>")]
>  )
>  
>  (define_insn "aarch64_rsqrte_<mode>2"
> diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vmul_elem_1.c b/gcc/testsuite/gcc.target/aarch64/simd/vmul_elem_1.c
> new file mode 100644
> index 0000000000000000000000000000000000000000..290a4e9adbc5d9ce1335ca28120e437293776f30
> --- /dev/null
> +++ b/gcc/testsuite/gcc.target/aarch64/simd/vmul_elem_1.c
> @@ -0,0 +1,519 @@
> +/* Test the vmul_n_f64 AArch64 SIMD intrinsic.  */
> +
> +/* { dg-do run } */
> +/* { dg-options "-O2 --save-temps" } */
> +
> +#include "arm_neon.h"
> +
> +extern void abort (void);
> +
> +#define A (132.4f)
> +#define B (-0.0f)
> +#define C (-34.8f)
> +#define D (289.34f)
> +float32_t expected2_1[2] = {A * A, B * A};
> +float32_t expected2_2[2] = {A * B, B * B};
> +float32_t expected4_1[4] = {A * A, B * A, C * A, D * A};
> +float32_t expected4_2[4] = {A * B, B * B, C * B, D * B};
> +float32_t expected4_3[4] = {A * C, B * C, C * C, D * C};
> +float32_t expected4_4[4] = {A * D, B * D, C * D, D * D};
> +float32_t _elemA = A;
> +float32_t _elemB = B;
> +float32_t _elemC = C;
> +float32_t _elemD = D;
> +
> +#define AD (1234.5)
> +#define BD (-0.0)
> +#define CD (71.3)
> +#define DD (-1024.4)
> +float64_t expectedd2_1[2] = {AD * CD, BD * CD};
> +float64_t expectedd2_2[2] = {AD * DD, BD * DD};
> +float64_t _elemdC = CD;
> +float64_t _elemdD = DD;
> +
> +
> +#define AS (1024)
> +#define BS (-31)
> +#define CS (0)
> +#define DS (655)
> +int32_t expecteds2_1[2] = {AS * AS, BS * AS};
> +int32_t expecteds2_2[2] = {AS * BS, BS * BS};
> +int32_t expecteds4_1[4] = {AS * AS, BS * AS, CS * AS, DS * AS};
> +int32_t expecteds4_2[4] = {AS * BS, BS * BS, CS * BS, DS * BS};
> +int32_t expecteds4_3[4] = {AS * CS, BS * CS, CS * CS, DS * CS};
> +int32_t expecteds4_4[4] = {AS * DS, BS * DS, CS * DS, DS * DS};
> +int32_t _elemsA = AS;
> +int32_t _elemsB = BS;
> +int32_t _elemsC = CS;
> +int32_t _elemsD = DS;
> +
> +#define AH ((int16_t) 0)
> +#define BH ((int16_t) -32)
> +#define CH ((int16_t) 102)
> +#define DH ((int16_t) -51)
> +#define EH ((int16_t) 71)
> +#define FH ((int16_t) -91)
> +#define GH ((int16_t) 48)
> +#define HH ((int16_t) 255)
> +int16_t expectedh4_1[4] = {AH * AH, BH * AH, CH * AH, DH * AH};
> +int16_t expectedh4_2[4] = {AH * BH, BH * BH, CH * BH, DH * BH};
> +int16_t expectedh4_3[4] = {AH * CH, BH * CH, CH * CH, DH * CH};
> +int16_t expectedh4_4[4] = {AH * DH, BH * DH, CH * DH, DH * DH};
> +int16_t expectedh8_1[8] = {AH * AH, BH * AH, CH * AH, DH * AH,
> +			   EH * AH, FH * AH, GH * AH, HH * AH};
> +int16_t expectedh8_2[8] = {AH * BH, BH * BH, CH * BH, DH * BH,
> +			   EH * BH, FH * BH, GH * BH, HH * BH};
> +int16_t expectedh8_3[8] = {AH * CH, BH * CH, CH * CH, DH * CH,
> +			   EH * CH, FH * CH, GH * CH, HH * CH};
> +int16_t expectedh8_4[8] = {AH * DH, BH * DH, CH * DH, DH * DH,
> +			   EH * DH, FH * DH, GH * DH, HH * DH};
> +int16_t expectedh8_5[8] = {AH * EH, BH * EH, CH * EH, DH * EH,
> +			   EH * EH, FH * EH, GH * EH, HH * EH};
> +int16_t expectedh8_6[8] = {AH * FH, BH * FH, CH * FH, DH * FH,
> +			   EH * FH, FH * FH, GH * FH, HH * FH};
> +int16_t expectedh8_7[8] = {AH * GH, BH * GH, CH * GH, DH * GH,
> +			   EH * GH, FH * GH, GH * GH, HH * GH};
> +int16_t expectedh8_8[8] = {AH * HH, BH * HH, CH * HH, DH * HH,
> +			   EH * HH, FH * HH, GH * HH, HH * HH};
> +int16_t _elemhA = AH;
> +int16_t _elemhB = BH;
> +int16_t _elemhC = CH;
> +int16_t _elemhD = DH;
> +int16_t _elemhE = EH;
> +int16_t _elemhF = FH;
> +int16_t _elemhG = GH;
> +int16_t _elemhH = HH;
> +
> +#define AUS (1024)
> +#define BUS (31)
> +#define CUS (0)
> +#define DUS (655)
> +uint32_t expectedus2_1[2] = {AUS * AUS, BUS * AUS};
> +uint32_t expectedus2_2[2] = {AUS * BUS, BUS * BUS};
> +uint32_t expectedus4_1[4] = {AUS * AUS, BUS * AUS, CUS * AUS, DUS * AUS};
> +uint32_t expectedus4_2[4] = {AUS * BUS, BUS * BUS, CUS * BUS, DUS * BUS};
> +uint32_t expectedus4_3[4] = {AUS * CUS, BUS * CUS, CUS * CUS, DUS * CUS};
> +uint32_t expectedus4_4[4] = {AUS * DUS, BUS * DUS, CUS * DUS, DUS * DUS};
> +uint32_t _elemusA = AUS;
> +uint32_t _elemusB = BUS;
> +uint32_t _elemusC = CUS;
> +uint32_t _elemusD = DUS;
> +
> +#define AUH ((uint16_t) 0)
> +#define BUH ((uint16_t) 32)
> +#define CUH ((uint16_t) 102)
> +#define DUH ((uint16_t) 51)
> +#define EUH ((uint16_t) 71)
> +#define FUH ((uint16_t) 91)
> +#define GUH ((uint16_t) 48)
> +#define HUH ((uint16_t) 255)
> +uint16_t expecteduh4_1[4] = {AUH * AUH, BUH * AUH, CUH * AUH, DUH * AUH};
> +uint16_t expecteduh4_2[4] = {AUH * BUH, BUH * BUH, CUH * BUH, DUH * BUH};
> +uint16_t expecteduh4_3[4] = {AUH * CUH, BUH * CUH, CUH * CUH, DUH * CUH};
> +uint16_t expecteduh4_4[4] = {AUH * DUH, BUH * DUH, CUH * DUH, DUH * DUH};
> +uint16_t expecteduh8_1[8] = {AUH * AUH, BUH * AUH, CUH * AUH, DUH * AUH,
> +			     EUH * AUH, FUH * AUH, GUH * AUH, HUH * AUH};
> +uint16_t expecteduh8_2[8] = {AUH * BUH, BUH * BUH, CUH * BUH, DUH * BUH,
> +			     EUH * BUH, FUH * BUH, GUH * BUH, HUH * BUH};
> +uint16_t expecteduh8_3[8] = {AUH * CUH, BUH * CUH, CUH * CUH, DUH * CUH,
> +			     EUH * CUH, FUH * CUH, GUH * CUH, HUH * CUH};
> +uint16_t expecteduh8_4[8] = {AUH * DUH, BUH * DUH, CUH * DUH, DUH * DUH,
> +			     EUH * DUH, FUH * DUH, GUH * DUH, HUH * DUH};
> +uint16_t expecteduh8_5[8] = {AUH * EUH, BUH * EUH, CUH * EUH, DUH * EUH,
> +			     EUH * EUH, FUH * EUH, GUH * EUH, HUH * EUH};
> +uint16_t expecteduh8_6[8] = {AUH * FUH, BUH * FUH, CUH * FUH, DUH * FUH,
> +			     EUH * FUH, FUH * FUH, GUH * FUH, HUH * FUH};
> +uint16_t expecteduh8_7[8] = {AUH * GUH, BUH * GUH, CUH * GUH, DUH * GUH,
> +			     EUH * GUH, FUH * GUH, GUH * GUH, HUH * GUH};
> +uint16_t expecteduh8_8[8] = {AUH * HUH, BUH * HUH, CUH * HUH, DUH * HUH,
> +			     EUH * HUH, FUH * HUH, GUH * HUH, HUH * HUH};
> +uint16_t _elemuhA = AUH;
> +uint16_t _elemuhB = BUH;
> +uint16_t _elemuhC = CUH;
> +uint16_t _elemuhD = DUH;
> +uint16_t _elemuhE = EUH;
> +uint16_t _elemuhF = FUH;
> +uint16_t _elemuhG = GUH;
> +uint16_t _elemuhH = HUH;
> +
> +void
> +check_v2sf (float32_t elemA, float32_t elemB)
> +{
> +  int32_t indx;
> +  const float32_t vec32x2_buf[2] = {A, B};
> +  float32x2_t vec32x2_src = vld1_f32 (vec32x2_buf);
> +  float32x2_t vec32x2_res = vec32x2_src * elemA;
> +
> +  for (indx = 0; indx < 2; indx++)
> +    if (* (uint32_t *) &vec32x2_res[indx] != * (uint32_t *) &expected2_1[indx])
> +      abort ();
> +
> +  vec32x2_res = vec32x2_src * elemB;
> +
> +  for (indx = 0; indx < 2; indx++)
> +    if (* (uint32_t *) &vec32x2_res[indx] != * (uint32_t *) &expected2_2[indx])
> +      abort ();
> +
> +/* { dg-final { scan-assembler-times "fmul\tv\[0-9\]+\.2s, v\[0-9\]+\.2s, v\[0-9\]+\.s\\\[0\\\]" 2 } } */
> +}
> +
> +void
> +check_v4sf (float32_t elemA, float32_t elemB, float32_t elemC, float32_t elemD)
> +{
> +  int32_t indx;
> +  const float32_t vec32x4_buf[4] = {A, B, C, D};
> +  float32x4_t vec32x4_src = vld1q_f32 (vec32x4_buf);
> +  float32x4_t vec32x4_res = vec32x4_src * elemA;
> +
> +  for (indx = 0; indx < 4; indx++)
> +    if (* (uint32_t *) &vec32x4_res[indx] != * (uint32_t *) &expected4_1[indx])
> +      abort ();
> +
> +  vec32x4_res = vec32x4_src * elemB;
> +
> +  for (indx = 0; indx < 4; indx++)
> +    if (* (uint32_t *) &vec32x4_res[indx] != * (uint32_t *) &expected4_2[indx])
> +      abort ();
> +
> +  vec32x4_res = vec32x4_src * elemC;
> +
> +  for (indx = 0; indx < 4; indx++)
> +    if (* (uint32_t *) &vec32x4_res[indx] != * (uint32_t *) &expected4_3[indx])
> +      abort ();
> +
> +  vec32x4_res = vec32x4_src * elemD;
> +
> +  for (indx = 0; indx < 4; indx++)
> +    if (* (uint32_t *) &vec32x4_res[indx] != * (uint32_t *) &expected4_4[indx])
> +      abort ();
> +
> +/* { dg-final { scan-assembler-times "fmul\tv\[0-9\]+\.4s, v\[0-9\]+\.4s, v\[0-9\]+\.s\\\[0\\\]" 4 } } */
> +}
> +
> +void
> +check_v2df (float64_t elemdC, float64_t elemdD)
> +{
> +  int32_t indx;
> +  const float64_t vec64x2_buf[2] = {AD, BD};
> +  float64x2_t vec64x2_src = vld1q_f64 (vec64x2_buf);
> +  float64x2_t vec64x2_res = vec64x2_src * elemdC;
> +
> +  for (indx = 0; indx < 2; indx++)
> +    if (* (uint64_t *) &vec64x2_res[indx] != * (uint64_t *) &expectedd2_1[indx])
> +      abort ();
> +
> +  vec64x2_res = vec64x2_src * elemdD;
> +
> +  for (indx = 0; indx < 2; indx++)
> +    if (* (uint64_t *) &vec64x2_res[indx] != * (uint64_t *) &expectedd2_2[indx])
> +      abort ();
> +
> +/* { dg-final { scan-assembler-times "fmul\tv\[0-9\]+\.2d, v\[0-9\]+\.2d, v\[0-9\]+\.d\\\[0\\\]" 2 } } */
> +}
> +
> +void
> +check_v2si (int32_t elemsA, int32_t elemsB)
> +{
> +  int32_t indx;
> +  const int32_t vecs32x2_buf[2] = {AS, BS};
> +  int32x2_t vecs32x2_src = vld1_s32 (vecs32x2_buf);
> +  int32x2_t vecs32x2_res = vecs32x2_src * elemsA;
> +
> +  for (indx = 0; indx < 2; indx++)
> +    if (vecs32x2_res[indx] != expecteds2_1[indx])
> +      abort ();
> +
> +  vecs32x2_res = vecs32x2_src * elemsB;
> +
> +  for (indx = 0; indx < 2; indx++)
> +    if (vecs32x2_res[indx] != expecteds2_2[indx])
> +      abort ();
> +}
> +
> +void
> +check_v2si_unsigned (uint32_t elemusA, uint32_t elemusB)
> +{
> +  int indx;
> +  const uint32_t vecus32x2_buf[2] = {AUS, BUS};
> +  uint32x2_t vecus32x2_src = vld1_u32 (vecus32x2_buf);
> +  uint32x2_t vecus32x2_res = vecus32x2_src * elemusA;
> +
> +  for (indx = 0; indx < 2; indx++)
> +    if (vecus32x2_res[indx] != expectedus2_1[indx])
> +      abort ();
> +
> +  vecus32x2_res = vecus32x2_src * elemusB;
> +
> +  for (indx = 0; indx < 2; indx++)
> +    if (vecus32x2_res[indx] != expectedus2_2[indx])
> +      abort ();
> +
> +/* { dg-final { scan-assembler-times "\tmul\tv\[0-9\]+\.2s, v\[0-9\]+\.2s, v\[0-9\]+\.s\\\[0\\\]" 4 } } */
> +}
> +
> +void
> +check_v4si (int32_t elemsA, int32_t elemsB, int32_t elemsC, int32_t elemsD)
> +{
> +  int32_t indx;
> +  const int32_t vecs32x4_buf[4] = {AS, BS, CS, DS};
> +  int32x4_t vecs32x4_src = vld1q_s32 (vecs32x4_buf);
> +  int32x4_t vecs32x4_res = vecs32x4_src * elemsA;
> +
> +  for (indx = 0; indx < 4; indx++)
> +    if (vecs32x4_res[indx] != expecteds4_1[indx])
> +      abort ();
> +
> +  vecs32x4_res = vecs32x4_src * elemsB;
> +
> +  for (indx = 0; indx < 4; indx++)
> +    if (vecs32x4_res[indx] != expecteds4_2[indx])
> +      abort ();
> +
> +  vecs32x4_res = vecs32x4_src * elemsC;
> +
> +  for (indx = 0; indx < 4; indx++)
> +    if (vecs32x4_res[indx] != expecteds4_3[indx])
> +      abort ();
> +
> +  vecs32x4_res = vecs32x4_src * elemsD;
> +
> +  for (indx = 0; indx < 4; indx++)
> +    if (vecs32x4_res[indx] != expecteds4_4[indx])
> +      abort ();
> +}
> +
> +void
> +check_v4si_unsigned (uint32_t elemusA, uint32_t elemusB, uint32_t elemusC,
> +		     uint32_t elemusD)
> +{
> +  int indx;
> +  const uint32_t vecus32x4_buf[4] = {AUS, BUS, CUS, DUS};
> +  uint32x4_t vecus32x4_src = vld1q_u32 (vecus32x4_buf);
> +  uint32x4_t vecus32x4_res = vecus32x4_src * elemusA;
> +
> +  for (indx = 0; indx < 4; indx++)
> +    if (vecus32x4_res[indx] != expectedus4_1[indx])
> +      abort ();
> +
> +  vecus32x4_res = vecus32x4_src * elemusB;
> +
> +  for (indx = 0; indx < 4; indx++)
> +    if (vecus32x4_res[indx] != expectedus4_2[indx])
> +      abort ();
> +
> +  vecus32x4_res = vecus32x4_src * elemusC;
> +
> +  for (indx = 0; indx < 4; indx++)
> +    if (vecus32x4_res[indx] != expectedus4_3[indx])
> +      abort ();
> +
> +  vecus32x4_res = vecus32x4_src * elemusD;
> +
> +  for (indx = 0; indx < 4; indx++)
> +    if (vecus32x4_res[indx] != expectedus4_4[indx])
> +      abort ();
> +
> +/* { dg-final { scan-assembler-times "\tmul\tv\[0-9\]+\.4s, v\[0-9\]+\.4s, v\[0-9\]+\.s\\\[0\\\]" 8 } } */
> +}
> +
> +
> +void
> +check_v4hi (int16_t elemhA, int16_t elemhB, int16_t elemhC, int16_t elemhD)
> +{
> +  int32_t indx;
> +  const int16_t vech16x4_buf[4] = {AH, BH, CH, DH};
> +  int16x4_t vech16x4_src = vld1_s16 (vech16x4_buf);
> +  int16x4_t vech16x4_res = vech16x4_src * elemhA;
> +
> +  for (indx = 0; indx < 4; indx++)
> +    if (vech16x4_res[indx] != expectedh4_1[indx])
> +      abort ();
> +
> +  vech16x4_res = vech16x4_src * elemhB;
> +
> +  for (indx = 0; indx < 4; indx++)
> +    if (vech16x4_res[indx] != expectedh4_2[indx])
> +      abort ();
> +
> +  vech16x4_res = vech16x4_src * elemhC;
> +
> +  for (indx = 0; indx < 4; indx++)
> +    if (vech16x4_res[indx] != expectedh4_3[indx])
> +      abort ();
> +
> +  vech16x4_res = vech16x4_src * elemhD;
> +
> +  for (indx = 0; indx < 4; indx++)
> +    if (vech16x4_res[indx] != expectedh4_4[indx])
> +      abort ();
> +}
> +
> +void
> +check_v4hi_unsigned (uint16_t elemuhA, uint16_t elemuhB, uint16_t elemuhC,
> +		     uint16_t elemuhD)
> +{
> +  int indx;
> +  const uint16_t vecuh16x4_buf[4] = {AUH, BUH, CUH, DUH};
> +  uint16x4_t vecuh16x4_src = vld1_u16 (vecuh16x4_buf);
> +  uint16x4_t vecuh16x4_res = vecuh16x4_src * elemuhA;
> +
> +  for (indx = 0; indx < 4; indx++)
> +    if (vecuh16x4_res[indx] != expecteduh4_1[indx])
> +      abort ();
> +
> +  vecuh16x4_res = vecuh16x4_src * elemuhB;
> +
> +  for (indx = 0; indx < 4; indx++)
> +    if (vecuh16x4_res[indx] != expecteduh4_2[indx])
> +      abort ();
> +
> +  vecuh16x4_res = vecuh16x4_src * elemuhC;
> +
> +  for (indx = 0; indx < 4; indx++)
> +    if (vecuh16x4_res[indx] != expecteduh4_3[indx])
> +      abort ();
> +
> +  vecuh16x4_res = vecuh16x4_src * elemuhD;
> +
> +  for (indx = 0; indx < 4; indx++)
> +    if (vecuh16x4_res[indx] != expecteduh4_4[indx])
> +      abort ();
> +
> +/* { dg-final { scan-assembler-times "mul\tv\[0-9\]+\.4h, v\[0-9\]+\.4h, v\[0-9\]+\.h\\\[0\\\]" 8 } } */
> +}
> +
> +void
> +check_v8hi (int16_t elemhA, int16_t elemhB, int16_t elemhC, int16_t elemhD,
> +	    int16_t elemhE, int16_t elemhF, int16_t elemhG, int16_t elemhH)
> +{
> +  int32_t indx;
> +  const int16_t vech16x8_buf[8] = {AH, BH, CH, DH, EH, FH, GH, HH};
> +  int16x8_t vech16x8_src = vld1q_s16 (vech16x8_buf);
> +  int16x8_t vech16x8_res = vech16x8_src * elemhA;
> +
> +  for (indx = 0; indx < 8; indx++)
> +    if (vech16x8_res[indx] != expectedh8_1[indx])
> +      abort ();
> +
> +  vech16x8_res = vech16x8_src * elemhB;
> +
> +  for (indx = 0; indx < 8; indx++)
> +    if (vech16x8_res[indx] != expectedh8_2[indx])
> +      abort ();
> +
> +  vech16x8_res = vech16x8_src * elemhC;
> +
> +  for (indx = 0; indx < 8; indx++)
> +    if (vech16x8_res[indx] != expectedh8_3[indx])
> +      abort ();
> +
> +  vech16x8_res = vech16x8_src * elemhD;
> +
> +  for (indx = 0; indx < 8; indx++)
> +    if (vech16x8_res[indx] != expectedh8_4[indx])
> +      abort ();
> +
> +  vech16x8_res = vech16x8_src * elemhE;
> +
> +  for (indx = 0; indx < 8; indx++)
> +    if (vech16x8_res[indx] != expectedh8_5[indx])
> +      abort ();
> +
> +  vech16x8_res = vech16x8_src * elemhF;
> +
> +  for (indx = 0; indx < 8; indx++)
> +    if (vech16x8_res[indx] != expectedh8_6[indx])
> +      abort ();
> +
> +  vech16x8_res = vech16x8_src * elemhG;
> +
> +  for (indx = 0; indx < 8; indx++)
> +    if (vech16x8_res[indx] != expectedh8_7[indx])
> +      abort ();
> +
> +  vech16x8_res = vech16x8_src * elemhH;
> +
> +  for (indx = 0; indx < 8; indx++)
> +    if (vech16x8_res[indx] != expectedh8_8[indx])
> +      abort ();
> +}
> +
> +void
> +check_v8hi_unsigned (uint16_t elemuhA, uint16_t elemuhB, uint16_t elemuhC,
> +		     uint16_t elemuhD, uint16_t elemuhE, uint16_t elemuhF,
> +		     uint16_t elemuhG, uint16_t elemuhH)
> +{
> +  int indx;
> +  const uint16_t vecuh16x8_buf[8] = {AUH, BUH, CUH, DUH, EUH, FUH, GUH, HUH};
> +  uint16x8_t vecuh16x8_src = vld1q_u16 (vecuh16x8_buf);
> +  uint16x8_t vecuh16x8_res = vecuh16x8_src * elemuhA;
> +
> +  for (indx = 0; indx < 8; indx++)
> +    if (vecuh16x8_res[indx] != expecteduh8_1[indx])
> +      abort ();
> +
> +  vecuh16x8_res = vecuh16x8_src * elemuhB;
> +
> +  for (indx = 0; indx < 8; indx++)
> +    if (vecuh16x8_res[indx] != expecteduh8_2[indx])
> +      abort ();
> +
> +  vecuh16x8_res = vecuh16x8_src * elemuhC;
> +
> +  for (indx = 0; indx < 8; indx++)
> +    if (vecuh16x8_res[indx] != expecteduh8_3[indx])
> +      abort ();
> +
> +  vecuh16x8_res = vecuh16x8_src * elemuhD;
> +
> +  for (indx = 0; indx < 8; indx++)
> +    if (vecuh16x8_res[indx] != expecteduh8_4[indx])
> +      abort ();
> +
> +  vecuh16x8_res = vecuh16x8_src * elemuhE;
> +
> +  for (indx = 0; indx < 8; indx++)
> +    if (vecuh16x8_res[indx] != expecteduh8_5[indx])
> +      abort ();
> +
> +  vecuh16x8_res = vecuh16x8_src * elemuhF;
> +
> +  for (indx = 0; indx < 8; indx++)
> +    if (vecuh16x8_res[indx] != expecteduh8_6[indx])
> +      abort ();
> +
> +  vecuh16x8_res = vecuh16x8_src * elemuhG;
> +
> +  for (indx = 0; indx < 8; indx++)
> +    if (vecuh16x8_res[indx] != expecteduh8_7[indx])
> +      abort ();
> +
> +  vecuh16x8_res = vecuh16x8_src * elemuhH;
> +
> +  for (indx = 0; indx < 8; indx++)
> +    if (vecuh16x8_res[indx] != expecteduh8_8[indx])
> +      abort ();
> +
> +/* { dg-final { scan-assembler-times "mul\tv\[0-9\]+\.8h, v\[0-9\]+\.8h, v\[0-9\]+\.h\\\[0\\\]" 16 } } */
> +}
> +
> +int
> +main (void)
> +{
> +  check_v2sf (_elemA, _elemB);
> +  check_v4sf (_elemA, _elemB, _elemC, _elemD);
> +  check_v2df (_elemdC, _elemdD);
> +  check_v2si (_elemsA, _elemsB);
> +  check_v4si (_elemsA, _elemsB, _elemsC, _elemsD);
> +  check_v4hi (_elemhA, _elemhB, _elemhC, _elemhD);
> +  check_v8hi (_elemhA, _elemhB, _elemhC, _elemhD,
> +	      _elemhE, _elemhF, _elemhG, _elemhH);
> +  check_v2si_unsigned (_elemusA, _elemusB);
> +  check_v4si_unsigned (_elemusA, _elemusB, _elemusC, _elemusD);
> +  check_v4hi_unsigned (_elemuhA, _elemuhB, _elemuhC, _elemuhD);
> +  check_v8hi_unsigned (_elemuhA, _elemuhB, _elemuhC, _elemuhD,
> +		       _elemuhE, _elemuhF, _elemuhG, _elemuhH);
> +
> +  return 0;
> +}
> +
> 

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [AArch64, 3/4] Reimplement multiply by element to get rid of inline assembly
  2016-05-16  9:09   ` [AArch64, 3/4] Reimplement multiply by element to get rid of " Jiong Wang
@ 2016-05-17 12:37     ` James Greenhalgh
  0 siblings, 0 replies; 10+ messages in thread
From: James Greenhalgh @ 2016-05-17 12:37 UTC (permalink / raw)
  To: Jiong Wang; +Cc: GCC Patches, nd

On Mon, May 16, 2016 at 10:09:37AM +0100, Jiong Wang wrote:
> This patch reimplement vector multiply by element on top of the existed
> vmul_lane* intrinsics instead of inline assembly.
> 
> There is no code generation change from this patch.
> 
> OK for trunk?
> 
> 2016-05-16  Jiong Wang<jiong.wang@arm.com>
> 
> gcc/
>   * config/aarch64/aarch64-simd.md (vmul_n_f32): Remove inline assembly.
>   Use builtin.
>   (vmul_n_s16): Likewise.
>   (vmul_n_s32): Likewise.
>   (vmul_n_u16): Likewise.
>   (vmul_n_u32): Likewise.
>   (vmulq_n_f32): Likewise.
>   (vmulq_n_f64): Likewise.
>   (vmulq_n_s16): Likewise.
>   (vmulq_n_s32): Likewise.
>   (vmulq_n_u16): Likewise.
>   (vmulq_n_u32): Likewise.
> 
> gcc/testsuite/
>   * gcc.target/aarch64/simd/vmul_elem_1.c: Use intrinsics.

Please format these ChangeLogs correctly, otherwise this is OK.

Thanks,
James

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [AArch64, 4/4] Reimplement vmvn* intrinsics, remove inline assembly
  2016-05-16  9:09     ` [AArch64, 4/4] Reimplement vmvn* intrinsics, remove inline assembly Jiong Wang
@ 2016-05-17 12:38       ` James Greenhalgh
  0 siblings, 0 replies; 10+ messages in thread
From: James Greenhalgh @ 2016-05-17 12:38 UTC (permalink / raw)
  To: Jiong Wang; +Cc: GCC Patches, nd

On Mon, May 16, 2016 at 10:09:42AM +0100, Jiong Wang wrote:
> This patch removes the inline assembly and reimplements all mvn/mvnq vector
> integer intrinsics through the standard "one_cmpl<mode>2" pattern.  That
> pattern was introduced after the initial implementation of those intrinsics,
> which is why inline assembly was used historically.
> 
> OK for trunk?
> 
> No regressions on the existing advsimd-intrinsics/vmvn.c test.
> 
> 2016-05-16  Jiong Wang<jiong.wang@arm.com>
> 
> gcc/
>   * config/aarch64/arm_neon.h (vmvn_s8): Reimplement using C operator.
>   Remove inline assembly.
>   (vmvn_s16): Likewise.
>   (vmvn_s32): Likewise.
>   (vmvn_u8): Likewise.
>   (vmvn_u16): Likewise.
>   (vmvn_u32): Likewise.
>   (vmvnq_s8): Likewise.
>   (vmvnq_s16): Likewise.
>   (vmvnq_s32): Likewise.
>   (vmvnq_u8): Likewise.
>   (vmvnq_u16): Likewise.
>   (vmvnq_u32): Likewise.
>   (vmvn_p8): Likewise.
>   (vmvnq_p16): Likewise.

ChangeLog formatting is incorrect.

Otherwise, this is OK.

Thanks,
James
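
(For context: the reimplementation described in the quoted ChangeLog amounts
to replacing each hand-written asm block with the C bitwise-not operator, so
the intrinsic can expand through the standard one_cmpl<mode>2 pattern.  A
minimal sketch of one intrinsic follows; the exact arm_neon.h text in the
committed patch may differ.)

  __extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
  vmvn_s8 (int8x8_t __a)
  {
    /* Sketch only: the "~" operator goes through the standard
       one_cmpl<mode>2 expander instead of a hard-coded asm string.  */
    return ~__a;
  }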


^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [AArch64, 2/4] Extend vector multiply by element to all supported modes
  2016-05-17 12:28   ` James Greenhalgh
@ 2016-05-18  8:17     ` Christophe Lyon
  2016-05-18 13:14       ` Jiong Wang
  0 siblings, 1 reply; 10+ messages in thread
From: Christophe Lyon @ 2016-05-18  8:17 UTC (permalink / raw)
  To: James Greenhalgh; +Cc: Jiong Wang, GCC Patches, nd

On 17 May 2016 at 14:27, James Greenhalgh <james.greenhalgh@arm.com> wrote:
> On Mon, May 16, 2016 at 10:09:31AM +0100, Jiong Wang wrote:
>> AArch64 supports vector multiply by element for V2DF, V2SF, V4SF, V2SI,
>> V4SI, V4HI, V8HI.
>>
>> All of the above are already handled by the "*aarch64_mul3_elt<mode>"
>> pattern, and by "*aarch64_mul3_elt_<vswap_width_name><mode>" if there is a
>> lane size change.
>>
>> Those patterns match "(mul (vec_dup (vec_select)))", which is genuinely
>> vector multiply by element.
>>
>> However, vector multiply by element can also come from "(mul (vec_dup
>> (scalar", where the scalar value is already sitting in a vector register
>> and is then duplicated to the other lanes, with no lane size change.
>>
>> We already have "*aarch64_mul3_elt_to_128df" to match this, but it is
>> restricted to V2DF, while this patch extends the support to more modes,
>> for example vector integer operations.
>>
>> For the testcase included, the following codegen change will happen:
>>
>>
>> -       ldr     w0, [x3, 160]
>> -       dup     v1.2s, w0
>> -       mul     v1.2s, v1.2s, v2.2s
>> +       ldr     s1, [x3, 160]
>> +       mul     v1.2s, v0.2s, v1.s[0]
>>
>> OK for trunk?
>>
>> 2016-05-16  Jiong Wang<jiong.wang@arm.com>
>>
>> gcc/
>>   * config/aarch64/aarch64-simd.md (*aarch64_mul3_elt_to_128df): Extend to all
>>   supported modes.  Rename to "*aarch64_mul3_elt_from_dup".
>>
>> gcc/testsuite/
>>   * /gcc.target/aarch64/simd/vmul_elem_1.c: New.
>
>
> This ChangeLog formatting is incorrect. It should look like:
>
> gcc/
>
> 2016-05-17  Jiong Wang  <jiong.wang@arm.com>
>
>         * config/aarch64/aarch64-simd.md (*aarch64_mul3_elt_to_128df): Extend
>         to all supported modes.  Rename to...
>         (*aarch64_mul3_elt_from_dup): ...this.
>
> gcc/testsuite/
>
> 2016-05-17  Jiong Wang  <jiong.wang@arm.com>
>
>         * gcc.target/aarch64/simd/vmul_elem_1.c: New.
>
> Otherwise, this patch is OK.
>

Hi Jiong,

The new testcase fails on aarch64_be, at execution time.

Christophe.

> Thanks,
> James
>
>> diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
>> index eb18defef15c24bf2334045e92bf7c34b989136d..7f338ff78fabccee868a4befbffed54c3e842dc9 100644
>> --- a/gcc/config/aarch64/aarch64-simd.md
>> +++ b/gcc/config/aarch64/aarch64-simd.md
>> @@ -371,15 +371,15 @@
>>    [(set_attr "type" "neon<fp>_mul_<Vetype>_scalar<q>")]
>>  )
>>
>> -(define_insn "*aarch64_mul3_elt_to_128df"
>> -  [(set (match_operand:V2DF 0 "register_operand" "=w")
>> -     (mult:V2DF
>> -       (vec_duplicate:V2DF
>> -      (match_operand:DF 2 "register_operand" "w"))
>> -      (match_operand:V2DF 1 "register_operand" "w")))]
>> +(define_insn "*aarch64_mul3_elt_from_dup<mode>"
>> + [(set (match_operand:VMUL 0 "register_operand" "=w")
>> +    (mult:VMUL
>> +      (vec_duplicate:VMUL
>> +         (match_operand:<VEL> 1 "register_operand" "<h_con>"))
>> +      (match_operand:VMUL 2 "register_operand" "w")))]
>>    "TARGET_SIMD"
>> -  "fmul\\t%0.2d, %1.2d, %2.d[0]"
>> -  [(set_attr "type" "neon_fp_mul_d_scalar_q")]
>> +  "<f>mul\t%0.<Vtype>, %2.<Vtype>, %1.<Vetype>[0]";
>> +  [(set_attr "type" "neon<fp>_mul_<Vetype>_scalar<q>")]
>>  )
>>
>>  (define_insn "aarch64_rsqrte_<mode>2"
>> diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vmul_elem_1.c b/gcc/testsuite/gcc.target/aarch64/simd/vmul_elem_1.c
>> new file mode 100644
>> index 0000000000000000000000000000000000000000..290a4e9adbc5d9ce1335ca28120e437293776f30
>> --- /dev/null
>> +++ b/gcc/testsuite/gcc.target/aarch64/simd/vmul_elem_1.c
>> @@ -0,0 +1,519 @@
>> +/* Test the vmul_n_f64 AArch64 SIMD intrinsic.  */
>> +
>> +/* { dg-do run } */
>> +/* { dg-options "-O2 --save-temps" } */
>> +
>> +#include "arm_neon.h"
>> +
>> +extern void abort (void);
>> +
>> +#define A (132.4f)
>> +#define B (-0.0f)
>> +#define C (-34.8f)
>> +#define D (289.34f)
>> +float32_t expected2_1[2] = {A * A, B * A};
>> +float32_t expected2_2[2] = {A * B, B * B};
>> +float32_t expected4_1[4] = {A * A, B * A, C * A, D * A};
>> +float32_t expected4_2[4] = {A * B, B * B, C * B, D * B};
>> +float32_t expected4_3[4] = {A * C, B * C, C * C, D * C};
>> +float32_t expected4_4[4] = {A * D, B * D, C * D, D * D};
>> +float32_t _elemA = A;
>> +float32_t _elemB = B;
>> +float32_t _elemC = C;
>> +float32_t _elemD = D;
>> +
>> +#define AD (1234.5)
>> +#define BD (-0.0)
>> +#define CD (71.3)
>> +#define DD (-1024.4)
>> +float64_t expectedd2_1[2] = {AD * CD, BD * CD};
>> +float64_t expectedd2_2[2] = {AD * DD, BD * DD};
>> +float64_t _elemdC = CD;
>> +float64_t _elemdD = DD;
>> +
>> +
>> +#define AS (1024)
>> +#define BS (-31)
>> +#define CS (0)
>> +#define DS (655)
>> +int32_t expecteds2_1[2] = {AS * AS, BS * AS};
>> +int32_t expecteds2_2[2] = {AS * BS, BS * BS};
>> +int32_t expecteds4_1[4] = {AS * AS, BS * AS, CS * AS, DS * AS};
>> +int32_t expecteds4_2[4] = {AS * BS, BS * BS, CS * BS, DS * BS};
>> +int32_t expecteds4_3[4] = {AS * CS, BS * CS, CS * CS, DS * CS};
>> +int32_t expecteds4_4[4] = {AS * DS, BS * DS, CS * DS, DS * DS};
>> +int32_t _elemsA = AS;
>> +int32_t _elemsB = BS;
>> +int32_t _elemsC = CS;
>> +int32_t _elemsD = DS;
>> +
>> +#define AH ((int16_t) 0)
>> +#define BH ((int16_t) -32)
>> +#define CH ((int16_t) 102)
>> +#define DH ((int16_t) -51)
>> +#define EH ((int16_t) 71)
>> +#define FH ((int16_t) -91)
>> +#define GH ((int16_t) 48)
>> +#define HH ((int16_t) 255)
>> +int16_t expectedh4_1[4] = {AH * AH, BH * AH, CH * AH, DH * AH};
>> +int16_t expectedh4_2[4] = {AH * BH, BH * BH, CH * BH, DH * BH};
>> +int16_t expectedh4_3[4] = {AH * CH, BH * CH, CH * CH, DH * CH};
>> +int16_t expectedh4_4[4] = {AH * DH, BH * DH, CH * DH, DH * DH};
>> +int16_t expectedh8_1[8] = {AH * AH, BH * AH, CH * AH, DH * AH,
>> +                        EH * AH, FH * AH, GH * AH, HH * AH};
>> +int16_t expectedh8_2[8] = {AH * BH, BH * BH, CH * BH, DH * BH,
>> +                        EH * BH, FH * BH, GH * BH, HH * BH};
>> +int16_t expectedh8_3[8] = {AH * CH, BH * CH, CH * CH, DH * CH,
>> +                        EH * CH, FH * CH, GH * CH, HH * CH};
>> +int16_t expectedh8_4[8] = {AH * DH, BH * DH, CH * DH, DH * DH,
>> +                        EH * DH, FH * DH, GH * DH, HH * DH};
>> +int16_t expectedh8_5[8] = {AH * EH, BH * EH, CH * EH, DH * EH,
>> +                        EH * EH, FH * EH, GH * EH, HH * EH};
>> +int16_t expectedh8_6[8] = {AH * FH, BH * FH, CH * FH, DH * FH,
>> +                        EH * FH, FH * FH, GH * FH, HH * FH};
>> +int16_t expectedh8_7[8] = {AH * GH, BH * GH, CH * GH, DH * GH,
>> +                        EH * GH, FH * GH, GH * GH, HH * GH};
>> +int16_t expectedh8_8[8] = {AH * HH, BH * HH, CH * HH, DH * HH,
>> +                        EH * HH, FH * HH, GH * HH, HH * HH};
>> +int16_t _elemhA = AH;
>> +int16_t _elemhB = BH;
>> +int16_t _elemhC = CH;
>> +int16_t _elemhD = DH;
>> +int16_t _elemhE = EH;
>> +int16_t _elemhF = FH;
>> +int16_t _elemhG = GH;
>> +int16_t _elemhH = HH;
>> +
>> +#define AUS (1024)
>> +#define BUS (31)
>> +#define CUS (0)
>> +#define DUS (655)
>> +uint32_t expectedus2_1[2] = {AUS * AUS, BUS * AUS};
>> +uint32_t expectedus2_2[2] = {AUS * BUS, BUS * BUS};
>> +uint32_t expectedus4_1[4] = {AUS * AUS, BUS * AUS, CUS * AUS, DUS * AUS};
>> +uint32_t expectedus4_2[4] = {AUS * BUS, BUS * BUS, CUS * BUS, DUS * BUS};
>> +uint32_t expectedus4_3[4] = {AUS * CUS, BUS * CUS, CUS * CUS, DUS * CUS};
>> +uint32_t expectedus4_4[4] = {AUS * DUS, BUS * DUS, CUS * DUS, DUS * DUS};
>> +uint32_t _elemusA = AUS;
>> +uint32_t _elemusB = BUS;
>> +uint32_t _elemusC = CUS;
>> +uint32_t _elemusD = DUS;
>> +
>> +#define AUH ((uint16_t) 0)
>> +#define BUH ((uint16_t) 32)
>> +#define CUH ((uint16_t) 102)
>> +#define DUH ((uint16_t) 51)
>> +#define EUH ((uint16_t) 71)
>> +#define FUH ((uint16_t) 91)
>> +#define GUH ((uint16_t) 48)
>> +#define HUH ((uint16_t) 255)
>> +uint16_t expecteduh4_1[4] = {AUH * AUH, BUH * AUH, CUH * AUH, DUH * AUH};
>> +uint16_t expecteduh4_2[4] = {AUH * BUH, BUH * BUH, CUH * BUH, DUH * BUH};
>> +uint16_t expecteduh4_3[4] = {AUH * CUH, BUH * CUH, CUH * CUH, DUH * CUH};
>> +uint16_t expecteduh4_4[4] = {AUH * DUH, BUH * DUH, CUH * DUH, DUH * DUH};
>> +uint16_t expecteduh8_1[8] = {AUH * AUH, BUH * AUH, CUH * AUH, DUH * AUH,
>> +                          EUH * AUH, FUH * AUH, GUH * AUH, HUH * AUH};
>> +uint16_t expecteduh8_2[8] = {AUH * BUH, BUH * BUH, CUH * BUH, DUH * BUH,
>> +                          EUH * BUH, FUH * BUH, GUH * BUH, HUH * BUH};
>> +uint16_t expecteduh8_3[8] = {AUH * CUH, BUH * CUH, CUH * CUH, DUH * CUH,
>> +                          EUH * CUH, FUH * CUH, GUH * CUH, HUH * CUH};
>> +uint16_t expecteduh8_4[8] = {AUH * DUH, BUH * DUH, CUH * DUH, DUH * DUH,
>> +                          EUH * DUH, FUH * DUH, GUH * DUH, HUH * DUH};
>> +uint16_t expecteduh8_5[8] = {AUH * EUH, BUH * EUH, CUH * EUH, DUH * EUH,
>> +                          EUH * EUH, FUH * EUH, GUH * EUH, HUH * EUH};
>> +uint16_t expecteduh8_6[8] = {AUH * FUH, BUH * FUH, CUH * FUH, DUH * FUH,
>> +                          EUH * FUH, FUH * FUH, GUH * FUH, HUH * FUH};
>> +uint16_t expecteduh8_7[8] = {AUH * GUH, BUH * GUH, CUH * GUH, DUH * GUH,
>> +                          EUH * GUH, FUH * GUH, GUH * GUH, HUH * GUH};
>> +uint16_t expecteduh8_8[8] = {AUH * HUH, BUH * HUH, CUH * HUH, DUH * HUH,
>> +                          EUH * HUH, FUH * HUH, GUH * HUH, HUH * HUH};
>> +uint16_t _elemuhA = AUH;
>> +uint16_t _elemuhB = BUH;
>> +uint16_t _elemuhC = CUH;
>> +uint16_t _elemuhD = DUH;
>> +uint16_t _elemuhE = EUH;
>> +uint16_t _elemuhF = FUH;
>> +uint16_t _elemuhG = GUH;
>> +uint16_t _elemuhH = HUH;
>> +
>> +void
>> +check_v2sf (float32_t elemA, float32_t elemB)
>> +{
>> +  int32_t indx;
>> +  const float32_t vec32x2_buf[2] = {A, B};
>> +  float32x2_t vec32x2_src = vld1_f32 (vec32x2_buf);
>> +  float32x2_t vec32x2_res = vec32x2_src * elemA;
>> +
>> +  for (indx = 0; indx < 2; indx++)
>> +    if (* (uint32_t *) &vec32x2_res[indx] != * (uint32_t *) &expected2_1[indx])
>> +      abort ();
>> +
>> +  vec32x2_res = vec32x2_src * elemB;
>> +
>> +  for (indx = 0; indx < 2; indx++)
>> +    if (* (uint32_t *) &vec32x2_res[indx] != * (uint32_t *) &expected2_2[indx])
>> +      abort ();
>> +
>> +/* { dg-final { scan-assembler-times "fmul\tv\[0-9\]+\.2s, v\[0-9\]+\.2s, v\[0-9\]+\.s\\\[0\\\]" 2 } } */
>> +}
>> +
>> +void
>> +check_v4sf (float32_t elemA, float32_t elemB, float32_t elemC, float32_t elemD)
>> +{
>> +  int32_t indx;
>> +  const float32_t vec32x4_buf[4] = {A, B, C, D};
>> +  float32x4_t vec32x4_src = vld1q_f32 (vec32x4_buf);
>> +  float32x4_t vec32x4_res = vec32x4_src * elemA;
>> +
>> +  for (indx = 0; indx < 4; indx++)
>> +    if (* (uint32_t *) &vec32x4_res[indx] != * (uint32_t *) &expected4_1[indx])
>> +      abort ();
>> +
>> +  vec32x4_res = vec32x4_src * elemB;
>> +
>> +  for (indx = 0; indx < 4; indx++)
>> +    if (* (uint32_t *) &vec32x4_res[indx] != * (uint32_t *) &expected4_2[indx])
>> +      abort ();
>> +
>> +  vec32x4_res = vec32x4_src * elemC;
>> +
>> +  for (indx = 0; indx < 4; indx++)
>> +    if (* (uint32_t *) &vec32x4_res[indx] != * (uint32_t *) &expected4_3[indx])
>> +      abort ();
>> +
>> +  vec32x4_res = vec32x4_src * elemD;
>> +
>> +  for (indx = 0; indx < 4; indx++)
>> +    if (* (uint32_t *) &vec32x4_res[indx] != * (uint32_t *) &expected4_4[indx])
>> +      abort ();
>> +
>> +/* { dg-final { scan-assembler-times "fmul\tv\[0-9\]+\.4s, v\[0-9\]+\.4s, v\[0-9\]+\.s\\\[0\\\]" 4 } } */
>> +}
>> +
>> +void
>> +check_v2df (float64_t elemdC, float64_t elemdD)
>> +{
>> +  int32_t indx;
>> +  const float64_t vec64x2_buf[2] = {AD, BD};
>> +  float64x2_t vec64x2_src = vld1q_f64 (vec64x2_buf);
>> +  float64x2_t vec64x2_res = vec64x2_src * elemdC;
>> +
>> +  for (indx = 0; indx < 2; indx++)
>> +    if (* (uint64_t *) &vec64x2_res[indx] != * (uint64_t *) &expectedd2_1[indx])
>> +      abort ();
>> +
>> +  vec64x2_res = vec64x2_src * elemdD;
>> +
>> +  for (indx = 0; indx < 2; indx++)
>> +    if (* (uint64_t *) &vec64x2_res[indx] != * (uint64_t *) &expectedd2_2[indx])
>> +      abort ();
>> +
>> +/* { dg-final { scan-assembler-times "fmul\tv\[0-9\]+\.2d, v\[0-9\]+\.2d, v\[0-9\]+\.d\\\[0\\\]" 2 } } */
>> +}
>> +
>> +void
>> +check_v2si (int32_t elemsA, int32_t elemsB)
>> +{
>> +  int32_t indx;
>> +  const int32_t vecs32x2_buf[2] = {AS, BS};
>> +  int32x2_t vecs32x2_src = vld1_s32 (vecs32x2_buf);
>> +  int32x2_t vecs32x2_res = vecs32x2_src * elemsA;
>> +
>> +  for (indx = 0; indx < 2; indx++)
>> +    if (vecs32x2_res[indx] != expecteds2_1[indx])
>> +      abort ();
>> +
>> +  vecs32x2_res = vecs32x2_src * elemsB;
>> +
>> +  for (indx = 0; indx < 2; indx++)
>> +    if (vecs32x2_res[indx] != expecteds2_2[indx])
>> +      abort ();
>> +}
>> +
>> +void
>> +check_v2si_unsigned (uint32_t elemusA, uint32_t elemusB)
>> +{
>> +  int indx;
>> +  const uint32_t vecus32x2_buf[2] = {AUS, BUS};
>> +  uint32x2_t vecus32x2_src = vld1_u32 (vecus32x2_buf);
>> +  uint32x2_t vecus32x2_res = vecus32x2_src * elemusA;
>> +
>> +  for (indx = 0; indx < 2; indx++)
>> +    if (vecus32x2_res[indx] != expectedus2_1[indx])
>> +      abort ();
>> +
>> +  vecus32x2_res = vecus32x2_src * elemusB;
>> +
>> +  for (indx = 0; indx < 2; indx++)
>> +    if (vecus32x2_res[indx] != expectedus2_2[indx])
>> +      abort ();
>> +
>> +/* { dg-final { scan-assembler-times "\tmul\tv\[0-9\]+\.2s, v\[0-9\]+\.2s, v\[0-9\]+\.s\\\[0\\\]" 4 } } */
>> +}
>> +
>> +void
>> +check_v4si (int32_t elemsA, int32_t elemsB, int32_t elemsC, int32_t elemsD)
>> +{
>> +  int32_t indx;
>> +  const int32_t vecs32x4_buf[4] = {AS, BS, CS, DS};
>> +  int32x4_t vecs32x4_src = vld1q_s32 (vecs32x4_buf);
>> +  int32x4_t vecs32x4_res = vecs32x4_src * elemsA;
>> +
>> +  for (indx = 0; indx < 4; indx++)
>> +    if (vecs32x4_res[indx] != expecteds4_1[indx])
>> +      abort ();
>> +
>> +  vecs32x4_res = vecs32x4_src * elemsB;
>> +
>> +  for (indx = 0; indx < 4; indx++)
>> +    if (vecs32x4_res[indx] != expecteds4_2[indx])
>> +      abort ();
>> +
>> +  vecs32x4_res = vecs32x4_src * elemsC;
>> +
>> +  for (indx = 0; indx < 4; indx++)
>> +    if (vecs32x4_res[indx] != expecteds4_3[indx])
>> +      abort ();
>> +
>> +  vecs32x4_res = vecs32x4_src * elemsD;
>> +
>> +  for (indx = 0; indx < 4; indx++)
>> +    if (vecs32x4_res[indx] != expecteds4_4[indx])
>> +      abort ();
>> +}
>> +
>> +void
>> +check_v4si_unsigned (uint32_t elemusA, uint32_t elemusB, uint32_t elemusC,
>> +                  uint32_t elemusD)
>> +{
>> +  int indx;
>> +  const uint32_t vecus32x4_buf[4] = {AUS, BUS, CUS, DUS};
>> +  uint32x4_t vecus32x4_src = vld1q_u32 (vecus32x4_buf);
>> +  uint32x4_t vecus32x4_res = vecus32x4_src * elemusA;
>> +
>> +  for (indx = 0; indx < 4; indx++)
>> +    if (vecus32x4_res[indx] != expectedus4_1[indx])
>> +      abort ();
>> +
>> +  vecus32x4_res = vecus32x4_src * elemusB;
>> +
>> +  for (indx = 0; indx < 4; indx++)
>> +    if (vecus32x4_res[indx] != expectedus4_2[indx])
>> +      abort ();
>> +
>> +  vecus32x4_res = vecus32x4_src * elemusC;
>> +
>> +  for (indx = 0; indx < 4; indx++)
>> +    if (vecus32x4_res[indx] != expectedus4_3[indx])
>> +      abort ();
>> +
>> +  vecus32x4_res = vecus32x4_src * elemusD;
>> +
>> +  for (indx = 0; indx < 4; indx++)
>> +    if (vecus32x4_res[indx] != expectedus4_4[indx])
>> +      abort ();
>> +
>> +/* { dg-final { scan-assembler-times "\tmul\tv\[0-9\]+\.4s, v\[0-9\]+\.4s, v\[0-9\]+\.s\\\[0\\\]" 8 } } */
>> +}
>> +
>> +
>> +void
>> +check_v4hi (int16_t elemhA, int16_t elemhB, int16_t elemhC, int16_t elemhD)
>> +{
>> +  int32_t indx;
>> +  const int16_t vech16x4_buf[4] = {AH, BH, CH, DH};
>> +  int16x4_t vech16x4_src = vld1_s16 (vech16x4_buf);
>> +  int16x4_t vech16x4_res = vech16x4_src * elemhA;
>> +
>> +  for (indx = 0; indx < 4; indx++)
>> +    if (vech16x4_res[indx] != expectedh4_1[indx])
>> +      abort ();
>> +
>> +  vech16x4_res = vech16x4_src * elemhB;
>> +
>> +  for (indx = 0; indx < 4; indx++)
>> +    if (vech16x4_res[indx] != expectedh4_2[indx])
>> +      abort ();
>> +
>> +  vech16x4_res = vech16x4_src * elemhC;
>> +
>> +  for (indx = 0; indx < 4; indx++)
>> +    if (vech16x4_res[indx] != expectedh4_3[indx])
>> +      abort ();
>> +
>> +  vech16x4_res = vech16x4_src * elemhD;
>> +
>> +  for (indx = 0; indx < 4; indx++)
>> +    if (vech16x4_res[indx] != expectedh4_4[indx])
>> +      abort ();
>> +}
>> +
>> +void
>> +check_v4hi_unsigned (uint16_t elemuhA, uint16_t elemuhB, uint16_t elemuhC,
>> +                  uint16_t elemuhD)
>> +{
>> +  int indx;
>> +  const uint16_t vecuh16x4_buf[4] = {AUH, BUH, CUH, DUH};
>> +  uint16x4_t vecuh16x4_src = vld1_u16 (vecuh16x4_buf);
>> +  uint16x4_t vecuh16x4_res = vecuh16x4_src * elemuhA;
>> +
>> +  for (indx = 0; indx < 4; indx++)
>> +    if (vecuh16x4_res[indx] != expecteduh4_1[indx])
>> +      abort ();
>> +
>> +  vecuh16x4_res = vecuh16x4_src * elemuhB;
>> +
>> +  for (indx = 0; indx < 4; indx++)
>> +    if (vecuh16x4_res[indx] != expecteduh4_2[indx])
>> +      abort ();
>> +
>> +  vecuh16x4_res = vecuh16x4_src * elemuhC;
>> +
>> +  for (indx = 0; indx < 4; indx++)
>> +    if (vecuh16x4_res[indx] != expecteduh4_3[indx])
>> +      abort ();
>> +
>> +  vecuh16x4_res = vecuh16x4_src * elemuhD;
>> +
>> +  for (indx = 0; indx < 4; indx++)
>> +    if (vecuh16x4_res[indx] != expecteduh4_4[indx])
>> +      abort ();
>> +
>> +/* { dg-final { scan-assembler-times "mul\tv\[0-9\]+\.4h, v\[0-9\]+\.4h, v\[0-9\]+\.h\\\[0\\\]" 8 } } */
>> +}
>> +
>> +void
>> +check_v8hi (int16_t elemhA, int16_t elemhB, int16_t elemhC, int16_t elemhD,
>> +         int16_t elemhE, int16_t elemhF, int16_t elemhG, int16_t elemhH)
>> +{
>> +  int32_t indx;
>> +  const int16_t vech16x8_buf[8] = {AH, BH, CH, DH, EH, FH, GH, HH};
>> +  int16x8_t vech16x8_src = vld1q_s16 (vech16x8_buf);
>> +  int16x8_t vech16x8_res = vech16x8_src * elemhA;
>> +
>> +  for (indx = 0; indx < 8; indx++)
>> +    if (vech16x8_res[indx] != expectedh8_1[indx])
>> +      abort ();
>> +
>> +  vech16x8_res = vech16x8_src * elemhB;
>> +
>> +  for (indx = 0; indx < 8; indx++)
>> +    if (vech16x8_res[indx] != expectedh8_2[indx])
>> +      abort ();
>> +
>> +  vech16x8_res = vech16x8_src * elemhC;
>> +
>> +  for (indx = 0; indx < 8; indx++)
>> +    if (vech16x8_res[indx] != expectedh8_3[indx])
>> +      abort ();
>> +
>> +  vech16x8_res = vech16x8_src * elemhD;
>> +
>> +  for (indx = 0; indx < 8; indx++)
>> +    if (vech16x8_res[indx] != expectedh8_4[indx])
>> +      abort ();
>> +
>> +  vech16x8_res = vech16x8_src * elemhE;
>> +
>> +  for (indx = 0; indx < 8; indx++)
>> +    if (vech16x8_res[indx] != expectedh8_5[indx])
>> +      abort ();
>> +
>> +  vech16x8_res = vech16x8_src * elemhF;
>> +
>> +  for (indx = 0; indx < 8; indx++)
>> +    if (vech16x8_res[indx] != expectedh8_6[indx])
>> +      abort ();
>> +
>> +  vech16x8_res = vech16x8_src * elemhG;
>> +
>> +  for (indx = 0; indx < 8; indx++)
>> +    if (vech16x8_res[indx] != expectedh8_7[indx])
>> +      abort ();
>> +
>> +  vech16x8_res = vech16x8_src * elemhH;
>> +
>> +  for (indx = 0; indx < 8; indx++)
>> +    if (vech16x8_res[indx] != expectedh8_8[indx])
>> +      abort ();
>> +}
>> +
>> +void
>> +check_v8hi_unsigned (uint16_t elemuhA, uint16_t elemuhB, uint16_t elemuhC,
>> +                  uint16_t elemuhD, uint16_t elemuhE, uint16_t elemuhF,
>> +                  uint16_t elemuhG, uint16_t elemuhH)
>> +{
>> +  int indx;
>> +  const uint16_t vecuh16x8_buf[8] = {AUH, BUH, CUH, DUH, EUH, FUH, GUH, HUH};
>> +  uint16x8_t vecuh16x8_src = vld1q_u16 (vecuh16x8_buf);
>> +  uint16x8_t vecuh16x8_res = vecuh16x8_src * elemuhA;
>> +
>> +  for (indx = 0; indx < 8; indx++)
>> +    if (vecuh16x8_res[indx] != expecteduh8_1[indx])
>> +      abort ();
>> +
>> +  vecuh16x8_res = vecuh16x8_src * elemuhB;
>> +
>> +  for (indx = 0; indx < 8; indx++)
>> +    if (vecuh16x8_res[indx] != expecteduh8_2[indx])
>> +      abort ();
>> +
>> +  vecuh16x8_res = vecuh16x8_src * elemuhC;
>> +
>> +  for (indx = 0; indx < 8; indx++)
>> +    if (vecuh16x8_res[indx] != expecteduh8_3[indx])
>> +      abort ();
>> +
>> +  vecuh16x8_res = vecuh16x8_src * elemuhD;
>> +
>> +  for (indx = 0; indx < 8; indx++)
>> +    if (vecuh16x8_res[indx] != expecteduh8_4[indx])
>> +      abort ();
>> +
>> +  vecuh16x8_res = vecuh16x8_src * elemuhE;
>> +
>> +  for (indx = 0; indx < 8; indx++)
>> +    if (vecuh16x8_res[indx] != expecteduh8_5[indx])
>> +      abort ();
>> +
>> +  vecuh16x8_res = vecuh16x8_src * elemuhF;
>> +
>> +  for (indx = 0; indx < 8; indx++)
>> +    if (vecuh16x8_res[indx] != expecteduh8_6[indx])
>> +      abort ();
>> +
>> +  vecuh16x8_res = vecuh16x8_src * elemuhG;
>> +
>> +  for (indx = 0; indx < 8; indx++)
>> +    if (vecuh16x8_res[indx] != expecteduh8_7[indx])
>> +      abort ();
>> +
>> +  vecuh16x8_res = vecuh16x8_src * elemuhH;
>> +
>> +  for (indx = 0; indx < 8; indx++)
>> +    if (vecuh16x8_res[indx] != expecteduh8_8[indx])
>> +      abort ();
>> +
>> +/* { dg-final { scan-assembler-times "mul\tv\[0-9\]+\.8h, v\[0-9\]+\.8h, v\[0-9\]+\.h\\\[0\\\]" 16 } } */
>> +}
>> +
>> +int
>> +main (void)
>> +{
>> +  check_v2sf (_elemA, _elemB);
>> +  check_v4sf (_elemA, _elemB, _elemC, _elemD);
>> +  check_v2df (_elemdC, _elemdD);
>> +  check_v2si (_elemsA, _elemsB);
>> +  check_v4si (_elemsA, _elemsB, _elemsC, _elemsD);
>> +  check_v4hi (_elemhA, _elemhB, _elemhC, _elemhD);
>> +  check_v8hi (_elemhA, _elemhB, _elemhC, _elemhD,
>> +           _elemhE, _elemhF, _elemhG, _elemhH);
>> +  check_v2si_unsigned (_elemusA, _elemusB);
>> +  check_v4si_unsigned (_elemusA, _elemusB, _elemusC, _elemusD);
>> +  check_v4hi_unsigned (_elemuhA, _elemuhB, _elemuhC, _elemuhD);
>> +  check_v8hi_unsigned (_elemuhA, _elemuhB, _elemuhC, _elemuhD,
>> +                    _elemuhE, _elemuhF, _elemuhG, _elemuhH);
>> +
>> +  return 0;
>> +}
>> +
>>
>

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [AArch64, 2/4] Extend vector multiply by element to all supported modes
  2016-05-18  8:17     ` Christophe Lyon
@ 2016-05-18 13:14       ` Jiong Wang
  2016-05-26 10:41         ` [AArch64, testsuite] Fix vmul_elem_1.c on big-endian Jiong Wang
  2016-05-26 11:04         ` [AArch64, 2/4] Extend vector mutiply by element to all supported modes James Greenhalgh
  0 siblings, 2 replies; 10+ messages in thread
From: Jiong Wang @ 2016-05-18 13:14 UTC (permalink / raw)
  To: Christophe Lyon, James Greenhalgh; +Cc: GCC Patches

[-- Attachment #1: Type: text/plain, Size: 2596 bytes --]



On 18/05/16 09:17, Christophe Lyon wrote:
> On 17 May 2016 at 14:27, James Greenhalgh <james.greenhalgh@arm.com> wrote:
>> On Mon, May 16, 2016 at 10:09:31AM +0100, Jiong Wang wrote:
>>> AArch64 supports vector multiply by element for V2DF, V2SF, V4SF, V2SI,
>>> V4SI, V4HI, V8HI.
>>>
>>> All of the above are already handled by the "*aarch64_mul3_elt<mode>"
>>> pattern, and by "*aarch64_mul3_elt_<vswap_width_name><mode>" if there is a
>>> lane size change.
>>>
>>> Those patterns match "(mul (vec_dup (vec_select)))", which is genuinely
>>> vector multiply by element.
>>>
>>> However, vector multiply by element can also come from "(mul (vec_dup
>>> (scalar", where the scalar value is already sitting in a vector register
>>> and is then duplicated to the other lanes, with no lane size change.
>>>
>>> We already have "*aarch64_mul3_elt_to_128df" to match this, but it is
>>> restricted to V2DF, while this patch extends the support to more modes,
>>> for example vector integer operations.
>>>
>>> For the testcase included, the following codegen change will happen:
>>>
>>>
>>> -       ldr     w0, [x3, 160]
>>> -       dup     v1.2s, w0
>>> -       mul     v1.2s, v1.2s, v2.2s
>>> +       ldr     s1, [x3, 160]
>>> +       mul     v1.2s, v0.2s, v1.s[0]
>>>
>>> OK for trunk?
>>>
>>> 2016-05-16  Jiong Wang<jiong.wang@arm.com>
>>>
>>> gcc/
>>>    * config/aarch64/aarch64-simd.md (*aarch64_mul3_elt_to_128df): Extend to all
>>>    supported modes.  Rename to "*aarch64_mul3_elt_from_dup".
>>>
>>> gcc/testsuite/
>>>    * /gcc.target/aarch64/simd/vmul_elem_1.c: New.
>>
>> This ChangeLog formatting is incorrect. It should look like:
>>
>> gcc/
>>
>> 2016-05-17  Jiong Wang  <jiong.wang@arm.com>
>>
>>          * config/aarch64/aarch64-simd.md (*aarch64_mul3_elt_to_128df): Extend
>>          to all supported modes.  Rename to...
>>          (*aarch64_mul3_elt_from_dup): ...this.
>>
>> gcc/testsuite/
>>
>> 2016-05-17  Jiong Wang  <jiong.wang@arm.com>
>>
>>          * gcc.target/aarch64/simd/vmul_elem_1.c: New.
>>
>> Otherwise, this patch is OK.
>>
> Hi Jiong,
>
> The new testcase fails on aarch64_be, at execution time.
>
> Christophe.

Thanks for reporting this.

Yes, reproduced.  I should force those res* local variables into memory so
that their elements end up in the same order as the expected results, which
are kept in memory.

The following patch fixes this.

vmul_elem_1.c passes on both aarch64_be-none-elf and aarch64-linux.

OK for trunk?

gcc/testsuite/

2016-05-18  Jiong Wang  <jiong.wang@arm.com>

         * gcc.target/aarch64/simd/vmul_elem_1.c: Force result variables to be
         kept in memory.
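
The shape of the patched checks, condensed into one hypothetical helper
(the names here are illustrative; the real test keeps each comparison loop
inline, as the attached diff shows):

  /* Store the intrinsic result with vst1_s16 and compare the memory image,
     so the element order matches the expected arrays on both endiannesses.  */
  static void
  check_v4hi_sketch (int16x4_t src, int16_t elem, const int16_t *expected)
  {
    int16_t res[4];
    int i;

    vst1_s16 (res, vmul_n_s16 (src, elem));
    for (i = 0; i < 4; i++)
      if (res[i] != expected[i])
        abort ();
  }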


[-- Attachment #2: k.patch --]
[-- Type: text/x-patch, Size: 13153 bytes --]

diff --git a/gcc/testsuite/gcc.target/aarch64/simd/vmul_elem_1.c b/gcc/testsuite/gcc.target/aarch64/simd/vmul_elem_1.c
index 155cac3..a1faefd 100644
--- a/gcc/testsuite/gcc.target/aarch64/simd/vmul_elem_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/simd/vmul_elem_1.c
@@ -142,13 +142,15 @@ check_v2sf (float32_t elemA, float32_t elemB)
   int32_t indx;
   const float32_t vec32x2_buf[2] = {A, B};
   float32x2_t vec32x2_src = vld1_f32 (vec32x2_buf);
-  float32x2_t vec32x2_res = vmul_n_f32 (vec32x2_src, elemA);
+  float32_t vec32x2_res[2];
+
+  vst1_f32 (vec32x2_res, vmul_n_f32 (vec32x2_src, elemA));
 
   for (indx = 0; indx < 2; indx++)
     if (* (uint32_t *) &vec32x2_res[indx] != * (uint32_t *) &expected2_1[indx])
       abort ();
 
-  vec32x2_res = vmul_n_f32 (vec32x2_src, elemB);
+  vst1_f32 (vec32x2_res, vmul_n_f32 (vec32x2_src, elemB));
 
   for (indx = 0; indx < 2; indx++)
     if (* (uint32_t *) &vec32x2_res[indx] != * (uint32_t *) &expected2_2[indx])
@@ -163,25 +165,27 @@ check_v4sf (float32_t elemA, float32_t elemB, float32_t elemC, float32_t elemD)
   int32_t indx;
   const float32_t vec32x4_buf[4] = {A, B, C, D};
   float32x4_t vec32x4_src = vld1q_f32 (vec32x4_buf);
-  float32x4_t vec32x4_res = vmulq_n_f32 (vec32x4_src, elemA);
+  float32_t vec32x4_res[4];
+
+  vst1q_f32 (vec32x4_res, vmulq_n_f32 (vec32x4_src, elemA));
 
   for (indx = 0; indx < 4; indx++)
     if (* (uint32_t *) &vec32x4_res[indx] != * (uint32_t *) &expected4_1[indx])
       abort ();
 
-  vec32x4_res = vmulq_n_f32 (vec32x4_src, elemB);
+  vst1q_f32 (vec32x4_res, vmulq_n_f32 (vec32x4_src, elemB));
 
   for (indx = 0; indx < 4; indx++)
     if (* (uint32_t *) &vec32x4_res[indx] != * (uint32_t *) &expected4_2[indx])
       abort ();
 
-  vec32x4_res = vmulq_n_f32 (vec32x4_src, elemC);
+  vst1q_f32 (vec32x4_res, vmulq_n_f32 (vec32x4_src, elemC));
 
   for (indx = 0; indx < 4; indx++)
     if (* (uint32_t *) &vec32x4_res[indx] != * (uint32_t *) &expected4_3[indx])
       abort ();
 
-  vec32x4_res = vmulq_n_f32 (vec32x4_src, elemD);
+  vst1q_f32 (vec32x4_res, vmulq_n_f32 (vec32x4_src, elemD));
 
   for (indx = 0; indx < 4; indx++)
     if (* (uint32_t *) &vec32x4_res[indx] != * (uint32_t *) &expected4_4[indx])
@@ -196,13 +200,15 @@ check_v2df (float64_t elemdC, float64_t elemdD)
   int32_t indx;
   const float64_t vec64x2_buf[2] = {AD, BD};
   float64x2_t vec64x2_src = vld1q_f64 (vec64x2_buf);
-  float64x2_t vec64x2_res = vmulq_n_f64 (vec64x2_src, elemdC);
+  float64_t vec64x2_res[2];
+
+  vst1q_f64 (vec64x2_res, vmulq_n_f64 (vec64x2_src, elemdC));
 
   for (indx = 0; indx < 2; indx++)
     if (* (uint64_t *) &vec64x2_res[indx] != * (uint64_t *) &expectedd2_1[indx])
       abort ();
 
-  vec64x2_res = vmulq_n_f64 (vec64x2_src, elemdD);
+  vst1q_f64 (vec64x2_res, vmulq_n_f64 (vec64x2_src, elemdD));
 
   for (indx = 0; indx < 2; indx++)
     if (* (uint64_t *) &vec64x2_res[indx] != * (uint64_t *) &expectedd2_2[indx])
@@ -217,13 +223,15 @@ check_v2si (int32_t elemsA, int32_t elemsB)
   int32_t indx;
   const int32_t vecs32x2_buf[2] = {AS, BS};
   int32x2_t vecs32x2_src = vld1_s32 (vecs32x2_buf);
-  int32x2_t vecs32x2_res = vmul_n_s32 (vecs32x2_src, elemsA);
+  int32_t vecs32x2_res[2];
+
+  vst1_s32 (vecs32x2_res, vmul_n_s32 (vecs32x2_src, elemsA));
 
   for (indx = 0; indx < 2; indx++)
     if (vecs32x2_res[indx] != expecteds2_1[indx])
       abort ();
 
-  vecs32x2_res = vmul_n_s32 (vecs32x2_src, elemsB);
+  vst1_s32 (vecs32x2_res, vmul_n_s32 (vecs32x2_src, elemsB));
 
   for (indx = 0; indx < 2; indx++)
     if (vecs32x2_res[indx] != expecteds2_2[indx])
@@ -236,13 +244,15 @@ check_v2si_unsigned (uint32_t elemusA, uint32_t elemusB)
   int indx;
   const uint32_t vecus32x2_buf[2] = {AUS, BUS};
   uint32x2_t vecus32x2_src = vld1_u32 (vecus32x2_buf);
-  uint32x2_t vecus32x2_res = vmul_n_u32 (vecus32x2_src, elemusA);
+  uint32_t vecus32x2_res[2];
+
+  vst1_u32 (vecus32x2_res, vmul_n_u32 (vecus32x2_src, elemusA));
 
   for (indx = 0; indx < 2; indx++)
     if (vecus32x2_res[indx] != expectedus2_1[indx])
       abort ();
 
-  vecus32x2_res = vmul_n_u32 (vecus32x2_src, elemusB);
+  vst1_u32 (vecus32x2_res, vmul_n_u32 (vecus32x2_src, elemusB));
 
   for (indx = 0; indx < 2; indx++)
     if (vecus32x2_res[indx] != expectedus2_2[indx])
@@ -257,25 +267,27 @@ check_v4si (int32_t elemsA, int32_t elemsB, int32_t elemsC, int32_t elemsD)
   int32_t indx;
   const int32_t vecs32x4_buf[4] = {AS, BS, CS, DS};
   int32x4_t vecs32x4_src = vld1q_s32 (vecs32x4_buf);
-  int32x4_t vecs32x4_res = vmulq_n_s32 (vecs32x4_src, elemsA);
+  int32_t vecs32x4_res[4];
+
+  vst1q_s32 (vecs32x4_res, vmulq_n_s32 (vecs32x4_src, elemsA));
 
   for (indx = 0; indx < 4; indx++)
     if (vecs32x4_res[indx] != expecteds4_1[indx])
       abort ();
 
-  vecs32x4_res = vmulq_n_s32 (vecs32x4_src, elemsB);
+  vst1q_s32 (vecs32x4_res, vmulq_n_s32 (vecs32x4_src, elemsB));
 
   for (indx = 0; indx < 4; indx++)
     if (vecs32x4_res[indx] != expecteds4_2[indx])
       abort ();
 
-  vecs32x4_res = vmulq_n_s32 (vecs32x4_src, elemsC);
+  vst1q_s32 (vecs32x4_res, vmulq_n_s32 (vecs32x4_src, elemsC));
 
   for (indx = 0; indx < 4; indx++)
     if (vecs32x4_res[indx] != expecteds4_3[indx])
       abort ();
 
-  vecs32x4_res = vmulq_n_s32 (vecs32x4_src, elemsD);
+  vst1q_s32 (vecs32x4_res, vmulq_n_s32 (vecs32x4_src, elemsD));
 
   for (indx = 0; indx < 4; indx++)
     if (vecs32x4_res[indx] != expecteds4_4[indx])
@@ -289,25 +301,27 @@ check_v4si_unsigned (uint32_t elemusA, uint32_t elemusB, uint32_t elemusC,
   int indx;
   const uint32_t vecus32x4_buf[4] = {AUS, BUS, CUS, DUS};
   uint32x4_t vecus32x4_src = vld1q_u32 (vecus32x4_buf);
-  uint32x4_t vecus32x4_res = vmulq_n_u32 (vecus32x4_src, elemusA);
+  uint32_t vecus32x4_res[4];
+
+  vst1q_u32 (vecus32x4_res, vmulq_n_u32 (vecus32x4_src, elemusA));
 
   for (indx = 0; indx < 4; indx++)
     if (vecus32x4_res[indx] != expectedus4_1[indx])
       abort ();
 
-  vecus32x4_res = vmulq_n_u32 (vecus32x4_src, elemusB);
+  vst1q_u32 (vecus32x4_res, vmulq_n_u32 (vecus32x4_src, elemusB));
 
   for (indx = 0; indx < 4; indx++)
     if (vecus32x4_res[indx] != expectedus4_2[indx])
       abort ();
 
-  vecus32x4_res = vmulq_n_u32 (vecus32x4_src, elemusC);
+  vst1q_u32 (vecus32x4_res, vmulq_n_u32 (vecus32x4_src, elemusC));
 
   for (indx = 0; indx < 4; indx++)
     if (vecus32x4_res[indx] != expectedus4_3[indx])
       abort ();
 
-  vecus32x4_res = vmulq_n_u32 (vecus32x4_src, elemusD);
+  vst1q_u32 (vecus32x4_res, vmulq_n_u32 (vecus32x4_src, elemusD));
 
   for (indx = 0; indx < 4; indx++)
     if (vecus32x4_res[indx] != expectedus4_4[indx])
@@ -323,25 +337,27 @@ check_v4hi (int16_t elemhA, int16_t elemhB, int16_t elemhC, int16_t elemhD)
   int32_t indx;
   const int16_t vech16x4_buf[4] = {AH, BH, CH, DH};
   int16x4_t vech16x4_src = vld1_s16 (vech16x4_buf);
-  int16x4_t vech16x4_res = vmul_n_s16 (vech16x4_src, elemhA);
+  int16_t vech16x4_res[4];
+
+  vst1_s16 (vech16x4_res, vmul_n_s16 (vech16x4_src, elemhA));
 
   for (indx = 0; indx < 4; indx++)
     if (vech16x4_res[indx] != expectedh4_1[indx])
       abort ();
 
-  vech16x4_res = vmul_n_s16 (vech16x4_src, elemhB);
+  vst1_s16 (vech16x4_res, vmul_n_s16 (vech16x4_src, elemhB));
 
   for (indx = 0; indx < 4; indx++)
     if (vech16x4_res[indx] != expectedh4_2[indx])
       abort ();
 
-  vech16x4_res = vmul_n_s16 (vech16x4_src, elemhC);
+  vst1_s16 (vech16x4_res, vmul_n_s16 (vech16x4_src, elemhC));
 
   for (indx = 0; indx < 4; indx++)
     if (vech16x4_res[indx] != expectedh4_3[indx])
       abort ();
 
-  vech16x4_res = vmul_n_s16 (vech16x4_src, elemhD);
+  vst1_s16 (vech16x4_res, vmul_n_s16 (vech16x4_src, elemhD));
 
   for (indx = 0; indx < 4; indx++)
     if (vech16x4_res[indx] != expectedh4_4[indx])
@@ -355,25 +371,27 @@ check_v4hi_unsigned (uint16_t elemuhA, uint16_t elemuhB, uint16_t elemuhC,
   int indx;
   const uint16_t vecuh16x4_buf[4] = {AUH, BUH, CUH, DUH};
   uint16x4_t vecuh16x4_src = vld1_u16 (vecuh16x4_buf);
-  uint16x4_t vecuh16x4_res = vmul_n_u16 (vecuh16x4_src, elemuhA);
+  uint16_t vecuh16x4_res[4];
+
+  vst1_u16 (vecuh16x4_res, vmul_n_u16 (vecuh16x4_src, elemuhA));
 
   for (indx = 0; indx < 4; indx++)
     if (vecuh16x4_res[indx] != expecteduh4_1[indx])
       abort ();
 
-  vecuh16x4_res = vmul_n_u16 (vecuh16x4_src, elemuhB);
+  vst1_u16 (vecuh16x4_res, vmul_n_u16 (vecuh16x4_src, elemuhB));
 
   for (indx = 0; indx < 4; indx++)
     if (vecuh16x4_res[indx] != expecteduh4_2[indx])
       abort ();
 
-  vecuh16x4_res = vmul_n_u16 (vecuh16x4_src, elemuhC);
+  vst1_u16 (vecuh16x4_res, vmul_n_u16 (vecuh16x4_src, elemuhC));
 
   for (indx = 0; indx < 4; indx++)
     if (vecuh16x4_res[indx] != expecteduh4_3[indx])
       abort ();
 
-  vecuh16x4_res = vmul_n_u16 (vecuh16x4_src, elemuhD);
+  vst1_u16 (vecuh16x4_res, vmul_n_u16 (vecuh16x4_src, elemuhD));
 
   for (indx = 0; indx < 4; indx++)
     if (vecuh16x4_res[indx] != expecteduh4_4[indx])
@@ -389,49 +407,51 @@ check_v8hi (int16_t elemhA, int16_t elemhB, int16_t elemhC, int16_t elemhD,
   int32_t indx;
   const int16_t vech16x8_buf[8] = {AH, BH, CH, DH, EH, FH, GH, HH};
   int16x8_t vech16x8_src = vld1q_s16 (vech16x8_buf);
-  int16x8_t vech16x8_res = vmulq_n_s16 (vech16x8_src, elemhA);
+  int16_t vech16x8_res[8];
+
+  vst1q_s16 (vech16x8_res, vmulq_n_s16 (vech16x8_src, elemhA));
 
   for (indx = 0; indx < 8; indx++)
     if (vech16x8_res[indx] != expectedh8_1[indx])
       abort ();
 
-  vech16x8_res = vmulq_n_s16 (vech16x8_src, elemhB);
+  vst1q_s16 (vech16x8_res, vmulq_n_s16 (vech16x8_src, elemhB));
 
   for (indx = 0; indx < 8; indx++)
     if (vech16x8_res[indx] != expectedh8_2[indx])
       abort ();
 
-  vech16x8_res = vmulq_n_s16 (vech16x8_src, elemhC);
+  vst1q_s16 (vech16x8_res, vmulq_n_s16 (vech16x8_src, elemhC));
 
   for (indx = 0; indx < 8; indx++)
     if (vech16x8_res[indx] != expectedh8_3[indx])
       abort ();
 
-  vech16x8_res = vmulq_n_s16 (vech16x8_src, elemhD);
+  vst1q_s16 (vech16x8_res, vmulq_n_s16 (vech16x8_src, elemhD));
 
   for (indx = 0; indx < 8; indx++)
     if (vech16x8_res[indx] != expectedh8_4[indx])
       abort ();
 
-  vech16x8_res = vmulq_n_s16 (vech16x8_src, elemhE);
+  vst1q_s16 (vech16x8_res, vmulq_n_s16 (vech16x8_src, elemhE));
 
   for (indx = 0; indx < 8; indx++)
     if (vech16x8_res[indx] != expectedh8_5[indx])
       abort ();
 
-  vech16x8_res = vmulq_n_s16 (vech16x8_src, elemhF);
+  vst1q_s16 (vech16x8_res, vmulq_n_s16 (vech16x8_src, elemhF));
 
   for (indx = 0; indx < 8; indx++)
     if (vech16x8_res[indx] != expectedh8_6[indx])
       abort ();
 
-  vech16x8_res = vmulq_n_s16 (vech16x8_src, elemhG);
+  vst1q_s16 (vech16x8_res, vmulq_n_s16 (vech16x8_src, elemhG));
 
   for (indx = 0; indx < 8; indx++)
     if (vech16x8_res[indx] != expectedh8_7[indx])
       abort ();
 
-  vech16x8_res = vmulq_n_s16 (vech16x8_src, elemhH);
+  vst1q_s16 (vech16x8_res, vmulq_n_s16 (vech16x8_src, elemhH));
 
   for (indx = 0; indx < 8; indx++)
     if (vech16x8_res[indx] != expectedh8_8[indx])
@@ -446,49 +466,51 @@ check_v8hi_unsigned (uint16_t elemuhA, uint16_t elemuhB, uint16_t elemuhC,
   int indx;
   const uint16_t vecuh16x8_buf[8] = {AUH, BUH, CUH, DUH, EUH, FUH, GUH, HUH};
   uint16x8_t vecuh16x8_src = vld1q_u16 (vecuh16x8_buf);
-  uint16x8_t vecuh16x8_res = vmulq_n_u16 (vecuh16x8_src, elemuhA);
+  uint16_t vecuh16x8_res[8];
+
+  vst1q_u16 (vecuh16x8_res, vmulq_n_u16 (vecuh16x8_src, elemuhA));
 
   for (indx = 0; indx < 8; indx++)
     if (vecuh16x8_res[indx] != expecteduh8_1[indx])
       abort ();
 
-  vecuh16x8_res = vmulq_n_u16 (vecuh16x8_src, elemuhB);
+  vst1q_u16 (vecuh16x8_res, vmulq_n_u16 (vecuh16x8_src, elemuhB));
 
   for (indx = 0; indx < 8; indx++)
     if (vecuh16x8_res[indx] != expecteduh8_2[indx])
       abort ();
 
-  vecuh16x8_res = vmulq_n_u16 (vecuh16x8_src, elemuhC);
+  vst1q_u16 (vecuh16x8_res, vmulq_n_u16 (vecuh16x8_src, elemuhC));
 
   for (indx = 0; indx < 8; indx++)
     if (vecuh16x8_res[indx] != expecteduh8_3[indx])
       abort ();
 
-  vecuh16x8_res = vmulq_n_u16 (vecuh16x8_src, elemuhD);
+  vst1q_u16 (vecuh16x8_res, vmulq_n_u16 (vecuh16x8_src, elemuhD));
 
   for (indx = 0; indx < 8; indx++)
     if (vecuh16x8_res[indx] != expecteduh8_4[indx])
       abort ();
 
-  vecuh16x8_res = vmulq_n_u16 (vecuh16x8_src, elemuhE);
+  vst1q_u16 (vecuh16x8_res, vmulq_n_u16 (vecuh16x8_src, elemuhE));
 
   for (indx = 0; indx < 8; indx++)
     if (vecuh16x8_res[indx] != expecteduh8_5[indx])
       abort ();
 
-  vecuh16x8_res = vmulq_n_u16 (vecuh16x8_src, elemuhF);
+  vst1q_u16 (vecuh16x8_res, vmulq_n_u16 (vecuh16x8_src, elemuhF));
 
   for (indx = 0; indx < 8; indx++)
     if (vecuh16x8_res[indx] != expecteduh8_6[indx])
       abort ();
 
-  vecuh16x8_res = vmulq_n_u16 (vecuh16x8_src, elemuhG);
+  vst1q_u16 (vecuh16x8_res, vmulq_n_u16 (vecuh16x8_src, elemuhG));
 
   for (indx = 0; indx < 8; indx++)
     if (vecuh16x8_res[indx] != expecteduh8_7[indx])
       abort ();
 
-  vecuh16x8_res = vmulq_n_u16 (vecuh16x8_src, elemuhH);
+  vst1q_u16 (vecuh16x8_res, vmulq_n_u16 (vecuh16x8_src, elemuhH));
 
   for (indx = 0; indx < 8; indx++)
     if (vecuh16x8_res[indx] != expecteduh8_8[indx])

^ permalink raw reply	[flat|nested] 10+ messages in thread

* [AArch64, testsuite] Fix vmul_elem_1.c on big-endian
  2016-05-18 13:14       ` Jiong Wang
@ 2016-05-26 10:41         ` Jiong Wang
  2016-05-26 11:04         ` [AArch64, 2/4] Extend vector mutiply by element to all supported modes James Greenhalgh
  1 sibling, 0 replies; 10+ messages in thread
From: Jiong Wang @ 2016-05-26 10:41 UTC (permalink / raw)
  To: GCC Patches; +Cc: Christophe Lyon, James Greenhalgh

On 18/05/16 14:13, Jiong Wang wrote:
>
>
> On 18/05/16 09:17, Christophe Lyon wrote:
>>
>>> gcc/
>>>
>>> 2016-05-17  Jiong Wang  <jiong.wang@arm.com>
>>>
>>>          * config/aarch64/aarch64-simd.md (*aarch64_mul3_elt_to_128df): Extend
>>>          to all supported modes.  Rename to...
>>>          (*aarch64_mul3_elt_from_dup): ...this.
>>>
>>> gcc/testsuite/
>>>
>>> 2016-05-17  Jiong Wang  <jiong.wang@arm.com>
>>>
>>>          * gcc.target/aarch64/simd/vmul_elem_1.c: New.
>>>
>>> Otherwise, this patch is OK.
>>>
>> Hi Jiong,
>>
>> The new testcase fails on aarch64_be, at execution time.
>>
>> Christophe.
>
> Thanks for reporting this.
>
> Yes, reproduced.  I should force those res* local variables into memory so
> that their elements end up in the same order as the expected results, which
> are kept in memory.
>
> The following patch fixes this.
>
> vmul_elem_1.c passes on both aarch64_be-none-elf and aarch64-linux.
>
> OK for trunk?
>
> gcc/testsuite/
>
> 2016-05-18  Jiong Wang  <jiong.wang@arm.com>
>
>         * gcc.target/aarch64/simd/vmul_elem_1.c: Force result variables
>         to be kept in memory.
>

Ping ~

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [AArch64, 2/4] Extend vector multiply by element to all supported modes
  2016-05-18 13:14       ` Jiong Wang
  2016-05-26 10:41         ` [AArch64, testsuite] Fix vmul_elem_1.c on big-endian Jiong Wang
@ 2016-05-26 11:04         ` James Greenhalgh
  1 sibling, 0 replies; 10+ messages in thread
From: James Greenhalgh @ 2016-05-26 11:04 UTC (permalink / raw)
  To: Jiong Wang; +Cc: Christophe Lyon, GCC Patches, nd

On Wed, May 18, 2016 at 02:13:53PM +0100, Jiong Wang wrote:
> Thanks for reporting this.
> 
> Yes, reproduced.  I should force those res* local variables into memory so
> that their elements end up in the same order as the expected results, which
> are kept in memory.
> 
> The following patch fixes this.
> 
> vmul_elem_1.c passes on both aarch64_be-none-elf and aarch64-linux.
> 
> OK for trunk?

OK.

Thanks,
James

> 
> gcc/testsuite/
> 
> 2016-05-18  Jiong Wang  <jiong.wang@arm.com>
> 
>         * gcc.target/aarch64/simd/vmul_elem_1.c: Force result variables to be
>         kept in memory.
> 

^ permalink raw reply	[flat|nested] 10+ messages in thread

end of thread, other threads:[~2016-05-26  8:32 UTC | newest]

Thread overview: 10+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
     [not found] <57398D3D.1040806@foss.arm.com>
     [not found] ` <57398D5E.6070503@foss.arm.com>
     [not found]   ` <57398D8B.8060902@foss.arm.com>
2016-05-16  9:09     ` [AArch64, 4/4] Reimplement vmvn* intrinsics, remove inline assembly Jiong Wang
2016-05-17 12:38       ` James Greenhalgh
2016-05-16  9:09   ` [AArch64, 3/4] Reimplement multiply by element to get rid of " Jiong Wang
2016-05-17 12:37     ` James Greenhalgh
2016-05-16  9:09 ` [AArch64, 2/4] Extend vector multiply by element to all supported modes Jiong Wang
2016-05-17 12:28   ` James Greenhalgh
2016-05-18  8:17     ` Christophe Lyon
2016-05-18 13:14       ` Jiong Wang
2016-05-26 10:41         ` [AArch64, testsuite] Fix vmul_elem_1.c on big-endian Jiong Wang
2016-05-26 11:04         ` [AArch64, 2/4] Extend vector multiply by element to all supported modes James Greenhalgh
