* [PATCH] RISC-V: Remove earlyclobber from widen reduction
@ 2023-12-04  8:51 Juzhe-Zhong
From: Juzhe-Zhong @ 2023-12-04  8:51 UTC (permalink / raw)
  To: gcc-patches; +Cc: kito.cheng, kito.cheng, jeffreyalaw, rdapp.gcc, Juzhe-Zhong

Since the destination of a widening reduction is a single LMUL=1
register rather than a vector register group, there is no need to apply
the overlap (earlyclobber) constraint.
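
For illustration only (not part of the patch), here is a minimal
intrinsic sketch of the situation, built from the same intrinsics as
the new tests; the function name and arguments are made up.  The
reduced operand lives in an LMUL=2 register group while the accumulator
and the result live in a single LMUL=1 register, so the register
allocator may assign the result a register that overlaps the source
group instead of emitting a vmv1r copy first:

#include "riscv_vector.h"

int16_t
widen_sum (const int8_t *src, const int16_t *init, size_t vl)
{
  /* LMUL=2 source group to be reduced.  */
  vint8m2_t v = __riscv_vle8_v_i8m2 (src, vl);
  /* LMUL=1 accumulator holding the initial sum.  */
  vint16m1_t acc = __riscv_vle16_v_i16m1 (init, vl);
  /* Widening reduction: the result is a single element in an LMUL=1
     register, which may share a register with (part of) V.  */
  vint16m1_t r = __riscv_vwredsum_vs_i8m2_i16m1 (v, acc, vl);
  return __riscv_vmv_x_s_i16m1_i16 (r);
}

With the earlyclobber dropped, nothing forces the result away from the
source group, which is what eliminates the vmv1r.v copies visible in
the "Before this patch" listing below.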

This is also confirmed against Clang/LLVM:

The MIR in LLVM for the widening add has an early-clobber:
early-clobber %49:vrm2 = PseudoVWADD_VX_M1 $noreg(tied-def 0), killed %17:vr, %48:gpr, %0:gprnox0, 3, 0; example.c:59:24

The MIR in LLVM for the widening reduction has no early-clobber:
%48:vr = PseudoVWREDSUM_VS_M2_E8 $noreg(tied-def 0), %17:vrm2, killed %33:vr, %0:gprnox0, 3, 1; example.c:60:26

Also confirmed that both of the following forms are accepted as legal
by LLVM:

	vwredsum.vs     v24, v8, v24
	vwredsum.vs     v8, v8, v24

To align with LLVM and honor the RISC-V V spec, remove the earlyclobber
constraint.

Before this patch:

	vwredsum.vs     v8,v24,v8
        vwredsum.vs     v7,v22,v7
        vwredsum.vs     v6,v20,v6
        vwredsum.vs     v5,v18,v5
        vwredsum.vs     v4,v16,v4
        vwredsum.vs     v3,v14,v3
        vwredsum.vs     v2,v12,v2
        vwredsum.vs     v1,v10,v1
        vmv1r.v v9,v8
        vwredsum.vs     v9,v24,v9
        vmv1r.v v24,v7
        vwredsum.vs     v24,v22,v24
        vmv1r.v v22,v6
        vwredsum.vs     v22,v20,v22
        vmv1r.v v20,v5
        vwredsum.vs     v20,v18,v20
        vmv1r.v v18,v4
        vwredsum.vs     v18,v16,v18
        vmv1r.v v16,v3
        vwredsum.vs     v16,v14,v16
        vmv1r.v v14,v2
        vwredsum.vs     v14,v12,v14
        vmv1r.v v12,v1
        vwredsum.vs     v12,v10,v12

After this patch:

	vfwredusum.vs	v17,v12,v17
	vfwredusum.vs	v18,v10,v18
	vfwredusum.vs	v15,v26,v15
	vfwredusum.vs	v16,v24,v16
	vfwredusum.vs	v12,v12,v17
	vfwredusum.vs	v10,v10,v18
	vfwredusum.vs	v13,v6,v20
	vfwredusum.vs	v11,v8,v19
	vfwredusum.vs	v6,v6,v13
	vfwredusum.vs	v8,v8,v11
	vfwredusum.vs	v7,v4,v21
	vfwredusum.vs	v9,v2,v22
	vfwredusum.vs	v14,v26,v15
	vfwredusum.vs	v1,v24,v16
	vfwredusum.vs	v4,v4,v7
	vfwredusum.vs	v2,v2,v9

This matches LLVM's behavior and honors the RISC-V V spec.

	PR target/112431

gcc/ChangeLog:

	* config/riscv/vector.md: Remove earlyclobber from widen reduction.

gcc/testsuite/ChangeLog:

	* gcc.target/riscv/rvv/base/pr112431-35.c: New test.
	* gcc.target/riscv/rvv/base/pr112431-36.c: New test.

---
 gcc/config/riscv/vector.md                    |   8 +-
 .../gcc.target/riscv/rvv/base/pr112431-35.c   | 107 ++++++++++++++++++
 .../gcc.target/riscv/rvv/base/pr112431-36.c   | 107 ++++++++++++++++++
 3 files changed, 218 insertions(+), 4 deletions(-)
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-35.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-36.c

diff --git a/gcc/config/riscv/vector.md b/gcc/config/riscv/vector.md
index 731057416cd..72cf3553e45 100644
--- a/gcc/config/riscv/vector.md
+++ b/gcc/config/riscv/vector.md
@@ -7861,7 +7861,7 @@
 
 ;; Integer Widen Reduction Sum (vwredsum[u].vs)
 (define_insn "@pred_<reduc_op><mode>"
-  [(set (match_operand:<V_EXT_LMUL1>       0 "register_operand"      "=&vr,&vr")
+  [(set (match_operand:<V_EXT_LMUL1>       0 "register_operand"        "=vr,   vr")
 	(unspec:<V_EXT_LMUL1>
 	  [(unspec:<VM>
 	    [(match_operand:<VM>           1 "vector_mask_operand"   "vmWc1,vmWc1")
@@ -7872,7 +7872,7 @@
 	     (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
            (unspec:<V_EXT_LMUL1> [
 	     (match_operand:VI_QHS         3 "register_operand"      "   vr,   vr")
-	     (match_operand:<V_EXT_LMUL1>  4 "register_operand"      "  vr0,  vr0")
+	     (match_operand:<V_EXT_LMUL1>  4 "register_operand"      "   vr,   vr")
            ] ANY_WREDUC)
 	   (match_operand:<V_EXT_LMUL1>    2 "vector_merge_operand"  "   vu,    0")] UNSPEC_REDUC))]
   "TARGET_VECTOR"
@@ -7928,7 +7928,7 @@
 
 ;; Float Widen Reduction Sum (vfwred[ou]sum.vs)
 (define_insn "@pred_<reduc_op><mode>"
-  [(set (match_operand:<V_EXT_LMUL1>         0 "register_operand"      "=&vr, &vr")
+  [(set (match_operand:<V_EXT_LMUL1>         0 "register_operand"      "=vr,   vr")
 	(unspec:<V_EXT_LMUL1>
 	  [(unspec:<VM>
 	    [(match_operand:<VM>           1 "vector_mask_operand"   "vmWc1,vmWc1")
@@ -7941,7 +7941,7 @@
 	     (reg:SI FRM_REGNUM)] UNSPEC_VPREDICATE)
            (unspec:<V_EXT_LMUL1> [
 	     (match_operand:VF_HS          3 "register_operand"      "   vr,   vr")
-	     (match_operand:<V_EXT_LMUL1>  4 "register_operand"      "  vr0,  vr0")
+	     (match_operand:<V_EXT_LMUL1>  4 "register_operand"      "   vr,   vr")
            ] ANY_FWREDUC_SUM)
 	   (match_operand:<V_EXT_LMUL1>    2 "vector_merge_operand"  "   vu,    0")] UNSPEC_REDUC))]
   "TARGET_VECTOR"
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-35.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-35.c
new file mode 100644
index 00000000000..6f72e93aa38
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-35.c
@@ -0,0 +1,107 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+size_t __attribute__ ((noinline))
+sumation (size_t sum0, size_t sum1, size_t sum2, size_t sum3, size_t sum4,
+	  size_t sum5, size_t sum6, size_t sum7,
+	  size_t sum0_2, size_t sum1_2, size_t sum2_2, size_t sum3_2, size_t sum4_2,
+	  size_t sum5_2, size_t sum6_2, size_t sum7_2)
+{
+  return sum0 + sum1 + sum2 + sum3 + sum4 + sum5 + sum6 + sum7
+  + sum0_2 + sum1_2 + sum2_2 + sum3_2 + sum4_2 + sum5_2 + sum6_2 + sum7_2;
+}
+
+size_t
+foo (char const *buf, size_t len)
+{
+  size_t sum = 0;
+  size_t vl = __riscv_vsetvlmax_e8m8 ();
+  size_t step = vl * 4;
+  const char *it = buf, *end = buf + len;
+  for (; it + step <= end;)
+    {
+      vint8m2_t v0 = __riscv_vle8_v_i8m2 ((void *) it, vl);
+      it += vl;
+      vint8m2_t v1 = __riscv_vle8_v_i8m2 ((void *) it, vl);
+      it += vl;
+      vint8m2_t v2 = __riscv_vle8_v_i8m2 ((void *) it, vl);
+      it += vl;
+      vint8m2_t v3 = __riscv_vle8_v_i8m2 ((void *) it, vl);
+      it += vl;
+      vint8m2_t v4 = __riscv_vle8_v_i8m2 ((void *) it, vl);
+      it += vl;
+      vint8m2_t v5 = __riscv_vle8_v_i8m2 ((void *) it, vl);
+      it += vl;
+      vint8m2_t v6 = __riscv_vle8_v_i8m2 ((void *) it, vl);
+      it += vl;
+      vint8m2_t v7 = __riscv_vle8_v_i8m2 ((void *) it, vl);
+      it += vl;
+
+      vint16m1_t vw0 = __riscv_vle16_v_i16m1 ((void *) it, vl);
+      it += vl;
+      vint16m1_t vw1 = __riscv_vle16_v_i16m1 ((void *) it, vl);
+      it += vl;
+      vint16m1_t vw2 = __riscv_vle16_v_i16m1 ((void *) it, vl);
+      it += vl;
+      vint16m1_t vw3 = __riscv_vle16_v_i16m1 ((void *) it, vl);
+      it += vl;
+      vint16m1_t vw4 = __riscv_vle16_v_i16m1 ((void *) it, vl);
+      it += vl;
+      vint16m1_t vw5 = __riscv_vle16_v_i16m1 ((void *) it, vl);
+      it += vl;
+      vint16m1_t vw6 = __riscv_vle16_v_i16m1 ((void *) it, vl);
+      it += vl;
+      vint16m1_t vw7 = __riscv_vle16_v_i16m1 ((void *) it, vl);
+      it += vl;
+      
+      asm volatile("nop" ::: "memory");
+      vint16m1_t vw0_2 = __riscv_vwredsum_vs_i8m2_i16m1 (v0, vw0, vl);
+      vint16m1_t vw1_2 = __riscv_vwredsum_vs_i8m2_i16m1 (v1, vw1, vl);
+      vint16m1_t vw2_2 = __riscv_vwredsum_vs_i8m2_i16m1 (v2, vw2, vl);
+      vint16m1_t vw3_2 = __riscv_vwredsum_vs_i8m2_i16m1 (v3, vw3, vl);
+      vint16m1_t vw4_2 = __riscv_vwredsum_vs_i8m2_i16m1 (v4, vw4, vl);
+      vint16m1_t vw5_2 = __riscv_vwredsum_vs_i8m2_i16m1 (v5, vw5, vl);
+      vint16m1_t vw6_2 = __riscv_vwredsum_vs_i8m2_i16m1 (v6, vw6, vl);
+      vint16m1_t vw7_2 = __riscv_vwredsum_vs_i8m2_i16m1 (v7, vw7, vl);
+      
+      vw0 = __riscv_vwredsum_vs_i8m2_i16m1 (v0, vw0_2, vl);
+      vw1 = __riscv_vwredsum_vs_i8m2_i16m1 (v1, vw1_2, vl);
+      vw2 = __riscv_vwredsum_vs_i8m2_i16m1 (v2, vw2_2, vl);
+      vw3 = __riscv_vwredsum_vs_i8m2_i16m1 (v3, vw3_2, vl);
+      vw4 = __riscv_vwredsum_vs_i8m2_i16m1 (v4, vw4_2, vl);
+      vw5 = __riscv_vwredsum_vs_i8m2_i16m1 (v5, vw5_2, vl);
+      vw6 = __riscv_vwredsum_vs_i8m2_i16m1 (v6, vw6_2, vl);
+      vw7 = __riscv_vwredsum_vs_i8m2_i16m1 (v7, vw7_2, vl);
+
+      asm volatile("nop" ::: "memory");
+      size_t sum0 = __riscv_vmv_x_s_i16m1_i16 (vw0);
+      size_t sum1 = __riscv_vmv_x_s_i16m1_i16 (vw1);
+      size_t sum2 = __riscv_vmv_x_s_i16m1_i16 (vw2);
+      size_t sum3 = __riscv_vmv_x_s_i16m1_i16 (vw3);
+      size_t sum4 = __riscv_vmv_x_s_i16m1_i16 (vw4);
+      size_t sum5 = __riscv_vmv_x_s_i16m1_i16 (vw5);
+      size_t sum6 = __riscv_vmv_x_s_i16m1_i16 (vw6);
+      size_t sum7 = __riscv_vmv_x_s_i16m1_i16 (vw7);
+
+      size_t sum0_2 = __riscv_vmv_x_s_i16m1_i16 (vw0_2);
+      size_t sum1_2 = __riscv_vmv_x_s_i16m1_i16 (vw1_2);
+      size_t sum2_2 = __riscv_vmv_x_s_i16m1_i16 (vw2_2);
+      size_t sum3_2 = __riscv_vmv_x_s_i16m1_i16 (vw3_2);
+      size_t sum4_2 = __riscv_vmv_x_s_i16m1_i16 (vw4_2);
+      size_t sum5_2 = __riscv_vmv_x_s_i16m1_i16 (vw5_2);
+      size_t sum6_2 = __riscv_vmv_x_s_i16m1_i16 (vw6_2);
+      size_t sum7_2 = __riscv_vmv_x_s_i16m1_i16 (vw7_2);
+
+      sum += sumation (sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7,
+      sum0_2, sum1_2, sum2_2, sum3_2, sum4_2, sum5_2, sum6_2, sum7_2);
+    }
+  return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-36.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-36.c
new file mode 100644
index 00000000000..7756bdbad46
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr112431-36.c
@@ -0,0 +1,107 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+
+#include "riscv_vector.h"
+
+size_t __attribute__ ((noinline))
+sumation (size_t sum0, size_t sum1, size_t sum2, size_t sum3, size_t sum4,
+	  size_t sum5, size_t sum6, size_t sum7,
+	  size_t sum0_2, size_t sum1_2, size_t sum2_2, size_t sum3_2, size_t sum4_2,
+	  size_t sum5_2, size_t sum6_2, size_t sum7_2)
+{
+  return sum0 + sum1 + sum2 + sum3 + sum4 + sum5 + sum6 + sum7
+  + sum0_2 + sum1_2 + sum2_2 + sum3_2 + sum4_2 + sum5_2 + sum6_2 + sum7_2;
+}
+
+size_t
+foo (char const *buf, size_t len)
+{
+  size_t sum = 0;
+  size_t vl = __riscv_vsetvlmax_e8m8 ();
+  size_t step = vl * 4;
+  const char *it = buf, *end = buf + len;
+  for (; it + step <= end;)
+    {
+      vfloat32m2_t v0 = __riscv_vle32_v_f32m2 ((void *) it, vl);
+      it += vl;
+      vfloat32m2_t v1 = __riscv_vle32_v_f32m2 ((void *) it, vl);
+      it += vl;
+      vfloat32m2_t v2 = __riscv_vle32_v_f32m2 ((void *) it, vl);
+      it += vl;
+      vfloat32m2_t v3 = __riscv_vle32_v_f32m2 ((void *) it, vl);
+      it += vl;
+      vfloat32m2_t v4 = __riscv_vle32_v_f32m2 ((void *) it, vl);
+      it += vl;
+      vfloat32m2_t v5 = __riscv_vle32_v_f32m2 ((void *) it, vl);
+      it += vl;
+      vfloat32m2_t v6 = __riscv_vle32_v_f32m2 ((void *) it, vl);
+      it += vl;
+      vfloat32m2_t v7 = __riscv_vle32_v_f32m2 ((void *) it, vl);
+      it += vl;
+
+      vfloat64m1_t vw0 = __riscv_vle64_v_f64m1 ((void *) it, vl);
+      it += vl;
+      vfloat64m1_t vw1 = __riscv_vle64_v_f64m1 ((void *) it, vl);
+      it += vl;
+      vfloat64m1_t vw2 = __riscv_vle64_v_f64m1 ((void *) it, vl);
+      it += vl;
+      vfloat64m1_t vw3 = __riscv_vle64_v_f64m1 ((void *) it, vl);
+      it += vl;
+      vfloat64m1_t vw4 = __riscv_vle64_v_f64m1 ((void *) it, vl);
+      it += vl;
+      vfloat64m1_t vw5 = __riscv_vle64_v_f64m1 ((void *) it, vl);
+      it += vl;
+      vfloat64m1_t vw6 = __riscv_vle64_v_f64m1 ((void *) it, vl);
+      it += vl;
+      vfloat64m1_t vw7 = __riscv_vle64_v_f64m1 ((void *) it, vl);
+      it += vl;
+      
+      asm volatile("nop" ::: "memory");
+      vfloat64m1_t vw0_2 = __riscv_vfwredusum_vs_f32m2_f64m1 (v0, vw0, vl);
+      vfloat64m1_t vw1_2 = __riscv_vfwredusum_vs_f32m2_f64m1 (v1, vw1, vl);
+      vfloat64m1_t vw2_2 = __riscv_vfwredusum_vs_f32m2_f64m1 (v2, vw2, vl);
+      vfloat64m1_t vw3_2 = __riscv_vfwredusum_vs_f32m2_f64m1 (v3, vw3, vl);
+      vfloat64m1_t vw4_2 = __riscv_vfwredusum_vs_f32m2_f64m1 (v4, vw4, vl);
+      vfloat64m1_t vw5_2 = __riscv_vfwredusum_vs_f32m2_f64m1 (v5, vw5, vl);
+      vfloat64m1_t vw6_2 = __riscv_vfwredusum_vs_f32m2_f64m1 (v6, vw6, vl);
+      vfloat64m1_t vw7_2 = __riscv_vfwredusum_vs_f32m2_f64m1 (v7, vw7, vl);
+      
+      vw0 = __riscv_vfwredusum_vs_f32m2_f64m1 (v0, vw0_2, vl);
+      vw1 = __riscv_vfwredusum_vs_f32m2_f64m1 (v1, vw1_2, vl);
+      vw2 = __riscv_vfwredusum_vs_f32m2_f64m1 (v2, vw2_2, vl);
+      vw3 = __riscv_vfwredusum_vs_f32m2_f64m1 (v3, vw3_2, vl);
+      vw4 = __riscv_vfwredusum_vs_f32m2_f64m1 (v4, vw4_2, vl);
+      vw5 = __riscv_vfwredusum_vs_f32m2_f64m1 (v5, vw5_2, vl);
+      vw6 = __riscv_vfwredusum_vs_f32m2_f64m1 (v6, vw6_2, vl);
+      vw7 = __riscv_vfwredusum_vs_f32m2_f64m1 (v7, vw7_2, vl);
+
+      asm volatile("nop" ::: "memory");
+      size_t sum0 = __riscv_vfmv_f_s_f64m1_f64 (vw0);
+      size_t sum1 = __riscv_vfmv_f_s_f64m1_f64 (vw1);
+      size_t sum2 = __riscv_vfmv_f_s_f64m1_f64 (vw2);
+      size_t sum3 = __riscv_vfmv_f_s_f64m1_f64 (vw3);
+      size_t sum4 = __riscv_vfmv_f_s_f64m1_f64 (vw4);
+      size_t sum5 = __riscv_vfmv_f_s_f64m1_f64 (vw5);
+      size_t sum6 = __riscv_vfmv_f_s_f64m1_f64 (vw6);
+      size_t sum7 = __riscv_vfmv_f_s_f64m1_f64 (vw7);
+
+      size_t sum0_2 = __riscv_vfmv_f_s_f64m1_f64 (vw0_2);
+      size_t sum1_2 = __riscv_vfmv_f_s_f64m1_f64 (vw1_2);
+      size_t sum2_2 = __riscv_vfmv_f_s_f64m1_f64 (vw2_2);
+      size_t sum3_2 = __riscv_vfmv_f_s_f64m1_f64 (vw3_2);
+      size_t sum4_2 = __riscv_vfmv_f_s_f64m1_f64 (vw4_2);
+      size_t sum5_2 = __riscv_vfmv_f_s_f64m1_f64 (vw5_2);
+      size_t sum6_2 = __riscv_vfmv_f_s_f64m1_f64 (vw6_2);
+      size_t sum7_2 = __riscv_vfmv_f_s_f64m1_f64 (vw7_2);
+
+      sum += sumation (sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7,
+      sum0_2, sum1_2, sum2_2, sum3_2, sum4_2, sum5_2, sum6_2, sum7_2);
+    }
+  return sum;
+}
+
+/* { dg-final { scan-assembler-not {vmv1r} } } */
+/* { dg-final { scan-assembler-not {vmv2r} } } */
+/* { dg-final { scan-assembler-not {vmv4r} } } */
+/* { dg-final { scan-assembler-not {vmv8r} } } */
+/* { dg-final { scan-assembler-not {csrr} } } */
-- 
2.36.3



* Re: [PATCH] RISC-V: Remove earlyclobber from widen reduction
  2023-12-04  8:51 [PATCH] RISC-V: Remove earlyclobber from widen reduction Juzhe-Zhong
@ 2023-12-04 10:34 ` Robin Dapp
From: Robin Dapp @ 2023-12-04 10:34 UTC (permalink / raw)
  To: Juzhe-Zhong, gcc-patches; +Cc: rdapp.gcc, kito.cheng, kito.cheng, jeffreyalaw

LGTM.

Regards
 Robin


