public inbox for gcc-patches@gcc.gnu.org
* [PATCH] RISC-V: Fix reg order of RVV registers.
@ 2023-03-13  8:19 juzhe.zhong
  2023-03-14 18:01 ` Jeff Law
  2023-04-18 13:50 ` Jeff Law
  0 siblings, 2 replies; 5+ messages in thread
From: juzhe.zhong @ 2023-03-13  8:19 UTC (permalink / raw)
  To: gcc-patches; +Cc: kito.cheng, Ju-Zhe Zhong, kito-cheng

From: Ju-Zhe Zhong <juzhe.zhong@rivai.ai>

Co-authored-by: kito-cheng <kito.cheng@sifive.com>
Co-authored-by: kito-cheng <kito.cheng@gmail.com>

Consider this case:
void f19 (void *base,void *base2,void *out,size_t vl, int n)
{
    vuint64m8_t bindex = __riscv_vle64_v_u64m8 (base + 100, vl);
    for (int i = 0; i < n; i++){
      vbool8_t m = __riscv_vlm_v_b8 (base + i, vl);
      vuint64m8_t v = __riscv_vluxei64_v_u64m8_m(m,base,bindex,vl);
      vuint64m8_t v2 = __riscv_vle64_v_u64m8_tu (v, base2 + i, vl);
      vint8m1_t v3 = __riscv_vluxei64_v_i8m1_m(m,base,v,vl);
      vint8m1_t v4 = __riscv_vluxei64_v_i8m1_m(m,base,v2,vl);
      __riscv_vse8_v_i8m1 (out + 100*i,v3,vl);
      __riscv_vse8_v_i8m1 (out + 222*i,v4,vl);
    }
}

Due to the current suboptimal register allocation order, this case
produces unnecessary register spills.

Fixing the order helps the register allocator (RA).
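
The underlying constraint is that a masked RVV instruction can take its
mask only in v0, so handing v0 out last keeps it free for masking.
Below is a minimal sketch of such a masked access, for illustration
only (it is not part of the patch; the function and variable names are
invented):

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

/* Illustration only: the mask m must live in v0 when the masked
   gather below is emitted, so v0 should not already be occupied by
   an ordinary vector value at that point.  */
void
masked_gather (uint8_t *flags, void *base, uint64_t *offsets,
               int8_t *out, size_t vl)
{
  vbool8_t m = __riscv_vlm_v_b8 (flags, vl);
  vuint64m8_t bindex = __riscv_vle64_v_u64m8 (offsets, vl);
  vint8m1_t v = __riscv_vluxei64_v_i8m1_m (m, base, bindex, vl);
  __riscv_vse8_v_i8m1 (out, v, vl);
}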

Signed-off-by: Ju-Zhe Zhong <juzhe.zhong@rivai.ai>
Co-authored-by: kito-cheng <kito.cheng@sifive.com>
Co-authored-by: kito-cheng <kito.cheng@gmail.com>

gcc/ChangeLog:

        * config/riscv/riscv.h (enum reg_class): Fix reg order.

gcc/testsuite/ChangeLog:

        * gcc.target/riscv/rvv/base/spill-1.c: Adapt test.
        * gcc.target/riscv/rvv/base/spill-2.c: Ditto.
        * gcc.target/riscv/rvv/base/spill-3.c: Ditto.
        * gcc.target/riscv/rvv/base/spill-4.c: Ditto.
        * gcc.target/riscv/rvv/base/spill-5.c: Ditto.
        * gcc.target/riscv/rvv/base/spill-6.c: Ditto.
        * gcc.target/riscv/rvv/base/spill-7.c: Ditto.

---
 gcc/config/riscv/riscv.h                      | 13 ++--
 .../gcc.target/riscv/rvv/base/spill-1.c       | 62 +++++++++----------
 .../gcc.target/riscv/rvv/base/spill-2.c       | 48 +++++++-------
 .../gcc.target/riscv/rvv/base/spill-3.c       | 32 +++++-----
 .../gcc.target/riscv/rvv/base/spill-4.c       | 16 ++---
 .../gcc.target/riscv/rvv/base/spill-5.c       | 16 ++---
 .../gcc.target/riscv/rvv/base/spill-6.c       |  8 +--
 .../gcc.target/riscv/rvv/base/spill-7.c       | 56 ++++++++---------
 8 files changed, 125 insertions(+), 126 deletions(-)

diff --git a/gcc/config/riscv/riscv.h b/gcc/config/riscv/riscv.h
index 5bc7f2f467d..e14bccc0b5d 100644
--- a/gcc/config/riscv/riscv.h
+++ b/gcc/config/riscv/riscv.h
@@ -553,13 +553,12 @@ enum reg_class
   60, 61, 62, 63,							\
   /* Call-saved FPRs.  */						\
   40, 41, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,			\
-  /* V24 ~ V31.  */							\
-  120, 121, 122, 123, 124, 125, 126, 127,				\
-  /* V8 ~ V23.  */							\
-  104, 105, 106, 107, 108, 109, 110, 111,				\
-  112, 113, 114, 115, 116, 117, 118, 119,				\
-  /* V0 ~ V7.  */							\
-  96, 97, 98, 99, 100, 101, 102, 103,					\
+  /* v1 ~ v31 vector registers.  */					\
+  97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110,	\
+  111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123,	\
+  124, 125, 126, 127,							\
+  /* The vector mask register.  */					\
+  96,									\
   /* None of the remaining classes have defined call-saved		\
      registers.  */							\
   64, 65, 66, 67							\
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/spill-1.c b/gcc/testsuite/gcc.target/riscv/rvv/base/spill-1.c
index b1220c48f1b..ec38a828ee7 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/base/spill-1.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/spill-1.c
@@ -15,15 +15,15 @@
 **  slli\ta3,a2,3
 **  sub\ta3,a3,a2
 **  add\ta3,a3,sp
-**  vse8.v\tv24,0\(a3\)
+**  vse8.v\tv[0-9]+,0\(a3\)
 **  ...
 **  csrr\ta2,vlenb
 **  srli\ta2,a2,3
 **  slli\ta3,a2,3
 **  sub\ta3,a3,a2
 **  add\ta3,a3,sp
-**  vle8.v\tv24,0\(a3\)
-**  vse8.v\tv24,0\(a1\)
+**  vle8.v\tv[0-9]+,0\(a3\)
+**  vse8.v\tv[0-9]+,0\(a1\)
 **  csrr\tt0,vlenb
 **  add\tsp,sp,t0
 **  ...
@@ -42,21 +42,21 @@ spill_1 (int8_t *in, int8_t *out)
 **  csrr\tt0,vlenb
 **  sub\tsp,sp,t0
 **  vsetvli\ta5,zero,e8,mf4,ta,ma
-**  vle8.v\tv24,0\(a0\)
+**  vle8.v\tv[0-9]+,0\(a0\)
 **  csrr\ta2,vlenb
 **  srli\ta2,a2,2
 **  slli\ta3,a2,2
 **  sub\ta3,a3,a2
 **  add\ta3,a3,sp
-**  vse8.v\tv24,0\(a3\)
+**  vse8.v\tv[0-9]+,0\(a3\)
 **  ...
 **  csrr\ta2,vlenb
 **  srli\ta2,a2,2
 **  slli\ta3,a2,2
 **  sub\ta3,a3,a2
 **  add\ta3,a3,sp
-**  vle8.v\tv24,0\(a3\)
-**  vse8.v\tv24,0\(a1\)
+**  vle8.v\tv[0-9]+,0\(a3\)
+**  vse8.v\tv[0-9]+,0\(a1\)
 **  csrr\tt0,vlenb
 **  add\tsp,sp,t0
 **  ...
@@ -75,17 +75,17 @@ spill_2 (int8_t *in, int8_t *out)
 ** csrr\tt0,vlenb
 ** sub\tsp,sp,t0
 ** vsetvli\ta5,zero,e8,mf2,ta,ma
-** vle8.v\tv24,0\(a0\)
+** vle8.v\tv[0-9]+,0\(a0\)
 ** csrr\ta3,vlenb
 ** srli\ta3,a3,1
 ** add\ta3,a3,sp
-** vse8.v\tv24,0\(a3\)
+** vse8.v\tv[0-9]+,0\(a3\)
 **  ...
 ** csrr\ta3,vlenb
 ** srli\ta3,a3,1
 ** add\ta3,a3,sp
-** vle8.v\tv24,0\(a3\)
-** vse8.v\tv24,0\(a1\)
+** vle8.v\tv[0-9]+,0\(a3\)
+** vse8.v\tv[0-9]+,0\(a1\)
 ** csrr\tt0,vlenb
 ** add\tsp,sp,t0
 **  ...
@@ -104,7 +104,7 @@ spill_3 (int8_t *in, int8_t *out)
 **  csrr\tt0,vlenb
 **  sub\tsp,sp,t0
 **  ...
-**  vs1r.v\tv24,0\(sp\)
+**  vs1r.v\tv[0-9]+,0\(sp\)
 **  ...
 **  vl1re8.v\tv2,0\(sp\)
 **  vs1r.v\tv2,0\(a1\)
@@ -128,7 +128,7 @@ spill_4 (int8_t *in, int8_t *out)
 **  slli\tt1,t0,1
 **  sub\tsp,sp,t1
 **  ...
-**  vs2r.v\tv24,0\(sp\)
+**  vs2r.v\tv[0-9]+,0\(sp\)
 **  ...
 **  vl2re8.v\tv4,0\(sp\)
 **  vs2r.v\tv4,0\(a1\)
@@ -152,7 +152,7 @@ spill_5 (int8_t *in, int8_t *out)
 **  slli\tt1,t0,2
 **  sub\tsp,sp,t1
 **  ...
-**  vs4r.v\tv24,0\(sp\)
+**  vs4r.v\tv[0-9]+,0\(sp\)
 **  ...
 **  vl4re8.v\tv8,0\(sp\)
 **  vs4r.v\tv8,0\(a1\)
@@ -176,7 +176,7 @@ spill_6 (int8_t *in, int8_t *out)
 **  slli\tt1,t0,3
 **  sub\tsp,sp,t1
 **  ...
-**  vs8r.v\tv24,0\(sp\)
+**  vs8r.v\tv[0-9]+,0\(sp\)
 **  ...
 **  vl8re8.v\tv16,0\(sp\)
 **  vs8r.v\tv16,0\(a1\)
@@ -199,21 +199,21 @@ spill_7 (int8_t *in, int8_t *out)
 ** csrr\tt0,vlenb
 ** sub\tsp,sp,t0
 ** vsetvli\ta5,zero,e8,mf8,ta,ma
-** vle8.v\tv24,0\(a0\)
+** vle8.v\tv[0-9]+,0\(a0\)
 ** csrr\ta2,vlenb
 ** srli\ta2,a2,3
 ** slli\ta3,a2,3
 ** sub\ta3,a3,a2
 ** add\ta3,a3,sp
-** vse8.v\tv24,0\(a3\)
+** vse8.v\tv[0-9]+,0\(a3\)
 **  ...
 **  csrr\ta2,vlenb
 **  srli\ta2,a2,3
 **  slli\ta3,a2,3
 **  sub\ta3,a3,a2
 **  add\ta3,a3,sp
-**  vle8.v\tv24,0\(a3\)
-**  vse8.v\tv24,0\(a1\)
+**  vle8.v\tv[0-9]+,0\(a3\)
+**  vse8.v\tv[0-9]+,0\(a1\)
 **  csrr\tt0,vlenb
 **  add\tsp,sp,t0
 **  ...
@@ -232,21 +232,21 @@ spill_8 (uint8_t *in, uint8_t *out)
 **  csrr\tt0,vlenb
 **  sub\tsp,sp,t0
 **  vsetvli\ta5,zero,e8,mf4,ta,ma
-**  vle8.v\tv24,0\(a0\)
+**  vle8.v\tv[0-9]+,0\(a0\)
 **  csrr\ta2,vlenb
 **  srli\ta2,a2,2
 **  slli\ta3,a2,2
 **  sub\ta3,a3,a2
 **  add\ta3,a3,sp
-**  vse8.v\tv24,0\(a3\)
+**  vse8.v\tv[0-9]+,0\(a3\)
 **  ...
 **  csrr\ta2,vlenb
 **  srli\ta2,a2,2
 **  slli\ta3,a2,2
 **  sub\ta3,a3,a2
 **  add\ta3,a3,sp
-**  vle8.v\tv24,0\(a3\)
-**  vse8.v\tv24,0\(a1\)
+**  vle8.v\tv[0-9]+,0\(a3\)
+**  vse8.v\tv[0-9]+,0\(a1\)
 **  csrr\tt0,vlenb
 **  add\tsp,sp,t0
 **  ...
@@ -265,17 +265,17 @@ spill_9 (uint8_t *in, uint8_t *out)
 ** csrr\tt0,vlenb
 ** sub\tsp,sp,t0
 ** vsetvli\ta5,zero,e8,mf2,ta,ma
-** vle8.v\tv24,0\(a0\)
+** vle8.v\tv[0-9]+,0\(a0\)
 ** csrr\ta3,vlenb
 ** srli\ta3,a3,1
 ** add\ta3,a3,sp
-** vse8.v\tv24,0\(a3\)
+** vse8.v\tv[0-9]+,0\(a3\)
 **  ...
 **  csrr\ta3,vlenb
 **  srli\ta3,a3,1
 **  add\ta3,a3,sp
-**  vle8.v\tv24,0\(a3\)
-**  vse8.v\tv24,0\(a1\)
+**  vle8.v\tv[0-9]+,0\(a3\)
+**  vse8.v\tv[0-9]+,0\(a1\)
 **  csrr\tt0,vlenb
 **  add\tsp,sp,t0
 **  ...
@@ -294,7 +294,7 @@ spill_10 (uint8_t *in, uint8_t *out)
 **  csrr\tt0,vlenb
 **  sub\tsp,sp,t0
 **  ...
-**  vs1r.v\tv24,0\(sp\)
+**  vs1r.v\tv[0-9]+,0\(sp\)
 **  ...
 **  vl1re8.v\tv2,0\(sp\)
 **  vs1r.v\tv2,0\(a1\)
@@ -318,7 +318,7 @@ spill_11 (uint8_t *in, uint8_t *out)
 **  slli\tt1,t0,1
 **  sub\tsp,sp,t1
 **  ...
-**  vs2r.v\tv24,0\(sp\)
+**  vs2r.v\tv[0-9]+,0\(sp\)
 **  ...
 **  vl2re8.v\tv4,0\(sp\)
 **  vs2r.v\tv4,0\(a1\)
@@ -342,7 +342,7 @@ spill_12 (uint8_t *in, uint8_t *out)
 **  slli\tt1,t0,2
 **  sub\tsp,sp,t1
 **  ...
-**  vs4r.v\tv24,0\(sp\)
+**  vs4r.v\tv[0-9]+,0\(sp\)
 **  ...
 **  vl4re8.v\tv8,0\(sp\)
 **  vs4r.v\tv8,0\(a1\)
@@ -366,7 +366,7 @@ spill_13 (uint8_t *in, uint8_t *out)
 **  slli\tt1,t0,3
 **  sub\tsp,sp,t1
 **  ...
-**  vs8r.v\tv24,0\(sp\)
+**  vs8r.v\tv[0-9]+,0\(sp\)
 **  ...
 **  vl8re8.v\tv16,0\(sp\)
 **  vs8r.v\tv16,0\(a1\)
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/spill-2.c b/gcc/testsuite/gcc.target/riscv/rvv/base/spill-2.c
index ca1904b830d..147a727b134 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/base/spill-2.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/spill-2.c
@@ -10,21 +10,21 @@
 **  csrr\tt0,vlenb
 **  sub\tsp,sp,t0
 **  vsetvli\ta5,zero,e16,mf4,ta,ma
-**  vle16.v\tv24,0\(a0\)
+**  vle16.v\tv[0-9]+,0\(a0\)
 **  csrr\ta2,vlenb
 **  srli\ta2,a2,2
 **  slli\ta3,a2,2
 **  sub\ta3,a3,a2
 **  add\ta3,a3,sp
-**  vse16.v\tv24,0\(a3\)
+**  vse16.v\tv[0-9]+,0\(a3\)
 **  ...
 **  csrr\ta2,vlenb
 **  srli\ta2,a2,2
 **  slli\ta3,a2,2
 **  sub\ta3,a3,a2
 **  add\ta3,a3,sp
-**  vle16.v\tv24,0\(a3\)
-**  vse16.v\tv24,0\(a1\)
+**  vle16.v\tv[0-9]+,0\(a3\)
+**  vse16.v\tv[0-9]+,0\(a1\)
 **  csrr\tt0,vlenb
 **  add\tsp,sp,t0
 **  ...
@@ -43,17 +43,17 @@ spill_2 (int16_t *in, int16_t *out)
 **  csrr\tt0,vlenb
 **  sub\tsp,sp,t0
 **  vsetvli\ta5,zero,e16,mf2,ta,ma
-**  vle16.v\tv24,0\(a0\)
+**  vle16.v\tv[0-9]+,0\(a0\)
 **  csrr\ta3,vlenb
 **  srli\ta3,a3,1
 **  add\ta3,a3,sp
-**  vse16.v\tv24,0\(a3\)
+**  vse16.v\tv[0-9]+,0\(a3\)
 **  ...
 **  csrr\ta3,vlenb
 **  srli\ta3,a3,1
 **  add\ta3,a3,sp
-**  vle16.v\tv24,0\(a3\)
-**  vse16.v\tv24,0\(a1\)
+**  vle16.v\tv[0-9]+,0\(a3\)
+**  vse16.v\tv[0-9]+,0\(a1\)
 **  csrr\tt0,vlenb
 **  add\tsp,sp,t0
 **  ...
@@ -72,7 +72,7 @@ spill_3 (int16_t *in, int16_t *out)
 **  csrr\tt0,vlenb
 **  sub\tsp,sp,t0
 **  ...
-**  vs1r.v\tv24,0\(sp\)
+**  vs1r.v\tv[0-9]+,0\(sp\)
 **  ...
 **  vl1re16.v\tv2,0\(sp\)
 **  vs1r.v\tv2,0\(a1\)
@@ -96,7 +96,7 @@ spill_4 (int16_t *in, int16_t *out)
 **  slli\tt1,t0,1
 **  sub\tsp,sp,t1
 **  ...
-**  vs2r.v\tv24,0\(sp\)
+**  vs2r.v\tv[0-9]+,0\(sp\)
 **  ...
 **  vl2re16.v\tv4,0\(sp\)
 **  vs2r.v\tv4,0\(a1\)
@@ -120,7 +120,7 @@ spill_5 (int16_t *in, int16_t *out)
 **  slli\tt1,t0,2
 **  sub\tsp,sp,t1
 **  ...
-**  vs4r.v\tv24,0\(sp\)
+**  vs4r.v\tv[0-9]+,0\(sp\)
 **  ...
 **  vl4re16.v\tv8,0\(sp\)
 **  vs4r.v\tv8,0\(a1\)
@@ -144,7 +144,7 @@ spill_6 (int16_t *in, int16_t *out)
 **  slli\tt1,t0,3
 **  sub\tsp,sp,t1
 **  ...
-**  vs8r.v\tv24,0\(sp\)
+**  vs8r.v\tv[0-9]+,0\(sp\)
 **  ...
 **  vl8re16.v\tv16,0\(sp\)
 **  vs8r.v\tv16,0\(a1\)
@@ -167,21 +167,21 @@ spill_7 (int16_t *in, int16_t *out)
 **  csrr\tt0,vlenb
 **  sub\tsp,sp,t0
 **  vsetvli\ta5,zero,e16,mf4,ta,ma
-**  vle16.v\tv24,0\(a0\)
+**  vle16.v\tv[0-9]+,0\(a0\)
 **  csrr\ta2,vlenb
 **  srli\ta2,a2,2
 **  slli\ta3,a2,2
 **  sub\ta3,a3,a2
 **  add\ta3,a3,sp
-**  vse16.v\tv24,0\(a3\)
+**  vse16.v\tv[0-9]+,0\(a3\)
 **  ...
 **  csrr\ta2,vlenb
 **  srli\ta2,a2,2
 **  slli\ta3,a2,2
 **  sub\ta3,a3,a2
 **  add\ta3,a3,sp
-**  vle16.v\tv24,0\(a3\)
-**  vse16.v\tv24,0\(a1\)
+**  vle16.v\tv[0-9]+,0\(a3\)
+**  vse16.v\tv[0-9]+,0\(a1\)
 **  csrr\tt0,vlenb
 **  add\tsp,sp,t0
 **  ...
@@ -200,17 +200,17 @@ spill_9 (uint16_t *in, uint16_t *out)
 **  csrr\tt0,vlenb
 **  sub\tsp,sp,t0
 **  vsetvli\ta5,zero,e16,mf2,ta,ma
-**  vle16.v\tv24,0\(a0\)
+**  vle16.v\tv[0-9]+,0\(a0\)
 **  csrr\ta3,vlenb
 **  srli\ta3,a3,1
 **  add\ta3,a3,sp
-**  vse16.v\tv24,0\(a3\)
+**  vse16.v\tv[0-9]+,0\(a3\)
 **  ...
 **  csrr\ta3,vlenb
 **  srli\ta3,a3,1
 **  add\ta3,a3,sp
-**  vle16.v\tv24,0\(a3\)
-**  vse16.v\tv24,0\(a1\)
+**  vle16.v\tv[0-9]+,0\(a3\)
+**  vse16.v\tv[0-9]+,0\(a1\)
 **  csrr\tt0,vlenb
 **  add\tsp,sp,t0
 **  ...
@@ -229,7 +229,7 @@ spill_10 (uint16_t *in, uint16_t *out)
 **  csrr\tt0,vlenb
 **  sub\tsp,sp,t0
 **  ...
-**  vs1r.v\tv24,0\(sp\)
+**  vs1r.v\tv[0-9]+,0\(sp\)
 **  ...
 **  vl1re16.v\tv2,0\(sp\)
 **  vs1r.v\tv2,0\(a1\)
@@ -253,7 +253,7 @@ spill_11 (uint16_t *in, uint16_t *out)
 **  slli\tt1,t0,1
 **  sub\tsp,sp,t1
 **  ...
-**  vs2r.v\tv24,0\(sp\)
+**  vs2r.v\tv[0-9]+,0\(sp\)
 **  ...
 **  vl2re16.v\tv4,0\(sp\)
 **  vs2r.v\tv4,0\(a1\)
@@ -277,7 +277,7 @@ spill_12 (uint16_t *in, uint16_t *out)
 **  slli\tt1,t0,2
 **  sub\tsp,sp,t1
 **  ...
-**  vs4r.v\tv24,0\(sp\)
+**  vs4r.v\tv[0-9]+,0\(sp\)
 **  ...
 **  vl4re16.v\tv8,0\(sp\)
 **  vs4r.v\tv8,0\(a1\)
@@ -301,7 +301,7 @@ spill_13 (uint16_t *in, uint16_t *out)
 **  slli\tt1,t0,3
 **  sub\tsp,sp,t1
 **  ...
-**  vs8r.v\tv24,0\(sp\)
+**  vs8r.v\tv[0-9]+,0\(sp\)
 **  ...
 **  vl8re16.v\tv16,0\(sp\)
 **  vs8r.v\tv16,0\(a1\)
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/spill-3.c b/gcc/testsuite/gcc.target/riscv/rvv/base/spill-3.c
index 2039ca34516..81d695a2a73 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/base/spill-3.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/spill-3.c
@@ -10,17 +10,17 @@
 **  csrr\tt0,vlenb
 **  sub\tsp,sp,t0
 **  vsetvli\ta5,zero,e32,mf2,ta,ma
-**  vle32.v\tv24,0\(a0\)
+**  vle32.v\tv[0-9]+,0\(a0\)
 **  csrr\ta3,vlenb
 **  srli\ta3,a3,1
 **  add\ta3,a3,sp
-**  vse32.v\tv24,0\(a3\)
+**  vse32.v\tv[0-9]+,0\(a3\)
 **  ...
 **  csrr\ta3,vlenb
 **  srli\ta3,a3,1
 **  add\ta3,a3,sp
-**  vle32.v\tv24,0\(a3\)
-**  vse32.v\tv24,0\(a1\)
+**  vle32.v\tv[0-9]+,0\(a3\)
+**  vse32.v\tv[0-9]+,0\(a1\)
 **  csrr\tt0,vlenb
 **  add\tsp,sp,t0
 **  ...
@@ -39,7 +39,7 @@ spill_3 (int32_t *in, int32_t *out)
 **  csrr\tt0,vlenb
 **  sub\tsp,sp,t0
 **  ...
-**  vs1r.v\tv24,0\(sp\)
+**  vs1r.v\tv[0-9]+,0\(sp\)
 **  ...
 **  vl1re32.v\tv2,0\(sp\)
 **  vs1r.v\tv2,0\(a1\)
@@ -63,7 +63,7 @@ spill_4 (int32_t *in, int32_t *out)
 **  slli\tt1,t0,1
 **  sub\tsp,sp,t1
 **  ...
-**  vs2r.v\tv24,0\(sp\)
+**  vs2r.v\tv[0-9]+,0\(sp\)
 **  ...
 **  vl2re32.v\tv4,0\(sp\)
 **  vs2r.v\tv4,0\(a1\)
@@ -87,7 +87,7 @@ spill_5 (int32_t *in, int32_t *out)
 **  slli\tt1,t0,2
 **  sub\tsp,sp,t1
 **  ...
-**  vs4r.v\tv24,0\(sp\)
+**  vs4r.v\tv[0-9]+,0\(sp\)
 **  ...
 **  vl4re32.v\tv8,0\(sp\)
 **  vs4r.v\tv8,0\(a1\)
@@ -111,7 +111,7 @@ spill_6 (int32_t *in, int32_t *out)
 **  slli\tt1,t0,3
 **  sub\tsp,sp,t1
 **  ...
-**  vs8r.v\tv24,0\(sp\)
+**  vs8r.v\tv[0-9]+,0\(sp\)
 **  ...
 **  vl8re32.v\tv16,0\(sp\)
 **  vs8r.v\tv16,0\(a1\)
@@ -134,17 +134,17 @@ spill_7 (int32_t *in, int32_t *out)
 **  csrr\tt0,vlenb
 **  sub\tsp,sp,t0
 **  vsetvli\ta5,zero,e32,mf2,ta,ma
-**  vle32.v\tv24,0\(a0\)
+**  vle32.v\tv[0-9]+,0\(a0\)
 **  csrr\ta3,vlenb
 **  srli\ta3,a3,1
 **  add\ta3,a3,sp
-**  vse32.v\tv24,0\(a3\)
+**  vse32.v\tv[0-9]+,0\(a3\)
 **  ...
 **  csrr\ta3,vlenb
 **  srli\ta3,a3,1
 **  add\ta3,a3,sp
-**  vle32.v\tv24,0\(a3\)
-**  vse32.v\tv24,0\(a1\)
+**  vle32.v\tv[0-9]+,0\(a3\)
+**  vse32.v\tv[0-9]+,0\(a1\)
 **  csrr\tt0,vlenb
 **  add\tsp,sp,t0
 **  ...
@@ -163,7 +163,7 @@ spill_10 (uint32_t *in, uint32_t *out)
 **  csrr\tt0,vlenb
 **  sub\tsp,sp,t0
 **  ...
-**  vs1r.v\tv24,0\(sp\)
+**  vs1r.v\tv[0-9]+,0\(sp\)
 **  ...
 **  vl1re32.v\tv2,0\(sp\)
 **  vs1r.v\tv2,0\(a1\)
@@ -187,7 +187,7 @@ spill_11 (uint32_t *in, uint32_t *out)
 **  slli\tt1,t0,1
 **  sub\tsp,sp,t1
 **  ...
-**  vs2r.v\tv24,0\(sp\)
+**  vs2r.v\tv[0-9]+,0\(sp\)
 **  ...
 **  vl2re32.v\tv4,0\(sp\)
 **  vs2r.v\tv4,0\(a1\)
@@ -211,7 +211,7 @@ spill_12 (uint32_t *in, uint32_t *out)
 **  slli\tt1,t0,2
 **  sub\tsp,sp,t1
 **  ...
-**  vs4r.v\tv24,0\(sp\)
+**  vs4r.v\tv[0-9]+,0\(sp\)
 **  ...
 **  vl4re32.v\tv8,0\(sp\)
 **  vs4r.v\tv8,0\(a1\)
@@ -235,7 +235,7 @@ spill_13 (uint32_t *in, uint32_t *out)
 **  slli\tt1,t0,3
 **  sub\tsp,sp,t1
 **  ...
-**  vs8r.v\tv24,0\(sp\)
+**  vs8r.v\tv[0-9]+,0\(sp\)
 **  ...
 **  vl8re32.v\tv16,0\(sp\)
 **  vs8r.v\tv16,0\(a1\)
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/spill-4.c b/gcc/testsuite/gcc.target/riscv/rvv/base/spill-4.c
index 83c80b0b045..12604cf85ff 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/base/spill-4.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/spill-4.c
@@ -10,7 +10,7 @@
 **  csrr\tt0,vlenb
 **  sub\tsp,sp,t0
 **  ...
-**  vs1r.v\tv24,0\(sp\)
+**  vs1r.v\tv[0-9]+,0\(sp\)
 **  ...
 **  vl1re64.v\tv2,0\(sp\)
 **  vs1r.v\tv2,0\(a1\)
@@ -34,7 +34,7 @@ spill_4 (int64_t *in, int64_t *out)
 **  slli\tt1,t0,1
 **  sub\tsp,sp,t1
 **  ...
-**  vs2r.v\tv24,0\(sp\)
+**  vs2r.v\tv[0-9]+,0\(sp\)
 **  ...
 **  vl2re64.v\tv4,0\(sp\)
 **  vs2r.v\tv4,0\(a1\)
@@ -58,7 +58,7 @@ spill_5 (int64_t *in, int64_t *out)
 **  slli\tt1,t0,2
 **  sub\tsp,sp,t1
 **  ...
-**  vs4r.v\tv24,0\(sp\)
+**  vs4r.v\tv[0-9]+,0\(sp\)
 **  ...
 **  vl4re64.v\tv8,0\(sp\)
 **  vs4r.v\tv8,0\(a1\)
@@ -82,7 +82,7 @@ spill_6 (int64_t *in, int64_t *out)
 **  slli\tt1,t0,3
 **  sub\tsp,sp,t1
 **  ...
-**  vs8r.v\tv24,0\(sp\)
+**  vs8r.v\tv[0-9]+,0\(sp\)
 **  ...
 **  vl8re64.v\tv16,0\(sp\)
 **  vs8r.v\tv16,0\(a1\)
@@ -105,7 +105,7 @@ spill_7 (int64_t *in, int64_t *out)
 **  csrr\tt0,vlenb
 **  sub\tsp,sp,t0
 **  ...
-**  vs1r.v\tv24,0\(sp\)
+**  vs1r.v\tv[0-9]+,0\(sp\)
 **  ...
 **  vl1re64.v\tv2,0\(sp\)
 **  vs1r.v\tv2,0\(a1\)
@@ -129,7 +129,7 @@ spill_11 (uint64_t *in, uint64_t *out)
 **  slli\tt1,t0,1
 **  sub\tsp,sp,t1
 **  ...
-**  vs2r.v\tv24,0\(sp\)
+**  vs2r.v\tv[0-9]+,0\(sp\)
 **  ...
 **  vl2re64.v\tv4,0\(sp\)
 **  vs2r.v\tv4,0\(a1\)
@@ -153,7 +153,7 @@ spill_12 (uint64_t *in, uint64_t *out)
 **  slli\tt1,t0,2
 **  sub\tsp,sp,t1
 **  ...
-**  vs4r.v\tv24,0\(sp\)
+**  vs4r.v\tv[0-9]+,0\(sp\)
 **  ...
 **  vl4re64.v\tv8,0\(sp\)
 **  vs4r.v\tv8,0\(a1\)
@@ -177,7 +177,7 @@ spill_13 (uint64_t *in, uint64_t *out)
 **  slli\tt1,t0,3
 **  sub\tsp,sp,t1
 **  ...
-**  vs8r.v\tv24,0\(sp\)
+**  vs8r.v\tv[0-9]+,0\(sp\)
 **  ...
 **  vl8re64.v\tv16,0\(sp\)
 **  vs8r.v\tv16,0\(a1\)
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/spill-5.c b/gcc/testsuite/gcc.target/riscv/rvv/base/spill-5.c
index 3c228a00c48..8ec7a2d4b2b 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/base/spill-5.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/spill-5.c
@@ -10,17 +10,17 @@
 **  csrr\tt0,vlenb
 **  sub\tsp,sp,t0
 **  vsetvli\ta5,zero,e32,mf2,ta,ma
-**  vle32.v\tv24,0\(a0\)
+**  vle32.v\tv[0-9]+,0\(a0\)
 **  csrr\ta3,vlenb
 **  srli\ta3,a3,1
 **  add\ta3,a3,sp
-**  vse32.v\tv24,0\(a3\)
+**  vse32.v\tv[0-9]+,0\(a3\)
 **  ...
 **  csrr\ta3,vlenb
 **  srli\ta3,a3,1
 **  add\ta3,a3,sp
-**  vle32.v\tv24,0\(a3\)
-**  vse32.v\tv24,0\(a1\)
+**  vle32.v\tv[0-9]+,0\(a3\)
+**  vse32.v\tv[0-9]+,0\(a1\)
 **  csrr\tt0,vlenb
 **  add\tsp,sp,t0
 **  ...
@@ -39,7 +39,7 @@ spill_3 (float *in, float *out)
 **  csrr\tt0,vlenb
 **  sub\tsp,sp,t0
 **  ...
-**  vs1r.v\tv24,0\(sp\)
+**  vs1r.v\tv[0-9]+,0\(sp\)
 **  ...
 **  vl1re32.v\tv2,0\(sp\)
 **  vs1r.v\tv2,0\(a1\)
@@ -63,7 +63,7 @@ spill_4 (float *in, float *out)
 **  slli\tt1,t0,1
 **  sub\tsp,sp,t1
 **  ...
-**  vs2r.v\tv24,0\(sp\)
+**  vs2r.v\tv[0-9]+,0\(sp\)
 **  ...
 **  vl2re32.v\tv4,0\(sp\)
 **  vs2r.v\tv4,0\(a1\)
@@ -87,7 +87,7 @@ spill_5 (float *in, float *out)
 **  slli\tt1,t0,2
 **  sub\tsp,sp,t1
 **  ...
-**  vs4r.v\tv24,0\(sp\)
+**  vs4r.v\tv[0-9]+,0\(sp\)
 **  ...
 **  vl4re32.v\tv8,0\(sp\)
 **  vs4r.v\tv8,0\(a1\)
@@ -111,7 +111,7 @@ spill_6 (float *in, float *out)
 **  slli\tt1,t0,3
 **  sub\tsp,sp,t1
 **  ...
-**  vs8r.v\tv24,0\(sp\)
+**  vs8r.v\tv[0-9]+,0\(sp\)
 **  ...
 **  vl8re32.v\tv16,0\(sp\)
 **  vs8r.v\tv16,0\(a1\)
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/spill-6.c b/gcc/testsuite/gcc.target/riscv/rvv/base/spill-6.c
index 340029da88b..72992a0830e 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/base/spill-6.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/spill-6.c
@@ -10,7 +10,7 @@
 **  csrr\tt0,vlenb
 **  sub\tsp,sp,t0
 **  ...
-**  vs1r.v\tv24,0\(sp\)
+**  vs1r.v\tv[0-9]+,0\(sp\)
 **  ...
 **  vl1re64.v\tv2,0\(sp\)
 **  vs1r.v\tv2,0\(a1\)
@@ -34,7 +34,7 @@ spill_4 (double *in, double *out)
 **  slli\tt1,t0,1
 **  sub\tsp,sp,t1
 **  ...
-**  vs2r.v\tv24,0\(sp\)
+**  vs2r.v\tv[0-9]+,0\(sp\)
 **  ...
 **  vl2re64.v\tv4,0\(sp\)
 **  vs2r.v\tv4,0\(a1\)
@@ -58,7 +58,7 @@ spill_5 (double *in, double *out)
 **  slli\tt1,t0,2
 **  sub\tsp,sp,t1
 **  ...
-**  vs4r.v\tv24,0\(sp\)
+**  vs4r.v\tv[0-9]+,0\(sp\)
 **  ...
 **  vl4re64.v\tv8,0\(sp\)
 **  vs4r.v\tv8,0\(a1\)
@@ -82,7 +82,7 @@ spill_6 (double *in, double *out)
 **  slli\tt1,t0,3
 **  sub\tsp,sp,t1
 **  ...
-**  vs8r.v\tv24,0\(sp\)
+**  vs8r.v\tv[0-9]+,0\(sp\)
 **  ...
 **  vl8re64.v\tv16,0\(sp\)
 **  vs8r.v\tv16,0\(a1\)
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/spill-7.c b/gcc/testsuite/gcc.target/riscv/rvv/base/spill-7.c
index cf1eea2fa3f..e852a75578e 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/base/spill-7.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/spill-7.c
@@ -11,82 +11,82 @@
 **  slli\tt1,t0,4
 **  sub\tsp,sp,t1
 **  vsetvli\ta3,zero,e8,mf8,ta,ma
-**  vle8.v\tv24,0\(a0\)
+**  vle8.v\tv[0-9]+,0\(a0\)
 **  csrr\ta5,vlenb
 **  srli\ta5,a5,3
 **  add\ta5,a5,sp
-**  vse8.v\tv24,0\(a5\)
+**  vse8.v\tv[0-9]+,0\(a5\)
 **  addi\ta5,a0,1
 **  vsetvli\ta4,zero,e8,mf4,ta,ma
-**  vle8.v\tv24,0\(a5\)
+**  vle8.v\tv[0-9]+,0\(a5\)
 **  csrr\ta5,vlenb
 **  srli\ta5,a5,2
 **  add\ta5,a5,sp
-**  vse8.v\tv24,0\(a5\)
+**  vse8.v\tv[0-9]+,0\(a5\)
 **  addi\ta2,a0,2
 **  vsetvli\ta5,zero,e8,mf2,ta,ma
-**  vle8.v\tv24,0\(a2\)
+**  vle8.v\tv[0-9]+,0\(a2\)
 **  csrr\ta2,vlenb
 **  srli\ta2,a2,1
 **  add\ta2,a2,sp
-**  vse8.v\tv24,0\(a2\)
+**  vse8.v\tv[0-9]+,0\(a2\)
 **  addi\ta2,a0,3
-**  vl1re8.v\tv24,0\(a2\)
+**  vl1re8.v\tv[0-9]+,0\(a2\)
 **  csrr\ta2,vlenb
 **  add\ta2,a2,sp
-**  vs1r.v\tv24,0\(a2\)
+**  vs1r.v\tv[0-9]+,0\(a2\)
 **  addi\ta2,a0,4
-**  vl2re8.v\tv24,0\(a2\)
+**  vl2re8.v\tv[0-9]+,0\(a2\)
 **  csrr\tt3,vlenb
 **  slli\ta2,t3,1
 **  add\ta2,a2,sp
-**  vs2r.v\tv24,0\(a2\)
+**  vs2r.v\tv[0-9]+,0\(a2\)
 **  addi\ta2,a0,5
-**  vl4re8.v\tv24,0\(a2\)
+**  vl4re8.v\tv[0-9]+,0\(a2\)
 **  mv\ta2,t3
 **  slli\tt3,t3,2
 **  add\tt3,t3,sp
-**  vs4r.v\tv24,0\(t3\)
+**  vs4r.v\tv[0-9]+,0\(t3\)
 **  addi\ta0,a0,6
-**  vl8re8.v\tv24,0\(a0\)
+**  vl8re8.v\tv[0-9]+,0\(a0\)
 **  slli\ta0,a2,3
 **  add\ta0,a0,sp
-**  vs8r.v\tv24,0\(a0\)
+**  vs8r.v\tv[0-9]+,0\(a0\)
 **  ...
 **  srli\ta0,a2,3
 **  add\ta0,a0,sp
 **  ...
-**  vle8.v\tv27,0\(a0\)
-**  vse8.v\tv27,0\(a1\)
+**  vle8.v\tv[0-9]+,0\(a0\)
+**  vse8.v\tv[0-9]+,0\(a1\)
 **  addi\ta3,a1,1
 **  srli\ta0,a2,2
 **  add\ta0,a0,sp
 **  ...
-**  vle8.v\tv27,0\(a0\)
-**  vse8.v\tv27,0\(a3\)
+**  vle8.v\tv[0-9]+,0\(a0\)
+**  vse8.v\tv[0-9]+,0\(a3\)
 **  addi\ta4,a1,2
 **  srli\ta3,a2,1
 **  add\ta3,a3,sp
 **  ...
-**  vle8.v\tv27,0\(a3\)
-**  vse8.v\tv27,0\(a4\)
+**  vle8.v\tv[0-9]+,0\(a3\)
+**  vse8.v\tv[0-9]+,0\(a4\)
 **  addi\ta5,a1,3
 **  add\ta4,a2,sp
-**  vl1re8.v\tv25,0\(a4\)
-**  vs1r.v\tv25,0\(a5\)
+**  vl1re8.v\tv[0-9]+,0\(a4\)
+**  vs1r.v\tv[0-9]+,0\(a5\)
 **  addi\ta5,a1,4
 **  slli\ta4,a2,1
 **  add\ta4,a4,sp
-**  vl2re8.v\tv26,0\(a4\)
-**  vs2r.v\tv26,0\(a5\)
+**  vl2re8.v\tv[0-9]+,0\(a4\)
+**  vs2r.v\tv[0-9]+,0\(a5\)
 **  addi\ta5,a1,5
-**  vl4re8.v\tv28,0\(t3\)
-**  vs4r.v\tv28,0\(a5\)
+**  vl4re8.v\tv[0-9]+,0\(t3\)
+**  vs4r.v\tv[0-9]+,0\(a5\)
 **  addi\ta1,a1,6
 **  slli\ta5,a2,3
 **  add\ta5,a5,sp
-**  vl8re8.v\tv24,0\(a5\)
-**  vs8r.v\tv24,0\(a1\)
+**  vl8re8.v\tv[0-9]+,0\(a5\)
+**  vs8r.v\tv[0-9]+,0\(a1\)
 **  csrr\tt0,vlenb
 **  slli\tt1,t0,4
 **  add\tsp,sp,t1
-- 
2.36.3



* Re: [PATCH] RISC-V: Fix reg order of RVV registers.
  2023-03-13  8:19 [PATCH] RISC-V: Fix reg order of RVV registers juzhe.zhong
@ 2023-03-14 18:01 ` Jeff Law
  2023-03-15 13:48   ` Kito Cheng
  2023-04-18 13:50 ` Jeff Law
  1 sibling, 1 reply; 5+ messages in thread
From: Jeff Law @ 2023-03-14 18:01 UTC (permalink / raw)
  To: juzhe.zhong, gcc-patches; +Cc: kito.cheng, kito-cheng



On 3/13/23 02:19, juzhe.zhong@rivai.ai wrote:
> From: Ju-Zhe Zhong <juzhe.zhong@rivai.ai>
> 
> Co-authored-by: kito-cheng <kito.cheng@sifive.com>
> Co-authored-by: kito-cheng <kito.cheng@gmail.com>
> 
> Consider this case:
> void f19 (void *base,void *base2,void *out,size_t vl, int n)
> {
>      vuint64m8_t bindex = __riscv_vle64_v_u64m8 (base + 100, vl);
>      for (int i = 0; i < n; i++){
>        vbool8_t m = __riscv_vlm_v_b8 (base + i, vl);
>        vuint64m8_t v = __riscv_vluxei64_v_u64m8_m(m,base,bindex,vl);
>        vuint64m8_t v2 = __riscv_vle64_v_u64m8_tu (v, base2 + i, vl);
>        vint8m1_t v3 = __riscv_vluxei64_v_i8m1_m(m,base,v,vl);
>        vint8m1_t v4 = __riscv_vluxei64_v_i8m1_m(m,base,v2,vl);
>        __riscv_vse8_v_i8m1 (out + 100*i,v3,vl);
>        __riscv_vse8_v_i8m1 (out + 222*i,v4,vl);
>      }
> }
> 
> Due to the current suboptimal register allocation order, this case
> produces unnecessary register spills.
> 
> Fixing the order helps the register allocator (RA).
> 
> Signed-off-by: Ju-Zhe Zhong <juzhe.zhong@rivai.ai>
> Co-authored-by: kito-cheng <kito.cheng@sifive.com>
> Co-authored-by: kito-cheng <kito.cheng@gmail.com>
> 
> gcc/ChangeLog:
> 
>          * config/riscv/riscv.h (enum reg_class): Fix reg order.
> 
> gcc/testsuite/ChangeLog:
> 
>          * gcc.target/riscv/rvv/base/spill-1.c: Adapt test.
>          * gcc.target/riscv/rvv/base/spill-2.c: Ditto.
>          * gcc.target/riscv/rvv/base/spill-3.c: Ditto.
>          * gcc.target/riscv/rvv/base/spill-4.c: Ditto.
>          * gcc.target/riscv/rvv/base/spill-5.c: Ditto.
>          * gcc.target/riscv/rvv/base/spill-6.c: Ditto.
>          * gcc.target/riscv/rvv/base/spill-7.c: Ditto.
Are you OK with deferring this to gcc-14?

jeff


* Re: [PATCH] RISC-V: Fix reg order of RVV registers.
  2023-03-14 18:01 ` Jeff Law
@ 2023-03-15 13:48   ` Kito Cheng
  0 siblings, 0 replies; 5+ messages in thread
From: Kito Cheng @ 2023-03-15 13:48 UTC (permalink / raw)
  To: Jeff Law; +Cc: juzhe.zhong, gcc-patches, kito-cheng

Hi Jeff:

We promised to commit only intrinsic implementation and bug fixes at
this moment, so yes, those optimizations and non-bug-fix pattern
tunings, including this one, will all be deferred to gcc-14.

On Wed, Mar 15, 2023 at 2:02 AM Jeff Law via Gcc-patches
<gcc-patches@gcc.gnu.org> wrote:
>
>
>
> On 3/13/23 02:19, juzhe.zhong@rivai.ai wrote:
> > From: Ju-Zhe Zhong <juzhe.zhong@rivai.ai>
> >
> > Co-authored-by: kito-cheng <kito.cheng@sifive.com>
> > Co-authored-by: kito-cheng <kito.cheng@gmail.com>
> >
> > Consider this case:
> > void f19 (void *base,void *base2,void *out,size_t vl, int n)
> > {
> >      vuint64m8_t bindex = __riscv_vle64_v_u64m8 (base + 100, vl);
> >      for (int i = 0; i < n; i++){
> >        vbool8_t m = __riscv_vlm_v_b8 (base + i, vl);
> >        vuint64m8_t v = __riscv_vluxei64_v_u64m8_m(m,base,bindex,vl);
> >        vuint64m8_t v2 = __riscv_vle64_v_u64m8_tu (v, base2 + i, vl);
> >        vint8m1_t v3 = __riscv_vluxei64_v_i8m1_m(m,base,v,vl);
> >        vint8m1_t v4 = __riscv_vluxei64_v_i8m1_m(m,base,v2,vl);
> >        __riscv_vse8_v_i8m1 (out + 100*i,v3,vl);
> >        __riscv_vse8_v_i8m1 (out + 222*i,v4,vl);
> >      }
> > }
> >
> > Due to the current suboptimal register allocation order, this case
> > produces unnecessary register spills.
> >
> > Fixing the order helps the register allocator (RA).
> >
> > Signed-off-by: Ju-Zhe Zhong <juzhe.zhong@rivai.ai>
> > Co-authored-by: kito-cheng <kito.cheng@sifive.com>
> > Co-authored-by: kito-cheng <kito.cheng@gmail.com>
> >
> > gcc/ChangeLog:
> >
> >          * config/riscv/riscv.h (enum reg_class): Fix reg order.
> >
> > gcc/testsuite/ChangeLog:
> >
> >          * gcc.target/riscv/rvv/base/spill-1.c: Adapt test.
> >          * gcc.target/riscv/rvv/base/spill-2.c: Ditto.
> >          * gcc.target/riscv/rvv/base/spill-3.c: Ditto.
> >          * gcc.target/riscv/rvv/base/spill-4.c: Ditto.
> >          * gcc.target/riscv/rvv/base/spill-5.c: Ditto.
> >          * gcc.target/riscv/rvv/base/spill-6.c: Ditto.
> >          * gcc.target/riscv/rvv/base/spill-7.c: Ditto.
> Are you OK with deferring this to gcc-14?
>
> jeff


* Re: [PATCH] RISC-V: Fix reg order of RVV registers.
  2023-03-13  8:19 [PATCH] RISC-V: Fix reg order of RVV registers juzhe.zhong
  2023-03-14 18:01 ` Jeff Law
@ 2023-04-18 13:50 ` Jeff Law
  2023-04-20 13:36   ` Kito Cheng
  1 sibling, 1 reply; 5+ messages in thread
From: Jeff Law @ 2023-04-18 13:50 UTC (permalink / raw)
  To: juzhe.zhong, gcc-patches; +Cc: kito.cheng, kito-cheng



On 3/13/23 02:19, juzhe.zhong@rivai.ai wrote:
> From: Ju-Zhe Zhong <juzhe.zhong@rivai.ai>
> 
> Co-authored-by: kito-cheng <kito.cheng@sifive.com>
> Co-authored-by: kito-cheng <kito.cheng@gmail.com>
> 
> Consider this case:
> void f19 (void *base,void *base2,void *out,size_t vl, int n)
> {
>      vuint64m8_t bindex = __riscv_vle64_v_u64m8 (base + 100, vl);
>      for (int i = 0; i < n; i++){
>        vbool8_t m = __riscv_vlm_v_b8 (base + i, vl);
>        vuint64m8_t v = __riscv_vluxei64_v_u64m8_m(m,base,bindex,vl);
>        vuint64m8_t v2 = __riscv_vle64_v_u64m8_tu (v, base2 + i, vl);
>        vint8m1_t v3 = __riscv_vluxei64_v_i8m1_m(m,base,v,vl);
>        vint8m1_t v4 = __riscv_vluxei64_v_i8m1_m(m,base,v2,vl);
>        __riscv_vse8_v_i8m1 (out + 100*i,v3,vl);
>        __riscv_vse8_v_i8m1 (out + 222*i,v4,vl);
>      }
> }
> 
> Due to the current suboptimal register allocation order, this case
> produces unnecessary register spills.
> 
> Fixing the order helps the register allocator (RA).
Note that this is likely a losing game -- over time you're likely to 
find that one ordering works better for one set of inputs while another 
ordering works better for a different set of inputs.

So while I don't object to the patch, in general we try to find a 
reasonable setting, knowing that it's likely not to be optimal in all cases.

Probably the most important aspect of this patch in my mind is moving 
the vector mask register to the end so that it's only used for vectors 
when we've exhausted the whole vector register file.  Thus it's more 
likely to be usable as a mask when we need it for that purpose.
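
As a concrete sketch (register numbers picked arbitrarily), a masked
RVV instruction can name only v0 as its mask operand:

  vluxei64.v  v8,(a1),v16,v0.t

so an allocation order that hands v0 out early tends to force extra
moves or spills to vacate it whenever a mask is needed.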

OK for the trunk and backporting to the shared RISC-V sub-branch off 
gcc-13 (once it's created).

jeff

>


* Re: [PATCH] RISC-V: Fix reg order of RVV registers.
  2023-04-18 13:50 ` Jeff Law
@ 2023-04-20 13:36   ` Kito Cheng
  0 siblings, 0 replies; 5+ messages in thread
From: Kito Cheng @ 2023-04-20 13:36 UTC (permalink / raw)
  To: Jeff Law; +Cc: juzhe.zhong, gcc-patches, kito-cheng

Committed to trunk, thanks :)

On Tue, Apr 18, 2023 at 9:50 PM Jeff Law <jeffreyalaw@gmail.com> wrote:
>
>
>
> On 3/13/23 02:19, juzhe.zhong@rivai.ai wrote:
> > From: Ju-Zhe Zhong <juzhe.zhong@rivai.ai>
> >
> > Co-authored-by: kito-cheng <kito.cheng@sifive.com>
> > Co-authored-by: kito-cheng <kito.cheng@gmail.com>
> >
> > Consider this case:
> > void f19 (void *base,void *base2,void *out,size_t vl, int n)
> > {
> >      vuint64m8_t bindex = __riscv_vle64_v_u64m8 (base + 100, vl);
> >      for (int i = 0; i < n; i++){
> >        vbool8_t m = __riscv_vlm_v_b8 (base + i, vl);
> >        vuint64m8_t v = __riscv_vluxei64_v_u64m8_m(m,base,bindex,vl);
> >        vuint64m8_t v2 = __riscv_vle64_v_u64m8_tu (v, base2 + i, vl);
> >        vint8m1_t v3 = __riscv_vluxei64_v_i8m1_m(m,base,v,vl);
> >        vint8m1_t v4 = __riscv_vluxei64_v_i8m1_m(m,base,v2,vl);
> >        __riscv_vse8_v_i8m1 (out + 100*i,v3,vl);
> >        __riscv_vse8_v_i8m1 (out + 222*i,v4,vl);
> >      }
> > }
> >
> > Due to the current suboptimal register allocation order, this case
> > produces unnecessary register spills.
> >
> > Fixing the order helps the register allocator (RA).
> Note that this is likely a losing game -- over time you're likely to
> find that one ordering works better for one set of inputs while another
> ordering works better for a different set of inputs.
>
> So while I don't object to the patch, in general we try to find a
> reasonable setting, knowing that it's likely not to be optimal in all cases.
>
> Probably the most important aspect of this patch in my mind is moving
> the vector mask register to the end so that it's only used for vectors
> when we've exhausted the whole vector register file.  Thus it's more
> likely to be usable as a mask when we need it for that purpose.
>
> OK for the trunk and backporting to the shared RISC-V sub-branch off
> gcc-13 (once it's created).
>
> jeff
>
> >
