public inbox for gcc-cvs@sourceware.org
* [gcc r13-5774] RISC-V: Add vmul.vv C++ API tests
@ 2023-02-10 11:28 Kito Cheng
From: Kito Cheng @ 2023-02-10 11:28 UTC
  To: gcc-cvs

https://gcc.gnu.org/g:1b0bd520f5ab03807c0be297d2d210bed2e44cc7

commit r13-5774-g1b0bd520f5ab03807c0be297d2d210bed2e44cc7
Author: Ju-Zhe Zhong <juzhe.zhong@rivai.ai>
Date:   Fri Feb 3 15:02:54 2023 +0800

    RISC-V: Add vmul.vv C++ API tests
    
    gcc/testsuite/ChangeLog:
    
            * g++.target/riscv/rvv/base/vmul_vv-1.C: New test.
            * g++.target/riscv/rvv/base/vmul_vv-2.C: New test.
            * g++.target/riscv/rvv/base/vmul_vv-3.C: New test.
            * g++.target/riscv/rvv/base/vmul_vv_mu-1.C: New test.
            * g++.target/riscv/rvv/base/vmul_vv_mu-2.C: New test.
            * g++.target/riscv/rvv/base/vmul_vv_mu-3.C: New test.
            * g++.target/riscv/rvv/base/vmul_vv_tu-1.C: New test.
            * g++.target/riscv/rvv/base/vmul_vv_tu-2.C: New test.
            * g++.target/riscv/rvv/base/vmul_vv_tu-3.C: New test.
            * g++.target/riscv/rvv/base/vmul_vv_tum-1.C: New test.
            * g++.target/riscv/rvv/base/vmul_vv_tum-2.C: New test.
            * g++.target/riscv/rvv/base/vmul_vv_tum-3.C: New test.
            * g++.target/riscv/rvv/base/vmul_vv_tumu-1.C: New test.
            * g++.target/riscv/rvv/base/vmul_vv_tumu-2.C: New test.
            * g++.target/riscv/rvv/base/vmul_vv_tumu-3.C: New test.

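For reference, each of the new tests exercises the overloaded C++ form of the
vmul.vv intrinsic declared in riscv_vector.h, once without and once with a
mask operand. A minimal standalone sketch of that pattern (for illustration
only, not part of the commit; the function names here are made up) is:

    #include "riscv_vector.h"

    // Unmasked form: element-wise multiply of two int32m1 vectors,
    // operating on the first vl elements.
    vint32m1_t mul_example (vint32m1_t a, vint32m1_t b, size_t vl)
    {
      return __riscv_vmul (a, b, vl);
    }

    // Masked form: the mask selects which elements are computed.
    vint32m1_t mul_masked_example (vbool32_t mask, vint32m1_t a,
                                   vint32m1_t b, size_t vl)
    {
      return __riscv_vmul (mask, a, b, vl);
    }

Compiled for rv64gcv at -O3, each such function should reduce to a vsetvli
followed by a vmul.vv (with v0.t on the masked form), which is the sequence
the scan-assembler-times directives in each test look for.
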
Diff:
---
 .../g++.target/riscv/rvv/base/vmul_vv-1.C          | 578 +++++++++++++++++++++
 .../g++.target/riscv/rvv/base/vmul_vv-2.C          | 578 +++++++++++++++++++++
 .../g++.target/riscv/rvv/base/vmul_vv-3.C          | 578 +++++++++++++++++++++
 .../g++.target/riscv/rvv/base/vmul_vv_mu-1.C       | 292 +++++++++++
 .../g++.target/riscv/rvv/base/vmul_vv_mu-2.C       | 292 +++++++++++
 .../g++.target/riscv/rvv/base/vmul_vv_mu-3.C       | 292 +++++++++++
 .../g++.target/riscv/rvv/base/vmul_vv_tu-1.C       | 292 +++++++++++
 .../g++.target/riscv/rvv/base/vmul_vv_tu-2.C       | 292 +++++++++++
 .../g++.target/riscv/rvv/base/vmul_vv_tu-3.C       | 292 +++++++++++
 .../g++.target/riscv/rvv/base/vmul_vv_tum-1.C      | 292 +++++++++++
 .../g++.target/riscv/rvv/base/vmul_vv_tum-2.C      | 292 +++++++++++
 .../g++.target/riscv/rvv/base/vmul_vv_tum-3.C      | 292 +++++++++++
 .../g++.target/riscv/rvv/base/vmul_vv_tumu-1.C     | 292 +++++++++++
 .../g++.target/riscv/rvv/base/vmul_vv_tumu-2.C     | 292 +++++++++++
 .../g++.target/riscv/rvv/base/vmul_vv_tumu-3.C     | 292 +++++++++++
 15 files changed, 5238 insertions(+)

diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vv-1.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vv-1.C
new file mode 100644
index 00000000000..14e10ed4f4f
--- /dev/null
+++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vv-1.C
@@ -0,0 +1,578 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
+vint8mf8_t test___riscv_vmul(vint8mf8_t op1,vint8mf8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint8mf4_t test___riscv_vmul(vint8mf4_t op1,vint8mf4_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint8mf2_t test___riscv_vmul(vint8mf2_t op1,vint8mf2_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint8m1_t test___riscv_vmul(vint8m1_t op1,vint8m1_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint8m2_t test___riscv_vmul(vint8m2_t op1,vint8m2_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint8m4_t test___riscv_vmul(vint8m4_t op1,vint8m4_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint8m8_t test___riscv_vmul(vint8m8_t op1,vint8m8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint16mf4_t test___riscv_vmul(vint16mf4_t op1,vint16mf4_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint16mf2_t test___riscv_vmul(vint16mf2_t op1,vint16mf2_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint16m1_t test___riscv_vmul(vint16m1_t op1,vint16m1_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint16m2_t test___riscv_vmul(vint16m2_t op1,vint16m2_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint16m4_t test___riscv_vmul(vint16m4_t op1,vint16m4_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint16m8_t test___riscv_vmul(vint16m8_t op1,vint16m8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint32mf2_t test___riscv_vmul(vint32mf2_t op1,vint32mf2_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint32m1_t test___riscv_vmul(vint32m1_t op1,vint32m1_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint32m2_t test___riscv_vmul(vint32m2_t op1,vint32m2_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint32m4_t test___riscv_vmul(vint32m4_t op1,vint32m4_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint32m8_t test___riscv_vmul(vint32m8_t op1,vint32m8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint64m1_t test___riscv_vmul(vint64m1_t op1,vint64m1_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint64m2_t test___riscv_vmul(vint64m2_t op1,vint64m2_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint64m4_t test___riscv_vmul(vint64m4_t op1,vint64m4_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint64m8_t test___riscv_vmul(vint64m8_t op1,vint64m8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint8mf8_t test___riscv_vmul(vuint8mf8_t op1,vuint8mf8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint8mf4_t test___riscv_vmul(vuint8mf4_t op1,vuint8mf4_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint8mf2_t test___riscv_vmul(vuint8mf2_t op1,vuint8mf2_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint8m1_t test___riscv_vmul(vuint8m1_t op1,vuint8m1_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint8m2_t test___riscv_vmul(vuint8m2_t op1,vuint8m2_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint8m4_t test___riscv_vmul(vuint8m4_t op1,vuint8m4_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint8m8_t test___riscv_vmul(vuint8m8_t op1,vuint8m8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint16mf4_t test___riscv_vmul(vuint16mf4_t op1,vuint16mf4_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint16mf2_t test___riscv_vmul(vuint16mf2_t op1,vuint16mf2_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint16m1_t test___riscv_vmul(vuint16m1_t op1,vuint16m1_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint16m2_t test___riscv_vmul(vuint16m2_t op1,vuint16m2_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint16m4_t test___riscv_vmul(vuint16m4_t op1,vuint16m4_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint16m8_t test___riscv_vmul(vuint16m8_t op1,vuint16m8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint32mf2_t test___riscv_vmul(vuint32mf2_t op1,vuint32mf2_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint32m1_t test___riscv_vmul(vuint32m1_t op1,vuint32m1_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint32m2_t test___riscv_vmul(vuint32m2_t op1,vuint32m2_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint32m4_t test___riscv_vmul(vuint32m4_t op1,vuint32m4_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint32m8_t test___riscv_vmul(vuint32m8_t op1,vuint32m8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint64m1_t test___riscv_vmul(vuint64m1_t op1,vuint64m1_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint64m2_t test___riscv_vmul(vuint64m2_t op1,vuint64m2_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint64m4_t test___riscv_vmul(vuint64m4_t op1,vuint64m4_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vuint64m8_t test___riscv_vmul(vuint64m8_t op1,vuint64m8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,vl);
+}
+
+
+vint8mf8_t test___riscv_vmul(vbool64_t mask,vint8mf8_t op1,vint8mf8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint8mf4_t test___riscv_vmul(vbool32_t mask,vint8mf4_t op1,vint8mf4_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint8mf2_t test___riscv_vmul(vbool16_t mask,vint8mf2_t op1,vint8mf2_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint8m1_t test___riscv_vmul(vbool8_t mask,vint8m1_t op1,vint8m1_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint8m2_t test___riscv_vmul(vbool4_t mask,vint8m2_t op1,vint8m2_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint8m4_t test___riscv_vmul(vbool2_t mask,vint8m4_t op1,vint8m4_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint8m8_t test___riscv_vmul(vbool1_t mask,vint8m8_t op1,vint8m8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint16mf4_t test___riscv_vmul(vbool64_t mask,vint16mf4_t op1,vint16mf4_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint16mf2_t test___riscv_vmul(vbool32_t mask,vint16mf2_t op1,vint16mf2_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint16m1_t test___riscv_vmul(vbool16_t mask,vint16m1_t op1,vint16m1_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint16m2_t test___riscv_vmul(vbool8_t mask,vint16m2_t op1,vint16m2_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint16m4_t test___riscv_vmul(vbool4_t mask,vint16m4_t op1,vint16m4_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint16m8_t test___riscv_vmul(vbool2_t mask,vint16m8_t op1,vint16m8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint32mf2_t test___riscv_vmul(vbool64_t mask,vint32mf2_t op1,vint32mf2_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint32m1_t test___riscv_vmul(vbool32_t mask,vint32m1_t op1,vint32m1_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint32m2_t test___riscv_vmul(vbool16_t mask,vint32m2_t op1,vint32m2_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint32m4_t test___riscv_vmul(vbool8_t mask,vint32m4_t op1,vint32m4_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint32m8_t test___riscv_vmul(vbool4_t mask,vint32m8_t op1,vint32m8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint64m1_t test___riscv_vmul(vbool64_t mask,vint64m1_t op1,vint64m1_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint64m2_t test___riscv_vmul(vbool32_t mask,vint64m2_t op1,vint64m2_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint64m4_t test___riscv_vmul(vbool16_t mask,vint64m4_t op1,vint64m4_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vint64m8_t test___riscv_vmul(vbool8_t mask,vint64m8_t op1,vint64m8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint8mf8_t test___riscv_vmul(vbool64_t mask,vuint8mf8_t op1,vuint8mf8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint8mf4_t test___riscv_vmul(vbool32_t mask,vuint8mf4_t op1,vuint8mf4_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint8mf2_t test___riscv_vmul(vbool16_t mask,vuint8mf2_t op1,vuint8mf2_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint8m1_t test___riscv_vmul(vbool8_t mask,vuint8m1_t op1,vuint8m1_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint8m2_t test___riscv_vmul(vbool4_t mask,vuint8m2_t op1,vuint8m2_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint8m4_t test___riscv_vmul(vbool2_t mask,vuint8m4_t op1,vuint8m4_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint8m8_t test___riscv_vmul(vbool1_t mask,vuint8m8_t op1,vuint8m8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint16mf4_t test___riscv_vmul(vbool64_t mask,vuint16mf4_t op1,vuint16mf4_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint16mf2_t test___riscv_vmul(vbool32_t mask,vuint16mf2_t op1,vuint16mf2_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint16m1_t test___riscv_vmul(vbool16_t mask,vuint16m1_t op1,vuint16m1_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint16m2_t test___riscv_vmul(vbool8_t mask,vuint16m2_t op1,vuint16m2_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint16m4_t test___riscv_vmul(vbool4_t mask,vuint16m4_t op1,vuint16m4_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint16m8_t test___riscv_vmul(vbool2_t mask,vuint16m8_t op1,vuint16m8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint32mf2_t test___riscv_vmul(vbool64_t mask,vuint32mf2_t op1,vuint32mf2_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint32m1_t test___riscv_vmul(vbool32_t mask,vuint32m1_t op1,vuint32m1_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint32m2_t test___riscv_vmul(vbool16_t mask,vuint32m2_t op1,vuint32m2_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint32m4_t test___riscv_vmul(vbool8_t mask,vuint32m4_t op1,vuint32m4_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint32m8_t test___riscv_vmul(vbool4_t mask,vuint32m8_t op1,vuint32m8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint64m1_t test___riscv_vmul(vbool64_t mask,vuint64m1_t op1,vuint64m1_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint64m2_t test___riscv_vmul(vbool32_t mask,vuint64m2_t op1,vuint64m2_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint64m4_t test___riscv_vmul(vbool16_t mask,vuint64m4_t op1,vuint64m4_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+vuint64m8_t test___riscv_vmul(vbool8_t mask,vuint64m8_t op1,vuint64m8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,vl);
+}
+
+
+
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vv-2.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vv-2.C
new file mode 100644
index 00000000000..1b3caee8f40
--- /dev/null
+++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vv-2.C
@@ -0,0 +1,578 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
+vint8mf8_t test___riscv_vmul(vint8mf8_t op1,vint8mf8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vint8mf4_t test___riscv_vmul(vint8mf4_t op1,vint8mf4_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vint8mf2_t test___riscv_vmul(vint8mf2_t op1,vint8mf2_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vint8m1_t test___riscv_vmul(vint8m1_t op1,vint8m1_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vint8m2_t test___riscv_vmul(vint8m2_t op1,vint8m2_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vint8m4_t test___riscv_vmul(vint8m4_t op1,vint8m4_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vint8m8_t test___riscv_vmul(vint8m8_t op1,vint8m8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vint16mf4_t test___riscv_vmul(vint16mf4_t op1,vint16mf4_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vint16mf2_t test___riscv_vmul(vint16mf2_t op1,vint16mf2_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vint16m1_t test___riscv_vmul(vint16m1_t op1,vint16m1_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vint16m2_t test___riscv_vmul(vint16m2_t op1,vint16m2_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vint16m4_t test___riscv_vmul(vint16m4_t op1,vint16m4_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vint16m8_t test___riscv_vmul(vint16m8_t op1,vint16m8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vint32mf2_t test___riscv_vmul(vint32mf2_t op1,vint32mf2_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vint32m1_t test___riscv_vmul(vint32m1_t op1,vint32m1_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vint32m2_t test___riscv_vmul(vint32m2_t op1,vint32m2_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vint32m4_t test___riscv_vmul(vint32m4_t op1,vint32m4_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vint32m8_t test___riscv_vmul(vint32m8_t op1,vint32m8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vint64m1_t test___riscv_vmul(vint64m1_t op1,vint64m1_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vint64m2_t test___riscv_vmul(vint64m2_t op1,vint64m2_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vint64m4_t test___riscv_vmul(vint64m4_t op1,vint64m4_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vint64m8_t test___riscv_vmul(vint64m8_t op1,vint64m8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint8mf8_t test___riscv_vmul(vuint8mf8_t op1,vuint8mf8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint8mf4_t test___riscv_vmul(vuint8mf4_t op1,vuint8mf4_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint8mf2_t test___riscv_vmul(vuint8mf2_t op1,vuint8mf2_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint8m1_t test___riscv_vmul(vuint8m1_t op1,vuint8m1_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint8m2_t test___riscv_vmul(vuint8m2_t op1,vuint8m2_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint8m4_t test___riscv_vmul(vuint8m4_t op1,vuint8m4_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint8m8_t test___riscv_vmul(vuint8m8_t op1,vuint8m8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint16mf4_t test___riscv_vmul(vuint16mf4_t op1,vuint16mf4_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint16mf2_t test___riscv_vmul(vuint16mf2_t op1,vuint16mf2_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint16m1_t test___riscv_vmul(vuint16m1_t op1,vuint16m1_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint16m2_t test___riscv_vmul(vuint16m2_t op1,vuint16m2_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint16m4_t test___riscv_vmul(vuint16m4_t op1,vuint16m4_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint16m8_t test___riscv_vmul(vuint16m8_t op1,vuint16m8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint32mf2_t test___riscv_vmul(vuint32mf2_t op1,vuint32mf2_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint32m1_t test___riscv_vmul(vuint32m1_t op1,vuint32m1_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint32m2_t test___riscv_vmul(vuint32m2_t op1,vuint32m2_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint32m4_t test___riscv_vmul(vuint32m4_t op1,vuint32m4_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint32m8_t test___riscv_vmul(vuint32m8_t op1,vuint32m8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint64m1_t test___riscv_vmul(vuint64m1_t op1,vuint64m1_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint64m2_t test___riscv_vmul(vuint64m2_t op1,vuint64m2_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint64m4_t test___riscv_vmul(vuint64m4_t op1,vuint64m4_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vuint64m8_t test___riscv_vmul(vuint64m8_t op1,vuint64m8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,31);
+}
+
+
+vint8mf8_t test___riscv_vmul(vbool64_t mask,vint8mf8_t op1,vint8mf8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint8mf4_t test___riscv_vmul(vbool32_t mask,vint8mf4_t op1,vint8mf4_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint8mf2_t test___riscv_vmul(vbool16_t mask,vint8mf2_t op1,vint8mf2_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint8m1_t test___riscv_vmul(vbool8_t mask,vint8m1_t op1,vint8m1_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint8m2_t test___riscv_vmul(vbool4_t mask,vint8m2_t op1,vint8m2_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint8m4_t test___riscv_vmul(vbool2_t mask,vint8m4_t op1,vint8m4_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint8m8_t test___riscv_vmul(vbool1_t mask,vint8m8_t op1,vint8m8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint16mf4_t test___riscv_vmul(vbool64_t mask,vint16mf4_t op1,vint16mf4_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint16mf2_t test___riscv_vmul(vbool32_t mask,vint16mf2_t op1,vint16mf2_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint16m1_t test___riscv_vmul(vbool16_t mask,vint16m1_t op1,vint16m1_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint16m2_t test___riscv_vmul(vbool8_t mask,vint16m2_t op1,vint16m2_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint16m4_t test___riscv_vmul(vbool4_t mask,vint16m4_t op1,vint16m4_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint16m8_t test___riscv_vmul(vbool2_t mask,vint16m8_t op1,vint16m8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint32mf2_t test___riscv_vmul(vbool64_t mask,vint32mf2_t op1,vint32mf2_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint32m1_t test___riscv_vmul(vbool32_t mask,vint32m1_t op1,vint32m1_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint32m2_t test___riscv_vmul(vbool16_t mask,vint32m2_t op1,vint32m2_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint32m4_t test___riscv_vmul(vbool8_t mask,vint32m4_t op1,vint32m4_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint32m8_t test___riscv_vmul(vbool4_t mask,vint32m8_t op1,vint32m8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint64m1_t test___riscv_vmul(vbool64_t mask,vint64m1_t op1,vint64m1_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint64m2_t test___riscv_vmul(vbool32_t mask,vint64m2_t op1,vint64m2_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint64m4_t test___riscv_vmul(vbool16_t mask,vint64m4_t op1,vint64m4_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vint64m8_t test___riscv_vmul(vbool8_t mask,vint64m8_t op1,vint64m8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint8mf8_t test___riscv_vmul(vbool64_t mask,vuint8mf8_t op1,vuint8mf8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint8mf4_t test___riscv_vmul(vbool32_t mask,vuint8mf4_t op1,vuint8mf4_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint8mf2_t test___riscv_vmul(vbool16_t mask,vuint8mf2_t op1,vuint8mf2_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint8m1_t test___riscv_vmul(vbool8_t mask,vuint8m1_t op1,vuint8m1_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint8m2_t test___riscv_vmul(vbool4_t mask,vuint8m2_t op1,vuint8m2_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint8m4_t test___riscv_vmul(vbool2_t mask,vuint8m4_t op1,vuint8m4_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint8m8_t test___riscv_vmul(vbool1_t mask,vuint8m8_t op1,vuint8m8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint16mf4_t test___riscv_vmul(vbool64_t mask,vuint16mf4_t op1,vuint16mf4_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint16mf2_t test___riscv_vmul(vbool32_t mask,vuint16mf2_t op1,vuint16mf2_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint16m1_t test___riscv_vmul(vbool16_t mask,vuint16m1_t op1,vuint16m1_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint16m2_t test___riscv_vmul(vbool8_t mask,vuint16m2_t op1,vuint16m2_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint16m4_t test___riscv_vmul(vbool4_t mask,vuint16m4_t op1,vuint16m4_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint16m8_t test___riscv_vmul(vbool2_t mask,vuint16m8_t op1,vuint16m8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint32mf2_t test___riscv_vmul(vbool64_t mask,vuint32mf2_t op1,vuint32mf2_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint32m1_t test___riscv_vmul(vbool32_t mask,vuint32m1_t op1,vuint32m1_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint32m2_t test___riscv_vmul(vbool16_t mask,vuint32m2_t op1,vuint32m2_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint32m4_t test___riscv_vmul(vbool8_t mask,vuint32m4_t op1,vuint32m4_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint32m8_t test___riscv_vmul(vbool4_t mask,vuint32m8_t op1,vuint32m8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint64m1_t test___riscv_vmul(vbool64_t mask,vuint64m1_t op1,vuint64m1_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint64m2_t test___riscv_vmul(vbool32_t mask,vuint64m2_t op1,vuint64m2_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint64m4_t test___riscv_vmul(vbool16_t mask,vuint64m4_t op1,vuint64m4_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+vuint64m8_t test___riscv_vmul(vbool8_t mask,vuint64m8_t op1,vuint64m8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,31);
+}
+
+
+
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf8,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf4,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf4,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf8,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf4,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf4,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vv-3.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vv-3.C
new file mode 100644
index 00000000000..4a0fae845e2
--- /dev/null
+++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vv-3.C
@@ -0,0 +1,578 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
+vint8mf8_t test___riscv_vmul(vint8mf8_t op1,vint8mf8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vint8mf4_t test___riscv_vmul(vint8mf4_t op1,vint8mf4_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vint8mf2_t test___riscv_vmul(vint8mf2_t op1,vint8mf2_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vint8m1_t test___riscv_vmul(vint8m1_t op1,vint8m1_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vint8m2_t test___riscv_vmul(vint8m2_t op1,vint8m2_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vint8m4_t test___riscv_vmul(vint8m4_t op1,vint8m4_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vint8m8_t test___riscv_vmul(vint8m8_t op1,vint8m8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vint16mf4_t test___riscv_vmul(vint16mf4_t op1,vint16mf4_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vint16mf2_t test___riscv_vmul(vint16mf2_t op1,vint16mf2_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vint16m1_t test___riscv_vmul(vint16m1_t op1,vint16m1_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vint16m2_t test___riscv_vmul(vint16m2_t op1,vint16m2_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vint16m4_t test___riscv_vmul(vint16m4_t op1,vint16m4_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vint16m8_t test___riscv_vmul(vint16m8_t op1,vint16m8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vint32mf2_t test___riscv_vmul(vint32mf2_t op1,vint32mf2_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vint32m1_t test___riscv_vmul(vint32m1_t op1,vint32m1_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vint32m2_t test___riscv_vmul(vint32m2_t op1,vint32m2_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vint32m4_t test___riscv_vmul(vint32m4_t op1,vint32m4_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vint32m8_t test___riscv_vmul(vint32m8_t op1,vint32m8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vint64m1_t test___riscv_vmul(vint64m1_t op1,vint64m1_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vint64m2_t test___riscv_vmul(vint64m2_t op1,vint64m2_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vint64m4_t test___riscv_vmul(vint64m4_t op1,vint64m4_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vint64m8_t test___riscv_vmul(vint64m8_t op1,vint64m8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint8mf8_t test___riscv_vmul(vuint8mf8_t op1,vuint8mf8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint8mf4_t test___riscv_vmul(vuint8mf4_t op1,vuint8mf4_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint8mf2_t test___riscv_vmul(vuint8mf2_t op1,vuint8mf2_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint8m1_t test___riscv_vmul(vuint8m1_t op1,vuint8m1_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint8m2_t test___riscv_vmul(vuint8m2_t op1,vuint8m2_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint8m4_t test___riscv_vmul(vuint8m4_t op1,vuint8m4_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint8m8_t test___riscv_vmul(vuint8m8_t op1,vuint8m8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint16mf4_t test___riscv_vmul(vuint16mf4_t op1,vuint16mf4_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint16mf2_t test___riscv_vmul(vuint16mf2_t op1,vuint16mf2_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint16m1_t test___riscv_vmul(vuint16m1_t op1,vuint16m1_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint16m2_t test___riscv_vmul(vuint16m2_t op1,vuint16m2_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint16m4_t test___riscv_vmul(vuint16m4_t op1,vuint16m4_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint16m8_t test___riscv_vmul(vuint16m8_t op1,vuint16m8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint32mf2_t test___riscv_vmul(vuint32mf2_t op1,vuint32mf2_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint32m1_t test___riscv_vmul(vuint32m1_t op1,vuint32m1_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint32m2_t test___riscv_vmul(vuint32m2_t op1,vuint32m2_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint32m4_t test___riscv_vmul(vuint32m4_t op1,vuint32m4_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint32m8_t test___riscv_vmul(vuint32m8_t op1,vuint32m8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint64m1_t test___riscv_vmul(vuint64m1_t op1,vuint64m1_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint64m2_t test___riscv_vmul(vuint64m2_t op1,vuint64m2_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint64m4_t test___riscv_vmul(vuint64m4_t op1,vuint64m4_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vuint64m8_t test___riscv_vmul(vuint64m8_t op1,vuint64m8_t op2,size_t vl)
+{
+    return __riscv_vmul(op1,op2,32);
+}
+
+
+vint8mf8_t test___riscv_vmul(vbool64_t mask,vint8mf8_t op1,vint8mf8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint8mf4_t test___riscv_vmul(vbool32_t mask,vint8mf4_t op1,vint8mf4_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint8mf2_t test___riscv_vmul(vbool16_t mask,vint8mf2_t op1,vint8mf2_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint8m1_t test___riscv_vmul(vbool8_t mask,vint8m1_t op1,vint8m1_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint8m2_t test___riscv_vmul(vbool4_t mask,vint8m2_t op1,vint8m2_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint8m4_t test___riscv_vmul(vbool2_t mask,vint8m4_t op1,vint8m4_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint8m8_t test___riscv_vmul(vbool1_t mask,vint8m8_t op1,vint8m8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint16mf4_t test___riscv_vmul(vbool64_t mask,vint16mf4_t op1,vint16mf4_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint16mf2_t test___riscv_vmul(vbool32_t mask,vint16mf2_t op1,vint16mf2_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint16m1_t test___riscv_vmul(vbool16_t mask,vint16m1_t op1,vint16m1_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint16m2_t test___riscv_vmul(vbool8_t mask,vint16m2_t op1,vint16m2_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint16m4_t test___riscv_vmul(vbool4_t mask,vint16m4_t op1,vint16m4_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint16m8_t test___riscv_vmul(vbool2_t mask,vint16m8_t op1,vint16m8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint32mf2_t test___riscv_vmul(vbool64_t mask,vint32mf2_t op1,vint32mf2_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint32m1_t test___riscv_vmul(vbool32_t mask,vint32m1_t op1,vint32m1_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint32m2_t test___riscv_vmul(vbool16_t mask,vint32m2_t op1,vint32m2_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint32m4_t test___riscv_vmul(vbool8_t mask,vint32m4_t op1,vint32m4_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint32m8_t test___riscv_vmul(vbool4_t mask,vint32m8_t op1,vint32m8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint64m1_t test___riscv_vmul(vbool64_t mask,vint64m1_t op1,vint64m1_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint64m2_t test___riscv_vmul(vbool32_t mask,vint64m2_t op1,vint64m2_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint64m4_t test___riscv_vmul(vbool16_t mask,vint64m4_t op1,vint64m4_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vint64m8_t test___riscv_vmul(vbool8_t mask,vint64m8_t op1,vint64m8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint8mf8_t test___riscv_vmul(vbool64_t mask,vuint8mf8_t op1,vuint8mf8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint8mf4_t test___riscv_vmul(vbool32_t mask,vuint8mf4_t op1,vuint8mf4_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint8mf2_t test___riscv_vmul(vbool16_t mask,vuint8mf2_t op1,vuint8mf2_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint8m1_t test___riscv_vmul(vbool8_t mask,vuint8m1_t op1,vuint8m1_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint8m2_t test___riscv_vmul(vbool4_t mask,vuint8m2_t op1,vuint8m2_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint8m4_t test___riscv_vmul(vbool2_t mask,vuint8m4_t op1,vuint8m4_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint8m8_t test___riscv_vmul(vbool1_t mask,vuint8m8_t op1,vuint8m8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint16mf4_t test___riscv_vmul(vbool64_t mask,vuint16mf4_t op1,vuint16mf4_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint16mf2_t test___riscv_vmul(vbool32_t mask,vuint16mf2_t op1,vuint16mf2_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint16m1_t test___riscv_vmul(vbool16_t mask,vuint16m1_t op1,vuint16m1_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint16m2_t test___riscv_vmul(vbool8_t mask,vuint16m2_t op1,vuint16m2_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint16m4_t test___riscv_vmul(vbool4_t mask,vuint16m4_t op1,vuint16m4_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint16m8_t test___riscv_vmul(vbool2_t mask,vuint16m8_t op1,vuint16m8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint32mf2_t test___riscv_vmul(vbool64_t mask,vuint32mf2_t op1,vuint32mf2_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint32m1_t test___riscv_vmul(vbool32_t mask,vuint32m1_t op1,vuint32m1_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint32m2_t test___riscv_vmul(vbool16_t mask,vuint32m2_t op1,vuint32m2_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint32m4_t test___riscv_vmul(vbool8_t mask,vuint32m4_t op1,vuint32m4_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint32m8_t test___riscv_vmul(vbool4_t mask,vuint32m8_t op1,vuint32m8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint64m1_t test___riscv_vmul(vbool64_t mask,vuint64m1_t op1,vuint64m1_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint64m2_t test___riscv_vmul(vbool32_t mask,vuint64m2_t op1,vuint64m2_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint64m4_t test___riscv_vmul(vbool16_t mask,vuint64m4_t op1,vuint64m4_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+vuint64m8_t test___riscv_vmul(vbool8_t mask,vuint64m8_t op1,vuint64m8_t op2,size_t vl)
+{
+    return __riscv_vmul(mask,op1,op2,32);
+}
+
+
+
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vv_mu-1.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vv_mu-1.C
new file mode 100644
index 00000000000..c2ff0fe6750
--- /dev/null
+++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vv_mu-1.C
@@ -0,0 +1,292 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
+vint8mf8_t test___riscv_vmul_mu(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,vint8mf8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint8mf4_t test___riscv_vmul_mu(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,vint8mf4_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint8mf2_t test___riscv_vmul_mu(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,vint8mf2_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint8m1_t test___riscv_vmul_mu(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,vint8m1_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint8m2_t test___riscv_vmul_mu(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,vint8m2_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint8m4_t test___riscv_vmul_mu(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,vint8m4_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint8m8_t test___riscv_vmul_mu(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,vint8m8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint16mf4_t test___riscv_vmul_mu(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,vint16mf4_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint16mf2_t test___riscv_vmul_mu(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,vint16mf2_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint16m1_t test___riscv_vmul_mu(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,vint16m1_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint16m2_t test___riscv_vmul_mu(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,vint16m2_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint16m4_t test___riscv_vmul_mu(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,vint16m4_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint16m8_t test___riscv_vmul_mu(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,vint16m8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint32mf2_t test___riscv_vmul_mu(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,vint32mf2_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint32m1_t test___riscv_vmul_mu(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,vint32m1_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint32m2_t test___riscv_vmul_mu(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,vint32m2_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint32m4_t test___riscv_vmul_mu(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,vint32m4_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint32m8_t test___riscv_vmul_mu(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,vint32m8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint64m1_t test___riscv_vmul_mu(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,vint64m1_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint64m2_t test___riscv_vmul_mu(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,vint64m2_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint64m4_t test___riscv_vmul_mu(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,vint64m4_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vint64m8_t test___riscv_vmul_mu(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,vint64m8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint8mf8_t test___riscv_vmul_mu(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,vuint8mf8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint8mf4_t test___riscv_vmul_mu(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,vuint8mf4_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint8mf2_t test___riscv_vmul_mu(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,vuint8mf2_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint8m1_t test___riscv_vmul_mu(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,vuint8m1_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint8m2_t test___riscv_vmul_mu(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,vuint8m2_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint8m4_t test___riscv_vmul_mu(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,vuint8m4_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint8m8_t test___riscv_vmul_mu(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,vuint8m8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint16mf4_t test___riscv_vmul_mu(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,vuint16mf4_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint16mf2_t test___riscv_vmul_mu(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,vuint16mf2_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint16m1_t test___riscv_vmul_mu(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,vuint16m1_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint16m2_t test___riscv_vmul_mu(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,vuint16m2_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint16m4_t test___riscv_vmul_mu(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,vuint16m4_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint16m8_t test___riscv_vmul_mu(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,vuint16m8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint32mf2_t test___riscv_vmul_mu(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,vuint32mf2_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint32m1_t test___riscv_vmul_mu(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,vuint32m1_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint32m2_t test___riscv_vmul_mu(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,vuint32m2_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint32m4_t test___riscv_vmul_mu(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,vuint32m4_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint32m8_t test___riscv_vmul_mu(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,vuint32m8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint64m1_t test___riscv_vmul_mu(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,vuint64m1_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint64m2_t test___riscv_vmul_mu(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,vuint64m2_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint64m4_t test___riscv_vmul_mu(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,vuint64m4_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+vuint64m8_t test___riscv_vmul_mu(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,vuint64m8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,vl);
+}
+
+
+
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vv_mu-2.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vv_mu-2.C
new file mode 100644
index 00000000000..c59f4d70955
--- /dev/null
+++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vv_mu-2.C
@@ -0,0 +1,292 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
+vint8mf8_t test___riscv_vmul_mu(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,vint8mf8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint8mf4_t test___riscv_vmul_mu(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,vint8mf4_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint8mf2_t test___riscv_vmul_mu(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,vint8mf2_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint8m1_t test___riscv_vmul_mu(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,vint8m1_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint8m2_t test___riscv_vmul_mu(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,vint8m2_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint8m4_t test___riscv_vmul_mu(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,vint8m4_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint8m8_t test___riscv_vmul_mu(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,vint8m8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint16mf4_t test___riscv_vmul_mu(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,vint16mf4_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint16mf2_t test___riscv_vmul_mu(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,vint16mf2_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint16m1_t test___riscv_vmul_mu(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,vint16m1_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint16m2_t test___riscv_vmul_mu(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,vint16m2_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint16m4_t test___riscv_vmul_mu(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,vint16m4_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint16m8_t test___riscv_vmul_mu(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,vint16m8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint32mf2_t test___riscv_vmul_mu(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,vint32mf2_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint32m1_t test___riscv_vmul_mu(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,vint32m1_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint32m2_t test___riscv_vmul_mu(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,vint32m2_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint32m4_t test___riscv_vmul_mu(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,vint32m4_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint32m8_t test___riscv_vmul_mu(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,vint32m8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint64m1_t test___riscv_vmul_mu(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,vint64m1_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint64m2_t test___riscv_vmul_mu(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,vint64m2_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint64m4_t test___riscv_vmul_mu(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,vint64m4_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vint64m8_t test___riscv_vmul_mu(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,vint64m8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint8mf8_t test___riscv_vmul_mu(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,vuint8mf8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint8mf4_t test___riscv_vmul_mu(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,vuint8mf4_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint8mf2_t test___riscv_vmul_mu(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,vuint8mf2_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint8m1_t test___riscv_vmul_mu(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,vuint8m1_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint8m2_t test___riscv_vmul_mu(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,vuint8m2_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint8m4_t test___riscv_vmul_mu(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,vuint8m4_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint8m8_t test___riscv_vmul_mu(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,vuint8m8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint16mf4_t test___riscv_vmul_mu(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,vuint16mf4_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint16mf2_t test___riscv_vmul_mu(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,vuint16mf2_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint16m1_t test___riscv_vmul_mu(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,vuint16m1_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint16m2_t test___riscv_vmul_mu(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,vuint16m2_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint16m4_t test___riscv_vmul_mu(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,vuint16m4_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint16m8_t test___riscv_vmul_mu(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,vuint16m8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint32mf2_t test___riscv_vmul_mu(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,vuint32mf2_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint32m1_t test___riscv_vmul_mu(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,vuint32m1_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint32m2_t test___riscv_vmul_mu(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,vuint32m2_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint32m4_t test___riscv_vmul_mu(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,vuint32m4_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint32m8_t test___riscv_vmul_mu(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,vuint32m8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint64m1_t test___riscv_vmul_mu(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,vuint64m1_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint64m2_t test___riscv_vmul_mu(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,vuint64m2_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint64m4_t test___riscv_vmul_mu(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,vuint64m4_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+vuint64m8_t test___riscv_vmul_mu(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,vuint64m8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,31);
+}
+
+
+
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf8,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf4,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf2,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m1,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m2,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m4,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m8,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf4,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf2,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m1,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m2,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m4,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m8,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*mf2,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m1,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m2,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m4,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m8,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m1,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m2,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m4,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m8,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vv_mu-3.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vv_mu-3.C
new file mode 100644
index 00000000000..d1c329f9133
--- /dev/null
+++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vv_mu-3.C
@@ -0,0 +1,292 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
+vint8mf8_t test___riscv_vmul_mu(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,vint8mf8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint8mf4_t test___riscv_vmul_mu(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,vint8mf4_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint8mf2_t test___riscv_vmul_mu(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,vint8mf2_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint8m1_t test___riscv_vmul_mu(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,vint8m1_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint8m2_t test___riscv_vmul_mu(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,vint8m2_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint8m4_t test___riscv_vmul_mu(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,vint8m4_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint8m8_t test___riscv_vmul_mu(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,vint8m8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint16mf4_t test___riscv_vmul_mu(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,vint16mf4_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint16mf2_t test___riscv_vmul_mu(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,vint16mf2_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint16m1_t test___riscv_vmul_mu(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,vint16m1_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint16m2_t test___riscv_vmul_mu(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,vint16m2_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint16m4_t test___riscv_vmul_mu(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,vint16m4_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint16m8_t test___riscv_vmul_mu(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,vint16m8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint32mf2_t test___riscv_vmul_mu(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,vint32mf2_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint32m1_t test___riscv_vmul_mu(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,vint32m1_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint32m2_t test___riscv_vmul_mu(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,vint32m2_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint32m4_t test___riscv_vmul_mu(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,vint32m4_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint32m8_t test___riscv_vmul_mu(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,vint32m8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint64m1_t test___riscv_vmul_mu(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,vint64m1_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint64m2_t test___riscv_vmul_mu(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,vint64m2_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint64m4_t test___riscv_vmul_mu(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,vint64m4_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vint64m8_t test___riscv_vmul_mu(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,vint64m8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint8mf8_t test___riscv_vmul_mu(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,vuint8mf8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint8mf4_t test___riscv_vmul_mu(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,vuint8mf4_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint8mf2_t test___riscv_vmul_mu(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,vuint8mf2_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint8m1_t test___riscv_vmul_mu(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,vuint8m1_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint8m2_t test___riscv_vmul_mu(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,vuint8m2_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint8m4_t test___riscv_vmul_mu(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,vuint8m4_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint8m8_t test___riscv_vmul_mu(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,vuint8m8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint16mf4_t test___riscv_vmul_mu(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,vuint16mf4_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint16mf2_t test___riscv_vmul_mu(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,vuint16mf2_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint16m1_t test___riscv_vmul_mu(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,vuint16m1_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint16m2_t test___riscv_vmul_mu(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,vuint16m2_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint16m4_t test___riscv_vmul_mu(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,vuint16m4_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint16m8_t test___riscv_vmul_mu(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,vuint16m8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint32mf2_t test___riscv_vmul_mu(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,vuint32mf2_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint32m1_t test___riscv_vmul_mu(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,vuint32m1_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint32m2_t test___riscv_vmul_mu(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,vuint32m2_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint32m4_t test___riscv_vmul_mu(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,vuint32m4_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint32m8_t test___riscv_vmul_mu(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,vuint32m8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint64m1_t test___riscv_vmul_mu(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,vuint64m1_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint64m2_t test___riscv_vmul_mu(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,vuint64m2_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint64m4_t test___riscv_vmul_mu(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,vuint64m4_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+vuint64m8_t test___riscv_vmul_mu(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,vuint64m8_t op2,size_t vl)
+{
+    return __riscv_vmul_mu(mask,merge,op1,op2,32);
+}
+
+
+
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*t[au],\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vv_tu-1.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vv_tu-1.C
new file mode 100644
index 00000000000..35d84cba7b9
--- /dev/null
+++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vv_tu-1.C
@@ -0,0 +1,292 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
+vint8mf8_t test___riscv_vmul_tu(vint8mf8_t merge,vint8mf8_t op1,vint8mf8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint8mf4_t test___riscv_vmul_tu(vint8mf4_t merge,vint8mf4_t op1,vint8mf4_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint8mf2_t test___riscv_vmul_tu(vint8mf2_t merge,vint8mf2_t op1,vint8mf2_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint8m1_t test___riscv_vmul_tu(vint8m1_t merge,vint8m1_t op1,vint8m1_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint8m2_t test___riscv_vmul_tu(vint8m2_t merge,vint8m2_t op1,vint8m2_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint8m4_t test___riscv_vmul_tu(vint8m4_t merge,vint8m4_t op1,vint8m4_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint8m8_t test___riscv_vmul_tu(vint8m8_t merge,vint8m8_t op1,vint8m8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint16mf4_t test___riscv_vmul_tu(vint16mf4_t merge,vint16mf4_t op1,vint16mf4_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint16mf2_t test___riscv_vmul_tu(vint16mf2_t merge,vint16mf2_t op1,vint16mf2_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint16m1_t test___riscv_vmul_tu(vint16m1_t merge,vint16m1_t op1,vint16m1_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint16m2_t test___riscv_vmul_tu(vint16m2_t merge,vint16m2_t op1,vint16m2_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint16m4_t test___riscv_vmul_tu(vint16m4_t merge,vint16m4_t op1,vint16m4_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint16m8_t test___riscv_vmul_tu(vint16m8_t merge,vint16m8_t op1,vint16m8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint32mf2_t test___riscv_vmul_tu(vint32mf2_t merge,vint32mf2_t op1,vint32mf2_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint32m1_t test___riscv_vmul_tu(vint32m1_t merge,vint32m1_t op1,vint32m1_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint32m2_t test___riscv_vmul_tu(vint32m2_t merge,vint32m2_t op1,vint32m2_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint32m4_t test___riscv_vmul_tu(vint32m4_t merge,vint32m4_t op1,vint32m4_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint32m8_t test___riscv_vmul_tu(vint32m8_t merge,vint32m8_t op1,vint32m8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint64m1_t test___riscv_vmul_tu(vint64m1_t merge,vint64m1_t op1,vint64m1_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint64m2_t test___riscv_vmul_tu(vint64m2_t merge,vint64m2_t op1,vint64m2_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint64m4_t test___riscv_vmul_tu(vint64m4_t merge,vint64m4_t op1,vint64m4_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vint64m8_t test___riscv_vmul_tu(vint64m8_t merge,vint64m8_t op1,vint64m8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint8mf8_t test___riscv_vmul_tu(vuint8mf8_t merge,vuint8mf8_t op1,vuint8mf8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint8mf4_t test___riscv_vmul_tu(vuint8mf4_t merge,vuint8mf4_t op1,vuint8mf4_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint8mf2_t test___riscv_vmul_tu(vuint8mf2_t merge,vuint8mf2_t op1,vuint8mf2_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint8m1_t test___riscv_vmul_tu(vuint8m1_t merge,vuint8m1_t op1,vuint8m1_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint8m2_t test___riscv_vmul_tu(vuint8m2_t merge,vuint8m2_t op1,vuint8m2_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint8m4_t test___riscv_vmul_tu(vuint8m4_t merge,vuint8m4_t op1,vuint8m4_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint8m8_t test___riscv_vmul_tu(vuint8m8_t merge,vuint8m8_t op1,vuint8m8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint16mf4_t test___riscv_vmul_tu(vuint16mf4_t merge,vuint16mf4_t op1,vuint16mf4_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint16mf2_t test___riscv_vmul_tu(vuint16mf2_t merge,vuint16mf2_t op1,vuint16mf2_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint16m1_t test___riscv_vmul_tu(vuint16m1_t merge,vuint16m1_t op1,vuint16m1_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint16m2_t test___riscv_vmul_tu(vuint16m2_t merge,vuint16m2_t op1,vuint16m2_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint16m4_t test___riscv_vmul_tu(vuint16m4_t merge,vuint16m4_t op1,vuint16m4_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint16m8_t test___riscv_vmul_tu(vuint16m8_t merge,vuint16m8_t op1,vuint16m8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint32mf2_t test___riscv_vmul_tu(vuint32mf2_t merge,vuint32mf2_t op1,vuint32mf2_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint32m1_t test___riscv_vmul_tu(vuint32m1_t merge,vuint32m1_t op1,vuint32m1_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint32m2_t test___riscv_vmul_tu(vuint32m2_t merge,vuint32m2_t op1,vuint32m2_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint32m4_t test___riscv_vmul_tu(vuint32m4_t merge,vuint32m4_t op1,vuint32m4_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint32m8_t test___riscv_vmul_tu(vuint32m8_t merge,vuint32m8_t op1,vuint32m8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint64m1_t test___riscv_vmul_tu(vuint64m1_t merge,vuint64m1_t op1,vuint64m1_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint64m2_t test___riscv_vmul_tu(vuint64m2_t merge,vuint64m2_t op1,vuint64m2_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint64m4_t test___riscv_vmul_tu(vuint64m4_t merge,vuint64m4_t op1,vuint64m4_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+vuint64m8_t test___riscv_vmul_tu(vuint64m8_t merge,vuint64m8_t op1,vuint64m8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,vl);
+}
+
+
+
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vv_tu-2.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vv_tu-2.C
new file mode 100644
index 00000000000..2de9d4ae6b2
--- /dev/null
+++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vv_tu-2.C
@@ -0,0 +1,292 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
+vint8mf8_t test___riscv_vmul_tu(vint8mf8_t merge,vint8mf8_t op1,vint8mf8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint8mf4_t test___riscv_vmul_tu(vint8mf4_t merge,vint8mf4_t op1,vint8mf4_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint8mf2_t test___riscv_vmul_tu(vint8mf2_t merge,vint8mf2_t op1,vint8mf2_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint8m1_t test___riscv_vmul_tu(vint8m1_t merge,vint8m1_t op1,vint8m1_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint8m2_t test___riscv_vmul_tu(vint8m2_t merge,vint8m2_t op1,vint8m2_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint8m4_t test___riscv_vmul_tu(vint8m4_t merge,vint8m4_t op1,vint8m4_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint8m8_t test___riscv_vmul_tu(vint8m8_t merge,vint8m8_t op1,vint8m8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint16mf4_t test___riscv_vmul_tu(vint16mf4_t merge,vint16mf4_t op1,vint16mf4_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint16mf2_t test___riscv_vmul_tu(vint16mf2_t merge,vint16mf2_t op1,vint16mf2_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint16m1_t test___riscv_vmul_tu(vint16m1_t merge,vint16m1_t op1,vint16m1_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint16m2_t test___riscv_vmul_tu(vint16m2_t merge,vint16m2_t op1,vint16m2_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint16m4_t test___riscv_vmul_tu(vint16m4_t merge,vint16m4_t op1,vint16m4_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint16m8_t test___riscv_vmul_tu(vint16m8_t merge,vint16m8_t op1,vint16m8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint32mf2_t test___riscv_vmul_tu(vint32mf2_t merge,vint32mf2_t op1,vint32mf2_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint32m1_t test___riscv_vmul_tu(vint32m1_t merge,vint32m1_t op1,vint32m1_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint32m2_t test___riscv_vmul_tu(vint32m2_t merge,vint32m2_t op1,vint32m2_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint32m4_t test___riscv_vmul_tu(vint32m4_t merge,vint32m4_t op1,vint32m4_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint32m8_t test___riscv_vmul_tu(vint32m8_t merge,vint32m8_t op1,vint32m8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint64m1_t test___riscv_vmul_tu(vint64m1_t merge,vint64m1_t op1,vint64m1_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint64m2_t test___riscv_vmul_tu(vint64m2_t merge,vint64m2_t op1,vint64m2_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint64m4_t test___riscv_vmul_tu(vint64m4_t merge,vint64m4_t op1,vint64m4_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vint64m8_t test___riscv_vmul_tu(vint64m8_t merge,vint64m8_t op1,vint64m8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint8mf8_t test___riscv_vmul_tu(vuint8mf8_t merge,vuint8mf8_t op1,vuint8mf8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint8mf4_t test___riscv_vmul_tu(vuint8mf4_t merge,vuint8mf4_t op1,vuint8mf4_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint8mf2_t test___riscv_vmul_tu(vuint8mf2_t merge,vuint8mf2_t op1,vuint8mf2_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint8m1_t test___riscv_vmul_tu(vuint8m1_t merge,vuint8m1_t op1,vuint8m1_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint8m2_t test___riscv_vmul_tu(vuint8m2_t merge,vuint8m2_t op1,vuint8m2_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint8m4_t test___riscv_vmul_tu(vuint8m4_t merge,vuint8m4_t op1,vuint8m4_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint8m8_t test___riscv_vmul_tu(vuint8m8_t merge,vuint8m8_t op1,vuint8m8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint16mf4_t test___riscv_vmul_tu(vuint16mf4_t merge,vuint16mf4_t op1,vuint16mf4_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint16mf2_t test___riscv_vmul_tu(vuint16mf2_t merge,vuint16mf2_t op1,vuint16mf2_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint16m1_t test___riscv_vmul_tu(vuint16m1_t merge,vuint16m1_t op1,vuint16m1_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint16m2_t test___riscv_vmul_tu(vuint16m2_t merge,vuint16m2_t op1,vuint16m2_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint16m4_t test___riscv_vmul_tu(vuint16m4_t merge,vuint16m4_t op1,vuint16m4_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint16m8_t test___riscv_vmul_tu(vuint16m8_t merge,vuint16m8_t op1,vuint16m8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint32mf2_t test___riscv_vmul_tu(vuint32mf2_t merge,vuint32mf2_t op1,vuint32mf2_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint32m1_t test___riscv_vmul_tu(vuint32m1_t merge,vuint32m1_t op1,vuint32m1_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint32m2_t test___riscv_vmul_tu(vuint32m2_t merge,vuint32m2_t op1,vuint32m2_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint32m4_t test___riscv_vmul_tu(vuint32m4_t merge,vuint32m4_t op1,vuint32m4_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint32m8_t test___riscv_vmul_tu(vuint32m8_t merge,vuint32m8_t op1,vuint32m8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint64m1_t test___riscv_vmul_tu(vuint64m1_t merge,vuint64m1_t op1,vuint64m1_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint64m2_t test___riscv_vmul_tu(vuint64m2_t merge,vuint64m2_t op1,vuint64m2_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint64m4_t test___riscv_vmul_tu(vuint64m4_t merge,vuint64m4_t op1,vuint64m4_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+vuint64m8_t test___riscv_vmul_tu(vuint64m8_t merge,vuint64m8_t op1,vuint64m8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,31);
+}
+
+
+
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf8,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf4,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m1,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m4,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m8,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf4,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m1,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m4,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m8,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m1,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m4,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m8,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m1,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m4,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m8,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vv_tu-3.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vv_tu-3.C
new file mode 100644
index 00000000000..91611b78d13
--- /dev/null
+++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vv_tu-3.C
@@ -0,0 +1,292 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
+vint8mf8_t test___riscv_vmul_tu(vint8mf8_t merge,vint8mf8_t op1,vint8mf8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint8mf4_t test___riscv_vmul_tu(vint8mf4_t merge,vint8mf4_t op1,vint8mf4_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint8mf2_t test___riscv_vmul_tu(vint8mf2_t merge,vint8mf2_t op1,vint8mf2_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint8m1_t test___riscv_vmul_tu(vint8m1_t merge,vint8m1_t op1,vint8m1_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint8m2_t test___riscv_vmul_tu(vint8m2_t merge,vint8m2_t op1,vint8m2_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint8m4_t test___riscv_vmul_tu(vint8m4_t merge,vint8m4_t op1,vint8m4_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint8m8_t test___riscv_vmul_tu(vint8m8_t merge,vint8m8_t op1,vint8m8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint16mf4_t test___riscv_vmul_tu(vint16mf4_t merge,vint16mf4_t op1,vint16mf4_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint16mf2_t test___riscv_vmul_tu(vint16mf2_t merge,vint16mf2_t op1,vint16mf2_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint16m1_t test___riscv_vmul_tu(vint16m1_t merge,vint16m1_t op1,vint16m1_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint16m2_t test___riscv_vmul_tu(vint16m2_t merge,vint16m2_t op1,vint16m2_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint16m4_t test___riscv_vmul_tu(vint16m4_t merge,vint16m4_t op1,vint16m4_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint16m8_t test___riscv_vmul_tu(vint16m8_t merge,vint16m8_t op1,vint16m8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint32mf2_t test___riscv_vmul_tu(vint32mf2_t merge,vint32mf2_t op1,vint32mf2_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint32m1_t test___riscv_vmul_tu(vint32m1_t merge,vint32m1_t op1,vint32m1_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint32m2_t test___riscv_vmul_tu(vint32m2_t merge,vint32m2_t op1,vint32m2_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint32m4_t test___riscv_vmul_tu(vint32m4_t merge,vint32m4_t op1,vint32m4_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint32m8_t test___riscv_vmul_tu(vint32m8_t merge,vint32m8_t op1,vint32m8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint64m1_t test___riscv_vmul_tu(vint64m1_t merge,vint64m1_t op1,vint64m1_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint64m2_t test___riscv_vmul_tu(vint64m2_t merge,vint64m2_t op1,vint64m2_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint64m4_t test___riscv_vmul_tu(vint64m4_t merge,vint64m4_t op1,vint64m4_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vint64m8_t test___riscv_vmul_tu(vint64m8_t merge,vint64m8_t op1,vint64m8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint8mf8_t test___riscv_vmul_tu(vuint8mf8_t merge,vuint8mf8_t op1,vuint8mf8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint8mf4_t test___riscv_vmul_tu(vuint8mf4_t merge,vuint8mf4_t op1,vuint8mf4_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint8mf2_t test___riscv_vmul_tu(vuint8mf2_t merge,vuint8mf2_t op1,vuint8mf2_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint8m1_t test___riscv_vmul_tu(vuint8m1_t merge,vuint8m1_t op1,vuint8m1_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint8m2_t test___riscv_vmul_tu(vuint8m2_t merge,vuint8m2_t op1,vuint8m2_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint8m4_t test___riscv_vmul_tu(vuint8m4_t merge,vuint8m4_t op1,vuint8m4_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint8m8_t test___riscv_vmul_tu(vuint8m8_t merge,vuint8m8_t op1,vuint8m8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint16mf4_t test___riscv_vmul_tu(vuint16mf4_t merge,vuint16mf4_t op1,vuint16mf4_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint16mf2_t test___riscv_vmul_tu(vuint16mf2_t merge,vuint16mf2_t op1,vuint16mf2_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint16m1_t test___riscv_vmul_tu(vuint16m1_t merge,vuint16m1_t op1,vuint16m1_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint16m2_t test___riscv_vmul_tu(vuint16m2_t merge,vuint16m2_t op1,vuint16m2_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint16m4_t test___riscv_vmul_tu(vuint16m4_t merge,vuint16m4_t op1,vuint16m4_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint16m8_t test___riscv_vmul_tu(vuint16m8_t merge,vuint16m8_t op1,vuint16m8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint32mf2_t test___riscv_vmul_tu(vuint32mf2_t merge,vuint32mf2_t op1,vuint32mf2_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint32m1_t test___riscv_vmul_tu(vuint32m1_t merge,vuint32m1_t op1,vuint32m1_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint32m2_t test___riscv_vmul_tu(vuint32m2_t merge,vuint32m2_t op1,vuint32m2_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint32m4_t test___riscv_vmul_tu(vuint32m4_t merge,vuint32m4_t op1,vuint32m4_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint32m8_t test___riscv_vmul_tu(vuint32m8_t merge,vuint32m8_t op1,vuint32m8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint64m1_t test___riscv_vmul_tu(vuint64m1_t merge,vuint64m1_t op1,vuint64m1_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint64m2_t test___riscv_vmul_tu(vuint64m2_t merge,vuint64m2_t op1,vuint64m2_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint64m4_t test___riscv_vmul_tu(vuint64m4_t merge,vuint64m4_t op1,vuint64m4_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+vuint64m8_t test___riscv_vmul_tu(vuint64m8_t merge,vuint64m8_t op1,vuint64m8_t op2,size_t vl)
+{
+    return __riscv_vmul_tu(merge,op1,op2,32);
+}
+
+
+
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 2 } } */
diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vv_tum-1.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vv_tum-1.C
new file mode 100644
index 00000000000..6cdc8dfe38e
--- /dev/null
+++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vv_tum-1.C
@@ -0,0 +1,292 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
+vint8mf8_t test___riscv_vmul_tum(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,vint8mf8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint8mf4_t test___riscv_vmul_tum(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,vint8mf4_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint8mf2_t test___riscv_vmul_tum(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,vint8mf2_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint8m1_t test___riscv_vmul_tum(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,vint8m1_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint8m2_t test___riscv_vmul_tum(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,vint8m2_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint8m4_t test___riscv_vmul_tum(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,vint8m4_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint8m8_t test___riscv_vmul_tum(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,vint8m8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint16mf4_t test___riscv_vmul_tum(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,vint16mf4_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint16mf2_t test___riscv_vmul_tum(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,vint16mf2_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint16m1_t test___riscv_vmul_tum(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,vint16m1_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint16m2_t test___riscv_vmul_tum(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,vint16m2_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint16m4_t test___riscv_vmul_tum(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,vint16m4_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint16m8_t test___riscv_vmul_tum(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,vint16m8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint32mf2_t test___riscv_vmul_tum(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,vint32mf2_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint32m1_t test___riscv_vmul_tum(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,vint32m1_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint32m2_t test___riscv_vmul_tum(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,vint32m2_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint32m4_t test___riscv_vmul_tum(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,vint32m4_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint32m8_t test___riscv_vmul_tum(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,vint32m8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint64m1_t test___riscv_vmul_tum(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,vint64m1_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint64m2_t test___riscv_vmul_tum(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,vint64m2_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint64m4_t test___riscv_vmul_tum(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,vint64m4_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vint64m8_t test___riscv_vmul_tum(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,vint64m8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint8mf8_t test___riscv_vmul_tum(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,vuint8mf8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint8mf4_t test___riscv_vmul_tum(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,vuint8mf4_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint8mf2_t test___riscv_vmul_tum(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,vuint8mf2_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint8m1_t test___riscv_vmul_tum(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,vuint8m1_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint8m2_t test___riscv_vmul_tum(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,vuint8m2_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint8m4_t test___riscv_vmul_tum(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,vuint8m4_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint8m8_t test___riscv_vmul_tum(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,vuint8m8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint16mf4_t test___riscv_vmul_tum(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,vuint16mf4_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint16mf2_t test___riscv_vmul_tum(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,vuint16mf2_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint16m1_t test___riscv_vmul_tum(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,vuint16m1_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint16m2_t test___riscv_vmul_tum(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,vuint16m2_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint16m4_t test___riscv_vmul_tum(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,vuint16m4_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint16m8_t test___riscv_vmul_tum(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,vuint16m8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint32mf2_t test___riscv_vmul_tum(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,vuint32mf2_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint32m1_t test___riscv_vmul_tum(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,vuint32m1_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint32m2_t test___riscv_vmul_tum(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,vuint32m2_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint32m4_t test___riscv_vmul_tum(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,vuint32m4_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint32m8_t test___riscv_vmul_tum(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,vuint32m8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint64m1_t test___riscv_vmul_tum(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,vuint64m1_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint64m2_t test___riscv_vmul_tum(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,vuint64m2_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint64m4_t test___riscv_vmul_tum(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,vuint64m4_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+vuint64m8_t test___riscv_vmul_tum(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,vuint64m8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,vl);
+}
+
+
+
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vv_tum-2.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vv_tum-2.C
new file mode 100644
index 00000000000..4f352634bb5
--- /dev/null
+++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vv_tum-2.C
@@ -0,0 +1,292 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
+vint8mf8_t test___riscv_vmul_tum(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,vint8mf8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint8mf4_t test___riscv_vmul_tum(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,vint8mf4_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint8mf2_t test___riscv_vmul_tum(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,vint8mf2_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint8m1_t test___riscv_vmul_tum(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,vint8m1_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint8m2_t test___riscv_vmul_tum(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,vint8m2_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint8m4_t test___riscv_vmul_tum(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,vint8m4_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint8m8_t test___riscv_vmul_tum(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,vint8m8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint16mf4_t test___riscv_vmul_tum(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,vint16mf4_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint16mf2_t test___riscv_vmul_tum(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,vint16mf2_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint16m1_t test___riscv_vmul_tum(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,vint16m1_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint16m2_t test___riscv_vmul_tum(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,vint16m2_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint16m4_t test___riscv_vmul_tum(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,vint16m4_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint16m8_t test___riscv_vmul_tum(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,vint16m8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint32mf2_t test___riscv_vmul_tum(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,vint32mf2_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint32m1_t test___riscv_vmul_tum(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,vint32m1_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint32m2_t test___riscv_vmul_tum(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,vint32m2_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint32m4_t test___riscv_vmul_tum(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,vint32m4_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint32m8_t test___riscv_vmul_tum(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,vint32m8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint64m1_t test___riscv_vmul_tum(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,vint64m1_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint64m2_t test___riscv_vmul_tum(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,vint64m2_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint64m4_t test___riscv_vmul_tum(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,vint64m4_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vint64m8_t test___riscv_vmul_tum(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,vint64m8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint8mf8_t test___riscv_vmul_tum(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,vuint8mf8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint8mf4_t test___riscv_vmul_tum(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,vuint8mf4_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint8mf2_t test___riscv_vmul_tum(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,vuint8mf2_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint8m1_t test___riscv_vmul_tum(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,vuint8m1_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint8m2_t test___riscv_vmul_tum(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,vuint8m2_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint8m4_t test___riscv_vmul_tum(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,vuint8m4_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint8m8_t test___riscv_vmul_tum(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,vuint8m8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint16mf4_t test___riscv_vmul_tum(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,vuint16mf4_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint16mf2_t test___riscv_vmul_tum(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,vuint16mf2_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint16m1_t test___riscv_vmul_tum(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,vuint16m1_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint16m2_t test___riscv_vmul_tum(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,vuint16m2_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint16m4_t test___riscv_vmul_tum(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,vuint16m4_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint16m8_t test___riscv_vmul_tum(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,vuint16m8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint32mf2_t test___riscv_vmul_tum(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,vuint32mf2_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint32m1_t test___riscv_vmul_tum(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,vuint32m1_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint32m2_t test___riscv_vmul_tum(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,vuint32m2_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint32m4_t test___riscv_vmul_tum(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,vuint32m4_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint32m8_t test___riscv_vmul_tum(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,vuint32m8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint64m1_t test___riscv_vmul_tum(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,vuint64m1_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint64m2_t test___riscv_vmul_tum(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,vuint64m2_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint64m4_t test___riscv_vmul_tum(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,vuint64m4_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+vuint64m8_t test___riscv_vmul_tum(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,vuint64m8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,31);
+}
+
+
+
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf8,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf4,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m1,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m4,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m8,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf4,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m1,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m4,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m8,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m1,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m4,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m8,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m1,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m4,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m8,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vv_tum-3.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vv_tum-3.C
new file mode 100644
index 00000000000..f8faa9a8046
--- /dev/null
+++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vv_tum-3.C
@@ -0,0 +1,292 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
+vint8mf8_t test___riscv_vmul_tum(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,vint8mf8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint8mf4_t test___riscv_vmul_tum(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,vint8mf4_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint8mf2_t test___riscv_vmul_tum(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,vint8mf2_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint8m1_t test___riscv_vmul_tum(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,vint8m1_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint8m2_t test___riscv_vmul_tum(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,vint8m2_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint8m4_t test___riscv_vmul_tum(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,vint8m4_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint8m8_t test___riscv_vmul_tum(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,vint8m8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint16mf4_t test___riscv_vmul_tum(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,vint16mf4_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint16mf2_t test___riscv_vmul_tum(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,vint16mf2_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint16m1_t test___riscv_vmul_tum(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,vint16m1_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint16m2_t test___riscv_vmul_tum(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,vint16m2_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint16m4_t test___riscv_vmul_tum(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,vint16m4_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint16m8_t test___riscv_vmul_tum(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,vint16m8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint32mf2_t test___riscv_vmul_tum(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,vint32mf2_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint32m1_t test___riscv_vmul_tum(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,vint32m1_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint32m2_t test___riscv_vmul_tum(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,vint32m2_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint32m4_t test___riscv_vmul_tum(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,vint32m4_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint32m8_t test___riscv_vmul_tum(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,vint32m8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint64m1_t test___riscv_vmul_tum(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,vint64m1_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint64m2_t test___riscv_vmul_tum(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,vint64m2_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint64m4_t test___riscv_vmul_tum(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,vint64m4_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vint64m8_t test___riscv_vmul_tum(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,vint64m8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint8mf8_t test___riscv_vmul_tum(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,vuint8mf8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint8mf4_t test___riscv_vmul_tum(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,vuint8mf4_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint8mf2_t test___riscv_vmul_tum(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,vuint8mf2_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint8m1_t test___riscv_vmul_tum(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,vuint8m1_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint8m2_t test___riscv_vmul_tum(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,vuint8m2_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint8m4_t test___riscv_vmul_tum(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,vuint8m4_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint8m8_t test___riscv_vmul_tum(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,vuint8m8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint16mf4_t test___riscv_vmul_tum(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,vuint16mf4_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint16mf2_t test___riscv_vmul_tum(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,vuint16mf2_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint16m1_t test___riscv_vmul_tum(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,vuint16m1_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint16m2_t test___riscv_vmul_tum(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,vuint16m2_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint16m4_t test___riscv_vmul_tum(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,vuint16m4_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint16m8_t test___riscv_vmul_tum(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,vuint16m8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint32mf2_t test___riscv_vmul_tum(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,vuint32mf2_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint32m1_t test___riscv_vmul_tum(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,vuint32m1_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint32m2_t test___riscv_vmul_tum(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,vuint32m2_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint32m4_t test___riscv_vmul_tum(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,vuint32m4_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint32m8_t test___riscv_vmul_tum(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,vuint32m8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint64m1_t test___riscv_vmul_tum(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,vuint64m1_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint64m2_t test___riscv_vmul_tum(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,vuint64m2_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint64m4_t test___riscv_vmul_tum(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,vuint64m4_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+vuint64m8_t test___riscv_vmul_tum(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,vuint64m8_t op2,size_t vl)
+{
+    return __riscv_vmul_tum(mask,merge,op1,op2,32);
+}
+
+
+
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*tu,\s*m[au]\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vv_tumu-1.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vv_tumu-1.C
new file mode 100644
index 00000000000..8e9b0236041
--- /dev/null
+++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vv_tumu-1.C
@@ -0,0 +1,292 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
+vint8mf8_t test___riscv_vmul_tumu(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,vint8mf8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint8mf4_t test___riscv_vmul_tumu(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,vint8mf4_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint8mf2_t test___riscv_vmul_tumu(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,vint8mf2_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint8m1_t test___riscv_vmul_tumu(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,vint8m1_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint8m2_t test___riscv_vmul_tumu(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,vint8m2_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint8m4_t test___riscv_vmul_tumu(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,vint8m4_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint8m8_t test___riscv_vmul_tumu(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,vint8m8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint16mf4_t test___riscv_vmul_tumu(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,vint16mf4_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint16mf2_t test___riscv_vmul_tumu(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,vint16mf2_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint16m1_t test___riscv_vmul_tumu(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,vint16m1_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint16m2_t test___riscv_vmul_tumu(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,vint16m2_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint16m4_t test___riscv_vmul_tumu(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,vint16m4_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint16m8_t test___riscv_vmul_tumu(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,vint16m8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint32mf2_t test___riscv_vmul_tumu(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,vint32mf2_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint32m1_t test___riscv_vmul_tumu(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,vint32m1_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint32m2_t test___riscv_vmul_tumu(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,vint32m2_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint32m4_t test___riscv_vmul_tumu(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,vint32m4_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint32m8_t test___riscv_vmul_tumu(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,vint32m8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint64m1_t test___riscv_vmul_tumu(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,vint64m1_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint64m2_t test___riscv_vmul_tumu(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,vint64m2_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint64m4_t test___riscv_vmul_tumu(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,vint64m4_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vint64m8_t test___riscv_vmul_tumu(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,vint64m8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint8mf8_t test___riscv_vmul_tumu(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,vuint8mf8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint8mf4_t test___riscv_vmul_tumu(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,vuint8mf4_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint8mf2_t test___riscv_vmul_tumu(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,vuint8mf2_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint8m1_t test___riscv_vmul_tumu(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,vuint8m1_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint8m2_t test___riscv_vmul_tumu(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,vuint8m2_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint8m4_t test___riscv_vmul_tumu(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,vuint8m4_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint8m8_t test___riscv_vmul_tumu(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,vuint8m8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint16mf4_t test___riscv_vmul_tumu(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,vuint16mf4_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint16mf2_t test___riscv_vmul_tumu(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,vuint16mf2_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint16m1_t test___riscv_vmul_tumu(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,vuint16m1_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint16m2_t test___riscv_vmul_tumu(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,vuint16m2_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint16m4_t test___riscv_vmul_tumu(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,vuint16m4_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint16m8_t test___riscv_vmul_tumu(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,vuint16m8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint32mf2_t test___riscv_vmul_tumu(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,vuint32mf2_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint32m1_t test___riscv_vmul_tumu(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,vuint32m1_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint32m2_t test___riscv_vmul_tumu(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,vuint32m2_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint32m4_t test___riscv_vmul_tumu(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,vuint32m4_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint32m8_t test___riscv_vmul_tumu(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,vuint32m8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint64m1_t test___riscv_vmul_tumu(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,vuint64m1_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint64m2_t test___riscv_vmul_tumu(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,vuint64m2_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint64m4_t test___riscv_vmul_tumu(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,vuint64m4_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+vuint64m8_t test___riscv_vmul_tumu(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,vuint64m8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,vl);
+}
+
+
+
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vv_tumu-2.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vv_tumu-2.C
new file mode 100644
index 00000000000..f229685822a
--- /dev/null
+++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vv_tumu-2.C
@@ -0,0 +1,292 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
+vint8mf8_t test___riscv_vmul_tumu(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,vint8mf8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint8mf4_t test___riscv_vmul_tumu(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,vint8mf4_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint8mf2_t test___riscv_vmul_tumu(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,vint8mf2_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint8m1_t test___riscv_vmul_tumu(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,vint8m1_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint8m2_t test___riscv_vmul_tumu(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,vint8m2_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint8m4_t test___riscv_vmul_tumu(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,vint8m4_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint8m8_t test___riscv_vmul_tumu(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,vint8m8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint16mf4_t test___riscv_vmul_tumu(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,vint16mf4_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint16mf2_t test___riscv_vmul_tumu(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,vint16mf2_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint16m1_t test___riscv_vmul_tumu(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,vint16m1_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint16m2_t test___riscv_vmul_tumu(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,vint16m2_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint16m4_t test___riscv_vmul_tumu(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,vint16m4_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint16m8_t test___riscv_vmul_tumu(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,vint16m8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint32mf2_t test___riscv_vmul_tumu(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,vint32mf2_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint32m1_t test___riscv_vmul_tumu(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,vint32m1_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint32m2_t test___riscv_vmul_tumu(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,vint32m2_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint32m4_t test___riscv_vmul_tumu(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,vint32m4_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint32m8_t test___riscv_vmul_tumu(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,vint32m8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint64m1_t test___riscv_vmul_tumu(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,vint64m1_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint64m2_t test___riscv_vmul_tumu(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,vint64m2_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint64m4_t test___riscv_vmul_tumu(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,vint64m4_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vint64m8_t test___riscv_vmul_tumu(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,vint64m8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint8mf8_t test___riscv_vmul_tumu(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,vuint8mf8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint8mf4_t test___riscv_vmul_tumu(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,vuint8mf4_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint8mf2_t test___riscv_vmul_tumu(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,vuint8mf2_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint8m1_t test___riscv_vmul_tumu(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,vuint8m1_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint8m2_t test___riscv_vmul_tumu(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,vuint8m2_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint8m4_t test___riscv_vmul_tumu(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,vuint8m4_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint8m8_t test___riscv_vmul_tumu(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,vuint8m8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint16mf4_t test___riscv_vmul_tumu(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,vuint16mf4_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint16mf2_t test___riscv_vmul_tumu(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,vuint16mf2_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint16m1_t test___riscv_vmul_tumu(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,vuint16m1_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint16m2_t test___riscv_vmul_tumu(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,vuint16m2_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint16m4_t test___riscv_vmul_tumu(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,vuint16m4_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint16m8_t test___riscv_vmul_tumu(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,vuint16m8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint32mf2_t test___riscv_vmul_tumu(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,vuint32mf2_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint32m1_t test___riscv_vmul_tumu(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,vuint32m1_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint32m2_t test___riscv_vmul_tumu(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,vuint32m2_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint32m4_t test___riscv_vmul_tumu(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,vuint32m4_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint32m8_t test___riscv_vmul_tumu(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,vuint32m8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint64m1_t test___riscv_vmul_tumu(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,vuint64m1_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint64m2_t test___riscv_vmul_tumu(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,vuint64m2_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint64m4_t test___riscv_vmul_tumu(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,vuint64m4_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+vuint64m8_t test___riscv_vmul_tumu(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,vuint64m8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,31);
+}
+
+
+
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf8,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf4,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf2,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m1,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m2,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m4,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m8,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf4,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf2,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m1,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m2,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m4,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m8,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*mf2,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m1,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m2,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m4,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m8,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m1,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m2,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m4,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m8,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vv_tumu-3.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vv_tumu-3.C
new file mode 100644
index 00000000000..c479deaf38b
--- /dev/null
+++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vv_tumu-3.C
@@ -0,0 +1,292 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
+vint8mf8_t test___riscv_vmul_tumu(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,vint8mf8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint8mf4_t test___riscv_vmul_tumu(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,vint8mf4_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint8mf2_t test___riscv_vmul_tumu(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,vint8mf2_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint8m1_t test___riscv_vmul_tumu(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,vint8m1_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint8m2_t test___riscv_vmul_tumu(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,vint8m2_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint8m4_t test___riscv_vmul_tumu(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,vint8m4_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint8m8_t test___riscv_vmul_tumu(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,vint8m8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint16mf4_t test___riscv_vmul_tumu(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,vint16mf4_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint16mf2_t test___riscv_vmul_tumu(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,vint16mf2_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint16m1_t test___riscv_vmul_tumu(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,vint16m1_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint16m2_t test___riscv_vmul_tumu(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,vint16m2_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint16m4_t test___riscv_vmul_tumu(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,vint16m4_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint16m8_t test___riscv_vmul_tumu(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,vint16m8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint32mf2_t test___riscv_vmul_tumu(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,vint32mf2_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint32m1_t test___riscv_vmul_tumu(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,vint32m1_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint32m2_t test___riscv_vmul_tumu(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,vint32m2_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint32m4_t test___riscv_vmul_tumu(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,vint32m4_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint32m8_t test___riscv_vmul_tumu(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,vint32m8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint64m1_t test___riscv_vmul_tumu(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,vint64m1_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint64m2_t test___riscv_vmul_tumu(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,vint64m2_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint64m4_t test___riscv_vmul_tumu(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,vint64m4_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vint64m8_t test___riscv_vmul_tumu(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,vint64m8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint8mf8_t test___riscv_vmul_tumu(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,vuint8mf8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint8mf4_t test___riscv_vmul_tumu(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,vuint8mf4_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint8mf2_t test___riscv_vmul_tumu(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,vuint8mf2_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint8m1_t test___riscv_vmul_tumu(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,vuint8m1_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint8m2_t test___riscv_vmul_tumu(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,vuint8m2_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint8m4_t test___riscv_vmul_tumu(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,vuint8m4_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint8m8_t test___riscv_vmul_tumu(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,vuint8m8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint16mf4_t test___riscv_vmul_tumu(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,vuint16mf4_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint16mf2_t test___riscv_vmul_tumu(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,vuint16mf2_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint16m1_t test___riscv_vmul_tumu(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,vuint16m1_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint16m2_t test___riscv_vmul_tumu(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,vuint16m2_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint16m4_t test___riscv_vmul_tumu(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,vuint16m4_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint16m8_t test___riscv_vmul_tumu(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,vuint16m8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint32mf2_t test___riscv_vmul_tumu(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,vuint32mf2_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint32m1_t test___riscv_vmul_tumu(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,vuint32m1_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint32m2_t test___riscv_vmul_tumu(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,vuint32m2_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint32m4_t test___riscv_vmul_tumu(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,vuint32m4_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint32m8_t test___riscv_vmul_tumu(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,vuint32m8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint64m1_t test___riscv_vmul_tumu(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,vuint64m1_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint64m2_t test___riscv_vmul_tumu(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,vuint64m2_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint64m4_t test___riscv_vmul_tumu(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,vuint64m4_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+vuint64m8_t test___riscv_vmul_tumu(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,vuint64m8_t op2,size_t vl)
+{
+    return __riscv_vmul_tumu(mask,merge,op1,op2,32);
+}
+
+
+
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*tu,\s*mu\s+vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 2 } } */
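
For reference, the only difference between the two files above is the vector-length argument: vmul_vv_tumu-2.C passes the literal 31, which fits the 5-bit unsigned immediate of vsetivli, so its scan patterns expect "vsetivli zero,31,...", while vmul_vv_tumu-3.C passes 32, which does not fit the immediate form, so its patterns expect the register form "vsetvli zero,<reg>,...". A minimal standalone sketch of that contrast (hypothetical wrapper names, same intrinsic, types and dg-options as the tests in this patch; not part of the committed files):

/* Illustrative sketch only -- compile with the options used by these
   tests, e.g. -march=rv64gcv -mabi=lp64d -O3.  */
#include "riscv_vector.h"

vint32m1_t mul_tumu_vl31(vbool32_t mask, vint32m1_t merge,
                         vint32m1_t op1, vint32m1_t op2)
{
    /* vl = 31 fits vsetivli's 5-bit immediate, so the expected
       sequence is "vsetivli zero,31,e32,m1,tu,mu" + masked vmul.vv.  */
    return __riscv_vmul_tumu(mask, merge, op1, op2, 31);
}

vint32m1_t mul_tumu_vl32(vbool32_t mask, vint32m1_t merge,
                         vint32m1_t op1, vint32m1_t op2)
{
    /* vl = 32 exceeds the immediate range, so the compiler is expected
       to materialise it in a register and emit
       "vsetvli zero,<reg>,e32,m1,tu,mu" + masked vmul.vv.  */
    return __riscv_vmul_tumu(mask, merge, op1, op2, 32);
}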
