From: juzhe.zhong@rivai.ai
To: gcc-patches@gcc.gnu.org
Cc: kito.cheng@gmail.com, palmer@dabbelt.com, juzhe.zhong@rivai.ai
Subject: [PATCH 12/21] Add vset/vget intrinsic support
Date: Tue, 31 May 2022 16:50:03 +0800
Message-ID: <20220531085012.269719-13-juzhe.zhong@rivai.ai>
In-Reply-To: <20220531085012.269719-1-juzhe.zhong@rivai.ai>
From: zhongjuzhe <juzhe.zhong@rivai.ai>
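
This patch adds the vset and vget intrinsics.  vset inserts a smaller
LMUL vector value into one part of a wider LMUL register group, and
vget extracts one part of a wider group into a smaller LMUL vector.
Both take an index selecting which part of the group is accessed, and
the builtins are only registered when the element types match and the
part type is strictly narrower than the group type.
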
gcc/ChangeLog:

	* config/riscv/riscv-vector-builtins-functions.cc (vset::assemble_name): New function.
	(vset::get_argument_types): New function.
	(vset::expand): New function.
	(vget::assemble_name): New function.
	(vget::get_argument_types): New function.
	(vget::expand): New function.
	* config/riscv/riscv-vector-builtins-functions.def (vset): New macro definition.
	(vget): New macro definition.
	* config/riscv/riscv-vector-builtins-functions.h (class vset): New class.
	(class vget): New class.

gcc/testsuite/ChangeLog:

	* g++.target/riscv/rvv/set-get.C: New test.
	* gcc.target/riscv/rvv/intrinsic/set-get.c: New test.
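
The new tests exercise both the type-explicit names (gcc.target test)
and the shorter overloaded forms (g++.target test).  As a rough
illustration, assuming <riscv_vector.h> is included (the helper names
here are made up; the intrinsic names and types are the ones used in
the tests below):

  vint8m4_t
  insert_part (vint8m4_t dest, vint8m1_t val)
  {
    /* Overwrite part 1 of the four vint8m1_t-sized parts of DEST,
       leaving the other parts unchanged.  */
    return vset_v_i8m1_i8m4 (dest, 1, val);
  }

  vint8m1_t
  extract_part (vint8m4_t src)
  {
    /* Read part 1 of SRC back out.  */
    return vget_v_i8m4_i8m1 (src, 1);
  }

These are expected to lower to whole-register moves (vmv1r.v for an
m1-sized part), which is what the scan-assembler patterns at the end
of each test check.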
---
.../riscv/riscv-vector-builtins-functions.cc | 73 ++
.../riscv/riscv-vector-builtins-functions.def | 6 +
.../riscv/riscv-vector-builtins-functions.h | 28 +
gcc/testsuite/g++.target/riscv/rvv/set-get.C | 730 ++++++++++++++++++
.../gcc.target/riscv/rvv/intrinsic/set-get.c | 730 ++++++++++++++++++
5 files changed, 1567 insertions(+)
create mode 100644 gcc/testsuite/g++.target/riscv/rvv/set-get.C
create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/intrinsic/set-get.c
diff --git a/gcc/config/riscv/riscv-vector-builtins-functions.cc b/gcc/config/riscv/riscv-vector-builtins-functions.cc
index fa39eedcd86..9d2895c3d3e 100644
--- a/gcc/config/riscv/riscv-vector-builtins-functions.cc
+++ b/gcc/config/riscv/riscv-vector-builtins-functions.cc
@@ -1510,6 +1510,79 @@ vundefined::expand (const function_instance &, tree, rtx target) const
return target;
}
+/* A function implementation for vset functions. */
+char *
+vset::assemble_name (function_instance &instance)
+{
+  machine_mode tmode = instance.get_arg_pattern ().arg_list[0];
+  machine_mode smode = instance.get_arg_pattern ().arg_list[2];
+  if (GET_MODE_INNER (tmode) != GET_MODE_INNER (smode))
+    return nullptr;
+
+  if (tmode == smode)
+    return nullptr;
+
+  if (known_lt (GET_MODE_SIZE (tmode), GET_MODE_SIZE (smode)))
+    return nullptr;
+
+  intrinsic_rename (instance, 0, 2);
+  append_name (instance.get_base_name ());
+  return finish_name ();
+}
+
+void
+vset::get_argument_types (const function_instance &instance,
+                          vec<tree> &argument_types) const
+{
+  misc::get_argument_types (instance, argument_types);
+  argument_types.quick_push (size_type_node);
+  argument_types.quick_push (get_dt_t_with_index (instance, 2));
+}
+
+rtx
+vset::expand (const function_instance &instance, tree exp, rtx target) const
+{
+  enum insn_code icode = code_for_vset (instance.get_arg_pattern ().arg_list[0]);
+  return expand_builtin_insn (icode, exp, target, instance);
+}
+
+/* A function implementation for vget functions. */
+char *
+vget::assemble_name (function_instance &instance)
+{
+  machine_mode tmode = instance.get_arg_pattern ().arg_list[0];
+  machine_mode smode = instance.get_arg_pattern ().arg_list[1];
+  if (GET_MODE_INNER (tmode) != GET_MODE_INNER (smode))
+    return nullptr;
+
+  if (tmode == smode)
+    return nullptr;
+
+  if (known_gt (GET_MODE_SIZE (tmode), GET_MODE_SIZE (smode)))
+    return nullptr;
+
+  bool unsigned_p = instance.get_data_type_list ()[0] == DT_unsigned;
+  intrinsic_rename (instance, 0, 1);
+  append_name (instance.get_base_name ());
+  append_name (mode2data_type_str (tmode, unsigned_p, false));
+  return finish_name ();
+}
+
+void
+vget::get_argument_types (const function_instance &instance,
+                          vec<tree> &argument_types) const
+{
+  misc::get_argument_types (instance, argument_types);
+  argument_types.quick_push (size_type_node);
+}
+
+rtx
+vget::expand (const function_instance &instance, tree exp, rtx target) const
+{
+  enum insn_code icode = code_for_vget (instance.get_arg_pattern ().arg_list[0]);
+  return expand_builtin_insn (icode, exp, target, instance);
+}
+
/* A function implementation for loadstore functions. */
char *
loadstore::assemble_name (function_instance &instance)
diff --git a/gcc/config/riscv/riscv-vector-builtins-functions.def b/gcc/config/riscv/riscv-vector-builtins-functions.def
index deb32ccd031..739ae60fff5 100644
--- a/gcc/config/riscv/riscv-vector-builtins-functions.def
+++ b/gcc/config/riscv/riscv-vector-builtins-functions.def
@@ -56,6 +56,12 @@ DEF_RVV_FUNCTION(vlmul_trunc, vlmul_trunc, (2, VITER(VLMULTRUNC, signed), VITER(
DEF_RVV_FUNCTION(vundefined, vundefined, (1, VITER(VI, signed)), PAT_none, PRED_none, OP_none)
DEF_RVV_FUNCTION(vundefined, vundefined, (1, VITER(VI, unsigned)), PAT_none, PRED_none, OP_none)
DEF_RVV_FUNCTION(vundefined, vundefined, (1, VITER(VF, signed)), PAT_none, PRED_none, OP_none)
+DEF_RVV_FUNCTION(vset, vset, (3, VITER(VSETI, signed), VATTR(0, VSETI, signed), VITER(VFULL, signed)), PAT_none, PRED_none, OP_v)
+DEF_RVV_FUNCTION(vset, vset, (3, VITER(VSETI, unsigned), VATTR(0, VSETI, unsigned), VITER(VFULL, unsigned)), PAT_none, PRED_none, OP_v)
+DEF_RVV_FUNCTION(vset, vset, (3, VITER(VSETF, signed), VATTR(0, VSETF, signed), VITER(VFULL, signed)), PAT_none, PRED_none, OP_v)
+DEF_RVV_FUNCTION(vget, vget, (2, VITER(VGETI, signed), VITER(VFULL, signed)), PAT_none, PRED_none, OP_v)
+DEF_RVV_FUNCTION(vget, vget, (2, VITER(VGETI, unsigned), VITER(VFULL, unsigned)), PAT_none, PRED_none, OP_v)
+DEF_RVV_FUNCTION(vget, vget, (2, VITER(VGETF, signed), VITER(VFULL, signed)), PAT_none, PRED_none, OP_v)
/* 7. Vector Loads and Stores. */
DEF_RVV_FUNCTION(vle, vle, (2, VITER(VI, signed), VATTR(0, VSUB, c_ptr)), pat_mask_tail, pred_all, OP_v)
DEF_RVV_FUNCTION(vle, vle, (2, VITER(VI, unsigned), VATTR(0, VSUB, c_uptr)), pat_mask_tail, pred_all, OP_v)
diff --git a/gcc/config/riscv/riscv-vector-builtins-functions.h b/gcc/config/riscv/riscv-vector-builtins-functions.h
index c9e1b2a34ca..90063005024 100644
--- a/gcc/config/riscv/riscv-vector-builtins-functions.h
+++ b/gcc/config/riscv/riscv-vector-builtins-functions.h
@@ -584,6 +584,34 @@ public:
virtual rtx expand (const function_instance &, tree, rtx) const override;
};
+/* A function_base for vset functions. */
+class vset : public misc
+{
+public:
+  // use the same constructor as misc
+  using misc::misc;
+
+  virtual char * assemble_name (function_instance &) override;
+
+  virtual void get_argument_types (const function_instance &, vec<tree> &) const override;
+
+  virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
+/* A function_base for vget functions. */
+class vget : public misc
+{
+public:
+  // use the same constructor as misc
+  using misc::misc;
+
+  virtual char * assemble_name (function_instance &) override;
+
+  virtual void get_argument_types (const function_instance &, vec<tree> &) const override;
+
+  virtual rtx expand (const function_instance &, tree, rtx) const override;
+};
+
/* A function_base for loadstore functions. */
class loadstore : public function_builder
{
diff --git a/gcc/testsuite/g++.target/riscv/rvv/set-get.C b/gcc/testsuite/g++.target/riscv/rvv/set-get.C
new file mode 100644
index 00000000000..7c8deb96a39
--- /dev/null
+++ b/gcc/testsuite/g++.target/riscv/rvv/set-get.C
@@ -0,0 +1,730 @@
+/* { dg-do compile } */
+/* { dg-skip-if "test vector intrinsic" { *-*-* } { "*" } { "-march=rv*v*" } } */
+
+#include <stddef.h>
+#include <riscv_vector.h>
+
+
+vint8m2_t
+test_vset_v_i8m1_i8m2 (vint8m2_t dest, vint8m1_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vint8m4_t
+test_vset_v_i8m1_i8m4 (vint8m4_t dest, vint8m1_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vint8m4_t
+test_vset_v_i8m2_i8m4 (vint8m4_t dest, vint8m2_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vint8m8_t
+test_vset_v_i8m1_i8m8 (vint8m8_t dest, vint8m1_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vint8m8_t
+test_vset_v_i8m2_i8m8 (vint8m8_t dest, vint8m2_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vint8m8_t
+test_vset_v_i8m4_i8m8 (vint8m8_t dest, vint8m4_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vint8m1_t
+test_vget_v_i8m2_i8m1 (vint8m2_t src)
+{
+ return vget_i8m1(src, 1);
+}
+
+vint8m1_t
+test_vget_v_i8m4_i8m1 (vint8m4_t src)
+{
+ return vget_i8m1(src, 1);
+}
+
+vint8m1_t
+test_vget_v_i8m8_i8m1 (vint8m8_t src)
+{
+ return vget_i8m1(src, 1);
+}
+
+vint8m2_t
+test_vget_v_i8m4_i8m2 (vint8m4_t src)
+{
+ return vget_i8m2(src, 1);
+}
+
+vint8m2_t
+test_vget_v_i8m8_i8m2 (vint8m8_t src)
+{
+ return vget_i8m2(src, 1);
+}
+
+vint8m4_t
+test_vget_v_i8m8_i8m4 (vint8m8_t src)
+{
+ return vget_i8m4(src, 1);
+}
+
+vint16m2_t
+test_vset_v_i16m1_i16m2 (vint16m2_t dest, vint16m1_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vint16m4_t
+test_vset_v_i16m1_i16m4 (vint16m4_t dest, vint16m1_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vint16m4_t
+test_vset_v_i16m2_i16m4 (vint16m4_t dest, vint16m2_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vint16m8_t
+test_vset_v_i16m1_i16m8 (vint16m8_t dest, vint16m1_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vint16m8_t
+test_vset_v_i16m2_i16m8 (vint16m8_t dest, vint16m2_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vint16m8_t
+test_vset_v_i16m4_i16m8 (vint16m8_t dest, vint16m4_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vint16m1_t
+test_vget_v_i16m2_i16m1 (vint16m2_t src)
+{
+ return vget_i16m1(src, 1);
+}
+
+vint16m1_t
+test_vget_v_i16m4_i16m1 (vint16m4_t src)
+{
+ return vget_i16m1(src, 1);
+}
+
+vint16m1_t
+test_vget_v_i16m8_i16m1 (vint16m8_t src)
+{
+ return vget_i16m1(src, 1);
+}
+
+vint16m2_t
+test_vget_v_i16m4_i16m2 (vint16m4_t src)
+{
+ return vget_i16m2(src, 1);
+}
+
+vint16m2_t
+test_vget_v_i16m8_i16m2 (vint16m8_t src)
+{
+ return vget_i16m2(src, 1);
+}
+
+vint16m4_t
+test_vget_v_i16m8_i16m4 (vint16m8_t src)
+{
+ return vget_i16m4(src, 1);
+}
+
+vint32m2_t
+test_vset_v_i32m1_i32m2 (vint32m2_t dest, vint32m1_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vint32m4_t
+test_vset_v_i32m1_i32m4 (vint32m4_t dest, vint32m1_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vint32m4_t
+test_vset_v_i32m2_i32m4 (vint32m4_t dest, vint32m2_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vint32m8_t
+test_vset_v_i32m1_i32m8 (vint32m8_t dest, vint32m1_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vint32m8_t
+test_vset_v_i32m2_i32m8 (vint32m8_t dest, vint32m2_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vint32m8_t
+test_vset_v_i32m4_i32m8 (vint32m8_t dest, vint32m4_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vint32m1_t
+test_vget_v_i32m2_i32m1 (vint32m2_t src)
+{
+ return vget_i32m1(src, 1);
+}
+
+vint32m1_t
+test_vget_v_i32m4_i32m1 (vint32m4_t src)
+{
+ return vget_i32m1(src, 1);
+}
+
+vint32m1_t
+test_vget_v_i32m8_i32m1 (vint32m8_t src)
+{
+ return vget_i32m1(src, 1);
+}
+
+vint32m2_t
+test_vget_v_i32m4_i32m2 (vint32m4_t src)
+{
+ return vget_i32m2(src, 1);
+}
+
+vint32m2_t
+test_vget_v_i32m8_i32m2 (vint32m8_t src)
+{
+ return vget_i32m2(src, 1);
+}
+
+vint32m4_t
+test_vget_v_i32m8_i32m4 (vint32m8_t src)
+{
+ return vget_i32m4(src, 1);
+}
+
+vint64m2_t
+test_vset_v_i64m1_i64m2 (vint64m2_t dest, vint64m1_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vint64m4_t
+test_vset_v_i64m1_i64m4 (vint64m4_t dest, vint64m1_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vint64m4_t
+test_vset_v_i64m2_i64m4 (vint64m4_t dest, vint64m2_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vint64m8_t
+test_vset_v_i64m1_i64m8 (vint64m8_t dest, vint64m1_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vint64m8_t
+test_vset_v_i64m2_i64m8 (vint64m8_t dest, vint64m2_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vint64m8_t
+test_vset_v_i64m4_i64m8 (vint64m8_t dest, vint64m4_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vint64m1_t
+test_vget_v_i64m2_i64m1 (vint64m2_t src)
+{
+ return vget_i64m1(src, 1);
+}
+
+vint64m1_t
+test_vget_v_i64m4_i64m1 (vint64m4_t src)
+{
+ return vget_i64m1(src, 1);
+}
+
+vint64m1_t
+test_vget_v_i64m8_i64m1 (vint64m8_t src)
+{
+ return vget_i64m1(src, 1);
+}
+
+vint64m2_t
+test_vget_v_i64m4_i64m2 (vint64m4_t src)
+{
+ return vget_i64m2(src, 1);
+}
+
+vint64m2_t
+test_vget_v_i64m8_i64m2 (vint64m8_t src)
+{
+ return vget_i64m2(src, 1);
+}
+
+vint64m4_t
+test_vget_v_i64m8_i64m4 (vint64m8_t src)
+{
+ return vget_i64m4(src, 1);
+}
+
+vuint8m2_t
+test_vset_v_u8m1_u8m2 (vuint8m2_t dest, vuint8m1_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vuint8m4_t
+test_vset_v_u8m1_u8m4 (vuint8m4_t dest, vuint8m1_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vuint8m4_t
+test_vset_v_u8m2_u8m4 (vuint8m4_t dest, vuint8m2_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vuint8m8_t
+test_vset_v_u8m1_u8m8 (vuint8m8_t dest, vuint8m1_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vuint8m8_t
+test_vset_v_u8m2_u8m8 (vuint8m8_t dest, vuint8m2_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vuint8m8_t
+test_vset_v_u8m4_u8m8 (vuint8m8_t dest, vuint8m4_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vuint8m1_t
+test_vget_v_u8m2_u8m1 (vuint8m2_t src)
+{
+ return vget_u8m1(src, 1);
+}
+
+vuint8m1_t
+test_vget_v_u8m4_u8m1 (vuint8m4_t src)
+{
+ return vget_u8m1(src, 1);
+}
+
+vuint8m1_t
+test_vget_v_u8m8_u8m1 (vuint8m8_t src)
+{
+ return vget_u8m1(src, 1);
+}
+
+vuint8m2_t
+test_vget_v_u8m4_u8m2 (vuint8m4_t src)
+{
+ return vget_u8m2(src, 1);
+}
+
+vuint8m2_t
+test_vget_v_u8m8_u8m2 (vuint8m8_t src)
+{
+ return vget_u8m2(src, 1);
+}
+
+vuint8m4_t
+test_vget_v_u8m8_u8m4 (vuint8m8_t src)
+{
+ return vget_u8m4(src, 1);
+}
+
+vuint16m2_t
+test_vset_v_u16m1_u16m2 (vuint16m2_t dest, vuint16m1_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vuint16m4_t
+test_vset_v_u16m1_u16m4 (vuint16m4_t dest, vuint16m1_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vuint16m4_t
+test_vset_v_u16m2_u16m4 (vuint16m4_t dest, vuint16m2_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vuint16m8_t
+test_vset_v_u16m1_u16m8 (vuint16m8_t dest, vuint16m1_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vuint16m8_t
+test_vset_v_u16m2_u16m8 (vuint16m8_t dest, vuint16m2_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vuint16m8_t
+test_vset_v_u16m4_u16m8 (vuint16m8_t dest, vuint16m4_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vuint16m1_t
+test_vget_v_u16m2_u16m1 (vuint16m2_t src)
+{
+ return vget_u16m1(src, 1);
+}
+
+vuint16m1_t
+test_vget_v_u16m4_u16m1 (vuint16m4_t src)
+{
+ return vget_u16m1(src, 1);
+}
+
+vuint16m1_t
+test_vget_v_u16m8_u16m1 (vuint16m8_t src)
+{
+ return vget_u16m1(src, 1);
+}
+
+vuint16m2_t
+test_vget_v_u16m4_u16m2 (vuint16m4_t src)
+{
+ return vget_u16m2(src, 1);
+}
+
+vuint16m2_t
+test_vget_v_u16m8_u16m2 (vuint16m8_t src)
+{
+ return vget_u16m2(src, 1);
+}
+
+vuint16m4_t
+test_vget_v_u16m8_u16m4 (vuint16m8_t src)
+{
+ return vget_u16m4(src, 1);
+}
+
+vuint32m2_t
+test_vset_v_u32m1_u32m2 (vuint32m2_t dest, vuint32m1_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vuint32m4_t
+test_vset_v_u32m1_u32m4 (vuint32m4_t dest, vuint32m1_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vuint32m4_t
+test_vset_v_u32m2_u32m4 (vuint32m4_t dest, vuint32m2_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vuint32m8_t
+test_vset_v_u32m1_u32m8 (vuint32m8_t dest, vuint32m1_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vuint32m8_t
+test_vset_v_u32m2_u32m8 (vuint32m8_t dest, vuint32m2_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vuint32m8_t
+test_vset_v_u32m4_u32m8 (vuint32m8_t dest, vuint32m4_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vuint32m1_t
+test_vget_v_u32m2_u32m1 (vuint32m2_t src)
+{
+ return vget_u32m1(src, 1);
+}
+
+vuint32m1_t
+test_vget_v_u32m4_u32m1 (vuint32m4_t src)
+{
+ return vget_u32m1(src, 1);
+}
+
+vuint32m1_t
+test_vget_v_u32m8_u32m1 (vuint32m8_t src)
+{
+ return vget_u32m1(src, 1);
+}
+
+vuint32m2_t
+test_vget_v_u32m4_u32m2 (vuint32m4_t src)
+{
+ return vget_u32m2(src, 1);
+}
+
+vuint32m2_t
+test_vget_v_u32m8_u32m2 (vuint32m8_t src)
+{
+ return vget_u32m2(src, 1);
+}
+
+vuint32m4_t
+test_vget_v_u32m8_u32m4 (vuint32m8_t src)
+{
+ return vget_u32m4(src, 1);
+}
+
+vuint64m2_t
+test_vset_v_u64m1_u64m2 (vuint64m2_t dest, vuint64m1_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vuint64m4_t
+test_vset_v_u64m1_u64m4 (vuint64m4_t dest, vuint64m1_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vuint64m4_t
+test_vset_v_u64m2_u64m4 (vuint64m4_t dest, vuint64m2_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vuint64m8_t
+test_vset_v_u64m1_u64m8 (vuint64m8_t dest, vuint64m1_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vuint64m8_t
+test_vset_v_u64m2_u64m8 (vuint64m8_t dest, vuint64m2_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vuint64m8_t
+test_vset_v_u64m4_u64m8 (vuint64m8_t dest, vuint64m4_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vuint64m1_t
+test_vget_v_u64m2_u64m1 (vuint64m2_t src)
+{
+ return vget_u64m1(src, 1);
+}
+
+vuint64m1_t
+test_vget_v_u64m4_u64m1 (vuint64m4_t src)
+{
+ return vget_u64m1(src, 1);
+}
+
+vuint64m1_t
+test_vget_v_u64m8_u64m1 (vuint64m8_t src)
+{
+ return vget_u64m1(src, 1);
+}
+
+vuint64m2_t
+test_vget_v_u64m4_u64m2 (vuint64m4_t src)
+{
+ return vget_u64m2(src, 1);
+}
+
+vuint64m2_t
+test_vget_v_u64m8_u64m2 (vuint64m8_t src)
+{
+ return vget_u64m2(src, 1);
+}
+
+vuint64m4_t
+test_vget_v_u64m8_u64m4 (vuint64m8_t src)
+{
+ return vget_u64m4(src, 1);
+}
+
+vfloat32m2_t
+test_vset_v_f32m1_f32m2 (vfloat32m2_t dest, vfloat32m1_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vfloat32m4_t
+test_vset_v_f32m1_f32m4 (vfloat32m4_t dest, vfloat32m1_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vfloat32m4_t
+test_vset_v_f32m2_f32m4 (vfloat32m4_t dest, vfloat32m2_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vfloat32m8_t
+test_vset_v_f32m1_f32m8 (vfloat32m8_t dest, vfloat32m1_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vfloat32m8_t
+test_vset_v_f32m2_f32m8 (vfloat32m8_t dest, vfloat32m2_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vfloat32m8_t
+test_vset_v_f32m4_f32m8 (vfloat32m8_t dest, vfloat32m4_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vfloat32m1_t
+test_vget_v_f32m2_f32m1 (vfloat32m2_t src)
+{
+ return vget_f32m1(src, 1);
+}
+
+vfloat32m1_t
+test_vget_v_f32m4_f32m1 (vfloat32m4_t src)
+{
+ return vget_f32m1(src, 1);
+}
+
+vfloat32m1_t
+test_vget_v_f32m8_f32m1 (vfloat32m8_t src)
+{
+ return vget_f32m1(src, 1);
+}
+
+vfloat32m2_t
+test_vget_v_f32m4_f32m2 (vfloat32m4_t src)
+{
+ return vget_f32m2(src, 1);
+}
+
+vfloat32m2_t
+test_vget_v_f32m8_f32m2 (vfloat32m8_t src)
+{
+ return vget_f32m2(src, 1);
+}
+
+vfloat32m4_t
+test_vget_v_f32m8_f32m4 (vfloat32m8_t src)
+{
+ return vget_f32m4(src, 1);
+}
+
+vfloat64m2_t
+test_vset_v_f64m1_f64m2 (vfloat64m2_t dest, vfloat64m1_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vfloat64m4_t
+test_vset_v_f64m1_f64m4 (vfloat64m4_t dest, vfloat64m1_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vfloat64m4_t
+test_vset_v_f64m2_f64m4 (vfloat64m4_t dest, vfloat64m2_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vfloat64m8_t
+test_vset_v_f64m1_f64m8 (vfloat64m8_t dest, vfloat64m1_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vfloat64m8_t
+test_vset_v_f64m2_f64m8 (vfloat64m8_t dest, vfloat64m2_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vfloat64m8_t
+test_vset_v_f64m4_f64m8 (vfloat64m8_t dest, vfloat64m4_t val)
+{
+ return vset(dest, 1, val);
+}
+
+vfloat64m1_t
+test_vget_v_f64m2_f64m1 (vfloat64m2_t src)
+{
+ return vget_f64m1(src, 1);
+}
+
+vfloat64m1_t
+test_vget_v_f64m4_f64m1 (vfloat64m4_t src)
+{
+ return vget_f64m1(src, 1);
+}
+
+vfloat64m1_t
+test_vget_v_f64m8_f64m1 (vfloat64m8_t src)
+{
+ return vget_f64m1(src, 1);
+}
+
+vfloat64m2_t
+test_vget_v_f64m4_f64m2 (vfloat64m4_t src)
+{
+ return vget_f64m2(src, 1);
+}
+
+vfloat64m2_t
+test_vget_v_f64m8_f64m2 (vfloat64m8_t src)
+{
+ return vget_f64m2(src, 1);
+}
+
+vfloat64m4_t
+test_vget_v_f64m8_f64m4 (vfloat64m8_t src)
+{
+ return vget_f64m4(src, 1);
+}
+/* { dg-final { scan-assembler-times {vmv1r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])} 60 } } */
+/* { dg-final { scan-assembler-times {vmv2r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])} 40 } } */
+/* { dg-final { scan-assembler-times {vmv4r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])} 20 } } */
+
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/intrinsic/set-get.c b/gcc/testsuite/gcc.target/riscv/rvv/intrinsic/set-get.c
new file mode 100644
index 00000000000..33d5a129aae
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/intrinsic/set-get.c
@@ -0,0 +1,730 @@
+
+/* { dg-do compile } */
+/* { dg-skip-if "test vector intrinsic" { *-*-* } { "*" } { "-march=rv*v*" } } */
+
+#include <stddef.h>
+#include <riscv_vector.h>
+
+
+vint8m2_t
+test_vset_v_i8m1_i8m2 (vint8m2_t dest, vint8m1_t val)
+{
+ return vset_v_i8m1_i8m2(dest, 1, val);
+}
+
+vint8m4_t
+test_vset_v_i8m1_i8m4 (vint8m4_t dest, vint8m1_t val)
+{
+ return vset_v_i8m1_i8m4(dest, 1, val);
+}
+
+vint8m4_t
+test_vset_v_i8m2_i8m4 (vint8m4_t dest, vint8m2_t val)
+{
+ return vset_v_i8m2_i8m4(dest, 1, val);
+}
+
+vint8m8_t
+test_vset_v_i8m1_i8m8 (vint8m8_t dest, vint8m1_t val)
+{
+ return vset_v_i8m1_i8m8(dest, 1, val);
+}
+
+vint8m8_t
+test_vset_v_i8m2_i8m8 (vint8m8_t dest, vint8m2_t val)
+{
+ return vset_v_i8m2_i8m8(dest, 1, val);
+}
+
+vint8m8_t
+test_vset_v_i8m4_i8m8 (vint8m8_t dest, vint8m4_t val)
+{
+ return vset_v_i8m4_i8m8(dest, 1, val);
+}
+
+vint8m1_t
+test_vget_v_i8m2_i8m1 (vint8m2_t src)
+{
+ return vget_v_i8m2_i8m1(src, 1);
+}
+
+vint8m1_t
+test_vget_v_i8m4_i8m1 (vint8m4_t src)
+{
+ return vget_v_i8m4_i8m1(src, 1);
+}
+
+vint8m1_t
+test_vget_v_i8m8_i8m1 (vint8m8_t src)
+{
+ return vget_v_i8m8_i8m1(src, 1);
+}
+
+vint8m2_t
+test_vget_v_i8m4_i8m2 (vint8m4_t src)
+{
+ return vget_v_i8m4_i8m2(src, 1);
+}
+
+vint8m2_t
+test_vget_v_i8m8_i8m2 (vint8m8_t src)
+{
+ return vget_v_i8m8_i8m2(src, 1);
+}
+
+vint8m4_t
+test_vget_v_i8m8_i8m4 (vint8m8_t src)
+{
+ return vget_v_i8m8_i8m4(src, 1);
+}
+
+vint16m2_t
+test_vset_v_i16m1_i16m2 (vint16m2_t dest, vint16m1_t val)
+{
+ return vset_v_i16m1_i16m2(dest, 1, val);
+}
+
+vint16m4_t
+test_vset_v_i16m1_i16m4 (vint16m4_t dest, vint16m1_t val)
+{
+ return vset_v_i16m1_i16m4(dest, 1, val);
+}
+
+vint16m4_t
+test_vset_v_i16m2_i16m4 (vint16m4_t dest, vint16m2_t val)
+{
+ return vset_v_i16m2_i16m4(dest, 1, val);
+}
+
+vint16m8_t
+test_vset_v_i16m1_i16m8 (vint16m8_t dest, vint16m1_t val)
+{
+ return vset_v_i16m1_i16m8(dest, 1, val);
+}
+
+vint16m8_t
+test_vset_v_i16m2_i16m8 (vint16m8_t dest, vint16m2_t val)
+{
+ return vset_v_i16m2_i16m8(dest, 1, val);
+}
+
+vint16m8_t
+test_vset_v_i16m4_i16m8 (vint16m8_t dest, vint16m4_t val)
+{
+ return vset_v_i16m4_i16m8(dest, 1, val);
+}
+
+vint16m1_t
+test_vget_v_i16m2_i16m1 (vint16m2_t src)
+{
+ return vget_v_i16m2_i16m1(src, 1);
+}
+
+vint16m1_t
+test_vget_v_i16m4_i16m1 (vint16m4_t src)
+{
+ return vget_v_i16m4_i16m1(src, 1);
+}
+
+vint16m1_t
+test_vget_v_i16m8_i16m1 (vint16m8_t src)
+{
+ return vget_v_i16m8_i16m1(src, 1);
+}
+
+vint16m2_t
+test_vget_v_i16m4_i16m2 (vint16m4_t src)
+{
+ return vget_v_i16m4_i16m2(src, 1);
+}
+
+vint16m2_t
+test_vget_v_i16m8_i16m2 (vint16m8_t src)
+{
+ return vget_v_i16m8_i16m2(src, 1);
+}
+
+vint16m4_t
+test_vget_v_i16m8_i16m4 (vint16m8_t src)
+{
+ return vget_v_i16m8_i16m4(src, 1);
+}
+
+vint32m2_t
+test_vset_v_i32m1_i32m2 (vint32m2_t dest, vint32m1_t val)
+{
+ return vset_v_i32m1_i32m2(dest, 1, val);
+}
+
+vint32m4_t
+test_vset_v_i32m1_i32m4 (vint32m4_t dest, vint32m1_t val)
+{
+ return vset_v_i32m1_i32m4(dest, 1, val);
+}
+
+vint32m4_t
+test_vset_v_i32m2_i32m4 (vint32m4_t dest, vint32m2_t val)
+{
+ return vset_v_i32m2_i32m4(dest, 1, val);
+}
+
+vint32m8_t
+test_vset_v_i32m1_i32m8 (vint32m8_t dest, vint32m1_t val)
+{
+ return vset_v_i32m1_i32m8(dest, 1, val);
+}
+
+vint32m8_t
+test_vset_v_i32m2_i32m8 (vint32m8_t dest, vint32m2_t val)
+{
+ return vset_v_i32m2_i32m8(dest, 1, val);
+}
+
+vint32m8_t
+test_vset_v_i32m4_i32m8 (vint32m8_t dest, vint32m4_t val)
+{
+ return vset_v_i32m4_i32m8(dest, 1, val);
+}
+
+vint32m1_t
+test_vget_v_i32m2_i32m1 (vint32m2_t src)
+{
+ return vget_v_i32m2_i32m1(src, 1);
+}
+
+vint32m1_t
+test_vget_v_i32m4_i32m1 (vint32m4_t src)
+{
+ return vget_v_i32m4_i32m1(src, 1);
+}
+
+vint32m1_t
+test_vget_v_i32m8_i32m1 (vint32m8_t src)
+{
+ return vget_v_i32m8_i32m1(src, 1);
+}
+
+vint32m2_t
+test_vget_v_i32m4_i32m2 (vint32m4_t src)
+{
+ return vget_v_i32m4_i32m2(src, 1);
+}
+
+vint32m2_t
+test_vget_v_i32m8_i32m2 (vint32m8_t src)
+{
+ return vget_v_i32m8_i32m2(src, 1);
+}
+
+vint32m4_t
+test_vget_v_i32m8_i32m4 (vint32m8_t src)
+{
+ return vget_v_i32m8_i32m4(src, 1);
+}
+
+vint64m2_t
+test_vset_v_i64m1_i64m2 (vint64m2_t dest, vint64m1_t val)
+{
+ return vset_v_i64m1_i64m2(dest, 1, val);
+}
+
+vint64m4_t
+test_vset_v_i64m1_i64m4 (vint64m4_t dest, vint64m1_t val)
+{
+ return vset_v_i64m1_i64m4(dest, 1, val);
+}
+
+vint64m4_t
+test_vset_v_i64m2_i64m4 (vint64m4_t dest, vint64m2_t val)
+{
+ return vset_v_i64m2_i64m4(dest, 1, val);
+}
+
+vint64m8_t
+test_vset_v_i64m1_i64m8 (vint64m8_t dest, vint64m1_t val)
+{
+ return vset_v_i64m1_i64m8(dest, 1, val);
+}
+
+vint64m8_t
+test_vset_v_i64m2_i64m8 (vint64m8_t dest, vint64m2_t val)
+{
+ return vset_v_i64m2_i64m8(dest, 1, val);
+}
+
+vint64m8_t
+test_vset_v_i64m4_i64m8 (vint64m8_t dest, vint64m4_t val)
+{
+ return vset_v_i64m4_i64m8(dest, 1, val);
+}
+
+vint64m1_t
+test_vget_v_i64m2_i64m1 (vint64m2_t src)
+{
+ return vget_v_i64m2_i64m1(src, 1);
+}
+
+vint64m1_t
+test_vget_v_i64m4_i64m1 (vint64m4_t src)
+{
+ return vget_v_i64m4_i64m1(src, 1);
+}
+
+vint64m1_t
+test_vget_v_i64m8_i64m1 (vint64m8_t src)
+{
+ return vget_v_i64m8_i64m1(src, 1);
+}
+
+vint64m2_t
+test_vget_v_i64m4_i64m2 (vint64m4_t src)
+{
+ return vget_v_i64m4_i64m2(src, 1);
+}
+
+vint64m2_t
+test_vget_v_i64m8_i64m2 (vint64m8_t src)
+{
+ return vget_v_i64m8_i64m2(src, 1);
+}
+
+vint64m4_t
+test_vget_v_i64m8_i64m4 (vint64m8_t src)
+{
+ return vget_v_i64m8_i64m4(src, 1);
+}
+
+vuint8m2_t
+test_vset_v_u8m1_u8m2 (vuint8m2_t dest, vuint8m1_t val)
+{
+ return vset_v_u8m1_u8m2(dest, 1, val);
+}
+
+vuint8m4_t
+test_vset_v_u8m1_u8m4 (vuint8m4_t dest, vuint8m1_t val)
+{
+ return vset_v_u8m1_u8m4(dest, 1, val);
+}
+
+vuint8m4_t
+test_vset_v_u8m2_u8m4 (vuint8m4_t dest, vuint8m2_t val)
+{
+ return vset_v_u8m2_u8m4(dest, 1, val);
+}
+
+vuint8m8_t
+test_vset_v_u8m1_u8m8 (vuint8m8_t dest, vuint8m1_t val)
+{
+ return vset_v_u8m1_u8m8(dest, 1, val);
+}
+
+vuint8m8_t
+test_vset_v_u8m2_u8m8 (vuint8m8_t dest, vuint8m2_t val)
+{
+ return vset_v_u8m2_u8m8(dest, 1, val);
+}
+
+vuint8m8_t
+test_vset_v_u8m4_u8m8 (vuint8m8_t dest, vuint8m4_t val)
+{
+ return vset_v_u8m4_u8m8(dest, 1, val);
+}
+
+vuint8m1_t
+test_vget_v_u8m2_u8m1 (vuint8m2_t src)
+{
+ return vget_v_u8m2_u8m1(src, 1);
+}
+
+vuint8m1_t
+test_vget_v_u8m4_u8m1 (vuint8m4_t src)
+{
+ return vget_v_u8m4_u8m1(src, 1);
+}
+
+vuint8m1_t
+test_vget_v_u8m8_u8m1 (vuint8m8_t src)
+{
+ return vget_v_u8m8_u8m1(src, 1);
+}
+
+vuint8m2_t
+test_vget_v_u8m4_u8m2 (vuint8m4_t src)
+{
+ return vget_v_u8m4_u8m2(src, 1);
+}
+
+vuint8m2_t
+test_vget_v_u8m8_u8m2 (vuint8m8_t src)
+{
+ return vget_v_u8m8_u8m2(src, 1);
+}
+
+vuint8m4_t
+test_vget_v_u8m8_u8m4 (vuint8m8_t src)
+{
+ return vget_v_u8m8_u8m4(src, 1);
+}
+
+vuint16m2_t
+test_vset_v_u16m1_u16m2 (vuint16m2_t dest, vuint16m1_t val)
+{
+ return vset_v_u16m1_u16m2(dest, 1, val);
+}
+
+vuint16m4_t
+test_vset_v_u16m1_u16m4 (vuint16m4_t dest, vuint16m1_t val)
+{
+ return vset_v_u16m1_u16m4(dest, 1, val);
+}
+
+vuint16m4_t
+test_vset_v_u16m2_u16m4 (vuint16m4_t dest, vuint16m2_t val)
+{
+ return vset_v_u16m2_u16m4(dest, 1, val);
+}
+
+vuint16m8_t
+test_vset_v_u16m1_u16m8 (vuint16m8_t dest, vuint16m1_t val)
+{
+ return vset_v_u16m1_u16m8(dest, 1, val);
+}
+
+vuint16m8_t
+test_vset_v_u16m2_u16m8 (vuint16m8_t dest, vuint16m2_t val)
+{
+ return vset_v_u16m2_u16m8(dest, 1, val);
+}
+
+vuint16m8_t
+test_vset_v_u16m4_u16m8 (vuint16m8_t dest, vuint16m4_t val)
+{
+ return vset_v_u16m4_u16m8(dest, 1, val);
+}
+
+vuint16m1_t
+test_vget_v_u16m2_u16m1 (vuint16m2_t src)
+{
+ return vget_v_u16m2_u16m1(src, 1);
+}
+
+vuint16m1_t
+test_vget_v_u16m4_u16m1 (vuint16m4_t src)
+{
+ return vget_v_u16m4_u16m1(src, 1);
+}
+
+vuint16m1_t
+test_vget_v_u16m8_u16m1 (vuint16m8_t src)
+{
+ return vget_v_u16m8_u16m1(src, 1);
+}
+
+vuint16m2_t
+test_vget_v_u16m4_u16m2 (vuint16m4_t src)
+{
+ return vget_v_u16m4_u16m2(src, 1);
+}
+
+vuint16m2_t
+test_vget_v_u16m8_u16m2 (vuint16m8_t src)
+{
+ return vget_v_u16m8_u16m2(src, 1);
+}
+
+vuint16m4_t
+test_vget_v_u16m8_u16m4 (vuint16m8_t src)
+{
+ return vget_v_u16m8_u16m4(src, 1);
+}
+
+vuint32m2_t
+test_vset_v_u32m1_u32m2 (vuint32m2_t dest, vuint32m1_t val)
+{
+ return vset_v_u32m1_u32m2(dest, 1, val);
+}
+
+vuint32m4_t
+test_vset_v_u32m1_u32m4 (vuint32m4_t dest, vuint32m1_t val)
+{
+ return vset_v_u32m1_u32m4(dest, 1, val);
+}
+
+vuint32m4_t
+test_vset_v_u32m2_u32m4 (vuint32m4_t dest, vuint32m2_t val)
+{
+ return vset_v_u32m2_u32m4(dest, 1, val);
+}
+
+vuint32m8_t
+test_vset_v_u32m1_u32m8 (vuint32m8_t dest, vuint32m1_t val)
+{
+ return vset_v_u32m1_u32m8(dest, 1, val);
+}
+
+vuint32m8_t
+test_vset_v_u32m2_u32m8 (vuint32m8_t dest, vuint32m2_t val)
+{
+ return vset_v_u32m2_u32m8(dest, 1, val);
+}
+
+vuint32m8_t
+test_vset_v_u32m4_u32m8 (vuint32m8_t dest, vuint32m4_t val)
+{
+ return vset_v_u32m4_u32m8(dest, 1, val);
+}
+
+vuint32m1_t
+test_vget_v_u32m2_u32m1 (vuint32m2_t src)
+{
+ return vget_v_u32m2_u32m1(src, 1);
+}
+
+vuint32m1_t
+test_vget_v_u32m4_u32m1 (vuint32m4_t src)
+{
+ return vget_v_u32m4_u32m1(src, 1);
+}
+
+vuint32m1_t
+test_vget_v_u32m8_u32m1 (vuint32m8_t src)
+{
+ return vget_v_u32m8_u32m1(src, 1);
+}
+
+vuint32m2_t
+test_vget_v_u32m4_u32m2 (vuint32m4_t src)
+{
+ return vget_v_u32m4_u32m2(src, 1);
+}
+
+vuint32m2_t
+test_vget_v_u32m8_u32m2 (vuint32m8_t src)
+{
+ return vget_v_u32m8_u32m2(src, 1);
+}
+
+vuint32m4_t
+test_vget_v_u32m8_u32m4 (vuint32m8_t src)
+{
+ return vget_v_u32m8_u32m4(src, 1);
+}
+
+vuint64m2_t
+test_vset_v_u64m1_u64m2 (vuint64m2_t dest, vuint64m1_t val)
+{
+ return vset_v_u64m1_u64m2(dest, 1, val);
+}
+
+vuint64m4_t
+test_vset_v_u64m1_u64m4 (vuint64m4_t dest, vuint64m1_t val)
+{
+ return vset_v_u64m1_u64m4(dest, 1, val);
+}
+
+vuint64m4_t
+test_vset_v_u64m2_u64m4 (vuint64m4_t dest, vuint64m2_t val)
+{
+ return vset_v_u64m2_u64m4(dest, 1, val);
+}
+
+vuint64m8_t
+test_vset_v_u64m1_u64m8 (vuint64m8_t dest, vuint64m1_t val)
+{
+ return vset_v_u64m1_u64m8(dest, 1, val);
+}
+
+vuint64m8_t
+test_vset_v_u64m2_u64m8 (vuint64m8_t dest, vuint64m2_t val)
+{
+ return vset_v_u64m2_u64m8(dest, 1, val);
+}
+
+vuint64m8_t
+test_vset_v_u64m4_u64m8 (vuint64m8_t dest, vuint64m4_t val)
+{
+ return vset_v_u64m4_u64m8(dest, 1, val);
+}
+
+vuint64m1_t
+test_vget_v_u64m2_u64m1 (vuint64m2_t src)
+{
+ return vget_v_u64m2_u64m1(src, 1);
+}
+
+vuint64m1_t
+test_vget_v_u64m4_u64m1 (vuint64m4_t src)
+{
+ return vget_v_u64m4_u64m1(src, 1);
+}
+
+vuint64m1_t
+test_vget_v_u64m8_u64m1 (vuint64m8_t src)
+{
+ return vget_v_u64m8_u64m1(src, 1);
+}
+
+vuint64m2_t
+test_vget_v_u64m4_u64m2 (vuint64m4_t src)
+{
+ return vget_v_u64m4_u64m2(src, 1);
+}
+
+vuint64m2_t
+test_vget_v_u64m8_u64m2 (vuint64m8_t src)
+{
+ return vget_v_u64m8_u64m2(src, 1);
+}
+
+vuint64m4_t
+test_vget_v_u64m8_u64m4 (vuint64m8_t src)
+{
+ return vget_v_u64m8_u64m4(src, 1);
+}
+
+vfloat32m2_t
+test_vset_v_f32m1_f32m2 (vfloat32m2_t dest, vfloat32m1_t val)
+{
+ return vset_v_f32m1_f32m2(dest, 1, val);
+}
+
+vfloat32m4_t
+test_vset_v_f32m1_f32m4 (vfloat32m4_t dest, vfloat32m1_t val)
+{
+ return vset_v_f32m1_f32m4(dest, 1, val);
+}
+
+vfloat32m4_t
+test_vset_v_f32m2_f32m4 (vfloat32m4_t dest, vfloat32m2_t val)
+{
+ return vset_v_f32m2_f32m4(dest, 1, val);
+}
+
+vfloat32m8_t
+test_vset_v_f32m1_f32m8 (vfloat32m8_t dest, vfloat32m1_t val)
+{
+ return vset_v_f32m1_f32m8(dest, 1, val);
+}
+
+vfloat32m8_t
+test_vset_v_f32m2_f32m8 (vfloat32m8_t dest, vfloat32m2_t val)
+{
+ return vset_v_f32m2_f32m8(dest, 1, val);
+}
+
+vfloat32m8_t
+test_vset_v_f32m4_f32m8 (vfloat32m8_t dest, vfloat32m4_t val)
+{
+ return vset_v_f32m4_f32m8(dest, 1, val);
+}
+
+vfloat32m1_t
+test_vget_v_f32m2_f32m1 (vfloat32m2_t src)
+{
+ return vget_v_f32m2_f32m1(src, 1);
+}
+
+vfloat32m1_t
+test_vget_v_f32m4_f32m1 (vfloat32m4_t src)
+{
+ return vget_v_f32m4_f32m1(src, 1);
+}
+
+vfloat32m1_t
+test_vget_v_f32m8_f32m1 (vfloat32m8_t src)
+{
+ return vget_v_f32m8_f32m1(src, 1);
+}
+
+vfloat32m2_t
+test_vget_v_f32m4_f32m2 (vfloat32m4_t src)
+{
+ return vget_v_f32m4_f32m2(src, 1);
+}
+
+vfloat32m2_t
+test_vget_v_f32m8_f32m2 (vfloat32m8_t src)
+{
+ return vget_v_f32m8_f32m2(src, 1);
+}
+
+vfloat32m4_t
+test_vget_v_f32m8_f32m4 (vfloat32m8_t src)
+{
+ return vget_v_f32m8_f32m4(src, 1);
+}
+
+vfloat64m2_t
+test_vset_v_f64m1_f64m2 (vfloat64m2_t dest, vfloat64m1_t val)
+{
+ return vset_v_f64m1_f64m2(dest, 1, val);
+}
+
+vfloat64m4_t
+test_vset_v_f64m1_f64m4 (vfloat64m4_t dest, vfloat64m1_t val)
+{
+ return vset_v_f64m1_f64m4(dest, 1, val);
+}
+
+vfloat64m4_t
+test_vset_v_f64m2_f64m4 (vfloat64m4_t dest, vfloat64m2_t val)
+{
+ return vset_v_f64m2_f64m4(dest, 1, val);
+}
+
+vfloat64m8_t
+test_vset_v_f64m1_f64m8 (vfloat64m8_t dest, vfloat64m1_t val)
+{
+ return vset_v_f64m1_f64m8(dest, 1, val);
+}
+
+vfloat64m8_t
+test_vset_v_f64m2_f64m8 (vfloat64m8_t dest, vfloat64m2_t val)
+{
+ return vset_v_f64m2_f64m8(dest, 1, val);
+}
+
+vfloat64m8_t
+test_vset_v_f64m4_f64m8 (vfloat64m8_t dest, vfloat64m4_t val)
+{
+ return vset_v_f64m4_f64m8(dest, 1, val);
+}
+
+vfloat64m1_t
+test_vget_v_f64m2_f64m1 (vfloat64m2_t src)
+{
+ return vget_v_f64m2_f64m1(src, 1);
+}
+
+vfloat64m1_t
+test_vget_v_f64m4_f64m1 (vfloat64m4_t src)
+{
+ return vget_v_f64m4_f64m1(src, 1);
+}
+
+vfloat64m1_t
+test_vget_v_f64m8_f64m1 (vfloat64m8_t src)
+{
+ return vget_v_f64m8_f64m1(src, 1);
+}
+
+vfloat64m2_t
+test_vget_v_f64m4_f64m2 (vfloat64m4_t src)
+{
+ return vget_v_f64m4_f64m2(src, 1);
+}
+
+vfloat64m2_t
+test_vget_v_f64m8_f64m2 (vfloat64m8_t src)
+{
+ return vget_v_f64m8_f64m2(src, 1);
+}
+
+vfloat64m4_t
+test_vget_v_f64m8_f64m4 (vfloat64m8_t src)
+{
+ return vget_v_f64m8_f64m4(src, 1);
+}
+/* { dg-final { scan-assembler-times {vmv1r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])} 60 } } */
+/* { dg-final { scan-assembler-times {vmv2r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])} 40 } } */
+/* { dg-final { scan-assembler-times {vmv4r\.v\s+(?:v[0-9]|v[1-2][0-9]|v3[0-1]),\s*(?:v[0-9]|v[1-2][0-9]|v3[0-1])} 20 } } */
--
2.36.1
Thread overview:
2022-05-31 8:49 [PATCH 00/21] *** Add RVV (RISC-V 'V' Extension) support *** juzhe.zhong
2022-05-31 8:49 ` [PATCH 01/21] Add RVV modes and support scalable vector juzhe.zhong
2022-05-31 8:49 ` [PATCH 02/21] Add RVV intrinsic framework juzhe.zhong
2022-05-31 8:49 ` [PATCH 03/21] Add RVV datatypes juzhe.zhong
2022-05-31 8:49 ` [PATCH 04/21] Add RVV intrinsic enable #pragma riscv intrinsic "vector" and introduce RVV header "riscv_vector.h" juzhe.zhong
2022-05-31 8:49 ` [PATCH 05/21] Add RVV configuration intrinsic juzhe.zhong
2022-05-31 8:49 ` [PATCH 06/21] Add insert-vsetvl pass juzhe.zhong
2022-05-31 8:49 ` [PATCH 07/21] Add register spilling support juzhe.zhong
2022-05-31 8:49 ` [PATCH 08/21] Add poly manipulation juzhe.zhong
2022-05-31 8:50 ` [PATCH 09/21] Add misc function intrinsic support juzhe.zhong
2022-05-31 8:50 ` [PATCH 11/21] Add calling function support juzhe.zhong
2022-05-31 8:50 ` juzhe.zhong [this message]
2022-05-31 8:50 ` [PATCH 13/21] Adjust scalable frame and full testcases juzhe.zhong
2022-05-31 8:50 ` [PATCH 15/21] Add integer intrinsics juzhe.zhong
2022-05-31 8:50 ` [PATCH 18/21] Add rest intrinsic support juzhe.zhong
2022-05-31 16:51 ` [PATCH 00/21] *** Add RVV (RISC-V 'V' Extension) support *** Palmer Dabbelt