From: Pan Li
To: gcc-cvs@gcc.gnu.org
Subject: [gcc r14-1899] RISC-V: Bugfix for RVV integer reduction in ZVE32/64.
Date: Fri, 16 Jun 2023 23:37:41 +0000 (GMT)
Message-Id: <20230616233741.ABE963858D35@sourceware.org>
X-Git-Refname: refs/heads/master
X-Git-Oldrev: dd6e1cbac8682106c5167c105f2807014288b852
X-Git-Newrev: d0cf0c6c8449009697ad29dd7cb60e7f655628f2

https://gcc.gnu.org/g:d0cf0c6c8449009697ad29dd7cb60e7f655628f2

commit r14-1899-gd0cf0c6c8449009697ad29dd7cb60e7f655628f2
Author: Pan Li
Date:   Fri Jun 16 15:01:46 2023 +0800

    RISC-V: Bugfix for RVV integer reduction in ZVE32/64.

    The RVV integer reduction has 3 different patterns for ZVE128+, ZVE64
    and ZVE32. They take the same iterator with different attributes.
    However, the generated dispatch function code_for_reduc (code, mode1,
    mode2) cannot tell them apart. Its implementation looks like the below:

    code_for_reduc (code, mode1, mode2)
    {
      if (code == max && mode1 == VNx1QI && mode2 == VNx1QI)
        return CODE_FOR_pred_reduc_maxvnx1qivnx16qi; // ZVE128+

      if (code == max && mode1 == VNx1QI && mode2 == VNx1QI)
        return CODE_FOR_pred_reduc_maxvnx1qivnx8qi;  // ZVE64

      if (code == max && mode1 == VNx1QI && mode2 == VNx1QI)
        return CODE_FOR_pred_reduc_maxvnx1qivnx4qi;  // ZVE32
    }

    The three conditions are identical, so the first one always matches.
    For example, on ZVE32 a call to code_for_reduc (max, VNx1QI, VNx1QI)
    returns the insn code of the ZVE128+ pattern instead of the ZVE32 one.

    This patch replaces the 3 VLEN-specific patterns with one pattern per
    element type, and passes both the input vector mode and the result
    vector mode to code_for_reduc. For example, on ZVE32 the call becomes
    code_for_reduc (max, VNx1QI, VNx4QI), so the correct ZVE32 insn code
    is returned as expected.

    Please note both GCC 13 and 14 are impacted by this issue.

    Signed-off-by: Pan Li
    Co-Authored-By: Juzhe-Zhong

    PR target/110265

    gcc/ChangeLog:

            * config/riscv/riscv-vector-builtins-bases.cc: Add ret_mode for
            integer reduction expand.
            * config/riscv/vector-iterators.md: Add VQI, VHI, VSI and VDI,
            and the LMUL1 attr respectively.
            * config/riscv/vector.md
            (@pred_reduc_<reduc><mode><vlmul1>): Removed.
            (@pred_reduc_<reduc><mode><vlmul1_zve64>): Likewise.
            (@pred_reduc_<reduc><mode><vlmul1_zve32>): Likewise.
            (@pred_reduc_<reduc><VQI:mode><VQI_LMUL1:mode>): New pattern.
            (@pred_reduc_<reduc><VHI:mode><VHI_LMUL1:mode>): Likewise.
            (@pred_reduc_<reduc><VSI:mode><VSI_LMUL1:mode>): Likewise.
            (@pred_reduc_<reduc><VDI:mode><VDI_LMUL1:mode>): Likewise.

    gcc/testsuite/ChangeLog:

            * gcc.target/riscv/rvv/base/pr110265-1.c: New test.
            * gcc.target/riscv/rvv/base/pr110265-1.h: New test.
            * gcc.target/riscv/rvv/base/pr110265-2.c: New test.
            * gcc.target/riscv/rvv/base/pr110265-2.h: New test.
            * gcc.target/riscv/rvv/base/pr110265-3.c: New test.
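[Editor's illustration] To make the dispatch collision concrete, here is a minimal, self-contained C sketch of the lookup shape described above. This is not GCC code; all names are illustrative. Keyed on (code, input mode, input mode), the three VLEN-specific arms compare identically and the first always wins; keying on (code, input mode, LMUL1 result mode) keeps them distinct.

#include <stdio.h>

enum reduc_code { REDUC_MAX };
enum mode { VNX1QI, VNX4QI, VNX8QI, VNX16QI };

/* Buggy shape: mode2 merely duplicates mode1, so the ZVE64/ZVE32 arms
   are unreachable dead code.  */
static const char *
code_for_reduc_buggy (enum reduc_code c, enum mode m1, enum mode m2)
{
  if (c == REDUC_MAX && m1 == VNX1QI && m2 == VNX1QI)
    return "pred_reduc_max vnx1qi/vnx16qi (ZVE128+)"; /* always taken */
  if (c == REDUC_MAX && m1 == VNX1QI && m2 == VNX1QI)
    return "pred_reduc_max vnx1qi/vnx8qi (ZVE64)";    /* unreachable */
  if (c == REDUC_MAX && m1 == VNX1QI && m2 == VNX1QI)
    return "pred_reduc_max vnx1qi/vnx4qi (ZVE32)";    /* unreachable */
  return "unknown";
}

/* Fixed shape: the LMUL1 result mode participates in the key, so each
   VLEN configuration resolves to its own insn.  */
static const char *
code_for_reduc_fixed (enum reduc_code c, enum mode m1, enum mode m2)
{
  if (c == REDUC_MAX && m1 == VNX1QI && m2 == VNX16QI)
    return "pred_reduc_max vnx1qi/vnx16qi (ZVE128+)";
  if (c == REDUC_MAX && m1 == VNX1QI && m2 == VNX8QI)
    return "pred_reduc_max vnx1qi/vnx8qi (ZVE64)";
  if (c == REDUC_MAX && m1 == VNX1QI && m2 == VNX4QI)
    return "pred_reduc_max vnx1qi/vnx4qi (ZVE32)";
  return "unknown";
}

int
main (void)
{
  /* On ZVE32 the buggy key selects the ZVE128+ insn...  */
  printf ("buggy: %s\n", code_for_reduc_buggy (REDUC_MAX, VNX1QI, VNX1QI));
  /* ...while the fixed key selects the intended ZVE32 insn.  */
  printf ("fixed: %s\n", code_for_reduc_fixed (REDUC_MAX, VNX1QI, VNX4QI));
  return 0;
}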
Diff:
---
 gcc/config/riscv/riscv-vector-builtins-bases.cc    |  13 +-
 gcc/config/riscv/vector-iterators.md               |  61 ++++++
 gcc/config/riscv/vector.md                         | 208 +++++++++++++++------
 .../gcc.target/riscv/rvv/base/pr110265-1.c         |  13 ++
 .../gcc.target/riscv/rvv/base/pr110265-1.h         |  65 +++++++
 .../gcc.target/riscv/rvv/base/pr110265-2.c         |  14 ++
 .../gcc.target/riscv/rvv/base/pr110265-2.h         |  57 ++++++
 .../gcc.target/riscv/rvv/base/pr110265-3.c         |  14 ++
 8 files changed, 385 insertions(+), 60 deletions(-)

diff --git a/gcc/config/riscv/riscv-vector-builtins-bases.cc b/gcc/config/riscv/riscv-vector-builtins-bases.cc
index 87a684dd127..53bd0ed2534 100644
--- a/gcc/config/riscv/riscv-vector-builtins-bases.cc
+++ b/gcc/config/riscv/riscv-vector-builtins-bases.cc
@@ -1396,8 +1396,17 @@ public:
   rtx expand (function_expander &e) const override
   {
-    return e.use_exact_insn (
-      code_for_pred_reduc (CODE, e.vector_mode (), e.vector_mode ()));
+    machine_mode mode = e.vector_mode ();
+    machine_mode ret_mode = e.ret_mode ();
+
+    /* TODO: we will use ret_mode after all types of PR110265 are addressed.  */
+    if ((GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
+        || GET_MODE_INNER (mode) != GET_MODE_INNER (ret_mode))
+      return e.use_exact_insn (
+        code_for_pred_reduc (CODE, e.vector_mode (), e.vector_mode ()));
+    else
+      return e.use_exact_insn (
+        code_for_pred_reduc (CODE, e.vector_mode (), e.ret_mode ()));
   }
 };

diff --git a/gcc/config/riscv/vector-iterators.md b/gcc/config/riscv/vector-iterators.md
index 8c71c9e22cc..e2c8ade98eb 100644
--- a/gcc/config/riscv/vector-iterators.md
+++ b/gcc/config/riscv/vector-iterators.md
@@ -929,6 +929,67 @@
   (VNx2x64QI "TARGET_MIN_VLEN >= 128")
 ])
 
+(define_mode_iterator VQI [
+  (VNx1QI "TARGET_MIN_VLEN < 128")
+  VNx2QI
+  VNx4QI
+  VNx8QI
+  VNx16QI
+  VNx32QI
+  (VNx64QI "TARGET_MIN_VLEN > 32")
+  (VNx128QI "TARGET_MIN_VLEN >= 128")
+])
+
+(define_mode_iterator VHI [
+  (VNx1HI "TARGET_MIN_VLEN < 128")
+  VNx2HI
+  VNx4HI
+  VNx8HI
+  VNx16HI
+  (VNx32HI "TARGET_MIN_VLEN > 32")
+  (VNx64HI "TARGET_MIN_VLEN >= 128")
+])
+
+(define_mode_iterator VSI [
+  (VNx1SI "TARGET_MIN_VLEN < 128")
+  VNx2SI
+  VNx4SI
+  VNx8SI
+  (VNx16SI "TARGET_MIN_VLEN > 32")
+  (VNx32SI "TARGET_MIN_VLEN >= 128")
+])
+
+(define_mode_iterator VDI [
+  (VNx1DI "TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN < 128")
+  (VNx2DI "TARGET_VECTOR_ELEN_64")
+  (VNx4DI "TARGET_VECTOR_ELEN_64")
+  (VNx8DI "TARGET_VECTOR_ELEN_64")
+  (VNx16DI "TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 128")
+])
+
+(define_mode_iterator VQI_LMUL1 [
+  (VNx16QI "TARGET_MIN_VLEN >= 128")
+  (VNx8QI "TARGET_MIN_VLEN == 64")
+  (VNx4QI "TARGET_MIN_VLEN == 32")
+])
+
+(define_mode_iterator VHI_LMUL1 [
+  (VNx8HI "TARGET_MIN_VLEN >= 128")
+  (VNx4HI "TARGET_MIN_VLEN == 64")
+  (VNx2HI "TARGET_MIN_VLEN == 32")
+])
+
+(define_mode_iterator VSI_LMUL1 [
+  (VNx4SI "TARGET_MIN_VLEN >= 128")
+  (VNx2SI "TARGET_MIN_VLEN == 64")
+  (VNx1SI "TARGET_MIN_VLEN == 32")
+])
+
+(define_mode_iterator VDI_LMUL1 [
+  (VNx2DI "TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 128")
+  (VNx1DI "TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN == 64")
+])
+
 (define_mode_attr VLMULX2 [
   (VNx1QI "VNx2QI") (VNx2QI "VNx4QI") (VNx4QI "VNx8QI") (VNx8QI "VNx16QI") (VNx16QI "VNx32QI") (VNx32QI "VNx64QI") (VNx64QI "VNx128QI")
   (VNx1HI "VNx2HI") (VNx2HI "VNx4HI") (VNx4HI "VNx8HI") (VNx8HI "VNx16HI") (VNx16HI "VNx32HI") (VNx32HI "VNx64HI")
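[Editor's illustration] The *_LMUL1 iterators above pin the reduction result mode to one full vector register per MIN_VLEN configuration. A small C sketch of the underlying arithmetic, assuming the usual rule that an LMUL = 1 register holds MIN_VLEN / element-width elements (illustrative only, not GCC code):

#include <assert.h>

/* Number of elements in an LMUL = 1 vector register for a given
   MIN_VLEN (bits) and element width (bits).  */
static unsigned
lmul1_nelts (unsigned min_vlen, unsigned elt_bits)
{
  return min_vlen / elt_bits;
}

int
main (void)
{
  assert (lmul1_nelts (32, 8) == 4);    /* VNx4QI  in VQI_LMUL1 */
  assert (lmul1_nelts (64, 8) == 8);    /* VNx8QI  in VQI_LMUL1 */
  assert (lmul1_nelts (128, 8) == 16);  /* VNx16QI in VQI_LMUL1 */
  assert (lmul1_nelts (64, 64) == 1);   /* VNx1DI  in VDI_LMUL1 */
  assert (lmul1_nelts (128, 64) == 2);  /* VNx2DI  in VDI_LMUL1 */
  return 0;
}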
diff --git a/gcc/config/riscv/vector.md b/gcc/config/riscv/vector.md
index 1d1847bd85a..d396e278503 100644
--- a/gcc/config/riscv/vector.md
+++ b/gcc/config/riscv/vector.md
@@ -7244,76 +7244,168 @@
 ;; -------------------------------------------------------------------------------
 ;; For reduction operations, we should have seperate patterns for
-;; TARGET_MIN_VLEN == 32 and TARGET_MIN_VLEN > 32.
+;; different types. For each type, we will cover MIN_VLEN == 32, MIN_VLEN == 64
+;; and the MIN_VLEN >= 128 from the well defined iterators.
 ;; Since reduction need LMUL = 1 scalar operand as the input operand
 ;; and they are different.
 ;; For example, The LMUL = 1 corresponding mode of VNx16QImode is VNx4QImode
 ;; for -march=rv*zve32* wheras VNx8QImode for -march=rv*zve64*
-(define_insn "@pred_reduc_<reduc><mode><vlmul1>"
-  [(set (match_operand:<VLMUL1> 0 "register_operand" "=vr, vr")
-	(unspec:<VLMUL1>
-	  [(unspec:<VM>
-	     [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1")
-	      (match_operand 5 "vector_length_operand" " rK, rK")
-	      (match_operand 6 "const_int_operand" " i, i")
-	      (match_operand 7 "const_int_operand" " i, i")
+
+;; Integer Reduction for QI
+(define_insn "@pred_reduc_<reduc><VQI:mode><VQI_LMUL1:mode>"
+  [
+    (set
+      (match_operand:VQI_LMUL1 0 "register_operand" "=vr, vr")
+      (unspec:VQI_LMUL1
+        [
+          (unspec:<VQI:VM>
+            [
+              (match_operand:<VQI:VM> 1 "vector_mask_operand" "vmWc1,vmWc1")
+              (match_operand 5 "vector_length_operand" " rK, rK")
+              (match_operand 6 "const_int_operand" " i, i")
+              (match_operand 7 "const_int_operand" " i, i")
 	      (reg:SI VL_REGNUM)
-	      (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
-	   (any_reduc:VI
-	     (vec_duplicate:VI
-	       (vec_select:<VEL>
-		 (match_operand:<VLMUL1> 4 "register_operand" " vr, vr")
-		 (parallel [(const_int 0)])))
-	     (match_operand:VI 3 "register_operand" " vr, vr"))
-	   (match_operand:<VLMUL1> 2 "vector_merge_operand" " vu, 0")] UNSPEC_REDUC))]
-  "TARGET_VECTOR && TARGET_MIN_VLEN >= 128"
+              (reg:SI VTYPE_REGNUM)
+            ] UNSPEC_VPREDICATE
+          )
+          (any_reduc:VQI
+            (vec_duplicate:VQI
+              (vec_select:<VEL>
+                (match_operand:VQI_LMUL1 4 "register_operand" " vr, vr")
+                (parallel [(const_int 0)])
+              )
+            )
+            (match_operand:VQI 3 "register_operand" " vr, vr")
+          )
+          (match_operand:VQI_LMUL1 2 "vector_merge_operand" " vu, 0")
+        ] UNSPEC_REDUC
+      )
+    )
+  ]
+  "TARGET_VECTOR"
   "vred<reduc>.vs\t%0,%3,%4%p1"
-  [(set_attr "type" "vired")
-   (set_attr "mode" "<MODE>")])
+  [
+    (set_attr "type" "vired")
+    (set_attr "mode" "<VQI:MODE>")
+  ]
+)
 
-(define_insn "@pred_reduc_<reduc><mode><vlmul1_zve64>"
-  [(set (match_operand:<VLMUL1_ZVE64> 0 "register_operand" "=vr, vr")
-	(unspec:<VLMUL1_ZVE64>
-	  [(unspec:<VM>
-	     [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1")
-	      (match_operand 5 "vector_length_operand" " rK, rK")
-	      (match_operand 6 "const_int_operand" " i, i")
-	      (match_operand 7 "const_int_operand" " i, i")
+;; Integer Reduction for HI
+(define_insn "@pred_reduc_<reduc><VHI:mode><VHI_LMUL1:mode>"
+  [
+    (set
+      (match_operand:VHI_LMUL1 0 "register_operand" "=vr, vr")
+      (unspec:VHI_LMUL1
+        [
+          (unspec:<VHI:VM>
+            [
+              (match_operand:<VHI:VM> 1 "vector_mask_operand" "vmWc1,vmWc1")
+              (match_operand 5 "vector_length_operand" " rK, rK")
+              (match_operand 6 "const_int_operand" " i, i")
+              (match_operand 7 "const_int_operand" " i, i")
 	      (reg:SI VL_REGNUM)
-	      (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
-	   (any_reduc:VI_ZVE64
-	     (vec_duplicate:VI_ZVE64
-	       (vec_select:<VEL>
-		 (match_operand:<VLMUL1_ZVE64> 4 "register_operand" " vr, vr")
-		 (parallel [(const_int 0)])))
-	     (match_operand:VI_ZVE64 3 "register_operand" " vr, vr"))
-	   (match_operand:<VLMUL1_ZVE64> 2 "vector_merge_operand" " vu, 0")] UNSPEC_REDUC))]
-  "TARGET_VECTOR && TARGET_MIN_VLEN == 64"
+              (reg:SI VTYPE_REGNUM)
+            ] UNSPEC_VPREDICATE
+          )
+          (any_reduc:VHI
+            (vec_duplicate:VHI
+              (vec_select:<VEL>
+                (match_operand:VHI_LMUL1 4 "register_operand" " vr, vr")
+                (parallel [(const_int 0)])
+              )
+            )
+            (match_operand:VHI 3 "register_operand" " vr, vr")
+          )
+          (match_operand:VHI_LMUL1 2 "vector_merge_operand" " vu, 0")
+        ] UNSPEC_REDUC
+      )
+    )
+  ]
"TARGET_VECTOR" "vred.vs\t%0,%3,%4%p1" - [(set_attr "type" "vired") - (set_attr "mode" "")]) + [ + (set_attr "type" "vired") + (set_attr "mode" "") + ] +) -(define_insn "@pred_reduc_" - [(set (match_operand: 0 "register_operand" "=vd, vd, vr, vr") - (unspec: - [(unspec: - [(match_operand: 1 "vector_mask_operand" " vm, vm,Wc1,Wc1") - (match_operand 5 "vector_length_operand" " rK, rK, rK, rK") - (match_operand 6 "const_int_operand" " i, i, i, i") - (match_operand 7 "const_int_operand" " i, i, i, i") +;; Integer Reduction for SI +(define_insn "@pred_reduc_" + [ + (set + (match_operand:VSI_LMUL1 0 "register_operand" "=vr, vr") + (unspec:VSI_LMUL1 + [ + (unspec: + [ + (match_operand: 1 "vector_mask_operand" "vmWc1,vmWc1") + (match_operand 5 "vector_length_operand" " rK, rK") + (match_operand 6 "const_int_operand" " i, i") + (match_operand 7 "const_int_operand" " i, i") (reg:SI VL_REGNUM) - (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) - (any_reduc:VI_ZVE32 - (vec_duplicate:VI_ZVE32 - (vec_select: - (match_operand: 4 "register_operand" " vr, vr, vr, vr") - (parallel [(const_int 0)]))) - (match_operand:VI_ZVE32 3 "register_operand" " vr, vr, vr, vr")) - (match_operand: 2 "vector_merge_operand" " vu, 0, vu, 0")] UNSPEC_REDUC))] - "TARGET_VECTOR && TARGET_MIN_VLEN == 32" + (reg:SI VTYPE_REGNUM) + ] UNSPEC_VPREDICATE + ) + (any_reduc:VSI + (vec_duplicate:VSI + (vec_select: + (match_operand:VSI_LMUL1 4 "register_operand" " vr, vr") + (parallel [(const_int 0)]) + ) + ) + (match_operand:VSI 3 "register_operand" " vr, vr") + ) + (match_operand:VSI_LMUL1 2 "vector_merge_operand" " vu, 0") + ] UNSPEC_REDUC + ) + ) + ] + "TARGET_VECTOR" "vred.vs\t%0,%3,%4%p1" - [(set_attr "type" "vired") - (set_attr "mode" "")]) + [ + (set_attr "type" "vired") + (set_attr "mode" "") + ] +) + +;; Integer Reduction for DI +(define_insn "@pred_reduc_" + [ + (set + (match_operand:VDI_LMUL1 0 "register_operand" "=vr, vr") + (unspec:VDI_LMUL1 + [ + (unspec: + [ + (match_operand: 1 "vector_mask_operand" "vmWc1,vmWc1") + (match_operand 5 "vector_length_operand" " rK, rK") + (match_operand 6 "const_int_operand" " i, i") + (match_operand 7 "const_int_operand" " i, i") + (reg:SI VL_REGNUM) + (reg:SI VTYPE_REGNUM) + ] UNSPEC_VPREDICATE + ) + (any_reduc:VDI + (vec_duplicate:VDI + (vec_select: + (match_operand:VDI_LMUL1 4 "register_operand" " vr, vr") + (parallel [(const_int 0)]) + ) + ) + (match_operand:VDI 3 "register_operand" " vr, vr") + ) + (match_operand:VDI_LMUL1 2 "vector_merge_operand" " vu, 0") + ] UNSPEC_REDUC + ) + ) + ] + "TARGET_VECTOR" + "vred.vs\t%0,%3,%4%p1" + [ + (set_attr "type" "vired") + (set_attr "mode" "") + ] +) (define_insn "@pred_widen_reduc_plus" [(set (match_operand: 0 "register_operand" "=&vr, &vr") diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr110265-1.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr110265-1.c new file mode 100644 index 00000000000..2e4aeb5b90b --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr110265-1.c @@ -0,0 +1,13 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv32gc_zve32f -mabi=ilp32f -O3 -Wno-psabi" } */ + +#include "pr110265-1.h" + +/* { dg-final { scan-assembler-times {vredand\.vs\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vredmax\.vs\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vredmaxu\.vs\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vredmin\.vs\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times 
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr110265-1.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr110265-1.c
new file mode 100644
index 00000000000..2e4aeb5b90b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr110265-1.c
@@ -0,0 +1,13 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gc_zve32f -mabi=ilp32f -O3 -Wno-psabi" } */
+
+#include "pr110265-1.h"
+
+/* { dg-final { scan-assembler-times {vredand\.vs\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vredmax\.vs\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vredmaxu\.vs\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vredmin\.vs\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vredminu\.vs\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vredor\.vs\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vredsum\.vs\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vredxor\.vs\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 2 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr110265-1.h b/gcc/testsuite/gcc.target/riscv/rvv/base/pr110265-1.h
new file mode 100644
index 00000000000..ade44cc27ea
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr110265-1.h
@@ -0,0 +1,65 @@
+#include "riscv_vector.h"
+
+vint8m1_t test_vredand_vs_i8mf4_i8m1(vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
+  return __riscv_vredand_vs_i8mf4_i8m1(vector, scalar, vl);
+}
+
+vuint32m1_t test_vredand_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
+  return __riscv_vredand_vs_u32m8_u32m1(vector, scalar, vl);
+}
+
+vint8m1_t test_vredmax_vs_i8mf4_i8m1(vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
+  return __riscv_vredmax_vs_i8mf4_i8m1(vector, scalar, vl);
+}
+
+vint32m1_t test_vredmax_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, size_t vl) {
+  return __riscv_vredmax_vs_i32m8_i32m1(vector, scalar, vl);
+}
+
+vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1(vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
+  return __riscv_vredmaxu_vs_u8mf4_u8m1(vector, scalar, vl);
+}
+
+vuint32m1_t test_vredmaxu_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
+  return __riscv_vredmaxu_vs_u32m8_u32m1(vector, scalar, vl);
+}
+
+vint8m1_t test_vredmin_vs_i8mf4_i8m1(vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
+  return __riscv_vredmin_vs_i8mf4_i8m1(vector, scalar, vl);
+}
+
+vint32m1_t test_vredmin_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, size_t vl) {
+  return __riscv_vredmin_vs_i32m8_i32m1(vector, scalar, vl);
+}
+
+vuint8m1_t test_vredminu_vs_u8mf4_u8m1(vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
+  return __riscv_vredminu_vs_u8mf4_u8m1(vector, scalar, vl);
+}
+
+vuint32m1_t test_vredminu_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
+  return __riscv_vredminu_vs_u32m8_u32m1(vector, scalar, vl);
+}
+
+vint8m1_t test_vredor_vs_i8mf4_i8m1(vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
+  return __riscv_vredor_vs_i8mf4_i8m1(vector, scalar, vl);
+}
+
+vuint32m1_t test_vredor_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
+  return __riscv_vredor_vs_u32m8_u32m1(vector, scalar, vl);
+}
+
+vint8m1_t test_vredsum_vs_i8mf4_i8m1(vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
+  return __riscv_vredsum_vs_i8mf4_i8m1(vector, scalar, vl);
+}
+
+vuint32m1_t test_vredsum_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
+  return __riscv_vredsum_vs_u32m8_u32m1(vector, scalar, vl);
+}
+
+vint8m1_t test_vredxor_vs_i8mf4_i8m1(vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
+  return __riscv_vredxor_vs_i8mf4_i8m1(vector, scalar, vl);
+}
+
+vuint32m1_t test_vredxor_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
+  return __riscv_vredxor_vs_u32m8_u32m1(vector, scalar, vl);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr110265-2.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr110265-2.c
new file mode 100644
index 00000000000..7454c1cc918
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr110265-2.c
@@ -0,0 +1,14 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gc_zve64d -mabi=ilp32d -O3 -Wno-psabi" } */
+
+#include "pr110265-1.h"
+#include "pr110265-2.h"
+
+/* { dg-final { scan-assembler-times {vredand\.vs\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 4 } } */
+/* { dg-final { scan-assembler-times {vredmax\.vs\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 3 } } */
+/* { dg-final { scan-assembler-times {vredmaxu\.vs\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 4 } } */
+/* { dg-final { scan-assembler-times {vredmin\.vs\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 3 } } */
+/* { dg-final { scan-assembler-times {vredminu\.vs\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 4 } } */
+/* { dg-final { scan-assembler-times {vredor\.vs\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 4 } } */
+/* { dg-final { scan-assembler-times {vredsum\.vs\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 4 } } */
+/* { dg-final { scan-assembler-times {vredxor\.vs\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 4 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr110265-2.h b/gcc/testsuite/gcc.target/riscv/rvv/base/pr110265-2.h
new file mode 100644
index 00000000000..6a7e14e51f8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr110265-2.h
@@ -0,0 +1,57 @@
+#include "riscv_vector.h"
+
+vint8m1_t test_vredand_vs_i8mf8_i8m1(vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
+  return __riscv_vredand_vs_i8mf8_i8m1(vector, scalar, vl);
+}
+
+vint8m1_t test_vredmax_vs_i8mf8_i8m1(vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
+  return __riscv_vredmax_vs_i8mf8_i8m1(vector, scalar, vl);
+}
+
+vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1(vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
+  return __riscv_vredmaxu_vs_u8mf8_u8m1(vector, scalar, vl);
+}
+
+vint8m1_t test_vredmin_vs_i8mf8_i8m1(vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
+  return __riscv_vredmin_vs_i8mf8_i8m1(vector, scalar, vl);
+}
+
+vuint8m1_t test_vredminu_vs_u8mf8_u8m1(vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
+  return __riscv_vredminu_vs_u8mf8_u8m1(vector, scalar, vl);
+}
+
+vint8m1_t test_vredor_vs_i8mf8_i8m1(vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
+  return __riscv_vredor_vs_i8mf8_i8m1(vector, scalar, vl);
+}
+
+vint8m1_t test_vredsum_vs_i8mf8_i8m1(vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
+  return __riscv_vredsum_vs_i8mf8_i8m1(vector, scalar, vl);
+}
+
+vint8m1_t test_vredxor_vs_i8mf8_i8m1(vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
+  return __riscv_vredxor_vs_i8mf8_i8m1(vector, scalar, vl);
+}
+
+vuint64m1_t test_vredand_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
+  return __riscv_vredand_vs_u64m8_u64m1(vector, scalar, vl);
+}
+
+vuint64m1_t test_vredmaxu_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
+  return __riscv_vredmaxu_vs_u64m8_u64m1(vector, scalar, vl);
+}
+
+vuint64m1_t test_vredminu_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
+  return __riscv_vredminu_vs_u64m8_u64m1(vector, scalar, vl);
+}
+
+vuint64m1_t test_vredor_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
+  return __riscv_vredor_vs_u64m8_u64m1(vector, scalar, vl);
+}
+
+vuint64m1_t test_vredsum_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
+  return __riscv_vredsum_vs_u64m8_u64m1(vector, scalar, vl);
+}
+
+vuint64m1_t test_vredxor_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
+  return __riscv_vredxor_vs_u64m8_u64m1(vector, scalar, vl);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr110265-3.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr110265-3.c
new file mode 100644
index 00000000000..0ed1fbae35a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr110265-3.c
@@ -0,0 +1,14 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gc_zve64f -mabi=ilp32f -O3 -Wno-psabi" } */
+
+#include "pr110265-1.h"
+#include "pr110265-2.h"
+
+/* { dg-final { scan-assembler-times {vredand\.vs\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 4 } } */
+/* { dg-final { scan-assembler-times {vredmax\.vs\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 3 } } */
+/* { dg-final { scan-assembler-times {vredmaxu\.vs\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 4 } } */
+/* { dg-final { scan-assembler-times {vredmin\.vs\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 3 } } */
+/* { dg-final { scan-assembler-times {vredminu\.vs\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 4 } } */
+/* { dg-final { scan-assembler-times {vredor\.vs\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 4 } } */
+/* { dg-final { scan-assembler-times {vredsum\.vs\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 4 } } */
+/* { dg-final { scan-assembler-times {vredxor\.vs\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 4 } } */
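[Editor's illustration] For reference, the semantics the scan-assembler tests rely on can be modeled in plain C. This sketch is independent of RVV (element type fixed to int8_t for brevity) and shows why the result and operand 4 only need the LMUL = 1 modes while operand 3 carries the full input vector:

#include <stdint.h>
#include <stddef.h>

/* Plain C model of vredsum.vs: element 0 of the result is
   scalar[0] + sum of the first vl elements of the input vector.
   Only one element of the scalar/result operand participates, which
   is why the new patterns give operands 0, 2 and 4 the *_LMUL1 modes
   and operand 3 the full input-vector mode.  */
static int8_t
vredsum_model (const int8_t *vector, size_t vl, int8_t scalar0)
{
  int8_t acc = scalar0;            /* element 0 of the scalar operand */
  for (size_t i = 0; i < vl; i++)  /* reduce the vl active elements */
    acc = (int8_t) (acc + vector[i]);
  return acc;                      /* becomes element 0 of the result */
}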