From: Kito Cheng
To: gcc-cvs@gcc.gnu.org
Subject: [gcc r13-5672] RISC-V: Add vsll.vx C++ API tests
Date: Fri, 3 Feb 2023 07:15:19 +0000 (GMT)
X-Act-Checkin: gcc
X-Git-Author: Ju-Zhe Zhong
X-Git-Refname: refs/heads/master
X-Git-Oldrev: f890b9e76f98b00a063a4fa9913c715fa3196ab0
X-Git-Newrev: 07fba8d6f2da481c8d2915e66d49525c65ca04d6

https://gcc.gnu.org/g:07fba8d6f2da481c8d2915e66d49525c65ca04d6

commit r13-5672-g07fba8d6f2da481c8d2915e66d49525c65ca04d6
Author: Ju-Zhe Zhong
Date:   Wed Feb 1 06:13:25 2023 +0800

    RISC-V: Add vsll.vx C++ API tests

    gcc/testsuite/ChangeLog:

            * gcc.target/riscv/rvv/base/vsll_vx-1.c: New test.
            * gcc.target/riscv/rvv/base/vsll_vx-2.c: New test.
            * gcc.target/riscv/rvv/base/vsll_vx-3.c: New test.
            * gcc.target/riscv/rvv/base/vsll_vx_m-1.c: New test.
            * gcc.target/riscv/rvv/base/vsll_vx_m-2.c: New test.
            * gcc.target/riscv/rvv/base/vsll_vx_m-3.c: New test.
            * gcc.target/riscv/rvv/base/vsll_vx_mu-1.c: New test.
            * gcc.target/riscv/rvv/base/vsll_vx_mu-2.c: New test.
            * gcc.target/riscv/rvv/base/vsll_vx_mu-3.c: New test.
            * gcc.target/riscv/rvv/base/vsll_vx_tu-1.c: New test.
            * gcc.target/riscv/rvv/base/vsll_vx_tu-2.c: New test.
            * gcc.target/riscv/rvv/base/vsll_vx_tu-3.c: New test.
            * gcc.target/riscv/rvv/base/vsll_vx_tum-1.c: New test.
            * gcc.target/riscv/rvv/base/vsll_vx_tum-2.c: New test.
            * gcc.target/riscv/rvv/base/vsll_vx_tum-3.c: New test.
            * gcc.target/riscv/rvv/base/vsll_vx_tumu-1.c: New test.
            * gcc.target/riscv/rvv/base/vsll_vx_tumu-2.c: New test.
            * gcc.target/riscv/rvv/base/vsll_vx_tumu-3.c: New test.
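A note for readers: every test in the diff wraps a single __riscv_vsll_vx_* intrinsic
(element-wise logical left shift of a vector by a scalar amount) for one
element-type/LMUL combination. A minimal sketch of the pattern, assuming only the
riscv_vector.h intrinsics that appear in the diff (the wrapper name here is
hypothetical, not part of the commit):

    #include "riscv_vector.h"

    /* Shift the first vl elements of op1 left by `shift` bits; with the
       options used in these tests this is expected to compile down to a
       vsetvli followed by a single vsll.vx instruction.  */
    vint32m1_t
    shift_left_i32m1 (vint32m1_t op1, size_t shift, size_t vl)
    {
      return __riscv_vsll_vx_i32m1 (op1, shift, vl);
    }

The -1/-2/-3 variants of each file differ only in the vl argument passed to the
intrinsic: a runtime value (checked to produce vsetvli), the constant 31 (small
enough for the 5-bit AVL immediate of vsetivli), and the constant 32 (too large
for that immediate, so vsetvli is expected again).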
Diff:
---
 .../gcc.target/riscv/rvv/base/vsll_vx-1.c    | 292 +++++++++++++++++++++
 .../gcc.target/riscv/rvv/base/vsll_vx-2.c    | 292 +++++++++++++++++++++
 .../gcc.target/riscv/rvv/base/vsll_vx-3.c    | 292 +++++++++++++++++++++
 .../gcc.target/riscv/rvv/base/vsll_vx_m-1.c  | 292 +++++++++++++++++++++
 .../gcc.target/riscv/rvv/base/vsll_vx_m-2.c  | 292 +++++++++++++++++++++
 .../gcc.target/riscv/rvv/base/vsll_vx_m-3.c  | 292 +++++++++++++++++++++
 .../gcc.target/riscv/rvv/base/vsll_vx_mu-1.c | 292 +++++++++++++++++++++
 .../gcc.target/riscv/rvv/base/vsll_vx_mu-2.c | 292 +++++++++++++++++++++
 .../gcc.target/riscv/rvv/base/vsll_vx_mu-3.c | 292 +++++++++++++++++++++
 .../gcc.target/riscv/rvv/base/vsll_vx_tu-1.c | 292 +++++++++++++++++++++
 .../gcc.target/riscv/rvv/base/vsll_vx_tu-2.c | 292 +++++++++++++++++++++
 .../gcc.target/riscv/rvv/base/vsll_vx_tu-3.c | 292 +++++++++++++++++++++
 .../gcc.target/riscv/rvv/base/vsll_vx_tum-1.c | 292 +++++++++++++++++++++
 .../gcc.target/riscv/rvv/base/vsll_vx_tum-2.c | 292 +++++++++++++++++++++
 .../gcc.target/riscv/rvv/base/vsll_vx_tum-3.c | 292 +++++++++++++++++++++
 .../gcc.target/riscv/rvv/base/vsll_vx_tumu-1.c | 292 +++++++++++++++++++++
 .../gcc.target/riscv/rvv/base/vsll_vx_tumu-2.c | 292 +++++++++++++++++++++
 .../gcc.target/riscv/rvv/base/vsll_vx_tumu-3.c | 292 +++++++++++++++++++++
 18 files changed, 5256 insertions(+)

diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vsll_vx-1.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vsll_vx-1.c
new file mode 100644
index 00000000000..bae766913e7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vsll_vx-1.c
@@ -0,0 +1,292 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
+vint8mf8_t test___riscv_vsll_vx_i8mf8(vint8mf8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8mf8(op1,shift,vl);
+}
+
+
+vint8mf4_t test___riscv_vsll_vx_i8mf4(vint8mf4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8mf4(op1,shift,vl);
+}
+
+
+vint8mf2_t test___riscv_vsll_vx_i8mf2(vint8mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8mf2(op1,shift,vl);
+}
+
+
+vint8m1_t test___riscv_vsll_vx_i8m1(vint8m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8m1(op1,shift,vl);
+}
+
+
+vint8m2_t test___riscv_vsll_vx_i8m2(vint8m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8m2(op1,shift,vl);
+}
+
+
+vint8m4_t test___riscv_vsll_vx_i8m4(vint8m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8m4(op1,shift,vl);
+}
+
+
+vint8m8_t test___riscv_vsll_vx_i8m8(vint8m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8m8(op1,shift,vl);
+}
+
+
+vint16mf4_t test___riscv_vsll_vx_i16mf4(vint16mf4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16mf4(op1,shift,vl);
+}
+
+
+vint16mf2_t test___riscv_vsll_vx_i16mf2(vint16mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16mf2(op1,shift,vl);
+}
+
+
+vint16m1_t test___riscv_vsll_vx_i16m1(vint16m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16m1(op1,shift,vl);
+}
+
+
+vint16m2_t test___riscv_vsll_vx_i16m2(vint16m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16m2(op1,shift,vl);
+}
+
+
+vint16m4_t test___riscv_vsll_vx_i16m4(vint16m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16m4(op1,shift,vl);
+}
+
+
+vint16m8_t test___riscv_vsll_vx_i16m8(vint16m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16m8(op1,shift,vl);
+}
+
+
+vint32mf2_t test___riscv_vsll_vx_i32mf2(vint32mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i32mf2(op1,shift,vl);
+}
+
+
+vint32m1_t test___riscv_vsll_vx_i32m1(vint32m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i32m1(op1,shift,vl);
+}
+
+
+vint32m2_t test___riscv_vsll_vx_i32m2(vint32m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i32m2(op1,shift,vl);
+}
+
+
+vint32m4_t test___riscv_vsll_vx_i32m4(vint32m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i32m4(op1,shift,vl);
+}
+
+
+vint32m8_t test___riscv_vsll_vx_i32m8(vint32m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i32m8(op1,shift,vl);
+}
+
+
+vint64m1_t test___riscv_vsll_vx_i64m1(vint64m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i64m1(op1,shift,vl);
+}
+
+
+vint64m2_t test___riscv_vsll_vx_i64m2(vint64m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i64m2(op1,shift,vl);
+}
+
+
+vint64m4_t test___riscv_vsll_vx_i64m4(vint64m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i64m4(op1,shift,vl);
+}
+
+
+vint64m8_t test___riscv_vsll_vx_i64m8(vint64m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i64m8(op1,shift,vl);
+}
+
+
+vuint8mf8_t test___riscv_vsll_vx_u8mf8(vuint8mf8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8mf8(op1,shift,vl);
+}
+
+
+vuint8mf4_t test___riscv_vsll_vx_u8mf4(vuint8mf4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8mf4(op1,shift,vl);
+}
+
+
+vuint8mf2_t test___riscv_vsll_vx_u8mf2(vuint8mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8mf2(op1,shift,vl);
+}
+
+
+vuint8m1_t test___riscv_vsll_vx_u8m1(vuint8m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8m1(op1,shift,vl);
+}
+
+
+vuint8m2_t test___riscv_vsll_vx_u8m2(vuint8m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8m2(op1,shift,vl);
+}
+
+
+vuint8m4_t test___riscv_vsll_vx_u8m4(vuint8m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8m4(op1,shift,vl);
+}
+
+
+vuint8m8_t test___riscv_vsll_vx_u8m8(vuint8m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8m8(op1,shift,vl);
+}
+
+
+vuint16mf4_t test___riscv_vsll_vx_u16mf4(vuint16mf4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16mf4(op1,shift,vl);
+}
+
+
+vuint16mf2_t test___riscv_vsll_vx_u16mf2(vuint16mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16mf2(op1,shift,vl);
+}
+
+
+vuint16m1_t test___riscv_vsll_vx_u16m1(vuint16m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16m1(op1,shift,vl);
+}
+
+
+vuint16m2_t test___riscv_vsll_vx_u16m2(vuint16m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16m2(op1,shift,vl);
+}
+
+
+vuint16m4_t test___riscv_vsll_vx_u16m4(vuint16m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16m4(op1,shift,vl);
+}
+
+
+vuint16m8_t test___riscv_vsll_vx_u16m8(vuint16m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16m8(op1,shift,vl);
+}
+
+
+vuint32mf2_t test___riscv_vsll_vx_u32mf2(vuint32mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u32mf2(op1,shift,vl);
+}
+
+
+vuint32m1_t test___riscv_vsll_vx_u32m1(vuint32m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u32m1(op1,shift,vl);
+}
+
+
+vuint32m2_t test___riscv_vsll_vx_u32m2(vuint32m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u32m2(op1,shift,vl);
+}
+
+
+vuint32m4_t test___riscv_vsll_vx_u32m4(vuint32m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u32m4(op1,shift,vl);
+}
+
+
+vuint32m8_t test___riscv_vsll_vx_u32m8(vuint32m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u32m8(op1,shift,vl);
+}
+
+
+vuint64m1_t test___riscv_vsll_vx_u64m1(vuint64m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u64m1(op1,shift,vl);
+}
+
+
+vuint64m2_t test___riscv_vsll_vx_u64m2(vuint64m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u64m2(op1,shift,vl);
+}
+
+
+vuint64m4_t test___riscv_vsll_vx_u64m4(vuint64m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u64m4(op1,shift,vl);
+}
+
+
+vuint64m8_t test___riscv_vsll_vx_u64m8(vuint64m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u64m8(op1,shift,vl);
+}
+
+
+
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vsll_vx-2.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vsll_vx-2.c
new file mode 100644
index 00000000000..04e28e842eb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vsll_vx-2.c
@@ -0,0 +1,292 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
+vint8mf8_t test___riscv_vsll_vx_i8mf8(vint8mf8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8mf8(op1,shift,31);
+}
+
+
+vint8mf4_t test___riscv_vsll_vx_i8mf4(vint8mf4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8mf4(op1,shift,31);
+}
+
+
+vint8mf2_t test___riscv_vsll_vx_i8mf2(vint8mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8mf2(op1,shift,31);
+}
+
+
+vint8m1_t test___riscv_vsll_vx_i8m1(vint8m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8m1(op1,shift,31);
+}
+
+
+vint8m2_t test___riscv_vsll_vx_i8m2(vint8m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8m2(op1,shift,31);
+}
+
+
+vint8m4_t test___riscv_vsll_vx_i8m4(vint8m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8m4(op1,shift,31);
+}
+
+
+vint8m8_t test___riscv_vsll_vx_i8m8(vint8m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8m8(op1,shift,31);
+}
+
+
+vint16mf4_t test___riscv_vsll_vx_i16mf4(vint16mf4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16mf4(op1,shift,31);
+}
+
+
+vint16mf2_t test___riscv_vsll_vx_i16mf2(vint16mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16mf2(op1,shift,31);
+}
+
+
+vint16m1_t test___riscv_vsll_vx_i16m1(vint16m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16m1(op1,shift,31);
+}
+
+
+vint16m2_t test___riscv_vsll_vx_i16m2(vint16m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16m2(op1,shift,31);
+}
+
+
+vint16m4_t test___riscv_vsll_vx_i16m4(vint16m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16m4(op1,shift,31);
+}
+
+
+vint16m8_t test___riscv_vsll_vx_i16m8(vint16m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16m8(op1,shift,31);
+}
+
+
+vint32mf2_t test___riscv_vsll_vx_i32mf2(vint32mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i32mf2(op1,shift,31);
+}
+
+
+vint32m1_t test___riscv_vsll_vx_i32m1(vint32m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i32m1(op1,shift,31);
+}
+
+
+vint32m2_t test___riscv_vsll_vx_i32m2(vint32m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i32m2(op1,shift,31);
+}
+
+
+vint32m4_t test___riscv_vsll_vx_i32m4(vint32m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i32m4(op1,shift,31);
+}
+
+
+vint32m8_t test___riscv_vsll_vx_i32m8(vint32m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i32m8(op1,shift,31);
+}
+
+
+vint64m1_t test___riscv_vsll_vx_i64m1(vint64m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i64m1(op1,shift,31);
+}
+
+
+vint64m2_t test___riscv_vsll_vx_i64m2(vint64m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i64m2(op1,shift,31);
+}
+
+
+vint64m4_t test___riscv_vsll_vx_i64m4(vint64m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i64m4(op1,shift,31);
+}
+
+
+vint64m8_t test___riscv_vsll_vx_i64m8(vint64m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i64m8(op1,shift,31);
+}
+
+
+vuint8mf8_t test___riscv_vsll_vx_u8mf8(vuint8mf8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8mf8(op1,shift,31);
+}
+
+
+vuint8mf4_t test___riscv_vsll_vx_u8mf4(vuint8mf4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8mf4(op1,shift,31);
+}
+
+
+vuint8mf2_t test___riscv_vsll_vx_u8mf2(vuint8mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8mf2(op1,shift,31);
+}
+
+
+vuint8m1_t test___riscv_vsll_vx_u8m1(vuint8m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8m1(op1,shift,31);
+}
+
+
+vuint8m2_t test___riscv_vsll_vx_u8m2(vuint8m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8m2(op1,shift,31);
+}
+
+
+vuint8m4_t test___riscv_vsll_vx_u8m4(vuint8m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8m4(op1,shift,31);
+}
+
+
+vuint8m8_t test___riscv_vsll_vx_u8m8(vuint8m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8m8(op1,shift,31);
+}
+
+
+vuint16mf4_t test___riscv_vsll_vx_u16mf4(vuint16mf4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16mf4(op1,shift,31);
+}
+
+
+vuint16mf2_t test___riscv_vsll_vx_u16mf2(vuint16mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16mf2(op1,shift,31);
+}
+
+
+vuint16m1_t test___riscv_vsll_vx_u16m1(vuint16m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16m1(op1,shift,31);
+}
+
+
+vuint16m2_t test___riscv_vsll_vx_u16m2(vuint16m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16m2(op1,shift,31);
+}
+
+
+vuint16m4_t test___riscv_vsll_vx_u16m4(vuint16m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16m4(op1,shift,31);
+}
+
+
+vuint16m8_t test___riscv_vsll_vx_u16m8(vuint16m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16m8(op1,shift,31);
+}
+
+
+vuint32mf2_t test___riscv_vsll_vx_u32mf2(vuint32mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u32mf2(op1,shift,31);
+}
+
+
+vuint32m1_t test___riscv_vsll_vx_u32m1(vuint32m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u32m1(op1,shift,31);
+}
+
+
+vuint32m2_t test___riscv_vsll_vx_u32m2(vuint32m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u32m2(op1,shift,31);
+}
+
+
+vuint32m4_t test___riscv_vsll_vx_u32m4(vuint32m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u32m4(op1,shift,31);
+}
+
+
+vuint32m8_t test___riscv_vsll_vx_u32m8(vuint32m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u32m8(op1,shift,31);
+}
+
+
+vuint64m1_t test___riscv_vsll_vx_u64m1(vuint64m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u64m1(op1,shift,31);
+}
+
+
+vuint64m2_t test___riscv_vsll_vx_u64m2(vuint64m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u64m2(op1,shift,31);
+}
+
+
+vuint64m4_t test___riscv_vsll_vx_u64m4(vuint64m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u64m4(op1,shift,31);
+}
+
+
+vuint64m8_t test___riscv_vsll_vx_u64m8(vuint64m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u64m8(op1,shift,31);
+}
+
+
+
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf8,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf4,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf2,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m1,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m2,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m4,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m8,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf4,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf2,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m1,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m2,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m4,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m8,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*mf2,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m1,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m2,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m4,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m8,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m1,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m2,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m4,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m8,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vsll_vx-3.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vsll_vx-3.c
new file mode 100644
index 00000000000..521c51a54ae
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vsll_vx-3.c
@@ -0,0 +1,292 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
+vint8mf8_t test___riscv_vsll_vx_i8mf8(vint8mf8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8mf8(op1,shift,32);
+}
+
+
+vint8mf4_t test___riscv_vsll_vx_i8mf4(vint8mf4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8mf4(op1,shift,32);
+}
+
+
+vint8mf2_t test___riscv_vsll_vx_i8mf2(vint8mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8mf2(op1,shift,32);
+}
+
+
+vint8m1_t test___riscv_vsll_vx_i8m1(vint8m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8m1(op1,shift,32);
+}
+
+
+vint8m2_t test___riscv_vsll_vx_i8m2(vint8m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8m2(op1,shift,32);
+}
+
+
+vint8m4_t test___riscv_vsll_vx_i8m4(vint8m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8m4(op1,shift,32);
+}
+
+
+vint8m8_t test___riscv_vsll_vx_i8m8(vint8m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8m8(op1,shift,32);
+}
+
+
+vint16mf4_t test___riscv_vsll_vx_i16mf4(vint16mf4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16mf4(op1,shift,32);
+}
+
+
+vint16mf2_t test___riscv_vsll_vx_i16mf2(vint16mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16mf2(op1,shift,32);
+}
+
+
+vint16m1_t test___riscv_vsll_vx_i16m1(vint16m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16m1(op1,shift,32);
+}
+
+
+vint16m2_t test___riscv_vsll_vx_i16m2(vint16m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16m2(op1,shift,32);
+}
+
+
+vint16m4_t test___riscv_vsll_vx_i16m4(vint16m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16m4(op1,shift,32);
+}
+
+
+vint16m8_t test___riscv_vsll_vx_i16m8(vint16m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16m8(op1,shift,32);
+}
+
+
+vint32mf2_t test___riscv_vsll_vx_i32mf2(vint32mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i32mf2(op1,shift,32);
+}
+
+
+vint32m1_t test___riscv_vsll_vx_i32m1(vint32m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i32m1(op1,shift,32);
+}
+
+
+vint32m2_t test___riscv_vsll_vx_i32m2(vint32m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i32m2(op1,shift,32);
+}
+
+
+vint32m4_t test___riscv_vsll_vx_i32m4(vint32m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i32m4(op1,shift,32);
+}
+
+
+vint32m8_t test___riscv_vsll_vx_i32m8(vint32m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i32m8(op1,shift,32);
+}
+
+
+vint64m1_t test___riscv_vsll_vx_i64m1(vint64m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i64m1(op1,shift,32);
+}
+
+
+vint64m2_t test___riscv_vsll_vx_i64m2(vint64m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i64m2(op1,shift,32);
+}
+
+
+vint64m4_t test___riscv_vsll_vx_i64m4(vint64m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i64m4(op1,shift,32);
+}
+
+
+vint64m8_t test___riscv_vsll_vx_i64m8(vint64m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i64m8(op1,shift,32);
+}
+
+
+vuint8mf8_t test___riscv_vsll_vx_u8mf8(vuint8mf8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8mf8(op1,shift,32);
+}
+
+
+vuint8mf4_t test___riscv_vsll_vx_u8mf4(vuint8mf4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8mf4(op1,shift,32);
+}
+
+
+vuint8mf2_t test___riscv_vsll_vx_u8mf2(vuint8mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8mf2(op1,shift,32);
+}
+
+
+vuint8m1_t test___riscv_vsll_vx_u8m1(vuint8m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8m1(op1,shift,32);
+}
+
+
+vuint8m2_t test___riscv_vsll_vx_u8m2(vuint8m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8m2(op1,shift,32);
+}
+
+
+vuint8m4_t test___riscv_vsll_vx_u8m4(vuint8m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8m4(op1,shift,32);
+}
+
+
+vuint8m8_t test___riscv_vsll_vx_u8m8(vuint8m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8m8(op1,shift,32);
+}
+
+
+vuint16mf4_t test___riscv_vsll_vx_u16mf4(vuint16mf4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16mf4(op1,shift,32);
+}
+
+
+vuint16mf2_t test___riscv_vsll_vx_u16mf2(vuint16mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16mf2(op1,shift,32);
+}
+
+
+vuint16m1_t test___riscv_vsll_vx_u16m1(vuint16m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16m1(op1,shift,32);
+}
+
+
+vuint16m2_t test___riscv_vsll_vx_u16m2(vuint16m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16m2(op1,shift,32);
+}
+
+
+vuint16m4_t test___riscv_vsll_vx_u16m4(vuint16m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16m4(op1,shift,32);
+}
+
+
+vuint16m8_t test___riscv_vsll_vx_u16m8(vuint16m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16m8(op1,shift,32);
+}
+
+
+vuint32mf2_t test___riscv_vsll_vx_u32mf2(vuint32mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u32mf2(op1,shift,32);
+}
+
+
+vuint32m1_t test___riscv_vsll_vx_u32m1(vuint32m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u32m1(op1,shift,32);
+}
+
+
+vuint32m2_t test___riscv_vsll_vx_u32m2(vuint32m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u32m2(op1,shift,32);
+}
+
+
+vuint32m4_t test___riscv_vsll_vx_u32m4(vuint32m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u32m4(op1,shift,32);
+}
+
+
+vuint32m8_t test___riscv_vsll_vx_u32m8(vuint32m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u32m8(op1,shift,32);
+}
+
+
+vuint64m1_t test___riscv_vsll_vx_u64m1(vuint64m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u64m1(op1,shift,32);
+}
+
+
+vuint64m2_t test___riscv_vsll_vx_u64m2(vuint64m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u64m2(op1,shift,32);
+}
+
+
+vuint64m4_t test___riscv_vsll_vx_u64m4(vuint64m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u64m4(op1,shift,32);
+}
+
+
+vuint64m8_t test___riscv_vsll_vx_u64m8(vuint64m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u64m8(op1,shift,32);
+}
+
+
+
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vsll_vx_m-1.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vsll_vx_m-1.c
new file mode 100644
index 00000000000..6538b6ff016
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vsll_vx_m-1.c
@@ -0,0 +1,292 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
+vint8mf8_t test___riscv_vsll_vx_i8mf8_m(vbool64_t mask,vint8mf8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8mf8_m(mask,op1,shift,vl);
+}
+
+
+vint8mf4_t test___riscv_vsll_vx_i8mf4_m(vbool32_t mask,vint8mf4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8mf4_m(mask,op1,shift,vl);
+}
+
+
+vint8mf2_t test___riscv_vsll_vx_i8mf2_m(vbool16_t mask,vint8mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8mf2_m(mask,op1,shift,vl);
+}
+
+
+vint8m1_t test___riscv_vsll_vx_i8m1_m(vbool8_t mask,vint8m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8m1_m(mask,op1,shift,vl);
+}
+
+
+vint8m2_t test___riscv_vsll_vx_i8m2_m(vbool4_t mask,vint8m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8m2_m(mask,op1,shift,vl);
+}
+
+
+vint8m4_t test___riscv_vsll_vx_i8m4_m(vbool2_t mask,vint8m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8m4_m(mask,op1,shift,vl);
+}
+
+
+vint8m8_t test___riscv_vsll_vx_i8m8_m(vbool1_t mask,vint8m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8m8_m(mask,op1,shift,vl);
+}
+
+
+vint16mf4_t test___riscv_vsll_vx_i16mf4_m(vbool64_t mask,vint16mf4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16mf4_m(mask,op1,shift,vl);
+}
+
+
+vint16mf2_t test___riscv_vsll_vx_i16mf2_m(vbool32_t mask,vint16mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16mf2_m(mask,op1,shift,vl);
+}
+
+
+vint16m1_t test___riscv_vsll_vx_i16m1_m(vbool16_t mask,vint16m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16m1_m(mask,op1,shift,vl);
+}
+
+
+vint16m2_t test___riscv_vsll_vx_i16m2_m(vbool8_t mask,vint16m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16m2_m(mask,op1,shift,vl);
+}
+
+
+vint16m4_t test___riscv_vsll_vx_i16m4_m(vbool4_t mask,vint16m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16m4_m(mask,op1,shift,vl);
+}
+
+
+vint16m8_t test___riscv_vsll_vx_i16m8_m(vbool2_t mask,vint16m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16m8_m(mask,op1,shift,vl);
+}
+
+
+vint32mf2_t test___riscv_vsll_vx_i32mf2_m(vbool64_t mask,vint32mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i32mf2_m(mask,op1,shift,vl);
+}
+
+
+vint32m1_t test___riscv_vsll_vx_i32m1_m(vbool32_t mask,vint32m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i32m1_m(mask,op1,shift,vl);
+}
+
+
+vint32m2_t test___riscv_vsll_vx_i32m2_m(vbool16_t mask,vint32m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i32m2_m(mask,op1,shift,vl);
+}
+
+
+vint32m4_t test___riscv_vsll_vx_i32m4_m(vbool8_t mask,vint32m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i32m4_m(mask,op1,shift,vl);
+}
+
+
+vint32m8_t test___riscv_vsll_vx_i32m8_m(vbool4_t mask,vint32m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i32m8_m(mask,op1,shift,vl);
+}
+
+
+vint64m1_t test___riscv_vsll_vx_i64m1_m(vbool64_t mask,vint64m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i64m1_m(mask,op1,shift,vl);
+}
+
+
+vint64m2_t test___riscv_vsll_vx_i64m2_m(vbool32_t mask,vint64m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i64m2_m(mask,op1,shift,vl);
+}
+
+
+vint64m4_t test___riscv_vsll_vx_i64m4_m(vbool16_t mask,vint64m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i64m4_m(mask,op1,shift,vl);
+}
+
+
+vint64m8_t test___riscv_vsll_vx_i64m8_m(vbool8_t mask,vint64m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i64m8_m(mask,op1,shift,vl);
+}
+
+
+vuint8mf8_t test___riscv_vsll_vx_u8mf8_m(vbool64_t mask,vuint8mf8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8mf8_m(mask,op1,shift,vl);
+}
+
+
+vuint8mf4_t test___riscv_vsll_vx_u8mf4_m(vbool32_t mask,vuint8mf4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8mf4_m(mask,op1,shift,vl);
+}
+
+
+vuint8mf2_t test___riscv_vsll_vx_u8mf2_m(vbool16_t mask,vuint8mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8mf2_m(mask,op1,shift,vl);
+}
+
+
+vuint8m1_t test___riscv_vsll_vx_u8m1_m(vbool8_t mask,vuint8m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8m1_m(mask,op1,shift,vl);
+}
+
+
+vuint8m2_t test___riscv_vsll_vx_u8m2_m(vbool4_t mask,vuint8m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8m2_m(mask,op1,shift,vl);
+}
+
+
+vuint8m4_t test___riscv_vsll_vx_u8m4_m(vbool2_t mask,vuint8m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8m4_m(mask,op1,shift,vl);
+}
+
+
+vuint8m8_t test___riscv_vsll_vx_u8m8_m(vbool1_t mask,vuint8m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8m8_m(mask,op1,shift,vl);
+}
+
+
+vuint16mf4_t test___riscv_vsll_vx_u16mf4_m(vbool64_t mask,vuint16mf4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16mf4_m(mask,op1,shift,vl);
+}
+
+
+vuint16mf2_t test___riscv_vsll_vx_u16mf2_m(vbool32_t mask,vuint16mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16mf2_m(mask,op1,shift,vl);
+}
+
+
+vuint16m1_t test___riscv_vsll_vx_u16m1_m(vbool16_t mask,vuint16m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16m1_m(mask,op1,shift,vl);
+}
+
+
+vuint16m2_t test___riscv_vsll_vx_u16m2_m(vbool8_t mask,vuint16m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16m2_m(mask,op1,shift,vl);
+}
+
+
+vuint16m4_t test___riscv_vsll_vx_u16m4_m(vbool4_t mask,vuint16m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16m4_m(mask,op1,shift,vl);
+}
+
+
+vuint16m8_t test___riscv_vsll_vx_u16m8_m(vbool2_t mask,vuint16m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16m8_m(mask,op1,shift,vl);
+}
+
+
+vuint32mf2_t test___riscv_vsll_vx_u32mf2_m(vbool64_t mask,vuint32mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u32mf2_m(mask,op1,shift,vl);
+}
+
+
+vuint32m1_t test___riscv_vsll_vx_u32m1_m(vbool32_t mask,vuint32m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u32m1_m(mask,op1,shift,vl);
+}
+
+
+vuint32m2_t test___riscv_vsll_vx_u32m2_m(vbool16_t mask,vuint32m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u32m2_m(mask,op1,shift,vl);
+}
+
+
+vuint32m4_t test___riscv_vsll_vx_u32m4_m(vbool8_t mask,vuint32m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u32m4_m(mask,op1,shift,vl);
+}
+
+
+vuint32m8_t test___riscv_vsll_vx_u32m8_m(vbool4_t mask,vuint32m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u32m8_m(mask,op1,shift,vl);
+}
+
+
+vuint64m1_t test___riscv_vsll_vx_u64m1_m(vbool64_t mask,vuint64m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u64m1_m(mask,op1,shift,vl);
+}
+
+
+vuint64m2_t test___riscv_vsll_vx_u64m2_m(vbool32_t mask,vuint64m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u64m2_m(mask,op1,shift,vl);
+}
+
+
+vuint64m4_t test___riscv_vsll_vx_u64m4_m(vbool16_t mask,vuint64m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u64m4_m(mask,op1,shift,vl);
+}
+
+
+vuint64m8_t test___riscv_vsll_vx_u64m8_m(vbool8_t mask,vuint64m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u64m8_m(mask,op1,shift,vl);
+}
+
+
+
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vsll_vx_m-2.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vsll_vx_m-2.c
new file mode 100644
index 00000000000..cecee8154f3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vsll_vx_m-2.c
@@ -0,0 +1,292 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
+vint8mf8_t test___riscv_vsll_vx_i8mf8_m(vbool64_t mask,vint8mf8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8mf8_m(mask,op1,shift,31);
+}
+
+
+vint8mf4_t test___riscv_vsll_vx_i8mf4_m(vbool32_t mask,vint8mf4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8mf4_m(mask,op1,shift,31);
+}
+
+
+vint8mf2_t test___riscv_vsll_vx_i8mf2_m(vbool16_t mask,vint8mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8mf2_m(mask,op1,shift,31);
+}
+
+
+vint8m1_t test___riscv_vsll_vx_i8m1_m(vbool8_t mask,vint8m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8m1_m(mask,op1,shift,31);
+}
+
+
+vint8m2_t test___riscv_vsll_vx_i8m2_m(vbool4_t mask,vint8m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8m2_m(mask,op1,shift,31);
+}
+
+
+vint8m4_t test___riscv_vsll_vx_i8m4_m(vbool2_t mask,vint8m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8m4_m(mask,op1,shift,31);
+}
+
+
+vint8m8_t test___riscv_vsll_vx_i8m8_m(vbool1_t mask,vint8m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8m8_m(mask,op1,shift,31);
+}
+
+
+vint16mf4_t test___riscv_vsll_vx_i16mf4_m(vbool64_t mask,vint16mf4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16mf4_m(mask,op1,shift,31);
+}
+
+
+vint16mf2_t test___riscv_vsll_vx_i16mf2_m(vbool32_t mask,vint16mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16mf2_m(mask,op1,shift,31);
+}
+
+
+vint16m1_t test___riscv_vsll_vx_i16m1_m(vbool16_t mask,vint16m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16m1_m(mask,op1,shift,31);
+}
+
+
+vint16m2_t test___riscv_vsll_vx_i16m2_m(vbool8_t mask,vint16m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16m2_m(mask,op1,shift,31);
+}
+
+
+vint16m4_t test___riscv_vsll_vx_i16m4_m(vbool4_t mask,vint16m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16m4_m(mask,op1,shift,31);
+}
+
+
+vint16m8_t test___riscv_vsll_vx_i16m8_m(vbool2_t mask,vint16m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16m8_m(mask,op1,shift,31);
+}
+
+
+vint32mf2_t test___riscv_vsll_vx_i32mf2_m(vbool64_t mask,vint32mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i32mf2_m(mask,op1,shift,31);
+}
+
+
+vint32m1_t test___riscv_vsll_vx_i32m1_m(vbool32_t mask,vint32m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i32m1_m(mask,op1,shift,31);
+}
+
+
+vint32m2_t test___riscv_vsll_vx_i32m2_m(vbool16_t mask,vint32m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i32m2_m(mask,op1,shift,31);
+}
+
+
+vint32m4_t test___riscv_vsll_vx_i32m4_m(vbool8_t mask,vint32m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i32m4_m(mask,op1,shift,31);
+}
+
+
+vint32m8_t test___riscv_vsll_vx_i32m8_m(vbool4_t mask,vint32m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i32m8_m(mask,op1,shift,31);
+}
+
+
+vint64m1_t test___riscv_vsll_vx_i64m1_m(vbool64_t mask,vint64m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i64m1_m(mask,op1,shift,31);
+}
+
+
+vint64m2_t test___riscv_vsll_vx_i64m2_m(vbool32_t mask,vint64m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i64m2_m(mask,op1,shift,31);
+}
+
+
+vint64m4_t test___riscv_vsll_vx_i64m4_m(vbool16_t mask,vint64m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i64m4_m(mask,op1,shift,31);
+}
+
+
+vint64m8_t test___riscv_vsll_vx_i64m8_m(vbool8_t mask,vint64m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i64m8_m(mask,op1,shift,31);
+}
+
+
+vuint8mf8_t test___riscv_vsll_vx_u8mf8_m(vbool64_t mask,vuint8mf8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8mf8_m(mask,op1,shift,31);
+}
+
+
+vuint8mf4_t test___riscv_vsll_vx_u8mf4_m(vbool32_t mask,vuint8mf4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8mf4_m(mask,op1,shift,31);
+}
+
+
+vuint8mf2_t test___riscv_vsll_vx_u8mf2_m(vbool16_t mask,vuint8mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8mf2_m(mask,op1,shift,31);
+}
+
+
+vuint8m1_t test___riscv_vsll_vx_u8m1_m(vbool8_t mask,vuint8m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8m1_m(mask,op1,shift,31);
+}
+
+
+vuint8m2_t test___riscv_vsll_vx_u8m2_m(vbool4_t mask,vuint8m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8m2_m(mask,op1,shift,31);
+}
+
+
+vuint8m4_t test___riscv_vsll_vx_u8m4_m(vbool2_t mask,vuint8m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8m4_m(mask,op1,shift,31);
+}
+
+
+vuint8m8_t test___riscv_vsll_vx_u8m8_m(vbool1_t mask,vuint8m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8m8_m(mask,op1,shift,31);
+}
+
+
+vuint16mf4_t test___riscv_vsll_vx_u16mf4_m(vbool64_t mask,vuint16mf4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16mf4_m(mask,op1,shift,31);
+}
+
+
+vuint16mf2_t test___riscv_vsll_vx_u16mf2_m(vbool32_t mask,vuint16mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16mf2_m(mask,op1,shift,31);
+}
+
+
+vuint16m1_t test___riscv_vsll_vx_u16m1_m(vbool16_t mask,vuint16m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16m1_m(mask,op1,shift,31);
+}
+
+
+vuint16m2_t test___riscv_vsll_vx_u16m2_m(vbool8_t mask,vuint16m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16m2_m(mask,op1,shift,31);
+}
+
+
+vuint16m4_t test___riscv_vsll_vx_u16m4_m(vbool4_t mask,vuint16m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16m4_m(mask,op1,shift,31);
+}
+
+
+vuint16m8_t test___riscv_vsll_vx_u16m8_m(vbool2_t mask,vuint16m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16m8_m(mask,op1,shift,31);
+}
+
+
+vuint32mf2_t test___riscv_vsll_vx_u32mf2_m(vbool64_t mask,vuint32mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u32mf2_m(mask,op1,shift,31);
+}
+
+
+vuint32m1_t test___riscv_vsll_vx_u32m1_m(vbool32_t mask,vuint32m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u32m1_m(mask,op1,shift,31);
+}
+
+
+vuint32m2_t test___riscv_vsll_vx_u32m2_m(vbool16_t mask,vuint32m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u32m2_m(mask,op1,shift,31);
+}
+
+
+vuint32m4_t test___riscv_vsll_vx_u32m4_m(vbool8_t mask,vuint32m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u32m4_m(mask,op1,shift,31);
+}
+
+
+vuint32m8_t test___riscv_vsll_vx_u32m8_m(vbool4_t mask,vuint32m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u32m8_m(mask,op1,shift,31);
+}
+
+
+vuint64m1_t test___riscv_vsll_vx_u64m1_m(vbool64_t mask,vuint64m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u64m1_m(mask,op1,shift,31);
+}
+
+
+vuint64m2_t test___riscv_vsll_vx_u64m2_m(vbool32_t mask,vuint64m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u64m2_m(mask,op1,shift,31);
+}
+
+
+vuint64m4_t test___riscv_vsll_vx_u64m4_m(vbool16_t mask,vuint64m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u64m4_m(mask,op1,shift,31);
+}
+
+
+vuint64m8_t test___riscv_vsll_vx_u64m8_m(vbool8_t mask,vuint64m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u64m8_m(mask,op1,shift,31);
+}
+
+
+
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf8,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf4,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf2,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m1,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m2,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m4,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m8,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf4,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf2,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m1,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m2,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m4,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m8,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*mf2,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m1,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m2,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m4,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m8,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m1,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m2,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m4,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m8,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vsll_vx_m-3.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vsll_vx_m-3.c
new file mode 100644
index 00000000000..a60498ecc07
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vsll_vx_m-3.c
@@ -0,0 +1,292 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
+vint8mf8_t test___riscv_vsll_vx_i8mf8_m(vbool64_t mask,vint8mf8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8mf8_m(mask,op1,shift,32);
+}
+
+
+vint8mf4_t test___riscv_vsll_vx_i8mf4_m(vbool32_t mask,vint8mf4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8mf4_m(mask,op1,shift,32);
+}
+
+
+vint8mf2_t test___riscv_vsll_vx_i8mf2_m(vbool16_t mask,vint8mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8mf2_m(mask,op1,shift,32);
+}
+
+
+vint8m1_t test___riscv_vsll_vx_i8m1_m(vbool8_t mask,vint8m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8m1_m(mask,op1,shift,32);
+}
+
+
+vint8m2_t test___riscv_vsll_vx_i8m2_m(vbool4_t mask,vint8m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8m2_m(mask,op1,shift,32);
+}
+
+
+vint8m4_t test___riscv_vsll_vx_i8m4_m(vbool2_t mask,vint8m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8m4_m(mask,op1,shift,32);
+}
+
+
+vint8m8_t test___riscv_vsll_vx_i8m8_m(vbool1_t mask,vint8m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8m8_m(mask,op1,shift,32);
+}
+
+
+vint16mf4_t test___riscv_vsll_vx_i16mf4_m(vbool64_t mask,vint16mf4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16mf4_m(mask,op1,shift,32);
+}
+
+
+vint16mf2_t test___riscv_vsll_vx_i16mf2_m(vbool32_t mask,vint16mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16mf2_m(mask,op1,shift,32);
+}
+
+
+vint16m1_t test___riscv_vsll_vx_i16m1_m(vbool16_t mask,vint16m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16m1_m(mask,op1,shift,32);
+}
+
+
+vint16m2_t test___riscv_vsll_vx_i16m2_m(vbool8_t mask,vint16m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16m2_m(mask,op1,shift,32);
+}
+
+
+vint16m4_t test___riscv_vsll_vx_i16m4_m(vbool4_t mask,vint16m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16m4_m(mask,op1,shift,32);
+}
+
+
+vint16m8_t test___riscv_vsll_vx_i16m8_m(vbool2_t mask,vint16m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16m8_m(mask,op1,shift,32);
+}
+
+
+vint32mf2_t test___riscv_vsll_vx_i32mf2_m(vbool64_t mask,vint32mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i32mf2_m(mask,op1,shift,32);
+}
+
+
+vint32m1_t test___riscv_vsll_vx_i32m1_m(vbool32_t mask,vint32m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i32m1_m(mask,op1,shift,32);
+}
+
+
+vint32m2_t test___riscv_vsll_vx_i32m2_m(vbool16_t mask,vint32m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i32m2_m(mask,op1,shift,32);
+}
+
+
+vint32m4_t test___riscv_vsll_vx_i32m4_m(vbool8_t mask,vint32m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i32m4_m(mask,op1,shift,32);
+}
+
+
+vint32m8_t test___riscv_vsll_vx_i32m8_m(vbool4_t mask,vint32m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i32m8_m(mask,op1,shift,32);
+}
+
+
+vint64m1_t test___riscv_vsll_vx_i64m1_m(vbool64_t mask,vint64m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i64m1_m(mask,op1,shift,32);
+}
+
+
+vint64m2_t test___riscv_vsll_vx_i64m2_m(vbool32_t mask,vint64m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i64m2_m(mask,op1,shift,32);
+}
+
+
+vint64m4_t test___riscv_vsll_vx_i64m4_m(vbool16_t mask,vint64m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i64m4_m(mask,op1,shift,32);
+}
+
+
+vint64m8_t test___riscv_vsll_vx_i64m8_m(vbool8_t mask,vint64m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i64m8_m(mask,op1,shift,32);
+}
+
+
+vuint8mf8_t test___riscv_vsll_vx_u8mf8_m(vbool64_t mask,vuint8mf8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8mf8_m(mask,op1,shift,32);
+}
+
+
+vuint8mf4_t test___riscv_vsll_vx_u8mf4_m(vbool32_t mask,vuint8mf4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8mf4_m(mask,op1,shift,32);
+}
+
+
+vuint8mf2_t test___riscv_vsll_vx_u8mf2_m(vbool16_t mask,vuint8mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8mf2_m(mask,op1,shift,32);
+}
+ + +vuint8m1_t test___riscv_vsll_vx_u8m1_m(vbool8_t mask,vuint8m1_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u8m1_m(mask,op1,shift,32); +} + + +vuint8m2_t test___riscv_vsll_vx_u8m2_m(vbool4_t mask,vuint8m2_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u8m2_m(mask,op1,shift,32); +} + + +vuint8m4_t test___riscv_vsll_vx_u8m4_m(vbool2_t mask,vuint8m4_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u8m4_m(mask,op1,shift,32); +} + + +vuint8m8_t test___riscv_vsll_vx_u8m8_m(vbool1_t mask,vuint8m8_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u8m8_m(mask,op1,shift,32); +} + + +vuint16mf4_t test___riscv_vsll_vx_u16mf4_m(vbool64_t mask,vuint16mf4_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u16mf4_m(mask,op1,shift,32); +} + + +vuint16mf2_t test___riscv_vsll_vx_u16mf2_m(vbool32_t mask,vuint16mf2_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u16mf2_m(mask,op1,shift,32); +} + + +vuint16m1_t test___riscv_vsll_vx_u16m1_m(vbool16_t mask,vuint16m1_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u16m1_m(mask,op1,shift,32); +} + + +vuint16m2_t test___riscv_vsll_vx_u16m2_m(vbool8_t mask,vuint16m2_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u16m2_m(mask,op1,shift,32); +} + + +vuint16m4_t test___riscv_vsll_vx_u16m4_m(vbool4_t mask,vuint16m4_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u16m4_m(mask,op1,shift,32); +} + + +vuint16m8_t test___riscv_vsll_vx_u16m8_m(vbool2_t mask,vuint16m8_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u16m8_m(mask,op1,shift,32); +} + + +vuint32mf2_t test___riscv_vsll_vx_u32mf2_m(vbool64_t mask,vuint32mf2_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u32mf2_m(mask,op1,shift,32); +} + + +vuint32m1_t test___riscv_vsll_vx_u32m1_m(vbool32_t mask,vuint32m1_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u32m1_m(mask,op1,shift,32); +} + + +vuint32m2_t test___riscv_vsll_vx_u32m2_m(vbool16_t mask,vuint32m2_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u32m2_m(mask,op1,shift,32); +} + + +vuint32m4_t test___riscv_vsll_vx_u32m4_m(vbool8_t mask,vuint32m4_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u32m4_m(mask,op1,shift,32); +} + + +vuint32m8_t test___riscv_vsll_vx_u32m8_m(vbool4_t mask,vuint32m8_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u32m8_m(mask,op1,shift,32); +} + + +vuint64m1_t test___riscv_vsll_vx_u64m1_m(vbool64_t mask,vuint64m1_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u64m1_m(mask,op1,shift,32); +} + + +vuint64m2_t test___riscv_vsll_vx_u64m2_m(vbool32_t mask,vuint64m2_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u64m2_m(mask,op1,shift,32); +} + + +vuint64m4_t test___riscv_vsll_vx_u64m4_m(vbool16_t mask,vuint64m4_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u64m4_m(mask,op1,shift,32); +} + + +vuint64m8_t test___riscv_vsll_vx_u64m8_m(vbool8_t mask,vuint64m8_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u64m8_m(mask,op1,shift,32); +} + + + +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times 
{vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*t[au],\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vsll_vx_mu-1.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vsll_vx_mu-1.c new file mode 100644 index 00000000000..6e1730d2372 --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vsll_vx_mu-1.c @@ -0,0 +1,292 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "riscv_vector.h" + +vint8mf8_t test___riscv_vsll_vx_i8mf8_mu(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,size_t shift,size_t 
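The vsll_vx_mu tests that follow exercise the mask-undisturbed (_mu) policy variants: masked-off lanes of the result keep the value of the extra `merge` operand rather than being left undefined, which is why each call takes a mask, a merge vector, the shift operand, and vl. A minimal sketch of a call site, assuming an RVV target and riscv_vector.h (the vmseq intrinsic is only an illustrative way to build a mask; the helper name is hypothetical):

  #include "riscv_vector.h"

  /* Shift only the lanes selected by `mask`; every other lane of the
     result keeps the corresponding lane of `merge` (_mu policy).  */
  vint8m1_t shift_selected (vint8m1_t merge, vint8m1_t op1, size_t vl)
  {
    vbool8_t mask = __riscv_vmseq_vx_i8m1_b8 (op1, 0, vl);
    return __riscv_vsll_vx_i8m1_mu (mask, merge, op1, 2, vl);
  }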
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vsll_vx_mu-1.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vsll_vx_mu-1.c
new file mode 100644
index 00000000000..6e1730d2372
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vsll_vx_mu-1.c
@@ -0,0 +1,292 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
+vint8mf8_t test___riscv_vsll_vx_i8mf8_mu(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8mf8_mu(mask,merge,op1,shift,vl);
+}
+
+
+vint8mf4_t test___riscv_vsll_vx_i8mf4_mu(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8mf4_mu(mask,merge,op1,shift,vl);
+}
+
+
+vint8mf2_t test___riscv_vsll_vx_i8mf2_mu(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8mf2_mu(mask,merge,op1,shift,vl);
+}
+
+
+vint8m1_t test___riscv_vsll_vx_i8m1_mu(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8m1_mu(mask,merge,op1,shift,vl);
+}
+
+
+vint8m2_t test___riscv_vsll_vx_i8m2_mu(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8m2_mu(mask,merge,op1,shift,vl);
+}
+
+
+vint8m4_t test___riscv_vsll_vx_i8m4_mu(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8m4_mu(mask,merge,op1,shift,vl);
+}
+
+
+vint8m8_t test___riscv_vsll_vx_i8m8_mu(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8m8_mu(mask,merge,op1,shift,vl);
+}
+
+
+vint16mf4_t test___riscv_vsll_vx_i16mf4_mu(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16mf4_mu(mask,merge,op1,shift,vl);
+}
+
+
+vint16mf2_t test___riscv_vsll_vx_i16mf2_mu(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16mf2_mu(mask,merge,op1,shift,vl);
+}
+
+
+vint16m1_t test___riscv_vsll_vx_i16m1_mu(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16m1_mu(mask,merge,op1,shift,vl);
+}
+
+
+vint16m2_t test___riscv_vsll_vx_i16m2_mu(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16m2_mu(mask,merge,op1,shift,vl);
+}
+
+
+vint16m4_t test___riscv_vsll_vx_i16m4_mu(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16m4_mu(mask,merge,op1,shift,vl);
+}
+
+
+vint16m8_t test___riscv_vsll_vx_i16m8_mu(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16m8_mu(mask,merge,op1,shift,vl);
+}
+
+
+vint32mf2_t test___riscv_vsll_vx_i32mf2_mu(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i32mf2_mu(mask,merge,op1,shift,vl);
+}
+
+
+vint32m1_t test___riscv_vsll_vx_i32m1_mu(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i32m1_mu(mask,merge,op1,shift,vl);
+}
+
+
+vint32m2_t test___riscv_vsll_vx_i32m2_mu(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i32m2_mu(mask,merge,op1,shift,vl);
+}
+
+
+vint32m4_t test___riscv_vsll_vx_i32m4_mu(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i32m4_mu(mask,merge,op1,shift,vl);
+}
+
+
+vint32m8_t test___riscv_vsll_vx_i32m8_mu(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i32m8_mu(mask,merge,op1,shift,vl);
+}
+
+
+vint64m1_t test___riscv_vsll_vx_i64m1_mu(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i64m1_mu(mask,merge,op1,shift,vl);
+}
+
+
+vint64m2_t test___riscv_vsll_vx_i64m2_mu(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i64m2_mu(mask,merge,op1,shift,vl);
+}
+
+
+vint64m4_t test___riscv_vsll_vx_i64m4_mu(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i64m4_mu(mask,merge,op1,shift,vl);
+}
+
+
+vint64m8_t test___riscv_vsll_vx_i64m8_mu(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i64m8_mu(mask,merge,op1,shift,vl);
+}
+
+
+vuint8mf8_t test___riscv_vsll_vx_u8mf8_mu(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8mf8_mu(mask,merge,op1,shift,vl);
+}
+
+
+vuint8mf4_t test___riscv_vsll_vx_u8mf4_mu(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8mf4_mu(mask,merge,op1,shift,vl);
+}
+
+
+vuint8mf2_t test___riscv_vsll_vx_u8mf2_mu(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8mf2_mu(mask,merge,op1,shift,vl);
+}
+
+
+vuint8m1_t test___riscv_vsll_vx_u8m1_mu(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8m1_mu(mask,merge,op1,shift,vl);
+}
+
+
+vuint8m2_t test___riscv_vsll_vx_u8m2_mu(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8m2_mu(mask,merge,op1,shift,vl);
+}
+
+
+vuint8m4_t test___riscv_vsll_vx_u8m4_mu(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8m4_mu(mask,merge,op1,shift,vl);
+}
+
+
+vuint8m8_t test___riscv_vsll_vx_u8m8_mu(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8m8_mu(mask,merge,op1,shift,vl);
+}
+
+
+vuint16mf4_t test___riscv_vsll_vx_u16mf4_mu(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16mf4_mu(mask,merge,op1,shift,vl);
+}
+
+
+vuint16mf2_t test___riscv_vsll_vx_u16mf2_mu(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16mf2_mu(mask,merge,op1,shift,vl);
+}
+
+
+vuint16m1_t test___riscv_vsll_vx_u16m1_mu(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16m1_mu(mask,merge,op1,shift,vl);
+}
+
+
+vuint16m2_t test___riscv_vsll_vx_u16m2_mu(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16m2_mu(mask,merge,op1,shift,vl);
+}
+
+
+vuint16m4_t test___riscv_vsll_vx_u16m4_mu(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16m4_mu(mask,merge,op1,shift,vl);
+}
+
+
+vuint16m8_t test___riscv_vsll_vx_u16m8_mu(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16m8_mu(mask,merge,op1,shift,vl);
+}
+
+
+vuint32mf2_t test___riscv_vsll_vx_u32mf2_mu(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u32mf2_mu(mask,merge,op1,shift,vl);
+}
+
+
+vuint32m1_t test___riscv_vsll_vx_u32m1_mu(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u32m1_mu(mask,merge,op1,shift,vl);
+}
+
+
+vuint32m2_t test___riscv_vsll_vx_u32m2_mu(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u32m2_mu(mask,merge,op1,shift,vl);
+}
+
+
+vuint32m4_t test___riscv_vsll_vx_u32m4_mu(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u32m4_mu(mask,merge,op1,shift,vl);
+}
+
+
+vuint32m8_t test___riscv_vsll_vx_u32m8_mu(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u32m8_mu(mask,merge,op1,shift,vl);
+}
+
+
+vuint64m1_t test___riscv_vsll_vx_u64m1_mu(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u64m1_mu(mask,merge,op1,shift,vl);
+}
+
+
+vuint64m2_t test___riscv_vsll_vx_u64m2_mu(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u64m2_mu(mask,merge,op1,shift,vl);
+}
+
+
+vuint64m4_t test___riscv_vsll_vx_u64m4_mu(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u64m4_mu(mask,merge,op1,shift,vl);
+}
+
+
+vuint64m8_t test___riscv_vsll_vx_u64m8_mu(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u64m8_mu(mask,merge,op1,shift,vl);
+}
+
+
+
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*t[au],\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*t[au],\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*t[au],\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*t[au],\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*t[au],\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*t[au],\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*t[au],\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*t[au],\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*t[au],\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*t[au],\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*t[au],\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*t[au],\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*t[au],\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*t[au],\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*t[au],\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*t[au],\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*t[au],\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*t[au],\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*t[au],\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*t[au],\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
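The -2 and -3 variants of these tests differ from -1 only in the AVL argument. vsll_vx_mu-2.c below hard-codes vl as the literal 31, which fits the 5-bit unsigned immediate of vsetivli, so its scans expect `vsetivli zero,31,...`; the -3 variant hard-codes 32, which does not fit the immediate and therefore falls back to `vsetvli` with a scratch register. For example:

  __riscv_vsll_vx_i8m1_mu (mask, merge, op1, shift, 31);  /* vsetivli zero,31,e8,m1,ta,mu   */
  __riscv_vsll_vx_i8m1_mu (mask, merge, op1, shift, 32);  /* vsetvli  zero,<reg>,e8,m1,ta,mu */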
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vsll_vx_mu-2.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vsll_vx_mu-2.c
new file mode 100644
index 00000000000..ddc2a1d9140
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vsll_vx_mu-2.c
@@ -0,0 +1,292 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
+vint8mf8_t test___riscv_vsll_vx_i8mf8_mu(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8mf8_mu(mask,merge,op1,shift,31);
+}
+
+
+vint8mf4_t test___riscv_vsll_vx_i8mf4_mu(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8mf4_mu(mask,merge,op1,shift,31);
+}
+
+
+vint8mf2_t test___riscv_vsll_vx_i8mf2_mu(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8mf2_mu(mask,merge,op1,shift,31);
+}
+
+
+vint8m1_t test___riscv_vsll_vx_i8m1_mu(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8m1_mu(mask,merge,op1,shift,31);
+}
+
+
+vint8m2_t test___riscv_vsll_vx_i8m2_mu(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8m2_mu(mask,merge,op1,shift,31);
+}
+
+
+vint8m4_t test___riscv_vsll_vx_i8m4_mu(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8m4_mu(mask,merge,op1,shift,31);
+}
+
+
+vint8m8_t test___riscv_vsll_vx_i8m8_mu(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8m8_mu(mask,merge,op1,shift,31);
+}
+
+
+vint16mf4_t test___riscv_vsll_vx_i16mf4_mu(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16mf4_mu(mask,merge,op1,shift,31);
+}
+
+
+vint16mf2_t test___riscv_vsll_vx_i16mf2_mu(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16mf2_mu(mask,merge,op1,shift,31);
+}
+
+
+vint16m1_t test___riscv_vsll_vx_i16m1_mu(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16m1_mu(mask,merge,op1,shift,31);
+}
+
+
+vint16m2_t test___riscv_vsll_vx_i16m2_mu(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16m2_mu(mask,merge,op1,shift,31);
+}
+
+
+vint16m4_t test___riscv_vsll_vx_i16m4_mu(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16m4_mu(mask,merge,op1,shift,31);
+}
+
+
+vint16m8_t test___riscv_vsll_vx_i16m8_mu(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16m8_mu(mask,merge,op1,shift,31);
+}
+
+
+vint32mf2_t test___riscv_vsll_vx_i32mf2_mu(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i32mf2_mu(mask,merge,op1,shift,31);
+}
+
+
+vint32m1_t test___riscv_vsll_vx_i32m1_mu(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i32m1_mu(mask,merge,op1,shift,31);
+}
+
+
+vint32m2_t test___riscv_vsll_vx_i32m2_mu(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i32m2_mu(mask,merge,op1,shift,31);
+}
+
+
+vint32m4_t test___riscv_vsll_vx_i32m4_mu(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i32m4_mu(mask,merge,op1,shift,31);
+}
+
+
+vint32m8_t test___riscv_vsll_vx_i32m8_mu(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i32m8_mu(mask,merge,op1,shift,31);
+}
+
+
+vint64m1_t test___riscv_vsll_vx_i64m1_mu(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i64m1_mu(mask,merge,op1,shift,31);
+}
+
+
+vint64m2_t test___riscv_vsll_vx_i64m2_mu(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i64m2_mu(mask,merge,op1,shift,31);
+}
+
+
+vint64m4_t test___riscv_vsll_vx_i64m4_mu(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i64m4_mu(mask,merge,op1,shift,31);
+}
+
+
+vint64m8_t test___riscv_vsll_vx_i64m8_mu(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i64m8_mu(mask,merge,op1,shift,31);
+}
+
+
+vuint8mf8_t test___riscv_vsll_vx_u8mf8_mu(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8mf8_mu(mask,merge,op1,shift,31);
+}
+
+
+vuint8mf4_t test___riscv_vsll_vx_u8mf4_mu(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8mf4_mu(mask,merge,op1,shift,31);
+}
+
+
+vuint8mf2_t test___riscv_vsll_vx_u8mf2_mu(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8mf2_mu(mask,merge,op1,shift,31);
+}
+
+
+vuint8m1_t test___riscv_vsll_vx_u8m1_mu(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8m1_mu(mask,merge,op1,shift,31);
+}
+
+
+vuint8m2_t test___riscv_vsll_vx_u8m2_mu(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8m2_mu(mask,merge,op1,shift,31);
+}
+
+
+vuint8m4_t test___riscv_vsll_vx_u8m4_mu(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8m4_mu(mask,merge,op1,shift,31);
+}
+
+
+vuint8m8_t test___riscv_vsll_vx_u8m8_mu(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8m8_mu(mask,merge,op1,shift,31);
+}
+
+
+vuint16mf4_t test___riscv_vsll_vx_u16mf4_mu(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16mf4_mu(mask,merge,op1,shift,31);
+}
+
+
+vuint16mf2_t test___riscv_vsll_vx_u16mf2_mu(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16mf2_mu(mask,merge,op1,shift,31);
+}
+
+
+vuint16m1_t test___riscv_vsll_vx_u16m1_mu(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16m1_mu(mask,merge,op1,shift,31);
+}
+
+
+vuint16m2_t test___riscv_vsll_vx_u16m2_mu(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16m2_mu(mask,merge,op1,shift,31);
+}
+
+
+vuint16m4_t test___riscv_vsll_vx_u16m4_mu(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16m4_mu(mask,merge,op1,shift,31);
+}
+
+
+vuint16m8_t test___riscv_vsll_vx_u16m8_mu(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16m8_mu(mask,merge,op1,shift,31);
+}
+
+
+vuint32mf2_t test___riscv_vsll_vx_u32mf2_mu(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u32mf2_mu(mask,merge,op1,shift,31);
+}
+
+
+vuint32m1_t test___riscv_vsll_vx_u32m1_mu(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u32m1_mu(mask,merge,op1,shift,31);
+}
+
+
+vuint32m2_t test___riscv_vsll_vx_u32m2_mu(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u32m2_mu(mask,merge,op1,shift,31);
+}
+
+
+vuint32m4_t test___riscv_vsll_vx_u32m4_mu(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u32m4_mu(mask,merge,op1,shift,31);
+}
+
+
+vuint32m8_t test___riscv_vsll_vx_u32m8_mu(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u32m8_mu(mask,merge,op1,shift,31);
+}
+
+
+vuint64m1_t test___riscv_vsll_vx_u64m1_mu(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u64m1_mu(mask,merge,op1,shift,31);
+}
+
+
+vuint64m2_t test___riscv_vsll_vx_u64m2_mu(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u64m2_mu(mask,merge,op1,shift,31);
+}
+
+
+vuint64m4_t test___riscv_vsll_vx_u64m4_mu(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u64m4_mu(mask,merge,op1,shift,31);
+}
+
+
+vuint64m8_t test___riscv_vsll_vx_u64m8_mu(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u64m8_mu(mask,merge,op1,shift,31);
+}
+
+
+
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf8,\s*t[au],\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf4,\s*t[au],\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf2,\s*t[au],\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m1,\s*t[au],\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m2,\s*t[au],\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m4,\s*t[au],\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m8,\s*t[au],\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf4,\s*t[au],\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf2,\s*t[au],\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m1,\s*t[au],\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m2,\s*t[au],\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m4,\s*t[au],\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m8,\s*t[au],\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*mf2,\s*t[au],\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m1,\s*t[au],\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m2,\s*t[au],\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m4,\s*t[au],\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m8,\s*t[au],\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m1,\s*t[au],\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m2,\s*t[au],\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m4,\s*t[au],\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m8,\s*t[au],\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vsll_vx_mu-3.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vsll_vx_mu-3.c
new file mode 100644
index 00000000000..26a38a39cb7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vsll_vx_mu-3.c
@@ -0,0 +1,292 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
+vint8mf8_t test___riscv_vsll_vx_i8mf8_mu(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8mf8_mu(mask,merge,op1,shift,32);
+}
+
+
+vint8mf4_t test___riscv_vsll_vx_i8mf4_mu(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8mf4_mu(mask,merge,op1,shift,32);
+}
+
+
+vint8mf2_t test___riscv_vsll_vx_i8mf2_mu(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8mf2_mu(mask,merge,op1,shift,32);
+}
+
+
+vint8m1_t test___riscv_vsll_vx_i8m1_mu(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8m1_mu(mask,merge,op1,shift,32);
+}
+
+
+vint8m2_t test___riscv_vsll_vx_i8m2_mu(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8m2_mu(mask,merge,op1,shift,32);
+}
+
+
+vint8m4_t test___riscv_vsll_vx_i8m4_mu(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8m4_mu(mask,merge,op1,shift,32);
+}
+
+
+vint8m8_t test___riscv_vsll_vx_i8m8_mu(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8m8_mu(mask,merge,op1,shift,32);
+}
+
+
+vint16mf4_t test___riscv_vsll_vx_i16mf4_mu(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16mf4_mu(mask,merge,op1,shift,32);
+}
+
+
+vint16mf2_t test___riscv_vsll_vx_i16mf2_mu(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16mf2_mu(mask,merge,op1,shift,32);
+}
+
+
+vint16m1_t test___riscv_vsll_vx_i16m1_mu(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16m1_mu(mask,merge,op1,shift,32);
+}
+
+
+vint16m2_t test___riscv_vsll_vx_i16m2_mu(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16m2_mu(mask,merge,op1,shift,32);
+}
+
+
+vint16m4_t test___riscv_vsll_vx_i16m4_mu(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16m4_mu(mask,merge,op1,shift,32);
+}
+
+
+vint16m8_t test___riscv_vsll_vx_i16m8_mu(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16m8_mu(mask,merge,op1,shift,32);
+}
+
+
+vint32mf2_t test___riscv_vsll_vx_i32mf2_mu(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i32mf2_mu(mask,merge,op1,shift,32);
+}
+
+
+vint32m1_t test___riscv_vsll_vx_i32m1_mu(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i32m1_mu(mask,merge,op1,shift,32);
+}
+
+
+vint32m2_t test___riscv_vsll_vx_i32m2_mu(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i32m2_mu(mask,merge,op1,shift,32);
+}
+
+
+vint32m4_t test___riscv_vsll_vx_i32m4_mu(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i32m4_mu(mask,merge,op1,shift,32);
+}
+
+
+vint32m8_t test___riscv_vsll_vx_i32m8_mu(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i32m8_mu(mask,merge,op1,shift,32);
+}
+
+
+vint64m1_t test___riscv_vsll_vx_i64m1_mu(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i64m1_mu(mask,merge,op1,shift,32);
+}
+
+
+vint64m2_t test___riscv_vsll_vx_i64m2_mu(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i64m2_mu(mask,merge,op1,shift,32);
+}
+
+
+vint64m4_t test___riscv_vsll_vx_i64m4_mu(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i64m4_mu(mask,merge,op1,shift,32);
+}
+
+
+vint64m8_t test___riscv_vsll_vx_i64m8_mu(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i64m8_mu(mask,merge,op1,shift,32);
+}
+
+
+vuint8mf8_t test___riscv_vsll_vx_u8mf8_mu(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8mf8_mu(mask,merge,op1,shift,32);
+}
+
+
+vuint8mf4_t test___riscv_vsll_vx_u8mf4_mu(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8mf4_mu(mask,merge,op1,shift,32);
+}
+
+
+vuint8mf2_t test___riscv_vsll_vx_u8mf2_mu(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8mf2_mu(mask,merge,op1,shift,32);
+}
+
+
+vuint8m1_t test___riscv_vsll_vx_u8m1_mu(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8m1_mu(mask,merge,op1,shift,32);
+}
+
+
+vuint8m2_t test___riscv_vsll_vx_u8m2_mu(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8m2_mu(mask,merge,op1,shift,32);
+}
+
+
+vuint8m4_t test___riscv_vsll_vx_u8m4_mu(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8m4_mu(mask,merge,op1,shift,32);
+}
+
+
+vuint8m8_t test___riscv_vsll_vx_u8m8_mu(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8m8_mu(mask,merge,op1,shift,32);
+}
+
+
+vuint16mf4_t test___riscv_vsll_vx_u16mf4_mu(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16mf4_mu(mask,merge,op1,shift,32);
+}
+
+
+vuint16mf2_t test___riscv_vsll_vx_u16mf2_mu(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16mf2_mu(mask,merge,op1,shift,32);
+}
+
+
+vuint16m1_t test___riscv_vsll_vx_u16m1_mu(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16m1_mu(mask,merge,op1,shift,32);
+}
+
+
+vuint16m2_t test___riscv_vsll_vx_u16m2_mu(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16m2_mu(mask,merge,op1,shift,32);
+}
+
+
+vuint16m4_t test___riscv_vsll_vx_u16m4_mu(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16m4_mu(mask,merge,op1,shift,32);
+}
+
+
+vuint16m8_t test___riscv_vsll_vx_u16m8_mu(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16m8_mu(mask,merge,op1,shift,32);
+}
+
+
+vuint32mf2_t test___riscv_vsll_vx_u32mf2_mu(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u32mf2_mu(mask,merge,op1,shift,32);
+}
+
+
+vuint32m1_t test___riscv_vsll_vx_u32m1_mu(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u32m1_mu(mask,merge,op1,shift,32);
+}
+
+
+vuint32m2_t test___riscv_vsll_vx_u32m2_mu(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u32m2_mu(mask,merge,op1,shift,32);
+}
+
+
+vuint32m4_t test___riscv_vsll_vx_u32m4_mu(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u32m4_mu(mask,merge,op1,shift,32);
+}
+
+
+vuint32m8_t test___riscv_vsll_vx_u32m8_mu(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u32m8_mu(mask,merge,op1,shift,32);
+}
+
+
+vuint64m1_t test___riscv_vsll_vx_u64m1_mu(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u64m1_mu(mask,merge,op1,shift,32);
+}
+
+
+vuint64m2_t test___riscv_vsll_vx_u64m2_mu(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u64m2_mu(mask,merge,op1,shift,32);
+}
+
+
+vuint64m4_t test___riscv_vsll_vx_u64m4_mu(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u64m4_mu(mask,merge,op1,shift,32);
+}
+
+
+vuint64m8_t test___riscv_vsll_vx_u64m8_mu(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u64m8_mu(mask,merge,op1,shift,32);
+}
+
+
+
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*t[au],\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*t[au],\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*t[au],\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*t[au],\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*t[au],\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*t[au],\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*t[au],\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*t[au],\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*t[au],\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*t[au],\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*t[au],\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*t[au],\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*t[au],\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*t[au],\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*t[au],\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*t[au],\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*t[au],\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*t[au],\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*t[au],\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*t[au],\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
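The vsll_vx_tu tests that follow cover the tail-undisturbed (_tu) policy: lanes at index >= vl keep the corresponding lanes of `merge`, and there is no mask operand, so the scans expect `tu,m[au]` in the vsetvli and no trailing `v0.t` on the vsll.vx. A minimal sketch of a call site, assuming an RVV target (the helper name is hypothetical):

  #include "riscv_vector.h"

  /* Shift the first `vl` lanes; the tail of the result is taken
     unchanged from `merge` (_tu policy).  */
  vint8m1_t shift_prefix (vint8m1_t merge, vint8m1_t op1, size_t shift, size_t vl)
  {
    return __riscv_vsll_vx_i8m1_tu (merge, op1, shift, vl);
  }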
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vsll_vx_tu-1.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vsll_vx_tu-1.c
new file mode 100644
index 00000000000..628f6d9101b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vsll_vx_tu-1.c
@@ -0,0 +1,292 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
+vint8mf8_t test___riscv_vsll_vx_i8mf8_tu(vint8mf8_t merge,vint8mf8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8mf8_tu(merge,op1,shift,vl);
+}
+
+
+vint8mf4_t test___riscv_vsll_vx_i8mf4_tu(vint8mf4_t merge,vint8mf4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8mf4_tu(merge,op1,shift,vl);
+}
+
+
+vint8mf2_t test___riscv_vsll_vx_i8mf2_tu(vint8mf2_t merge,vint8mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8mf2_tu(merge,op1,shift,vl);
+}
+
+
+vint8m1_t test___riscv_vsll_vx_i8m1_tu(vint8m1_t merge,vint8m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8m1_tu(merge,op1,shift,vl);
+}
+
+
+vint8m2_t test___riscv_vsll_vx_i8m2_tu(vint8m2_t merge,vint8m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8m2_tu(merge,op1,shift,vl);
+}
+
+
+vint8m4_t test___riscv_vsll_vx_i8m4_tu(vint8m4_t merge,vint8m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8m4_tu(merge,op1,shift,vl);
+}
+
+
+vint8m8_t test___riscv_vsll_vx_i8m8_tu(vint8m8_t merge,vint8m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8m8_tu(merge,op1,shift,vl);
+}
+
+
+vint16mf4_t test___riscv_vsll_vx_i16mf4_tu(vint16mf4_t merge,vint16mf4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16mf4_tu(merge,op1,shift,vl);
+}
+
+
+vint16mf2_t test___riscv_vsll_vx_i16mf2_tu(vint16mf2_t merge,vint16mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16mf2_tu(merge,op1,shift,vl);
+}
+
+
+vint16m1_t test___riscv_vsll_vx_i16m1_tu(vint16m1_t merge,vint16m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16m1_tu(merge,op1,shift,vl);
+}
+
+
+vint16m2_t test___riscv_vsll_vx_i16m2_tu(vint16m2_t merge,vint16m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16m2_tu(merge,op1,shift,vl);
+}
+
+
+vint16m4_t test___riscv_vsll_vx_i16m4_tu(vint16m4_t merge,vint16m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16m4_tu(merge,op1,shift,vl);
+}
+
+
+vint16m8_t test___riscv_vsll_vx_i16m8_tu(vint16m8_t merge,vint16m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16m8_tu(merge,op1,shift,vl);
+}
+
+
+vint32mf2_t test___riscv_vsll_vx_i32mf2_tu(vint32mf2_t merge,vint32mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i32mf2_tu(merge,op1,shift,vl);
+}
+
+
+vint32m1_t test___riscv_vsll_vx_i32m1_tu(vint32m1_t merge,vint32m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i32m1_tu(merge,op1,shift,vl);
+}
+
+
+vint32m2_t test___riscv_vsll_vx_i32m2_tu(vint32m2_t merge,vint32m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i32m2_tu(merge,op1,shift,vl);
+}
+
+
+vint32m4_t test___riscv_vsll_vx_i32m4_tu(vint32m4_t merge,vint32m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i32m4_tu(merge,op1,shift,vl);
+}
+
+
+vint32m8_t test___riscv_vsll_vx_i32m8_tu(vint32m8_t merge,vint32m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i32m8_tu(merge,op1,shift,vl);
+}
+
+
+vint64m1_t test___riscv_vsll_vx_i64m1_tu(vint64m1_t merge,vint64m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i64m1_tu(merge,op1,shift,vl);
+}
+
+
+vint64m2_t test___riscv_vsll_vx_i64m2_tu(vint64m2_t merge,vint64m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i64m2_tu(merge,op1,shift,vl);
+}
+
+
+vint64m4_t test___riscv_vsll_vx_i64m4_tu(vint64m4_t merge,vint64m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i64m4_tu(merge,op1,shift,vl);
+}
+
+
+vint64m8_t test___riscv_vsll_vx_i64m8_tu(vint64m8_t merge,vint64m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i64m8_tu(merge,op1,shift,vl);
+}
+
+
+vuint8mf8_t test___riscv_vsll_vx_u8mf8_tu(vuint8mf8_t merge,vuint8mf8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8mf8_tu(merge,op1,shift,vl);
+}
+
+
+vuint8mf4_t test___riscv_vsll_vx_u8mf4_tu(vuint8mf4_t merge,vuint8mf4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8mf4_tu(merge,op1,shift,vl);
+}
+
+
+vuint8mf2_t test___riscv_vsll_vx_u8mf2_tu(vuint8mf2_t merge,vuint8mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8mf2_tu(merge,op1,shift,vl);
+}
+
+
+vuint8m1_t test___riscv_vsll_vx_u8m1_tu(vuint8m1_t merge,vuint8m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8m1_tu(merge,op1,shift,vl);
+}
+
+
+vuint8m2_t test___riscv_vsll_vx_u8m2_tu(vuint8m2_t merge,vuint8m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8m2_tu(merge,op1,shift,vl);
+}
+
+
+vuint8m4_t test___riscv_vsll_vx_u8m4_tu(vuint8m4_t merge,vuint8m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8m4_tu(merge,op1,shift,vl);
+}
+
+
+vuint8m8_t test___riscv_vsll_vx_u8m8_tu(vuint8m8_t merge,vuint8m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8m8_tu(merge,op1,shift,vl);
+}
+
+
+vuint16mf4_t test___riscv_vsll_vx_u16mf4_tu(vuint16mf4_t merge,vuint16mf4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16mf4_tu(merge,op1,shift,vl);
+}
+
+
+vuint16mf2_t test___riscv_vsll_vx_u16mf2_tu(vuint16mf2_t merge,vuint16mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16mf2_tu(merge,op1,shift,vl);
+}
+
+
+vuint16m1_t test___riscv_vsll_vx_u16m1_tu(vuint16m1_t merge,vuint16m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16m1_tu(merge,op1,shift,vl);
+}
+
+
+vuint16m2_t test___riscv_vsll_vx_u16m2_tu(vuint16m2_t merge,vuint16m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16m2_tu(merge,op1,shift,vl);
+}
+
+
+vuint16m4_t test___riscv_vsll_vx_u16m4_tu(vuint16m4_t merge,vuint16m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16m4_tu(merge,op1,shift,vl);
+}
+
+
+vuint16m8_t test___riscv_vsll_vx_u16m8_tu(vuint16m8_t merge,vuint16m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16m8_tu(merge,op1,shift,vl);
+}
+
+
+vuint32mf2_t test___riscv_vsll_vx_u32mf2_tu(vuint32mf2_t merge,vuint32mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u32mf2_tu(merge,op1,shift,vl);
+}
+
+
+vuint32m1_t test___riscv_vsll_vx_u32m1_tu(vuint32m1_t merge,vuint32m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u32m1_tu(merge,op1,shift,vl);
+}
+
+
+vuint32m2_t test___riscv_vsll_vx_u32m2_tu(vuint32m2_t merge,vuint32m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u32m2_tu(merge,op1,shift,vl);
+}
+
+
+vuint32m4_t test___riscv_vsll_vx_u32m4_tu(vuint32m4_t merge,vuint32m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u32m4_tu(merge,op1,shift,vl);
+}
+
+
+vuint32m8_t test___riscv_vsll_vx_u32m8_tu(vuint32m8_t merge,vuint32m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u32m8_tu(merge,op1,shift,vl);
+}
+
+
+vuint64m1_t test___riscv_vsll_vx_u64m1_tu(vuint64m1_t merge,vuint64m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u64m1_tu(merge,op1,shift,vl);
+}
+
+
+vuint64m2_t test___riscv_vsll_vx_u64m2_tu(vuint64m2_t merge,vuint64m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u64m2_tu(merge,op1,shift,vl);
+}
+
+
+vuint64m4_t test___riscv_vsll_vx_u64m4_tu(vuint64m4_t merge,vuint64m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u64m4_tu(merge,op1,shift,vl);
+}
+
+
+vuint64m8_t test___riscv_vsll_vx_u64m8_tu(vuint64m8_t merge,vuint64m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u64m8_tu(merge,op1,shift,vl);
+}
+
+
+
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vsll_vx_tu-2.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vsll_vx_tu-2.c
new file mode 100644
index 00000000000..11688aaf9a7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vsll_vx_tu-2.c
@@ -0,0 +1,292 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
+vint8mf8_t test___riscv_vsll_vx_i8mf8_tu(vint8mf8_t merge,vint8mf8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8mf8_tu(merge,op1,shift,31);
+}
+
+
+vint8mf4_t test___riscv_vsll_vx_i8mf4_tu(vint8mf4_t merge,vint8mf4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8mf4_tu(merge,op1,shift,31);
+}
+
+
+vint8mf2_t test___riscv_vsll_vx_i8mf2_tu(vint8mf2_t merge,vint8mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8mf2_tu(merge,op1,shift,31);
+}
+
+
+vint8m1_t test___riscv_vsll_vx_i8m1_tu(vint8m1_t merge,vint8m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8m1_tu(merge,op1,shift,31);
+}
+
+
+vint8m2_t test___riscv_vsll_vx_i8m2_tu(vint8m2_t merge,vint8m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8m2_tu(merge,op1,shift,31);
+}
+
+
+vint8m4_t test___riscv_vsll_vx_i8m4_tu(vint8m4_t merge,vint8m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8m4_tu(merge,op1,shift,31);
+}
+
+
+vint8m8_t test___riscv_vsll_vx_i8m8_tu(vint8m8_t merge,vint8m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8m8_tu(merge,op1,shift,31);
+}
+
+
+vint16mf4_t test___riscv_vsll_vx_i16mf4_tu(vint16mf4_t merge,vint16mf4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16mf4_tu(merge,op1,shift,31);
+}
+
+
+vint16mf2_t test___riscv_vsll_vx_i16mf2_tu(vint16mf2_t merge,vint16mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16mf2_tu(merge,op1,shift,31);
+}
+
+
+vint16m1_t test___riscv_vsll_vx_i16m1_tu(vint16m1_t merge,vint16m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16m1_tu(merge,op1,shift,31);
+}
+
+
+vint16m2_t test___riscv_vsll_vx_i16m2_tu(vint16m2_t merge,vint16m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16m2_tu(merge,op1,shift,31);
+}
+
+
+vint16m4_t test___riscv_vsll_vx_i16m4_tu(vint16m4_t merge,vint16m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16m4_tu(merge,op1,shift,31);
+}
+
+
+vint16m8_t test___riscv_vsll_vx_i16m8_tu(vint16m8_t merge,vint16m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16m8_tu(merge,op1,shift,31);
+}
+
+
+vint32mf2_t test___riscv_vsll_vx_i32mf2_tu(vint32mf2_t merge,vint32mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i32mf2_tu(merge,op1,shift,31);
+}
+
+
+vint32m1_t test___riscv_vsll_vx_i32m1_tu(vint32m1_t merge,vint32m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i32m1_tu(merge,op1,shift,31);
+}
+
+
+vint32m2_t test___riscv_vsll_vx_i32m2_tu(vint32m2_t merge,vint32m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i32m2_tu(merge,op1,shift,31);
+}
+
+
+vint32m4_t test___riscv_vsll_vx_i32m4_tu(vint32m4_t merge,vint32m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i32m4_tu(merge,op1,shift,31);
+}
+
+
+vint32m8_t test___riscv_vsll_vx_i32m8_tu(vint32m8_t merge,vint32m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i32m8_tu(merge,op1,shift,31);
+}
+
+
+vint64m1_t test___riscv_vsll_vx_i64m1_tu(vint64m1_t merge,vint64m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i64m1_tu(merge,op1,shift,31);
+}
+
+
+vint64m2_t test___riscv_vsll_vx_i64m2_tu(vint64m2_t merge,vint64m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i64m2_tu(merge,op1,shift,31);
+}
+
+
+vint64m4_t test___riscv_vsll_vx_i64m4_tu(vint64m4_t merge,vint64m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i64m4_tu(merge,op1,shift,31);
+}
+
+
+vint64m8_t test___riscv_vsll_vx_i64m8_tu(vint64m8_t merge,vint64m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i64m8_tu(merge,op1,shift,31);
+}
+
+
+vuint8mf8_t test___riscv_vsll_vx_u8mf8_tu(vuint8mf8_t merge,vuint8mf8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8mf8_tu(merge,op1,shift,31);
+}
+
+
+vuint8mf4_t test___riscv_vsll_vx_u8mf4_tu(vuint8mf4_t merge,vuint8mf4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8mf4_tu(merge,op1,shift,31);
+}
+
+
+vuint8mf2_t test___riscv_vsll_vx_u8mf2_tu(vuint8mf2_t merge,vuint8mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8mf2_tu(merge,op1,shift,31);
+}
+
+
+vuint8m1_t test___riscv_vsll_vx_u8m1_tu(vuint8m1_t merge,vuint8m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8m1_tu(merge,op1,shift,31);
+}
+
+
+vuint8m2_t test___riscv_vsll_vx_u8m2_tu(vuint8m2_t merge,vuint8m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8m2_tu(merge,op1,shift,31);
+}
+
+
+vuint8m4_t test___riscv_vsll_vx_u8m4_tu(vuint8m4_t merge,vuint8m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8m4_tu(merge,op1,shift,31);
+}
+
+
+vuint8m8_t test___riscv_vsll_vx_u8m8_tu(vuint8m8_t merge,vuint8m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8m8_tu(merge,op1,shift,31);
+}
+
+
+vuint16mf4_t test___riscv_vsll_vx_u16mf4_tu(vuint16mf4_t merge,vuint16mf4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16mf4_tu(merge,op1,shift,31);
+}
+
+
+vuint16mf2_t test___riscv_vsll_vx_u16mf2_tu(vuint16mf2_t merge,vuint16mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16mf2_tu(merge,op1,shift,31);
+}
+
+
+vuint16m1_t test___riscv_vsll_vx_u16m1_tu(vuint16m1_t merge,vuint16m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16m1_tu(merge,op1,shift,31);
+}
+
+
+vuint16m2_t test___riscv_vsll_vx_u16m2_tu(vuint16m2_t merge,vuint16m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16m2_tu(merge,op1,shift,31);
+}
+
+
+vuint16m4_t test___riscv_vsll_vx_u16m4_tu(vuint16m4_t merge,vuint16m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16m4_tu(merge,op1,shift,31);
+}
+
+
+vuint16m8_t test___riscv_vsll_vx_u16m8_tu(vuint16m8_t merge,vuint16m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16m8_tu(merge,op1,shift,31);
+}
+
+
+vuint32mf2_t test___riscv_vsll_vx_u32mf2_tu(vuint32mf2_t merge,vuint32mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u32mf2_tu(merge,op1,shift,31);
+}
+
+
+vuint32m1_t test___riscv_vsll_vx_u32m1_tu(vuint32m1_t merge,vuint32m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u32m1_tu(merge,op1,shift,31);
+}
+
+
+vuint32m2_t test___riscv_vsll_vx_u32m2_tu(vuint32m2_t merge,vuint32m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u32m2_tu(merge,op1,shift,31);
+}
+
+
+vuint32m4_t test___riscv_vsll_vx_u32m4_tu(vuint32m4_t merge,vuint32m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u32m4_tu(merge,op1,shift,31);
+}
+
+
+vuint32m8_t test___riscv_vsll_vx_u32m8_tu(vuint32m8_t merge,vuint32m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u32m8_tu(merge,op1,shift,31);
+}
+
+
+vuint64m1_t test___riscv_vsll_vx_u64m1_tu(vuint64m1_t merge,vuint64m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u64m1_tu(merge,op1,shift,31);
+}
+
+
+vuint64m2_t test___riscv_vsll_vx_u64m2_tu(vuint64m2_t merge,vuint64m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u64m2_tu(merge,op1,shift,31);
+}
+
+
+vuint64m4_t test___riscv_vsll_vx_u64m4_tu(vuint64m4_t merge,vuint64m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u64m4_tu(merge,op1,shift,31);
+}
+
+
+vuint64m8_t test___riscv_vsll_vx_u64m8_tu(vuint64m8_t merge,vuint64m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u64m8_tu(merge,op1,shift,31);
+}
+
+
+
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf8,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf4,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf2,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m1,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m2,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m4,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m8,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf4,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf2,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m1,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m2,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m4,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m8,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*mf2,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m1,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m2,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m4,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m8,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m1,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m2,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m4,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m8,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vsll_vx_tu-3.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vsll_vx_tu-3.c new file mode 100644 index 00000000000..4038ff4e6a3 --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vsll_vx_tu-3.c @@ -0,0 +1,292 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "riscv_vector.h" + +vint8mf8_t test___riscv_vsll_vx_i8mf8_tu(vint8mf8_t merge,vint8mf8_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i8mf8_tu(merge,op1,shift,32); +} + + +vint8mf4_t test___riscv_vsll_vx_i8mf4_tu(vint8mf4_t merge,vint8mf4_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i8mf4_tu(merge,op1,shift,32); +} + + +vint8mf2_t test___riscv_vsll_vx_i8mf2_tu(vint8mf2_t merge,vint8mf2_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i8mf2_tu(merge,op1,shift,32); +} + + +vint8m1_t test___riscv_vsll_vx_i8m1_tu(vint8m1_t merge,vint8m1_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i8m1_tu(merge,op1,shift,32); +} + + +vint8m2_t test___riscv_vsll_vx_i8m2_tu(vint8m2_t merge,vint8m2_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i8m2_tu(merge,op1,shift,32); +} + + +vint8m4_t test___riscv_vsll_vx_i8m4_tu(vint8m4_t merge,vint8m4_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i8m4_tu(merge,op1,shift,32); +} + + +vint8m8_t test___riscv_vsll_vx_i8m8_tu(vint8m8_t merge,vint8m8_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i8m8_tu(merge,op1,shift,32); +} + + +vint16mf4_t test___riscv_vsll_vx_i16mf4_tu(vint16mf4_t merge,vint16mf4_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i16mf4_tu(merge,op1,shift,32); +} + + +vint16mf2_t test___riscv_vsll_vx_i16mf2_tu(vint16mf2_t merge,vint16mf2_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i16mf2_tu(merge,op1,shift,32); +} + + +vint16m1_t test___riscv_vsll_vx_i16m1_tu(vint16m1_t merge,vint16m1_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i16m1_tu(merge,op1,shift,32); +} + + +vint16m2_t test___riscv_vsll_vx_i16m2_tu(vint16m2_t merge,vint16m2_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i16m2_tu(merge,op1,shift,32); +} + + +vint16m4_t test___riscv_vsll_vx_i16m4_tu(vint16m4_t merge,vint16m4_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i16m4_tu(merge,op1,shift,32); +} + + +vint16m8_t test___riscv_vsll_vx_i16m8_tu(vint16m8_t merge,vint16m8_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i16m8_tu(merge,op1,shift,32); +} + + +vint32mf2_t test___riscv_vsll_vx_i32mf2_tu(vint32mf2_t merge,vint32mf2_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i32mf2_tu(merge,op1,shift,32); +} + + +vint32m1_t test___riscv_vsll_vx_i32m1_tu(vint32m1_t merge,vint32m1_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i32m1_tu(merge,op1,shift,32); +} + + +vint32m2_t test___riscv_vsll_vx_i32m2_tu(vint32m2_t merge,vint32m2_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i32m2_tu(merge,op1,shift,32); +} + + +vint32m4_t test___riscv_vsll_vx_i32m4_tu(vint32m4_t merge,vint32m4_t op1,size_t shift,size_t 
vl) +{ + return __riscv_vsll_vx_i32m4_tu(merge,op1,shift,32); +} + + +vint32m8_t test___riscv_vsll_vx_i32m8_tu(vint32m8_t merge,vint32m8_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i32m8_tu(merge,op1,shift,32); +} + + +vint64m1_t test___riscv_vsll_vx_i64m1_tu(vint64m1_t merge,vint64m1_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i64m1_tu(merge,op1,shift,32); +} + + +vint64m2_t test___riscv_vsll_vx_i64m2_tu(vint64m2_t merge,vint64m2_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i64m2_tu(merge,op1,shift,32); +} + + +vint64m4_t test___riscv_vsll_vx_i64m4_tu(vint64m4_t merge,vint64m4_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i64m4_tu(merge,op1,shift,32); +} + + +vint64m8_t test___riscv_vsll_vx_i64m8_tu(vint64m8_t merge,vint64m8_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i64m8_tu(merge,op1,shift,32); +} + + +vuint8mf8_t test___riscv_vsll_vx_u8mf8_tu(vuint8mf8_t merge,vuint8mf8_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u8mf8_tu(merge,op1,shift,32); +} + + +vuint8mf4_t test___riscv_vsll_vx_u8mf4_tu(vuint8mf4_t merge,vuint8mf4_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u8mf4_tu(merge,op1,shift,32); +} + + +vuint8mf2_t test___riscv_vsll_vx_u8mf2_tu(vuint8mf2_t merge,vuint8mf2_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u8mf2_tu(merge,op1,shift,32); +} + + +vuint8m1_t test___riscv_vsll_vx_u8m1_tu(vuint8m1_t merge,vuint8m1_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u8m1_tu(merge,op1,shift,32); +} + + +vuint8m2_t test___riscv_vsll_vx_u8m2_tu(vuint8m2_t merge,vuint8m2_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u8m2_tu(merge,op1,shift,32); +} + + +vuint8m4_t test___riscv_vsll_vx_u8m4_tu(vuint8m4_t merge,vuint8m4_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u8m4_tu(merge,op1,shift,32); +} + + +vuint8m8_t test___riscv_vsll_vx_u8m8_tu(vuint8m8_t merge,vuint8m8_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u8m8_tu(merge,op1,shift,32); +} + + +vuint16mf4_t test___riscv_vsll_vx_u16mf4_tu(vuint16mf4_t merge,vuint16mf4_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u16mf4_tu(merge,op1,shift,32); +} + + +vuint16mf2_t test___riscv_vsll_vx_u16mf2_tu(vuint16mf2_t merge,vuint16mf2_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u16mf2_tu(merge,op1,shift,32); +} + + +vuint16m1_t test___riscv_vsll_vx_u16m1_tu(vuint16m1_t merge,vuint16m1_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u16m1_tu(merge,op1,shift,32); +} + + +vuint16m2_t test___riscv_vsll_vx_u16m2_tu(vuint16m2_t merge,vuint16m2_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u16m2_tu(merge,op1,shift,32); +} + + +vuint16m4_t test___riscv_vsll_vx_u16m4_tu(vuint16m4_t merge,vuint16m4_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u16m4_tu(merge,op1,shift,32); +} + + +vuint16m8_t test___riscv_vsll_vx_u16m8_tu(vuint16m8_t merge,vuint16m8_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u16m8_tu(merge,op1,shift,32); +} + + +vuint32mf2_t test___riscv_vsll_vx_u32mf2_tu(vuint32mf2_t merge,vuint32mf2_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u32mf2_tu(merge,op1,shift,32); +} + + +vuint32m1_t test___riscv_vsll_vx_u32m1_tu(vuint32m1_t merge,vuint32m1_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u32m1_tu(merge,op1,shift,32); +} + + +vuint32m2_t test___riscv_vsll_vx_u32m2_tu(vuint32m2_t merge,vuint32m2_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u32m2_tu(merge,op1,shift,32); +} + + +vuint32m4_t 
test___riscv_vsll_vx_u32m4_tu(vuint32m4_t merge,vuint32m4_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u32m4_tu(merge,op1,shift,32); +} + + +vuint32m8_t test___riscv_vsll_vx_u32m8_tu(vuint32m8_t merge,vuint32m8_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u32m8_tu(merge,op1,shift,32); +} + + +vuint64m1_t test___riscv_vsll_vx_u64m1_tu(vuint64m1_t merge,vuint64m1_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u64m1_tu(merge,op1,shift,32); +} + + +vuint64m2_t test___riscv_vsll_vx_u64m2_tu(vuint64m2_t merge,vuint64m2_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u64m2_tu(merge,op1,shift,32); +} + + +vuint64m4_t test___riscv_vsll_vx_u64m4_tu(vuint64m4_t merge,vuint64m4_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u64m4_tu(merge,op1,shift,32); +} + + +vuint64m8_t test___riscv_vsll_vx_u64m8_tu(vuint64m8_t merge,vuint64m8_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u64m8_tu(merge,op1,shift,32); +} + + + +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { 
scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+} 2 } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vsll_vx_tum-1.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vsll_vx_tum-1.c new file mode 100644 index 00000000000..a51f8b36547 --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vsll_vx_tum-1.c @@ -0,0 +1,292 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "riscv_vector.h" + +vint8mf8_t test___riscv_vsll_vx_i8mf8_tum(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i8mf8_tum(mask,merge,op1,shift,vl); +} + + +vint8mf4_t test___riscv_vsll_vx_i8mf4_tum(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i8mf4_tum(mask,merge,op1,shift,vl); +} + + +vint8mf2_t test___riscv_vsll_vx_i8mf2_tum(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i8mf2_tum(mask,merge,op1,shift,vl); +} + + +vint8m1_t test___riscv_vsll_vx_i8m1_tum(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i8m1_tum(mask,merge,op1,shift,vl); +} + + +vint8m2_t test___riscv_vsll_vx_i8m2_tum(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i8m2_tum(mask,merge,op1,shift,vl); +} + + +vint8m4_t test___riscv_vsll_vx_i8m4_tum(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i8m4_tum(mask,merge,op1,shift,vl); +} + + +vint8m8_t test___riscv_vsll_vx_i8m8_tum(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i8m8_tum(mask,merge,op1,shift,vl); +} + + +vint16mf4_t test___riscv_vsll_vx_i16mf4_tum(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i16mf4_tum(mask,merge,op1,shift,vl); +} + + +vint16mf2_t test___riscv_vsll_vx_i16mf2_tum(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i16mf2_tum(mask,merge,op1,shift,vl); +} + + +vint16m1_t test___riscv_vsll_vx_i16m1_tum(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i16m1_tum(mask,merge,op1,shift,vl); +} + + +vint16m2_t test___riscv_vsll_vx_i16m2_tum(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i16m2_tum(mask,merge,op1,shift,vl); +} + + +vint16m4_t test___riscv_vsll_vx_i16m4_tum(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i16m4_tum(mask,merge,op1,shift,vl); +} + + +vint16m8_t test___riscv_vsll_vx_i16m8_tum(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i16m8_tum(mask,merge,op1,shift,vl); +} + + 
+vint32mf2_t test___riscv_vsll_vx_i32mf2_tum(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i32mf2_tum(mask,merge,op1,shift,vl); +} + + +vint32m1_t test___riscv_vsll_vx_i32m1_tum(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i32m1_tum(mask,merge,op1,shift,vl); +} + + +vint32m2_t test___riscv_vsll_vx_i32m2_tum(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i32m2_tum(mask,merge,op1,shift,vl); +} + + +vint32m4_t test___riscv_vsll_vx_i32m4_tum(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i32m4_tum(mask,merge,op1,shift,vl); +} + + +vint32m8_t test___riscv_vsll_vx_i32m8_tum(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i32m8_tum(mask,merge,op1,shift,vl); +} + + +vint64m1_t test___riscv_vsll_vx_i64m1_tum(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i64m1_tum(mask,merge,op1,shift,vl); +} + + +vint64m2_t test___riscv_vsll_vx_i64m2_tum(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i64m2_tum(mask,merge,op1,shift,vl); +} + + +vint64m4_t test___riscv_vsll_vx_i64m4_tum(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i64m4_tum(mask,merge,op1,shift,vl); +} + + +vint64m8_t test___riscv_vsll_vx_i64m8_tum(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i64m8_tum(mask,merge,op1,shift,vl); +} + + +vuint8mf8_t test___riscv_vsll_vx_u8mf8_tum(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u8mf8_tum(mask,merge,op1,shift,vl); +} + + +vuint8mf4_t test___riscv_vsll_vx_u8mf4_tum(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u8mf4_tum(mask,merge,op1,shift,vl); +} + + +vuint8mf2_t test___riscv_vsll_vx_u8mf2_tum(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u8mf2_tum(mask,merge,op1,shift,vl); +} + + +vuint8m1_t test___riscv_vsll_vx_u8m1_tum(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u8m1_tum(mask,merge,op1,shift,vl); +} + + +vuint8m2_t test___riscv_vsll_vx_u8m2_tum(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u8m2_tum(mask,merge,op1,shift,vl); +} + + +vuint8m4_t test___riscv_vsll_vx_u8m4_tum(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u8m4_tum(mask,merge,op1,shift,vl); +} + + +vuint8m8_t test___riscv_vsll_vx_u8m8_tum(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u8m8_tum(mask,merge,op1,shift,vl); +} + + +vuint16mf4_t test___riscv_vsll_vx_u16mf4_tum(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u16mf4_tum(mask,merge,op1,shift,vl); +} + + +vuint16mf2_t test___riscv_vsll_vx_u16mf2_tum(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u16mf2_tum(mask,merge,op1,shift,vl); +} + + +vuint16m1_t test___riscv_vsll_vx_u16m1_tum(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u16m1_tum(mask,merge,op1,shift,vl); +} + + +vuint16m2_t 
test___riscv_vsll_vx_u16m2_tum(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u16m2_tum(mask,merge,op1,shift,vl); +} + + +vuint16m4_t test___riscv_vsll_vx_u16m4_tum(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u16m4_tum(mask,merge,op1,shift,vl); +} + + +vuint16m8_t test___riscv_vsll_vx_u16m8_tum(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u16m8_tum(mask,merge,op1,shift,vl); +} + + +vuint32mf2_t test___riscv_vsll_vx_u32mf2_tum(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u32mf2_tum(mask,merge,op1,shift,vl); +} + + +vuint32m1_t test___riscv_vsll_vx_u32m1_tum(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u32m1_tum(mask,merge,op1,shift,vl); +} + + +vuint32m2_t test___riscv_vsll_vx_u32m2_tum(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u32m2_tum(mask,merge,op1,shift,vl); +} + + +vuint32m4_t test___riscv_vsll_vx_u32m4_tum(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u32m4_tum(mask,merge,op1,shift,vl); +} + + +vuint32m8_t test___riscv_vsll_vx_u32m8_tum(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u32m8_tum(mask,merge,op1,shift,vl); +} + + +vuint64m1_t test___riscv_vsll_vx_u64m1_tum(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u64m1_tum(mask,merge,op1,shift,vl); +} + + +vuint64m2_t test___riscv_vsll_vx_u64m2_tum(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u64m2_tum(mask,merge,op1,shift,vl); +} + + +vuint64m4_t test___riscv_vsll_vx_u64m4_tum(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u64m4_tum(mask,merge,op1,shift,vl); +} + + +vuint64m8_t test___riscv_vsll_vx_u64m8_tum(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u64m8_tum(mask,merge,op1,shift,vl); +} + + + +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times 
{vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vsll_vx_tum-2.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vsll_vx_tum-2.c new file mode 100644 index 00000000000..0465ab4330a --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vsll_vx_tum-2.c @@ -0,0 +1,292 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "riscv_vector.h" + +vint8mf8_t test___riscv_vsll_vx_i8mf8_tum(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i8mf8_tum(mask,merge,op1,shift,31); +} + + +vint8mf4_t test___riscv_vsll_vx_i8mf4_tum(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i8mf4_tum(mask,merge,op1,shift,31); +} + + +vint8mf2_t test___riscv_vsll_vx_i8mf2_tum(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i8mf2_tum(mask,merge,op1,shift,31); +} + + +vint8m1_t test___riscv_vsll_vx_i8m1_tum(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i8m1_tum(mask,merge,op1,shift,31); +} + + +vint8m2_t test___riscv_vsll_vx_i8m2_tum(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i8m2_tum(mask,merge,op1,shift,31); +} + + +vint8m4_t 
test___riscv_vsll_vx_i8m4_tum(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i8m4_tum(mask,merge,op1,shift,31); +} + + +vint8m8_t test___riscv_vsll_vx_i8m8_tum(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i8m8_tum(mask,merge,op1,shift,31); +} + + +vint16mf4_t test___riscv_vsll_vx_i16mf4_tum(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i16mf4_tum(mask,merge,op1,shift,31); +} + + +vint16mf2_t test___riscv_vsll_vx_i16mf2_tum(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i16mf2_tum(mask,merge,op1,shift,31); +} + + +vint16m1_t test___riscv_vsll_vx_i16m1_tum(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i16m1_tum(mask,merge,op1,shift,31); +} + + +vint16m2_t test___riscv_vsll_vx_i16m2_tum(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i16m2_tum(mask,merge,op1,shift,31); +} + + +vint16m4_t test___riscv_vsll_vx_i16m4_tum(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i16m4_tum(mask,merge,op1,shift,31); +} + + +vint16m8_t test___riscv_vsll_vx_i16m8_tum(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i16m8_tum(mask,merge,op1,shift,31); +} + + +vint32mf2_t test___riscv_vsll_vx_i32mf2_tum(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i32mf2_tum(mask,merge,op1,shift,31); +} + + +vint32m1_t test___riscv_vsll_vx_i32m1_tum(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i32m1_tum(mask,merge,op1,shift,31); +} + + +vint32m2_t test___riscv_vsll_vx_i32m2_tum(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i32m2_tum(mask,merge,op1,shift,31); +} + + +vint32m4_t test___riscv_vsll_vx_i32m4_tum(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i32m4_tum(mask,merge,op1,shift,31); +} + + +vint32m8_t test___riscv_vsll_vx_i32m8_tum(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i32m8_tum(mask,merge,op1,shift,31); +} + + +vint64m1_t test___riscv_vsll_vx_i64m1_tum(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i64m1_tum(mask,merge,op1,shift,31); +} + + +vint64m2_t test___riscv_vsll_vx_i64m2_tum(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i64m2_tum(mask,merge,op1,shift,31); +} + + +vint64m4_t test___riscv_vsll_vx_i64m4_tum(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i64m4_tum(mask,merge,op1,shift,31); +} + + +vint64m8_t test___riscv_vsll_vx_i64m8_tum(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i64m8_tum(mask,merge,op1,shift,31); +} + + +vuint8mf8_t test___riscv_vsll_vx_u8mf8_tum(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u8mf8_tum(mask,merge,op1,shift,31); +} + + +vuint8mf4_t test___riscv_vsll_vx_u8mf4_tum(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u8mf4_tum(mask,merge,op1,shift,31); +} + + +vuint8mf2_t 
test___riscv_vsll_vx_u8mf2_tum(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u8mf2_tum(mask,merge,op1,shift,31); +} + + +vuint8m1_t test___riscv_vsll_vx_u8m1_tum(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u8m1_tum(mask,merge,op1,shift,31); +} + + +vuint8m2_t test___riscv_vsll_vx_u8m2_tum(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u8m2_tum(mask,merge,op1,shift,31); +} + + +vuint8m4_t test___riscv_vsll_vx_u8m4_tum(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u8m4_tum(mask,merge,op1,shift,31); +} + + +vuint8m8_t test___riscv_vsll_vx_u8m8_tum(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u8m8_tum(mask,merge,op1,shift,31); +} + + +vuint16mf4_t test___riscv_vsll_vx_u16mf4_tum(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u16mf4_tum(mask,merge,op1,shift,31); +} + + +vuint16mf2_t test___riscv_vsll_vx_u16mf2_tum(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u16mf2_tum(mask,merge,op1,shift,31); +} + + +vuint16m1_t test___riscv_vsll_vx_u16m1_tum(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u16m1_tum(mask,merge,op1,shift,31); +} + + +vuint16m2_t test___riscv_vsll_vx_u16m2_tum(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u16m2_tum(mask,merge,op1,shift,31); +} + + +vuint16m4_t test___riscv_vsll_vx_u16m4_tum(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u16m4_tum(mask,merge,op1,shift,31); +} + + +vuint16m8_t test___riscv_vsll_vx_u16m8_tum(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u16m8_tum(mask,merge,op1,shift,31); +} + + +vuint32mf2_t test___riscv_vsll_vx_u32mf2_tum(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u32mf2_tum(mask,merge,op1,shift,31); +} + + +vuint32m1_t test___riscv_vsll_vx_u32m1_tum(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u32m1_tum(mask,merge,op1,shift,31); +} + + +vuint32m2_t test___riscv_vsll_vx_u32m2_tum(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u32m2_tum(mask,merge,op1,shift,31); +} + + +vuint32m4_t test___riscv_vsll_vx_u32m4_tum(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u32m4_tum(mask,merge,op1,shift,31); +} + + +vuint32m8_t test___riscv_vsll_vx_u32m8_tum(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u32m8_tum(mask,merge,op1,shift,31); +} + + +vuint64m1_t test___riscv_vsll_vx_u64m1_tum(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u64m1_tum(mask,merge,op1,shift,31); +} + + +vuint64m2_t test___riscv_vsll_vx_u64m2_tum(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u64m2_tum(mask,merge,op1,shift,31); +} + + +vuint64m4_t test___riscv_vsll_vx_u64m4_tum(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u64m4_tum(mask,merge,op1,shift,31); +} + + 
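As a pointer for readers of these tum-2 tests: every call hard-codes an AVL of 31, the largest value that fits the 5-bit unsigned immediate of vsetivli, which is why the scan-assembler directives at the end of this file expect "vsetivli zero,31,..." rather than a register-based vsetvli. A minimal standalone sketch of the same intrinsic shape follows; the function name and the shift amount of 3 are illustrative only, not part of the commit:

#include <riscv_vector.h>

/* Shift the active elements of src left by 3.  Tail elements keep
   their values from old_val (the tail-undisturbed "_tum" policy),
   and the AVL of 31 is encodable directly in vsetivli's 5-bit
   unsigned immediate.  */
vint32m1_t shift_active (vbool32_t mask, vint32m1_t old_val,
                         vint32m1_t src)
{
  return __riscv_vsll_vx_i32m1_tum (mask, old_val, src, 3, 31);
}
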
+vuint64m8_t test___riscv_vsll_vx_u64m8_tum(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u64m8_tum(mask,merge,op1,shift,31); +} + + + +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf8,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf4,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf2,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m1,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m2,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m4,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m8,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf4,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf2,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m1,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m2,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m4,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m8,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*mf2,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m1,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m2,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m4,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m8,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m1,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m2,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m4,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m8,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ diff --git 
a/gcc/testsuite/gcc.target/riscv/rvv/base/vsll_vx_tum-3.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vsll_vx_tum-3.c new file mode 100644 index 00000000000..3481d4d441e --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vsll_vx_tum-3.c @@ -0,0 +1,292 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "riscv_vector.h" + +vint8mf8_t test___riscv_vsll_vx_i8mf8_tum(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i8mf8_tum(mask,merge,op1,shift,32); +} + + +vint8mf4_t test___riscv_vsll_vx_i8mf4_tum(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i8mf4_tum(mask,merge,op1,shift,32); +} + + +vint8mf2_t test___riscv_vsll_vx_i8mf2_tum(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i8mf2_tum(mask,merge,op1,shift,32); +} + + +vint8m1_t test___riscv_vsll_vx_i8m1_tum(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i8m1_tum(mask,merge,op1,shift,32); +} + + +vint8m2_t test___riscv_vsll_vx_i8m2_tum(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i8m2_tum(mask,merge,op1,shift,32); +} + + +vint8m4_t test___riscv_vsll_vx_i8m4_tum(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i8m4_tum(mask,merge,op1,shift,32); +} + + +vint8m8_t test___riscv_vsll_vx_i8m8_tum(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i8m8_tum(mask,merge,op1,shift,32); +} + + +vint16mf4_t test___riscv_vsll_vx_i16mf4_tum(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i16mf4_tum(mask,merge,op1,shift,32); +} + + +vint16mf2_t test___riscv_vsll_vx_i16mf2_tum(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i16mf2_tum(mask,merge,op1,shift,32); +} + + +vint16m1_t test___riscv_vsll_vx_i16m1_tum(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i16m1_tum(mask,merge,op1,shift,32); +} + + +vint16m2_t test___riscv_vsll_vx_i16m2_tum(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i16m2_tum(mask,merge,op1,shift,32); +} + + +vint16m4_t test___riscv_vsll_vx_i16m4_tum(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i16m4_tum(mask,merge,op1,shift,32); +} + + +vint16m8_t test___riscv_vsll_vx_i16m8_tum(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i16m8_tum(mask,merge,op1,shift,32); +} + + +vint32mf2_t test___riscv_vsll_vx_i32mf2_tum(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i32mf2_tum(mask,merge,op1,shift,32); +} + + +vint32m1_t test___riscv_vsll_vx_i32m1_tum(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i32m1_tum(mask,merge,op1,shift,32); +} + + +vint32m2_t test___riscv_vsll_vx_i32m2_tum(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i32m2_tum(mask,merge,op1,shift,32); +} + + +vint32m4_t test___riscv_vsll_vx_i32m4_tum(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i32m4_tum(mask,merge,op1,shift,32); +} + + 
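For orientation between the masked-policy variants in this patch: the _tum functions leave tail elements undisturbed while staying agnostic about masked-off elements, which is why their scan patterns accept either "ma" or "mu" (the m[au] alternation), whereas the _tumu tests later in the patch additionally require masked-off elements to be preserved and scan for "tu,mu" exactly. A small contrasting sketch, with illustrative function names:

#include <riscv_vector.h>
#include <stddef.h>

/* _tum: tail elements come from merge; masked-off elements are
   unspecified, so either ma or mu is acceptable.  */
vint32m1_t tum_form (vbool32_t mask, vint32m1_t merge,
                     vint32m1_t op1, size_t shift, size_t vl)
{
  return __riscv_vsll_vx_i32m1_tum (mask, merge, op1, shift, vl);
}

/* _tumu: tail and masked-off elements are both taken from merge,
   so the generated code must use the mu policy.  */
vint32m1_t tumu_form (vbool32_t mask, vint32m1_t merge,
                      vint32m1_t op1, size_t shift, size_t vl)
{
  return __riscv_vsll_vx_i32m1_tumu (mask, merge, op1, shift, vl);
}
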
+vint32m8_t test___riscv_vsll_vx_i32m8_tum(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i32m8_tum(mask,merge,op1,shift,32); +} + + +vint64m1_t test___riscv_vsll_vx_i64m1_tum(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i64m1_tum(mask,merge,op1,shift,32); +} + + +vint64m2_t test___riscv_vsll_vx_i64m2_tum(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i64m2_tum(mask,merge,op1,shift,32); +} + + +vint64m4_t test___riscv_vsll_vx_i64m4_tum(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i64m4_tum(mask,merge,op1,shift,32); +} + + +vint64m8_t test___riscv_vsll_vx_i64m8_tum(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i64m8_tum(mask,merge,op1,shift,32); +} + + +vuint8mf8_t test___riscv_vsll_vx_u8mf8_tum(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u8mf8_tum(mask,merge,op1,shift,32); +} + + +vuint8mf4_t test___riscv_vsll_vx_u8mf4_tum(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u8mf4_tum(mask,merge,op1,shift,32); +} + + +vuint8mf2_t test___riscv_vsll_vx_u8mf2_tum(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u8mf2_tum(mask,merge,op1,shift,32); +} + + +vuint8m1_t test___riscv_vsll_vx_u8m1_tum(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u8m1_tum(mask,merge,op1,shift,32); +} + + +vuint8m2_t test___riscv_vsll_vx_u8m2_tum(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u8m2_tum(mask,merge,op1,shift,32); +} + + +vuint8m4_t test___riscv_vsll_vx_u8m4_tum(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u8m4_tum(mask,merge,op1,shift,32); +} + + +vuint8m8_t test___riscv_vsll_vx_u8m8_tum(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u8m8_tum(mask,merge,op1,shift,32); +} + + +vuint16mf4_t test___riscv_vsll_vx_u16mf4_tum(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u16mf4_tum(mask,merge,op1,shift,32); +} + + +vuint16mf2_t test___riscv_vsll_vx_u16mf2_tum(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u16mf2_tum(mask,merge,op1,shift,32); +} + + +vuint16m1_t test___riscv_vsll_vx_u16m1_tum(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u16m1_tum(mask,merge,op1,shift,32); +} + + +vuint16m2_t test___riscv_vsll_vx_u16m2_tum(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u16m2_tum(mask,merge,op1,shift,32); +} + + +vuint16m4_t test___riscv_vsll_vx_u16m4_tum(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u16m4_tum(mask,merge,op1,shift,32); +} + + +vuint16m8_t test___riscv_vsll_vx_u16m8_tum(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u16m8_tum(mask,merge,op1,shift,32); +} + + +vuint32mf2_t test___riscv_vsll_vx_u32mf2_tum(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u32mf2_tum(mask,merge,op1,shift,32); +} + + 
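This tum-3 variant flips the AVL from 31 to 32, one past the vsetivli immediate range, so the directives at the bottom of the file expect a register-operand vsetvli instead. A side-by-side sketch of that boundary; the function names are illustrative and use the unmasked form for brevity:

#include <riscv_vector.h>
#include <stddef.h>

vint32m1_t avl_imm (vint32m1_t op1, size_t shift)
{
  /* 31 fits vsetivli's 5-bit unsigned immediate.  */
  return __riscv_vsll_vx_i32m1 (op1, shift, 31);
}

vint32m1_t avl_reg (vint32m1_t op1, size_t shift)
{
  /* 32 does not, so the AVL goes through a register via vsetvli.  */
  return __riscv_vsll_vx_i32m1 (op1, shift, 32);
}
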
+vuint32m1_t test___riscv_vsll_vx_u32m1_tum(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u32m1_tum(mask,merge,op1,shift,32); +} + + +vuint32m2_t test___riscv_vsll_vx_u32m2_tum(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u32m2_tum(mask,merge,op1,shift,32); +} + + +vuint32m4_t test___riscv_vsll_vx_u32m4_tum(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u32m4_tum(mask,merge,op1,shift,32); +} + + +vuint32m8_t test___riscv_vsll_vx_u32m8_tum(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u32m8_tum(mask,merge,op1,shift,32); +} + + +vuint64m1_t test___riscv_vsll_vx_u64m1_tum(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u64m1_tum(mask,merge,op1,shift,32); +} + + +vuint64m2_t test___riscv_vsll_vx_u64m2_tum(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u64m2_tum(mask,merge,op1,shift,32); +} + + +vuint64m4_t test___riscv_vsll_vx_u64m4_tum(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u64m4_tum(mask,merge,op1,shift,32); +} + + +vuint64m8_t test___riscv_vsll_vx_u64m8_tum(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_u64m8_tum(mask,merge,op1,shift,32); +} + + + +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ 
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*tu,\s*m[au]\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vsll_vx_tumu-1.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vsll_vx_tumu-1.c new file mode 100644 index 00000000000..50aa03156a9 --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vsll_vx_tumu-1.c @@ -0,0 +1,292 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "riscv_vector.h" + +vint8mf8_t test___riscv_vsll_vx_i8mf8_tumu(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i8mf8_tumu(mask,merge,op1,shift,vl); +} + + +vint8mf4_t test___riscv_vsll_vx_i8mf4_tumu(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i8mf4_tumu(mask,merge,op1,shift,vl); +} + + +vint8mf2_t test___riscv_vsll_vx_i8mf2_tumu(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i8mf2_tumu(mask,merge,op1,shift,vl); +} + + +vint8m1_t test___riscv_vsll_vx_i8m1_tumu(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i8m1_tumu(mask,merge,op1,shift,vl); +} + + +vint8m2_t test___riscv_vsll_vx_i8m2_tumu(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i8m2_tumu(mask,merge,op1,shift,vl); +} + + +vint8m4_t test___riscv_vsll_vx_i8m4_tumu(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i8m4_tumu(mask,merge,op1,shift,vl); +} + + +vint8m8_t test___riscv_vsll_vx_i8m8_tumu(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i8m8_tumu(mask,merge,op1,shift,vl); +} + + +vint16mf4_t test___riscv_vsll_vx_i16mf4_tumu(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i16mf4_tumu(mask,merge,op1,shift,vl); +} + + +vint16mf2_t test___riscv_vsll_vx_i16mf2_tumu(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,size_t shift,size_t vl) +{ + return __riscv_vsll_vx_i16mf2_tumu(mask,merge,op1,shift,vl); +} + + +vint16m1_t 
test___riscv_vsll_vx_i16m1_tumu(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16m1_tumu(mask,merge,op1,shift,vl);
+}
+
+
+vint16m2_t test___riscv_vsll_vx_i16m2_tumu(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16m2_tumu(mask,merge,op1,shift,vl);
+}
+
+
+vint16m4_t test___riscv_vsll_vx_i16m4_tumu(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16m4_tumu(mask,merge,op1,shift,vl);
+}
+
+
+vint16m8_t test___riscv_vsll_vx_i16m8_tumu(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16m8_tumu(mask,merge,op1,shift,vl);
+}
+
+
+vint32mf2_t test___riscv_vsll_vx_i32mf2_tumu(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i32mf2_tumu(mask,merge,op1,shift,vl);
+}
+
+
+vint32m1_t test___riscv_vsll_vx_i32m1_tumu(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i32m1_tumu(mask,merge,op1,shift,vl);
+}
+
+
+vint32m2_t test___riscv_vsll_vx_i32m2_tumu(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i32m2_tumu(mask,merge,op1,shift,vl);
+}
+
+
+vint32m4_t test___riscv_vsll_vx_i32m4_tumu(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i32m4_tumu(mask,merge,op1,shift,vl);
+}
+
+
+vint32m8_t test___riscv_vsll_vx_i32m8_tumu(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i32m8_tumu(mask,merge,op1,shift,vl);
+}
+
+
+vint64m1_t test___riscv_vsll_vx_i64m1_tumu(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i64m1_tumu(mask,merge,op1,shift,vl);
+}
+
+
+vint64m2_t test___riscv_vsll_vx_i64m2_tumu(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i64m2_tumu(mask,merge,op1,shift,vl);
+}
+
+
+vint64m4_t test___riscv_vsll_vx_i64m4_tumu(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i64m4_tumu(mask,merge,op1,shift,vl);
+}
+
+
+vint64m8_t test___riscv_vsll_vx_i64m8_tumu(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i64m8_tumu(mask,merge,op1,shift,vl);
+}
+
+
+vuint8mf8_t test___riscv_vsll_vx_u8mf8_tumu(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8mf8_tumu(mask,merge,op1,shift,vl);
+}
+
+
+vuint8mf4_t test___riscv_vsll_vx_u8mf4_tumu(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8mf4_tumu(mask,merge,op1,shift,vl);
+}
+
+
+vuint8mf2_t test___riscv_vsll_vx_u8mf2_tumu(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8mf2_tumu(mask,merge,op1,shift,vl);
+}
+
+
+vuint8m1_t test___riscv_vsll_vx_u8m1_tumu(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8m1_tumu(mask,merge,op1,shift,vl);
+}
+
+
+vuint8m2_t test___riscv_vsll_vx_u8m2_tumu(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8m2_tumu(mask,merge,op1,shift,vl);
+}
+
+
+vuint8m4_t test___riscv_vsll_vx_u8m4_tumu(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8m4_tumu(mask,merge,op1,shift,vl);
+}
+
+
+vuint8m8_t test___riscv_vsll_vx_u8m8_tumu(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8m8_tumu(mask,merge,op1,shift,vl);
+}
+
+
+vuint16mf4_t test___riscv_vsll_vx_u16mf4_tumu(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16mf4_tumu(mask,merge,op1,shift,vl);
+}
+
+
+vuint16mf2_t test___riscv_vsll_vx_u16mf2_tumu(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16mf2_tumu(mask,merge,op1,shift,vl);
+}
+
+
+vuint16m1_t test___riscv_vsll_vx_u16m1_tumu(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16m1_tumu(mask,merge,op1,shift,vl);
+}
+
+
+vuint16m2_t test___riscv_vsll_vx_u16m2_tumu(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16m2_tumu(mask,merge,op1,shift,vl);
+}
+
+
+vuint16m4_t test___riscv_vsll_vx_u16m4_tumu(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16m4_tumu(mask,merge,op1,shift,vl);
+}
+
+
+vuint16m8_t test___riscv_vsll_vx_u16m8_tumu(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16m8_tumu(mask,merge,op1,shift,vl);
+}
+
+
+vuint32mf2_t test___riscv_vsll_vx_u32mf2_tumu(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u32mf2_tumu(mask,merge,op1,shift,vl);
+}
+
+
+vuint32m1_t test___riscv_vsll_vx_u32m1_tumu(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u32m1_tumu(mask,merge,op1,shift,vl);
+}
+
+
+vuint32m2_t test___riscv_vsll_vx_u32m2_tumu(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u32m2_tumu(mask,merge,op1,shift,vl);
+}
+
+
+vuint32m4_t test___riscv_vsll_vx_u32m4_tumu(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u32m4_tumu(mask,merge,op1,shift,vl);
+}
+
+
+vuint32m8_t test___riscv_vsll_vx_u32m8_tumu(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u32m8_tumu(mask,merge,op1,shift,vl);
+}
+
+
+vuint64m1_t test___riscv_vsll_vx_u64m1_tumu(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u64m1_tumu(mask,merge,op1,shift,vl);
+}
+
+
+vuint64m2_t test___riscv_vsll_vx_u64m2_tumu(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u64m2_tumu(mask,merge,op1,shift,vl);
+}
+
+
+vuint64m4_t test___riscv_vsll_vx_u64m4_tumu(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u64m4_tumu(mask,merge,op1,shift,vl);
+}
+
+
+vuint64m8_t test___riscv_vsll_vx_u64m8_tumu(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u64m8_tumu(mask,merge,op1,shift,vl);
+}
+
+
+
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*tu,\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*tu,\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*tu,\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*tu,\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*tu,\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*tu,\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*tu,\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*tu,\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*tu,\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*tu,\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*tu,\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*tu,\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*tu,\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*tu,\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*tu,\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*tu,\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*tu,\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*tu,\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*tu,\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*tu,\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*tu,\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*tu,\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
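For readers skimming these generated tests: the _tumu suffix selects the tail-undisturbed, mask-undisturbed policy, so elements past vl (the tail) and elements whose mask bit is clear (inactive) both keep the value of the merge operand. A minimal sketch of the semantics each test exercises, using only the intrinsics shown above (the wrapper name is illustrative and not part of the commit):

#include <riscv_vector.h>

/* result[i] = (i < vl && mask[i]) ? op1[i] << shift : merge[i] */
vint32m1_t
shift_active_keep_rest (vbool32_t mask, vint32m1_t merge,
                        vint32m1_t op1, size_t shift, size_t vl)
{
  return __riscv_vsll_vx_i32m1_tumu (mask, merge, op1, shift, vl);
}

The mask type follows the SEW/LMUL ratio (vboolN_t with N = SEW/LMUL), e.g. e32,m1 gives vbool32_t, which is why each test pairs its vector type with one fixed vboolN_t.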
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vsll_vx_tumu-2.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vsll_vx_tumu-2.c
new file mode 100644
index 00000000000..0e7cb71ef41
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vsll_vx_tumu-2.c
@@ -0,0 +1,292 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
+vint8mf8_t test___riscv_vsll_vx_i8mf8_tumu(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8mf8_tumu(mask,merge,op1,shift,31);
+}
+
+
+vint8mf4_t test___riscv_vsll_vx_i8mf4_tumu(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8mf4_tumu(mask,merge,op1,shift,31);
+}
+
+
+vint8mf2_t test___riscv_vsll_vx_i8mf2_tumu(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8mf2_tumu(mask,merge,op1,shift,31);
+}
+
+
+vint8m1_t test___riscv_vsll_vx_i8m1_tumu(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8m1_tumu(mask,merge,op1,shift,31);
+}
+
+
+vint8m2_t test___riscv_vsll_vx_i8m2_tumu(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8m2_tumu(mask,merge,op1,shift,31);
+}
+
+
+vint8m4_t test___riscv_vsll_vx_i8m4_tumu(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8m4_tumu(mask,merge,op1,shift,31);
+}
+
+
+vint8m8_t test___riscv_vsll_vx_i8m8_tumu(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8m8_tumu(mask,merge,op1,shift,31);
+}
+
+
+vint16mf4_t test___riscv_vsll_vx_i16mf4_tumu(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16mf4_tumu(mask,merge,op1,shift,31);
+}
+
+
+vint16mf2_t test___riscv_vsll_vx_i16mf2_tumu(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16mf2_tumu(mask,merge,op1,shift,31);
+}
+
+
+vint16m1_t test___riscv_vsll_vx_i16m1_tumu(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16m1_tumu(mask,merge,op1,shift,31);
+}
+
+
+vint16m2_t test___riscv_vsll_vx_i16m2_tumu(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16m2_tumu(mask,merge,op1,shift,31);
+}
+
+
+vint16m4_t test___riscv_vsll_vx_i16m4_tumu(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16m4_tumu(mask,merge,op1,shift,31);
+}
+
+
+vint16m8_t test___riscv_vsll_vx_i16m8_tumu(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16m8_tumu(mask,merge,op1,shift,31);
+}
+
+
+vint32mf2_t test___riscv_vsll_vx_i32mf2_tumu(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i32mf2_tumu(mask,merge,op1,shift,31);
+}
+
+
+vint32m1_t test___riscv_vsll_vx_i32m1_tumu(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i32m1_tumu(mask,merge,op1,shift,31);
+}
+
+
+vint32m2_t test___riscv_vsll_vx_i32m2_tumu(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i32m2_tumu(mask,merge,op1,shift,31);
+}
+
+
+vint32m4_t test___riscv_vsll_vx_i32m4_tumu(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i32m4_tumu(mask,merge,op1,shift,31);
+}
+
+
+vint32m8_t test___riscv_vsll_vx_i32m8_tumu(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i32m8_tumu(mask,merge,op1,shift,31);
+}
+
+
+vint64m1_t test___riscv_vsll_vx_i64m1_tumu(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i64m1_tumu(mask,merge,op1,shift,31);
+}
+
+
+vint64m2_t test___riscv_vsll_vx_i64m2_tumu(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i64m2_tumu(mask,merge,op1,shift,31);
+}
+
+
+vint64m4_t test___riscv_vsll_vx_i64m4_tumu(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i64m4_tumu(mask,merge,op1,shift,31);
+}
+
+
+vint64m8_t test___riscv_vsll_vx_i64m8_tumu(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i64m8_tumu(mask,merge,op1,shift,31);
+}
+
+
+vuint8mf8_t test___riscv_vsll_vx_u8mf8_tumu(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8mf8_tumu(mask,merge,op1,shift,31);
+}
+
+
+vuint8mf4_t test___riscv_vsll_vx_u8mf4_tumu(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8mf4_tumu(mask,merge,op1,shift,31);
+}
+
+
+vuint8mf2_t test___riscv_vsll_vx_u8mf2_tumu(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8mf2_tumu(mask,merge,op1,shift,31);
+}
+
+
+vuint8m1_t test___riscv_vsll_vx_u8m1_tumu(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8m1_tumu(mask,merge,op1,shift,31);
+}
+
+
+vuint8m2_t test___riscv_vsll_vx_u8m2_tumu(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8m2_tumu(mask,merge,op1,shift,31);
+}
+
+
+vuint8m4_t test___riscv_vsll_vx_u8m4_tumu(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8m4_tumu(mask,merge,op1,shift,31);
+}
+
+
+vuint8m8_t test___riscv_vsll_vx_u8m8_tumu(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8m8_tumu(mask,merge,op1,shift,31);
+}
+
+
+vuint16mf4_t test___riscv_vsll_vx_u16mf4_tumu(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16mf4_tumu(mask,merge,op1,shift,31);
+}
+
+
+vuint16mf2_t test___riscv_vsll_vx_u16mf2_tumu(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16mf2_tumu(mask,merge,op1,shift,31);
+}
+
+
+vuint16m1_t test___riscv_vsll_vx_u16m1_tumu(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16m1_tumu(mask,merge,op1,shift,31);
+}
+
+
+vuint16m2_t test___riscv_vsll_vx_u16m2_tumu(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16m2_tumu(mask,merge,op1,shift,31);
+}
+
+
+vuint16m4_t test___riscv_vsll_vx_u16m4_tumu(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16m4_tumu(mask,merge,op1,shift,31);
+}
+
+
+vuint16m8_t test___riscv_vsll_vx_u16m8_tumu(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16m8_tumu(mask,merge,op1,shift,31);
+}
+
+
+vuint32mf2_t test___riscv_vsll_vx_u32mf2_tumu(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u32mf2_tumu(mask,merge,op1,shift,31);
+}
+
+
+vuint32m1_t test___riscv_vsll_vx_u32m1_tumu(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u32m1_tumu(mask,merge,op1,shift,31);
+}
+
+
+vuint32m2_t test___riscv_vsll_vx_u32m2_tumu(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u32m2_tumu(mask,merge,op1,shift,31);
+}
+
+
+vuint32m4_t test___riscv_vsll_vx_u32m4_tumu(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u32m4_tumu(mask,merge,op1,shift,31);
+}
+
+
+vuint32m8_t test___riscv_vsll_vx_u32m8_tumu(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u32m8_tumu(mask,merge,op1,shift,31);
+}
+
+
+vuint64m1_t test___riscv_vsll_vx_u64m1_tumu(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u64m1_tumu(mask,merge,op1,shift,31);
+}
+
+
+vuint64m2_t test___riscv_vsll_vx_u64m2_tumu(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u64m2_tumu(mask,merge,op1,shift,31);
+}
+
+
+vuint64m4_t test___riscv_vsll_vx_u64m4_tumu(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u64m4_tumu(mask,merge,op1,shift,31);
+}
+
+
+vuint64m8_t test___riscv_vsll_vx_u64m8_tumu(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u64m8_tumu(mask,merge,op1,shift,31);
+}
+
+
+
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf8,\s*tu,\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf4,\s*tu,\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf2,\s*tu,\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m1,\s*tu,\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m2,\s*tu,\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m4,\s*tu,\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m8,\s*tu,\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf4,\s*tu,\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf2,\s*tu,\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m1,\s*tu,\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m2,\s*tu,\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m4,\s*tu,\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m8,\s*tu,\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*mf2,\s*tu,\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m1,\s*tu,\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m2,\s*tu,\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m4,\s*tu,\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m8,\s*tu,\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m1,\s*tu,\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m2,\s*tu,\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m4,\s*tu,\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m8,\s*tu,\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
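In vsll_vx_tumu-2.c above, the vl parameter is deliberately ignored and a constant AVL of 31 is passed instead: 31 still fits the 5-bit unsigned immediate of vsetivli, so the scan patterns expect vsetivli zero,31 rather than a register-operand vsetvli. A hedged sketch of the pattern being counted (the wrapper name is illustrative, not from the commit):

#include <riscv_vector.h>

vint8m1_t
shift_avl_31 (vbool8_t mask, vint8m1_t merge, vint8m1_t op1, size_t shift)
{
  /* Constant AVL <= 31 fits vsetivli's uimm5 field.  */
  return __riscv_vsll_vx_i8m1_tumu (mask, merge, op1, shift, 31);
}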
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/vsll_vx_tumu-3.c b/gcc/testsuite/gcc.target/riscv/rvv/base/vsll_vx_tumu-3.c
new file mode 100644
index 00000000000..0ccfb2e71a2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/vsll_vx_tumu-3.c
@@ -0,0 +1,292 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
+
+#include "riscv_vector.h"
+
+vint8mf8_t test___riscv_vsll_vx_i8mf8_tumu(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8mf8_tumu(mask,merge,op1,shift,32);
+}
+
+
+vint8mf4_t test___riscv_vsll_vx_i8mf4_tumu(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8mf4_tumu(mask,merge,op1,shift,32);
+}
+
+
+vint8mf2_t test___riscv_vsll_vx_i8mf2_tumu(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8mf2_tumu(mask,merge,op1,shift,32);
+}
+
+
+vint8m1_t test___riscv_vsll_vx_i8m1_tumu(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8m1_tumu(mask,merge,op1,shift,32);
+}
+
+
+vint8m2_t test___riscv_vsll_vx_i8m2_tumu(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8m2_tumu(mask,merge,op1,shift,32);
+}
+
+
+vint8m4_t test___riscv_vsll_vx_i8m4_tumu(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8m4_tumu(mask,merge,op1,shift,32);
+}
+
+
+vint8m8_t test___riscv_vsll_vx_i8m8_tumu(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i8m8_tumu(mask,merge,op1,shift,32);
+}
+
+
+vint16mf4_t test___riscv_vsll_vx_i16mf4_tumu(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16mf4_tumu(mask,merge,op1,shift,32);
+}
+
+
+vint16mf2_t test___riscv_vsll_vx_i16mf2_tumu(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16mf2_tumu(mask,merge,op1,shift,32);
+}
+
+
+vint16m1_t test___riscv_vsll_vx_i16m1_tumu(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16m1_tumu(mask,merge,op1,shift,32);
+}
+
+
+vint16m2_t test___riscv_vsll_vx_i16m2_tumu(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16m2_tumu(mask,merge,op1,shift,32);
+}
+
+
+vint16m4_t test___riscv_vsll_vx_i16m4_tumu(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16m4_tumu(mask,merge,op1,shift,32);
+}
+
+
+vint16m8_t test___riscv_vsll_vx_i16m8_tumu(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i16m8_tumu(mask,merge,op1,shift,32);
+}
+
+
+vint32mf2_t test___riscv_vsll_vx_i32mf2_tumu(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i32mf2_tumu(mask,merge,op1,shift,32);
+}
+
+
+vint32m1_t test___riscv_vsll_vx_i32m1_tumu(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i32m1_tumu(mask,merge,op1,shift,32);
+}
+
+
+vint32m2_t test___riscv_vsll_vx_i32m2_tumu(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i32m2_tumu(mask,merge,op1,shift,32);
+}
+
+
+vint32m4_t test___riscv_vsll_vx_i32m4_tumu(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i32m4_tumu(mask,merge,op1,shift,32);
+}
+
+
+vint32m8_t test___riscv_vsll_vx_i32m8_tumu(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i32m8_tumu(mask,merge,op1,shift,32);
+}
+
+
+vint64m1_t test___riscv_vsll_vx_i64m1_tumu(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i64m1_tumu(mask,merge,op1,shift,32);
+}
+
+
+vint64m2_t test___riscv_vsll_vx_i64m2_tumu(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i64m2_tumu(mask,merge,op1,shift,32);
+}
+
+
+vint64m4_t test___riscv_vsll_vx_i64m4_tumu(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i64m4_tumu(mask,merge,op1,shift,32);
+}
+
+
+vint64m8_t test___riscv_vsll_vx_i64m8_tumu(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_i64m8_tumu(mask,merge,op1,shift,32);
+}
+
+
+vuint8mf8_t test___riscv_vsll_vx_u8mf8_tumu(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8mf8_tumu(mask,merge,op1,shift,32);
+}
+
+
+vuint8mf4_t test___riscv_vsll_vx_u8mf4_tumu(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8mf4_tumu(mask,merge,op1,shift,32);
+}
+
+
+vuint8mf2_t test___riscv_vsll_vx_u8mf2_tumu(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8mf2_tumu(mask,merge,op1,shift,32);
+}
+
+
+vuint8m1_t test___riscv_vsll_vx_u8m1_tumu(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8m1_tumu(mask,merge,op1,shift,32);
+}
+
+
+vuint8m2_t test___riscv_vsll_vx_u8m2_tumu(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8m2_tumu(mask,merge,op1,shift,32);
+}
+
+
+vuint8m4_t test___riscv_vsll_vx_u8m4_tumu(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8m4_tumu(mask,merge,op1,shift,32);
+}
+
+
+vuint8m8_t test___riscv_vsll_vx_u8m8_tumu(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u8m8_tumu(mask,merge,op1,shift,32);
+}
+
+
+vuint16mf4_t test___riscv_vsll_vx_u16mf4_tumu(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16mf4_tumu(mask,merge,op1,shift,32);
+}
+
+
+vuint16mf2_t test___riscv_vsll_vx_u16mf2_tumu(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16mf2_tumu(mask,merge,op1,shift,32);
+}
+
+
+vuint16m1_t test___riscv_vsll_vx_u16m1_tumu(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16m1_tumu(mask,merge,op1,shift,32);
+}
+
+
+vuint16m2_t test___riscv_vsll_vx_u16m2_tumu(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16m2_tumu(mask,merge,op1,shift,32);
+}
+
+
+vuint16m4_t test___riscv_vsll_vx_u16m4_tumu(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16m4_tumu(mask,merge,op1,shift,32);
+}
+
+
+vuint16m8_t test___riscv_vsll_vx_u16m8_tumu(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u16m8_tumu(mask,merge,op1,shift,32);
+}
+
+
+vuint32mf2_t test___riscv_vsll_vx_u32mf2_tumu(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u32mf2_tumu(mask,merge,op1,shift,32);
+}
+
+
+vuint32m1_t test___riscv_vsll_vx_u32m1_tumu(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u32m1_tumu(mask,merge,op1,shift,32);
+}
+
+
+vuint32m2_t test___riscv_vsll_vx_u32m2_tumu(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u32m2_tumu(mask,merge,op1,shift,32);
+}
+
+
+vuint32m4_t test___riscv_vsll_vx_u32m4_tumu(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u32m4_tumu(mask,merge,op1,shift,32);
+}
+
+
+vuint32m8_t test___riscv_vsll_vx_u32m8_tumu(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u32m8_tumu(mask,merge,op1,shift,32);
+}
+
+
+vuint64m1_t test___riscv_vsll_vx_u64m1_tumu(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u64m1_tumu(mask,merge,op1,shift,32);
+}
+
+
+vuint64m2_t test___riscv_vsll_vx_u64m2_tumu(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u64m2_tumu(mask,merge,op1,shift,32);
+}
+
+
+vuint64m4_t test___riscv_vsll_vx_u64m4_tumu(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u64m4_tumu(mask,merge,op1,shift,32);
+}
+
+
+vuint64m8_t test___riscv_vsll_vx_u64m8_tumu(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,size_t shift,size_t vl)
+{
+  return __riscv_vsll_vx_u64m8_tumu(mask,merge,op1,shift,32);
+}
+
+
+
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*tu,\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*tu,\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*tu,\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*tu,\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*tu,\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*tu,\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*tu,\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*tu,\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*tu,\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*tu,\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*tu,\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*tu,\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*tu,\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*tu,\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*tu,\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*tu,\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*tu,\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*tu,\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*tu,\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*tu,\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*tu,\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
+/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*tu,\s*mu\s+vsll\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t} 2 } } */
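vsll_vx_tumu-3.c mirrors tumu-2 but passes a constant AVL of 32, which no longer fits vsetivli's 5-bit unsigned immediate; the expected sequence therefore falls back to vsetvli with the AVL in a scalar register, which is what the final scan patterns match. A small sketch under the same assumptions as above (the wrapper name is illustrative, not part of the commit):

#include <riscv_vector.h>

vuint64m8_t
shift_avl_32 (vbool8_t mask, vuint64m8_t merge, vuint64m8_t op1, size_t shift)
{
  /* A constant AVL of 32 exceeds uimm5, so a vsetvli with a register
     AVL operand is expected instead of vsetivli.  */
  return __riscv_vsll_vx_u64m8_tumu (mask, merge, op1, shift, 32);
}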