From mboxrd@z Thu Jan  1 00:00:00 1970
Return-Path:
Received: by sourceware.org (Postfix, from userid 2093)
	id 0C96F385B51B; Wed, 15 Feb 2023 13:45:16 +0000 (GMT)
DKIM-Filter: OpenDKIM Filter v2.11.0 sourceware.org 0C96F385B51B
DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=gcc.gnu.org;
	s=default; t=1676468716;
	bh=3Wv+k3B11rvldeCGV2MDmAvI/nOZGMbx2lzBEzYJDgk=;
	h=From:To:Subject:Date:From;
	b=Am1isxL3gqi3qB4hQ5NYUKIG2Uw57Y8qhAVdl54hpBHiuOG0oG5yf1/dkjR3mlqWc
	 JFN1x84J3zDmw/GEM+VF468WHNDT+fRGFr3jQ8ZZlwu3pg9U3Da8LLIA/TieT9hiX2
	 xx0bTI6udIKvRso9l5Yg4N/dp0UEz6rOlvj8K9S8=
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Content-Type: text/plain; charset="utf-8"
From: Kito Cheng
To: gcc-cvs@gcc.gnu.org
Subject: [gcc r13-6046] RISC-V: Add ternary constraint tests
X-Act-Checkin: gcc
X-Git-Author: Ju-Zhe Zhong
X-Git-Refname: refs/heads/master
X-Git-Oldrev: ddd7c2e948923da7ffbed10b85f9bee2eeb42492
X-Git-Newrev: 5cf9afc5965e36acf430d0fef0bd15fe8f3454f9
Message-Id: <20230215134516.0C96F385B51B@sourceware.org>
Date: Wed, 15 Feb 2023 13:45:16 +0000 (GMT)
List-Id:

https://gcc.gnu.org/g:5cf9afc5965e36acf430d0fef0bd15fe8f3454f9

commit r13-6046-g5cf9afc5965e36acf430d0fef0bd15fe8f3454f9
Author: Ju-Zhe Zhong
Date:   Tue Feb 14 22:08:13 2023 +0800

    RISC-V: Add ternary constraint tests

    gcc/testsuite/ChangeLog:

            * gcc.target/riscv/rvv/base/ternop_vv_constraint-1.c: New test.
            * gcc.target/riscv/rvv/base/ternop_vv_constraint-2.c: New test.
            * gcc.target/riscv/rvv/base/ternop_vx_constraint-1.c: New test.
            * gcc.target/riscv/rvv/base/ternop_vx_constraint-2.c: New test.
            * gcc.target/riscv/rvv/base/ternop_vx_constraint-3.c: New test.
            * gcc.target/riscv/rvv/base/ternop_vx_constraint-4.c: New test.
            * gcc.target/riscv/rvv/base/ternop_vx_constraint-5.c: New test.
            * gcc.target/riscv/rvv/base/ternop_vx_constraint-6.c: New test.
            * gcc.target/riscv/rvv/base/ternop_vx_constraint-7.c: New test.
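[Editor's note] Every new test in the diff below follows the same shape: a chain of vmacc/vmadd intrinsic calls whose expected assembly is matched with check-function-bodies, plus a final scan-assembler-not {vmv} that verifies the ternary instructions accumulate in place without extra vector-move instructions. A minimal sketch of that shape, condensed from the files added below (not itself a file in this patch):

    /* { dg-do compile } */
    /* { dg-options "-march=rv32gcv -mabi=ilp32d -O3" } */
    /* { dg-final { check-function-bodies "**" "" } } */
    #include "riscv_vector.h"

    /*
    ** f1:
    ** ...
    ** vma[c-d][c-d]\.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+
    ** ...
    */
    void f1 (void *in, void *in2, void *out)
    {
      vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
      vint32m1_t v2 = __riscv_vle32_v_i32m1 (in2, 4);
      /* The first operand is both accumulator input and destination,
         so no register move should be needed for the result.  */
      vint32m1_t v3 = __riscv_vmacc_vv_i32m1 (v, v2, v2, 4);
      __riscv_vse32_v_i32m1 (out, v3, 4);
    }

    /* { dg-final { scan-assembler-not {vmv} } } */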
Diff:
---
 .../riscv/rvv/base/ternop_vv_constraint-1.c |  83 +++++++++++++
 .../riscv/rvv/base/ternop_vv_constraint-2.c |  83 +++++++++++++
 .../riscv/rvv/base/ternop_vx_constraint-1.c |  71 +++++++++++
 .../riscv/rvv/base/ternop_vx_constraint-2.c |  38 ++++++
 .../riscv/rvv/base/ternop_vx_constraint-3.c | 125 ++++++++++++++++++++
 .../riscv/rvv/base/ternop_vx_constraint-4.c | 123 +++++++++++++++++++
 .../riscv/rvv/base/ternop_vx_constraint-5.c | 123 +++++++++++++++++++
 .../riscv/rvv/base/ternop_vx_constraint-6.c | 130 +++++++++++++++++++++
 .../riscv/rvv/base/ternop_vx_constraint-7.c | 130 +++++++++++++++++++++
 9 files changed, 906 insertions(+)

diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/ternop_vv_constraint-1.c b/gcc/testsuite/gcc.target/riscv/rvv/base/ternop_vv_constraint-1.c
new file mode 100644
index 00000000000..838776e5c50
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/ternop_vv_constraint-1.c
@@ -0,0 +1,83 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f1:
+** vsetivli\tzero,4,e32,m1,ta,ma
+** vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+** vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+** vma[c-d][c-d]\.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+
+** vma[c-d][c-d]\.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+
+** vma[c-d][c-d]\.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+
+** vma[c-d][c-d]\.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+
+** vma[c-d][c-d]\.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+
+** vse32\.v\tv[0-9]+,0\([a-x0-9]+\)
+** ret
+*/
+void f1 (void * in, void * in2, void *out)
+{
+  vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+  vint32m1_t v2 = __riscv_vle32_v_i32m1 (in2, 4);
+  vint32m1_t v3 = __riscv_vmacc_vv_i32m1 (v, v2, v2, 4);
+  vint32m1_t v4 = __riscv_vmacc_vv_i32m1(v3, v2, v2, 4);
+  v4 = __riscv_vmacc_vv_i32m1 (v4, v2, v2, 4);
+  v4 = __riscv_vmacc_vv_i32m1 (v4, v2, v2, 4);
+  v4 = __riscv_vmacc_vv_i32m1 (v4, v2, v2, 4);
+  __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f2:
+** vsetivli\tzero,4,e32,m1,tu,ma
+** vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+** vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+** vma[c-d][c-d]\.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+
+** vma[c-d][c-d]\.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+
+** vma[c-d][c-d]\.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+
+** vma[c-d][c-d]\.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+
+** vma[c-d][c-d]\.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+
+** vse32\.v\tv[0-9]+,0\([a-x0-9]+\)
+** ret
+*/
+void f2 (void * in, void * in2, void *out)
+{
+  vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+  vint32m1_t v2 = __riscv_vle32_v_i32m1 (in2, 4);
+  vint32m1_t v3 = __riscv_vmacc_vv_i32m1_tu (v, v2, v2, 4);
+  vint32m1_t v4 = __riscv_vmacc_vv_i32m1_tu(v3, v2, v2, 4);
+  v4 = __riscv_vmacc_vv_i32m1_tu (v4, v2, v2, 4);
+  v4 = __riscv_vmacc_vv_i32m1_tu (v4, v2, v2, 4);
+  v4 = __riscv_vmacc_vv_i32m1_tu (v4, v2, v2, 4);
+  __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f3:
+** vsetivli\tzero,4,e32,m1,ta,ma
+** vlm\.v\tv[0-9]+,0\([a-x0-9]+\)
+** vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+** vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+** vma[c-d][c-d]\.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+,v0.t
+** vma[c-d][c-d]\.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+,v0.t
+** vma[c-d][c-d]\.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+,v0.t
+** vma[c-d][c-d]\.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+,v0.t
+** vma[c-d][c-d]\.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+,v0.t
+** vse32\.v\tv[0-9]+,0\([a-x0-9]+\)
+** ret
+*/
+void f3 (void * in, void * in2, void * in3, void *out)
+{
+  vbool32_t m = __riscv_vlm_v_b32 (in3, 4);
+  vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+  vint32m1_t v2 = __riscv_vle32_v_i32m1 (in2, 4);
+  vint32m1_t v3 = __riscv_vmacc_vv_i32m1_m (m, v, v2, v2, 4);
+  vint32m1_t v4 = __riscv_vmacc_vv_i32m1_m(m, v3, v2, v2, 4);
+  v4 = __riscv_vmacc_vv_i32m1_m (m, v4, v2, v2, 4);
+  v4 = __riscv_vmacc_vv_i32m1_m (m, v4, v2, v2, 4);
+  v4 = __riscv_vmacc_vv_i32m1_m (m, v4, v2, v2, 4);
+  __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/* { dg-final { scan-assembler-not {vmv} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/ternop_vv_constraint-2.c b/gcc/testsuite/gcc.target/riscv/rvv/base/ternop_vv_constraint-2.c
new file mode 100644
index 00000000000..54506c1c918
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/ternop_vv_constraint-2.c
@@ -0,0 +1,83 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f1:
+** vsetivli\tzero,4,e32,m1,ta,ma
+** vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+** vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+** vma[c-d][c-d]\.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+
+** vma[c-d][c-d]\.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+
+** vma[c-d][c-d]\.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+
+** vma[c-d][c-d]\.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+
+** vma[c-d][c-d]\.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+
+** vse32\.v\tv[0-9]+,0\([a-x0-9]+\)
+** ret
+*/
+void f1 (void * in, void * in2, void *out)
+{
+  vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+  vint32m1_t v2 = __riscv_vle32_v_i32m1 (in2, 4);
+  vint32m1_t v3 = __riscv_vmadd_vv_i32m1 (v, v2, v2, 4);
+  vint32m1_t v4 = __riscv_vmadd_vv_i32m1(v3, v2, v2, 4);
+  v4 = __riscv_vmadd_vv_i32m1 (v4, v2, v2, 4);
+  v4 = __riscv_vmadd_vv_i32m1 (v4, v2, v2, 4);
+  v4 = __riscv_vmadd_vv_i32m1 (v4, v2, v2, 4);
+  __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f2:
+** vsetivli\tzero,4,e32,m1,tu,ma
+** vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+** vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+** vma[c-d][c-d]\.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+
+** vma[c-d][c-d]\.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+
+** vma[c-d][c-d]\.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+
+** vma[c-d][c-d]\.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+
+** vma[c-d][c-d]\.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+
+** vse32\.v\tv[0-9]+,0\([a-x0-9]+\)
+** ret
+*/
+void f2 (void * in, void * in2, void *out)
+{
+  vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+  vint32m1_t v2 = __riscv_vle32_v_i32m1 (in2, 4);
+  vint32m1_t v3 = __riscv_vmadd_vv_i32m1_tu (v, v2, v2, 4);
+  vint32m1_t v4 = __riscv_vmadd_vv_i32m1_tu(v3, v2, v2, 4);
+  v4 = __riscv_vmadd_vv_i32m1_tu (v4, v2, v2, 4);
+  v4 = __riscv_vmadd_vv_i32m1_tu (v4, v2, v2, 4);
+  v4 = __riscv_vmadd_vv_i32m1_tu (v4, v2, v2, 4);
+  __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f3:
+** vsetivli\tzero,4,e32,m1,ta,ma
+** vlm\.v\tv[0-9]+,0\([a-x0-9]+\)
+** vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+** vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+** vma[c-d][c-d]\.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+,v0.t
+** vma[c-d][c-d]\.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+,v0.t
+** vma[c-d][c-d]\.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+,v0.t
+** vma[c-d][c-d]\.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+,v0.t
+** vma[c-d][c-d]\.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+,v0.t
+** vse32\.v\tv[0-9]+,0\([a-x0-9]+\)
+** ret
+*/
+void f3 (void * in, void * in2, void * in3, void *out)
+{
+  vbool32_t m = __riscv_vlm_v_b32 (in3, 4);
+  vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+  vint32m1_t v2 = __riscv_vle32_v_i32m1 (in2, 4);
+  vint32m1_t v3 = __riscv_vmadd_vv_i32m1_m (m, v, v2, v2, 4);
+  vint32m1_t v4 = __riscv_vmadd_vv_i32m1_m(m, v3, v2, v2, 4);
+  v4 = __riscv_vmadd_vv_i32m1_m (m, v4, v2, v2, 4);
+  v4 = __riscv_vmadd_vv_i32m1_m (m, v4, v2, v2, 4);
+  v4 = __riscv_vmadd_vv_i32m1_m (m, v4, v2, v2, 4);
+  __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/* { dg-final { scan-assembler-not {vmv} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/ternop_vx_constraint-1.c b/gcc/testsuite/gcc.target/riscv/rvv/base/ternop_vx_constraint-1.c
new file mode 100644
index 00000000000..90e120655d7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/ternop_vx_constraint-1.c
@@ -0,0 +1,71 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f1:
+** vsetivli\tzero,4,e32,m1,tu,ma
+** vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+** vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
+** vma[c-d][c-d]\.vx\tv[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+
+** vma[c-d][c-d]\.vx\tv[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+
+** vse32\.v\tv[0-9]+,0\([a-x0-9]+\)
+** ret
+*/
+void f1 (void * in, void * in2, void *out, int32_t x)
+{
+  vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+  vint32m1_t v2 = __riscv_vle32_v_i32m1 (in2, 4);
+  vint32m1_t v3 = __riscv_vmacc_vx_i32m1 (v, x, v2, 4);
+  vint32m1_t v4 = __riscv_vmacc_vx_i32m1_tu (v3, x, v2, 4);
+  __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f2:
+** vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+** vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+** vsetivli\tzero,4,e32,m1,tu,ma
+** vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+** vle32.v\tv[0-9]+,0\([a-x0-9]+\)
+** vma[c-d][c-d]\.vx\tv[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+
+** vma[c-d][c-d]\.vx\tv[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+
+** vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+** ret
+*/
+void f2 (void * in, void * in2, void *out, int32_t x)
+{
+  vbool32_t mask = *(vbool32_t*)in;
+  asm volatile ("":::"memory");
+  vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+  vint32m1_t v2 = __riscv_vle32_v_i32m1_m (mask, in2, 4);
+  vint32m1_t v3 = __riscv_vmacc_vx_i32m1 (v, x, v2, 4);
+  vint32m1_t v4 = __riscv_vmacc_vx_i32m1_tu (v3, x, v2, 4);
+  __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/*
+** f3:
+** vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
+** vlm.v\tv[0-9]+,0\([a-x0-9]+\)
+** vsetivli\tzero,4,e32,m1,tu,mu
+** vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
+** vle32.v\tv[0-9]+,0\([a-x0-9]+\)
+** vma[c-d][c-d]\.vx\tv[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+
+** vma[c-d][c-d]\.vx\tv[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,v0.t
+** vse32.v\tv[0-9]+,0\([a-x0-9]+\)
+** ret
+*/
+void f3 (void * in, void * in2, void *out, int32_t x)
+{
+  vbool32_t mask = *(vbool32_t*)in;
+  asm volatile ("":::"memory");
+  vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+  vint32m1_t v2 = __riscv_vle32_v_i32m1_m (mask, in2, 4);
+  vint32m1_t v3 = __riscv_vmacc_vx_i32m1 (v, x, v2, 4);
+  vint32m1_t v4 = __riscv_vmacc_vx_i32m1_tumu (mask, v3, x, v2, 4);
+  __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/* { dg-final { scan-assembler-not {vmv} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/ternop_vx_constraint-2.c b/gcc/testsuite/gcc.target/riscv/rvv/base/ternop_vx_constraint-2.c
new file mode 100644
index 00000000000..82815f19279
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/ternop_vx_constraint-2.c
@@ -0,0 +1,38 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3" } */
+#include "riscv_vector.h"
+
+void f1 (void * in, void * in2, void *out, int32_t x)
+{
+  vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+  vint32m1_t v2 = __riscv_vle32_v_i32m1 (in2, 4);
+  vint32m1_t v3 = __riscv_vmacc_vx_i32m1 (v, x, v2, 4);
+  vint32m1_t v4 = __riscv_vmacc_vx_i32m1_tu (v3, x, v2, 4);
+  __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+void f2 (void * in, void * in2, void *out, int32_t x)
+{
+  vbool32_t mask = *(vbool32_t*)in;
+  asm volatile ("":::"memory");
+  vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+  vint32m1_t v2 = __riscv_vle32_v_i32m1_m (mask, in2, 4);
+  vint32m1_t v3 = __riscv_vmacc_vx_i32m1 (v, x, v2, 4);
+  vint32m1_t v4 = __riscv_vmacc_vx_i32m1_tu (v3, x, v2, 4);
+  __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+void f3 (void * in, void * in2, void *out, int32_t x)
+{
+  vbool32_t mask = *(vbool32_t*)in;
+  asm volatile ("":::"memory");
+  vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
+  vint32m1_t v2 = __riscv_vle32_v_i32m1_m (mask, in2, 4);
+  vint32m1_t v3 = __riscv_vmacc_vx_i32m1 (v, x, v2, 4);
+  vint32m1_t v4 = __riscv_vmacc_vx_i32m1_tumu (mask, v3, x, v2, 4);
+  __riscv_vse32_v_i32m1 (out, v4, 4);
+}
+
+/* { dg-final { scan-assembler-times {vma[c-d][c-d]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+\s+} 5 } } */
+/* { dg-final { scan-assembler-times {vma[c-d][c-d]\.vx\s+v[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,\s*v0.t} 1 } } */
+/* { dg-final { scan-assembler-not {vmv} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/ternop_vx_constraint-3.c b/gcc/testsuite/gcc.target/riscv/rvv/base/ternop_vx_constraint-3.c
new file mode 100644
index 00000000000..8ffba43ab5f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/ternop_vx_constraint-3.c
@@ -0,0 +1,125 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f0:
+** ...
+** vma[c-d][c-d]\.vx\tv[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+
+** vma[c-d][c-d]\.vx\tv[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+
+** ...
+** ret
+*/
+void f0 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vmacc_vx_i64m1 (v2, -16, v2, 4);
+  vint64m1_t v4 = __riscv_vmacc_vx_i64m1 (v3, -16, v3, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f1:
+** ...
+** vma[c-d][c-d]\.vx\tv[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+
+** vma[c-d][c-d]\.vx\tv[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+
+** ...
+** ret
+*/
+void f1 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vmacc_vx_i64m1 (v2, 15, v2, 4);
+  vint64m1_t v4 = __riscv_vmacc_vx_i64m1 (v3, 15, v3, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f2:
+** ...
+** vma[c-d][c-d]\.vx\tv[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+
+** vma[c-d][c-d]\.vx\tv[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+
+** ...
+** ret
+*/
+void f2 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vmacc_vx_i64m1 (v2, 16, v2, 4);
+  vint64m1_t v4 = __riscv_vmacc_vx_i64m1 (v3, 16, v3, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f3:
+** ...
+** vma[c-d][c-d]\.vx\tv[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+
+** vma[c-d][c-d]\.vx\tv[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+
+** ...
+** ret
+*/
+void f3 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vmacc_vx_i64m1 (v2, 0xAAAAAA, v2, 4);
+  vint64m1_t v4 = __riscv_vmacc_vx_i64m1 (v3, 0xAAAAAA, v3, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f4:
+** ...
+** vma[c-d][c-d]\.vx\tv[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+
+** vma[c-d][c-d]\.vx\tv[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+
+** ...
+** ret
+*/
+void f4 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vmacc_vx_i64m1 (v2, 0xAAAAAAA, v2, 4);
+  vint64m1_t v4 = __riscv_vmacc_vx_i64m1 (v3, 0xAAAAAAA, v3, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f5:
+** ...
+** vma[c-d][c-d]\.vx\tv[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+
+** vma[c-d][c-d]\.vx\tv[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+
+** ...
+** ret
+*/
+void f5 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vmacc_vx_i64m1 (v2, 0xAAAAAAAAAAAAAAAA, v2, 4);
+  vint64m1_t v4 = __riscv_vmacc_vx_i64m1 (v3, 0xAAAAAAAAAAAAAAAA, v3, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f6:
+** ...
+** vma[c-d][c-d]\.vx\tv[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+
+** vma[c-d][c-d]\.vx\tv[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+
+** ...
+** ret
+*/
+void f6 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vmacc_vx_i64m1 (v2, x, v2, 4);
+  vint64m1_t v4 = __riscv_vmacc_vx_i64m1 (v3, x, v3, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/* { dg-final { scan-assembler-not {vmv} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/ternop_vx_constraint-4.c b/gcc/testsuite/gcc.target/riscv/rvv/base/ternop_vx_constraint-4.c
new file mode 100644
index 00000000000..f07ad68cd74
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/ternop_vx_constraint-4.c
@@ -0,0 +1,123 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32 -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f0:
+** ...
+** vma[c-d][c-d]\.vx\tv[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+
+** vma[c-d][c-d]\.vx\tv[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+
+** ...
+** ret
+*/
+void f0 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vmacc_vx_i64m1 (v2, -16, v2, 4);
+  vint64m1_t v4 = __riscv_vmacc_vx_i64m1 (v3, -16, v3, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f1:
+** ...
+** vma[c-d][c-d]\.vx\tv[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+
+** vma[c-d][c-d]\.vx\tv[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+
+** ...
+** ret
+*/
+void f1 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vmacc_vx_i64m1 (v2, 15, v2, 4);
+  vint64m1_t v4 = __riscv_vmacc_vx_i64m1 (v3, 15, v3, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f2:
+** ...
+** vma[c-d][c-d]\.vx\tv[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+
+** vma[c-d][c-d]\.vx\tv[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+
+** ...
+** ret
+*/
+void f2 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vmacc_vx_i64m1 (v2, 16, v2, 4);
+  vint64m1_t v4 = __riscv_vmacc_vx_i64m1 (v3, 16, v3, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f3:
+** ...
+** vma[c-d][c-d]\.vx\tv[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+
+** vma[c-d][c-d]\.vx\tv[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+
+** ...
+** ret
+*/
+void f3 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vmacc_vx_i64m1 (v2, 0xAAAAAA, v2, 4);
+  vint64m1_t v4 = __riscv_vmacc_vx_i64m1 (v3, 0xAAAAAA, v3, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f4:
+** ...
+** vma[c-d][c-d]\.vx\tv[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+
+** vma[c-d][c-d]\.vx\tv[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+
+** ...
+** ret
+*/
+void f4 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vmacc_vx_i64m1 (v2, 0xAAAAAAA, v2, 4);
+  vint64m1_t v4 = __riscv_vmacc_vx_i64m1 (v3, 0xAAAAAAA, v3, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f5:
+** ...
+** vma[c-d][c-d]\.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+
+** vma[c-d][c-d]\.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+
+** ...
+*/
+void f5 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vmacc_vx_i64m1 (v2, 0xAAAAAAAAAAAAAAAA, v2, 4);
+  vint64m1_t v4 = __riscv_vmacc_vx_i64m1 (v3, 0xAAAAAAAAAAAAAAAA, v3, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f6:
+** ...
+** vma[c-d][c-d]\.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+
+** vma[c-d][c-d]\.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+
+** ...
+*/
+void f6 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vmacc_vx_i64m1 (v2, x, v2, 4);
+  vint64m1_t v4 = __riscv_vmacc_vx_i64m1 (v3, x, v3, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/* { dg-final { scan-assembler-not {vmv} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/ternop_vx_constraint-5.c b/gcc/testsuite/gcc.target/riscv/rvv/base/ternop_vx_constraint-5.c
new file mode 100644
index 00000000000..c554036d8e1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/ternop_vx_constraint-5.c
@@ -0,0 +1,123 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32 -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f0:
+** ...
+** vma[c-d][c-d]\.vx\tv[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+
+** vma[c-d][c-d]\.vx\tv[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+
+** ...
+** ret
+*/
+void f0 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vmacc_vx_i64m1_tu (v2, -16, v2, 4);
+  vint64m1_t v4 = __riscv_vmacc_vx_i64m1_tu (v3, -16, v3, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f1:
+** ...
+** vma[c-d][c-d]\.vx\tv[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+
+** vma[c-d][c-d]\.vx\tv[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+
+** ...
+** ret
+*/
+void f1 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vmacc_vx_i64m1_tu (v2, 15, v2, 4);
+  vint64m1_t v4 = __riscv_vmacc_vx_i64m1_tu (v3, 15, v3, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f2:
+** ...
+** vma[c-d][c-d]\.vx\tv[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+
+** vma[c-d][c-d]\.vx\tv[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+
+** ...
+** ret
+*/
+void f2 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vmacc_vx_i64m1_tu (v2, 16, v2, 4);
+  vint64m1_t v4 = __riscv_vmacc_vx_i64m1_tu (v3, 16, v3, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f3:
+** ...
+** vma[c-d][c-d]\.vx\tv[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+
+** vma[c-d][c-d]\.vx\tv[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+
+** ...
+** ret
+*/
+void f3 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vmacc_vx_i64m1_tu (v2, 0xAAAAAA, v2, 4);
+  vint64m1_t v4 = __riscv_vmacc_vx_i64m1_tu (v3, 0xAAAAAA, v3, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f4:
+** ...
+** vma[c-d][c-d]\.vx\tv[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+
+** vma[c-d][c-d]\.vx\tv[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+
+** ...
+** ret
+*/
+void f4 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vmacc_vx_i64m1_tu (v2, 0xAAAAAAA, v2, 4);
+  vint64m1_t v4 = __riscv_vmacc_vx_i64m1_tu (v3, 0xAAAAAAA, v3, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f5:
+** ...
+** vma[c-d][c-d]\.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+
+** vma[c-d][c-d]\.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+
+** ...
+*/
+void f5 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vmacc_vx_i64m1_tu (v2, 0xAAAAAAAAAAAAAAAA, v2, 4);
+  vint64m1_t v4 = __riscv_vmacc_vx_i64m1_tu (v3, 0xAAAAAAAAAAAAAAAA, v3, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f6:
+** ...
+** vma[c-d][c-d]\.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+
+** vma[c-d][c-d]\.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+
+** ...
+*/
+void f6 (void * in, void *out, int64_t x, int n)
+{
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vmacc_vx_i64m1_tu (v2, x, v2, 4);
+  vint64m1_t v4 = __riscv_vmacc_vx_i64m1_tu (v3, x, v3, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/* { dg-final { scan-assembler-not {vmv} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/ternop_vx_constraint-6.c b/gcc/testsuite/gcc.target/riscv/rvv/base/ternop_vx_constraint-6.c
new file mode 100644
index 00000000000..cb593bcf97f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/ternop_vx_constraint-6.c
@@ -0,0 +1,130 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32 -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f0:
+** ...
+** vma[c-d][c-d]\.vx\tv[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,v0.t
+** vma[c-d][c-d]\.vx\tv[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,v0.t
+** ...
+** ret
+*/
+void f0 (void * in, void *out, int64_t x, int n)
+{
+  vbool64_t mask = __riscv_vlm_v_b64 (in + 100, 4);
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vmacc_vx_i64m1_m (mask, v2, -16, v2, 4);
+  vint64m1_t v4 = __riscv_vmacc_vx_i64m1_m (mask, v3, -16, v3, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f1:
+** ...
+** vma[c-d][c-d]\.vx\tv[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,v0.t
+** vma[c-d][c-d]\.vx\tv[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,v0.t
+** ...
+** ret
+*/
+void f1 (void * in, void *out, int64_t x, int n)
+{
+  vbool64_t mask = __riscv_vlm_v_b64 (in + 100, 4);
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vmacc_vx_i64m1_m (mask,v2, 15, v2, 4);
+  vint64m1_t v4 = __riscv_vmacc_vx_i64m1_m (mask,v3, 15, v3, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f2:
+** ...
+** vma[c-d][c-d]\.vx\tv[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,v0.t
+** vma[c-d][c-d]\.vx\tv[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,v0.t
+** ...
+** ret
+*/
+void f2 (void * in, void *out, int64_t x, int n)
+{
+  vbool64_t mask = __riscv_vlm_v_b64 (in + 100, 4);
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vmacc_vx_i64m1_m (mask,v2, 16, v2, 4);
+  vint64m1_t v4 = __riscv_vmacc_vx_i64m1_m (mask,v3, 16, v3, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f3:
+** ...
+** vma[c-d][c-d]\.vx\tv[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,v0.t
+** vma[c-d][c-d]\.vx\tv[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,v0.t
+** ...
+** ret
+*/
+void f3 (void * in, void *out, int64_t x, int n)
+{
+  vbool64_t mask = __riscv_vlm_v_b64 (in + 100, 4);
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vmacc_vx_i64m1_m (mask,v2, 0xAAAAAA, v2, 4);
+  vint64m1_t v4 = __riscv_vmacc_vx_i64m1_m (mask,v3, 0xAAAAAA, v3, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f4:
+** ...
+** vma[c-d][c-d]\.vx\tv[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,v0.t
+** vma[c-d][c-d]\.vx\tv[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,v0.t
+** ...
+** ret
+*/
+void f4 (void * in, void *out, int64_t x, int n)
+{
+  vbool64_t mask = __riscv_vlm_v_b64 (in + 100, 4);
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vmacc_vx_i64m1_m (mask,v2, 0xAAAAAAA, v2, 4);
+  vint64m1_t v4 = __riscv_vmacc_vx_i64m1_m (mask,v3, 0xAAAAAAA, v3, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f5:
+** ...
+** vma[c-d][c-d]\.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+,v0.t
+** vma[c-d][c-d]\.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+,v0.t
+** ...
+*/
+void f5 (void * in, void *out, int64_t x, int n)
+{
+  vbool64_t mask = __riscv_vlm_v_b64 (in + 100, 4);
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vmacc_vx_i64m1_m (mask,v2, 0xAAAAAAAAAAAAAAAA, v2, 4);
+  vint64m1_t v4 = __riscv_vmacc_vx_i64m1_m (mask,v3, 0xAAAAAAAAAAAAAAAA, v3, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f6:
+** ...
+** vma[c-d][c-d]\.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+,v0.t
+** vma[c-d][c-d]\.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+,v0.t
+** ...
+*/
+void f6 (void * in, void *out, int64_t x, int n)
+{
+  vbool64_t mask = __riscv_vlm_v_b64 (in + 100, 4);
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vmacc_vx_i64m1_m (mask,v2, x, v2, 4);
+  vint64m1_t v4 = __riscv_vmacc_vx_i64m1_m (mask,v3, x, v3, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/* { dg-final { scan-assembler-not {vmv} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/ternop_vx_constraint-7.c b/gcc/testsuite/gcc.target/riscv/rvv/base/ternop_vx_constraint-7.c
new file mode 100644
index 00000000000..e87f6ec3362
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/ternop_vx_constraint-7.c
@@ -0,0 +1,130 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32 -O3" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+#include "riscv_vector.h"
+
+/*
+** f0:
+** ...
+** vma[c-d][c-d]\.vx\tv[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,v0.t
+** vma[c-d][c-d]\.vx\tv[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,v0.t
+** ...
+** ret
+*/
+void f0 (void * in, void *out, int64_t x, int n)
+{
+  vbool64_t mask = __riscv_vlm_v_b64 (in + 100, 4);
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vmacc_vx_i64m1_tumu (mask, v2, -16, v2, 4);
+  vint64m1_t v4 = __riscv_vmacc_vx_i64m1_tumu (mask, v3, -16, v3, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f1:
+** ...
+** vma[c-d][c-d]\.vx\tv[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,v0.t
+** vma[c-d][c-d]\.vx\tv[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,v0.t
+** ...
+** ret
+*/
+void f1 (void * in, void *out, int64_t x, int n)
+{
+  vbool64_t mask = __riscv_vlm_v_b64 (in + 100, 4);
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vmacc_vx_i64m1_tumu (mask,v2, 15, v2, 4);
+  vint64m1_t v4 = __riscv_vmacc_vx_i64m1_tumu (mask,v3, 15, v3, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f2:
+** ...
+** vma[c-d][c-d]\.vx\tv[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,v0.t
+** vma[c-d][c-d]\.vx\tv[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,v0.t
+** ...
+** ret
+*/
+void f2 (void * in, void *out, int64_t x, int n)
+{
+  vbool64_t mask = __riscv_vlm_v_b64 (in + 100, 4);
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vmacc_vx_i64m1_tumu (mask,v2, 16, v2, 4);
+  vint64m1_t v4 = __riscv_vmacc_vx_i64m1_tumu (mask,v3, 16, v3, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f3:
+** ...
+** vma[c-d][c-d]\.vx\tv[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,v0.t
+** vma[c-d][c-d]\.vx\tv[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,v0.t
+** ...
+** ret
+*/
+void f3 (void * in, void *out, int64_t x, int n)
+{
+  vbool64_t mask = __riscv_vlm_v_b64 (in + 100, 4);
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vmacc_vx_i64m1_tumu (mask,v2, 0xAAAAAA, v2, 4);
+  vint64m1_t v4 = __riscv_vmacc_vx_i64m1_tumu (mask,v3, 0xAAAAAA, v3, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f4:
+** ...
+** vma[c-d][c-d]\.vx\tv[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,v0.t
+** vma[c-d][c-d]\.vx\tv[0-9]+,\s*[a-x0-9]+,\s*v[0-9]+,v0.t
+** ...
+** ret
+*/
+void f4 (void * in, void *out, int64_t x, int n)
+{
+  vbool64_t mask = __riscv_vlm_v_b64 (in + 100, 4);
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vmacc_vx_i64m1_tumu (mask,v2, 0xAAAAAAA, v2, 4);
+  vint64m1_t v4 = __riscv_vmacc_vx_i64m1_tumu (mask,v3, 0xAAAAAAA, v3, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f5:
+** ...
+** vma[c-d][c-d]\.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+,v0.t
+** vma[c-d][c-d]\.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+,v0.t
+** ...
+*/
+void f5 (void * in, void *out, int64_t x, int n)
+{
+  vbool64_t mask = __riscv_vlm_v_b64 (in + 100, 4);
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vmacc_vx_i64m1_tumu (mask,v2, 0xAAAAAAAAAAAAAAAA, v2, 4);
+  vint64m1_t v4 = __riscv_vmacc_vx_i64m1_tumu (mask,v3, 0xAAAAAAAAAAAAAAAA, v3, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/*
+** f6:
+** ...
+** vma[c-d][c-d]\.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+,v0.t
+** vma[c-d][c-d]\.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+,v0.t
+** ...
+*/
+void f6 (void * in, void *out, int64_t x, int n)
+{
+  vbool64_t mask = __riscv_vlm_v_b64 (in + 100, 4);
+  vint64m1_t v = __riscv_vle64_v_i64m1 (in + 1, 4);
+  vint64m1_t v2 = __riscv_vle64_v_i64m1_tu (v, in + 2, 4);
+  vint64m1_t v3 = __riscv_vmacc_vx_i64m1_tumu (mask,v2, x, v2, 4);
+  vint64m1_t v4 = __riscv_vmacc_vx_i64m1_tumu (mask,v3, x, v3, 4);
+  __riscv_vse64_v_i64m1 (out + 2, v4, 4);
+}
+
+/* { dg-final { scan-assembler-not {vmv} } } */