* [PATCH 2/2] SVE intrinsics: Fold constant operands for svmul
From: Jennifer Schmitz @ 2024-08-19 7:04 UTC
To: gcc-patches; +Cc: richard.sandiford, Kyrylo Tkachov
This patch implements constant folding for svmul.  It uses the
gimple_folder::const_fold function to fold constant integer operands.
Additionally, if at least one of the operands is a zero vector, svmul is
folded to a zero vector (when the predication is _x or _z, or the
predicate is a ptrue).
Tests were added that check the produced assembly for different
predications and for signed and unsigned integer types.

The patch was bootstrapped and regtested on aarch64-linux-gnu with no
regressions.
OK for mainline?

Signed-off-by: Jennifer Schmitz <jschmitz@nvidia.com>

gcc/
	* config/aarch64/aarch64-sve-builtins-base.cc
	(svmul_impl::fold): Implement function and add constant folding.

gcc/testsuite/
	* gcc.target/aarch64/sve/const_fold_mul_1.c: New test.
	* gcc.target/aarch64/sve/const_fold_mul_zero.c: Likewise.
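
As a quick illustration of the fold (a minimal sketch, assuming an
SVE-enabled aarch64 compiler, e.g. -march=armv8.2-a+sve, at -O2; see the
new tests below for the full set of cases):

  #include <arm_sve.h>

  /* Both operands are constant vectors, so the multiplication is now
     folded at gimple level: the expected code is a single mov of #15
     into a z register (the s64_x_pg test checks exactly this).
     Without the fold, both constants would be materialised and
     multiplied at run time, as the _m tests below still show.  */
  svint64_t
  foo (svbool_t pg)
  {
    return svmul_x (pg, svdup_s64 (5), svdup_s64 (3));
  }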
From 42b98071845072bde7411d5a8be792513f601193 Mon Sep 17 00:00:00 2001
From: Jennifer Schmitz <jschmitz@nvidia.com>
Date: Thu, 15 Aug 2024 06:21:53 -0700
Subject: [PATCH 2/2] SVE intrinsics: Fold constant operands for svmul
---
.../aarch64/aarch64-sve-builtins-base.cc | 36 ++++-
.../gcc.target/aarch64/sve/const_fold_mul_1.c | 128 ++++++++++++++++++
.../aarch64/sve/const_fold_mul_zero.c | 109 +++++++++++++--
3 files changed, 262 insertions(+), 11 deletions(-)
create mode 100644 gcc/testsuite/gcc.target/aarch64/sve/const_fold_mul_1.c
diff --git a/gcc/config/aarch64/aarch64-sve-builtins-base.cc b/gcc/config/aarch64/aarch64-sve-builtins-base.cc
index 7f948ecc0c7..ef0e11fe327 100644
--- a/gcc/config/aarch64/aarch64-sve-builtins-base.cc
+++ b/gcc/config/aarch64/aarch64-sve-builtins-base.cc
@@ -2019,6 +2019,40 @@ public:
   }
 };
 
+class svmul_impl : public rtx_code_function
+{
+public:
+  CONSTEXPR svmul_impl ()
+    : rtx_code_function (MULT, MULT, UNSPEC_COND_FMUL) {}
+
+  gimple *
+  fold (gimple_folder &f) const override
+  {
+    tree pg = gimple_call_arg (f.call, 0);
+    tree op1 = gimple_call_arg (f.call, 1);
+    tree op2 = gimple_call_arg (f.call, 2);
+
+    /* For integers, if one of the operands is a zero vector,
+       fold to zero vector.  */
+    int step = f.type_suffix (0).element_bytes;
+    if (f.pred != PRED_m || is_ptrue (pg, step))
+      {
+        if (vector_cst_all_same (op1, step)
+            && integer_zerop (VECTOR_CST_ENCODED_ELT (op1, 0)))
+          return gimple_build_assign (f.lhs, op1);
+        if (vector_cst_all_same (op2, step)
+            && integer_zerop (VECTOR_CST_ENCODED_ELT (op2, 0)))
+          return gimple_build_assign (f.lhs, op2);
+      }
+
+    /* Try to fold constant operands.  */
+    if (gimple *new_stmt = f.const_fold (MULT_EXPR))
+      return new_stmt;
+
+    return NULL;
+  }
+};
+
 class svnand_impl : public function_base
 {
 public:
@@ -3203,7 +3237,7 @@ FUNCTION (svmls_lane, svmls_lane_impl,)
FUNCTION (svmmla, svmmla_impl,)
FUNCTION (svmov, svmov_impl,)
FUNCTION (svmsb, svmsb_impl,)
-FUNCTION (svmul, rtx_code_function, (MULT, MULT, UNSPEC_COND_FMUL))
+FUNCTION (svmul, svmul_impl,)
FUNCTION (svmul_lane, CODE_FOR_MODE0 (aarch64_mul_lane),)
FUNCTION (svmulh, unspec_based_function, (UNSPEC_SMUL_HIGHPART,
UNSPEC_UMUL_HIGHPART, -1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/const_fold_mul_1.c b/gcc/testsuite/gcc.target/aarch64/sve/const_fold_mul_1.c
new file mode 100644
index 00000000000..95273e2e57d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/const_fold_mul_1.c
@@ -0,0 +1,128 @@
+/* { dg-final { check-function-bodies "**" "" } } */
+/* { dg-options "-O2" } */
+
+#include "arm_sve.h"
+
+/*
+** s64_x_pg:
+** mov z[0-9]+\.d, #15
+** ret
+*/
+svint64_t s64_x_pg (svbool_t pg)
+{
+ return svmul_x (pg, svdup_s64 (5), svdup_s64 (3));
+}
+
+/*
+** s64_z_pg:
+** mov z[0-9]+\.d, p[0-7]/z, #15
+** ret
+*/
+svint64_t s64_z_pg (svbool_t pg)
+{
+ return svmul_z (pg, svdup_s64 (5), svdup_s64 (3));
+}
+
+/*
+** s64_m_pg:
+** mov (z[0-9]+\.d), #3
+** mov (z[0-9]+\.d), #5
+** mul \2, p[0-7]/m, \2, \1
+** ret
+*/
+svint64_t s64_m_pg (svbool_t pg)
+{
+ return svmul_m (pg, svdup_s64 (5), svdup_s64 (3));
+}
+
+/*
+** s64_x_ptrue:
+** mov z[0-9]+\.d, #15
+** ret
+*/
+svint64_t s64_x_ptrue ()
+{
+ return svmul_x (svptrue_b64 (), svdup_s64 (5), svdup_s64 (3));
+}
+
+/*
+** s64_z_ptrue:
+** mov z[0-9]+\.d, #15
+** ret
+*/
+svint64_t s64_z_ptrue ()
+{
+ return svmul_z (svptrue_b64 (), svdup_s64 (5), svdup_s64 (3));
+}
+
+/*
+** s64_m_ptrue:
+** mov z[0-9]+\.d, #15
+** ret
+*/
+svint64_t s64_m_ptrue ()
+{
+ return svmul_m (svptrue_b64 (), svdup_s64 (5), svdup_s64 (3));
+}
+
+/*
+** u64_x_pg:
+** mov z[0-9]+\.d, #15
+** ret
+*/
+svuint64_t u64_x_pg (svbool_t pg)
+{
+ return svmul_x (pg, svdup_u64 (5), svdup_u64 (3));
+}
+
+/*
+** u64_z_pg:
+** mov z[0-9]+\.d, p[0-7]/z, #15
+** ret
+*/
+svuint64_t u64_z_pg (svbool_t pg)
+{
+ return svmul_z (pg, svdup_u64 (5), svdup_u64 (3));
+}
+
+/*
+** u64_m_pg:
+** mov (z[0-9]+\.d), #3
+** mov (z[0-9]+\.d), #5
+** mul \2, p[0-7]/m, \2, \1
+** ret
+*/
+svuint64_t u64_m_pg (svbool_t pg)
+{
+ return svmul_m (pg, svdup_u64 (5), svdup_u64 (3));
+}
+
+/*
+** u64_x_ptrue:
+** mov z[0-9]+\.d, #15
+** ret
+*/
+svuint64_t u64_x_ptrue ()
+{
+ return svmul_x (svptrue_b64 (), svdup_u64 (5), svdup_u64 (3));
+}
+
+/*
+** u64_z_ptrue:
+** mov z[0-9]+\.d, #15
+** ret
+*/
+svuint64_t u64_z_ptrue ()
+{
+ return svmul_z (svptrue_b64 (), svdup_u64 (5), svdup_u64 (3));
+}
+
+/*
+** u64_m_ptrue:
+** mov z[0-9]+\.d, #15
+** ret
+*/
+svuint64_t u64_m_ptrue ()
+{
+ return svmul_m (svptrue_b64 (), svdup_u64 (5), svdup_u64 (3));
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/const_fold_mul_zero.c b/gcc/testsuite/gcc.target/aarch64/sve/const_fold_mul_zero.c
index 793291449c1..c6295bbc640 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/const_fold_mul_zero.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/const_fold_mul_zero.c
@@ -20,7 +20,7 @@ svint64_t s64_x_pg_op1 (svbool_t pg, svint64_t op2)
*/
svint64_t s64_z_pg_op1 (svbool_t pg, svint64_t op2)
{
- return svdiv_z (pg, svdup_s64 (0), op2);
+ return svmul_z (pg, svdup_s64 (0), op2);
}
/*
@@ -30,7 +30,7 @@ svint64_t s64_z_pg_op1 (svbool_t pg, svint64_t op2)
*/
svint64_t s64_m_pg_op1 (svbool_t pg, svint64_t op2)
{
- return svdiv_m (pg, svdup_s64 (0), op2);
+ return svmul_m (pg, svdup_s64 (0), op2);
}
/*
@@ -40,7 +40,7 @@ svint64_t s64_m_pg_op1 (svbool_t pg, svint64_t op2)
*/
svint64_t s64_x_pg_op2 (svbool_t pg, svint64_t op1)
{
- return svdiv_x (pg, op1, svdup_s64 (0));
+ return svmul_x (pg, op1, svdup_s64 (0));
}
/*
@@ -50,18 +50,17 @@ svint64_t s64_x_pg_op2 (svbool_t pg, svint64_t op1)
*/
svint64_t s64_z_pg_op2 (svbool_t pg, svint64_t op1)
{
- return svdiv_z (pg, op1, svdup_s64 (0));
+ return svmul_z (pg, op1, svdup_s64 (0));
}
/*
** s64_m_pg_op2:
-** mov (z[0-9]+)\.b, #0
-** mul (z[0-9]+\.d), p[0-7]+/m, \2, \1\.d
+** mov z[0-9]+\.d, p[0-7]/m, #0
** ret
*/
svint64_t s64_m_pg_op2 (svbool_t pg, svint64_t op1)
{
- return svdiv_m (pg, op1, svdup_s64 (0));
+ return svmul_m (pg, op1, svdup_s64 (0));
}
/*
@@ -71,7 +70,7 @@ svint64_t s64_m_pg_op2 (svbool_t pg, svint64_t op1)
*/
svint64_t s64_m_ptrue_op1 (svint64_t op2)
{
- return svdiv_m (svptrue_b64 (), svdup_s64 (0), op2);
+ return svmul_m (svptrue_b64 (), svdup_s64 (0), op2);
}
/*
@@ -81,7 +80,7 @@ svint64_t s64_m_ptrue_op1 (svint64_t op2)
*/
svint64_t s64_m_ptrue_op2 (svint64_t op1)
{
- return svdiv_m (svptrue_b64 (), op1, svdup_s64 (0));
+ return svmul_m (svptrue_b64 (), op1, svdup_s64 (0));
}
/*
@@ -91,5 +90,95 @@ svint64_t s64_m_ptrue_op2 (svint64_t op1)
*/
svint64_t s64_m_ptrue_op1_op2 ()
{
- return svdiv_m (svptrue_b64 (), svdup_s64 (0), svdup_s64 (0));
+ return svmul_m (svptrue_b64 (), svdup_s64 (0), svdup_s64 (0));
+}
+
+/*
+** u64_x_pg_op1:
+** mov z[0-9]+\.b, #0
+** ret
+*/
+svuint64_t u64_x_pg_op1 (svbool_t pg, svuint64_t op2)
+{
+ return svmul_x (pg, svdup_u64 (0), op2);
+}
+
+/*
+** u64_z_pg_op1:
+** mov z[0-9]+\.b, #0
+** ret
+*/
+svuint64_t u64_z_pg_op1 (svbool_t pg, svuint64_t op2)
+{
+ return svmul_z (pg, svdup_u64 (0), op2);
+}
+
+/*
+** u64_m_pg_op1:
+** mov z[0-9]+\.d, p[0-7]/z, #0
+** ret
+*/
+svuint64_t u64_m_pg_op1 (svbool_t pg, svuint64_t op2)
+{
+ return svmul_m (pg, svdup_u64 (0), op2);
+}
+
+/*
+** u64_x_pg_op2:
+** mov z[0-9]+\.b, #0
+** ret
+*/
+svuint64_t u64_x_pg_op2 (svbool_t pg, svuint64_t op1)
+{
+ return svmul_x (pg, op1, svdup_u64 (0));
+}
+
+/*
+** u64_z_pg_op2:
+** mov z[0-9]+\.b, #0
+** ret
+*/
+svuint64_t u64_z_pg_op2 (svbool_t pg, svuint64_t op1)
+{
+ return svmul_z (pg, op1, svdup_u64 (0));
+}
+
+/*
+** u64_m_pg_op2:
+** mov z[0-9]+\.d, p[0-7]/m, #0
+** ret
+*/
+svuint64_t u64_m_pg_op2 (svbool_t pg, svuint64_t op1)
+{
+ return svmul_m (pg, op1, svdup_u64 (0));
+}
+
+/*
+** u64_m_ptrue_op1:
+** mov z[0-9]+\.b, #0
+** ret
+*/
+svuint64_t u64_m_ptrue_op1 (svuint64_t op2)
+{
+ return svmul_m (svptrue_b64 (), svdup_u64 (0), op2);
+}
+
+/*
+** u64_m_ptrue_op2:
+** mov z[0-9]+\.b, #0
+** ret
+*/
+svuint64_t u64_m_ptrue_op2 (svuint64_t op1)
+{
+ return svmul_m (svptrue_b64 (), op1, svdup_u64 (0));
+}
+
+/*
+** u64_m_ptrue_op1_op2:
+** mov z[0-9]+\.b, #0
+** ret
+*/
+svuint64_t u64_m_ptrue_op1_op2 ()
+{
+ return svmul_m (svptrue_b64 (), svdup_u64 (0), svdup_u64 (0));
}
--
2.44.0
* Re: [PATCH 2/2] SVE intrinsics: Fold constant operands for svmul
From: Richard Sandiford @ 2024-08-19 12:59 UTC
To: Jennifer Schmitz; +Cc: gcc-patches, Kyrylo Tkachov
Jennifer Schmitz <jschmitz@nvidia.com> writes:
> This patch implements constant folding for svmul. It uses the
> gimple_folder::const_fold function to fold constant integer operands.
> Additionally, if at least one of the operands is a zero vector, svmul is
> folded to a zero vector (in case of ptrue, _x, or _z).
> Tests were added to check the produced assembly for different
> predicates and signed and unsigned integers.
>
> The patch was bootstrapped and regtested on aarch64-linux-gnu, no regression.
> OK for mainline?
>
> Signed-off-by: Jennifer Schmitz <jschmitz@nvidia.com>
>
> gcc/
>
> * config/aarch64/aarch64-sve-builtins-base.cc
> (svmul_impl::fold): Implement function and add constant folding.
>
> gcc/testsuite/
>
> * gcc.target/aarch64/sve/const_fold_mul_1.c: New test.
> * gcc.target/aarch64/sve/const_fold_mul_zero.c: Likewise.
>
> diff --git a/gcc/config/aarch64/aarch64-sve-builtins-base.cc b/gcc/config/aarch64/aarch64-sve-builtins-base.cc
> index 7f948ecc0c7..ef0e11fe327 100644
> --- a/gcc/config/aarch64/aarch64-sve-builtins-base.cc
> +++ b/gcc/config/aarch64/aarch64-sve-builtins-base.cc
> @@ -2019,6 +2019,40 @@ public:
> }
> };
>
> +class svmul_impl : public rtx_code_function
> +{
> +public:
> +  CONSTEXPR svmul_impl ()
> +    : rtx_code_function (MULT, MULT, UNSPEC_COND_FMUL) {}
> +
> +  gimple *
> +  fold (gimple_folder &f) const override
> +  {
> +    tree pg = gimple_call_arg (f.call, 0);
> +    tree op1 = gimple_call_arg (f.call, 1);
> +    tree op2 = gimple_call_arg (f.call, 2);
> +
> +    /* For integers, if one of the operands is a zero vector,
> +       fold to zero vector.  */
> +    int step = f.type_suffix (0).element_bytes;
> +    if (f.pred != PRED_m || is_ptrue (pg, step))
> +      {
> +        if (vector_cst_all_same (op1, step)
> +            && integer_zerop (VECTOR_CST_ENCODED_ELT (op1, 0)))
> +          return gimple_build_assign (f.lhs, op1);
> +        if (vector_cst_all_same (op2, step)
> +            && integer_zerop (VECTOR_CST_ENCODED_ELT (op2, 0)))
> +          return gimple_build_assign (f.lhs, op2);
> +      }
Similarly to part 1, I think we should drop this and just use...
> +
> +    /* Try to fold constant operands.  */
> +    if (gimple *new_stmt = f.const_fold (MULT_EXPR))
> +      return new_stmt;
...this.
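
In other words, the fold would presumably reduce to something like the
sketch below, keeping only the const_fold path (helper names as in the
quoted patch):

  gimple *
  fold (gimple_folder &f) const override
  {
    /* Fold calls whose integer operands are all constant via
       gimple_folder::const_fold; otherwise leave the call alone.  */
    if (gimple *new_stmt = f.const_fold (MULT_EXPR))
      return new_stmt;
    return NULL;
  }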
Thanks,
Richard