public inbox for gcc-cvs@sourceware.org
help / color / mirror / Atom feed
* [gcc r13-379] Strip of a vector load which is only used partially.
@ 2022-05-13 1:04 hongtao Liu
0 siblings, 0 replies; only message in thread
From: hongtao Liu @ 2022-05-13 1:04 UTC (permalink / raw)
To: gcc-cvs
https://gcc.gnu.org/g:8ab4b484153031c407b7d8c760b6a2605da1199a
commit r13-379-g8ab4b484153031c407b7d8c760b6a2605da1199a
Author: liuhongt <hongtao.liu@intel.com>
Date: Fri Apr 8 11:26:46 2022 +0800
Strip of a vector load which is only used partially.
Optimize
_4 = VEC_PERM_EXPR <_1, _1, { 4, 5, 6, 7, 4, 5, 6, 7 }>;
_5 = BIT_FIELD_REF <_4, 128, 0>;
to
_5 = BIT_FIELD_REF <_1, 128, 128>;
gcc/ChangeLog:
PR tree-optimization/102583
* tree-ssa-forwprop.cc (simplify_bitfield_ref): Extended to a
contiguous stride in the VEC_PERM_EXPR.
gcc/testsuite/ChangeLog:
* gcc.target/i386/pr102583.c: New test.
* gcc.target/i386/pr92645-2.c: Adjust testcase.
* gcc.target/i386/pr92645-3.c: Ditto.
Diff:
---
gcc/testsuite/gcc.target/i386/pr102583.c | 30 +++++++++++
gcc/testsuite/gcc.target/i386/pr92645-2.c | 4 +-
gcc/testsuite/gcc.target/i386/pr92645-3.c | 4 +-
gcc/tree-ssa-forwprop.cc | 87 +++++++++++++++++++++----------
4 files changed, 93 insertions(+), 32 deletions(-)
diff --git a/gcc/testsuite/gcc.target/i386/pr102583.c b/gcc/testsuite/gcc.target/i386/pr102583.c
new file mode 100644
index 00000000000..4ef2f296d0c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/pr102583.c
@@ -0,0 +1,30 @@
+/* { dg-do compile } */
+/* { dg-options "-mavx512f -O2" } */
+/* { dg-final { scan-assembler-times {(?n)vcvtdq2ps[ \t]+32\(%.*%ymm} 1 } } */
+/* { dg-final { scan-assembler-times {(?n)vcvtdq2ps[ \t]+16\(%.*%xmm} 1 } } */
+/* { dg-final { scan-assembler-times {(?n)vmovq[ \t]+16\(%.*%xmm} 1 { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler-not {(?n)vpermd[ \t]+.*%zmm} } } */
+
+typedef int v16si __attribute__((vector_size(64)));
+typedef float v8sf __attribute__((vector_size(32)));
+typedef float v4sf __attribute__((vector_size(16)));
+typedef float v2sf __attribute__((vector_size(8)));
+
+v8sf part (v16si *srcp)
+{
+ v16si src = *srcp;
+ return (v8sf) { (float)src[8], (float) src[9], (float)src[10], (float)src[11],
+ (float)src[12], (float)src[13], (float)src[14], (float)src[15] };
+}
+
+v4sf part1 (v16si *srcp)
+{
+ v16si src = *srcp;
+ return (v4sf) { (float)src[4], (float)src[5], (float)src[6], (float)src[7] };
+}
+
+v2sf part2 (v16si *srcp)
+{
+ v16si src = *srcp;
+ return (v2sf) { (float)src[4], (float)src[5] };
+}
diff --git a/gcc/testsuite/gcc.target/i386/pr92645-2.c b/gcc/testsuite/gcc.target/i386/pr92645-2.c
index d34ed3aa8e5..f0608de938a 100644
--- a/gcc/testsuite/gcc.target/i386/pr92645-2.c
+++ b/gcc/testsuite/gcc.target/i386/pr92645-2.c
@@ -29,6 +29,6 @@ void odd (v2si *dst, v4si *srcp)
}
/* { dg-final { scan-tree-dump-times "BIT_FIELD_REF" 4 "cddce1" } } */
-/* { dg-final { scan-tree-dump-times "VEC_PERM_EXPR" 3 "cddce1" } } */
+/* { dg-final { scan-tree-dump-times "VEC_PERM_EXPR" 3 "cddce1" { xfail *-*-* } } } */
/* Ideally highpart extraction would elide the permutation as well. */
-/* { dg-final { scan-tree-dump-times "VEC_PERM_EXPR" 2 "cddce1" { xfail *-*-* } } } */
+/* { dg-final { scan-tree-dump-times "VEC_PERM_EXPR" 2 "cddce1" } } */
diff --git a/gcc/testsuite/gcc.target/i386/pr92645-3.c b/gcc/testsuite/gcc.target/i386/pr92645-3.c
index 9c08c9fb632..691011195c9 100644
--- a/gcc/testsuite/gcc.target/i386/pr92645-3.c
+++ b/gcc/testsuite/gcc.target/i386/pr92645-3.c
@@ -32,6 +32,6 @@ void odd (v4sf *dst, v8si *srcp)
/* Four conversions, on the smaller vector type, to not convert excess
elements. */
/* { dg-final { scan-tree-dump-times " = \\\(vector\\\(4\\\) float\\\)" 4 "cddce1" } } */
-/* { dg-final { scan-tree-dump-times "VEC_PERM_EXPR" 3 "cddce1" } } */
+/* { dg-final { scan-tree-dump-times "VEC_PERM_EXPR" 3 "cddce1" { xfail *-*-* } } } */
/* Ideally highpart extraction would elide the VEC_PERM_EXPR as well. */
-/* { dg-final { scan-tree-dump-times "VEC_PERM_EXPR" 2 "cddce1" { xfail *-*-* } } } */
+/* { dg-final { scan-tree-dump-times "VEC_PERM_EXPR" 2 "cddce1" } } */
diff --git a/gcc/tree-ssa-forwprop.cc b/gcc/tree-ssa-forwprop.cc
index 484491fa1c5..c5b2a4f9f42 100644
--- a/gcc/tree-ssa-forwprop.cc
+++ b/gcc/tree-ssa-forwprop.cc
@@ -2334,8 +2334,10 @@ simplify_bitfield_ref (gimple_stmt_iterator *gsi)
gimple *stmt = gsi_stmt (*gsi);
gimple *def_stmt;
tree op, op0, op1;
- tree elem_type;
- unsigned idx, size;
+ tree elem_type, type;
+ tree p, m, tem;
+ unsigned HOST_WIDE_INT nelts, idx;
+ poly_uint64 size, elem_size;
enum tree_code code;
op = gimple_assign_rhs1 (stmt);
@@ -2353,42 +2355,71 @@ simplify_bitfield_ref (gimple_stmt_iterator *gsi)
op1 = TREE_OPERAND (op, 1);
code = gimple_assign_rhs_code (def_stmt);
elem_type = TREE_TYPE (TREE_TYPE (op0));
- if (TREE_TYPE (op) != elem_type)
- return false;
+ type = TREE_TYPE (op);
+  /* Also handle vector type.
+     i.e.
+ _7 = VEC_PERM_EXPR <_1, _1, { 2, 3, 2, 3 }>;
+ _11 = BIT_FIELD_REF <_7, 64, 0>;
+
+ to
- size = TREE_INT_CST_LOW (TYPE_SIZE (elem_type));
+ _11 = BIT_FIELD_REF <_1, 64, 64>. */
+
+ size = tree_to_poly_uint64 (TYPE_SIZE (type));
if (maybe_ne (bit_field_size (op), size))
return false;
- if (code == VEC_PERM_EXPR
- && constant_multiple_p (bit_field_offset (op), size, &idx))
+ elem_size = tree_to_poly_uint64 (TYPE_SIZE (elem_type));
+ if (code != VEC_PERM_EXPR
+ || !constant_multiple_p (bit_field_offset (op), elem_size, &idx))
+ return false;
+
+ m = gimple_assign_rhs3 (def_stmt);
+ if (TREE_CODE (m) != VECTOR_CST
+ || !VECTOR_CST_NELTS (m).is_constant (&nelts))
+ return false;
+
+ /* One element. */
+ if (known_eq (size, elem_size))
+ idx = TREE_INT_CST_LOW (VECTOR_CST_ELT (m, idx));
+ else
{
- tree p, m, tem;
- unsigned HOST_WIDE_INT nelts;
- m = gimple_assign_rhs3 (def_stmt);
- if (TREE_CODE (m) != VECTOR_CST
- || !VECTOR_CST_NELTS (m).is_constant (&nelts))
+ unsigned HOST_WIDE_INT nelts_op;
+ if (!constant_multiple_p (size, elem_size, &nelts_op)
+ || !pow2p_hwi (nelts_op))
return false;
- idx = TREE_INT_CST_LOW (VECTOR_CST_ELT (m, idx));
- idx %= 2 * nelts;
- if (idx < nelts)
- {
- p = gimple_assign_rhs1 (def_stmt);
- }
- else
+ unsigned start = TREE_INT_CST_LOW (vector_cst_elt (m, idx));
+ unsigned end = TREE_INT_CST_LOW (vector_cst_elt (m, idx + nelts_op - 1));
+ /* Be in the same vector. */
+ if ((start < nelts) != (end < nelts))
+ return false;
+ for (unsigned HOST_WIDE_INT i = 1; i != nelts_op; i++)
{
- p = gimple_assign_rhs2 (def_stmt);
- idx -= nelts;
+ /* Continuous area. */
+ if (TREE_INT_CST_LOW (vector_cst_elt (m, idx + i)) - 1
+ != TREE_INT_CST_LOW (vector_cst_elt (m, idx + i - 1)))
+ return false;
}
- tem = build3 (BIT_FIELD_REF, TREE_TYPE (op),
- unshare_expr (p), op1, bitsize_int (idx * size));
- gimple_assign_set_rhs1 (stmt, tem);
- fold_stmt (gsi);
- update_stmt (gsi_stmt (*gsi));
- return true;
+ /* Alignment not worse than before. */
+ if (start % nelts_op)
+ return false;
+ idx = start;
}
- return false;
+ if (idx < nelts)
+ p = gimple_assign_rhs1 (def_stmt);
+ else
+ {
+ p = gimple_assign_rhs2 (def_stmt);
+ idx -= nelts;
+ }
+
+ tem = build3 (BIT_FIELD_REF, TREE_TYPE (op),
+ p, op1, bitsize_int (idx * elem_size));
+ gimple_assign_set_rhs1 (stmt, tem);
+ fold_stmt (gsi);
+ update_stmt (gsi_stmt (*gsi));
+ return true;
}
/* Determine whether applying the 2 permutations (mask1 then mask2)
^ permalink raw reply [flat|nested] only message in thread
only message in thread, other threads:[~2022-05-13 1:04 UTC | newest]
Thread overview: (only message) (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2022-05-13 1:04 [gcc r13-379] Strip of a vector load which is only used partially hongtao Liu
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for read-only IMAP folder(s) and NNTP newsgroup(s).