From: Kewen Lin <linkw@linux.ibm.com>
To: gcc-patches@gcc.gnu.org
Cc: richard.guenther@gmail.com, richard.sandiford@arm.com
Subject: [PATCH 02/10] vect: Move vect_model_store_cost next to the transform in vectorizable_store
Date: Wed, 13 Sep 2023 22:11:51 -0500 [thread overview]
Message-ID: <1539ec7d34af4e38467420b3aed342d708a64a48.1694657494.git.linkw@linux.ibm.com> (raw)
In-Reply-To: <cover.1694657494.git.linkw@linux.ibm.com>
This patch is an initial step to move costing next to the
transform.  It still adopts vect_model_store_cost for costing,
but moves and duplicates it down according to the handling of
the different vect_memory_access_types and other special
handling needs, in the hope that this makes the subsequent
patches easier to review.  This patch should not have any
functional changes.
gcc/ChangeLog:
	* tree-vect-stmts.cc (vectorizable_store): Move and duplicate the call
	to vect_model_store_cost down to the different transform paths
	according to the handling of the different vect_memory_access_types
	and other special handling needs.
---
gcc/tree-vect-stmts.cc | 79 ++++++++++++++++++++++++++++++++----------
1 file changed, 60 insertions(+), 19 deletions(-)
diff --git a/gcc/tree-vect-stmts.cc b/gcc/tree-vect-stmts.cc
index a5caaf0bca2..36f7c5b9f4b 100644
--- a/gcc/tree-vect-stmts.cc
+++ b/gcc/tree-vect-stmts.cc
@@ -8372,7 +8372,8 @@ vectorizable_store (vec_info *vinfo,
return false;
}
- if (!vec_stmt) /* transformation not required. */
+ bool costing_p = !vec_stmt;
+ if (costing_p) /* transformation not required. */
{
STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) = memory_access_type;
@@ -8401,11 +8402,6 @@ vectorizable_store (vec_info *vinfo,
"Vectorizing an unaligned access.\n");
STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
- vect_model_store_cost (vinfo, stmt_info, ncopies,
- memory_access_type, &gs_info,
- alignment_support_scheme,
- misalignment, vls_type, slp_node, cost_vec);
- return true;
}
gcc_assert (memory_access_type == STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info));
@@ -8415,12 +8411,27 @@ vectorizable_store (vec_info *vinfo,
if (memory_access_type == VMAT_GATHER_SCATTER && gs_info.decl)
{
- vect_build_scatter_store_calls (vinfo, stmt_info, gsi, vec_stmt,
- &gs_info, mask);
+ if (costing_p)
+ vect_model_store_cost (vinfo, stmt_info, ncopies, memory_access_type,
+ &gs_info, alignment_support_scheme, misalignment,
+ vls_type, slp_node, cost_vec);
+ else
+ vect_build_scatter_store_calls (vinfo, stmt_info, gsi, vec_stmt,
+ &gs_info, mask);
return true;
}
else if (STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info) >= 3)
- return vectorizable_scan_store (vinfo, stmt_info, gsi, vec_stmt, ncopies);
+ {
+ gcc_assert (memory_access_type == VMAT_CONTIGUOUS);
+ if (costing_p)
+ {
+ vect_model_store_cost (vinfo, stmt_info, ncopies, memory_access_type,
+ &gs_info, alignment_support_scheme,
+ misalignment, vls_type, slp_node, cost_vec);
+ return true;
+ }
+ return vectorizable_scan_store (vinfo, stmt_info, gsi, vec_stmt, ncopies);
+ }
if (grouped_store)
{
@@ -8449,13 +8460,21 @@ vectorizable_store (vec_info *vinfo,
else
ref_type = reference_alias_ptr_type (DR_REF (first_dr_info->dr));
- if (dump_enabled_p ())
- dump_printf_loc (MSG_NOTE, vect_location,
- "transform store. ncopies = %d\n", ncopies);
+ if (!costing_p && dump_enabled_p ())
+ dump_printf_loc (MSG_NOTE, vect_location, "transform store. ncopies = %d\n",
+ ncopies);
if (memory_access_type == VMAT_ELEMENTWISE
|| memory_access_type == VMAT_STRIDED_SLP)
{
+ if (costing_p)
+ {
+ vect_model_store_cost (vinfo, stmt_info, ncopies, memory_access_type,
+ &gs_info, alignment_support_scheme,
+ misalignment, vls_type, slp_node, cost_vec);
+ return true;
+ }
+
gimple_stmt_iterator incr_gsi;
bool insert_after;
gimple *incr;
@@ -8718,8 +8737,9 @@ vectorizable_store (vec_info *vinfo,
else if (memory_access_type == VMAT_GATHER_SCATTER)
{
aggr_type = elem_type;
- vect_get_strided_load_store_ops (stmt_info, loop_vinfo, gsi, &gs_info,
- &bump, &vec_offset, loop_lens);
+ if (!costing_p)
+ vect_get_strided_load_store_ops (stmt_info, loop_vinfo, gsi, &gs_info,
+ &bump, &vec_offset, loop_lens);
}
else
{
@@ -8731,7 +8751,7 @@ vectorizable_store (vec_info *vinfo,
memory_access_type, loop_lens);
}
- if (mask)
+ if (mask && !costing_p)
LOOP_VINFO_HAS_MASK_STORE (loop_vinfo) = true;
/* In case the vectorization factor (VF) is bigger than the number
@@ -8782,6 +8802,13 @@ vectorizable_store (vec_info *vinfo,
if (memory_access_type == VMAT_LOAD_STORE_LANES)
{
gcc_assert (!slp && grouped_store);
+ if (costing_p)
+ {
+ vect_model_store_cost (vinfo, stmt_info, ncopies, memory_access_type,
+ &gs_info, alignment_support_scheme,
+ misalignment, vls_type, slp_node, cost_vec);
+ return true;
+ }
for (j = 0; j < ncopies; j++)
{
gimple *new_stmt;
@@ -8927,6 +8954,13 @@ vectorizable_store (vec_info *vinfo,
if (memory_access_type == VMAT_GATHER_SCATTER)
{
gcc_assert (!slp && !grouped_store);
+ if (costing_p)
+ {
+ vect_model_store_cost (vinfo, stmt_info, ncopies, memory_access_type,
+ &gs_info, alignment_support_scheme,
+ misalignment, vls_type, slp_node, cost_vec);
+ return true;
+ }
auto_vec<tree> vec_offsets;
for (j = 0; j < ncopies; j++)
{
@@ -9091,7 +9125,7 @@ vectorizable_store (vec_info *vinfo,
for (j = 0; j < ncopies; j++)
{
gimple *new_stmt;
- if (j == 0)
+ if (j == 0 && !costing_p)
{
if (slp)
{
@@ -9158,7 +9192,7 @@ vectorizable_store (vec_info *vinfo,
offset, &dummy, gsi, &ptr_incr,
simd_lane_access_p, bump);
}
- else
+ else if (!costing_p)
{
gcc_assert (!LOOP_VINFO_USING_SELECT_VL_P (loop_vinfo));
/* DR_CHAIN is then used as an input to vect_permute_store_chain().
@@ -9179,7 +9213,7 @@ vectorizable_store (vec_info *vinfo,
}
new_stmt = NULL;
- if (grouped_store)
+ if (!costing_p && grouped_store)
/* Permute. */
vect_permute_store_chain (vinfo, dr_chain, group_size, stmt_info, gsi,
&result_chain);
@@ -9187,6 +9221,8 @@ vectorizable_store (vec_info *vinfo,
stmt_vec_info next_stmt_info = first_stmt_info;
for (i = 0; i < vec_num; i++)
{
+ if (costing_p)
+ continue;
unsigned misalign;
unsigned HOST_WIDE_INT align;
@@ -9361,7 +9397,7 @@ vectorizable_store (vec_info *vinfo,
if (!next_stmt_info)
break;
}
- if (!slp)
+ if (!slp && !costing_p)
{
if (j == 0)
*vec_stmt = new_stmt;
@@ -9369,6 +9405,11 @@ vectorizable_store (vec_info *vinfo,
}
}
+ if (costing_p)
+ vect_model_store_cost (vinfo, stmt_info, ncopies, memory_access_type,
+ &gs_info, alignment_support_scheme, misalignment,
+ vls_type, slp_node, cost_vec);
+
return true;
}
--
2.31.1
next prev parent reply other threads:[~2023-09-14 3:12 UTC|newest]
Thread overview: 23+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-09-14 3:11 [PATCH 00/10] vect: Move costing next to the transform for vect store Kewen Lin
2023-09-14 3:11 ` [PATCH 01/10] vect: Ensure vect store is supported for some VMAT_ELEMENTWISE case Kewen Lin
2023-09-27 11:22 ` Richard Biener
2023-09-14 3:11 ` Kewen Lin [this message]
2023-09-27 11:23 ` [PATCH 02/10] vect: Move vect_model_store_cost next to the transform in vectorizable_store Richard Biener
2023-09-14 3:11 ` [PATCH 03/10] vect: Adjust vectorizable_store costing on VMAT_GATHER_SCATTER Kewen Lin
2023-09-27 11:24 ` Richard Biener
2023-09-14 3:11 ` [PATCH 04/10] vect: Simplify costing on vectorizable_scan_store Kewen Lin
2023-09-27 11:25 ` Richard Biener
2023-09-14 3:11 ` [PATCH 05/10] vect: Adjust vectorizable_store costing on VMAT_ELEMENTWISE and VMAT_STRIDED_SLP Kewen Lin
2023-09-27 11:26 ` Richard Biener
2023-09-14 3:11 ` [PATCH 06/10] vect: Adjust vectorizable_store costing on VMAT_LOAD_STORE_LANES Kewen Lin
2023-09-27 11:27 ` Richard Biener
2023-09-14 3:11 ` [PATCH 07/10] vect: Adjust vectorizable_store costing on VMAT_CONTIGUOUS_PERMUTE Kewen Lin
2023-09-27 11:28 ` Richard Biener
2023-09-14 3:11 ` [PATCH/RFC 08/10] aarch64: Don't use CEIL for vector_store in aarch64_stp_sequence_cost Kewen Lin
2023-09-18 8:41 ` Richard Sandiford
2023-09-18 8:53 ` Richard Biener
2023-09-20 2:40 ` Kewen.Lin
2023-09-14 3:11 ` [PATCH 09/10] vect: Get rid of vect_model_store_cost Kewen Lin
2023-09-27 11:29 ` Richard Biener
2023-09-14 3:11 ` [PATCH 10/10] vect: Consider vec_perm costing for VMAT_CONTIGUOUS_REVERSE Kewen Lin
2023-09-27 11:30 ` Richard Biener
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1539ec7d34af4e38467420b3aed342d708a64a48.1694657494.git.linkw@linux.ibm.com \
--to=linkw@linux.ibm.com \
--cc=gcc-patches@gcc.gnu.org \
--cc=richard.guenther@gmail.com \
--cc=richard.sandiford@arm.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for read-only IMAP folder(s) and NNTP newsgroup(s).