From: Kewen Lin <linkw@linux.ibm.com>
To: gcc-patches@gcc.gnu.org
Cc: richard.guenther@gmail.com, richard.sandiford@arm.com,
	segher@kernel.crashing.org, bergner@linux.ibm.com
Subject: [PATCH 6/9] vect: Adjust vectorizable_load costing on VMAT_LOAD_STORE_LANES
Date: Mon, 12 Jun 2023 21:03:27 -0500
Message-ID: <1a263aa46335ad08c0cd198b4c2075560a3ed44d.1686573640.git.linkw@linux.ibm.com>
In-Reply-To: <cover.1686573640.git.linkw@linux.ibm.com>

This patch adjusts the cost handling for VMAT_LOAD_STORE_LANES in
function vectorizable_load; we no longer call function
vect_model_load_cost for that case.

The new costing follows what vect_model_load_cost used to do, so
there should be no functional changes.
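
For reference, below is a small, self-contained toy model of the cost
the inline handling is meant to reproduce.  It is only a sketch: the
function and parameter names are made up for illustration, a unit
per-vector load cost is assumed, and it is not the actual GCC code.

#include <stdio.h>

/* Toy model: IFN_LOAD_LANES loads every vector of the group, so each
   statement in the group is charged one load per copy, and the first
   statement is additionally charged the "gap" vectors whose results
   are never used.  */

static unsigned int
load_lanes_inside_cost (unsigned int group_size, unsigned int live_stmts,
                        unsigned int ncopies, unsigned int load_cost)
{
  unsigned int gaps = group_size - live_stmts; /* unused vector results  */
  unsigned int inside_cost = 0;
  for (unsigned int j = 0; j < ncopies; j++)
    {
      /* Charged once per copy, on the first statement of the group.  */
      inside_cost += gaps * load_cost;
      /* Each live statement is charged one load per copy; summed over
         the group this makes one load-lanes cost GROUP_SIZE loads.  */
      inside_cost += live_stmts * load_cost;
    }
  return inside_cost;
}

int
main (void)
{
  /* Group of 4 with one gap, two copies, unit load cost -> 8.  */
  printf ("inside_cost = %u\n", load_lanes_inside_cost (4, 3, 2, 1));
  return 0;
}

In the patch itself this corresponds to the two vect_get_load_cost
calls in the new costing_p block of the VMAT_LOAD_STORE_LANES branch.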

gcc/ChangeLog:

	* tree-vect-stmts.cc (vectorizable_load): Adjust the cost handling on
	VMAT_LOAD_STORE_LANES without calling vect_model_load_cost.
	(vect_model_load_cost): Remove VMAT_LOAD_STORE_LANES related handling
	and assert it will never get VMAT_LOAD_STORE_LANES.
---
 gcc/tree-vect-stmts.cc | 73 ++++++++++++++++++++++++------------------
 1 file changed, 42 insertions(+), 31 deletions(-)

diff --git a/gcc/tree-vect-stmts.cc b/gcc/tree-vect-stmts.cc
index a3fd0bf879e..4c5ce2ab278 100644
--- a/gcc/tree-vect-stmts.cc
+++ b/gcc/tree-vect-stmts.cc
@@ -1137,7 +1137,8 @@ vect_model_load_cost (vec_info *vinfo,
   gcc_assert (memory_access_type != VMAT_GATHER_SCATTER
 	      && memory_access_type != VMAT_INVARIANT
 	      && memory_access_type != VMAT_ELEMENTWISE
-	      && memory_access_type != VMAT_STRIDED_SLP);
+	      && memory_access_type != VMAT_STRIDED_SLP
+	      && memory_access_type != VMAT_LOAD_STORE_LANES);
 
   unsigned int inside_cost = 0, prologue_cost = 0;
   bool grouped_access_p = STMT_VINFO_GROUPED_ACCESS (stmt_info);
@@ -1176,31 +1177,6 @@ vect_model_load_cost (vec_info *vinfo,
      once per group anyhow.  */
   bool first_stmt_p = (first_stmt_info == stmt_info);
 
-  /* An IFN_LOAD_LANES will load all its vector results, regardless of which
-     ones we actually need.  Account for the cost of unused results.  */
-  if (first_stmt_p && !slp_node && memory_access_type == VMAT_LOAD_STORE_LANES)
-    {
-      unsigned int gaps = DR_GROUP_SIZE (first_stmt_info);
-      stmt_vec_info next_stmt_info = first_stmt_info;
-      do
-	{
-	  gaps -= 1;
-	  next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
-	}
-      while (next_stmt_info);
-      if (gaps)
-	{
-	  if (dump_enabled_p ())
-	    dump_printf_loc (MSG_NOTE, vect_location,
-			     "vect_model_load_cost: %d unused vectors.\n",
-			     gaps);
-	  vect_get_load_cost (vinfo, stmt_info, ncopies * gaps,
-			      alignment_support_scheme, misalignment, false,
-			      &inside_cost, &prologue_cost,
-			      cost_vec, cost_vec, true);
-	}
-    }
-
   /* We assume that the cost of a single load-lanes instruction is
      equivalent to the cost of DR_GROUP_SIZE separate loads.  If a grouped
      access is instead being provided by a load-and-permute operation,
@@ -10110,7 +10086,7 @@ vectorizable_load (vec_info *vinfo,
     }
   tree vec_mask = NULL_TREE;
   poly_uint64 group_elt = 0;
-  unsigned int inside_cost = 0;
+  unsigned int inside_cost = 0, prologue_cost = 0;
   for (j = 0; j < ncopies; j++)
     {
       /* 1. Create the vector or array pointer update chain.  */
@@ -10190,8 +10166,42 @@ vectorizable_load (vec_info *vinfo,
 	dr_chain.create (vec_num);
 
       gimple *new_stmt = NULL;
-      if (memory_access_type == VMAT_LOAD_STORE_LANES && !costing_p)
+      if (memory_access_type == VMAT_LOAD_STORE_LANES)
 	{
+	  if (costing_p)
+	    {
+	      /* An IFN_LOAD_LANES will load all its vector results,
+		 regardless of which ones we actually need.  Account
+		 for the cost of unused results.  */
+	      if (grouped_load && first_stmt_info == stmt_info)
+		{
+		  unsigned int gaps = DR_GROUP_SIZE (first_stmt_info);
+		  stmt_vec_info next_stmt_info = first_stmt_info;
+		  do
+		    {
+		      gaps -= 1;
+		      next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
+		    }
+		  while (next_stmt_info);
+		  if (gaps)
+		    {
+		      if (dump_enabled_p ())
+			dump_printf_loc (MSG_NOTE, vect_location,
+					 "vect_model_load_cost: %d "
+					 "unused vectors.\n",
+					 gaps);
+		      vect_get_load_cost (vinfo, stmt_info, gaps,
+					  alignment_support_scheme,
+					  misalignment, false, &inside_cost,
+					  &prologue_cost, cost_vec, cost_vec,
+					  true);
+		    }
+		}
+	      vect_get_load_cost (vinfo, stmt_info, 1, alignment_support_scheme,
+				  misalignment, false, &inside_cost,
+				  &prologue_cost, cost_vec, cost_vec, true);
+	      continue;
+	    }
 	  tree vec_array;
 
 	  vec_array = create_vector_array (vectype, vec_num);
@@ -10771,13 +10781,14 @@ vec_num_loop_costing_end:
 
   if (costing_p)
     {
-      if (memory_access_type == VMAT_GATHER_SCATTER)
+      if (memory_access_type == VMAT_GATHER_SCATTER
+	  || memory_access_type == VMAT_LOAD_STORE_LANES)
 	{
 	  if (dump_enabled_p ())
 	    dump_printf_loc (MSG_NOTE, vect_location,
 			     "vect_model_load_cost: inside_cost = %u, "
-			     "prologue_cost = 0 .\n",
-			     inside_cost);
+			     "prologue_cost = %u .\n",
+			     inside_cost, prologue_cost);
 	}
       else
 	vect_model_load_cost (vinfo, stmt_info, ncopies, vf, memory_access_type,
-- 
2.31.1


Thread overview: 27+ messages
2023-06-13  2:03 [PATCH 0/9] vect: Move costing next to the transform for vect load Kewen Lin
2023-06-13  2:03 ` [PATCH 1/9] vect: Move vect_model_load_cost next to the transform in vectorizable_load Kewen Lin
2023-06-13  2:03 ` [PATCH 2/9] vect: Adjust vectorizable_load costing on VMAT_GATHER_SCATTER && gs_info.decl Kewen Lin
2023-06-30 11:11   ` Richard Biener
2023-07-03  2:57     ` [PATCH 2/9 v2] " Kewen.Lin
2023-06-13  2:03 ` [PATCH 3/9] vect: Adjust vectorizable_load costing on VMAT_INVARIANT Kewen Lin
2023-06-30 11:18   ` Richard Biener
2023-07-03  2:58     ` [PATCH 3/9 v2] " Kewen.Lin
2023-06-13  2:03 ` [PATCH 4/9] vect: Adjust vectorizable_load costing on VMAT_ELEMENTWISE and VMAT_STRIDED_SLP Kewen Lin
2023-07-02  8:58   ` Richard Sandiford
2023-07-03  3:19     ` Kewen.Lin
2023-07-22 15:58       ` Iain Sandoe
2023-07-24  1:50         ` Kewen.Lin
2023-06-13  2:03 ` [PATCH 5/9] vect: Adjust vectorizable_load costing on VMAT_GATHER_SCATTER Kewen Lin
2023-07-03  3:01   ` [PATCH 5/9 v2] " Kewen.Lin
2023-06-13  2:03 ` [PATCH 6/9] vect: Adjust vectorizable_load costing on VMAT_LOAD_STORE_LANES Kewen Lin [this message]
2023-06-13  2:03 ` [PATCH 7/9] vect: Adjust vectorizable_load costing on VMAT_CONTIGUOUS_REVERSE Kewen Lin
2023-06-13  2:03 ` [PATCH 8/9] vect: Adjust vectorizable_load costing on VMAT_CONTIGUOUS_PERMUTE Kewen Lin
2023-06-14  8:17   ` Hongtao Liu
2023-06-19  7:23     ` Kewen.Lin
2023-06-13  2:03 ` [PATCH 9/9] vect: Adjust vectorizable_load costing on VMAT_CONTIGUOUS Kewen Lin
2023-07-03  3:06   ` [PATCH 9/9 v2] " Kewen.Lin
2023-06-26  6:00 ` [PATCH 0/9] vect: Move costing next to the transform for vect load Kewen.Lin
2023-06-30 11:37 ` Richard Biener
2023-07-02  9:13   ` Richard Sandiford
2023-07-03  3:39   ` Kewen.Lin
2023-07-03  8:42     ` Richard Biener
