public inbox for gcc-patches@gcc.gnu.org
From: Richard Biener <richard.guenther@gmail.com>
To: Kewen Lin <linkw@linux.ibm.com>
Cc: gcc-patches@gcc.gnu.org, richard.sandiford@arm.com
Subject: Re: [PATCH 09/10] vect: Get rid of vect_model_store_cost
Date: Wed, 27 Sep 2023 13:29:40 +0200	[thread overview]
Message-ID: <CAFiYyc0pit4Ph=gu+sxUkvogkZcGyQw4fWSUCkC7Wcxc=cMMng@mail.gmail.com> (raw)
In-Reply-To: <b2f2a8081d2ffd2459b0ff161a559e502511d8a5.1694657494.git.linkw@linux.ibm.com>

On Thu, Sep 14, 2023 at 5:12 AM Kewen Lin <linkw@linux.ibm.com> wrote:
>
> This patch finally gets rid of vect_model_store_cost.  It
> adjusts the costing for the remaining memory access types
> VMAT_CONTIGUOUS{, _DOWN, _REVERSE} by moving the costing close
> to the transform code.  Note that vect_model_store_cost has one
> piece of special handling, for vectorizing a store into the
> function result: it is an extra penalty that the transform part
> does not model, so this patch keeps that handling as is.

OK.

> gcc/ChangeLog:
>
>         * tree-vect-stmts.cc (vect_model_store_cost): Remove.
>         (vectorizable_store): Adjust the costing for the remaining memory
>         access types VMAT_CONTIGUOUS{, _DOWN, _REVERSE}.
> ---
>  gcc/tree-vect-stmts.cc | 137 +++++++++++++----------------------------
>  1 file changed, 44 insertions(+), 93 deletions(-)
>
> diff --git a/gcc/tree-vect-stmts.cc b/gcc/tree-vect-stmts.cc
> index e3ba8077091..3d451c80bca 100644
> --- a/gcc/tree-vect-stmts.cc
> +++ b/gcc/tree-vect-stmts.cc
> @@ -951,81 +951,6 @@ cfun_returns (tree decl)
>    return false;
>  }
>
> -/* Function vect_model_store_cost
> -
> -   Models cost for stores.  In the case of grouped accesses, one access
> -   has the overhead of the grouped access attributed to it.  */
> -
> -static void
> -vect_model_store_cost (vec_info *vinfo, stmt_vec_info stmt_info, int ncopies,
> -                      vect_memory_access_type memory_access_type,
> -                      dr_alignment_support alignment_support_scheme,
> -                      int misalignment,
> -                      vec_load_store_type vls_type, slp_tree slp_node,
> -                      stmt_vector_for_cost *cost_vec)
> -{
> -  gcc_assert (memory_access_type != VMAT_GATHER_SCATTER
> -             && memory_access_type != VMAT_ELEMENTWISE
> -             && memory_access_type != VMAT_STRIDED_SLP
> -             && memory_access_type != VMAT_LOAD_STORE_LANES
> -             && memory_access_type != VMAT_CONTIGUOUS_PERMUTE);
> -
> -  unsigned int inside_cost = 0, prologue_cost = 0;
> -
> -  /* ???  Somehow we need to fix this at the callers.  */
> -  if (slp_node)
> -    ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
> -
> -  if (vls_type == VLS_STORE_INVARIANT)
> -    {
> -      if (!slp_node)
> -       prologue_cost += record_stmt_cost (cost_vec, 1, scalar_to_vec,
> -                                          stmt_info, 0, vect_prologue);
> -    }
> -
> -
> -  /* Costs of the stores.  */
> -  vect_get_store_cost (vinfo, stmt_info, ncopies, alignment_support_scheme,
> -                      misalignment, &inside_cost, cost_vec);
> -
> -  /* When vectorizing a store into the function result assign
> -     a penalty if the function returns in a multi-register location.
> -     In this case we assume we'll end up with having to spill the
> -     vector result and do piecewise loads as a conservative estimate.  */
> -  tree base = get_base_address (STMT_VINFO_DATA_REF (stmt_info)->ref);
> -  if (base
> -      && (TREE_CODE (base) == RESULT_DECL
> -         || (DECL_P (base) && cfun_returns (base)))
> -      && !aggregate_value_p (base, cfun->decl))
> -    {
> -      rtx reg = hard_function_value (TREE_TYPE (base), cfun->decl, 0, 1);
> -      /* ???  Handle PARALLEL in some way.  */
> -      if (REG_P (reg))
> -       {
> -         int nregs = hard_regno_nregs (REGNO (reg), GET_MODE (reg));
> -         /* Assume that a single reg-reg move is possible and cheap,
> -            do not account for vector to gp register move cost.  */
> -         if (nregs > 1)
> -           {
> -             /* Spill.  */
> -             prologue_cost += record_stmt_cost (cost_vec, ncopies,
> -                                                vector_store,
> -                                                stmt_info, 0, vect_epilogue);
> -             /* Loads.  */
> -             prologue_cost += record_stmt_cost (cost_vec, ncopies * nregs,
> -                                                scalar_load,
> -                                                stmt_info, 0, vect_epilogue);
> -           }
> -       }
> -    }
> -
> -  if (dump_enabled_p ())
> -    dump_printf_loc (MSG_NOTE, vect_location,
> -                     "vect_model_store_cost: inside_cost = %d, "
> -                     "prologue_cost = %d .\n", inside_cost, prologue_cost);
> -}
> -
> -
>  /* Calculate cost of DR's memory access.  */
>  void
>  vect_get_store_cost (vec_info *, stmt_vec_info stmt_info, int ncopies,
> @@ -9223,6 +9148,11 @@ vectorizable_store (vec_info *vinfo,
>        return true;
>      }
>
> +  gcc_assert (memory_access_type == VMAT_CONTIGUOUS
> +             || memory_access_type == VMAT_CONTIGUOUS_DOWN
> +             || memory_access_type == VMAT_CONTIGUOUS_PERMUTE
> +             || memory_access_type == VMAT_CONTIGUOUS_REVERSE);
> +
>    unsigned inside_cost = 0, prologue_cost = 0;
>    auto_vec<tree> result_chain (group_size);
>    auto_vec<tree, 1> vec_oprnds;
> @@ -9257,10 +9187,9 @@ vectorizable_store (vec_info *vinfo,
>                      that there is no interleaving, DR_GROUP_SIZE is 1,
>                      and only one iteration of the loop will be executed.  */
>                   op = vect_get_store_rhs (next_stmt_info);
> -                 if (costing_p
> -                     && memory_access_type == VMAT_CONTIGUOUS_PERMUTE)
> +                 if (costing_p)
>                     update_prologue_cost (&prologue_cost, op);
> -                 else if (!costing_p)
> +                 else
>                     {
>                       vect_get_vec_defs_for_operand (vinfo, next_stmt_info,
>                                                      ncopies, op,
> @@ -9352,10 +9281,9 @@ vectorizable_store (vec_info *vinfo,
>         {
>           if (costing_p)
>             {
> -             if (memory_access_type == VMAT_CONTIGUOUS_PERMUTE)
> -               vect_get_store_cost (vinfo, stmt_info, 1,
> -                                    alignment_support_scheme, misalignment,
> -                                    &inside_cost, cost_vec);
> +             vect_get_store_cost (vinfo, stmt_info, 1,
> +                                  alignment_support_scheme, misalignment,
> +                                  &inside_cost, cost_vec);
>
>               if (!slp)
>                 {
> @@ -9550,18 +9478,41 @@ vectorizable_store (vec_info *vinfo,
>
>    if (costing_p)
>      {
> -      if (memory_access_type == VMAT_CONTIGUOUS_PERMUTE)
> -       {
> -         if (dump_enabled_p ())
> -           dump_printf_loc (MSG_NOTE, vect_location,
> -                            "vect_model_store_cost: inside_cost = %d, "
> -                            "prologue_cost = %d .\n",
> -                            inside_cost, prologue_cost);
> +      /* When vectorizing a store into the function result assign
> +        a penalty if the function returns in a multi-register location.
> +        In this case we assume we'll end up with having to spill the
> +        vector result and do piecewise loads as a conservative estimate.  */
> +      tree base = get_base_address (STMT_VINFO_DATA_REF (stmt_info)->ref);
> +      if (base
> +         && (TREE_CODE (base) == RESULT_DECL
> +             || (DECL_P (base) && cfun_returns (base)))
> +         && !aggregate_value_p (base, cfun->decl))
> +       {
> +         rtx reg = hard_function_value (TREE_TYPE (base), cfun->decl, 0, 1);
> +         /* ???  Handle PARALLEL in some way.  */
> +         if (REG_P (reg))
> +           {
> +             int nregs = hard_regno_nregs (REGNO (reg), GET_MODE (reg));
> +             /* Assume that a single reg-reg move is possible and cheap,
> +                do not account for vector to gp register move cost.  */
> +             if (nregs > 1)
> +               {
> +                 /* Spill.  */
> +                 prologue_cost
> +                   += record_stmt_cost (cost_vec, ncopies, vector_store,
> +                                        stmt_info, 0, vect_epilogue);
> +                 /* Loads.  */
> +                 prologue_cost
> +                   += record_stmt_cost (cost_vec, ncopies * nregs, scalar_load,
> +                                        stmt_info, 0, vect_epilogue);
> +               }
> +           }
>         }
> -      else
> -       vect_model_store_cost (vinfo, stmt_info, ncopies, memory_access_type,
> -                              alignment_support_scheme, misalignment, vls_type,
> -                              slp_node, cost_vec);
> +      if (dump_enabled_p ())
> +       dump_printf_loc (MSG_NOTE, vect_location,
> +                        "vect_model_store_cost: inside_cost = %d, "
> +                        "prologue_cost = %d .\n",
> +                        inside_cost, prologue_cost);
>      }
>
>    return true;
> --
> 2.31.1
>
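
A minimal sketch of the case that kept penalty models (hypothetical
code, not from the patch): a function whose result is returned in
registers rather than memory, so the vectorized stores into it may
have to be spilled and reloaded piecewise.  Whether the penalty
actually fires depends on how the target returns the value; as the
??? comment above notes, PARALLEL return locations are skipped.

    /* Hypothetical example: stores into the function result.  The
       loop can be vectorized, and if the target returns 'struct
       vec4' as one hard register spanning several registers
       (nregs > 1), the costing above adds the spill/load penalty.  */
    struct vec4 { float v[4]; };

    struct vec4
    splat (float x)
    {
      struct vec4 r;
      for (int i = 0; i < 4; i++)
        r.v[i] = x;   /* vectorizable stores into the result decl  */
      return r;
    }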


Thread overview: 23+ messages
2023-09-14  3:11 [PATCH 00/10] vect: Move costing next to the transform for vect store Kewen Lin
2023-09-14  3:11 ` [PATCH 01/10] vect: Ensure vect store is supported for some VMAT_ELEMENTWISE case Kewen Lin
2023-09-27 11:22   ` Richard Biener
2023-09-14  3:11 ` [PATCH 02/10] vect: Move vect_model_store_cost next to the transform in vectorizable_store Kewen Lin
2023-09-27 11:23   ` Richard Biener
2023-09-14  3:11 ` [PATCH 03/10] vect: Adjust vectorizable_store costing on VMAT_GATHER_SCATTER Kewen Lin
2023-09-27 11:24   ` Richard Biener
2023-09-14  3:11 ` [PATCH 04/10] vect: Simplify costing on vectorizable_scan_store Kewen Lin
2023-09-27 11:25   ` Richard Biener
2023-09-14  3:11 ` [PATCH 05/10] vect: Adjust vectorizable_store costing on VMAT_ELEMENTWISE and VMAT_STRIDED_SLP Kewen Lin
2023-09-27 11:26   ` Richard Biener
2023-09-14  3:11 ` [PATCH 06/10] vect: Adjust vectorizable_store costing on VMAT_LOAD_STORE_LANES Kewen Lin
2023-09-27 11:27   ` Richard Biener
2023-09-14  3:11 ` [PATCH 07/10] vect: Adjust vectorizable_store costing on VMAT_CONTIGUOUS_PERMUTE Kewen Lin
2023-09-27 11:28   ` Richard Biener
2023-09-14  3:11 ` [PATCH/RFC 08/10] aarch64: Don't use CEIL for vector_store in aarch64_stp_sequence_cost Kewen Lin
2023-09-18  8:41   ` Richard Sandiford
2023-09-18  8:53     ` Richard Biener
2023-09-20  2:40       ` Kewen.Lin
2023-09-14  3:11 ` [PATCH 09/10] vect: Get rid of vect_model_store_cost Kewen Lin
2023-09-27 11:29   ` Richard Biener [this message]
2023-09-14  3:11 ` [PATCH 10/10] vect: Consider vec_perm costing for VMAT_CONTIGUOUS_REVERSE Kewen Lin
2023-09-27 11:30   ` Richard Biener
