public inbox for gcc-cvs@sourceware.org
* [gcc(refs/users/meissner/heads/dmf004)] Tweak variable move support.
@ 2022-11-17 21:55 Michael Meissner
0 siblings, 0 replies; 2+ messages in thread
From: Michael Meissner @ 2022-11-17 21:55 UTC (permalink / raw)
To: gcc-cvs
https://gcc.gnu.org/g:7832dfa3cfe5d0dcaa24d33ec643d81592c82f5f
commit 7832dfa3cfe5d0dcaa24d33ec643d81592c82f5f
Author: Michael Meissner <meissner@linux.ibm.com>
Date: Wed Nov 16 13:10:43 2022 -0500
Tweak variable move support.
2022-11-16 Michael Meissner <meissner@linux.ibm.com>
gcc/
* config/rs6000/rs6000-string.cc (expand_block_move_variable): Move
testing whether we want to do variable moves here. For now, limit moves
to 0..16 bytes.
(expand_block_move): Move the variable-move tests to
expand_block_move_variable.
Diff:
---
gcc/config/rs6000/rs6000-string.cc | 92 +++++++++++++++++---------------------
1 file changed, 40 insertions(+), 52 deletions(-)
diff --git a/gcc/config/rs6000/rs6000-string.cc b/gcc/config/rs6000/rs6000-string.cc
index 038f194c32c..d14ee1da289 100644
--- a/gcc/config/rs6000/rs6000-string.cc
+++ b/gcc/config/rs6000/rs6000-string.cc
@@ -2735,31 +2735,48 @@ gen_lxvl_stxvl_move (rtx dest, rtx src, int length)
return gen_lxvl (dest, addr, len);
}
-/* Expand a block move operation with a variable count using lxvl and stxvl
- instructions. */
+/* Expand a variable block move operation, and return 1 if successful. Return
+ 0 if we should let the compiler generate normal code.
-static void
-expand_block_move_variable (rtx orig_dest, /* destiation address. */
- rtx orig_src, /* source address. */
- rtx bytes_rtx) /* bytes to move. */
+ operands[0] is the destination
+ operands[1] is the source
+ operands[2] is the length
+ operands[3] is the alignment */
+static int
+expand_block_move_variable (rtx operands[], bool might_overlap)
{
- rtx join_label = gen_label_rtx ();
- rtx inline_label = gen_label_rtx ();
- rtx dest_addr = copy_addr_to_reg (XEXP (orig_dest, 0));
- rtx src_addr = copy_addr_to_reg (XEXP (orig_src, 0));
+ /* See if we have the necessary support for load/store vector with length,
+ and we want to do the optimization. */
+ if (!TARGET_BLOCK_OPS_UNALIGNED_VSX
+ || !TARGET_P9_VECTOR
+ || !TARGET_64BIT
+ || rs6000_memcpy_inline_bytes == 0
+ || might_overlap
+ || !optimize
+ || optimize_size)
+ return 0;
+
+ rtx dest_addr = copy_addr_to_reg (XEXP (operands[0], 0));
+ rtx src_addr = copy_addr_to_reg (XEXP (operands[1], 0));
/* Check if we want to handle this with inline code. */
- bytes_rtx = (GET_MODE (bytes_rtx) == Pmode
- ? copy_to_reg (bytes_rtx)
- : convert_to_mode (Pmode, bytes_rtx, true));
-
- rtx cr = gen_reg_rtx (CCUNSmode);
- rtx max_size = GEN_INT (rs6000_memcpy_inline_bytes);
- emit_insn (gen_rtx_SET (cr,
- gen_rtx_COMPARE (CCUNSmode, bytes_rtx, max_size)));
+ rtx bytes_rtx = (GET_MODE (operands[2]) == Pmode
+ ? copy_to_reg (operands[2])
+ : convert_to_mode (Pmode, operands[2], true));
+
+ HOST_WIDE_INT var_size_int
+ = (rs6000_memcpy_inline_bytes > GET_MODE_SIZE (V16QImode)
+ ? GET_MODE_SIZE (V16QImode)
+ : rs6000_memcpy_inline_bytes);
+
+ rtx var_size = GEN_INT (var_size_int);
+ rtx var_cr = gen_reg_rtx (CCUNSmode);
+ emit_insn (gen_rtx_SET (var_cr,
+ gen_rtx_COMPARE (CCUNSmode, bytes_rtx, var_size)));
- do_ifelse (CCUNSmode, LEU, NULL_RTX, NULL_RTX, cr, inline_label,
+ rtx var_label = gen_label_rtx ();
+ do_ifelse (CCUNSmode, LEU, NULL_RTX, NULL_RTX, var_cr, var_label,
profile_probability::likely ());
/* Call memcpy if the size is too large. */
@@ -2770,38 +2787,20 @@ expand_block_move_variable (rtx orig_dest, /* destiation address. */
src_addr, Pmode,
bytes_rtx, Pmode);
+ rtx join_label = gen_label_rtx ();
rtx join_ref = gen_rtx_LABEL_REF (VOIDmode, join_label);
emit_jump_insn (gen_rtx_SET (pc_rtx, join_ref));
emit_barrier ();
- emit_label (inline_label);
+ emit_label (var_label);
/* We want to move bytes inline. Move 0..16 bytes now. */
rtx vreg = gen_reg_rtx (V16QImode);
emit_insn (gen_lxvl (vreg, src_addr, bytes_rtx));
emit_insn (gen_stxvl (vreg, dest_addr, bytes_rtx));
- /* If we want to move more byes, adjust the pointers and lengths and
- loop back. */
- if (rs6000_memcpy_inline_bytes > GET_MODE_SIZE (V16QImode))
- {
- rtx vec_size = GEN_INT (GET_MODE_SIZE (V16QImode));
- rtx neg_vec_size = GEN_INT (- GET_MODE_SIZE (V16QImode));
- emit_insn (gen_add2_insn (src_addr, vec_size));
- emit_insn (gen_add2_insn (dest_addr, vec_size));
- emit_insn (gen_add2_insn (bytes_rtx, neg_vec_size));
-
- profile_probability probability
- = (rs6000_memcpy_inline_bytes >= 2 * GET_MODE_SIZE (V16QImode)
- ? profile_probability::likely ()
- : profile_probability::unlikely ());
-
- do_ifelse (CCmode, GT, bytes_rtx, const0_rtx, NULL_RTX, inline_label,
- probability);
- }
-
emit_label (join_label);
- return;
+ return 1;
}
/* Expand a block move operation, and return 1 if successful. Return 0
@@ -2834,18 +2833,7 @@ expand_block_move (rtx operands[], bool might_overlap)
length to handle multiple bytes. Don't do the optimization if -Os.
Otherwise, just call memcpy. */
if (! constp)
- {
- if (TARGET_BLOCK_OPS_UNALIGNED_VSX && TARGET_P9_VECTOR && TARGET_64BIT
- && rs6000_memcpy_inline_bytes > 0
- && rs6000_memcpy_inline_bytes <= 255
- && !might_overlap && optimize && !optimize_size)
- {
- expand_block_move_variable (orig_dest, orig_src, bytes_rtx);
- return 1;
- }
-
- return 0;
- }
+ return expand_block_move_variable (operands, might_overlap);
/* This must be a fixed size alignment */
gcc_assert (CONST_INT_P (align_rtx));
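
For context, here is a small, hypothetical caller (not part of the commit)
illustrating the kind of block move the reworked expand_block_move_variable
path targets: a memcpy whose length is only known at run time.  The function
name and the exact option defaults are assumptions; the behaviour described
follows the diff above.

#include <string.h>

/* Hypothetical example.  Assuming a 64-bit Power9 (or newer) target with
   TARGET_BLOCK_OPS_UNALIGNED_VSX set and a non-zero
   rs6000_memcpy_inline_bytes, the expander compares n against the inline
   limit (rs6000_memcpy_inline_bytes, now capped at 16, the size of
   V16QImode).  Lengths at or below the limit are moved inline with a
   single lxvl/stxvl pair; larger lengths branch to a library memcpy
   call instead.  */
void
copy_var (char *dest, const char *src, size_t n)
{
  memcpy (dest, src, n);
}

The 16-byte cap is what lets the patch drop the old adjust-and-loop code
in the diff: one V16QImode register covers the whole inline case, so no
pointer or length updates are needed.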
* [gcc(refs/users/meissner/heads/dmf004)] Tweak variable move support.
@ 2022-11-16 18:11 Michael Meissner
0 siblings, 0 replies; 2+ messages in thread
From: Michael Meissner @ 2022-11-16 18:11 UTC (permalink / raw)
To: gcc-cvs
https://gcc.gnu.org/g:d58c343538a5dc13cd5eb369c946053b4b0bbfc5
commit d58c343538a5dc13cd5eb369c946053b4b0bbfc5
Author: Michael Meissner <meissner@linux.ibm.com>
Date: Wed Nov 16 13:10:43 2022 -0500
Tweak variable move support.
2022-11-16 Michael Meissner <meissner@linux.ibm.com>
gcc/
* config/rs6000/rs6000-string.cc (expand_block_move_variable): Move
testing whether we want to do variable moves here. For now, limit moves
to 0..16 bytes.
(expand_block_move): Move the variable-move tests to
expand_block_move_variable.