public inbox for gcc-cvs@sourceware.org
* [gcc(refs/users/meissner/heads/dmf004)] Allow for inline code for memcpy to move more than 16 bytes.
@ 2022-11-15 21:22 Michael Meissner
  0 siblings, 0 replies; 2+ messages in thread
From: Michael Meissner @ 2022-11-15 21:22 UTC
  To: gcc-cvs

https://gcc.gnu.org/g:dcd80589b3fe13697d32ab85b9ff2df66774b757

commit dcd80589b3fe13697d32ab85b9ff2df66774b757
Author: Michael Meissner <meissner@linux.ibm.com>
Date:   Tue Nov 15 16:22:36 2022 -0500

    Allow for inline code for memcpy to move more than 16 bytes.
    
    2022-11-15   Michael Meissner  <meissner@linux.ibm.com>
    
    gcc/
    
            * config/rs6000/rs6000-string.cc (toplevel): Include optabs.h.
            (expand_block_move_variable): New helper function, move variable byte
            copy support here.  Add support to move more than 16 bytes.
            (expand_block_move): Move variable copy support to
            expand_block_move_variable.
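
A minimal, hypothetical example of the kind of call this new code path
targets (not from the patch; the function name and the --param value are
assumptions for illustration).  Built with -O2 -mcpu=power9 -m64
--param=rs6000-memcpy-inline-bytes=32, the variable-length copy below can
now be expanded inline instead of always calling the memcpy library
routine:

    #include <string.h>

    void
    copy_small (void *dest, const void *src, size_t n)
    {
      /* n is not a compile-time constant.  The expander emits a run-time
         check of n against the 32-byte cut-off: larger copies still call
         the memcpy library routine, smaller ones are copied inline with
         at most two lxvl/stxvl iterations of up to 16 bytes each.  */
      memcpy (dest, src, n);
    }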

Diff:
---
 gcc/config/rs6000/rs6000-string.cc | 112 ++++++++++++++++++++++++-------------
 gcc/config/rs6000/rs6000.opt       |   4 +-
 2 files changed, 74 insertions(+), 42 deletions(-)

diff --git a/gcc/config/rs6000/rs6000-string.cc b/gcc/config/rs6000/rs6000-string.cc
index 243be4f396d..6e1e51c15fb 100644
--- a/gcc/config/rs6000/rs6000-string.cc
+++ b/gcc/config/rs6000/rs6000-string.cc
@@ -37,6 +37,7 @@
 #include "target.h"
 #include "profile-count.h"
 #include "predict.h"
+#include "optabs.h"
 
 /* Expand a block clear operation, and return 1 if successful.  Return 0
    if we should let the compiler generate normal code.
@@ -2734,6 +2735,75 @@ gen_lxvl_stxvl_move (rtx dest, rtx src, int length)
     return gen_lxvl (dest, addr, len);
 }
 
+/* Expand a block move operation with a variable count using lxvl and stxvl
+   instructions.  */
+
+static void
+expand_block_move_variable (rtx orig_dest,	/* destination address.  */
+			    rtx orig_src,	/* source address.  */
+			    rtx bytes_rtx)	/* bytes to move.  */
+
+{
+  rtx join_label = gen_label_rtx ();
+  rtx inline_label = gen_label_rtx ();
+  rtx dest_addr = copy_addr_to_reg (XEXP (orig_dest, 0));
+  rtx src_addr = copy_addr_to_reg (XEXP (orig_src, 0));
+
+  /* Check if we want to handle this with inline code.  */
+  bytes_rtx = (GET_MODE (bytes_rtx) == Pmode
+	       ? copy_to_reg (bytes_rtx)
+	       : convert_to_mode (Pmode, bytes_rtx, true));
+
+  rtx cr = gen_reg_rtx (CCUNSmode);
+  rtx max_size = GEN_INT (rs6000_memcpy_inline_bytes);
+  emit_insn (gen_rtx_SET (cr,
+			  gen_rtx_COMPARE (CCUNSmode, bytes_rtx, max_size)));
+
+  do_ifelse (CCUNSmode, LEU, NULL_RTX, NULL_RTX, cr, inline_label,
+	     profile_probability::likely ());
+
+  /* Call memcpy if the size is too large.  */
+  tree fun = builtin_decl_explicit (BUILT_IN_MEMCPY);
+  emit_library_call_value (XEXP (DECL_RTL (fun), 0),
+			   NULL_RTX, LCT_NORMAL, Pmode,
+			   dest_addr, Pmode,
+			   src_addr, Pmode,
+			   bytes_rtx, Pmode);
+
+  rtx join_ref = gen_rtx_LABEL_REF (VOIDmode, join_label);
+  emit_jump_insn (gen_rtx_SET (pc_rtx, join_ref));
+  emit_barrier ();
+
+  emit_label (inline_label);
+
+  /* We want to move bytes inline.  Move 0..16 bytes now.  */
+  rtx vreg = gen_reg_rtx (V16QImode);
+  emit_insn (gen_lxvl (vreg, src_addr, bytes_rtx));
+  emit_insn (gen_stxvl (vreg, dest_addr, bytes_rtx));
+
+  /* If we want to move more bytes, adjust the pointers and the length and
+     loop back.  */
+  if (rs6000_memcpy_inline_bytes > GET_MODE_SIZE (V16QImode))
+    {
+      rtx vec_size = GEN_INT (GET_MODE_SIZE (V16QImode));
+      rtx neg_vec_size = GEN_INT (- GET_MODE_SIZE (V16QImode));
+      emit_insn (gen_add2_insn (src_addr, vec_size));
+      emit_insn (gen_add2_insn (dest_addr, vec_size));
+      emit_insn (gen_add2_insn (bytes_rtx, neg_vec_size));
+
+      profile_probability probability
+	= (rs6000_memcpy_inline_bytes >= 2 * GET_MODE_SIZE (V16QImode)
+	   ? profile_probability::likely ()
+	   : profile_probability::unlikely ());
+
+      do_ifelse (CCmode, GT, bytes_rtx, const0_rtx, NULL_RTX, inline_label,
+		 probability);
+    }
+
+  emit_label (join_label);
+  return;
+}
+
 /* Expand a block move operation, and return 1 if successful.  Return 0
    if we should let the compiler generate normal code.
 
@@ -2767,48 +2837,10 @@ expand_block_move (rtx operands[], bool might_overlap)
     {
       if (TARGET_BLOCK_OPS_UNALIGNED_VSX && TARGET_P9_VECTOR && TARGET_64BIT
 	  && rs6000_memcpy_inline_bytes > 0
-	  && rs6000_memcpy_inline_bytes <= GET_MODE_SIZE (V16QImode)
+	  && rs6000_memcpy_inline_bytes <= 255
 	  && optimize && !optimize_size)
 	{
-	  rtx join_label = gen_label_rtx ();
-	  rtx inline_label = gen_label_rtx ();
-	  rtx dest_addr = copy_addr_to_reg (XEXP (orig_dest, 0));
-	  rtx src_addr = copy_addr_to_reg (XEXP (orig_src, 0));
-
-	  /* Check if we want to handle this with inline code.  */
-	  bytes_rtx = (GET_MODE (bytes_rtx) == Pmode
-		       ? copy_to_reg (bytes_rtx)
-		       : convert_to_mode (Pmode, bytes_rtx, true));
-
-	  rtx cr = gen_reg_rtx (CCUNSmode);
-	  rtx max_size = GEN_INT (rs6000_memcpy_inline_bytes);
-	  emit_insn (gen_rtx_SET (cr,
-				  gen_rtx_COMPARE (CCUNSmode, bytes_rtx,
-						   max_size)));
-				  
-	  do_ifelse (CCUNSmode, LEU, NULL_RTX, NULL_RTX, cr,
-		     inline_label, profile_probability::likely ());
-
-	  /* Call memcpy if the size is too large.  */
-	  tree fun = builtin_decl_explicit (BUILT_IN_MEMCPY);
-	  emit_library_call_value (XEXP (DECL_RTL (fun), 0),
-				   NULL_RTX, LCT_NORMAL, Pmode,
-				   dest_addr, Pmode,
-				   src_addr, Pmode,
-				   bytes_rtx, Pmode);
-
-	  rtx join_ref = gen_rtx_LABEL_REF (VOIDmode, join_label);
-	  emit_jump_insn (gen_rtx_SET (pc_rtx, join_ref));
-	  emit_barrier ();
-
-	  emit_label (inline_label);
-
-	  /* We want to move bytes inline.  Move 0..16 bytes now.  */
-	  rtx vreg = gen_reg_rtx (V16QImode);
-	  emit_insn (gen_lxvl (vreg, src_addr, bytes_rtx));
-	  emit_insn (gen_stxvl (vreg, dest_addr, bytes_rtx));
-
-	  emit_label (join_label);
+	  expand_block_move_variable (orig_dest, orig_src, bytes_rtx);
 	  return 1;
 	}
 
diff --git a/gcc/config/rs6000/rs6000.opt b/gcc/config/rs6000/rs6000.opt
index 90dc91a277f..c594877ebc6 100644
--- a/gcc/config/rs6000/rs6000.opt
+++ b/gcc/config/rs6000/rs6000.opt
@@ -689,6 +689,6 @@ When reduction factor computed for a loop exceeds the threshold specified by
 this parameter, prefer to unroll this loop.  The default value is 1.
 
 -param=rs6000-memcpy-inline-bytes=
-Target Undocumented Joined UInteger Var(rs6000_memcpy_inline_bytes) Init(0) Param
+Target Undocumented Joined UInteger Var(rs6000_memcpy_inline_bytes) Init(16) Param
 Maximum number of bytes to move with inline code before calling the memcpy
-library function.  The default value is 0.
+library function.  The default value is 16.


* [gcc(refs/users/meissner/heads/dmf004)] Allow for inline code for memcpy to move more than 16 bytes.
@ 2022-11-17 21:55 Michael Meissner
  0 siblings, 0 replies; 2+ messages in thread
From: Michael Meissner @ 2022-11-17 21:55 UTC
  To: gcc-cvs

https://gcc.gnu.org/g:9ff4722315630f759eec70eb04c837932b10d3ca

commit 9ff4722315630f759eec70eb04c837932b10d3ca
Author: Michael Meissner <meissner@linux.ibm.com>
Date:   Tue Nov 15 16:22:36 2022 -0500

    Allow for inline code for memcpy to move more than 16 bytes.
    
    2022-11-15   Michael Meissner  <meissner@linux.ibm.com>
    
    gcc/
    
            * config/rs6000/rs6000-string.cc (toplevel): Include optabs.h.
            (expand_block_move_variable): New helper function, move variable byte
            copy support here.  Add support to move more than 16 bytes.
            (expand_block_move): Move variable copy support to
            expand_block_move_variable.


end of thread

Thread overview: 2+ messages
2022-11-15 21:22 [gcc(refs/users/meissner/heads/dmf004)] Allow for inline code for memcpy to move more than 16 bytes Michael Meissner
2022-11-17 21:55 Michael Meissner
