* [Patchv2, rs6000] Correct definition of macro of fixed point efficient unaligned
@ 2023-12-18  2:43 HAO CHEN GUI
From: HAO CHEN GUI @ 2023-12-18  2:43 UTC
  To: gcc-patches; +Cc: Segher Boessenkool, David, Kewen.Lin, Peter Bergner

Hi,
  The patch removes the incorrectly defined macro
TARGET_EFFICIENT_OVERLAPPING_UNALIGNED and replaces its uses with calls
to slow_unaligned_access.

  Compared with the last version,
https://gcc.gnu.org/pipermail/gcc-patches/2023-December/640076.html
the main change is to replace the macro with calls to
slow_unaligned_access.

  Bootstrapped and tested on x86 and powerpc64-linux BE and LE with no
regressions. Is this OK for trunk?

Thanks
Gui Haochen

ChangeLog
rs6000: Correct definition of macro of fixed point efficient unaligned

The macro TARGET_EFFICIENT_OVERLAPPING_UNALIGNED is used in
rs6000-string.cc to guard platforms on which unaligned fixed-point
loads and stores are efficient.  It was originally defined as
TARGET_EFFICIENT_UNALIGNED_VSX, which is enabled from Power8 on and can
be disabled by the -mno-vsx option.  So the definition is wrong: a VSX
option should not gate fixed-point accesses.  This patch corrects the
problem by calling slow_unaligned_access to judge whether unaligned
fixed-point loads and stores are efficient.
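
For context, an editor's sketch (not part of the patch) of why calling
the hook fixes the -mno-vsx problem: in the rs6000 implementation of
the hook, TARGET_EFFICIENT_UNALIGNED_VSX is, as far as I recall, only
consulted for floating-point and vector modes, so unaligned fixed-point
accesses stay efficient regardless of -mno-vsx.  Simplified sketch,
renamed to mark it as illustrative; see rs6000_slow_unaligned_access in
rs6000.cc for the authoritative code:

  /* Editor's simplified sketch of the rs6000 slow_unaligned_access
     hook.  Fixed-point modes are only "slow" under -mstrict-align
     (STRICT_ALIGNMENT); the VSX flag only matters for floating-point
     and vector modes.  */
  static bool
  rs6000_slow_unaligned_access_sketch (machine_mode mode,
				       unsigned int align)
  {
    if (STRICT_ALIGNMENT)
      return true;
    if (!TARGET_EFFICIENT_UNALIGNED_VSX
	&& ((SCALAR_FLOAT_MODE_P (mode) && align < 32)
	    || (VECTOR_MODE_P (mode) && align < 128)))
      return true;
    /* Fixed-point unaligned accesses are efficient.  */
    return false;
  }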

gcc/
	* config/rs6000/rs6000.h (TARGET_EFFICIENT_OVERLAPPING_UNALIGNED):
	Remove.
	* config/rs6000/rs6000-string.cc (select_block_compare_mode):
	Replace TARGET_EFFICIENT_OVERLAPPING_UNALIGNED with
	targetm.slow_unaligned_access.
	(expand_block_compare_gpr): Likewise.
	(expand_block_compare): Likewise.
	(expand_strncmp_gpr_sequence): Likewise.

gcc/testsuite/
	* gcc.target/powerpc/block-cmp-1.c: New.
	* gcc.target/powerpc/block-cmp-2.c: New.

patch.diff
diff --git a/gcc/config/rs6000/rs6000-string.cc b/gcc/config/rs6000/rs6000-string.cc
index 44a946cd453..cb9eeef05d8 100644
--- a/gcc/config/rs6000/rs6000-string.cc
+++ b/gcc/config/rs6000/rs6000-string.cc
@@ -305,7 +305,7 @@ select_block_compare_mode (unsigned HOST_WIDE_INT offset,
   else if (bytes == GET_MODE_SIZE (QImode))
     return QImode;
   else if (bytes < GET_MODE_SIZE (SImode)
-	   && TARGET_EFFICIENT_OVERLAPPING_UNALIGNED
+	   && !targetm.slow_unaligned_access (SImode, align)
 	   && offset >= GET_MODE_SIZE (SImode) - bytes)
     /* This matches the case were we have SImode and 3 bytes
        and offset >= 1 and permits us to move back one and overlap
@@ -313,7 +313,7 @@ select_block_compare_mode (unsigned HOST_WIDE_INT offset,
        unwanted bytes off of the input.  */
     return SImode;
   else if (word_mode_ok && bytes < UNITS_PER_WORD
-	   && TARGET_EFFICIENT_OVERLAPPING_UNALIGNED
+	   && !targetm.slow_unaligned_access (word_mode, align)
 	   && offset >= UNITS_PER_WORD-bytes)
     /* Similarly, if we can use DImode it will get matched here and
        can do an overlapping read that ends at the end of the block.  */
@@ -1749,7 +1749,7 @@ expand_block_compare_gpr(unsigned HOST_WIDE_INT bytes, unsigned int base_align,
       load_mode_size = GET_MODE_SIZE (load_mode);
       if (bytes >= load_mode_size)
 	cmp_bytes = load_mode_size;
-      else if (TARGET_EFFICIENT_OVERLAPPING_UNALIGNED)
+      else if (!targetm.slow_unaligned_access (load_mode, align))
 	{
 	  /* Move this load back so it doesn't go past the end.
 	     P8/P9 can do this efficiently.  */
@@ -2026,7 +2026,7 @@ expand_block_compare (rtx operands[])
   /* The code generated for p7 and older is not faster than glibc
      memcmp if alignment is small and length is not short, so bail
      out to avoid those conditions.  */
-  if (!TARGET_EFFICIENT_OVERLAPPING_UNALIGNED
+  if (targetm.slow_unaligned_access (word_mode, UINTVAL (align_rtx))
       && ((base_align == 1 && bytes > 16)
 	  || (base_align == 2 && bytes > 32)))
     return false;
@@ -2168,7 +2168,7 @@ expand_strncmp_gpr_sequence (unsigned HOST_WIDE_INT bytes_to_compare,
       load_mode_size = GET_MODE_SIZE (load_mode);
       if (bytes_to_compare >= load_mode_size)
 	cmp_bytes = load_mode_size;
-      else if (TARGET_EFFICIENT_OVERLAPPING_UNALIGNED)
+      else if (!targetm.slow_unaligned_access (load_mode, align))
 	{
 	  /* Move this load back so it doesn't go past the end.
 	     P8/P9 can do this efficiently.  */
diff --git a/gcc/config/rs6000/rs6000.h b/gcc/config/rs6000/rs6000.h
index 326c45221e9..3971a56c588 100644
--- a/gcc/config/rs6000/rs6000.h
+++ b/gcc/config/rs6000/rs6000.h
@@ -483,10 +483,6 @@ extern int rs6000_vector_align[];
 #define TARGET_NO_SF_SUBREG	TARGET_DIRECT_MOVE_64BIT
 #define TARGET_ALLOW_SF_SUBREG	(!TARGET_DIRECT_MOVE_64BIT)

-/* This wants to be set for p8 and newer.  On p7, overlapping unaligned
-   loads are slow. */
-#define TARGET_EFFICIENT_OVERLAPPING_UNALIGNED TARGET_EFFICIENT_UNALIGNED_VSX
-
 /* Byte/char syncs were added as phased in for ISA 2.06B, but are not present
    in power7, so conditionalize them on p8 features.  TImode syncs need quad
    memory support.  */
diff --git a/gcc/testsuite/gcc.target/powerpc/block-cmp-1.c b/gcc/testsuite/gcc.target/powerpc/block-cmp-1.c
new file mode 100644
index 00000000000..bcf0cb2ab4f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/powerpc/block-cmp-1.c
@@ -0,0 +1,11 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -mdejagnu-cpu=power8 -mno-vsx" } */
+/* { dg-final { scan-assembler-not {\mb[l]? memcmp\M} } }  */
+
+/* Test that the memcmpsi pattern is still expanded instead of calling
+   the library on P8 with VSX disabled.  */
+
+int foo (const char* s1, const char* s2)
+{
+  return __builtin_memcmp (s1, s2, 20);
+}
diff --git a/gcc/testsuite/gcc.target/powerpc/block-cmp-2.c b/gcc/testsuite/gcc.target/powerpc/block-cmp-2.c
new file mode 100644
index 00000000000..4f162dc0437
--- /dev/null
+++ b/gcc/testsuite/gcc.target/powerpc/block-cmp-2.c
@@ -0,0 +1,11 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -mstrict-align" } */
+/* { dg-final { scan-assembler-times {\mb[l]? memcmp\M} 1 } }  */
+
+/* Test that the block memory compare calls the library when strict-align
+   is set.  The flag causes rs6000_slow_unaligned_access to return true.  */
+
+int foo (const char* s1, const char* s2)
+{
+  return __builtin_memcmp (s1, s2, 20);
+}


* Re: [Patchv2, rs6000] Correct definition of macro of fixed point efficient unaligned
@ 2023-12-19  3:01 ` Kewen.Lin
From: Kewen.Lin @ 2023-12-19  3:01 UTC
  To: HAO CHEN GUI; +Cc: Segher Boessenkool, David, Peter Bergner, gcc-patches

Hi Haochen,

on 2023/12/18 10:43, HAO CHEN GUI wrote:
> Hi,
>   The patch removes the incorrectly defined macro
> TARGET_EFFICIENT_OVERLAPPING_UNALIGNED and replaces its uses with calls
> to slow_unaligned_access.
> 
>   Compared with the last version,
> https://gcc.gnu.org/pipermail/gcc-patches/2023-December/640076.html
> the main change is to replace the macro with calls to
> slow_unaligned_access.
> 
>   Bootstrapped and tested on x86 and powerpc64-linux BE and LE with no
> regressions. Is this OK for trunk?
> 
> Thanks
> Gui Haochen
> 
> ChangeLog
> rs6000: Correct definition of macro of fixed point efficient unaligned
> 
> The macro TARGET_EFFICIENT_OVERLAPPING_UNALIGNED is used in
> rs6000-string.cc to guard platforms on which unaligned fixed-point
> loads and stores are efficient.  It was originally defined as
> TARGET_EFFICIENT_UNALIGNED_VSX, which is enabled from Power8 on and can
> be disabled by the -mno-vsx option.  So the definition is wrong: a VSX
> option should not gate fixed-point accesses.  This patch corrects the
> problem by calling slow_unaligned_access to judge whether unaligned
> fixed-point loads and stores are efficient.
> 
> gcc/
> 	* config/rs6000/rs6000.h (TARGET_EFFICIENT_OVERLAPPING_UNALIGNED):
> 	Remove.
> 	* config/rs6000/rs6000-string.cc (select_block_compare_mode):
> 	Replace TARGET_EFFICIENT_OVERLAPPING_UNALIGNED with
> 	targetm.slow_unaligned_access.
> 	(expand_block_compare_gpr): Likewise.
> 	(expand_block_compare): Likewise.
> 	(expand_strncmp_gpr_sequence): Likewise.
> 
> gcc/testsuite/
> 	* gcc.target/powerpc/block-cmp-1.c: New.
> 	* gcc.target/powerpc/block-cmp-2.c: New.
> 
> patch.diff
> diff --git a/gcc/config/rs6000/rs6000-string.cc b/gcc/config/rs6000/rs6000-string.cc
> index 44a946cd453..cb9eeef05d8 100644
> --- a/gcc/config/rs6000/rs6000-string.cc
> +++ b/gcc/config/rs6000/rs6000-string.cc
> @@ -305,7 +305,7 @@ select_block_compare_mode (unsigned HOST_WIDE_INT offset,
>    else if (bytes == GET_MODE_SIZE (QImode))
>      return QImode;
>    else if (bytes < GET_MODE_SIZE (SImode)
> -	   && TARGET_EFFICIENT_OVERLAPPING_UNALIGNED
> +	   && !targetm.slow_unaligned_access (SImode, align)
>  	   && offset >= GET_MODE_SIZE (SImode) - bytes)
>      /* This matches the case were we have SImode and 3 bytes
>         and offset >= 1 and permits us to move back one and overlap
> @@ -313,7 +313,7 @@ select_block_compare_mode (unsigned HOST_WIDE_INT offset,
>         unwanted bytes off of the input.  */
>      return SImode;
>    else if (word_mode_ok && bytes < UNITS_PER_WORD
> -	   && TARGET_EFFICIENT_OVERLAPPING_UNALIGNED
> +	   && !targetm.slow_unaligned_access (word_mode, align)
>  	   && offset >= UNITS_PER_WORD-bytes)
>      /* Similarly, if we can use DImode it will get matched here and
>         can do an overlapping read that ends at the end of the block.  */
> @@ -1749,7 +1749,7 @@ expand_block_compare_gpr(unsigned HOST_WIDE_INT bytes, unsigned int base_align,
>        load_mode_size = GET_MODE_SIZE (load_mode);
>        if (bytes >= load_mode_size)
>  	cmp_bytes = load_mode_size;
> -      else if (TARGET_EFFICIENT_OVERLAPPING_UNALIGNED)
> +      else if (!targetm.slow_unaligned_access (load_mode, align))
>  	{
>  	  /* Move this load back so it doesn't go past the end.
>  	     P8/P9 can do this efficiently.  */
> @@ -2026,7 +2026,7 @@ expand_block_compare (rtx operands[])
>    /* The code generated for p7 and older is not faster than glibc
>       memcmp if alignment is small and length is not short, so bail
>       out to avoid those conditions.  */
> -  if (!TARGET_EFFICIENT_OVERLAPPING_UNALIGNED
> +  if (targetm.slow_unaligned_access (word_mode, UINTVAL (align_rtx))

At first glance it looks like we could use base_align here instead,
but I noticed that base_align is computed with

  unsigned int base_align = UINTVAL (align_rtx) / BITS_PER_UNIT;

According to the internal documentation, isn't the alignment already
passed in bytes?  If so, the "/ BITS_PER_UNIT" looks unexpected; could
you have a check?  If that is the case, a separate patch for it would
be appreciated (and please check some other related/similar places
too).  Thanks!
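
To make the question concrete, a hypothetical illustration (the values
are invented for the example):

  /* Hypothetical values only.  If align_rtx already encodes the
     alignment in bytes, say 4, the division yields base_align == 0,
     and the base_align == 1 / base_align == 2 bail-out tests below
     can never fire.  If it encodes bits, say 32, base_align == 4
     as intended.  */
  unsigned HOST_WIDE_INT a = UINTVAL (align_rtx);  /* 4 or 32.  */
  unsigned int base_align = a / BITS_PER_UNIT;     /* 0 or 4.  */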

>        && ((base_align == 1 && bytes > 16)
>  	  || (base_align == 2 && bytes > 32)))
>      return false;
> @@ -2168,7 +2168,7 @@ expand_strncmp_gpr_sequence (unsigned HOST_WIDE_INT bytes_to_compare,
>        load_mode_size = GET_MODE_SIZE (load_mode);
>        if (bytes_to_compare >= load_mode_size)
>  	cmp_bytes = load_mode_size;
> -      else if (TARGET_EFFICIENT_OVERLAPPING_UNALIGNED)
> +      else if (!targetm.slow_unaligned_access (load_mode, align))
>  	{
>  	  /* Move this load back so it doesn't go past the end.
>  	     P8/P9 can do this efficiently.  */
> diff --git a/gcc/config/rs6000/rs6000.h b/gcc/config/rs6000/rs6000.h
> index 326c45221e9..3971a56c588 100644
> --- a/gcc/config/rs6000/rs6000.h
> +++ b/gcc/config/rs6000/rs6000.h
> @@ -483,10 +483,6 @@ extern int rs6000_vector_align[];
>  #define TARGET_NO_SF_SUBREG	TARGET_DIRECT_MOVE_64BIT
>  #define TARGET_ALLOW_SF_SUBREG	(!TARGET_DIRECT_MOVE_64BIT)
> 
> -/* This wants to be set for p8 and newer.  On p7, overlapping unaligned
> -   loads are slow. */
> -#define TARGET_EFFICIENT_OVERLAPPING_UNALIGNED TARGET_EFFICIENT_UNALIGNED_VSX
> -
>  /* Byte/char syncs were added as phased in for ISA 2.06B, but are not present
>     in power7, so conditionalize them on p8 features.  TImode syncs need quad
>     memory support.  */
> diff --git a/gcc/testsuite/gcc.target/powerpc/block-cmp-1.c b/gcc/testsuite/gcc.target/powerpc/block-cmp-1.c
> new file mode 100644
> index 00000000000..bcf0cb2ab4f
> --- /dev/null
> +++ b/gcc/testsuite/gcc.target/powerpc/block-cmp-1.c
> @@ -0,0 +1,11 @@
> +/* { dg-do compile } */
> +/* { dg-options "-O2 -mdejagnu-cpu=power8 -mno-vsx" } */
> +/* { dg-final { scan-assembler-not {\mb[l]? memcmp\M} } }  */
> +
> +/* Test that the memcmpsi pattern is still expanded instead of calling
> +   the library on P8 with VSX disabled.  */
> +
> +int foo (const char* s1, const char* s2)
> +{
> +  return __builtin_memcmp (s1, s2, 20);
> +}
> diff --git a/gcc/testsuite/gcc.target/powerpc/block-cmp-2.c b/gcc/testsuite/gcc.target/powerpc/block-cmp-2.c
> new file mode 100644
> index 00000000000..4f162dc0437
> --- /dev/null
> +++ b/gcc/testsuite/gcc.target/powerpc/block-cmp-2.c
> @@ -0,0 +1,11 @@
> +/* { dg-do compile } */
> +/* { dg-options "-O2 -mstrict-align" } */

There is an effective target opt_mstrict_align; we should check it
first, along the lines of the sketch below.
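
A sketch of how the check could look (the exact selector spelling
should be verified against opt_mstrict_align in
lib/target-supports.exp):

/* { dg-do compile { target opt_mstrict_align } } */
/* { dg-options "-O2 -mstrict-align" } */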

The others look good to me, thanks!

BR,
Kewen

> +/* { dg-final { scan-assembler-times {\mb[l]? memcmp\M} 1 } }  */
> +
> +/* Test that the block memory compare calls the library when strict-align
> +   is set.  The flag causes rs6000_slow_unaligned_access to return true.  */
> +
> +int foo (const char* s1, const char* s2)
> +{
> +  return __builtin_memcmp (s1, s2, 20);
> +}
> 

