Subject: [gcc r12-5512] bswap: Improve perform_symbolic_merge [PR103376]
From: Jakub Jelinek @ 2021-11-25  9:40 UTC
  To: gcc-cvs

https://gcc.gnu.org/g:531dae29a67e915a145d908bd2f46d22bc369c11

commit r12-5512-g531dae29a67e915a145d908bd2f46d22bc369c11
Author: Jakub Jelinek <jakub@redhat.com>
Date:   Thu Nov 25 10:38:33 2021 +0100

    bswap: Improve perform_symbolic_merge [PR103376]
    
    Thinking more about it, perhaps we could do more for BIT_XOR_EXPR.
    We could allow the masked1 == masked2 case for it, but we would need
    to do something different from the
      n->n = n1->n | n2->n;
    that we currently do on all the bytes together.
    In particular, for masked1 == masked2 with masked1 != 0 (for 0 both
    variants are the same) and masked1 != 0xff, we would need to clear
    the corresponding n->n byte instead of setting it to the input,
    because x ^ x = 0 (whereas 0xff stands for an unknown byte, and
    unknown ^ unknown is still unknown).  Plus is much harder, because
    not only is the result unknown for non-zero operands, a carry can
    modify upper bytes as well.  So perhaps, when both masked1 and
    masked2 are non-zero for the current byte, we could set the
    resulting byte to 0xff (unknown) iff the byte above it is 0 in both
    operands, and set that upper byte to 0xff too.  Also, even for | we
    could, instead of returning NULL, just set the resulting byte to
    0xff when the two bytes differ; perhaps it will be masked off later
    on.
    
    This patch just punts on plus if both corresponding bytes are
    non-zero, and otherwise implements the above.
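    
    As a minimal standalone sketch of these per-byte rules (a
    hypothetical helper outside the GCC tree; it assumes the marker
    encoding of gimple-ssa-store-merging.c, i.e. 8-bit markers where 0
    means a known-zero byte and 0xff an unknown byte):
    
      #include <stdint.h>
      
      #define BITS_PER_MARKER 8
      #define MARKER_MASK ((uint64_t) 0xff)
      #define MARKER_BYTE_UNKNOWN 0xff
      
      enum op { OP_IOR, OP_XOR, OP_PLUS };
      
      /* Merge two marker-encoded values of SIZE bytes.  Returns 0 if the
         merge has to punt, otherwise stores the merged markers in *RES.  */
      static int
      merge_markers (enum op code, uint64_t n1, uint64_t n2, int size,
                     uint64_t *res)
      {
        /* Wherever one side's byte is 0, the other side's byte wins.  */
        uint64_t n = n1 | n2;
        for (int i = 0; i < size; i++)
          {
            uint64_t mask = MARKER_MASK << (i * BITS_PER_MARKER);
            uint64_t masked1 = n1 & mask, masked2 = n2 & mask;
            /* 0 | x == 0 ^ x == 0 + x == x.  */
            if (!masked1 || !masked2)
              continue;
            /* Non-zero + non-zero can carry into upper bytes; punt.  */
            if (code == OP_PLUS)
              return 0;
            /* x | x is still x.  */
            if (code == OP_IOR && masked1 == masked2)
              continue;
            /* x ^ x is 0 for a known byte, but unknown ^ unknown stays
               unknown.  */
            if (code == OP_XOR
                && masked1 == masked2
                && masked1 != ((uint64_t) MARKER_BYTE_UNKNOWN
                               << (i * BITS_PER_MARKER)))
              {
                n &= ~mask;
                continue;
              }
            /* Otherwise mark the byte unknown; it might still be masked
               off later on.  */
            n |= mask;
          }
        *res = n;
        return 1;
      }
    
    E.g. OR-merging markers {4,3,0,0} and {0,0,2,1} (0x04030000 and
    0x00000201) yields {4,3,2,1}, the byte-swapped pattern the bswap
    pass looks for, while XOR-merging {2,0,0,0} with itself clears the
    top byte to 0.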
    
    2021-11-25  Jakub Jelinek  <jakub@redhat.com>
    
            PR tree-optimization/103376
            * gimple-ssa-store-merging.c (perform_symbolic_merge): For
            BIT_IOR_EXPR, if masked1 && masked2 && masked1 != masked2, don't
            punt, but set the corresponding result byte to MARKER_BYTE_UNKNOWN.
            For BIT_XOR_EXPR do the same, and additionally, if
            masked1 == masked2 and the byte isn't MARKER_BYTE_UNKNOWN, set
            the corresponding result byte to 0.
    
            * gcc.dg/optimize-bswapsi-7.c: New test.

Diff:
---
 gcc/gimple-ssa-store-merging.c            | 32 +++++++++++++++++++++-----
 gcc/testsuite/gcc.dg/optimize-bswapsi-7.c | 37 +++++++++++++++++++++++++++++++
 2 files changed, 64 insertions(+), 5 deletions(-)

diff --git a/gcc/gimple-ssa-store-merging.c b/gcc/gimple-ssa-store-merging.c
index 35df0fee955..e7c90ba8b59 100644
--- a/gcc/gimple-ssa-store-merging.c
+++ b/gcc/gimple-ssa-store-merging.c
@@ -556,6 +556,7 @@ perform_symbolic_merge (gimple *source_stmt1, struct symbolic_number *n1,
   n->bytepos = n_start->bytepos;
   n->type = n_start->type;
   size = TYPE_PRECISION (n->type) / BITS_PER_UNIT;
+  uint64_t res_n = n1->n | n2->n;
 
   for (i = 0, mask = MARKER_MASK; i < size; i++, mask <<= BITS_PER_MARKER)
     {
@@ -563,12 +564,33 @@ perform_symbolic_merge (gimple *source_stmt1, struct symbolic_number *n1,
 
       masked1 = n1->n & mask;
       masked2 = n2->n & mask;
-      /* For BIT_XOR_EXPR or PLUS_EXPR, at least one of masked1 and masked2
-	 has to be 0, for BIT_IOR_EXPR x | x is still x.  */
-      if (masked1 && masked2 && (code != BIT_IOR_EXPR || masked1 != masked2))
-	return NULL;
+      /* If at least one byte is 0, all of 0 | x == 0 ^ x == 0 + x == x.  */
+      if (masked1 && masked2)
+	{
+	  /* + can carry into upper bits, just punt.  */
+	  if (code == PLUS_EXPR)
+	    return NULL;
+	  /* x | x is still x.  */
+	  if (code == BIT_IOR_EXPR && masked1 == masked2)
+	    continue;
+	  if (code == BIT_XOR_EXPR)
+	    {
+	      /* x ^ x is 0, but MARKER_BYTE_UNKNOWN stands for
+		 unknown values and unknown ^ unknown is unknown.  */
+	      if (masked1 == masked2
+		  && masked1 != ((uint64_t) MARKER_BYTE_UNKNOWN
+				 << i * BITS_PER_MARKER))
+		{
+		  res_n &= ~mask;
+		  continue;
+		}
+	    }
+	  /* Otherwise set the byte to unknown, it might still be
+	     later masked off.  */
+	  res_n |= mask;
+	}
     }
-  n->n = n1->n | n2->n;
+  n->n = res_n;
   n->n_ops = n1->n_ops + n2->n_ops;
 
   return source_stmt;
diff --git a/gcc/testsuite/gcc.dg/optimize-bswapsi-7.c b/gcc/testsuite/gcc.dg/optimize-bswapsi-7.c
new file mode 100644
index 00000000000..1e4db5e4818
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/optimize-bswapsi-7.c
@@ -0,0 +1,37 @@
+/* PR tree-optimization/103376 */
+/* { dg-do compile } */
+/* { dg-require-effective-target bswap } */
+/* { dg-options "-O2 -fno-tree-vectorize -fdump-tree-optimized" } */
+/* { dg-additional-options "-march=z900" { target s390-*-* } } */
+
+static unsigned int
+f1 (unsigned int x)
+{
+  return (x << 24) | (x >> 8);
+}
+
+unsigned int
+f2 (unsigned *p)
+{
+  return ((f1 (p[0]) | (p[0] >> 8)) & 0xff000000U) | (p[0] >> 24) | ((p[0] & 0xff00U) << 8) | ((p[0] & 0xff0000U) >> 8);
+}
+
+unsigned int
+f3 (unsigned *p)
+{
+  return ((f1 (p[0]) | (p[0] & 0x00ff00ffU)) & 0xff00ff00U) | (f1 (f1 (f1 (p[0]))) & 0x00ff00ffU);
+}
+
+unsigned int
+f4 (unsigned *p)
+{
+  return (f1 (p[0]) ^ (p[0] >> 8)) ^ (p[0] >> 24) ^ ((p[0] & 0xff00U) << 8) ^ ((p[0] & 0xff0000U) >> 8);
+}
+
+unsigned int
+f5 (unsigned *p)
+{
+  return (((f1 (p[0]) | (p[0] >> 16)) ^ (p[0] >> 8)) & 0xffff0000U) ^ (p[0] >> 24) ^ ((p[0] & 0xff00U) << 8) ^ ((p[0] & 0xff0000U) >> 8);
+}
+
+/* { dg-final { scan-tree-dump-times "= __builtin_bswap32 \\\(" 4 "optimized" } } */

