public inbox for gcc-patches@gcc.gnu.org
 help / color / mirror / Atom feed
From: Tamar Christina <Tamar.Christina@arm.com>
To: Richard Biener <richard.guenther@gmail.com>,
	Andrew Pinski <pinskia@gmail.com>
Cc: "rguenther@suse.de" <rguenther@suse.de>, nd <nd@arm.com>,
	"gcc-patches@gcc.gnu.org" <gcc-patches@gcc.gnu.org>
Subject: RE: [PATCH 1/2]middle-end Fold BIT_FIELD_REF and Shifts into BIT_FIELD_REFs alone
Date: Mon, 31 Oct 2022 11:51:47 +0000	[thread overview]
Message-ID: <VI1PR08MB5325510F8C42F1B0C43D5E06FF379@VI1PR08MB5325.eurprd08.prod.outlook.com> (raw)
In-Reply-To: <CAFiYyc20CGV7HL=-3cMEdCHH9uye4ek9tnsp9AJWYpik-8D5bQ@mail.gmail.com>

[-- Attachment #1: Type: text/plain, Size: 5942 bytes --]

Hi All,

Here's a respin addressing review comments.

Bootstrapped and regtested on aarch64-none-linux-gnu and x86_64-pc-linux-gnu
with no issues.

Ok for master?

Thanks,
Tamar

gcc/ChangeLog:

	* match.pd: Add bitfield and shift folding.

gcc/testsuite/ChangeLog:

	* gcc.dg/bitshift_1.c: New.
	* gcc.dg/bitshift_2.c: New.

--- inline copy of patch ---

diff --git a/gcc/match.pd b/gcc/match.pd
index 70e90cdbfa902830e6b58be84e114e86ff7b4dff..a4ad465b2b074b21835be74732dce295f8db03bc 100644
--- a/gcc/match.pd
+++ b/gcc/match.pd
@@ -7245,6 +7245,45 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
       && ANY_INTEGRAL_TYPE_P (type) && ANY_INTEGRAL_TYPE_P (TREE_TYPE(@0)))
   (IFN_REDUC_PLUS_WIDEN @0)))
 
+/* Canonicalize BIT_FIELD_REFS and right shift to BIT_FIELD_REFS.  */
+(simplify
+ (rshift (BIT_FIELD_REF @0 @1 @2) INTEGER_CST@3)
+ (if (INTEGRAL_TYPE_P (type)
+      && tree_fits_uhwi_p (@1)
+      && tree_fits_uhwi_p (@3))
+  (with { /* Can't use wide-int here as the precision differs between
+	     @1 and @3.  */
+	  unsigned HOST_WIDE_INT size = tree_to_uhwi (@1);
+	  unsigned HOST_WIDE_INT shiftc = tree_to_uhwi (@3);
+	  unsigned HOST_WIDE_INT newsize = size - shiftc;
+	  tree nsize = wide_int_to_tree (bitsizetype, newsize);
+	  tree ntype
+	    = build_nonstandard_integer_type (newsize, TYPE_UNSIGNED (type)); }
+   (switch
+    (if (INTEGRAL_TYPE_P (ntype) && !BYTES_BIG_ENDIAN)
+     (convert:type (BIT_FIELD_REF:ntype @0 { nsize; } (plus @2 @3))))
+    (if (INTEGRAL_TYPE_P (ntype) && BYTES_BIG_ENDIAN)
+     (convert:type (BIT_FIELD_REF:ntype @0 { nsize; } (minus @2 @3))))))))
+
+/* Canonicalize BIT_FIELD_REFS and converts to BIT_FIELD_REFS.  */
+(simplify
+ (convert (BIT_FIELD_REF@3 @0 @1 @2))
+ (if (INTEGRAL_TYPE_P (type)
+      && INTEGRAL_TYPE_P (TREE_TYPE (@3)))
+  (with { unsigned int size_inner = element_precision (TREE_TYPE (@3));
+	  unsigned int size_outer  = element_precision (type); }
+   (if (size_inner > size_outer)
+    /* Truncating convert, we can shrink the bit field similar to the
+        shift case.  */
+    (with {
+	    tree nsize = wide_int_to_tree (bitsizetype, size_outer);
+	    auto sign = TYPE_UNSIGNED (type);
+	    tree ntype
+	      = build_nonstandard_integer_type (size_outer, sign);
+	    gcc_assert (useless_type_conversion_p (type, ntype)); }
+     (if (INTEGRAL_TYPE_P (ntype))
+      (BIT_FIELD_REF:ntype @0 { nsize; } @2)))))))
+
 (simplify
  (BIT_FIELD_REF (BIT_FIELD_REF @0 @1 @2) @3 @4)
  (BIT_FIELD_REF @0 @3 { const_binop (PLUS_EXPR, bitsizetype, @2, @4); }))
diff --git a/gcc/testsuite/gcc.dg/bitshift_1.c b/gcc/testsuite/gcc.dg/bitshift_1.c
new file mode 100644
index 0000000000000000000000000000000000000000..5995d0746d2301eb48304629cb4b779b079f1270
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/bitshift_1.c
@@ -0,0 +1,50 @@
+/* { dg-do compile { target le } } */
+/* { dg-additional-options "-O2 -save-temps -fdump-tree-optimized" } */
+
+typedef int v4si __attribute__ ((vector_size (16)));
+typedef unsigned int v4usi __attribute__ ((vector_size (16)));
+typedef unsigned short v8uhi __attribute__ ((vector_size (16)));
+
+unsigned int foor (v4usi x)
+{
+    return x[1] >> 16;
+}
+/* { dg-final { scan-tree-dump {BIT_FIELD_REF <x_[^,]+, 16, 48>;} "optimized" } } */
+
+unsigned int fool (v4usi x)
+{
+    return x[1] << 16;
+}
+/* { dg-final { scan-tree-dump {BIT_FIELD_REF <x_[^,]+, 32, 32>;} "optimized" } } */
+
+unsigned short foor2 (v4usi x)
+{
+    return x[3] >> 16;
+}
+/* { dg-final { scan-tree-dump {BIT_FIELD_REF <x_[^,]+, 16, 112>;} "optimized" } } */
+
+unsigned int fool2 (v4usi x)
+{
+    return x[0] << 16;
+}
+/* { dg-final { scan-tree-dump {BIT_FIELD_REF <x_[^,]+, 32, 0>;} "optimized" } } */
+
+unsigned char foor3 (v8uhi x)
+{
+    return x[3] >> 9;
+}
+/* { dg-final { scan-tree-dump {BIT_FIELD_REF <x_[^,]+, 7, 57>;} "optimized" } } */
+
+unsigned short fool3 (v8uhi x)
+{
+    return x[0] << 9;
+}
+/* { dg-final { scan-tree-dump {BIT_FIELD_REF <x_[^,]+, 16, 0>;} "optimized" } } */
+
+unsigned short foo2 (v4si x)
+{
+  int y = x[0] + x[1];
+  return y >> 16;
+}
+/* { dg-final { scan-tree-dump {BIT_FIELD_REF <x_[^,]+, 64, 0>;} "optimized" } } */
+
diff --git a/gcc/testsuite/gcc.dg/bitshift_2.c b/gcc/testsuite/gcc.dg/bitshift_2.c
new file mode 100644
index 0000000000000000000000000000000000000000..406b4def9d4aebbc83bd5bef92dab825b85f2aa4
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/bitshift_2.c
@@ -0,0 +1,49 @@
+/* { dg-do compile { target be } } */
+/* { dg-additional-options "-O2 -save-temps -fdump-tree-optimized" } */
+
+typedef int v4si __attribute__ ((vector_size (16)));
+typedef unsigned int v4usi __attribute__ ((vector_size (16)));
+typedef unsigned short v8uhi __attribute__ ((vector_size (16)));
+
+unsigned int foor (v4usi x)
+{
+    return x[1] >> 16;
+}
+/* { dg-final { scan-tree-dump {BIT_FIELD_REF <x_[^,]+, 16, 16>;} "optimized" } } */
+
+unsigned int fool (v4usi x)
+{
+    return x[1] << 16;
+}
+/* { dg-final { scan-tree-dump {BIT_FIELD_REF <x_[^,]+, 32, 32>;} "optimized" } } */
+
+unsigned short foor2 (v4usi x)
+{
+    return x[3] >> 16;
+}
+/* { dg-final { scan-tree-dump {BIT_FIELD_REF <x_[^,]+, 16, 80>;} "optimized" } } */
+
+unsigned int fool2 (v4usi x)
+{
+    return x[0] << 16;
+}
+/* { dg-final { scan-tree-dump {BIT_FIELD_REF <x_[^,]+, 32, 0>;} "optimized" } } */
+
+unsigned char foor3 (v8uhi x)
+{
+    return x[3] >> 9;
+}
+/* { dg-final { scan-tree-dump {BIT_FIELD_REF <x_[^,]+, 7, 39>;} "optimized" } } */
+
+unsigned short fool3 (v8uhi x)
+{
+    return x[0] << 9;
+}
+/* { dg-final { scan-tree-dump {BIT_FIELD_REF <x_[^,]+, 16, 0>;} "optimized" } } */
+
+unsigned short foo2 (v4si x)
+{
+  int y = x[0] + x[1];
+  return y >> 16;
+}
+/* { dg-final { scan-tree-dump {BIT_FIELD_REF <x_[^,]+, 64, 0>;} "optimized" } } */

[-- Attachment #2: rb15776.patch --]
[-- Type: application/octet-stream, Size: 5410 bytes --]

diff --git a/gcc/match.pd b/gcc/match.pd
index 70e90cdbfa902830e6b58be84e114e86ff7b4dff..a4ad465b2b074b21835be74732dce295f8db03bc 100644
--- a/gcc/match.pd
+++ b/gcc/match.pd
@@ -7245,6 +7245,45 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
       && ANY_INTEGRAL_TYPE_P (type) && ANY_INTEGRAL_TYPE_P (TREE_TYPE(@0)))
   (IFN_REDUC_PLUS_WIDEN @0)))
 
+/* Canonicalize BIT_FIELD_REFS and right shift to BIT_FIELD_REFS.  */
+(simplify
+ (rshift (BIT_FIELD_REF @0 @1 @2) INTEGER_CST@3)
+ (if (INTEGRAL_TYPE_P (type)
+      && tree_fits_uhwi_p (@1)
+      && tree_fits_uhwi_p (@3))
+  (with { /* Can't use wide-int here as the precision differs between
+	     @1 and @3.  */
+	  unsigned HOST_WIDE_INT size = tree_to_uhwi (@1);
+	  unsigned HOST_WIDE_INT shiftc = tree_to_uhwi (@3);
+	  unsigned HOST_WIDE_INT newsize = size - shiftc;
+	  tree nsize = wide_int_to_tree (bitsizetype, newsize);
+	  tree ntype
+	    = build_nonstandard_integer_type (newsize, TYPE_UNSIGNED (type)); }
+   (switch
+    (if (INTEGRAL_TYPE_P (ntype) && !BYTES_BIG_ENDIAN)
+     (convert:type (BIT_FIELD_REF:ntype @0 { nsize; } (plus @2 @3))))
+    (if (INTEGRAL_TYPE_P (ntype) && BYTES_BIG_ENDIAN)
+     (convert:type (BIT_FIELD_REF:ntype @0 { nsize; } (minus @2 @3))))))))
+
+/* Canonicalize BIT_FIELD_REFS and converts to BIT_FIELD_REFS.  */
+(simplify
+ (convert (BIT_FIELD_REF@3 @0 @1 @2))
+ (if (INTEGRAL_TYPE_P (type)
+      && INTEGRAL_TYPE_P (TREE_TYPE (@3)))
+  (with { unsigned int size_inner = element_precision (TREE_TYPE (@3));
+	  unsigned int size_outer  = element_precision (type); }
+   (if (size_inner > size_outer)
+    /* Truncating convert, we can shrink the bit field similar to the
+        shift case.  */
+    (with {
+	    tree nsize = wide_int_to_tree (bitsizetype, size_outer);
+	    auto sign = TYPE_UNSIGNED (type);
+	    tree ntype
+	      = build_nonstandard_integer_type (size_outer, sign);
+	    gcc_assert (useless_type_conversion_p (type, ntype)); }
+     (if (INTEGRAL_TYPE_P (ntype))
+      (BIT_FIELD_REF:ntype @0 { nsize; } @2)))))))
+
 (simplify
  (BIT_FIELD_REF (BIT_FIELD_REF @0 @1 @2) @3 @4)
  (BIT_FIELD_REF @0 @3 { const_binop (PLUS_EXPR, bitsizetype, @2, @4); }))
diff --git a/gcc/testsuite/gcc.dg/bitshift_1.c b/gcc/testsuite/gcc.dg/bitshift_1.c
new file mode 100644
index 0000000000000000000000000000000000000000..5995d0746d2301eb48304629cb4b779b079f1270
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/bitshift_1.c
@@ -0,0 +1,50 @@
+/* { dg-do compile { target le } } */
+/* { dg-additional-options "-O2 -save-temps -fdump-tree-optimized" } */
+
+typedef int v4si __attribute__ ((vector_size (16)));
+typedef unsigned int v4usi __attribute__ ((vector_size (16)));
+typedef unsigned short v8uhi __attribute__ ((vector_size (16)));
+
+unsigned int foor (v4usi x)
+{
+    return x[1] >> 16;
+}
+/* { dg-final { scan-tree-dump {BIT_FIELD_REF <x_[^,]+, 16, 48>;} "optimized" } } */
+
+unsigned int fool (v4usi x)
+{
+    return x[1] << 16;
+}
+/* { dg-final { scan-tree-dump {BIT_FIELD_REF <x_[^,]+, 32, 32>;} "optimized" } } */
+
+unsigned short foor2 (v4usi x)
+{
+    return x[3] >> 16;
+}
+/* { dg-final { scan-tree-dump {BIT_FIELD_REF <x_[^,]+, 16, 112>;} "optimized" } } */
+
+unsigned int fool2 (v4usi x)
+{
+    return x[0] << 16;
+}
+/* { dg-final { scan-tree-dump {BIT_FIELD_REF <x_[^,]+, 32, 0>;} "optimized" } } */
+
+unsigned char foor3 (v8uhi x)
+{
+    return x[3] >> 9;
+}
+/* { dg-final { scan-tree-dump {BIT_FIELD_REF <x_[^,]+, 7, 57>;} "optimized" } } */
+
+unsigned short fool3 (v8uhi x)
+{
+    return x[0] << 9;
+}
+/* { dg-final { scan-tree-dump {BIT_FIELD_REF <x_[^,]+, 16, 0>;} "optimized" } } */
+
+unsigned short foo2 (v4si x)
+{
+  int y = x[0] + x[1];
+  return y >> 16;
+}
+/* { dg-final { scan-tree-dump {BIT_FIELD_REF <x_[^,]+, 64, 0>;} "optimized" } } */
+
diff --git a/gcc/testsuite/gcc.dg/bitshift_2.c b/gcc/testsuite/gcc.dg/bitshift_2.c
new file mode 100644
index 0000000000000000000000000000000000000000..406b4def9d4aebbc83bd5bef92dab825b85f2aa4
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/bitshift_2.c
@@ -0,0 +1,49 @@
+/* { dg-do compile { target be } } */
+/* { dg-additional-options "-O2 -save-temps -fdump-tree-optimized" } */
+
+typedef int v4si __attribute__ ((vector_size (16)));
+typedef unsigned int v4usi __attribute__ ((vector_size (16)));
+typedef unsigned short v8uhi __attribute__ ((vector_size (16)));
+
+unsigned int foor (v4usi x)
+{
+    return x[1] >> 16;
+}
+/* { dg-final { scan-tree-dump {BIT_FIELD_REF <x_[^,]+, 16, 16>;} "optimized" } } */
+
+unsigned int fool (v4usi x)
+{
+    return x[1] << 16;
+}
+/* { dg-final { scan-tree-dump {BIT_FIELD_REF <x_[^,]+, 32, 32>;} "optimized" } } */
+
+unsigned short foor2 (v4usi x)
+{
+    return x[3] >> 16;
+}
+/* { dg-final { scan-tree-dump {BIT_FIELD_REF <x_[^,]+, 16, 80>;} "optimized" } } */
+
+unsigned int fool2 (v4usi x)
+{
+    return x[0] << 16;
+}
+/* { dg-final { scan-tree-dump {BIT_FIELD_REF <x_[^,]+, 32, 0>;} "optimized" } } */
+
+unsigned char foor3 (v8uhi x)
+{
+    return x[3] >> 9;
+}
+/* { dg-final { scan-tree-dump {BIT_FIELD_REF <x_[^,]+, 7, 39>;} "optimized" } } */
+
+unsigned short fool3 (v8uhi x)
+{
+    return x[0] << 9;
+}
+/* { dg-final { scan-tree-dump {BIT_FIELD_REF <x_[^,]+, 16, 0>;} "optimized" } } */
+
+unsigned short foo2 (v4si x)
+{
+  int y = x[0] + x[1];
+  return y >> 16;
+}
+/* { dg-final { scan-tree-dump {BIT_FIELD_REF <x_[^,]+, 64, 0>;} "optimized" } } */

  reply	other threads:[~2022-10-31 11:51 UTC|newest]

Thread overview: 19+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2022-09-23 11:42 Tamar Christina
2022-09-23 11:43 ` [PATCH 2/2]AArch64 Perform more late folding of reg moves and shifts which arrive after expand Tamar Christina
2022-09-23 14:32   ` Richard Sandiford
2022-10-31 11:48     ` Tamar Christina
2022-11-14 21:54       ` Richard Sandiford
2022-11-14 21:59         ` Richard Sandiford
2022-12-01 16:25           ` Tamar Christina
2022-12-01 18:38             ` Richard Sandiford
2022-09-24 18:38 ` [PATCH 1/2]middle-end Fold BIT_FIELD_REF and Shifts into BIT_FIELD_REFs alone Jeff Law
2022-09-28 13:19   ` Tamar Christina
2022-09-28 17:25     ` Jeff Law
2022-09-24 18:57 ` Andrew Pinski
2022-09-26  4:55   ` Tamar Christina
2022-09-26  8:05     ` Richard Biener
2022-09-26 15:24     ` Andrew Pinski
2022-09-27 12:40       ` Richard Biener
2022-10-31 11:51         ` Tamar Christina [this message]
2022-10-31 16:24           ` Jeff Law
2022-11-07 13:29           ` Richard Biener

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=VI1PR08MB5325510F8C42F1B0C43D5E06FF379@VI1PR08MB5325.eurprd08.prod.outlook.com \
    --to=tamar.christina@arm.com \
    --cc=gcc-patches@gcc.gnu.org \
    --cc=nd@arm.com \
    --cc=pinskia@gmail.com \
    --cc=rguenther@suse.de \
    --cc=richard.guenther@gmail.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for read-only IMAP folder(s) and NNTP newsgroup(s).