public inbox for gcc-patches@gcc.gnu.org
* [PATCH] New wi::bitreverse function.
@ 2023-06-02 14:17 Roger Sayle
  2023-06-05 10:27 ` Richard Sandiford
  0 siblings, 1 reply; 3+ messages in thread
From: Roger Sayle @ 2023-06-02 14:17 UTC (permalink / raw)
  To: gcc-patches; +Cc: 'Richard Sandiford'

[-- Attachment #1: Type: text/plain, Size: 1379 bytes --]


This patch provides a wide-int implementation of bitreverse that
implements both of Richard Sandiford's suggestions from the review at
https://gcc.gnu.org/pipermail/gcc-patches/2023-May/618215.html: an
improved API (a stand-alone function matching the bswap refactoring)
and an implementation that works for any bit precision.
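
For reference, the API change mirrors the earlier bswap refactoring:
call sites move from the wide_int member function to a free function
in the wi namespace, and the new wi::bitreverse follows the same
shape.  A minimal sketch of the call-site style (arg, precision and
sign here are placeholders, not identifiers taken from the patch):

  /* Old member-function style, removed by the bswap refactoring:  */
  wide_int old_style = wide_int::from (arg, precision, sign).bswap ();

  /* New free-function style; wi::bitreverse is used the same way:  */
  wide_int swapped  = wi::bswap (wide_int::from (arg, precision, sign));
  wide_int reversed = wi::bitreverse (wide_int::from (arg, precision, sign));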

This patch has been tested on x86_64-pc-linux-gnu with make bootstrap
(and a make check-gcc).  Ok for mainline?  Are the remaining pieces
of the above patch pre-approved (pending re-testing)?  The aim is that
this new code will be thoroughly tested by the new *-2.c test cases in
https://gcc.gnu.org/git/?p=gcc.git;h=c09471fbc7588db2480f036aa56a2403d3c03ae5
with a minor tweak to use the BITREVERSE rtx in the NVPTX back-end,
followed by similar tests on other targets that provide bit-reverse
built-ins (such as ARM and xstormy16), in advance of support for a
backend-independent solution to PR middle-end/50481.
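
As a concrete illustration of the intended semantics (not one of the
committed test cases), reversing the 32 bits of 0x12345678 yields
0x1e6a2c48, so a check along these lines should hold:

  wide_int x = wi::uhwi (0x12345678, 32);
  gcc_assert (wi::eq_p (wi::bitreverse (x), wi::uhwi (0x1e6a2c48, 32)));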


2023-06-02  Roger Sayle  <roger@nextmovesoftware.com>

gcc/ChangeLog
        * wide-int.cc (wi::bitreverse_large): New function implementing
        bit reversal of an integer.
        * wide-int.h (wi::bitreverse): New (template) function prototype.
        (bitreverse_large): Prototype helper function/implementation.
        (wi::bitreverse): New template wrapper around bitreverse_large.


Thanks again,
Roger
--


[-- Attachment #2: patchwi.txt --]
[-- Type: text/plain, Size: 5571 bytes --]

diff --git a/gcc/fold-const-call.cc b/gcc/fold-const-call.cc
index 340cb66..663eae2 100644
--- a/gcc/fold-const-call.cc
+++ b/gcc/fold-const-call.cc
@@ -1060,7 +1060,8 @@ fold_const_call_ss (wide_int *result, combined_fn fn, const wide_int_ref &arg,
     case CFN_BUILT_IN_BSWAP32:
     case CFN_BUILT_IN_BSWAP64:
     case CFN_BUILT_IN_BSWAP128:
-      *result = wide_int::from (arg, precision, TYPE_SIGN (arg_type)).bswap ();
+      *result = wi::bswap (wide_int::from (arg, precision,
+					   TYPE_SIGN (arg_type)));
       return true;
 
     default:
diff --git a/gcc/simplify-rtx.cc b/gcc/simplify-rtx.cc
index d4aeebc..d93d632 100644
--- a/gcc/simplify-rtx.cc
+++ b/gcc/simplify-rtx.cc
@@ -2111,7 +2111,7 @@ simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
 	  break;
 
 	case BSWAP:
-	  result = wide_int (op0).bswap ();
+	  result = wi::bswap (op0);
 	  break;
 
 	case TRUNCATE:
diff --git a/gcc/tree-ssa-ccp.cc b/gcc/tree-ssa-ccp.cc
index 6fb371c..26d5e44 100644
--- a/gcc/tree-ssa-ccp.cc
+++ b/gcc/tree-ssa-ccp.cc
@@ -2401,11 +2401,12 @@ evaluate_stmt (gimple *stmt)
 		  wide_int wval = wi::to_wide (val.value);
 		  val.value
 		    = wide_int_to_tree (type,
-					wide_int::from (wval, prec,
-							UNSIGNED).bswap ());
+					wi::bswap (wide_int::from (wval, prec,
+								   UNSIGNED)));
 		  val.mask
-		    = widest_int::from (wide_int::from (val.mask, prec,
-							UNSIGNED).bswap (),
+		    = widest_int::from (wi::bswap (wide_int::from (val.mask,
+								   prec,
+								   UNSIGNED)),
 					UNSIGNED);
 		  if (wi::sext (val.mask, prec) != -1)
 		    break;
diff --git a/gcc/wide-int.cc b/gcc/wide-int.cc
index c0987aa..1e4c046 100644
--- a/gcc/wide-int.cc
+++ b/gcc/wide-int.cc
@@ -731,16 +731,13 @@ wi::set_bit_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
     }
 }
 
-/* bswap THIS.  */
-wide_int
-wide_int_storage::bswap () const
+/* Byte swap the integer represented by XVAL and LEN into VAL.  Return
+   the number of blocks in VAL.  Both XVAL and VAL have PRECISION bits.  */
+unsigned int
+wi::bswap_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
+	         unsigned int len, unsigned int precision)
 {
-  wide_int result = wide_int::create (precision);
   unsigned int i, s;
-  unsigned int len = BLOCKS_NEEDED (precision);
-  unsigned int xlen = get_len ();
-  const HOST_WIDE_INT *xval = get_val ();
-  HOST_WIDE_INT *val = result.write_val ();
 
   /* This is not a well defined operation if the precision is not a
      multiple of 8.  */
@@ -758,7 +755,7 @@ wide_int_storage::bswap () const
       unsigned int block = s / HOST_BITS_PER_WIDE_INT;
       unsigned int offset = s & (HOST_BITS_PER_WIDE_INT - 1);
 
-      byte = (safe_uhwi (xval, xlen, block) >> offset) & 0xff;
+      byte = (safe_uhwi (xval, len, block) >> offset) & 0xff;
 
       block = d / HOST_BITS_PER_WIDE_INT;
       offset = d & (HOST_BITS_PER_WIDE_INT - 1);
@@ -766,8 +763,7 @@ wide_int_storage::bswap () const
       val[block] |= byte << offset;
     }
 
-  result.set_len (canonize (val, len, precision));
-  return result;
+  return canonize (val, len, precision);
 }
 
 /* Fill VAL with a mask where the lower WIDTH bits are ones and the bits
diff --git a/gcc/wide-int.h b/gcc/wide-int.h
index 3d9b87c..a2b3371 100644
--- a/gcc/wide-int.h
+++ b/gcc/wide-int.h
@@ -552,6 +552,7 @@ namespace wi
   UNARY_FUNCTION sext (const T &, unsigned int);
   UNARY_FUNCTION zext (const T &, unsigned int);
   UNARY_FUNCTION set_bit (const T &, unsigned int);
+  UNARY_FUNCTION bswap (const T &);
 
   BINARY_FUNCTION min (const T1 &, const T2 &, signop);
   BINARY_FUNCTION smin (const T1 &, const T2 &);
@@ -1086,9 +1087,6 @@ public:
   static wide_int from_array (const HOST_WIDE_INT *, unsigned int,
 			      unsigned int, bool = true);
   static wide_int create (unsigned int);
-
-  /* FIXME: target-dependent, so should disappear.  */
-  wide_int bswap () const;
 };
 
 namespace wi
@@ -1743,13 +1741,14 @@ namespace wi
   int cmpu_large (const HOST_WIDE_INT *, unsigned int, unsigned int,
 		  const HOST_WIDE_INT *, unsigned int);
   unsigned int sext_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
-			   unsigned int,
-			   unsigned int, unsigned int);
+			   unsigned int, unsigned int, unsigned int);
   unsigned int zext_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
-			   unsigned int,
-			   unsigned int, unsigned int);
+			   unsigned int, unsigned int, unsigned int);
   unsigned int set_bit_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
 			      unsigned int, unsigned int, unsigned int);
+  unsigned int bswap_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
+			    unsigned int, unsigned int);
+  
   unsigned int lshift_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
 			     unsigned int, unsigned int, unsigned int);
   unsigned int lrshift_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
@@ -2267,6 +2266,18 @@ wi::set_bit (const T &x, unsigned int bit)
   return result;
 }
 
+/* Byte swap the integer X.  */
+template <typename T>
+inline WI_UNARY_RESULT (T)
+wi::bswap (const T &x)
+{
+  WI_UNARY_RESULT_VAR (result, val, T, x);
+  unsigned int precision = get_precision (result);
+  WIDE_INT_REF_FOR (T) xi (x, precision);
+  result.set_len (bswap_large (val, xi.val, xi.len, precision));
+  return result;
+}
+
 /* Return the mininum of X and Y, treating them both as having
    signedness SGN.  */
 template <typename T1, typename T2>

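For readers following the diff above, here is an illustrative
standalone sketch (not part of the patch) of the byte-indexing scheme
used by wi::bswap_large, written over plain 64-bit blocks and assuming
the input vector already holds one block per 64 bits of precision:

  #include <cstdint>
  #include <vector>

  /* Byte-swap the low PRECISION bits (a multiple of 8) of a value
     stored little-endian in 64-bit blocks.  */
  std::vector<uint64_t>
  bswap_blocks (const std::vector<uint64_t> &xval, unsigned int precision)
  {
    std::vector<uint64_t> val ((precision + 63) / 64, 0);
    for (unsigned int s = 0; s < precision; s += 8)
      {
	unsigned int d = precision - s - 8;   /* Destination bit position.  */
	uint64_t byte = (xval[s / 64] >> (s % 64)) & 0xff;
	val[d / 64] |= byte << (d % 64);
      }
    return val;
  }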

* Re: [PATCH] New wi::bitreverse function.
  2023-06-02 14:17 [PATCH] New wi::bitreverse function Roger Sayle
@ 2023-06-05 10:27 ` Richard Sandiford
  0 siblings, 0 replies; 3+ messages in thread
From: Richard Sandiford @ 2023-06-05 10:27 UTC (permalink / raw)
  To: Roger Sayle; +Cc: gcc-patches

"Roger Sayle" <roger@nextmovesoftware.com> writes:
> This patch provides a wide-int implementation of bitreverse that
> implements both of Richard Sandiford's suggestions from the review at
> https://gcc.gnu.org/pipermail/gcc-patches/2023-May/618215.html: an
> improved API (a stand-alone function matching the bswap refactoring)
> and an implementation that works for any bit precision.
>
> This patch has been tested on x86_64-pc-linux-gnu with make bootstrap
> (and a make check-gcc).  Ok for mainline?

OK, thanks.

> Are the remaining pieces
> of the above patch pre-approved (pending re-testing)?  The aim is that
> this new code will be thoroughly tested by the new *-2.c test cases in
> https://gcc.gnu.org/git/?p=gcc.git;h=c09471fbc7588db2480f036aa56a2403d3c03ae5
> with a minor tweak to use the BITREVERSE rtx in the NVPTX back-end,
> followed by similar tests on other targets that provide bit-reverse
> built-ins (such as ARM and xstormy16), in advance of support for a
> backend-independent solution to PR middle-end/50481.

Ah, great.  Yeah, in that case, OK for the rest as well.

Richard


* RE: [PATCH] New wi::bitreverse function.
@ 2023-06-02 14:21 Roger Sayle
  0 siblings, 0 replies; 3+ messages in thread
From: Roger Sayle @ 2023-06-02 14:21 UTC (permalink / raw)
  To: gcc-patches; +Cc: 'Richard Sandiford'

[-- Attachment #1: Type: text/plain, Size: 1677 bytes --]


Doh!  Wrong patch...

Roger
--

-----Original Message-----
From: Roger Sayle <roger@nextmovesoftware.com> 
Sent: Friday, June 2, 2023 3:17 PM
To: 'gcc-patches@gcc.gnu.org' <gcc-patches@gcc.gnu.org>
Cc: 'Richard Sandiford' <richard.sandiford@arm.com>
Subject: [PATCH] New wi::bitreverse function.


This patch provides a wide-int implementation of bitreverse that implements
both of Richard Sandiford's suggestions from the review at
https://gcc.gnu.org/pipermail/gcc-patches/2023-May/618215.html: an improved
API (a stand-alone function matching the bswap refactoring) and an
implementation that works for any bit precision.

This patch has been tested on x86_64-pc-linux-gnu with make bootstrap (and a
make check-gcc).  Ok for mainline?  Are the remaining pieces of the above
patch pre-approved (pending re-testing)?  The aim is that this new code will
be thoroughly tested by the new *-2.c test cases in
https://gcc.gnu.org/git/?p=gcc.git;h=c09471fbc7588db2480f036aa56a2403d3c03ae5
with a minor tweak to use the BITREVERSE rtx in the NVPTX back-end, followed
by similar tests on other targets that provide bit-reverse built-ins (such
as ARM and xstormy16), in advance of support for a backend-independent
solution to PR middle-end/50481.


2023-06-02  Roger Sayle  <roger@nextmovesoftware.com>

gcc/ChangeLog
        * wide-int.cc (wi::bitreverse_large): New function implementing
        bit reversal of an integer.
        * wide-int.h (wi::bitreverse): New (template) function prototype.
        (bitreverse_large): Prototype helper function/implementation.
        (wi::bitreverse): New template wrapper around bitreverse_large.


Thanks again,
Roger
--


[-- Attachment #2: patchwi.txt --]
[-- Type: text/plain, Size: 2760 bytes --]

diff --git a/gcc/wide-int.cc b/gcc/wide-int.cc
index 1e4c046..24bdce2 100644
--- a/gcc/wide-int.cc
+++ b/gcc/wide-int.cc
@@ -766,6 +766,33 @@ wi::bswap_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
   return canonize (val, len, precision);
 }
 
+/* Bitreverse the integer represented by XVAL and LEN into VAL.  Return
+   the number of blocks in VAL.  Both XVAL and VAL have PRECISION bits.  */
+unsigned int
+wi::bitreverse_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
+		      unsigned int len, unsigned int precision)
+{
+  unsigned int i, s;
+
+  for (i = 0; i < len; i++)
+    val[i] = 0;
+
+  for (s = 0; s < precision; s++)
+    {
+      unsigned int block = s / HOST_BITS_PER_WIDE_INT;
+      unsigned int offset = s & (HOST_BITS_PER_WIDE_INT - 1);
+      if (((safe_uhwi (xval, len, block) >> offset) & 1) != 0)
+	{
+	  unsigned int d = (precision - 1) - s;
+	  block = d / HOST_BITS_PER_WIDE_INT;
+	  offset = d & (HOST_BITS_PER_WIDE_INT - 1);
+	  val[block] |= HOST_WIDE_INT_1U << offset;
+	}
+    }
+
+  return canonize (val, len, precision);
+}
+
 /* Fill VAL with a mask where the lower WIDTH bits are ones and the bits
    above that up to PREC are zeros.  The result is inverted if NEGATE
    is true.  Return the number of blocks in VAL.  */
diff --git a/gcc/wide-int.h b/gcc/wide-int.h
index e4723ad..498d14d 100644
--- a/gcc/wide-int.h
+++ b/gcc/wide-int.h
@@ -553,6 +553,7 @@ namespace wi
   UNARY_FUNCTION zext (const T &, unsigned int);
   UNARY_FUNCTION set_bit (const T &, unsigned int);
   UNARY_FUNCTION bswap (const T &);
+  UNARY_FUNCTION bitreverse (const T &);
 
   BINARY_FUNCTION min (const T1 &, const T2 &, signop);
   BINARY_FUNCTION smin (const T1 &, const T2 &);
@@ -1748,6 +1749,8 @@ namespace wi
 			      unsigned int, unsigned int, unsigned int);
   unsigned int bswap_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
 			    unsigned int, unsigned int);
+  unsigned int bitreverse_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
+				 unsigned int, unsigned int);
   
   unsigned int lshift_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
 			     unsigned int, unsigned int, unsigned int);
@@ -2281,6 +2284,18 @@ wi::bswap (const T &x)
   return result;
 }
 
+/* Bitreverse the integer X.  */
+template <typename T>
+inline WI_UNARY_RESULT (T)
+wi::bitreverse (const T &x)
+{
+  WI_UNARY_RESULT_VAR (result, val, T, x);
+  unsigned int precision = get_precision (result);
+  WIDE_INT_REF_FOR (T) xi (x, precision);
+  result.set_len (bitreverse_large (val, xi.val, xi.len, precision));
+  return result;
+}
+
 /* Return the mininum of X and Y, treating them both as having
    signedness SGN.  */
 template <typename T1, typename T2>

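For reference, an illustrative standalone sketch (not part of the
patch) of the bit-indexing scheme used by wi::bitreverse_large,
written over plain 64-bit blocks and assuming the input vector already
holds one block per 64 bits of precision; bit s of the input maps to
bit (precision - 1) - s of the result:

  #include <cstdint>
  #include <vector>

  /* Reverse the low PRECISION bits of a value stored little-endian
     in 64-bit blocks.  */
  std::vector<uint64_t>
  bitreverse_blocks (const std::vector<uint64_t> &xval, unsigned int precision)
  {
    std::vector<uint64_t> val ((precision + 63) / 64, 0);
    for (unsigned int s = 0; s < precision; s++)
      if ((xval[s / 64] >> (s % 64)) & 1)
	{
	  unsigned int d = (precision - 1) - s;   /* Destination bit.  */
	  val[d / 64] |= uint64_t (1) << (d % 64);
	}
    return val;
  }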
