public inbox for gcc-patches@gcc.gnu.org
* Support << and >> for offset_int and widest_int
@ 2016-04-29 12:30 Richard Sandiford
  2016-04-29 12:37 ` H.J. Lu
  2016-05-02  8:53 ` Richard Biener
From: Richard Sandiford @ 2016-04-29 12:30 UTC
  To: gcc-patches

Following on from the comparison patch, I think it makes sense to
support << and >> for offset_int (int128_t) and widest_int (intNNN_t),
with >> being arithmetic shift.  It doesn't make sense to use
logical right shift on a potentially negative offset_int, since
the precision of 128 bits has no meaning on the target.
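
For example (an illustrative sketch, not part of the patch itself),
rounding a possibly negative bit offset down to a byte boundary can
now be written with plain operators:

  offset_int bits = wi::to_offset (DECL_FIELD_BIT_OFFSET (field));
  /* Arithmetic >> rounds towards -Inf, so this is also correct
     when BITS is negative.  */
  offset_int rounded = (bits >> LOG2_BITS_PER_UNIT) << LOG2_BITS_PER_UNIT;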

Tested on x86_64-linux-gnu and aarch64-linux-gnu.  OK to install?

Thanks,
Richard


gcc/
	* wide-int.h: Update offset_int and widest_int documentation.
	(WI_SIGNED_SHIFT_RESULT): New macro.
	(wi::binary_shift): Define signed_shift_result_type for
	shifts on offset_int- and widest_int-like types.
	(generic_wide_int): Support <<= and >>= if << and >> are supported.
	* tree.h (int_bit_position): Use shift operators instead of
	wi:: shifts.
	* alias.c (adjust_offset_for_component_ref): Likewise.
	* expr.c (get_inner_reference): Likewise.
	* fold-const.c (fold_comparison): Likewise.
	* gimple-fold.c (fold_nonarray_ctor_reference): Likewise.
	* gimple-ssa-strength-reduction.c (restructure_reference): Likewise.
	* tree-dfa.c (get_ref_base_and_extent): Likewise.
	* tree-ssa-alias.c (indirect_ref_may_alias_decl_p): Likewise.
	(stmt_kills_ref_p): Likewise.
	* tree-ssa-ccp.c (bit_value_binop_1): Likewise.
	* tree-ssa-math-opts.c (find_bswap_or_nop_load): Likewise.
	* tree-ssa-sccvn.c (copy_reference_ops_from_ref): Likewise.
	(ao_ref_init_from_vn_reference): Likewise.

gcc/cp/
	* init.c (build_new_1): Use shift operators instead of wi:: shifts.

Index: gcc/wide-int.h
===================================================================
--- gcc/wide-int.h
+++ gcc/wide-int.h
@@ -68,6 +68,8 @@ along with GCC; see the file COPYING3.  If not see
      Since the values are logically signed, there is no need to
      distinguish between signed and unsigned operations.  Sign-sensitive
      comparison operators <, <=, > and >= are therefore supported.
+     Shift operators << and >> are also supported, with >> being
+     an _arithmetic_ right shift.
 
      [ Note that, even though offset_int is effectively int128_t,
        it can still be useful to use unsigned comparisons like
@@ -82,7 +84,8 @@ along with GCC; see the file COPYING3.  If not see
 
      Like offset_int, widest_int is wider than all the values that
      it needs to represent, so the integers are logically signed.
-     Sign-sensitive comparison operators <, <=, > and >= are supported.
+     Sign-sensitive comparison operators <, <=, > and >= are supported,
+     as are << and >>.
 
      There are several places in the GCC where this should/must be used:
 
@@ -259,6 +262,11 @@ along with GCC; see the file COPYING3.  If not see
 #define WI_BINARY_RESULT(T1, T2) \
   typename wi::binary_traits <T1, T2>::result_type
 
+/* The type of result produced by T1 << T2.  Leads to substitution failure
+   if the operation isn't supported.  Defined purely for brevity.  */
+#define WI_SIGNED_SHIFT_RESULT(T1, T2) \
+  typename wi::binary_traits <T1, T2>::signed_shift_result_type
+
 /* The type of result produced by a signed binary predicate on types T1 and T2.
    This is bool if signed comparisons make sense for T1 and T2 and leads to
    substitution failure otherwise.  */
@@ -405,6 +413,7 @@ namespace wi
        so as not to confuse gengtype.  */
     typedef generic_wide_int < fixed_wide_int_storage
 			       <int_traits <T1>::precision> > result_type;
+    typedef result_type signed_shift_result_type;
     typedef bool signed_predicate_result;
   };
 
@@ -416,6 +425,7 @@ namespace wi
     STATIC_ASSERT (int_traits <T1>::precision == int_traits <T2>::precision);
     typedef generic_wide_int < fixed_wide_int_storage
 			       <int_traits <T1>::precision> > result_type;
+    typedef result_type signed_shift_result_type;
     typedef bool signed_predicate_result;
   };
 
@@ -681,6 +691,11 @@ public:
   template <typename T> \
     generic_wide_int &OP (const T &c) { return (*this = wi::F (*this, c)); }
 
+/* Restrict these to cases where the shift operator is defined.  */
+#define SHIFT_ASSIGNMENT_OPERATOR(OP, OP2) \
+  template <typename T> \
+    generic_wide_int &OP (const T &c) { return (*this = *this OP2 c); }
+
 #define INCDEC_OPERATOR(OP, DELTA) \
   generic_wide_int &OP () { *this += DELTA; return *this; }
 
@@ -702,12 +717,15 @@ public:
   ASSIGNMENT_OPERATOR (operator +=, add)
   ASSIGNMENT_OPERATOR (operator -=, sub)
   ASSIGNMENT_OPERATOR (operator *=, mul)
+  SHIFT_ASSIGNMENT_OPERATOR (operator <<=, <<)
+  SHIFT_ASSIGNMENT_OPERATOR (operator >>=, >>)
   INCDEC_OPERATOR (operator ++, 1)
   INCDEC_OPERATOR (operator --, -1)
 
 #undef BINARY_PREDICATE
 #undef UNARY_OPERATOR
 #undef BINARY_OPERATOR
+#undef SHIFT_ASSIGNMENT_OPERATOR
 #undef ASSIGNMENT_OPERATOR
 #undef INCDEC_OPERATOR
 
@@ -857,7 +875,7 @@ generic_wide_int <storage>::elt (unsigned int i) const
 
 template <typename storage>
 template <typename T>
-generic_wide_int <storage> &
+inline generic_wide_int <storage> &
 generic_wide_int <storage>::operator = (const T &x)
 {
   storage::operator = (x);
@@ -3078,6 +3096,20 @@ SIGNED_BINARY_PREDICATE (operator >=, ges_p)
 
 #undef SIGNED_BINARY_PREDICATE
 
+template <typename T1, typename T2>
+inline WI_SIGNED_SHIFT_RESULT (T1, T2)
+operator << (const T1 &x, const T2 &y)
+{
+  return wi::lshift (x, y);
+}
+
+template <typename T1, typename T2>
+inline WI_SIGNED_SHIFT_RESULT (T1, T2)
+operator >> (const T1 &x, const T2 &y)
+{
+  return wi::arshift (x, y);
+}
+
 template<typename T>
 void
 gt_ggc_mx (generic_wide_int <T> *)
Index: gcc/tree.h
===================================================================
--- gcc/tree.h
+++ gcc/tree.h
@@ -5375,7 +5375,7 @@ extern GTY(()) struct int_n_trees_t int_n_trees[NUM_INT_N_ENTS];
 inline HOST_WIDE_INT
 int_bit_position (const_tree field)
 { 
-  return (wi::lshift (wi::to_offset (DECL_FIELD_OFFSET (field)), BITS_PER_UNIT_LOG)
+  return ((wi::to_offset (DECL_FIELD_OFFSET (field)) << BITS_PER_UNIT_LOG)
 	  + wi::to_offset (DECL_FIELD_BIT_OFFSET (field))).to_shwi ();
 }
 
Index: gcc/alias.c
===================================================================
--- gcc/alias.c
+++ gcc/alias.c
@@ -2651,8 +2651,8 @@ adjust_offset_for_component_ref (tree x, bool *known_p,
 
       offset_int woffset
 	= (wi::to_offset (xoffset)
-	   + wi::lrshift (wi::to_offset (DECL_FIELD_BIT_OFFSET (field)),
-			  LOG2_BITS_PER_UNIT));
+	   + (wi::to_offset (DECL_FIELD_BIT_OFFSET (field))
+	      >> LOG2_BITS_PER_UNIT));
       if (!wi::fits_uhwi_p (woffset))
 	{
 	  *known_p = false;
Index: gcc/expr.c
===================================================================
--- gcc/expr.c
+++ gcc/expr.c
@@ -6989,7 +6989,7 @@ get_inner_reference (tree exp, HOST_WIDE_INT *pbitsize,
 	      if (!integer_zerop (off))
 		{
 		  offset_int boff, coff = mem_ref_offset (exp);
-		  boff = wi::lshift (coff, LOG2_BITS_PER_UNIT);
+		  boff = coff << LOG2_BITS_PER_UNIT;
 		  bit_offset += boff;
 		}
 	      exp = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
@@ -7015,7 +7015,7 @@ get_inner_reference (tree exp, HOST_WIDE_INT *pbitsize,
     {
       offset_int tem = wi::sext (wi::to_offset (offset),
 				 TYPE_PRECISION (sizetype));
-      tem = wi::lshift (tem, LOG2_BITS_PER_UNIT);
+      tem <<= LOG2_BITS_PER_UNIT;
       tem += bit_offset;
       if (wi::fits_shwi_p (tem))
 	{
@@ -7035,7 +7035,7 @@ get_inner_reference (tree exp, HOST_WIDE_INT *pbitsize,
 	  /* TEM is the bitpos rounded to BITS_PER_UNIT towards -Inf.
 	     Subtract it to BIT_OFFSET and add it (scaled) to OFFSET.  */
 	  bit_offset -= tem;
-	  tem = wi::arshift (tem, LOG2_BITS_PER_UNIT);
+	  tem >>= LOG2_BITS_PER_UNIT;
 	  offset = size_binop (PLUS_EXPR, offset,
 			       wide_int_to_tree (sizetype, tem));
 	}
Index: gcc/fold-const.c
===================================================================
--- gcc/fold-const.c
+++ gcc/fold-const.c
@@ -8518,7 +8518,7 @@ fold_comparison (location_t loc, enum tree_code code, tree type,
 	    {
 	      offset_int tem = wi::sext (wi::to_offset (offset0),
 					 TYPE_PRECISION (sizetype));
-	      tem = wi::lshift (tem, LOG2_BITS_PER_UNIT);
+	      tem <<= LOG2_BITS_PER_UNIT;
 	      tem += bitpos0;
 	      if (wi::fits_shwi_p (tem))
 		{
@@ -8565,7 +8565,7 @@ fold_comparison (location_t loc, enum tree_code code, tree type,
 	    {
 	      offset_int tem = wi::sext (wi::to_offset (offset1),
 					 TYPE_PRECISION (sizetype));
-	      tem = wi::lshift (tem, LOG2_BITS_PER_UNIT);
+	      tem <<= LOG2_BITS_PER_UNIT;
 	      tem += bitpos1;
 	      if (wi::fits_shwi_p (tem))
 		{
Index: gcc/gimple-fold.c
===================================================================
--- gcc/gimple-fold.c
+++ gcc/gimple-fold.c
@@ -5435,8 +5435,7 @@ fold_nonarray_ctor_reference (tree type, tree ctor,
 
       /* Compute bit offset of the field.  */
       bitoffset = (wi::to_offset (field_offset)
-		   + wi::lshift (wi::to_offset (byte_offset),
-				 LOG2_BITS_PER_UNIT));
+		   + (wi::to_offset (byte_offset) << LOG2_BITS_PER_UNIT));
       /* Compute bit offset where the field ends.  */
       if (field_size != NULL_TREE)
 	bitoffset_end = bitoffset + wi::to_offset (field_size);
Index: gcc/gimple-ssa-strength-reduction.c
===================================================================
--- gcc/gimple-ssa-strength-reduction.c
+++ gcc/gimple-ssa-strength-reduction.c
@@ -951,7 +951,7 @@ restructure_reference (tree *pbase, tree *poffset, widest_int *pindex,
       c2 = 0;
     }
 
-  c4 = wi::lrshift (index, LOG2_BITS_PER_UNIT);
+  c4 = index >> LOG2_BITS_PER_UNIT;
   c5 = backtrace_base_for_ref (&t2);
 
   *pbase = t1;
Index: gcc/tree-dfa.c
===================================================================
--- gcc/tree-dfa.c
+++ gcc/tree-dfa.c
@@ -424,8 +424,8 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset,
 
 	    if (this_offset && TREE_CODE (this_offset) == INTEGER_CST)
 	      {
-		offset_int woffset = wi::lshift (wi::to_offset (this_offset),
-						 LOG2_BITS_PER_UNIT);
+		offset_int woffset = (wi::to_offset (this_offset)
+				      << LOG2_BITS_PER_UNIT);
 		woffset += wi::to_offset (DECL_FIELD_BIT_OFFSET (field));
 		bit_offset += woffset;
 
@@ -453,7 +453,7 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset,
 			  {
 			    offset_int tem = (wi::to_offset (ssize)
 					      - wi::to_offset (fsize));
-			    tem = wi::lshift (tem, LOG2_BITS_PER_UNIT);
+			    tem <<= LOG2_BITS_PER_UNIT;
 			    tem -= woffset;
 			    maxsize += tem;
 			  }
@@ -493,7 +493,7 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset,
 		  = wi::sext (wi::to_offset (index) - wi::to_offset (low_bound),
 			      TYPE_PRECISION (TREE_TYPE (index)));
 		woffset *= wi::to_offset (unit_size);
-		woffset = wi::lshift (woffset, LOG2_BITS_PER_UNIT);
+		woffset <<= LOG2_BITS_PER_UNIT;
 		bit_offset += woffset;
 
 		/* An array ref with a constant index up in the structure
@@ -570,7 +570,7 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset,
 	      else
 		{
 		  offset_int off = mem_ref_offset (exp);
-		  off = wi::lshift (off, LOG2_BITS_PER_UNIT);
+		  off <<= LOG2_BITS_PER_UNIT;
 		  off += bit_offset;
 		  if (wi::fits_shwi_p (off))
 		    {
Index: gcc/tree-ssa-alias.c
===================================================================
--- gcc/tree-ssa-alias.c
+++ gcc/tree-ssa-alias.c
@@ -1041,7 +1041,7 @@ indirect_ref_may_alias_decl_p (tree ref1 ATTRIBUTE_UNUSED, tree base1,
   /* The offset embedded in MEM_REFs can be negative.  Bias them
      so that the resulting offset adjustment is positive.  */
   offset_int moff = mem_ref_offset (base1);
-  moff = wi::lshift (moff, LOG2_BITS_PER_UNIT);
+  moff <<= LOG2_BITS_PER_UNIT;
   if (wi::neg_p (moff))
     offset2p += (-moff).to_short_addr ();
   else
@@ -1113,7 +1113,7 @@ indirect_ref_may_alias_decl_p (tree ref1 ATTRIBUTE_UNUSED, tree base1,
       || TREE_CODE (dbase2) == TARGET_MEM_REF)
     {
       offset_int moff = mem_ref_offset (dbase2);
-      moff = wi::lshift (moff, LOG2_BITS_PER_UNIT);
+      moff <<= LOG2_BITS_PER_UNIT;
       if (wi::neg_p (moff))
 	doffset1 -= (-moff).to_short_addr ();
       else
@@ -1211,13 +1211,13 @@ indirect_refs_may_alias_p (tree ref1 ATTRIBUTE_UNUSED, tree base1,
       /* The offset embedded in MEM_REFs can be negative.  Bias them
 	 so that the resulting offset adjustment is positive.  */
       moff = mem_ref_offset (base1);
-      moff = wi::lshift (moff, LOG2_BITS_PER_UNIT);
+      moff <<= LOG2_BITS_PER_UNIT;
       if (wi::neg_p (moff))
 	offset2 += (-moff).to_short_addr ();
       else
 	offset1 += moff.to_shwi ();
       moff = mem_ref_offset (base2);
-      moff = wi::lshift (moff, LOG2_BITS_PER_UNIT);
+      moff <<= LOG2_BITS_PER_UNIT;
       if (wi::neg_p (moff))
 	offset1 += (-moff).to_short_addr ();
       else
@@ -2298,10 +2298,10 @@ stmt_kills_ref_p (gimple *stmt, ao_ref *ref)
 				       TREE_OPERAND (ref->base, 1)))
 		{
 		  offset_int off1 = mem_ref_offset (base);
-		  off1 = wi::lshift (off1, LOG2_BITS_PER_UNIT);
+		  off1 <<= LOG2_BITS_PER_UNIT;
 		  off1 += offset;
 		  offset_int off2 = mem_ref_offset (ref->base);
-		  off2 = wi::lshift (off2, LOG2_BITS_PER_UNIT);
+		  off2 <<= LOG2_BITS_PER_UNIT;
 		  off2 += ref_offset;
 		  if (wi::fits_shwi_p (off1) && wi::fits_shwi_p (off2))
 		    {
@@ -2372,18 +2372,15 @@ stmt_kills_ref_p (gimple *stmt, ao_ref *ref)
 		  if (TREE_CODE (rbase) != MEM_REF)
 		    return false;
 		  // Compare pointers.
-		  offset += wi::lshift (mem_ref_offset (base),
-					LOG2_BITS_PER_UNIT);
-		  roffset += wi::lshift (mem_ref_offset (rbase),
-					 LOG2_BITS_PER_UNIT);
+		  offset += mem_ref_offset (base) << LOG2_BITS_PER_UNIT;
+		  roffset += mem_ref_offset (rbase) << LOG2_BITS_PER_UNIT;
 		  base = TREE_OPERAND (base, 0);
 		  rbase = TREE_OPERAND (rbase, 0);
 		}
 	      if (base == rbase
 		  && offset <= roffset
 		  && (roffset + ref->max_size
-		      <= offset + wi::lshift (wi::to_offset (len),
-					      LOG2_BITS_PER_UNIT)))
+		      <= offset + (wi::to_offset (len) << LOG2_BITS_PER_UNIT)))
 		return true;
 	      break;
 	    }
Index: gcc/tree-ssa-ccp.c
===================================================================
--- gcc/tree-ssa-ccp.c
+++ gcc/tree-ssa-ccp.c
@@ -1372,8 +1372,8 @@ bit_value_binop_1 (enum tree_code code, tree type,
 		}
 	      else
 		{
-		  *mask = wi::ext (wi::lshift (r1mask, shift), width, sgn);
-		  *val = wi::ext (wi::lshift (r1val, shift), width, sgn);
+		  *mask = wi::ext (r1mask << shift, width, sgn);
+		  *val = wi::ext (r1val << shift, width, sgn);
 		}
 	    }
 	}
Index: gcc/tree-ssa-math-opts.c
===================================================================
--- gcc/tree-ssa-math-opts.c
+++ gcc/tree-ssa-math-opts.c
@@ -2104,7 +2104,7 @@ find_bswap_or_nop_load (gimple *stmt, tree ref, struct symbolic_number *n)
       if (!integer_zerop (off))
 	{
 	  offset_int boff, coff = mem_ref_offset (base_addr);
-	  boff = wi::lshift (coff, LOG2_BITS_PER_UNIT);
+	  boff = coff << LOG2_BITS_PER_UNIT;
 	  bit_offset += boff;
 	}
 
@@ -2118,7 +2118,7 @@ find_bswap_or_nop_load (gimple *stmt, tree ref, struct symbolic_number *n)
 	  /* TEM is the bitpos rounded to BITS_PER_UNIT towards -Inf.
 	     Subtract it to BIT_OFFSET and add it (scaled) to OFFSET.  */
 	  bit_offset -= tem;
-	  tem = wi::arshift (tem, LOG2_BITS_PER_UNIT);
+	  tem >>= LOG2_BITS_PER_UNIT;
 	  if (offset)
 	    offset = size_binop (PLUS_EXPR, offset,
 				    wide_int_to_tree (sizetype, tem));
Index: gcc/tree-ssa-sccvn.c
===================================================================
--- gcc/tree-ssa-sccvn.c
+++ gcc/tree-ssa-sccvn.c
@@ -788,8 +788,7 @@ copy_reference_ops_from_ref (tree ref, vec<vn_reference_op_s> *result)
 		  {
 		    offset_int off
 		      = (wi::to_offset (this_offset)
-			 + wi::lrshift (wi::to_offset (bit_offset),
-					LOG2_BITS_PER_UNIT));
+			 + (wi::to_offset (bit_offset) >> LOG2_BITS_PER_UNIT));
 		    if (wi::fits_shwi_p (off)
 			/* Probibit value-numbering zero offset components
 			   of addresses the same before the pass folding
@@ -999,8 +998,8 @@ ao_ref_init_from_vn_reference (ao_ref *ref,
 	      max_size = -1;
 	    else
 	      {
-		offset_int woffset = wi::lshift (wi::to_offset (this_offset),
-						 LOG2_BITS_PER_UNIT);
+		offset_int woffset = (wi::to_offset (this_offset)
+				      << LOG2_BITS_PER_UNIT);
 		woffset += wi::to_offset (DECL_FIELD_BIT_OFFSET (field));
 		offset += woffset;
 	      }
@@ -1020,7 +1019,7 @@ ao_ref_init_from_vn_reference (ao_ref *ref,
 		= wi::sext (wi::to_offset (op->op0) - wi::to_offset (op->op1),
 			    TYPE_PRECISION (TREE_TYPE (op->op0)));
 	      woffset *= wi::to_offset (op->op2);
-	      woffset = wi::lshift (woffset, LOG2_BITS_PER_UNIT);
+	      woffset <<= LOG2_BITS_PER_UNIT;
 	      offset += woffset;
 	    }
 	  break;
Index: gcc/cp/init.c
===================================================================
--- gcc/cp/init.c
+++ gcc/cp/init.c
@@ -2812,8 +2812,7 @@ build_new_1 (vec<tree, va_gc> **placement, tree type, tree nelts,
 
 	  unsigned shift = (max_outer_nelts.get_precision ()) - 7
 	    - wi::clz (max_outer_nelts);
-	  max_outer_nelts = wi::lshift (wi::lrshift (max_outer_nelts, shift),
-				        shift);
+	  max_outer_nelts = (max_outer_nelts >> shift) << shift;
 
           outer_nelts_check = fold_build2 (LE_EXPR, boolean_type_node,
 					   outer_nelts,
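
The operators above are only declared when wi::binary_traits provides
signed_shift_result_type, so overload resolution silently drops them
for types without a sign, such as wide_int.  As a standalone sketch of
the same SFINAE idiom (hypothetical types, not the real wide-int
classes):

  struct fixed_int { long long v; };
  static fixed_int do_lshift (fixed_int x, int n) { x.v <<= n; return x; }

  /* No typedef in the primary template: substitution fails for any
     other T, which removes the operator from overload resolution
     instead of causing an error.  */
  template <typename T> struct shift_traits {};
  template <> struct shift_traits <fixed_int>
  {
    typedef fixed_int result_type;
  };

  template <typename T>
  typename shift_traits <T>::result_type
  operator << (const T &x, int n)
  {
    return do_lshift (x, n);
  }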


* Re: Support << and >> for offset_int and widest_int
  2016-04-29 12:30 Support << and >> for offset_int and widest_int Richard Sandiford
@ 2016-04-29 12:37 ` H.J. Lu
  2016-05-02  9:38   ` Richard Sandiford
  2016-05-02  8:53 ` Richard Biener
From: H.J. Lu @ 2016-04-29 12:37 UTC
  To: GCC Patches, Richard Sandiford

On Fri, Apr 29, 2016 at 5:30 AM, Richard Sandiford
<richard.sandiford@arm.com> wrote:
> Following on from the comparison patch, I think it makes sense to
> support << and >> for offset_int (int128_t) and widest_int (intNNN_t),
> with >> being arithmetic shift.  It doesn't make sense to use
> logical right shift on a potentially negative offset_int, since
> the precision of 128 bits has no meaning on the target.
>
> Tested on x86_64-linux-gnu and aarch64-linux-gnu.  OK to install?

Can you also update change_zero_ext in combine.c:

https://gcc.gnu.org/bugzilla/show_bug.cgi?id=70687

It should use wide_int << instead of HOST_WIDE_INT << to
support __int128.
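
Roughly the failure mode, as a sketch (not the actual combine.c code;
SIZE and MODE are placeholders):

  /* With a 64-bit HOST_WIDE_INT, SIZE can be up to 128 here; a shift
     count >= 64 is undefined behaviour, and the mask would not fit
     in the host type anyway.  */
  unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT) 1 << size) - 1;

  /* A wide_int sized by the mode has no such limit:  */
  wide_int wmask = wi::mask (size, false, GET_MODE_PRECISION (mode));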

-- 
H.J.


* Re: Support << and >> for offset_int and widest_int
  2016-04-29 12:30 Support << and >> for offset_int and widest_int Richard Sandiford
  2016-04-29 12:37 ` H.J. Lu
@ 2016-05-02  8:53 ` Richard Biener
From: Richard Biener @ 2016-05-02  8:53 UTC
  To: GCC Patches, richard.sandiford

On Fri, Apr 29, 2016 at 2:30 PM, Richard Sandiford
<richard.sandiford@arm.com> wrote:
> Following on from the comparison patch, I think it makes sense to
> support << and >> for offset_int (int128_t) and widest_int (intNNN_t),
> with >> being arithmetic shift.  It doesn't make sense to use
> logical right shift on a potentially negative offset_int, since
> the precision of 128 bits has no meaning on the target.
>
> Tested on x86_64-linux-gnu and aarch64-linux-gnu.  OK to install?

Ok.

Richard.


* Re: Support << and >> for offset_int and widest_int
  2016-04-29 12:37 ` H.J. Lu
@ 2016-05-02  9:38   ` Richard Sandiford
From: Richard Sandiford @ 2016-05-02  9:38 UTC
  To: H.J. Lu; +Cc: GCC Patches

"H.J. Lu" <hjl.tools@gmail.com> writes:
> On Fri, Apr 29, 2016 at 5:30 AM, Richard Sandiford
> <richard.sandiford@arm.com> wrote:
>> Following on from the comparison patch, I think it makes sense to
>> support << and >> for offset_int (int128_t) and widest_int (intNNN_t),
>> with >> being arithmetic shift.  It doesn't make sense to use
>> logical right shift on a potentially negative offset_int, since
>> the precision of 128 bits has no meaning on the target.
>>
>> Tested on x86_64-linux-gnu and aarch64-linux-gnu.  OK to install?
>
> Can you also update change_zero_ext in combine.c:
>
> https://gcc.gnu.org/bugzilla/show_bug.cgi?id=70687
>
> It should use wide_int << instead of HOST_WIDE_INT << to
> support __int128.

The patch doesn't add wide_int shift operators since wide_ints have no
sign.  You need to use wi:: shift routines for them instead.  However,
there's already a wi::mask function for creating this kind of value
directly.
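
For instance, with placeholder operands X (a wide_int), N and PREC:

  wide_int l = wi::lshift (x, n);	/* x << n */
  wide_int a = wi::arshift (x, n);	/* arithmetic x >> n */
  wide_int r = wi::lrshift (x, n);	/* logical x >> n */

  /* A mask of the low N bits, built directly rather than by
     shifting an all-ones value:  */
  wide_int m = wi::mask (n, false, prec);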

Like you say, the PR is about converting other code to use wi::, which
is very different from what this patch is doing.  I'll take the PR
anyway though.  Hope to post a patch on Wednesday.

It also looks like the code is missing a check that the ZERO_EXTEND mode
is scalar, since the transformation would be incorrect for vectors.

Thanks,
Richard


* Re: Support <, <=, > and >= for offset_int and widest_int
  2016-04-29 12:26 Support <, <=, > and >= " Richard Sandiford
@ 2016-05-02  8:50 ` Richard Biener
From: Richard Biener @ 2016-05-02  8:50 UTC
  To: GCC Patches, richard.sandiford

On Fri, Apr 29, 2016 at 2:26 PM, Richard Sandiford
<richard.sandiford@arm.com> wrote:
> offset_int and widest_int are supposed to be at least one bit wider
> than all the values they need to represent, with the extra bits
> being signs.  Thus offset_int is effectively int128_t and widest_int
> is effectively intNNN_t, for target-dependent NNN.
>
> Because the types are signed, there's not really any need to specify
> a sign for operations like comparison.  I think things would be clearer
> if we supported <, <=, > and >= for them (but not for wide_int, which
> doesn't have a sign).
>
> Tested on x86_64-linux-gnu and aarch64-linux-gnu.  OK to install?

Ok.

Thanks,
Richard.

> Thanks,
> Richard
>
>
> gcc/
>         * wide-int.h: Update offset_int and widest_int documentation.
>         (WI_SIGNED_BINARY_PREDICATE_RESULT): New macro.
>         (wi::binary_traits): Allow ordered comparisons between offset_int and
>         offset_int, between widest_int and widest_int, and between either
>         of these types and basic C types.
>         (operator <, <=, >, >=): Define for the same combinations.
>         * tree.h (tree_int_cst_lt): Use comparison operators instead
>         of wi:: comparisons.
>         (tree_int_cst_le): Likewise.
>         * gimple-fold.c (fold_array_ctor_reference): Likewise.
>         (fold_nonarray_ctor_reference): Likewise.
>         * gimple-ssa-strength-reduction.c (record_increment): Likewise.
>         * tree-affine.c (aff_comb_cannot_overlap_p): Likewise.
>         * tree-parloops.c (try_transform_to_exit_first_loop_alt): Likewise.
>         * tree-sra.c (completely_scalarize): Likewise.
>         * tree-ssa-alias.c (stmt_kills_ref_p): Likewise.
>         * tree-ssa-reassoc.c (extract_bit_test_mask): Likewise.
>         * tree-vrp.c (extract_range_from_binary_expr_1): Likewise.
>         (check_for_binary_op_overflow): Likewise.
>         (search_for_addr_array): Likewise.
>         * ubsan.c (ubsan_expand_objsize_ifn): Likewise.
>
> Index: gcc/wide-int.h
> ===================================================================
> --- gcc/wide-int.h
> +++ gcc/wide-int.h
> @@ -53,22 +53,26 @@ along with GCC; see the file COPYING3.  If not see
>       multiply, division, shifts, comparisons, and operations that need
>       overflow detected), the signedness must be specified separately.
>
> -     2) offset_int.  This is a fixed size representation that is
> -     guaranteed to be large enough to compute any bit or byte sized
> -     address calculation on the target.  Currently the value is 64 + 4
> -     bits rounded up to the next number even multiple of
> -     HOST_BITS_PER_WIDE_INT (but this can be changed when the first
> -     port needs more than 64 bits for the size of a pointer).
> -
> -     This flavor can be used for all address math on the target.  In
> -     this representation, the values are sign or zero extended based
> -     on their input types to the internal precision.  All math is done
> -     in this precision and then the values are truncated to fit in the
> -     result type.  Unlike most gimple or rtl intermediate code, it is
> -     not useful to perform the address arithmetic at the same
> -     precision in which the operands are represented because there has
> -     been no effort by the front ends to convert most addressing
> -     arithmetic to canonical types.
> +     2) offset_int.  This is a fixed-precision integer that can hold
> +     any address offset, measured in either bits or bytes, with at
> +     least one extra sign bit.  At the moment the maximum address
> +     size GCC supports is 64 bits.  With 8-bit bytes and an extra
> +     sign bit, offset_int therefore needs to have at least 68 bits
> +     of precision.  We round this up to 128 bits for efficiency.
> +     Values of type T are converted to this precision by sign- or
> +     zero-extending them based on the signedness of T.
> +
> +     The extra sign bit means that offset_int is effectively a signed
> +     128-bit integer, i.e. it behaves like int128_t.
> +
> +     Since the values are logically signed, there is no need to
> +     distinguish between signed and unsigned operations.  Sign-sensitive
> +     comparison operators <, <=, > and >= are therefore supported.
> +
> +     [ Note that, even though offset_int is effectively int128_t,
> +       it can still be useful to use unsigned comparisons like
> +       wi::leu_p (a, b) as a more efficient short-hand for
> +       "a >= 0 && a <= b". ]
>
>       3) widest_int.  This representation is an approximation of
>       infinite precision math.  However, it is not really infinite
> @@ -76,9 +80,9 @@ along with GCC; see the file COPYING3.  If not see
>       precision math where the precision is 4 times the size of the
>       largest integer that the target port can represent.
>
> -     widest_int is supposed to be wider than any number that it needs to
> -     store, meaning that there is always at least one leading sign bit.
> -     All widest_int values are therefore signed.
> +     Like offset_int, widest_int is wider than all the values that
> +     it needs to represent, so the integers are logically signed.
> +     Sign-sensitive comparison operators <, <=, > and >= are supported.
>
>       There are several places in the GCC where this should/must be used:
>
> @@ -255,6 +259,12 @@ along with GCC; see the file COPYING3.  If not see
>  #define WI_BINARY_RESULT(T1, T2) \
>    typename wi::binary_traits <T1, T2>::result_type
>
> +/* The type of result produced by a signed binary predicate on types T1 and T2.
> +   This is bool if signed comparisons make sense for T1 and T2 and leads to
> +   substitution failure otherwise.  */
> +#define WI_SIGNED_BINARY_PREDICATE_RESULT(T1, T2) \
> +  typename wi::binary_traits <T1, T2>::signed_predicate_result
> +
>  /* The type of result produced by a unary operation on type T.  */
>  #define WI_UNARY_RESULT(T) \
>    typename wi::unary_traits <T>::result_type
> @@ -316,7 +326,7 @@ namespace wi
>      VAR_PRECISION,
>
>      /* The integer has a constant precision (known at GCC compile time)
> -       but no defined signedness.  */
> +       and is signed.  */
>      CONST_PRECISION
>    };
>
> @@ -379,6 +389,7 @@ namespace wi
>         so as not to confuse gengtype.  */
>      typedef generic_wide_int < fixed_wide_int_storage
>                                <int_traits <T2>::precision> > result_type;
> +    typedef bool signed_predicate_result;
>    };
>
>    template <typename T1, typename T2>
> @@ -394,6 +405,7 @@ namespace wi
>         so as not to confuse gengtype.  */
>      typedef generic_wide_int < fixed_wide_int_storage
>                                <int_traits <T1>::precision> > result_type;
> +    typedef bool signed_predicate_result;
>    };
>
>    template <typename T1, typename T2>
> @@ -404,6 +416,7 @@ namespace wi
>      STATIC_ASSERT (int_traits <T1>::precision == int_traits <T2>::precision);
>      typedef generic_wide_int < fixed_wide_int_storage
>                                <int_traits <T1>::precision> > result_type;
> +    typedef bool signed_predicate_result;
>    };
>
>    template <typename T1, typename T2>
> @@ -3050,6 +3063,21 @@ wi::min_precision (const T &x, signop sgn)
>      return get_precision (x) - clz (x);
>  }
>
> +#define SIGNED_BINARY_PREDICATE(OP, F)                 \
> +  template <typename T1, typename T2>                  \
> +    inline WI_SIGNED_BINARY_PREDICATE_RESULT (T1, T2)  \
> +    OP (const T1 &x, const T2 &y)                      \
> +    {                                                  \
> +      return wi::F (x, y);                             \
> +    }
> +
> +SIGNED_BINARY_PREDICATE (operator <, lts_p)
> +SIGNED_BINARY_PREDICATE (operator <=, les_p)
> +SIGNED_BINARY_PREDICATE (operator >, gts_p)
> +SIGNED_BINARY_PREDICATE (operator >=, ges_p)
> +
> +#undef SIGNED_BINARY_PREDICATE
> +
>  template<typename T>
>  void
>  gt_ggc_mx (generic_wide_int <T> *)
> Index: gcc/tree.h
> ===================================================================
> --- gcc/tree.h
> +++ gcc/tree.h
> @@ -5318,7 +5318,7 @@ wi::max_value (const_tree type)
>  inline bool
>  tree_int_cst_lt (const_tree t1, const_tree t2)
>  {
> -  return wi::lts_p (wi::to_widest (t1), wi::to_widest (t2));
> +  return wi::to_widest (t1) < wi::to_widest (t2);
>  }
>
>  /* Return true if INTEGER_CST T1 is less than or equal to INTEGER_CST T2,
> @@ -5327,7 +5327,7 @@ tree_int_cst_lt (const_tree t1, const_tree t2)
>  inline bool
>  tree_int_cst_le (const_tree t1, const_tree t2)
>  {
> -  return wi::les_p (wi::to_widest (t1), wi::to_widest (t2));
> +  return wi::to_widest (t1) <= wi::to_widest (t2);
>  }
>
>  /* Returns -1 if T1 < T2, 0 if T1 == T2, and 1 if T1 > T2.  T1 and T2
> Index: gcc/gimple-fold.c
> ===================================================================
> --- gcc/gimple-fold.c
> +++ gcc/gimple-fold.c
> @@ -5380,7 +5380,7 @@ fold_array_ctor_reference (tree type, tree ctor,
>       be larger than size of array element.  */
>    if (!TYPE_SIZE_UNIT (type)
>        || TREE_CODE (TYPE_SIZE_UNIT (type)) != INTEGER_CST
> -      || wi::lts_p (elt_size, wi::to_offset (TYPE_SIZE_UNIT (type)))
> +      || elt_size < wi::to_offset (TYPE_SIZE_UNIT (type))
>        || elt_size == 0)
>      return NULL_TREE;
>
> @@ -5457,7 +5457,7 @@ fold_nonarray_ctor_reference (tree type, tree ctor,
>              fields.  */
>           if (wi::cmps (access_end, bitoffset_end) > 0)
>             return NULL_TREE;
> -         if (wi::lts_p (offset, bitoffset))
> +         if (offset < bitoffset)
>             return NULL_TREE;
>           return fold_ctor_reference (type, cval,
>                                       inner_offset.to_uhwi (), size,
> Index: gcc/gimple-ssa-strength-reduction.c
> ===================================================================
> --- gcc/gimple-ssa-strength-reduction.c
> +++ gcc/gimple-ssa-strength-reduction.c
> @@ -2506,8 +2506,7 @@ record_increment (slsr_cand_t c, widest_int increment, bool is_phi_adjust)
>        if (c->kind == CAND_ADD
>           && !is_phi_adjust
>           && c->index == increment
> -         && (wi::gts_p (increment, 1)
> -             || wi::lts_p (increment, -1))
> +         && (increment > 1 || increment < -1)
>           && (gimple_assign_rhs_code (c->cand_stmt) == PLUS_EXPR
>               || gimple_assign_rhs_code (c->cand_stmt) == POINTER_PLUS_EXPR))
>         {
> Index: gcc/tree-affine.c
> ===================================================================
> --- gcc/tree-affine.c
> +++ gcc/tree-affine.c
> @@ -929,7 +929,7 @@ aff_comb_cannot_overlap_p (aff_tree *diff, const widest_int &size1,
>    else
>      {
>        /* We succeed if the second object starts after the first one ends.  */
> -      return wi::les_p (size1, diff->offset);
> +      return size1 <= diff->offset;
>      }
>  }
>
> Index: gcc/tree-parloops.c
> ===================================================================
> --- gcc/tree-parloops.c
> +++ gcc/tree-parloops.c
> @@ -1868,7 +1868,7 @@ try_transform_to_exit_first_loop_alt (struct loop *loop,
>
>    /* Check if nit + 1 overflows.  */
>    widest_int type_max = wi::to_widest (TYPE_MAXVAL (nit_type));
> -  if (!wi::lts_p (nit_max, type_max))
> +  if (nit_max >= type_max)
>      return false;
>
>    gimple *def = SSA_NAME_DEF_STMT (nit);
> Index: gcc/tree-sra.c
> ===================================================================
> --- gcc/tree-sra.c
> +++ gcc/tree-sra.c
> @@ -1055,7 +1055,7 @@ completely_scalarize (tree base, tree decl_type, HOST_WIDE_INT offset, tree ref)
>                 idx = wi::sext (idx, TYPE_PRECISION (domain));
>                 max = wi::sext (max, TYPE_PRECISION (domain));
>               }
> -           for (int el_off = offset; wi::les_p (idx, max); ++idx)
> +           for (int el_off = offset; idx <= max; ++idx)
>               {
>                 tree nref = build4 (ARRAY_REF, elemtype,
>                                     ref,
> Index: gcc/tree-ssa-alias.c
> ===================================================================
> --- gcc/tree-ssa-alias.c
> +++ gcc/tree-ssa-alias.c
> @@ -2380,10 +2380,10 @@ stmt_kills_ref_p (gimple *stmt, ao_ref *ref)
>                   rbase = TREE_OPERAND (rbase, 0);
>                 }
>               if (base == rbase
> -                 && wi::les_p (offset, roffset)
> -                 && wi::les_p (roffset + ref->max_size,
> -                               offset + wi::lshift (wi::to_offset (len),
> -                                                    LOG2_BITS_PER_UNIT)))
> +                 && offset <= roffset
> +                 && (roffset + ref->max_size
> +                     <= offset + wi::lshift (wi::to_offset (len),
> +                                             LOG2_BITS_PER_UNIT)))
>                 return true;
>               break;
>             }
> Index: gcc/tree-ssa-reassoc.c
> ===================================================================
> --- gcc/tree-ssa-reassoc.c
> +++ gcc/tree-ssa-reassoc.c
> @@ -2464,7 +2464,7 @@ extract_bit_test_mask (tree exp, int prec, tree totallow, tree low, tree high,
>                 return NULL_TREE;
>               bias = wi::to_widest (tbias);
>               bias -= wi::to_widest (totallow);
> -             if (wi::ges_p (bias, 0) && wi::lts_p (bias, prec - max))
> +             if (bias >= 0 && bias < prec - max)
>                 {
>                   *mask = wi::lshift (*mask, bias);
>                   return ret;
> Index: gcc/tree-vrp.c
> ===================================================================
> --- gcc/tree-vrp.c
> +++ gcc/tree-vrp.c
> @@ -2749,17 +2749,17 @@ extract_range_from_binary_expr_1 (value_range *vr,
>           /* Sort the 4 products so that min is in prod0 and max is in
>              prod3.  */
>           /* min0min1 > max0max1 */
> -         if (wi::gts_p (prod0, prod3))
> +         if (prod0 > prod3)
>             std::swap (prod0, prod3);
>
>           /* min0max1 > max0min1 */
> -         if (wi::gts_p (prod1, prod2))
> +         if (prod1 > prod2)
>             std::swap (prod1, prod2);
>
> -         if (wi::gts_p (prod0, prod1))
> +         if (prod0 > prod1)
>             std::swap (prod0, prod1);
>
> -         if (wi::gts_p (prod2, prod3))
> +         if (prod2 > prod3)
>             std::swap (prod2, prod3);
>
>           /* diff = max - min.  */
> @@ -3775,7 +3775,7 @@ check_for_binary_op_overflow (enum tree_code subcode, tree type,
>        /* If all values in [wmin, wmax] are smaller than
>          [wtmin, wtmax] or all are larger than [wtmin, wtmax],
>          the arithmetic operation will always overflow.  */
> -      if (wi::lts_p (wmax, wtmin) || wi::gts_p (wmin, wtmax))
> +      if (wmax < wtmin || wmin > wtmax)
>         return true;
>        return false;
>      }
> @@ -6587,7 +6587,7 @@ search_for_addr_array (tree t, location_t location)
>
>        idx = mem_ref_offset (t);
>        idx = wi::sdiv_trunc (idx, wi::to_offset (el_sz));
> -      if (wi::lts_p (idx, 0))
> +      if (idx < 0)
>         {
>           if (dump_file && (dump_flags & TDF_DETAILS))
>             {
> @@ -6599,8 +6599,8 @@ search_for_addr_array (tree t, location_t location)
>                       "array subscript is below array bounds");
>           TREE_NO_WARNING (t) = 1;
>         }
> -      else if (wi::gts_p (idx, (wi::to_offset (up_bound)
> -                               - wi::to_offset (low_bound) + 1)))
> +      else if (idx > (wi::to_offset (up_bound)
> +                     - wi::to_offset (low_bound) + 1))
>         {
>           if (dump_file && (dump_flags & TDF_DETAILS))
>             {
> Index: gcc/ubsan.c
> ===================================================================
> --- gcc/ubsan.c
> +++ gcc/ubsan.c
> @@ -911,8 +911,8 @@ ubsan_expand_objsize_ifn (gimple_stmt_iterator *gsi)
>      /* Yes, __builtin_object_size couldn't determine the
>         object size.  */;
>    else if (TREE_CODE (offset) == INTEGER_CST
> -          && wi::ges_p (wi::to_widest (offset), -OBJSZ_MAX_OFFSET)
> -          && wi::les_p (wi::to_widest (offset), -1))
> +          && wi::to_widest (offset) >= -OBJSZ_MAX_OFFSET
> +          && wi::to_widest (offset) <= -1)
>      /* The offset is in range [-16K, -1].  */;
>    else
>      {
> @@ -928,8 +928,8 @@ ubsan_expand_objsize_ifn (gimple_stmt_iterator *gsi)
>        /* If the offset is small enough, we don't need the second
>          run-time check.  */
>        if (TREE_CODE (offset) == INTEGER_CST
> -         && wi::ges_p (wi::to_widest (offset), 0)
> -         && wi::les_p (wi::to_widest (offset), OBJSZ_MAX_OFFSET))
> +         && wi::to_widest (offset) >= 0
> +         && wi::to_widest (offset) <= OBJSZ_MAX_OFFSET)
>         *gsi = gsi_after_labels (then_bb);
>        else
>         {

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Support <, <=, > and >= for offset_int and widest_int
@ 2016-04-29 12:26 Richard Sandiford
  2016-05-02  8:50 ` Richard Biener
  0 siblings, 1 reply; 6+ messages in thread
From: Richard Sandiford @ 2016-04-29 12:26 UTC (permalink / raw)
  To: gcc-patches

offset_int and widest_int are supposed to be at least one bit wider
than all the values they need to represent, with the extra bits being
sign bits.  Thus offset_int is effectively int128_t and widest_int is
effectively intNNN_t, for some target-dependent NNN.
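
As a rough standalone analogue (a sketch only, using the GNU __int128
extension as a stand-in for offset_int; this is not GCC code), the
extra sign bits are what make plain signed comparison always
well-defined:

  #include <cassert>
  #include <cstdint>

  int main ()
  {
    /* __int128 plays the role of offset_int: it is wider than any
       64-bit input, so every converted value keeps a leading sign bit.  */
    __int128 a = UINT64_MAX;   /* zero-extended; stays positive */
    __int128 b = INT64_MIN;    /* sign-extended; stays negative */

    /* No signed/unsigned ambiguity remains: a single signed
       comparison gives the right answer.  */
    assert (b < a);
    return 0;
  }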

Because the types are signed, there's not really any need to specify
a sign for operations like comparison.  I think things would be clearer
if we supported <, <=, > and >= for them (but not for wide_int, which
doesn't have a sign).
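
For example (taken from the gimple-fold.c hunk below), a comparison
that previously needed an explicit signed wi:: call can be written
directly:

  /* Before: the sign of the comparison is spelled out by hand.  */
  if (wi::lts_p (elt_size, wi::to_offset (TYPE_SIZE_UNIT (type))))
    return NULL_TREE;

  /* After: operator< is defined only for the signed flavours and
     expands to wi::lts_p, so the intent reads directly.  */
  if (elt_size < wi::to_offset (TYPE_SIZE_UNIT (type)))
    return NULL_TREE;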

Tested on x86_64-linux-gnu and aarch64-linux-gnu.  OK to install?

Thanks,
Richard


gcc/
	* wide-int.h: Update offset_int and widest_int documentation.
	(WI_SIGNED_BINARY_PREDICATE_RESULT): New macro.
	(wi::binary_traits): Allow ordered comparisons between offset_int and
	offset_int, between widest_int and widest_int, and between either
	of these types and basic C types.
	(operator <, <=, >, >=): Define for the same combinations.
	* tree.h (tree_int_cst_lt): Use comparison operators instead
	of wi:: comparisons.
	(tree_int_cst_le): Likewise.
	* gimple-fold.c (fold_array_ctor_reference): Likewise.
	(fold_nonarray_ctor_reference): Likewise.
	* gimple-ssa-strength-reduction.c (record_increment): Likewise.
	* tree-affine.c (aff_comb_cannot_overlap_p): Likewise.
	* tree-parloops.c (try_transform_to_exit_first_loop_alt): Likewise.
	* tree-sra.c (completely_scalarize): Likewise.
	* tree-ssa-alias.c (stmt_kills_ref_p): Likewise.
	* tree-ssa-reassoc.c (extract_bit_test_mask): Likewise.
	* tree-vrp.c (extract_range_from_binary_expr_1): Likewise.
	(check_for_binary_op_overflow): Likewise.
	(search_for_addr_array): Likewise.
	* ubsan.c (ubsan_expand_objsize_ifn): Likewise.

Index: gcc/wide-int.h
===================================================================
--- gcc/wide-int.h
+++ gcc/wide-int.h
@@ -53,22 +53,26 @@ along with GCC; see the file COPYING3.  If not see
      multiply, division, shifts, comparisons, and operations that need
      overflow detected), the signedness must be specified separately.
 
-     2) offset_int.  This is a fixed size representation that is
-     guaranteed to be large enough to compute any bit or byte sized
-     address calculation on the target.  Currently the value is 64 + 4
-     bits rounded up to the next number even multiple of
-     HOST_BITS_PER_WIDE_INT (but this can be changed when the first
-     port needs more than 64 bits for the size of a pointer).
-
-     This flavor can be used for all address math on the target.  In
-     this representation, the values are sign or zero extended based
-     on their input types to the internal precision.  All math is done
-     in this precision and then the values are truncated to fit in the
-     result type.  Unlike most gimple or rtl intermediate code, it is
-     not useful to perform the address arithmetic at the same
-     precision in which the operands are represented because there has
-     been no effort by the front ends to convert most addressing
-     arithmetic to canonical types.
+     2) offset_int.  This is a fixed-precision integer that can hold
+     any address offset, measured in either bits or bytes, with at
+     least one extra sign bit.  At the moment the maximum address
+     size GCC supports is 64 bits.  With 8-bit bytes and an extra
+     sign bit, offset_int therefore needs to have at least 68 bits
+     of precision.  We round this up to 128 bits for efficiency.
+     Values of type T are converted to this precision by sign- or
+     zero-extending them based on the signedness of T.
+
+     The extra sign bit means that offset_int is effectively a signed
+     128-bit integer, i.e. it behaves like int128_t.
+
+     Since the values are logically signed, there is no need to
+     distinguish between signed and unsigned operations.  Sign-sensitive
+     comparison operators <, <=, > and >= are therefore supported.
+
+     [ Note that, even though offset_int is effectively int128_t,
+       it can still be useful to use unsigned comparisons like
+       wi::leu_p (a, b) as a more efficient short-hand for
+       "a >= 0 && a <= b". ]
 
      3) widest_int.  This representation is an approximation of
      infinite precision math.  However, it is not really infinite
@@ -76,9 +80,9 @@ along with GCC; see the file COPYING3.  If not see
      precision math where the precision is 4 times the size of the
      largest integer that the target port can represent.
 
-     widest_int is supposed to be wider than any number that it needs to
-     store, meaning that there is always at least one leading sign bit.
-     All widest_int values are therefore signed.
+     Like offset_int, widest_int is wider than all the values that
+     it needs to represent, so the integers are logically signed.
+     Sign-sensitive comparison operators <, <=, > and >= are supported.
 
      There are several places in the GCC where this should/must be used:
 
@@ -255,6 +259,12 @@ along with GCC; see the file COPYING3.  If not see
 #define WI_BINARY_RESULT(T1, T2) \
   typename wi::binary_traits <T1, T2>::result_type
 
+/* The type of result produced by a signed binary predicate on types T1 and T2.
+   This is bool if signed comparisons make sense for T1 and T2 and leads to
+   substitution failure otherwise.  */
+#define WI_SIGNED_BINARY_PREDICATE_RESULT(T1, T2) \
+  typename wi::binary_traits <T1, T2>::signed_predicate_result
+
 /* The type of result produced by a unary operation on type T.  */
 #define WI_UNARY_RESULT(T) \
   typename wi::unary_traits <T>::result_type
@@ -316,7 +326,7 @@ namespace wi
     VAR_PRECISION,
 
     /* The integer has a constant precision (known at GCC compile time)
-       but no defined signedness.  */
+       and is signed.  */
     CONST_PRECISION
   };
 
@@ -379,6 +389,7 @@ namespace wi
        so as not to confuse gengtype.  */
     typedef generic_wide_int < fixed_wide_int_storage
 			       <int_traits <T2>::precision> > result_type;
+    typedef bool signed_predicate_result;
   };
 
   template <typename T1, typename T2>
@@ -394,6 +405,7 @@ namespace wi
        so as not to confuse gengtype.  */
     typedef generic_wide_int < fixed_wide_int_storage
 			       <int_traits <T1>::precision> > result_type;
+    typedef bool signed_predicate_result;
   };
 
   template <typename T1, typename T2>
@@ -404,6 +416,7 @@ namespace wi
     STATIC_ASSERT (int_traits <T1>::precision == int_traits <T2>::precision);
     typedef generic_wide_int < fixed_wide_int_storage
 			       <int_traits <T1>::precision> > result_type;
+    typedef bool signed_predicate_result;
   };
 
   template <typename T1, typename T2>
@@ -3050,6 +3063,21 @@ wi::min_precision (const T &x, signop sgn)
     return get_precision (x) - clz (x);
 }
 
+#define SIGNED_BINARY_PREDICATE(OP, F)			\
+  template <typename T1, typename T2>			\
+    inline WI_SIGNED_BINARY_PREDICATE_RESULT (T1, T2)	\
+    OP (const T1 &x, const T2 &y)			\
+    {							\
+      return wi::F (x, y);				\
+    }
+
+SIGNED_BINARY_PREDICATE (operator <, lts_p)
+SIGNED_BINARY_PREDICATE (operator <=, les_p)
+SIGNED_BINARY_PREDICATE (operator >, gts_p)
+SIGNED_BINARY_PREDICATE (operator >=, ges_p)
+
+#undef SIGNED_BINARY_PREDICATE
+
 template<typename T>
 void
 gt_ggc_mx (generic_wide_int <T> *)
Index: gcc/tree.h
===================================================================
--- gcc/tree.h
+++ gcc/tree.h
@@ -5318,7 +5318,7 @@ wi::max_value (const_tree type)
 inline bool
 tree_int_cst_lt (const_tree t1, const_tree t2)
 {
-  return wi::lts_p (wi::to_widest (t1), wi::to_widest (t2));
+  return wi::to_widest (t1) < wi::to_widest (t2);
 }
 
 /* Return true if INTEGER_CST T1 is less than or equal to INTEGER_CST T2,
@@ -5327,7 +5327,7 @@ tree_int_cst_lt (const_tree t1, const_tree t2)
 inline bool
 tree_int_cst_le (const_tree t1, const_tree t2)
 {
-  return wi::les_p (wi::to_widest (t1), wi::to_widest (t2));
+  return wi::to_widest (t1) <= wi::to_widest (t2);
 }
 
 /* Returns -1 if T1 < T2, 0 if T1 == T2, and 1 if T1 > T2.  T1 and T2
Index: gcc/gimple-fold.c
===================================================================
--- gcc/gimple-fold.c
+++ gcc/gimple-fold.c
@@ -5380,7 +5380,7 @@ fold_array_ctor_reference (tree type, tree ctor,
      be larger than size of array element.  */
   if (!TYPE_SIZE_UNIT (type)
       || TREE_CODE (TYPE_SIZE_UNIT (type)) != INTEGER_CST
-      || wi::lts_p (elt_size, wi::to_offset (TYPE_SIZE_UNIT (type)))
+      || elt_size < wi::to_offset (TYPE_SIZE_UNIT (type))
       || elt_size == 0)
     return NULL_TREE;
 
@@ -5457,7 +5457,7 @@ fold_nonarray_ctor_reference (tree type, tree ctor,
 	     fields.  */
 	  if (wi::cmps (access_end, bitoffset_end) > 0)
 	    return NULL_TREE;
-	  if (wi::lts_p (offset, bitoffset))
+	  if (offset < bitoffset)
 	    return NULL_TREE;
 	  return fold_ctor_reference (type, cval,
 				      inner_offset.to_uhwi (), size,
Index: gcc/gimple-ssa-strength-reduction.c
===================================================================
--- gcc/gimple-ssa-strength-reduction.c
+++ gcc/gimple-ssa-strength-reduction.c
@@ -2506,8 +2506,7 @@ record_increment (slsr_cand_t c, widest_int increment, bool is_phi_adjust)
       if (c->kind == CAND_ADD
 	  && !is_phi_adjust
 	  && c->index == increment
-	  && (wi::gts_p (increment, 1)
-	      || wi::lts_p (increment, -1))
+	  && (increment > 1 || increment < -1)
 	  && (gimple_assign_rhs_code (c->cand_stmt) == PLUS_EXPR
 	      || gimple_assign_rhs_code (c->cand_stmt) == POINTER_PLUS_EXPR))
 	{
Index: gcc/tree-affine.c
===================================================================
--- gcc/tree-affine.c
+++ gcc/tree-affine.c
@@ -929,7 +929,7 @@ aff_comb_cannot_overlap_p (aff_tree *diff, const widest_int &size1,
   else
     {
       /* We succeed if the second object starts after the first one ends.  */
-      return wi::les_p (size1, diff->offset);
+      return size1 <= diff->offset;
     }
 }
 
Index: gcc/tree-parloops.c
===================================================================
--- gcc/tree-parloops.c
+++ gcc/tree-parloops.c
@@ -1868,7 +1868,7 @@ try_transform_to_exit_first_loop_alt (struct loop *loop,
 
   /* Check if nit + 1 overflows.  */
   widest_int type_max = wi::to_widest (TYPE_MAXVAL (nit_type));
-  if (!wi::lts_p (nit_max, type_max))
+  if (nit_max >= type_max)
     return false;
 
   gimple *def = SSA_NAME_DEF_STMT (nit);
Index: gcc/tree-sra.c
===================================================================
--- gcc/tree-sra.c
+++ gcc/tree-sra.c
@@ -1055,7 +1055,7 @@ completely_scalarize (tree base, tree decl_type, HOST_WIDE_INT offset, tree ref)
 		idx = wi::sext (idx, TYPE_PRECISION (domain));
 		max = wi::sext (max, TYPE_PRECISION (domain));
 	      }
-	    for (int el_off = offset; wi::les_p (idx, max); ++idx)
+	    for (int el_off = offset; idx <= max; ++idx)
 	      {
 		tree nref = build4 (ARRAY_REF, elemtype,
 				    ref,
Index: gcc/tree-ssa-alias.c
===================================================================
--- gcc/tree-ssa-alias.c
+++ gcc/tree-ssa-alias.c
@@ -2380,10 +2380,10 @@ stmt_kills_ref_p (gimple *stmt, ao_ref *ref)
 		  rbase = TREE_OPERAND (rbase, 0);
 		}
 	      if (base == rbase
-		  && wi::les_p (offset, roffset)
-		  && wi::les_p (roffset + ref->max_size,
-				offset + wi::lshift (wi::to_offset (len),
-						     LOG2_BITS_PER_UNIT)))
+		  && offset <= roffset
+		  && (roffset + ref->max_size
+		      <= offset + wi::lshift (wi::to_offset (len),
+					      LOG2_BITS_PER_UNIT)))
 		return true;
 	      break;
 	    }
Index: gcc/tree-ssa-reassoc.c
===================================================================
--- gcc/tree-ssa-reassoc.c
+++ gcc/tree-ssa-reassoc.c
@@ -2464,7 +2464,7 @@ extract_bit_test_mask (tree exp, int prec, tree totallow, tree low, tree high,
 		return NULL_TREE;
 	      bias = wi::to_widest (tbias);
 	      bias -= wi::to_widest (totallow);
-	      if (wi::ges_p (bias, 0) && wi::lts_p (bias, prec - max))
+	      if (bias >= 0 && bias < prec - max)
 		{
 		  *mask = wi::lshift (*mask, bias);
 		  return ret;
Index: gcc/tree-vrp.c
===================================================================
--- gcc/tree-vrp.c
+++ gcc/tree-vrp.c
@@ -2749,17 +2749,17 @@ extract_range_from_binary_expr_1 (value_range *vr,
 	  /* Sort the 4 products so that min is in prod0 and max is in
 	     prod3.  */
 	  /* min0min1 > max0max1 */
-	  if (wi::gts_p (prod0, prod3))
+	  if (prod0 > prod3)
 	    std::swap (prod0, prod3);
 
 	  /* min0max1 > max0min1 */
-	  if (wi::gts_p (prod1, prod2))
+	  if (prod1 > prod2)
 	    std::swap (prod1, prod2);
 
-	  if (wi::gts_p (prod0, prod1))
+	  if (prod0 > prod1)
 	    std::swap (prod0, prod1);
 
-	  if (wi::gts_p (prod2, prod3))
+	  if (prod2 > prod3)
 	    std::swap (prod2, prod3);
 
 	  /* diff = max - min.  */
@@ -3775,7 +3775,7 @@ check_for_binary_op_overflow (enum tree_code subcode, tree type,
       /* If all values in [wmin, wmax] are smaller than
 	 [wtmin, wtmax] or all are larger than [wtmin, wtmax],
 	 the arithmetic operation will always overflow.  */
-      if (wi::lts_p (wmax, wtmin) || wi::gts_p (wmin, wtmax))
+      if (wmax < wtmin || wmin > wtmax)
 	return true;
       return false;
     }
@@ -6587,7 +6587,7 @@ search_for_addr_array (tree t, location_t location)
 
       idx = mem_ref_offset (t);
       idx = wi::sdiv_trunc (idx, wi::to_offset (el_sz));
-      if (wi::lts_p (idx, 0))
+      if (idx < 0)
 	{
 	  if (dump_file && (dump_flags & TDF_DETAILS))
 	    {
@@ -6599,8 +6599,8 @@ search_for_addr_array (tree t, location_t location)
 		      "array subscript is below array bounds");
 	  TREE_NO_WARNING (t) = 1;
 	}
-      else if (wi::gts_p (idx, (wi::to_offset (up_bound)
-				- wi::to_offset (low_bound) + 1)))
+      else if (idx > (wi::to_offset (up_bound)
+		      - wi::to_offset (low_bound) + 1))
 	{
 	  if (dump_file && (dump_flags & TDF_DETAILS))
 	    {
Index: gcc/ubsan.c
===================================================================
--- gcc/ubsan.c
+++ gcc/ubsan.c
@@ -911,8 +911,8 @@ ubsan_expand_objsize_ifn (gimple_stmt_iterator *gsi)
     /* Yes, __builtin_object_size couldn't determine the
        object size.  */;
   else if (TREE_CODE (offset) == INTEGER_CST
-	   && wi::ges_p (wi::to_widest (offset), -OBJSZ_MAX_OFFSET)
-	   && wi::les_p (wi::to_widest (offset), -1))
+	   && wi::to_widest (offset) >= -OBJSZ_MAX_OFFSET
+	   && wi::to_widest (offset) <= -1)
     /* The offset is in range [-16K, -1].  */;
   else
     {
@@ -928,8 +928,8 @@ ubsan_expand_objsize_ifn (gimple_stmt_iterator *gsi)
       /* If the offset is small enough, we don't need the second
 	 run-time check.  */
       if (TREE_CODE (offset) == INTEGER_CST
-	  && wi::ges_p (wi::to_widest (offset), 0)
-	  && wi::les_p (wi::to_widest (offset), OBJSZ_MAX_OFFSET))
+	  && wi::to_widest (offset) >= 0
+	  && wi::to_widest (offset) <= OBJSZ_MAX_OFFSET)
 	*gsi = gsi_after_labels (then_bb);
       else
 	{
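
The SIGNED_BINARY_PREDICATE definitions above work by substitution
failure: the operators only take part in overload resolution for type
pairs whose binary_traits define signed_predicate_result, so ordered
comparisons on wide_int still fail to compile.  A minimal standalone
analogue of the scheme (the names below are illustrative, not the
real wide-int ones):

  struct offset_like {};  /* plays the role of offset_int */
  struct wide_like {};    /* plays the role of wide_int (no sign) */

  /* Primary traits: no signed_predicate_result member, so the
     operator below drops out of overload resolution.  */
  template <typename T1, typename T2>
  struct my_traits {};

  template <>
  struct my_traits <offset_like, offset_like>
  {
    typedef bool signed_predicate_result;
  };

  /* Substitution fails silently (SFINAE) unless the traits supply
     the result type, so this operator exists only for signed pairs.  */
  template <typename T1, typename T2>
  inline typename my_traits <T1, T2>::signed_predicate_result
  operator< (const T1 &, const T2 &)
  {
    return true;  /* the real operator forwards to wi::lts_p */
  }

  int main ()
  {
    offset_like a, b;
    (void) (a < b);   /* OK: the traits supply the result type */
    /* wide_like c, d;
       (void) (c < d);   would not compile: no signed comparison.  */
    return 0;
  }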

^ permalink raw reply	[flat|nested] 6+ messages in thread

end of thread, other threads:[~2016-05-02  9:38 UTC | newest]

Thread overview: 6+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2016-04-29 12:30 Support << and >> for offset_int and widest_int Richard Sandiford
2016-04-29 12:37 ` H.J. Lu
2016-05-02  9:38   ` Richard Sandiford
2016-05-02  8:53 ` Richard Biener
  -- strict thread matches above, loose matches on Subject: below --
2016-04-29 12:26 Support <, <=, > and >= " Richard Sandiford
2016-05-02  8:50 ` Richard Biener
