From: Richard Sandiford <richard.sandiford@linaro.org>
To: gcc-patches@gcc.gnu.org
Subject: [015/nnn] poly_int: ao_ref and vn_reference_op_t
Date: Mon, 23 Oct 2017 17:06:00 -0000
Message-ID: <873769ssje.fsf@linaro.org>
In-Reply-To: <871sltvm7r.fsf@linaro.org> (Richard Sandiford's message of "Mon, 23 Oct 2017 17:54:32 +0100")

This patch changes the offset, size and max_size fields
of ao_ref from HOST_WIDE_INT to poly_int64 and propagates
the change through the code that uses them.  This includes
changing the off field of vn_reference_op_struct in the same way.

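For reference, the underlying idea (from poly-int.h, added in patch
001 of this series): a poly_int64 represents a value of the form
a + b*X, where X is a runtime invariant such as the number of extra
elements in a variable-length vector, so comparisons come in "must"
and "may" forms.  A minimal self-contained model of the degree-1 case
-- not the real implementation, just a sketch of the semantics -- is:

  #include <cstdint>

  /* Toy model: the value is a + b*X for some unknown runtime X >= 0.  */
  struct toy_poly_int64
  {
    int64_t a, b;
  };

  /* True iff the two values are equal for every X >= 0.  */
  static bool toy_must_eq (toy_poly_int64 x, toy_poly_int64 y)
  {
    return x.a == y.a && x.b == y.b;
  }

  /* True iff the two values can differ for some X >= 0.  */
  static bool toy_may_ne (toy_poly_int64 x, toy_poly_int64 y)
  {
    return !toy_must_eq (x, y);
  }

  /* True iff x <= y for every X >= 0: both the constant term and the
     coefficient of X must be <=.  */
  static bool toy_must_le (toy_poly_int64 x, toy_poly_int64 y)
  {
    return x.a <= y.a && x.b <= y.b;
  }

Most of the patch therefore consists of replacing exact ==/!=/<= tests
with the appropriate must_*/may_* form and of keeping offsets and
sizes as poly_int64 for as long as possible.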

2017-10-23  Richard Sandiford  <richard.sandiford@linaro.org>
	    Alan Hayward  <alan.hayward@arm.com>
	    David Sherwood  <david.sherwood@arm.com>

gcc/
	* inchash.h (inchash::hash::add_poly_int): New function.
	* tree-ssa-alias.h (ao_ref::offset, ao_ref::size, ao_ref::max_size):
	Use poly_int64 rather than HOST_WIDE_INT.
	(ao_ref::max_size_known_p): New function.
	* tree-ssa-sccvn.h (vn_reference_op_struct::off): Use poly_int64_pod
	rather than HOST_WIDE_INT.
	* tree-ssa-alias.c (ao_ref_base): Apply get_ref_base_and_extent
	to temporaries until its interface is adjusted to match.
	(ao_ref_init_from_ptr_and_size): Handle polynomial offsets and sizes.
	(aliasing_component_refs_p, decl_refs_may_alias_p)
	(indirect_ref_may_alias_decl_p, indirect_refs_may_alias_p): Take
	the offsets and max_sizes as poly_int64s instead of HOST_WIDE_INTs.
	(refs_may_alias_p_1, stmt_kills_ref_p): Adjust for changes to
	ao_ref fields.
	* alias.c (ao_ref_from_mem): Likewise.
	* tree-ssa-dce.c (mark_aliased_reaching_defs_necessary_1): Likewise.
	* tree-ssa-dse.c (valid_ao_ref_for_dse, normalize_ref)
	(clear_bytes_written_by, setup_live_bytes_from_ref, compute_trims)
	(maybe_trim_complex_store, maybe_trim_constructor_store)
	(live_bytes_read, dse_classify_store): Likewise.
	* tree-ssa-sccvn.c (vn_reference_compute_hash, vn_reference_eq)
	(copy_reference_ops_from_ref, ao_ref_init_from_vn_reference)
	(fully_constant_vn_reference_p, valueize_refs_1): Likewise.
	(vn_reference_lookup_3): Likewise.
	* tree-ssa-uninit.c (warn_uninitialized_vars): Likewise.

Index: gcc/inchash.h
===================================================================
--- gcc/inchash.h	2017-10-23 17:01:43.314993320 +0100
+++ gcc/inchash.h	2017-10-23 17:01:52.303181137 +0100
@@ -57,6 +57,14 @@ hashval_t iterative_hash_hashval_t (hash
     val = iterative_hash_hashval_t (v, val);
   }
 
+  /* Add polynomial value V, treating each element as an unsigned int.  */
+  template<unsigned int N, typename T>
+  void add_poly_int (const poly_int_pod<N, T> &v)
+  {
+    for (unsigned int i = 0; i < N; ++i)
+      add_int (v.coeffs[i]);
+  }
+
   /* Add HOST_WIDE_INT value V.  */
   void add_hwi (HOST_WIDE_INT v)
   {
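
A minimal usage sketch of the new member (the tree-ssa-sccvn.c hunks
below use it this way): each coefficient of the polynomial is mixed
into the running hash, which degenerates to the old add_int behaviour
when the offset is a compile-time constant.

  inchash::hash hstate;
  poly_int64 off = 0;
  /* ... accumulate vn_reference_op offsets into OFF ... */
  hstate.add_poly_int (off);
  hashval_t result = hstate.end ();
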
Index: gcc/tree-ssa-alias.h
===================================================================
--- gcc/tree-ssa-alias.h	2017-10-23 16:52:20.058356365 +0100
+++ gcc/tree-ssa-alias.h	2017-10-23 17:01:52.304179714 +0100
@@ -80,11 +80,11 @@ struct ao_ref
      the following fields are not yet computed.  */
   tree base;
   /* The offset relative to the base.  */
-  HOST_WIDE_INT offset;
+  poly_int64 offset;
   /* The size of the access.  */
-  HOST_WIDE_INT size;
+  poly_int64 size;
   /* The maximum possible extent of the access or -1 if unconstrained.  */
-  HOST_WIDE_INT max_size;
+  poly_int64 max_size;
 
   /* The alias set of the access or -1 if not yet computed.  */
   alias_set_type ref_alias_set;
@@ -94,8 +94,18 @@ struct ao_ref
 
   /* Whether the memory is considered a volatile access.  */
   bool volatile_p;
+
+  bool max_size_known_p () const;
 };
 
+/* Return true if the maximum size is known, rather than the special -1
+   marker.  */
+
+inline bool
+ao_ref::max_size_known_p () const
+{
+  return known_size_p (max_size);
+}
 
 /* In tree-ssa-alias.c  */
 extern void ao_ref_init (ao_ref *, tree);
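
A sketch of how the new accessor reads at the call sites converted
below; the special -1 encoding for "unconstrained" is kept, but the
test for it is now centralised:

  /* For a must-alias check we need to be able to constrain
     the access properly.  */
  if (!ref->max_size_known_p ())    /* previously: ref->max_size == -1 */
    return false;
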
Index: gcc/tree-ssa-sccvn.h
===================================================================
--- gcc/tree-ssa-sccvn.h	2017-10-23 16:52:20.058356365 +0100
+++ gcc/tree-ssa-sccvn.h	2017-10-23 17:01:52.305178291 +0100
@@ -93,7 +93,7 @@ typedef struct vn_reference_op_struct
   /* For storing TYPE_ALIGN for array ref element size computation.  */
   unsigned align : 6;
   /* Constant offset this op adds or -1 if it is variable.  */
-  HOST_WIDE_INT off;
+  poly_int64_pod off;
   tree type;
   tree op0;
   tree op1;
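
The off field uses poly_int64_pod rather than poly_int64 because
vn_reference_op_struct is treated as plain data (held in vecs and
copied wholesale), so its fields need to stay trivially constructible;
the _pod types from poly-int.h are assumed here to provide the same
operations without constructors.  A small sketch, using the -1
"variable offset" marker convention from the surrounding code:

  vn_reference_op_s op = {};
  op.off = -1;                          /* "variable offset", as before */
  bool off_variable = must_eq (op.off, -1);   /* must_eq/may_ne, not == */
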
Index: gcc/tree-ssa-alias.c
===================================================================
--- gcc/tree-ssa-alias.c	2017-10-23 17:01:51.044974644 +0100
+++ gcc/tree-ssa-alias.c	2017-10-23 17:01:52.304179714 +0100
@@ -635,11 +635,15 @@ ao_ref_init (ao_ref *r, tree ref)
 ao_ref_base (ao_ref *ref)
 {
   bool reverse;
+  HOST_WIDE_INT offset, size, max_size;
 
   if (ref->base)
     return ref->base;
-  ref->base = get_ref_base_and_extent (ref->ref, &ref->offset, &ref->size,
-				       &ref->max_size, &reverse);
+  ref->base = get_ref_base_and_extent (ref->ref, &offset, &size,
+				       &max_size, &reverse);
+  ref->offset = offset;
+  ref->size = size;
+  ref->max_size = max_size;
   return ref->base;
 }
 
@@ -679,7 +683,8 @@ ao_ref_alias_set (ao_ref *ref)
 void
 ao_ref_init_from_ptr_and_size (ao_ref *ref, tree ptr, tree size)
 {
-  HOST_WIDE_INT t, size_hwi, extra_offset = 0;
+  HOST_WIDE_INT t;
+  poly_int64 size_hwi, extra_offset = 0;
   ref->ref = NULL_TREE;
   if (TREE_CODE (ptr) == SSA_NAME)
     {
@@ -689,11 +694,10 @@ ao_ref_init_from_ptr_and_size (ao_ref *r
 	ptr = gimple_assign_rhs1 (stmt);
       else if (is_gimple_assign (stmt)
 	       && gimple_assign_rhs_code (stmt) == POINTER_PLUS_EXPR
-	       && TREE_CODE (gimple_assign_rhs2 (stmt)) == INTEGER_CST)
+	       && ptrdiff_tree_p (gimple_assign_rhs2 (stmt), &extra_offset))
 	{
 	  ptr = gimple_assign_rhs1 (stmt);
-	  extra_offset = BITS_PER_UNIT
-			 * int_cst_value (gimple_assign_rhs2 (stmt));
+	  extra_offset *= BITS_PER_UNIT;
 	}
     }
 
@@ -717,8 +721,8 @@ ao_ref_init_from_ptr_and_size (ao_ref *r
     }
   ref->offset += extra_offset;
   if (size
-      && tree_fits_shwi_p (size)
-      && (size_hwi = tree_to_shwi (size)) <= HOST_WIDE_INT_MAX / BITS_PER_UNIT)
+      && poly_int_tree_p (size, &size_hwi)
+      && coeffs_in_range_p (size_hwi, 0, HOST_WIDE_INT_MAX / BITS_PER_UNIT))
     ref->max_size = ref->size = size_hwi * BITS_PER_UNIT;
   else
     ref->max_size = ref->size = -1;
@@ -779,11 +783,11 @@ same_type_for_tbaa (tree type1, tree typ
 aliasing_component_refs_p (tree ref1,
 			   alias_set_type ref1_alias_set,
 			   alias_set_type base1_alias_set,
-			   HOST_WIDE_INT offset1, HOST_WIDE_INT max_size1,
+			   poly_int64 offset1, poly_int64 max_size1,
 			   tree ref2,
 			   alias_set_type ref2_alias_set,
 			   alias_set_type base2_alias_set,
-			   HOST_WIDE_INT offset2, HOST_WIDE_INT max_size2,
+			   poly_int64 offset2, poly_int64 max_size2,
 			   bool ref2_is_decl)
 {
   /* If one reference is a component references through pointers try to find a
@@ -825,7 +829,7 @@ aliasing_component_refs_p (tree ref1,
       offset2 -= offadj;
       get_ref_base_and_extent (base1, &offadj, &sztmp, &msztmp, &reverse);
       offset1 -= offadj;
-      return ranges_overlap_p (offset1, max_size1, offset2, max_size2);
+      return ranges_may_overlap_p (offset1, max_size1, offset2, max_size2);
     }
   /* If we didn't find a common base, try the other way around.  */
   refp = &ref1;
@@ -844,7 +848,7 @@ aliasing_component_refs_p (tree ref1,
       offset1 -= offadj;
       get_ref_base_and_extent (base2, &offadj, &sztmp, &msztmp, &reverse);
       offset2 -= offadj;
-      return ranges_overlap_p (offset1, max_size1, offset2, max_size2);
+      return ranges_may_overlap_p (offset1, max_size1, offset2, max_size2);
     }
 
   /* If we have two type access paths B1.path1 and B2.path2 they may
@@ -1090,9 +1094,9 @@ nonoverlapping_component_refs_p (const_t
 
 static bool
 decl_refs_may_alias_p (tree ref1, tree base1,
-		       HOST_WIDE_INT offset1, HOST_WIDE_INT max_size1,
+		       poly_int64 offset1, poly_int64 max_size1,
 		       tree ref2, tree base2,
-		       HOST_WIDE_INT offset2, HOST_WIDE_INT max_size2)
+		       poly_int64 offset2, poly_int64 max_size2)
 {
   gcc_checking_assert (DECL_P (base1) && DECL_P (base2));
 
@@ -1102,7 +1106,7 @@ decl_refs_may_alias_p (tree ref1, tree b
 
   /* If both references are based on the same variable, they cannot alias if
      the accesses do not overlap.  */
-  if (!ranges_overlap_p (offset1, max_size1, offset2, max_size2))
+  if (!ranges_may_overlap_p (offset1, max_size1, offset2, max_size2))
     return false;
 
   /* For components with variable position, the above test isn't sufficient,
@@ -1124,12 +1128,11 @@ decl_refs_may_alias_p (tree ref1, tree b
 
 static bool
 indirect_ref_may_alias_decl_p (tree ref1 ATTRIBUTE_UNUSED, tree base1,
-			       HOST_WIDE_INT offset1,
-			       HOST_WIDE_INT max_size1 ATTRIBUTE_UNUSED,
+			       poly_int64 offset1, poly_int64 max_size1,
 			       alias_set_type ref1_alias_set,
 			       alias_set_type base1_alias_set,
 			       tree ref2 ATTRIBUTE_UNUSED, tree base2,
-			       HOST_WIDE_INT offset2, HOST_WIDE_INT max_size2,
+			       poly_int64 offset2, poly_int64 max_size2,
 			       alias_set_type ref2_alias_set,
 			       alias_set_type base2_alias_set, bool tbaa_p)
 {
@@ -1185,14 +1188,15 @@ indirect_ref_may_alias_decl_p (tree ref1
      is bigger than the size of the decl we can't possibly access the
      decl via that pointer.  */
   if (DECL_SIZE (base2) && COMPLETE_TYPE_P (TREE_TYPE (ptrtype1))
-      && TREE_CODE (DECL_SIZE (base2)) == INTEGER_CST
-      && TREE_CODE (TYPE_SIZE (TREE_TYPE (ptrtype1))) == INTEGER_CST
+      && poly_int_tree_p (DECL_SIZE (base2))
+      && poly_int_tree_p (TYPE_SIZE (TREE_TYPE (ptrtype1)))
       /* ???  This in turn may run afoul when a decl of type T which is
 	 a member of union type U is accessed through a pointer to
 	 type U and sizeof T is smaller than sizeof U.  */
       && TREE_CODE (TREE_TYPE (ptrtype1)) != UNION_TYPE
       && TREE_CODE (TREE_TYPE (ptrtype1)) != QUAL_UNION_TYPE
-      && tree_int_cst_lt (DECL_SIZE (base2), TYPE_SIZE (TREE_TYPE (ptrtype1))))
+      && must_lt (wi::to_poly_widest (DECL_SIZE (base2)),
+		  wi::to_poly_widest (TYPE_SIZE (TREE_TYPE (ptrtype1)))))
     return false;
 
   if (!ref2)
@@ -1203,8 +1207,8 @@ indirect_ref_may_alias_decl_p (tree ref1
   dbase2 = ref2;
   while (handled_component_p (dbase2))
     dbase2 = TREE_OPERAND (dbase2, 0);
-  HOST_WIDE_INT doffset1 = offset1;
-  offset_int doffset2 = offset2;
+  poly_int64 doffset1 = offset1;
+  poly_offset_int doffset2 = offset2;
   if (TREE_CODE (dbase2) == MEM_REF
       || TREE_CODE (dbase2) == TARGET_MEM_REF)
     doffset2 -= mem_ref_offset (dbase2) << LOG2_BITS_PER_UNIT;
@@ -1252,11 +1256,11 @@ indirect_ref_may_alias_decl_p (tree ref1
 
 static bool
 indirect_refs_may_alias_p (tree ref1 ATTRIBUTE_UNUSED, tree base1,
-			   HOST_WIDE_INT offset1, HOST_WIDE_INT max_size1,
+			   poly_int64 offset1, poly_int64 max_size1,
 			   alias_set_type ref1_alias_set,
 			   alias_set_type base1_alias_set,
 			   tree ref2 ATTRIBUTE_UNUSED, tree base2,
-			   HOST_WIDE_INT offset2, HOST_WIDE_INT max_size2,
+			   poly_int64 offset2, poly_int64 max_size2,
 			   alias_set_type ref2_alias_set,
 			   alias_set_type base2_alias_set, bool tbaa_p)
 {
@@ -1330,7 +1334,7 @@ indirect_refs_may_alias_p (tree ref1 ATT
       /* But avoid treating arrays as "objects", instead assume they
          can overlap by an exact multiple of their element size.  */
       && TREE_CODE (TREE_TYPE (ptrtype1)) != ARRAY_TYPE)
-    return ranges_overlap_p (offset1, max_size1, offset2, max_size2);
+    return ranges_may_overlap_p (offset1, max_size1, offset2, max_size2);
 
   /* Do type-based disambiguation.  */
   if (base1_alias_set != base2_alias_set
@@ -1365,8 +1369,8 @@ indirect_refs_may_alias_p (tree ref1 ATT
 refs_may_alias_p_1 (ao_ref *ref1, ao_ref *ref2, bool tbaa_p)
 {
   tree base1, base2;
-  HOST_WIDE_INT offset1 = 0, offset2 = 0;
-  HOST_WIDE_INT max_size1 = -1, max_size2 = -1;
+  poly_int64 offset1 = 0, offset2 = 0;
+  poly_int64 max_size1 = -1, max_size2 = -1;
   bool var1_p, var2_p, ind1_p, ind2_p;
 
   gcc_checking_assert ((!ref1->ref
@@ -2444,14 +2448,17 @@ stmt_kills_ref_p (gimple *stmt, ao_ref *
          handling constant offset and size.  */
       /* For a must-alias check we need to be able to constrain
 	 the access properly.  */
-      if (ref->max_size == -1)
+      if (!ref->max_size_known_p ())
 	return false;
-      HOST_WIDE_INT size, offset, max_size, ref_offset = ref->offset;
+      HOST_WIDE_INT size, max_size, const_offset;
+      poly_int64 ref_offset = ref->offset;
       bool reverse;
       tree base
-	= get_ref_base_and_extent (lhs, &offset, &size, &max_size, &reverse);
+	= get_ref_base_and_extent (lhs, &const_offset, &size, &max_size,
+				   &reverse);
       /* We can get MEM[symbol: sZ, index: D.8862_1] here,
 	 so base == ref->base does not always hold.  */
+      poly_int64 offset = const_offset;
       if (base != ref->base)
 	{
 	  /* Try using points-to info.  */
@@ -2468,18 +2475,13 @@ stmt_kills_ref_p (gimple *stmt, ao_ref *
 	      if (!tree_int_cst_equal (TREE_OPERAND (base, 1),
 				       TREE_OPERAND (ref->base, 1)))
 		{
-		  offset_int off1 = mem_ref_offset (base);
+		  poly_offset_int off1 = mem_ref_offset (base);
 		  off1 <<= LOG2_BITS_PER_UNIT;
 		  off1 += offset;
-		  offset_int off2 = mem_ref_offset (ref->base);
+		  poly_offset_int off2 = mem_ref_offset (ref->base);
 		  off2 <<= LOG2_BITS_PER_UNIT;
 		  off2 += ref_offset;
-		  if (wi::fits_shwi_p (off1) && wi::fits_shwi_p (off2))
-		    {
-		      offset = off1.to_shwi ();
-		      ref_offset = off2.to_shwi ();
-		    }
-		  else
+		  if (!off1.to_shwi (&offset) || !off2.to_shwi (&ref_offset))
 		    size = -1;
 		}
 	    }
@@ -2488,12 +2490,9 @@ stmt_kills_ref_p (gimple *stmt, ao_ref *
 	}
       /* For a must-alias check we need to be able to constrain
 	 the access properly.  */
-      if (size != -1 && size == max_size)
-	{
-	  if (offset <= ref_offset
-	      && offset + size >= ref_offset + ref->max_size)
-	    return true;
-	}
+      if (size == max_size
+	  && known_subrange_p (ref_offset, ref->max_size, offset, size))
+	return true;
     }
 
   if (is_gimple_call (stmt))
@@ -2526,19 +2525,19 @@ stmt_kills_ref_p (gimple *stmt, ao_ref *
 	    {
 	      /* For a must-alias check we need to be able to constrain
 		 the access properly.  */
-	      if (ref->max_size == -1)
+	      if (!ref->max_size_known_p ())
 		return false;
 	      tree dest = gimple_call_arg (stmt, 0);
 	      tree len = gimple_call_arg (stmt, 2);
-	      if (!tree_fits_shwi_p (len))
+	      if (!poly_int_tree_p (len))
 		return false;
 	      tree rbase = ref->base;
-	      offset_int roffset = ref->offset;
+	      poly_offset_int roffset = ref->offset;
 	      ao_ref dref;
 	      ao_ref_init_from_ptr_and_size (&dref, dest, len);
 	      tree base = ao_ref_base (&dref);
-	      offset_int offset = dref.offset;
-	      if (!base || dref.size == -1)
+	      poly_offset_int offset = dref.offset;
+	      if (!base || !known_size_p (dref.size))
 		return false;
 	      if (TREE_CODE (base) == MEM_REF)
 		{
@@ -2551,9 +2550,9 @@ stmt_kills_ref_p (gimple *stmt, ao_ref *
 		  rbase = TREE_OPERAND (rbase, 0);
 		}
 	      if (base == rbase
-		  && offset <= roffset
-		  && (roffset + ref->max_size
-		      <= offset + (wi::to_offset (len) << LOG2_BITS_PER_UNIT)))
+		  && known_subrange_p (roffset, ref->max_size, offset,
+				       wi::to_poly_offset (len)
+				       << LOG2_BITS_PER_UNIT))
 		return true;
 	      break;
 	    }
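
Note the asymmetry in the conservative defaults above (a sketch of the
two predicates as used, not of their definitions):

  /* May-alias queries: assume overlap unless provably disjoint for
     all runtime values of the invariant.  */
  if (!ranges_may_overlap_p (offset1, max_size1, offset2, max_size2))
    return false;                /* definitely no alias */

  /* Must-alias/kill queries: require [ref_offset, ref_offset + max_size)
     to be provably contained in [offset, offset + size).  */
  if (size == max_size
      && known_subrange_p (ref_offset, ref->max_size, offset, size))
    return true;                 /* the store is known to cover all of REF */
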
Index: gcc/alias.c
===================================================================
--- gcc/alias.c	2017-10-23 16:52:20.058356365 +0100
+++ gcc/alias.c	2017-10-23 17:01:52.303181137 +0100
@@ -331,9 +331,9 @@ ao_ref_from_mem (ao_ref *ref, const_rtx
   /* If MEM_OFFSET/MEM_SIZE get us outside of ref->offset/ref->max_size
      drop ref->ref.  */
   if (MEM_OFFSET (mem) < 0
-      || (ref->max_size != -1
-	  && ((MEM_OFFSET (mem) + MEM_SIZE (mem)) * BITS_PER_UNIT
-	      > ref->max_size)))
+      || (ref->max_size_known_p ()
+	  && may_gt ((MEM_OFFSET (mem) + MEM_SIZE (mem)) * BITS_PER_UNIT,
+		     ref->max_size)))
     ref->ref = NULL_TREE;
 
   /* Refine size and offset we got from analyzing MEM_EXPR by using
@@ -344,19 +344,18 @@ ao_ref_from_mem (ao_ref *ref, const_rtx
 
   /* The MEM may extend into adjacent fields, so adjust max_size if
      necessary.  */
-  if (ref->max_size != -1
-      && ref->size > ref->max_size)
-    ref->max_size = ref->size;
+  if (ref->max_size_known_p ())
+    ref->max_size = upper_bound (ref->max_size, ref->size);
 
-  /* If MEM_OFFSET and MEM_SIZE get us outside of the base object of
+  /* If MEM_OFFSET and MEM_SIZE might get us outside of the base object of
      the MEM_EXPR punt.  This happens for STRICT_ALIGNMENT targets a lot.  */
   if (MEM_EXPR (mem) != get_spill_slot_decl (false)
-      && (ref->offset < 0
+      && (may_lt (ref->offset, 0)
 	  || (DECL_P (ref->base)
 	      && (DECL_SIZE (ref->base) == NULL_TREE
-		  || TREE_CODE (DECL_SIZE (ref->base)) != INTEGER_CST
-		  || wi::ltu_p (wi::to_offset (DECL_SIZE (ref->base)),
-				ref->offset + ref->size)))))
+		  || !poly_int_tree_p (DECL_SIZE (ref->base))
+		  || may_lt (wi::to_poly_offset (DECL_SIZE (ref->base)),
+			     ref->offset + ref->size)))))
     return false;
 
   return true;
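
upper_bound is used above instead of the old "if (size > max_size)
max_size = size" because two poly_int64s need not be ordered for all
runtime values of the invariant; upper_bound yields a value that is
>= both inputs for every X.  A sketch:

  /* E.g. ref->size = 4 + 4X, ref->max_size = 8: neither dominates,
     but upper_bound (8, 4 + 4X) = 8 + 4X is a safe maximum extent.  */
  if (ref->max_size_known_p ())
    ref->max_size = upper_bound (ref->max_size, ref->size);
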
Index: gcc/tree-ssa-dce.c
===================================================================
--- gcc/tree-ssa-dce.c	2017-10-23 16:52:20.058356365 +0100
+++ gcc/tree-ssa-dce.c	2017-10-23 17:01:52.304179714 +0100
@@ -488,13 +488,9 @@ mark_aliased_reaching_defs_necessary_1 (
 	{
 	  /* For a must-alias check we need to be able to constrain
 	     the accesses properly.  */
-	  if (size != -1 && size == max_size
-	      && ref->max_size != -1)
-	    {
-	      if (offset <= ref->offset
-		  && offset + size >= ref->offset + ref->max_size)
-		return true;
-	    }
+	  if (size == max_size
+	      && known_subrange_p (ref->offset, ref->max_size, offset, size))
+	    return true;
 	  /* Or they need to be exactly the same.  */
 	  else if (ref->ref
 		   /* Make sure there is no induction variable involved
Index: gcc/tree-ssa-dse.c
===================================================================
--- gcc/tree-ssa-dse.c	2017-10-23 16:52:20.058356365 +0100
+++ gcc/tree-ssa-dse.c	2017-10-23 17:01:52.304179714 +0100
@@ -128,13 +128,12 @@ initialize_ao_ref_for_dse (gimple *stmt,
 valid_ao_ref_for_dse (ao_ref *ref)
 {
   return (ao_ref_base (ref)
-	  && ref->max_size != -1
-	  && ref->size != 0
-	  && ref->max_size == ref->size
-	  && ref->offset >= 0
-	  && (ref->offset % BITS_PER_UNIT) == 0
-	  && (ref->size % BITS_PER_UNIT) == 0
-	  && (ref->size != -1));
+	  && known_size_p (ref->max_size)
+	  && maybe_nonzero (ref->size)
+	  && must_eq (ref->max_size, ref->size)
+	  && must_ge (ref->offset, 0)
+	  && multiple_p (ref->offset, BITS_PER_UNIT)
+	  && multiple_p (ref->size, BITS_PER_UNIT));
 }
 
 /* Try to normalize COPY (an ao_ref) relative to REF.  Essentially when we are
@@ -144,25 +143,31 @@ valid_ao_ref_for_dse (ao_ref *ref)
 static bool
 normalize_ref (ao_ref *copy, ao_ref *ref)
 {
+  if (!ordered_p (copy->offset, ref->offset))
+    return false;
+
   /* If COPY starts before REF, then reset the beginning of
      COPY to match REF and decrease the size of COPY by the
      number of bytes removed from COPY.  */
-  if (copy->offset < ref->offset)
+  if (may_lt (copy->offset, ref->offset))
     {
-      HOST_WIDE_INT diff = ref->offset - copy->offset;
-      if (copy->size <= diff)
+      poly_int64 diff = ref->offset - copy->offset;
+      if (may_le (copy->size, diff))
 	return false;
       copy->size -= diff;
       copy->offset = ref->offset;
     }
 
-  HOST_WIDE_INT diff = copy->offset - ref->offset;
-  if (ref->size <= diff)
+  poly_int64 diff = copy->offset - ref->offset;
+  if (may_le (ref->size, diff))
     return false;
 
   /* If COPY extends beyond REF, chop off its size appropriately.  */
-  HOST_WIDE_INT limit = ref->size - diff;
-  if (copy->size > limit)
+  poly_int64 limit = ref->size - diff;
+  if (!ordered_p (limit, copy->size))
+    return false;
+
+  if (may_gt (copy->size, limit))
     copy->size = limit;
   return true;
 }
@@ -183,15 +188,15 @@ clear_bytes_written_by (sbitmap live_byt
 
   /* Verify we have the same base memory address, the write
      has a known size and overlaps with REF.  */
+  HOST_WIDE_INT start, size;
   if (valid_ao_ref_for_dse (&write)
       && operand_equal_p (write.base, ref->base, OEP_ADDRESS_OF)
-      && write.size == write.max_size
-      && normalize_ref (&write, ref))
-    {
-      HOST_WIDE_INT start = write.offset - ref->offset;
-      bitmap_clear_range (live_bytes, start / BITS_PER_UNIT,
-			  write.size / BITS_PER_UNIT);
-    }
+      && must_eq (write.size, write.max_size)
+      && normalize_ref (&write, ref)
+      && (write.offset - ref->offset).is_constant (&start)
+      && write.size.is_constant (&size))
+    bitmap_clear_range (live_bytes, start / BITS_PER_UNIT,
+			size / BITS_PER_UNIT);
 }
 
 /* REF is a memory write.  Extract relevant information from it and
@@ -201,12 +206,14 @@ clear_bytes_written_by (sbitmap live_byt
 static bool
 setup_live_bytes_from_ref (ao_ref *ref, sbitmap live_bytes)
 {
+  HOST_WIDE_INT const_size;
   if (valid_ao_ref_for_dse (ref)
-      && (ref->size / BITS_PER_UNIT
+      && ref->size.is_constant (&const_size)
+      && (const_size / BITS_PER_UNIT
 	  <= PARAM_VALUE (PARAM_DSE_MAX_OBJECT_SIZE)))
     {
       bitmap_clear (live_bytes);
-      bitmap_set_range (live_bytes, 0, ref->size / BITS_PER_UNIT);
+      bitmap_set_range (live_bytes, 0, const_size / BITS_PER_UNIT);
       return true;
     }
   return false;
@@ -231,9 +238,15 @@ compute_trims (ao_ref *ref, sbitmap live
      the REF to compute the trims.  */
 
   /* Now identify how much, if any of the tail we can chop off.  */
-  int last_orig = (ref->size / BITS_PER_UNIT) - 1;
-  int last_live = bitmap_last_set_bit (live);
-  *trim_tail = (last_orig - last_live) & ~0x1;
+  HOST_WIDE_INT const_size;
+  if (ref->size.is_constant (&const_size))
+    {
+      int last_orig = (const_size / BITS_PER_UNIT) - 1;
+      int last_live = bitmap_last_set_bit (live);
+      *trim_tail = (last_orig - last_live) & ~0x1;
+    }
+  else
+    *trim_tail = 0;
 
   /* Identify how much, if any of the head we can chop off.  */
   int first_orig = 0;
@@ -267,7 +280,7 @@ maybe_trim_complex_store (ao_ref *ref, s
      least half the size of the object to ensure we're trimming
      the entire real or imaginary half.  By writing things this
      way we avoid more O(n) bitmap operations.  */
-  if (trim_tail * 2 >= ref->size / BITS_PER_UNIT)
+  if (must_ge (trim_tail * 2 * BITS_PER_UNIT, ref->size))
     {
       /* TREE_REALPART is live */
       tree x = TREE_REALPART (gimple_assign_rhs1 (stmt));
@@ -276,7 +289,7 @@ maybe_trim_complex_store (ao_ref *ref, s
       gimple_assign_set_lhs (stmt, y);
       gimple_assign_set_rhs1 (stmt, x);
     }
-  else if (trim_head * 2 >= ref->size / BITS_PER_UNIT)
+  else if (must_ge (trim_head * 2 * BITS_PER_UNIT, ref->size))
     {
       /* TREE_IMAGPART is live */
       tree x = TREE_IMAGPART (gimple_assign_rhs1 (stmt));
@@ -326,7 +339,8 @@ maybe_trim_constructor_store (ao_ref *re
 	return;
 
       /* The number of bytes for the new constructor.  */
-      int count = (ref->size / BITS_PER_UNIT) - head_trim - tail_trim;
+      poly_int64 ref_bytes = exact_div (ref->size, BITS_PER_UNIT);
+      poly_int64 count = ref_bytes - head_trim - tail_trim;
 
       /* And the new type for the CONSTRUCTOR.  Essentially it's just
 	 a char array large enough to cover the non-trimmed parts of
@@ -483,15 +497,15 @@ live_bytes_read (ao_ref use_ref, ao_ref
 {
   /* We have already verified that USE_REF and REF hit the same object.
      Now verify that there's actually an overlap between USE_REF and REF.  */
-  if (normalize_ref (&use_ref, ref))
+  HOST_WIDE_INT start, size;
+  if (normalize_ref (&use_ref, ref)
+      && (use_ref.offset - ref->offset).is_constant (&start)
+      && use_ref.size.is_constant (&size))
     {
-      HOST_WIDE_INT start = use_ref.offset - ref->offset;
-      HOST_WIDE_INT size = use_ref.size;
-
       /* If USE_REF covers all of REF, then it will hit one or more
 	 live bytes.   This avoids useless iteration over the bitmap
 	 below.  */
-      if (start == 0 && size == ref->size)
+      if (start == 0 && must_eq (size, ref->size))
 	return true;
 
       /* Now check if any of the remaining bits in use_ref are set in LIVE.  */
@@ -592,8 +606,8 @@ dse_classify_store (ao_ref *ref, gimple
 		      ao_ref use_ref;
 		      ao_ref_init (&use_ref, gimple_assign_rhs1 (use_stmt));
 		      if (valid_ao_ref_for_dse (&use_ref)
-			  && use_ref.base == ref->base
-			  && use_ref.size == use_ref.max_size
+			  && must_eq (use_ref.base, ref->base)
+			  && must_eq (use_ref.size, use_ref.max_size)
 			  && !live_bytes_read (use_ref, ref, live_bytes))
 			{
 			  /* If this statement has a VDEF, then it is the
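
The recurring pattern in the tree-ssa-dse.c changes above: the
byte-tracking bitmaps fundamentally need compile-time constants, so
the code extracts a HOST_WIDE_INT where possible and conservatively
punts otherwise.  A sketch of the idiom, taken from
setup_live_bytes_from_ref:

  HOST_WIDE_INT const_size;
  if (valid_ao_ref_for_dse (ref)
      && ref->size.is_constant (&const_size)
      && (const_size / BITS_PER_UNIT
	  <= PARAM_VALUE (PARAM_DSE_MAX_OBJECT_SIZE)))
    {
      /* Variable-sized (e.g. scalable-vector) stores simply fail the
	 is_constant test and are not tracked.  */
      bitmap_clear (live_bytes);
      bitmap_set_range (live_bytes, 0, const_size / BITS_PER_UNIT);
    }
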
Index: gcc/tree-ssa-sccvn.c
===================================================================
--- gcc/tree-ssa-sccvn.c	2017-10-23 16:52:20.058356365 +0100
+++ gcc/tree-ssa-sccvn.c	2017-10-23 17:01:52.305178291 +0100
@@ -547,7 +547,7 @@ vn_reference_compute_hash (const vn_refe
   hashval_t result;
   int i;
   vn_reference_op_t vro;
-  HOST_WIDE_INT off = -1;
+  poly_int64 off = -1;
   bool deref = false;
 
   FOR_EACH_VEC_ELT (vr1->operands, i, vro)
@@ -556,17 +556,17 @@ vn_reference_compute_hash (const vn_refe
 	deref = true;
       else if (vro->opcode != ADDR_EXPR)
 	deref = false;
-      if (vro->off != -1)
+      if (may_ne (vro->off, -1))
 	{
-	  if (off == -1)
+	  if (must_eq (off, -1))
 	    off = 0;
 	  off += vro->off;
 	}
       else
 	{
-	  if (off != -1
-	      && off != 0)
-	    hstate.add_int (off);
+	  if (may_ne (off, -1)
+	      && may_ne (off, 0))
+	    hstate.add_poly_int (off);
 	  off = -1;
 	  if (deref
 	      && vro->opcode == ADDR_EXPR)
@@ -632,7 +632,7 @@ vn_reference_eq (const_vn_reference_t co
   j = 0;
   do
     {
-      HOST_WIDE_INT off1 = 0, off2 = 0;
+      poly_int64 off1 = 0, off2 = 0;
       vn_reference_op_t vro1, vro2;
       vn_reference_op_s tem1, tem2;
       bool deref1 = false, deref2 = false;
@@ -643,7 +643,7 @@ vn_reference_eq (const_vn_reference_t co
 	  /* Do not look through a storage order barrier.  */
 	  else if (vro1->opcode == VIEW_CONVERT_EXPR && vro1->reverse)
 	    return false;
-	  if (vro1->off == -1)
+	  if (must_eq (vro1->off, -1))
 	    break;
 	  off1 += vro1->off;
 	}
@@ -654,11 +654,11 @@ vn_reference_eq (const_vn_reference_t co
 	  /* Do not look through a storage order barrier.  */
 	  else if (vro2->opcode == VIEW_CONVERT_EXPR && vro2->reverse)
 	    return false;
-	  if (vro2->off == -1)
+	  if (must_eq (vro2->off, -1))
 	    break;
 	  off2 += vro2->off;
 	}
-      if (off1 != off2)
+      if (may_ne (off1, off2))
 	return false;
       if (deref1 && vro1->opcode == ADDR_EXPR)
 	{
@@ -784,24 +784,23 @@ copy_reference_ops_from_ref (tree ref, v
 	  {
 	    tree this_offset = component_ref_field_offset (ref);
 	    if (this_offset
-		&& TREE_CODE (this_offset) == INTEGER_CST)
+		&& poly_int_tree_p (this_offset))
 	      {
 		tree bit_offset = DECL_FIELD_BIT_OFFSET (TREE_OPERAND (ref, 1));
 		if (TREE_INT_CST_LOW (bit_offset) % BITS_PER_UNIT == 0)
 		  {
-		    offset_int off
-		      = (wi::to_offset (this_offset)
+		    poly_offset_int off
+		      = (wi::to_poly_offset (this_offset)
 			 + (wi::to_offset (bit_offset) >> LOG2_BITS_PER_UNIT));
-		    if (wi::fits_shwi_p (off)
-			/* Probibit value-numbering zero offset components
-			   of addresses the same before the pass folding
-			   __builtin_object_size had a chance to run
-			   (checking cfun->after_inlining does the
-			   trick here).  */
-			&& (TREE_CODE (orig) != ADDR_EXPR
-			    || off != 0
-			    || cfun->after_inlining))
-		      temp.off = off.to_shwi ();
+		    /* Prohibit value-numbering zero offset components
+		       of addresses the same before the pass folding
+		       __builtin_object_size had a chance to run
+		       (checking cfun->after_inlining does the
+		       trick here).  */
+		    if (TREE_CODE (orig) != ADDR_EXPR
+			|| maybe_nonzero (off)
+			|| cfun->after_inlining)
+		      off.to_shwi (&temp.off);
 		  }
 	      }
 	  }
@@ -820,16 +819,15 @@ copy_reference_ops_from_ref (tree ref, v
 	    if (! temp.op2)
 	      temp.op2 = size_binop (EXACT_DIV_EXPR, TYPE_SIZE_UNIT (eltype),
 				     size_int (TYPE_ALIGN_UNIT (eltype)));
-	    if (TREE_CODE (temp.op0) == INTEGER_CST
-		&& TREE_CODE (temp.op1) == INTEGER_CST
+	    if (poly_int_tree_p (temp.op0)
+		&& poly_int_tree_p (temp.op1)
 		&& TREE_CODE (temp.op2) == INTEGER_CST)
 	      {
-		offset_int off = ((wi::to_offset (temp.op0)
-				   - wi::to_offset (temp.op1))
-				  * wi::to_offset (temp.op2)
-				  * vn_ref_op_align_unit (&temp));
-		if (wi::fits_shwi_p (off))
-		  temp.off = off.to_shwi();
+		poly_offset_int off = ((wi::to_poly_offset (temp.op0)
+					- wi::to_poly_offset (temp.op1))
+				       * wi::to_offset (temp.op2)
+				       * vn_ref_op_align_unit (&temp));
+		off.to_shwi (&temp.off);
 	      }
 	  }
 	  break;
@@ -918,9 +916,9 @@ ao_ref_init_from_vn_reference (ao_ref *r
   unsigned i;
   tree base = NULL_TREE;
   tree *op0_p = &base;
-  offset_int offset = 0;
-  offset_int max_size;
-  offset_int size = -1;
+  poly_offset_int offset = 0;
+  poly_offset_int max_size;
+  poly_offset_int size = -1;
   tree size_tree = NULL_TREE;
   alias_set_type base_alias_set = -1;
 
@@ -936,11 +934,11 @@ ao_ref_init_from_vn_reference (ao_ref *r
       if (mode == BLKmode)
 	size_tree = TYPE_SIZE (type);
       else
-	size = int (GET_MODE_BITSIZE (mode));
+	size = GET_MODE_BITSIZE (mode);
     }
   if (size_tree != NULL_TREE
-      && TREE_CODE (size_tree) == INTEGER_CST)
-    size = wi::to_offset (size_tree);
+      && poly_int_tree_p (size_tree))
+    size = wi::to_poly_offset (size_tree);
 
   /* Initially, maxsize is the same as the accessed element size.
      In the following it will only grow (or become -1).  */
@@ -963,7 +961,7 @@ ao_ref_init_from_vn_reference (ao_ref *r
 	    {
 	      vn_reference_op_t pop = &ops[i-1];
 	      base = TREE_OPERAND (op->op0, 0);
-	      if (pop->off == -1)
+	      if (must_eq (pop->off, -1))
 		{
 		  max_size = -1;
 		  offset = 0;
@@ -1008,12 +1006,12 @@ ao_ref_init_from_vn_reference (ao_ref *r
 	       parts manually.  */
 	    tree this_offset = DECL_FIELD_OFFSET (field);
 
-	    if (op->op1 || TREE_CODE (this_offset) != INTEGER_CST)
+	    if (op->op1 || !poly_int_tree_p (this_offset))
 	      max_size = -1;
 	    else
 	      {
-		offset_int woffset = (wi::to_offset (this_offset)
-				      << LOG2_BITS_PER_UNIT);
+		poly_offset_int woffset = (wi::to_poly_offset (this_offset)
+					   << LOG2_BITS_PER_UNIT);
 		woffset += wi::to_offset (DECL_FIELD_BIT_OFFSET (field));
 		offset += woffset;
 	      }
@@ -1023,14 +1021,15 @@ ao_ref_init_from_vn_reference (ao_ref *r
 	case ARRAY_RANGE_REF:
 	case ARRAY_REF:
 	  /* We recorded the lower bound and the element size.  */
-	  if (TREE_CODE (op->op0) != INTEGER_CST
-	      || TREE_CODE (op->op1) != INTEGER_CST
+	  if (!poly_int_tree_p (op->op0)
+	      || !poly_int_tree_p (op->op1)
 	      || TREE_CODE (op->op2) != INTEGER_CST)
 	    max_size = -1;
 	  else
 	    {
-	      offset_int woffset
-		= wi::sext (wi::to_offset (op->op0) - wi::to_offset (op->op1),
+	      poly_offset_int woffset
+		= wi::sext (wi::to_poly_offset (op->op0)
+			    - wi::to_poly_offset (op->op1),
 			    TYPE_PRECISION (TREE_TYPE (op->op0)));
 	      woffset *= wi::to_offset (op->op2) * vn_ref_op_align_unit (op);
 	      woffset <<= LOG2_BITS_PER_UNIT;
@@ -1077,7 +1076,7 @@ ao_ref_init_from_vn_reference (ao_ref *r
   /* We discount volatiles from value-numbering elsewhere.  */
   ref->volatile_p = false;
 
-  if (!wi::fits_shwi_p (size) || wi::neg_p (size))
+  if (!size.to_shwi (&ref->size) || may_lt (ref->size, 0))
     {
       ref->offset = 0;
       ref->size = -1;
@@ -1085,21 +1084,15 @@ ao_ref_init_from_vn_reference (ao_ref *r
       return true;
     }
 
-  ref->size = size.to_shwi ();
-
-  if (!wi::fits_shwi_p (offset))
+  if (!offset.to_shwi (&ref->offset))
     {
       ref->offset = 0;
       ref->max_size = -1;
       return true;
     }
 
-  ref->offset = offset.to_shwi ();
-
-  if (!wi::fits_shwi_p (max_size) || wi::neg_p (max_size))
+  if (!max_size.to_shwi (&ref->max_size) || may_lt (ref->max_size, 0))
     ref->max_size = -1;
-  else
-    ref->max_size = max_size.to_shwi ();
 
   return true;
 }
@@ -1344,7 +1337,7 @@ fully_constant_vn_reference_p (vn_refere
 	   && (!INTEGRAL_TYPE_P (ref->type)
 	       || TYPE_PRECISION (ref->type) % BITS_PER_UNIT == 0))
     {
-      HOST_WIDE_INT off = 0;
+      poly_int64 off = 0;
       HOST_WIDE_INT size;
       if (INTEGRAL_TYPE_P (ref->type))
 	size = TYPE_PRECISION (ref->type);
@@ -1362,7 +1355,7 @@ fully_constant_vn_reference_p (vn_refere
 	      ++i;
 	      break;
 	    }
-	  if (operands[i].off == -1)
+	  if (must_eq (operands[i].off, -1))
 	    return NULL_TREE;
 	  off += operands[i].off;
 	  if (operands[i].opcode == MEM_REF)
@@ -1388,6 +1381,7 @@ fully_constant_vn_reference_p (vn_refere
 	return build_zero_cst (ref->type);
       else if (ctor != error_mark_node)
 	{
+	  HOST_WIDE_INT const_off;
 	  if (decl)
 	    {
 	      tree res = fold_ctor_reference (ref->type, ctor,
@@ -1400,10 +1394,10 @@ fully_constant_vn_reference_p (vn_refere
 		    return res;
 		}
 	    }
-	  else
+	  else if (off.is_constant (&const_off))
 	    {
 	      unsigned char buf[MAX_BITSIZE_MODE_ANY_MODE / BITS_PER_UNIT];
-	      int len = native_encode_expr (ctor, buf, size, off);
+	      int len = native_encode_expr (ctor, buf, size, const_off);
 	      if (len > 0)
 		return native_interpret_expr (ref->type, buf, len);
 	    }
@@ -1495,17 +1489,16 @@ valueize_refs_1 (vec<vn_reference_op_s>
       /* If it transforms a non-constant ARRAY_REF into a constant
 	 one, adjust the constant offset.  */
       else if (vro->opcode == ARRAY_REF
-	       && vro->off == -1
-	       && TREE_CODE (vro->op0) == INTEGER_CST
-	       && TREE_CODE (vro->op1) == INTEGER_CST
+	       && must_eq (vro->off, -1)
+	       && poly_int_tree_p (vro->op0)
+	       && poly_int_tree_p (vro->op1)
 	       && TREE_CODE (vro->op2) == INTEGER_CST)
 	{
-	  offset_int off = ((wi::to_offset (vro->op0)
-			     - wi::to_offset (vro->op1))
-			    * wi::to_offset (vro->op2)
-			    * vn_ref_op_align_unit (vro));
-	  if (wi::fits_shwi_p (off))
-	    vro->off = off.to_shwi ();
+	  poly_offset_int off = ((wi::to_poly_offset (vro->op0)
+				  - wi::to_poly_offset (vro->op1))
+				 * wi::to_offset (vro->op2)
+				 * vn_ref_op_align_unit (vro));
+	  off.to_shwi (&vro->off);
 	}
     }
 
@@ -1821,10 +1814,11 @@ vn_reference_lookup_3 (ao_ref *ref, tree
   vn_reference_t vr = (vn_reference_t)vr_;
   gimple *def_stmt = SSA_NAME_DEF_STMT (vuse);
   tree base = ao_ref_base (ref);
-  HOST_WIDE_INT offset, maxsize;
+  HOST_WIDE_INT offseti, maxsizei;
   static vec<vn_reference_op_s> lhs_ops;
   ao_ref lhs_ref;
   bool lhs_ref_ok = false;
+  poly_int64 copy_size;
 
   /* If the reference is based on a parameter that was determined as
      pointing to readonly memory it doesn't change.  */
@@ -1903,14 +1897,14 @@ vn_reference_lookup_3 (ao_ref *ref, tree
   if (*disambiguate_only)
     return (void *)-1;
 
-  offset = ref->offset;
-  maxsize = ref->max_size;
-
   /* If we cannot constrain the size of the reference we cannot
      test if anything kills it.  */
-  if (maxsize == -1)
+  if (!ref->max_size_known_p ())
     return (void *)-1;
 
+  poly_int64 offset = ref->offset;
+  poly_int64 maxsize = ref->max_size;
+
   /* We can't deduce anything useful from clobbers.  */
   if (gimple_clobber_p (def_stmt))
     return (void *)-1;
@@ -1921,7 +1915,7 @@ vn_reference_lookup_3 (ao_ref *ref, tree
   if (is_gimple_reg_type (vr->type)
       && gimple_call_builtin_p (def_stmt, BUILT_IN_MEMSET)
       && integer_zerop (gimple_call_arg (def_stmt, 1))
-      && tree_fits_uhwi_p (gimple_call_arg (def_stmt, 2))
+      && poly_int_tree_p (gimple_call_arg (def_stmt, 2))
       && TREE_CODE (gimple_call_arg (def_stmt, 0)) == ADDR_EXPR)
     {
       tree ref2 = TREE_OPERAND (gimple_call_arg (def_stmt, 0), 0);
@@ -1930,13 +1924,11 @@ vn_reference_lookup_3 (ao_ref *ref, tree
       bool reverse;
       base2 = get_ref_base_and_extent (ref2, &offset2, &size2, &maxsize2,
 				       &reverse);
-      size2 = tree_to_uhwi (gimple_call_arg (def_stmt, 2)) * 8;
-      if ((unsigned HOST_WIDE_INT)size2 / 8
-	  == tree_to_uhwi (gimple_call_arg (def_stmt, 2))
-	  && maxsize2 != -1
+      tree len = gimple_call_arg (def_stmt, 2);
+      if (known_size_p (maxsize2)
 	  && operand_equal_p (base, base2, 0)
-	  && offset2 <= offset
-	  && offset2 + size2 >= offset + maxsize)
+	  && known_subrange_p (offset, maxsize, offset2,
+			       wi::to_poly_offset (len) << LOG2_BITS_PER_UNIT))
 	{
 	  tree val = build_zero_cst (vr->type);
 	  return vn_reference_lookup_or_insert_for_pieces
@@ -1955,10 +1947,9 @@ vn_reference_lookup_3 (ao_ref *ref, tree
       bool reverse;
       base2 = get_ref_base_and_extent (gimple_assign_lhs (def_stmt),
 				       &offset2, &size2, &maxsize2, &reverse);
-      if (maxsize2 != -1
+      if (known_size_p (maxsize2)
 	  && operand_equal_p (base, base2, 0)
-	  && offset2 <= offset
-	  && offset2 + size2 >= offset + maxsize)
+	  && known_subrange_p (offset, maxsize, offset2, size2))
 	{
 	  tree val = build_zero_cst (vr->type);
 	  return vn_reference_lookup_or_insert_for_pieces
@@ -1968,13 +1959,17 @@ vn_reference_lookup_3 (ao_ref *ref, tree
 
   /* 3) Assignment from a constant.  We can use folds native encode/interpret
      routines to extract the assigned bits.  */
-  else if (ref->size == maxsize
+  else if (must_eq (ref->size, maxsize)
 	   && is_gimple_reg_type (vr->type)
 	   && !contains_storage_order_barrier_p (vr->operands)
 	   && gimple_assign_single_p (def_stmt)
 	   && CHAR_BIT == 8 && BITS_PER_UNIT == 8
-	   && maxsize % BITS_PER_UNIT == 0
-	   && offset % BITS_PER_UNIT == 0
+	   /* native_encode and native_decode operate on arrays of bytes
+	      and so fundamentally need a compile-time size and offset.  */
+	   && maxsize.is_constant (&maxsizei)
+	   && maxsizei % BITS_PER_UNIT == 0
+	   && offset.is_constant (&offseti)
+	   && offseti % BITS_PER_UNIT == 0
 	   && (is_gimple_min_invariant (gimple_assign_rhs1 (def_stmt))
 	       || (TREE_CODE (gimple_assign_rhs1 (def_stmt)) == SSA_NAME
 		   && is_gimple_min_invariant (SSA_VAL (gimple_assign_rhs1 (def_stmt))))))
@@ -1990,8 +1985,7 @@ vn_reference_lookup_3 (ao_ref *ref, tree
 	  && size2 % BITS_PER_UNIT == 0
 	  && offset2 % BITS_PER_UNIT == 0
 	  && operand_equal_p (base, base2, 0)
-	  && offset2 <= offset
-	  && offset2 + size2 >= offset + maxsize)
+	  && known_subrange_p (offseti, maxsizei, offset2, size2))
 	{
 	  /* We support up to 512-bit values (for V8DFmode).  */
 	  unsigned char buffer[64];
@@ -2008,14 +2002,14 @@ vn_reference_lookup_3 (ao_ref *ref, tree
 	      /* Make sure to interpret in a type that has a range
 	         covering the whole access size.  */
 	      if (INTEGRAL_TYPE_P (vr->type)
-		  && ref->size != TYPE_PRECISION (vr->type))
-		type = build_nonstandard_integer_type (ref->size,
+		  && maxsizei != TYPE_PRECISION (vr->type))
+		type = build_nonstandard_integer_type (maxsizei,
 						       TYPE_UNSIGNED (type));
 	      tree val = native_interpret_expr (type,
 						buffer
-						+ ((offset - offset2)
+						+ ((offseti - offset2)
 						   / BITS_PER_UNIT),
-						ref->size / BITS_PER_UNIT);
+						maxsizei / BITS_PER_UNIT);
 	      /* If we chop off bits because the types precision doesn't
 		 match the memory access size this is ok when optimizing
 		 reads but not when called from the DSE code during
@@ -2038,7 +2032,7 @@ vn_reference_lookup_3 (ao_ref *ref, tree
 
   /* 4) Assignment from an SSA name which definition we may be able
      to access pieces from.  */
-  else if (ref->size == maxsize
+  else if (must_eq (ref->size, maxsize)
 	   && is_gimple_reg_type (vr->type)
 	   && !contains_storage_order_barrier_p (vr->operands)
 	   && gimple_assign_single_p (def_stmt)
@@ -2054,15 +2048,14 @@ vn_reference_lookup_3 (ao_ref *ref, tree
 	  && maxsize2 != -1
 	  && maxsize2 == size2
 	  && operand_equal_p (base, base2, 0)
-	  && offset2 <= offset
-	  && offset2 + size2 >= offset + maxsize
+	  && known_subrange_p (offset, maxsize, offset2, size2)
 	  /* ???  We can't handle bitfield precision extracts without
 	     either using an alternate type for the BIT_FIELD_REF and
 	     then doing a conversion or possibly adjusting the offset
 	     according to endianness.  */
 	  && (! INTEGRAL_TYPE_P (vr->type)
-	      || ref->size == TYPE_PRECISION (vr->type))
-	  && ref->size % BITS_PER_UNIT == 0)
+	      || must_eq (ref->size, TYPE_PRECISION (vr->type)))
+	  && multiple_p (ref->size, BITS_PER_UNIT))
 	{
 	  code_helper rcode = BIT_FIELD_REF;
 	  tree ops[3];
@@ -2090,7 +2083,6 @@ vn_reference_lookup_3 (ao_ref *ref, tree
 	       || handled_component_p (gimple_assign_rhs1 (def_stmt))))
     {
       tree base2;
-      HOST_WIDE_INT maxsize2;
       int i, j, k;
       auto_vec<vn_reference_op_s> rhs;
       vn_reference_op_t vro;
@@ -2101,8 +2093,7 @@ vn_reference_lookup_3 (ao_ref *ref, tree
 
       /* See if the assignment kills REF.  */
       base2 = ao_ref_base (&lhs_ref);
-      maxsize2 = lhs_ref.max_size;
-      if (maxsize2 == -1
+      if (!lhs_ref.max_size_known_p ()
 	  || (base != base2
 	      && (TREE_CODE (base) != MEM_REF
 		  || TREE_CODE (base2) != MEM_REF
@@ -2129,15 +2120,15 @@ vn_reference_lookup_3 (ao_ref *ref, tree
 	 may fail when comparing types for compatibility.  But we really
 	 don't care here - further lookups with the rewritten operands
 	 will simply fail if we messed up types too badly.  */
-      HOST_WIDE_INT extra_off = 0;
+      poly_int64 extra_off = 0;
       if (j == 0 && i >= 0
 	  && lhs_ops[0].opcode == MEM_REF
-	  && lhs_ops[0].off != -1)
+	  && may_ne (lhs_ops[0].off, -1))
 	{
-	  if (lhs_ops[0].off == vr->operands[i].off)
+	  if (must_eq (lhs_ops[0].off, vr->operands[i].off))
 	    i--, j--;
 	  else if (vr->operands[i].opcode == MEM_REF
-		   && vr->operands[i].off != -1)
+		   && may_ne (vr->operands[i].off, -1))
 	    {
 	      extra_off = vr->operands[i].off - lhs_ops[0].off;
 	      i--, j--;
@@ -2163,11 +2154,11 @@ vn_reference_lookup_3 (ao_ref *ref, tree
       copy_reference_ops_from_ref (gimple_assign_rhs1 (def_stmt), &rhs);
 
       /* Apply an extra offset to the inner MEM_REF of the RHS.  */
-      if (extra_off != 0)
+      if (maybe_nonzero (extra_off))
 	{
 	  if (rhs.length () < 2
 	      || rhs[0].opcode != MEM_REF
-	      || rhs[0].off == -1)
+	      || must_eq (rhs[0].off, -1))
 	    return (void *)-1;
 	  rhs[0].off += extra_off;
 	  rhs[0].op0 = int_const_binop (PLUS_EXPR, rhs[0].op0,
@@ -2198,7 +2189,7 @@ vn_reference_lookup_3 (ao_ref *ref, tree
       if (!ao_ref_init_from_vn_reference (&r, vr->set, vr->type, vr->operands))
 	return (void *)-1;
       /* This can happen with bitfields.  */
-      if (ref->size != r.size)
+      if (may_ne (ref->size, r.size))
 	return (void *)-1;
       *ref = r;
 
@@ -2221,20 +2212,20 @@ vn_reference_lookup_3 (ao_ref *ref, tree
 	       || TREE_CODE (gimple_call_arg (def_stmt, 0)) == SSA_NAME)
 	   && (TREE_CODE (gimple_call_arg (def_stmt, 1)) == ADDR_EXPR
 	       || TREE_CODE (gimple_call_arg (def_stmt, 1)) == SSA_NAME)
-	   && tree_fits_uhwi_p (gimple_call_arg (def_stmt, 2)))
+	   && poly_int_tree_p (gimple_call_arg (def_stmt, 2), &copy_size))
     {
       tree lhs, rhs;
       ao_ref r;
-      HOST_WIDE_INT rhs_offset, copy_size, lhs_offset;
+      poly_int64 rhs_offset, lhs_offset;
       vn_reference_op_s op;
-      HOST_WIDE_INT at;
+      poly_uint64 mem_offset;
+      poly_int64 at, byte_maxsize;
 
       /* Only handle non-variable, addressable refs.  */
-      if (ref->size != maxsize
-	  || offset % BITS_PER_UNIT != 0
-	  || ref->size % BITS_PER_UNIT != 0)
+      if (may_ne (ref->size, maxsize)
+	  || !multiple_p (offset, BITS_PER_UNIT, &at)
+	  || !multiple_p (maxsize, BITS_PER_UNIT, &byte_maxsize))
 	return (void *)-1;
-      at = offset / BITS_PER_UNIT;
 
       /* Extract a pointer base and an offset for the destination.  */
       lhs = gimple_call_arg (def_stmt, 0);
@@ -2252,17 +2243,19 @@ vn_reference_lookup_3 (ao_ref *ref, tree
 	}
       if (TREE_CODE (lhs) == ADDR_EXPR)
 	{
+	  HOST_WIDE_INT tmp_lhs_offset;
 	  tree tem = get_addr_base_and_unit_offset (TREE_OPERAND (lhs, 0),
-						    &lhs_offset);
+						    &tmp_lhs_offset);
+	  lhs_offset = tmp_lhs_offset;
 	  if (!tem)
 	    return (void *)-1;
 	  if (TREE_CODE (tem) == MEM_REF
-	      && tree_fits_uhwi_p (TREE_OPERAND (tem, 1)))
+	      && poly_int_tree_p (TREE_OPERAND (tem, 1), &mem_offset))
 	    {
 	      lhs = TREE_OPERAND (tem, 0);
 	      if (TREE_CODE (lhs) == SSA_NAME)
 		lhs = SSA_VAL (lhs);
-	      lhs_offset += tree_to_uhwi (TREE_OPERAND (tem, 1));
+	      lhs_offset += mem_offset;
 	    }
 	  else if (DECL_P (tem))
 	    lhs = build_fold_addr_expr (tem);
@@ -2280,15 +2273,17 @@ vn_reference_lookup_3 (ao_ref *ref, tree
 	rhs = SSA_VAL (rhs);
       if (TREE_CODE (rhs) == ADDR_EXPR)
 	{
+	  HOST_WIDE_INT tmp_rhs_offset;
 	  tree tem = get_addr_base_and_unit_offset (TREE_OPERAND (rhs, 0),
-						    &rhs_offset);
+						    &tmp_rhs_offset);
+	  rhs_offset = tmp_rhs_offset;
 	  if (!tem)
 	    return (void *)-1;
 	  if (TREE_CODE (tem) == MEM_REF
-	      && tree_fits_uhwi_p (TREE_OPERAND (tem, 1)))
+	      && poly_int_tree_p (TREE_OPERAND (tem, 1), &mem_offset))
 	    {
 	      rhs = TREE_OPERAND (tem, 0);
-	      rhs_offset += tree_to_uhwi (TREE_OPERAND (tem, 1));
+	      rhs_offset += mem_offset;
 	    }
 	  else if (DECL_P (tem))
 	    rhs = build_fold_addr_expr (tem);
@@ -2299,15 +2294,13 @@ vn_reference_lookup_3 (ao_ref *ref, tree
 	  && TREE_CODE (rhs) != ADDR_EXPR)
 	return (void *)-1;
 
-      copy_size = tree_to_uhwi (gimple_call_arg (def_stmt, 2));
-
       /* The bases of the destination and the references have to agree.  */
       if (TREE_CODE (base) == MEM_REF)
 	{
 	  if (TREE_OPERAND (base, 0) != lhs
-	      || !tree_fits_uhwi_p (TREE_OPERAND (base, 1)))
+	      || !poly_int_tree_p (TREE_OPERAND (base, 1), &mem_offset))
 	    return (void *) -1;
-	  at += tree_to_uhwi (TREE_OPERAND (base, 1));
+	  at += mem_offset;
 	}
       else if (!DECL_P (base)
 	       || TREE_CODE (lhs) != ADDR_EXPR
@@ -2316,12 +2309,10 @@ vn_reference_lookup_3 (ao_ref *ref, tree
 
       /* If the access is completely outside of the memcpy destination
 	 area there is no aliasing.  */
-      if (lhs_offset >= at + maxsize / BITS_PER_UNIT
-	  || lhs_offset + copy_size <= at)
+      if (!ranges_may_overlap_p (lhs_offset, copy_size, at, byte_maxsize))
 	return NULL;
       /* And the access has to be contained within the memcpy destination.  */
-      if (lhs_offset > at
-	  || lhs_offset + copy_size < at + maxsize / BITS_PER_UNIT)
+      if (!known_subrange_p (at, byte_maxsize, lhs_offset, copy_size))
 	return (void *)-1;
 
       /* Make room for 2 operands in the new reference.  */
@@ -2359,7 +2350,7 @@ vn_reference_lookup_3 (ao_ref *ref, tree
       if (!ao_ref_init_from_vn_reference (&r, vr->set, vr->type, vr->operands))
 	return (void *)-1;
       /* This can happen with bitfields.  */
-      if (ref->size != r.size)
+      if (may_ne (ref->size, r.size))
 	return (void *)-1;
       *ref = r;
 
Index: gcc/tree-ssa-uninit.c
===================================================================
--- gcc/tree-ssa-uninit.c	2017-10-23 16:52:20.058356365 +0100
+++ gcc/tree-ssa-uninit.c	2017-10-23 17:01:52.305178291 +0100
@@ -294,15 +294,15 @@ warn_uninitialized_vars (bool warn_possi
 
 	      /* Do not warn if the access is fully outside of the
 	         variable.  */
+	      poly_int64 decl_size;
 	      if (DECL_P (base)
-		  && ref.size != -1
-		  && ref.max_size == ref.size
-		  && (ref.offset + ref.size <= 0
-		      || (ref.offset >= 0
+		  && known_size_p (ref.size)
+		  && must_eq (ref.max_size, ref.size)
+		  && (must_le (ref.offset + ref.size, 0)
+		      || (must_ge (ref.offset, 0)
 			  && DECL_SIZE (base)
-			  && TREE_CODE (DECL_SIZE (base)) == INTEGER_CST
-			  && compare_tree_int (DECL_SIZE (base),
-					       ref.offset) <= 0)))
+			  && poly_int_tree_p (DECL_SIZE (base), &decl_size)
+			  && must_le (decl_size, ref.offset))))
 		continue;
 
 	      /* Do not warn if the access is then used for a BIT_INSERT_EXPR. */

2017-12-05 17:49       ` Jeff Law
2017-10-23 17:32 ` [078/nnn] poly_int: two-operation SLP Richard Sandiford
2017-11-28 16:41   ` Jeff Law
2017-10-23 17:32 ` [079/nnn] poly_int: vect_no_alias_p Richard Sandiford
2017-12-05 17:46   ` Jeff Law
2017-10-23 17:32 ` [080/nnn] poly_int: tree-vect-generic.c Richard Sandiford
2017-12-05 17:48   ` Jeff Law
2017-10-23 17:33 ` [081/nnn] poly_int: brig vector elements Richard Sandiford
2017-10-24  7:10   ` Pekka Jääskeläinen
2017-10-23 17:33 ` [082/nnn] poly_int: omp-simd-clone.c Richard Sandiford
2017-11-28 16:36   ` Jeff Law
2017-10-23 17:34 ` [083/nnn] poly_int: fold_indirect_ref_1 Richard Sandiford
2017-11-28 16:34   ` Jeff Law
2017-10-23 17:34 ` [085/nnn] poly_int: expand_vector_ubsan_overflow Richard Sandiford
2017-11-28 16:33   ` Jeff Law
2017-10-23 17:34 ` [084/nnn] poly_int: folding BIT_FIELD_REFs on vectors Richard Sandiford
2017-11-28 16:33   ` Jeff Law
2017-10-23 17:35 ` [088/nnn] poly_int: expand_expr_real_2 Richard Sandiford
2017-11-28  8:49   ` Jeff Law
2017-10-23 17:35 ` [087/nnn] poly_int: subreg_get_info Richard Sandiford
2017-11-28 16:29   ` Jeff Law
2017-10-23 17:35 ` [086/nnn] poly_int: REGMODE_NATURAL_SIZE Richard Sandiford
2017-12-05 23:33   ` Jeff Law
2017-10-23 17:36 ` [090/nnn] poly_int: set_inc_state Richard Sandiford
2017-11-28  8:35   ` Jeff Law
2017-10-23 17:36 ` [089/nnn] poly_int: expand_expr_real_1 Richard Sandiford
2017-11-28  8:41   ` Jeff Law
2017-10-23 17:37 ` [091/nnn] poly_int: emit_single_push_insn_1 Richard Sandiford
2017-11-28  8:33   ` Jeff Law
2017-10-23 17:37 ` [092/nnn] poly_int: PUSH_ROUNDING Richard Sandiford
2017-11-28 16:21   ` Jeff Law
2017-11-28 18:01     ` Richard Sandiford
2017-11-28 18:10       ` PUSH_ROUNDING Jeff Law
2017-10-23 17:37 ` [093/nnn] poly_int: adjust_mems Richard Sandiford
2017-11-28  8:32   ` Jeff Law
2017-10-23 17:38 ` [094/nnn] poly_int: expand_ifn_atomic_compare_exchange_into_call Richard Sandiford
2017-11-28  8:31   ` Jeff Law
2017-10-23 17:39 ` [096/nnn] poly_int: reloading complex subregs Richard Sandiford
2017-11-28  8:09   ` Jeff Law
2017-10-23 17:39 ` [095/nnn] poly_int: process_alt_operands Richard Sandiford
2017-11-28  8:14   ` Jeff Law
2017-10-23 17:40 ` [099/nnn] poly_int: struct_value_size Richard Sandiford
2017-11-21  8:14   ` Jeff Law
2017-10-23 17:40 ` [097/nnn] poly_int: alter_reg Richard Sandiford
2017-11-28  8:08   ` Jeff Law
2017-10-23 17:40 ` [098/nnn] poly_int: load_register_parameters Richard Sandiford
2017-11-28  8:08   ` Jeff Law
2017-10-23 17:41 ` [101/nnn] poly_int: GET_MODE_NUNITS Richard Sandiford
2017-12-06  2:05   ` Jeff Law
2017-10-23 17:41 ` [100/nnn] poly_int: memrefs_conflict_p Richard Sandiford
2017-12-05 23:29   ` Jeff Law
2017-10-23 17:42 ` [102/nnn] poly_int: vect_permute_load/store_chain Richard Sandiford
2017-11-21  8:01   ` Jeff Law
2017-10-23 17:42 ` [103/nnn] poly_int: TYPE_VECTOR_SUBPARTS Richard Sandiford
2017-10-24  9:06   ` Richard Biener
2017-10-24  9:40     ` Richard Sandiford
2017-10-24 10:01       ` Richard Biener
2017-10-24 11:20         ` Richard Sandiford
2017-10-24 11:30           ` Richard Biener
2017-10-24 16:24             ` Richard Sandiford
2017-12-06  2:31   ` Jeff Law
2017-10-23 17:43 ` [106/nnn] poly_int: GET_MODE_BITSIZE Richard Sandiford
2017-11-21  7:49   ` Jeff Law
2017-10-23 17:43 ` [104/nnn] poly_int: GET_MODE_PRECISION Richard Sandiford
2017-11-28  8:07   ` Jeff Law
2017-10-23 17:43 ` [105/nnn] poly_int: expand_assignment Richard Sandiford
2017-11-21  7:50   ` Jeff Law
2017-10-23 17:48 ` [107/nnn] poly_int: GET_MODE_SIZE Richard Sandiford
2017-11-21  7:48   ` Jeff Law
2017-10-24  9:25 ` [000/nnn] poly_int: representation of runtime offsets and sizes Eric Botcazou
2017-10-24  9:58   ` Richard Sandiford
2017-10-24 10:53     ` Eric Botcazou
2017-10-24 11:25       ` Richard Sandiford
2017-10-24 12:24         ` Richard Biener
2017-10-24 13:07           ` Richard Sandiford
2017-10-24 13:18             ` Richard Biener
2017-10-24 13:30               ` Richard Sandiford
2017-10-25 10:27                 ` Richard Biener
2017-10-25 10:45                   ` Jakub Jelinek
2017-10-25 11:39                   ` Richard Sandiford
2017-10-25 13:09                     ` Richard Biener
2017-11-08  9:51                       ` Richard Sandiford
2017-11-08 11:57                         ` Richard Biener

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox (see the sketch after these
  instructions for one way to fetch it)

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=873769ssje.fsf@linaro.org \
    --to=richard.sandiford@linaro.org \
    --cc=gcc-patches@gcc.gnu.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link

Be sure your reply has a Subject: header at the top and a blank line
before the message body.
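
As a minimal sketch of the first two methods above: public-inbox
instances serve a message's raw mbox at <inbox-url>/<message-id>/raw,
so you can fetch this message and reply to it directly.  The
inbox.sourceware.org mirror URL below is an assumption; substitute the
address of the archive you are actually reading.

  # Hedged sketch: the mirror URL is an assumption; any public-inbox
  # instance serves the raw message at <inbox-url>/<message-id>/raw.
  curl -fLo reply.mbox \
    'https://inbox.sourceware.org/gcc-patches/873769ssje.fsf@linaro.org/raw'

  # Either import reply.mbox into your mail client and reply-to-all,
  # or send a prepared plain-text reply directly:
  git send-email \
    --in-reply-to='873769ssje.fsf@linaro.org' \
    --to='richard.sandiford@linaro.org' \
    --cc='gcc-patches@gcc.gnu.org' \
    /path/to/YOUR_REPLY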
This is a public inbox; see mirroring instructions
for how to clone and mirror all data and code used for this inbox,
as well as URLs for read-only IMAP folder(s) and NNTP newsgroup(s).