* [gcc r12-1303] analyzer: bitfield fixes [PR99212]
@ 2021-06-08 18:46 David Malcolm
From: David Malcolm @ 2021-06-08 18:46 UTC (permalink / raw)
  To: gcc-cvs

https://gcc.gnu.org/g:d3b1ef7a83c0c0cd5b20a1dd1714b868f3d2b442

commit r12-1303-gd3b1ef7a83c0c0cd5b20a1dd1714b868f3d2b442
Author: David Malcolm <dmalcolm@redhat.com>
Date:   Tue Jun 8 14:45:57 2021 -0400

    analyzer: bitfield fixes [PR99212]
    
    This patch verifies the previous fix for bitfield sizes by implementing
    enough support for bitfields in the analyzer to get the test cases to pass.
    
    The patch implements support in the analyzer for reading from a
    BIT_FIELD_REF, and support for folding BIT_AND_EXPR of a mask, to handle
    the cases generated in tests.
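
    As a rough illustration (a sketch only, not part of the committed
    tests; the exact lowering depends on the target and optimization
    level), a read of a one-bit field can reach the analyzer as a
    BIT_FIELD_REF of the underlying storage combined with a
    BIT_AND_EXPR mask:

      struct st1
      {
        unsigned short nonzero_offset;
        unsigned int f0 : 1;
        unsigned int f1 : 1;
      };

      int read_f1 (struct st1 *p)
      {
        /* Something like the following may be seen here, i.e. a wider
           read plus a single-bit mask (illustrative only):
             _1 = BIT_FIELD_REF <*p, 8, 16>;
             _2 = _1 & 2;  */
        return p->f1;
      }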
    
    The existing bitfields tests in data-model-1.c turned out to rely on
    undefined behavior, in that they were assigning values to a signed
    bitfield that were outside of the valid range of values.  I believe that
    that's why we were seeing target-specific differences in the test
    results (PR analyzer/99212).  The patch updates the test to remove the
    undefined behaviors.
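
    As a reduced sketch of the problem (derived from the old test_44 in
    the diff below; the struct and function names here are illustrative
    only): a three-bit signed bitfield can represent only the values -4
    through 3, so storing 5 into it is out of range and the stored value
    typically does not compare equal to 5:

      struct sbits_sketch
      {
        signed int b456 : 3;   /* representable range: -4 .. 3 */
      };

      void old_style_test (void)
      {
        struct sbits_sketch bits;
        bits.b456 = 5;   /* 5 does not fit; on typical two's-complement
                            targets the field ends up holding -3 */
        /* so a check such as (bits.b456 == 5) cannot hold; the updated
           test stores -4, which is representable, instead.  */
      }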
    
    gcc/analyzer/ChangeLog:
            PR analyzer/99212
            * region-model-manager.cc
            (region_model_manager::maybe_fold_binop): Add support for folding
            BIT_AND_EXPR of compound_svalue and a mask constant.
            * region-model.cc (region_model::get_rvalue_1): Implement
            BIT_FIELD_REF in terms of...
            (region_model::get_rvalue_for_bits): New function.
            * region-model.h (region_model::get_rvalue_for_bits): New decl.
            * store.cc (bit_range::from_mask): New function.
            (selftest::test_bit_range_intersects_p): New selftest.
            (selftest::assert_bit_range_from_mask_eq): New.
            (ASSERT_BIT_RANGE_FROM_MASK_EQ): New macro.
            (selftest::assert_no_bit_range_from_mask_eq): New.
            (ASSERT_NO_BIT_RANGE_FROM_MASK): New macro.
            (selftest::test_bit_range_from_mask): New selftest.
            (selftest::analyzer_store_cc_tests): Call the new selftests.
            * store.h (bit_range::intersects_p): New.
            (bit_range::from_mask): New decl.
            (concrete_binding::get_bit_range): New accessor.
            (store_manager::get_concrete_binding): New overload taking
            const bit_range &.
    
    gcc/testsuite/ChangeLog:
            PR analyzer/99212
            * gcc.dg/analyzer/bitfields-1.c: New test.
            * gcc.dg/analyzer/data-model-1.c (struct sbits): Make bitfields
            explicitly signed.
            (test_44): Update test values assigned to the bits to ones that
            fit in the range of the bitfield type.  Remove xfails.
            (test_45): Remove xfails.
    
    Signed-off-by: David Malcolm <dmalcolm@redhat.com>

Diff:
---
 gcc/analyzer/region-model-manager.cc         |  46 ++++++-
 gcc/analyzer/region-model.cc                 |  65 +++++++++-
 gcc/analyzer/region-model.h                  |   4 +
 gcc/analyzer/store.cc                        | 186 +++++++++++++++++++++++++++
 gcc/analyzer/store.h                         |  18 +++
 gcc/testsuite/gcc.dg/analyzer/bitfields-1.c  | 144 +++++++++++++++++++++
 gcc/testsuite/gcc.dg/analyzer/data-model-1.c |  30 ++---
 7 files changed, 469 insertions(+), 24 deletions(-)

diff --git a/gcc/analyzer/region-model-manager.cc b/gcc/analyzer/region-model-manager.cc
index dfd2413e914..0ca0c8ad02e 100644
--- a/gcc/analyzer/region-model-manager.cc
+++ b/gcc/analyzer/region-model-manager.cc
@@ -480,9 +480,49 @@ region_model_manager::maybe_fold_binop (tree type, enum tree_code op,
       break;
     case BIT_AND_EXPR:
       if (cst1)
-	if (zerop (cst1) && INTEGRAL_TYPE_P (type))
-	  /* "(ARG0 & 0)" -> "0".  */
-	  return get_or_create_constant_svalue (build_int_cst (type, 0));
+	{
+	  if (zerop (cst1) && INTEGRAL_TYPE_P (type))
+	    /* "(ARG0 & 0)" -> "0".  */
+	    return get_or_create_constant_svalue (build_int_cst (type, 0));
+
+	  /* Support masking out bits from a compound_svalue, as this
+	     is generated when accessing bitfields.  */
+	  if (const compound_svalue *compound_sval
+		= arg0->dyn_cast_compound_svalue ())
+	    {
+	      const binding_map &map = compound_sval->get_map ();
+	      unsigned HOST_WIDE_INT mask = TREE_INT_CST_LOW (cst1);
+	      /* If "mask" is a contiguous range of set bits, see if the
+		 compound_sval has a value for those bits.  */
+	      bit_range bits (0, 0);
+	      if (bit_range::from_mask (mask, &bits))
+		{
+		  const concrete_binding *conc
+		    = get_store_manager ()->get_concrete_binding (bits,
+								  BK_direct);
+		  if (const svalue *sval = map.get (conc))
+		    {
+		      /* We have a value;
+			 shift it by the correct number of bits.  */
+		      const svalue *lhs = get_or_create_cast (type, sval);
+		      HOST_WIDE_INT bit_offset
+			= bits.get_start_bit_offset ().to_shwi ();
+		      tree shift_amt = build_int_cst (type, bit_offset);
+		      const svalue *shift_sval
+			= get_or_create_constant_svalue (shift_amt);
+		      const svalue *shifted_sval
+			= get_or_create_binop (type,
+					       LSHIFT_EXPR,
+					       lhs, shift_sval);
+		      /* Reapply the mask (needed for negative
+			 signed bitfields).  */
+		      return get_or_create_binop (type,
+						  BIT_AND_EXPR,
+						  shifted_sval, arg1);
+		    }
+		}
+	    }
+	}
       break;
     case TRUTH_ANDIF_EXPR:
     case TRUTH_AND_EXPR:
diff --git a/gcc/analyzer/region-model.cc b/gcc/analyzer/region-model.cc
index c7038dd2d4b..0d363fb15d3 100644
--- a/gcc/analyzer/region-model.cc
+++ b/gcc/analyzer/region-model.cc
@@ -1357,7 +1357,18 @@ region_model::get_rvalue_1 (path_var pv, region_model_context *ctxt)
       break;
 
     case BIT_FIELD_REF:
-      return m_mgr->get_or_create_unknown_svalue (TREE_TYPE (pv.m_tree));
+      {
+	tree expr = pv.m_tree;
+	tree op0 = TREE_OPERAND (expr, 0);
+	const region *reg = get_lvalue (op0, ctxt);
+	tree num_bits = TREE_OPERAND (expr, 1);
+	tree first_bit_offset = TREE_OPERAND (expr, 2);
+	gcc_assert (TREE_CODE (num_bits) == INTEGER_CST);
+	gcc_assert (TREE_CODE (first_bit_offset) == INTEGER_CST);
+	bit_range bits (TREE_INT_CST_LOW (first_bit_offset),
+			TREE_INT_CST_LOW (num_bits));
+	return get_rvalue_for_bits (TREE_TYPE (expr), reg, bits);
+      }
 
     case SSA_NAME:
     case VAR_DECL:
@@ -1686,6 +1697,58 @@ region_model::deref_rvalue (const svalue *ptr_sval, tree ptr_tree,
   return m_mgr->get_symbolic_region (ptr_sval);
 }
 
+/* Attempt to get BITS within any value of REG, as TYPE.
+   In particular, extract values from compound_svalues for the case
+   where there's a concrete binding at BITS.
+   Return an unknown svalue if we can't handle the given case.  */
+
+const svalue *
+region_model::get_rvalue_for_bits (tree type,
+				   const region *reg,
+				   const bit_range &bits)
+{
+  const svalue *sval = get_store_value (reg);
+  if (const compound_svalue *compound_sval = sval->dyn_cast_compound_svalue ())
+    {
+      const binding_map &map = compound_sval->get_map ();
+      binding_map result_map;
+      for (auto iter : map)
+	{
+	  const binding_key *key = iter.first;
+	  if (const concrete_binding *conc_key
+	      = key->dyn_cast_concrete_binding ())
+	    {
+	      /* Ignore concrete bindings outside BITS.  */
+	      if (!conc_key->get_bit_range ().intersects_p (bits))
+		continue;
+	      if ((conc_key->get_start_bit_offset ()
+		   < bits.get_start_bit_offset ())
+		  || (conc_key->get_next_bit_offset ()
+		      > bits.get_next_bit_offset ()))
+		{
+		  /* If we have any concrete keys that aren't fully within BITS,
+		     then bail out.  */
+		  return m_mgr->get_or_create_unknown_svalue (type);
+		}
+	      const concrete_binding *offset_conc_key
+		= m_mgr->get_store_manager ()->get_concrete_binding
+		    (conc_key->get_start_bit_offset ()
+		     - bits.get_start_bit_offset (),
+		     conc_key->get_size_in_bits (),
+		     conc_key->get_kind ());
+	      const svalue *sval = iter.second;
+	      result_map.put (offset_conc_key, sval);
+	    }
+	  else
+	    /* If we have any symbolic keys we can't get it as bits.  */
+	    return m_mgr->get_or_create_unknown_svalue (type);
+	}
+      return m_mgr->get_or_create_compound_svalue (type, result_map);
+    }
+
+  return m_mgr->get_or_create_unknown_svalue (type);
+}
+
 /* A subclass of pending_diagnostic for complaining about writes to
    constant regions of memory.  */
 
diff --git a/gcc/analyzer/region-model.h b/gcc/analyzer/region-model.h
index a169396dcb5..5e43e547199 100644
--- a/gcc/analyzer/region-model.h
+++ b/gcc/analyzer/region-model.h
@@ -509,6 +509,10 @@ class region_model
   const region *deref_rvalue (const svalue *ptr_sval, tree ptr_tree,
 			       region_model_context *ctxt);
 
+  const svalue *get_rvalue_for_bits (tree type,
+				     const region *reg,
+				     const bit_range &bits);
+
   void set_value (const region *lhs_reg, const svalue *rhs_sval,
 		  region_model_context *ctxt);
   void set_value (tree lhs, tree rhs, region_model_context *ctxt);
diff --git a/gcc/analyzer/store.cc b/gcc/analyzer/store.cc
index f4bb7def781..699de94cdb0 100644
--- a/gcc/analyzer/store.cc
+++ b/gcc/analyzer/store.cc
@@ -259,6 +259,64 @@ bit_range::cmp (const bit_range &br1, const bit_range &br2)
   return wi::cmpu (br1.m_size_in_bits, br2.m_size_in_bits);
 }
 
+/* If MASK is a contiguous range of set bits, write them
+   to *OUT and return true.
+   Otherwise return false.  */
+
+bool
+bit_range::from_mask (unsigned HOST_WIDE_INT mask, bit_range *out)
+{
+  unsigned iter_bit_idx = 0;
+  unsigned HOST_WIDE_INT iter_bit_mask = 1;
+
+  /* Find the first contiguous run of set bits in MASK.  */
+
+  /* Find first set bit in MASK.  */
+  while (iter_bit_idx < HOST_BITS_PER_WIDE_INT)
+    {
+      if (mask & iter_bit_mask)
+	break;
+      iter_bit_idx++;
+      iter_bit_mask <<= 1;
+    }
+  if (iter_bit_idx == HOST_BITS_PER_WIDE_INT)
+    /* MASK is zero.  */
+    return false;
+
+  unsigned first_set_iter_bit_idx = iter_bit_idx;
+  unsigned num_set_bits = 1;
+  iter_bit_idx++;
+  iter_bit_mask <<= 1;
+
+  /* Find next unset bit in MASK.  */
+  while (iter_bit_idx < HOST_BITS_PER_WIDE_INT)
+    {
+      if (!(mask & iter_bit_mask))
+	break;
+      num_set_bits++;
+      iter_bit_idx++;
+      iter_bit_mask <<= 1;
+    }
+  if (iter_bit_idx == HOST_BITS_PER_WIDE_INT)
+    {
+      *out = bit_range (first_set_iter_bit_idx, num_set_bits);
+      return true;
+    }
+
+  /* We now have the first contiguous run of set bits in MASK.
+     Fail if any other bits are set.  */
+  while (iter_bit_idx < HOST_BITS_PER_WIDE_INT)
+    {
+      if (mask & iter_bit_mask)
+	return false;
+      iter_bit_idx++;
+      iter_bit_mask <<= 1;
+    }
+
+  *out = bit_range (first_set_iter_bit_idx, num_set_bits);
+  return true;
+}
+
 /* class concrete_binding : public binding_key.  */
 
 /* Implementation of binding_key::dump_to_pp vfunc for concrete_binding.  */
@@ -2448,6 +2506,132 @@ store::loop_replay_fixup (const store *other_store,
 
 namespace selftest {
 
+/* Verify that bit_range::intersects_p works as expected.  */
+
+static void
+test_bit_range_intersects_p ()
+{
+  bit_range b0 (0, 1);
+  bit_range b1 (1, 1);
+  bit_range b2 (2, 1);
+  bit_range b3 (3, 1);
+  bit_range b4 (4, 1);
+  bit_range b5 (5, 1);
+  bit_range b6 (6, 1);
+  bit_range b7 (7, 1);
+  bit_range b1_to_6 (1, 6);
+  bit_range b0_to_7 (0, 8);
+  bit_range b3_to_5 (3, 3);
+  bit_range b6_to_7 (6, 2);
+
+  /* self-intersection is true.  */
+  ASSERT_TRUE (b0.intersects_p (b0));
+  ASSERT_TRUE (b7.intersects_p (b7));
+  ASSERT_TRUE (b1_to_6.intersects_p (b1_to_6));
+  ASSERT_TRUE (b0_to_7.intersects_p (b0_to_7));
+
+  ASSERT_FALSE (b0.intersects_p (b1));
+  ASSERT_FALSE (b1.intersects_p (b0));
+  ASSERT_FALSE (b0.intersects_p (b7));
+  ASSERT_FALSE (b7.intersects_p (b0));
+
+  ASSERT_TRUE (b0_to_7.intersects_p (b0));
+  ASSERT_TRUE (b0_to_7.intersects_p (b7));
+  ASSERT_TRUE (b0.intersects_p (b0_to_7));
+  ASSERT_TRUE (b7.intersects_p (b0_to_7));
+
+  ASSERT_FALSE (b0.intersects_p (b1_to_6));
+  ASSERT_FALSE (b1_to_6.intersects_p (b0));
+  ASSERT_TRUE (b1.intersects_p (b1_to_6));
+  ASSERT_TRUE (b1_to_6.intersects_p (b1));
+  ASSERT_TRUE (b1_to_6.intersects_p (b6));
+  ASSERT_FALSE (b1_to_6.intersects_p (b7));
+
+  ASSERT_TRUE (b1_to_6.intersects_p (b0_to_7));
+  ASSERT_TRUE (b0_to_7.intersects_p (b1_to_6));
+
+  ASSERT_FALSE (b3_to_5.intersects_p (b6_to_7));
+  ASSERT_FALSE (b6_to_7.intersects_p (b3_to_5));
+}
+
+/* Implementation detail of ASSERT_BIT_RANGE_FROM_MASK_EQ.  */
+
+static void
+assert_bit_range_from_mask_eq (const location &loc,
+			       unsigned HOST_WIDE_INT mask,
+			       const bit_range &expected)
+{
+  bit_range actual (0, 0);
+  bool ok = bit_range::from_mask (mask, &actual);
+  ASSERT_TRUE_AT (loc, ok);
+  ASSERT_EQ_AT (loc, actual, expected);
+}
+
+/* Assert that bit_range::from_mask (MASK) returns true, and writes
+   out EXPECTED_BIT_RANGE.  */
+
+#define ASSERT_BIT_RANGE_FROM_MASK_EQ(MASK, EXPECTED_BIT_RANGE) \
+  SELFTEST_BEGIN_STMT							\
+  assert_bit_range_from_mask_eq (SELFTEST_LOCATION, MASK,		\
+				 EXPECTED_BIT_RANGE);			\
+  SELFTEST_END_STMT
+
+/* Implementation detail of ASSERT_NO_BIT_RANGE_FROM_MASK.  */
+
+static void
+assert_no_bit_range_from_mask_eq (const location &loc,
+				  unsigned HOST_WIDE_INT mask)
+{
+  bit_range actual (0, 0);
+  bool ok = bit_range::from_mask (mask, &actual);
+  ASSERT_FALSE_AT (loc, ok);
+}
+
+/* Assert that bit_range::from_mask (MASK) returns false.  */
+
+#define ASSERT_NO_BIT_RANGE_FROM_MASK(MASK) \
+  SELFTEST_BEGIN_STMT							\
+  assert_no_bit_range_from_mask_eq (SELFTEST_LOCATION, MASK);		\
+  SELFTEST_END_STMT
+
+/* Verify that bit_range::from_mask works as expected.  */
+
+static void
+test_bit_range_from_mask ()
+{
+  /* Should fail on zero.  */
+  ASSERT_NO_BIT_RANGE_FROM_MASK (0);
+
+  /* Verify 1-bit masks.  */
+  ASSERT_BIT_RANGE_FROM_MASK_EQ (1, bit_range (0, 1));
+  ASSERT_BIT_RANGE_FROM_MASK_EQ (2, bit_range (1, 1));
+  ASSERT_BIT_RANGE_FROM_MASK_EQ (4, bit_range (2, 1));
+  ASSERT_BIT_RANGE_FROM_MASK_EQ (8, bit_range (3, 1));
+  ASSERT_BIT_RANGE_FROM_MASK_EQ (16, bit_range (4, 1));
+  ASSERT_BIT_RANGE_FROM_MASK_EQ (32, bit_range (5, 1));
+  ASSERT_BIT_RANGE_FROM_MASK_EQ (64, bit_range (6, 1));
+  ASSERT_BIT_RANGE_FROM_MASK_EQ (128, bit_range (7, 1));
+
+  /* Verify N-bit masks starting at bit 0.  */
+  ASSERT_BIT_RANGE_FROM_MASK_EQ (3, bit_range (0, 2));
+  ASSERT_BIT_RANGE_FROM_MASK_EQ (7, bit_range (0, 3));
+  ASSERT_BIT_RANGE_FROM_MASK_EQ (15, bit_range (0, 4));
+  ASSERT_BIT_RANGE_FROM_MASK_EQ (31, bit_range (0, 5));
+  ASSERT_BIT_RANGE_FROM_MASK_EQ (63, bit_range (0, 6));
+  ASSERT_BIT_RANGE_FROM_MASK_EQ (127, bit_range (0, 7));
+  ASSERT_BIT_RANGE_FROM_MASK_EQ (255, bit_range (0, 8));
+  ASSERT_BIT_RANGE_FROM_MASK_EQ (0xffff, bit_range (0, 16));
+
+  /* Various other tests. */
+  ASSERT_BIT_RANGE_FROM_MASK_EQ (0x30, bit_range (4, 2));
+  ASSERT_BIT_RANGE_FROM_MASK_EQ (0x700, bit_range (8, 3));
+  ASSERT_BIT_RANGE_FROM_MASK_EQ (0x600, bit_range (9, 2));
+
+  /* Multiple ranges of set bits should fail.  */
+  ASSERT_NO_BIT_RANGE_FROM_MASK (0x101);
+  ASSERT_NO_BIT_RANGE_FROM_MASK (0xf0f0f0f0);
+}
+
 /* Implementation detail of ASSERT_OVERLAP.  */
 
 static void
@@ -2546,6 +2730,8 @@ test_binding_key_overlap ()
 void
 analyzer_store_cc_tests ()
 {
+  test_bit_range_intersects_p ();
+  test_bit_range_from_mask ();
   test_binding_key_overlap ();
 }
 
diff --git a/gcc/analyzer/store.h b/gcc/analyzer/store.h
index be09b427366..7bd2824dba9 100644
--- a/gcc/analyzer/store.h
+++ b/gcc/analyzer/store.h
@@ -297,8 +297,16 @@ struct bit_range
 	    && m_size_in_bits == other.m_size_in_bits);
   }
 
+  bool intersects_p (const bit_range &other) const
+  {
+    return (get_start_bit_offset () < other.get_next_bit_offset ()
+	    && other.get_start_bit_offset () < get_next_bit_offset ());
+  }
+
   static int cmp (const bit_range &br1, const bit_range &br2);
 
+  static bool from_mask (unsigned HOST_WIDE_INT mask, bit_range *out);
+
   bit_offset_t m_start_bit_offset;
   bit_size_t m_size_in_bits;
 };
@@ -338,6 +346,8 @@ public:
   const concrete_binding *dyn_cast_concrete_binding () const FINAL OVERRIDE
   { return this; }
 
+  const bit_range &get_bit_range () const { return m_bit_range; }
+
   bit_offset_t get_start_bit_offset () const
   {
     return m_bit_range.m_start_bit_offset;
@@ -739,6 +749,14 @@ public:
   get_concrete_binding (bit_offset_t start_bit_offset,
 			bit_offset_t size_in_bits,
 			enum binding_kind kind);
+  const concrete_binding *
+  get_concrete_binding (const bit_range &bits,
+			enum binding_kind kind)
+  {
+    return get_concrete_binding (bits.get_start_bit_offset (),
+				 bits.m_size_in_bits,
+				 kind);
+  }
   const symbolic_binding *
   get_symbolic_binding (const region *region,
 			enum binding_kind kind);
diff --git a/gcc/testsuite/gcc.dg/analyzer/bitfields-1.c b/gcc/testsuite/gcc.dg/analyzer/bitfields-1.c
new file mode 100644
index 00000000000..8bbe76bdbf3
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/analyzer/bitfields-1.c
@@ -0,0 +1,144 @@
+#include "analyzer-decls.h"
+
+typedef unsigned char u8;
+typedef unsigned __INT16_TYPE__ u16;
+typedef unsigned __INT32_TYPE__ u32;
+
+struct st1
+{
+  u16 nonzero_offset;
+  unsigned int f0 : 1;
+  unsigned int f1 : 1;
+  unsigned int f2 : 1;
+  unsigned int f3 : 1;
+  unsigned int f4 : 1;
+  unsigned int f5 : 1;
+  unsigned int f6 : 1;
+  unsigned int f7 : 1;
+};
+
+void test_1 (void)
+{
+  struct st1 s;
+  s.f0 = 0;
+  __analyzer_eval (s.f0 == 0); /* { dg-warning "TRUE" } */
+  s.f0 = 1;
+  __analyzer_eval (s.f0 == 1); /* { dg-warning "TRUE" } */
+
+  s.f1 = 0;
+  __analyzer_eval (s.f1 == 0); /* { dg-warning "TRUE" } */
+  s.f1 = 1;
+  __analyzer_eval (s.f1 == 1); /* { dg-warning "TRUE" } */
+
+  /* etc  */
+
+  s.f6 = 0;
+  __analyzer_eval (s.f6 == 0); /* { dg-warning "TRUE" } */
+  s.f6 = 1;
+  __analyzer_eval (s.f6 == 1); /* { dg-warning "TRUE" } */
+
+  s.f7 = 0;
+  __analyzer_eval (s.f7 == 0); /* { dg-warning "TRUE" } */
+  s.f7 = 1;
+  __analyzer_eval (s.f7 == 1); /* { dg-warning "TRUE" } */
+};
+
+void test_2 (_Bool v0, _Bool v1, _Bool v2, _Bool v3,
+	     _Bool v4, _Bool v5, _Bool v6, _Bool v7)
+{
+  struct st1 s;
+  s.f0 = v0;
+  s.f1 = v1;
+  s.f2 = v2;
+  s.f3 = v3;
+  s.f4 = v4;
+  s.f5 = v5;
+  s.f6 = v6;
+  s.f7 = v7;
+
+  __analyzer_eval (s.f0 == v0); /* { dg-warning "TRUE" } */
+  __analyzer_eval (s.f1 == v1); /* { dg-warning "TRUE" } */
+  __analyzer_eval (s.f2 == v2); /* { dg-warning "TRUE" } */
+  __analyzer_eval (s.f3 == v3); /* { dg-warning "TRUE" } */
+  __analyzer_eval (s.f4 == v4); /* { dg-warning "TRUE" } */
+  __analyzer_eval (s.f5 == v5); /* { dg-warning "TRUE" } */
+  __analyzer_eval (s.f6 == v6); /* { dg-warning "TRUE" } */
+  __analyzer_eval (s.f7 == v7); /* { dg-warning "TRUE" } */
+};
+
+struct st3
+{
+  unsigned int f01 : 2;
+  unsigned int f23 : 2;
+  unsigned int f34 : 2;
+  unsigned int f56 : 2;
+};
+
+void test_3 (void)
+{
+  struct st3 s;
+  s.f01 = 0;
+  __analyzer_eval (s.f01 == 0); /* { dg-warning "TRUE" } */
+  s.f01 = 1;
+  __analyzer_eval (s.f01 == 1); /* { dg-warning "TRUE" } */
+  s.f01 = 2;
+  __analyzer_eval (s.f01 == 2); /* { dg-warning "TRUE" } */
+  s.f01 = 3;
+  __analyzer_eval (s.f01 == 3); /* { dg-warning "TRUE" } */
+
+  /* etc  */
+
+  s.f56 = 0;
+  __analyzer_eval (s.f56 == 0); /* { dg-warning "TRUE" } */
+  s.f56 = 1;
+  __analyzer_eval (s.f56 == 1); /* { dg-warning "TRUE" } */
+  s.f56 = 2;
+  __analyzer_eval (s.f56 == 2); /* { dg-warning "TRUE" } */
+  s.f56 = 3;
+  __analyzer_eval (s.f56 == 3); /* { dg-warning "TRUE" } */
+};
+
+/* A signed bitfield.  */
+
+struct st4
+{
+  signed int f012 : 3;
+  signed int f345 : 3;
+};
+
+void test_4 (void)
+{
+  struct st4 s;
+  s.f345 = -4;
+  __analyzer_eval (s.f345 == -4); /* { dg-warning "TRUE" } */
+  s.f345 = -3;
+  __analyzer_eval (s.f345 == -3); /* { dg-warning "TRUE" } */
+  s.f345 = -2;
+  __analyzer_eval (s.f345 == -2); /* { dg-warning "TRUE" } */
+  s.f345 = -1;
+  __analyzer_eval (s.f345 == -1); /* { dg-warning "TRUE" } */
+  s.f345 = 0;
+  __analyzer_eval (s.f345 == 0); /* { dg-warning "TRUE" } */
+  s.f345 = 1;
+  __analyzer_eval (s.f345 == 1); /* { dg-warning "TRUE" } */
+  s.f345 = 2;
+  __analyzer_eval (s.f345 == 2); /* { dg-warning "TRUE" } */
+  s.f345 = 3;
+  __analyzer_eval (s.f345 == 3); /* { dg-warning "TRUE" } */
+};
+
+/* A zero bitfield to break up padding.  */
+
+struct st5
+{
+  unsigned f0 : 5;
+  unsigned :0;
+  unsigned f1 : 16;
+};
+
+void test_5 (void)
+{
+  struct st5 s;
+  s.f1 = 0xcafe;
+  __analyzer_eval (s.f1 == 0xcafe); /* { dg-warning "TRUE" } */
+}
diff --git a/gcc/testsuite/gcc.dg/analyzer/data-model-1.c b/gcc/testsuite/gcc.dg/analyzer/data-model-1.c
index c0f54637693..4a62a0e2bbc 100644
--- a/gcc/testsuite/gcc.dg/analyzer/data-model-1.c
+++ b/gcc/testsuite/gcc.dg/analyzer/data-model-1.c
@@ -934,24 +934,20 @@ void test_43 (void)
 
 struct sbits
 {
-  int b0 : 1;
-  int b123 : 3;
-  int b456 : 3;
-  int b7 : 1;
+  signed int b0 : 1;
+  signed int b123 : 3;
+  signed int b456 : 3;
+  signed int b7 : 1;
 };
 
 void test_44 (void)
 {
   struct sbits bits;
-  bits.b0 = 1;
-  __analyzer_eval (bits.b0 == 1); /* { dg-warning "TRUE" "desired" { xfail *-*-* } } */
-  /* { dg-warning "FALSE" "status quo" { target *-*-* } .-1 } */
-  // TODO(xfail): ^^^^
+  bits.b0 = -1;
+  __analyzer_eval (bits.b0 == -1); /* { dg-warning "TRUE" } */
 
-  bits.b456 = 5;
-  __analyzer_eval (bits.b456 == 5); /* { dg-warning "TRUE" "desired" { xfail *-*-* } } */
-  /* { dg-warning "FALSE" "status quo" { target *-*-* } .-1 } */
-  // TODO(xfail): ^^^^
+  bits.b456 = -4;
+  __analyzer_eval (bits.b456 == -4); /* { dg-warning "TRUE" } */
 };
 
 struct ubits
@@ -962,20 +958,14 @@ struct ubits
   unsigned int b7 : 1;
 };
 
-/* FIXME: this requires BIT_FIELD_REF to work.  */
-
 void test_45 (void)
 {
   struct ubits bits;
   bits.b0 = 1;
-  __analyzer_eval (bits.b0 == 1); /* { dg-warning "TRUE" "desired, PR99212" { xfail { ! { cris-*-* } } } } */
-  /* { dg-warning "UNKNOWN" "status quo, PR99212" { target { *-*-* } xfail { cris-*-* } } .-1 } */
-  // TODO(xfail): ^^^^
+  __analyzer_eval (bits.b0 == 1); /* { dg-warning "TRUE" } */
 
   bits.b456 = 5;
-  __analyzer_eval (bits.b456 == 5); /* { dg-warning "TRUE" "desired" { xfail *-*-* } } */
-  /* { dg-warning "UNKNOWN" "status quo" { target *-*-* } .-1 } */
-  // TODO(xfail): ^^^^
+  __analyzer_eval (bits.b456 == 5); /* { dg-warning "TRUE" } */
 };
 
 extern const char *char_ptr;

