From: Jason Merrill <jason@redhat.com>
To: Patrick Palka <ppalka@redhat.com>
Cc: gcc-patches@gcc.gnu.org
Subject: Re: [PATCH 2/2] c++: speculative constexpr and is_constant_evaluated [PR108243]
Date: Sun, 5 Feb 2023 12:11:28 -0800	[thread overview]
Message-ID: <1ba53fb8-f688-b680-75f9-a158fa0d679b@redhat.com> (raw)
In-Reply-To: <8ca008ee-d56c-52d9-4f3f-3f738e0e8af6@idea>

On 2/3/23 15:51, Patrick Palka wrote:
> On Mon, 30 Jan 2023, Jason Merrill wrote:
> 
>> On 1/27/23 17:02, Patrick Palka wrote:
>>> This PR illustrates that __builtin_is_constant_evaluated currently acts
>>> as an optimization barrier for our speculative constexpr evaluation,
>>> since we don't want to prematurely fold the builtin to false if the
>>> expression in question would be later manifestly constant evaluated (in
>>> which case it must be folded to true).
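>>>
>>> For example (an illustrative sketch, not part of the patch itself):
>>>
>>>   constexpr int foo() {
>>>     return __builtin_is_constant_evaluated() + 1;
>>>   }
>>>
>>>   int p;
>>>   int main() { p = foo(); }
>>>
>>> If we refuse to fold the builtin while speculatively evaluating the
>>> call to foo at -O, the whole call fails to fold and survives to
>>> runtime, even though after genericization we could have safely
>>> folded it to p = 1.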
>>>
>>> This patch fixes this by permitting __builtin_is_constant_evaluated
>>> to get folded as false during cp_fold_function, since at that point
>>> we're sure we're done with manifestly constant evaluation.  To that end
>>> we add a flags parameter to cp_fold that controls what mce_value the
>>> CALL_EXPR case passes to maybe_constant_value.
>>>
>>> Bootstrapped and regtested on x86_64-pc-linux-gnu, does this look OK for
>>> trunk?
>>>
>>> 	PR c++/108243
>>>
>>> gcc/cp/ChangeLog:
>>>
>>> 	* cp-gimplify.cc (enum fold_flags): Define.
>>> 	(cp_fold_data::genericize): Replace this data member with ...
>>> 	(cp_fold_data::fold_flags): ... this.
>>> 	(cp_fold_r): Adjust use of cp_fold_data and calls to cp_fold.
>>> 	(cp_fold_function): Likewise.
>>> 	(cp_fold_maybe_rvalue): Likewise.
>>> 	(cp_fully_fold_init): Likewise.
>>> 	(cp_fold): Add fold_flags parameter.  Don't cache if flags
>>> 	isn't empty.
>>> 	<case CALL_EXPR>: Pass mce_false to maybe_constant_value
>>> 	if ff_genericize is set.
>>>
>>> gcc/testsuite/ChangeLog:
>>>
>>> 	* g++.dg/opt/pr108243.C: New test.
>>> ---
>>>    gcc/cp/cp-gimplify.cc               | 76 ++++++++++++++++++-----------
>>>    gcc/testsuite/g++.dg/opt/pr108243.C | 29 +++++++++++
>>>    2 files changed, 76 insertions(+), 29 deletions(-)
>>>    create mode 100644 gcc/testsuite/g++.dg/opt/pr108243.C
>>>
>>> diff --git a/gcc/cp/cp-gimplify.cc b/gcc/cp/cp-gimplify.cc
>>> index a35cedd05cc..d023a63768f 100644
>>> --- a/gcc/cp/cp-gimplify.cc
>>> +++ b/gcc/cp/cp-gimplify.cc
>>> @@ -43,12 +43,20 @@ along with GCC; see the file COPYING3.  If not see
>>>    #include "omp-general.h"
>>>    #include "opts.h"
>>>    +/* Flags for cp_fold and cp_fold_r.  */
>>> +
>>> +enum fold_flags {
>>> +  ff_none = 0,
>>> +  /* Whether we're being called from cp_fold_function.  */
>>> +  ff_genericize = 1 << 0,
>>> +};
>>> +
>>>    /* Forward declarations.  */
>>>      static tree cp_genericize_r (tree *, int *, void *);
>>>    static tree cp_fold_r (tree *, int *, void *);
>>>    static void cp_genericize_tree (tree*, bool);
>>> -static tree cp_fold (tree);
>>> +static tree cp_fold (tree, fold_flags);
>>>      /* Genericize a TRY_BLOCK.  */
>>>    @@ -996,9 +1004,8 @@ struct cp_genericize_data
>>>    struct cp_fold_data
>>>    {
>>>      hash_set<tree> pset;
>>> -  bool genericize; // called from cp_fold_function?
>>> -
>>> -  cp_fold_data (bool g): genericize (g) {}
>>> +  fold_flags flags;
>>> +  cp_fold_data (fold_flags flags): flags (flags) {}
>>>    };
>>>      static tree
>>> @@ -1039,7 +1046,7 @@ cp_fold_r (tree *stmt_p, int *walk_subtrees, void *data_)
>>>          break;
>>>        }
>>>    -  *stmt_p = stmt = cp_fold (*stmt_p);
>>> +  *stmt_p = stmt = cp_fold (*stmt_p, data->flags);
>>>        if (data->pset.add (stmt))
>>>        {
>>> @@ -1119,12 +1126,12 @@ cp_fold_r (tree *stmt_p, int *walk_subtrees, void *data_)
>>>    	 here rather than in cp_genericize to avoid problems with the invisible
>>>    	 reference transition.  */
>>>        case INIT_EXPR:
>>> -      if (data->genericize)
>>> +      if (data->flags & ff_genericize)
>>>    	cp_genericize_init_expr (stmt_p);
>>>          break;
>>>          case TARGET_EXPR:
>>> -      if (data->genericize)
>>> +      if (data->flags & ff_genericize)
>>>    	cp_genericize_target_expr (stmt_p);
>>>            /* Folding might replace e.g. a COND_EXPR with a TARGET_EXPR; in
>>> @@ -1157,7 +1164,7 @@ cp_fold_r (tree *stmt_p, int *walk_subtrees, void *data_)
>>>    void
>>>    cp_fold_function (tree fndecl)
>>>    {
>>> -  cp_fold_data data (/*genericize*/true);
>>> +  cp_fold_data data (ff_genericize);
>>>      cp_walk_tree (&DECL_SAVED_TREE (fndecl), cp_fold_r, &data, NULL);
>>>    }
>>>    @@ -2375,7 +2382,7 @@ cp_fold_maybe_rvalue (tree x, bool rval)
>>>    {
>>>      while (true)
>>>        {
>>> -      x = cp_fold (x);
>>> +      x = cp_fold (x, ff_none);
>>>          if (rval)
>>>    	x = mark_rvalue_use (x);
>>>          if (rval && DECL_P (x)
>>> @@ -2434,7 +2441,7 @@ cp_fully_fold_init (tree x)
>>>      if (processing_template_decl)
>>>        return x;
>>>      x = cp_fully_fold (x);
>>> -  cp_fold_data data (/*genericize*/false);
>>> +  cp_fold_data data (ff_none);
>>>      cp_walk_tree (&x, cp_fold_r, &data, NULL);
>>>      return x;
>>>    }
>>> @@ -2469,7 +2476,7 @@ clear_fold_cache (void)
>>>        Function returns X or its folded variant.  */
>>>      static tree
>>> -cp_fold (tree x)
>>> +cp_fold (tree x, fold_flags flags)
>>>    {
>>>      tree op0, op1, op2, op3;
>>>      tree org_x = x, r = NULL_TREE;
>>> @@ -2490,8 +2497,11 @@ cp_fold (tree x)
>>>      if (fold_cache == NULL)
>>>        fold_cache = hash_map<tree, tree>::create_ggc (101);
>>>    -  if (tree *cached = fold_cache->get (x))
>>> -    return *cached;
>>> +  bool cache_p = (flags == ff_none);
>>> +
>>> +  if (cache_p)
>>> +    if (tree *cached = fold_cache->get (x))
>>> +      return *cached;
>>>        uid_sensitive_constexpr_evaluation_checker c;
>>>    @@ -2526,7 +2536,7 @@ cp_fold (tree x)
>>>    	     Don't create a new tree if op0 != TREE_OPERAND (x, 0), the
>>>    	     folding of the operand should be in the caches and if in cp_fold_r
>>>    	     it will modify it in place.  */
>>> -	  op0 = cp_fold (TREE_OPERAND (x, 0));
>>> +	  op0 = cp_fold (TREE_OPERAND (x, 0), flags);
>>>    	  if (op0 == error_mark_node)
>>>    	    x = error_mark_node;
>>>    	  break;
>>> @@ -2571,7 +2581,7 @@ cp_fold (tree x)
>>>    	{
>>>    	  tree p = maybe_undo_parenthesized_ref (x);
>>>    	  if (p != x)
>>> -	    return cp_fold (p);
>>> +	    return cp_fold (p, flags);
>>>    	}
>>>          goto unary;
>>>    @@ -2763,8 +2773,8 @@ cp_fold (tree x)
>>>        case COND_EXPR:
>>>          loc = EXPR_LOCATION (x);
>>>          op0 = cp_fold_rvalue (TREE_OPERAND (x, 0));
>>> -      op1 = cp_fold (TREE_OPERAND (x, 1));
>>> -      op2 = cp_fold (TREE_OPERAND (x, 2));
>>> +      op1 = cp_fold (TREE_OPERAND (x, 1), flags);
>>> +      op2 = cp_fold (TREE_OPERAND (x, 2), flags);
>>>            if (TREE_CODE (TREE_TYPE (x)) == BOOLEAN_TYPE)
>>>    	{
>>> @@ -2854,7 +2864,7 @@ cp_fold (tree x)
>>>    	      {
>>>    		if (!same_type_p (TREE_TYPE (x), TREE_TYPE (r)))
>>>    		  r = build_nop (TREE_TYPE (x), r);
>>> -		x = cp_fold (r);
>>> +		x = cp_fold (r, flags);
>>>    		break;
>>>    	      }
>>>    	  }
>>> @@ -2908,7 +2918,7 @@ cp_fold (tree x)
>>>    	int m = call_expr_nargs (x);
>>>    	for (int i = 0; i < m; i++)
>>>    	  {
>>> -	    r = cp_fold (CALL_EXPR_ARG (x, i));
>>> +	    r = cp_fold (CALL_EXPR_ARG (x, i), flags);
>>>    	    if (r != CALL_EXPR_ARG (x, i))
>>>    	      {
>>>    		if (r == error_mark_node)
>>> @@ -2931,7 +2941,7 @@ cp_fold (tree x)
>>>      	if (TREE_CODE (r) != CALL_EXPR)
>>>    	  {
>>> -	    x = cp_fold (r);
>>> +	    x = cp_fold (r, flags);
>>>    	    break;
>>>    	  }
>>>    @@ -2944,7 +2954,15 @@ cp_fold (tree x)
>>>    	   constant, but the call followed by an INDIRECT_REF is.  */
>>>    	if (callee && DECL_DECLARED_CONSTEXPR_P (callee)
>>>    	    && !flag_no_inline)
>>> -	  r = maybe_constant_value (x);
>>> +	  {
>>> +	    mce_value manifestly_const_eval = mce_unknown;
>>> +	    if (flags & ff_genericize)
>>> +	      /* At genericization time it's safe to fold
>>> +		 __builtin_is_constant_evaluated to false.  */
>>> +	      manifestly_const_eval = mce_false;
>>> +	    r = maybe_constant_value (x, /*decl=*/NULL_TREE,
>>> +				      manifestly_const_eval);
>>> +	  }
>>>    	optimize = sv;
>>>              if (TREE_CODE (r) != CALL_EXPR)
>>> @@ -2971,7 +2989,7 @@ cp_fold (tree x)
>>>    	vec<constructor_elt, va_gc> *nelts = NULL;
>>>    	FOR_EACH_VEC_SAFE_ELT (elts, i, p)
>>>    	  {
>>> -	    tree op = cp_fold (p->value);
>>> +	    tree op = cp_fold (p->value, flags);
>>>    	    if (op != p->value)
>>>    	      {
>>>    		if (op == error_mark_node)
>>> @@ -3002,7 +3020,7 @@ cp_fold (tree x)
>>>      	for (int i = 0; i < n; i++)
>>>    	  {
>>> -	    tree op = cp_fold (TREE_VEC_ELT (x, i));
>>> +	    tree op = cp_fold (TREE_VEC_ELT (x, i), flags);
>>>    	    if (op != TREE_VEC_ELT (x, i))
>>>    	      {
>>>    		if (!changed)
>>> @@ -3019,10 +3037,10 @@ cp_fold (tree x)
>>>        case ARRAY_RANGE_REF:
>>>            loc = EXPR_LOCATION (x);
>>> -      op0 = cp_fold (TREE_OPERAND (x, 0));
>>> -      op1 = cp_fold (TREE_OPERAND (x, 1));
>>> -      op2 = cp_fold (TREE_OPERAND (x, 2));
>>> -      op3 = cp_fold (TREE_OPERAND (x, 3));
>>> +      op0 = cp_fold (TREE_OPERAND (x, 0), flags);
>>> +      op1 = cp_fold (TREE_OPERAND (x, 1), flags);
>>> +      op2 = cp_fold (TREE_OPERAND (x, 2), flags);
>>> +      op3 = cp_fold (TREE_OPERAND (x, 3), flags);
>>>            if (op0 != TREE_OPERAND (x, 0)
>>>    	  || op1 != TREE_OPERAND (x, 1)
>>> @@ -3050,7 +3068,7 @@ cp_fold (tree x)
>>>          /* A SAVE_EXPR might contain e.g. (0 * i) + (0 * j), which, after
>>>    	 folding, evaluates to an invariant.  In that case no need to wrap
>>>    	 this folded tree with a SAVE_EXPR.  */
>>> -      r = cp_fold (TREE_OPERAND (x, 0));
>>> +      r = cp_fold (TREE_OPERAND (x, 0), flags);
>>>          if (tree_invariant_p (r))
>>>    	x = r;
>>>          break;
>>> @@ -3069,7 +3087,7 @@ cp_fold (tree x)
>>>          copy_warning (x, org_x);
>>>        }
>>>    -  if (!c.evaluation_restricted_p ())
>>> +  if (cache_p && !c.evaluation_restricted_p ())
>>>        {
>>>          fold_cache->put (org_x, x);
>>>          /* Prevent that we try to fold an already folded result again.  */
>>> diff --git a/gcc/testsuite/g++.dg/opt/pr108243.C b/gcc/testsuite/g++.dg/opt/pr108243.C
>>> new file mode 100644
>>> index 00000000000..4c45dbba13c
>>> --- /dev/null
>>> +++ b/gcc/testsuite/g++.dg/opt/pr108243.C
>>> @@ -0,0 +1,29 @@
>>> +// PR c++/108243
>>> +// { dg-do compile { target c++11 } }
>>> +// { dg-additional-options "-O -fdump-tree-original" }
>>> +
>>> +constexpr int foo() {
>>> +  return __builtin_is_constant_evaluated() + 1;
>>> +}
>>> +
>>> +#if __cpp_if_consteval
>>> +constexpr int bar() {
>>> +  if consteval {
>>> +    return 5;
>>> +  } else {
>>> +    return 4;
>>> +  }
>>> +}
>>> +#endif
>>> +
>>> +int p, q;
>>> +
>>> +int main() {
>>> +  p = foo();
>>> +#if __cpp_if_consteval
>>> +  q = bar();
>>> +#endif
>>> +}
>>> +
>>> +// { dg-final { scan-tree-dump-not "= foo" "original" } }
>>> +// { dg-final { scan-tree-dump-not "= bar" "original" } }
>>
>> Let's also test a static initializer that can't be fully constant-evaluated.
> 
> D'oh, doing so revealed that cp_fold_function doesn't reach static
> initializers; those are taken care of by cp_fully_fold_init.  So it
> seems we need to make cp_fold, when called from the latter entry point,
> also assume m_c_e is false.  We can't reuse ff_genericize here because
> that flag has additional effects in cp_fold_r, so it seems we need
> another flag that only affects the manifestly constant-eval stuff; I
> called it ff_mce_false.  How does the following look?
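> 
> (For reference, the additional effects of ff_genericize in cp_fold_r
> are the genericization hooks, roughly:
> 
>     case INIT_EXPR:
>       if (data->flags & ff_genericize)
>         cp_genericize_init_expr (stmt_p);
>       break;
> 
>     case TARGET_EXPR:
>       if (data->flags & ff_genericize)
>         cp_genericize_target_expr (stmt_p);
> 
> which must not run from cp_fully_fold_init.)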
> 
> -- >8 --
> 
> Subject: [PATCH 2/2] c++: speculative constexpr and is_constant_evaluated
>   [PR108243]
> 
> This PR illustrates that __builtin_is_constant_evaluated currently acts
> as an optimization barrier for our speculative constexpr evaluation,
> since we don't want to prematurely fold the builtin to false if the
> expression in question would be later manifestly constant evaluated (in
> which case it must be folded to true).
> 
> This patch fixes this by permitting __builtin_is_constant_evaluated
> to get folded as false during cp_fold_function and cp_fully_fold_init,
> since at these points we're sure we're done with manifestly constant
> evaluation.  To that end we add a flags parameter to cp_fold that
> controls whether we pass mce_false or mce_unknown to maybe_constant_value
> when folding a CALL_EXPR.
> 
> 	PR c++/108243
> 	PR c++/97553
> 
> gcc/cp/ChangeLog:
> 
> 	* cp-gimplify.cc (enum fold_flags): Define.
> 	(cp_fold_data::genericize): Replace this data member with ...
> 	(cp_fold_data::fold_flags): ... this.
> 	(cp_fold_r): Adjust use of cp_fold_data and calls to cp_fold.
> 	(cp_fold_function): Likewise.
> 	(cp_fold_maybe_rvalue): Likewise.
> 	(cp_fully_fold_init): Likewise.
> 	(cp_fold): Add fold_flags parameter.  Don't cache if flags
> 	isn't empty.
> 	<case CALL_EXPR>: If ff_genericize is set, fold
> 	__builtin_is_constant_evaluated to false and pass mce_false to
> 	maybe_constant_value.
> 
> gcc/testsuite/ChangeLog:
> 
> 	* g++.dg/opt/is_constant_evaluated1.C: New test.
> 	* g++.dg/opt/is_constant_evaluated2.C: New test.
> ---
>   gcc/cp/cp-gimplify.cc                         | 88 ++++++++++++-------
>   .../g++.dg/opt/is_constant_evaluated1.C       | 14 +++
>   .../g++.dg/opt/is_constant_evaluated2.C       | 32 +++++++
>   3 files changed, 104 insertions(+), 30 deletions(-)
>   create mode 100644 gcc/testsuite/g++.dg/opt/is_constant_evaluated1.C
>   create mode 100644 gcc/testsuite/g++.dg/opt/is_constant_evaluated2.C
> 
> diff --git a/gcc/cp/cp-gimplify.cc b/gcc/cp/cp-gimplify.cc
> index 9929d29981a..590ed787997 100644
> --- a/gcc/cp/cp-gimplify.cc
> +++ b/gcc/cp/cp-gimplify.cc
> @@ -43,12 +43,26 @@ along with GCC; see the file COPYING3.  If not see
>   #include "omp-general.h"
>   #include "opts.h"
>   
> +/* Flags for cp_fold and cp_fold_r.  */
> +
> +enum fold_flags {
> +  ff_none = 0,
> +  /* Whether we're being called from cp_fold_function.  */
> +  ff_genericize = 1 << 0,
> +  /* Whether we're folding late enough that we could assume
> +     we're definitely not in a manifestly constant-evaluated
> +     context.  */

It's not necessarily a matter of late enough; we could fold sooner and 
still know that, as in cp_fully_fold_init.  We could do the same at 
other full-expression points, but we don't because we want to delay 
folding as much as possible.  So let's say "folding at a point where we 
know we're..."
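
For instance (a sketch of the static-init case, along the lines of the
new is_constant_evaluated1.C test below):

  struct A { constexpr A(int n, int m): n(n), m(m) {} int n, m; };
  void f(int n) {
    static A a = {n, __builtin_is_constant_evaluated()};
  }

The initializer can't be fully constant-evaluated because of 'n', but
when cp_fully_fold_init sees it the manifest constant evaluation
attempt has already happened and failed, so the builtin can safely
fold to false there, well before genericization.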

> +  ff_mce_false = 1 << 1,
> +};
> +
> +using fold_flags_t = int;
> +
>   /* Forward declarations.  */
>   
>   static tree cp_genericize_r (tree *, int *, void *);
>   static tree cp_fold_r (tree *, int *, void *);
>   static void cp_genericize_tree (tree*, bool);
> -static tree cp_fold (tree);
> +static tree cp_fold (tree, fold_flags_t);
>   
>   /* Genericize a TRY_BLOCK.  */
>   
> @@ -1012,9 +1026,8 @@ struct cp_genericize_data
>   struct cp_fold_data
>   {
>     hash_set<tree> pset;
> -  bool genericize; // called from cp_fold_function?
> -
> -  cp_fold_data (bool g): genericize (g) {}
> +  fold_flags_t flags;
> +  cp_fold_data (fold_flags_t flags): flags (flags) {}
>   };
>   
>   static tree
> @@ -1055,7 +1068,7 @@ cp_fold_r (tree *stmt_p, int *walk_subtrees, void *data_)
>         break;
>       }
>   
> -  *stmt_p = stmt = cp_fold (*stmt_p);
> +  *stmt_p = stmt = cp_fold (*stmt_p, data->flags);
>   
>     if (data->pset.add (stmt))
>       {
> @@ -1135,12 +1148,12 @@ cp_fold_r (tree *stmt_p, int *walk_subtrees, void *data_)
>   	 here rather than in cp_genericize to avoid problems with the invisible
>   	 reference transition.  */
>       case INIT_EXPR:
> -      if (data->genericize)
> +      if (data->flags & ff_genericize)
>   	cp_genericize_init_expr (stmt_p);
>         break;
>   
>       case TARGET_EXPR:
> -      if (data->genericize)
> +      if (data->flags & ff_genericize)
>   	cp_genericize_target_expr (stmt_p);
>   
>         /* Folding might replace e.g. a COND_EXPR with a TARGET_EXPR; in
> @@ -1173,7 +1186,7 @@ cp_fold_r (tree *stmt_p, int *walk_subtrees, void *data_)
>   void
>   cp_fold_function (tree fndecl)
>   {
> -  cp_fold_data data (/*genericize*/true);
> +  cp_fold_data data (ff_genericize | ff_mce_false);

Here would be a good place for a comment about passing mce_false because 
all manifestly-constant-evaluated expressions will have been 
constant-evaluated already if possible.
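
Perhaps something like:

  /* By now all manifestly-constant-evaluated expressions will have been
     constant-evaluated already if possible, so we can safely fold
     __builtin_is_constant_evaluated to false here.  */
  cp_fold_data data (ff_genericize | ff_mce_false);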

>     cp_walk_tree (&DECL_SAVED_TREE (fndecl), cp_fold_r, &data, NULL);
>   }
>   
> @@ -2391,7 +2404,7 @@ cp_fold_maybe_rvalue (tree x, bool rval)
>   {
>     while (true)
>       {
> -      x = cp_fold (x);
> +      x = cp_fold (x, ff_none);
>         if (rval)
>   	x = mark_rvalue_use (x);
>         if (rval && DECL_P (x)
> @@ -2450,7 +2463,7 @@ cp_fully_fold_init (tree x)
>     if (processing_template_decl)
>       return x;
>     x = cp_fully_fold (x);
> -  cp_fold_data data (/*genericize*/false);
> +  cp_fold_data data (ff_mce_false);
>     cp_walk_tree (&x, cp_fold_r, &data, NULL);
>     return x;
>   }
> @@ -2485,7 +2498,7 @@ clear_fold_cache (void)
>       Function returns X or its folded variant.  */
>   
>   static tree
> -cp_fold (tree x)
> +cp_fold (tree x, fold_flags_t flags)
>   {
>     tree op0, op1, op2, op3;
>     tree org_x = x, r = NULL_TREE;
> @@ -2506,8 +2519,11 @@ cp_fold (tree x)
>     if (fold_cache == NULL)
>       fold_cache = hash_map<tree, tree>::create_ggc (101);
>   
> -  if (tree *cached = fold_cache->get (x))
> -    return *cached;
> +  bool cache_p = (flags == ff_none);
> +
> +  if (cache_p)
> +    if (tree *cached = fold_cache->get (x))
> +      return *cached;
>   
>     uid_sensitive_constexpr_evaluation_checker c;
>   
> @@ -2542,7 +2558,7 @@ cp_fold (tree x)
>   	     Don't create a new tree if op0 != TREE_OPERAND (x, 0), the
>   	     folding of the operand should be in the caches and if in cp_fold_r
>   	     it will modify it in place.  */
> -	  op0 = cp_fold (TREE_OPERAND (x, 0));
> +	  op0 = cp_fold (TREE_OPERAND (x, 0), flags);
>   	  if (op0 == error_mark_node)
>   	    x = error_mark_node;
>   	  break;
> @@ -2587,7 +2603,7 @@ cp_fold (tree x)
>   	{
>   	  tree p = maybe_undo_parenthesized_ref (x);
>   	  if (p != x)
> -	    return cp_fold (p);
> +	    return cp_fold (p, flags);
>   	}
>         goto unary;
>   
> @@ -2779,8 +2795,8 @@ cp_fold (tree x)
>       case COND_EXPR:
>         loc = EXPR_LOCATION (x);
>         op0 = cp_fold_rvalue (TREE_OPERAND (x, 0));
> -      op1 = cp_fold (TREE_OPERAND (x, 1));
> -      op2 = cp_fold (TREE_OPERAND (x, 2));
> +      op1 = cp_fold (TREE_OPERAND (x, 1), flags);
> +      op2 = cp_fold (TREE_OPERAND (x, 2), flags);
>   
>         if (TREE_CODE (TREE_TYPE (x)) == BOOLEAN_TYPE)
>   	{
> @@ -2870,7 +2886,7 @@ cp_fold (tree x)
>   	      {
>   		if (!same_type_p (TREE_TYPE (x), TREE_TYPE (r)))
>   		  r = build_nop (TREE_TYPE (x), r);
> -		x = cp_fold (r);
> +		x = cp_fold (r, flags);
>   		break;
>   	      }
>   	  }
> @@ -2890,8 +2906,12 @@ cp_fold (tree x)
>   	  {
>   	    switch (DECL_FE_FUNCTION_CODE (callee))
>   	      {
> -		/* Defer folding __builtin_is_constant_evaluated.  */
>   	      case CP_BUILT_IN_IS_CONSTANT_EVALUATED:
> +		/* Defer folding __builtin_is_constant_evaluated unless
> +		   we can assume this isn't a manifestly constant-evaluated

s/can assume/know/
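
i.e.

  /* Defer folding __builtin_is_constant_evaluated unless
     we know this isn't a manifestly constant-evaluated
     context.  */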

OK with those comment changes.

> +		   context.  */
> +		if (flags & ff_mce_false)
> +		  x = boolean_false_node;
>   		break;
>   	      case CP_BUILT_IN_SOURCE_LOCATION:
>   		x = fold_builtin_source_location (x);
> @@ -2924,7 +2944,7 @@ cp_fold (tree x)
>   	int m = call_expr_nargs (x);
>   	for (int i = 0; i < m; i++)
>   	  {
> -	    r = cp_fold (CALL_EXPR_ARG (x, i));
> +	    r = cp_fold (CALL_EXPR_ARG (x, i), flags);
>   	    if (r != CALL_EXPR_ARG (x, i))
>   	      {
>   		if (r == error_mark_node)
> @@ -2947,7 +2967,7 @@ cp_fold (tree x)
>   
>   	if (TREE_CODE (r) != CALL_EXPR)
>   	  {
> -	    x = cp_fold (r);
> +	    x = cp_fold (r, flags);
>   	    break;
>   	  }
>   
> @@ -2960,7 +2980,15 @@ cp_fold (tree x)
>   	   constant, but the call followed by an INDIRECT_REF is.  */
>   	if (callee && DECL_DECLARED_CONSTEXPR_P (callee)
>   	    && !flag_no_inline)
> -	  r = maybe_constant_value (x);
> +	  {
> +	    mce_value manifestly_const_eval = mce_unknown;
> +	    if (flags & ff_mce_false)
> +	      /* Allow folding __builtin_is_constant_evaluated to false during
> +		 constexpr evaluation of this call.  */
> +	      manifestly_const_eval = mce_false;
> +	    r = maybe_constant_value (x, /*decl=*/NULL_TREE,
> +				      manifestly_const_eval);
> +	  }
>   	optimize = sv;
>   
>           if (TREE_CODE (r) != CALL_EXPR)
> @@ -2987,7 +3015,7 @@ cp_fold (tree x)
>   	vec<constructor_elt, va_gc> *nelts = NULL;
>   	FOR_EACH_VEC_SAFE_ELT (elts, i, p)
>   	  {
> -	    tree op = cp_fold (p->value);
> +	    tree op = cp_fold (p->value, flags);
>   	    if (op != p->value)
>   	      {
>   		if (op == error_mark_node)
> @@ -3018,7 +3046,7 @@ cp_fold (tree x)
>   
>   	for (int i = 0; i < n; i++)
>   	  {
> -	    tree op = cp_fold (TREE_VEC_ELT (x, i));
> +	    tree op = cp_fold (TREE_VEC_ELT (x, i), flags);
>   	    if (op != TREE_VEC_ELT (x, i))
>   	      {
>   		if (!changed)
> @@ -3035,10 +3063,10 @@ cp_fold (tree x)
>       case ARRAY_RANGE_REF:
>   
>         loc = EXPR_LOCATION (x);
> -      op0 = cp_fold (TREE_OPERAND (x, 0));
> -      op1 = cp_fold (TREE_OPERAND (x, 1));
> -      op2 = cp_fold (TREE_OPERAND (x, 2));
> -      op3 = cp_fold (TREE_OPERAND (x, 3));
> +      op0 = cp_fold (TREE_OPERAND (x, 0), flags);
> +      op1 = cp_fold (TREE_OPERAND (x, 1), flags);
> +      op2 = cp_fold (TREE_OPERAND (x, 2), flags);
> +      op3 = cp_fold (TREE_OPERAND (x, 3), flags);
>   
>         if (op0 != TREE_OPERAND (x, 0)
>   	  || op1 != TREE_OPERAND (x, 1)
> @@ -3066,7 +3094,7 @@ cp_fold (tree x)
>         /* A SAVE_EXPR might contain e.g. (0 * i) + (0 * j), which, after
>   	 folding, evaluates to an invariant.  In that case no need to wrap
>   	 this folded tree with a SAVE_EXPR.  */
> -      r = cp_fold (TREE_OPERAND (x, 0));
> +      r = cp_fold (TREE_OPERAND (x, 0), flags);
>         if (tree_invariant_p (r))
>   	x = r;
>         break;
> @@ -3085,7 +3113,7 @@ cp_fold (tree x)
>         copy_warning (x, org_x);
>       }
>   
> -  if (!c.evaluation_restricted_p ())
> +  if (cache_p && !c.evaluation_restricted_p ())
>       {
>         fold_cache->put (org_x, x);
>         /* Prevent that we try to fold an already folded result again.  */
> diff --git a/gcc/testsuite/g++.dg/opt/is_constant_evaluated1.C b/gcc/testsuite/g++.dg/opt/is_constant_evaluated1.C
> new file mode 100644
> index 00000000000..ee05cbab785
> --- /dev/null
> +++ b/gcc/testsuite/g++.dg/opt/is_constant_evaluated1.C
> @@ -0,0 +1,14 @@
> +// PR c++/108243
> +// { dg-do compile { target c++11 } }
> +// { dg-additional-options "-O -fdump-tree-original" }
> +
> +struct A {
> +  constexpr A(int n, int m) : n(n), m(m) { }
> +  int n, m;
> +};
> +
> +void f(int n) {
> +  static A a = {n, __builtin_is_constant_evaluated()};
> +}
> +
> +// { dg-final { scan-tree-dump-not "is_constant_evaluated" "original" } }
> diff --git a/gcc/testsuite/g++.dg/opt/is_constant_evaluated2.C b/gcc/testsuite/g++.dg/opt/is_constant_evaluated2.C
> new file mode 100644
> index 00000000000..ed964e20a7a
> --- /dev/null
> +++ b/gcc/testsuite/g++.dg/opt/is_constant_evaluated2.C
> @@ -0,0 +1,32 @@
> +// PR c++/97553
> +// { dg-do compile { target c++11 } }
> +// { dg-additional-options "-O -fdump-tree-original" }
> +
> +constexpr int foo() {
> +  return __builtin_is_constant_evaluated() + 1;
> +}
> +
> +#if __cpp_if_consteval
> +constexpr int bar() {
> +  if consteval {
> +    return 5;
> +  } else {
> +    return 4;
> +  }
> +}
> +#endif
> +
> +int p, q;
> +
> +int main() {
> +  p = foo();
> +#if __cpp_if_consteval
> +  q = bar();
> +#endif
> +}
> +
> +// { dg-final { scan-tree-dump "p = 1" "original" } }
> +// { dg-final { scan-tree-dump-not "= foo" "original" } }
> +
> +// { dg-final { scan-tree-dump "q = 4" "original" { target c++23 } } }
> +// { dg-final { scan-tree-dump-not "= bar" "original" { target c++23 } } }


Thread overview: 15+ messages
2023-01-27 22:02 [PATCH 1/2] c++: make manifestly_const_eval tri-state Patrick Palka
2023-01-27 22:02 ` [PATCH 2/2] c++: speculative constexpr and is_constant_evaluated [PR108243] Patrick Palka
2023-01-27 22:05   ` Patrick Palka
2023-01-30 20:05   ` Jason Merrill
2023-02-03 20:51     ` Patrick Palka
2023-02-03 20:57       ` Patrick Palka
2023-02-05 20:11       ` Jason Merrill [this message]
2023-02-09 17:36         ` Patrick Palka
2023-02-09 23:36           ` Jason Merrill
2023-02-10  1:32             ` Patrick Palka
2023-02-10 14:48               ` Patrick Palka
2023-02-10 16:51                 ` Patrick Palka
2023-02-14 23:02                   ` Jason Merrill
2023-01-30 20:02 ` [PATCH 1/2] c++: make manifestly_const_eval tri-state Jason Merrill
2023-02-03 21:21   ` Patrick Palka
