From: Richard Guenther
To: Aldy Hernandez
Cc: gcc-patches
Subject: Re: [patch] 19/n: trans-mem: compiler tree/gimple stuff
Date: Fri, 04 Nov 2011 10:44:00 -0000
In-Reply-To: <4EB2EACC.8050307@redhat.com>
References: <4EB2EACC.8050307@redhat.com>

On Thu, Nov 3, 2011 at 8:26 PM, Aldy Hernandez wrote:
> These are misc tree and gimple patches, which I consider front-ish-end
> changes.
>
> Index: gcc/tree.c
> ===================================================================
> --- gcc/tree.c  (.../trunk)     (revision 180744)
> +++ gcc/tree.c  (.../branches/transactional-memory)     (revision 180773)
> @@ -9594,6 +9594,9 @@ build_common_builtin_nodes (void)
>                              integer_type_node, NULL_TREE);
>    local_define_builtin ("__builtin_eh_pointer", ftype, BUILT_IN_EH_POINTER,
>                          "__builtin_eh_pointer", ECF_PURE | ECF_NOTHROW | ECF_LEAF);
> +  if (flag_tm)
> +    apply_tm_attr (builtin_decl_explicit (BUILT_IN_EH_POINTER),
> +                   get_identifier ("transaction_pure"));

I think this should use a new ECF_TM_PURE flag, unconditionally set, with
handling in the functions that handle/return ECF flags, so that transitioning
this to a tree node flag instead of an attribute is easier.
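
For concreteness, a rough sketch of what that direction could look like (the
bit value and the exact place the attribute is checked are illustrative only,
not taken from the posted patch):

    /* tree.h: reserve a new call-flags bit next to the existing ECF_* values.  */
    #define ECF_TM_PURE            (1 << 12)

    /* calls.c:flags_from_decl_or_type: derive the bit from the attribute for
       now, so callers only ever test ECF_TM_PURE and a later switch to a
       decl/tree flag stays invisible to them.  */
    if (flag_tm
        && lookup_attribute ("transaction_pure",
                             TYPE_ATTRIBUTES (TREE_TYPE (exp))))
      flags |= ECF_TM_PURE;

Places like the __builtin_eh_pointer setup above would then simply pass
ECF_TM_PURE to local_define_builtin unconditionally instead of applying the
attribute under flag_tm.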
> =A0 tmp =3D lang_hooks.types.type_for_mode (targetm.eh_return_filter_mode= (), > 0); > =A0 ftype =3D build_function_type_list (tmp, integer_type_node, NULL_TREE= ); > Index: gcc/tree.h > =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D > --- gcc/tree.h =A0(.../trunk) =A0 =A0 (revision 180744) > +++ gcc/tree.h =A0(.../branches/transactional-memory) =A0 =A0 (revision 1= 80773) > @@ -539,6 +539,9 @@ struct GTY(()) tree_common { > =A0 =A0 =A0 =A0ENUM_IS_SCOPED in > =A0 =A0 =A0 =A0 =A0 ENUMERAL_TYPE > > + =A0 =A0 =A0 TRANSACTION_EXPR_OUTER in > + =A0 =A0 =A0 =A0 =A0 TRANSACTION_EXPR > + > =A0 =A0public_flag: > > =A0 =A0 =A0 =A0TREE_OVERFLOW in > @@ -566,6 +569,9 @@ struct GTY(()) tree_common { > =A0 =A0 =A0 =A0OMP_CLAUSE_PRIVATE_DEBUG in > =A0 =A0 =A0 =A0 =A0 =A0OMP_CLAUSE_PRIVATE > > + =A0 =A0 =A0 TRANSACTION_EXPR_RELAXED in > + =A0 =A0 =A0 =A0 =A0 TRANSACTION_EXPR > + > =A0 =A0private_flag: > > =A0 =A0 =A0 =A0TREE_PRIVATE in > @@ -1808,6 +1814,14 @@ extern void protected_set_expr_location > =A0#define CALL_EXPR_ARGP(NODE) \ > =A0 (&(TREE_OPERAND (CALL_EXPR_CHECK (NODE), 0)) + 3) > > +/* TM directives and accessors. =A0*/ > +#define TRANSACTION_EXPR_BODY(NODE) \ > + =A0TREE_OPERAND (TRANSACTION_EXPR_CHECK (NODE), 0) > +#define TRANSACTION_EXPR_OUTER(NODE) \ > + =A0(TRANSACTION_EXPR_CHECK (NODE)->base.static_flag) > +#define TRANSACTION_EXPR_RELAXED(NODE) \ > + =A0(TRANSACTION_EXPR_CHECK (NODE)->base.public_flag) > + > =A0/* OpenMP directive and clause accessors. =A0*/ > > =A0#define OMP_BODY(NODE) \ > @@ -3452,6 +3466,34 @@ struct GTY(()) > =A0#define DECL_NO_INLINE_WARNING_P(NODE) \ > =A0 (FUNCTION_DECL_CHECK (NODE)->function_decl.no_inline_warning_flag) > > +/* Nonzero in a FUNCTION_DECL means this function is the transactional > + =A0 clone of a function - called only from inside transactions. =A0*/ > +#define DECL_IS_TM_CLONE(NODE) \ > + =A0(FUNCTION_DECL_CHECK (NODE)->function_decl.tm_clone_flag) Why is it necessary to know whether a clone is a tm clone? > +/* Nonzero if a FUNCTION_CODE is a TM load/store. =A0*/ > +#define BUILTIN_TM_LOAD_STORE_P(FN) \ > + =A0((FN) >=3D BUILT_IN_TM_STORE_1 && (FN) <=3D BUILT_IN_TM_LOAD_RFW_LDO= UBLE) > + > +/* Nonzero if a FUNCTION_CODE is a TM load. =A0*/ > +#define BUILTIN_TM_LOAD_P(FN) \ > + =A0((FN) >=3D BUILT_IN_TM_LOAD_1 && (FN) <=3D BUILT_IN_TM_LOAD_RFW_LDOU= BLE) > + > +/* Nonzero if a FUNCTION_CODE is a TM store. =A0*/ > +#define BUILTIN_TM_STORE_P(FN) \ > + =A0((FN) >=3D BUILT_IN_TM_STORE_1 && (FN) <=3D BUILT_IN_TM_STORE_WAW_LD= OUBLE) > + > +#define CASE_BUILT_IN_TM_LOAD(FN) =A0 =A0 =A0\ > + =A0case BUILT_IN_TM_LOAD_##FN: =A0 =A0 =A0 =A0 =A0\ > + =A0case BUILT_IN_TM_LOAD_RAR_##FN: =A0 =A0 =A0\ > + =A0case BUILT_IN_TM_LOAD_RAW_##FN: =A0 =A0 =A0\ > + =A0case BUILT_IN_TM_LOAD_RFW_##FN > + > +#define CASE_BUILT_IN_TM_STORE(FN) =A0 =A0 \ > + =A0case BUILT_IN_TM_STORE_##FN: =A0 =A0 =A0 =A0 \ > + =A0case BUILT_IN_TM_STORE_WAR_##FN: =A0 =A0 \ > + =A0case BUILT_IN_TM_STORE_WAW_##FN > + > =A0/* Nonzero in a FUNCTION_DECL that should be always inlined by the inl= iner > =A0 =A0disregarding size and cost heuristics. 
=A0This is equivalent to us= ing > =A0 =A0the always_inline attribute without the required diagnostics if the > @@ -3539,8 +3581,9 @@ struct GTY(()) tree_function_decl { > =A0 unsigned pure_flag : 1; > =A0 unsigned looping_const_or_pure_flag : 1; > =A0 unsigned has_debug_args_flag : 1; > + =A0unsigned tm_clone_flag : 1; > > - =A0/* 2 bits left */ > + =A0/* 1 bit left */ > =A0}; > > =A0/* The source language of the translation-unit. =A0*/ > @@ -5174,6 +5217,8 @@ extern void expand_return (tree); > > =A0/* In tree-eh.c */ > =A0extern void using_eh_for_cleanups (void); > +extern int struct_ptr_eq (const void *, const void *); > +extern hashval_t struct_ptr_hash (const void *); > > =A0/* In fold-const.c */ > > @@ -5543,6 +5588,8 @@ extern tree build_duplicate_type (tree); > =A0#define ECF_NOVOPS =A0 =A0 =A0 =A0 =A0 =A0 =A0 (1 << 9) > =A0/* The function does not lead to calls within current function unit. = =A0*/ > =A0#define ECF_LEAF =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 (1 << 10) > +/* Nonzero if this call performs a transactional memory operation. =A0*/ > +#define ECF_TM_OPS =A0 =A0 =A0 =A0 =A0 =A0 =A0 (1 << 11) What's this flag useful for? Isn't it the case that you want to conservati= vely know whether a call might perform a tm operation? Thus, the flag should be inverted? Is this the same as "TM pure"? > =A0extern int flags_from_decl_or_type (const_tree); > =A0extern int call_expr_flags (const_tree); > @@ -5593,6 +5640,8 @@ extern void init_attributes (void); > =A0 =A0a decl attribute to the declaration rather than to its type). =A0*/ > =A0extern tree decl_attributes (tree *, tree, int); > > +extern void apply_tm_attr (tree, tree); > + > =A0/* In integrate.c */ > =A0extern void set_decl_abstract_flags (tree, int); > =A0extern void set_decl_origin_self (tree); > @@ -5805,6 +5854,21 @@ extern unsigned HOST_WIDE_INT compute_bu > =A0extern unsigned HOST_WIDE_INT highest_pow2_factor (const_tree); > =A0extern tree build_personality_function (const char *); > > +/* In trans-mem.c. =A0*/ > +extern tree build_tm_abort_call (location_t, bool); > +extern bool is_tm_safe (const_tree); > +extern bool is_tm_pure (const_tree); > +extern bool is_tm_may_cancel_outer (tree); > +extern bool is_tm_ending_fndecl (tree); > +extern void record_tm_replacement (tree, tree); > +extern void tm_malloc_replacement (tree); > + > +static inline bool > +is_tm_safe_or_pure (tree x) const_tree > +{ > + =A0return is_tm_safe (x) || is_tm_pure (x); > +} > + > =A0/* In tree-inline.c. =A0*/ > > =A0void init_inline_once (void); > Index: gcc/attribs.c > =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D > --- gcc/attribs.c =A0 =A0 =A0 (.../trunk) =A0 =A0 (revision 180744) > +++ gcc/attribs.c =A0 =A0 =A0 (.../branches/transactional-memory) =A0 =A0= (revision > 180773) > @@ -166,7 +166,8 @@ init_attributes (void) > =A0 =A0 =A0 =A0 =A0gcc_assert (strcmp (attribute_tables[i][j].name, > =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0attribute_tabl= es[i][k].name)); > =A0 =A0 } > - =A0/* Check that no name occurs in more than one table. =A0*/ > + =A0/* Check that no name occurs in more than one table. =A0Names that > + =A0 =A0 begin with '*' are exempt, and may be overridden. 
=A0*/ > =A0 for (i =3D 0; i < ARRAY_SIZE (attribute_tables); i++) > =A0 =A0 { > =A0 =A0 =A0 size_t j, k, l; > @@ -174,8 +175,9 @@ init_attributes (void) > =A0 =A0 =A0 for (j =3D i + 1; j < ARRAY_SIZE (attribute_tables); j++) > =A0 =A0 =A0 =A0for (k =3D 0; attribute_tables[i][k].name !=3D NULL; k++) > =A0 =A0 =A0 =A0 =A0for (l =3D 0; attribute_tables[j][l].name !=3D NULL; l= ++) > - =A0 =A0 =A0 =A0 =A0 gcc_assert (strcmp (attribute_tables[i][k].name, > - =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 attribute_t= ables[j][l].name)); > + =A0 =A0 =A0 =A0 =A0 gcc_assert (attribute_tables[i][k].name[0] =3D=3D '= *' > + =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 || strcmp (attribute_tables= [i][k].name, > + =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0attr= ibute_tables[j][l].name)); > =A0 =A0 } > =A0#endif > > @@ -207,7 +209,7 @@ register_attribute (const struct attribu > =A0 slot =3D htab_find_slot_with_hash (attribute_hash, &str, > =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 subst= ring_hash (str.str, str.length), > =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 INSER= T); > - =A0gcc_assert (!*slot); > + =A0gcc_assert (!*slot || attr->name[0] =3D=3D '*'); > =A0 *slot =3D (void *) CONST_CAST (struct attribute_spec *, attr); > =A0} The above changes seem to belong to a different changeset and look strange. Why would attributes ever appear in two different tables? > @@ -484,3 +486,12 @@ decl_attributes (tree *node, tree attrib > > =A0 return returned_attrs; > =A0} > + > +/* Subroutine of set_method_tm_attributes. =A0Apply TM attribute ATTR > + =A0 to the method FNDECL. =A0*/ > + > +void > +apply_tm_attr (tree fndecl, tree attr) > +{ > + =A0decl_attributes (&TREE_TYPE (fndecl), tree_cons (attr, NULL, NULL), = 0); > +} > Index: gcc/targhooks.c > =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D > --- gcc/targhooks.c =A0 =A0 (.../trunk) =A0 =A0 (revision 180744) > +++ gcc/targhooks.c =A0 =A0 (.../branches/transactional-memory) =A0 =A0 (= revision > 180773) > @@ -1214,6 +1214,12 @@ default_have_conditional_execution (void > =A0#endif > =A0} > > +tree > +default_builtin_tm_load_store (tree ARG_UNUSED (type)) > +{ > + =A0return NULL_TREE; > +} > + > =A0/* Compute cost of moving registers to/from memory. 
=A0*/ > > =A0int > Index: gcc/targhooks.h > =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D > --- gcc/targhooks.h =A0 =A0 (.../trunk) =A0 =A0 (revision 180744) > +++ gcc/targhooks.h =A0 =A0 (.../branches/transactional-memory) =A0 =A0 (= revision > 180773) > @@ -152,6 +152,9 @@ extern bool default_addr_space_subset_p > =A0extern rtx default_addr_space_convert (rtx, tree, tree); > =A0extern unsigned int default_case_values_threshold (void); > =A0extern bool default_have_conditional_execution (void); > + > +extern tree default_builtin_tm_load_store (tree); > + > =A0extern int default_memory_move_cost (enum machine_mode, reg_class_t, b= ool); > =A0extern int default_register_move_cost (enum machine_mode, reg_class_t, > =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 = =A0 reg_class_t); > Index: gcc/gimple.def > =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D > --- gcc/gimple.def =A0 =A0 =A0(.../trunk) =A0 =A0 (revision 180744) > +++ gcc/gimple.def =A0 =A0 =A0(.../branches/transactional-memory) =A0 =A0= (revision > 180773) > @@ -124,6 +124,14 @@ DEFGSCODE(GIMPLE_ASM, "gimple_asm", GSS_ > =A0 =A0 CHAIN is the optional static chain link for nested functions. =A0= */ > =A0DEFGSCODE(GIMPLE_CALL, "gimple_call", GSS_CALL) > > +/* GIMPLE_TRANSACTION represents __transaction_atomic and > + =A0 __transaction_relaxed blocks. > + =A0 BODY is the sequence of statements inside the transaction. > + =A0 LABEL is a label for the statement immediately following the > + =A0 transaction. =A0This is before RETURN so that it has MEM_OPS, > + =A0 so that it can clobber global memory. =A0*/ > +DEFGSCODE(GIMPLE_TRANSACTION, "gimple_transaction", GSS_TRANSACTION) > + > =A0/* GIMPLE_RETURN represents return statements. > > =A0 =A0RETVAL is the value to return or NULL. =A0If a value is returned it > @@ -151,6 +159,12 @@ DEFGSCODE(GIMPLE_EH_FILTER, "gimple_eh_f > =A0 =A0be invoked if an exception propagates to this point. =A0*/ > =A0DEFGSCODE(GIMPLE_EH_MUST_NOT_THROW, "gimple_eh_must_not_throw", GSS_EH= _MNT) > > +/* GIMPLE_EH_ELSE must be the sole contents of > + =A0 a GIMPLE_TRY_FINALLY node. =A0For all normal exits from the try blo= ck, > + =A0 we N_BODY is run; for all exception exits from the try block, > + =A0 E_BODY is run. =A0*/ > +DEFGSCODE(GIMPLE_EH_ELSE, "gimple_eh_else", GSS_EH_ELSE) > + > =A0/* GIMPLE_RESX resumes execution after an exception. 
=A0*/ > =A0DEFGSCODE(GIMPLE_RESX, "gimple_resx", GSS_EH_CTRL) > > Index: gcc/builtin-types.def > =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D > --- gcc/builtin-types.def =A0 =A0 =A0 (.../trunk) =A0 =A0 (revision 18074= 4) > +++ gcc/builtin-types.def =A0 =A0 =A0 (.../branches/transactional-memory) > (revision 180773) > @@ -477,3 +477,24 @@ DEF_FUNCTION_TYPE_VAR_5 (BT_FN_INT_INT_I > =A0DEF_POINTER_TYPE (BT_PTR_FN_VOID_VAR, BT_FN_VOID_VAR) > =A0DEF_FUNCTION_TYPE_3 (BT_FN_PTR_PTR_FN_VOID_VAR_PTR_SIZE, > =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 BT_PTR, BT_PTR_FN_VOID_VAR, BT_PT= R, BT_SIZE) > + > + > +DEF_FUNCTION_TYPE_1 (BT_FN_I1_VPTR, BT_I1, BT_VOLATILE_PTR) > +DEF_FUNCTION_TYPE_1 (BT_FN_I2_VPTR, BT_I2, BT_VOLATILE_PTR) > +DEF_FUNCTION_TYPE_1 (BT_FN_I4_VPTR, BT_I4, BT_VOLATILE_PTR) > +DEF_FUNCTION_TYPE_1 (BT_FN_I8_VPTR, BT_I8, BT_VOLATILE_PTR) > +DEF_FUNCTION_TYPE_1 (BT_FN_FLOAT_VPTR, BT_FLOAT, BT_VOLATILE_PTR) > +DEF_FUNCTION_TYPE_1 (BT_FN_DOUBLE_VPTR, BT_DOUBLE, BT_VOLATILE_PTR) > +DEF_FUNCTION_TYPE_1 (BT_FN_LDOUBLE_VPTR, BT_LONGDOUBLE, BT_VOLATILE_PTR) > + > +DEF_FUNCTION_TYPE_2 (BT_FN_VOID_VPTR_I1, BT_VOID, BT_VOLATILE_PTR, BT_I1) > +DEF_FUNCTION_TYPE_2 (BT_FN_VOID_VPTR_I2, BT_VOID, BT_VOLATILE_PTR, BT_I2) > +DEF_FUNCTION_TYPE_2 (BT_FN_VOID_VPTR_I4, BT_VOID, BT_VOLATILE_PTR, BT_I4) > +DEF_FUNCTION_TYPE_2 (BT_FN_VOID_VPTR_I8, BT_VOID, BT_VOLATILE_PTR, BT_I8) > +DEF_FUNCTION_TYPE_2 (BT_FN_VOID_VPTR_FLOAT, BT_VOID, BT_VOLATILE_PTR, > BT_FLOAT) > +DEF_FUNCTION_TYPE_2 (BT_FN_VOID_VPTR_DOUBLE, BT_VOID, > + =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 BT_VOLATILE_PTR, BT_DOUBLE) > +DEF_FUNCTION_TYPE_2 (BT_FN_VOID_VPTR_LDOUBLE, BT_VOID, > + =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0BT_VOLATILE_PTR, BT_LONGDOUBLE) > +DEF_FUNCTION_TYPE_2 (BT_FN_VOID_VPTR_SIZE, BT_VOID, > + =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0BT_VOLATILE_PTR, BT_SIZE) > Index: gcc/builtins.def > =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D > --- gcc/builtins.def =A0 =A0(.../trunk) =A0 =A0 (revision 180744) > +++ gcc/builtins.def =A0 =A0(.../branches/transactional-memory) =A0 =A0 (= revision > 180773) > @@ -142,6 +142,13 @@ along with GCC; see the file COPYING3. > =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0false, true, true, ATTRS, false, \ > =A0 =A0 =A0 =A0 =A0 =A0 =A0 (flag_openmp || flag_tree_parallelize_loops)) > > +/* Builtin used by the implementation of GNU TM. =A0These > + =A0 functions are mapped to the actual implementation of the STM librar= y. */ > +#undef DEF_TM_BUILTIN > +#define DEF_TM_BUILTIN(ENUM, NAME, TYPE, ATTRS) \ > + =A0DEF_BUILTIN (ENUM, "__builtin_" NAME, BUILT_IN_NORMAL, TYPE, TYPE, = =A0 =A0\ > + =A0 =A0 =A0 =A0 =A0 =A0 =A0 false, true, true, ATTRS, false, flag_tm) > + > =A0/* Define an attribute list for math functions that are normally > =A0 =A0"impure" because some of them may write into global memory for > =A0 =A0`errno'. =A0If !flag_errno_math they are instead "const". 
=A0*/ > @@ -624,6 +631,7 @@ DEF_GCC_BUILTIN =A0 =A0 =A0 =A0(BUILT_IN_APPLY_A > =A0DEF_GCC_BUILTIN =A0 =A0 =A0 =A0(BUILT_IN_BSWAP32, "bswap32", BT_FN_UIN= T32_UINT32, > ATTR_CONST_NOTHROW_LEAF_LIST) > =A0DEF_GCC_BUILTIN =A0 =A0 =A0 =A0(BUILT_IN_BSWAP64, "bswap64", BT_FN_UIN= T64_UINT64, > ATTR_CONST_NOTHROW_LEAF_LIST) > =A0DEF_EXT_LIB_BUILTIN =A0 =A0(BUILT_IN_CLEAR_CACHE, "__clear_cache", > BT_FN_VOID_PTR_PTR, ATTR_NOTHROW_LEAF_LIST) > +/* [trans-mem]: Adjust BUILT_IN_TM_CALLOC if BUILT_IN_CALLOC is changed. > =A0*/ > =A0DEF_LIB_BUILTIN =A0 =A0 =A0 =A0(BUILT_IN_CALLOC, "calloc", BT_FN_PTR_S= IZE_SIZE, > ATTR_MALLOC_NOTHROW_LEAF_LIST) > =A0DEF_GCC_BUILTIN =A0 =A0 =A0 =A0(BUILT_IN_CLASSIFY_TYPE, "classify_type= ", > BT_FN_INT_VAR, ATTR_LEAF_LIST) > =A0DEF_GCC_BUILTIN =A0 =A0 =A0 =A0(BUILT_IN_CLZ, "clz", BT_FN_INT_UINT, > ATTR_CONST_NOTHROW_LEAF_LIST) > @@ -662,6 +670,7 @@ DEF_EXT_LIB_BUILTIN =A0 =A0(BUILT_IN_FFSL, " > =A0DEF_EXT_LIB_BUILTIN =A0 =A0(BUILT_IN_FFSLL, "ffsll", BT_FN_INT_LONGLON= G, > ATTR_CONST_NOTHROW_LEAF_LIST) > =A0DEF_EXT_LIB_BUILTIN =A0 =A0 =A0 =A0(BUILT_IN_FORK, "fork", BT_FN_PID, > ATTR_NOTHROW_LIST) > =A0DEF_GCC_BUILTIN =A0 =A0 =A0 =A0(BUILT_IN_FRAME_ADDRESS, "frame_address= ", > BT_FN_PTR_UINT, ATTR_NULL) > +/* [trans-mem]: Adjust BUILT_IN_TM_FREE if BUILT_IN_FREE is changed. =A0= */ > =A0DEF_LIB_BUILTIN =A0 =A0 =A0 =A0(BUILT_IN_FREE, "free", BT_FN_VOID_PTR, > ATTR_NOTHROW_LIST) > =A0DEF_GCC_BUILTIN =A0 =A0 =A0 =A0(BUILT_IN_FROB_RETURN_ADDR, "frob_retur= n_addr", > BT_FN_PTR_PTR, ATTR_NULL) > =A0DEF_EXT_LIB_BUILTIN =A0 =A0(BUILT_IN_GETTEXT, "gettext", > BT_FN_STRING_CONST_STRING, ATTR_FORMAT_ARG_1) > @@ -698,6 +707,7 @@ DEF_GCC_BUILTIN =A0 =A0 =A0 =A0(BUILT_IN_ISUNORD > =A0DEF_LIB_BUILTIN =A0 =A0 =A0 =A0(BUILT_IN_LABS, "labs", BT_FN_LONG_LONG, > ATTR_CONST_NOTHROW_LEAF_LIST) > =A0DEF_C99_BUILTIN =A0 =A0 =A0 =A0(BUILT_IN_LLABS, "llabs", BT_FN_LONGLON= G_LONGLONG, > ATTR_CONST_NOTHROW_LEAF_LIST) > =A0DEF_GCC_BUILTIN =A0 =A0 =A0 =A0(BUILT_IN_LONGJMP, "longjmp", BT_FN_VOI= D_PTR_INT, > ATTR_NORETURN_NOTHROW_LEAF_LIST) > +/* [trans-mem]: Adjust BUILT_IN_TM_MALLOC if BUILT_IN_MALLOC is changed. > =A0*/ > =A0DEF_LIB_BUILTIN =A0 =A0 =A0 =A0(BUILT_IN_MALLOC, "malloc", BT_FN_PTR_S= IZE, > ATTR_MALLOC_NOTHROW_LEAF_LIST) > =A0DEF_GCC_BUILTIN =A0 =A0 =A0 =A0(BUILT_IN_NEXT_ARG, "next_arg", BT_FN_P= TR_VAR, > ATTR_LEAF_LIST) > =A0DEF_GCC_BUILTIN =A0 =A0 =A0 =A0(BUILT_IN_PARITY, "parity", BT_FN_INT_U= INT, > ATTR_CONST_NOTHROW_LEAF_LIST) > @@ -793,3 +803,6 @@ DEF_BUILTIN_STUB (BUILT_IN_EH_COPY_VALUE > > =A0/* OpenMP builtins. =A0*/ > =A0#include "omp-builtins.def" > + > +/* GTM builtins. 
*/ > +#include "gtm-builtins.def" > Index: gcc/gimple-low.c > =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D > --- gcc/gimple-low.c =A0 =A0(.../trunk) =A0 =A0 (revision 180744) > +++ gcc/gimple-low.c =A0 =A0(.../branches/transactional-memory) =A0 =A0 (= revision > 180773) > @@ -396,6 +396,11 @@ lower_stmt (gimple_stmt_iterator *gsi, s > =A0 =A0 =A0 lower_sequence (gimple_eh_filter_failure (stmt), data); > =A0 =A0 =A0 break; > > + =A0 =A0case GIMPLE_EH_ELSE: > + =A0 =A0 =A0lower_sequence (gimple_eh_else_n_body (stmt), data); > + =A0 =A0 =A0lower_sequence (gimple_eh_else_e_body (stmt), data); > + =A0 =A0 =A0break; > + > =A0 =A0 case GIMPLE_NOP: > =A0 =A0 case GIMPLE_ASM: > =A0 =A0 case GIMPLE_ASSIGN: > @@ -446,6 +451,10 @@ lower_stmt (gimple_stmt_iterator *gsi, s > =A0 =A0 =A0 data->cannot_fallthru =3D false; > =A0 =A0 =A0 return; > > + =A0 =A0case GIMPLE_TRANSACTION: > + =A0 =A0 =A0lower_sequence (gimple_transaction_body (stmt), data); > + =A0 =A0 =A0break; > + > =A0 =A0 default: > =A0 =A0 =A0 gcc_unreachable (); > =A0 =A0 } > @@ -727,6 +736,10 @@ gimple_stmt_may_fallthru (gimple stmt) > =A0 =A0 =A0 return (gimple_seq_may_fallthru (gimple_try_eval (stmt)) > =A0 =A0 =A0 =A0 =A0 =A0 =A0&& gimple_seq_may_fallthru (gimple_try_cleanup= (stmt))); > > + =A0 =A0case GIMPLE_EH_ELSE: > + =A0 =A0 =A0return (gimple_seq_may_fallthru (gimple_eh_else_n_body (stmt= )) > + =A0 =A0 =A0 =A0 =A0 =A0 || gimple_seq_may_fallthru (gimple_eh_else_e_bo= dy (stmt))); > + > =A0 =A0 case GIMPLE_CALL: > =A0 =A0 =A0 /* Functions that do not return do not fall through. =A0*/ > =A0 =A0 =A0 return (gimple_call_flags (stmt) & ECF_NORETURN) =3D=3D 0; > Index: gcc/gsstruct.def > =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D > --- gcc/gsstruct.def =A0 =A0(.../trunk) =A0 =A0 (revision 180744) > +++ gcc/gsstruct.def =A0 =A0(.../branches/transactional-memory) =A0 =A0 (= revision > 180773) > @@ -38,6 +38,7 @@ DEFGSSTRUCT(GSS_CATCH, gimple_statement_ > =A0DEFGSSTRUCT(GSS_EH_FILTER, gimple_statement_eh_filter, false) > =A0DEFGSSTRUCT(GSS_EH_MNT, gimple_statement_eh_mnt, false) > =A0DEFGSSTRUCT(GSS_EH_CTRL, gimple_statement_eh_ctrl, false) > +DEFGSSTRUCT(GSS_EH_ELSE, gimple_statement_eh_else, false) > =A0DEFGSSTRUCT(GSS_WCE, gimple_statement_wce, false) > =A0DEFGSSTRUCT(GSS_OMP, gimple_statement_omp, false) > =A0DEFGSSTRUCT(GSS_OMP_CRITICAL, gimple_statement_omp_critical, false) > @@ -49,3 +50,4 @@ DEFGSSTRUCT(GSS_OMP_SINGLE, gimple_state > =A0DEFGSSTRUCT(GSS_OMP_CONTINUE, gimple_statement_omp_continue, false) > =A0DEFGSSTRUCT(GSS_OMP_ATOMIC_LOAD, gimple_statement_omp_atomic_load, fal= se) > =A0DEFGSSTRUCT(GSS_OMP_ATOMIC_STORE, gimple_statement_omp_atomic_store, f= alse) > +DEFGSSTRUCT(GSS_TRANSACTION, gimple_statement_transaction, false) > Index: gcc/tree-eh.c > =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D > --- gcc/tree-eh.c =A0 =A0 =A0 (.../trunk) =A0 =A0 (revision 180744) > +++ gcc/tree-eh.c =A0 =A0 =A0 (.../branches/transactional-memory) =A0 =A0= (revision > 180773) > @@ -58,7 +58,7 @@ using_eh_for_cleanups (void) > =A0 =A0pointer. 
=A0Assumes all pointers are interchangeable, which is sort > =A0 =A0of already assumed by gcc elsewhere IIRC. =A0*/ > > -static int > +int > =A0struct_ptr_eq (const void *a, const void *b) > =A0{ > =A0 const void * const * x =3D (const void * const *) a; > @@ -66,7 +66,7 @@ struct_ptr_eq (const void *a, const void > =A0 return *x =3D=3D *y; > =A0} > > -static hashval_t > +hashval_t > =A0struct_ptr_hash (const void *a) > =A0{ > =A0 const void * const * x =3D (const void * const *) a; Rather than exporting those here consider moving them to a common header as inline functions. const void * const * x =3D (const void * const *) a; return (size_t)*x >> 4; and on the way change that to (intptr_t)*x >> 4 > @@ -284,6 +284,11 @@ collect_finally_tree (gimple stmt, gimpl > =A0 =A0 =A0 collect_finally_tree_1 (gimple_eh_filter_failure (stmt), regi= on); > =A0 =A0 =A0 break; > > + =A0 =A0case GIMPLE_EH_ELSE: > + =A0 =A0 =A0collect_finally_tree_1 (gimple_eh_else_n_body (stmt), region= ); > + =A0 =A0 =A0collect_finally_tree_1 (gimple_eh_else_e_body (stmt), region= ); > + =A0 =A0 =A0break; > + > =A0 =A0 default: > =A0 =A0 =A0 /* A type, a decl, or some kind of statement that we're not > =A0 =A0 =A0 =A0 interested in. =A0Don't walk them. =A0*/ > @@ -534,6 +539,10 @@ replace_goto_queue_1 (gimple stmt, struc > =A0 =A0 case GIMPLE_EH_FILTER: > =A0 =A0 =A0 replace_goto_queue_stmt_list (gimple_eh_filter_failure (stmt)= , tf); > =A0 =A0 =A0 break; > + =A0 =A0case GIMPLE_EH_ELSE: > + =A0 =A0 =A0replace_goto_queue_stmt_list (gimple_eh_else_n_body (stmt), = tf); > + =A0 =A0 =A0replace_goto_queue_stmt_list (gimple_eh_else_e_body (stmt), = tf); > + =A0 =A0 =A0break; > > =A0 =A0 default: > =A0 =A0 =A0 /* These won't have gotos in them. =A0*/ > @@ -921,6 +930,21 @@ lower_try_finally_fallthru_label (struct > =A0 return label; > =A0} > > +/* A subroutine of lower_try_finally. =A0If FINALLY consits of a > + =A0 GIMPLE_EH_ELSE node, return it. =A0*/ > + > +static inline gimple > +get_eh_else (gimple_seq finally) > +{ > + =A0gimple x =3D gimple_seq_first_stmt (finally); > + =A0if (gimple_code (x) =3D=3D GIMPLE_EH_ELSE) > + =A0 =A0{ > + =A0 =A0 =A0gcc_assert (gimple_seq_singleton_p (finally)); > + =A0 =A0 =A0return x; > + =A0 =A0} > + =A0return NULL; > +} > + > =A0/* A subroutine of lower_try_finally. =A0If the eh_protect_cleanup_act= ions > =A0 =A0langhook returns non-null, then the language requires that the exc= eption > =A0 =A0path out of a try_finally be treated specially. =A0To wit: the cod= e within > @@ -950,7 +974,7 @@ honor_protect_cleanup_actions (struct le > =A0 gimple_stmt_iterator gsi; > =A0 bool finally_may_fallthru; > =A0 gimple_seq finally; > - =A0gimple x; > + =A0gimple x, eh_else; > > =A0 /* First check for nothing to do. =A0*/ > =A0 if (lang_hooks.eh_protect_cleanup_actions =3D=3D NULL) > @@ -960,12 +984,18 @@ honor_protect_cleanup_actions (struct le > =A0 =A0 return; > > =A0 finally =3D gimple_try_cleanup (tf->top_p); > - =A0finally_may_fallthru =3D gimple_seq_may_fallthru (finally); > + =A0eh_else =3D get_eh_else (finally); > > =A0 /* Duplicate the FINALLY block. =A0Only need to do this for try-final= ly, > - =A0 =A0 and not for cleanups. =A0*/ > - =A0if (this_state) > + =A0 =A0 and not for cleanups. =A0If we've got an EH_ELSE, extract it no= w. 
=A0*/ > + =A0if (eh_else) > + =A0 =A0{ > + =A0 =A0 =A0finally =3D gimple_eh_else_e_body (eh_else); > + =A0 =A0 =A0gimple_try_set_cleanup (tf->top_p, gimple_eh_else_n_body (eh= _else)); > + =A0 =A0} > + =A0else if (this_state) > =A0 =A0 finally =3D lower_try_finally_dup_block (finally, outer_state); > + =A0finally_may_fallthru =3D gimple_seq_may_fallthru (finally); > > =A0 /* If this cleanup consists of a TRY_CATCH_EXPR with TRY_CATCH_IS_CLE= ANUP > =A0 =A0 =A0set, the handler of the TRY_CATCH_EXPR is another cleanup whic= h ought > @@ -1011,7 +1041,7 @@ lower_try_finally_nofallthru (struct leh > =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0struct leh_tf_= state *tf) > =A0{ > =A0 tree lab; > - =A0gimple x; > + =A0gimple x, eh_else; > =A0 gimple_seq finally; > =A0 struct goto_queue_node *q, *qe; > > @@ -1034,15 +1064,35 @@ lower_try_finally_nofallthru (struct leh > > =A0 replace_goto_queue (tf); > > - =A0lower_eh_constructs_1 (state, finally); > - =A0gimple_seq_add_seq (&tf->top_p_seq, finally); > + =A0/* Emit the finally block into the stream. =A0Lower EH_ELSE at this = time. > =A0*/ > + =A0eh_else =3D get_eh_else (finally); > + =A0if (eh_else) > + =A0 =A0{ > + =A0 =A0 =A0finally =3D gimple_eh_else_n_body (eh_else); > + =A0 =A0 =A0lower_eh_constructs_1 (state, finally); > + =A0 =A0 =A0gimple_seq_add_seq (&tf->top_p_seq, finally); > > - =A0if (tf->may_throw) > + =A0 =A0 =A0if (tf->may_throw) > + =A0 =A0 =A0 { > + =A0 =A0 =A0 =A0 finally =3D gimple_eh_else_e_body (eh_else); > + =A0 =A0 =A0 =A0 lower_eh_constructs_1 (state, finally); > + > + =A0 =A0 =A0 =A0 emit_post_landing_pad (&eh_seq, tf->region); > + =A0 =A0 =A0 =A0 gimple_seq_add_seq (&eh_seq, finally); > + =A0 =A0 =A0 } > + =A0 =A0} > + =A0else > =A0 =A0 { > - =A0 =A0 =A0emit_post_landing_pad (&eh_seq, tf->region); > + =A0 =A0 =A0lower_eh_constructs_1 (state, finally); > + =A0 =A0 =A0gimple_seq_add_seq (&tf->top_p_seq, finally); > > - =A0 =A0 =A0x =3D gimple_build_goto (lab); > - =A0 =A0 =A0gimple_seq_add_stmt (&eh_seq, x); > + =A0 =A0 =A0if (tf->may_throw) > + =A0 =A0 =A0 { > + =A0 =A0 =A0 =A0 emit_post_landing_pad (&eh_seq, tf->region); > + > + =A0 =A0 =A0 =A0 x =3D gimple_build_goto (lab); > + =A0 =A0 =A0 =A0 gimple_seq_add_stmt (&eh_seq, x); > + =A0 =A0 =A0 } > =A0 =A0 } > =A0} > > @@ -1062,6 +1112,18 @@ lower_try_finally_onedest (struct leh_st > =A0 finally =3D gimple_try_cleanup (tf->top_p); > =A0 tf->top_p_seq =3D gimple_try_eval (tf->top_p); > > + =A0/* Since there's only one destination, and the destination edge can = only > + =A0 =A0 either be EH or non-EH, that implies that all of our incoming e= dges > + =A0 =A0 are of the same type. =A0Therefore we can lower EH_ELSE immedia= tely. =A0*/ > + =A0x =3D get_eh_else (finally); > + =A0if (x) > + =A0 =A0{ > + =A0 =A0 =A0if (tf->may_throw) > + =A0 =A0 =A0 =A0finally =3D gimple_eh_else_e_body (x); > + =A0 =A0 =A0else > + =A0 =A0 =A0 =A0finally =3D gimple_eh_else_n_body (x); > + =A0 =A0} > + > =A0 lower_eh_constructs_1 (state, finally); > > =A0 if (tf->may_throw) > @@ -1132,11 +1194,18 @@ lower_try_finally_copy (struct leh_state > =A0 gimple_seq finally; > =A0 gimple_seq new_stmt; > =A0 gimple_seq seq; > - =A0gimple x; > + =A0gimple x, eh_else; > =A0 tree tmp; > =A0 location_t tf_loc =3D gimple_location (tf->try_finally_expr); > > =A0 finally =3D gimple_try_cleanup (tf->top_p); > + > + =A0/* Notice EH_ELSE, and simplify some of the remaining code > + =A0 =A0 by considering FINALLY to be the normal return path only. 
=A0*/ > + =A0eh_else =3D get_eh_else (finally); > + =A0if (eh_else) > + =A0 =A0finally =3D gimple_eh_else_n_body (eh_else); > + > =A0 tf->top_p_seq =3D gimple_try_eval (tf->top_p); > =A0 new_stmt =3D NULL; > > @@ -1153,7 +1222,12 @@ lower_try_finally_copy (struct leh_state > > =A0 if (tf->may_throw) > =A0 =A0 { > - =A0 =A0 =A0seq =3D lower_try_finally_dup_block (finally, state); > + =A0 =A0 =A0/* We don't need to copy the EH path of EH_ELSE, > + =A0 =A0 =A0 =A0since it is only emitted once. =A0*/ > + =A0 =A0 =A0if (eh_else) > + =A0 =A0 =A0 =A0seq =3D gimple_eh_else_e_body (eh_else); > + =A0 =A0 =A0else > + =A0 =A0 =A0 =A0seq =3D lower_try_finally_dup_block (finally, state); > =A0 =A0 =A0 lower_eh_constructs_1 (state, seq); > > =A0 =A0 =A0 emit_post_landing_pad (&eh_seq, tf->region); > @@ -1252,7 +1326,7 @@ lower_try_finally_switch (struct leh_sta > =A0 tree last_case; > =A0 VEC (tree,heap) *case_label_vec; > =A0 gimple_seq switch_body; > - =A0gimple x; > + =A0gimple x, eh_else; > =A0 tree tmp; > =A0 gimple switch_stmt; > =A0 gimple_seq finally; > @@ -1263,9 +1337,10 @@ lower_try_finally_switch (struct leh_sta > =A0 location_t finally_loc; > > =A0 switch_body =3D gimple_seq_alloc (); > + =A0finally =3D gimple_try_cleanup (tf->top_p); > + =A0eh_else =3D get_eh_else (finally); > > =A0 /* Mash the TRY block to the head of the chain. =A0*/ > - =A0finally =3D gimple_try_cleanup (tf->top_p); > =A0 tf->top_p_seq =3D gimple_try_eval (tf->top_p); > > =A0 /* The location of the finally is either the last stmt in the finally > @@ -1281,7 +1356,7 @@ lower_try_finally_switch (struct leh_sta > =A0 nlabels =3D VEC_length (tree, tf->dest_array); > =A0 return_index =3D nlabels; > =A0 eh_index =3D return_index + tf->may_return; > - =A0fallthru_index =3D eh_index + tf->may_throw; > + =A0fallthru_index =3D eh_index + (tf->may_throw && !eh_else); > =A0 ndests =3D fallthru_index + tf->may_fallthru; > > =A0 finally_tmp =3D create_tmp_var (integer_type_node, "finally_tmp"); > @@ -1319,7 +1394,23 @@ lower_try_finally_switch (struct leh_sta > =A0 =A0 =A0 gimple_seq_add_stmt (&switch_body, x); > =A0 =A0 } > > - =A0if (tf->may_throw) > + =A0/* For EH_ELSE, emit the exception path (plus resx) now, then > + =A0 =A0 subsequently we only need consider the normal path. =A0*/ > + =A0if (eh_else) > + =A0 =A0{ > + =A0 =A0 =A0if (tf->may_throw) > + =A0 =A0 =A0 { > + =A0 =A0 =A0 =A0 finally =3D gimple_eh_else_e_body (eh_else); > + =A0 =A0 =A0 =A0 lower_eh_constructs_1 (state, finally); > + > + =A0 =A0 =A0 =A0 emit_post_landing_pad (&eh_seq, tf->region); > + =A0 =A0 =A0 =A0 gimple_seq_add_seq (&eh_seq, finally); > + =A0 =A0 =A0 =A0 emit_resx (&eh_seq, tf->region); > + =A0 =A0 =A0 } > + > + =A0 =A0 =A0finally =3D gimple_eh_else_n_body (eh_else); > + =A0 =A0} > + =A0else if (tf->may_throw) > =A0 =A0 { > =A0 =A0 =A0 emit_post_landing_pad (&eh_seq, tf->region); > > @@ -1452,12 +1543,22 @@ lower_try_finally_switch (struct leh_sta > =A0 =A0the estimate of the size of the switch machinery we'd have to add.= =A0*/ > > =A0static bool > -decide_copy_try_finally (int ndests, gimple_seq finally) > +decide_copy_try_finally (int ndests, bool may_throw, gimple_seq finally) > =A0{ > =A0 int f_estimate, sw_estimate; > + =A0gimple eh_else; > + > + =A0/* If there's an EH_ELSE involved, the exception path is separate > + =A0 =A0 and really doesn't come into play for this computation. 
=A0*/ > + =A0eh_else =3D get_eh_else (finally); > + =A0if (eh_else) > + =A0 =A0{ > + =A0 =A0 =A0ndests -=3D may_throw; > + =A0 =A0 =A0finally =3D gimple_eh_else_n_body (eh_else); > + =A0 =A0} > > =A0 if (!optimize) > - =A0 =A0return false; > + =A0 =A0return ndests =3D=3D 1; > > =A0 /* Finally estimate N times, plus N gotos. =A0*/ > =A0 f_estimate =3D count_insns_seq (finally, &eni_size_weights); > @@ -1563,7 +1664,8 @@ lower_try_finally (struct leh_state *sta > =A0 /* We can easily special-case redirection to a single destination. = =A0*/ > =A0 else if (ndests =3D=3D 1) > =A0 =A0 lower_try_finally_onedest (state, &this_tf); > - =A0else if (decide_copy_try_finally (ndests, gimple_try_cleanup (tp))) > + =A0else if (decide_copy_try_finally (ndests, this_tf.may_throw, > + =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 gim= ple_try_cleanup (tp))) > =A0 =A0 lower_try_finally_copy (state, &this_tf); > =A0 else > =A0 =A0 lower_try_finally_switch (state, &this_tf); > @@ -1928,6 +2030,9 @@ lower_eh_constructs_2 (struct leh_state > =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0case GIMPLE_EH_MUST_NOT_THROW: > =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0replace =3D lower_eh_must_not_thro= w (state, stmt); > =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0break; > + =A0 =A0 =A0 =A0 =A0 =A0 =A0 case GIMPLE_EH_ELSE: > + =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 /* This code is only valid with GIM= PLE_TRY_FINALLY. =A0*/ > + =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 gcc_unreachable (); > =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0default: > =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0replace =3D lower_cleanup (state, = stmt); > =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0break; > @@ -1942,6 +2047,10 @@ lower_eh_constructs_2 (struct leh_state > =A0 =A0 =A0 /* Return since we don't want gsi_next () */ > =A0 =A0 =A0 return; > > + =A0 =A0case GIMPLE_EH_ELSE: > + =A0 =A0 =A0/* We should be eliminating this in lower_try_finally et al.= =A0*/ > + =A0 =A0 =A0gcc_unreachable (); > + > =A0 =A0 default: > =A0 =A0 =A0 /* A type, a decl, or some kind of statement that we're not > =A0 =A0 =A0 =A0 interested in. =A0Don't walk them. =A0*/ > @@ -2832,6 +2941,10 @@ refactor_eh_r (gimple_seq seq) > =A0 =A0 =A0 =A0 =A0case GIMPLE_EH_FILTER: > =A0 =A0 =A0 =A0 =A0 =A0refactor_eh_r (gimple_eh_filter_failure (one)); > =A0 =A0 =A0 =A0 =A0 =A0break; > + =A0 =A0 =A0 =A0 case GIMPLE_EH_ELSE: > + =A0 =A0 =A0 =A0 =A0 refactor_eh_r (gimple_eh_else_n_body (one)); > + =A0 =A0 =A0 =A0 =A0 refactor_eh_r (gimple_eh_else_e_body (one)); > + =A0 =A0 =A0 =A0 =A0 break; > =A0 =A0 =A0 =A0 =A0default: > =A0 =A0 =A0 =A0 =A0 =A0break; > =A0 =A0 =A0 =A0 =A0} > Index: gcc/gimple-pretty-print.c > =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D > --- gcc/gimple-pretty-print.c =A0 (.../trunk) =A0 =A0 (revision 180744) > +++ gcc/gimple-pretty-print.c =A0 (.../branches/transactional-memory) > (revision 180773) > @@ -33,6 +33,7 @@ along with GCC; see the file COPYING3. 
> =A0#include "tree-pass.h" > =A0#include "gimple.h" > =A0#include "value-prof.h" > +#include "trans-mem.h" > > =A0#define INDENT(SPACE) =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 = =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0\ > =A0 do { int i; for (i =3D 0; i < SPACE; i++) pp_space (buffer); } while = (0) > @@ -162,6 +163,7 @@ debug_gimple_seq (gimple_seq seq) > =A0 =A0 =A0'd' - outputs an int as a decimal, > =A0 =A0 =A0's' - outputs a string, > =A0 =A0 =A0'n' - outputs a newline, > + =A0 =A0 'x' - outputs an int as hexadecimal, > =A0 =A0 =A0'+' - increases indent by 2 then outputs a newline, > =A0 =A0 =A0'-' - decreases indent by 2 then outputs a newline. =A0 */ > > @@ -216,6 +218,10 @@ dump_gimple_fmt (pretty_printer *buffer, > =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 newline_and_indent (buffer, spc); > =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 break; > > + =A0 =A0 =A0 =A0 =A0 =A0 =A0case 'x': > + =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0pp_scalar (buffer, "%x", va_arg (args, i= nt)); > + =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0break; > + > =A0 =A0 =A0 =A0 =A0 =A0 =A0 case '+': > =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 spc +=3D 2; > =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 newline_and_indent (buffer, spc); > @@ -622,6 +628,7 @@ static void > =A0dump_gimple_call (pretty_printer *buffer, gimple gs, int spc, int flag= s) > =A0{ > =A0 tree lhs =3D gimple_call_lhs (gs); > + =A0tree fn =3D gimple_call_fn (gs); > > =A0 if (flags & TDF_ALIAS) > =A0 =A0 { > @@ -648,8 +655,7 @@ dump_gimple_call (pretty_printer *buffer > =A0 =A0 =A0 =A0dump_gimple_fmt (buffer, spc, flags, "%G <%s, %T", gs, > =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 internal_fn_name (gimple_= call_internal_fn (gs)), > lhs); > =A0 =A0 =A0 else > - =A0 =A0 =A0 dump_gimple_fmt (buffer, spc, flags, "%G <%T, %T", > - =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0gs, gimple_call_fn (gs),= lhs); > + =A0 =A0 =A0 dump_gimple_fmt (buffer, spc, flags, "%G <%T, %T", gs, fn, = lhs); > =A0 =A0 =A0 if (gimple_call_num_args (gs) > 0) > =A0 =A0 =A0 =A0 { > =A0 =A0 =A0 =A0 =A0 pp_string (buffer, ", "); > @@ -672,7 +678,7 @@ dump_gimple_call (pretty_printer *buffer > =A0 =A0 =A0 if (gimple_call_internal_p (gs)) > =A0 =A0 =A0 =A0pp_string (buffer, internal_fn_name (gimple_call_internal_= fn (gs))); > =A0 =A0 =A0 else > - =A0 =A0 =A0 print_call_name (buffer, gimple_call_fn (gs), flags); > + =A0 =A0 =A0 print_call_name (buffer, fn, flags); > =A0 =A0 =A0 pp_string (buffer, " ("); > =A0 =A0 =A0 dump_gimple_call_args (buffer, gs, flags); > =A0 =A0 =A0 pp_character (buffer, ')'); > @@ -689,9 +695,63 @@ dump_gimple_call (pretty_printer *buffer > > =A0 if (gimple_call_return_slot_opt_p (gs)) > =A0 =A0 pp_string (buffer, " [return slot optimization]"); > - > =A0 if (gimple_call_tail_p (gs)) > =A0 =A0 pp_string (buffer, " [tail call]"); > + > + =A0/* Dump the arguments of _ITM_beginTransaction sanely. =A0*/ > + =A0if (TREE_CODE (fn) =3D=3D ADDR_EXPR) > + =A0 =A0fn =3D TREE_OPERAND (fn, 0); > + =A0if (TREE_CODE (fn) =3D=3D FUNCTION_DECL && DECL_IS_TM_CLONE (fn)) > + =A0 =A0pp_string (buffer, " [tm-clone]"); > + =A0if (TREE_CODE (fn) =3D=3D FUNCTION_DECL > + =A0 =A0 =A0&& DECL_BUILT_IN_CLASS (fn) =3D=3D BUILT_IN_NORMAL > + =A0 =A0 =A0&& DECL_FUNCTION_CODE (fn) =3D=3D BUILT_IN_TM_START > + =A0 =A0 =A0/* Check we're referring to Intel's TM specifications. =A0*/ > + =A0 =A0 =A0&& !strcmp (IDENTIFIER_POINTER (DECL_NAME (fn)), > + =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 "__builtin__ITM_beginTransaction") Huh. Are there others that would use the same builtin? 
> + =A0 =A0 =A0&& gimple_call_num_args (gs) > 0 > + =A0 =A0 =A0) ) goes to the previouys line. > + =A0 =A0{ > + =A0 =A0 =A0tree t =3D gimple_call_arg (gs, 0); > + =A0 =A0 =A0unsigned HOST_WIDE_INT props; > + =A0 =A0 =A0gcc_assert (TREE_CODE (t) =3D=3D INTEGER_CST); > + > + =A0 =A0 =A0pp_string (buffer, " [ "); > + > + =A0 =A0 =A0/* Get the transaction code properties. =A0*/ > + =A0 =A0 =A0props =3D TREE_INT_CST_LOW (t); > + > + =A0 =A0 =A0if (props & PR_INSTRUMENTEDCODE) > + =A0 =A0 =A0 pp_string (buffer, "instrumentedCode "); > + =A0 =A0 =A0if (props & PR_UNINSTRUMENTEDCODE) > + =A0 =A0 =A0 pp_string (buffer, "uninstrumentedCode "); > + =A0 =A0 =A0if (props & PR_HASNOXMMUPDATE) > + =A0 =A0 =A0 pp_string (buffer, "hasNoXMMUpdate "); > + =A0 =A0 =A0if (props & PR_HASNOABORT) > + =A0 =A0 =A0 pp_string (buffer, "hasNoAbort "); > + =A0 =A0 =A0if (props & PR_HASNOIRREVOCABLE) > + =A0 =A0 =A0 pp_string (buffer, "hasNoIrrevocable "); > + =A0 =A0 =A0if (props & PR_DOESGOIRREVOCABLE) > + =A0 =A0 =A0 pp_string (buffer, "doesGoIrrevocable "); > + =A0 =A0 =A0if (props & PR_HASNOSIMPLEREADS) > + =A0 =A0 =A0 pp_string (buffer, "hasNoSimpleReads "); > + =A0 =A0 =A0if (props & PR_AWBARRIERSOMITTED) > + =A0 =A0 =A0 pp_string (buffer, "awBarriersOmitted "); > + =A0 =A0 =A0if (props & PR_RARBARRIERSOMITTED) > + =A0 =A0 =A0 pp_string (buffer, "RaRBarriersOmitted "); > + =A0 =A0 =A0if (props & PR_UNDOLOGCODE) > + =A0 =A0 =A0 pp_string (buffer, "undoLogCode "); > + =A0 =A0 =A0if (props & PR_PREFERUNINSTRUMENTED) > + =A0 =A0 =A0 pp_string (buffer, "preferUninstrumented "); > + =A0 =A0 =A0if (props & PR_EXCEPTIONBLOCK) > + =A0 =A0 =A0 pp_string (buffer, "exceptionBlock "); > + =A0 =A0 =A0if (props & PR_HASELSE) > + =A0 =A0 =A0 pp_string (buffer, "hasElse "); > + =A0 =A0 =A0if (props & PR_READONLY) > + =A0 =A0 =A0 pp_string (buffer, "readOnly "); > + > + =A0 =A0 =A0pp_string (buffer, "]"); > + =A0 =A0} > =A0} > > > @@ -947,6 +1007,24 @@ dump_gimple_eh_must_not_throw (pretty_pr > =A0} > > > +/* Dump a GIMPLE_EH_ELSE tuple on the pretty_printer BUFFER, SPC spaces = of > + =A0 indent. =A0FLAGS specifies details to show in the dump (see TDF_* in > + =A0 tree-pass.h). =A0*/ > + > +static void > +dump_gimple_eh_else (pretty_printer *buffer, gimple gs, int spc, int fla= gs) > +{ > + =A0if (flags & TDF_RAW) > + =A0 =A0dump_gimple_fmt (buffer, spc, flags, > + =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 "%G <%+N_BODY <%S>%nE_BODY <%S>= %->", gs, > + =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 gimple_eh_else_n_body (gs), gim= ple_eh_else_e_body > (gs)); > + =A0else > + =A0 =A0dump_gimple_fmt (buffer, spc, flags, > + =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0"<<>>%+{%S}%-<<<= else_eh_exit>>>%+{%S}", > + =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 gimple_eh_else_n_body (gs), gim= ple_eh_else_e_body > (gs)); > +} > + > + > =A0/* Dump a GIMPLE_RESX tuple on the pretty_printer BUFFER, SPC spaces of > =A0 =A0indent. =A0FLAGS specifies details to show in the dump (see TDF_* = in > =A0 =A0tree-pass.h). =A0*/ > @@ -1269,6 +1347,86 @@ dump_gimple_omp_return (pretty_printer * > =A0 =A0 } > =A0} > > +/* Dump a GIMPLE_TRANSACTION tuple on the pretty_printer BUFFER. 
=A0*/ > + > +static void > +dump_gimple_transaction (pretty_printer *buffer, gimple gs, int spc, int > flags) > +{ > + =A0unsigned subcode =3D gimple_transaction_subcode (gs); > + > + =A0if (flags & TDF_RAW) > + =A0 =A0{ > + =A0 =A0 =A0dump_gimple_fmt (buffer, spc, flags, > + =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0"%G [SUBCODE=3D%x,LABEL=3D%T= ] <%+BODY <%S> >", > + =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0gs, subcode, gimple_transact= ion_label (gs), > + =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0gimple_transaction_body (gs)= ); > + =A0 =A0} > + =A0else > + =A0 =A0{ > + =A0 =A0 =A0if (subcode & GTMA_IS_OUTER) > + =A0 =A0 =A0 pp_string (buffer, "__transaction_atomic [[outer]]"); > + =A0 =A0 =A0else if (subcode & GTMA_IS_RELAXED) > + =A0 =A0 =A0 pp_string (buffer, "__transaction_relaxed"); > + =A0 =A0 =A0else > + =A0 =A0 =A0 pp_string (buffer, "__transaction_atomic"); > + =A0 =A0 =A0subcode &=3D ~GTMA_DECLARATION_MASK; > + > + =A0 =A0 =A0if (subcode || gimple_transaction_label (gs)) > + =A0 =A0 =A0 { > + =A0 =A0 =A0 =A0 pp_string (buffer, " =A0//"); > + =A0 =A0 =A0 =A0 if (gimple_transaction_label (gs)) > + =A0 =A0 =A0 =A0 =A0 { > + =A0 =A0 =A0 =A0 =A0 =A0 pp_string (buffer, " LABEL=3D"); > + =A0 =A0 =A0 =A0 =A0 =A0 dump_generic_node (buffer, gimple_transaction_l= abel (gs), > + =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0spc, fla= gs, false); > + =A0 =A0 =A0 =A0 =A0 } > + =A0 =A0 =A0 =A0 if (subcode) > + =A0 =A0 =A0 =A0 =A0 { > + =A0 =A0 =A0 =A0 =A0 =A0 pp_string (buffer, " SUBCODE=3D[ "); > + =A0 =A0 =A0 =A0 =A0 =A0 if (subcode & GTMA_HAVE_ABORT) > + =A0 =A0 =A0 =A0 =A0 =A0 =A0 { > + =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 pp_string (buffer, "GTMA_HAVE_ABORT "); > + =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 subcode &=3D ~GTMA_HAVE_ABORT; > + =A0 =A0 =A0 =A0 =A0 =A0 =A0 } > + =A0 =A0 =A0 =A0 =A0 =A0 if (subcode & GTMA_HAVE_LOAD) > + =A0 =A0 =A0 =A0 =A0 =A0 =A0 { > + =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 pp_string (buffer, "GTMA_HAVE_LOAD "); > + =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 subcode &=3D ~GTMA_HAVE_LOAD; > + =A0 =A0 =A0 =A0 =A0 =A0 =A0 } > + =A0 =A0 =A0 =A0 =A0 =A0 if (subcode & GTMA_HAVE_STORE) > + =A0 =A0 =A0 =A0 =A0 =A0 =A0 { > + =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 pp_string (buffer, "GTMA_HAVE_STORE "); > + =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 subcode &=3D ~GTMA_HAVE_STORE; > + =A0 =A0 =A0 =A0 =A0 =A0 =A0 } > + =A0 =A0 =A0 =A0 =A0 =A0 if (subcode & GTMA_MAY_ENTER_IRREVOCABLE) > + =A0 =A0 =A0 =A0 =A0 =A0 =A0 { > + =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 pp_string (buffer, "GTMA_MAY_ENTER_IRRE= VOCABLE "); > + =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 subcode &=3D ~GTMA_MAY_ENTER_IRREVOCABL= E; > + =A0 =A0 =A0 =A0 =A0 =A0 =A0 } > + =A0 =A0 =A0 =A0 =A0 =A0 if (subcode & GTMA_DOES_GO_IRREVOCABLE) > + =A0 =A0 =A0 =A0 =A0 =A0 =A0 { > + =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 pp_string (buffer, "GTMA_DOES_GO_IRREVO= CABLE "); > + =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 subcode &=3D ~GTMA_DOES_GO_IRREVOCABLE; > + =A0 =A0 =A0 =A0 =A0 =A0 =A0 } > + =A0 =A0 =A0 =A0 =A0 =A0 if (subcode) > + =A0 =A0 =A0 =A0 =A0 =A0 =A0 pp_printf (buffer, "0x%x ", subcode); > + =A0 =A0 =A0 =A0 =A0 =A0 pp_string (buffer, "]"); > + =A0 =A0 =A0 =A0 =A0 } > + =A0 =A0 =A0 } > + > + =A0 =A0 =A0if (!gimple_seq_empty_p (gimple_transaction_body (gs))) > + =A0 =A0 =A0 { > + =A0 =A0 =A0 =A0 newline_and_indent (buffer, spc + 2); > + =A0 =A0 =A0 =A0 pp_character (buffer, '{'); > + =A0 =A0 =A0 =A0 pp_newline (buffer); > + =A0 =A0 =A0 =A0 dump_gimple_seq (buffer, gimple_transaction_body (gs), > + =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0spc + 4, flags); > + 
=A0 =A0 =A0 =A0 newline_and_indent (buffer, spc + 2); > + =A0 =A0 =A0 =A0 pp_character (buffer, '}'); > + =A0 =A0 =A0 } > + =A0 =A0} > +} > + > =A0/* Dump a GIMPLE_ASM tuple on the pretty_printer BUFFER, SPC spaces of > =A0 =A0indent. =A0FLAGS specifies details to show in the dump (see TDF_* = in > =A0 =A0tree-pass.h). =A0*/ > @@ -1855,6 +2013,10 @@ dump_gimple_stmt (pretty_printer *buffer > =A0 =A0 =A0 dump_gimple_eh_must_not_throw (buffer, gs, spc, flags); > =A0 =A0 =A0 break; > > + =A0 =A0case GIMPLE_EH_ELSE: > + =A0 =A0 =A0dump_gimple_eh_else (buffer, gs, spc, flags); > + =A0 =A0 =A0break; > + > =A0 =A0 case GIMPLE_RESX: > =A0 =A0 =A0 dump_gimple_resx (buffer, gs, spc, flags); > =A0 =A0 =A0 break; > @@ -1877,6 +2039,10 @@ dump_gimple_stmt (pretty_printer *buffer > =A0 =A0 =A0 pp_string (buffer, " predictor."); > =A0 =A0 =A0 break; > > + =A0 =A0case GIMPLE_TRANSACTION: > + =A0 =A0 =A0dump_gimple_transaction (buffer, gs, spc, flags); > + =A0 =A0 =A0break; > + > =A0 =A0 default: > =A0 =A0 =A0 GIMPLE_NIY; > =A0 =A0 } > Index: gcc/gimplify.c > =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D > --- gcc/gimplify.c =A0 =A0 =A0(.../trunk) =A0 =A0 (revision 180744) > +++ gcc/gimplify.c =A0 =A0 =A0(.../branches/transactional-memory) =A0 =A0= (revision > 180773) > @@ -413,6 +413,8 @@ create_tmp_var_name (const char *prefix) > =A0 =A0 =A0 char *preftmp =3D ASTRDUP (prefix); > > =A0 =A0 =A0 remove_suffix (preftmp, strlen (preftmp)); > + =A0 =A0 =A0clean_symbol_name (preftmp); > + > =A0 =A0 =A0 prefix =3D preftmp; > =A0 =A0 } > > @@ -1072,6 +1074,12 @@ voidify_wrapper_expr (tree wrapper, tree > =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0} > =A0 =A0 =A0 =A0 =A0 =A0 =A0break; > > + =A0 =A0 =A0 =A0 =A0 case TRANSACTION_EXPR: > + =A0 =A0 =A0 =A0 =A0 =A0 TREE_SIDE_EFFECTS (*p) =3D 1; > + =A0 =A0 =A0 =A0 =A0 =A0 TREE_TYPE (*p) =3D void_type_node; > + =A0 =A0 =A0 =A0 =A0 =A0 p =3D &TRANSACTION_EXPR_BODY (*p); > + =A0 =A0 =A0 =A0 =A0 =A0 break; > + > =A0 =A0 =A0 =A0 =A0 =A0default: > =A0 =A0 =A0 =A0 =A0 =A0 =A0goto out; > =A0 =A0 =A0 =A0 =A0 =A0} > @@ -6527,6 +6535,53 @@ gimplify_omp_atomic (tree *expr_p, gimpl > =A0 =A0return GS_ALL_DONE; > =A0} > > +/* Gimplify a TRANSACTION_EXPR. =A0This involves gimplification of the > + =A0 body, and adding some EH bits. =A0*/ > + > +static enum gimplify_status > +gimplify_transaction (tree *expr_p, gimple_seq *pre_p) > +{ > + =A0tree expr =3D *expr_p, temp, tbody =3D TRANSACTION_EXPR_BODY (expr); > + =A0gimple g; > + =A0gimple_seq body =3D NULL; > + =A0struct gimplify_ctx gctx; > + =A0int subcode =3D 0; > + > + =A0/* Wrap the transaction body in a BIND_EXPR so we have a context > + =A0 =A0 where to put decls for OpenMP. 
=A0*/ > + =A0if (TREE_CODE (tbody) !=3D BIND_EXPR) > + =A0 =A0{ > + =A0 =A0 =A0tree bind =3D build3 (BIND_EXPR, void_type_node, NULL, tbody= , NULL); > + =A0 =A0 =A0TREE_SIDE_EFFECTS (bind) =3D 1; > + =A0 =A0 =A0SET_EXPR_LOCATION (bind, EXPR_LOCATION (tbody)); > + =A0 =A0 =A0TRANSACTION_EXPR_BODY (expr) =3D bind; > + =A0 =A0} > + > + =A0push_gimplify_context (&gctx); > + =A0temp =3D voidify_wrapper_expr (*expr_p, NULL); > + > + =A0g =3D gimplify_and_return_first (TRANSACTION_EXPR_BODY (expr), &body= ); > + =A0pop_gimplify_context (g); > + > + =A0g =3D gimple_build_transaction (body, NULL); > + =A0if (TRANSACTION_EXPR_OUTER (expr)) > + =A0 =A0subcode =3D GTMA_IS_OUTER; > + =A0else if (TRANSACTION_EXPR_RELAXED (expr)) > + =A0 =A0subcode =3D GTMA_IS_RELAXED; > + =A0gimple_transaction_set_subcode (g, subcode); > + > + =A0gimplify_seq_add_stmt (pre_p, g); > + > + =A0if (temp) > + =A0 =A0{ > + =A0 =A0 =A0*expr_p =3D temp; > + =A0 =A0 =A0return GS_OK; > + =A0 =A0} > + > + =A0*expr_p =3D NULL_TREE; > + =A0return GS_ALL_DONE; > +} > + > =A0/* Convert the GENERIC expression tree *EXPR_P to GIMPLE. =A0If the > =A0 =A0expression produces a value to be used as an operand inside a GIMP= LE > =A0 =A0statement, the value will be stored back in *EXPR_P. =A0This value= will > @@ -7251,6 +7306,10 @@ gimplify_expr (tree *expr_p, gimple_seq > =A0 =A0 =A0 =A0 =A0ret =3D gimplify_omp_atomic (expr_p, pre_p); > =A0 =A0 =A0 =A0 =A0break; > > + =A0 =A0 =A0 =A0case TRANSACTION_EXPR: > + =A0 =A0 =A0 =A0 =A0ret =3D gimplify_transaction (expr_p, pre_p); > + =A0 =A0 =A0 =A0 =A0break; > + > =A0 =A0 =A0 =A0case TRUTH_AND_EXPR: > =A0 =A0 =A0 =A0case TRUTH_OR_EXPR: > =A0 =A0 =A0 =A0case TRUTH_XOR_EXPR: > Index: gcc/calls.c > =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D > --- gcc/calls.c (.../trunk) =A0 =A0 (revision 180744) > +++ gcc/calls.c (.../branches/transactional-memory) =A0 =A0 (revision 180= 773) > @@ -496,7 +496,60 @@ emit_call_1 (rtx funexp, tree fntree ATT > =A0static int > =A0special_function_p (const_tree fndecl, int flags) > =A0{ > - =A0if (fndecl && DECL_NAME (fndecl) > + =A0if (fndecl =3D=3D NULL) > + =A0 =A0return flags; > + > + =A0if (DECL_BUILT_IN_CLASS (fndecl) =3D=3D BUILT_IN_NORMAL) > + =A0 =A0{ > + =A0 =A0 =A0switch (DECL_FUNCTION_CODE (fndecl)) > + =A0 =A0 =A0 { > + =A0 =A0 =A0 case BUILT_IN_TM_COMMIT: > + =A0 =A0 =A0 case BUILT_IN_TM_COMMIT_EH: > + =A0 =A0 =A0 case BUILT_IN_TM_ABORT: > + =A0 =A0 =A0 case BUILT_IN_TM_IRREVOCABLE: > + =A0 =A0 =A0 case BUILT_IN_TM_GETTMCLONE_IRR: > + =A0 =A0 =A0 case BUILT_IN_TM_MEMCPY: > + =A0 =A0 =A0 case BUILT_IN_TM_MEMMOVE: > + =A0 =A0 =A0 =A0case BUILT_IN_TM_MEMSET: > + =A0 =A0 =A0 CASE_BUILT_IN_TM_STORE (1): > + =A0 =A0 =A0 CASE_BUILT_IN_TM_STORE (2): > + =A0 =A0 =A0 CASE_BUILT_IN_TM_STORE (4): > + =A0 =A0 =A0 CASE_BUILT_IN_TM_STORE (8): > + =A0 =A0 =A0 CASE_BUILT_IN_TM_STORE (FLOAT): > + =A0 =A0 =A0 CASE_BUILT_IN_TM_STORE (DOUBLE): > + =A0 =A0 =A0 CASE_BUILT_IN_TM_STORE (LDOUBLE): > + =A0 =A0 =A0 CASE_BUILT_IN_TM_STORE (M64): > + =A0 =A0 =A0 CASE_BUILT_IN_TM_STORE (M128): > + =A0 =A0 =A0 CASE_BUILT_IN_TM_STORE (M256): > + =A0 =A0 =A0 CASE_BUILT_IN_TM_LOAD (1): > + =A0 =A0 =A0 CASE_BUILT_IN_TM_LOAD (2): > + =A0 =A0 =A0 CASE_BUILT_IN_TM_LOAD (4): > + =A0 =A0 =A0 CASE_BUILT_IN_TM_LOAD (8): > + =A0 =A0 =A0 CASE_BUILT_IN_TM_LOAD (FLOAT): > + =A0 =A0 =A0 CASE_BUILT_IN_TM_LOAD (DOUBLE): > + =A0 =A0 =A0 
CASE_BUILT_IN_TM_LOAD (LDOUBLE): > + =A0 =A0 =A0 CASE_BUILT_IN_TM_LOAD (M64): > + =A0 =A0 =A0 CASE_BUILT_IN_TM_LOAD (M128): > + =A0 =A0 =A0 CASE_BUILT_IN_TM_LOAD (M256): > + =A0 =A0 =A0 case BUILT_IN_TM_LOG: > + =A0 =A0 =A0 case BUILT_IN_TM_LOG_1: > + =A0 =A0 =A0 case BUILT_IN_TM_LOG_2: > + =A0 =A0 =A0 case BUILT_IN_TM_LOG_4: > + =A0 =A0 =A0 case BUILT_IN_TM_LOG_8: > + =A0 =A0 =A0 case BUILT_IN_TM_LOG_FLOAT: > + =A0 =A0 =A0 case BUILT_IN_TM_LOG_DOUBLE: > + =A0 =A0 =A0 case BUILT_IN_TM_LOG_LDOUBLE: > + =A0 =A0 =A0 case BUILT_IN_TM_LOG_M64: > + =A0 =A0 =A0 case BUILT_IN_TM_LOG_M128: > + =A0 =A0 =A0 case BUILT_IN_TM_LOG_M256: > + =A0 =A0 =A0 =A0 flags |=3D ECF_TM_OPS; > + =A0 =A0 =A0 =A0 break; > + =A0 =A0 =A0 default: > + =A0 =A0 =A0 =A0 break; > + =A0 =A0 =A0 } > + =A0 =A0} This should not be in special_function_p which is solely to check for the identifiers. Instead the caller of this function should handle the builtin codes (flags_from_decl_or_type). > + =A0if (DECL_NAME (fndecl) > =A0 =A0 =A0 && IDENTIFIER_LENGTH (DECL_NAME (fndecl)) <=3D 17 > =A0 =A0 =A0 /* Exclude functions not at the file scope, or not `extern', > =A0 =A0 =A0 =A0 since they are not the magic functions we would otherwise > @@ -644,6 +697,9 @@ flags_from_decl_or_type (const_tree exp) > =A0 =A0 =A0 if (TREE_NOTHROW (exp)) > =A0 =A0 =A0 =A0flags |=3D ECF_NOTHROW; > > + =A0 =A0 =A0if (DECL_IS_TM_CLONE (exp)) > + =A0 =A0 =A0 flags |=3D ECF_TM_OPS; > + Thus, here. > =A0 =A0 =A0 flags =3D special_function_p (exp, flags); > =A0 =A0 } > =A0 else if (TYPE_P (exp) && TYPE_READONLY (exp)) > Index: gcc/tree-inline.c > =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D > --- gcc/tree-inline.c =A0 (.../trunk) =A0 =A0 (revision 180744) > +++ gcc/tree-inline.c =A0 (.../branches/transactional-memory) =A0 =A0 (re= vision > 180773) > @@ -1365,6 +1365,12 @@ remap_gimple_stmt (gimple stmt, copy_bod > =A0 =A0 =A0 =A0 =A0 =A0=3D gimple_build_omp_critical (s1, gimple_omp_crit= ical_name > (stmt)); > =A0 =A0 =A0 =A0 =A0break; > > + =A0 =A0 =A0 case GIMPLE_TRANSACTION: > + =A0 =A0 =A0 =A0 s1 =3D remap_gimple_seq (gimple_transaction_body (stmt)= , id); > + =A0 =A0 =A0 =A0 copy =3D gimple_build_transaction (s1, gimple_transacti= on_label > (stmt)); > + =A0 =A0 =A0 =A0 gimple_transaction_set_subcode (copy, gimple_transactio= n_subcode > (stmt)); > + =A0 =A0 =A0 =A0 break; > + > =A0 =A0 =A0 =A0default: > =A0 =A0 =A0 =A0 =A0gcc_unreachable (); > =A0 =A0 =A0 =A0} > @@ -3600,6 +3606,11 @@ estimate_num_insns (gimple stmt, eni_wei > =A0 =A0 =A0 return (weights->omp_cost > =A0 =A0 =A0 =A0 =A0 =A0 =A0 + estimate_num_insns_seq (gimple_omp_body (st= mt), weights)); > > + =A0 =A0case GIMPLE_TRANSACTION: > + =A0 =A0 =A0return (weights->tm_cost > + =A0 =A0 =A0 =A0 =A0 =A0 + estimate_num_insns_seq (gimple_transaction_bo= dy (stmt), > + =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0= =A0 weights)); > + Huh, so we now have non-lowered gimple sub-sequence throughout all optimizations (inlining especially)? :( I think I miss tree-cfg.c parts that do any verification of the new gimple kinds. 
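
To illustrate the special_function_p point above: one way to keep that
function purely name-based is a small predicate that flags_from_decl_or_type
consults instead (the helper name is hypothetical, not something in the
patch):

    /* Hypothetical helper: true if FNDECL is one of the TM builtins that
       perform transactional memory operations.  */
    static bool
    call_is_tm_builtin_p (const_tree fndecl)
    {
      if (DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_NORMAL)
        return false;
      switch (DECL_FUNCTION_CODE (fndecl))
        {
        case BUILT_IN_TM_COMMIT:
        case BUILT_IN_TM_ABORT:
        /* ... the remaining TM load/store/log codes from the patch ... */
          return true;
        default:
          return false;
        }
    }

    /* flags_from_decl_or_type, next to the DECL_IS_TM_CLONE check:  */
    if (call_is_tm_builtin_p (exp))
      flags |= ECF_TM_OPS;

That keeps the identifier-based special cases in special_function_p untouched
and puts the builtin-code logic with the rest of the flag derivation.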
> =A0 =A0 default: > =A0 =A0 =A0 gcc_unreachable (); > =A0 =A0 } > @@ -3639,6 +3650,7 @@ init_inline_once (void) > =A0 eni_size_weights.target_builtin_call_cost =3D 1; > =A0 eni_size_weights.div_mod_cost =3D 1; > =A0 eni_size_weights.omp_cost =3D 40; > + =A0eni_size_weights.tm_cost =3D 10; > =A0 eni_size_weights.time_based =3D false; > =A0 eni_size_weights.return_cost =3D 1; > > @@ -3650,6 +3662,7 @@ init_inline_once (void) > =A0 eni_time_weights.target_builtin_call_cost =3D 1; > =A0 eni_time_weights.div_mod_cost =3D 10; > =A0 eni_time_weights.omp_cost =3D 40; > + =A0eni_time_weights.tm_cost =3D 40; > =A0 eni_time_weights.time_based =3D true; > =A0 eni_time_weights.return_cost =3D 2; > =A0} > Index: gcc/tree-inline.h > =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D > --- gcc/tree-inline.h =A0 (.../trunk) =A0 =A0 (revision 180744) > +++ gcc/tree-inline.h =A0 (.../branches/transactional-memory) =A0 =A0 (re= vision > 180773) > @@ -144,6 +144,9 @@ typedef struct eni_weights_d > =A0 /* Cost for omp construct. =A0*/ > =A0 unsigned omp_cost; > > + =A0/* Cost for tm transaction. =A0*/ > + =A0unsigned tm_cost; > + > =A0 /* Cost of return. =A0*/ > =A0 unsigned return_cost; > > Index: gcc/gimple.c > =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D > --- gcc/gimple.c =A0 =A0 =A0 =A0(.../trunk) =A0 =A0 (revision 180744) > +++ gcc/gimple.c =A0 =A0 =A0 =A0(.../branches/transactional-memory) =A0 = =A0 (revision > 180773) > @@ -743,6 +743,17 @@ gimple_build_eh_must_not_throw (tree dec > =A0 return p; > =A0} > > +/* Build a GIMPLE_EH_ELSE statement. =A0*/ > + > +gimple > +gimple_build_eh_else (gimple_seq n_body, gimple_seq e_body) > +{ > + =A0gimple p =3D gimple_alloc (GIMPLE_EH_ELSE, 0); > + =A0gimple_eh_else_set_n_body (p, n_body); > + =A0gimple_eh_else_set_e_body (p, e_body); > + =A0return p; > +} > + > =A0/* Build a GIMPLE_TRY statement. > > =A0 =A0EVAL is the expression to evaluate. > @@ -1146,6 +1157,17 @@ gimple_build_omp_atomic_store (tree val) > =A0 return p; > =A0} > > +/* Build a GIMPLE_TRANSACTION statement. =A0*/ > + > +gimple > +gimple_build_transaction (gimple_seq body, tree label) > +{ > + =A0gimple p =3D gimple_alloc (GIMPLE_TRANSACTION, 0); > + =A0gimple_transaction_set_body (p, body); > + =A0gimple_transaction_set_label (p, label); > + =A0return p; > +} > + > =A0/* Build a GIMPLE_PREDICT statement. =A0PREDICT is one of the predicto= rs from > =A0 =A0predict.def, OUTCOME is NOT_TAKEN or TAKEN. =A0*/ > > @@ -1331,7 +1353,7 @@ walk_gimple_seq (gimple_seq seq, walk_st As you are changing features of this walker you should update its documentation. > =A0{ > =A0 gimple_stmt_iterator gsi; > > - =A0for (gsi =3D gsi_start (seq); !gsi_end_p (gsi); gsi_next (&gsi)) > + =A0for (gsi =3D gsi_start (seq); !gsi_end_p (gsi); ) > =A0 =A0 { > =A0 =A0 =A0 tree ret =3D walk_gimple_stmt (&gsi, callback_stmt, callback_= op, wi); > =A0 =A0 =A0 if (ret) > @@ -1340,8 +1362,12 @@ walk_gimple_seq (gimple_seq seq, walk_st > =A0 =A0 =A0 =A0 =A0 =A0 to hold it. =A0*/ > =A0 =A0 =A0 =A0 =A0gcc_assert (wi); > =A0 =A0 =A0 =A0 =A0wi->callback_result =3D ret; > - =A0 =A0 =A0 =A0 return gsi_stmt (gsi); > + > + =A0 =A0 =A0 =A0 return wi->removed_stmt ? 
NULL : gsi_stmt (gsi); > =A0 =A0 =A0 =A0} > + > + =A0 =A0 =A0if (!wi->removed_stmt) > + =A0 =A0 =A0 gsi_next (&gsi); > =A0 =A0 } > > =A0 if (wi) > @@ -1680,6 +1706,13 @@ walk_gimple_op (gimple stmt, walk_tree_f > =A0 =A0 =A0 =A0return ret; > =A0 =A0 =A0 break; > > + =A0 =A0case GIMPLE_TRANSACTION: > + =A0 =A0 =A0ret =3D walk_tree (gimple_transaction_label_ptr (stmt), call= back_op, > + =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0wi, pset); > + =A0 =A0 =A0if (ret) > + =A0 =A0 =A0 return ret; > + =A0 =A0 =A0break; > + > =A0 =A0 =A0 /* Tuples that do not have operands. =A0*/ > =A0 =A0 case GIMPLE_NOP: > =A0 =A0 case GIMPLE_RESX: > @@ -1730,10 +1763,13 @@ walk_gimple_stmt (gimple_stmt_iterator * > =A0 gimple stmt =3D gsi_stmt (*gsi); > > =A0 if (wi) > - =A0 =A0wi->gsi =3D *gsi; > + =A0 =A0{ > + =A0 =A0 =A0wi->gsi =3D *gsi; > + =A0 =A0 =A0wi->removed_stmt =3D false; > > - =A0if (wi && wi->want_locations && gimple_has_location (stmt)) > - =A0 =A0input_location =3D gimple_location (stmt); > + =A0 =A0 =A0if (wi->want_locations && gimple_has_location (stmt)) > + =A0 =A0 =A0 input_location =3D gimple_location (stmt); > + =A0 =A0} > > =A0 ret =3D NULL; > > @@ -1751,6 +1787,8 @@ walk_gimple_stmt (gimple_stmt_iterator * > =A0 =A0 =A0 gcc_assert (tree_ret =3D=3D NULL); > > =A0 =A0 =A0 /* Re-read stmt in case the callback changed it. =A0*/ > + =A0 =A0 =A0if (wi && wi->removed_stmt) > + =A0 =A0 =A0 return NULL; > =A0 =A0 =A0 stmt =3D gsi_stmt (*gsi); > =A0 =A0 } > > @@ -1786,6 +1824,17 @@ walk_gimple_stmt (gimple_stmt_iterator * > =A0 =A0 =A0 =A0return wi->callback_result; > =A0 =A0 =A0 break; > > + =A0 =A0case GIMPLE_EH_ELSE: > + =A0 =A0 =A0ret =3D walk_gimple_seq (gimple_eh_else_n_body (stmt), > + =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0callback_stmt, c= allback_op, wi); > + =A0 =A0 =A0if (ret) > + =A0 =A0 =A0 return wi->callback_result; > + =A0 =A0 =A0ret =3D walk_gimple_seq (gimple_eh_else_e_body (stmt), > + =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0callback_stmt, c= allback_op, wi); > + =A0 =A0 =A0if (ret) > + =A0 =A0 =A0 return wi->callback_result; > + =A0 =A0 =A0break; > + > =A0 =A0 case GIMPLE_TRY: > =A0 =A0 =A0 ret =3D walk_gimple_seq (gimple_try_eval (stmt), callback_stm= t, > callback_op, > =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 wi); > @@ -1813,8 +1862,8 @@ walk_gimple_stmt (gimple_stmt_iterator * > =A0 =A0 case GIMPLE_OMP_TASK: > =A0 =A0 case GIMPLE_OMP_SECTIONS: > =A0 =A0 case GIMPLE_OMP_SINGLE: > - =A0 =A0 =A0ret =3D walk_gimple_seq (gimple_omp_body (stmt), callback_st= mt, > callback_op, > - =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0wi); > + =A0 =A0 =A0ret =3D walk_gimple_seq (gimple_omp_body (stmt), callback_st= mt, > + =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0callback_op, wi); > =A0 =A0 =A0 if (ret) > =A0 =A0 =A0 =A0return wi->callback_result; > =A0 =A0 =A0 break; > @@ -1826,6 +1875,13 @@ walk_gimple_stmt (gimple_stmt_iterator * > =A0 =A0 =A0 =A0return wi->callback_result; > =A0 =A0 =A0 break; > > + =A0 =A0case GIMPLE_TRANSACTION: > + =A0 =A0 =A0ret =3D walk_gimple_seq (gimple_transaction_body (stmt), > + =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0callback_stmt, c= allback_op, wi); > + =A0 =A0 =A0if (ret) > + =A0 =A0 =A0 return wi->callback_result; > + =A0 =A0 =A0break; > + > =A0 =A0 default: > =A0 =A0 =A0 gcc_assert (!gimple_has_substatements (stmt)); > =A0 =A0 =A0 break; > @@ -2252,6 +2308,13 @@ gimple_copy (gimple stmt) > =A0 =A0 =A0 =A0 =A0gimple_eh_filter_set_types (copy, t); > =A0 =A0 =A0 =A0 
=A0break; > > + =A0 =A0 =A0 case GIMPLE_EH_ELSE: > + =A0 =A0 =A0 =A0 new_seq =3D gimple_seq_copy (gimple_eh_else_n_body (stm= t)); > + =A0 =A0 =A0 =A0 gimple_eh_else_set_n_body (copy, new_seq); > + =A0 =A0 =A0 =A0 new_seq =3D gimple_seq_copy (gimple_eh_else_e_body (stm= t)); > + =A0 =A0 =A0 =A0 gimple_eh_else_set_e_body (copy, new_seq); > + =A0 =A0 =A0 =A0 break; > + > =A0 =A0 =A0 =A0case GIMPLE_TRY: > =A0 =A0 =A0 =A0 =A0new_seq =3D gimple_seq_copy (gimple_try_eval (stmt)); > =A0 =A0 =A0 =A0 =A0gimple_try_set_eval (copy, new_seq); > @@ -2327,6 +2390,11 @@ gimple_copy (gimple stmt) > =A0 =A0 =A0 =A0 =A0gimple_omp_set_body (copy, new_seq); > =A0 =A0 =A0 =A0 =A0break; > > + =A0 =A0 =A0 =A0case GIMPLE_TRANSACTION: > + =A0 =A0 =A0 =A0 new_seq =3D gimple_seq_copy (gimple_transaction_body (s= tmt)); > + =A0 =A0 =A0 =A0 gimple_transaction_set_body (copy, new_seq); > + =A0 =A0 =A0 =A0 break; > + > =A0 =A0 =A0 =A0case GIMPLE_WITH_CLEANUP_EXPR: > =A0 =A0 =A0 =A0 =A0new_seq =3D gimple_seq_copy (gimple_wce_cleanup (stmt)= ); > =A0 =A0 =A0 =A0 =A0gimple_wce_set_cleanup (copy, new_seq); > @@ -2785,7 +2853,7 @@ is_gimple_address (const_tree t) > =A0/* Strip out all handled components that produce invariant > =A0 =A0offsets. =A0*/ > > -static const_tree > +const_tree > =A0strip_invariant_refs (const_tree op) > =A0{ > =A0 while (handled_component_p (op)) If you export this please move it to tree.c. > @@ -3085,6 +3153,8 @@ get_call_expr_in (tree t) > =A0 =A0 t =3D TREE_OPERAND (t, 1); > =A0 if (TREE_CODE (t) =3D=3D WITH_SIZE_EXPR) > =A0 =A0 t =3D TREE_OPERAND (t, 0); > + =A0if (TREE_CODE (t) =3D=3D VIEW_CONVERT_EXPR) > + =A0 =A0t =3D TREE_OPERAND (t, 0); > =A0 if (TREE_CODE (t) =3D=3D CALL_EXPR) > =A0 =A0 return t; > =A0 return NULL_TREE; An unused function. Please move it to where you need it instead, make it static and adjust it in a way to do exactly what you want. After the above change it looks strange - handling V_C_E but not other component refs. > Index: gcc/gimple.h > =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D > --- gcc/gimple.h =A0 =A0 =A0 =A0(.../trunk) =A0 =A0 (revision 180744) > +++ gcc/gimple.h =A0 =A0 =A0 =A0(.../branches/transactional-memory) =A0 = =A0 (revision > 180773) > @@ -105,6 +105,7 @@ enum gf_mask { > =A0 =A0 GF_CALL_NOTHROW =A0 =A0 =A0 =A0 =A0 =A0=3D 1 << 5, > =A0 =A0 GF_CALL_ALLOCA_FOR_VAR =A0 =A0 =3D 1 << 6, > =A0 =A0 GF_CALL_INTERNAL =A0 =A0 =A0 =A0 =A0 =3D 1 << 7, > + =A0 =A0GF_CALL_NOINLINE =A0 =A0 =A0 =A0 =A0 =3D 1 << 8, > =A0 =A0 GF_OMP_PARALLEL_COMBINED =A0 =3D 1 << 0, ? Why not use GF_CALL_CANNOT_INLINE? > =A0 =A0 /* True on an GIMPLE_OMP_RETURN statement if the return does not = require > @@ -487,6 +488,15 @@ struct GTY(()) gimple_statement_eh_filte > =A0 gimple_seq failure; > =A0}; > > +/* GIMPLE_EH_ELSE */ > + > +struct GTY(()) gimple_statement_eh_else { > + =A0/* [ WORD 1-4 ] =A0*/ > + =A0struct gimple_statement_base gsbase; > + > + =A0/* [ WORD 5,6 ] */ > + =A0gimple_seq n_body, e_body; > +}; > > =A0/* GIMPLE_EH_MUST_NOT_THROW */ > > @@ -757,6 +767,43 @@ struct GTY(()) gimple_statement_omp_atom > =A0 tree val; > =A0}; > > +/* GIMPLE_TRANSACTION. =A0*/ > + > +/* Bits to be stored in the GIMPLE_TRANSACTION subcode. =A0*/ > + > +/* The __transaction_atomic was declared [[outer]] or it is > + =A0 __transaction_relaxed. 
=A0*/ > +#define GTMA_IS_OUTER =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0(1u << 0) > +#define GTMA_IS_RELAXED =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0(= 1u << 1) > +#define GTMA_DECLARATION_MASK =A0 =A0 =A0 =A0 =A0(GTMA_IS_OUTER | GTMA_I= S_RELAXED) > + > +/* The transaction is seen to not have an abort. =A0*/ > +#define GTMA_HAVE_ABORT =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0(= 1u << 2) > +/* The transaction is seen to have loads or stores. =A0*/ > +#define GTMA_HAVE_LOAD =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 (1u << 3) > +#define GTMA_HAVE_STORE =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0(= 1u << 4) > +/* The transaction MAY enter serial irrevocable mode in its dynamic scop= e. > =A0*/ > +#define GTMA_MAY_ENTER_IRREVOCABLE =A0 =A0 (1u << 5) > +/* The transaction WILL enter serial irrevocable mode. > + =A0 An irrevocable block post-dominates the entire transaction, such > + =A0 that all invocations of the transaction will go serial-irrevocable. > + =A0 In such case, we don't bother instrumenting the transaction, and > + =A0 tell the runtime that it should begin the transaction in > + =A0 serial-irrevocable mode. =A0*/ > +#define GTMA_DOES_GO_IRREVOCABLE =A0 =A0 =A0 (1u << 6) > + > +struct GTY(()) gimple_statement_transaction > +{ > + =A0/* [ WORD 1-10 ] =A0*/ > + =A0struct gimple_statement_with_memory_ops_base gsbase; > + > + =A0/* [ WORD 11 ] */ > + =A0gimple_seq body; > + > + =A0/* [ WORD 12 ] */ > + =A0tree label; > +}; > + > =A0#define DEFGSSTRUCT(SYM, STRUCT, HAS_TREE_OP) =A0SYM, > =A0enum gimple_statement_structure_enum { > =A0#include "gsstruct.def" > @@ -779,6 +826,7 @@ union GTY ((desc ("gimple_statement_stru > =A0 struct gimple_statement_catch GTY ((tag ("GSS_CATCH"))) gimple_catch; > =A0 struct gimple_statement_eh_filter GTY ((tag ("GSS_EH_FILTER"))) > gimple_eh_filter; > =A0 struct gimple_statement_eh_mnt GTY ((tag ("GSS_EH_MNT"))) gimple_eh_m= nt; > + =A0struct gimple_statement_eh_else GTY ((tag ("GSS_EH_ELSE"))) > gimple_eh_else; > =A0 struct gimple_statement_phi GTY ((tag ("GSS_PHI"))) gimple_phi; > =A0 struct gimple_statement_eh_ctrl GTY ((tag ("GSS_EH_CTRL"))) > gimple_eh_ctrl; > =A0 struct gimple_statement_try GTY ((tag ("GSS_TRY"))) gimple_try; > @@ -793,6 +841,7 @@ union GTY ((desc ("gimple_statement_stru > =A0 struct gimple_statement_omp_continue GTY ((tag ("GSS_OMP_CONTINUE"))) > gimple_omp_continue; > =A0 struct gimple_statement_omp_atomic_load GTY ((tag > ("GSS_OMP_ATOMIC_LOAD"))) gimple_omp_atomic_load; > =A0 struct gimple_statement_omp_atomic_store GTY ((tag > ("GSS_OMP_ATOMIC_STORE"))) gimple_omp_atomic_store; > + =A0struct gimple_statement_transaction GTY((tag ("GSS_TRANSACTION"))) > gimple_transaction; > =A0}; > > =A0/* In gimple.c. 
=A0*/ > @@ -846,6 +895,7 @@ gimple gimple_build_asm_vec (const char > =A0gimple gimple_build_catch (tree, gimple_seq); > =A0gimple gimple_build_eh_filter (tree, gimple_seq); > =A0gimple gimple_build_eh_must_not_throw (tree); > +gimple gimple_build_eh_else (gimple_seq, gimple_seq); > =A0gimple gimple_build_try (gimple_seq, gimple_seq, enum gimple_try_flags= ); > =A0gimple gimple_build_wce (gimple_seq); > =A0gimple gimple_build_resx (int); > @@ -868,6 +918,7 @@ gimple gimple_build_omp_single (gimple_s > =A0gimple gimple_build_cdt (tree, tree); > =A0gimple gimple_build_omp_atomic_load (tree, tree); > =A0gimple gimple_build_omp_atomic_store (tree); > +gimple gimple_build_transaction (gimple_seq, tree); > =A0gimple gimple_build_predict (enum br_predictor, enum prediction); > =A0enum gimple_statement_structure_enum gss_for_assign (enum tree_code); > =A0void sort_case_labels (VEC(tree,heap) *); > @@ -986,6 +1037,7 @@ extern bool walk_stmt_load_store_ops (gi > =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 = =A0bool (*)(gimple, tree, void *), > =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 =A0 = =A0bool (*)(gimple, tree, void *)); > =A0extern bool gimple_ior_addresses_taken (bitmap, gimple); > +extern const_tree strip_invariant_refs (const_tree); > =A0extern bool gimple_call_builtin_p (gimple, enum built_in_function); > =A0extern bool gimple_asm_clobbers_memory_p (const_gimple); > > @@ -1077,6 +1129,9 @@ extern tree canonicalize_cond_expr_cond > =A0/* In omp-low.c. =A0*/ > =A0extern tree omp_reduction_init (tree, tree); > > +/* In trans-mem.c. =A0*/ > +extern void diagnose_tm_safe_errors (tree); > + > =A0/* In tree-nested.c. =A0*/ > =A0extern void lower_nested_functions (tree); > =A0extern void insert_field_into_struct (tree, tree); > @@ -1135,6 +1190,7 @@ gimple_has_substatements (gimple g) > =A0 =A0 case GIMPLE_BIND: > =A0 =A0 case GIMPLE_CATCH: > =A0 =A0 case GIMPLE_EH_FILTER: > + =A0 =A0case GIMPLE_EH_ELSE: > =A0 =A0 case GIMPLE_TRY: > =A0 =A0 case GIMPLE_OMP_FOR: > =A0 =A0 case GIMPLE_OMP_MASTER: > @@ -1146,6 +1202,7 @@ gimple_has_substatements (gimple g) > =A0 =A0 case GIMPLE_OMP_SINGLE: > =A0 =A0 case GIMPLE_OMP_CRITICAL: > =A0 =A0 case GIMPLE_WITH_CLEANUP_EXPR: > + =A0 =A0case GIMPLE_TRANSACTION: > =A0 =A0 =A0 return true; > > =A0 =A0 default: > @@ -2436,6 +2493,22 @@ gimple_call_alloca_for_var_p (gimple s) > =A0 return (s->gsbase.subcode & GF_CALL_ALLOCA_FOR_VAR) !=3D 0; > =A0} > > +/* Return true if S is a noinline call. =A0*/ > + > +static inline bool > +gimple_call_noinline_p (gimple s) > +{ > + =A0GIMPLE_CHECK (s, GIMPLE_CALL); > + =A0return (s->gsbase.subcode & GF_CALL_NOINLINE) !=3D 0; > +} > + > +static inline void > +gimple_call_set_noinline_p (gimple s) > +{ > + =A0GIMPLE_CHECK (s, GIMPLE_CALL); > + =A0s->gsbase.subcode |=3D GF_CALL_NOINLINE; > +} See above. We have *_cannot_inline already. > =A0/* Copy all the GF_CALL_* flags from ORIG_CALL to DEST_CALL. =A0*/ > > =A0static inline void > @@ -3178,6 +3251,35 @@ gimple_eh_must_not_throw_set_fndecl (gim > =A0 gs->gimple_eh_mnt.fndecl =3D decl; > =A0} > > +/* GIMPLE_EH_ELSE accessors. 
=A0*/ > + > +static inline gimple_seq > +gimple_eh_else_n_body (gimple gs) > +{ > + =A0GIMPLE_CHECK (gs, GIMPLE_EH_ELSE); > + =A0return gs->gimple_eh_else.n_body; > +} > + > +static inline gimple_seq > +gimple_eh_else_e_body (gimple gs) > +{ > + =A0GIMPLE_CHECK (gs, GIMPLE_EH_ELSE); > + =A0return gs->gimple_eh_else.e_body; > +} > + > +static inline void > +gimple_eh_else_set_n_body (gimple gs, gimple_seq seq) > +{ > + =A0GIMPLE_CHECK (gs, GIMPLE_EH_ELSE); > + =A0gs->gimple_eh_else.n_body =3D seq; > +} > + > +static inline void > +gimple_eh_else_set_e_body (gimple gs, gimple_seq seq) > +{ > + =A0GIMPLE_CHECK (gs, GIMPLE_EH_ELSE); > + =A0gs->gimple_eh_else.e_body =3D seq; > +} > > =A0/* GIMPLE_TRY accessors. */ > > @@ -4556,6 +4658,67 @@ gimple_omp_continue_set_control_use (gim > =A0 g->gimple_omp_continue.control_use =3D use; > =A0} > > +/* Return the body for the GIMPLE_TRANSACTION statement GS. =A0*/ > + > +static inline gimple_seq > +gimple_transaction_body (gimple gs) > +{ > + =A0GIMPLE_CHECK (gs, GIMPLE_TRANSACTION); > + =A0return gs->gimple_transaction.body; > +} > + > +/* Return the label associated with a GIMPLE_TRANSACTION. =A0*/ > + > +static inline tree > +gimple_transaction_label (const_gimple gs) > +{ > + =A0GIMPLE_CHECK (gs, GIMPLE_TRANSACTION); > + =A0return gs->gimple_transaction.label; > +} > + > +static inline tree * > +gimple_transaction_label_ptr (gimple gs) > +{ > + =A0GIMPLE_CHECK (gs, GIMPLE_TRANSACTION); > + =A0return &gs->gimple_transaction.label; > +} > + > +/* Return the subcode associated with a GIMPLE_TRANSACTION. =A0*/ > + > +static inline unsigned int > +gimple_transaction_subcode (const_gimple gs) > +{ > + =A0GIMPLE_CHECK (gs, GIMPLE_TRANSACTION); > + =A0return gs->gsbase.subcode; > +} > + > +/* Set BODY to be the body for the GIMPLE_TRANSACTION statement GS. =A0*/ > + > +static inline void > +gimple_transaction_set_body (gimple gs, gimple_seq body) > +{ > + =A0GIMPLE_CHECK (gs, GIMPLE_TRANSACTION); > + =A0gs->gimple_transaction.body =3D body; > +} > + > +/* Set the label associated with a GIMPLE_TRANSACTION. =A0*/ > + > +static inline void > +gimple_transaction_set_label (gimple gs, tree label) > +{ > + =A0GIMPLE_CHECK (gs, GIMPLE_TRANSACTION); > + =A0gs->gimple_transaction.label =3D label; > +} > + > +/* Set the subcode associated with a GIMPLE_TRANSACTION. =A0*/ > + > +static inline void > +gimple_transaction_set_subcode (gimple gs, unsigned int subcode) > +{ > + =A0GIMPLE_CHECK (gs, GIMPLE_TRANSACTION); > + =A0gs->gsbase.subcode =3D subcode; > +} > + > > =A0/* Return a pointer to the return value for GIMPLE_RETURN GS. =A0*/ > > @@ -4982,6 +5145,12 @@ struct walk_stmt_info > =A0 =A0 =A0will be visited more than once. =A0*/ > =A0 struct pointer_set_t *pset; > > + =A0/* Operand returned by the callbacks. =A0This is set when calling > + =A0 =A0 walk_gimple_seq. =A0If the walk_stmt_fn or walk_tree_fn callback > + =A0 =A0 returns non-NULL, this field will contain the tree returned by > + =A0 =A0 the last callback. =A0*/ > + =A0tree callback_result; > + > =A0 /* Indicates whether the operand being examined may be replaced > =A0 =A0 =A0with something that matches is_gimple_val (if true) or somethi= ng > =A0 =A0 =A0slightly more complicated (if false). =A0"Something" technical= ly > @@ -4994,23 +5163,20 @@ struct walk_stmt_info > =A0 =A0 =A0statement 'foo (&var)', the flag VAL_ONLY will initially be set > =A0 =A0 =A0to true, however, when walking &var, the operand of that > =A0 =A0 =A0ADDR_EXPR does not need to be a GIMPLE value. 
=A0*/ > - =A0bool val_only; > + =A0BOOL_BITFIELD val_only : 1; > > =A0 /* True if we are currently walking the LHS of an assignment. =A0*/ > - =A0bool is_lhs; > + =A0BOOL_BITFIELD is_lhs : 1; > > =A0 /* Optional. =A0Set to true by the callback functions if they made any > =A0 =A0 =A0changes. =A0*/ > - =A0bool changed; > + =A0BOOL_BITFIELD changed : 1; > > =A0 /* True if we're interested in location information. =A0*/ > - =A0bool want_locations; > + =A0BOOL_BITFIELD want_locations : 1; > > - =A0/* Operand returned by the callbacks. =A0This is set when calling > - =A0 =A0 walk_gimple_seq. =A0If the walk_stmt_fn or walk_tree_fn callback > - =A0 =A0 returns non-NULL, this field will contain the tree returned by > - =A0 =A0 the last callback. =A0*/ > - =A0tree callback_result; > + =A0/* True if we've removed the statement that was processed. =A0*/ > + =A0BOOL_BITFIELD removed_stmt : 1; > =A0}; > > =A0/* Callback for walk_gimple_stmt. =A0Called for every statement found > Otherwise looks ok to me. Richard.
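P.S. To make the special_function_p point above concrete: roughly the
following, directly in the DECL_P branch of flags_from_decl_or_type, is
what I have in mind - an untested sketch with most of the builtin codes
elided; ECF_TM_OPS and the CASE_BUILT_IN_TM_* macros are the ones the
patch introduces:

  /* Set ECF_TM_OPS for the TM builtins here rather than in
     special_function_p, which should keep checking identifiers only.  */
  if (DECL_BUILT_IN_CLASS (exp) == BUILT_IN_NORMAL)
    switch (DECL_FUNCTION_CODE (exp))
      {
      case BUILT_IN_TM_COMMIT:
      case BUILT_IN_TM_ABORT:
      CASE_BUILT_IN_TM_STORE (1):
      CASE_BUILT_IN_TM_LOAD (1):
      /* ... the remaining TM load/store/log codes ...  */
        flags |= ECF_TM_OPS;
        break;
      default:
        break;
      }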