From: Richard Henderson <rth@redhat.com>
To: gcc-patches@gcc.gnu.org
Subject: [trans-mem] snapshot tm expansion code
Date: Sat, 25 Oct 2008 15:50:00 -0000
Message-ID: <49025DDC.3050706@redhat.com>

[-- Attachment #1: Type: text/plain, Size: 1053 bytes --]

I've reorganized the TM expansion code significantly since the last
commit.  I've done away with the TM_LOAD/STORE markings in favour of
just using builtin functions.  I'm delaying expansion of the transaction
machinery until after all optimizations have run, so I no longer need to
worry about the optimizers doing something weird.  I've split up the
TM expansion into several passes, all gated by pass_tm_init, which
arranges to elide all of the TM passes if there are no TM regions.
There are placeholders for the IPA and must-alias optimization passes,
and examples of how to walk the dominator tree for each TM region.
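
To give a flavour of the headline change (a condensed sketch lifted from
the new block comment in trans-mem.c, not literal dump output): an
assignment that used to carry a TM_LOAD/TM_STORE marking is now left
alone until the tm_mark pass, which rewrites it as ordinary builtin
calls:

	t0 = __builtin___tm_load (global);
	t1 = t0 + 1;
	__builtin___tm_store (&global, t1);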

All that said, while it compiles, it's not complete enough to work yet.
There's some problem with the SSA update after inserting the builtins,
and the code to insert the transaction-restart backedges and lower the
gimple_tm_atomic nodes hasn't been reconnected.  I just felt that it
had been too long since a commit, and wanted to let Martin and Albert
see where things are heading.  Hope to have things functional again
early next week.


r~

[-- Attachment #2: z.txt --]
[-- Type: text/plain, Size: 86854 bytes --]

        * calls.c (special_function_p): Include more TM builtins.
        * cfgexpand.c: Revert all changes.
        * except.c (struct eh_region): Add u.transaction.tm_atomic_stmt.
        (gen_eh_region_transaction): Take and store the stmt.
        (for_each_tm_atomic): New.
        * except.h (for_each_tm_atomic): Declare.
        * gimple-pretty-print.c (dump_gimple_assign): Revert.
        * gimple.c (gimple_rhs_class_table): Revert.
        * gimple.h (GTMA_HAVE_CALL_INDIRECT): Remove.
        (GTMA_HAVE_UNCOMMITTED_THROW): New.
        * gimplify.c (gimplify_tm_atomic): Remove EH wrapping.
        * gtm-builtins.def (BUILT_IN_TM_IRREVOKABLE): New.
        (BUILT_IN_TM_MEMCPY): New.
        (BUILT_IN_TM_LOAD_*): Mark PURE.
        * passes.c (init_optimization_passes): Place TM passes.
        * trans-mem.c: Rewrite.
        * tree-cfg.c (make_edges): Revert.
        (is_ctrl_stmt): Rewrite as switch.
        (is_ctrl_altering_stmt): Likewise.  Handle GIMPLE_TM_ATOMIC.
        * tree-eh.c (lower_tm_atomic_eh): New.
        (lower_eh_constructs_2): Record EH region for transactional stmts.
        * tree-flow.h (make_tm_edge): Remove.
        * tree-passes.h (pass_checkpoint_tm): Remove.
        (pass_tm_init, pass_tm_mark, pass_tm_memopt,
        pass_tm_edges, pass_tm_done, pass_ipa_tm): New.
        * tree-ssa-operands.c (get_addr_dereference_operands): Handle
        ADDR_EXPR.
        (add_tm_call_ops): New.
        (maybe_add_call_clobbered_vops): Use it.
        (add_all_call_clobber_ops): Split out from ... 
        (get_asm_expr_operands): ... here.
        (parse_ssa_operands): Convert to switch.
        * tree.def (TM_LOAD, TM_STORE): Remove.

--- calls.c	(revision 141360)
+++ calls.c	(local)
@@ -483,6 +483,9 @@ special_function_p (const_tree fndecl, i
       switch (DECL_FUNCTION_CODE (fndecl))
 	{
 	case BUILT_IN_TM_ABORT:
+	case BUILT_IN_TM_COMMIT:
+	case BUILT_IN_TM_IRREVOKABLE:
+	case BUILT_IN_TM_MEMCPY:
 	case BUILT_IN_TM_STORE_1:
 	case BUILT_IN_TM_STORE_2:
 	case BUILT_IN_TM_STORE_4:
--- cfgexpand.c	(revision 141360)
+++ cfgexpand.c	(local)
@@ -51,10 +51,8 @@ gimple_assign_rhs_to_tree (gimple stmt)
 {
   tree t;
   enum gimple_rhs_class grhs_class;
-  enum tree_code code;
-
-  code = gimple_expr_code (stmt);
-  grhs_class = get_gimple_rhs_class (code);
+
+  grhs_class = get_gimple_rhs_class (gimple_expr_code (stmt));
 
   if (grhs_class == GIMPLE_BINARY_RHS)
     t = build2 (gimple_assign_rhs_code (stmt),
@@ -66,10 +64,7 @@ gimple_assign_rhs_to_tree (gimple stmt)
 		TREE_TYPE (gimple_assign_lhs (stmt)),
 		gimple_assign_rhs1 (stmt));
   else if (grhs_class == GIMPLE_SINGLE_RHS)
-    {
-      gcc_assert (code != TM_LOAD && code != TM_STORE);
-      t = gimple_assign_rhs1 (stmt);
-    }
+    t = gimple_assign_rhs1 (stmt);
   else
     gcc_unreachable ();
 
@@ -99,99 +94,6 @@ set_expr_location_r (tree *tp, int *ws A
   return NULL_TREE;
 }
 
-/* Construct a memory load in a transactional context.  */
-
-static tree
-build_tm_load (tree lhs, tree rhs)
-{
-  enum built_in_function code = END_BUILTINS;
-  tree t, type = TREE_TYPE (rhs);
-
-  if (type == float_type_node)
-    code = BUILT_IN_TM_LOAD_FLOAT;
-  else if (type == double_type_node)
-    code = BUILT_IN_TM_LOAD_DOUBLE;
-  else if (TYPE_SIZE_UNIT (type) != NULL
-	   && host_integerp (TYPE_SIZE_UNIT (type), 1))
-    {
-      switch (tree_low_cst (TYPE_SIZE_UNIT (type), 1))
-	{
-	case 1:
-	  code = BUILT_IN_TM_LOAD_1;
-	  break;
-	case 2:
-	  code = BUILT_IN_TM_LOAD_2;
-	  break;
-	case 4:
-	  code = BUILT_IN_TM_LOAD_4;
-	  break;
-	case 8:
-	  code = BUILT_IN_TM_LOAD_8;
-	  break;
-	}
-    }
-
-  if (code == END_BUILTINS)
-    {
-      sorry ("transactional load for %T not supported", type);
-      code = BUILT_IN_TM_LOAD_4;
-    }
-
-  t = built_in_decls[code];
-  t = build_call_expr (t, 1, build_fold_addr_expr (rhs));
-  if (TYPE_MAIN_VARIANT (TREE_TYPE (t)) != TYPE_MAIN_VARIANT (type))
-    t = build1 (VIEW_CONVERT_EXPR, type, t);
-  t = build2 (MODIFY_EXPR, TREE_TYPE (lhs), lhs, t);
-
-  return t;
-}
-
-/* Similarly for storing TYPE in a transactional context.  */
-
-static tree
-build_tm_store (tree lhs, tree rhs)
-{
-  enum built_in_function code = END_BUILTINS;
-  tree t, fn, type = TREE_TYPE (rhs), simple_type;
-
-  if (type == float_type_node)
-    code = BUILT_IN_TM_STORE_FLOAT;
-  else if (type == double_type_node)
-    code = BUILT_IN_TM_STORE_DOUBLE;
-  else if (TYPE_SIZE_UNIT (type) != NULL
-	   && host_integerp (TYPE_SIZE_UNIT (type), 1))
-    {
-      switch (tree_low_cst (TYPE_SIZE_UNIT (type), 1))
-	{
-	case 1:
-	  code = BUILT_IN_TM_STORE_1;
-	  break;
-	case 2:
-	  code = BUILT_IN_TM_STORE_2;
-	  break;
-	case 4:
-	  code = BUILT_IN_TM_STORE_4;
-	  break;
-	case 8:
-	  code = BUILT_IN_TM_STORE_8;
-	  break;
-	}
-    }
-
-  if (code == END_BUILTINS)
-    {
-      sorry ("transactional load for %T not supported", type);
-      code = BUILT_IN_TM_STORE_4;
-    }
-
-  fn = built_in_decls[code];
-  simple_type = TREE_VALUE (TREE_CHAIN (TYPE_ARG_TYPES (TREE_TYPE (fn))));
-  if (TYPE_MAIN_VARIANT (simple_type) != TYPE_MAIN_VARIANT (type))
-    rhs = build1 (VIEW_CONVERT_EXPR, simple_type, rhs);
-  t = build_call_expr (fn, 2, build_fold_addr_expr (lhs), rhs);
-
-  return t;
-}
 
 /* RTL expansion has traditionally been done on trees, so the
    transition to doing it on GIMPLE tuples is very invasive to the RTL
@@ -213,23 +115,10 @@ gimple_to_tree (gimple stmt)
       {
 	tree lhs = gimple_assign_lhs (stmt);
 
-	switch (gimple_expr_code (stmt))
-	  {
-	  case TM_LOAD:
-	    t = build_tm_load (lhs, gimple_assign_rhs1 (stmt));
-	    break;
-				 
-	  case TM_STORE:
-	    t = build_tm_store (lhs, gimple_assign_rhs1 (stmt));
-	    break;
-
-	  default:
-	    t = gimple_assign_rhs_to_tree (stmt);
-	    t = build2 (MODIFY_EXPR, TREE_TYPE (lhs), lhs, t);
-	    if (gimple_assign_nontemporal_move_p (stmt))
-	      MOVE_NONTEMPORAL (t) = true;
-	    break;
-	  }
+	t = gimple_assign_rhs_to_tree (stmt);
+	t = build2 (MODIFY_EXPR, TREE_TYPE (lhs), lhs, t);
+	if (gimple_assign_nontemporal_move_p (stmt))
+	  MOVE_NONTEMPORAL (t) = true;
       }
       break;
 	                                 
--- except.c	(revision 141360)
+++ except.c	(local)
@@ -182,7 +182,7 @@ struct eh_region GTY(())
 
     /* ??? Nothing for now.  */
     struct eh_region_u_transaction {
-      int dummy;
+      gimple tm_atomic_stmt;
     } GTY ((tag ("ERT_TRANSACTION"))) transaction;
   } GTY ((desc ("%0.type"))) u;
 
@@ -515,9 +515,11 @@ gen_eh_region_must_not_throw (struct eh_
 }
 
 struct eh_region *
-gen_eh_region_transaction (struct eh_region *outer)
+gen_eh_region_transaction (struct eh_region *outer, gimple stmt)
 {
-  return gen_eh_region (ERT_TRANSACTION, outer);
+  struct eh_region *region = gen_eh_region (ERT_TRANSACTION, outer);
+  region->u.transaction.tm_atomic_stmt = stmt;
+  return region;
 }
 
 int
@@ -2417,6 +2419,34 @@ for_each_eh_region (void (*callback) (st
 	(*callback) (region);
     }
 }
+
+void
+for_each_tm_atomic (bool want_nested, void (*callback) (gimple, void *),
+		    void *callback_data)
+{
+  int i, n = cfun->eh->last_region_number;
+  for (i = 1; i <= n; ++i)
+    {
+      struct eh_region *region;
+
+      region = VEC_index (eh_region, cfun->eh->region_array, i);
+      if (region && region->type == ERT_TRANSACTION)
+	{
+	  if (!want_nested)
+	    {
+	      struct eh_region *r = region;
+	      do {
+		r = r->outer;
+	      } while (r && r->type != ERT_TRANSACTION);
+	      if (r)
+		continue;
+	    }
+
+	  callback (region->u.transaction.tm_atomic_stmt, callback_data);
+	}
+    }
+
+}
 \f
 /* This section describes CFG exception edges for flow.  */
 
--- except.h	(revision 141360)
+++ except.h	(local)
@@ -43,6 +43,9 @@ extern void for_each_eh_label (void (*) 
 /* Invokes CALLBACK for every exception region in the current function.  */
 extern void for_each_eh_region (void (*) (struct eh_region *));
 
+/* Invokes CALLBACK for every transaction region.  */
+extern void for_each_tm_atomic (bool, void (*) (gimple, void *), void *);
+
 /* Determine if the given INSN can throw an exception.  */
 extern bool can_throw_internal_1 (int, bool);
 extern bool can_throw_internal (const_rtx);
@@ -91,7 +94,7 @@ extern struct eh_region *gen_eh_region_t
 extern struct eh_region *gen_eh_region_catch (struct eh_region *, tree);
 extern struct eh_region *gen_eh_region_allowed (struct eh_region *, tree);
 extern struct eh_region *gen_eh_region_must_not_throw (struct eh_region *);
-extern struct eh_region *gen_eh_region_transaction (struct eh_region *);
+extern struct eh_region *gen_eh_region_transaction (struct eh_region *, gimple);
 extern int get_eh_region_number (struct eh_region *);
 extern struct eh_region *get_eh_region_from_number (int);
 extern bool get_eh_region_may_contain_throw (struct eh_region *);
--- gimple-pretty-print.c	(revision 141360)
+++ gimple-pretty-print.c	(local)
@@ -355,19 +355,9 @@ dump_binary_rhs (pretty_printer *buffer,
 static void
 dump_gimple_assign (pretty_printer *buffer, gimple gs, int spc, int flags)
 {
-  enum tree_code code;
-
-  /* Don't bypass the transactional markers like
-     gimple_assign_rhs_code would.  */
-  code = gimple_expr_code (gs);
-  if (code != TM_LOAD && code != TM_STORE
-      && get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS)
-    code = TREE_CODE (gimple_assign_rhs1 (gs));
-
   if (flags & TDF_RAW)
     {
       tree last;
-
       if (gimple_num_ops (gs) == 2)
         last = NULL_TREE;
       else if (gimple_num_ops (gs) == 3)
@@ -376,8 +366,8 @@ dump_gimple_assign (pretty_printer *buff
         gcc_unreachable ();
 
       dump_gimple_fmt (buffer, spc, flags, "%G <%s, %T, %T, %T>", gs,
-                       tree_code_name[code], gimple_assign_lhs (gs),
-		       gimple_assign_rhs1 (gs), last);
+                       tree_code_name[gimple_assign_rhs_code (gs)],
+                       gimple_assign_lhs (gs), gimple_assign_rhs1 (gs), last);
     }
   else
     {
@@ -387,11 +377,6 @@ dump_gimple_assign (pretty_printer *buff
 	  pp_space (buffer);
 	  pp_character (buffer, '=');
 
-	  if (code == TM_LOAD)
-	    pp_string (buffer, "{tm_load}");
-	  else if (code == TM_STORE)
-	    pp_string (buffer, "{tm_store}");
-
 	  if (gimple_assign_nontemporal_move_p (gs))
 	    pp_string (buffer, "{nt}");
 
--- gimple.c	(revision 141360)
+++ gimple.c	(local)
@@ -2583,9 +2583,7 @@ get_gimple_rhs_num_ops (enum tree_code c
       || (SYM) == POLYNOMIAL_CHREC					    \
       || (SYM) == DOT_PROD_EXPR						    \
       || (SYM) == VEC_COND_EXPR						    \
-      || (SYM) == REALIGN_LOAD_EXPR					    \
-      || (SYM) == TM_LOAD						    \
-      || (SYM) == TM_STORE) ? GIMPLE_SINGLE_RHS				    \
+      || (SYM) == REALIGN_LOAD_EXPR) ? GIMPLE_SINGLE_RHS		    \
    : GIMPLE_INVALID_RHS),
 #define END_OF_BASE_TREE_CODES (unsigned char) GIMPLE_INVALID_RHS,
 
--- gimple.h	(revision 141360)
+++ gimple.h	(local)
@@ -742,7 +742,7 @@ struct gimple_statement_omp_atomic_store
 #define GTMA_HAVE_CALL_TM		(1u << 3)
 #define GTMA_HAVE_CALL_IRREVOKABLE	(1u << 4)
 #define GTMA_MUST_CALL_IRREVOKABLE	(1u << 5)
-#define GTMA_HAVE_CALL_INDIRECT		(1u << 6)
+#define GTMA_HAVE_UNCOMMITTED_THROW	(1u << 6)
 
 struct gimple_statement_tm_atomic GTY(())
 {
--- gimplify.c	(revision 141360)
+++ gimplify.c	(local)
@@ -6123,7 +6123,7 @@ gimplify_tm_atomic (tree *expr_p, gimple
 {
   tree expr = *expr_p;
   gimple g;
-  gimple_seq body = NULL, cleanup = NULL;
+  gimple_seq body = NULL;
   struct gimplify_ctx gctx;
 
   push_gimplify_context (&gctx);
@@ -6134,23 +6134,7 @@ gimplify_tm_atomic (tree *expr_p, gimple
   else
     pop_gimplify_context (NULL);
 
-  /* We need to add the EH support for committing the transaction
-     before pass_lower_eh runs, which is before pass_expand_tm.
-     Doing it now is easy enough.  We need to build
-	try {
-	  BODY
-	} finally {
-	  __tm_commit ();
-	}
-  */
-  g = gimple_build_call (built_in_decls[BUILT_IN_TM_COMMIT], 0);
-  gimplify_seq_add_stmt (&cleanup, g);
-  g = gimple_build_try (body, cleanup, GIMPLE_TRY_FINALLY);
-
-  body = NULL;
-  gimplify_seq_add_stmt (&body, g);
   g = gimple_build_tm_atomic (body, NULL);
-
   gimplify_seq_add_stmt (pre_p, g);
   *expr_p = NULL_TREE;
 
--- gtm-builtins.def	(revision 141360)
+++ gtm-builtins.def	(local)
@@ -5,6 +5,11 @@ DEF_TM_BUILTIN (BUILT_IN_TM_COMMIT, "__g
 		BT_FN_VOID, ATTR_NOTHROW_LIST)
 DEF_TM_BUILTIN (BUILT_IN_TM_ABORT, "__gtm_abort",
 		BT_FN_VOID, ATTR_NORETURN_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_IRREVOKABLE, "__gtm_irrevokable",
+		BT_FN_VOID, ATTR_NOTHROW_LIST)
+
+DEF_TM_BUILTIN (BUILT_IN_TM_MEMCPY, "__gtm_memcpy",
+		BT_FN_PTR_PTR_CONST_PTR_SIZE, ATTR_NOTHROW_NONNULL)
 
 DEF_TM_BUILTIN (BUILT_IN_TM_STORE_1, "__gtm_store8",
 		BT_FN_VOID_VPTR_I1, ATTR_NOTHROW_LIST)
@@ -20,14 +25,14 @@ DEF_TM_BUILTIN (BUILT_IN_TM_STORE_DOUBLE
 		BT_FN_VOID_VPTR_DOUBLE, ATTR_NOTHROW_LIST)
 
 DEF_TM_BUILTIN (BUILT_IN_TM_LOAD_1, "__gtm_load8",
-		BT_FN_I1_VPTR, ATTR_NOTHROW_LIST)
+		BT_FN_I1_VPTR, ATTR_PURE_NOTHROW_LIST)
 DEF_TM_BUILTIN (BUILT_IN_TM_LOAD_2, "__gtm_load16",
-		BT_FN_I2_VPTR, ATTR_NOTHROW_LIST)
+		BT_FN_I2_VPTR, ATTR_PURE_NOTHROW_LIST)
 DEF_TM_BUILTIN (BUILT_IN_TM_LOAD_4, "__gtm_load32",
-		BT_FN_I4_VPTR, ATTR_NOTHROW_LIST)
+		BT_FN_I4_VPTR, ATTR_PURE_NOTHROW_LIST)
 DEF_TM_BUILTIN (BUILT_IN_TM_LOAD_8, "__gtm_load64",
-		BT_FN_I8_VPTR, ATTR_NOTHROW_LIST)
+		BT_FN_I8_VPTR, ATTR_PURE_NOTHROW_LIST)
 DEF_TM_BUILTIN (BUILT_IN_TM_LOAD_FLOAT, "__gtm_load_float",
-		BT_FN_FLOAT_VPTR, ATTR_NOTHROW_LIST)
+		BT_FN_FLOAT_VPTR, ATTR_PURE_NOTHROW_LIST)
 DEF_TM_BUILTIN (BUILT_IN_TM_LOAD_DOUBLE, "__gtm_load_double",
-		BT_FN_DOUBLE_VPTR, ATTR_NOTHROW_LIST)
+		BT_FN_DOUBLE_VPTR, ATTR_PURE_NOTHROW_LIST)
--- passes.c	(revision 141360)
+++ passes.c	(local)
@@ -513,9 +513,9 @@ init_optimization_passes (void)
   NEXT_PASS (pass_mudflap_1);
   NEXT_PASS (pass_lower_omp);
   NEXT_PASS (pass_lower_cf);
+  NEXT_PASS (pass_lower_tm);
   NEXT_PASS (pass_refactor_eh);
   NEXT_PASS (pass_lower_eh);
-  NEXT_PASS (pass_lower_tm);
   NEXT_PASS (pass_build_cfg);
   NEXT_PASS (pass_lower_complex_O0);
   NEXT_PASS (pass_lower_vector);
@@ -564,7 +564,6 @@ init_optimization_passes (void)
 	  NEXT_PASS (pass_convert_switch);
           NEXT_PASS (pass_profile);
 	}
-      NEXT_PASS (pass_checkpoint_tm);
       NEXT_PASS (pass_release_ssa_names);
       NEXT_PASS (pass_rebuild_cgraph_edges);
       NEXT_PASS (pass_inline_parameters);
@@ -578,6 +577,7 @@ init_optimization_passes (void)
   NEXT_PASS (pass_ipa_type_escape);
   NEXT_PASS (pass_ipa_pta);
   NEXT_PASS (pass_ipa_struct_reorg);  
+  NEXT_PASS (pass_ipa_tm);
   *p = NULL;
 
   /* These passes are run after IPA passes on every function that is being
@@ -704,6 +704,14 @@ init_optimization_passes (void)
       NEXT_PASS (pass_rename_ssa_copies);
       NEXT_PASS (pass_uncprop);
     }
+  NEXT_PASS (pass_tm_init);
+    {
+      struct opt_pass **p = &pass_tm_init.pass.sub;
+      NEXT_PASS (pass_tm_mark);
+      NEXT_PASS (pass_tm_memopt);
+      NEXT_PASS (pass_tm_edges);
+      NEXT_PASS (pass_tm_done);
+    }
   NEXT_PASS (pass_del_ssa);
   NEXT_PASS (pass_nrv);
   NEXT_PASS (pass_mark_used_blocks);
--- trans-mem.c	(revision 141360)
+++ trans-mem.c	(local)
@@ -27,6 +27,8 @@
 #include "tree-pass.h"
 #include "except.h"
 #include "diagnostic.h"
+#include "toplev.h"
+#include "flags.h"
 
 
 /* The representation of a transaction changes several times during the
@@ -39,24 +41,25 @@
 	    __tm_abort;
 	}
 
-  is represented as
-
-	TM_ATOMIC {
-	  local++;
-	  if (++global == 10)
-	    __builtin___tm_abort ();
-	}
-
   During initial gimplification (gimplify.c) the TM_ATOMIC node is
-  trivially replaced with a GIMPLE_TM_ATOMIC node, and we add bits
-  to handle EH cleanup of the transaction:
+  trivially replaced with a GIMPLE_TM_ATOMIC node.
+
+  During pass_lower_tm, we examine the body of transactions looking
+  for aborts.  Transactions that do not contain an abort may be 
+  merged into an outer transaction.  We also add a TRY-FINALLY node
+  to arrange for the transaction to be committed on any exit.
+
+  [??? Think about how this arrangement affects throw-with-commit
+  and throw-with-abort operations.  In this case we want the TRY to
+  handle gotos, but not to catch any exceptions because the transaction
+  will already be closed.]
 
 	GIMPLE_TM_ATOMIC [label=NULL] {
 	  try {
 	    local = local + 1;
-	    t0 [tm_load]= global;
+	    t0 = global;
 	    t1 = t0 + 1;
-	    global [tm_store]= t1;
+	    global = t1;
 	    if (t1 == 10)
 	      __builtin___tm_abort ();
 	  } finally {
@@ -68,92 +71,48 @@
   intermixed with the regular EH stuff.  This gives us a nice persistent
   mapping (all the way through rtl) from transactional memory operation
   back to the transaction, which allows us to get the abnormal edges
-  correct to model transaction aborts and restarts.
-
-  During pass_lower_tm, we mark the gimple statements that perform
-  transactional memory operations with TM_LOAD/TM_STORE, and swap out
-  function calls with their (non-)transactional clones.  At this time
-  we flatten nested transactions (when possible), and flatten the
-  GIMPLE representation.
+  correct to model transaction aborts and restarts:
 
 	GIMPLE_TM_ATOMIC [label=over]
-	eh_label:
 	local = local + 1;
-	t0 [tm_load]= global;
+	t0 = global;
 	t1 = t0 + 1;
-	global [tm_store]= t1;
+	global = t1;
 	if (t1 == 10)
 	  __builtin___tm_abort ();
 	__builtin___tm_commit ();
 	over:
 
-  During pass_checkpoint_tm, we complete the lowering of the
-  GIMPLE_TM_ATOMIC node.  Here we examine the SSA web and arange for
-  local variables to be saved and restored in the event of an abort.
+  This is the form at the end of all_lowering_passes, and so is what
+  is present during the IPA passes and through all optimization passes.
+
+  During pass_ipa_tm, we examine all GIMPLE_TM_ATOMIC blocks in all
+  functions and mark functions for cloning.
+
+  At the end of gimple optimization, before exiting SSA form,
+  pass_tm_mark replaces statements that perform transactional
+  memory operations with the appropriate TM builtins, and swaps
+  out function calls with their transactional clones.  At this
+  point we introduce the abnormal transaction restart edges and
+  complete lowering of the GIMPLE_TM_ATOMIC node.
 
-	save_local = local;
 	x = __builtin___tm_start (MAY_ABORT);
 	eh_label:
-	if (x & restore_locals) {
-	  local = save_local;
-	}
 	if (x & abort_transaction)
 	  goto over;
 	local = local + 1;
-	t0 [tm_load]= global;
+        t0 = __builtin___tm_load (global);
 	t1 = t0 + 1;
-	global [tm_store]= t1;
+        __builtin___tm_store (&global, t1);
 	if (t1 == 10)
 	  __builtin___tm_abort ();
 	__builtin___tm_commit ();
 	over:
+*/
 
-  During expansion to rtl, we expand the TM_LOAD/TM_STORE markings
-  with calls to the appropriate builtin functions.  Delaying this long
-  allows the tree optimizers the most visibility into the operations.  */
-
-DEF_VEC_O(gimple_stmt_iterator);
-DEF_VEC_ALLOC_O(gimple_stmt_iterator,heap);
-
-struct ltm_state
-{
-  /* Bits to be stored in the GIMPLE_TM_ATOMIC subcode.  */
-  unsigned subcode;
-
-  /* The EH region number for this transaction.  Non-negative numbers
-     represent an active transaction within this function; -1 represents
-     an active transaction from a calling function (i.e. we're compiling
-     a transaction clone).  For no active transaction, the state pointer
-     passed will be null.  */
-  int region_nr;
-
-  /* Record the iterator pointing to a __tm_commit function call that
-     binds to this transaction region.  There may be many such calls,
-     depending on how the EH expansion of the try-finally node went.
-     But there's usually exactly one such call, and essentially always
-     only a small number, so to avoid rescanning the entire sequence
-     when we need to remove these calls, record the iterator location.  */
-  VEC(gimple_stmt_iterator,heap) *commit_stmts;
-};
-
-
-static void lower_sequence_tm (struct ltm_state *, gimple_seq);
+static void lower_sequence_tm (unsigned *, gimple_seq);
 static void lower_sequence_no_tm (gimple_seq);
 
-
-/* Record the transaction for this statement.  If the statement
-   already has a region number that's fine -- it means that the
-   statement can also throw.  If there's no region number, it 
-   means we're expanding a transactional clone and the region
-   is in a calling function.  */
-
-static void
-add_stmt_to_transaction (struct ltm_state *state, gimple stmt)
-{
-  if (state->region_nr >= 0 && lookup_stmt_eh_region (stmt) < 0)
-    add_stmt_to_eh_region (stmt, state->region_nr);
-}
-
 /* Determine whether X has to be instrumented using a read
    or write barrier.  */
 
@@ -166,7 +125,6 @@ requires_barrier (tree x)
   switch (TREE_CODE (x))
     {
     case INDIRECT_REF:
-      /* ??? Use must-alias information to reduce this.  */
       return true;
 
     case ALIGN_INDIRECT_REF:
@@ -189,155 +147,63 @@ requires_barrier (tree x)
    a transaction region.  */
 
 static void
-lower_assign_tm (struct ltm_state *state, gimple_stmt_iterator *gsi)
+examine_assign_tm (unsigned *state, gimple_stmt_iterator *gsi)
 {
   gimple stmt = gsi_stmt (*gsi);
-  bool load_p = requires_barrier (gimple_assign_rhs1 (stmt));
-  bool store_p = requires_barrier (gimple_assign_lhs (stmt));
-
-  if (load_p && store_p)
-    {
-      /* ??? This is a copy between two aggregates in memory.  I
-	 believe the Intel compiler handles this with a special
-	 version of memcpy.  For now, just consider the transaction
-	 irrevokable at this point.  */
-      state->subcode |= GTMA_HAVE_CALL_IRREVOKABLE;
-      return;
-    }
-  else if (load_p)
-    {
-      gimple_assign_set_rhs_code (stmt, TM_LOAD);
-      state->subcode |= GTMA_HAVE_LOAD;
-    }
-  else if (store_p)
-    {
-      gimple_assign_set_rhs_code (stmt, TM_STORE);
-      state->subcode |= GTMA_HAVE_STORE;
-    }
-  else
-    return;
 
-  add_stmt_to_transaction (state, stmt);
+  if (requires_barrier (gimple_assign_rhs1 (stmt)))
+    *state |= GTMA_HAVE_LOAD;
+  if (requires_barrier (gimple_assign_lhs (stmt)))
+    *state |= GTMA_HAVE_STORE;
 }
 
 /* Mark a GIMPLE_CALL as appropriate for being inside a transaction.  */
 
 static void
-lower_call_tm (struct ltm_state *state, gimple_stmt_iterator *gsi)
+examine_call_tm (unsigned *state, gimple_stmt_iterator *gsi)
 {
   gimple stmt = gsi_stmt (*gsi);
   tree fn_decl;
-  struct cgraph_node *node, *orig_node;
-  int flags;
+  unsigned flags;
 
   flags = gimple_call_flags (stmt);
   if (flags & ECF_CONST)
     return;
 
+  if (flag_exceptions && !(flags & ECF_NOTHROW))
+    *state |= GTMA_HAVE_UNCOMMITTED_THROW;
+
   fn_decl = gimple_call_fndecl (stmt);
   if (!fn_decl)
     {
-      state->subcode |= GTMA_HAVE_CALL_INDIRECT;
+      *state |= GTMA_HAVE_CALL_IRREVOKABLE;
       return;
     }
 
-  /* Check if this call is one of our transactional builtins.  */
-  if (DECL_BUILT_IN_CLASS (fn_decl) == BUILT_IN_NORMAL)
-    switch (DECL_FUNCTION_CODE (fn_decl))
-      {
-      case BUILT_IN_TM_COMMIT:
-	/* Remember the commit so that we can remove it if
-	   we decide to elide the transaction.  */
-	VEC_safe_push (gimple_stmt_iterator,heap, state->commit_stmts, gsi);
-	return;
-      case BUILT_IN_TM_ABORT:
-	state->subcode |= GTMA_HAVE_ABORT;
-	add_stmt_to_transaction (state, stmt);
-	return;
-
-      default:
-	break;
-      }
-
+  /* If this function is pure, we can ignore it.  */
   if (DECL_IS_TM_PURE (fn_decl))
     return;
 
-  orig_node = node = cgraph_node (fn_decl);
-
-  /* Find transactional clone of function.  */
-  while (node && node->next_clone)
+  /* Check if this call is a transaction abort.  */
+  if (DECL_BUILT_IN_CLASS (fn_decl) == BUILT_IN_NORMAL
+      && DECL_FUNCTION_CODE (fn_decl) == BUILT_IN_TM_ABORT)
     {
-      node = node->next_clone;
-      if (DECL_IS_TM_CLONE (node->decl))
-	break;
-    }
-  if (DECL_IS_TM_CLONE (node->decl))
-    {
-      struct cgraph_edge *callers = orig_node->callers;
-
-      /* Find appropriate call stmt to redirect.  */
-      while (callers)
-	{
-	  if (callers->call_stmt == stmt)
-	    break;
-	  callers = callers->next_caller;
-	}
-
-      /* Substitute call stmt.  */
-      if (callers)
-	{
-	  gimple_call_set_fndecl (stmt, node->decl);
-	  cgraph_redirect_edge_callee (callers, node);
-	  if (dump_file)
-	    {
-	      fprintf (dump_file, "redirected edge to");
-	      print_generic_expr (dump_file, node->decl, 0);
-	      fputc ('\n', dump_file);
-	    }
-
-	  state->subcode |= GTMA_HAVE_CALL_TM;
-	  add_stmt_to_transaction (state, stmt);
-	  return;
-	}
+      *state |= GTMA_HAVE_ABORT;
+      return;
     }
 
-  /* The function was not const, tm_pure, or redirected to a 
-     transactional clone.  The call is therefore considered to
-     be irrevokable.  */
-  state->subcode |= GTMA_HAVE_CALL_IRREVOKABLE;
-}
-
-/* Remove any calls to __tm_commit inside STATE which belong
-   to the transaction.  */
-
-static void
-remove_tm_commits (struct ltm_state *state)
-{
-  gimple_stmt_iterator *gsi;
-  unsigned i;
-
-  for (i = 0;
-       VEC_iterate(gimple_stmt_iterator, state->commit_stmts, i, gsi);
-       ++i)
-    gsi_remove (gsi, true);
+  /* At this point pass_ipa_tm has not run, so no transactional
+     clones exist yet; there's no point in looking for them.  */
+  *state |= GTMA_HAVE_CALL_IRREVOKABLE;
 }
 
 /* Lower a GIMPLE_TM_ATOMIC statement.  The GSI is advanced.  */
 
 static void
-lower_tm_atomic (struct ltm_state *outer_state, gimple_stmt_iterator *gsi)
+lower_tm_atomic (unsigned int *outer_state, gimple_stmt_iterator *gsi)
 {
-  gimple stmt = gsi_stmt (*gsi);
-  struct ltm_state this_state;
-  struct eh_region *eh_region;
-  tree label;
-
-  this_state.subcode = 0;
-  this_state.region_nr = lookup_stmt_eh_region (stmt);
-  this_state.commit_stmts = VEC_alloc(gimple_stmt_iterator, heap, 1);
-
-  gcc_assert (this_state.region_nr >= 0);
-  eh_region = get_eh_region_from_number (this_state.region_nr);
+  gimple g, stmt = gsi_stmt (*gsi);
+  unsigned int this_state = 0;
 
   /* First, lower the body.  The scanning that we do inside gives
      us some idea of what we're dealing with.  */
@@ -346,57 +212,45 @@ lower_tm_atomic (struct ltm_state *outer
   /* If there was absolutely nothing transaction related inside the
      transaction, we may elide it.  Likewise if this is a nested
      transaction and does not contain an abort.  */
-  if (this_state.subcode == 0
-      || (!(this_state.subcode & GTMA_HAVE_ABORT)
-	  && outer_state != NULL))
+  if (this_state == 0
+      || (!(this_state & GTMA_HAVE_ABORT) && outer_state != NULL))
     {
       if (outer_state)
-	outer_state->subcode |= this_state.subcode;
+	*outer_state |= this_state;
 
-      remove_tm_commits (&this_state);
       gsi_insert_seq_before (gsi, gimple_seq_body (stmt), GSI_SAME_STMT);
       gimple_seq_set_body (stmt, NULL);
       gsi_remove (gsi, true);
-      remove_eh_handler (eh_region);
-      goto fini;
+      return;
     }
 
-  /* Insert an EH_LABEL immediately after the GIMPLE_TM_ATOMIC node.
-     This label won't really be used, but it mimicks the effect of 
-     the setjmp/longjmp that's going on behind the scenes.  */
-  label = create_artificial_label ();
-  set_eh_region_tree_label (eh_region, label);
-  gsi_insert_after (gsi, gimple_build_label (label), GSI_CONTINUE_LINKING);
-
-  /* Insert the entire transaction sequence.  */
-  gsi_insert_seq_after (gsi, gimple_seq_body (stmt), GSI_CONTINUE_LINKING);
-  gimple_seq_set_body (stmt, NULL);
-
-  /* Record a label at the end of the transaction that will be used in
-     case the transaction aborts.  */
-  if (this_state.subcode & GTMA_HAVE_ABORT)
+  /* Wrap the body of the transaction in a try-finally node so that
+     the commit is always called on any exit.  */
+  g = gimple_build_call (built_in_decls[BUILT_IN_TM_COMMIT], 0);
+  g = gimple_build_try (gimple_seq_body (stmt),
+			gimple_seq_alloc_with_stmt (g), GIMPLE_TRY_FINALLY);
+  gimple_seq_set_body (stmt, gimple_seq_alloc_with_stmt (g));
+
+  /* If the transaction calls abort, add an "over" label afterwards.  */
+  if (this_state & GTMA_HAVE_ABORT)
     {
-      label = create_artificial_label ();
+      tree label = create_artificial_label ();
       gimple_tm_atomic_set_label (stmt, label);
       gsi_insert_after (gsi, gimple_build_label (label), GSI_CONTINUE_LINKING);
     }
 
-  /* Record the set of operations found for use during final lowering
-     of the GIMPLE_TM_ATOMIC node.  */
-  gimple_tm_atomic_set_subcode (stmt, this_state.subcode);
+  /* Record the set of operations found for use later.  */
+  gimple_tm_atomic_set_subcode (stmt, this_state);
 
   /* Always update the iterator.  */
   gsi_next (gsi);
-
- fini:
-  VEC_free (gimple_stmt_iterator, heap, this_state.commit_stmts);
 }
 
 /* Iterate through the statements in the sequence, lowering them all
    as appropriate for being in a transaction.  */
 
 static void
-lower_sequence_tm (struct ltm_state *state, gimple_seq seq)
+lower_sequence_tm (unsigned int *state, gimple_seq seq)
 {
   gimple_stmt_iterator gsi;
 
@@ -408,11 +262,15 @@ lower_sequence_tm (struct ltm_state *sta
 	case GIMPLE_ASSIGN:
 	  /* Only memory reads/writes need to be instrumented.  */
 	  if (gimple_assign_single_p (stmt))
-	    lower_assign_tm (state, &gsi);
+	    examine_assign_tm (state, &gsi);
 	  break;
 
 	case GIMPLE_CALL:
-	  lower_call_tm (state, &gsi);
+	  examine_call_tm (state, &gsi);
+	  break;
+
+	case GIMPLE_ASM:
+	  *state |= GTMA_HAVE_CALL_IRREVOKABLE;
 	  break;
 
 	case GIMPLE_TM_ATOMIC:
@@ -423,8 +281,8 @@ lower_sequence_tm (struct ltm_state *sta
 	  break;
 	}
       gsi_next (&gsi);
-    no_update:
-      ;
+
+    no_update:;
     }
 }
 
@@ -451,20 +309,11 @@ lower_sequence_no_tm (gimple_seq seq)
 static unsigned int
 execute_lower_tm (void)
 {
-  /* Functions that are marked TM_PURE don't need annotation by definition.  */
-  /* ??? The Intel OOPSLA paper talks about performing the same scan of the
-     function as we would if the function was marked DECL_IS_TM_CLONE, and
-     warning if we find anything for which we would have made a change.  */
-  if (DECL_IS_TM_PURE (current_function_decl))
-    return 0;
-
-  /* When instrumenting a transactional clone, we begin the function inside
+  /* When lowering a transactional clone, we begin the function inside
      a transaction.  */
   if (DECL_IS_TM_CLONE (current_function_decl))
     {
-      struct ltm_state state;
-      state.subcode = 0;
-      state.region_nr = -1;
+      unsigned state = 0;
       lower_sequence_tm (&state, gimple_body (current_function_decl));
     }
   else
@@ -473,8 +322,6 @@ execute_lower_tm (void)
   return 0;
 }
 
-/* TM expansion -- the default pass, run before creation of SSA form.  */
-
 static bool
 gate_tm (void)
 {
@@ -492,7 +339,7 @@ struct gimple_opt_pass pass_lower_tm =
   NULL,					/* next */
   0,					/* static_pass_number */
   0,					/* tv_id */
-  PROP_gimple_leh,			/* properties_required */
+  PROP_gimple_lcf,			/* properties_required */
   0,			                /* properties_provided */
   0,					/* properties_destroyed */
   0,					/* todo_flags_start */
@@ -500,6 +347,604 @@ struct gimple_opt_pass pass_lower_tm =
  }
 };
 \f
+/* Return true if STMT may alter control flow via a transactional edge.  */
+
+bool
+is_transactional_stmt (const_gimple stmt)
+{
+  switch (gimple_code (stmt))
+    {
+    case GIMPLE_CALL:
+      return (gimple_call_flags (stmt) & ECF_TM_OPS) != 0;
+    case GIMPLE_TM_ATOMIC:
+      return true;
+    default:
+      return false;
+    }
+}
+\f
+/* Collect region information for each transaction.  */
+
+struct tm_region
+{
+  /* Link to the next unnested transaction.  */
+  struct tm_region *next;
+
+  /* The GIMPLE_TM_ATOMIC statement beginning this transaction.  */
+  gimple tm_atomic_stmt;
+
+  /* The entry block to this region.  */
+  basic_block entry_block;
+
+  /* The set of all blocks that end the region; NULL if only EXIT_BLOCK.  */
+  bitmap exit_blocks;
+
+  /* The EH region number assigned to this transaction.  */
+  int region_nr;
+};
+
+static struct tm_region *all_tm_regions;
+static bitmap_obstack tm_obstack;
+
+
+/* A subroutine of gate_tm_init, callback via for_each_tm_atomic.
+   Record the existence of the GIMPLE_TM_ATOMIC statement in a linked
+   list of tm_region elements.  */
+
+static void
+tm_region_init_1 (gimple stmt, void *xdata)
+{
+  struct tm_region **pptr = (struct tm_region **) xdata;
+  struct tm_region *region;
+  basic_block bb = gimple_bb (stmt);
+
+  /* ??? Verify that the statement (and the block) haven't been deleted.  */
+  gcc_assert (bb != NULL);
+  gcc_assert (gimple_code (stmt) == GIMPLE_TM_ATOMIC);
+
+  region = (struct tm_region *)
+    obstack_alloc (&tm_obstack.obstack, sizeof (struct tm_region));
+  region->next = *pptr;
+  region->tm_atomic_stmt = stmt;
+
+  /* There are either one or two edges out of the block containing
+     the GIMPLE_TM_ATOMIC, one to the actual region and one to the
+     "over" label if the region contains an abort.  The former will
+     always be the one marked FALLTHRU.  */
+  region->entry_block = FALLTHRU_EDGE (bb)->dest;
+
+  region->exit_blocks = BITMAP_ALLOC (&tm_obstack);
+  region->region_nr = lookup_stmt_eh_region (stmt);
+
+  *pptr = region;
+}
+
+
+/* The "gate" function for all transactional memory expansion and optimization
+   passes.  We collect region information for each top-level transaction, and
+   if we don't find any, we skip all of the TM passes.  Each region will have
+   all of the exit blocks recorded, and the originating statement.  */
+
+static bool
+gate_tm_init (void)
+{
+  struct tm_region *region;
+  VEC (basic_block, heap) *queue;
+
+  if (!flag_tm)
+    return false;
+
+  calculate_dominance_info (CDI_DOMINATORS);
+  bitmap_obstack_initialize (&tm_obstack);
+
+  /* If the function is a TM_CLONE, then the entire function is the region.  */
+  if (DECL_IS_TM_CLONE (current_function_decl))
+    {
+      region = (struct tm_region *)
+	obstack_alloc (&tm_obstack.obstack, sizeof (struct tm_region));
+      region->next = NULL;
+      region->tm_atomic_stmt = NULL;
+      region->entry_block = ENTRY_BLOCK_PTR;
+      region->exit_blocks = NULL;
+      region->region_nr = -1;
+
+      return true;
+    }
+
+  /* Find each GIMPLE_TM_ATOMIC statement.  This data is stored
+     in the exception handling tables, so it's quicker to get
+     it out that way than to search the function directly.  */
+  for_each_tm_atomic (false, tm_region_init_1, &all_tm_regions);
+
+  /* If we didn't find any regions, clean up and skip the whole tree
+     of tm-related optimizations.  */
+  if (all_tm_regions == NULL)
+    {
+      bitmap_obstack_release (&tm_obstack);
+      return false;
+    }
+
+  queue = VEC_alloc (basic_block, heap, 10);
+
+  /* Find the exit blocks for each region.  */
+  for (region = all_tm_regions; region ; region = region->next)
+    {
+      basic_block bb;
+      gimple_stmt_iterator gsi;
+
+      VEC_quick_push (basic_block, queue, region->entry_block);
+      do
+	{
+	  bb = VEC_pop (basic_block, queue);
+
+	  /* Check to see if this is the end of the region by
+	     seeing if it ends in a call to __tm_commit.  */
+	  for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
+	    {
+	      gimple g = gsi_stmt (gsi);
+	      if (gimple_code (g) == GIMPLE_CALL)
+		{
+		  tree fn = gimple_call_fndecl (g);
+		  if (fn && DECL_BUILT_IN_CLASS (fn) == BUILT_IN_NORMAL
+		      && DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_COMMIT
+		      && lookup_stmt_eh_region (g) == region->region_nr)
+		    {
+		      bitmap_set_bit (region->exit_blocks, bb->index);
+		      goto skip;
+		    }
+		}
+	    }
+
+	  for (bb = first_dom_son (CDI_DOMINATORS, bb);
+	       bb;
+	       bb = next_dom_son (CDI_DOMINATORS, bb))
+	    VEC_safe_push (basic_block, heap, queue, bb);
+
+	skip:;
+	}
+      while (!VEC_empty (basic_block, queue));
+    }
+
+  VEC_free (basic_block, heap, queue);
+
+  return true;
+}
+
+/* Free the transactional memory data structures.  */
+
+static unsigned int
+execute_tm_done (void)
+{
+  bitmap_obstack_release (&tm_obstack);
+  free_dominance_info (CDI_DOMINATORS);
+
+  return 0;
+}
+
+struct gimple_opt_pass pass_tm_init =
+{
+ {
+  GIMPLE_PASS,
+  "tminit",				/* name */
+  gate_tm_init,				/* gate */
+  NULL,					/* execute */
+  NULL,					/* sub */
+  NULL,					/* next */
+  0,					/* static_pass_number */
+  0,					/* tv_id */
+  PROP_ssa | PROP_cfg,			/* properties_required */
+  0,			                /* properties_provided */
+  0,					/* properties_destroyed */
+  0,					/* todo_flags_start */
+  0,					/* todo_flags_finish */
+ }
+};
+
+struct gimple_opt_pass pass_tm_done =
+{
+ {
+  GIMPLE_PASS,
+  "tmdone",				/* name */
+  NULL,					/* gate */
+  execute_tm_done,			/* execute */
+  NULL,					/* sub */
+  NULL,					/* next */
+  0,					/* static_pass_number */
+  0,					/* tv_id */
+  PROP_ssa | PROP_cfg,			/* properties_required */
+  0,			                /* properties_provided */
+  0,					/* properties_destroyed */
+  0,					/* todo_flags_start */
+  0,					/* todo_flags_finish */
+ }
+};
+\f
+/* Add FLAGS to the GIMPLE_TM_ATOMIC subcode for the transaction region
+   represented by STATE.  */
+
+static inline void
+tm_atomic_subcode_ior (struct tm_region *state, unsigned flags)
+{
+  if (state->tm_atomic_stmt)
+    gimple_tm_atomic_set_subcode (state->tm_atomic_stmt,
+      gimple_tm_atomic_subcode (state->tm_atomic_stmt) | flags);
+}
+
+
+/* Construct a call to TM_IRREVOKABLE and insert it before GSI.  */
+
+static void
+expand_irrevokable (struct tm_region *state, gimple_stmt_iterator *gsi)
+{
+  gimple g;
+
+  tm_atomic_subcode_ior (state, GTMA_HAVE_CALL_IRREVOKABLE);
+
+  g = gimple_build_call (built_in_decls[BUILT_IN_TM_IRREVOKABLE], 0);
+  add_stmt_to_eh_region (g, state->region_nr);
+
+  gsi_insert_before (gsi, g, GSI_SAME_STMT);
+}
+
+
+/* Construct a memory load in a transactional context.  */
+
+static tree
+build_tm_load (tree lhs, tree rhs)
+{
+  enum built_in_function code = END_BUILTINS;
+  tree t, type = TREE_TYPE (rhs);
+
+  if (type == float_type_node)
+    code = BUILT_IN_TM_LOAD_FLOAT;
+  else if (type == double_type_node)
+    code = BUILT_IN_TM_LOAD_DOUBLE;
+  else if (TYPE_SIZE_UNIT (type) != NULL
+	   && host_integerp (TYPE_SIZE_UNIT (type), 1))
+    {
+      switch (tree_low_cst (TYPE_SIZE_UNIT (type), 1))
+	{
+	case 1:
+	  code = BUILT_IN_TM_LOAD_1;
+	  break;
+	case 2:
+	  code = BUILT_IN_TM_LOAD_2;
+	  break;
+	case 4:
+	  code = BUILT_IN_TM_LOAD_4;
+	  break;
+	case 8:
+	  code = BUILT_IN_TM_LOAD_8;
+	  break;
+	}
+    }
+
+  if (code == END_BUILTINS)
+    {
+      sorry ("transactional load for %T not supported", type);
+      code = BUILT_IN_TM_LOAD_4;
+    }
+
+  t = built_in_decls[code];
+  t = build_call_expr (t, 1, build_fold_addr_expr (rhs));
+  if (TYPE_MAIN_VARIANT (TREE_TYPE (t)) != TYPE_MAIN_VARIANT (type))
+    t = build1 (VIEW_CONVERT_EXPR, type, t);
+  t = build2 (MODIFY_EXPR, TREE_TYPE (lhs), lhs, t);
+
+  return t;
+}
+
+
+/* Similarly for storing TYPE in a transactional context.  */
+
+static tree
+build_tm_store (tree lhs, tree rhs)
+{
+  enum built_in_function code = END_BUILTINS;
+  tree t, fn, type = TREE_TYPE (rhs), simple_type;
+
+  if (type == float_type_node)
+    code = BUILT_IN_TM_STORE_FLOAT;
+  else if (type == double_type_node)
+    code = BUILT_IN_TM_STORE_DOUBLE;
+  else if (TYPE_SIZE_UNIT (type) != NULL
+	   && host_integerp (TYPE_SIZE_UNIT (type), 1))
+    {
+      switch (tree_low_cst (TYPE_SIZE_UNIT (type), 1))
+	{
+	case 1:
+	  code = BUILT_IN_TM_STORE_1;
+	  break;
+	case 2:
+	  code = BUILT_IN_TM_STORE_2;
+	  break;
+	case 4:
+	  code = BUILT_IN_TM_STORE_4;
+	  break;
+	case 8:
+	  code = BUILT_IN_TM_STORE_8;
+	  break;
+	}
+    }
+
+  if (code == END_BUILTINS)
+    {
+      sorry ("transactional store for %T not supported", type);
+      code = BUILT_IN_TM_STORE_4;
+    }
+
+  fn = built_in_decls[code];
+  simple_type = TREE_VALUE (TREE_CHAIN (TYPE_ARG_TYPES (TREE_TYPE (fn))));
+  if (TYPE_MAIN_VARIANT (simple_type) != TYPE_MAIN_VARIANT (type))
+    rhs = build1 (VIEW_CONVERT_EXPR, simple_type, rhs);
+  t = build_call_expr (fn, 2, build_fold_addr_expr (lhs), rhs);
+
+  return t;
+}
+
+
+/* Expand an assignment statement into transactional builtins.  */
+
+static void
+expand_assign_tm (struct tm_region *state, gimple_stmt_iterator *gsi)
+{
+  gimple stmt = gsi_stmt (*gsi);
+  tree lhs = gimple_assign_lhs (stmt);
+  tree rhs = gimple_assign_rhs1 (stmt);
+  bool store_p = requires_barrier (lhs);
+  bool load_p = requires_barrier (rhs);
+  tree call;
+  gimple_seq seq;
+  gimple gcall;
+  gimple_stmt_iterator gsi2;
+  struct gimplify_ctx gctx;
+
+  if (load_p && store_p)
+    {
+      tm_atomic_subcode_ior (state, GTMA_HAVE_LOAD | GTMA_HAVE_STORE);
+      call = build_call_expr (built_in_decls [BUILT_IN_TM_MEMCPY],
+			      3, build_fold_addr_expr (lhs),
+			      build_fold_addr_expr (rhs),
+			      TYPE_SIZE_UNIT (TREE_TYPE (lhs)));
+    }
+  else if (load_p)
+    {
+      tm_atomic_subcode_ior (state, GTMA_HAVE_LOAD);
+      call = build_tm_load (lhs, rhs);
+    }
+  else if (store_p)
+    {
+      tm_atomic_subcode_ior (state, GTMA_HAVE_STORE);
+      call = build_tm_store (lhs, rhs);
+    }
+  else
+    return;
+
+  push_gimplify_context (&gctx);
+  gctx.into_ssa = false;
+
+  seq = NULL;
+  gimplify_and_add (call, &seq);
+
+  pop_gimplify_context (NULL);
+
+  for (gsi2 = gsi_last (seq); ; gsi_prev (&gsi2))
+    {
+      gcall = gsi_stmt (gsi2);
+      if (gimple_code (gcall) == GIMPLE_CALL)
+	break;
+    }
+
+  add_stmt_to_eh_region (gcall, state->region_nr);
+  gsi_insert_seq_before (gsi, seq, GSI_SAME_STMT);
+  gsi_remove (gsi, true);
+}
+
+
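+/* Return the transactional clone of ORIG_DECL, if any.  A stub for
+   now: pass_ipa_tm does not yet create clones, so none can be found.  */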
+static tree
+find_tm_clone (tree orig_decl ATTRIBUTE_UNUSED)
+{
+  return NULL_TREE;
+}
+
+/* Expand a call statement as appropriate for a transaction.  That is,
+   either verify that the call does not affect the transaction, or
+   redirect the call to a clone that handles transactions, or change
+   the transaction state to IRREVOKABLE.  Return true if the call is
+   one of the builtins that end a transaction.  */
+
+static bool
+expand_call_tm (struct tm_region *state, gimple_stmt_iterator *gsi)
+{
+  gimple stmt = gsi_stmt (*gsi);
+  tree fn_decl;
+  unsigned flags;
+
+  flags = gimple_call_flags (stmt);
+  if (flags & ECF_CONST)
+    return false;
+
+  if (flag_exceptions && !(flags & ECF_NOTHROW))
+    tm_atomic_subcode_ior (state, GTMA_HAVE_UNCOMMITTED_THROW);
+
+  fn_decl = gimple_call_fndecl (stmt);
+  if (!fn_decl)
+    {
+      /* ??? The ABI under discussion has us calling into the runtime
+	 to determine if there's a transactional version of this function.
+	 For now, just switch to irrevokable mode.  */
+      expand_irrevokable (state, gsi);
+      return false;
+    }
+
+  if (DECL_IS_TM_PURE (fn_decl))
+    return false;
+
+  if (DECL_BUILT_IN_CLASS (fn_decl) == BUILT_IN_NORMAL)
+    {
+      /* ??? TM_COMMIT in a nested transaction has an abnormal edge back to
+	 the outer-most transaction (there are no nested retries), while
+	 a TM_ABORT has an abnormal backedge to the inner-most transaction.
+	 We haven't actually saved the inner-most transaction here.  We should
+	 be able to get to it via the region_nr saved on STMT, and read the
+	 tm_atomic_stmt from that, and find the first region block from there.
+	 This assumes we don't expand GIMPLE_TM_ATOMIC until after all other
+	 statements have been expanded.  */
+      switch (DECL_FUNCTION_CODE (fn_decl))
+	{
+	case BUILT_IN_TM_COMMIT:
+	case BUILT_IN_TM_ABORT:
+	  /* Both of these calls end a transaction.  */
+	  if (lookup_stmt_eh_region (stmt) == state->region_nr)
+	    return true;
+
+	default:
+	  break;
+	}
+
+      return false;
+    }
+
+  fn_decl = find_tm_clone (fn_decl);
+  if (fn_decl)
+    {
+      gimple_call_set_fndecl (stmt, fn_decl);
+      return false;
+    }
+
+  expand_irrevokable (state, gsi);
+  return false;
+}
+
+
+/* Expand all statements in BB as appropriate for being inside
+   a transaction.  */
+static void
+expand_block_tm (struct tm_region *state, basic_block bb)
+{
+  gimple_stmt_iterator gsi;
+
+  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
+    {
+      gimple stmt = gsi_stmt (gsi);
+      switch (gimple_code (stmt))
+	{
+	case GIMPLE_ASSIGN:
+	  /* Only memory reads/writes need to be instrumented.  */
+	  if (gimple_assign_single_p (stmt))
+	    {
+	      expand_assign_tm (state, &gsi);
+	      continue;
+	    }
+	  break;
+
+	case GIMPLE_CALL:
+	  if (expand_call_tm (state, &gsi))
+	    return;
+	  break;
+
+	case GIMPLE_ASM:
+	  expand_irrevokable (state, &gsi);
+	  break;
+
+	default:
+	  break;
+	}
+      gsi_next (&gsi);
+    }
+}
+
+/* Entry point to the MARK phase of TM expansion.  Here we replace
+   transactional memory statements with calls to builtins, and function
+   calls with their transactional clones (if available).  But we don't
+   yet lower GIMPLE_TM_ATOMIC or add the transaction restart back-edges.  */
+
+static unsigned int
+execute_tm_mark (void)
+{
+  struct tm_region *region;
+  basic_block bb;
+  VEC (basic_block, heap) *queue;
+
+  queue = VEC_alloc (basic_block, heap, 10);
+
+  for (region = all_tm_regions; region ; region = region->next)
+    if (region->exit_blocks)
+      {
+	/* Collect a new SUBCODE set, now that optimizations are done.  */
+	gimple_tm_atomic_set_subcode (region->tm_atomic_stmt, 0);
+
+	VEC_quick_push (basic_block, queue, region->entry_block);
+	do
+	  {
+	    bb = VEC_pop (basic_block, queue);
+	    expand_block_tm (region, bb);
+
+	    if (!bitmap_bit_p (region->exit_blocks, bb->index))
+	      for (bb = first_dom_son (CDI_DOMINATORS, bb);
+		   bb;
+		   bb = next_dom_son (CDI_DOMINATORS, bb))
+		VEC_safe_push (basic_block, heap, queue, bb);
+	  }
+	while (!VEC_empty (basic_block, queue));
+      }
+    else
+      {
+	FOR_EACH_BB (bb)
+	  expand_block_tm (region, bb);
+      }
+
+  VEC_free (basic_block, heap, queue);
+
+  return 0;
+}
+
+struct gimple_opt_pass pass_tm_mark =
+{
+ {
+  GIMPLE_PASS,
+  "tmmark",				/* name */
+  NULL,					/* gate */
+  execute_tm_mark,			/* execute */
+  NULL,					/* sub */
+  NULL,					/* next */
+  0,					/* static_pass_number */
+  0,					/* tv_id */
+  PROP_ssa | PROP_cfg,			/* properties_required */
+  0,			                /* properties_provided */
+  0,					/* properties_destroyed */
+  0,					/* todo_flags_start */
+  TODO_update_ssa
+  | TODO_verify_ssa
+  | TODO_dump_func,			/* todo_flags_finish */
+ }
+};
+\f
+/* Create an abnormal call edge from BB to the first block of the region
+   represented by STATE.  */
+
+static inline void
+make_tm_edge (basic_block bb, struct tm_region *state)
+{
+  make_edge (bb, state->entry_block, EDGE_ABNORMAL | EDGE_ABNORMAL_CALL);
+}
+
+
+/* Split the block at GSI and create an abnormal back edge.  */
+
+static void ATTRIBUTE_UNUSED
+split_and_add_tm_edge (struct tm_region *state, gimple_stmt_iterator *gsi)
+{
+  basic_block bb = gsi->bb;
+  if (!gsi_one_before_end_p (*gsi))
+    {
+      edge e = split_block (bb, gsi_stmt (*gsi));
+      *gsi = gsi_start_bb (e->dest);
+    }
+
+  make_tm_edge (bb, state);
+}
+
 
 /* ??? Find real values for these bits.  */
 #define TM_START_RESTORE_LIVE_IN	1
@@ -516,7 +961,7 @@ struct gimple_opt_pass pass_lower_tm =
    and this isn't necessarily the most efficient implementation, but it
    is just about the easiest.  */
 
-static void
+static void ATTRIBUTE_UNUSED
 checkpoint_live_in_variables (edge e)
 {
   gimple_stmt_iterator gsi;
@@ -566,7 +1011,7 @@ checkpoint_live_in_variables (edge e)
     }
 }
 
-static void
+static void ATTRIBUTE_UNUSED
 expand_tm_atomic (basic_block bb, gimple_stmt_iterator *gsi)
 {
   tree status, tm_start;
@@ -615,31 +1060,21 @@ expand_tm_atomic (basic_block bb, gimple
   gsi_remove (gsi, true);
 }
 
-/* Entry point to the checkpointing. */
+/* Entry point to the final expansion of transactional nodes. */
 
 static unsigned int
-execute_checkpoint_tm (void)
+execute_tm_edges (void)
 {
-  basic_block bb;
-
-  FOR_EACH_BB (bb)
-    {
-      gimple_stmt_iterator gsi = gsi_last_bb (bb);
-      if (!gsi_end_p (gsi)
-	  && gimple_code (gsi_stmt (gsi)) == GIMPLE_TM_ATOMIC)
-	expand_tm_atomic (bb, &gsi);
-    }
-
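+  /* ??? The code that splits blocks and inserts the transaction-restart
+     back-edges has not been reconnected yet; see the cover note.  */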
   return 0;
 }
 
-struct gimple_opt_pass pass_checkpoint_tm =
+struct gimple_opt_pass pass_tm_edges =
 {
  {
   GIMPLE_PASS,
-  "tmcheckpoint",			/* name */
-  gate_tm,				/* gate */
-  execute_checkpoint_tm,		/* execute */
+  "tmedge",				/* name */
+  gate_tm_init,				/* gate */
+  execute_tm_edges,			/* execute */
   NULL,					/* sub */
   NULL,					/* next */
   0,					/* static_pass_number */
@@ -648,87 +1083,65 @@ struct gimple_opt_pass pass_checkpoint_t
   0,			                /* properties_provided */
   0,					/* properties_destroyed */
   0,					/* todo_flags_start */
-  TODO_update_ssa |
-  TODO_verify_ssa |
-  TODO_dump_func,			/* todo_flags_finish */
+  TODO_verify_ssa | TODO_dump_func,	/* todo_flags_finish */
  }
 };
 \f
-/* Construct transaction restart edges for STMT.  */
 
-static void
-make_tm_edge_1 (struct eh_region *region, void *data)
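+/* Placeholder for the transactional memory must-alias optimization
+   pass; currently a no-op.  */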
+static unsigned int
+execute_tm_memopt (void)
 {
-  gimple stmt = (gimple) data;
-  basic_block src, dst;
-  unsigned flags;
-
-  src = gimple_bb (stmt);
-  dst = label_to_block (get_eh_region_tree_label (region));
-
-  /* Don't set EDGE_EH here, because that's supposed to be used when
-     we could in principal redirect the edge by modifying the exception
-     tables.  Transactions don't use tables though, only setjmp.  */
-  flags = EDGE_ABNORMAL;
-  if (gimple_code (stmt) == GIMPLE_CALL)
-    flags |= EDGE_ABNORMAL_CALL;
-  make_edge (src, dst, flags);
+  return 0;
 }
 
-void
-make_tm_edge (gimple stmt)
+static bool
+gate_tm_memopt (void)
 {
-  int region_nr;
-
-  /* Do nothing if the region is outside this function.  */
-  region_nr = lookup_stmt_eh_region (stmt);
-  if (region_nr < 0)
-    return;
-
-  /* The control structure inside tree-cfg.c isn't the best;
-     re-check whether this is actually a transactional stmt.  */
-  if (!is_transactional_stmt (stmt))
-    return;
-
-  foreach_reachable_transaction (region_nr, make_tm_edge_1, (void *) stmt);
+  return optimize > 0;
 }
 
-/* Return true if STMT may alter control flow via a transactional edge.  */
-
-bool
-is_transactional_stmt (const_gimple stmt)
+struct gimple_opt_pass pass_tm_memopt =
 {
-  switch (gimple_code (stmt))
-    {
-    case GIMPLE_ASSIGN:
-      {
-	/* We only want to process assignments that have been
-	   marked for transactional memory.  */
-	enum tree_code subcode = gimple_expr_code (stmt);
-	return (subcode == TM_LOAD || subcode == TM_STORE);
-      }
-
-    case GIMPLE_CALL:
-      {
-	tree fn_decl = gimple_call_fndecl (stmt);
-
-	/* We only want to process __tm_abort and cloned direct calls,
-	   since those are the only ones that can restart the transaction.  */
-	if (!fn_decl)
-	  return false;
-	if (DECL_BUILT_IN_CLASS (fn_decl) == BUILT_IN_NORMAL
-	    && DECL_FUNCTION_CODE (fn_decl) == BUILT_IN_TM_ABORT)
-	  return true;
-	if (DECL_IS_TM_CLONE (fn_decl))
-	  return true;
-	else
-	  return false;
-      }
+ {
+  GIMPLE_PASS,
+  "tmmemopt",				/* name */
+  gate_tm_memopt,			/* gate */
+  execute_tm_memopt,			/* execute */
+  NULL,					/* sub */
+  NULL,					/* next */
+  0,					/* static_pass_number */
+  0,					/* tv_id */
+  PROP_ssa | PROP_cfg,			/* properties_required */
+  0,			                /* properties_provided */
+  0,					/* properties_destroyed */
+  0,					/* todo_flags_start */
+  TODO_dump_func,			/* todo_flags_finish */
+ }
+};
 
-    case GIMPLE_TM_ATOMIC:
-      return true;
+\f
 
-    default:
-      return false;
-    }
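+/* Placeholder for the IPA pass that will examine GIMPLE_TM_ATOMIC
+   blocks in all functions and mark functions for cloning.  */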
+static unsigned int
+execute_ipa_tm (void)
+{
+  return 0;
 }
+
+struct simple_ipa_opt_pass pass_ipa_tm =
+{
+ {
+  SIMPLE_IPA_PASS,
+  "tmipa",				/* name */
+  gate_tm,				/* gate */
+  execute_ipa_tm,			/* execute */
+  NULL,					/* sub */
+  NULL,					/* next */
+  0,					/* static_pass_number */
+  0,					/* tv_id */
+  PROP_ssa | PROP_cfg,			/* properties_required */
+  0,			                /* properties_provided */
+  0,					/* properties_destroyed */
+  0,					/* todo_flags_start */
+  0,					/* todo_flags_finish */
+ }
+};
--- tree-cfg.c	(revision 141360)
+++ tree-cfg.c	(local)
@@ -514,10 +514,6 @@ make_edges (void)
 		 create abnormal edges to them.  */
 	      make_eh_edges (last);
 
-	      /* If this statement calls a transaction clone,
-		 add transactional restart edges.  */
-	      make_tm_edge (last);
-
 	      /* Some calls are known not to return.  */
 	      fallthru = !(gimple_call_flags (last) & ECF_NORETURN);
 	      break;
@@ -526,10 +522,7 @@ make_edges (void)
 	       /* A GIMPLE_ASSIGN may throw internally and thus be considered
 		  control-altering. */
 	      if (is_ctrl_altering_stmt (last))
-		{
-		  make_eh_edges (last);
-		  make_tm_edge (last);
-		}
+		make_eh_edges (last);
 	      fallthru = true;
 	      break;
 
@@ -554,10 +547,10 @@ make_edges (void)
 	      fallthru = false;
 	      break;
 
-            case GIMPLE_OMP_ATOMIC_LOAD:
-            case GIMPLE_OMP_ATOMIC_STORE:
-               fallthru = true;
-               break;
+	    case GIMPLE_OMP_ATOMIC_LOAD:
+	    case GIMPLE_OMP_ATOMIC_STORE:
+	       fallthru = true;
+	       break;
 
 	    case GIMPLE_OMP_RETURN:
 	      /* In the case of a GIMPLE_OMP_SECTION, the edge will go
@@ -1038,7 +1031,7 @@ cleanup_dead_labels (void)
 	/* We have to handle gotos until they're removed, and we don't
 	   remove them until after we've created the CFG edges.  */
 	case GIMPLE_GOTO:
-          if (!computed_goto_p (stmt))
+	  if (!computed_goto_p (stmt))
 	    {
 	      label = gimple_goto_dest (stmt);
 	      new_label = main_block_label (label);
@@ -1302,7 +1295,7 @@ replace_uses_by (tree name, tree val)
 	push_stmt_changes (&stmt);
 
       FOR_EACH_IMM_USE_ON_STMT (use, imm_iter)
-        {
+	{
 	  replace_exp (use, val);
 
 	  if (gimple_code (stmt) == GIMPLE_PHI)
@@ -1331,9 +1324,9 @@ replace_uses_by (tree name, tree val)
 	  for (i = 0; i < gimple_num_ops (stmt); i++)
 	    {
 	      tree op = gimple_op (stmt, i);
-              /* Operands may be empty here.  For example, the labels
-                 of a GIMPLE_COND are nulled out following the creation
-                 of the corresponding CFG edges.  */
+	      /* Operands may be empty here.  For example, the labels
+	         of a GIMPLE_COND are nulled out following the creation
+	         of the corresponding CFG edges.  */
 	      if (op && TREE_CODE (op) == ADDR_EXPR)
 		recompute_tree_invariant_for_addr_expr (op);
 	    }
@@ -1400,10 +1393,10 @@ gimple_merge_blocks (basic_block a, basi
 	     appear as arguments of the phi nodes.  */
 	  copy = gimple_build_assign (def, use);
 	  gsi_insert_after (&gsi, copy, GSI_NEW_STMT);
-          remove_phi_node (&psi, false);
+	  remove_phi_node (&psi, false);
 	}
       else
-        {
+	{
 	  /* If we deal with a PHI for virtual operands, we can simply
 	     propagate these without fussing with folding or updating
 	     the stmt.  */
@@ -1418,10 +1411,10 @@ gimple_merge_blocks (basic_block a, basi
 		  SET_USE (use_p, use);
 	    }
 	  else
-            replace_uses_by (def, use);
+	    replace_uses_by (def, use);
 
-          remove_phi_node (&psi, true);
-        }
+	  remove_phi_node (&psi, true);
+	}
     }
 
   /* Ensure that B follows A.  */
@@ -1537,39 +1530,39 @@ remove_useless_stmts_warn_notreached (gi
       gimple stmt = gsi_stmt (gsi);
 
       if (gimple_has_location (stmt))
-        {
-          location_t loc = gimple_location (stmt);
-          if (LOCATION_LINE (loc) > 0)
-	    {
-              warning (OPT_Wunreachable_code, "%Hwill never be executed", &loc);
-              return true;
-            }
-        }
+	{
+	  location_t loc = gimple_location (stmt);
+	  if (LOCATION_LINE (loc) > 0)
+	    {
+	      warning (OPT_Wunreachable_code, "%Hwill never be executed", &loc);
+	      return true;
+	    }
+	}
 
       switch (gimple_code (stmt))
-        {
-        /* Unfortunately, we need the CFG now to detect unreachable
-           branches in a conditional, so conditionals are not handled here.  */
-
-        case GIMPLE_TRY:
-          if (remove_useless_stmts_warn_notreached (gimple_try_eval (stmt)))
-            return true;
-          if (remove_useless_stmts_warn_notreached (gimple_try_cleanup (stmt)))
-            return true;
-          break;
-
-        case GIMPLE_CATCH:
-          return remove_useless_stmts_warn_notreached (gimple_catch_handler (stmt));
-
-        case GIMPLE_EH_FILTER:
-          return remove_useless_stmts_warn_notreached (gimple_eh_filter_failure (stmt));
-
-        case GIMPLE_BIND:
-          return remove_useless_stmts_warn_notreached (gimple_bind_body (stmt));
-
-        default:
-          break;
-        }
+	{
+	/* Unfortunately, we need the CFG now to detect unreachable
+	   branches in a conditional, so conditionals are not handled here.  */
+
+	case GIMPLE_TRY:
+	  if (remove_useless_stmts_warn_notreached (gimple_try_eval (stmt)))
+	    return true;
+	  if (remove_useless_stmts_warn_notreached (gimple_try_cleanup (stmt)))
+	    return true;
+	  break;
+
+	case GIMPLE_CATCH:
+	  return remove_useless_stmts_warn_notreached (gimple_catch_handler (stmt));
+
+	case GIMPLE_EH_FILTER:
+	  return remove_useless_stmts_warn_notreached (gimple_eh_filter_failure (stmt));
+
+	case GIMPLE_BIND:
+	  return remove_useless_stmts_warn_notreached (gimple_bind_body (stmt));
+
+	default:
+	  break;
+	}
     }
 
   return false;
@@ -1614,11 +1607,11 @@ remove_useless_stmts_cond (gimple_stmt_i
       tree else_label = gimple_cond_false_label (stmt);
 
       if (then_label == else_label)
-        {
-          /* Goto common destination.  */
-          gsi_replace (gsi, gimple_build_goto (then_label), false);
-          data->last_goto_gsi = *gsi;
-          data->last_was_goto = true;
+	{
+	  /* Goto common destination.  */
+	  gsi_replace (gsi, gimple_build_goto (then_label), false);
+	  data->last_goto_gsi = *gsi;
+	  data->last_was_goto = true;
 	  data->repeat = true;
 	}
     }
@@ -1725,9 +1718,9 @@ remove_useless_stmts_tc (gimple_stmt_ite
   if (!this_may_throw)
     {
       if (warn_notreached)
-        {
-          remove_useless_stmts_warn_notreached (cleanup_seq);
-        }
+	{
+	  remove_useless_stmts_warn_notreached (cleanup_seq);
+	}
       gsi_insert_seq_before (gsi, eval_seq, GSI_SAME_STMT);
       gsi_remove (gsi, false);
       data->repeat = true;
@@ -1747,17 +1740,17 @@ remove_useless_stmts_tc (gimple_stmt_ite
     case GIMPLE_CATCH:
       /* If the first element is a catch, they all must be.  */
       while (!gsi_end_p (cleanup_gsi))
-        {
+	{
 	  stmt = gsi_stmt (cleanup_gsi);
 	  /* If we catch all exceptions, then the body does not
 	     propagate exceptions past this point.  */
 	  if (gimple_catch_types (stmt) == NULL)
 	    this_may_throw = false;
 	  data->last_was_goto = false;
-          handler_seq = gimple_catch_handler (stmt);
-          handler_gsi = gsi_start (handler_seq);
+	  handler_seq = gimple_catch_handler (stmt);
+	  handler_gsi = gsi_start (handler_seq);
 	  remove_useless_stmts_1 (&handler_gsi, data);
-          gsi_next (&cleanup_gsi);
+	  gsi_next (&cleanup_gsi);
 	}
       gsi_next (gsi);
       break;
@@ -1782,12 +1775,12 @@ remove_useless_stmts_tc (gimple_stmt_ite
 	 the enclosing TRY_CATCH_EXPR.  */
       if (gimple_seq_empty_p (cleanup_seq))
 	{
-          gsi_insert_seq_before (gsi, eval_seq, GSI_SAME_STMT);
-          gsi_remove(gsi, false);
+	  gsi_insert_seq_before (gsi, eval_seq, GSI_SAME_STMT);
+	  gsi_remove (gsi, false);
 	  data->repeat = true;
 	}
       else
-        gsi_next (gsi);
+	gsi_next (gsi);
       break;
     }
 
@@ -1821,7 +1814,7 @@ remove_useless_stmts_bind (gimple_stmt_i
   fn_body_seq = gimple_body (current_function_decl);
   if (gimple_bind_vars (stmt) == NULL_TREE
       && (gimple_seq_empty_p (fn_body_seq)
-          || stmt != gimple_seq_first_stmt (fn_body_seq))
+	  || stmt != gimple_seq_first_stmt (fn_body_seq))
       && (! block
 	  || ! BLOCK_ABSTRACT_ORIGIN (block)
 	  || (TREE_CODE (BLOCK_ABSTRACT_ORIGIN (block))
@@ -1873,12 +1866,12 @@ remove_useless_stmts_label (gimple_stmt_
     data->last_was_goto = false;
 
   else if (data->last_was_goto
-           && gimple_goto_dest (gsi_stmt (data->last_goto_gsi)) == label)
+	   && gimple_goto_dest (gsi_stmt (data->last_goto_gsi)) == label)
     {
       /* Replace the preceding GIMPLE_GOTO statement with
-         a GIMPLE_NOP, which will be subsequently removed.
-         In this way, we avoid invalidating other iterators
-         active on the statement sequence.  */
+	 a GIMPLE_NOP, which will be subsequently removed.
+	 In this way, we avoid invalidating other iterators
+	 active on the statement sequence.  */
       gsi_replace(&data->last_goto_gsi, gimple_build_nop(), false);
       data->last_was_goto = false;
       data->repeat = true;
@@ -1925,123 +1918,123 @@ remove_useless_stmts_1 (gimple_stmt_iter
       gimple stmt = gsi_stmt (*gsi);
 
       switch (gimple_code (stmt))
-        {
-        case GIMPLE_COND:
-          remove_useless_stmts_cond (gsi, data);
-          break;
-
-        case GIMPLE_GOTO:
-          remove_useless_stmts_goto (gsi, data);
-          break;
-
-        case GIMPLE_LABEL:
-          remove_useless_stmts_label (gsi, data);
-          break;
-
-        case GIMPLE_ASSIGN:
-          fold_stmt (gsi);
-          stmt = gsi_stmt (*gsi);
-          data->last_was_goto = false;
-          if (stmt_could_throw_p (stmt))
-            data->may_throw = true;
-          gsi_next (gsi);
-          break;
-
-        case GIMPLE_ASM:
-          fold_stmt (gsi);
-          data->last_was_goto = false;
-          gsi_next (gsi);
-          break;
-
-        case GIMPLE_CALL:
-          fold_stmt (gsi);
-          stmt = gsi_stmt (*gsi);
-          data->last_was_goto = false;
-          if (is_gimple_call (stmt))
-            notice_special_calls (stmt);
-
-          /* We used to call update_gimple_call_flags here,
-             which copied side-effects and nothrows status
-             from the function decl to the call.  In the new
-             tuplified GIMPLE, the accessors for this information
-             always consult the function decl, so this copying
-             is no longer necessary.  */
-          if (stmt_could_throw_p (stmt))
-            data->may_throw = true;
-          gsi_next (gsi);
-          break;
-
-        case GIMPLE_RETURN:
-          fold_stmt (gsi);
-          data->last_was_goto = false;
-          data->may_branch = true;
-          gsi_next (gsi);
-          break;
-
-        case GIMPLE_BIND:
-          remove_useless_stmts_bind (gsi, data);
-          break;
-
-        case GIMPLE_TRY:
-          if (gimple_try_kind (stmt) == GIMPLE_TRY_CATCH)
-            remove_useless_stmts_tc (gsi, data);
-          else if (gimple_try_kind (stmt) == GIMPLE_TRY_FINALLY)
-            remove_useless_stmts_tf (gsi, data);
-          else
-            gcc_unreachable ();
-          break;
-
-        case GIMPLE_CATCH:
-          gcc_unreachable ();
-          break;
-
-        case GIMPLE_NOP:
-          gsi_remove (gsi, false);
-          break;
-
-        case GIMPLE_OMP_FOR:
-          {
-            gimple_seq pre_body_seq = gimple_omp_for_pre_body (stmt);
-            gimple_stmt_iterator pre_body_gsi = gsi_start (pre_body_seq);
+	{
+	case GIMPLE_COND:
+	  remove_useless_stmts_cond (gsi, data);
+	  break;
+
+	case GIMPLE_GOTO:
+	  remove_useless_stmts_goto (gsi, data);
+	  break;
+
+	case GIMPLE_LABEL:
+	  remove_useless_stmts_label (gsi, data);
+	  break;
+
+	case GIMPLE_ASSIGN:
+	  fold_stmt (gsi);
+	  stmt = gsi_stmt (*gsi);
+	  data->last_was_goto = false;
+	  if (stmt_could_throw_p (stmt))
+	    data->may_throw = true;
+	  gsi_next (gsi);
+	  break;
+
+	case GIMPLE_ASM:
+	  fold_stmt (gsi);
+	  data->last_was_goto = false;
+	  gsi_next (gsi);
+	  break;
+
+	case GIMPLE_CALL:
+	  fold_stmt (gsi);
+	  stmt = gsi_stmt (*gsi);
+	  data->last_was_goto = false;
+	  if (is_gimple_call (stmt))
+	    notice_special_calls (stmt);
+
+	  /* We used to call update_gimple_call_flags here,
+	     which copied side-effects and nothrows status
+	     from the function decl to the call.  In the new
+	     tuplified GIMPLE, the accessors for this information
+	     always consult the function decl, so this copying
+	     is no longer necessary.  */
+	  if (stmt_could_throw_p (stmt))
+	    data->may_throw = true;
+	  gsi_next (gsi);
+	  break;
+
+	case GIMPLE_RETURN:
+	  fold_stmt (gsi);
+	  data->last_was_goto = false;
+	  data->may_branch = true;
+	  gsi_next (gsi);
+	  break;
+
+	case GIMPLE_BIND:
+	  remove_useless_stmts_bind (gsi, data);
+	  break;
+
+	case GIMPLE_TRY:
+	  if (gimple_try_kind (stmt) == GIMPLE_TRY_CATCH)
+	    remove_useless_stmts_tc (gsi, data);
+	  else if (gimple_try_kind (stmt) == GIMPLE_TRY_FINALLY)
+	    remove_useless_stmts_tf (gsi, data);
+	  else
+	    gcc_unreachable ();
+	  break;
+
+	case GIMPLE_CATCH:
+	  gcc_unreachable ();
+	  break;
+
+	case GIMPLE_NOP:
+	  gsi_remove (gsi, false);
+	  break;
+
+	case GIMPLE_OMP_FOR:
+	  {
+	    gimple_seq pre_body_seq = gimple_omp_for_pre_body (stmt);
+	    gimple_stmt_iterator pre_body_gsi = gsi_start (pre_body_seq);
 
-            remove_useless_stmts_1 (&pre_body_gsi, data);
+	    remove_useless_stmts_1 (&pre_body_gsi, data);
 	    data->last_was_goto = false;
-          }
-          /* FALLTHROUGH */
-        case GIMPLE_OMP_CRITICAL:
-        case GIMPLE_OMP_CONTINUE:
-        case GIMPLE_OMP_MASTER:
-        case GIMPLE_OMP_ORDERED:
-        case GIMPLE_OMP_SECTION:
-        case GIMPLE_OMP_SECTIONS:
-        case GIMPLE_OMP_SINGLE:
-          {
-            gimple_seq body_seq = gimple_seq_body (stmt);
-            gimple_stmt_iterator body_gsi = gsi_start (body_seq);
+	  }
+	  /* FALLTHROUGH */
+	case GIMPLE_OMP_CRITICAL:
+	case GIMPLE_OMP_CONTINUE:
+	case GIMPLE_OMP_MASTER:
+	case GIMPLE_OMP_ORDERED:
+	case GIMPLE_OMP_SECTION:
+	case GIMPLE_OMP_SECTIONS:
+	case GIMPLE_OMP_SINGLE:
+	  {
+	    gimple_seq body_seq = gimple_seq_body (stmt);
+	    gimple_stmt_iterator body_gsi = gsi_start (body_seq);
 
-            remove_useless_stmts_1 (&body_gsi, data);
+	    remove_useless_stmts_1 (&body_gsi, data);
 	    data->last_was_goto = false;
 	    gsi_next (gsi);
-          }
-          break;
+	  }
+	  break;
 
-        case GIMPLE_OMP_PARALLEL:
+	case GIMPLE_OMP_PARALLEL:
 	case GIMPLE_OMP_TASK:
-          {
+	  {
 	    /* Make sure the outermost GIMPLE_BIND isn't removed
 	       as useless.  */
-            gimple_seq body_seq = gimple_seq_body (stmt);
-            gimple bind = gimple_seq_first_stmt (body_seq);
-            gimple_seq bind_seq = gimple_bind_body (bind);
-            gimple_stmt_iterator bind_gsi = gsi_start (bind_seq);
+	    gimple_seq body_seq = gimple_seq_body (stmt);
+	    gimple bind = gimple_seq_first_stmt (body_seq);
+	    gimple_seq bind_seq = gimple_bind_body (bind);
+	    gimple_stmt_iterator bind_gsi = gsi_start (bind_seq);
 
-            remove_useless_stmts_1 (&bind_gsi, data);
+	    remove_useless_stmts_1 (&bind_gsi, data);
 	    data->last_was_goto = false;
 	    gsi_next (gsi);
-          }
-          break;
+	  }
+	  break;
 
-        case GIMPLE_CHANGE_DYNAMIC_TYPE:
+	case GIMPLE_CHANGE_DYNAMIC_TYPE:
 	  /* If we do not optimize remove GIMPLE_CHANGE_DYNAMIC_TYPE as
 	     expansion is confused about them and we only remove them
 	     during alias computation otherwise.  */
@@ -2053,11 +2046,11 @@ remove_useless_stmts_1 (gimple_stmt_iter
 	    }
 	  /* Fallthru.  */
 
-        default:
-          data->last_was_goto = false;
-          gsi_next (gsi);
-          break;
-        }
+	default:
+	  data->last_was_goto = false;
+	  gsi_next (gsi);
+	  break;
+	}
     }
 }
 
@@ -2245,9 +2238,9 @@ find_taken_edge (basic_block bb, tree va
       /* Only optimize if the argument is a label, if the argument is
 	 not a label then we can not construct a proper CFG.
 
-         It may be the case that we only need to allow the LABEL_REF to
-         appear inside an ADDR_EXPR, but we also allow the LABEL_REF to
-         appear inside a LABEL_EXPR just to be safe.  */
+	 It may be the case that we only need to allow the LABEL_REF to
+	 appear inside an ADDR_EXPR, but we also allow the LABEL_REF to
+	 appear inside a LABEL_EXPR just to be safe.  */
       if ((TREE_CODE (val) == ADDR_EXPR || TREE_CODE (val) == LABEL_EXPR)
 	  && TREE_CODE (TREE_OPERAND (val, 0)) == LABEL_DECL)
 	return find_taken_edge_computed_goto (bb, TREE_OPERAND (val, 0));
@@ -2573,11 +2566,18 @@ gimple_cfg2vcg (FILE *file)
 bool
 is_ctrl_stmt (gimple t)
 {
-  return gimple_code (t) == GIMPLE_COND
-    || gimple_code (t) == GIMPLE_SWITCH
-    || gimple_code (t) == GIMPLE_GOTO
-    || gimple_code (t) == GIMPLE_RETURN
-    || gimple_code (t) == GIMPLE_RESX;
+  switch (gimple_code (t))
+    {
+    case GIMPLE_COND:
+    case GIMPLE_SWITCH:
+    case GIMPLE_GOTO:
+    case GIMPLE_RETURN:
+    case GIMPLE_RESX:
+      return true;
+
+    default:
+      return false;
+    }
 }
 
 
@@ -2589,33 +2589,52 @@ is_ctrl_altering_stmt (gimple t)
 {
   gcc_assert (t);
 
-  if (is_gimple_call (t))
+  switch (gimple_code (t))
     {
-      int flags = gimple_call_flags (t);
+    case GIMPLE_CALL:
+      {
+	int flags = gimple_call_flags (t);
 
-      /* A non-pure/const call alters flow control if the current
-	 function has nonlocal labels.  */
-      if (!(flags & (ECF_CONST | ECF_PURE))
-	  && cfun->has_nonlocal_label)
-	return true;
+	/* A non-pure/const call alters flow control if the current
+	   function has nonlocal labels.  */
+	if (!(flags & (ECF_CONST | ECF_PURE)) && cfun->has_nonlocal_label)
+	  return true;
 
-      /* A call also alters control flow if it does not return.  */
-      if (gimple_call_flags (t) & ECF_NORETURN)
-	return true;
-    }
+	/* A call also alters control flow if it does not return.
+	   Likewise, a call alters control flow if it may generate
+	   a transaction restart.  */
+	if (flags & (ECF_NORETURN | ECF_TM_OPS))
+	  return true;
+      }
+      break;
 
-  /* OpenMP directives alter control flow.  */
-  if (is_gimple_omp (t))
-    return true;
+    case GIMPLE_OMP_PARALLEL:
+    case GIMPLE_OMP_TASK:
+    case GIMPLE_OMP_FOR:
+    case GIMPLE_OMP_SECTIONS:
+    case GIMPLE_OMP_SECTIONS_SWITCH:
+    case GIMPLE_OMP_SINGLE:
+    case GIMPLE_OMP_SECTION:
+    case GIMPLE_OMP_MASTER:
+    case GIMPLE_OMP_ORDERED:
+    case GIMPLE_OMP_CRITICAL:
+    case GIMPLE_OMP_RETURN:
+    case GIMPLE_OMP_ATOMIC_LOAD:
+    case GIMPLE_OMP_ATOMIC_STORE:
+    case GIMPLE_OMP_CONTINUE:
+      /* OpenMP directives alter control flow.  */
+      return true;
 
-  /* If a statement can throw, it alters control flow.  */
-  if (stmt_can_throw_internal (t))
-    return true;
+    case GIMPLE_TM_ATOMIC:
+      /* A transaction start alters control flow.  */
+      return true;
 
-  if (flag_tm && is_transactional_stmt (t))
-    return true;
+    default:
+      break;
+    }
 
-  return false;
+  /* If a statement can throw, it alters control flow.  */
+  return stmt_can_throw_internal (t);
 }
 
 
@@ -2887,7 +2906,7 @@ verify_expr (tree *tp, int *walk_subtree
 	new_side_effects = TREE_SIDE_EFFECTS (t);
 	new_constant = TREE_CONSTANT (t);
 
-        if (old_constant != new_constant)
+	if (old_constant != new_constant)
 	  {
 	    error ("constant not recomputed when ADDR_EXPR changed");
 	    return t;
@@ -2925,7 +2944,7 @@ verify_expr (tree *tp, int *walk_subtree
 	  return x;
 	}
       if (!is_gimple_condexpr (x))
-        {
+	{
 	  error ("invalid conditional operand");
 	  return x;
 	}
@@ -3415,7 +3434,7 @@ verify_gimple_assign_unary (gimple stmt)
 	    return true;
 	  }
 
-        return false;
+	return false;
       }
 
     case FIX_TRUNC_EXPR:
@@ -3428,7 +3447,7 @@ verify_gimple_assign_unary (gimple stmt)
 	    return true;
 	  }
 
-        return false;
+	return false;
       }
 
     case TRUTH_NOT_EXPR:
@@ -4008,7 +4027,7 @@ verify_types_in_gimple_seq_2 (gimple_seq
       gimple stmt = gsi_stmt (ittr);
 
       switch (gimple_code (stmt))
-        {
+	{
 	case GIMPLE_BIND:
 	  err |= verify_types_in_gimple_seq_2 (gimple_bind_body (stmt));
 	  break;
@@ -4778,7 +4797,7 @@ gimple_redirect_edge_and_branch (edge e,
     case GIMPLE_SWITCH:
       {
 	tree label = gimple_block_label (dest);
-        tree cases = get_cases_for_edge (e, stmt);
+	tree cases = get_cases_for_edge (e, stmt);
 
 	/* If we have a list of cases associated with E, then use it
 	   as it's a lot faster than walking the entire case vector.  */
@@ -5718,7 +5737,7 @@ move_block_to_fn (struct function *dest_
     }
 
   VEC_replace (basic_block, cfg->x_basic_block_info,
-               bb->index, bb);
+	       bb->index, bb);
 
   /* Remap the variables in phi nodes.  */
   for (si = gsi_start_phis (bb); !gsi_end_p (si); )
@@ -5732,7 +5751,7 @@ move_block_to_fn (struct function *dest_
 	{
 	  /* Remove the phi nodes for virtual operands (alias analysis will be
 	     run for the new function, anyway).  */
-          remove_phi_node (&si, true);
+	  remove_phi_node (&si, true);
 	  continue;
 	}
 
@@ -5790,7 +5809,7 @@ move_block_to_fn (struct function *dest_
 	  add_stmt_to_eh_region_fn (dest_cfun, stmt, region + eh_offset);
 	  remove_stmt_from_eh_region (stmt);
 	  gimple_duplicate_stmt_histograms (dest_cfun, stmt, cfun, stmt);
-          gimple_remove_stmt_histograms (cfun, stmt);
+	  gimple_remove_stmt_histograms (cfun, stmt);
 	}
 
       /* We cannot leave any operands allocated from the operand caches of
@@ -5945,7 +5964,7 @@ move_sese_region_to_fn (struct function 
   /* If ENTRY does not strictly dominate EXIT, this cannot be an SESE
      region.  */
   gcc_assert (entry_bb != exit_bb
-              && (!exit_bb
+	      && (!exit_bb
 		  || dominated_by_p (CDI_DOMINATORS, exit_bb, entry_bb)));
 
   /* Collect all the blocks in the region.  Manually add ENTRY_BB
@@ -7021,7 +7040,7 @@ gimplify_build3 (gimple_stmt_iterator *g
   STRIP_NOPS (ret);
 
   return force_gimple_operand_gsi (gsi, ret, true, NULL, true,
-                                   GSI_SAME_STMT);
+	                           GSI_SAME_STMT);
 }
 
 /* Build a binary operation and gimplify it.  Emit code before GSI.
@@ -7037,7 +7056,7 @@ gimplify_build2 (gimple_stmt_iterator *g
   STRIP_NOPS (ret);
 
   return force_gimple_operand_gsi (gsi, ret, true, NULL, true,
-                                   GSI_SAME_STMT);
+	                           GSI_SAME_STMT);
 }
 
 /* Build a unary operation and gimplify it.  Emit code before GSI.
@@ -7053,7 +7072,7 @@ gimplify_build1 (gimple_stmt_iterator *g
   STRIP_NOPS (ret);
 
   return force_gimple_operand_gsi (gsi, ret, true, NULL, true,
-                                   GSI_SAME_STMT);
+	                           GSI_SAME_STMT);
 }
 
 
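
A note on the is_ctrl_altering_stmt change above: stmt_ends_bb_p is
(roughly) the disjunction of the two predicates, so flagging
ECF_TM_OPS calls here is what forces each TM builtin call to end its
basic block, giving the later tm_edges pass a clean place to attach
restart edges.  Sketch of the consumer, from memory rather than from
this patch:

	bool
	stmt_ends_bb_p (gimple t)
	{
	  /* A statement ends its block if it transfers control itself
	     or may do so as a side effect (EH, noreturn, TM restart).  */
	  return is_ctrl_stmt (t) || is_ctrl_altering_stmt (t);
	}
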
--- tree-eh.c	(revision 141360)
+++ tree-eh.c	(local)
@@ -357,6 +357,7 @@ struct leh_state
      don't have easy access to.  */
   struct eh_region *cur_region;
   struct eh_region *prev_try;
+  struct eh_region *prev_atomic;
 
   /* Processing of TRY_FINALLY requires a bit more state.  This is
      split out into a separate structure so that we don't have to
@@ -1812,6 +1813,33 @@ lower_cleanup (struct leh_state *state, 
 }
 
 
+/* Continue lowering GIMPLE_TM_ATOMIC.  Record an EH region for it,
+   and flatten the body.  */
+
+static void
+lower_tm_atomic_eh (struct leh_state *state, gimple_stmt_iterator *gsi)
+{
+  struct eh_region *outer_region = state->cur_region;
+  struct eh_region *outer_atomic = state->prev_atomic;
+  struct eh_region *region;
+  gimple stmt = gsi_stmt (*gsi);
+
+  /* Record the transaction region in the EH tree.  */
+  region = gen_eh_region_transaction (outer_region, stmt);
+  state->cur_region = region;
+  state->prev_atomic = region;
+  record_stmt_eh_region (region, stmt);
+
+  lower_eh_constructs_1 (state, gimple_seq_body (stmt));
+
+  /* Flatten the atomic node with respect to its body.  */
+  gsi_insert_seq_after (gsi, gimple_seq_body (stmt), GSI_CONTINUE_LINKING);
+  gimple_seq_set_body (stmt, NULL);
+
+  state->cur_region = outer_region;
+  state->prev_atomic = outer_atomic;
+}
+
 
 /* Main loop for lowering eh constructs. Also moves gsi to the next 
    statement. */
@@ -1833,6 +1861,8 @@ lower_eh_constructs_2 (struct leh_state 
 	  record_stmt_eh_region (state->cur_region, stmt);
 	  note_eh_region_may_contain_throw (state->cur_region);
 	}
+      if (state->prev_atomic && is_transactional_stmt (stmt))
+	record_stmt_eh_region (state->prev_atomic, stmt);
       break;
 
     case GIMPLE_COND:
@@ -1874,18 +1904,7 @@ lower_eh_constructs_2 (struct leh_state 
       return;
 
     case GIMPLE_TM_ATOMIC:
-      {
-        /* Record the transaction region in the EH tree, then process
-	   the body of the transaction.  We don't lower the transaction
-	   node just yet.  */
-	struct eh_region *outer = state->cur_region;
-	state->cur_region = gen_eh_region_transaction (outer);
-
-	record_stmt_eh_region (state->cur_region, stmt);
-	lower_eh_constructs_1 (state, gimple_seq_body (stmt));
-
-	state->cur_region = outer;
-      }
+      lower_tm_atomic_eh (state, gsi);
       break;
 
     default:
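
The matching except.c changes are not in this excerpt; per the
ChangeLog, gen_eh_region_transaction now takes and stores the stmt in
u.transaction.tm_atomic_stmt.  A hedged sketch of the presumed shape,
where ERT_TRANSACTION and the gen_eh_region helper call are
assumptions:

	struct eh_region *
	gen_eh_region_transaction (struct eh_region *outer, gimple stmt)
	{
	  /* Assumed: allocate a transaction region nested in OUTER and
	     remember the GIMPLE_TM_ATOMIC stmt so for_each_tm_atomic
	     can hand it back to walkers.  */
	  struct eh_region *region = gen_eh_region (ERT_TRANSACTION, outer);
	  region->u.transaction.tm_atomic_stmt = stmt;
	  return region;
	}
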
--- tree-flow.h	(revision 141360)
+++ tree-flow.h	(local)
@@ -1080,7 +1080,6 @@ extern int lookup_stmt_eh_region (gimple
 extern bool verify_eh_edges (gimple);
 
 /* In gtm-low.c  */
-extern void make_tm_edge (gimple);
 extern bool is_transactional_stmt (const_gimple);
 
 /* In tree-ssa-pre.c  */
--- tree-pass.h	(revision 141360)
+++ tree-pass.h	(local)
@@ -389,7 +389,11 @@ extern struct gimple_opt_pass pass_rebui
 extern struct gimple_opt_pass pass_build_cgraph_edges;
 extern struct gimple_opt_pass pass_reset_cc_flags;
 extern struct gimple_opt_pass pass_lower_tm;
-extern struct gimple_opt_pass pass_checkpoint_tm;
+extern struct gimple_opt_pass pass_tm_init;
+extern struct gimple_opt_pass pass_tm_mark;
+extern struct gimple_opt_pass pass_tm_memopt;
+extern struct gimple_opt_pass pass_tm_edges;
+extern struct gimple_opt_pass pass_tm_done;
 
 /* IPA Passes */
 extern struct ipa_opt_pass pass_ipa_inline;
@@ -405,6 +409,7 @@ extern struct simple_ipa_opt_pass pass_i
 extern struct simple_ipa_opt_pass pass_early_local_passes;
 extern struct simple_ipa_opt_pass pass_ipa_increase_alignment;
 extern struct simple_ipa_opt_pass pass_ipa_function_and_variable_visibility;
+extern struct simple_ipa_opt_pass pass_ipa_tm;
 
 extern struct gimple_opt_pass pass_all_optimizations;
 extern struct gimple_opt_pass pass_cleanup_cfg_post_optimizing;
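
The passes.c hunk that places these is likewise not shown.  Given the
cover note that everything is gated by pass_tm_init, the placement
presumably looks something like the following; the exact pipeline
position and the sub-pass nesting are assumptions:

	  NEXT_PASS (pass_tm_init);
	    {
	      struct opt_pass **p = &pass_tm_init.pass.sub;
	      NEXT_PASS (pass_tm_mark);
	      NEXT_PASS (pass_tm_memopt);
	      NEXT_PASS (pass_tm_edges);
	    }
	  NEXT_PASS (pass_tm_done);
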
--- tree-ssa-operands.c	(revision 141360)
+++ tree-ssa-operands.c	(local)
@@ -1551,6 +1551,11 @@ get_addr_dereference_operands (gimple st
       gimple_set_has_volatile_ops (stmt, true);
       return;
     }
+  else if (TREE_CODE (ptr) == ADDR_EXPR)
+    {
+      get_expr_operands (stmt, &TREE_OPERAND (ptr, 0), flags);
+      return;
+    }
   else
     {
       /* Ok, this isn't even is_gimple_min_invariant.  Something's broke.  */
@@ -1630,11 +1635,35 @@ get_tmr_operands (gimple stmt, tree expr
 }
 
 
+/* Clobber everything in memory.  Used when memory barriers are seen.  */
+
+static void
+add_all_call_clobber_ops (gimple stmt)
+{
+  unsigned i;
+  bitmap_iterator bi;
+
+  /* Mark the statement as having memory operands.  */
+  gimple_set_references_memory (stmt, true);
+
+  EXECUTE_IF_SET_IN_BITMAP (gimple_call_clobbered_vars (cfun), 0, i, bi)
+    {
+      tree var = referenced_var (i);
+      add_stmt_operand (&var, stmt, opf_def | opf_implicit);
+    }
+
+  EXECUTE_IF_SET_IN_BITMAP (gimple_addressable_vars (cfun), 0, i, bi)
+    {
+      tree var = referenced_var (i);
+      add_stmt_operand (&var, stmt, opf_def | opf_implicit);
+    }
+}
+
 /* Add clobbering definitions for .GLOBAL_VAR or for each of the call
    clobbered variables in the function.  */
 
 static void
-add_call_clobber_ops (gimple stmt, tree callee ATTRIBUTE_UNUSED)
+add_call_clobber_ops (gimple stmt, tree callee)
 {
   unsigned u;
   bitmap_iterator bi;
@@ -1653,8 +1682,13 @@ add_call_clobber_ops (gimple stmt, tree 
   /* Get info for local and module level statics.  There is a bit
      set for each static if the call being processed does not read
      or write that variable.  */
-  not_read_b = callee ? ipa_reference_get_not_read_global (cgraph_node (callee)) : NULL; 
-  not_written_b = callee ? ipa_reference_get_not_written_global (cgraph_node (callee)) : NULL;
+  not_read_b = not_written_b = NULL;
+  if (callee)
+    {
+      struct cgraph_node *cg = cgraph_node (callee);
+      not_read_b = ipa_reference_get_not_read_global (cg);
+      not_written_b = ipa_reference_get_not_written_global (cg);
+    }
 
   /* Add a VDEF operand for every call clobbered variable.  */
   EXECUTE_IF_SET_IN_BITMAP (gimple_call_clobbered_vars (cfun), 0, u, bi)
@@ -1695,7 +1729,7 @@ add_call_clobber_ops (gimple stmt, tree 
    function.  */
 
 static void
-add_call_read_ops (gimple stmt, tree callee ATTRIBUTE_UNUSED)
+add_call_read_ops (gimple stmt, tree callee)
 {
   unsigned u;
   bitmap_iterator bi;
@@ -1705,7 +1739,9 @@ add_call_read_ops (gimple stmt, tree cal
   if (gimple_call_flags (stmt) & ECF_CONST)
     return;
 
-  not_read_b = callee ? ipa_reference_get_not_read_global (cgraph_node (callee)) : NULL;
+  not_read_b = NULL;
+  if (callee)
+    not_read_b = ipa_reference_get_not_read_global (cgraph_node (callee));
 
   /* For pure functions we compute non-escaped uses separately.  */
   if (gimple_call_flags (stmt) & ECF_PURE)
@@ -1763,6 +1799,16 @@ add_call_read_ops (gimple stmt, tree cal
     }
 }
 
+/* STMT is one of the transactional memory barrier builtins; FLAGS
+   is either opf_def or opf_use.  Determine what memory is touched
+   by the builtin, and set the operands appropriately.  */
+
+static void
+add_tm_call_ops (gimple stmt, int flags)
+{
+  tree *pptr = gimple_call_arg_ptr (stmt, 0);
+  get_addr_dereference_operands (stmt, pptr, flags, *pptr, 0, -1, false);
+}
 
 /* If STMT is a call that may clobber globals and other symbols that
    escape, add them to the VDEF/VUSE lists for it.  */
@@ -1771,22 +1817,50 @@ static void
 maybe_add_call_clobbered_vops (gimple stmt)
 {
   int call_flags = gimple_call_flags (stmt);
+  tree fndecl = gimple_call_fndecl (stmt);
 
   /* Mark the statement as having memory operands.  */
   gimple_set_references_memory (stmt, true);
 
+  /* Special-case some builtin functions.  */
+  if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
+    switch (DECL_FUNCTION_CODE (fndecl))
+      {
+      case BUILT_IN_TM_STORE_1:
+      case BUILT_IN_TM_STORE_2:
+      case BUILT_IN_TM_STORE_4:
+      case BUILT_IN_TM_STORE_8:
+      case BUILT_IN_TM_STORE_FLOAT:
+      case BUILT_IN_TM_STORE_DOUBLE:
+        add_tm_call_ops (stmt, opf_def);
+	return;
+
+      case BUILT_IN_TM_LOAD_1:
+      case BUILT_IN_TM_LOAD_2:
+      case BUILT_IN_TM_LOAD_4:
+      case BUILT_IN_TM_LOAD_8:
+      case BUILT_IN_TM_LOAD_FLOAT:
+      case BUILT_IN_TM_LOAD_DOUBLE:
+        add_tm_call_ops (stmt, opf_use);
+	return;
+
+      /* TODO: All of the <string.h> routines have known memory access
+	 patterns; we can avoid touching all memory for them.  */
+
+      default:
+	break;
+      }
+
   /* If aliases have been computed already, add VDEF or VUSE
      operands for all the symbols that have been found to be
      call-clobbered.  */
   if (gimple_aliases_computed_p (cfun) && !(call_flags & ECF_NOVOPS))
     {
-      /* A 'pure' or a 'const' function never call-clobbers anything. 
-	 A 'noreturn' function might, but since we don't return anyway 
-	 there is no point in recording that.  */ 
-      if (!(call_flags & (ECF_PURE | ECF_CONST | ECF_NORETURN)))
-	add_call_clobber_ops (stmt, gimple_call_fndecl (stmt));
+      /* A 'pure' or a 'const' function never call-clobbers anything.  */
+      if (!(call_flags & (ECF_PURE | ECF_CONST)))
+	add_call_clobber_ops (stmt, fndecl);
       else if (!(call_flags & ECF_CONST))
-	add_call_read_ops (stmt, gimple_call_fndecl (stmt));
+	add_call_read_ops (stmt, fndecl);
     }
 }
 
@@ -1854,23 +1928,7 @@ get_asm_expr_operands (gimple stmt)
       tree link = gimple_asm_clobber_op (stmt, i);
       if (strcmp (TREE_STRING_POINTER (TREE_VALUE (link)), "memory") == 0)
 	{
-	  unsigned i;
-	  bitmap_iterator bi;
-
-	  /* Mark the statement as having memory operands.  */
-	  gimple_set_references_memory (stmt, true);
-
-	  EXECUTE_IF_SET_IN_BITMAP (gimple_call_clobbered_vars (cfun), 0, i, bi)
-	    {
-	      tree var = referenced_var (i);
-	      add_stmt_operand (&var, stmt, opf_def | opf_implicit);
-	    }
-
-	  EXECUTE_IF_SET_IN_BITMAP (gimple_addressable_vars (cfun), 0, i, bi)
-	    {
-	      tree var = referenced_var (i);
-	      add_stmt_operand (&var, stmt, opf_def | opf_implicit);
-	    }
+	  add_all_call_clobber_ops (stmt);
 	  break;
 	}
     }
@@ -2076,25 +2134,32 @@ static void
 parse_ssa_operands (gimple stmt)
 {
   enum gimple_code code = gimple_code (stmt);
+  size_t i, start = 0;
 
-  if (code == GIMPLE_ASM)
-    get_asm_expr_operands (stmt);
-  else
+  switch (code)
     {
-      size_t i, start = 0;
-
-      if (code == GIMPLE_ASSIGN || code == GIMPLE_CALL)
-	{
-	  get_expr_operands (stmt, gimple_op_ptr (stmt, 0), opf_def);
-	  start = 1;
-	}
+    case GIMPLE_ASM:
+      get_asm_expr_operands (stmt);
+      break;
+
+    case GIMPLE_TM_ATOMIC:
+      /* The start of a transaction block acts as a memory barrier.  */
+      add_all_call_clobber_ops (stmt);
+      break;
+
+    case GIMPLE_CALL:
+      maybe_add_call_clobbered_vops (stmt);
+      /* FALLTHRU */
+
+    case GIMPLE_ASSIGN:
+      get_expr_operands (stmt, gimple_op_ptr (stmt, 0), opf_def);
+      start = 1;
+      /* FALLTHRU */
 
+    default:
       for (i = start; i < gimple_num_ops (stmt); i++)
 	get_expr_operands (stmt, gimple_op_ptr (stmt, i), opf_use);
-
-      /* Add call-clobbered operands, if needed.  */
-      if (code == GIMPLE_CALL)
-	maybe_add_call_clobbered_vops (stmt);
+      break;
     }
 }
 
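
To make the new operand handling concrete: after expansion a
transactional access is an ordinary call whose first argument is the
address being touched, so add_tm_call_ops can feed that argument
straight to get_addr_dereference_operands.  An illustrative
source-level view, with the builtin spellings assumed from the
BUILT_IN_TM_* names:

	  __builtin_tm_store_4 (&x, val);	/* scanner adds a VDEF of x */
	  tmp = __builtin_tm_load_4 (&x);	/* scanner adds a VUSE of x */
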
--- tree.def	(revision 141360)
+++ tree.def	(local)
@@ -1061,12 +1061,6 @@ DEFTREECODE (OMP_CLAUSE, "omp_clause", t
    Operand 0: BODY: contains body of the transaction.*/
 DEFTREECODE (TM_ATOMIC, "tm_atomic", tcc_statement, 1)
 
-/* A read protected by a current transaction.  */
-DEFTREECODE (TM_LOAD, "tm_load", tcc_expression, 1)
-
-/* A write protected by a current transaction.  */
-DEFTREECODE (TM_STORE, "tm_store", tcc_expression, 1)
-
 /* Reduction operations. 
    Operations that take a vector of elements and "reduce" it to a scalar
    result (e.g. summing the elements of the vector, finding the minimum over

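
One payoff of replacing the TM_LOAD/TM_STORE tree codes with builtins
(the loads being marked PURE, per the ChangeLog) is that the regular
optimizers can treat redundant transactional loads without any
TM-specific knowledge.  A sketch of the idea, illustrative only:

	  a = __builtin_tm_load_4 (&x);
	  b = __builtin_tm_load_4 (&x);	/* PURE and no intervening TM
					   store, so eligible for CSE
					   into  b = a;  */
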