commit a441d916db334b71e76d9f4350ffdf992c058b4f
Author: Aldy Hernandez
Date:   Thu Feb 21 16:02:11 2013 -0600

	* trans-mem.c (expand_transaction): Only set PR_INSTRUMENTEDCODE
	if there was instrumented code.
	(expand_call_tm): Set GTMA_INSTRUMENTED_CODE.
	(expand_assign_tm): Same.
	* gimple.h (GTMA_INSTRUMENTED_CODE): New.

diff --git a/gcc/gimple.h b/gcc/gimple.h
index 4bd6b3d..baa5c24 100644
--- a/gcc/gimple.h
+++ b/gcc/gimple.h
@@ -661,6 +661,8 @@ struct GTY(()) gimple_statement_omp_atomic_store {
    tell the runtime that it should begin the transaction in
    serial-irrevocable mode.  */
 #define GTMA_DOES_GO_IRREVOCABLE	(1u << 6)
+/* The transaction has at least one instrumented instruction within.  */
+#define GTMA_INSTRUMENTED_CODE		(1u << 7)
 
 struct GTY(()) gimple_statement_transaction
 {
diff --git a/gcc/testsuite/gcc.dg/tm/instrumented-mask.c b/gcc/testsuite/gcc.dg/tm/instrumented-mask.c
new file mode 100644
index 0000000..6cfd3e4
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tm/instrumented-mask.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm -fdump-tree-tmmark" } */
+
+/* If we're sure to go irrevocable, as in the case below, do not pass
+   PR_INSTRUMENTEDCODE to the run-time if there is nothing
+   instrumented within the transaction.  */
+
+int
+main()
+{
+  __transaction_relaxed { __asm__(""); }
+  return 0;
+}
+
+/* { dg-final { scan-tree-dump-times " instrumentedCode" 0 "tmmark" } } */
+/* { dg-final { cleanup-tree-dump "tmmark" } } */
diff --git a/gcc/trans-mem.c b/gcc/trans-mem.c
index 71eaa44..315f00c 100644
--- a/gcc/trans-mem.c
+++ b/gcc/trans-mem.c
@@ -2146,6 +2146,9 @@ expand_assign_tm (struct tm_region *region, gimple_stmt_iterator *gsi)
     {
       /* Add thread private addresses to log if applicable.  */
       requires_barrier (region->entry_block, lhs, stmt);
+      // ?? Should we set GTMA_INSTRUMENTED_CODE here if there is no
+      // instrumentation, but requires_barrier added a thread private
+      // address to the log?
       gsi_next (gsi);
       return;
     }
@@ -2153,6 +2156,8 @@ expand_assign_tm (struct tm_region *region, gimple_stmt_iterator *gsi)
   // Remove original load/store statement.
   gsi_remove (gsi, true);
 
+  transaction_subcode_ior (region, GTMA_INSTRUMENTED_CODE);
+
   if (load_p && !store_p)
     {
       transaction_subcode_ior (region, GTMA_HAVE_LOAD);
@@ -2230,9 +2235,12 @@ expand_call_tm (struct tm_region *region,
 
   if (fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMCPY)
       || fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMMOVE))
-    transaction_subcode_ior (region, GTMA_HAVE_STORE | GTMA_HAVE_LOAD);
+    transaction_subcode_ior (region, GTMA_HAVE_STORE
+			     | GTMA_HAVE_LOAD
+			     | GTMA_INSTRUMENTED_CODE);
   if (fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMSET))
-    transaction_subcode_ior (region, GTMA_HAVE_STORE);
+    transaction_subcode_ior (region, GTMA_HAVE_STORE
+			     | GTMA_INSTRUMENTED_CODE);
 
   if (is_tm_pure_call (stmt))
     return false;
@@ -2243,7 +2251,8 @@ expand_call_tm (struct tm_region *region,
     {
       /* Assume all non-const/pure calls write to memory, except
	 transaction ending builtins.  */
-      transaction_subcode_ior (region, GTMA_HAVE_STORE);
+      transaction_subcode_ior (region, GTMA_HAVE_STORE
+			       | GTMA_INSTRUMENTED_CODE);
     }
 
   /* For indirect calls, we already generated a call into the runtime.  */
@@ -2602,7 +2611,7 @@ expand_transaction (struct tm_region *region, void *data ATTRIBUTE_UNUSED)
     flags |= PR_HASNOABORT;
   if ((subcode & GTMA_HAVE_STORE) == 0)
     flags |= PR_READONLY;
-  if (inst_edge)
+  if (inst_edge && (subcode & GTMA_INSTRUMENTED_CODE))
     flags |= PR_INSTRUMENTEDCODE;
   if (uninst_edge)
     flags |= PR_UNINSTRUMENTEDCODE;
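
--
Note (not part of the patch): a minimal sketch of the effect of the
expand_transaction change above.  The GTMA_* bit values match gcc/gimple.h
as modified here; the PR_* constant values and the compute_begin_flags
helper are hypothetical placeholders used only to illustrate how the new
GTMA_INSTRUMENTED_CODE bit gates the instrumented-code flag that would be
handed to the TM runtime.

/* Illustration only -- not GCC source.  Assumed names and values are
   marked as placeholders.  */
#include <stdbool.h>
#include <stdio.h>

#define GTMA_DOES_GO_IRREVOCABLE  (1u << 6)
#define GTMA_INSTRUMENTED_CODE    (1u << 7)	/* new in this patch */

#define PR_INSTRUMENTEDCODE	  0x0001u	/* placeholder value */
#define PR_UNINSTRUMENTEDCODE	  0x0002u	/* placeholder value */

/* Hypothetical stand-in for the relevant lines of expand_transaction:
   instrumented code is only advertised to the runtime when the
   transaction region actually contains instrumented statements.  */
static unsigned int
compute_begin_flags (unsigned int subcode, bool inst_edge, bool uninst_edge)
{
  unsigned int flags = 0;

  if (inst_edge && (subcode & GTMA_INSTRUMENTED_CODE))
    flags |= PR_INSTRUMENTEDCODE;
  if (uninst_edge)
    flags |= PR_UNINSTRUMENTEDCODE;

  return flags;
}

int
main (void)
{
  /* Mirrors the new instrumented-mask.c test: the transaction goes
     irrevocable and contains nothing instrumented, so the
     instrumented-code flag is no longer requested.  */
  unsigned int flags = compute_begin_flags (GTMA_DOES_GO_IRREVOCABLE,
					    /*inst_edge=*/true,
					    /*uninst_edge=*/true);
  printf ("flags = %#x\n", flags);	/* prints 0x2, not 0x3 */
  return 0;
}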