diff --git a/gcc/config/arm/sync.md b/gcc/config/arm/sync.md
index 44cda61..75dd52e 100644
--- a/gcc/config/arm/sync.md
+++ b/gcc/config/arm/sync.md
@@ -75,11 +75,12 @@ (define_insn "atomic_load<mode>"
   {
     enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
     if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_release (model))
-      return \"ldr<sync_sfx>\\t%0, %1\";
+      return \"ldr<sync_sfx>%(%)\\t%0, %1\";
     else
-      return \"lda<sync_sfx>\\t%0, %1\";
+      return \"lda<sync_sfx>%?\\t%0, %1\";
   }
-)
+  [(set_attr "predicable" "yes")
+   (set_attr "predicable_short_it" "no")])
 
 (define_insn "atomic_store<mode>"
   [(set (match_operand:QHSI 0 "memory_operand" "=Q")
@@ -91,11 +92,12 @@ (define_insn "atomic_store<mode>"
   {
     enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
     if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_acquire (model))
-      return \"str<sync_sfx>\t%1, %0\";
+      return \"str<sync_sfx>%(%)\t%1, %0\";
     else
-      return \"stl<sync_sfx>\t%1, %0\";
+      return \"stl<sync_sfx>%?\t%1, %0\";
   }
-)
+  [(set_attr "predicable" "yes")
+   (set_attr "predicable_short_it" "no")])
 
 ;; Note that ldrd and vldr are *not* guaranteed to be single-copy atomic,
 ;; even for a 64-bit aligned address.  Instead we use a ldrexd unparied
diff --git a/gcc/testsuite/gcc.target/arm/stl-cond.c b/gcc/testsuite/gcc.target/arm/stl-cond.c
new file mode 100644
index 0000000..de14bb5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/stl-cond.c
@@ -0,0 +1,19 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target arm_arm_ok } */
+/* { dg-require-effective-target arm_arch_v8a_ok } */
+/* { dg-options "-O2 -marm" } */
+/* { dg-add-options arm_arch_v8a } */
+
+struct backtrace_state
+{
+  int threaded;
+  int lock_alloc;
+};
+
+void foo (struct backtrace_state *state)
+{
+  if (state->threaded)
+    __sync_lock_release (&state->lock_alloc);
+}
+
+/* { dg-final { scan-assembler "stlne" } } */
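
For reference, a sketch of the code generation the new test is looking for. This is illustrative, not captured compiler output: the register choices and exact instruction order are assumptions, and the scan-assembler directive only requires that a predicated "stlne" appear somewhere in the output. __sync_lock_release lowers to a store of zero with release semantics, and because the STL addressing mode is a bare base register (the "Q" constraint), the address of lock_alloc has to be formed separately:

	foo:
		ldr	r3, [r0]	@ load state->threaded
		cmp	r3, #0		@ test the flag
		addne	r0, r0, #4	@ &state->lock_alloc (STL takes no offset)
		movne	r3, #0		@ value released by __sync_lock_release
		stlne	r3, [r0]	@ predicated store-release
		bx	lr

As I read the diff: the old templates had no %? (or %(...%)) escape, which is the point where the ARM backend prints the current condition code, so a conditional form of LDA/STL could never be emitted; the added "predicable" attribute advertises that the patterns may now be if-converted, while predicable_short_it "no" keeps these 32-bit instructions out of the restricted IT blocks allowed for ARMv8 Thumb-2. The test forces -marm because ARM-state encodings carry a condition field on nearly every instruction, which is what lets the branch around the release store collapse into the stlne form.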