public inbox for gcc-patches@gcc.gnu.org
* [PATCH] RISCV: Strengthen libatomic lrsc pairs
@ 2022-03-07 23:35 Patrick O'Neill
  2022-03-08  2:54 ` [PATCH v2] " Patrick O'Neill
  0 siblings, 1 reply; 2+ messages in thread
From: Patrick O'Neill @ 2022-03-07 23:35 UTC (permalink / raw)
  To: gcc-patches; +Cc: vineetg, palmer, dlustig, Patrick O'Neill

Currently, libatomic's __sync_fetch_and_#op# and
__sync_val_compare_and_swap functions are not sufficiently strong for the
ATOMIC_SEQ_CST memory model.

This can be shown using the following Herd litmus test:

RISCV LRSC-LIB-CALL

{
0:x6=a; 0:x8=b; 0:x10=c;
1:x6=a; 1:x8=b; 1:x10=c;
}

 P0                  | P1           ;
 ori x1,x0,1         | lw x9,0(x10) ;
 sw x1,0(x10)        | fence rw,rw  ;
 lr.w.aq x7,0(x8)    | lw x5,0(x6)  ;
 ori x7,x0,1         |              ;
 sc.w.rl x3,x7,0(x8) |              ;
 sw x1,0(x6)         |              ;

exists (1:x9=1 /\ 1:x5=0 /\ b=1)
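
(For context, here is a minimal C sketch of the pattern the litmus test
models.  The names, the byte-sized flag, and the mapping of variables to
a/b/c are illustrative assumptions, not taken from the patch; the byte
access is what ends up in the LR/SC loops in
libgcc/config/riscv/atomic.c, since the base A extension has no subword
AMOs.)

  #include <stdint.h>

  uint8_t flag;   /* 'b' in the litmus test */
  int     before; /* 'c' in the litmus test */
  int     after;  /* 'a' in the litmus test */

  void writer (void)              /* P0 */
  {
    before = 1;
    /* Callers expect this to act as a full SEQ_CST barrier; the same
       applies to __sync_val_compare_and_swap.  */
    (void) __sync_fetch_and_or (&flag, 1);
    after = 1;
  }

  void reader (int *r1, int *r2)  /* P1 */
  {
    *r1 = before;
    __sync_synchronize ();        /* full fence (fence rw,rw in the test) */
    *r2 = after;
  }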

This patch enforces SEQ_CST by setting the .aqrl bits on the LR and SC
ops.

2022-03-07 Patrick O'Neill <patrick@rivosinc.com>

	PR target/104831
	* atomic.c: Change LR.aq/SC.rl pairs into sequentially
	consistent LR.aqrl/SC.aqrl pairs.

Signed-off-by: Patrick O'Neill <patrick@rivosinc.com>
---
RISCV LRSC-BUGFIX

{
0:x6=a; 0:x8=b; 0:x10=c;
1:x6=a; 1:x8=b; 1:x10=c;
}

 P0                    | P1           ;
 ori x1,x0,1           | lw x9,0(x10) ;
 sw x1,0(x10)          | fence rw,rw  ;
 lr.w.aqrl x7,0(x8)    | lw x5,0(x6)  ;
 ori x7,x0,1           |              ;
 sc.w.aqrl x3,x7,0(x8) |              ;
 sw x1,0(x6)           |              ;

~exists (1:x9=1 /\ 1:x5=0 /\ b=1)
---
Below are other Herd litmus tests used to test the LR.aqrl/SC.aqrl fix.
---
RISCV LRSC-READ

(*
  LR/SC with .aq .rl bits does not allow read operations to be reordered
  within/above it.
*)

{
0:x6=a; 0:x8=b;
1:x6=a; 1:x8=b;
}

 P0                     | P1          ;
 lr.w.aq.rl x7,0(x8)    | ori x1,x0,1 ;
 ori x7,x0,1            | sw x1,0(x6) ;
 sc.w.aq.rl x1,x7,0(x8) | fence rw,rw ;
 lw x5,0(x6)            | lw x7,0(x8) ;

~exists (0:x5=0 /\ 1:x7=0 /\ b=1)
---
RISCV READ-LRSC

(*
  LR/SC with .aq .rl bits does not allow read operations to be reordered
  within/beneath it.
*)

{
0:x6=a; 0:x8=b;
1:x6=a; 1:x8=b;
}

 P0                     | P1          ;
 lw x5,0(x6)            | ori x1,x0,1 ;
 lr.w.aq.rl x7,0(x8)    | sw x1,0(x8) ;
 ori x1,x0,1            | fence rw,rw ;
 sc.w.aq.rl x1,x1,0(x8) | sw x1,0(x6) ;

~exists (0:x5=1 /\ 0:x7=0 /\ b=1)
---
RISCV LRSC-WRITE

(* 
  LR/SC with .aq .rl bits does not allow write operations to be reordered
  within/above it.
*)

{
0:x8=b; 0:x10=c;
1:x8=b; 1:x10=c;
}

 P0                     | P1          ;
 ori x9,x0,1            | lw x9,0(x10);
 lr.w.aq.rl x7,0(x8)    | fence rw,rw ;
 ori x7,x0,1            | lw x7,0(x8) ;
 sc.w.aq.rl x1,x7,0(x8) |             ;
 sw x9,0(x10)           |             ;

~exists (1:x9=1 /\ 1:x7=0 /\ b=1)
---
RISCV WRITE-LRSC

(*
  LR/SC with .aq .rl bits does not allow write operations to be reordered
  within/beneath it.
*)

{
0:x8=b; 0:x10=c;
1:x8=b; 1:x10=c;
}

 P0                     | P1           ;
 ori x1,x0,1            | ori x1,x0,1  ;
 sw x1,0(x10)           | sw x1,0(x8)  ;
 lr.w.aq.rl x7,0(x8)    | fence rw,rw  ;
 sc.w.aq.rl x1,x1,0(x8) | lw x9,0(x10) ;

~exists (0:x7=0 /\ 1:x9=0 /\ b=1)
---
 libgcc/config/riscv/atomic.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/libgcc/config/riscv/atomic.c b/libgcc/config/riscv/atomic.c
index 7007e7a20e4..0c85a6d00ea 100644
--- a/libgcc/config/riscv/atomic.c
+++ b/libgcc/config/riscv/atomic.c
@@ -39,13 +39,13 @@ see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
     unsigned old, tmp1, tmp2;						\
 									\
     asm volatile ("1:\n\t"						\
-		  "lr.w.aq %[old], %[mem]\n\t"				\
+		  "lr.w.aqrl %[old], %[mem]\n\t"			\
 		  #insn " %[tmp1], %[old], %[value]\n\t"		\
 		  invert						\
 		  "and %[tmp1], %[tmp1], %[mask]\n\t"			\
 		  "and %[tmp2], %[old], %[not_mask]\n\t"		\
 		  "or %[tmp2], %[tmp2], %[tmp1]\n\t"			\
-		  "sc.w.rl %[tmp1], %[tmp2], %[mem]\n\t"		\
+		  "sc.w.aqrl %[tmp1], %[tmp2], %[mem]\n\t"		\
 		  "bnez %[tmp1], 1b"					\
 		  : [old] "=&r" (old),					\
 		    [mem] "+A" (*(volatile unsigned*) aligned_addr),	\
@@ -73,12 +73,12 @@ see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
     unsigned old, tmp1;							\
 									\
     asm volatile ("1:\n\t"						\
-		  "lr.w.aq %[old], %[mem]\n\t"				\
+		  "lr.w.aqrl %[old], %[mem]\n\t"				\
 		  "and %[tmp1], %[old], %[mask]\n\t"			\
 		  "bne %[tmp1], %[o], 1f\n\t"				\
 		  "and %[tmp1], %[old], %[not_mask]\n\t"		\
 		  "or %[tmp1], %[tmp1], %[n]\n\t"			\
-		  "sc.w.rl %[tmp1], %[tmp1], %[mem]\n\t"		\
+		  "sc.w.aqrl %[tmp1], %[tmp1], %[mem]\n\t"		\
 		  "bnez %[tmp1], 1b\n\t"				\
 		  "1:"							\
 		  : [old] "=&r" (old),					\
-- 
2.25.1


* [PATCH v2] RISCV: Strengthen libatomic lrsc pairs
  2022-03-07 23:35 [PATCH] RISCV: Strengthen libatomic lrsc pairs Patrick O'Neill
@ 2022-03-08  2:54 ` Patrick O'Neill
  0 siblings, 0 replies; 2+ messages in thread
From: Patrick O'Neill @ 2022-03-08  2:54 UTC (permalink / raw)
  To: gcc-patches; +Cc: palmer, vineetg, dlustig, Patrick O'Neill

Currently, libatomic's __sync_fetch_and_#op# and
__sync_val_compare_and_swap functions are not sufficiently strong for the
ATOMIC_SEQ_CST memory model.

This can be shown using the following Herd litmus test:

RISCV LRSC-LIB-CALL

{
0:x6=a; 0:x8=b; 0:x10=c;
1:x6=a; 1:x8=b; 1:x10=c;
}

 P0                  | P1           ;
 ori x1,x0,1         | lw x7,0(x8)  ;
 sw x1,0(x10)        | fence rw,rw  ;
 lr.w.aq x7,0(x8)    | lw x5,0(x6)  ;
 ori x7,x0,1         |              ;
 sc.w.rl x3,x7,0(x8) |              ;
 fence rw,w          |              ;
 sw x1,0(x6)         |              ;

exists (1:x7=1 /\ 1:x5=0 /\ b=1)

This patch enforces SEQ_CST by setting the .aqrl bits on the LR ops,
pairing them with the existing .rl bits on the SC ops.

2022-03-07 Patrick O'Neill <patrick@rivosinc.com>

	PR target/104831
	* atomic.c: Change LR.aq/SC.rl pairs into sequentially
	consistent LR.aqrl/SC.rl pairs.

Signed-off-by: Patrick O'Neill <patrick@rivosinc.com>
---
Changelog v2:
 - Weakened LR/SC pairs to be in line with the ISA manual (see the
   sketch after this list).
 - Updated litmus tests to reflect the relevant leading fences present
   in the RISCV implementation.
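
A word-sized sketch of the pairing the ISA manual suggests and v2
adopts (LR.aqrl with SC.rl) is below.  It is illustrative only: the
helper name and word-sized operands are hypothetical, and the actual
patch touches the subword routines in libgcc/config/riscv/atomic.c.

  /* Not part of the patch: SEQ_CST compare-and-swap on a word using
     the LR.aqrl/SC.rl pairing adopted in v2.  */
  static inline unsigned
  cas_w_seq_cst (volatile unsigned *mem, unsigned o, unsigned n)
  {
    unsigned old, tmp;
    asm volatile ("1:\n\t"
                  "lr.w.aqrl %[old], %[mem]\n\t"
                  "bne %[old], %[o], 1f\n\t"
                  "sc.w.rl %[tmp], %[n], %[mem]\n\t"
                  "bnez %[tmp], 1b\n\t"
                  "1:"
                  : [old] "=&r" (old), [tmp] "=&r" (tmp),
                    [mem] "+A" (*mem)
                  : [o] "r" (o), [n] "r" (n)
                  : "memory");
    return old;
  }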
---
RISCV LRSC-BUGFIX

{
0:x6=a; 0:x8=b; 0:x10=c;
1:x6=a; 1:x8=b; 1:x10=c;
}

 P0                   | P1           ;
 ori x1,x0,1          | lw x7,0(x8)  ;
 sw x1,0(x10)         | fence rw,rw  ;
 lr.w.aqrl x7,0(x8)   | lw x5,0(x6)  ;
 ori x7,x0,1          |              ;
 sc.w.rl x3,x7,0(x8)  |              ;
 fence rw,w           |              ;
 sw x1,0(x6)          |              ;

~exists (1:x7=1 /\ 1:x5=0 /\ b=1)
---
Below are other Herd litmus tests used to test the LR.aqrl/SC.rl fix.
---
RISCV LRSC-READ

(*
  LR/SC with .aq .rl bits does not allow read operations to be reordered
  within/above it.
*)

{
0:x6=a; 0:x8=b;
1:x6=a; 1:x8=b;
}

 P0                  | P1          ;
 lr.w.aq.rl x7,0(x8) | ori x1,x0,1 ;
 ori x7,x0,1         | sw x1,0(x6) ;
 sc.w.rl x1,x7,0(x8) | fence rw,rw ;
 fence rw,rw         | lw x7,0(x8) ;
 lw x5,0(x6)         |             ;

~exists (0:x5=0 /\ 1:x7=0 /\ b=1)
---
RISCV READ-LRSC

(*
  LR/SC with .aq .rl bits does not allow read operations to be reordered
  within/beneath it.
*)

{
0:x6=a; 0:x8=b;
1:x6=a; 1:x8=b;
}

 P0                  | P1          ;
 lw x5,0(x6)         | ori x1,x0,1 ;
 lr.w.aq.rl x7,0(x8) | sw x1,0(x8) ;
 ori x1,x0,1         | fence rw,rw ;
 sc.w.rl x1,x1,0(x8) | sw x1,0(x6) ;

~exists (0:x5=1 /\ 0:x7=0 /\ b=1)
---
RISCV LRSC-WRITE

(* 
  LR/SC with .aq .rl bits does not allow write operations to be reordered
  within/above it.
*)

{
0:x8=b; 0:x10=c;
1:x8=b; 1:x10=c;
}

 P0                  | P1          ;
 ori x9,x0,1         | lw x9,0(x10);
 lr.w.aq.rl x7,0(x8) | fence rw,rw ;
 ori x7,x0,1         | lw x7,0(x8) ;
 sc.w.rl x1,x7,0(x8) |             ;
 fence rw,w          |             ;
 sw x9,0(x10)        |             ;

~exists (1:x9=1 /\ 1:x7=0 /\ b=1)
---
RISCV WRITE-LRSC

(*
  LR/SC with .aq .rl bits does not allow write operations to be reordered
  within/beneath it.
*)

{
0:x8=b; 0:x10=c;
1:x8=b; 1:x10=c;
}

 P0                  | P1           ;
 ori x1,x0,1         | ori x1,x0,1  ;
 sw x1,0(x10)        | sw x1,0(x8)  ;
 lr.w.aq.rl x7,0(x8) | fence rw,rw  ;
 sc.w.rl x1,x1,0(x8) | lw x9,0(x10) ;

~exists (0:x7=0 /\ 1:x9=0 /\ b=1)
---
 libgcc/config/riscv/atomic.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/libgcc/config/riscv/atomic.c b/libgcc/config/riscv/atomic.c
index 7007e7a20e4..fa0e428963f 100644
--- a/libgcc/config/riscv/atomic.c
+++ b/libgcc/config/riscv/atomic.c
@@ -39,7 +39,7 @@ see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
     unsigned old, tmp1, tmp2;						\
 									\
     asm volatile ("1:\n\t"						\
-		  "lr.w.aq %[old], %[mem]\n\t"				\
+		  "lr.w.aqrl %[old], %[mem]\n\t"			\
 		  #insn " %[tmp1], %[old], %[value]\n\t"		\
 		  invert						\
 		  "and %[tmp1], %[tmp1], %[mask]\n\t"			\
@@ -73,7 +73,7 @@ see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
     unsigned old, tmp1;							\
 									\
     asm volatile ("1:\n\t"						\
-		  "lr.w.aq %[old], %[mem]\n\t"				\
+		  "lr.w.aqrl %[old], %[mem]\n\t"				\
 		  "and %[tmp1], %[old], %[mask]\n\t"			\
 		  "bne %[tmp1], %[o], 1f\n\t"				\
 		  "and %[tmp1], %[old], %[not_mask]\n\t"		\
-- 
2.25.1

