* [PATCH v3 2/4] gdb/testsuite: fix gdb.reverse/solib-*.exp tests when using clang
2023-08-14 10:48 [PATCH v3 0/4] Many fixes to gdb.reverse tests when testing Guinevere Larsen
2023-08-14 10:48 ` [PATCH v3 1/4] gdb/testsuite: Fix many errors in gdb.reverse with clang Guinevere Larsen
@ 2023-08-14 10:48 ` Guinevere Larsen
2023-08-14 10:48 ` [PATCH v3 3/4] gdb/testsuite: fix testing gdb.reverse/step-reverse.exp with clang Guinevere Larsen
2023-08-14 10:48 ` [PATCH v3 4/4] gdb/testsuite: Multiple improvements for gdb.reverse/insn-reverse.exp Guinevere Larsen
3 siblings, 0 replies; 7+ messages in thread
From: Guinevere Larsen @ 2023-08-14 10:48 UTC (permalink / raw)
To: gdb-patches; +Cc: Guinevere Larsen
The tests gdb.reverse/solib-precsave.exp and solib-reverse.exp assume that
line tables will have an entry for the closing } of a function. Not all
compilers do this, one example being clang. To fix this, this commit changes
the function in shr2.c to span multiple lines, and the tests to accept either
line as a correct step location.
To properly re-sync the inferiors, the function repeat_cmd_until had to
be slightly changed to work with empty "current locations", so that we
are able to step through multiple lines.
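For reference, a sketch of the new call pattern in these tests (the exact
calls appear in the hunks below); since the target pattern is now tried
before the current pattern, an empty "current location" regexp, which
matches any stop, no longer shadows the target:

  repeat_cmd_until "reverse-step" "" "main .. at" "reverse-step back to main one"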
This also changes the annotations used to determine the breakpoint
locations in solib-reverse.c, adding a simple variable assignment right
before the return statement. This way GDB will not set a breakpoint on
the closing } line.
---
gdb/testsuite/gdb.reverse/shr2.c | 3 ++-
gdb/testsuite/gdb.reverse/solib-precsave.exp | 24 ++++++++++++++++----
gdb/testsuite/gdb.reverse/solib-reverse.c | 5 ++--
gdb/testsuite/gdb.reverse/solib-reverse.exp | 24 ++++++++++++++++----
gdb/testsuite/lib/gdb.exp | 6 ++---
5 files changed, 48 insertions(+), 14 deletions(-)
diff --git a/gdb/testsuite/gdb.reverse/shr2.c b/gdb/testsuite/gdb.reverse/shr2.c
index 84a03de1b8c..16c3bf3bc6c 100644
--- a/gdb/testsuite/gdb.reverse/shr2.c
+++ b/gdb/testsuite/gdb.reverse/shr2.c
@@ -19,7 +19,8 @@
int shr2(int x)
{
- return 2*x;
+ int y = 2*x;
+ return y;
}
int shr2_local(int x)
diff --git a/gdb/testsuite/gdb.reverse/solib-precsave.exp b/gdb/testsuite/gdb.reverse/solib-precsave.exp
index 3ca73828063..f89969222c6 100644
--- a/gdb/testsuite/gdb.reverse/solib-precsave.exp
+++ b/gdb/testsuite/gdb.reverse/solib-precsave.exp
@@ -140,8 +140,17 @@ gdb_test_multiple "reverse-step" "reverse-step into solib function one" {
pass $gdb_test_name
}
}
-gdb_test "reverse-step" "return 2.x.*" "reverse-step within solib function one"
-gdb_test "reverse-step" " middle part two.*" "reverse-step back to main one"
+# Depending on whether the closing } has a line associated, we might have
+# different acceptable results here
+gdb_test_multiple "reverse-step" "reverse-step within solib function one" {
+ -re -wrap "return y;.*" {
+ pass $gdb_test_name
+ }
+ -re -wrap "int y =.*" {
+ pass $gdb_test_name
+ }
+}
+repeat_cmd_until "reverse-step" "" "main .. at" "reverse-step back to main one"
gdb_test_multiple "reverse-step" "reverse-step into solib function two" {
-re -wrap "begin part two.*" {
@@ -152,8 +161,15 @@ gdb_test_multiple "reverse-step" "reverse-step into solib function two" {
pass $gdb_test_name
}
}
-gdb_test "reverse-step" "return 2.x.*" "reverse-step within solib function two"
-gdb_test "reverse-step" " begin part two.*" "reverse-step back to main two"
+gdb_test_multiple "reverse-step" "reverse-step within solib function two" {
+ -re -wrap "return y;.*" {
+ pass $gdb_test_name
+ }
+ -re -wrap "int y =.*" {
+ pass $gdb_test_name
+ }
+}
+repeat_cmd_until "reverse-step" "" "main .. at" "reverse-step back to main two"
#
# Test reverse-next over debuggable solib function
diff --git a/gdb/testsuite/gdb.reverse/solib-reverse.c b/gdb/testsuite/gdb.reverse/solib-reverse.c
index 7aa60089df3..20288e0bee8 100644
--- a/gdb/testsuite/gdb.reverse/solib-reverse.c
+++ b/gdb/testsuite/gdb.reverse/solib-reverse.c
@@ -43,6 +43,7 @@ int main ()
shr1 ("message 2\n"); /* shr1 two */
shr1 ("message 3\n"); /* shr1 three */
- return 0; /* end part one */
-} /* end of main */
+ b[0] = 0; /* end part one */
+ return 0; /* end of main */
+}
diff --git a/gdb/testsuite/gdb.reverse/solib-reverse.exp b/gdb/testsuite/gdb.reverse/solib-reverse.exp
index c061086a8d7..afc4659184a 100644
--- a/gdb/testsuite/gdb.reverse/solib-reverse.exp
+++ b/gdb/testsuite/gdb.reverse/solib-reverse.exp
@@ -116,8 +116,17 @@ gdb_test_multiple "reverse-step" "reverse-step into solib function one" {
pass $gdb_test_name
}
}
-gdb_test "reverse-step" "return 2.x.*" "reverse-step within solib function one"
-gdb_test "reverse-step" " middle part two.*" "reverse-step back to main one"
+# Depending on whether the closing } has a line associated, we might have
+# different acceptable results here
+gdb_test_multiple "reverse-step" "reverse-step within solib function one" {
+ -re -wrap "return y;.*" {
+ pass $gdb_test_name
+ }
+ -re -wrap "int y =.*" {
+ pass $gdb_test_name
+ }
+}
+repeat_cmd_until "reverse-step" "" "main .. at" "reverse-step back to main one"
gdb_test_multiple "reverse-step" "reverse-step into solib function two" {
-re -wrap "begin part two.*" {
@@ -128,8 +137,15 @@ gdb_test_multiple "reverse-step" "reverse-step into solib function two" {
pass $gdb_test_name
}
}
-gdb_test "reverse-step" "return 2.x.*" "reverse-step within solib function two"
-gdb_test "reverse-step" " begin part two.*" "reverse-step back to main two"
+gdb_test_multiple "reverse-step" "reverse-step within solib function two" {
+ -re -wrap "return y;.*" {
+ pass $gdb_test_name
+ }
+ -re -wrap "int y =.*" {
+ pass $gdb_test_name
+ }
+}
+repeat_cmd_until "reverse-step" "" "main .. at" "reverse-step back to main two"
#
# Test reverse-next over debuggable solib function
diff --git a/gdb/testsuite/lib/gdb.exp b/gdb/testsuite/lib/gdb.exp
index 36bf738c667..8432e636ecc 100644
--- a/gdb/testsuite/lib/gdb.exp
+++ b/gdb/testsuite/lib/gdb.exp
@@ -9716,6 +9716,9 @@ proc repeat_cmd_until { command current target \
set count 0
gdb_test_multiple "$command" "$test_name" {
+ -re "$target.*$gdb_prompt $" {
+ pass "$test_name"
+ }
-re "$current.*$gdb_prompt $" {
incr count
if { $count < $max_steps } {
@@ -9725,9 +9728,6 @@ proc repeat_cmd_until { command current target \
fail "$test_name"
}
}
- -re "$target.*$gdb_prompt $" {
- pass "$test_name"
- }
}
}
--
2.41.0
* [PATCH v3 4/4] gdb/testsuite: Multiple improvements for gdb.reverse/insn-reverse.exp
2023-08-14 10:48 [PATCH v3 0/4] Many fixes to gdb.reverse tests when testing Guinevere Larsen
` (2 preceding siblings ...)
2023-08-14 10:48 ` [PATCH v3 3/4] gdb/testsuite: fix testing gdb.reverse/step-reverse.exp with clang Guinevere Larsen
@ 2023-08-14 10:48 ` Guinevere Larsen
3 siblings, 0 replies; 7+ messages in thread
From: Guinevere Larsen @ 2023-08-14 10:48 UTC (permalink / raw)
To: gdb-patches; +Cc: Guinevere Larsen
This commit tackles two problems in the test
gdb.reverse/insn-reverse.exp. They are, broadly: flawed logic when an
unexpected error occurs, and badly formed asm statements.
For the first: if the inferior stops making progress for some reason, the
test emits an UNSUPPORTED and continues testing by reversing from the
current location and checking all registers for every instruction. However,
due to how the outputs are indexed in the test, this early exit causes most
of the subsequent tests to become de-synced and emits many unrelated
failures.
This commit changes the UNSUPPORTED to a FAIL, since the test has in
fact failed to record the execution of the whole function, and
decrements the recorded instruction count by one so that the indexes are
back in sync.
At the time of committing, this reduces the number of failures when
testing with clang-15 from around 150 to 2, and correctly identifies
where the issue lies.
The second problem is in how the asm statements in the *-x86.c file
are written. As an example, let's examine the following line:
__asm__ volatile ("rdrand %%ebp;" : "=r" (number));
This statement says that number is used as the output variable, but it does
not indicate which registers were clobbered, so the compiler has no way to
place the output correctly. GCC decides to just not save anything, whereas
clang assumes that the output is in %rax and writes that register to the
variable. This hid the problem that compilers in general do not deal well
with asm statements that change the rbp register. It can be seen more
explicitly by informing GCC that rbp has been clobbered, like so:
__asm__ volatile ("rdrand %%ebp;" : "=r" (number) : : "%ebp");
This statement gets compiled into the following assembly:
rdrandl %ebp
movl %eax, -4(%rbp)
which clearly uses the wrong rbp to find the memory location of the
variable. Since the test only exercises GDB's ability to record register
changes, this commit removes the output to memory.
Finally, correctly informing the compiler of the clobbered registers makes
GCC emit an error that rsp is no longer usable at the end of the function.
To avoid that, this commit compresses the 3 asm statements that save, change
and restore a register into a single asm statement, as sketched below.
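For reference, a minimal sketch of the combined form (mirroring the 64-bit
rbp case from the hunks below): the old value is parked in rax, rdrand
clobbers the register, and the old value is restored, all within one
statement whose clobber list only names rax:

  /* Save, clobber and restore %rbp in a single asm statement, so the
     compiler never observes the frame pointer in a modified state.  */
  __asm__ volatile ("mov %%rbp, %%rax;\n\
                     rdrand %%rbp;\n\
                     mov %%rax, %%rbp;" : : : "%rax");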
---
gdb/testsuite/gdb.reverse/insn-reverse-x86.c | 324 +++++++++----------
gdb/testsuite/gdb.reverse/insn-reverse.exp | 4 +-
2 files changed, 165 insertions(+), 163 deletions(-)
diff --git a/gdb/testsuite/gdb.reverse/insn-reverse-x86.c b/gdb/testsuite/gdb.reverse/insn-reverse-x86.c
index da9999e9942..2b4fb4c10e0 100644
--- a/gdb/testsuite/gdb.reverse/insn-reverse-x86.c
+++ b/gdb/testsuite/gdb.reverse/insn-reverse-x86.c
@@ -64,100 +64,100 @@ rdrand (void)
return;
/* 16-bit random numbers. */
- __asm__ volatile ("rdrand %%ax;" : "=r" (number));
- __asm__ volatile ("rdrand %%bx;" : "=r" (number));
- __asm__ volatile ("rdrand %%cx;" : "=r" (number));
- __asm__ volatile ("rdrand %%dx;" : "=r" (number));
+ __asm__ volatile ("rdrand %%ax;": : : "%ax");
+ __asm__ volatile ("rdrand %%bx;": : : "%bx");
+ __asm__ volatile ("rdrand %%cx;": : : "%cx");
+ __asm__ volatile ("rdrand %%dx;": : : "%dx");
- __asm__ volatile ("mov %%di, %%ax;" : "=r" (number));
- __asm__ volatile ("rdrand %%di;" : "=r" (number));
- __asm__ volatile ("mov %%ax, %%di;" : "=r" (number));
+ __asm__ volatile ("mov %%di, %%ax;\n\
+ rdrand %%di;\n\
+ mov %%ax, %%di;" : : : "%ax");
- __asm__ volatile ("mov %%si, %%ax;" : "=r" (number));
- __asm__ volatile ("rdrand %%si;" : "=r" (number));
- __asm__ volatile ("mov %%ax, %%si;" : "=r" (number));
+ __asm__ volatile ("mov %%si, %%ax;\n\
+ rdrand %%si;\n\
+ mov %%ax, %%si;" : : : "%ax");
- __asm__ volatile ("mov %%bp, %%ax;" : "=r" (number));
- __asm__ volatile ("rdrand %%bp;" : "=r" (number));
- __asm__ volatile ("mov %%ax, %%bp;" : "=r" (number));
+ __asm__ volatile ("mov %%bp, %%ax;\n\
+ rdrand %%bp;\n\
+ mov %%ax, %%bp;" : : : "%ax");
- __asm__ volatile ("mov %%sp, %%ax;" : "=r" (number));
- __asm__ volatile ("rdrand %%sp;" : "=r" (number));
- __asm__ volatile ("mov %%ax, %%sp;" : "=r" (number));
+ __asm__ volatile ("mov %%sp, %%ax;\n\
+ rdrand %%sp;\n\
+ mov %%ax, %%sp;" : : : "%ax");
#ifdef __x86_64__
- __asm__ volatile ("rdrand %%r8w;" : "=r" (number));
- __asm__ volatile ("rdrand %%r9w;" : "=r" (number));
- __asm__ volatile ("rdrand %%r10w;" : "=r" (number));
- __asm__ volatile ("rdrand %%r11w;" : "=r" (number));
- __asm__ volatile ("rdrand %%r12w;" : "=r" (number));
- __asm__ volatile ("rdrand %%r13w;" : "=r" (number));
- __asm__ volatile ("rdrand %%r14w;" : "=r" (number));
- __asm__ volatile ("rdrand %%r15w;" : "=r" (number));
+ __asm__ volatile ("rdrand %%r8w;": : : "%r8");
+ __asm__ volatile ("rdrand %%r9w;": : : "%r9");
+ __asm__ volatile ("rdrand %%r10w;": : : "%r10");
+ __asm__ volatile ("rdrand %%r11w;": : : "%r11");
+ __asm__ volatile ("rdrand %%r12w;": : : "%r12");
+ __asm__ volatile ("rdrand %%r13w;": : : "%r13");
+ __asm__ volatile ("rdrand %%r14w;": : : "%r14");
+ __asm__ volatile ("rdrand %%r15w;": : : "%r15");
#endif
/* 32-bit random numbers. */
- __asm__ volatile ("rdrand %%eax;" : "=r" (number));
- __asm__ volatile ("rdrand %%ebx;" : "=r" (number));
- __asm__ volatile ("rdrand %%ecx;" : "=r" (number));
- __asm__ volatile ("rdrand %%edx;" : "=r" (number));
+ __asm__ volatile ("rdrand %%eax;": : : "%eax");
+ __asm__ volatile ("rdrand %%ebx;": : : "%ebx");
+ __asm__ volatile ("rdrand %%ecx;": : : "%ecx");
+ __asm__ volatile ("rdrand %%edx;": : : "%edx");
#ifdef __x86_64__
- __asm__ volatile ("mov %%rdi, %%rax;" : "=r" (number));
- __asm__ volatile ("rdrand %%edi;" : "=r" (number));
- __asm__ volatile ("mov %%rax, %%rdi;" : "=r" (number));
-
- __asm__ volatile ("mov %%rsi, %%rax;" : "=r" (number));
- __asm__ volatile ("rdrand %%esi;" : "=r" (number));
- __asm__ volatile ("mov %%rax, %%rsi;" : "=r" (number));
-
- __asm__ volatile ("mov %%rbp, %%rax;" : "=r" (number));
- __asm__ volatile ("rdrand %%ebp;" : "=r" (number));
- __asm__ volatile ("mov %%rax, %%rbp;" : "=r" (number));
-
- __asm__ volatile ("mov %%rsp, %%rax;" : "=r" (number));
- __asm__ volatile ("rdrand %%esp;" : "=r" (number));
- __asm__ volatile ("mov %%rax, %%rsp;" : "=r" (number));
-
- __asm__ volatile ("rdrand %%r8d;" : "=r" (number));
- __asm__ volatile ("rdrand %%r9d;" : "=r" (number));
- __asm__ volatile ("rdrand %%r10d;" : "=r" (number));
- __asm__ volatile ("rdrand %%r11d;" : "=r" (number));
- __asm__ volatile ("rdrand %%r12d;" : "=r" (number));
- __asm__ volatile ("rdrand %%r13d;" : "=r" (number));
- __asm__ volatile ("rdrand %%r14d;" : "=r" (number));
- __asm__ volatile ("rdrand %%r15d;" : "=r" (number));
+ __asm__ volatile ("mov %%rdi, %%rax;\n\
+ rdrand %%edi;\n\
+ mov %%rax, %%rdi;" : : : "%rax");
+
+ __asm__ volatile ("mov %%rsi, %%rax;\n\
+ rdrand %%esi;\n\
+ mov %%rax, %%rsi;" : : : "%rax");
+
+ __asm__ volatile ("mov %%rbp, %%rax;\n\
+ rdrand %%ebp;\n\
+ mov %%rax, %%rbp;" : : : "%rax");
+
+ __asm__ volatile ("mov %%rsp, %%rax;\n\
+ rdrand %%esp;\n\
+ mov %%rax, %%rsp;" : : : "%rax");
+
+ __asm__ volatile ("rdrand %%r8d;": : : "%r8");
+ __asm__ volatile ("rdrand %%r9d;": : : "%r9");
+ __asm__ volatile ("rdrand %%r10d;": : : "%r10");
+ __asm__ volatile ("rdrand %%r11d;": : : "%r11");
+ __asm__ volatile ("rdrand %%r12d;": : : "%r12");
+ __asm__ volatile ("rdrand %%r13d;": : : "%r13");
+ __asm__ volatile ("rdrand %%r14d;": : : "%r14");
+ __asm__ volatile ("rdrand %%r15d;": : : "%r15");
/* 64-bit random numbers. */
- __asm__ volatile ("rdrand %%rax;" : "=r" (number));
- __asm__ volatile ("rdrand %%rbx;" : "=r" (number));
- __asm__ volatile ("rdrand %%rcx;" : "=r" (number));
- __asm__ volatile ("rdrand %%rdx;" : "=r" (number));
-
- __asm__ volatile ("mov %%rdi, %%rax;" : "=r" (number));
- __asm__ volatile ("rdrand %%rdi;" : "=r" (number));
- __asm__ volatile ("mov %%rax, %%rdi;" : "=r" (number));
-
- __asm__ volatile ("mov %%rsi, %%rax;" : "=r" (number));
- __asm__ volatile ("rdrand %%rsi;" : "=r" (number));
- __asm__ volatile ("mov %%rax, %%rsi;" : "=r" (number));
-
- __asm__ volatile ("mov %%rbp, %%rax;" : "=r" (number));
- __asm__ volatile ("rdrand %%rbp;" : "=r" (number));
- __asm__ volatile ("mov %%rax, %%rbp;" : "=r" (number));
-
- __asm__ volatile ("mov %%rsp, %%rax;" : "=r" (number));
- __asm__ volatile ("rdrand %%rsp;" : "=r" (number));
- __asm__ volatile ("mov %%rax, %%rsp;" : "=r" (number));
-
- __asm__ volatile ("rdrand %%r8;" : "=r" (number));
- __asm__ volatile ("rdrand %%r9;" : "=r" (number));
- __asm__ volatile ("rdrand %%r10;" : "=r" (number));
- __asm__ volatile ("rdrand %%r11;" : "=r" (number));
- __asm__ volatile ("rdrand %%r12;" : "=r" (number));
- __asm__ volatile ("rdrand %%r13;" : "=r" (number));
- __asm__ volatile ("rdrand %%r14;" : "=r" (number));
- __asm__ volatile ("rdrand %%r15;" : "=r" (number));
+ __asm__ volatile ("rdrand %%rax;": : : "%rax");
+ __asm__ volatile ("rdrand %%rbx;": : : "%rbx");
+ __asm__ volatile ("rdrand %%rcx;": : : "%rcx");
+ __asm__ volatile ("rdrand %%rdx;": : : "%rdx");
+
+ __asm__ volatile ("mov %%rdi, %%rax;\n\
+ rdrand %%rdi;\n\
+ mov %%rax, %%rdi;" : : : "%rax");
+
+ __asm__ volatile ("mov %%rsi, %%rax;\n\
+ rdrand %%rsi;\n\
+ mov %%rax, %%rsi;" : : : "%rax");
+
+ __asm__ volatile ("mov %%rbp, %%rax;\n\
+ rdrand %%rbp;\n\
+ mov %%rax, %%rbp;" : : : "%rax");
+
+ __asm__ volatile ("mov %%rsp, %%rax;\n\
+ rdrand %%rsp;\n\
+ mov %%rax, %%rsp;" : : : "%rax");
+
+ __asm__ volatile ("rdrand %%r8;": : : "%r8");
+ __asm__ volatile ("rdrand %%r9;": : : "%r9");
+ __asm__ volatile ("rdrand %%r10;": : : "%r10");
+ __asm__ volatile ("rdrand %%r11;": : : "%r11");
+ __asm__ volatile ("rdrand %%r12;": : : "%r12");
+ __asm__ volatile ("rdrand %%r13;": : : "%r13");
+ __asm__ volatile ("rdrand %%r14;": : : "%r14");
+ __asm__ volatile ("rdrand %%r15;": : : "%r15");
#endif
}
@@ -173,100 +173,100 @@ rdseed (void)
return;
/* 16-bit random seeds. */
- __asm__ volatile ("rdseed %%ax;" : "=r" (seed));
- __asm__ volatile ("rdseed %%bx;" : "=r" (seed));
- __asm__ volatile ("rdseed %%cx;" : "=r" (seed));
- __asm__ volatile ("rdseed %%dx;" : "=r" (seed));
+ __asm__ volatile ("rdseed %%ax;": : : "%ax");
+ __asm__ volatile ("rdseed %%bx;": : : "%bx");
+ __asm__ volatile ("rdseed %%cx;": : : "%cx");
+ __asm__ volatile ("rdseed %%dx;": : : "%dx");
- __asm__ volatile ("mov %%di, %%ax;" : "=r" (seed));
- __asm__ volatile ("rdseed %%di;" : "=r" (seed));
- __asm__ volatile ("mov %%ax, %%di;" : "=r" (seed));
+ __asm__ volatile ("mov %%di, %%ax;\n\
+ rdseed %%di;\n\
+ mov %%ax, %%di;" : : : "%ax");
- __asm__ volatile ("mov %%si, %%ax;" : "=r" (seed));
- __asm__ volatile ("rdseed %%si;" : "=r" (seed));
- __asm__ volatile ("mov %%ax, %%si;" : "=r" (seed));
+ __asm__ volatile ("mov %%si, %%ax;\n\
+ rdseed %%si;\n\
+ mov %%ax, %%si;" : : : "%ax");
- __asm__ volatile ("mov %%bp, %%ax;" : "=r" (seed));
- __asm__ volatile ("rdseed %%bp;" : "=r" (seed));
- __asm__ volatile ("mov %%ax, %%bp;" : "=r" (seed));
+ __asm__ volatile ("mov %%bp, %%ax;\n\
+ rdseed %%bp;\n\
+ mov %%ax, %%bp;" : : : "%ax");
- __asm__ volatile ("mov %%sp, %%ax;" : "=r" (seed));
- __asm__ volatile ("rdseed %%sp;" : "=r" (seed));
- __asm__ volatile ("mov %%ax, %%sp;" : "=r" (seed));
+ __asm__ volatile ("mov %%sp, %%ax;\n\
+ rdseed %%sp;\n\
+ mov %%ax, %%sp;" : : : "%ax");
#ifdef __x86_64__
- __asm__ volatile ("rdseed %%r8w;" : "=r" (seed));
- __asm__ volatile ("rdseed %%r9w;" : "=r" (seed));
- __asm__ volatile ("rdseed %%r10w;" : "=r" (seed));
- __asm__ volatile ("rdseed %%r11w;" : "=r" (seed));
- __asm__ volatile ("rdseed %%r12w;" : "=r" (seed));
- __asm__ volatile ("rdseed %%r13w;" : "=r" (seed));
- __asm__ volatile ("rdseed %%r14w;" : "=r" (seed));
- __asm__ volatile ("rdseed %%r15w;" : "=r" (seed));
+ __asm__ volatile ("rdseed %%r8w;": : : "%r8");
+ __asm__ volatile ("rdseed %%r9w;": : : "%r9");
+ __asm__ volatile ("rdseed %%r10w;": : : "%r10");
+ __asm__ volatile ("rdseed %%r11w;": : : "%r11");
+ __asm__ volatile ("rdseed %%r12w;": : : "%r12");
+ __asm__ volatile ("rdseed %%r13w;": : : "%r13");
+ __asm__ volatile ("rdseed %%r14w;": : : "%r14");
+ __asm__ volatile ("rdseed %%r15w;": : : "%r15");
#endif
/* 32-bit random seeds. */
- __asm__ volatile ("rdseed %%eax;" : "=r" (seed));
- __asm__ volatile ("rdseed %%ebx;" : "=r" (seed));
- __asm__ volatile ("rdseed %%ecx;" : "=r" (seed));
- __asm__ volatile ("rdseed %%edx;" : "=r" (seed));
+ __asm__ volatile ("rdseed %%eax;": : : "%eax");
+ __asm__ volatile ("rdseed %%ebx;": : : "%ebx");
+ __asm__ volatile ("rdseed %%ecx;": : : "%ecx");
+ __asm__ volatile ("rdseed %%edx;": : : "%edx");
#ifdef __x86_64__
- __asm__ volatile ("mov %%rdi, %%rax;" : "=r" (seed));
- __asm__ volatile ("rdseed %%edi;" : "=r" (seed));
- __asm__ volatile ("mov %%rax, %%rdi;" : "=r" (seed));
-
- __asm__ volatile ("mov %%rsi, %%rax;" : "=r" (seed));
- __asm__ volatile ("rdseed %%esi;" : "=r" (seed));
- __asm__ volatile ("mov %%rax, %%rsi;" : "=r" (seed));
-
- __asm__ volatile ("mov %%rbp, %%rax;" : "=r" (seed));
- __asm__ volatile ("rdseed %%ebp;" : "=r" (seed));
- __asm__ volatile ("mov %%rax, %%rbp;" : "=r" (seed));
-
- __asm__ volatile ("mov %%rsp, %%rax;" : "=r" (seed));
- __asm__ volatile ("rdseed %%esp;" : "=r" (seed));
- __asm__ volatile ("mov %%rax, %%rsp;" : "=r" (seed));
-
- __asm__ volatile ("rdseed %%r8d;" : "=r" (seed));
- __asm__ volatile ("rdseed %%r9d;" : "=r" (seed));
- __asm__ volatile ("rdseed %%r10d;" : "=r" (seed));
- __asm__ volatile ("rdseed %%r11d;" : "=r" (seed));
- __asm__ volatile ("rdseed %%r12d;" : "=r" (seed));
- __asm__ volatile ("rdseed %%r13d;" : "=r" (seed));
- __asm__ volatile ("rdseed %%r14d;" : "=r" (seed));
- __asm__ volatile ("rdseed %%r15d;" : "=r" (seed));
+ __asm__ volatile ("mov %%rdi, %%rax;\n\
+ rdseed %%edi;\n\
+ mov %%rax, %%rdi;" : : : "%rax");
+
+ __asm__ volatile ("mov %%rsi, %%rax;\n\
+ rdseed %%esi;\n\
+ mov %%rax, %%rsi;" : : : "%rax");
+
+ __asm__ volatile ("mov %%rbp, %%rax;\n\
+ rdseed %%ebp;\n\
+ mov %%rax, %%rbp;" : : : "%rax");
+
+ __asm__ volatile ("mov %%rsp, %%rax;\n\
+ rdseed %%esp;\n\
+ mov %%rax, %%rsp;" : : : "%rax");
+
+ __asm__ volatile ("rdseed %%r8d;": : : "%r8");
+ __asm__ volatile ("rdseed %%r9d;": : : "%r9");
+ __asm__ volatile ("rdseed %%r10d;": : : "%r10");
+ __asm__ volatile ("rdseed %%r11d;": : : "%r11");
+ __asm__ volatile ("rdseed %%r12d;": : : "%r12");
+ __asm__ volatile ("rdseed %%r13d;": : : "%r13");
+ __asm__ volatile ("rdseed %%r14d;": : : "%r14");
+ __asm__ volatile ("rdseed %%r15d;": : : "%r15");
/* 64-bit random seeds. */
- __asm__ volatile ("rdseed %%rax;" : "=r" (seed));
- __asm__ volatile ("rdseed %%rbx;" : "=r" (seed));
- __asm__ volatile ("rdseed %%rcx;" : "=r" (seed));
- __asm__ volatile ("rdseed %%rdx;" : "=r" (seed));
-
- __asm__ volatile ("mov %%rdi, %%rax;" : "=r" (seed));
- __asm__ volatile ("rdseed %%rdi;" : "=r" (seed));
- __asm__ volatile ("mov %%rax, %%rdi;" : "=r" (seed));
-
- __asm__ volatile ("mov %%rsi, %%rax;" : "=r" (seed));
- __asm__ volatile ("rdseed %%rsi;" : "=r" (seed));
- __asm__ volatile ("mov %%rax, %%rsi;" : "=r" (seed));
-
- __asm__ volatile ("mov %%rbp, %%rax;" : "=r" (seed));
- __asm__ volatile ("rdseed %%rbp;" : "=r" (seed));
- __asm__ volatile ("mov %%rax, %%rbp;" : "=r" (seed));
-
- __asm__ volatile ("mov %%rsp, %%rax;" : "=r" (seed));
- __asm__ volatile ("rdseed %%rsp;" : "=r" (seed));
- __asm__ volatile ("mov %%rax, %%rsp;" : "=r" (seed));
-
- __asm__ volatile ("rdseed %%r8;" : "=r" (seed));
- __asm__ volatile ("rdseed %%r9;" : "=r" (seed));
- __asm__ volatile ("rdseed %%r10;" : "=r" (seed));
- __asm__ volatile ("rdseed %%r11;" : "=r" (seed));
- __asm__ volatile ("rdseed %%r12;" : "=r" (seed));
- __asm__ volatile ("rdseed %%r13;" : "=r" (seed));
- __asm__ volatile ("rdseed %%r14;" : "=r" (seed));
- __asm__ volatile ("rdseed %%r15;" : "=r" (seed));
+ __asm__ volatile ("rdseed %%rax;": : : "%rax");
+ __asm__ volatile ("rdseed %%rbx;": : : "%rbx");
+ __asm__ volatile ("rdseed %%rcx;": : : "%rcx");
+ __asm__ volatile ("rdseed %%rdx;": : : "%rdx");
+
+ __asm__ volatile ("mov %%rdi, %%rax;\n\
+ rdseed %%rdi;\n\
+ mov %%rax, %%rdi;" : : : "%rax");
+
+ __asm__ volatile ("mov %%rsi, %%rax;\n\
+ rdseed %%rsi;\n\
+ mov %%rax, %%rsi;" : : : "%rax");
+
+ __asm__ volatile ("mov %%rbp, %%rax;\n\
+ rdseed %%rbp;\n\
+ mov %%rax, %%rbp;" : : : "%rax");
+
+ __asm__ volatile ("mov %%rsp, %%rax;\n\
+ rdseed %%rsp;\n\
+ mov %%rax, %%rsp;" : : : "%rax");
+
+ __asm__ volatile ("rdseed %%r8;": : : "%r8");
+ __asm__ volatile ("rdseed %%r9;": : : "%r9");
+ __asm__ volatile ("rdseed %%r10;": : : "%r10");
+ __asm__ volatile ("rdseed %%r11;": : : "%r11");
+ __asm__ volatile ("rdseed %%r12;": : : "%r12");
+ __asm__ volatile ("rdseed %%r13;": : : "%r13");
+ __asm__ volatile ("rdseed %%r14;": : : "%r14");
+ __asm__ volatile ("rdseed %%r15;": : : "%r15");
#endif
}
diff --git a/gdb/testsuite/gdb.reverse/insn-reverse.exp b/gdb/testsuite/gdb.reverse/insn-reverse.exp
index 1a575b2d43e..206e765dd50 100644
--- a/gdb/testsuite/gdb.reverse/insn-reverse.exp
+++ b/gdb/testsuite/gdb.reverse/insn-reverse.exp
@@ -95,7 +95,9 @@ proc test { func testcase_nr } {
if { $prev_insn_addr == $insn_addr } {
# Failed to make progress, might have run into SIGILL.
- unsupported "no progress at: $expect_out(2,string)"
+ fail "no progress at: $expect_out(2,string)"
+ # Ignore the last instruction recorded
+ incr count -1
break
}
--
2.41.0