From: Bruno Larsen <blarsen@redhat.com>
To: gdb-patches@sourceware.org
Cc: Bruno Larsen <blarsen@redhat.com>
Subject: [PATCH 4/4] gdb/testsuite: Multiple improvements for gdb.reverse/insn-reverse.exp
Date: Tue, 25 Jul 2023 11:58:33 +0200
Message-ID: <20230725095833.236804-5-blarsen@redhat.com>
In-Reply-To: <20230725095833.236804-1-blarsen@redhat.com>
References: <20230725095833.236804-1-blarsen@redhat.com>

This commit tackles two problems in the test
gdb.reverse/insn-reverse.exp.  They are, broadly: flawed logic when an
unexpected error occurs, and badly formed asm statements.

For the first, if the inferior stops progressing for some reason, the
test emits an UNSUPPORTED and continues testing by reversing from the
current location and checking all registers for every instruction.
However, due to how the outputs are indexed in the test, this early
exit leaves most of the subsequent tests out of sync with the recorded
instructions and emits many unrelated failures.
This commit changes the UNSUPPORTED to a FAIL, since the test has in
fact failed to record the execution of the whole function, and
decrements the recorded instruction count by one so that the indexes
are in sync once more.  At the time of committing, this reduces the
number of failures when testing with clang-15 from around 150 to 2,
and correctly identifies where the issue lies.

The second problem is in how the asm statements in the *-x86.c file
are written.  As an example, let's examine the following line:

  __asm__ volatile ("rdrand %%ebp;" : "=r" (number));

This statement says that number is used as the output variable, but it
does not indicate which registers were clobbered, so the compiler is
unable to generate correct code around the statement.  GCC decides to
just not save anything, whereas clang assumes that the output is in
%rax and writes it to the variable.  This hid the problem that neither
compiler deals well with asm statements that change the rbp register.
It can be seen more explicitly by informing GCC that rbp has been
clobbered, like so:

  __asm__ volatile ("rdrand %%ebp;" : "=r" (number) : : "%ebp");

This statement gets compiled into the following assembly:

  rdrandl %ebp
  movl    %eax, -4(%rbp)

which clearly uses the already-clobbered rbp to find the memory
location of the variable.  Since the test only exercises GDB's ability
to record the register changes, this commit removes the output to
memory.  Finally, correctly informing the compiler of the clobbered
registers makes GCC throw an error that rsp is no longer usable at the
end of the function.  To avoid that, this commit compresses the three
asm statements that would save, change, and reset a register into a
single asm statement.
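
For registers such as rbp and rsp, which the compiler cannot simply be
told to avoid, the save/change/restore sequence therefore has to live
inside one asm statement, so the compiler never observes the register
in a modified state between statements.  A minimal sketch of the
pattern the diff below applies throughout (shown here for the 64-bit
rbp case; only rax needs to appear in the clobber list):

  /* Save rbp in rax, let rdrand overwrite rbp, then put the saved
     value back, all within a single asm statement.  */
  __asm__ volatile ("mov %%rbp, %%rax;\n\
                     rdrand %%rbp;\n\
                     mov %%rax, %%rbp;" : : : "%rax");

Each instruction still executes and is recorded separately, so GDB's
ability to undo the individual register changes is exercised all the
same.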

---
 gdb/testsuite/gdb.reverse/insn-reverse-x86.c | 324 +++++++++----------
 gdb/testsuite/gdb.reverse/insn-reverse.exp   |   4 +-
 2 files changed, 165 insertions(+), 163 deletions(-)

diff --git a/gdb/testsuite/gdb.reverse/insn-reverse-x86.c b/gdb/testsuite/gdb.reverse/insn-reverse-x86.c
index da9999e9942..2b4fb4c10e0 100644
--- a/gdb/testsuite/gdb.reverse/insn-reverse-x86.c
+++ b/gdb/testsuite/gdb.reverse/insn-reverse-x86.c
@@ -64,100 +64,100 @@ rdrand (void)
     return;
 
   /* 16-bit random numbers.  */
-  __asm__ volatile ("rdrand %%ax;" : "=r" (number));
-  __asm__ volatile ("rdrand %%bx;" : "=r" (number));
-  __asm__ volatile ("rdrand %%cx;" : "=r" (number));
-  __asm__ volatile ("rdrand %%dx;" : "=r" (number));
+  __asm__ volatile ("rdrand %%ax;": : : "%ax");
+  __asm__ volatile ("rdrand %%bx;": : : "%bx");
+  __asm__ volatile ("rdrand %%cx;": : : "%cx");
+  __asm__ volatile ("rdrand %%dx;": : : "%dx");
 
-  __asm__ volatile ("mov %%di, %%ax;" : "=r" (number));
-  __asm__ volatile ("rdrand %%di;" : "=r" (number));
-  __asm__ volatile ("mov %%ax, %%di;" : "=r" (number));
+  __asm__ volatile ("mov %%di, %%ax;\n\
+                     rdrand %%di;\n\
+                     mov %%ax, %%di;" : : : "%ax");
 
-  __asm__ volatile ("mov %%si, %%ax;" : "=r" (number));
-  __asm__ volatile ("rdrand %%si;" : "=r" (number));
-  __asm__ volatile ("mov %%ax, %%si;" : "=r" (number));
+  __asm__ volatile ("mov %%si, %%ax;\n\
+                     rdrand %%si;\n\
+                     mov %%ax, %%si;" : : : "%ax");
 
-  __asm__ volatile ("mov %%bp, %%ax;" : "=r" (number));
-  __asm__ volatile ("rdrand %%bp;" : "=r" (number));
-  __asm__ volatile ("mov %%ax, %%bp;" : "=r" (number));
+  __asm__ volatile ("mov %%bp, %%ax;\n\
+                     rdrand %%bp;\n\
+                     mov %%ax, %%bp;" : : : "%ax");
 
-  __asm__ volatile ("mov %%sp, %%ax;" : "=r" (number));
-  __asm__ volatile ("rdrand %%sp;" : "=r" (number));
-  __asm__ volatile ("mov %%ax, %%sp;" : "=r" (number));
+  __asm__ volatile ("mov %%sp, %%ax;\n\
+                     rdrand %%sp;\n\
+                     mov %%ax, %%sp;" : : : "%ax");
 
 #ifdef __x86_64__
-  __asm__ volatile ("rdrand %%r8w;" : "=r" (number));
-  __asm__ volatile ("rdrand %%r9w;" : "=r" (number));
-  __asm__ volatile ("rdrand %%r10w;" : "=r" (number));
-  __asm__ volatile ("rdrand %%r11w;" : "=r" (number));
-  __asm__ volatile ("rdrand %%r12w;" : "=r" (number));
-  __asm__ volatile ("rdrand %%r13w;" : "=r" (number));
-  __asm__ volatile ("rdrand %%r14w;" : "=r" (number));
-  __asm__ volatile ("rdrand %%r15w;" : "=r" (number));
+  __asm__ volatile ("rdrand %%r8w;": : : "%r8");
+  __asm__ volatile ("rdrand %%r9w;": : : "%r9");
+  __asm__ volatile ("rdrand %%r10w;": : : "%r10");
+  __asm__ volatile ("rdrand %%r11w;": : : "%r11");
+  __asm__ volatile ("rdrand %%r12w;": : : "%r12");
+  __asm__ volatile ("rdrand %%r13w;": : : "%r13");
+  __asm__ volatile ("rdrand %%r14w;": : : "%r14");
+  __asm__ volatile ("rdrand %%r15w;": : : "%r15");
 #endif
 
   /* 32-bit random numbers.  */
-  __asm__ volatile ("rdrand %%eax;" : "=r" (number));
-  __asm__ volatile ("rdrand %%ebx;" : "=r" (number));
-  __asm__ volatile ("rdrand %%ecx;" : "=r" (number));
-  __asm__ volatile ("rdrand %%edx;" : "=r" (number));
+  __asm__ volatile ("rdrand %%eax;": : : "%eax");
+  __asm__ volatile ("rdrand %%ebx;": : : "%ebx");
+  __asm__ volatile ("rdrand %%ecx;": : : "%ecx");
+  __asm__ volatile ("rdrand %%edx;": : : "%edx");
 
 #ifdef __x86_64__
-  __asm__ volatile ("mov %%rdi, %%rax;" : "=r" (number));
-  __asm__ volatile ("rdrand %%edi;" : "=r" (number));
-  __asm__ volatile ("mov %%rax, %%rdi;" : "=r" (number));
-
-  __asm__ volatile ("mov %%rsi, %%rax;" : "=r" (number));
-  __asm__ volatile ("rdrand %%esi;" : "=r" (number));
-  __asm__ volatile ("mov %%rax, %%rsi;" : "=r" (number));
-
-  __asm__ volatile ("mov %%rbp, %%rax;" : "=r" (number));
-  __asm__ volatile ("rdrand %%ebp;" : "=r" (number));
-  __asm__ volatile ("mov %%rax, %%rbp;" : "=r" (number));
-
-  __asm__ volatile ("mov %%rsp, %%rax;" : "=r" (number));
-  __asm__ volatile ("rdrand %%esp;" : "=r" (number));
-  __asm__ volatile ("mov %%rax, %%rsp;" : "=r" (number));
-
-  __asm__ volatile ("rdrand %%r8d;" : "=r" (number));
-  __asm__ volatile ("rdrand %%r9d;" : "=r" (number));
-  __asm__ volatile ("rdrand %%r10d;" : "=r" (number));
-  __asm__ volatile ("rdrand %%r11d;" : "=r" (number));
-  __asm__ volatile ("rdrand %%r12d;" : "=r" (number));
-  __asm__ volatile ("rdrand %%r13d;" : "=r" (number));
-  __asm__ volatile ("rdrand %%r14d;" : "=r" (number));
-  __asm__ volatile ("rdrand %%r15d;" : "=r" (number));
+  __asm__ volatile ("mov %%rdi, %%rax;\n\
+                     rdrand %%edi;\n\
+                     mov %%rax, %%rdi;" : : : "%rax");
+
+  __asm__ volatile ("mov %%rsi, %%rax;\n\
+                     rdrand %%esi;\n\
+                     mov %%rax, %%rsi;" : : : "%rax");
+
+  __asm__ volatile ("mov %%rbp, %%rax;\n\
+                     rdrand %%ebp;\n\
+                     mov %%rax, %%rbp;" : : : "%rax");
+
+  __asm__ volatile ("mov %%rsp, %%rax;\n\
+                     rdrand %%esp;\n\
+                     mov %%rax, %%rsp;" : : : "%rax");
+
+  __asm__ volatile ("rdrand %%r8d;": : : "%r8");
+  __asm__ volatile ("rdrand %%r9d;": : : "%r9");
+  __asm__ volatile ("rdrand %%r10d;": : : "%r10");
+  __asm__ volatile ("rdrand %%r11d;": : : "%r11");
+  __asm__ volatile ("rdrand %%r12d;": : : "%r12");
+  __asm__ volatile ("rdrand %%r13d;": : : "%r13");
+  __asm__ volatile ("rdrand %%r14d;": : : "%r14");
+  __asm__ volatile ("rdrand %%r15d;": : : "%r15");
 
   /* 64-bit random numbers.  */
-  __asm__ volatile ("rdrand %%rax;" : "=r" (number));
-  __asm__ volatile ("rdrand %%rbx;" : "=r" (number));
-  __asm__ volatile ("rdrand %%rcx;" : "=r" (number));
-  __asm__ volatile ("rdrand %%rdx;" : "=r" (number));
-
-  __asm__ volatile ("mov %%rdi, %%rax;" : "=r" (number));
-  __asm__ volatile ("rdrand %%rdi;" : "=r" (number));
-  __asm__ volatile ("mov %%rax, %%rdi;" : "=r" (number));
-
-  __asm__ volatile ("mov %%rsi, %%rax;" : "=r" (number));
-  __asm__ volatile ("rdrand %%rsi;" : "=r" (number));
-  __asm__ volatile ("mov %%rax, %%rsi;" : "=r" (number));
-
-  __asm__ volatile ("mov %%rbp, %%rax;" : "=r" (number));
-  __asm__ volatile ("rdrand %%rbp;" : "=r" (number));
-  __asm__ volatile ("mov %%rax, %%rbp;" : "=r" (number));
-
-  __asm__ volatile ("mov %%rsp, %%rax;" : "=r" (number));
-  __asm__ volatile ("rdrand %%rsp;" : "=r" (number));
-  __asm__ volatile ("mov %%rax, %%rsp;" : "=r" (number));
-
-  __asm__ volatile ("rdrand %%r8;" : "=r" (number));
-  __asm__ volatile ("rdrand %%r9;" : "=r" (number));
-  __asm__ volatile ("rdrand %%r10;" : "=r" (number));
-  __asm__ volatile ("rdrand %%r11;" : "=r" (number));
-  __asm__ volatile ("rdrand %%r12;" : "=r" (number));
-  __asm__ volatile ("rdrand %%r13;" : "=r" (number));
-  __asm__ volatile ("rdrand %%r14;" : "=r" (number));
-  __asm__ volatile ("rdrand %%r15;" : "=r" (number));
+  __asm__ volatile ("rdrand %%rax;": : : "%rax");
+  __asm__ volatile ("rdrand %%rbx;": : : "%rbx");
+  __asm__ volatile ("rdrand %%rcx;": : : "%rcx");
+  __asm__ volatile ("rdrand %%rdx;": : : "%rdx");
+
+  __asm__ volatile ("mov %%rdi, %%rax;\n\
+                     rdrand %%rdi;\n\
+                     mov %%rax, %%rdi;" : : : "%rax");
+
+  __asm__ volatile ("mov %%rsi, %%rax;\n\
+                     rdrand %%rsi;\n\
+                     mov %%rax, %%rsi;" : : : "%rax");
+
+  __asm__ volatile ("mov %%rbp, %%rax;\n\
+                     rdrand %%rbp;\n\
+                     mov %%rax, %%rbp;" : : : "%rax");
+
+  __asm__ volatile ("mov %%rsp, %%rax;\n\
+                     rdrand %%rsp;\n\
+                     mov %%rax, %%rsp;" : : : "%rax");
+
+  __asm__ volatile ("rdrand %%r8;": : : "%r8");
+  __asm__ volatile ("rdrand %%r9;": : : "%r9");
+  __asm__ volatile ("rdrand %%r10;": : : "%r10");
+  __asm__ volatile ("rdrand %%r11;": : : "%r11");
+  __asm__ volatile ("rdrand %%r12;": : : "%r12");
+  __asm__ volatile ("rdrand %%r13;": : : "%r13");
+  __asm__ volatile ("rdrand %%r14;": : : "%r14");
+  __asm__ volatile ("rdrand %%r15;": : : "%r15");
 #endif
 }
 
@@ -173,100 +173,100 @@ rdseed (void)
     return;
 
   /* 16-bit random seeds.  */
-  __asm__ volatile ("rdseed %%ax;" : "=r" (seed));
-  __asm__ volatile ("rdseed %%bx;" : "=r" (seed));
-  __asm__ volatile ("rdseed %%cx;" : "=r" (seed));
-  __asm__ volatile ("rdseed %%dx;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%ax;": : : "%ax");
+  __asm__ volatile ("rdseed %%bx;": : : "%bx");
+  __asm__ volatile ("rdseed %%cx;": : : "%cx");
+  __asm__ volatile ("rdseed %%dx;": : : "%dx");
 
-  __asm__ volatile ("mov %%di, %%ax;" : "=r" (seed));
-  __asm__ volatile ("rdseed %%di;" : "=r" (seed));
-  __asm__ volatile ("mov %%ax, %%di;" : "=r" (seed));
+  __asm__ volatile ("mov %%di, %%ax;\n\
+                     rdseed %%di;\n\
+                     mov %%ax, %%di;" : : : "%ax");
 
-  __asm__ volatile ("mov %%si, %%ax;" : "=r" (seed));
-  __asm__ volatile ("rdseed %%si;" : "=r" (seed));
-  __asm__ volatile ("mov %%ax, %%si;" : "=r" (seed));
+  __asm__ volatile ("mov %%si, %%ax;\n\
+                     rdseed %%si;\n\
+                     mov %%ax, %%si;" : : : "%ax");
 
-  __asm__ volatile ("mov %%bp, %%ax;" : "=r" (seed));
-  __asm__ volatile ("rdseed %%bp;" : "=r" (seed));
-  __asm__ volatile ("mov %%ax, %%bp;" : "=r" (seed));
+  __asm__ volatile ("mov %%bp, %%ax;\n\
+                     rdseed %%bp;\n\
+                     mov %%ax, %%bp;" : : : "%ax");
 
-  __asm__ volatile ("mov %%sp, %%ax;" : "=r" (seed));
-  __asm__ volatile ("rdseed %%sp;" : "=r" (seed));
-  __asm__ volatile ("mov %%ax, %%sp;" : "=r" (seed));
+  __asm__ volatile ("mov %%sp, %%ax;\n\
+                     rdseed %%sp;\n\
+                     mov %%ax, %%sp;" : : : "%ax");
 
 #ifdef __x86_64__
-  __asm__ volatile ("rdseed %%r8w;" : "=r" (seed));
-  __asm__ volatile ("rdseed %%r9w;" : "=r" (seed));
-  __asm__ volatile ("rdseed %%r10w;" : "=r" (seed));
-  __asm__ volatile ("rdseed %%r11w;" : "=r" (seed));
-  __asm__ volatile ("rdseed %%r12w;" : "=r" (seed));
-  __asm__ volatile ("rdseed %%r13w;" : "=r" (seed));
-  __asm__ volatile ("rdseed %%r14w;" : "=r" (seed));
-  __asm__ volatile ("rdseed %%r15w;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%r8w;": : : "%r8");
+  __asm__ volatile ("rdseed %%r9w;": : : "%r9");
+  __asm__ volatile ("rdseed %%r10w;": : : "%r10");
+  __asm__ volatile ("rdseed %%r11w;": : : "%r11");
+  __asm__ volatile ("rdseed %%r12w;": : : "%r12");
+  __asm__ volatile ("rdseed %%r13w;": : : "%r13");
+  __asm__ volatile ("rdseed %%r14w;": : : "%r14");
+  __asm__ volatile ("rdseed %%r15w;": : : "%r15");
 #endif
 
   /* 32-bit random seeds.  */
-  __asm__ volatile ("rdseed %%eax;" : "=r" (seed));
-  __asm__ volatile ("rdseed %%ebx;" : "=r" (seed));
-  __asm__ volatile ("rdseed %%ecx;" : "=r" (seed));
-  __asm__ volatile ("rdseed %%edx;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%eax;": : : "%eax");
+  __asm__ volatile ("rdseed %%ebx;": : : "%ebx");
+  __asm__ volatile ("rdseed %%ecx;": : : "%ecx");
+  __asm__ volatile ("rdseed %%edx;": : : "%edx");
 
 #ifdef __x86_64__
-  __asm__ volatile ("mov %%rdi, %%rax;" : "=r" (seed));
-  __asm__ volatile ("rdseed %%edi;" : "=r" (seed));
-  __asm__ volatile ("mov %%rax, %%rdi;" : "=r" (seed));
-
-  __asm__ volatile ("mov %%rsi, %%rax;" : "=r" (seed));
-  __asm__ volatile ("rdseed %%esi;" : "=r" (seed));
-  __asm__ volatile ("mov %%rax, %%rsi;" : "=r" (seed));
-
-  __asm__ volatile ("mov %%rbp, %%rax;" : "=r" (seed));
-  __asm__ volatile ("rdseed %%ebp;" : "=r" (seed));
-  __asm__ volatile ("mov %%rax, %%rbp;" : "=r" (seed));
-
-  __asm__ volatile ("mov %%rsp, %%rax;" : "=r" (seed));
-  __asm__ volatile ("rdseed %%esp;" : "=r" (seed));
-  __asm__ volatile ("mov %%rax, %%rsp;" : "=r" (seed));
-
-  __asm__ volatile ("rdseed %%r8d;" : "=r" (seed));
-  __asm__ volatile ("rdseed %%r9d;" : "=r" (seed));
-  __asm__ volatile ("rdseed %%r10d;" : "=r" (seed));
-  __asm__ volatile ("rdseed %%r11d;" : "=r" (seed));
-  __asm__ volatile ("rdseed %%r12d;" : "=r" (seed));
-  __asm__ volatile ("rdseed %%r13d;" : "=r" (seed));
-  __asm__ volatile ("rdseed %%r14d;" : "=r" (seed));
-  __asm__ volatile ("rdseed %%r15d;" : "=r" (seed));
+  __asm__ volatile ("mov %%rdi, %%rax;\n\
+                     rdseed %%edi;\n\
+                     mov %%rax, %%rdi;" : : : "%rax");
+
+  __asm__ volatile ("mov %%rsi, %%rax;\n\
+                     rdseed %%esi;\n\
+                     mov %%rax, %%rsi;" : : : "%rax");
+
+  __asm__ volatile ("mov %%rbp, %%rax;\n\
+                     rdseed %%ebp;\n\
+                     mov %%rax, %%rbp;" : : : "%rax");
+
+  __asm__ volatile ("mov %%rsp, %%rax;\n\
+                     rdseed %%esp;\n\
+                     mov %%rax, %%rsp;" : : : "%rax");
+
+  __asm__ volatile ("rdseed %%r8d;": : : "%r8");
+  __asm__ volatile ("rdseed %%r9d;": : : "%r9");
+  __asm__ volatile ("rdseed %%r10d;": : : "%r10");
+  __asm__ volatile ("rdseed %%r11d;": : : "%r11");
+  __asm__ volatile ("rdseed %%r12d;": : : "%r12");
+  __asm__ volatile ("rdseed %%r13d;": : : "%r13");
+  __asm__ volatile ("rdseed %%r14d;": : : "%r14");
+  __asm__ volatile ("rdseed %%r15d;": : : "%r15");
 
   /* 64-bit random seeds.  */
-  __asm__ volatile ("rdseed %%rax;" : "=r" (seed));
-  __asm__ volatile ("rdseed %%rbx;" : "=r" (seed));
-  __asm__ volatile ("rdseed %%rcx;" : "=r" (seed));
-  __asm__ volatile ("rdseed %%rdx;" : "=r" (seed));
-
-  __asm__ volatile ("mov %%rdi, %%rax;" : "=r" (seed));
-  __asm__ volatile ("rdseed %%rdi;" : "=r" (seed));
-  __asm__ volatile ("mov %%rax, %%rdi;" : "=r" (seed));
-
-  __asm__ volatile ("mov %%rsi, %%rax;" : "=r" (seed));
-  __asm__ volatile ("rdseed %%rsi;" : "=r" (seed));
-  __asm__ volatile ("mov %%rax, %%rsi;" : "=r" (seed));
-
-  __asm__ volatile ("mov %%rbp, %%rax;" : "=r" (seed));
-  __asm__ volatile ("rdseed %%rbp;" : "=r" (seed));
-  __asm__ volatile ("mov %%rax, %%rbp;" : "=r" (seed));
-
-  __asm__ volatile ("mov %%rsp, %%rax;" : "=r" (seed));
-  __asm__ volatile ("rdseed %%rsp;" : "=r" (seed));
-  __asm__ volatile ("mov %%rax, %%rsp;" : "=r" (seed));
-
-  __asm__ volatile ("rdseed %%r8;" : "=r" (seed));
-  __asm__ volatile ("rdseed %%r9;" : "=r" (seed));
-  __asm__ volatile ("rdseed %%r10;" : "=r" (seed));
-  __asm__ volatile ("rdseed %%r11;" : "=r" (seed));
-  __asm__ volatile ("rdseed %%r12;" : "=r" (seed));
-  __asm__ volatile ("rdseed %%r13;" : "=r" (seed));
-  __asm__ volatile ("rdseed %%r14;" : "=r" (seed));
-  __asm__ volatile ("rdseed %%r15;" : "=r" (seed));
+  __asm__ volatile ("rdseed %%rax;": : : "%rax");
+  __asm__ volatile ("rdseed %%rbx;": : : "%rbx");
+  __asm__ volatile ("rdseed %%rcx;": : : "%rcx");
+  __asm__ volatile ("rdseed %%rdx;": : : "%rdx");
+
+  __asm__ volatile ("mov %%rdi, %%rax;\n\
+                     rdseed %%rdi;\n\
+                     mov %%rax, %%rdi;" : : : "%rax");
+
+  __asm__ volatile ("mov %%rsi, %%rax;\n\
+                     rdseed %%rsi;\n\
+                     mov %%rax, %%rsi;" : : : "%rax");
+
+  __asm__ volatile ("mov %%rbp, %%rax;\n\
+                     rdseed %%rbp;\n\
+                     mov %%rax, %%rbp;" : : : "%rax");
+
+  __asm__ volatile ("mov %%rsp, %%rax;\n\
+                     rdseed %%rsp;\n\
+                     mov %%rax, %%rsp;" : : : "%rax");
+
+  __asm__ volatile ("rdseed %%r8;": : : "%r8");
+  __asm__ volatile ("rdseed %%r9;": : : "%r9");
+  __asm__ volatile ("rdseed %%r10;": : : "%r10");
+  __asm__ volatile ("rdseed %%r11;": : : "%r11");
+  __asm__ volatile ("rdseed %%r12;": : : "%r12");
+  __asm__ volatile ("rdseed %%r13;": : : "%r13");
+  __asm__ volatile ("rdseed %%r14;": : : "%r14");
+  __asm__ volatile ("rdseed %%r15;": : : "%r15");
 #endif
 }
diff --git a/gdb/testsuite/gdb.reverse/insn-reverse.exp b/gdb/testsuite/gdb.reverse/insn-reverse.exp
index 1a575b2d43e..206e765dd50 100644
--- a/gdb/testsuite/gdb.reverse/insn-reverse.exp
+++ b/gdb/testsuite/gdb.reverse/insn-reverse.exp
@@ -95,7 +95,9 @@ proc test { func testcase_nr } {
 
     if { $prev_insn_addr == $insn_addr } {
 	# Failed to make progress, might have run into SIGILL.
-	unsupported "no progress at: $expect_out(2,string)"
+	fail "no progress at: $expect_out(2,string)"
+	# Ignore the last instruction recorded
+	incr count -1
 	break
     }
 
-- 
2.41.0