From: H.J. Lu <hjl@gcc.gnu.org>
To: gcc-cvs@gcc.gnu.org
Subject: [gcc r12-5806] libsanitizer: Use SSE to save and restore XMM registers
Date: Mon,  6 Dec 2021 16:19:10 +0000 (GMT)
Message-ID: <20211206161910.9ED903858C60@sourceware.org>

https://gcc.gnu.org/g:70b043845d7c378c6a9361a6769885897d1018c2

commit r12-5806-g70b043845d7c378c6a9361a6769885897d1018c2
Author: H.J. Lu <hjl.tools@gmail.com>
Date:   Tue Nov 30 05:31:26 2021 -0800

    libsanitizer: Use SSE to save and restore XMM registers
    
    Use SSE, instead of AVX, to save and restore XMM registers to support
    processors without AVX.  The affected code has been unused upstream
    since
    
    https://github.com/llvm/llvm-project/commit/66d4ce7e26a5
    
    and will be removed in
    
    https://reviews.llvm.org/D112604
    
    This fixes
    
    FAIL: g++.dg/tsan/pthread_cond_clockwait.C   -O0  execution test
    FAIL: g++.dg/tsan/pthread_cond_clockwait.C   -O2  execution test
    
    on machines without AVX.
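
    Background (an illustration added here, not part of the patch):
    vmovdqu is VEX-encoded and belongs to AVX, so executing it on a CPU
    without AVX raises #UD, which the kernel delivers as SIGILL.  movdqu
    performs the same 128-bit unaligned move but belongs to SSE2, which
    the x86-64 ABI guarantees on every processor.  A minimal C sketch of
    the runtime distinction, using GCC's __builtin_cpu_supports:

        /* Sketch only; not from the patch.  */
        #include <stdio.h>

        int
        main (void)
        {
          if (__builtin_cpu_supports ("avx"))
            puts ("AVX present: VEX-encoded vmovdqu would execute");
          else
            puts ("no AVX: vmovdqu would fault (#UD -> SIGILL)");
          return 0;
        }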
    
            PR sanitizer/103466
            * tsan/tsan_rtl_amd64.S (__tsan_trace_switch_thunk): Replace
            vmovdqu with movdqu.
            (__tsan_report_race_thunk): Likewise.
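
    The substitution is mechanical: both mnemonics perform the same
    128-bit unaligned move, so no offsets or register assignments change.
    As a compiler-level sketch of the same choice (again not from the
    patch): GCC lowers the SSE2 intrinsic _mm_storeu_si128 to movdqu by
    default and to the VEX form vmovdqu under -mavx, the exact encoding
    difference the hand-written thunks hard-code.

        /* Sketch only; compile with and without -mavx and compare the
           generated store instruction.  */
        #include <emmintrin.h>

        void
        spill (__m128i *p, __m128i v)
        {
          _mm_storeu_si128 (p, v); /* movdqu without -mavx, vmovdqu with */
        }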

Diff:
---
 libsanitizer/tsan/tsan_rtl_amd64.S | 128 ++++++++++++++++++-------------------
 1 file changed, 64 insertions(+), 64 deletions(-)

diff --git a/libsanitizer/tsan/tsan_rtl_amd64.S b/libsanitizer/tsan/tsan_rtl_amd64.S
index 632b19d1815..c15b01e49e5 100644
--- a/libsanitizer/tsan/tsan_rtl_amd64.S
+++ b/libsanitizer/tsan/tsan_rtl_amd64.S
@@ -45,22 +45,22 @@ ASM_SYMBOL(__tsan_trace_switch_thunk):
   # All XMM registers are caller-saved.
   sub $0x100, %rsp
   CFI_ADJUST_CFA_OFFSET(0x100)
-  vmovdqu %xmm0, 0x0(%rsp)
-  vmovdqu %xmm1, 0x10(%rsp)
-  vmovdqu %xmm2, 0x20(%rsp)
-  vmovdqu %xmm3, 0x30(%rsp)
-  vmovdqu %xmm4, 0x40(%rsp)
-  vmovdqu %xmm5, 0x50(%rsp)
-  vmovdqu %xmm6, 0x60(%rsp)
-  vmovdqu %xmm7, 0x70(%rsp)
-  vmovdqu %xmm8, 0x80(%rsp)
-  vmovdqu %xmm9, 0x90(%rsp)
-  vmovdqu %xmm10, 0xa0(%rsp)
-  vmovdqu %xmm11, 0xb0(%rsp)
-  vmovdqu %xmm12, 0xc0(%rsp)
-  vmovdqu %xmm13, 0xd0(%rsp)
-  vmovdqu %xmm14, 0xe0(%rsp)
-  vmovdqu %xmm15, 0xf0(%rsp)
+  movdqu %xmm0, 0x0(%rsp)
+  movdqu %xmm1, 0x10(%rsp)
+  movdqu %xmm2, 0x20(%rsp)
+  movdqu %xmm3, 0x30(%rsp)
+  movdqu %xmm4, 0x40(%rsp)
+  movdqu %xmm5, 0x50(%rsp)
+  movdqu %xmm6, 0x60(%rsp)
+  movdqu %xmm7, 0x70(%rsp)
+  movdqu %xmm8, 0x80(%rsp)
+  movdqu %xmm9, 0x90(%rsp)
+  movdqu %xmm10, 0xa0(%rsp)
+  movdqu %xmm11, 0xb0(%rsp)
+  movdqu %xmm12, 0xc0(%rsp)
+  movdqu %xmm13, 0xd0(%rsp)
+  movdqu %xmm14, 0xe0(%rsp)
+  movdqu %xmm15, 0xf0(%rsp)
   # Align stack frame.
   push %rbx  # non-scratch
   CFI_ADJUST_CFA_OFFSET(8)
@@ -78,22 +78,22 @@ ASM_SYMBOL(__tsan_trace_switch_thunk):
   pop %rbx
   CFI_ADJUST_CFA_OFFSET(-8)
   # Restore scratch registers.
-  vmovdqu 0x0(%rsp), %xmm0
-  vmovdqu 0x10(%rsp), %xmm1
-  vmovdqu 0x20(%rsp), %xmm2
-  vmovdqu 0x30(%rsp), %xmm3
-  vmovdqu 0x40(%rsp), %xmm4
-  vmovdqu 0x50(%rsp), %xmm5
-  vmovdqu 0x60(%rsp), %xmm6
-  vmovdqu 0x70(%rsp), %xmm7
-  vmovdqu 0x80(%rsp), %xmm8
-  vmovdqu 0x90(%rsp), %xmm9
-  vmovdqu 0xa0(%rsp), %xmm10
-  vmovdqu 0xb0(%rsp), %xmm11
-  vmovdqu 0xc0(%rsp), %xmm12
-  vmovdqu 0xd0(%rsp), %xmm13
-  vmovdqu 0xe0(%rsp), %xmm14
-  vmovdqu 0xf0(%rsp), %xmm15
+  movdqu 0x0(%rsp), %xmm0
+  movdqu 0x10(%rsp), %xmm1
+  movdqu 0x20(%rsp), %xmm2
+  movdqu 0x30(%rsp), %xmm3
+  movdqu 0x40(%rsp), %xmm4
+  movdqu 0x50(%rsp), %xmm5
+  movdqu 0x60(%rsp), %xmm6
+  movdqu 0x70(%rsp), %xmm7
+  movdqu 0x80(%rsp), %xmm8
+  movdqu 0x90(%rsp), %xmm9
+  movdqu 0xa0(%rsp), %xmm10
+  movdqu 0xb0(%rsp), %xmm11
+  movdqu 0xc0(%rsp), %xmm12
+  movdqu 0xd0(%rsp), %xmm13
+  movdqu 0xe0(%rsp), %xmm14
+  movdqu 0xf0(%rsp), %xmm15
   add $0x100, %rsp
   CFI_ADJUST_CFA_OFFSET(-0x100)
   pop %r11
@@ -163,22 +163,22 @@ ASM_SYMBOL(__tsan_report_race_thunk):
   # All XMM registers are caller-saved.
   sub $0x100, %rsp
   CFI_ADJUST_CFA_OFFSET(0x100)
-  vmovdqu %xmm0, 0x0(%rsp)
-  vmovdqu %xmm1, 0x10(%rsp)
-  vmovdqu %xmm2, 0x20(%rsp)
-  vmovdqu %xmm3, 0x30(%rsp)
-  vmovdqu %xmm4, 0x40(%rsp)
-  vmovdqu %xmm5, 0x50(%rsp)
-  vmovdqu %xmm6, 0x60(%rsp)
-  vmovdqu %xmm7, 0x70(%rsp)
-  vmovdqu %xmm8, 0x80(%rsp)
-  vmovdqu %xmm9, 0x90(%rsp)
-  vmovdqu %xmm10, 0xa0(%rsp)
-  vmovdqu %xmm11, 0xb0(%rsp)
-  vmovdqu %xmm12, 0xc0(%rsp)
-  vmovdqu %xmm13, 0xd0(%rsp)
-  vmovdqu %xmm14, 0xe0(%rsp)
-  vmovdqu %xmm15, 0xf0(%rsp)
+  movdqu %xmm0, 0x0(%rsp)
+  movdqu %xmm1, 0x10(%rsp)
+  movdqu %xmm2, 0x20(%rsp)
+  movdqu %xmm3, 0x30(%rsp)
+  movdqu %xmm4, 0x40(%rsp)
+  movdqu %xmm5, 0x50(%rsp)
+  movdqu %xmm6, 0x60(%rsp)
+  movdqu %xmm7, 0x70(%rsp)
+  movdqu %xmm8, 0x80(%rsp)
+  movdqu %xmm9, 0x90(%rsp)
+  movdqu %xmm10, 0xa0(%rsp)
+  movdqu %xmm11, 0xb0(%rsp)
+  movdqu %xmm12, 0xc0(%rsp)
+  movdqu %xmm13, 0xd0(%rsp)
+  movdqu %xmm14, 0xe0(%rsp)
+  movdqu %xmm15, 0xf0(%rsp)
   # Align stack frame.
   push %rbx  # non-scratch
   CFI_ADJUST_CFA_OFFSET(8)
@@ -196,22 +196,22 @@ ASM_SYMBOL(__tsan_report_race_thunk):
   pop %rbx
   CFI_ADJUST_CFA_OFFSET(-8)
   # Restore scratch registers.
-  vmovdqu 0x0(%rsp), %xmm0
-  vmovdqu 0x10(%rsp), %xmm1
-  vmovdqu 0x20(%rsp), %xmm2
-  vmovdqu 0x30(%rsp), %xmm3
-  vmovdqu 0x40(%rsp), %xmm4
-  vmovdqu 0x50(%rsp), %xmm5
-  vmovdqu 0x60(%rsp), %xmm6
-  vmovdqu 0x70(%rsp), %xmm7
-  vmovdqu 0x80(%rsp), %xmm8
-  vmovdqu 0x90(%rsp), %xmm9
-  vmovdqu 0xa0(%rsp), %xmm10
-  vmovdqu 0xb0(%rsp), %xmm11
-  vmovdqu 0xc0(%rsp), %xmm12
-  vmovdqu 0xd0(%rsp), %xmm13
-  vmovdqu 0xe0(%rsp), %xmm14
-  vmovdqu 0xf0(%rsp), %xmm15
+  movdqu 0x0(%rsp), %xmm0
+  movdqu 0x10(%rsp), %xmm1
+  movdqu 0x20(%rsp), %xmm2
+  movdqu 0x30(%rsp), %xmm3
+  movdqu 0x40(%rsp), %xmm4
+  movdqu 0x50(%rsp), %xmm5
+  movdqu 0x60(%rsp), %xmm6
+  movdqu 0x70(%rsp), %xmm7
+  movdqu 0x80(%rsp), %xmm8
+  movdqu 0x90(%rsp), %xmm9
+  movdqu 0xa0(%rsp), %xmm10
+  movdqu 0xb0(%rsp), %xmm11
+  movdqu 0xc0(%rsp), %xmm12
+  movdqu 0xd0(%rsp), %xmm13
+  movdqu 0xe0(%rsp), %xmm14
+  movdqu 0xf0(%rsp), %xmm15
   add $0x100, %rsp
   CFI_ADJUST_CFA_OFFSET(-0x100)
   pop %r11
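
To reproduce, the two affected tests can be rerun in isolation from a
GCC build tree (assuming the usual DejaGnu test-harness setup):

  make check-g++ RUNTESTFLAGS="tsan.exp=pthread_cond_clockwait.C"

On a machine without AVX, both execution tests should now pass.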

