From: Noah Goldstein
To: glibc-cvs@sourceware.org
Subject: [glibc] x86: Cleanup pthread_spin_{try}lock.S
X-Act-Checkin: glibc
X-Git-Author: Noah Goldstein
X-Git-Refname: refs/heads/master
X-Git-Oldrev: 10c779f44ab3e9525f2d2a3c9a0aa9dedea5f1ec
X-Git-Newrev: 653c12c7d880340462bd963752619a7a61bcb4e3
Message-Id: <20221003211413.868CE3854171@sourceware.org>
Date: Mon, 3 Oct 2022 21:14:13 +0000 (GMT)

https://sourceware.org/git/gitweb.cgi?p=glibc.git;h=653c12c7d880340462bd963752619a7a61bcb4e3

commit 653c12c7d880340462bd963752619a7a61bcb4e3
Author: Noah Goldstein
Date:   Fri Sep 30 21:13:27 2022 -0700

    x86: Cleanup pthread_spin_{try}lock.S

    Save a jmp on the lock path coming from an initial failure in
    pthread_spin_lock.S.  This costs 4 bytes of code, but since the
    function still fits in the same number of 16-byte blocks (default
    function alignment), it does not affect the total binary size of
    libc.so (unchanged after this commit).

    pthread_spin_trylock was using a CAS, which is often more
    expensive, where a simple xchg suffices.

    Full check passes on x86-64.

Diff:
---
 sysdeps/x86_64/nptl/pthread_spin_lock.S    | 23 ++++++++++++++++-------
 sysdeps/x86_64/nptl/pthread_spin_trylock.S | 18 +++++++++++++-----
 2 files changed, 29 insertions(+), 12 deletions(-)

diff --git a/sysdeps/x86_64/nptl/pthread_spin_lock.S b/sysdeps/x86_64/nptl/pthread_spin_lock.S
index 44b837d9db..1e09e59b10 100644
--- a/sysdeps/x86_64/nptl/pthread_spin_lock.S
+++ b/sysdeps/x86_64/nptl/pthread_spin_lock.S
@@ -19,18 +19,27 @@
 #include <sysdep.h>
 
 ENTRY(__pthread_spin_lock)
-1:	LOCK
-	decl	0(%rdi)
-	jne	2f
+	/* Always return zero.  */
 	xor	%eax, %eax
+	LOCK
+	decl	0(%rdi)
+	jne	1f
 	ret
 
 	.align	16
-2:	rep
+1:
+	/* `rep nop` == `pause`.  */
+	rep
 	nop
-	cmpl	$0, 0(%rdi)
-	jg	1b
-	jmp	2b
+	cmpl	%eax, 0(%rdi)
+	jle	1b
+	/* Just repeat the `lock decl` logic here.  The code-size saving
+	   of jumping back to the entry doesn't change how many 16-byte
+	   chunks (default function alignment) the code fits in.  */
+	LOCK
+	decl	0(%rdi)
+	jne	1b
+	ret
 END(__pthread_spin_lock)
 versioned_symbol (libc, __pthread_spin_lock, pthread_spin_lock,
 		  GLIBC_2_34)

diff --git a/sysdeps/x86_64/nptl/pthread_spin_trylock.S b/sysdeps/x86_64/nptl/pthread_spin_trylock.S
index fffdb27dd9..a1f97cb420 100644
--- a/sysdeps/x86_64/nptl/pthread_spin_trylock.S
+++ b/sysdeps/x86_64/nptl/pthread_spin_trylock.S
@@ -20,13 +20,21 @@
 #include <sysdep.h>
 
 ENTRY(__pthread_spin_trylock)
-	movl	$1, %eax
 	xorl	%ecx, %ecx
-	lock
-	cmpxchgl %ecx, (%rdi)
+	/* xchg has implicit LOCK prefix.  */
+	xchgl	%ecx, (%rdi)
+
+	/* Branch on result.  Expectation is the use of trylock will be
+	   branching on success/failure so this branch can be used to
+	   predict the coming branch.  It has the benefit of breaking
+	   the likely expensive memory dependency on (%rdi).  */
+	cmpl	$1, %ecx
+	jnz	1f
+	xorl	%eax, %eax
+	ret
+1:
 	movl	$EBUSY, %eax
-	cmovel	%ecx, %eax
-	retq
+	ret
 END(__pthread_spin_trylock)
 versioned_symbol (libc, __pthread_spin_trylock, pthread_spin_trylock,
 		  GLIBC_2_34)
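
For readers who want the lock-path semantics without the assembly: the
new layout is the classic test-and-test-and-set loop.  The atomic
`lock decl` is attempted first; on failure the code spins on plain
loads (with `pause`) until the lock looks free before retrying the
atomic.  A rough C sketch of that logic, using GCC's `__atomic`
builtins and `_mm_pause` rather than anything from glibc itself (the
function name and prototype here are illustrative only):

    #include <immintrin.h>  /* _mm_pause, GCC/Clang on x86 */

    /* Illustrative sketch of the pthread_spin_lock fast/slow path.
       The lock-word convention follows the assembly above: 1 means
       free, <= 0 means held.  */
    static int
    spin_lock_sketch (int *lock)
    {
      /* Fast path: atomically decrement.  Seeing the old value 1
         (now 0) means we took the lock; mirrors `lock decl`/`jne`.  */
      while (__atomic_fetch_sub (lock, 1, __ATOMIC_ACQUIRE) != 1)
        {
          /* Slow path: spin on plain loads (`cmpl`/`jle` above),
             pausing each iteration (`rep nop`), until the lock looks
             free; only then retry the atomic decrement.  */
          while (__atomic_load_n (lock, __ATOMIC_RELAXED) <= 0)
            _mm_pause ();
        }
      return 0;  /* pthread_spin_lock always returns zero.  */
    }

Spinning on plain loads lets the cache line stay shared among waiters;
only the atomic decrement needs exclusive ownership of the line.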
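On the trylock side, `lock cmpxchg` becomes a bare `xchg` (which is
implicitly locked): since trylock makes exactly one attempt, there is
nothing to compare, and unconditionally swapping in the "held" value
while inspecting the old value is enough to tell success from failure.
A hedged C equivalent of the new sequence (again, the name and
prototype are illustrative, not glibc's):

    #include <errno.h>  /* EBUSY */

    /* Illustrative sketch of the new pthread_spin_trylock; same
       lock-word convention as above (1 == free).  */
    static int
    spin_trylock_sketch (int *lock)
    {
      /* Unconditionally store 0 ("held") and fetch the old value;
         mirrors `xchgl %ecx, (%rdi)`.  Seeing 1 means the lock was
         free and is now ours; no compare-and-swap is needed.  */
      if (__atomic_exchange_n (lock, 0, __ATOMIC_ACQUIRE) == 1)
        return 0;
      return EBUSY;
    }

As the commit message says, a plain locked `xchg` is often cheaper
than `lock cmpxchg` on x86, and the explicit success/failure branch
that replaced `cmove` gives the branch predictor something to train
on, which is what the comment in the new code is getting at.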