From: "H.J. Lu"
Date: Wed, 27 Oct 2021 05:48:29 -0700
Subject: Re: [PATCH v1 5/6] x86_64: Add avx2 optimized __memcmpeq in memcmpeq-avx2.S
To: Noah Goldstein
Cc: GNU C Library, "Carlos O'Donell"
In-Reply-To: <20211027024323.1199441-5-goldstein.w.n@gmail.com>
References: <20211027024323.1199441-1-goldstein.w.n@gmail.com> <20211027024323.1199441-5-goldstein.w.n@gmail.com>

On Tue, Oct 26, 2021 at 7:43 PM Noah Goldstein wrote:
>
> No bug. This commit adds a new optimized __memcmpeq implementation for
> avx2.
>
> The primary optimizations are:
>
> 1) skipping the logic to find the difference of the first mismatched
> byte.
>
> 2) not updating src/dst addresses, as the non-equals logic does not
> need to be reused by different areas.
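A note for readers of the archive: optimization 1 is possible because
__memcmpeq only promises a zero/non-zero result, so none of memcmp's
"which byte differed first, and in which direction" bookkeeping has to
be kept.  A minimal C sketch of that contract (the name memcmpeq_ref
and the byte-wise loop are illustrative only, not the glibc code):

    #include <stddef.h>

    /* Returns zero iff the two buffers are equal.  Any non-zero value
       is acceptable on mismatch, so no first-difference tracking is
       needed.  */
    int
    memcmpeq_ref (const void *s1, const void *s2, size_t n)
    {
      const unsigned char *p1 = s1;
      const unsigned char *p2 = s2;
      unsigned char acc = 0;
      for (size_t i = 0; i < n; i++)
        acc |= p1[i] ^ p2[i];  /* OR of XORs: non-zero iff any byte differs.  */
      return acc;
    }

The AVX2 code below performs the same kind of reduction 32 bytes at a
time, combining vpcmpeqb results with vpand and testing the vpmovmskb
mask.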
> ---
>  sysdeps/x86_64/multiarch/ifunc-impl-list.c   |   2 -
>  sysdeps/x86_64/multiarch/ifunc-memcmpeq.h    |   2 +-
>  sysdeps/x86_64/multiarch/memcmpeq-avx2-rtm.S |   4 +-
>  sysdeps/x86_64/multiarch/memcmpeq-avx2.S     | 309 ++++++++++++++++++-
>  4 files changed, 308 insertions(+), 9 deletions(-)
>
> diff --git a/sysdeps/x86_64/multiarch/ifunc-impl-list.c b/sysdeps/x86_64/multiarch/ifunc-impl-list.c
> index f7f3806d1d..535450f52c 100644
> --- a/sysdeps/x86_64/multiarch/ifunc-impl-list.c
> +++ b/sysdeps/x86_64/multiarch/ifunc-impl-list.c
> @@ -42,13 +42,11 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
>    IFUNC_IMPL (i, name, __memcmpeq,
>  	      IFUNC_IMPL_ADD (array, i, __memcmpeq,
>  			      (CPU_FEATURE_USABLE (AVX2)
> -			       && CPU_FEATURE_USABLE (MOVBE)
>  			       && CPU_FEATURE_USABLE (BMI2)),
>  			      __memcmpeq_avx2)
>  	      IFUNC_IMPL_ADD (array, i, __memcmpeq,
>  			      (CPU_FEATURE_USABLE (AVX2)
>  			       && CPU_FEATURE_USABLE (BMI2)
> -			       && CPU_FEATURE_USABLE (MOVBE)
>  			       && CPU_FEATURE_USABLE (RTM)),
>  			      __memcmpeq_avx2_rtm)
>  	      IFUNC_IMPL_ADD (array, i, __memcmpeq,
> diff --git a/sysdeps/x86_64/multiarch/ifunc-memcmpeq.h b/sysdeps/x86_64/multiarch/ifunc-memcmpeq.h
> index 3319a9568a..e596c5048b 100644
> --- a/sysdeps/x86_64/multiarch/ifunc-memcmpeq.h
> +++ b/sysdeps/x86_64/multiarch/ifunc-memcmpeq.h
> @@ -31,10 +31,10 @@ IFUNC_SELECTOR (void)
>
>    if (CPU_FEATURE_USABLE_P (cpu_features, AVX2)
>        && CPU_FEATURE_USABLE_P (cpu_features, BMI2)
> -      && CPU_FEATURE_USABLE_P (cpu_features, MOVBE)
>        && CPU_FEATURES_ARCH_P (cpu_features, AVX_Fast_Unaligned_Load))
>      {
>        if (CPU_FEATURE_USABLE_P (cpu_features, AVX512VL)
> +	  && CPU_FEATURE_USABLE_P (cpu_features, MOVBE)
>  	  && CPU_FEATURE_USABLE_P (cpu_features, AVX512BW))
>  	return OPTIMIZE1 (evex);
>
> diff --git a/sysdeps/x86_64/multiarch/memcmpeq-avx2-rtm.S b/sysdeps/x86_64/multiarch/memcmpeq-avx2-rtm.S
> index 24b6a0c9ff..3264a4a76c 100644
> --- a/sysdeps/x86_64/multiarch/memcmpeq-avx2-rtm.S
> +++ b/sysdeps/x86_64/multiarch/memcmpeq-avx2-rtm.S
> @@ -1,5 +1,5 @@
> -#ifndef MEMCMP
> -# define MEMCMP __memcmpeq_avx2_rtm
> +#ifndef MEMCMPEQ
> +# define MEMCMPEQ __memcmpeq_avx2_rtm
>  #endif
>
>  #define ZERO_UPPER_VEC_REGISTERS_RETURN \
> diff --git a/sysdeps/x86_64/multiarch/memcmpeq-avx2.S b/sysdeps/x86_64/multiarch/memcmpeq-avx2.S
> index 0181ea0d8d..0bf59fb8fa 100644
> --- a/sysdeps/x86_64/multiarch/memcmpeq-avx2.S
> +++ b/sysdeps/x86_64/multiarch/memcmpeq-avx2.S
> @@ -16,8 +16,309 @@
>     License along with the GNU C Library; if not, see
>     <https://www.gnu.org/licenses/>.  */
>
> -#ifndef MEMCMP
> -# define MEMCMP __memcmpeq_avx2
> -#endif
> +#if IS_IN (libc)
> +
> +/* __memcmpeq is implemented as:
> +   1. Use ymm vector compares when possible.  The only case where
> +      vector compares are not possible is when size < VEC_SIZE
> +      and loading from either s1 or s2 would cause a page cross.
> +   2. Use xmm vector compare when size >= 8 bytes.
> +   3. Optimistically compare up to the first 4 * VEC_SIZE one vector
> +      at a time to check for early mismatches.  Only do this if it is
> +      guaranteed the work is not wasted.
> +   4. If size is 8 * VEC_SIZE or less, unroll the loop.
> +   5. Compare 4 * VEC_SIZE at a time with the aligned first memory
> +      area.
> +   6. Use 2 vector compares when size is 2 * VEC_SIZE or less.
> +   7. Use 4 vector compares when size is 4 * VEC_SIZE or less.
> +   8. Use 8 vector compares when size is 8 * VEC_SIZE or less.  */
> +
> +# include <sysdep.h>
> +
> +# ifndef MEMCMPEQ
> +#  define MEMCMPEQ __memcmpeq_avx2
> +# endif
> +
> +# define VPCMPEQ vpcmpeqb
> +
> +# ifndef VZEROUPPER
> +#  define VZEROUPPER vzeroupper
> +# endif
> +
> +# ifndef SECTION
> +#  define SECTION(p) p##.avx
> +# endif
> +
> +# define VEC_SIZE 32
> +# define PAGE_SIZE 4096
> +
> +	.section SECTION(.text), "ax", @progbits
> +ENTRY_P2ALIGN (MEMCMPEQ, 6)
> +# ifdef __ILP32__
> +	/* Clear the upper 32 bits.  */
> +	movl %edx, %edx
> +# endif
> +	cmp $VEC_SIZE, %RDX_LP
> +	jb L(less_vec)
> +
> +	/* From VEC to 2 * VEC.  No branch when size == VEC_SIZE.  */
> +	vmovdqu (%rsi), %ymm1
> +	VPCMPEQ (%rdi), %ymm1, %ymm1
> +	vpmovmskb %ymm1, %eax
> +	incl %eax
> +	jnz L(return_neq0)
> +	cmpq $(VEC_SIZE * 2), %rdx
> +	jbe L(last_1x_vec)
> +
> +	/* Check second VEC no matter what.  */
> +	vmovdqu VEC_SIZE(%rsi), %ymm2
> +	VPCMPEQ VEC_SIZE(%rdi), %ymm2, %ymm2
> +	vpmovmskb %ymm2, %eax
> +	/* If all 4 VEC were equal eax will be all 1s so incl will
> +	   overflow and set zero flag.  */
> +	incl %eax
> +	jnz L(return_neq0)
> +
> +	/* Less than 4 * VEC.  */
> +	cmpq $(VEC_SIZE * 4), %rdx
> +	jbe L(last_2x_vec)
> +
> +	/* Check third and fourth VEC no matter what.  */
> +	vmovdqu (VEC_SIZE * 2)(%rsi), %ymm3
> +	VPCMPEQ (VEC_SIZE * 2)(%rdi), %ymm3, %ymm3
> +	vpmovmskb %ymm3, %eax
> +	incl %eax
> +	jnz L(return_neq0)
> +
> +	vmovdqu (VEC_SIZE * 3)(%rsi), %ymm4
> +	VPCMPEQ (VEC_SIZE * 3)(%rdi), %ymm4, %ymm4
> +	vpmovmskb %ymm4, %eax
> +	incl %eax
> +	jnz L(return_neq0)
> +
> +	/* Go to 4x VEC loop.  */
> +	cmpq $(VEC_SIZE * 8), %rdx
> +	ja L(more_8x_vec)
> +
> +	/* Handle remainder of size = 4 * VEC + 1 to 8 * VEC without any
> +	   branches.  */
> +
> +	/* Adjust rsi and rdi to avoid indexed address mode.  This ends
> +	   up saving 16 bytes of code, prevents unlamination, and avoids
> +	   bottlenecks in the AGU.  */
> +	addq %rdx, %rsi
> +	vmovdqu -(VEC_SIZE * 4)(%rsi), %ymm1
> +	vmovdqu -(VEC_SIZE * 3)(%rsi), %ymm2
> +	addq %rdx, %rdi
> +
> +	VPCMPEQ -(VEC_SIZE * 4)(%rdi), %ymm1, %ymm1
> +	VPCMPEQ -(VEC_SIZE * 3)(%rdi), %ymm2, %ymm2
> +
> +	vmovdqu -(VEC_SIZE * 2)(%rsi), %ymm3
> +	VPCMPEQ -(VEC_SIZE * 2)(%rdi), %ymm3, %ymm3
> +	vmovdqu -VEC_SIZE(%rsi), %ymm4
> +	VPCMPEQ -VEC_SIZE(%rdi), %ymm4, %ymm4
> +
> +	/* Reduce VEC0 - VEC4.  */
> +	vpand %ymm1, %ymm2, %ymm2
> +	vpand %ymm3, %ymm4, %ymm4
> +	vpand %ymm2, %ymm4, %ymm4
> +	vpmovmskb %ymm4, %eax
> +	incl %eax
> +L(return_neq0):
> +L(return_vzeroupper):
> +	ZERO_UPPER_VEC_REGISTERS_RETURN
>
> -#include "memcmp-avx2-movbe.S"
> +	/* NB: p2align 5 here will ensure the L(loop_4x_vec) is also 32
> +	   byte aligned.  */
> +	.p2align 5
> +L(less_vec):
> +	/* Check if one or less char.  This is necessary for size = 0
> +	   but is also faster for size = 1.  */
> +	cmpl $1, %edx
> +	jbe L(one_or_less)
> +
> +	/* Check if loading one VEC from either s1 or s2 could cause a
> +	   page cross.  This can have false positives but is by far the
> +	   fastest method.  */
> +	movl %edi, %eax
> +	orl %esi, %eax
> +	andl $(PAGE_SIZE - 1), %eax
> +	cmpl $(PAGE_SIZE - VEC_SIZE), %eax
> +	jg L(page_cross_less_vec)
> +
> +	/* No page cross possible.  */
> +	vmovdqu (%rsi), %ymm2
> +	VPCMPEQ (%rdi), %ymm2, %ymm2
> +	vpmovmskb %ymm2, %eax
> +	incl %eax
> +	/* Result will be zero if s1 and s2 match.  Otherwise the first
> +	   set bit will be the first mismatch.  */
> +	bzhil %edx, %eax, %eax
> +	VZEROUPPER_RETURN
> +
> +	/* Relatively cold but placed close to L(less_vec) for 2 byte
> +	   jump encoding.  */
> +	.p2align 4
> +L(one_or_less):
> +	jb L(zero)
> +	movzbl (%rsi), %ecx
> +	movzbl (%rdi), %eax
> +	subl %ecx, %eax
> +	/* No ymm register was touched.  */
> +	ret
> +	/* Within the same 16 byte block is L(one_or_less).  */
> +L(zero):
> +	xorl %eax, %eax
> +	ret
> +
> +	.p2align 4
> +L(last_1x_vec):
> +	vmovdqu -(VEC_SIZE * 1)(%rsi, %rdx), %ymm1
> +	VPCMPEQ -(VEC_SIZE * 1)(%rdi, %rdx), %ymm1, %ymm1
> +	vpmovmskb %ymm1, %eax
> +	incl %eax
> +	VZEROUPPER_RETURN
> +
> +	.p2align 4
> +L(last_2x_vec):
> +	vmovdqu -(VEC_SIZE * 2)(%rsi, %rdx), %ymm1
> +	VPCMPEQ -(VEC_SIZE * 2)(%rdi, %rdx), %ymm1, %ymm1
> +	vmovdqu -(VEC_SIZE * 1)(%rsi, %rdx), %ymm2
> +	VPCMPEQ -(VEC_SIZE * 1)(%rdi, %rdx), %ymm2, %ymm2
> +	vpand %ymm1, %ymm2, %ymm2
> +	vpmovmskb %ymm2, %eax
> +	incl %eax
> +	VZEROUPPER_RETURN
> +
> +	.p2align 4
> +L(more_8x_vec):
> +	/* Set end of s1 in rdx.  */
> +	leaq -(VEC_SIZE * 4)(%rdi, %rdx), %rdx
> +	/* rsi stores s2 - s1.  This allows the loop to only update one
> +	   pointer.  */
> +	subq %rdi, %rsi
> +	/* Align s1 pointer.  */
> +	andq $-VEC_SIZE, %rdi
> +	/* Adjust because the first 4x vec were checked already.  */
> +	subq $-(VEC_SIZE * 4), %rdi
> +	.p2align 4
> +L(loop_4x_vec):
> +	/* rsi has s2 - s1 so get correct address by adding s1 (in rdi).  */
> +	vmovdqu (%rsi, %rdi), %ymm1
> +	VPCMPEQ (%rdi), %ymm1, %ymm1
> +
> +	vmovdqu VEC_SIZE(%rsi, %rdi), %ymm2
> +	VPCMPEQ VEC_SIZE(%rdi), %ymm2, %ymm2
> +
> +	vmovdqu (VEC_SIZE * 2)(%rsi, %rdi), %ymm3
> +	VPCMPEQ (VEC_SIZE * 2)(%rdi), %ymm3, %ymm3
> +
> +	vmovdqu (VEC_SIZE * 3)(%rsi, %rdi), %ymm4
> +	VPCMPEQ (VEC_SIZE * 3)(%rdi), %ymm4, %ymm4
> +
> +	vpand %ymm1, %ymm2, %ymm2
> +	vpand %ymm3, %ymm4, %ymm4
> +	vpand %ymm2, %ymm4, %ymm4
> +	vpmovmskb %ymm4, %eax
> +	incl %eax
> +	jnz L(return_neq1)
> +	subq $-(VEC_SIZE * 4), %rdi
> +	/* Check if s1 pointer is at the end.  */
> +	cmpq %rdx, %rdi
> +	jb L(loop_4x_vec)
> +
> +	vmovdqu (VEC_SIZE * 3)(%rsi, %rdx), %ymm4
> +	VPCMPEQ (VEC_SIZE * 3)(%rdx), %ymm4, %ymm4
> +	subq %rdx, %rdi
> +	/* rdi has 4 * VEC_SIZE - remaining length.  */
> +	cmpl $(VEC_SIZE * 3), %edi
> +	jae L(8x_last_1x_vec)
> +	/* Load regardless of branch.  */
> +	vmovdqu (VEC_SIZE * 2)(%rsi, %rdx), %ymm3
> +	VPCMPEQ (VEC_SIZE * 2)(%rdx), %ymm3, %ymm3
> +	cmpl $(VEC_SIZE * 2), %edi
> +	jae L(8x_last_2x_vec)
> +	/* Check last 4 VEC.  */
> +	vmovdqu VEC_SIZE(%rsi, %rdx), %ymm1
> +	VPCMPEQ VEC_SIZE(%rdx), %ymm1, %ymm1
> +
> +	vmovdqu (%rsi, %rdx), %ymm2
> +	VPCMPEQ (%rdx), %ymm2, %ymm2
> +
> +	vpand %ymm3, %ymm4, %ymm4
> +	vpand %ymm1, %ymm2, %ymm3
> +L(8x_last_2x_vec):
> +	vpand %ymm3, %ymm4, %ymm4
> +L(8x_last_1x_vec):
> +	vpmovmskb %ymm4, %eax
> +	/* Restore s1 pointer to rdi.  */
> +	incl %eax
> +L(return_neq1):
> +	VZEROUPPER_RETURN
> +
> +	/* Relatively cold case as page crosses are unexpected.  */
> +	.p2align 4
> +L(page_cross_less_vec):
> +	cmpl $16, %edx
> +	jae L(between_16_31)
> +	cmpl $8, %edx
> +	ja L(between_9_15)
> +	cmpl $4, %edx
> +	jb L(between_2_3)
> +	/* From 4 to 8 bytes.  No branch when size == 4.  */
> +	movl (%rdi), %eax
> +	subl (%rsi), %eax
> +	movl -4(%rdi, %rdx), %ecx
> +	movl -4(%rsi, %rdx), %edi
> +	subl %edi, %ecx
> +	orl %ecx, %eax
> +	ret
> +
> +	.p2align 4,, 8
> +L(between_16_31):
> +	/* From 16 to 31 bytes.  No branch when size == 16.  */
> +
> +	/* Safe to use xmm[0, 15] as no vzeroupper is needed so RTM
> +	   safe.  */
> +	vmovdqu (%rsi), %xmm1
> +	vpcmpeqb (%rdi), %xmm1, %xmm1
> +	vmovdqu -16(%rsi, %rdx), %xmm2
> +	vpcmpeqb -16(%rdi, %rdx), %xmm2, %xmm2
> +	vpand %xmm1, %xmm2, %xmm2
> +	vpmovmskb %xmm2, %eax
> +	notw %ax
> +	/* No ymm register was touched.  */
> +	ret
> +
> +	.p2align 4,, 8
> +L(between_9_15):
> +	/* From 9 to 15 bytes.  */
> +	movq (%rdi), %rax
> +	subq (%rsi), %rax
> +	movq -8(%rdi, %rdx), %rcx
> +	movq -8(%rsi, %rdx), %rdi
> +	subq %rdi, %rcx
> +	orq %rcx, %rax
> +	/* edx is guaranteed to be a non-zero int.  */
> +	cmovnz %edx, %eax
> +	ret
> +
> +	/* Don't align.  This is cold and aligning here will cause code
> +	   to spill into the next cache line.  */
> +L(between_2_3):
> +	/* From 2 to 3 bytes.  No branch when size == 2.  */
> +	movzwl (%rdi), %eax
> +	movzwl (%rsi), %ecx
> +	subl %ecx, %eax
> +	movzbl -1(%rdi, %rdx), %ecx
> +	/* All machines that support evex will insert a "merging uop"
> +	   avoiding any serious partial register stalls.  */
> +	subb -1(%rsi, %rdx), %cl
> +	orl %ecx, %eax
> +	/* No ymm register was touched.  */
> +	ret
> +
> +	/* 2 bytes from next cache line.  */
> +END (MEMCMPEQ)
> +#endif
> --
> 2.25.1
>

LGTM.

Reviewed-by: H.J. Lu

Thanks.

-- 
H.J.