From: Noah Goldstein
Date: Thu, 21 Apr 2022 20:53:26 -0500
Subject: Re: [PATCH v2 4/4] x86: Optimize {str|wcs}rchr-evex
To: "H.J. Lu"
Cc: GNU C Library, "Carlos O'Donell"

On Thu, Apr 21, 2022 at 7:00 PM H.J. Lu wrote:
>
> On Thu, Apr 21, 2022 at 3:22 PM Noah Goldstein wrote:
> >
> > The new code unrolls the main loop slightly without adding too much
> > overhead and minimizes the comparisons for the search CHAR.
> >
> > Geometric Mean of all benchmarks New / Old: 0.755
> > See email for all results.
> >
> > Full xcheck passes on x86_64 with and without multiarch enabled.
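(Aside, purely for illustration: the scalar logic that this patch vectorizes is roughly the sketch below. It is not the glibc implementation and the function name is made up; it is only meant to make the diff easier to follow.)

#include <stddef.h>

/* Rough scalar equivalent of the strategy, for illustration only:
   scan forward, remember the most recent match, stop at the null
   terminator.  The EVEX code does the same bookkeeping a whole
   32-byte vector (or two, in the unrolled main loop) at a time and
   only works out *which* element matched in the return paths.  */
static const char *
strrchr_scalar_sketch (const char *s, int c)
{
  const char *last = NULL;
  for (;; ++s)
    {
      if (*s == (char) c)
        last = s;
      if (*s == '\0')
        return last;
    }
}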
> > ---
> >  sysdeps/x86_64/multiarch/strrchr-evex.S | 441 ++++++++++++++----------
> >  1 file changed, 259 insertions(+), 182 deletions(-)
> >
> > diff --git a/sysdeps/x86_64/multiarch/strrchr-evex.S b/sysdeps/x86_64/multiarch/strrchr-evex.S
> > index adeddaed32..5cf9a8315b 100644
> > --- a/sysdeps/x86_64/multiarch/strrchr-evex.S
> > +++ b/sysdeps/x86_64/multiarch/strrchr-evex.S
> > @@ -24,242 +24,319 @@
> >  # define STRRCHR __strrchr_evex
> >  # endif
> >
> > -# define VMOVU vmovdqu64
> > -# define VMOVA vmovdqa64
> > +# define VMOVU vmovdqu64
> > +# define VMOVA vmovdqa64
> >
> >  # ifdef USE_AS_WCSRCHR
> > +# define SHIFT_REG esi
> > +
> > +# define kunpck kunpckbw
> > +# define kmov_2x kmovd
> > +# define maskz_2x ecx
> > +# define maskm_2x eax
> > +# define CHAR_SIZE 4
> > +# define VPMIN vpminud
> > +# define VPTESTN vptestnmd
> >  # define VPBROADCAST vpbroadcastd
> > -# define VPCMP vpcmpd
> > -# define SHIFT_REG r8d
> >  # else
> > +# define SHIFT_REG edi
> > +
> > +# define kunpck kunpckdq
> > +# define kmov_2x kmovq
> > +# define maskz_2x rcx
> > +# define maskm_2x rax
> > +
> > +# define CHAR_SIZE 1
> > +# define VPMIN vpminub
> > +# define VPTESTN vptestnmb
> >  # define VPBROADCAST vpbroadcastb
> > -# define VPCMP vpcmpb
> > -# define SHIFT_REG ecx
> >  # endif
> >
> >  # define XMMZERO xmm16
> >  # define YMMZERO ymm16
> >  # define YMMMATCH ymm17
> > -# define YMM1 ymm18
> > +# define YMMSAVE ymm18
> > +
> > +# define YMM1 ymm19
> > +# define YMM2 ymm20
> > +# define YMM3 ymm21
> > +# define YMM4 ymm22
> > +# define YMM5 ymm23
> > +# define YMM6 ymm24
> > +# define YMM7 ymm25
> > +# define YMM8 ymm26
> >
> > -# define VEC_SIZE 32
> >
> > -	.section .text.evex,"ax",@progbits
> > -ENTRY (STRRCHR)
> > -	movl %edi, %ecx
> > +# define VEC_SIZE 32
> > +# define PAGE_SIZE 4096
> > +	.section .text.evex, "ax", @progbits
> > +ENTRY(STRRCHR)
> > +	movl %edi, %eax
> >  	/* Broadcast CHAR to YMMMATCH.  */
> >  	VPBROADCAST %esi, %YMMMATCH
> >
> > -	vpxorq %XMMZERO, %XMMZERO, %XMMZERO
> > -
> > -	/* Check if we may cross page boundary with one vector load.  */
> > -	andl $(2 * VEC_SIZE - 1), %ecx
> > -	cmpl $VEC_SIZE, %ecx
> > -	ja L(cros_page_boundary)
> > +	andl $(PAGE_SIZE - 1), %eax
> > +	cmpl $(PAGE_SIZE - VEC_SIZE), %eax
> > +	jg L(cross_page_boundary)
> >
> > +L(page_cross_continue):
> >  	VMOVU (%rdi), %YMM1
> > -
> > -	/* Each bit in K0 represents a null byte in YMM1.  */
> > -	VPCMP $0, %YMMZERO, %YMM1, %k0
> > -	/* Each bit in K1 represents a CHAR in YMM1.  */
> > -	VPCMP $0, %YMMMATCH, %YMM1, %k1
> > +	VPTESTN %YMM1, %YMM1, %k0
>
> Please add some comments for mask register on VPTESTN tests.

Added in V3.

> >  	kmovd %k0, %ecx
> > -	kmovd %k1, %eax
> > -
> > -	addq $VEC_SIZE, %rdi
> > -
> > -	testl %eax, %eax
> > -	jnz L(first_vec)
> > -
> >  	testl %ecx, %ecx
> > -	jnz L(return_null)
> > -
> > -	andq $-VEC_SIZE, %rdi
> > -	xorl %edx, %edx
> > -	jmp L(aligned_loop)
> > -
> > -	.p2align 4
> > -L(first_vec):
> > -	/* Check if there is a null byte.  */
> > -	testl %ecx, %ecx
> > -	jnz L(char_and_nul_in_first_vec)
> > -
> > -	/* Remember the match and keep searching.  */
> > -	movl %eax, %edx
> > -	movq %rdi, %rsi
> > -	andq $-VEC_SIZE, %rdi
> > -	jmp L(aligned_loop)
> > -
> > -	.p2align 4
> > -L(cros_page_boundary):
> > -	andl $(VEC_SIZE - 1), %ecx
> > -	andq $-VEC_SIZE, %rdi
> > -
> > +	jz L(aligned_more)
> > +	VPCMP $0, %YMMMATCH, %YMM1, %k1
>
> Please add some comments.

Added in V3.

> > +	kmovd %k1, %eax
> > +	blsmskl %ecx, %ecx
> > +	andl %ecx, %eax
> > +	jz L(ret0)
> > +	bsrl %eax, %eax
> >  # ifdef USE_AS_WCSRCHR
> > -	/* NB: Divide shift count by 4 since each bit in K1 represent 4
> > -	   bytes.  */
> > -	movl %ecx, %SHIFT_REG
> > -	sarl $2, %SHIFT_REG
> > +	leaq (%rdi, %rax, CHAR_SIZE), %rax
> > +# else
> > +	addq %rdi, %rax
> >  # endif
> > +L(ret0):
> > +	ret
> >
> > -	VMOVA (%rdi), %YMM1
> > -
> > -	/* Each bit in K0 represents a null byte in YMM1.  */
> > -	VPCMP $0, %YMMZERO, %YMM1, %k0
> > -	/* Each bit in K1 represents a CHAR in YMM1.  */
> > +	/* Returns for first vec x1/x2/x3 have hard coded backward
> > +	   search path for earlier matches.  */
> > +	.p2align 4,, 6
> > +L(first_vec_x1):
> > +	VPCMP $0, %YMMMATCH, %YMM2, %k1
> > +	kmovd %k1, %eax
> > +	blsmskl %ecx, %ecx
> > +	andl %ecx, %eax
> > +	jnz L(first_vec_x1_return)
> > +	.p2align 4,, 4
> > +L(first_vec_x0_test):
> >  	VPCMP $0, %YMMMATCH, %YMM1, %k1
> > -	kmovd %k0, %edx
> >  	kmovd %k1, %eax
> > -
> > -	shrxl %SHIFT_REG, %edx, %edx
> > -	shrxl %SHIFT_REG, %eax, %eax
> > -	addq $VEC_SIZE, %rdi
> > -
> > -	/* Check if there is a CHAR.  */
> >  	testl %eax, %eax
> > -	jnz L(found_char)
> > -
> > -	testl %edx, %edx
> > -	jnz L(return_null)
> > -
> > -	jmp L(aligned_loop)
> > -
> > -	.p2align 4
> > -L(found_char):
> > -	testl %edx, %edx
> > -	jnz L(char_and_nul)
> > -
> > -	/* Remember the match and keep searching.  */
> > -	movl %eax, %edx
> > -	leaq (%rdi, %rcx), %rsi
> > +	jz L(ret1)
> > +	bsrl %eax, %eax
> > +# ifdef USE_AS_WCSRCHR
> > +	leaq (%rsi, %rax, CHAR_SIZE), %rax
> > +# else
> > +	addq %rsi, %rax
> > +# endif
> > +L(ret1):
> > +	ret
> >
> > -	.p2align 4
> > -L(aligned_loop):
> > -	VMOVA (%rdi), %YMM1
> > -	addq $VEC_SIZE, %rdi
> > +	.p2align 4,, 10
> > +L(first_vec_x1_or_x2):
> > +	VPCMP $0, %YMM3, %YMMMATCH, %k3
> > +	VPCMP $0, %YMM2, %YMMMATCH, %k2
> > +	kortestd %k2, %k3
> > +	jz L(first_vec_x0_test)
> > +
> > +	kunpck %k2, %k3, %k3
> > +	kmovq %k3, %rax
> > +	bsrq %rax, %rax
> > +	leaq (VEC_SIZE)(%r8, %rax, CHAR_SIZE), %rax
> > +	ret
> >
> > -	/* Each bit in K0 represents a null byte in YMM1.  */
> > -	VPCMP $0, %YMMZERO, %YMM1, %k0
> > -	/* Each bit in K1 represents a CHAR in YMM1.  */
> > -	VPCMP $0, %YMMMATCH, %YMM1, %k1
> > -	kmovd %k0, %ecx
> > +	.p2align 4,, 6
> > +L(first_vec_x3):
> > +	VPCMP $0, %YMMMATCH, %YMM4, %k1
> >  	kmovd %k1, %eax
> > -	orl %eax, %ecx
> > -	jnz L(char_nor_null)
> > +	blsmskl %ecx, %ecx
> > +	andl %ecx, %eax
> > +	jz L(first_vec_x1_or_x2)
> > +	bsrl %eax, %eax
> > +	leaq (VEC_SIZE * 3)(%rdi, %rax, CHAR_SIZE), %rax
> > +	ret
> >
> > -	VMOVA (%rdi), %YMM1
> > -	add $VEC_SIZE, %rdi
> > +	.p2align 4,, 6
> > +L(first_vec_x0_x1_test):
> > +	VPCMP $0, %YMMMATCH, %YMM2, %k1
> > +	kmovd %k1, %eax
> > +	testl %eax, %eax
> > +	jz L(first_vec_x0_test)
> > +	.p2align 4,, 4
> > +L(first_vec_x1_return):
> > +	bsrl %eax, %eax
> > +	leaq (VEC_SIZE)(%rdi, %rax, CHAR_SIZE), %rax
> > +	ret
> >
> > -	/* Each bit in K0 represents a null byte in YMM1.  */
> > -	VPCMP $0, %YMMZERO, %YMM1, %k0
> > -	/* Each bit in K1 represents a CHAR in YMM1.  */
> > -	VPCMP $0, %YMMMATCH, %YMM1, %k1
> > -	kmovd %k0, %ecx
> > +	.p2align 4,, 10
> > +L(first_vec_x2):
> > +	VPCMP $0, %YMMMATCH, %YMM3, %k1
> >  	kmovd %k1, %eax
> > -	orl %eax, %ecx
> > -	jnz L(char_nor_null)
> > +	blsmskl %ecx, %ecx
> > +	andl %ecx, %eax
> > +	jz L(first_vec_x0_x1_test)
> > +	bsrl %eax, %eax
> > +	leaq (VEC_SIZE * 2)(%rdi, %rax, CHAR_SIZE), %rax
> > +	ret
> >
> > -	VMOVA (%rdi), %YMM1
> > -	addq $VEC_SIZE, %rdi
> >
> > -	/* Each bit in K0 represents a null byte in YMM1.  */
> > -	VPCMP $0, %YMMZERO, %YMM1, %k0
> > -	/* Each bit in K1 represents a CHAR in YMM1.  */
> > -	VPCMP $0, %YMMMATCH, %YMM1, %k1
> > +	.p2align 4
> > +L(aligned_more):
> > +	/* Need to keep original pointer in case YMM1 has last match.  */
> > +	movq %rdi, %rsi
> > +	andq $-VEC_SIZE, %rdi
> > +	VMOVU VEC_SIZE(%rdi), %YMM2
> > +	VPTESTN %YMM2, %YMM2, %k0
> >  	kmovd %k0, %ecx
> > -	kmovd %k1, %eax
> > -	orl %eax, %ecx
> > -	jnz L(char_nor_null)
> > +	testl %ecx, %ecx
> > +	jnz L(first_vec_x1)
> >
> > -	VMOVA (%rdi), %YMM1
> > -	addq $VEC_SIZE, %rdi
> > +	VMOVU (VEC_SIZE * 2)(%rdi), %YMM3
> > +	VPTESTN %YMM3, %YMM3, %k0
> > +	kmovd %k0, %ecx
> > +	testl %ecx, %ecx
> > +	jnz L(first_vec_x2)
> >
> > -	/* Each bit in K0 represents a null byte in YMM1.  */
> > -	VPCMP $0, %YMMZERO, %YMM1, %k0
> > -	/* Each bit in K1 represents a CHAR in YMM1.  */
> > -	VPCMP $0, %YMMMATCH, %YMM1, %k1
> > +	VMOVU (VEC_SIZE * 3)(%rdi), %YMM4
> > +	VPTESTN %YMM4, %YMM4, %k0
> >  	kmovd %k0, %ecx
> > -	kmovd %k1, %eax
> > -	orl %eax, %ecx
> > -	jz L(aligned_loop)
> > +	movq %rdi, %r8
> > +	testl %ecx, %ecx
> > +	jnz L(first_vec_x3)
> >
> > +	andq $-(VEC_SIZE * 2), %rdi
> >  	.p2align 4
> > -L(char_nor_null):
> > -	/* Find a CHAR or a null byte in a loop.  */
> > +L(first_aligned_loop):
> > +	/* Preserve YMM1, YMM2, YMM3, and YMM4 until we can guarantee
> > +	   they don't store a match.  */
> > +	VMOVA (VEC_SIZE * 4)(%rdi), %YMM5
> > +	VMOVA (VEC_SIZE * 5)(%rdi), %YMM6
> > +
> > +	VPCMP $0, %YMM5, %YMMMATCH, %k2
> > +	vpxord %YMM6, %YMMMATCH, %YMM7
> > +
> > +	VPMIN %YMM5, %YMM6, %YMM8
> > +	VPMIN %YMM8, %YMM7, %YMM7
> > +
> > +	VPTESTN %YMM7, %YMM7, %k1
> > +	subq $(VEC_SIZE * -2), %rdi
> > +	kortestd %k1, %k2
> > +	jz L(first_aligned_loop)
> > +
> > +	VPCMP $0, %YMM6, %YMMMATCH, %k3
> > +	VPTESTN %YMM8, %YMM8, %k1
> > +	ktestd %k1, %k1
> > +	jz L(second_aligned_loop_prep)
> > +
> > +	kortestd %k2, %k3
> > +	jnz L(return_first_aligned_loop)
> > +
> > +	.p2align 4,, 6
> > +L(first_vec_x1_or_x2_or_x3):
> > +	VPCMP $0, %YMM4, %YMMMATCH, %k4
> > +	kmovd %k4, %eax
> >  	testl %eax, %eax
> > -	jnz L(match)
> > -L(return_value):
> > -	testl %edx, %edx
> > -	jz L(return_null)
> > -	movl %edx, %eax
> > -	movq %rsi, %rdi
> > +	jz L(first_vec_x1_or_x2)
> >  	bsrl %eax, %eax
> > -# ifdef USE_AS_WCSRCHR
> > -	/* NB: Multiply wchar_t count by 4 to get the number of bytes.  */
> > -	leaq -VEC_SIZE(%rdi, %rax, 4), %rax
> > -# else
> > -	leaq -VEC_SIZE(%rdi, %rax), %rax
> > -# endif
> > +	leaq (VEC_SIZE * 3)(%r8, %rax, CHAR_SIZE), %rax
> >  	ret
> >
> > -	.p2align 4
> > -L(match):
> > -	/* Find a CHAR.  Check if there is a null byte.  */
> > -	kmovd %k0, %ecx
> > -	testl %ecx, %ecx
> > -	jnz L(find_nul)
> > +	.p2align 4,, 8
> > +L(return_first_aligned_loop):
> > +	VPTESTN %YMM5, %YMM5, %k0
> > +	kunpck %k0, %k1, %k0
> > +	kmov_2x %k0, %maskz_2x
> > +
> > +	blsmsk %maskz_2x, %maskz_2x
> > +	kunpck %k2, %k3, %k3
> > +	kmov_2x %k3, %maskm_2x
> > +	and %maskz_2x, %maskm_2x
> > +	jz L(first_vec_x1_or_x2_or_x3)
> > +
> > +	bsr %maskm_2x, %maskm_2x
> > +	leaq (VEC_SIZE * 2)(%rdi, %rax, CHAR_SIZE), %rax
> > +	ret
> >
> > -	/* Remember the match and keep searching.  */
> > -	movl %eax, %edx
> > +	.p2align 4
> > +	/* We can throw away the work done for the first 4x checks here
> > +	   as we have a later match.  This is the 'fast' path per se.
> > +	 */
> > +L(second_aligned_loop_prep):
> > +L(second_aligned_loop_set_furthest_match):
> >  	movq %rdi, %rsi
> > -	jmp L(aligned_loop)
> > +	kunpck %k2, %k3, %k4
> >
> >  	.p2align 4
> > -L(find_nul):
> > -	/* Mask out any matching bits after the null byte.  */
> > -	movl %ecx, %r8d
> > -	subl $1, %r8d
> > -	xorl %ecx, %r8d
> > -	andl %r8d, %eax
> > -	testl %eax, %eax
> > -	/* If there is no CHAR here, return the remembered one.  */
> > -	jz L(return_value)
> > -	bsrl %eax, %eax
> > +L(second_aligned_loop):
> > +	VMOVU (VEC_SIZE * 4)(%rdi), %YMM1
> > +	VMOVU (VEC_SIZE * 5)(%rdi), %YMM2
> > +
> > +	VPCMP $0, %YMM1, %YMMMATCH, %k2
> > +	vpxord %YMM2, %YMMMATCH, %YMM3
> > +
> > +	VPMIN %YMM1, %YMM2, %YMM4
> > +	VPMIN %YMM3, %YMM4, %YMM3
> > +
> > +	VPTESTN %YMM3, %YMM3, %k1
> > +	subq $(VEC_SIZE * -2), %rdi
> > +	kortestd %k1, %k2
> > +	jz L(second_aligned_loop)
> > +
> > +	VPCMP $0, %YMM2, %YMMMATCH, %k3
> > +	VPTESTN %YMM4, %YMM4, %k1
> > +	ktestd %k1, %k1
> > +	jz L(second_aligned_loop_set_furthest_match)
> > +
> > +	kortestd %k2, %k3
> > +	/* branch here because there is a significant advantage in terms
> > +	   of output dependency chain in using edx.  */
> > +	jnz L(return_new_match)
> > +L(return_old_match):
> > +	kmovq %k4, %rax
> > +	bsrq %rax, %rax
> > +	leaq (VEC_SIZE * 2)(%rsi, %rax, CHAR_SIZE), %rax
> > +	ret
> > +
> > +L(return_new_match):
> > +	VPTESTN %YMM1, %YMM1, %k0
> > +	kunpck %k0, %k1, %k0
> > +	kmov_2x %k0, %maskz_2x
> > +
> > +	blsmsk %maskz_2x, %maskz_2x
> > +	kunpck %k2, %k3, %k3
> > +	kmov_2x %k3, %maskm_2x
> > +	and %maskz_2x, %maskm_2x
> > +	jz L(return_old_match)
> > +
> > +	bsr %maskm_2x, %maskm_2x
> > +	leaq (VEC_SIZE * 2)(%rdi, %rax, CHAR_SIZE), %rax
> > +	ret
> > +
> > +	/* This block is horribly aligned (% 16 == 15).  This is
> > +	   intentional.  The L(cross_page_boundary) block is exactly
> > +	   32 bytes of code size.  Ultimately this is a cold case so
> > +	   save the code size by leaving misaligned.  */
> > +L(cross_page_boundary):
> > +	xorq %rdi, %rax
> > +	VMOVU (PAGE_SIZE - VEC_SIZE)(%rax), %YMM1
> > +	VPTESTN %YMM1, %YMM1, %k0
> > +	kmovd %k0, %ecx
> >  # ifdef USE_AS_WCSRCHR
> > -	/* NB: Multiply wchar_t count by 4 to get the number of bytes.  */
> > -	leaq -VEC_SIZE(%rdi, %rax, 4), %rax
> > -# else
> > -	leaq -VEC_SIZE(%rdi, %rax), %rax
> > +	movl %edi, %esi
> > +	andl $(VEC_SIZE - 1), %esi
> > +	shrl $2, %esi
> >  # endif
> > -	ret
> > +	shrxl %SHIFT_REG, %ecx, %ecx
> >
> > -	.p2align 4
> > -L(char_and_nul):
> > -	/* Find both a CHAR and a null byte.  */
> > -	addq %rcx, %rdi
> > -	movl %edx, %ecx
> > -L(char_and_nul_in_first_vec):
> > -	/* Mask out any matching bits after the null byte.  */
> > -	movl %ecx, %r8d
> > -	subl $1, %r8d
> > -	xorl %ecx, %r8d
> > -	andl %r8d, %eax
> > -	testl %eax, %eax
> > -	/* Return null pointer if the null byte comes first.  */
> > -	jz L(return_null)
> > +	testl %ecx, %ecx
> > +	jz L(page_cross_continue)
> > +	VPCMP $0, %YMMMATCH, %YMM1, %k1
> > +	kmovd %k1, %eax
> > +	shrxl %SHIFT_REG, %eax, %eax
> > +	blsmskl %ecx, %ecx
> > +	andl %ecx, %eax
> > +	jz L(ret3)
> >  	bsrl %eax, %eax
> >  # ifdef USE_AS_WCSRCHR
> > -	/* NB: Multiply wchar_t count by 4 to get the number of bytes.  */
> > -	leaq -VEC_SIZE(%rdi, %rax, 4), %rax
> > +	leaq (%rdi, %rax, CHAR_SIZE), %rax
> >  # else
> > -	leaq -VEC_SIZE(%rdi, %rax), %rax
> > +	addq %rdi, %rax
> >  # endif
> > +L(ret3):
> >  	ret
> >
> > -	.p2align 4
> > -L(return_null):
> > -	xorl %eax, %eax
> > -	ret
> > -
> > -END (STRRCHR)
> > +END(STRRCHR)
> >  #endif
> > --
> > 2.25.1
> >
> >
> --
> H.J.
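For anyone following the return paths above (blsmskl/blsmsk followed by bsr), here is a scalar sketch of that mask trick. This is not glibc code; the helper name and the 32-bit mask type are made up for the example, and __builtin_clz is the GCC-style builtin.

#include <stdint.h>

/* Bit i of match_mask is set when element i equals the search CHAR;
   bit i of zero_mask is set when element i is the null terminator.
   blsmsk(zero_mask) == zero_mask ^ (zero_mask - 1) keeps every bit up
   to and including the first terminator (all ones when zero_mask is 0),
   so ANDing it with match_mask discards matches that lie past the end
   of the string.  bsr then picks the highest surviving bit, i.e. the
   last valid match in this vector.  */
static inline int
last_match_before_null (uint32_t match_mask, uint32_t zero_mask)
{
  uint32_t in_bounds = zero_mask ^ (zero_mask - 1);   /* blsmsk */
  uint32_t valid = match_mask & in_bounds;
  if (valid == 0)
    return -1;                           /* no match at or before the null */
  return 31 - __builtin_clz (valid);     /* index of highest set bit (bsr) */
}

One AND plus one bit scan per vector is what lets the wide version recover the last in-bounds match without any per-element loop.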