From: Sunil Pandey
Date: Fri, 14 Oct 2022 20:44:56 -0700
Subject: Re: [PATCH v10 2/6] x86: Update memrchr to use new VEC macros
To: Noah Goldstein
Cc: libc-alpha@sourceware.org
In-Reply-To: <20221015030030.204172-2-goldstein.w.n@gmail.com>
References: <20221014164008.1325863-1-goldstein.w.n@gmail.com>
 <20221015030030.204172-1-goldstein.w.n@gmail.com>
 <20221015030030.204172-2-goldstein.w.n@gmail.com>

On Fri, Oct 14, 2022 at 8:01 PM Noah Goldstein via Libc-alpha wrote:
>
> Replace %VEC(n) -> %VMM(n)
>
> This commit does not change libc.so
>
> Tested build on x86-64
> ---
>  sysdeps/x86_64/multiarch/memrchr-evex.S | 42 ++++++++++++-------------
>  1 file changed, 21 insertions(+), 21 deletions(-)
>
> diff --git a/sysdeps/x86_64/multiarch/memrchr-evex.S b/sysdeps/x86_64/multiarch/memrchr-evex.S
> index ea3a0a0a60..550b328c5a 100644
> --- a/sysdeps/x86_64/multiarch/memrchr-evex.S
> +++ b/sysdeps/x86_64/multiarch/memrchr-evex.S
> @@ -21,7 +21,7 @@
>  #if ISA_SHOULD_BUILD (4)
>
>  # include <sysdep.h>
> -# include "evex256-vecs.h"
> +# include "x86-evex256-vecs.h"
>  # if VEC_SIZE != 32
>  #  error "VEC_SIZE != 32 unimplemented"
>  # endif
> @@ -31,7 +31,7 @@
>  # endif
>
>  # define PAGE_SIZE 4096
> -# define VECMATCH VEC(0)
> +# define VMMMATCH VMM(0)
>
>  .section SECTION(.text), "ax", @progbits
>  ENTRY_P2ALIGN(MEMRCHR, 6)
> @@ -47,7 +47,7 @@ ENTRY_P2ALIGN(MEMRCHR, 6)
> 	   correct page cross check and 2) it correctly sets up end ptr to be
> 	   subtract by lzcnt aligned.  */
> 	leaq	-1(%rdi, %rdx), %rax
> -	vpbroadcastb %esi, %VECMATCH
> +	vpbroadcastb %esi, %VMMMATCH
>
> 	/* Check if we can load 1x VEC without cross a page.  */
> 	testl	$(PAGE_SIZE - VEC_SIZE), %eax
> @@ -55,7 +55,7 @@ ENTRY_P2ALIGN(MEMRCHR, 6)
>
> 	/* Don't use rax for pointer here because EVEX has better encoding with
> 	   offset % VEC_SIZE == 0.  */
> -	vpcmpb	$0, -(VEC_SIZE)(%rdi, %rdx), %VECMATCH, %k0
> +	vpcmpb	$0, -(VEC_SIZE)(%rdi, %rdx), %VMMMATCH, %k0
> 	kmovd	%k0, %ecx
>
> 	/* Fall through for rdx (len) <= VEC_SIZE (expect small sizes).  */
> @@ -96,7 +96,7 @@ L(more_1x_vec):
> 	movq	%rax, %rdx
>
> 	/* Need no matter what.  */
> -	vpcmpb	$0, -(VEC_SIZE)(%rax), %VECMATCH, %k0
> +	vpcmpb	$0, -(VEC_SIZE)(%rax), %VMMMATCH, %k0
> 	kmovd	%k0, %ecx
>
> 	subq	%rdi, %rdx
> @@ -115,7 +115,7 @@ L(last_2x_vec):
>
> 	/* Don't use rax for pointer here because EVEX has better encoding with
> 	   offset % VEC_SIZE == 0.  */
> -	vpcmpb	$0, -(VEC_SIZE * 2)(%rdi, %rdx), %VECMATCH, %k0
> +	vpcmpb	$0, -(VEC_SIZE * 2)(%rdi, %rdx), %VMMMATCH, %k0
> 	kmovd	%k0, %ecx
> 	/* NB: 64-bit lzcnt. This will naturally add 32 to position.  */
> 	lzcntq	%rcx, %rcx
> @@ -131,7 +131,7 @@ L(last_2x_vec):
>  L(page_cross):
> 	movq	%rax, %rsi
> 	andq	$-VEC_SIZE, %rsi
> -	vpcmpb	$0, (%rsi), %VECMATCH, %k0
> +	vpcmpb	$0, (%rsi), %VMMMATCH, %k0
> 	kmovd	%k0, %r8d
> 	/* Shift out negative alignment (because we are starting from endptr and
> 	   working backwards).  */
> @@ -165,13 +165,13 @@ L(more_2x_vec):
> 	testl	%ecx, %ecx
> 	jnz	L(ret_vec_x0_dec)
>
> -	vpcmpb	$0, -(VEC_SIZE * 2)(%rax), %VECMATCH, %k0
> +	vpcmpb	$0, -(VEC_SIZE * 2)(%rax), %VMMMATCH, %k0
> 	kmovd	%k0, %ecx
> 	testl	%ecx, %ecx
> 	jnz	L(ret_vec_x1)
>
> 	/* Need no matter what.  */
> -	vpcmpb	$0, -(VEC_SIZE * 3)(%rax), %VECMATCH, %k0
> +	vpcmpb	$0, -(VEC_SIZE * 3)(%rax), %VMMMATCH, %k0
> 	kmovd	%k0, %ecx
>
> 	subq	$(VEC_SIZE * 4), %rdx
> @@ -185,7 +185,7 @@ L(last_vec):
>
>
> 	/* Need no matter what.  */
> -	vpcmpb	$0, -(VEC_SIZE * 4)(%rax), %VECMATCH, %k0
> +	vpcmpb	$0, -(VEC_SIZE * 4)(%rax), %VMMMATCH, %k0
> 	kmovd	%k0, %ecx
> 	lzcntl	%ecx, %ecx
> 	subq	$(VEC_SIZE * 3 + 1), %rax
> @@ -220,7 +220,7 @@ L(more_4x_vec):
> 	testl	%ecx, %ecx
> 	jnz	L(ret_vec_x2)
>
> -	vpcmpb	$0, -(VEC_SIZE * 4)(%rax), %VECMATCH, %k0
> +	vpcmpb	$0, -(VEC_SIZE * 4)(%rax), %VMMMATCH, %k0
> 	kmovd	%k0, %ecx
>
> 	testl	%ecx, %ecx
> @@ -243,17 +243,17 @@ L(more_4x_vec):
>  L(loop_4x_vec):
> 	/* Store 1 were not-equals and 0 where equals in k1 (used to mask later
> 	   on).  */
> -	vpcmpb	$4, (VEC_SIZE * 3)(%rax), %VECMATCH, %k1
> +	vpcmpb	$4, (VEC_SIZE * 3)(%rax), %VMMMATCH, %k1
>
> 	/* VEC(2/3) will have zero-byte where we found a CHAR.  */
> -	vpxorq	(VEC_SIZE * 2)(%rax), %VECMATCH, %VEC(2)
> -	vpxorq	(VEC_SIZE * 1)(%rax), %VECMATCH, %VEC(3)
> -	vpcmpb	$0, (VEC_SIZE * 0)(%rax), %VECMATCH, %k4
> +	vpxorq	(VEC_SIZE * 2)(%rax), %VMMMATCH, %VMM(2)
> +	vpxorq	(VEC_SIZE * 1)(%rax), %VMMMATCH, %VMM(3)
> +	vpcmpb	$0, (VEC_SIZE * 0)(%rax), %VMMMATCH, %k4
>
> 	/* Combine VEC(2/3) with min and maskz with k1 (k1 has zero bit where
> 	   CHAR is found and VEC(2/3) have zero-byte where CHAR is found.  */
> -	vpminub	%VEC(2), %VEC(3), %VEC(3){%k1}{z}
> -	vptestnmb %VEC(3), %VEC(3), %k2
> +	vpminub	%VMM(2), %VMM(3), %VMM(3){%k1}{z}
> +	vptestnmb %VMM(3), %VMM(3), %k2
>
> 	/* Any 1s and we found CHAR.  */
> 	kortestd %k2, %k4
> @@ -270,7 +270,7 @@ L(loop_4x_vec):
>  L(last_4x_vec):
>
> 	/* Used no matter what.  */
> -	vpcmpb	$0, (VEC_SIZE * -1)(%rax), %VECMATCH, %k0
> +	vpcmpb	$0, (VEC_SIZE * -1)(%rax), %VMMMATCH, %k0
> 	kmovd	%k0, %ecx
>
> 	cmpl	$(VEC_SIZE * 2), %edx
> @@ -280,14 +280,14 @@ L(last_4x_vec):
> 	jnz	L(ret_vec_x0_dec)
>
>
> -	vpcmpb	$0, (VEC_SIZE * -2)(%rax), %VECMATCH, %k0
> +	vpcmpb	$0, (VEC_SIZE * -2)(%rax), %VMMMATCH, %k0
> 	kmovd	%k0, %ecx
>
> 	testl	%ecx, %ecx
> 	jnz	L(ret_vec_x1)
>
> 	/* Used no matter what.  */
> -	vpcmpb	$0, (VEC_SIZE * -3)(%rax), %VECMATCH, %k0
> +	vpcmpb	$0, (VEC_SIZE * -3)(%rax), %VMMMATCH, %k0
> 	kmovd	%k0, %ecx
>
> 	cmpl	$(VEC_SIZE * 3), %edx
> @@ -309,7 +309,7 @@ L(loop_end):
> 	testl	%ecx, %ecx
> 	jnz	L(ret_vec_x0_end)
>
> -	vptestnmb %VEC(2), %VEC(2), %k0
> +	vptestnmb %VMM(2), %VMM(2), %k0
> 	kmovd	%k0, %ecx
> 	testl	%ecx, %ecx
> 	jnz	L(ret_vec_x1_end)
> --
> 2.34.1
>

LGTM

--Sunil
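
P.S. for readers skimming the series: the change is purely mechanical,
%VEC(n)/%VECMATCH become %VMM(n)/%VMMMATCH with no change to the
generated code.  A minimal sketch of why the macro spelling matters
(my own illustration, not part of the patch, and assuming the
x86-*-vecs.h config headers behave as I recall): the register class is
picked up from whichever config header the file includes, so the body
never names a concrete ymm/zmm register.

    # include "x86-evex256-vecs.h"	/* Sets VEC_SIZE to 32 and maps
					   VMM(n) to an EVEX-encodable
					   32-byte vector register.  */
    # define VMMMATCH	VMM(0)

	/* Broadcast CHAR into every byte of the match vector.  */
	vpbroadcastb	%esi, %VMMMATCH
	/* Compare 32 bytes at (%rdi) for equality; one result bit per
	   byte lands in mask register %k0.  */
	vpcmpb	$0, (%rdi), %VMMMATCH, %k0
	kmovd	%k0, %ecx

Because of that indirection, retargeting the routine to another vector
width should mostly be a matter of swapping the included config header.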