From: Noah Goldstein
Date: Mon, 19 Apr 2021 15:35:29 -0400
Subject: Re: [PATCH v2 1/2] x86: Optimize less_vec evex and avx512 memset-vec-unaligned-erms.S
To: "H.J. Lu"
Cc: GNU C Library, "Carlos O'Donell"
References: <20210419163025.2285675-1-goldstein.w.n@gmail.com>

On Mon, Apr 19, 2021 at 2:45 PM H.J. Lu wrote:
>
> On Mon, Apr 19, 2021 at 9:30 AM Noah Goldstein wrote:
> >
> > No bug.  This commit adds an optimized case for the less_vec memset path
> > that uses the avx512vl/avx512bw mask store, avoiding the excessive
> > branches.  test-memset and test-wmemset are passing.
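(For illustration only, not part of the patch: a minimal C-intrinsics sketch of
the masked-store idea described in the commit message, assuming a 32-byte
vector width, AVX512VL + AVX512BW + BMI2, and made-up function names; it would
be compiled with -mavx512vl -mavx512bw -mbmi2.)

    #include <immintrin.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Hypothetical sketch of the branchless less_vec idea for a 32-byte
       vector: build a byte mask with the low n bits set and issue a single
       masked store.  Assumes n <= 32 and that the 32-byte window starting
       at dst does not run into an unmapped page (the assembly guards this
       with a page-cross check).  */
    static void
    memset_less_vec_sketch (void *dst, int c, size_t n)
    {
      __m256i v = _mm256_set1_epi8 ((char) c);
      /* BMI2 bzhi keeps the low n bits of an all-ones word, giving one
         mask bit per byte that should be written.  */
      __mmask32 k = (__mmask32) _bzhi_u32 (0xffffffffU, (uint32_t) n);
      /* AVX512BW/VL byte-masked store: bytes whose mask bit is clear are
         left untouched, so no branching on n is needed.  */
      _mm256_mask_storeu_epi8 (dst, k, v);
    }
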
> >
> > Signed-off-by: Noah Goldstein
> > ---
> >  sysdeps/x86_64/multiarch/ifunc-memset.h       |  6 ++-
> >  .../multiarch/memset-avx512-unaligned-erms.S  |  2 +-
> >  .../multiarch/memset-evex-unaligned-erms.S    |  2 +-
> >  .../multiarch/memset-vec-unaligned-erms.S     | 52 +++++++++++++++----
> >  4 files changed, 47 insertions(+), 15 deletions(-)
> >
> > diff --git a/sysdeps/x86_64/multiarch/ifunc-memset.h b/sysdeps/x86_64/multiarch/ifunc-memset.h
> > index 502f946a84..eda5640541 100644
> > --- a/sysdeps/x86_64/multiarch/ifunc-memset.h
> > +++ b/sysdeps/x86_64/multiarch/ifunc-memset.h
> > @@ -54,7 +54,8 @@ IFUNC_SELECTOR (void)
> >        && !CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_AVX512))
> >      {
> >        if (CPU_FEATURE_USABLE_P (cpu_features, AVX512VL)
> > -         && CPU_FEATURE_USABLE_P (cpu_features, AVX512BW))
> > +         && CPU_FEATURE_USABLE_P (cpu_features, AVX512BW)
> > +         && CPU_FEATURE_USABLE_P (cpu_features, BMI2))
> >        {
> >          if (CPU_FEATURE_USABLE_P (cpu_features, ERMS))
> >            return OPTIMIZE (avx512_unaligned_erms);
> > @@ -68,7 +69,8 @@ IFUNC_SELECTOR (void)
> >    if (CPU_FEATURE_USABLE_P (cpu_features, AVX2))
> >      {
> >        if (CPU_FEATURE_USABLE_P (cpu_features, AVX512VL)
> > -         && CPU_FEATURE_USABLE_P (cpu_features, AVX512BW))
> > +         && CPU_FEATURE_USABLE_P (cpu_features, AVX512BW)
> > +         && CPU_FEATURE_USABLE_P (cpu_features, BMI2))
>
> Please also update ifunc-impl-list.c.

Done.

>
> >        {
> >          if (CPU_FEATURE_USABLE_P (cpu_features, ERMS))
> >            return OPTIMIZE (evex_unaligned_erms);
> > diff --git a/sysdeps/x86_64/multiarch/memset-avx512-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-avx512-unaligned-erms.S
> > index 22e7b187c8..d03460be93 100644
> > --- a/sysdeps/x86_64/multiarch/memset-avx512-unaligned-erms.S
> > +++ b/sysdeps/x86_64/multiarch/memset-avx512-unaligned-erms.S
> > @@ -19,6 +19,6 @@
> >  # define SECTION(p)            p##.evex512
> >  # define MEMSET_SYMBOL(p,s)    p##_avx512_##s
> >  # define WMEMSET_SYMBOL(p,s)   p##_avx512_##s
> > -
> > +# define USE_LESS_VEC_MASKMOV  1
>
> USE_LESS_VEC_MASKED_STORE

Done.

>
> >  # include "memset-vec-unaligned-erms.S"
> >  #endif
> > diff --git a/sysdeps/x86_64/multiarch/memset-evex-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-evex-unaligned-erms.S
> > index ae0a4d6e46..eb3541ef60 100644
> > --- a/sysdeps/x86_64/multiarch/memset-evex-unaligned-erms.S
> > +++ b/sysdeps/x86_64/multiarch/memset-evex-unaligned-erms.S
> > @@ -19,6 +19,6 @@
> >  # define SECTION(p)            p##.evex
> >  # define MEMSET_SYMBOL(p,s)    p##_evex_##s
> >  # define WMEMSET_SYMBOL(p,s)   p##_evex_##s
> > -
> > +# define USE_LESS_VEC_MASKMOV  1
> >  # include "memset-vec-unaligned-erms.S"
> >  #endif
> > diff --git a/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
> > index 584747f1a1..6b02e87f48 100644
> > --- a/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
> > +++ b/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
> > @@ -63,6 +63,9 @@
> >  # endif
> >  #endif
> >
> > +#define PAGE_SIZE 4096
> > +#define LOG_PAGE_SIZE 12
> > +
> >  #ifndef SECTION
> >  # error SECTION is not defined!
> >  #endif
> > @@ -213,11 +216,38 @@ L(loop):
> >     cmpq    %rcx, %rdx
> >     jne     L(loop)
> >     VZEROUPPER_SHORT_RETURN
> > +
> > +   .p2align 4
> >  L(less_vec):
> >     /* Less than 1 VEC.  */
> >  # if VEC_SIZE != 16 && VEC_SIZE != 32 && VEC_SIZE != 64
> >  #  error Unsupported VEC_SIZE!
> >  # endif
> > +# ifdef USE_LESS_VEC_MASKMOV
> > +   /* Clear high bits from edi. Only keeping bits relevant to page
> > +      cross check. Using sall instead of andl saves 3 bytes.
Note > > + that we are using rax which is set in > > + MEMSET_VDUP_TO_VEC0_AND_SET_RETURN as ptr from here on out. */ > > + sall $(32 - LOG_PAGE_SIZE), %edi > > + /* Check if VEC_SIZE load cross page. Mask loads suffer serious > > + performance degradation when it has to fault supress. */ > > + cmpl $((PAGE_SIZE - VEC_SIZE) << (32 - LOG_PAGE_SIZE)), %edi > > Please use AND and CMP since AND has higher throughput. AND uses more code size for VEC_SIZE=16/32 and just barely pushes the L(cross_page) to the next 16 byte chunk so the extra 3 bytes from AND end up costing 16 bytes. Not aligning L(cross_page) to 16 also introduces higher variance to benchmarks so I think it has to be all 16 bytes. As is I don't think throughput of AND / SAL is on the critical path so code size should win out. (We can also decode MOV -1, ecx first cycle with SAL as opposed to AND). What do you think? > > > + ja L(cross_page) > > +# if VEC_SIZE > 32 > > + movq $-1, %rcx > > + bzhiq %rdx, %rcx, %rcx > > + kmovq %rcx, %k1 > > +# else > > + movl $-1, %ecx > > + bzhil %edx, %ecx, %ecx > > + kmovd %ecx, %k1 > > +# endif > > + vmovdqu8 %VEC(0), (%rax) {%k1} > > + VZEROUPPER_RETURN > > + > > + .p2align 4 > > +L(cross_page): > > +# endif > > # if VEC_SIZE > 32 > > cmpb $32, %dl > > jae L(between_32_63) > > @@ -234,36 +264,36 @@ L(less_vec): > > cmpb $1, %dl > > ja L(between_2_3) > > jb 1f > > - movb %cl, (%rdi) > > + movb %cl, (%rax) > > 1: > > VZEROUPPER_RETURN > > # if VEC_SIZE > 32 > > /* From 32 to 63. No branch when size == 32. */ > > L(between_32_63): > > - VMOVU %YMM0, -32(%rdi,%rdx) > > - VMOVU %YMM0, (%rdi) > > + VMOVU %YMM0, -32(%rax,%rdx) > > + VMOVU %YMM0, (%rax) > > VZEROUPPER_RETURN > > # endif > > # if VEC_SIZE > 16 > > /* From 16 to 31. No branch when size == 16. */ > > L(between_16_31): > > - VMOVU %XMM0, -16(%rdi,%rdx) > > - VMOVU %XMM0, (%rdi) > > + VMOVU %XMM0, -16(%rax,%rdx) > > + VMOVU %XMM0, (%rax) > > VZEROUPPER_RETURN > > # endif > > /* From 8 to 15. No branch when size == 8. */ > > L(between_8_15): > > - movq %rcx, -8(%rdi,%rdx) > > - movq %rcx, (%rdi) > > + movq %rcx, -8(%rax,%rdx) > > + movq %rcx, (%rax) > > VZEROUPPER_RETURN > > L(between_4_7): > > /* From 4 to 7. No branch when size == 4. */ > > - movl %ecx, -4(%rdi,%rdx) > > - movl %ecx, (%rdi) > > + movl %ecx, -4(%rax,%rdx) > > + movl %ecx, (%rax) > > VZEROUPPER_RETURN > > L(between_2_3): > > /* From 2 to 3. No branch when size == 2. */ > > - movw %cx, -2(%rdi,%rdx) > > - movw %cx, (%rdi) > > + movw %cx, -2(%rax,%rdx) > > + movw %cx, (%rax) > > VZEROUPPER_RETURN > > END (MEMSET_SYMBOL (__memset, unaligned_erms)) > > -- > > 2.29.2 > > > > Thanks. > > -- > H.J.