From: "H.J. Lu" <hjl.tools@gmail.com>
To: Noah Goldstein <goldstein.w.n@gmail.com>
Cc: GNU C Library <libc-alpha@sourceware.org>,
"Carlos O'Donell" <carlos@systemhalted.org>
Subject: Re: [PATCH v4 1/2] x86: Optimize less_vec evex and avx512 memset-vec-unaligned-erms.S
Date: Mon, 19 Apr 2021 14:44:11 -0700 [thread overview]
Message-ID: <CAMe9rOpx8+dJa=gOS-1Rm7fR2Kym2rWxN77=6n-Xf5AnVqsyzw@mail.gmail.com> (raw)
In-Reply-To: <20210419213605.2851266-1-goldstein.w.n@gmail.com>
On Mon, Apr 19, 2021 at 2:36 PM Noah Goldstein <goldstein.w.n@gmail.com> wrote:
>
> No bug. This commit adds an optimized case for the less_vec memset case that
> uses the avx512vl/avx512bw mask store avoiding the excessive
> branches. test-memset and test-wmemset are passing.
>
> Signed-off-by: Noah Goldstein <goldstein.w.n@gmail.com>
> ---
> sysdeps/x86_64/multiarch/ifunc-impl-list.c | 40 ++++++++++-----
> sysdeps/x86_64/multiarch/ifunc-memset.h | 6 ++-
> .../multiarch/memset-avx512-unaligned-erms.S | 2 +-
> .../multiarch/memset-evex-unaligned-erms.S | 2 +-
> .../multiarch/memset-vec-unaligned-erms.S | 51 +++++++++++++++----
> 5 files changed, 74 insertions(+), 27 deletions(-)
>
> diff --git a/sysdeps/x86_64/multiarch/ifunc-impl-list.c b/sysdeps/x86_64/multiarch/ifunc-impl-list.c
> index 0b0927b124..c377cab629 100644
> --- a/sysdeps/x86_64/multiarch/ifunc-impl-list.c
> +++ b/sysdeps/x86_64/multiarch/ifunc-impl-list.c
> @@ -204,19 +204,23 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
> __memset_chk_avx2_unaligned_erms_rtm)
> IFUNC_IMPL_ADD (array, i, __memset_chk,
> (CPU_FEATURE_USABLE (AVX512VL)
> - && CPU_FEATURE_USABLE (AVX512BW)),
> + && CPU_FEATURE_USABLE (AVX512BW)
> + && CPU_FEATURE_USABLE (BMI2)),
> __memset_chk_evex_unaligned)
> IFUNC_IMPL_ADD (array, i, __memset_chk,
> (CPU_FEATURE_USABLE (AVX512VL)
> - && CPU_FEATURE_USABLE (AVX512BW)),
> + && CPU_FEATURE_USABLE (AVX512BW)
> + && CPU_FEATURE_USABLE (BMI2)),
> __memset_chk_evex_unaligned_erms)
> IFUNC_IMPL_ADD (array, i, __memset_chk,
> (CPU_FEATURE_USABLE (AVX512VL)
> - && CPU_FEATURE_USABLE (AVX512BW)),
> + && CPU_FEATURE_USABLE (AVX512BW)
> + && CPU_FEATURE_USABLE (BMI2)),
> __memset_chk_avx512_unaligned_erms)
> IFUNC_IMPL_ADD (array, i, __memset_chk,
> (CPU_FEATURE_USABLE (AVX512VL)
> - && CPU_FEATURE_USABLE (AVX512BW)),
> + && CPU_FEATURE_USABLE (AVX512BW)
> + && CPU_FEATURE_USABLE (BMI2)),
> __memset_chk_avx512_unaligned)
> IFUNC_IMPL_ADD (array, i, __memset_chk,
> CPU_FEATURE_USABLE (AVX512F),
> @@ -247,19 +251,23 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
> __memset_avx2_unaligned_erms_rtm)
> IFUNC_IMPL_ADD (array, i, memset,
> (CPU_FEATURE_USABLE (AVX512VL)
> - && CPU_FEATURE_USABLE (AVX512BW)),
> + && CPU_FEATURE_USABLE (AVX512BW)
> + && CPU_FEATURE_USABLE (BMI2)),
> __memset_evex_unaligned)
> IFUNC_IMPL_ADD (array, i, memset,
> (CPU_FEATURE_USABLE (AVX512VL)
> - && CPU_FEATURE_USABLE (AVX512BW)),
> + && CPU_FEATURE_USABLE (AVX512BW)
> + && CPU_FEATURE_USABLE (BMI2)),
> __memset_evex_unaligned_erms)
> IFUNC_IMPL_ADD (array, i, memset,
> (CPU_FEATURE_USABLE (AVX512VL)
> - && CPU_FEATURE_USABLE (AVX512BW)),
> + && CPU_FEATURE_USABLE (AVX512BW)
> + && CPU_FEATURE_USABLE (BMI2)),
> __memset_avx512_unaligned_erms)
> IFUNC_IMPL_ADD (array, i, memset,
> (CPU_FEATURE_USABLE (AVX512VL)
> - && CPU_FEATURE_USABLE (AVX512BW)),
> + && CPU_FEATURE_USABLE (AVX512BW)
> + && CPU_FEATURE_USABLE (BMI2)),
> __memset_avx512_unaligned)
> IFUNC_IMPL_ADD (array, i, memset,
> CPU_FEATURE_USABLE (AVX512F),
> @@ -728,10 +736,14 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
> && CPU_FEATURE_USABLE (RTM)),
> __wmemset_avx2_unaligned_rtm)
> IFUNC_IMPL_ADD (array, i, wmemset,
> - CPU_FEATURE_USABLE (AVX512VL),
> + (CPU_FEATURE_USABLE (AVX512VL)
> + && CPU_FEATURE_USABLE (AVX512BW)
> + && CPU_FEATURE_USABLE (BMI2)),
> __wmemset_evex_unaligned)
> IFUNC_IMPL_ADD (array, i, wmemset,
> - CPU_FEATURE_USABLE (AVX512VL),
> + (CPU_FEATURE_USABLE (AVX512VL)
> + && CPU_FEATURE_USABLE (AVX512BW)
> + && CPU_FEATURE_USABLE (BMI2)),
> __wmemset_avx512_unaligned))
>
> #ifdef SHARED
> @@ -935,10 +947,14 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
> CPU_FEATURE_USABLE (AVX2),
> __wmemset_chk_avx2_unaligned)
> IFUNC_IMPL_ADD (array, i, __wmemset_chk,
> - CPU_FEATURE_USABLE (AVX512VL),
> + (CPU_FEATURE_USABLE (AVX512VL)
> + && CPU_FEATURE_USABLE (AVX512BW)
> + && CPU_FEATURE_USABLE (BMI2)),
> __wmemset_chk_evex_unaligned)
> IFUNC_IMPL_ADD (array, i, __wmemset_chk,
> - CPU_FEATURE_USABLE (AVX512F),
> + (CPU_FEATURE_USABLE (AVX512VL)
> + && CPU_FEATURE_USABLE (AVX512BW)
> + && CPU_FEATURE_USABLE (BMI2)),
> __wmemset_chk_avx512_unaligned))
> #endif
>
> diff --git a/sysdeps/x86_64/multiarch/ifunc-memset.h b/sysdeps/x86_64/multiarch/ifunc-memset.h
> index 502f946a84..eda5640541 100644
> --- a/sysdeps/x86_64/multiarch/ifunc-memset.h
> +++ b/sysdeps/x86_64/multiarch/ifunc-memset.h
> @@ -54,7 +54,8 @@ IFUNC_SELECTOR (void)
> && !CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_AVX512))
> {
> if (CPU_FEATURE_USABLE_P (cpu_features, AVX512VL)
> - && CPU_FEATURE_USABLE_P (cpu_features, AVX512BW))
> + && CPU_FEATURE_USABLE_P (cpu_features, AVX512BW)
> + && CPU_FEATURE_USABLE_P (cpu_features, BMI2))
> {
> if (CPU_FEATURE_USABLE_P (cpu_features, ERMS))
> return OPTIMIZE (avx512_unaligned_erms);
> @@ -68,7 +69,8 @@ IFUNC_SELECTOR (void)
> if (CPU_FEATURE_USABLE_P (cpu_features, AVX2))
> {
> if (CPU_FEATURE_USABLE_P (cpu_features, AVX512VL)
> - && CPU_FEATURE_USABLE_P (cpu_features, AVX512BW))
> + && CPU_FEATURE_USABLE_P (cpu_features, AVX512BW)
> + && CPU_FEATURE_USABLE_P (cpu_features, BMI2))
> {
> if (CPU_FEATURE_USABLE_P (cpu_features, ERMS))
> return OPTIMIZE (evex_unaligned_erms);
> diff --git a/sysdeps/x86_64/multiarch/memset-avx512-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-avx512-unaligned-erms.S
> index 22e7b187c8..8ad842fc2f 100644
> --- a/sysdeps/x86_64/multiarch/memset-avx512-unaligned-erms.S
> +++ b/sysdeps/x86_64/multiarch/memset-avx512-unaligned-erms.S
> @@ -19,6 +19,6 @@
> # define SECTION(p) p##.evex512
> # define MEMSET_SYMBOL(p,s) p##_avx512_##s
> # define WMEMSET_SYMBOL(p,s) p##_avx512_##s
> -
> +# define USE_LESS_VEC_MASK_STORE 1
> # include "memset-vec-unaligned-erms.S"
> #endif
> diff --git a/sysdeps/x86_64/multiarch/memset-evex-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-evex-unaligned-erms.S
> index ae0a4d6e46..640f092903 100644
> --- a/sysdeps/x86_64/multiarch/memset-evex-unaligned-erms.S
> +++ b/sysdeps/x86_64/multiarch/memset-evex-unaligned-erms.S
> @@ -19,6 +19,6 @@
> # define SECTION(p) p##.evex
> # define MEMSET_SYMBOL(p,s) p##_evex_##s
> # define WMEMSET_SYMBOL(p,s) p##_evex_##s
> -
> +# define USE_LESS_VEC_MASK_STORE 1
> # include "memset-vec-unaligned-erms.S"
> #endif
> diff --git a/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
> index 584747f1a1..4eac39326e 100644
> --- a/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
> +++ b/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
> @@ -63,6 +63,8 @@
> # endif
> #endif
>
> +#define PAGE_SIZE 4096
> +
> #ifndef SECTION
> # error SECTION is not defined!
> #endif
> @@ -213,11 +215,38 @@ L(loop):
> cmpq %rcx, %rdx
> jne L(loop)
> VZEROUPPER_SHORT_RETURN
> +
> + .p2align 4
> L(less_vec):
> /* Less than 1 VEC. */
> # if VEC_SIZE != 16 && VEC_SIZE != 32 && VEC_SIZE != 64
> # error Unsupported VEC_SIZE!
> # endif
> +# ifdef USE_LESS_VEC_MASK_STORE
> + /* Clear high bits from edi. Only keeping bits relevant to page
> + cross check. Note that we are using rax which is set in
> + MEMSET_VDUP_TO_VEC0_AND_SET_RETURN as ptr from here on out.
> + */
> + andl $(PAGE_SIZE - 1), %edi
> + /* Check if VEC_SIZE load cross page. Mask loads suffer serious
It should be store, not load.
> +	   performance degradation when it has to fault suppress.  */
> + cmpl $(PAGE_SIZE - VEC_SIZE), %edi
> + ja L(cross_page)
> +# if VEC_SIZE > 32
> + movq $-1, %rcx
> + bzhiq %rdx, %rcx, %rcx
> + kmovq %rcx, %k1
> +# else
> + movl $-1, %ecx
> + bzhil %edx, %ecx, %ecx
> + kmovd %ecx, %k1
> +# endif
> + vmovdqu8 %VEC(0), (%rax) {%k1}
> + VZEROUPPER_RETURN
> +
> + .p2align 4
> +L(cross_page):
> +# endif
> # if VEC_SIZE > 32
> cmpb $32, %dl
> jae L(between_32_63)
> @@ -234,36 +263,36 @@ L(less_vec):
> cmpb $1, %dl
> ja L(between_2_3)
> jb 1f
> - movb %cl, (%rdi)
> + movb %cl, (%rax)
> 1:
> VZEROUPPER_RETURN
> # if VEC_SIZE > 32
> /* From 32 to 63. No branch when size == 32. */
> L(between_32_63):
> - VMOVU %YMM0, -32(%rdi,%rdx)
> - VMOVU %YMM0, (%rdi)
> + VMOVU %YMM0, -32(%rax,%rdx)
> + VMOVU %YMM0, (%rax)
> VZEROUPPER_RETURN
> # endif
> # if VEC_SIZE > 16
> /* From 16 to 31. No branch when size == 16. */
> L(between_16_31):
> - VMOVU %XMM0, -16(%rdi,%rdx)
> - VMOVU %XMM0, (%rdi)
> + VMOVU %XMM0, -16(%rax,%rdx)
> + VMOVU %XMM0, (%rax)
> VZEROUPPER_RETURN
> # endif
> /* From 8 to 15. No branch when size == 8. */
> L(between_8_15):
> - movq %rcx, -8(%rdi,%rdx)
> - movq %rcx, (%rdi)
> + movq %rcx, -8(%rax,%rdx)
> + movq %rcx, (%rax)
> VZEROUPPER_RETURN
> L(between_4_7):
> /* From 4 to 7. No branch when size == 4. */
> - movl %ecx, -4(%rdi,%rdx)
> - movl %ecx, (%rdi)
> + movl %ecx, -4(%rax,%rdx)
> + movl %ecx, (%rax)
> VZEROUPPER_RETURN
> L(between_2_3):
> /* From 2 to 3. No branch when size == 2. */
> - movw %cx, -2(%rdi,%rdx)
> - movw %cx, (%rdi)
> + movw %cx, -2(%rax,%rdx)
> + movw %cx, (%rax)
> VZEROUPPER_RETURN
> END (MEMSET_SYMBOL (__memset, unaligned_erms))
> --
> 2.29.2
>
--
H.J.
prev parent reply other threads:[~2021-04-19 21:44 UTC|newest]
Thread overview: 3+ messages / expand[flat|nested] mbox.gz Atom feed top
2021-04-19 21:36 Noah Goldstein
2021-04-19 21:36 ` [PATCH v4 2/2] x86: Expand test-memset.c and bench-memset.c Noah Goldstein
2021-04-19 21:44 ` H.J. Lu [this message]
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to='CAMe9rOpx8+dJa=gOS-1Rm7fR2Kym2rWxN77=6n-Xf5AnVqsyzw@mail.gmail.com' \
--to=hjl.tools@gmail.com \
--cc=carlos@systemhalted.org \
--cc=goldstein.w.n@gmail.com \
--cc=libc-alpha@sourceware.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for read-only IMAP folder(s) and NNTP newsgroup(s).