* [PATCH v2 1/2] x86: Optimize less_vec evex and avx512 memset-vec-unaligned-erms.S
@ 2021-04-19 16:30 Noah Goldstein
2021-04-19 16:30 ` [PATCH v2 2/2] x86: Expand test-memset.c and bench-memset.c Noah Goldstein
2021-04-19 18:44 ` [PATCH v2 1/2] x86: Optimize less_vec evex and avx512 memset-vec-unaligned-erms.S H.J. Lu
0 siblings, 2 replies; 6+ messages in thread
From: Noah Goldstein @ 2021-04-19 16:30 UTC (permalink / raw)
To: libc-alpha
No bug. This commit adds an optimized case for the less_vec memset path
that uses the avx512vl/avx512bw masked store, avoiding the excessive
branches. test-memset and test-wmemset are passing.
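For reference, a minimal C intrinsics sketch of the approach (my
illustration only, not the patch code; the function name is made up;
assumes AVX512VL/AVX512BW for the masked store and BMI2 for bzhi, with
VEC_SIZE == 32 for concreteness; compile with
-mavx512vl -mavx512bw -mbmi2):

#include <immintrin.h>
#include <stddef.h>

/* Set n < 32 bytes with a single masked store instead of branching
   over size buckets.  */
static void
memset_less_vec_sketch (char *dst, int c, size_t n)
{
  __m256i v = _mm256_set1_epi8 ((char) c);
  /* bzhi (-1, n) keeps the low n bits, one mask bit per byte.  */
  __mmask32 k = (__mmask32) _bzhi_u32 (-1U, (unsigned int) n);
  /* Bytes whose mask bit is 0 are not written.  If the full vector
     would touch an unmapped page, the store has to suppress the
     fault, which is the slow case the page-cross check avoids.  */
  _mm256_mask_storeu_epi8 (dst, k, v);
}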
Signed-off-by: Noah Goldstein <goldstein.w.n@gmail.com>
---
sysdeps/x86_64/multiarch/ifunc-memset.h | 6 ++-
.../multiarch/memset-avx512-unaligned-erms.S | 2 +-
.../multiarch/memset-evex-unaligned-erms.S | 2 +-
.../multiarch/memset-vec-unaligned-erms.S | 52 +++++++++++++++----
4 files changed, 47 insertions(+), 15 deletions(-)
diff --git a/sysdeps/x86_64/multiarch/ifunc-memset.h b/sysdeps/x86_64/multiarch/ifunc-memset.h
index 502f946a84..eda5640541 100644
--- a/sysdeps/x86_64/multiarch/ifunc-memset.h
+++ b/sysdeps/x86_64/multiarch/ifunc-memset.h
@@ -54,7 +54,8 @@ IFUNC_SELECTOR (void)
&& !CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_AVX512))
{
if (CPU_FEATURE_USABLE_P (cpu_features, AVX512VL)
- && CPU_FEATURE_USABLE_P (cpu_features, AVX512BW))
+ && CPU_FEATURE_USABLE_P (cpu_features, AVX512BW)
+ && CPU_FEATURE_USABLE_P (cpu_features, BMI2))
{
if (CPU_FEATURE_USABLE_P (cpu_features, ERMS))
return OPTIMIZE (avx512_unaligned_erms);
@@ -68,7 +69,8 @@ IFUNC_SELECTOR (void)
if (CPU_FEATURE_USABLE_P (cpu_features, AVX2))
{
if (CPU_FEATURE_USABLE_P (cpu_features, AVX512VL)
- && CPU_FEATURE_USABLE_P (cpu_features, AVX512BW))
+ && CPU_FEATURE_USABLE_P (cpu_features, AVX512BW)
+ && CPU_FEATURE_USABLE_P (cpu_features, BMI2))
{
if (CPU_FEATURE_USABLE_P (cpu_features, ERMS))
return OPTIMIZE (evex_unaligned_erms);
diff --git a/sysdeps/x86_64/multiarch/memset-avx512-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-avx512-unaligned-erms.S
index 22e7b187c8..d03460be93 100644
--- a/sysdeps/x86_64/multiarch/memset-avx512-unaligned-erms.S
+++ b/sysdeps/x86_64/multiarch/memset-avx512-unaligned-erms.S
@@ -19,6 +19,6 @@
# define SECTION(p) p##.evex512
# define MEMSET_SYMBOL(p,s) p##_avx512_##s
# define WMEMSET_SYMBOL(p,s) p##_avx512_##s
-
+# define USE_LESS_VEC_MASKMOV 1
# include "memset-vec-unaligned-erms.S"
#endif
diff --git a/sysdeps/x86_64/multiarch/memset-evex-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-evex-unaligned-erms.S
index ae0a4d6e46..eb3541ef60 100644
--- a/sysdeps/x86_64/multiarch/memset-evex-unaligned-erms.S
+++ b/sysdeps/x86_64/multiarch/memset-evex-unaligned-erms.S
@@ -19,6 +19,6 @@
# define SECTION(p) p##.evex
# define MEMSET_SYMBOL(p,s) p##_evex_##s
# define WMEMSET_SYMBOL(p,s) p##_evex_##s
-
+# define USE_LESS_VEC_MASKMOV 1
# include "memset-vec-unaligned-erms.S"
#endif
diff --git a/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
index 584747f1a1..6b02e87f48 100644
--- a/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
+++ b/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
@@ -63,6 +63,9 @@
# endif
#endif
+#define PAGE_SIZE 4096
+#define LOG_PAGE_SIZE 12
+
#ifndef SECTION
# error SECTION is not defined!
#endif
@@ -213,11 +216,38 @@ L(loop):
cmpq %rcx, %rdx
jne L(loop)
VZEROUPPER_SHORT_RETURN
+
+ .p2align 4
L(less_vec):
/* Less than 1 VEC. */
# if VEC_SIZE != 16 && VEC_SIZE != 32 && VEC_SIZE != 64
# error Unsupported VEC_SIZE!
# endif
+# ifdef USE_LESS_VEC_MASKMOV
+ /* Clear high bits from edi. Only keeping bits relevant to page
+ cross check. Using sall instead of andl saves 3 bytes. Note
+ that we are using rax which is set in
+ MEMSET_VDUP_TO_VEC0_AND_SET_RETURN as ptr from here on out. */
+ sall $(32 - LOG_PAGE_SIZE), %edi
+ /* Check if a VEC_SIZE load crosses a page. Masked loads suffer serious
+ performance degradation when they have to suppress faults. */
+ cmpl $((PAGE_SIZE - VEC_SIZE) << (32 - LOG_PAGE_SIZE)), %edi
+ ja L(cross_page)
+# if VEC_SIZE > 32
+ movq $-1, %rcx
+ bzhiq %rdx, %rcx, %rcx
+ kmovq %rcx, %k1
+# else
+ movl $-1, %ecx
+ bzhil %edx, %ecx, %ecx
+ kmovd %ecx, %k1
+# endif
+ vmovdqu8 %VEC(0), (%rax) {%k1}
+ VZEROUPPER_RETURN
+
+ .p2align 4
+L(cross_page):
+# endif
# if VEC_SIZE > 32
cmpb $32, %dl
jae L(between_32_63)
@@ -234,36 +264,36 @@ L(less_vec):
cmpb $1, %dl
ja L(between_2_3)
jb 1f
- movb %cl, (%rdi)
+ movb %cl, (%rax)
1:
VZEROUPPER_RETURN
# if VEC_SIZE > 32
/* From 32 to 63. No branch when size == 32. */
L(between_32_63):
- VMOVU %YMM0, -32(%rdi,%rdx)
- VMOVU %YMM0, (%rdi)
+ VMOVU %YMM0, -32(%rax,%rdx)
+ VMOVU %YMM0, (%rax)
VZEROUPPER_RETURN
# endif
# if VEC_SIZE > 16
/* From 16 to 31. No branch when size == 16. */
L(between_16_31):
- VMOVU %XMM0, -16(%rdi,%rdx)
- VMOVU %XMM0, (%rdi)
+ VMOVU %XMM0, -16(%rax,%rdx)
+ VMOVU %XMM0, (%rax)
VZEROUPPER_RETURN
# endif
/* From 8 to 15. No branch when size == 8. */
L(between_8_15):
- movq %rcx, -8(%rdi,%rdx)
- movq %rcx, (%rdi)
+ movq %rcx, -8(%rax,%rdx)
+ movq %rcx, (%rax)
VZEROUPPER_RETURN
L(between_4_7):
/* From 4 to 7. No branch when size == 4. */
- movl %ecx, -4(%rdi,%rdx)
- movl %ecx, (%rdi)
+ movl %ecx, -4(%rax,%rdx)
+ movl %ecx, (%rax)
VZEROUPPER_RETURN
L(between_2_3):
/* From 2 to 3. No branch when size == 2. */
- movw %cx, -2(%rdi,%rdx)
- movw %cx, (%rdi)
+ movw %cx, -2(%rax,%rdx)
+ movw %cx, (%rax)
VZEROUPPER_RETURN
END (MEMSET_SYMBOL (__memset, unaligned_erms))
--
2.29.2
* [PATCH v2 2/2] x86: Expand test-memset.c and bench-memset.c
2021-04-19 16:30 [PATCH v2 1/2] x86: Optimize less_vec evex and avx512 memset-vec-unaligned-erms.S Noah Goldstein
@ 2021-04-19 16:30 ` Noah Goldstein
2021-04-19 18:44 ` [PATCH v2 1/2] x86: Optimize less_vec evex and avx512 memset-vec-unaligned-erms.S H.J. Lu
1 sibling, 0 replies; 6+ messages in thread
From: Noah Goldstein @ 2021-04-19 16:30 UTC (permalink / raw)
To: libc-alpha
No bug. This commit adds test cases and benchmarks for page-crossing
memset and for memset to the end of the page without crossing. As well,
in test-memset.c this commit adds sentinels at the start/end of tstbuf
to test for overwrites.
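The guard-byte check, as a standalone sketch (illustrative only, not
the harness code; the function name is made up; plain ISO C):

#include <assert.h>
#include <string.h>

/* Verify memset writes exactly [buf + 1, buf + 1 + n): the two
   sentinel bytes bracketing the region must survive the call.  */
static void
check_no_overwrite (int c, size_t n)
{
  char buf[n + 2];
  char sentinel = (char) (c - 1);	/* never equals the fill byte */
  buf[0] = sentinel;
  buf[n + 1] = sentinel;
  memset (buf + 1, c, n);
  assert (buf[0] == sentinel && buf[n + 1] == sentinel);
}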
Signed-off-by: Noah Goldstein <goldstein.w.n@gmail.com>
---
benchtests/bench-memset.c | 6 ++++--
string/test-memset.c | 20 +++++++++++++++-----
2 files changed, 19 insertions(+), 7 deletions(-)
diff --git a/benchtests/bench-memset.c b/benchtests/bench-memset.c
index 1174900e88..d6619b4836 100644
--- a/benchtests/bench-memset.c
+++ b/benchtests/bench-memset.c
@@ -61,7 +61,7 @@ do_one_test (json_ctx_t *json_ctx, impl_t *impl, CHAR *s,
static void
do_test (json_ctx_t *json_ctx, size_t align, int c, size_t len)
{
- align &= 63;
+ align &= 4095;
if ((align + len) * sizeof (CHAR) > page_size)
return;
@@ -110,9 +110,11 @@ test_main (void)
{
for (i = 0; i < 18; ++i)
do_test (&json_ctx, 0, c, 1 << i);
- for (i = 1; i < 32; ++i)
+ for (i = 1; i < 64; ++i)
{
do_test (&json_ctx, i, c, i);
+ do_test (&json_ctx, 4096 - i, c, i);
+ do_test (&json_ctx, 4095, c, i);
if (i & (i - 1))
do_test (&json_ctx, 0, c, i);
}
diff --git a/string/test-memset.c b/string/test-memset.c
index eb71517390..82bfcd6ad4 100644
--- a/string/test-memset.c
+++ b/string/test-memset.c
@@ -109,16 +109,24 @@ SIMPLE_MEMSET (CHAR *s, int c, size_t n)
static void
do_one_test (impl_t *impl, CHAR *s, int c __attribute ((unused)), size_t n)
{
- CHAR tstbuf[n];
+ CHAR buf[n + 2];
+ CHAR *tstbuf = buf + 1;
+ CHAR sentinel = c - 1;
+ buf[0] = sentinel;
+ buf[n + 1] = sentinel;
#ifdef TEST_BZERO
simple_bzero (tstbuf, n);
CALL (impl, s, n);
- if (memcmp (s, tstbuf, n) != 0)
+ if (memcmp (s, tstbuf, n) != 0
+ || buf[0] != sentinel
+ || buf[n + 1] != sentinel)
#else
CHAR *res = CALL (impl, s, c, n);
if (res != s
|| SIMPLE_MEMSET (tstbuf, c, n) != tstbuf
- || MEMCMP (s, tstbuf, n) != 0)
+ || MEMCMP (s, tstbuf, n) != 0
+ || buf[0] != sentinel
+ || buf[n + 1] != sentinel)
#endif /* !TEST_BZERO */
{
error (0, 0, "Wrong result in function %s", impl->name);
@@ -130,7 +138,7 @@ do_one_test (impl_t *impl, CHAR *s, int c __attribute ((unused)), size_t n)
static void
do_test (size_t align, int c, size_t len)
{
- align &= 7;
+ align &= 4095;
if ((align + len) * sizeof (CHAR) > page_size)
return;
@@ -245,9 +253,11 @@ test_main (void)
{
for (i = 0; i < 18; ++i)
do_test (0, c, 1 << i);
- for (i = 1; i < 32; ++i)
+ for (i = 1; i < 64; ++i)
{
do_test (i, c, i);
+ do_test (4096 - i, c, i);
+ do_test (4095, c, i);
if (i & (i - 1))
do_test (0, c, i);
}
--
2.29.2
* Re: [PATCH v2 1/2] x86: Optimize less_vec evex and avx512 memset-vec-unaligned-erms.S
2021-04-19 16:30 [PATCH v2 1/2] x86: Optimize less_vec evex and avx512 memset-vec-unaligned-erms.S Noah Goldstein
2021-04-19 16:30 ` [PATCH v2 2/2] x86: Expand test-memset.c and bench-memset.c Noah Goldstein
@ 2021-04-19 18:44 ` H.J. Lu
2021-04-19 19:35 ` Noah Goldstein
1 sibling, 1 reply; 6+ messages in thread
From: H.J. Lu @ 2021-04-19 18:44 UTC (permalink / raw)
To: Noah Goldstein; +Cc: libc-alpha, carlos
On Mon, Apr 19, 2021 at 9:30 AM Noah Goldstein <goldstein.w.n@gmail.com> wrote:
>
> No bug. This commit adds an optimized case for the less_vec memset path
> that uses the avx512vl/avx512bw masked store, avoiding the excessive
> branches. test-memset and test-wmemset are passing.
>
> Signed-off-by: Noah Goldstein <goldstein.w.n@gmail.com>
> ---
> sysdeps/x86_64/multiarch/ifunc-memset.h | 6 ++-
> .../multiarch/memset-avx512-unaligned-erms.S | 2 +-
> .../multiarch/memset-evex-unaligned-erms.S | 2 +-
> .../multiarch/memset-vec-unaligned-erms.S | 52 +++++++++++++++----
> 4 files changed, 47 insertions(+), 15 deletions(-)
>
> diff --git a/sysdeps/x86_64/multiarch/ifunc-memset.h b/sysdeps/x86_64/multiarch/ifunc-memset.h
> index 502f946a84..eda5640541 100644
> --- a/sysdeps/x86_64/multiarch/ifunc-memset.h
> +++ b/sysdeps/x86_64/multiarch/ifunc-memset.h
> @@ -54,7 +54,8 @@ IFUNC_SELECTOR (void)
> && !CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_AVX512))
> {
> if (CPU_FEATURE_USABLE_P (cpu_features, AVX512VL)
> - && CPU_FEATURE_USABLE_P (cpu_features, AVX512BW))
> + && CPU_FEATURE_USABLE_P (cpu_features, AVX512BW)
> + && CPU_FEATURE_USABLE_P (cpu_features, BMI2))
> {
> if (CPU_FEATURE_USABLE_P (cpu_features, ERMS))
> return OPTIMIZE (avx512_unaligned_erms);
> @@ -68,7 +69,8 @@ IFUNC_SELECTOR (void)
> if (CPU_FEATURE_USABLE_P (cpu_features, AVX2))
> {
> if (CPU_FEATURE_USABLE_P (cpu_features, AVX512VL)
> - && CPU_FEATURE_USABLE_P (cpu_features, AVX512BW))
> + && CPU_FEATURE_USABLE_P (cpu_features, AVX512BW)
> + && CPU_FEATURE_USABLE_P (cpu_features, BMI2))
Please also update ifunc-impl-list.c.
> {
> if (CPU_FEATURE_USABLE_P (cpu_features, ERMS))
> return OPTIMIZE (evex_unaligned_erms);
> diff --git a/sysdeps/x86_64/multiarch/memset-avx512-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-avx512-unaligned-erms.S
> index 22e7b187c8..d03460be93 100644
> --- a/sysdeps/x86_64/multiarch/memset-avx512-unaligned-erms.S
> +++ b/sysdeps/x86_64/multiarch/memset-avx512-unaligned-erms.S
> @@ -19,6 +19,6 @@
> # define SECTION(p) p##.evex512
> # define MEMSET_SYMBOL(p,s) p##_avx512_##s
> # define WMEMSET_SYMBOL(p,s) p##_avx512_##s
> -
> +# define USE_LESS_VEC_MASKMOV 1
USE_LESS_VEC_MASKED_STORE
> # include "memset-vec-unaligned-erms.S"
> #endif
> diff --git a/sysdeps/x86_64/multiarch/memset-evex-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-evex-unaligned-erms.S
> index ae0a4d6e46..eb3541ef60 100644
> --- a/sysdeps/x86_64/multiarch/memset-evex-unaligned-erms.S
> +++ b/sysdeps/x86_64/multiarch/memset-evex-unaligned-erms.S
> @@ -19,6 +19,6 @@
> # define SECTION(p) p##.evex
> # define MEMSET_SYMBOL(p,s) p##_evex_##s
> # define WMEMSET_SYMBOL(p,s) p##_evex_##s
> -
> +# define USE_LESS_VEC_MASKMOV 1
> # include "memset-vec-unaligned-erms.S"
> #endif
> diff --git a/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
> index 584747f1a1..6b02e87f48 100644
> --- a/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
> +++ b/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
> @@ -63,6 +63,9 @@
> # endif
> #endif
>
> +#define PAGE_SIZE 4096
> +#define LOG_PAGE_SIZE 12
> +
> #ifndef SECTION
> # error SECTION is not defined!
> #endif
> @@ -213,11 +216,38 @@ L(loop):
> cmpq %rcx, %rdx
> jne L(loop)
> VZEROUPPER_SHORT_RETURN
> +
> + .p2align 4
> L(less_vec):
> /* Less than 1 VEC. */
> # if VEC_SIZE != 16 && VEC_SIZE != 32 && VEC_SIZE != 64
> # error Unsupported VEC_SIZE!
> # endif
> +# ifdef USE_LESS_VEC_MASKMOV
> + /* Clear high bits from edi. Only keeping bits relevant to page
> + cross check. Using sall instead of andl saves 3 bytes. Note
> + that we are using rax which is set in
> + MEMSET_VDUP_TO_VEC0_AND_SET_RETURN as ptr from here on out. */
> + sall $(32 - LOG_PAGE_SIZE), %edi
> + /* Check if a VEC_SIZE load crosses a page. Masked loads suffer serious
> + performance degradation when they have to suppress faults. */
> + cmpl $((PAGE_SIZE - VEC_SIZE) << (32 - LOG_PAGE_SIZE)), %edi
Please use AND and CMP since AND has higher throughput.
> + ja L(cross_page)
> +# if VEC_SIZE > 32
> + movq $-1, %rcx
> + bzhiq %rdx, %rcx, %rcx
> + kmovq %rcx, %k1
> +# else
> + movl $-1, %ecx
> + bzhil %edx, %ecx, %ecx
> + kmovd %ecx, %k1
> +# endif
> + vmovdqu8 %VEC(0), (%rax) {%k1}
> + VZEROUPPER_RETURN
> +
> + .p2align 4
> +L(cross_page):
> +# endif
> # if VEC_SIZE > 32
> cmpb $32, %dl
> jae L(between_32_63)
> @@ -234,36 +264,36 @@ L(less_vec):
> cmpb $1, %dl
> ja L(between_2_3)
> jb 1f
> - movb %cl, (%rdi)
> + movb %cl, (%rax)
> 1:
> VZEROUPPER_RETURN
> # if VEC_SIZE > 32
> /* From 32 to 63. No branch when size == 32. */
> L(between_32_63):
> - VMOVU %YMM0, -32(%rdi,%rdx)
> - VMOVU %YMM0, (%rdi)
> + VMOVU %YMM0, -32(%rax,%rdx)
> + VMOVU %YMM0, (%rax)
> VZEROUPPER_RETURN
> # endif
> # if VEC_SIZE > 16
> /* From 16 to 31. No branch when size == 16. */
> L(between_16_31):
> - VMOVU %XMM0, -16(%rdi,%rdx)
> - VMOVU %XMM0, (%rdi)
> + VMOVU %XMM0, -16(%rax,%rdx)
> + VMOVU %XMM0, (%rax)
> VZEROUPPER_RETURN
> # endif
> /* From 8 to 15. No branch when size == 8. */
> L(between_8_15):
> - movq %rcx, -8(%rdi,%rdx)
> - movq %rcx, (%rdi)
> + movq %rcx, -8(%rax,%rdx)
> + movq %rcx, (%rax)
> VZEROUPPER_RETURN
> L(between_4_7):
> /* From 4 to 7. No branch when size == 4. */
> - movl %ecx, -4(%rdi,%rdx)
> - movl %ecx, (%rdi)
> + movl %ecx, -4(%rax,%rdx)
> + movl %ecx, (%rax)
> VZEROUPPER_RETURN
> L(between_2_3):
> /* From 2 to 3. No branch when size == 2. */
> - movw %cx, -2(%rdi,%rdx)
> - movw %cx, (%rdi)
> + movw %cx, -2(%rax,%rdx)
> + movw %cx, (%rax)
> VZEROUPPER_RETURN
> END (MEMSET_SYMBOL (__memset, unaligned_erms))
> --
> 2.29.2
>
Thanks.
--
H.J.
* Re: [PATCH v2 1/2] x86: Optimize less_vec evex and avx512 memset-vec-unaligned-erms.S
2021-04-19 18:44 ` [PATCH v2 1/2] x86: Optimize less_vec evex and avx512 memset-vec-unaligned-erms.S H.J. Lu
@ 2021-04-19 19:35 ` Noah Goldstein
2021-04-19 20:39 ` H.J. Lu
0 siblings, 1 reply; 6+ messages in thread
From: Noah Goldstein @ 2021-04-19 19:35 UTC (permalink / raw)
To: H.J. Lu; +Cc: GNU C Library, Carlos O'Donell
On Mon, Apr 19, 2021 at 2:45 PM H.J. Lu <hjl.tools@gmail.com> wrote:
>
> On Mon, Apr 19, 2021 at 9:30 AM Noah Goldstein <goldstein.w.n@gmail.com> wrote:
> >
> > No bug. This commit adds an optimized case for the less_vec memset path
> > that uses the avx512vl/avx512bw masked store, avoiding the excessive
> > branches. test-memset and test-wmemset are passing.
> >
> > Signed-off-by: Noah Goldstein <goldstein.w.n@gmail.com>
> > ---
> > sysdeps/x86_64/multiarch/ifunc-memset.h | 6 ++-
> > .../multiarch/memset-avx512-unaligned-erms.S | 2 +-
> > .../multiarch/memset-evex-unaligned-erms.S | 2 +-
> > .../multiarch/memset-vec-unaligned-erms.S | 52 +++++++++++++++----
> > 4 files changed, 47 insertions(+), 15 deletions(-)
> >
> > diff --git a/sysdeps/x86_64/multiarch/ifunc-memset.h b/sysdeps/x86_64/multiarch/ifunc-memset.h
> > index 502f946a84..eda5640541 100644
> > --- a/sysdeps/x86_64/multiarch/ifunc-memset.h
> > +++ b/sysdeps/x86_64/multiarch/ifunc-memset.h
> > @@ -54,7 +54,8 @@ IFUNC_SELECTOR (void)
> > && !CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_AVX512))
> > {
> > if (CPU_FEATURE_USABLE_P (cpu_features, AVX512VL)
> > - && CPU_FEATURE_USABLE_P (cpu_features, AVX512BW))
> > + && CPU_FEATURE_USABLE_P (cpu_features, AVX512BW)
> > + && CPU_FEATURE_USABLE_P (cpu_features, BMI2))
> > {
> > if (CPU_FEATURE_USABLE_P (cpu_features, ERMS))
> > return OPTIMIZE (avx512_unaligned_erms);
> > @@ -68,7 +69,8 @@ IFUNC_SELECTOR (void)
> > if (CPU_FEATURE_USABLE_P (cpu_features, AVX2))
> > {
> > if (CPU_FEATURE_USABLE_P (cpu_features, AVX512VL)
> > - && CPU_FEATURE_USABLE_P (cpu_features, AVX512BW))
> > + && CPU_FEATURE_USABLE_P (cpu_features, AVX512BW)
> > + && CPU_FEATURE_USABLE_P (cpu_features, BMI2))
>
> Please also update ifunc-impl-list.c.
Done.
>
> > {
> > if (CPU_FEATURE_USABLE_P (cpu_features, ERMS))
> > return OPTIMIZE (evex_unaligned_erms);
> > diff --git a/sysdeps/x86_64/multiarch/memset-avx512-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-avx512-unaligned-erms.S
> > index 22e7b187c8..d03460be93 100644
> > --- a/sysdeps/x86_64/multiarch/memset-avx512-unaligned-erms.S
> > +++ b/sysdeps/x86_64/multiarch/memset-avx512-unaligned-erms.S
> > @@ -19,6 +19,6 @@
> > # define SECTION(p) p##.evex512
> > # define MEMSET_SYMBOL(p,s) p##_avx512_##s
> > # define WMEMSET_SYMBOL(p,s) p##_avx512_##s
> > -
> > +# define USE_LESS_VEC_MASKMOV 1
>
> USE_LESS_VEC_MASKED_STORE
Done.
>
> > # include "memset-vec-unaligned-erms.S"
> > #endif
> > diff --git a/sysdeps/x86_64/multiarch/memset-evex-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-evex-unaligned-erms.S
> > index ae0a4d6e46..eb3541ef60 100644
> > --- a/sysdeps/x86_64/multiarch/memset-evex-unaligned-erms.S
> > +++ b/sysdeps/x86_64/multiarch/memset-evex-unaligned-erms.S
> > @@ -19,6 +19,6 @@
> > # define SECTION(p) p##.evex
> > # define MEMSET_SYMBOL(p,s) p##_evex_##s
> > # define WMEMSET_SYMBOL(p,s) p##_evex_##s
> > -
> > +# define USE_LESS_VEC_MASKMOV 1
> > # include "memset-vec-unaligned-erms.S"
> > #endif
> > diff --git a/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
> > index 584747f1a1..6b02e87f48 100644
> > --- a/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
> > +++ b/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
> > @@ -63,6 +63,9 @@
> > # endif
> > #endif
> >
> > +#define PAGE_SIZE 4096
> > +#define LOG_PAGE_SIZE 12
> > +
> > #ifndef SECTION
> > # error SECTION is not defined!
> > #endif
> > @@ -213,11 +216,38 @@ L(loop):
> > cmpq %rcx, %rdx
> > jne L(loop)
> > VZEROUPPER_SHORT_RETURN
> > +
> > + .p2align 4
> > L(less_vec):
> > /* Less than 1 VEC. */
> > # if VEC_SIZE != 16 && VEC_SIZE != 32 && VEC_SIZE != 64
> > # error Unsupported VEC_SIZE!
> > # endif
> > +# ifdef USE_LESS_VEC_MASKMOV
> > + /* Clear high bits from edi. Only keeping bits relevant to page
> > + cross check. Using sall instead of andl saves 3 bytes. Note
> > + that we are using rax which is set in
> > + MEMSET_VDUP_TO_VEC0_AND_SET_RETURN as ptr from here on out. */
> > + sall $(32 - LOG_PAGE_SIZE), %edi
> > + /* Check if a VEC_SIZE load crosses a page. Masked loads suffer serious
> > + performance degradation when they have to suppress faults. */
> > + cmpl $((PAGE_SIZE - VEC_SIZE) << (32 - LOG_PAGE_SIZE)), %edi
>
> Please use AND and CMP since AND has higher throughput.
AND uses more code size for VEC_SIZE=16/32 and just barely pushes
L(cross_page) into the next 16-byte chunk, so the extra 3 bytes from AND
end up costing 16 bytes. Not aligning L(cross_page) to 16 also
introduces higher variance in benchmarks, so I think it has to be all 16 bytes.
As is, I don't think the throughput of AND / SAL is on the critical
path, so code size should win out. (We can also decode MOV -1, ecx
in the first cycle with SAL, as opposed to AND.)
What do you think?
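Concretely, the encodings being compared (for VEC_SIZE = 32,
PAGE_SIZE = 4096, using the standard imm32 forms):

  sall  $20, %edi              # c1 e7 14            3 bytes
  cmpl  $0xfe000000, %edi      # 81 ff 00 00 00 fe   6 bytes -> 9 total

versus

  andl  $4095, %edi            # 81 e7 ff 0f 00 00   6 bytes
  cmpl  $4064, %edi            # 81 ff e0 0f 00 00   6 bytes -> 12 total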
>
> > + ja L(cross_page)
> > +# if VEC_SIZE > 32
> > + movq $-1, %rcx
> > + bzhiq %rdx, %rcx, %rcx
> > + kmovq %rcx, %k1
> > +# else
> > + movl $-1, %ecx
> > + bzhil %edx, %ecx, %ecx
> > + kmovd %ecx, %k1
> > +# endif
> > + vmovdqu8 %VEC(0), (%rax) {%k1}
> > + VZEROUPPER_RETURN
> > +
> > + .p2align 4
> > +L(cross_page):
> > +# endif
> > # if VEC_SIZE > 32
> > cmpb $32, %dl
> > jae L(between_32_63)
> > @@ -234,36 +264,36 @@ L(less_vec):
> > cmpb $1, %dl
> > ja L(between_2_3)
> > jb 1f
> > - movb %cl, (%rdi)
> > + movb %cl, (%rax)
> > 1:
> > VZEROUPPER_RETURN
> > # if VEC_SIZE > 32
> > /* From 32 to 63. No branch when size == 32. */
> > L(between_32_63):
> > - VMOVU %YMM0, -32(%rdi,%rdx)
> > - VMOVU %YMM0, (%rdi)
> > + VMOVU %YMM0, -32(%rax,%rdx)
> > + VMOVU %YMM0, (%rax)
> > VZEROUPPER_RETURN
> > # endif
> > # if VEC_SIZE > 16
> > /* From 16 to 31. No branch when size == 16. */
> > L(between_16_31):
> > - VMOVU %XMM0, -16(%rdi,%rdx)
> > - VMOVU %XMM0, (%rdi)
> > + VMOVU %XMM0, -16(%rax,%rdx)
> > + VMOVU %XMM0, (%rax)
> > VZEROUPPER_RETURN
> > # endif
> > /* From 8 to 15. No branch when size == 8. */
> > L(between_8_15):
> > - movq %rcx, -8(%rdi,%rdx)
> > - movq %rcx, (%rdi)
> > + movq %rcx, -8(%rax,%rdx)
> > + movq %rcx, (%rax)
> > VZEROUPPER_RETURN
> > L(between_4_7):
> > /* From 4 to 7. No branch when size == 4. */
> > - movl %ecx, -4(%rdi,%rdx)
> > - movl %ecx, (%rdi)
> > + movl %ecx, -4(%rax,%rdx)
> > + movl %ecx, (%rax)
> > VZEROUPPER_RETURN
> > L(between_2_3):
> > /* From 2 to 3. No branch when size == 2. */
> > - movw %cx, -2(%rdi,%rdx)
> > - movw %cx, (%rdi)
> > + movw %cx, -2(%rax,%rdx)
> > + movw %cx, (%rax)
> > VZEROUPPER_RETURN
> > END (MEMSET_SYMBOL (__memset, unaligned_erms))
> > --
> > 2.29.2
> >
>
> Thanks.
>
> --
> H.J.
* Re: [PATCH v2 1/2] x86: Optimize less_vec evex and avx512 memset-vec-unaligned-erms.S
2021-04-19 19:35 ` Noah Goldstein
@ 2021-04-19 20:39 ` H.J. Lu
2021-04-19 21:07 ` Noah Goldstein
0 siblings, 1 reply; 6+ messages in thread
From: H.J. Lu @ 2021-04-19 20:39 UTC (permalink / raw)
To: Noah Goldstein; +Cc: GNU C Library, Carlos O'Donell
On Mon, Apr 19, 2021 at 12:35 PM Noah Goldstein <goldstein.w.n@gmail.com> wrote:
>
> On Mon, Apr 19, 2021 at 2:45 PM H.J. Lu <hjl.tools@gmail.com> wrote:
> >
> > On Mon, Apr 19, 2021 at 9:30 AM Noah Goldstein <goldstein.w.n@gmail.com> wrote:
> > >
> > > No bug. This commit adds an optimized case for the less_vec memset path
> > > that uses the avx512vl/avx512bw masked store, avoiding the excessive
> > > branches. test-memset and test-wmemset are passing.
> > >
> > > Signed-off-by: Noah Goldstein <goldstein.w.n@gmail.com>
> > > ---
> > > sysdeps/x86_64/multiarch/ifunc-memset.h | 6 ++-
> > > .../multiarch/memset-avx512-unaligned-erms.S | 2 +-
> > > .../multiarch/memset-evex-unaligned-erms.S | 2 +-
> > > .../multiarch/memset-vec-unaligned-erms.S | 52 +++++++++++++++----
> > > 4 files changed, 47 insertions(+), 15 deletions(-)
> > >
> > > diff --git a/sysdeps/x86_64/multiarch/ifunc-memset.h b/sysdeps/x86_64/multiarch/ifunc-memset.h
> > > index 502f946a84..eda5640541 100644
> > > --- a/sysdeps/x86_64/multiarch/ifunc-memset.h
> > > +++ b/sysdeps/x86_64/multiarch/ifunc-memset.h
> > > @@ -54,7 +54,8 @@ IFUNC_SELECTOR (void)
> > > && !CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_AVX512))
> > > {
> > > if (CPU_FEATURE_USABLE_P (cpu_features, AVX512VL)
> > > - && CPU_FEATURE_USABLE_P (cpu_features, AVX512BW))
> > > + && CPU_FEATURE_USABLE_P (cpu_features, AVX512BW)
> > > + && CPU_FEATURE_USABLE_P (cpu_features, BMI2))
> > > {
> > > if (CPU_FEATURE_USABLE_P (cpu_features, ERMS))
> > > return OPTIMIZE (avx512_unaligned_erms);
> > > @@ -68,7 +69,8 @@ IFUNC_SELECTOR (void)
> > > if (CPU_FEATURE_USABLE_P (cpu_features, AVX2))
> > > {
> > > if (CPU_FEATURE_USABLE_P (cpu_features, AVX512VL)
> > > - && CPU_FEATURE_USABLE_P (cpu_features, AVX512BW))
> > > + && CPU_FEATURE_USABLE_P (cpu_features, AVX512BW)
> > > + && CPU_FEATURE_USABLE_P (cpu_features, BMI2))
> >
> > Please also update ifunc-impl-list.c.
>
> Done.
>
> >
> > > {
> > > if (CPU_FEATURE_USABLE_P (cpu_features, ERMS))
> > > return OPTIMIZE (evex_unaligned_erms);
> > > diff --git a/sysdeps/x86_64/multiarch/memset-avx512-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-avx512-unaligned-erms.S
> > > index 22e7b187c8..d03460be93 100644
> > > --- a/sysdeps/x86_64/multiarch/memset-avx512-unaligned-erms.S
> > > +++ b/sysdeps/x86_64/multiarch/memset-avx512-unaligned-erms.S
> > > @@ -19,6 +19,6 @@
> > > # define SECTION(p) p##.evex512
> > > # define MEMSET_SYMBOL(p,s) p##_avx512_##s
> > > # define WMEMSET_SYMBOL(p,s) p##_avx512_##s
> > > -
> > > +# define USE_LESS_VEC_MASKMOV 1
> >
> > USE_LESS_VEC_MASKED_STORE
>
> Done.
>
> >
> > > # include "memset-vec-unaligned-erms.S"
> > > #endif
> > > diff --git a/sysdeps/x86_64/multiarch/memset-evex-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-evex-unaligned-erms.S
> > > index ae0a4d6e46..eb3541ef60 100644
> > > --- a/sysdeps/x86_64/multiarch/memset-evex-unaligned-erms.S
> > > +++ b/sysdeps/x86_64/multiarch/memset-evex-unaligned-erms.S
> > > @@ -19,6 +19,6 @@
> > > # define SECTION(p) p##.evex
> > > # define MEMSET_SYMBOL(p,s) p##_evex_##s
> > > # define WMEMSET_SYMBOL(p,s) p##_evex_##s
> > > -
> > > +# define USE_LESS_VEC_MASKMOV 1
> > > # include "memset-vec-unaligned-erms.S"
> > > #endif
> > > diff --git a/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
> > > index 584747f1a1..6b02e87f48 100644
> > > --- a/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
> > > +++ b/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
> > > @@ -63,6 +63,9 @@
> > > # endif
> > > #endif
> > >
> > > +#define PAGE_SIZE 4096
> > > +#define LOG_PAGE_SIZE 12
> > > +
> > > #ifndef SECTION
> > > # error SECTION is not defined!
> > > #endif
> > > @@ -213,11 +216,38 @@ L(loop):
> > > cmpq %rcx, %rdx
> > > jne L(loop)
> > > VZEROUPPER_SHORT_RETURN
> > > +
> > > + .p2align 4
> > > L(less_vec):
> > > /* Less than 1 VEC. */
> > > # if VEC_SIZE != 16 && VEC_SIZE != 32 && VEC_SIZE != 64
> > > # error Unsupported VEC_SIZE!
> > > # endif
> > > +# ifdef USE_LESS_VEC_MASKMOV
> > > + /* Clear high bits from edi. Only keeping bits relevant to page
> > > + cross check. Using sall instead of andl saves 3 bytes. Note
> > > + that we are using rax which is set in
> > > + MEMSET_VDUP_TO_VEC0_AND_SET_RETURN as ptr from here on out. */
> > > + sall $(32 - LOG_PAGE_SIZE), %edi
> > > + /* Check if a VEC_SIZE load crosses a page. Masked loads suffer serious
> > > + performance degradation when they have to suppress faults. */
> > > + cmpl $((PAGE_SIZE - VEC_SIZE) << (32 - LOG_PAGE_SIZE)), %edi
> >
> > Please use AND and CMP since AND has higher throughput.
>
> AND uses more code size for VEC_SIZE=16/32 and just barely pushes
> L(cross_page) into the next 16-byte chunk, so the extra 3 bytes from AND
> end up costing 16 bytes. Not aligning L(cross_page) to 16 also
> introduces higher variance in benchmarks, so I think it has to be all 16 bytes.
>
> As is, I don't think the throughput of AND / SAL is on the critical
> path, so code size should win out. (We can also decode MOV -1, ecx
> in the first cycle with SAL, as opposed to AND.)
>
> What do you think?
I prefer AND over SAL. Something like
diff --git a/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
b/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
index 3a59d39267..763fb907b9 100644
--- a/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
+++ b/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
@@ -217,21 +217,17 @@ L(loop):
jne L(loop)
VZEROUPPER_SHORT_RETURN
- .p2align 4
+ /* NB: Don't align this branch target to reduce code size. */
L(less_vec):
/* Less than 1 VEC. */
# if VEC_SIZE != 16 && VEC_SIZE != 32 && VEC_SIZE != 64
# error Unsupported VEC_SIZE!
# endif
# ifdef USE_LESS_VEC_MASK_STORE
- /* Clear high bits from edi. Only keeping bits relevant to page
- cross check. Using sall instead of andl saves 3 bytes. Note
- that we are using rax which is set in
- MEMSET_VDUP_TO_VEC0_AND_SET_RETURN as ptr from here on out. */
- sall $(32 - LOG_PAGE_SIZE), %edi
- /* Check if a VEC_SIZE load crosses a page. Masked loads suffer serious
+ /* Check if a VEC_SIZE store crosses a page. Masked stores suffer serious
performance degradation when they have to suppress faults. */
- cmpl $((PAGE_SIZE - VEC_SIZE) << (32 - LOG_PAGE_SIZE)), %edi
+ andl $(PAGE_SIZE - 1), %edi
+ cmpl $(PAGE_SIZE - VEC_SIZE), %edi
ja L(cross_page)
# if VEC_SIZE > 32
movq $-1, %rcx
Thanks.
--
H.J.
* Re: [PATCH v2 1/2] x86: Optimize less_vec evex and avx512 memset-vec-unaligned-erms.S
2021-04-19 20:39 ` H.J. Lu
@ 2021-04-19 21:07 ` Noah Goldstein
0 siblings, 0 replies; 6+ messages in thread
From: Noah Goldstein @ 2021-04-19 21:07 UTC (permalink / raw)
To: H.J. Lu; +Cc: GNU C Library, Carlos O'Donell
On Mon, Apr 19, 2021 at 1:39 PM H.J. Lu <hjl.tools@gmail.com> wrote:
>
> On Mon, Apr 19, 2021 at 12:35 PM Noah Goldstein <goldstein.w.n@gmail.com> wrote:
> >
> > On Mon, Apr 19, 2021 at 2:45 PM H.J. Lu <hjl.tools@gmail.com> wrote:
> > >
> > > On Mon, Apr 19, 2021 at 9:30 AM Noah Goldstein <goldstein.w.n@gmail.com> wrote:
> > > >
> > > > No bug. This commit adds an optimized case for the less_vec memset path
> > > > that uses the avx512vl/avx512bw masked store, avoiding the excessive
> > > > branches. test-memset and test-wmemset are passing.
> > > >
> > > > Signed-off-by: Noah Goldstein <goldstein.w.n@gmail.com>
> > > > ---
> > > > sysdeps/x86_64/multiarch/ifunc-memset.h | 6 ++-
> > > > .../multiarch/memset-avx512-unaligned-erms.S | 2 +-
> > > > .../multiarch/memset-evex-unaligned-erms.S | 2 +-
> > > > .../multiarch/memset-vec-unaligned-erms.S | 52 +++++++++++++++----
> > > > 4 files changed, 47 insertions(+), 15 deletions(-)
> > > >
> > > > diff --git a/sysdeps/x86_64/multiarch/ifunc-memset.h b/sysdeps/x86_64/multiarch/ifunc-memset.h
> > > > index 502f946a84..eda5640541 100644
> > > > --- a/sysdeps/x86_64/multiarch/ifunc-memset.h
> > > > +++ b/sysdeps/x86_64/multiarch/ifunc-memset.h
> > > > @@ -54,7 +54,8 @@ IFUNC_SELECTOR (void)
> > > > && !CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_AVX512))
> > > > {
> > > > if (CPU_FEATURE_USABLE_P (cpu_features, AVX512VL)
> > > > - && CPU_FEATURE_USABLE_P (cpu_features, AVX512BW))
> > > > + && CPU_FEATURE_USABLE_P (cpu_features, AVX512BW)
> > > > + && CPU_FEATURE_USABLE_P (cpu_features, BMI2))
> > > > {
> > > > if (CPU_FEATURE_USABLE_P (cpu_features, ERMS))
> > > > return OPTIMIZE (avx512_unaligned_erms);
> > > > @@ -68,7 +69,8 @@ IFUNC_SELECTOR (void)
> > > > if (CPU_FEATURE_USABLE_P (cpu_features, AVX2))
> > > > {
> > > > if (CPU_FEATURE_USABLE_P (cpu_features, AVX512VL)
> > > > - && CPU_FEATURE_USABLE_P (cpu_features, AVX512BW))
> > > > + && CPU_FEATURE_USABLE_P (cpu_features, AVX512BW)
> > > > + && CPU_FEATURE_USABLE_P (cpu_features, BMI2))
> > >
> > > Please also update ifunc-impl-list.c.
> >
> > Done.
> >
> > >
> > > > {
> > > > if (CPU_FEATURE_USABLE_P (cpu_features, ERMS))
> > > > return OPTIMIZE (evex_unaligned_erms);
> > > > diff --git a/sysdeps/x86_64/multiarch/memset-avx512-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-avx512-unaligned-erms.S
> > > > index 22e7b187c8..d03460be93 100644
> > > > --- a/sysdeps/x86_64/multiarch/memset-avx512-unaligned-erms.S
> > > > +++ b/sysdeps/x86_64/multiarch/memset-avx512-unaligned-erms.S
> > > > @@ -19,6 +19,6 @@
> > > > # define SECTION(p) p##.evex512
> > > > # define MEMSET_SYMBOL(p,s) p##_avx512_##s
> > > > # define WMEMSET_SYMBOL(p,s) p##_avx512_##s
> > > > -
> > > > +# define USE_LESS_VEC_MASKMOV 1
> > >
> > > USE_LESS_VEC_MASKED_STORE
> >
> > Done.
> >
> > >
> > > > # include "memset-vec-unaligned-erms.S"
> > > > #endif
> > > > diff --git a/sysdeps/x86_64/multiarch/memset-evex-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-evex-unaligned-erms.S
> > > > index ae0a4d6e46..eb3541ef60 100644
> > > > --- a/sysdeps/x86_64/multiarch/memset-evex-unaligned-erms.S
> > > > +++ b/sysdeps/x86_64/multiarch/memset-evex-unaligned-erms.S
> > > > @@ -19,6 +19,6 @@
> > > > # define SECTION(p) p##.evex
> > > > # define MEMSET_SYMBOL(p,s) p##_evex_##s
> > > > # define WMEMSET_SYMBOL(p,s) p##_evex_##s
> > > > -
> > > > +# define USE_LESS_VEC_MASKMOV 1
> > > > # include "memset-vec-unaligned-erms.S"
> > > > #endif
> > > > diff --git a/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
> > > > index 584747f1a1..6b02e87f48 100644
> > > > --- a/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
> > > > +++ b/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
> > > > @@ -63,6 +63,9 @@
> > > > # endif
> > > > #endif
> > > >
> > > > +#define PAGE_SIZE 4096
> > > > +#define LOG_PAGE_SIZE 12
> > > > +
> > > > #ifndef SECTION
> > > > # error SECTION is not defined!
> > > > #endif
> > > > @@ -213,11 +216,38 @@ L(loop):
> > > > cmpq %rcx, %rdx
> > > > jne L(loop)
> > > > VZEROUPPER_SHORT_RETURN
> > > > +
> > > > + .p2align 4
> > > > L(less_vec):
> > > > /* Less than 1 VEC. */
> > > > # if VEC_SIZE != 16 && VEC_SIZE != 32 && VEC_SIZE != 64
> > > > # error Unsupported VEC_SIZE!
> > > > # endif
> > > > +# ifdef USE_LESS_VEC_MASKMOV
> > > > + /* Clear high bits from edi. Only keeping bits relevant to page
> > > > + cross check. Using sall instead of andl saves 3 bytes. Note
> > > > + that we are using rax which is set in
> > > > + MEMSET_VDUP_TO_VEC0_AND_SET_RETURN as ptr from here on out. */
> > > > + sall $(32 - LOG_PAGE_SIZE), %edi
> > > > + /* Check if a VEC_SIZE load crosses a page. Masked loads suffer serious
> > > > + performance degradation when they have to suppress faults. */
> > > > + cmpl $((PAGE_SIZE - VEC_SIZE) << (32 - LOG_PAGE_SIZE)), %edi
> > >
> > > Please use AND and CMP since AND has higher throughput.
> >
> > AND uses more code size for VEC_SIZE=16/32 and just barely pushes
> > L(cross_page) into the next 16-byte chunk, so the extra 3 bytes from AND
> > end up costing 16 bytes. Not aligning L(cross_page) to 16 also
> > introduces higher variance in benchmarks, so I think it has to be all 16 bytes.
> >
> > As is, I don't think the throughput of AND / SAL is on the critical
> > path, so code size should win out. (We can also decode MOV -1, ecx
> > in the first cycle with SAL, as opposed to AND.)
> >
> > What do you think?
>
> I prefer AND over SAL. Something like
>
> diff --git a/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
> b/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
> index 3a59d39267..763fb907b9 100644
> --- a/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
> +++ b/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
> @@ -217,21 +217,17 @@ L(loop):
> jne L(loop)
> VZEROUPPER_SHORT_RETURN
>
> - .p2align 4
> + /* NB: Don't align this branch target to reduce code size. */
Not aligning this branch can harm performance. The median stays about
the same, but variance / geomean go up.
> L(less_vec):
> /* Less than 1 VEC. */
> # if VEC_SIZE != 16 && VEC_SIZE != 32 && VEC_SIZE != 64
> # error Unsupported VEC_SIZE!
> # endif
> # ifdef USE_LESS_VEC_MASK_STORE
> - /* Clear high bits from edi. Only keeping bits relevant to page
> - cross check. Using sall instead of andl saves 3 bytes. Note
> - that we are using rax which is set in
> - MEMSET_VDUP_TO_VEC0_AND_SET_RETURN as ptr from here on out. */
> - sall $(32 - LOG_PAGE_SIZE), %edi
> - /* Check if a VEC_SIZE load crosses a page. Masked loads suffer serious
> + /* Check if a VEC_SIZE store crosses a page. Masked stores suffer serious
> performance degradation when they have to suppress faults. */
> - cmpl $((PAGE_SIZE - VEC_SIZE) << (32 - LOG_PAGE_SIZE)), %edi
> + andl $(PAGE_SIZE - 1), %edi
> + cmpl $(PAGE_SIZE - VEC_SIZE), %edi
> ja L(cross_page)
> # if VEC_SIZE > 32
> movq $-1, %rcx
>
> Thanks.
>
> --
> H.J.