* [PATCH v3 1/2] x86: Optimize less_vec evex and avx512 memset-vec-unaligned-erms.S
From: Noah Goldstein @ 2021-04-19 19:35 UTC
To: libc-alpha
No bug. This commit adds an optimized case for the less_vec memset path
that uses an AVX512VL/AVX512BW mask store (with the mask built by BMI2
bzhi, hence the new BMI2 checks) to avoid the excessive
branches. test-memset and test-wmemset are passing.
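For reference, the mask-store idea maps onto roughly the following C
intrinsics sketch for the VEC_SIZE == 32 case (an illustration only;
the function name is made up and this is not part of the patch):

    #include <immintrin.h>
    #include <stddef.h>

    /* Branchless memset for n < 32 bytes: bzhi (BMI2) builds a mask
       whose low n bits are set, so a single AVX512VL/AVX512BW masked
       store writes exactly n bytes and nothing past them.  */
    static void
    small_memset_sketch (char *dst, int c, size_t n)
    {
      __m256i v = _mm256_set1_epi8 ((char) c);
      __mmask32 k = (__mmask32) _bzhi_u32 (-1U, (unsigned int) n);
      _mm256_mask_storeu_epi8 (dst, k, v);
    }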
Signed-off-by: Noah Goldstein <goldstein.w.n@gmail.com>
---
sysdeps/x86_64/multiarch/ifunc-impl-list.c | 40 +++++++++-----
sysdeps/x86_64/multiarch/ifunc-memset.h | 6 ++-
.../multiarch/memset-avx512-unaligned-erms.S | 2 +-
.../multiarch/memset-evex-unaligned-erms.S | 2 +-
.../multiarch/memset-vec-unaligned-erms.S | 52 +++++++++++++++----
5 files changed, 75 insertions(+), 27 deletions(-)
diff --git a/sysdeps/x86_64/multiarch/ifunc-impl-list.c b/sysdeps/x86_64/multiarch/ifunc-impl-list.c
index 0b0927b124..c377cab629 100644
--- a/sysdeps/x86_64/multiarch/ifunc-impl-list.c
+++ b/sysdeps/x86_64/multiarch/ifunc-impl-list.c
@@ -204,19 +204,23 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
__memset_chk_avx2_unaligned_erms_rtm)
IFUNC_IMPL_ADD (array, i, __memset_chk,
(CPU_FEATURE_USABLE (AVX512VL)
- && CPU_FEATURE_USABLE (AVX512BW)),
+ && CPU_FEATURE_USABLE (AVX512BW)
+ && CPU_FEATURE_USABLE (BMI2)),
__memset_chk_evex_unaligned)
IFUNC_IMPL_ADD (array, i, __memset_chk,
(CPU_FEATURE_USABLE (AVX512VL)
- && CPU_FEATURE_USABLE (AVX512BW)),
+ && CPU_FEATURE_USABLE (AVX512BW)
+ && CPU_FEATURE_USABLE (BMI2)),
__memset_chk_evex_unaligned_erms)
IFUNC_IMPL_ADD (array, i, __memset_chk,
(CPU_FEATURE_USABLE (AVX512VL)
- && CPU_FEATURE_USABLE (AVX512BW)),
+ && CPU_FEATURE_USABLE (AVX512BW)
+ && CPU_FEATURE_USABLE (BMI2)),
__memset_chk_avx512_unaligned_erms)
IFUNC_IMPL_ADD (array, i, __memset_chk,
(CPU_FEATURE_USABLE (AVX512VL)
- && CPU_FEATURE_USABLE (AVX512BW)),
+ && CPU_FEATURE_USABLE (AVX512BW)
+ && CPU_FEATURE_USABLE (BMI2)),
__memset_chk_avx512_unaligned)
IFUNC_IMPL_ADD (array, i, __memset_chk,
CPU_FEATURE_USABLE (AVX512F),
@@ -247,19 +251,23 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
__memset_avx2_unaligned_erms_rtm)
IFUNC_IMPL_ADD (array, i, memset,
(CPU_FEATURE_USABLE (AVX512VL)
- && CPU_FEATURE_USABLE (AVX512BW)),
+ && CPU_FEATURE_USABLE (AVX512BW)
+ && CPU_FEATURE_USABLE (BMI2)),
__memset_evex_unaligned)
IFUNC_IMPL_ADD (array, i, memset,
(CPU_FEATURE_USABLE (AVX512VL)
- && CPU_FEATURE_USABLE (AVX512BW)),
+ && CPU_FEATURE_USABLE (AVX512BW)
+ && CPU_FEATURE_USABLE (BMI2)),
__memset_evex_unaligned_erms)
IFUNC_IMPL_ADD (array, i, memset,
(CPU_FEATURE_USABLE (AVX512VL)
- && CPU_FEATURE_USABLE (AVX512BW)),
+ && CPU_FEATURE_USABLE (AVX512BW)
+ && CPU_FEATURE_USABLE (BMI2)),
__memset_avx512_unaligned_erms)
IFUNC_IMPL_ADD (array, i, memset,
(CPU_FEATURE_USABLE (AVX512VL)
- && CPU_FEATURE_USABLE (AVX512BW)),
+ && CPU_FEATURE_USABLE (AVX512BW)
+ && CPU_FEATURE_USABLE (BMI2)),
__memset_avx512_unaligned)
IFUNC_IMPL_ADD (array, i, memset,
CPU_FEATURE_USABLE (AVX512F),
@@ -728,10 +736,14 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
&& CPU_FEATURE_USABLE (RTM)),
__wmemset_avx2_unaligned_rtm)
IFUNC_IMPL_ADD (array, i, wmemset,
- CPU_FEATURE_USABLE (AVX512VL),
+ (CPU_FEATURE_USABLE (AVX512VL)
+ && CPU_FEATURE_USABLE (AVX512BW)
+ && CPU_FEATURE_USABLE (BMI2)),
__wmemset_evex_unaligned)
IFUNC_IMPL_ADD (array, i, wmemset,
- CPU_FEATURE_USABLE (AVX512VL),
+ (CPU_FEATURE_USABLE (AVX512VL)
+ && CPU_FEATURE_USABLE (AVX512BW)
+ && CPU_FEATURE_USABLE (BMI2)),
__wmemset_avx512_unaligned))
#ifdef SHARED
@@ -935,10 +947,14 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
CPU_FEATURE_USABLE (AVX2),
__wmemset_chk_avx2_unaligned)
IFUNC_IMPL_ADD (array, i, __wmemset_chk,
- CPU_FEATURE_USABLE (AVX512VL),
+ (CPU_FEATURE_USABLE (AVX512VL)
+ && CPU_FEATURE_USABLE (AVX512BW)
+ && CPU_FEATURE_USABLE (BMI2)),
__wmemset_chk_evex_unaligned)
IFUNC_IMPL_ADD (array, i, __wmemset_chk,
- CPU_FEATURE_USABLE (AVX512F),
+ (CPU_FEATURE_USABLE (AVX512VL)
+ && CPU_FEATURE_USABLE (AVX512BW)
+ && CPU_FEATURE_USABLE (BMI2)),
__wmemset_chk_avx512_unaligned))
#endif
diff --git a/sysdeps/x86_64/multiarch/ifunc-memset.h b/sysdeps/x86_64/multiarch/ifunc-memset.h
index 502f946a84..eda5640541 100644
--- a/sysdeps/x86_64/multiarch/ifunc-memset.h
+++ b/sysdeps/x86_64/multiarch/ifunc-memset.h
@@ -54,7 +54,8 @@ IFUNC_SELECTOR (void)
&& !CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_AVX512))
{
if (CPU_FEATURE_USABLE_P (cpu_features, AVX512VL)
- && CPU_FEATURE_USABLE_P (cpu_features, AVX512BW))
+ && CPU_FEATURE_USABLE_P (cpu_features, AVX512BW)
+ && CPU_FEATURE_USABLE_P (cpu_features, BMI2))
{
if (CPU_FEATURE_USABLE_P (cpu_features, ERMS))
return OPTIMIZE (avx512_unaligned_erms);
@@ -68,7 +69,8 @@ IFUNC_SELECTOR (void)
if (CPU_FEATURE_USABLE_P (cpu_features, AVX2))
{
if (CPU_FEATURE_USABLE_P (cpu_features, AVX512VL)
- && CPU_FEATURE_USABLE_P (cpu_features, AVX512BW))
+ && CPU_FEATURE_USABLE_P (cpu_features, AVX512BW)
+ && CPU_FEATURE_USABLE_P (cpu_features, BMI2))
{
if (CPU_FEATURE_USABLE_P (cpu_features, ERMS))
return OPTIMIZE (evex_unaligned_erms);
diff --git a/sysdeps/x86_64/multiarch/memset-avx512-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-avx512-unaligned-erms.S
index 22e7b187c8..8ad842fc2f 100644
--- a/sysdeps/x86_64/multiarch/memset-avx512-unaligned-erms.S
+++ b/sysdeps/x86_64/multiarch/memset-avx512-unaligned-erms.S
@@ -19,6 +19,6 @@
# define SECTION(p) p##.evex512
# define MEMSET_SYMBOL(p,s) p##_avx512_##s
# define WMEMSET_SYMBOL(p,s) p##_avx512_##s
-
+# define USE_LESS_VEC_MASK_STORE 1
# include "memset-vec-unaligned-erms.S"
#endif
diff --git a/sysdeps/x86_64/multiarch/memset-evex-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-evex-unaligned-erms.S
index ae0a4d6e46..640f092903 100644
--- a/sysdeps/x86_64/multiarch/memset-evex-unaligned-erms.S
+++ b/sysdeps/x86_64/multiarch/memset-evex-unaligned-erms.S
@@ -19,6 +19,6 @@
# define SECTION(p) p##.evex
# define MEMSET_SYMBOL(p,s) p##_evex_##s
# define WMEMSET_SYMBOL(p,s) p##_evex_##s
-
+# define USE_LESS_VEC_MASK_STORE 1
# include "memset-vec-unaligned-erms.S"
#endif
diff --git a/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
index 584747f1a1..3a59d39267 100644
--- a/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
+++ b/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
@@ -63,6 +63,9 @@
# endif
#endif
+#define PAGE_SIZE 4096
+#define LOG_PAGE_SIZE 12
+
#ifndef SECTION
# error SECTION is not defined!
#endif
@@ -213,11 +216,38 @@ L(loop):
cmpq %rcx, %rdx
jne L(loop)
VZEROUPPER_SHORT_RETURN
+
+ .p2align 4
L(less_vec):
/* Less than 1 VEC. */
# if VEC_SIZE != 16 && VEC_SIZE != 32 && VEC_SIZE != 64
# error Unsupported VEC_SIZE!
# endif
+# ifdef USE_LESS_VEC_MASK_STORE
+ /* Clear high bits from edi. Only keep bits relevant to the page
+ cross check. Using sall instead of andl saves 3 bytes. Note
+ that we are using rax, which is set in
+ MEMSET_VDUP_TO_VEC0_AND_SET_RETURN, as ptr from here on out. */
+ sall $(32 - LOG_PAGE_SIZE), %edi
+ /* Check if the VEC_SIZE store crosses a page. Masked stores suffer
+ serious performance degradation when they have to fault suppress. */
+ cmpl $((PAGE_SIZE - VEC_SIZE) << (32 - LOG_PAGE_SIZE)), %edi
+ ja L(cross_page)
+# if VEC_SIZE > 32
+ movq $-1, %rcx
+ bzhiq %rdx, %rcx, %rcx
+ kmovq %rcx, %k1
+# else
+ movl $-1, %ecx
+ bzhil %edx, %ecx, %ecx
+ kmovd %ecx, %k1
+# endif
+ vmovdqu8 %VEC(0), (%rax) {%k1}
+ VZEROUPPER_RETURN
+
+ .p2align 4
+L(cross_page):
+# endif
# if VEC_SIZE > 32
cmpb $32, %dl
jae L(between_32_63)
@@ -234,36 +264,36 @@ L(less_vec):
cmpb $1, %dl
ja L(between_2_3)
jb 1f
- movb %cl, (%rdi)
+ movb %cl, (%rax)
1:
VZEROUPPER_RETURN
# if VEC_SIZE > 32
/* From 32 to 63. No branch when size == 32. */
L(between_32_63):
- VMOVU %YMM0, -32(%rdi,%rdx)
- VMOVU %YMM0, (%rdi)
+ VMOVU %YMM0, -32(%rax,%rdx)
+ VMOVU %YMM0, (%rax)
VZEROUPPER_RETURN
# endif
# if VEC_SIZE > 16
/* From 16 to 31. No branch when size == 16. */
L(between_16_31):
- VMOVU %XMM0, -16(%rdi,%rdx)
- VMOVU %XMM0, (%rdi)
+ VMOVU %XMM0, -16(%rax,%rdx)
+ VMOVU %XMM0, (%rax)
VZEROUPPER_RETURN
# endif
/* From 8 to 15. No branch when size == 8. */
L(between_8_15):
- movq %rcx, -8(%rdi,%rdx)
- movq %rcx, (%rdi)
+ movq %rcx, -8(%rax,%rdx)
+ movq %rcx, (%rax)
VZEROUPPER_RETURN
L(between_4_7):
/* From 4 to 7. No branch when size == 4. */
- movl %ecx, -4(%rdi,%rdx)
- movl %ecx, (%rdi)
+ movl %ecx, -4(%rax,%rdx)
+ movl %ecx, (%rax)
VZEROUPPER_RETURN
L(between_2_3):
/* From 2 to 3. No branch when size == 2. */
- movw %cx, -2(%rdi,%rdx)
- movw %cx, (%rdi)
+ movw %cx, -2(%rax,%rdx)
+ movw %cx, (%rax)
VZEROUPPER_RETURN
END (MEMSET_SYMBOL (__memset, unaligned_erms))
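The page-cross check above shifts the page offset of the target into
the high bits of %edi so that a single unsigned compare decides the
branch. A C sketch of that test (illustrative only; the helper name is
not from the patch):

    #include <stdbool.h>
    #include <stdint.h>

    #define PAGE_SIZE     4096
    #define LOG_PAGE_SIZE 12

    /* True iff a vec_size-byte store at addr would cross a page, i.e.
       (addr % PAGE_SIZE) > PAGE_SIZE - vec_size.  The left shift
       discards everything above the page offset, so one unsigned
       compare mirrors the sall/cmpl/ja sequence in the assembly.  */
    static bool
    crosses_page_sketch (uintptr_t addr, uint32_t vec_size)
    {
      uint32_t off = (uint32_t) addr << (32 - LOG_PAGE_SIZE);
      return off > ((uint32_t) (PAGE_SIZE - vec_size)
                    << (32 - LOG_PAGE_SIZE));
    }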
--
2.29.2
* [PATCH v3 2/2] x86: Expand test-memset.c and bench-memset.c
From: Noah Goldstein @ 2021-04-19 19:35 UTC
To: libc-alpha
No bug. This commit adds test cases and benchmarks for page-cross
memsets and for memsets that run to the end of a page without crossing
it. In addition, in test-memset.c it adds sentinels at the start/end of
tstbuf to test for out-of-bounds writes.
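The sentinel technique reduces to the following sketch (simplified
relative to the actual harness, which guards tstbuf; the helper name
is made up):

    #include <assert.h>
    #include <stddef.h>

    /* Guard bytes on both sides of the destination catch any write
       outside [dst, dst + n); c - 1 can never equal the fill byte.  */
    static void
    check_no_overwrite (void *(*impl) (void *, int, size_t),
                        int c, size_t n)
    {
      char buf[n + 2];
      char *dst = buf + 1;
      char sentinel = (char) (c - 1);
      buf[0] = sentinel;
      buf[n + 1] = sentinel;
      impl (dst, c, n);
      assert (buf[0] == sentinel && buf[n + 1] == sentinel);
    }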
Signed-off-by: Noah Goldstein <goldstein.w.n@gmail.com>
---
benchtests/bench-memset.c | 6 ++++--
string/test-memset.c | 20 +++++++++++++++-----
2 files changed, 19 insertions(+), 7 deletions(-)
diff --git a/benchtests/bench-memset.c b/benchtests/bench-memset.c
index 1174900e88..d6619b4836 100644
--- a/benchtests/bench-memset.c
+++ b/benchtests/bench-memset.c
@@ -61,7 +61,7 @@ do_one_test (json_ctx_t *json_ctx, impl_t *impl, CHAR *s,
static void
do_test (json_ctx_t *json_ctx, size_t align, int c, size_t len)
{
- align &= 63;
+ align &= 4095;
if ((align + len) * sizeof (CHAR) > page_size)
return;
@@ -110,9 +110,11 @@ test_main (void)
{
for (i = 0; i < 18; ++i)
do_test (&json_ctx, 0, c, 1 << i);
- for (i = 1; i < 32; ++i)
+ for (i = 1; i < 64; ++i)
{
do_test (&json_ctx, i, c, i);
+ do_test (&json_ctx, 4096 - i, c, i);
+ do_test (&json_ctx, 4095, c, i);
if (i & (i - 1))
do_test (&json_ctx, 0, c, i);
}
diff --git a/string/test-memset.c b/string/test-memset.c
index eb71517390..82bfcd6ad4 100644
--- a/string/test-memset.c
+++ b/string/test-memset.c
@@ -109,16 +109,24 @@ SIMPLE_MEMSET (CHAR *s, int c, size_t n)
static void
do_one_test (impl_t *impl, CHAR *s, int c __attribute ((unused)), size_t n)
{
- CHAR tstbuf[n];
+ CHAR buf[n + 2];
+ CHAR *tstbuf = buf + 1;
+ CHAR sentinel = c - 1;
+ buf[0] = sentinel;
+ buf[n + 1] = sentinel;
#ifdef TEST_BZERO
simple_bzero (tstbuf, n);
CALL (impl, s, n);
- if (memcmp (s, tstbuf, n) != 0)
+ if (memcmp (s, tstbuf, n) != 0
+ || buf[0] != sentinel
+ || buf[n + 1] != sentinel)
#else
CHAR *res = CALL (impl, s, c, n);
if (res != s
|| SIMPLE_MEMSET (tstbuf, c, n) != tstbuf
- || MEMCMP (s, tstbuf, n) != 0)
+ || MEMCMP (s, tstbuf, n) != 0
+ || buf[0] != sentinel
+ || buf[n + 1] != sentinel)
#endif /* !TEST_BZERO */
{
error (0, 0, "Wrong result in function %s", impl->name);
@@ -130,7 +138,7 @@ do_one_test (impl_t *impl, CHAR *s, int c __attribute ((unused)), size_t n)
static void
do_test (size_t align, int c, size_t len)
{
- align &= 7;
+ align &= 4095;
if ((align + len) * sizeof (CHAR) > page_size)
return;
@@ -245,9 +253,11 @@ test_main (void)
{
for (i = 0; i < 18; ++i)
do_test (0, c, 1 << i);
- for (i = 1; i < 32; ++i)
+ for (i = 1; i < 64; ++i)
{
do_test (i, c, i);
+ do_test (4096 - i, c, i);
+ do_test (4095, c, i);
if (i & (i - 1))
do_test (0, c, i);
}
--
2.29.2