From: Sunil K Pandey <skpgkp2@gmail.com>
To: libc-alpha@sourceware.org
Cc: hjl.tools@gmail.com
Subject: [PATCH] x86_64: Implement evex512 version of strchrnul, strchr and wcschr
Date: Wed, 21 Sep 2022 17:16:52 -0700
Message-ID: <20220922001652.4039546-1-skpgkp2@gmail.com>
This patch implements evex512 versions of the following string
functions. The evex512 variants take up to 30% fewer cycles than
their evex counterparts, depending on length and alignment.
- strchrnul function using 512-bit vectors.
- strchr function using 512-bit vectors.
- wcschr function using 512-bit vectors.
Code size data:
strchrnul-evex.o 615 bytes
strchrnul-evex512.o 573 bytes (-7%)
strchr-evex.o 670 bytes
strchr-evex512.o 616 bytes (-8%)
wcschr-evex.o 678 bytes
wcschr-evex512.o 620 bytes (-9%)
These are placeholder functions, not used by any processor at the moment.
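Not spelled out above, but central to the new assembly: each vector is
checked for CHAR and the null terminator in a single pass, by XORing
the data with the broadcast CHAR and taking the unsigned minimum with
the original data; the result is zero exactly where either occurs. A
minimal scalar sketch of that idea in C (illustration only; first_hit
is a hypothetical name, not part of the patch):

#include <stddef.h>

/* Scalar model of the vector trick: x = c ^ ch is zero only on a
   match, and unsigned min(x, c) is zero only on a match or a null
   byte, so one vptestnm over the min result yields a combined
   CHAR/null bitmask.  */
static size_t
first_hit (const unsigned char *s, unsigned char ch)
{
  for (size_t i = 0; ; i++)
    {
      unsigned char x = s[i] ^ ch;            /* vpxorq, per lane  */
      unsigned char m = x < s[i] ? x : s[i];  /* vpminub, per lane */
      if (m == 0)                             /* vptestnmb, per bit */
        return i;                             /* CHAR match or null */
    }
}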
---
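A note on the page-cross path in the new file: L(page_cross) never
reads past a page boundary; it performs an aligned load of the last
VEC_SIZE bytes of the page and shifts off the mask bits for bytes
before the string start (the SHR %cl step). A rough C model of the
byte (strchr) variant, assuming 4 KiB pages; vec_hit_mask is a
hypothetical stand-in for the vector compare:

#include <stddef.h>
#include <stdint.h>

#define PAGE_SIZE 4096
#define VEC_SIZE 64

/* Hypothetical stand-in: combined CHAR/null bitmask for the VEC_SIZE
   bytes at an aligned address.  */
extern uint64_t vec_hit_mask (const char *aligned, unsigned char ch);

/* If the first vector load from S would cross a page, load the
   aligned block ending at the page boundary (which cannot fault) and
   discard the mask bits for bytes that precede S.  Returns the first
   hit, or NULL to continue with the aligned loop (L(align_more)).  */
static const char *
page_cross_scan (const char *s, unsigned char ch)
{
  uintptr_t off = (uintptr_t) s & (PAGE_SIZE - 1);
  const char *blk = s - off + (PAGE_SIZE - VEC_SIZE);
  uint64_t mask = vec_hit_mask (blk, ch);
  mask >>= off - (PAGE_SIZE - VEC_SIZE);  /* the SHR %cl step */
  if (mask == 0)
    return NULL;
  return s + __builtin_ctzll (mask);      /* the bsf step */
}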
sysdeps/x86_64/multiarch/Makefile | 3 +
sysdeps/x86_64/multiarch/ifunc-impl-list.c | 12 +
sysdeps/x86_64/multiarch/strchr-evex-base.S | 294 +++++++++++++++++++
sysdeps/x86_64/multiarch/strchr-evex512.S | 7 +
sysdeps/x86_64/multiarch/strchrnul-evex512.S | 8 +
sysdeps/x86_64/multiarch/wcschr-evex512.S | 8 +
6 files changed, 332 insertions(+)
create mode 100644 sysdeps/x86_64/multiarch/strchr-evex-base.S
create mode 100644 sysdeps/x86_64/multiarch/strchr-evex512.S
create mode 100644 sysdeps/x86_64/multiarch/strchrnul-evex512.S
create mode 100644 sysdeps/x86_64/multiarch/wcschr-evex512.S
diff --git a/sysdeps/x86_64/multiarch/Makefile b/sysdeps/x86_64/multiarch/Makefile
index df4601c294..89b58fa557 100644
--- a/sysdeps/x86_64/multiarch/Makefile
+++ b/sysdeps/x86_64/multiarch/Makefile
@@ -60,11 +60,13 @@ sysdep_routines += \
strchr-avx2 \
strchr-avx2-rtm \
strchr-evex \
+ strchr-evex512 \
strchr-sse2 \
strchr-sse2-no-bsf \
strchrnul-avx2 \
strchrnul-avx2-rtm \
strchrnul-evex \
+ strchrnul-evex512 \
strchrnul-sse2 \
strcmp-avx2 \
strcmp-avx2-rtm \
@@ -129,6 +131,7 @@ sysdep_routines += \
wcschr-avx2 \
wcschr-avx2-rtm \
wcschr-evex \
+ wcschr-evex512 \
wcschr-sse2 \
wcscmp-avx2 \
wcscmp-avx2-rtm \
diff --git a/sysdeps/x86_64/multiarch/ifunc-impl-list.c b/sysdeps/x86_64/multiarch/ifunc-impl-list.c
index a71444eccb..bce1d15171 100644
--- a/sysdeps/x86_64/multiarch/ifunc-impl-list.c
+++ b/sysdeps/x86_64/multiarch/ifunc-impl-list.c
@@ -518,6 +518,10 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
&& CPU_FEATURE_USABLE (AVX512BW)
&& CPU_FEATURE_USABLE (BMI2)),
__strchr_evex)
+ X86_IFUNC_IMPL_ADD_V4 (array, i, strchr,
+ (CPU_FEATURE_USABLE (AVX512VL)
+ && CPU_FEATURE_USABLE (AVX512BW)),
+ __strchr_evex512)
X86_IFUNC_IMPL_ADD_V3 (array, i, strchr,
(CPU_FEATURE_USABLE (AVX2)
&& CPU_FEATURE_USABLE (BMI2)),
@@ -543,6 +547,10 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
&& CPU_FEATURE_USABLE (AVX512BW)
&& CPU_FEATURE_USABLE (BMI2)),
__strchrnul_evex)
+ X86_IFUNC_IMPL_ADD_V4 (array, i, strchrnul,
+ (CPU_FEATURE_USABLE (AVX512VL)
+ && CPU_FEATURE_USABLE (AVX512BW)),
+ __strchrnul_evex512)
X86_IFUNC_IMPL_ADD_V3 (array, i, strchrnul,
(CPU_FEATURE_USABLE (AVX2)
&& CPU_FEATURE_USABLE (BMI2)),
@@ -753,6 +761,10 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
&& CPU_FEATURE_USABLE (AVX512BW)
&& CPU_FEATURE_USABLE (BMI2)),
__wcschr_evex)
+ X86_IFUNC_IMPL_ADD_V4 (array, i, wcschr,
+ (CPU_FEATURE_USABLE (AVX512VL)
+ && CPU_FEATURE_USABLE (AVX512BW)),
+ __wcschr_evex512)
X86_IFUNC_IMPL_ADD_V3 (array, i, wcschr,
(CPU_FEATURE_USABLE (AVX2)
&& CPU_FEATURE_USABLE (BMI2)),
diff --git a/sysdeps/x86_64/multiarch/strchr-evex-base.S b/sysdeps/x86_64/multiarch/strchr-evex-base.S
new file mode 100644
index 0000000000..919dafc8b6
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/strchr-evex-base.S
@@ -0,0 +1,294 @@
+/* Placeholder function, not used by any processor at the moment.
+ Copyright (C) 2022 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <https://www.gnu.org/licenses/>. */
+
+/* UNUSED. Exists purely as a reference implementation. */
+
+#include <isa-level.h>
+
+#if ISA_SHOULD_BUILD (4)
+
+# include <sysdep.h>
+
+# ifdef USE_AS_WCSCHR
+# define CHAR_REG esi
+# define CHAR_SIZE 4
+# define VPBROADCAST vpbroadcastd
+# define VPCMP vpcmpd
+# define VPMINU vpminud
+# define VPTESTN vptestnmd
+# else
+# define CHAR_REG sil
+# define CHAR_SIZE 1
+# define VPBROADCAST vpbroadcastb
+# define VPCMP vpcmpb
+# define VPMINU vpminub
+# define VPTESTN vptestnmb
+# endif
+
+# define PAGE_SIZE 4096
+# define CHAR_PER_VEC (VEC_SIZE / CHAR_SIZE)
+# define XMM1 xmm17
+
+# if VEC_SIZE == 64
+# define KMOV kmovq
+# define KORTEST kortestq
+# define RAX rax
+# define RCX rcx
+# define RDX rdx
+# define SHR shrq
+# define TEXTSUFFIX evex512
+# define VMM0 zmm16
+# define VMM1 zmm17
+# define VMM2 zmm18
+# define VMM3 zmm19
+# define VMM4 zmm20
+# define VMM5 zmm21
+# define VMOVA vmovdqa64
+# define VMOVU vmovdqu64
+
+# elif VEC_SIZE == 32
+/* Currently unused. */
+# define KMOV kmovd
+# define KORTEST kortestd
+# define RAX eax
+# define RCX ecx
+# define RDX edx
+# define SHR shrl
+# define TEXTSUFFIX evex256
+# define VMM0 ymm16
+# define VMM1 ymm17
+# define VMM2 ymm18
+# define VMM3 ymm19
+# define VMM4 ymm20
+# define VMM5 ymm21
+# define VMOVA vmovdqa32
+# define VMOVU vmovdqu32
+# endif
+
+ .section .text.TEXTSUFFIX, "ax", @progbits
+/* Aligning the entry point to 64 bytes provides better performance
+ for strings of up to one vector length. */
+ENTRY_P2ALIGN (STRCHR, 6)
+
+ /* Broadcast CHAR to VMM0. */
+ VPBROADCAST %esi, %VMM0
+ movl %edi, %eax
+ andl $(PAGE_SIZE - 1), %eax
+ cmpl $(PAGE_SIZE - VEC_SIZE), %eax
+ ja L(page_cross)
+
+ /* Check each [w]char for CHAR and null; mask bits mark the hits. */
+ VMOVU (%rdi), %VMM1
+
+ vpxorq %VMM1, %VMM0, %VMM2
+ VPMINU %VMM2, %VMM1, %VMM2
+ VPTESTN %VMM2, %VMM2, %k0
+
+ KMOV %k0, %RAX
+# ifndef USE_AS_STRCHRNUL
+ test %RAX, %RAX
+ jz L(align_more)
+ bsf %RAX, %RAX
+# else
+ /* For strchrnul, use bsf anyway: if the string is shorter
+ than 64 bytes, the entire logic fits in one 64-byte cache
+ line, which offsets the perf gap relative to the evex
+ version. Although branching on bsf saves code size, it
+ is normally not preferred for a conditional jump for two
+ reasons: 1) its latency is 3, and 2) unlike test, it
+ cannot be micro-fused with the jump. */
+ bsf %RAX, %RAX
+ jz L(align_more)
+# endif
+
+# ifdef USE_AS_WCSCHR
+ leaq (%rdi, %rax, CHAR_SIZE), %rax
+# else
+ add %rdi, %rax
+# endif
+# ifndef USE_AS_STRCHRNUL
+ cmp (%rax), %CHAR_REG
+ jne L(zero)
+# endif
+ ret
+
+# ifndef USE_AS_STRCHRNUL
+L(zero):
+ xorl %eax, %eax
+ ret
+# endif
+
+L(ret_vec_x2):
+ subq $-VEC_SIZE, %rax
+L(ret_vec_x1):
+ bsf %RCX, %RCX
+# ifdef USE_AS_WCSCHR
+ leaq (%rax, %rcx, CHAR_SIZE), %rax
+# else
+ add %rcx, %rax
+# endif
+
+# ifndef USE_AS_STRCHRNUL
+ cmp (%rax), %CHAR_REG
+ jne L(zero)
+# endif
+ ret
+
+L(align_more):
+ leaq VEC_SIZE(%rdi), %rax
+ /* Align rax to VEC_SIZE. */
+ andq $-VEC_SIZE, %rax
+
+ /* Check the next four vectors before entering the 4x loop. */
+ VMOVA (%rax), %VMM1
+ vpxorq %VMM1, %VMM0, %VMM2
+ VPMINU %VMM2, %VMM1, %VMM2
+ VPTESTN %VMM2, %VMM2, %k0
+
+ KMOV %k0, %RCX
+ test %RCX, %RCX
+ jnz L(ret_vec_x1)
+
+ VMOVA VEC_SIZE(%rax), %VMM1
+ vpxorq %VMM1, %VMM0, %VMM2
+ VPMINU %VMM2, %VMM1, %VMM2
+ VPTESTN %VMM2, %VMM2, %k0
+
+ KMOV %k0, %RCX
+ test %RCX, %RCX
+ jnz L(ret_vec_x2)
+
+ VMOVA (VEC_SIZE * 2)(%rax), %VMM1
+ vpxorq %VMM1, %VMM0, %VMM2
+ VPMINU %VMM2, %VMM1, %VMM2
+ VPTESTN %VMM2, %VMM2, %k0
+ KMOV %k0, %RCX
+ test %RCX, %RCX
+ jnz L(ret_vec_x3)
+
+ VMOVA (VEC_SIZE * 3)(%rax), %VMM1
+ vpxorq %VMM1, %VMM0, %VMM2
+ VPMINU %VMM2, %VMM1, %VMM2
+ VPTESTN %VMM2, %VMM2, %k0
+ KMOV %k0, %RCX
+ test %RCX, %RCX
+ jnz L(ret_vec_x4)
+
+ /* Align address to VEC_SIZE * 4 for loop. */
+ andq $-(VEC_SIZE * 4), %rax
+
+ .p2align 4,,11
+L(loop):
+ /* The VPMINU and VPCMP combination provides better performance
+ than alternative combinations. */
+ VMOVA (VEC_SIZE * 4)(%rax), %VMM1
+ VMOVA (VEC_SIZE * 5)(%rax), %VMM2
+ VMOVA (VEC_SIZE * 6)(%rax), %VMM3
+ VMOVA (VEC_SIZE * 7)(%rax), %VMM4
+
+ vpxorq %VMM1, %VMM0, %VMM5
+ VPMINU %VMM5, %VMM1, %VMM1
+
+ VPCMP $4, %VMM0, %VMM2, %k1
+ VPMINU %VMM1, %VMM2, %VMM2{%k1}{z}
+
+ VPCMP $4, %VMM0, %VMM3, %k2
+ VPMINU %VMM2, %VMM3, %VMM3{%k2}{z}
+
+ VPCMP $4, %VMM0, %VMM4, %k3
+ VPMINU %VMM3, %VMM4, %VMM4{%k3}{z}
+
+ VPTESTN %VMM4, %VMM4, %k3
+
+ subq $-(VEC_SIZE * 4), %rax
+ KORTEST %k3, %k3
+ jz L(loop)
+
+ VPTESTN %VMM1, %VMM1, %k0
+ KMOV %k0, %RCX
+ test %RCX, %RCX
+ jnz L(ret_vec_x1)
+
+ VPTESTN %VMM2, %VMM2, %k0
+ KMOV %k0, %RCX
+ /* At this point, if the mask is non-zero, the first CHAR or
+ null must be in the second vector. */
+ test %RCX, %RCX
+ jnz L(ret_vec_x2)
+
+ VPTESTN %VMM3, %VMM3, %k0
+ KMOV %k0, %RCX
+ test %RCX, %RCX
+ jnz L(ret_vec_x3)
+ /* At this point the null [w]char must be in the fourth vector,
+ so there is no need to check. */
+ KMOV %k3, %RCX
+
+L(ret_vec_x4):
+ bsf %RCX, %RCX
+ leaq (VEC_SIZE * 3)(%rax, %rcx, CHAR_SIZE), %rax
+# ifndef USE_AS_STRCHRNUL
+ cmp (%rax), %CHAR_REG
+ jne L(zero)
+# endif
+ ret
+
+L(ret_vec_x3):
+ bsf %RCX, %RCX
+ leaq (VEC_SIZE * 2)(%rax, %rcx, CHAR_SIZE), %rax
+# ifndef USE_AS_STRCHRNUL
+ cmp (%rax), %CHAR_REG
+ jne L(zero)
+# endif
+ ret
+
+L(page_cross):
+ movl %eax, %ecx
+# ifdef USE_AS_WCSCHR
+ /* Calculate the number of compare-result bits to skip for the
+ wide-string alignment adjustment. */
+ andl $(VEC_SIZE - 1), %ecx
+ sarl $2, %ecx
+# endif
+ /* ecx contains the number of [w]chars to skip as a result of
+ the address alignment. */
+ xorq %rdi, %rax
+ VMOVA (PAGE_SIZE - VEC_SIZE)(%rax), %VMM1
+ vpxorq %VMM1, %VMM0, %VMM2
+ VPMINU %VMM2, %VMM1, %VMM2
+ VPTESTN %VMM2, %VMM2, %k0
+ KMOV %k0, %RAX
+ /* Discard the mask bits for characters before the string start. */
+ SHR %cl, %RAX
+ jz L(align_more)
+
+ bsf %RAX, %RAX
+# ifdef USE_AS_WCSCHR
+ leaq (%rdi, %rax, CHAR_SIZE), %rax
+# else
+ addq %rdi, %rax
+# endif
+
+# ifndef USE_AS_STRCHRNUL
+ cmp (%rax), %CHAR_REG
+ jne L(zero)
+# endif
+ ret
+
+END (STRCHR)
+#endif
diff --git a/sysdeps/x86_64/multiarch/strchr-evex512.S b/sysdeps/x86_64/multiarch/strchr-evex512.S
new file mode 100644
index 0000000000..4079bf387d
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/strchr-evex512.S
@@ -0,0 +1,7 @@
+# ifndef STRCHR
+# define STRCHR __strchr_evex512
+# endif
+
+#define VEC_SIZE 64
+
+#include "strchr-evex-base.S"
diff --git a/sysdeps/x86_64/multiarch/strchrnul-evex512.S b/sysdeps/x86_64/multiarch/strchrnul-evex512.S
new file mode 100644
index 0000000000..1be0b12f38
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/strchrnul-evex512.S
@@ -0,0 +1,8 @@
+#ifndef STRCHRNUL
+# define STRCHRNUL __strchrnul_evex512
+#endif
+
+#define STRCHR STRCHRNUL
+#define USE_AS_STRCHRNUL 1
+
+#include "strchr-evex512.S"
diff --git a/sysdeps/x86_64/multiarch/wcschr-evex512.S b/sysdeps/x86_64/multiarch/wcschr-evex512.S
new file mode 100644
index 0000000000..50c87ab1e5
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/wcschr-evex512.S
@@ -0,0 +1,8 @@
+#ifndef WCSCHR
+# define WCSCHR __wcschr_evex512
+#endif
+
+#define STRCHR WCSCHR
+#define USE_AS_WCSCHR 1
+
+#include "strchr-evex512.S"
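For context on the ifunc-impl-list.c hunks above: they only register
the new variants for the test/benchmark machinery; no resolver selects
them yet, hence "placeholder". A sketch of how a dispatcher could
prefer them, written with the GCC ifunc attribute rather than glibc's
internal selector (the __strchr_generic fallback name is illustrative):

typedef char *strchr_fn (const char *, int);

extern strchr_fn __strchr_evex512;
extern strchr_fn __strchr_generic;   /* illustrative fallback */

/* Pick the preferred implementation at load time, mirroring the
   AVX512VL && AVX512BW requirement registered above.  */
static strchr_fn *
resolve_strchr (void)
{
  __builtin_cpu_init ();
  if (__builtin_cpu_supports ("avx512vl")
      && __builtin_cpu_supports ("avx512bw"))
    return __strchr_evex512;
  return __strchr_generic;
}

char *strchr (const char *, int)
  __attribute__ ((ifunc ("resolve_strchr")));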
--
2.36.1