From: Sunil K Pandey <skpgkp2@gmail.com>
To: libc-alpha@sourceware.org
Subject: [PATCH v2] x86_64: Implement evex512 version of strchrnul, strchr and wcschr
Date: Fri, 21 Oct 2022 14:23:37 -0700
Message-ID: <20221021212337.3110291-1-skpgkp2@gmail.com>
In-Reply-To: <CAFUsyfLuPYE_ZAOdj0tnq089Xx49BM9HY4GNs=vmG0UkqMYQRA@mail.gmail.com>
Changes from v1:
- Use VEC API.
- Replace vec load with vec load+op where possible.
- Replace extra lea in align_more with add.
- Restructure loop logic.
- Create zero_2 to avoid a long jmp.
- Combine first, second and third vector return logic.
This patch implements the following evex512 versions of string functions.
The evex512 versions take up to 30% fewer cycles than the evex versions,
depending on length and alignment; a scalar C sketch of the stop test they
rely on follows the list below.
- strchrnul function using 512 bit vectors.
- strchr function using 512 bit vectors.
- wcschr function using 512 bit vectors.
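As a reference for reviewers, the per-lane stop test these routines use can
be modeled in scalar C (an illustrative sketch only; the function and
variable names below are not part of the patch): XOR-ing an input element
with the search character gives zero on a match, and the element itself is
zero at the terminator, so an unsigned minimum of the two is zero exactly
where the search must stop.  This is the property the vpxorq + VPMINU +
VPTESTN sequence applies to a whole vector at a time.

/* Scalar model of the vector stop test: min(x, x ^ c) == 0 iff x == 0
   (terminator) or x == c (match).  */
static unsigned char
umin (unsigned char a, unsigned char b)
{
  return a < b ? a : b;	/* Unsigned minimum, analogous to VPMINU.  */
}

char *
strchrnul_model (const char *s, int c_in)
{
  const unsigned char *p = (const unsigned char *) s;
  unsigned char c = (unsigned char) c_in;

  while (umin (*p, (unsigned char) (*p ^ c)) != 0)
    p++;
  return (char *) p;	/* Points at the first match or at the NUL.  */
}

strchr behaves the same except that a pointer to the terminator is turned
into NULL when the character found is not CHAR, which is what the
#ifndef USE_AS_STRCHRNUL blocks in the patch below handle.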
Code size data:
strchrnul-evex.o 599 bytes
strchrnul-evex512.o 547 bytes (-9%)
strchr-evex.o 639 bytes
strchr-evex512.o 577 bytes (-10%)
wcschr-evex.o 644 bytes
wcschr-evex512.o 572 bytes (-11%)
Placeholder function, not used by any processor at the moment.
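Although no processor selects these variants yet, they are registered in
ifunc-impl-list.c behind an AVX512VL-plus-AVX512BW gate.  For testing
outside glibc, roughly the same requirement could be probed with GCC's
builtin as sketched here (glibc itself uses the internal
CPU_FEATURE_USABLE checks shown in the diff, not this helper):

/* Rough out-of-glibc equivalent of the feature gate on the evex512
   IFUNC entries: both AVX512VL and AVX512BW must be available.  */
static int
evex512_strchr_usable (void)
{
  return __builtin_cpu_supports ("avx512vl")
	 && __builtin_cpu_supports ("avx512bw");
}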
---
sysdeps/x86_64/multiarch/Makefile | 3 +
sysdeps/x86_64/multiarch/ifunc-impl-list.c | 12 +
sysdeps/x86_64/multiarch/strchr-evex-base.S | 270 +++++++++++++++++++
sysdeps/x86_64/multiarch/strchr-evex512.S | 8 +
sysdeps/x86_64/multiarch/strchrnul-evex512.S | 8 +
sysdeps/x86_64/multiarch/wcschr-evex512.S | 9 +
6 files changed, 310 insertions(+)
create mode 100644 sysdeps/x86_64/multiarch/strchr-evex-base.S
create mode 100644 sysdeps/x86_64/multiarch/strchr-evex512.S
create mode 100644 sysdeps/x86_64/multiarch/strchrnul-evex512.S
create mode 100644 sysdeps/x86_64/multiarch/wcschr-evex512.S
diff --git a/sysdeps/x86_64/multiarch/Makefile b/sysdeps/x86_64/multiarch/Makefile
index e974b1ad97..597ac9d5e9 100644
--- a/sysdeps/x86_64/multiarch/Makefile
+++ b/sysdeps/x86_64/multiarch/Makefile
@@ -62,11 +62,13 @@ sysdep_routines += \
strchr-avx2 \
strchr-avx2-rtm \
strchr-evex \
+ strchr-evex512 \
strchr-sse2 \
strchr-sse2-no-bsf \
strchrnul-avx2 \
strchrnul-avx2-rtm \
strchrnul-evex \
+ strchrnul-evex512 \
strchrnul-sse2 \
strcmp-avx2 \
strcmp-avx2-rtm \
@@ -131,6 +133,7 @@ sysdep_routines += \
wcschr-avx2 \
wcschr-avx2-rtm \
wcschr-evex \
+ wcschr-evex512 \
wcschr-sse2 \
wcscmp-avx2 \
wcscmp-avx2-rtm \
diff --git a/sysdeps/x86_64/multiarch/ifunc-impl-list.c b/sysdeps/x86_64/multiarch/ifunc-impl-list.c
index 529c0b0ef0..c3d75a09f4 100644
--- a/sysdeps/x86_64/multiarch/ifunc-impl-list.c
+++ b/sysdeps/x86_64/multiarch/ifunc-impl-list.c
@@ -544,6 +544,10 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
&& CPU_FEATURE_USABLE (AVX512BW)
&& CPU_FEATURE_USABLE (BMI2)),
__strchr_evex)
+ X86_IFUNC_IMPL_ADD_V4 (array, i, strchr,
+ (CPU_FEATURE_USABLE (AVX512VL)
+ && CPU_FEATURE_USABLE (AVX512BW)),
+ __strchr_evex512)
X86_IFUNC_IMPL_ADD_V3 (array, i, strchr,
(CPU_FEATURE_USABLE (AVX2)
&& CPU_FEATURE_USABLE (BMI2)),
@@ -569,6 +573,10 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
&& CPU_FEATURE_USABLE (AVX512BW)
&& CPU_FEATURE_USABLE (BMI2)),
__strchrnul_evex)
+ X86_IFUNC_IMPL_ADD_V4 (array, i, strchrnul,
+ (CPU_FEATURE_USABLE (AVX512VL)
+ && CPU_FEATURE_USABLE (AVX512BW)),
+ __strchrnul_evex512)
X86_IFUNC_IMPL_ADD_V3 (array, i, strchrnul,
(CPU_FEATURE_USABLE (AVX2)
&& CPU_FEATURE_USABLE (BMI2)),
@@ -793,6 +801,10 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
&& CPU_FEATURE_USABLE (AVX512BW)
&& CPU_FEATURE_USABLE (BMI2)),
__wcschr_evex)
+ X86_IFUNC_IMPL_ADD_V4 (array, i, wcschr,
+ (CPU_FEATURE_USABLE (AVX512VL)
+ && CPU_FEATURE_USABLE (AVX512BW)),
+ __wcschr_evex512)
X86_IFUNC_IMPL_ADD_V3 (array, i, wcschr,
(CPU_FEATURE_USABLE (AVX2)
&& CPU_FEATURE_USABLE (BMI2)),
diff --git a/sysdeps/x86_64/multiarch/strchr-evex-base.S b/sysdeps/x86_64/multiarch/strchr-evex-base.S
new file mode 100644
index 0000000000..eb22171954
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/strchr-evex-base.S
@@ -0,0 +1,270 @@
+/* Placeholder function, not used by any processor at the moment.
+ Copyright (C) 2022 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <https://www.gnu.org/licenses/>. */
+
+/* UNUSED. Exists purely as reference implementation. */
+
+#include <isa-level.h>
+
+#if ISA_SHOULD_BUILD (4)
+
+# include <sysdep.h>
+
+# ifdef USE_AS_WCSCHR
+# define CHAR_REG esi
+# define CHAR_SIZE 4
+# define VPBROADCAST vpbroadcastd
+# define VPCMP vpcmpd
+# define VPCMPNE vpcmpneqd
+# define VPMINU vpminud
+# define VPTEST vptestmd
+# define VPTESTN vptestnmd
+# else
+# define CHAR_REG sil
+# define CHAR_SIZE 1
+# define VPBROADCAST vpbroadcastb
+# define VPCMP vpcmpb
+# define VPCMPNE vpcmpneqb
+# define VPMINU vpminub
+# define VPTEST vptestmb
+# define VPTESTN vptestnmb
+# endif
+
+# define PAGE_SIZE 4096
+# define CHAR_PER_VEC (VEC_SIZE / CHAR_SIZE)
+
+ .section SECTION(.text), "ax", @progbits
+/* Aligning the entry point to 64 bytes provides better performance for
+   strings of up to one vector length.  */
+ENTRY_P2ALIGN (STRCHR, 6)
+
+ /* Broadcast CHAR to VMM(0). */
+ VPBROADCAST %esi, %VMM(0)
+ movl %edi, %eax
+ andl $(PAGE_SIZE - 1), %eax
+ cmpl $(PAGE_SIZE - VEC_SIZE), %eax
+ ja L(page_cross)
+
+ /* Test each [w]char for NUL or a CHAR match; matching lanes set mask bits.  */
+ vpxorq (%rdi), %VMM(0), %VMM(1)
+ VPMINU (%rdi), %VMM(1), %VMM(1)
+ VPTESTN %VMM(1), %VMM(1), %k0
+
+ KMOV %k0, %VRAX
+ bsf %VRAX, %VRAX
+ jz L(align_more)
+
+# ifdef USE_AS_WCSCHR
+ leaq (%rdi, %rax, CHAR_SIZE), %rax
+# else
+ add %rdi, %rax
+# endif
+# ifndef USE_AS_STRCHRNUL
+ cmp (%rax), %CHAR_REG
+ jne L(zero)
+ ret
+L(zero):
+ xorl %eax, %eax
+# endif
+ ret
+
+L(ret_vec_x3):
+ subq $-VEC_SIZE, %rdi
+L(ret_vec_x2):
+ subq $-VEC_SIZE, %rdi
+L(ret_vec_x1):
+ bsf %VRAX, %VRAX
+# ifdef USE_AS_WCSCHR
+ leaq (%rdi, %rax, CHAR_SIZE), %rax
+# else
+ add %rdi, %rax
+# endif
+
+# ifndef USE_AS_STRCHRNUL
+ cmp (%rax), %CHAR_REG
+ jne L(zero)
+# endif
+ ret
+
+L(page_cross):
+ movl %eax, %ecx
+# ifdef USE_AS_WCSCHR
+ /* Calculate number of compare result bits to be skipped for
+ wide string alignment adjustment. */
+ andl $(VEC_SIZE - 1), %ecx
+ sarl $2, %ecx
+# endif
+ /* ecx contains the number of [w]chars to be skipped as a result
+    of address alignment.  */
+ xorq %rdi, %rax
+ vpxorq (PAGE_SIZE - VEC_SIZE)(%rax), %VMM(0), %VMM(1)
+ VPMINU (PAGE_SIZE - VEC_SIZE)(%rax), %VMM(1), %VMM(1)
+ VPTESTN %VMM(1), %VMM(1), %k0
+ KMOV %k0, %VRAX
+ /* Discard the leading [w]chars skipped for the alignment adjustment.  */
+ shr %cl, %VRAX
+ jz L(align_more)
+
+ bsf %VRAX, %VRAX
+# ifdef USE_AS_WCSCHR
+ leaq (%rdi, %rax, CHAR_SIZE), %rax
+# else
+ addq %rdi, %rax
+# endif
+
+# ifndef USE_AS_STRCHRNUL
+ cmp (%rax), %CHAR_REG
+ jne L(zero)
+# endif
+ ret
+
+L(align_more):
+ /* Align rdi to VEC_SIZE.  */
+ andq $-VEC_SIZE, %rdi
+
+ /* Unrolled 4 times: check four vectors before entering the 4-vector loop.  */
+ vpxorq VEC_SIZE(%rdi), %VMM(0), %VMM(1)
+ VPMINU VEC_SIZE(%rdi), %VMM(1), %VMM(1)
+
+ /* Advance rdi by VEC_SIZE now so that L(ret_vec_x1) can be used on
+    a match; doing the add after the load keeps the vector load from
+    depending on the updated pointer.  */
+ subq $-VEC_SIZE, %rdi
+
+ VPTESTN %VMM(1), %VMM(1), %k0
+
+ KMOV %k0, %VRAX
+ test %VRAX, %VRAX
+ jnz L(ret_vec_x1)
+
+ vpxorq VEC_SIZE(%rdi), %VMM(0), %VMM(1)
+ VPMINU VEC_SIZE(%rdi), %VMM(1), %VMM(1)
+ VPTESTN %VMM(1), %VMM(1), %k0
+
+ KMOV %k0, %VRAX
+ test %VRAX, %VRAX
+ jnz L(ret_vec_x2)
+
+ vpxorq (VEC_SIZE * 2)(%rdi), %VMM(0), %VMM(1)
+ VPMINU (VEC_SIZE * 2)(%rdi), %VMM(1), %VMM(1)
+ VPTESTN %VMM(1), %VMM(1), %k0
+
+ KMOV %k0, %VRAX
+ test %VRAX, %VRAX
+ jnz L(ret_vec_x3)
+
+ vpxorq (VEC_SIZE * 3)(%rdi), %VMM(0), %VMM(1)
+ VPMINU (VEC_SIZE * 3)(%rdi), %VMM(1), %VMM(1)
+ VPTESTN %VMM(1), %VMM(1), %k0
+
+ KMOV %k0, %VRDX
+ test %VRDX, %VRDX
+ jnz L(ret_vec_x4)
+
+ /* Align address to VEC_SIZE * 4 for loop. */
+ andq $-(VEC_SIZE * 4), %rdi
+L(loop):
+ /* The VPMINU and VPCMP combination provides better performance than
+    alternative combinations.  */
+ VMOVA (VEC_SIZE * 4)(%rdi), %VMM(1)
+ VMOVA (VEC_SIZE * 6)(%rdi), %VMM(3)
+
+ VPCMPNE %VMM(1), %VMM(0), %k1
+ VPCMPNE (VEC_SIZE * 5)(%rdi), %VMM(0), %k2
+
+ VPMINU (VEC_SIZE * 5)(%rdi), %VMM(1), %VMM(2)
+
+ VPCMPNE %VMM(3), %VMM(0), %k3{%k1}
+ VPCMPNE (VEC_SIZE * 7)(%rdi), %VMM(0), %k4{%k2}
+
+ VPMINU (VEC_SIZE * 7)(%rdi), %VMM(3), %VMM(4)
+ VPMINU %VMM(2), %VMM(4), %VMM(4){%k3}{z}
+
+ VPTEST %VMM(4), %VMM(4), %k5{%k4}
+
+ KMOV %k5, %VRDX
+ subq $-(VEC_SIZE * 4), %rdi
+# ifdef USE_AS_WCSCHR
+# if CHAR_PER_VEC == 8
+ sub $0xff, %VRDX
+# else
+ sub $0xffff, %VRDX
+# endif
+# else
+ inc %VRDX
+# endif
+ jz L(loop)
+
+ VPTEST %VMM(1), %VMM(1), %k0{%k1}
+ KMOV %k0, %VRAX
+# ifdef USE_AS_WCSCHR
+# if CHAR_PER_VEC == 8
+ sub $0xff, %VRAX
+# else
+ sub $0xffff, %VRAX
+# endif
+# else
+ inc %VRAX
+# endif
+ jnz L(ret_vec_x1)
+
+ VPTEST %VMM(2), %VMM(2), %k0{%k2}
+ KMOV %k0, %VRAX
+ /* At this point, if k1 is nonzero, the NUL char must be in the
+    second vector.  */
+# ifdef USE_AS_WCSCHR
+# if CHAR_PER_VEC == 8
+ sub $0xff, %VRAX
+# else
+ sub $0xffff, %VRAX
+# endif
+# else
+ inc %VRAX
+# endif
+ jnz L(ret_vec_x2)
+
+ VPTEST %VMM(3), %VMM(3), %k0{%k3}
+ KMOV %k0, %VRAX
+# ifdef USE_AS_WCSCHR
+# if CHAR_PER_VEC == 8
+ sub $0xff, %VRAX
+# else
+ sub $0xffff, %VRAX
+# endif
+# else
+ inc %VRAX
+# endif
+ jnz L(ret_vec_x3)
+ /* At this point the NUL [w]char must be in the fourth vector, so
+    there is no need to check.  */
+
+L(ret_vec_x4):
+ bsf %VRDX, %VRAX
+ leaq (VEC_SIZE * 3)(%rdi, %rax, CHAR_SIZE), %rax
+# ifndef USE_AS_STRCHRNUL
+ cmp (%rax), %CHAR_REG
+ jne L(zero_2)
+# endif
+ ret
+
+# ifndef USE_AS_STRCHRNUL
+L(zero_2):
+ xor %eax, %eax
+ ret
+# endif
+END (STRCHR)
+#endif
diff --git a/sysdeps/x86_64/multiarch/strchr-evex512.S b/sysdeps/x86_64/multiarch/strchr-evex512.S
new file mode 100644
index 0000000000..a4ac022952
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/strchr-evex512.S
@@ -0,0 +1,8 @@
+#ifndef STRCHR
+# define STRCHR __strchr_evex512
+#endif
+
+#include "x86-evex512-vecs.h"
+#include "reg-macros.h"
+
+#include "strchr-evex-base.S"
diff --git a/sysdeps/x86_64/multiarch/strchrnul-evex512.S b/sysdeps/x86_64/multiarch/strchrnul-evex512.S
new file mode 100644
index 0000000000..1be0b12f38
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/strchrnul-evex512.S
@@ -0,0 +1,8 @@
+#ifndef STRCHRNUL
+# define STRCHRNUL __strchrnul_evex512
+#endif
+
+#define STRCHR STRCHRNUL
+#define USE_AS_STRCHRNUL 1
+
+#include "strchr-evex512.S"
diff --git a/sysdeps/x86_64/multiarch/wcschr-evex512.S b/sysdeps/x86_64/multiarch/wcschr-evex512.S
new file mode 100644
index 0000000000..3fe4e77a70
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/wcschr-evex512.S
@@ -0,0 +1,9 @@
+#ifndef WCSCHR
+# define WCSCHR __wcschr_evex512
+#endif
+
+#define STRCHR WCSCHR
+#define USE_AS_WCSCHR 1
+
+#define USE_WIDE_CHAR 1
+#include "strchr-evex512.S"
--
2.36.1
Thread overview: 10+ messages
2022-09-22 0:16 [PATCH] " Sunil K Pandey
2022-09-22 0:50 ` Noah Goldstein
2022-09-23 3:57 ` Sunil Pandey
2022-09-29 3:41 ` Sunil Pandey
2022-09-29 4:07 ` Noah Goldstein
2022-10-21 21:23 ` Sunil K Pandey [this message]
2022-10-25 23:35 ` [PATCH v3] " Sunil K Pandey
2022-10-26 1:35 ` Noah Goldstein
2022-10-26 2:06 ` [PATCH v4] " Sunil K Pandey
2022-10-26 4:11 ` Noah Goldstein