* [PATCH] x86-64: Add memchr/rawmemchr/wmemchr optimized with SSE2/AVX2
From: H.J. Lu @ 2017-06-01 15:44 UTC
To: GNU C Library
SSE2 memchr is extended to support wmemchr. AVX2 memchr/rawmemchr/wmemchr
are added to search 32 bytes with a single vector compare instruction.
AVX2 memchr/rawmemchr/wmemchr are as fast as SSE2 memchr/rawmemchr/wmemchr
for small sizes and up to 1.5X faster for larger sizes on Haswell and
Skylake. Select AVX2 memchr/rawmemchr/wmemchr on AVX2 machines where
vzeroupper is preferred and AVX unaligned load is fast.
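For reference, the core AVX2 step the new implementations are built
around can be sketched in C intrinsics as below.  This is an
illustrative sketch only (the function name and structure are mine,
not part of the patch), assuming <immintrin.h> and an AVX2 target;
the assembly below adds the alignment, 4-vector unrolling and
end-of-buffer handling that the sketch omits:

#include <immintrin.h>
#include <stddef.h>

/* Minimal sketch of the 32-byte vector compare; not the actual
   implementation.  */
static const char *
memchr_avx2_sketch (const char *s, int c, size_t n)
{
  __m256i needle = _mm256_set1_epi8 ((char) c); /* broadcast CHAR */
  size_t i;
  for (i = 0; i + 32 <= n; i += 32)
    {
      __m256i chunk = _mm256_loadu_si256 ((const __m256i *) (s + i));
      /* One vpcmpeqb compares 32 bytes at once...  */
      __m256i eq = _mm256_cmpeq_epi8 (chunk, needle);
      /* ...and vpmovmskb turns the result into a bit mask.  */
      unsigned int mask = (unsigned int) _mm256_movemask_epi8 (eq);
      if (mask != 0)
	return s + i + __builtin_ctz (mask); /* first matching byte */
    }
  return NULL; /* tail of fewer than 32 bytes omitted in this sketch */
}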
Any comments?
H.J.
---
* sysdeps/x86_64/memchr.S (MEMCHR): New.  Depends on whether
USE_AS_WMEMCHR is defined.
(PCMPEQ): Likewise.
(memchr): Renamed to ...
(MEMCHR): This. Support wmemchr if USE_AS_WMEMCHR is defined.
Replace pcmpeqb with PCMPEQ.
* sysdeps/x86_64/multiarch/Makefile (sysdep_routines): Add
memchr-avx2, rawmemchr-avx2 and wmemchr-avx2.
* sysdeps/x86_64/multiarch/ifunc-impl-list.c
(__libc_ifunc_impl_list): Test __memchr_avx2, __memchr_sse2,
__rawmemchr_avx2, __rawmemchr_sse2, __wmemchr_avx2 and
__wmemchr_sse2.
* sysdeps/x86_64/multiarch/memchr-avx2.S: New file.
* sysdeps/x86_64/multiarch/memchr.S: Likewise.
* sysdeps/x86_64/multiarch/rawmemchr-avx2.S: Likewise.
* sysdeps/x86_64/multiarch/rawmemchr.S: Likewise.
* sysdeps/x86_64/multiarch/wmemchr-avx2.S: Likewise.
* sysdeps/x86_64/multiarch/wmemchr.S: Likewise.
* sysdeps/x86_64/wmemchr.S: Likewise.
---
sysdeps/x86_64/memchr.S | 69 +++---
sysdeps/x86_64/multiarch/Makefile | 2 +
sysdeps/x86_64/multiarch/ifunc-impl-list.c | 21 ++
sysdeps/x86_64/multiarch/memchr-avx2.S | 340 +++++++++++++++++++++++++++++
sysdeps/x86_64/multiarch/memchr.S | 64 ++++++
sysdeps/x86_64/multiarch/rawmemchr-avx2.S | 4 +
sysdeps/x86_64/multiarch/rawmemchr.S | 64 ++++++
sysdeps/x86_64/multiarch/wmemchr-avx2.S | 4 +
sysdeps/x86_64/multiarch/wmemchr.S | 67 ++++++
sysdeps/x86_64/wmemchr.S | 3 +
10 files changed, 613 insertions(+), 25 deletions(-)
create mode 100644 sysdeps/x86_64/multiarch/memchr-avx2.S
create mode 100644 sysdeps/x86_64/multiarch/memchr.S
create mode 100644 sysdeps/x86_64/multiarch/rawmemchr-avx2.S
create mode 100644 sysdeps/x86_64/multiarch/rawmemchr.S
create mode 100644 sysdeps/x86_64/multiarch/wmemchr-avx2.S
create mode 100644 sysdeps/x86_64/multiarch/wmemchr.S
create mode 100644 sysdeps/x86_64/wmemchr.S
diff --git a/sysdeps/x86_64/memchr.S b/sysdeps/x86_64/memchr.S
index d3be012..3167cd8 100644
--- a/sysdeps/x86_64/memchr.S
+++ b/sysdeps/x86_64/memchr.S
@@ -18,17 +18,31 @@
#include <sysdep.h>
+#ifdef USE_AS_WMEMCHR
+# define MEMCHR wmemchr
+# define PCMPEQ pcmpeqd
+#else
+# define MEMCHR memchr
+# define PCMPEQ pcmpeqb
+#endif
+
/* fast SSE2 version with using pmaxub and 64 byte loop */
.text
-ENTRY(memchr)
+ENTRY(MEMCHR)
movd %esi, %xmm1
mov %edi, %ecx
+#ifdef USE_AS_WMEMCHR
+ test %rdx, %rdx
+ jz L(return_null)
+ shl $2, %rdx
+#else
punpcklbw %xmm1, %xmm1
test %rdx, %rdx
jz L(return_null)
punpcklbw %xmm1, %xmm1
+#endif
and $63, %ecx
pshufd $0, %xmm1, %xmm1
@@ -37,7 +51,7 @@ ENTRY(memchr)
ja L(crosscache)
movdqu (%rdi), %xmm0
- pcmpeqb %xmm1, %xmm0
+ PCMPEQ %xmm1, %xmm0
pmovmskb %xmm0, %eax
test %eax, %eax
@@ -58,7 +72,7 @@ L(crosscache):
and $-16, %rdi
movdqa (%rdi), %xmm0
- pcmpeqb %xmm1, %xmm0
+ PCMPEQ %xmm1, %xmm0
/* Check if there is a match. */
pmovmskb %xmm0, %eax
/* Remove the leading bytes. */
@@ -90,25 +104,25 @@ L(unaligned_no_match):
.p2align 4
L(loop_prolog):
movdqa (%rdi), %xmm0
- pcmpeqb %xmm1, %xmm0
+ PCMPEQ %xmm1, %xmm0
pmovmskb %xmm0, %eax
test %eax, %eax
jnz L(matches)
movdqa 16(%rdi), %xmm2
- pcmpeqb %xmm1, %xmm2
+ PCMPEQ %xmm1, %xmm2
pmovmskb %xmm2, %eax
test %eax, %eax
jnz L(matches16)
movdqa 32(%rdi), %xmm3
- pcmpeqb %xmm1, %xmm3
+ PCMPEQ %xmm1, %xmm3
pmovmskb %xmm3, %eax
test %eax, %eax
jnz L(matches32)
movdqa 48(%rdi), %xmm4
- pcmpeqb %xmm1, %xmm4
+ PCMPEQ %xmm1, %xmm4
add $64, %rdi
pmovmskb %xmm4, %eax
test %eax, %eax
@@ -121,25 +135,25 @@ L(loop_prolog):
jbe L(exit_loop)
movdqa (%rdi), %xmm0
- pcmpeqb %xmm1, %xmm0
+ PCMPEQ %xmm1, %xmm0
pmovmskb %xmm0, %eax
test %eax, %eax
jnz L(matches)
movdqa 16(%rdi), %xmm2
- pcmpeqb %xmm1, %xmm2
+ PCMPEQ %xmm1, %xmm2
pmovmskb %xmm2, %eax
test %eax, %eax
jnz L(matches16)
movdqa 32(%rdi), %xmm3
- pcmpeqb %xmm1, %xmm3
+ PCMPEQ %xmm1, %xmm3
pmovmskb %xmm3, %eax
test %eax, %eax
jnz L(matches32)
movdqa 48(%rdi), %xmm3
- pcmpeqb %xmm1, %xmm3
+ PCMPEQ %xmm1, %xmm3
pmovmskb %xmm3, %eax
add $64, %rdi
@@ -160,10 +174,10 @@ L(align64_loop):
movdqa 32(%rdi), %xmm3
movdqa 48(%rdi), %xmm4
- pcmpeqb %xmm1, %xmm0
- pcmpeqb %xmm1, %xmm2
- pcmpeqb %xmm1, %xmm3
- pcmpeqb %xmm1, %xmm4
+ PCMPEQ %xmm1, %xmm0
+ PCMPEQ %xmm1, %xmm2
+ PCMPEQ %xmm1, %xmm3
+ PCMPEQ %xmm1, %xmm4
pmaxub %xmm0, %xmm3
pmaxub %xmm2, %xmm4
@@ -186,9 +200,9 @@ L(align64_loop):
jnz L(matches16)
movdqa 32(%rdi), %xmm3
- pcmpeqb %xmm1, %xmm3
+ PCMPEQ %xmm1, %xmm3
- pcmpeqb 48(%rdi), %xmm1
+ PCMPEQ 48(%rdi), %xmm1
pmovmskb %xmm3, %eax
test %eax, %eax
jnz L(matches32)
@@ -204,26 +218,26 @@ L(exit_loop):
jle L(exit_loop_32)
movdqa (%rdi), %xmm0
- pcmpeqb %xmm1, %xmm0
+ PCMPEQ %xmm1, %xmm0
pmovmskb %xmm0, %eax
test %eax, %eax
jnz L(matches)
movdqa 16(%rdi), %xmm2
- pcmpeqb %xmm1, %xmm2
+ PCMPEQ %xmm1, %xmm2
pmovmskb %xmm2, %eax
test %eax, %eax
jnz L(matches16)
movdqa 32(%rdi), %xmm3
- pcmpeqb %xmm1, %xmm3
+ PCMPEQ %xmm1, %xmm3
pmovmskb %xmm3, %eax
test %eax, %eax
jnz L(matches32_1)
sub $16, %edx
jle L(return_null)
- pcmpeqb 48(%rdi), %xmm1
+ PCMPEQ 48(%rdi), %xmm1
pmovmskb %xmm1, %eax
test %eax, %eax
jnz L(matches48_1)
@@ -234,14 +248,14 @@ L(exit_loop):
L(exit_loop_32):
add $32, %edx
movdqa (%rdi), %xmm0
- pcmpeqb %xmm1, %xmm0
+ PCMPEQ %xmm1, %xmm0
pmovmskb %xmm0, %eax
test %eax, %eax
jnz L(matches_1)
sub $16, %edx
jbe L(return_null)
- pcmpeqb 16(%rdi), %xmm1
+ PCMPEQ 16(%rdi), %xmm1
pmovmskb %xmm1, %eax
test %eax, %eax
jnz L(matches16_1)
@@ -308,8 +322,13 @@ L(matches48_1):
L(return_null):
xor %eax, %eax
ret
-END(memchr)
+END(MEMCHR)
+#ifdef USE_AS_WMEMCHR
+libc_hidden_def (__wmemchr)
+weak_alias (__wmemchr, wmemchr)
+libc_hidden_weak (wmemchr)
+#else
strong_alias (memchr, __memchr)
-
libc_hidden_builtin_def(memchr)
+#endif
diff --git a/sysdeps/x86_64/multiarch/Makefile b/sysdeps/x86_64/multiarch/Makefile
index a62def3..48aba0f 100644
--- a/sysdeps/x86_64/multiarch/Makefile
+++ b/sysdeps/x86_64/multiarch/Makefile
@@ -6,6 +6,7 @@ ifeq ($(subdir),string)
sysdep_routines += strncat-c stpncpy-c strncpy-c strcmp-ssse3 \
strcmp-sse2-unaligned strncmp-ssse3 \
+ memchr-avx2 rawmemchr-avx2 \
memcmp-avx2 \
memcmp-sse4 memcpy-ssse3 \
memmove-ssse3 \
@@ -32,6 +33,7 @@ endif
ifeq ($(subdir),wcsmbs)
sysdep_routines += wmemcmp-sse4 wmemcmp-ssse3 wmemcmp-c \
+ wmemchr-avx2 \
wmemcmp-avx2 \
wcscpy-ssse3 wcscpy-c
endif
diff --git a/sysdeps/x86_64/multiarch/ifunc-impl-list.c b/sysdeps/x86_64/multiarch/ifunc-impl-list.c
index 35f1960..ae09241 100644
--- a/sysdeps/x86_64/multiarch/ifunc-impl-list.c
+++ b/sysdeps/x86_64/multiarch/ifunc-impl-list.c
@@ -38,6 +38,13 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
size_t i = 0;
+ /* Support sysdeps/x86_64/multiarch/memchr.S. */
+ IFUNC_IMPL (i, name, memchr,
+ IFUNC_IMPL_ADD (array, i, memchr,
+ HAS_ARCH_FEATURE (AVX2_Usable),
+ __memchr_avx2)
+ IFUNC_IMPL_ADD (array, i, memchr, 1, __memchr_sse2))
+
/* Support sysdeps/x86_64/multiarch/memcmp.S. */
IFUNC_IMPL (i, name, memcmp,
IFUNC_IMPL_ADD (array, i, memcmp,
@@ -151,6 +158,13 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
__memset_avx512_no_vzeroupper)
)
+ /* Support sysdeps/x86_64/multiarch/rawmemchr.S. */
+ IFUNC_IMPL (i, name, rawmemchr,
+ IFUNC_IMPL_ADD (array, i, rawmemchr,
+ HAS_ARCH_FEATURE (AVX2_Usable),
+ __rawmemchr_avx2)
+ IFUNC_IMPL_ADD (array, i, rawmemchr, 1, __rawmemchr_sse2))
+
/* Support sysdeps/x86_64/multiarch/stpncpy.S. */
IFUNC_IMPL (i, name, stpncpy,
IFUNC_IMPL_ADD (array, i, stpncpy, HAS_CPU_FEATURE (SSSE3),
@@ -295,6 +309,13 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
__wcscpy_ssse3)
IFUNC_IMPL_ADD (array, i, wcscpy, 1, __wcscpy_sse2))
+ /* Support sysdeps/x86_64/multiarch/wmemchr.S. */
+ IFUNC_IMPL (i, name, wmemchr,
+ IFUNC_IMPL_ADD (array, i, wmemchr,
+ HAS_ARCH_FEATURE (AVX2_Usable),
+ __wmemchr_avx2)
+ IFUNC_IMPL_ADD (array, i, wmemchr, 1, __wmemchr_sse2))
+
/* Support sysdeps/x86_64/multiarch/wmemcmp.S. */
IFUNC_IMPL (i, name, wmemcmp,
IFUNC_IMPL_ADD (array, i, wmemcmp,
diff --git a/sysdeps/x86_64/multiarch/memchr-avx2.S b/sysdeps/x86_64/multiarch/memchr-avx2.S
new file mode 100644
index 0000000..74cb798
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/memchr-avx2.S
@@ -0,0 +1,340 @@
+/* memchr/wmemchr optimized with AVX2.
+ Copyright (C) 2017 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#if IS_IN (libc)
+
+# include <sysdep.h>
+
+# ifndef MEMCHR
+# define MEMCHR __memchr_avx2
+# endif
+
+# ifdef USE_AS_WMEMCHR
+# define VPCMPEQ vpcmpeqd
+# else
+# define VPCMPEQ vpcmpeqb
+# endif
+
+# ifndef VZEROUPPER
+# define VZEROUPPER vzeroupper
+# endif
+
+# define VEC_SIZE 32
+
+ .section .text.avx,"ax",@progbits
+ENTRY (MEMCHR)
+# ifndef USE_AS_RAWMEMCHR
+ /* Check for zero length. */
+ testq %rdx, %rdx
+ jz L(null)
+# endif
+ movl %edi, %ecx
+ /* Broadcast CHAR to YMM0. */
+ vmovd %esi, %xmm0
+# ifdef USE_AS_WMEMCHR
+ shl $2, %rdx
+ vpbroadcastd %xmm0, %ymm0
+# else
+ vpbroadcastb %xmm0, %ymm0
+# endif
+ /* Check if we may cross a page boundary with one vector load. */
+ andl $(2 * VEC_SIZE - 1), %ecx
+ cmpl $VEC_SIZE, %ecx
+ ja L(cross_page_boundary)
+
+ /* Check the first VEC_SIZE bytes. */
+ VPCMPEQ (%rdi), %ymm0, %ymm1
+ vpmovmskb %ymm1, %eax
+ testl %eax, %eax
+
+# ifndef USE_AS_RAWMEMCHR
+ jnz L(first_vec_x0_check)
+ /* Adjust length and check the end of data. */
+ subq $VEC_SIZE, %rdx
+ jbe L(zero)
+# else
+ jnz L(first_vec_x0)
+# endif
+
+ /* Align data for aligned loads in the loop. */
+ addq $VEC_SIZE, %rdi
+ andl $(VEC_SIZE - 1), %ecx
+ andq $-VEC_SIZE, %rdi
+
+# ifndef USE_AS_RAWMEMCHR
+ /* Adjust length. */
+ addq %rcx, %rdx
+
+ subq $(VEC_SIZE * 4), %rdx
+ jbe L(last_4x_vec_or_less)
+# endif
+ jmp L(more_4x_vec)
+
+ .p2align 4
+L(cross_page_boundary):
+ andl $(VEC_SIZE - 1), %ecx
+ andq $-VEC_SIZE, %rdi
+ VPCMPEQ (%rdi), %ymm0, %ymm1
+ vpmovmskb %ymm1, %eax
+ /* Remove the leading bytes. */
+ sarl %cl, %eax
+ testl %eax, %eax
+ jz L(aligned_more)
+ bsfl %eax, %eax
+# ifndef USE_AS_RAWMEMCHR
+ /* Check the end of data. */
+ cmpq %rax, %rdx
+ jbe L(zero)
+# endif
+ addq %rdi, %rax
+ addq %rcx, %rax
+ VZEROUPPER
+ ret
+
+ .p2align 4
+L(aligned_more):
+# ifndef USE_AS_RAWMEMCHR
+ /* Calculate "rdx + rcx - VEC_SIZE" with "rdx - (VEC_SIZE - rcx)"
+ instead of "(rdx + rcx) - VEC_SIZE" to avoid possible addition
+ overflow. */
+ negq %rcx
+ addq $VEC_SIZE, %rcx
+
+ /* Check the end of data. */
+ subq %rcx, %rdx
+ jbe L(zero)
+# endif
+
+ addq $VEC_SIZE, %rdi
+
+# ifndef USE_AS_RAWMEMCHR
+ subq $(VEC_SIZE * 4), %rdx
+ jbe L(last_4x_vec_or_less)
+# endif
+
+L(more_4x_vec):
+ /* Check the first 4 * VEC_SIZE. Only one VEC_SIZE at a time
+ since data is only aligned to VEC_SIZE. */
+ VPCMPEQ (%rdi), %ymm0, %ymm1
+ vpmovmskb %ymm1, %eax
+ testl %eax, %eax
+ jnz L(first_vec_x0)
+
+ VPCMPEQ VEC_SIZE(%rdi), %ymm0, %ymm1
+ vpmovmskb %ymm1, %eax
+ testl %eax, %eax
+ jnz L(first_vec_x1)
+
+ VPCMPEQ (VEC_SIZE * 2)(%rdi), %ymm0, %ymm1
+ vpmovmskb %ymm1, %eax
+ testl %eax, %eax
+ jnz L(first_vec_x2)
+
+ VPCMPEQ (VEC_SIZE * 3)(%rdi), %ymm0, %ymm1
+ vpmovmskb %ymm1, %eax
+ testl %eax, %eax
+ jnz L(first_vec_x3)
+
+ addq $(VEC_SIZE * 4), %rdi
+
+# ifndef USE_AS_RAWMEMCHR
+ subq $(VEC_SIZE * 4), %rdx
+ jbe L(last_4x_vec_or_less)
+# endif
+
+ /* Align data to 4 * VEC_SIZE. */
+ movq %rdi, %rcx
+ andl $(4 * VEC_SIZE - 1), %ecx
+ andq $-(4 * VEC_SIZE), %rdi
+
+# ifndef USE_AS_RAWMEMCHR
+ /* Adjust length. */
+ addq %rcx, %rdx
+# endif
+
+ .p2align 4
+L(loop_4x_vec):
+ /* Compare 4 * VEC at a time forward. */
+ VPCMPEQ (%rdi), %ymm0, %ymm1
+ VPCMPEQ VEC_SIZE(%rdi), %ymm0, %ymm2
+ VPCMPEQ (VEC_SIZE * 2)(%rdi), %ymm0, %ymm3
+ VPCMPEQ (VEC_SIZE * 3)(%rdi), %ymm0, %ymm4
+
+ vpor %ymm1, %ymm2, %ymm5
+ vpor %ymm3, %ymm4, %ymm6
+ vpor %ymm5, %ymm6, %ymm5
+
+ vpmovmskb %ymm5, %eax
+ testl %eax, %eax
+ jnz L(4x_vec_end)
+
+ addq $(VEC_SIZE * 4), %rdi
+
+# ifdef USE_AS_RAWMEMCHR
+ jmp L(loop_4x_vec)
+# else
+ subq $(VEC_SIZE * 4), %rdx
+ ja L(loop_4x_vec)
+
+L(last_4x_vec_or_less):
+ /* Less than 4 * VEC and aligned to VEC_SIZE. */
+ addl $(VEC_SIZE * 2), %edx
+ jle L(last_2x_vec)
+
+ VPCMPEQ (%rdi), %ymm0, %ymm1
+ vpmovmskb %ymm1, %eax
+ testl %eax, %eax
+ jnz L(first_vec_x0)
+
+ VPCMPEQ VEC_SIZE(%rdi), %ymm0, %ymm1
+ vpmovmskb %ymm1, %eax
+ testl %eax, %eax
+ jnz L(first_vec_x1)
+
+ VPCMPEQ (VEC_SIZE * 2)(%rdi), %ymm0, %ymm1
+ vpmovmskb %ymm1, %eax
+ testl %eax, %eax
+
+ jnz L(first_vec_x2_check)
+ subl $VEC_SIZE, %edx
+ jle L(zero)
+
+ VPCMPEQ (VEC_SIZE * 3)(%rdi), %ymm0, %ymm1
+ vpmovmskb %ymm1, %eax
+ testl %eax, %eax
+
+ jnz L(first_vec_x3_check)
+ xorl %eax, %eax
+ VZEROUPPER
+ ret
+
+ .p2align 4
+L(last_2x_vec):
+ addl $(VEC_SIZE * 2), %edx
+ VPCMPEQ (%rdi), %ymm0, %ymm1
+ vpmovmskb %ymm1, %eax
+ testl %eax, %eax
+
+ jnz L(first_vec_x0_check)
+ subl $VEC_SIZE, %edx
+ jle L(zero)
+
+ VPCMPEQ VEC_SIZE(%rdi), %ymm0, %ymm1
+ vpmovmskb %ymm1, %eax
+ testl %eax, %eax
+ jnz L(first_vec_x1_check)
+ xorl %eax, %eax
+ VZEROUPPER
+ ret
+
+ .p2align 4
+L(first_vec_x0_check):
+ bsfl %eax, %eax
+ /* Check the end of data. */
+ cmpq %rax, %rdx
+ jbe L(zero)
+ addq %rdi, %rax
+ VZEROUPPER
+ ret
+
+ .p2align 4
+L(first_vec_x1_check):
+ bsfl %eax, %eax
+ /* Check the end of data. */
+ cmpq %rax, %rdx
+ jbe L(zero)
+ addq $VEC_SIZE, %rax
+ addq %rdi, %rax
+ VZEROUPPER
+ ret
+
+ .p2align 4
+L(first_vec_x2_check):
+ bsfl %eax, %eax
+ /* Check the end of data. */
+ cmpq %rax, %rdx
+ jbe L(zero)
+ addq $(VEC_SIZE * 2), %rax
+ addq %rdi, %rax
+ VZEROUPPER
+ ret
+
+ .p2align 4
+L(first_vec_x3_check):
+ bsfl %eax, %eax
+ /* Check the end of data. */
+ cmpq %rax, %rdx
+ jbe L(zero)
+ addq $(VEC_SIZE * 3), %rax
+ addq %rdi, %rax
+ VZEROUPPER
+ ret
+
+ .p2align 4
+L(zero):
+ VZEROUPPER
+L(null):
+ xorl %eax, %eax
+ ret
+# endif
+
+ .p2align 4
+L(first_vec_x0):
+ bsfl %eax, %eax
+ addq %rdi, %rax
+ VZEROUPPER
+ ret
+
+ .p2align 4
+L(first_vec_x1):
+ bsfl %eax, %eax
+ addq $VEC_SIZE, %rax
+ addq %rdi, %rax
+ VZEROUPPER
+ ret
+
+ .p2align 4
+L(first_vec_x2):
+ bsfl %eax, %eax
+ addq $(VEC_SIZE * 2), %rax
+ addq %rdi, %rax
+ VZEROUPPER
+ ret
+
+ .p2align 4
+L(4x_vec_end):
+ vpmovmskb %ymm1, %eax
+ testl %eax, %eax
+ jnz L(first_vec_x0)
+ vpmovmskb %ymm2, %eax
+ testl %eax, %eax
+ jnz L(first_vec_x1)
+ vpmovmskb %ymm3, %eax
+ testl %eax, %eax
+ jnz L(first_vec_x2)
+ vpmovmskb %ymm4, %eax
+ testl %eax, %eax
+L(first_vec_x3):
+ bsfl %eax, %eax
+ addq $(VEC_SIZE * 3), %rax
+ addq %rdi, %rax
+ VZEROUPPER
+ ret
+
+END (MEMCHR)
+#endif
diff --git a/sysdeps/x86_64/multiarch/memchr.S b/sysdeps/x86_64/multiarch/memchr.S
new file mode 100644
index 0000000..dee3fd1
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/memchr.S
@@ -0,0 +1,64 @@
+/* Multiple versions of memchr
+ All versions must be listed in ifunc-impl-list.c.
+ Copyright (C) 2017 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <sysdep.h>
+#include <init-arch.h>
+
+/* Define multiple versions only for the definition in libc. */
+#if IS_IN (libc)
+ .text
+ENTRY(memchr)
+ .type memchr, @gnu_indirect_function
+ LOAD_RTLD_GLOBAL_RO_RDX
+ HAS_ARCH_FEATURE (Prefer_No_VZEROUPPER)
+ jnz 1f
+ HAS_ARCH_FEATURE (AVX2_Usable)
+ jz 1f
+ HAS_ARCH_FEATURE (AVX_Fast_Unaligned_Load)
+ jz 1f
+ leaq __memchr_avx2(%rip), %rax
+ ret
+
+1: leaq __memchr_sse2(%rip), %rax
+ ret
+END(memchr)
+
+# undef ENTRY
+# define ENTRY(name) \
+ .type __memchr_sse2, @function; \
+ .p2align 4; \
+ .globl __memchr_sse2; \
+ .hidden __memchr_sse2; \
+ __memchr_sse2: cfi_startproc; \
+ CALL_MCOUNT
+# undef END
+# define END(name) \
+ cfi_endproc; .size __memchr_sse2, .-__memchr_sse2
+
+# ifdef SHARED
+# undef libc_hidden_builtin_def
+/* It doesn't make sense to send libc-internal memchr calls through a PLT.
+ The speedup we get from using AVX2 instructions is likely eaten away
+ by the indirect call in the PLT. */
+# define libc_hidden_builtin_def(name) \
+ .globl __GI_memchr; __GI_memchr = __memchr_sse2
+# endif
+#endif
+
+#include "../memchr.S"
diff --git a/sysdeps/x86_64/multiarch/rawmemchr-avx2.S b/sysdeps/x86_64/multiarch/rawmemchr-avx2.S
new file mode 100644
index 0000000..128f9ea
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/rawmemchr-avx2.S
@@ -0,0 +1,4 @@
+#define MEMCHR __rawmemchr_avx2
+#define USE_AS_RAWMEMCHR 1
+
+#include "memchr-avx2.S"
diff --git a/sysdeps/x86_64/multiarch/rawmemchr.S b/sysdeps/x86_64/multiarch/rawmemchr.S
new file mode 100644
index 0000000..b938f13
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/rawmemchr.S
@@ -0,0 +1,64 @@
+/* Multiple versions of rawmemchr
+ All versions must be listed in ifunc-impl-list.c.
+ Copyright (C) 2017 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <sysdep.h>
+#include <init-arch.h>
+
+/* Define multiple versions only for the definition in libc. */
+#if IS_IN (libc)
+ .text
+ENTRY(__rawmemchr)
+ .type __rawmemchr, @gnu_indirect_function
+ LOAD_RTLD_GLOBAL_RO_RDX
+ HAS_ARCH_FEATURE (Prefer_No_VZEROUPPER)
+ jnz 1f
+ HAS_ARCH_FEATURE (AVX2_Usable)
+ jz 1f
+ HAS_ARCH_FEATURE (AVX_Fast_Unaligned_Load)
+ jz 1f
+ leaq __rawmemchr_avx2(%rip), %rax
+ ret
+
+1: leaq __rawmemchr_sse2(%rip), %rax
+ ret
+END(__rawmemchr)
+
+# undef ENTRY
+# define ENTRY(name) \
+ .type __rawmemchr_sse2, @function; \
+ .p2align 4; \
+ .globl __rawmemchr_sse2; \
+ .hidden __rawmemchr_sse2; \
+ __rawmemchr_sse2: cfi_startproc; \
+ CALL_MCOUNT
+# undef END
+# define END(name) \
+ cfi_endproc; .size __rawmemchr_sse2, .-__rawmemchr_sse2
+
+# ifdef SHARED
+/* It doesn't make sense to send libc-internal rawmemchr calls through a
+ PLT. The speedup we get from using AVX2 instructions is likely eaten
+ away by the indirect call in the PLT. */
+# undef libc_hidden_def
+# define libc_hidden_def(name) \
+ .globl __GI___rawmemchr; __GI___rawmemchr = __rawmemchr_sse2
+# endif
+#endif
+
+#include "../rawmemchr.S"
diff --git a/sysdeps/x86_64/multiarch/wmemchr-avx2.S b/sysdeps/x86_64/multiarch/wmemchr-avx2.S
new file mode 100644
index 0000000..282854f
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/wmemchr-avx2.S
@@ -0,0 +1,4 @@
+#define MEMCHR __wmemchr_avx2
+#define USE_AS_WMEMCHR 1
+
+#include "memchr-avx2.S"
diff --git a/sysdeps/x86_64/multiarch/wmemchr.S b/sysdeps/x86_64/multiarch/wmemchr.S
new file mode 100644
index 0000000..b79df23
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/wmemchr.S
@@ -0,0 +1,67 @@
+/* Multiple versions of wmemchr
+ All versions must be listed in ifunc-impl-list.c.
+ Copyright (C) 2017 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <sysdep.h>
+#include <init-arch.h>
+
+/* Define multiple versions only for the definition in libc. */
+#if IS_IN (libc)
+ .text
+ENTRY(__wmemchr)
+ .type __wmemchr, @gnu_indirect_function
+ LOAD_RTLD_GLOBAL_RO_RDX
+ HAS_ARCH_FEATURE (Prefer_No_VZEROUPPER)
+ jnz 1f
+ HAS_ARCH_FEATURE (AVX2_Usable)
+ jz 1f
+ HAS_ARCH_FEATURE (AVX_Fast_Unaligned_Load)
+ jz 1f
+ leaq __wmemchr_avx2(%rip), %rax
+ ret
+
+1: leaq __wmemchr_sse2(%rip), %rax
+ ret
+END(__wmemchr)
+
+# undef ENTRY
+# define ENTRY(name) \
+ .type __wmemchr_sse2, @function; \
+ .p2align 4; \
+ .globl __wmemchr_sse2; \
+ .hidden __wmemchr_sse2; \
+ __wmemchr_sse2: cfi_startproc; \
+ CALL_MCOUNT
+# undef END
+# define END(name) \
+ cfi_endproc; .size __wmemchr_sse2, .-__wmemchr_sse2
+
+# ifdef SHARED
+/* It doesn't make sense to send libc-internal wmemchr calls through a PLT.
+ The speedup we get from using AVX2 instructions is likely eaten away
+ by the indirect call in the PLT. */
+# undef libc_hidden_def
+# define libc_hidden_def(name) \
+ .globl __GI___wmemchr; __GI___wmemchr = __wmemchr_sse2
+# undef libc_hidden_weak
+# define libc_hidden_weak(name) \
+ .weak __GI_wmemchr; __GI_wmemchr = __wmemchr_sse2
+# endif
+#endif
+
+#include "../wmemchr.S"
diff --git a/sysdeps/x86_64/wmemchr.S b/sysdeps/x86_64/wmemchr.S
new file mode 100644
index 0000000..9d8079b
--- /dev/null
+++ b/sysdeps/x86_64/wmemchr.S
@@ -0,0 +1,3 @@
+#define USE_AS_WMEMCHR 1
+
+#include "memchr.S"
--
2.9.4
* Re: [PATCH] x86-64: Add memchr/rawmemchr/wmemchr optimized with SSE2/AVX2
From: H.J. Lu @ 2017-06-02 19:47 UTC
To: GNU C Library
[-- Attachment #1: Type: text/plain, Size: 1628 bytes --]
On Thu, Jun 1, 2017 at 8:44 AM, H.J. Lu <hongjiu.lu@intel.com> wrote:
> SSE2 memchr is extended to support wmemchr. AVX2 memchr/rawmemchr/wmemchr
> are added to search 32 bytes with a single vector compare instruction.
> AVX2 memchr/rawmemchr/wmemchr are as fast as SSE2 memchr/rawmemchr/wmemchr
> for small sizes and up to 1.5X faster for larger sizes on Haswell and
> Skylake. Select AVX2 memchr/rawmemchr/wmemchr on AVX2 machines where
> vzeroupper is preferred and AVX unaligned load is fast.
>
> Any comments?
>
> H.J.
> ---
> * sysdeps/x86_64/memchr.S (MEMCHR): New.  Depends on whether
> USE_AS_WMEMCHR is defined.
> (PCMPEQ): Likewise.
> (memchr): Renamed to ...
> (MEMCHR): This. Support wmemchr if USE_AS_WMEMCHR is defined.
> Replace pcmpeqb with PCMPEQ.
> * sysdeps/x86_64/multiarch/Makefile (sysdep_routines): Add
> memchr-avx2, rawmemchr-avx2 and wmemchr-avx2.
> * sysdeps/x86_64/multiarch/ifunc-impl-list.c
> (__libc_ifunc_impl_list): Test __memchr_avx2, __memchr_sse2,
> __rawmemchr_avx2, __rawmemchr_sse2, __wmemchr_avx2 and
> __wmemchr_sse2.
> * sysdeps/x86_64/multiarch/memchr-avx2.S: New file.
> * sysdeps/x86_64/multiarch/memchr.S: Likewise.
> * sysdeps/x86_64/multiarch/rawmemchr-avx2.S: Likewise.
> * sysdeps/x86_64/multiarch/rawmemchr.S: Likewise.
> * sysdeps/x86_64/multiarch/wmemchr-avx2.S: Likewise.
> * sysdeps/x86_64/multiarch/wmemchr.S: Likewise.
> * sysdeps/x86_64/wmemchr.S: Likewise.
Updated patch to implement the IFUNC selector in C.
--
H.J.
[-- Attachment #2: 0004-x86-64-Optimize-memchr-rawmemchr-wmemchr-with-SSE2-A.patch --]
[-- Type: text/x-patch, Size: 29104 bytes --]
From 31294451236b5aba2d1f21940ad7eb6a4135da23 Mon Sep 17 00:00:00 2001
From: "H.J. Lu" <hjl.tools@gmail.com>
Date: Thu, 18 May 2017 11:10:09 -0700
Subject: [PATCH 4/8] x86-64: Optimize memchr/rawmemchr/wmemchr with SSE2/AVX2
SSE2 memchr is extended to support wmemchr. AVX2 memchr/rawmemchr/wmemchr
are added to search 32 bytes with a single vector compare instruction.
AVX2 memchr/rawmemchr/wmemchr are as fast as SSE2 memchr/rawmemchr/wmemchr
for small sizes and up to 1.5X faster for larger sizes on Haswell and
Skylake. Select AVX2 memchr/rawmemchr/wmemchr on AVX2 machines where
vzeroupper is preferred and AVX unaligned load is fast.
NB: It uses TZCNT instead of BSF since TZCNT produces the same result
as BSF for non-zero input.  TZCNT is faster than BSF, and on machines
that don't support TZCNT its encoding executes as BSF.
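A hedged illustration of that equivalence (the helper below is mine,
not part of the patch; _tzcnt_u32 needs a BMI-enabled compiler
target, and __builtin_ctz stands in for BSF, whose result it matches
for non-zero input):

#include <immintrin.h>
#include <assert.h>

/* For non-zero input TZCNT and BSF both return the index of the
   lowest set bit; TZCNT additionally defines the zero case (32 for a
   32-bit operand), while BSF leaves its destination undefined.  On
   CPUs without BMI the TZCNT encoding (rep bsf) executes as plain
   BSF, so the substitution is safe wherever the input is known to be
   non-zero, as it is after the testl/jz checks in this patch.  */
static void
check_tzcnt_matches_bsf (unsigned int x)
{
  if (x != 0)
    assert (_tzcnt_u32 (x) == (unsigned int) __builtin_ctz (x));
  else
    assert (_tzcnt_u32 (0) == 32);
}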
* sysdeps/x86_64/memchr.S (MEMCHR): New.  Depends on whether
USE_AS_WMEMCHR is defined.
(PCMPEQ): Likewise.
(memchr): Renamed to ...
(MEMCHR): This. Support wmemchr if USE_AS_WMEMCHR is defined.
Replace pcmpeqb with PCMPEQ.
* sysdeps/x86_64/multiarch/Makefile (sysdep_routines): Add
memchr-sse2, rawmemchr-sse2, memchr-avx2, rawmemchr-avx2,
wmemchr-sse2 and wmemchr-avx2.
* sysdeps/x86_64/multiarch/ifunc-impl-list.c
(__libc_ifunc_impl_list): Test __memchr_avx2, __memchr_sse2,
__rawmemchr_avx2, __rawmemchr_sse2, __wmemchr_avx2 and
__wmemchr_sse2.
* sysdeps/x86_64/multiarch/ifunc-sse2-avx2.h: New file.
* sysdeps/x86_64/multiarch/memchr-avx2.S: Likewise.
* sysdeps/x86_64/multiarch/memchr-sse2.S: Likewise.
* sysdeps/x86_64/multiarch/memchr.c: Likewise.
* sysdeps/x86_64/multiarch/rawmemchr-avx2.S: Likewise.
* sysdeps/x86_64/multiarch/rawmemchr-sse2.S: Likewise.
* sysdeps/x86_64/multiarch/rawmemchr.c: Likewise.
* sysdeps/x86_64/multiarch/wmemchr-avx2.S: Likewise.
* sysdeps/x86_64/multiarch/wmemchr-sse2.S: Likewise.
* sysdeps/x86_64/multiarch/wmemchr.c: Likewise.
* sysdeps/x86_64/wmemchr.S: Likewise.
---
sysdeps/x86_64/memchr.S | 69 +++---
sysdeps/x86_64/multiarch/Makefile | 2 +
sysdeps/x86_64/multiarch/ifunc-impl-list.c | 21 ++
sysdeps/x86_64/multiarch/ifunc-sse2-avx2.h | 36 +++
sysdeps/x86_64/multiarch/memchr-avx2.S | 340 +++++++++++++++++++++++++++++
sysdeps/x86_64/multiarch/memchr-sse2.S | 35 +++
sysdeps/x86_64/multiarch/memchr.c | 31 +++
sysdeps/x86_64/multiarch/rawmemchr-avx2.S | 4 +
sysdeps/x86_64/multiarch/rawmemchr-sse2.S | 36 +++
sysdeps/x86_64/multiarch/rawmemchr.c | 34 +++
sysdeps/x86_64/multiarch/wmemchr-avx2.S | 4 +
sysdeps/x86_64/multiarch/wmemchr-sse2.S | 39 ++++
sysdeps/x86_64/multiarch/wmemchr.c | 33 +++
sysdeps/x86_64/wmemchr.S | 3 +
14 files changed, 662 insertions(+), 25 deletions(-)
create mode 100644 sysdeps/x86_64/multiarch/ifunc-sse2-avx2.h
create mode 100644 sysdeps/x86_64/multiarch/memchr-avx2.S
create mode 100644 sysdeps/x86_64/multiarch/memchr-sse2.S
create mode 100644 sysdeps/x86_64/multiarch/memchr.c
create mode 100644 sysdeps/x86_64/multiarch/rawmemchr-avx2.S
create mode 100644 sysdeps/x86_64/multiarch/rawmemchr-sse2.S
create mode 100644 sysdeps/x86_64/multiarch/rawmemchr.c
create mode 100644 sysdeps/x86_64/multiarch/wmemchr-avx2.S
create mode 100644 sysdeps/x86_64/multiarch/wmemchr-sse2.S
create mode 100644 sysdeps/x86_64/multiarch/wmemchr.c
create mode 100644 sysdeps/x86_64/wmemchr.S
diff --git a/sysdeps/x86_64/memchr.S b/sysdeps/x86_64/memchr.S
index d3be012..3167cd8 100644
--- a/sysdeps/x86_64/memchr.S
+++ b/sysdeps/x86_64/memchr.S
@@ -18,17 +18,31 @@
#include <sysdep.h>
+#ifdef USE_AS_WMEMCHR
+# define MEMCHR wmemchr
+# define PCMPEQ pcmpeqd
+#else
+# define MEMCHR memchr
+# define PCMPEQ pcmpeqb
+#endif
+
/* fast SSE2 version with using pmaxub and 64 byte loop */
.text
-ENTRY(memchr)
+ENTRY(MEMCHR)
movd %esi, %xmm1
mov %edi, %ecx
+#ifdef USE_AS_WMEMCHR
+ test %rdx, %rdx
+ jz L(return_null)
+ shl $2, %rdx
+#else
punpcklbw %xmm1, %xmm1
test %rdx, %rdx
jz L(return_null)
punpcklbw %xmm1, %xmm1
+#endif
and $63, %ecx
pshufd $0, %xmm1, %xmm1
@@ -37,7 +51,7 @@ ENTRY(memchr)
ja L(crosscache)
movdqu (%rdi), %xmm0
- pcmpeqb %xmm1, %xmm0
+ PCMPEQ %xmm1, %xmm0
pmovmskb %xmm0, %eax
test %eax, %eax
@@ -58,7 +72,7 @@ L(crosscache):
and $-16, %rdi
movdqa (%rdi), %xmm0
- pcmpeqb %xmm1, %xmm0
+ PCMPEQ %xmm1, %xmm0
/* Check if there is a match. */
pmovmskb %xmm0, %eax
/* Remove the leading bytes. */
@@ -90,25 +104,25 @@ L(unaligned_no_match):
.p2align 4
L(loop_prolog):
movdqa (%rdi), %xmm0
- pcmpeqb %xmm1, %xmm0
+ PCMPEQ %xmm1, %xmm0
pmovmskb %xmm0, %eax
test %eax, %eax
jnz L(matches)
movdqa 16(%rdi), %xmm2
- pcmpeqb %xmm1, %xmm2
+ PCMPEQ %xmm1, %xmm2
pmovmskb %xmm2, %eax
test %eax, %eax
jnz L(matches16)
movdqa 32(%rdi), %xmm3
- pcmpeqb %xmm1, %xmm3
+ PCMPEQ %xmm1, %xmm3
pmovmskb %xmm3, %eax
test %eax, %eax
jnz L(matches32)
movdqa 48(%rdi), %xmm4
- pcmpeqb %xmm1, %xmm4
+ PCMPEQ %xmm1, %xmm4
add $64, %rdi
pmovmskb %xmm4, %eax
test %eax, %eax
@@ -121,25 +135,25 @@ L(loop_prolog):
jbe L(exit_loop)
movdqa (%rdi), %xmm0
- pcmpeqb %xmm1, %xmm0
+ PCMPEQ %xmm1, %xmm0
pmovmskb %xmm0, %eax
test %eax, %eax
jnz L(matches)
movdqa 16(%rdi), %xmm2
- pcmpeqb %xmm1, %xmm2
+ PCMPEQ %xmm1, %xmm2
pmovmskb %xmm2, %eax
test %eax, %eax
jnz L(matches16)
movdqa 32(%rdi), %xmm3
- pcmpeqb %xmm1, %xmm3
+ PCMPEQ %xmm1, %xmm3
pmovmskb %xmm3, %eax
test %eax, %eax
jnz L(matches32)
movdqa 48(%rdi), %xmm3
- pcmpeqb %xmm1, %xmm3
+ PCMPEQ %xmm1, %xmm3
pmovmskb %xmm3, %eax
add $64, %rdi
@@ -160,10 +174,10 @@ L(align64_loop):
movdqa 32(%rdi), %xmm3
movdqa 48(%rdi), %xmm4
- pcmpeqb %xmm1, %xmm0
- pcmpeqb %xmm1, %xmm2
- pcmpeqb %xmm1, %xmm3
- pcmpeqb %xmm1, %xmm4
+ PCMPEQ %xmm1, %xmm0
+ PCMPEQ %xmm1, %xmm2
+ PCMPEQ %xmm1, %xmm3
+ PCMPEQ %xmm1, %xmm4
pmaxub %xmm0, %xmm3
pmaxub %xmm2, %xmm4
@@ -186,9 +200,9 @@ L(align64_loop):
jnz L(matches16)
movdqa 32(%rdi), %xmm3
- pcmpeqb %xmm1, %xmm3
+ PCMPEQ %xmm1, %xmm3
- pcmpeqb 48(%rdi), %xmm1
+ PCMPEQ 48(%rdi), %xmm1
pmovmskb %xmm3, %eax
test %eax, %eax
jnz L(matches32)
@@ -204,26 +218,26 @@ L(exit_loop):
jle L(exit_loop_32)
movdqa (%rdi), %xmm0
- pcmpeqb %xmm1, %xmm0
+ PCMPEQ %xmm1, %xmm0
pmovmskb %xmm0, %eax
test %eax, %eax
jnz L(matches)
movdqa 16(%rdi), %xmm2
- pcmpeqb %xmm1, %xmm2
+ PCMPEQ %xmm1, %xmm2
pmovmskb %xmm2, %eax
test %eax, %eax
jnz L(matches16)
movdqa 32(%rdi), %xmm3
- pcmpeqb %xmm1, %xmm3
+ PCMPEQ %xmm1, %xmm3
pmovmskb %xmm3, %eax
test %eax, %eax
jnz L(matches32_1)
sub $16, %edx
jle L(return_null)
- pcmpeqb 48(%rdi), %xmm1
+ PCMPEQ 48(%rdi), %xmm1
pmovmskb %xmm1, %eax
test %eax, %eax
jnz L(matches48_1)
@@ -234,14 +248,14 @@ L(exit_loop):
L(exit_loop_32):
add $32, %edx
movdqa (%rdi), %xmm0
- pcmpeqb %xmm1, %xmm0
+ PCMPEQ %xmm1, %xmm0
pmovmskb %xmm0, %eax
test %eax, %eax
jnz L(matches_1)
sub $16, %edx
jbe L(return_null)
- pcmpeqb 16(%rdi), %xmm1
+ PCMPEQ 16(%rdi), %xmm1
pmovmskb %xmm1, %eax
test %eax, %eax
jnz L(matches16_1)
@@ -308,8 +322,13 @@ L(matches48_1):
L(return_null):
xor %eax, %eax
ret
-END(memchr)
+END(MEMCHR)
+#ifdef USE_AS_WMEMCHR
+libc_hidden_def (__wmemchr)
+weak_alias (__wmemchr, wmemchr)
+libc_hidden_weak (wmemchr)
+#else
strong_alias (memchr, __memchr)
-
libc_hidden_builtin_def(memchr)
+#endif
diff --git a/sysdeps/x86_64/multiarch/Makefile b/sysdeps/x86_64/multiarch/Makefile
index b040288..eb42b19 100644
--- a/sysdeps/x86_64/multiarch/Makefile
+++ b/sysdeps/x86_64/multiarch/Makefile
@@ -6,6 +6,7 @@ ifeq ($(subdir),string)
sysdep_routines += strncat-c stpncpy-c strncpy-c strcmp-ssse3 \
strcmp-sse2-unaligned strncmp-ssse3 \
+ memchr-sse2 rawmemchr-sse2 memchr-avx2 rawmemchr-avx2 \
memcmp-avx2-movbe \
memcmp-sse4 memcpy-ssse3 \
memmove-ssse3 \
@@ -32,6 +33,7 @@ endif
ifeq ($(subdir),wcsmbs)
sysdep_routines += wmemcmp-sse4 wmemcmp-ssse3 wmemcmp-c \
+ wmemchr-sse2 wmemchr-avx2 \
wmemcmp-avx2-movbe \
wcscpy-ssse3 wcscpy-c
endif
diff --git a/sysdeps/x86_64/multiarch/ifunc-impl-list.c b/sysdeps/x86_64/multiarch/ifunc-impl-list.c
index b61bc9f..f60535b 100644
--- a/sysdeps/x86_64/multiarch/ifunc-impl-list.c
+++ b/sysdeps/x86_64/multiarch/ifunc-impl-list.c
@@ -38,6 +38,13 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
size_t i = 0;
+ /* Support sysdeps/x86_64/multiarch/memchr.S. */
+ IFUNC_IMPL (i, name, memchr,
+ IFUNC_IMPL_ADD (array, i, memchr,
+ HAS_ARCH_FEATURE (AVX2_Usable),
+ __memchr_avx2)
+ IFUNC_IMPL_ADD (array, i, memchr, 1, __memchr_sse2))
+
/* Support sysdeps/x86_64/multiarch/memcmp.S. */
IFUNC_IMPL (i, name, memcmp,
IFUNC_IMPL_ADD (array, i, memcmp,
@@ -152,6 +159,13 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
__memset_avx512_no_vzeroupper)
)
+ /* Support sysdeps/x86_64/multiarch/rawmemchr.S. */
+ IFUNC_IMPL (i, name, rawmemchr,
+ IFUNC_IMPL_ADD (array, i, rawmemchr,
+ HAS_ARCH_FEATURE (AVX2_Usable),
+ __rawmemchr_avx2)
+ IFUNC_IMPL_ADD (array, i, rawmemchr, 1, __rawmemchr_sse2))
+
/* Support sysdeps/x86_64/multiarch/stpncpy.S. */
IFUNC_IMPL (i, name, stpncpy,
IFUNC_IMPL_ADD (array, i, stpncpy, HAS_CPU_FEATURE (SSSE3),
@@ -296,6 +310,13 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
__wcscpy_ssse3)
IFUNC_IMPL_ADD (array, i, wcscpy, 1, __wcscpy_sse2))
+ /* Support sysdeps/x86_64/multiarch/wmemchr.S. */
+ IFUNC_IMPL (i, name, wmemchr,
+ IFUNC_IMPL_ADD (array, i, wmemchr,
+ HAS_ARCH_FEATURE (AVX2_Usable),
+ __wmemchr_avx2)
+ IFUNC_IMPL_ADD (array, i, wmemchr, 1, __wmemchr_sse2))
+
/* Support sysdeps/x86_64/multiarch/wmemcmp.S. */
IFUNC_IMPL (i, name, wmemcmp,
IFUNC_IMPL_ADD (array, i, wmemcmp,
diff --git a/sysdeps/x86_64/multiarch/ifunc-sse2-avx2.h b/sysdeps/x86_64/multiarch/ifunc-sse2-avx2.h
new file mode 100644
index 0000000..25432b8
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/ifunc-sse2-avx2.h
@@ -0,0 +1,36 @@
+/* Common definition for ifunc selections optimized with SSE2 and AVX2.
+ All versions must be listed in ifunc-impl-list.c.
+ Copyright (C) 2017 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#include <init-arch.h>
+
+extern __typeof (REDIRECT_NAME) OPTIMIZE (sse2) attribute_hidden;
+extern __typeof (REDIRECT_NAME) OPTIMIZE (avx2) attribute_hidden;
+
+static inline void *
+IFUNC_SELECTOR (void)
+{
+ const struct cpu_features* cpu_features = __get_cpu_features ();
+
+ if (!CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_VZEROUPPER)
+ && CPU_FEATURES_ARCH_P (cpu_features, AVX2_Usable)
+ && CPU_FEATURES_ARCH_P (cpu_features, AVX_Fast_Unaligned_Load))
+ return OPTIMIZE (avx2);
+
+ return OPTIMIZE (sse2);
+}
diff --git a/sysdeps/x86_64/multiarch/memchr-avx2.S b/sysdeps/x86_64/multiarch/memchr-avx2.S
new file mode 100644
index 0000000..a7275ed
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/memchr-avx2.S
@@ -0,0 +1,340 @@
+/* memchr/wmemchr optimized with AVX2.
+ Copyright (C) 2017 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#if IS_IN (libc)
+
+# include <sysdep.h>
+
+# ifndef MEMCHR
+# define MEMCHR __memchr_avx2
+# endif
+
+# ifdef USE_AS_WMEMCHR
+# define VPCMPEQ vpcmpeqd
+# else
+# define VPCMPEQ vpcmpeqb
+# endif
+
+# ifndef VZEROUPPER
+# define VZEROUPPER vzeroupper
+# endif
+
+# define VEC_SIZE 32
+
+ .section .text.avx,"ax",@progbits
+ENTRY (MEMCHR)
+# ifndef USE_AS_RAWMEMCHR
+ /* Check for zero length. */
+ testq %rdx, %rdx
+ jz L(null)
+# endif
+ movl %edi, %ecx
+ /* Broadcast CHAR to YMM0. */
+ vmovd %esi, %xmm0
+# ifdef USE_AS_WMEMCHR
+ shl $2, %rdx
+ vpbroadcastd %xmm0, %ymm0
+# else
+ vpbroadcastb %xmm0, %ymm0
+# endif
+ /* Check if we may cross a page boundary with one vector load. */
+ andl $(2 * VEC_SIZE - 1), %ecx
+ cmpl $VEC_SIZE, %ecx
+ ja L(cross_page_boundary)
+
+ /* Check the first VEC_SIZE bytes. */
+ VPCMPEQ (%rdi), %ymm0, %ymm1
+ vpmovmskb %ymm1, %eax
+ testl %eax, %eax
+
+# ifndef USE_AS_RAWMEMCHR
+ jnz L(first_vec_x0_check)
+ /* Adjust length and check the end of data. */
+ subq $VEC_SIZE, %rdx
+ jbe L(zero)
+# else
+ jnz L(first_vec_x0)
+# endif
+
+ /* Align data for aligned loads in the loop. */
+ addq $VEC_SIZE, %rdi
+ andl $(VEC_SIZE - 1), %ecx
+ andq $-VEC_SIZE, %rdi
+
+# ifndef USE_AS_RAWMEMCHR
+ /* Adjust length. */
+ addq %rcx, %rdx
+
+ subq $(VEC_SIZE * 4), %rdx
+ jbe L(last_4x_vec_or_less)
+# endif
+ jmp L(more_4x_vec)
+
+ .p2align 4
+L(cross_page_boundary):
+ andl $(VEC_SIZE - 1), %ecx
+ andq $-VEC_SIZE, %rdi
+ VPCMPEQ (%rdi), %ymm0, %ymm1
+ vpmovmskb %ymm1, %eax
+ /* Remove the leading bytes. */
+ sarl %cl, %eax
+ testl %eax, %eax
+ jz L(aligned_more)
+ tzcntl %eax, %eax
+# ifndef USE_AS_RAWMEMCHR
+ /* Check the end of data. */
+ cmpq %rax, %rdx
+ jbe L(zero)
+# endif
+ addq %rdi, %rax
+ addq %rcx, %rax
+ VZEROUPPER
+ ret
+
+ .p2align 4
+L(aligned_more):
+# ifndef USE_AS_RAWMEMCHR
+ /* Calculate "rdx + rcx - VEC_SIZE" with "rdx - (VEC_SIZE - rcx)"
+ instead of "(rdx + rcx) - VEC_SIZE" to avoid possible addition
+ overflow. */
+ negq %rcx
+ addq $VEC_SIZE, %rcx
+
+ /* Check the end of data. */
+ subq %rcx, %rdx
+ jbe L(zero)
+# endif
+
+ addq $VEC_SIZE, %rdi
+
+# ifndef USE_AS_RAWMEMCHR
+ subq $(VEC_SIZE * 4), %rdx
+ jbe L(last_4x_vec_or_less)
+# endif
+
+L(more_4x_vec):
+ /* Check the first 4 * VEC_SIZE. Only one VEC_SIZE at a time
+ since data is only aligned to VEC_SIZE. */
+ VPCMPEQ (%rdi), %ymm0, %ymm1
+ vpmovmskb %ymm1, %eax
+ testl %eax, %eax
+ jnz L(first_vec_x0)
+
+ VPCMPEQ VEC_SIZE(%rdi), %ymm0, %ymm1
+ vpmovmskb %ymm1, %eax
+ testl %eax, %eax
+ jnz L(first_vec_x1)
+
+ VPCMPEQ (VEC_SIZE * 2)(%rdi), %ymm0, %ymm1
+ vpmovmskb %ymm1, %eax
+ testl %eax, %eax
+ jnz L(first_vec_x2)
+
+ VPCMPEQ (VEC_SIZE * 3)(%rdi), %ymm0, %ymm1
+ vpmovmskb %ymm1, %eax
+ testl %eax, %eax
+ jnz L(first_vec_x3)
+
+ addq $(VEC_SIZE * 4), %rdi
+
+# ifndef USE_AS_RAWMEMCHR
+ subq $(VEC_SIZE * 4), %rdx
+ jbe L(last_4x_vec_or_less)
+# endif
+
+ /* Align data to 4 * VEC_SIZE. */
+ movq %rdi, %rcx
+ andl $(4 * VEC_SIZE - 1), %ecx
+ andq $-(4 * VEC_SIZE), %rdi
+
+# ifndef USE_AS_RAWMEMCHR
+ /* Adjust length. */
+ addq %rcx, %rdx
+# endif
+
+ .p2align 4
+L(loop_4x_vec):
+ /* Compare 4 * VEC at a time forward. */
+ VPCMPEQ (%rdi), %ymm0, %ymm1
+ VPCMPEQ VEC_SIZE(%rdi), %ymm0, %ymm2
+ VPCMPEQ (VEC_SIZE * 2)(%rdi), %ymm0, %ymm3
+ VPCMPEQ (VEC_SIZE * 3)(%rdi), %ymm0, %ymm4
+
+ vpor %ymm1, %ymm2, %ymm5
+ vpor %ymm3, %ymm4, %ymm6
+ vpor %ymm5, %ymm6, %ymm5
+
+ vpmovmskb %ymm5, %eax
+ testl %eax, %eax
+ jnz L(4x_vec_end)
+
+ addq $(VEC_SIZE * 4), %rdi
+
+# ifdef USE_AS_RAWMEMCHR
+ jmp L(loop_4x_vec)
+# else
+ subq $(VEC_SIZE * 4), %rdx
+ ja L(loop_4x_vec)
+
+L(last_4x_vec_or_less):
+ /* Less than 4 * VEC and aligned to VEC_SIZE. */
+ addl $(VEC_SIZE * 2), %edx
+ jle L(last_2x_vec)
+
+ VPCMPEQ (%rdi), %ymm0, %ymm1
+ vpmovmskb %ymm1, %eax
+ testl %eax, %eax
+ jnz L(first_vec_x0)
+
+ VPCMPEQ VEC_SIZE(%rdi), %ymm0, %ymm1
+ vpmovmskb %ymm1, %eax
+ testl %eax, %eax
+ jnz L(first_vec_x1)
+
+ VPCMPEQ (VEC_SIZE * 2)(%rdi), %ymm0, %ymm1
+ vpmovmskb %ymm1, %eax
+ testl %eax, %eax
+
+ jnz L(first_vec_x2_check)
+ subl $VEC_SIZE, %edx
+ jle L(zero)
+
+ VPCMPEQ (VEC_SIZE * 3)(%rdi), %ymm0, %ymm1
+ vpmovmskb %ymm1, %eax
+ testl %eax, %eax
+
+ jnz L(first_vec_x3_check)
+ xorl %eax, %eax
+ VZEROUPPER
+ ret
+
+ .p2align 4
+L(last_2x_vec):
+ addl $(VEC_SIZE * 2), %edx
+ VPCMPEQ (%rdi), %ymm0, %ymm1
+ vpmovmskb %ymm1, %eax
+ testl %eax, %eax
+
+ jnz L(first_vec_x0_check)
+ subl $VEC_SIZE, %edx
+ jle L(zero)
+
+ VPCMPEQ VEC_SIZE(%rdi), %ymm0, %ymm1
+ vpmovmskb %ymm1, %eax
+ testl %eax, %eax
+ jnz L(first_vec_x1_check)
+ xorl %eax, %eax
+ VZEROUPPER
+ ret
+
+ .p2align 4
+L(first_vec_x0_check):
+ tzcntl %eax, %eax
+ /* Check the end of data. */
+ cmpq %rax, %rdx
+ jbe L(zero)
+ addq %rdi, %rax
+ VZEROUPPER
+ ret
+
+ .p2align 4
+L(first_vec_x1_check):
+ tzcntl %eax, %eax
+ /* Check the end of data. */
+ cmpq %rax, %rdx
+ jbe L(zero)
+ addq $VEC_SIZE, %rax
+ addq %rdi, %rax
+ VZEROUPPER
+ ret
+
+ .p2align 4
+L(first_vec_x2_check):
+ tzcntl %eax, %eax
+ /* Check the end of data. */
+ cmpq %rax, %rdx
+ jbe L(zero)
+ addq $(VEC_SIZE * 2), %rax
+ addq %rdi, %rax
+ VZEROUPPER
+ ret
+
+ .p2align 4
+L(first_vec_x3_check):
+ tzcntl %eax, %eax
+ /* Check the end of data. */
+ cmpq %rax, %rdx
+ jbe L(zero)
+ addq $(VEC_SIZE * 3), %rax
+ addq %rdi, %rax
+ VZEROUPPER
+ ret
+
+ .p2align 4
+L(zero):
+ VZEROUPPER
+L(null):
+ xorl %eax, %eax
+ ret
+# endif
+
+ .p2align 4
+L(first_vec_x0):
+ tzcntl %eax, %eax
+ addq %rdi, %rax
+ VZEROUPPER
+ ret
+
+ .p2align 4
+L(first_vec_x1):
+ tzcntl %eax, %eax
+ addq $VEC_SIZE, %rax
+ addq %rdi, %rax
+ VZEROUPPER
+ ret
+
+ .p2align 4
+L(first_vec_x2):
+ tzcntl %eax, %eax
+ addq $(VEC_SIZE * 2), %rax
+ addq %rdi, %rax
+ VZEROUPPER
+ ret
+
+ .p2align 4
+L(4x_vec_end):
+ vpmovmskb %ymm1, %eax
+ testl %eax, %eax
+ jnz L(first_vec_x0)
+ vpmovmskb %ymm2, %eax
+ testl %eax, %eax
+ jnz L(first_vec_x1)
+ vpmovmskb %ymm3, %eax
+ testl %eax, %eax
+ jnz L(first_vec_x2)
+ vpmovmskb %ymm4, %eax
+ testl %eax, %eax
+L(first_vec_x3):
+ tzcntl %eax, %eax
+ addq $(VEC_SIZE * 3), %rax
+ addq %rdi, %rax
+ VZEROUPPER
+ ret
+
+END (MEMCHR)
+#endif
diff --git a/sysdeps/x86_64/multiarch/memchr-sse2.S b/sysdeps/x86_64/multiarch/memchr-sse2.S
new file mode 100644
index 0000000..12c9c70
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/memchr-sse2.S
@@ -0,0 +1,35 @@
+/* memchr optimized with SSE2.
+ Copyright (C) 2017 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+#if IS_IN (libc)
+# define memchr __memchr_sse2
+
+# ifdef SHARED
+# undef libc_hidden_builtin_def
+/* It doesn't make sense to send libc-internal memchr calls through a PLT.
+ The speedup we get from using AVX2 instructions is likely eaten away
+ by the indirect call in the PLT. */
+# define libc_hidden_builtin_def(name) \
+ .globl __GI_memchr; __GI_memchr = __memchr_sse2
+# endif
+
+# undef strong_alias
+# define strong_alias(memchr, __memchr)
+#endif
+
+#include "../memchr.S"
diff --git a/sysdeps/x86_64/multiarch/memchr.c b/sysdeps/x86_64/multiarch/memchr.c
new file mode 100644
index 0000000..aabad61
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/memchr.c
@@ -0,0 +1,31 @@
+/* Multiple versions of memchr
+ All versions must be listed in ifunc-impl-list.c.
+ Copyright (C) 2017 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+/* Define multiple versions only for the definition in libc. */
+#if IS_IN (libc)
+# define memchr __redirect_memchr
+# include <string.h>
+# undef memchr
+
+# define SYMBOL_NAME memchr
+# include "ifunc-sse2-avx2.h"
+
+libc_ifunc_redirected (__redirect_memchr, memchr, IFUNC_SELECTOR ());
+strong_alias (memchr, __memchr)
+#endif
diff --git a/sysdeps/x86_64/multiarch/rawmemchr-avx2.S b/sysdeps/x86_64/multiarch/rawmemchr-avx2.S
new file mode 100644
index 0000000..128f9ea
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/rawmemchr-avx2.S
@@ -0,0 +1,4 @@
+#define MEMCHR __rawmemchr_avx2
+#define USE_AS_RAWMEMCHR 1
+
+#include "memchr-avx2.S"
diff --git a/sysdeps/x86_64/multiarch/rawmemchr-sse2.S b/sysdeps/x86_64/multiarch/rawmemchr-sse2.S
new file mode 100644
index 0000000..a44a7ff
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/rawmemchr-sse2.S
@@ -0,0 +1,36 @@
+/* rawmemchr optimized with SSE2.
+ Copyright (C) 2017 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+/* Define multiple versions only for the definition in libc. */
+#if IS_IN (libc)
+# define __rawmemchr __rawmemchr_sse2
+
+# ifdef SHARED
+/* It doesn't make sense to send libc-internal rawmemchr calls through a
+ PLT. The speedup we get from using AVX2 instructions is likely eaten
+ away by the indirect call in the PLT. */
+# undef libc_hidden_def
+# define libc_hidden_def(name) \
+ .globl __GI___rawmemchr; __GI___rawmemchr = __rawmemchr_sse2
+# endif
+
+# undef weak_alias
+# define weak_alias(__rawmemchr, rawmemchr)
+#endif
+
+#include "../rawmemchr.S"
diff --git a/sysdeps/x86_64/multiarch/rawmemchr.c b/sysdeps/x86_64/multiarch/rawmemchr.c
new file mode 100644
index 0000000..e1afa57
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/rawmemchr.c
@@ -0,0 +1,34 @@
+/* Multiple versions of rawmemchr
+ All versions must be listed in ifunc-impl-list.c.
+ Copyright (C) 2017 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+/* Define multiple versions only for the definition in libc. */
+#if IS_IN (libc)
+# define rawmemchr __redirect_rawmemchr
+# define __rawmemchr __redirect___rawmemchr
+# include <string.h>
+# undef rawmemchr
+# undef __rawmemchr
+
+# define SYMBOL_NAME rawmemchr
+# include "ifunc-sse2-avx2.h"
+
+libc_ifunc_redirected (__redirect_rawmemchr, __rawmemchr,
+ IFUNC_SELECTOR ());
+weak_alias (__rawmemchr, rawmemchr)
+#endif
diff --git a/sysdeps/x86_64/multiarch/wmemchr-avx2.S b/sysdeps/x86_64/multiarch/wmemchr-avx2.S
new file mode 100644
index 0000000..282854f
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/wmemchr-avx2.S
@@ -0,0 +1,4 @@
+#define MEMCHR __wmemchr_avx2
+#define USE_AS_WMEMCHR 1
+
+#include "memchr-avx2.S"
diff --git a/sysdeps/x86_64/multiarch/wmemchr-sse2.S b/sysdeps/x86_64/multiarch/wmemchr-sse2.S
new file mode 100644
index 0000000..49cd86a
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/wmemchr-sse2.S
@@ -0,0 +1,39 @@
+/* wmemchr optimized with SSE2.
+ Copyright (C) 2017 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+/* Define multiple versions only for the definition in libc. */
+#if IS_IN (libc)
+# define wmemchr __wmemchr_sse2
+
+# ifdef SHARED
+/* It doesn't make sense to send libc-internal wmemchr calls through a PLT.
+ The speedup we get from using AVX2 instructions is likely eaten away
+ by the indirect call in the PLT. */
+# undef libc_hidden_def
+# define libc_hidden_def(name) \
+ .globl __GI___wmemchr; __GI___wmemchr = __wmemchr_sse2
+# undef libc_hidden_weak
+# define libc_hidden_weak(name) \
+ .weak __GI_wmemchr; __GI_wmemchr = __wmemchr_sse2
+# endif
+
+# undef weak_alias
+# define weak_alias(__wmemchr, wmemchr)
+#endif
+
+#include "../wmemchr.S"
diff --git a/sysdeps/x86_64/multiarch/wmemchr.c b/sysdeps/x86_64/multiarch/wmemchr.c
new file mode 100644
index 0000000..1825ad7
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/wmemchr.c
@@ -0,0 +1,33 @@
+/* Multiple versions of wmemchr
+ All versions must be listed in ifunc-impl-list.c.
+ Copyright (C) 2017 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
+
+/* Define multiple versions only for the definition in libc. */
+#if IS_IN (libc)
+# define wmemchr __redirect_wmemchr
+# define __wmemchr __redirect___wmemchr
+# include <wchar.h>
+# undef wmemchr
+# undef __wmemchr
+
+# define SYMBOL_NAME wmemchr
+# include "ifunc-sse2-avx2.h"
+
+libc_ifunc_redirected (__redirect_wmemchr, __wmemchr, IFUNC_SELECTOR ());
+weak_alias (__wmemchr, wmemchr)
+#endif
diff --git a/sysdeps/x86_64/wmemchr.S b/sysdeps/x86_64/wmemchr.S
new file mode 100644
index 0000000..9d8079b
--- /dev/null
+++ b/sysdeps/x86_64/wmemchr.S
@@ -0,0 +1,3 @@
+#define USE_AS_WMEMCHR 1
+
+#include "memchr.S"
--
2.9.4
* Re: [PATCH] x86-64: Add memchr/rawmemchr/wmemchr optimized with SSE2/AVX2
From: H.J. Lu @ 2017-06-09 11:31 UTC
To: GNU C Library
On Fri, Jun 2, 2017 at 12:47 PM, H.J. Lu <hjl.tools@gmail.com> wrote:
> On Thu, Jun 1, 2017 at 8:44 AM, H.J. Lu <hongjiu.lu@intel.com> wrote:
>> SSE2 memchr is extended to support wmemchr. AVX2 memchr/rawmemchr/wmemchr
>> are added to search 32 bytes with a single vector compare instruction.
>> AVX2 memchr/rawmemchr/wmemchr are as fast as SSE2 memchr/rawmemchr/wmemchr
>> for small sizes and up to 1.5X faster for larger sizes on Haswell and
>> Skylake. Select AVX2 memchr/rawmemchr/wmemchr on AVX2 machines where
>> vzeroupper is preferred and AVX unaligned load is fast.
>>
>> Any comments?
>>
>> H.J.
>> ---
>> * sysdeps/x86_64/memchr.S (MEMCHR): New.  Depends on whether
>> USE_AS_WMEMCHR is defined.
>> (PCMPEQ): Likewise.
>> (memchr): Renamed to ...
>> (MEMCHR): This. Support wmemchr if USE_AS_WMEMCHR is defined.
>> Replace pcmpeqb with PCMPEQ.
>> * sysdeps/x86_64/multiarch/Makefile (sysdep_routines): Add
>> memchr-avx2, rawmemchr-avx2 and wmemchr-avx2.
>> * sysdeps/x86_64/multiarch/ifunc-impl-list.c
>> (__libc_ifunc_impl_list): Test __memchr_avx2, __memchr_sse2,
>> __rawmemchr_avx2, __rawmemchr_sse2, __wmemchr_avx2 and
>> __wmemchr_sse2.
>> * sysdeps/x86_64/multiarch/memchr-avx2.S: New file.
>> * sysdeps/x86_64/multiarch/memchr.S: Likewise.
>> * sysdeps/x86_64/multiarch/rawmemchr-avx2.S: Likewise.
>> * sysdeps/x86_64/multiarch/rawmemchr.S: Likewise.
>> * sysdeps/x86_64/multiarch/wmemchr-avx2.S: Likewise.
>> * sysdeps/x86_64/multiarch/wmemchr.S: Likewise.
>> * sysdeps/x86_64/wmemchr.S: Likewise.
>
> Updated patch to implement IFUNC selector in C.
>
> --
> H.J.
I will check it in.
--
H.J.