From: Noah Goldstein <goldstein.w.n@gmail.com>
To: libc-alpha@sourceware.org
Cc: goldstein.w.n@gmail.com, hjl.tools@gmail.com, carlos@systemhalted.org
Subject: [PATCH v3 3/3] x86: Update strlen-evex-base to use new reg/vec macros.
Date: Fri, 14 Oct 2022 13:22:05 -0500
Message-ID: <20221014182205.115792-3-goldstein.w.n@gmail.com>
In-Reply-To: <20221014182205.115792-1-goldstein.w.n@gmail.com>

To avoid duplicating the VMM / GPR / mask insn macros in all incoming
evex512 files, use the macros defined in 'reg-macros.h' and
'{vec}-macros.h'.

This commit does not change libc.so.

Build tested on x86-64.
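
For reference, the shared headers key the register names and mask
instructions off VEC_SIZE, roughly along the lines below (a minimal
sketch mirroring the macros this patch removes, not the exact header
contents):

	/* Sketch only: reg-macros.h picks the GPR width and mask-insn
	   mnemonics from VEC_SIZE, so each evex256/512 file no longer
	   redefines them.  */
	#if VEC_SIZE == 64
	# define KMOV		kmovq
	# define KORTEST	kortestq
	# define VRAX		rax
	# define VRCX		rcx
	# define VRDX		rdx
	#elif VEC_SIZE == 32
	# define KMOV		kmovd
	# define KORTEST	kortestd
	# define VRAX		eax
	# define VRCX		ecx
	# define VRDX		edx
	#endif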
---
sysdeps/x86_64/multiarch/strlen-evex-base.S | 116 +++++++-------------
sysdeps/x86_64/multiarch/strlen-evex512.S | 4 +-
2 files changed, 44 insertions(+), 76 deletions(-)
diff --git a/sysdeps/x86_64/multiarch/strlen-evex-base.S b/sysdeps/x86_64/multiarch/strlen-evex-base.S
index 418e9f8411..8af9791e92 100644
--- a/sysdeps/x86_64/multiarch/strlen-evex-base.S
+++ b/sysdeps/x86_64/multiarch/strlen-evex-base.S
@@ -36,42 +36,10 @@
# define CHAR_SIZE 1
# endif
-# define XMM0 xmm16
# define PAGE_SIZE 4096
# define CHAR_PER_VEC (VEC_SIZE / CHAR_SIZE)
-# if VEC_SIZE == 64
-# define KMOV kmovq
-# define KORTEST kortestq
-# define RAX rax
-# define RCX rcx
-# define RDX rdx
-# define SHR shrq
-# define TEXTSUFFIX evex512
-# define VMM0 zmm16
-# define VMM1 zmm17
-# define VMM2 zmm18
-# define VMM3 zmm19
-# define VMM4 zmm20
-# define VMOVA vmovdqa64
-# elif VEC_SIZE == 32
-/* Currently Unused. */
-# define KMOV kmovd
-# define KORTEST kortestd
-# define RAX eax
-# define RCX ecx
-# define RDX edx
-# define SHR shrl
-# define TEXTSUFFIX evex256
-# define VMM0 ymm16
-# define VMM1 ymm17
-# define VMM2 ymm18
-# define VMM3 ymm19
-# define VMM4 ymm20
-# define VMOVA vmovdqa32
-# endif
-
- .section .text.TEXTSUFFIX, "ax", @progbits
+ .section SECTION(.text),"ax",@progbits
/* Aligning entry point to 64 byte, provides better performance for
one vector length string. */
ENTRY_P2ALIGN (STRLEN, 6)
@@ -86,18 +54,18 @@ ENTRY_P2ALIGN (STRLEN, 6)
# endif
movl %edi, %eax
- vpxorq %XMM0, %XMM0, %XMM0
+ vpxorq %VEC_xmm(0), %VEC_xmm(0), %VEC_xmm(0)
andl $(PAGE_SIZE - 1), %eax
cmpl $(PAGE_SIZE - VEC_SIZE), %eax
ja L(page_cross)
/* Compare [w]char for null, mask bit will be set for match. */
- VPCMP $0, (%rdi), %VMM0, %k0
- KMOV %k0, %RAX
- test %RAX, %RAX
+ VPCMP $0, (%rdi), %VEC(0), %k0
+ KMOV %k0, %VRAX
+ test %VRAX, %VRAX
jz L(align_more)
- bsf %RAX, %RAX
+ bsf %VRAX, %VRAX
# ifdef USE_AS_STRNLEN
cmpq %rsi, %rax
cmovnb %rsi, %rax
@@ -120,7 +88,7 @@ L(align_more):
movq %rax, %rdx
subq %rdi, %rdx
# ifdef USE_AS_WCSLEN
- SHR $2, %RDX
+ shr $2, %VRDX
# endif
/* At this point rdx contains [w]chars already compared. */
subq %rsi, %rdx
@@ -131,9 +99,9 @@ L(align_more):
# endif
/* Loop unroll 4 times for 4 vector loop. */
- VPCMP $0, (%rax), %VMM0, %k0
- KMOV %k0, %RCX
- test %RCX, %RCX
+ VPCMP $0, (%rax), %VEC(0), %k0
+ KMOV %k0, %VRCX
+ test %VRCX, %VRCX
jnz L(ret_vec_x1)
# ifdef USE_AS_STRNLEN
@@ -141,9 +109,9 @@ L(align_more):
jbe L(ret_max)
# endif
- VPCMP $0, VEC_SIZE(%rax), %VMM0, %k0
- KMOV %k0, %RCX
- test %RCX, %RCX
+ VPCMP $0, VEC_SIZE(%rax), %VEC(0), %k0
+ KMOV %k0, %VRCX
+ test %VRCX, %VRCX
jnz L(ret_vec_x2)
# ifdef USE_AS_STRNLEN
@@ -151,9 +119,9 @@ L(align_more):
jbe L(ret_max)
# endif
- VPCMP $0, (VEC_SIZE * 2)(%rax), %VMM0, %k0
- KMOV %k0, %RCX
- test %RCX, %RCX
+ VPCMP $0, (VEC_SIZE * 2)(%rax), %VEC(0), %k0
+ KMOV %k0, %VRCX
+ test %VRCX, %VRCX
jnz L(ret_vec_x3)
# ifdef USE_AS_STRNLEN
@@ -161,9 +129,9 @@ L(align_more):
jbe L(ret_max)
# endif
- VPCMP $0, (VEC_SIZE * 3)(%rax), %VMM0, %k0
- KMOV %k0, %RCX
- test %RCX, %RCX
+ VPCMP $0, (VEC_SIZE * 3)(%rax), %VEC(0), %k0
+ KMOV %k0, %VRCX
+ test %VRCX, %VRCX
jnz L(ret_vec_x4)
# ifdef USE_AS_STRNLEN
@@ -179,7 +147,7 @@ L(align_more):
# ifdef USE_AS_STRNLEN
subq %rax, %rcx
# ifdef USE_AS_WCSLEN
- SHR $2, %RCX
+ shr $2, %VRCX
# endif
/* rcx contains number of [w]char will be recompared due to
alignment fixes. rdx must be incremented by rcx to offset
@@ -199,42 +167,42 @@ L(loop_entry):
# endif
/* VPMINU and VPCMP combination provide better performance as
compared to alternative combinations. */
- VMOVA (VEC_SIZE * 4)(%rax), %VMM1
- VPMINU (VEC_SIZE * 5)(%rax), %VMM1, %VMM2
- VMOVA (VEC_SIZE * 6)(%rax), %VMM3
- VPMINU (VEC_SIZE * 7)(%rax), %VMM3, %VMM4
+ VMOVA (VEC_SIZE * 4)(%rax), %VEC(1)
+ VPMINU (VEC_SIZE * 5)(%rax), %VEC(1), %VEC(2)
+ VMOVA (VEC_SIZE * 6)(%rax), %VEC(3)
+ VPMINU (VEC_SIZE * 7)(%rax), %VEC(3), %VEC(4)
- VPTESTN %VMM2, %VMM2, %k0
- VPTESTN %VMM4, %VMM4, %k1
+ VPTESTN %VEC(2), %VEC(2), %k0
+ VPTESTN %VEC(4), %VEC(4), %k1
subq $-(VEC_SIZE * 4), %rax
KORTEST %k0, %k1
jz L(loop)
- VPTESTN %VMM1, %VMM1, %k2
- KMOV %k2, %RCX
- test %RCX, %RCX
+ VPTESTN %VEC(1), %VEC(1), %k2
+ KMOV %k2, %VRCX
+ test %VRCX, %VRCX
jnz L(ret_vec_x1)
- KMOV %k0, %RCX
+ KMOV %k0, %VRCX
/* At this point, if k0 is non zero, null char must be in the
second vector. */
- test %RCX, %RCX
+ test %VRCX, %VRCX
jnz L(ret_vec_x2)
- VPTESTN %VMM3, %VMM3, %k3
- KMOV %k3, %RCX
- test %RCX, %RCX
+ VPTESTN %VEC(3), %VEC(3), %k3
+ KMOV %k3, %VRCX
+ test %VRCX, %VRCX
jnz L(ret_vec_x3)
/* At this point null [w]char must be in the fourth vector so no
need to check. */
- KMOV %k1, %RCX
+ KMOV %k1, %VRCX
/* Fourth, third, second vector terminating are pretty much
same, implemented this way to avoid branching and reuse code
from pre loop exit condition. */
L(ret_vec_x4):
- bsf %RCX, %RCX
+ bsf %VRCX, %VRCX
subq %rdi, %rax
# ifdef USE_AS_WCSLEN
subq $-(VEC_SIZE * 3), %rax
@@ -250,7 +218,7 @@ L(ret_vec_x4):
ret
L(ret_vec_x3):
- bsf %RCX, %RCX
+ bsf %VRCX, %VRCX
subq %rdi, %rax
# ifdef USE_AS_WCSLEN
subq $-(VEC_SIZE * 2), %rax
@@ -268,7 +236,7 @@ L(ret_vec_x3):
L(ret_vec_x2):
subq $-VEC_SIZE, %rax
L(ret_vec_x1):
- bsf %RCX, %RCX
+ bsf %VRCX, %VRCX
subq %rdi, %rax
# ifdef USE_AS_WCSLEN
shrq $2, %rax
@@ -289,13 +257,13 @@ L(page_cross):
/* ecx contains number of w[char] to be skipped as a result
of address alignment. */
xorq %rdi, %rax
- VPCMP $0, (PAGE_SIZE - VEC_SIZE)(%rax), %VMM0, %k0
- KMOV %k0, %RAX
+ VPCMP $0, (PAGE_SIZE - VEC_SIZE)(%rax), %VEC(0), %k0
+ KMOV %k0, %VRAX
/* Ignore number of character for alignment adjustment. */
- SHR %cl, %RAX
+ shr %cl, %VRAX
jz L(align_more)
- bsf %RAX, %RAX
+ bsf %VRAX, %VRAX
# ifdef USE_AS_STRNLEN
cmpq %rsi, %rax
cmovnb %rsi, %rax
diff --git a/sysdeps/x86_64/multiarch/strlen-evex512.S b/sysdeps/x86_64/multiarch/strlen-evex512.S
index 116f8981c8..dfd0a7821b 100644
--- a/sysdeps/x86_64/multiarch/strlen-evex512.S
+++ b/sysdeps/x86_64/multiarch/strlen-evex512.S
@@ -2,6 +2,6 @@
# define STRLEN __strlen_evex512
#endif
-#define VEC_SIZE 64
-
+#include "evex512-vecs.h"
+#include "reg-macros.h"
#include "strlen-evex-base.S"
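
With those two includes in place, the generic code in
strlen-evex-base.S resolves to the evex512 forms automatically, e.g.
(assuming the sketch above; VEC(0) maps to zmm16 as VMM0 did before):

	KMOV	%k0, %VRAX			/* -> kmovq %k0, %rax  */
	test	%VRAX, %VRAX			/* -> test %rax, %rax  */
	VPCMP	$0, (%rdi), %VEC(0), %k0	/* VEC(0) -> %zmm16  */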
--
2.34.1