public inbox for libc-alpha@sourceware.org
From: Noah Goldstein <goldstein.w.n@gmail.com>
To: libc-alpha@sourceware.org
Cc: goldstein.w.n@gmail.com, hjl.tools@gmail.com, carlos@systemhalted.org
Subject: [PATCH v10 4/6] x86: Update memset to use new VEC macros
Date: Fri, 14 Oct 2022 22:00:28 -0500
Message-ID: <20221015030030.204172-4-goldstein.w.n@gmail.com>
In-Reply-To: <20221015030030.204172-1-goldstein.w.n@gmail.com>

Replace %VEC(n) with %VMM(n).

This commit does not change libc.so.

Build tested on x86-64.
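
For readers joining at this patch: the exact macro definitions live in the
x86-*-vecs.h headers added earlier in this series, so the expansions below are
an illustrative sketch rather than the literal header contents.  With the AVX
header included, VMM(n) is expected to name the full-width (ymm) register,
while VMM_128(n) and VMM_256(n) give the xmm and ymm views of the same
register, replacing the old per-file VEC(n) / XMM0 / YMM0 defines:

	#include "x86-avx-vecs.h"	/* Sets VEC_SIZE to 32 and provides the VMM family.  */

	/* Assumed expansions, for illustration only:  */
	VMOVU	%VMM(0), (%rdi)		/* ~ vmovdqu %ymm0, (%rdi)  */
	VMOVU	%VMM_256(0), (%rdi)	/* ~ vmovdqu %ymm0, (%rdi)  */
	VMOVU	%VMM_128(0), (%rdi)	/* ~ vmovdqu %xmm0, (%rdi)  */

	/* Built against x86-evex512-vecs.h instead, VMM(0) would map to
	   %zmm16 and VMOVU to vmovdqu64, matching the definitions this
	   patch removes from the individual memset files.  */

The practical effect is that each memset variant pulls in one header instead
of hand-defining VEC_SIZE, MOV_SIZE, RET_SIZE, SECTION and the move
instructions.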
---
 .../memset-avx2-unaligned-erms-rtm.S          |  8 +--
 .../multiarch/memset-avx2-unaligned-erms.S    | 14 +---
 .../multiarch/memset-avx512-unaligned-erms.S  | 20 +-----
 .../multiarch/memset-evex-unaligned-erms.S    | 20 +-----
 .../multiarch/memset-sse2-unaligned-erms.S    | 10 +--
 .../multiarch/memset-vec-unaligned-erms.S     | 70 ++++++++-----------
 6 files changed, 43 insertions(+), 99 deletions(-)

diff --git a/sysdeps/x86_64/multiarch/memset-avx2-unaligned-erms-rtm.S b/sysdeps/x86_64/multiarch/memset-avx2-unaligned-erms-rtm.S
index 8ac3e479bb..bc8605faf3 100644
--- a/sysdeps/x86_64/multiarch/memset-avx2-unaligned-erms-rtm.S
+++ b/sysdeps/x86_64/multiarch/memset-avx2-unaligned-erms-rtm.S
@@ -1,10 +1,6 @@
-#define ZERO_UPPER_VEC_REGISTERS_RETURN \
-  ZERO_UPPER_VEC_REGISTERS_RETURN_XTEST
+#include "x86-avx-rtm-vecs.h"
 
-#define VZEROUPPER_RETURN jmp	 L(return)
-
-#define SECTION(p) p##.avx.rtm
 #define MEMSET_SYMBOL(p,s)	p##_avx2_##s##_rtm
 #define WMEMSET_SYMBOL(p,s)	p##_avx2_##s##_rtm
 
-#include "memset-avx2-unaligned-erms.S"
+# include "memset-avx2-unaligned-erms.S"
diff --git a/sysdeps/x86_64/multiarch/memset-avx2-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-avx2-unaligned-erms.S
index a9054a9122..47cf5072a4 100644
--- a/sysdeps/x86_64/multiarch/memset-avx2-unaligned-erms.S
+++ b/sysdeps/x86_64/multiarch/memset-avx2-unaligned-erms.S
@@ -4,14 +4,9 @@
 
 # define USE_WITH_AVX2	1
 
-# define VEC_SIZE	32
-# define MOV_SIZE	4
-# define RET_SIZE	4
-
-# define VEC(i)		ymm##i
-
-# define VMOVU     vmovdqu
-# define VMOVA     vmovdqa
+# ifndef VEC_SIZE
+#  include "x86-avx-vecs.h"
+# endif
 
 # define MEMSET_SET_VEC0_AND_SET_RETURN(d, r) \
   vmovd d, %xmm0; \
@@ -26,9 +21,6 @@
 # define WMEMSET_VDUP_TO_VEC0_HIGH() vpbroadcastd %xmm0, %ymm0
 # define WMEMSET_VDUP_TO_VEC0_LOW() vpbroadcastd %xmm0, %xmm0
 
-# ifndef SECTION
-#  define SECTION(p)		p##.avx
-# endif
 # ifndef MEMSET_SYMBOL
 #  define MEMSET_SYMBOL(p,s)	p##_avx2_##s
 # endif
diff --git a/sysdeps/x86_64/multiarch/memset-avx512-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-avx512-unaligned-erms.S
index 47623b8ee8..84145b6c27 100644
--- a/sysdeps/x86_64/multiarch/memset-avx512-unaligned-erms.S
+++ b/sysdeps/x86_64/multiarch/memset-avx512-unaligned-erms.S
@@ -4,26 +4,14 @@
 
 # define USE_WITH_AVX512	1
 
-# define VEC_SIZE	64
-# define MOV_SIZE	6
-# define RET_SIZE	1
-
-# define XMM0		xmm16
-# define YMM0		ymm16
-# define VEC0		zmm16
-# define VEC(i)		VEC##i
-
-# define VMOVU     vmovdqu64
-# define VMOVA     vmovdqa64
-
-# define VZEROUPPER
+# include "x86-evex512-vecs.h"
 
 # define MEMSET_SET_VEC0_AND_SET_RETURN(d, r) \
-  vpbroadcastb d, %VEC0; \
+  vpbroadcastb d, %VMM(0); \
   movq r, %rax
 
 # define WMEMSET_SET_VEC0_AND_SET_RETURN(d, r) \
-  vpbroadcastd d, %VEC0; \
+  vpbroadcastd d, %VMM(0); \
   movq r, %rax
 
 # define MEMSET_VDUP_TO_VEC0_HIGH()
@@ -32,8 +20,6 @@
 # define WMEMSET_VDUP_TO_VEC0_HIGH()
 # define WMEMSET_VDUP_TO_VEC0_LOW()
 
-# define SECTION(p)		p##.evex512
-
 #ifndef MEMSET_SYMBOL
 # define MEMSET_SYMBOL(p,s)	p##_avx512_##s
 #endif
diff --git a/sysdeps/x86_64/multiarch/memset-evex-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-evex-unaligned-erms.S
index ac4b2d2d50..1f03b26bf8 100644
--- a/sysdeps/x86_64/multiarch/memset-evex-unaligned-erms.S
+++ b/sysdeps/x86_64/multiarch/memset-evex-unaligned-erms.S
@@ -4,26 +4,14 @@
 
 # define USE_WITH_EVEX	1
 
-# define VEC_SIZE	32
-# define MOV_SIZE	6
-# define RET_SIZE	1
-
-# define XMM0		xmm16
-# define YMM0		ymm16
-# define VEC0		ymm16
-# define VEC(i)		VEC##i
-
-# define VMOVU     vmovdqu64
-# define VMOVA     vmovdqa64
-
-# define VZEROUPPER
+# include "x86-evex256-vecs.h"
 
 # define MEMSET_SET_VEC0_AND_SET_RETURN(d, r) \
-  vpbroadcastb d, %VEC0; \
+  vpbroadcastb d, %VMM(0); \
   movq r, %rax
 
 # define WMEMSET_SET_VEC0_AND_SET_RETURN(d, r) \
-  vpbroadcastd d, %VEC0; \
+  vpbroadcastd d, %VMM(0); \
   movq r, %rax
 
 # define MEMSET_VDUP_TO_VEC0_HIGH()
@@ -32,8 +20,6 @@
 # define WMEMSET_VDUP_TO_VEC0_HIGH()
 # define WMEMSET_VDUP_TO_VEC0_LOW()
 
-# define SECTION(p)		p##.evex
-
 #ifndef MEMSET_SYMBOL
 # define MEMSET_SYMBOL(p,s)	p##_evex_##s
 #endif
diff --git a/sysdeps/x86_64/multiarch/memset-sse2-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-sse2-unaligned-erms.S
index 44f9b8888b..34b245d8ca 100644
--- a/sysdeps/x86_64/multiarch/memset-sse2-unaligned-erms.S
+++ b/sysdeps/x86_64/multiarch/memset-sse2-unaligned-erms.S
@@ -26,13 +26,7 @@
 # include <sysdep.h>
 # define USE_WITH_SSE2	1
 
-# define VEC_SIZE	16
-# define MOV_SIZE	3
-# define RET_SIZE	1
-
-# define VEC(i)		xmm##i
-# define VMOVU     movups
-# define VMOVA     movaps
+# include "x86-sse2-vecs.h"
 
 # define MEMSET_SET_VEC0_AND_SET_RETURN(d, r) \
   movd d, %xmm0; \
@@ -52,8 +46,6 @@
 # define WMEMSET_VDUP_TO_VEC0_HIGH()
 # define WMEMSET_VDUP_TO_VEC0_LOW()
 
-# define SECTION(p)		p
-
 # ifndef MEMSET_SYMBOL
 #  define MEMSET_SYMBOL(p,s)	p##_sse2_##s
 # endif
diff --git a/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
index 905d0fa464..03de0ab907 100644
--- a/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
+++ b/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
@@ -34,14 +34,6 @@
 # define WMEMSET_CHK_SYMBOL(p,s)	WMEMSET_SYMBOL(p, s)
 #endif
 
-#ifndef XMM0
-# define XMM0				xmm0
-#endif
-
-#ifndef YMM0
-# define YMM0				ymm0
-#endif
-
 #ifndef VZEROUPPER
 # if VEC_SIZE > 16
 #  define VZEROUPPER			vzeroupper
@@ -150,8 +142,8 @@ L(entry_from_wmemset):
 	cmpq	$(VEC_SIZE * 2), %rdx
 	ja	L(more_2x_vec)
 	/* From VEC and to 2 * VEC.  No branch when size == VEC_SIZE.  */
-	VMOVU	%VEC(0), -VEC_SIZE(%rdi,%rdx)
-	VMOVU	%VEC(0), (%rdi)
+	VMOVU	%VMM(0), -VEC_SIZE(%rdi,%rdx)
+	VMOVU	%VMM(0), (%rdi)
 	VZEROUPPER_RETURN
 #if defined USE_MULTIARCH && IS_IN (libc)
 END (MEMSET_SYMBOL (__memset, unaligned))
@@ -175,19 +167,19 @@ ENTRY_P2ALIGN (MEMSET_SYMBOL (__memset, unaligned_erms), 6)
 	cmp	$(VEC_SIZE * 2), %RDX_LP
 	ja	L(stosb_more_2x_vec)
 	/* From VEC and to 2 * VEC.  No branch when size == VEC_SIZE.  */
-	VMOVU	%VEC(0), (%rdi)
-	VMOVU	%VEC(0), (VEC_SIZE * -1)(%rdi, %rdx)
+	VMOVU	%VMM(0), (%rdi)
+	VMOVU	%VMM(0), (VEC_SIZE * -1)(%rdi, %rdx)
 	VZEROUPPER_RETURN
 #endif
 
 	.p2align 4,, 4
 L(last_2x_vec):
 #ifdef USE_LESS_VEC_MASK_STORE
-	VMOVU	%VEC(0), (VEC_SIZE * -2)(%rdi, %rdx)
-	VMOVU	%VEC(0), (VEC_SIZE * -1)(%rdi, %rdx)
+	VMOVU	%VMM(0), (VEC_SIZE * -2)(%rdi, %rdx)
+	VMOVU	%VMM(0), (VEC_SIZE * -1)(%rdi, %rdx)
 #else
-	VMOVU	%VEC(0), (VEC_SIZE * -2)(%rdi)
-	VMOVU	%VEC(0), (VEC_SIZE * -1)(%rdi)
+	VMOVU	%VMM(0), (VEC_SIZE * -2)(%rdi)
+	VMOVU	%VMM(0), (VEC_SIZE * -1)(%rdi)
 #endif
 	VZEROUPPER_RETURN
 
@@ -221,7 +213,7 @@ L(less_vec_from_wmemset):
 	bzhil	%edx, %ecx, %ecx
 	kmovd	%ecx, %k1
 # endif
-	vmovdqu8 %VEC(0), (%rax){%k1}
+	vmovdqu8 %VMM(0), (%rax){%k1}
 	VZEROUPPER_RETURN
 
 # if defined USE_MULTIARCH && IS_IN (libc)
@@ -249,8 +241,8 @@ L(stosb_more_2x_vec):
 	   and (4x, 8x] jump to target.  */
 L(more_2x_vec):
 	/* Store next 2x vec regardless.  */
-	VMOVU	%VEC(0), (%rdi)
-	VMOVU	%VEC(0), (VEC_SIZE * 1)(%rdi)
+	VMOVU	%VMM(0), (%rdi)
+	VMOVU	%VMM(0), (VEC_SIZE * 1)(%rdi)
 
 
 	/* Two different methods of setting up pointers / compare. The two
@@ -278,8 +270,8 @@ L(more_2x_vec):
 #endif
 
 	/* Store next 2x vec regardless.  */
-	VMOVU	%VEC(0), (VEC_SIZE * 2)(%rax)
-	VMOVU	%VEC(0), (VEC_SIZE * 3)(%rax)
+	VMOVU	%VMM(0), (VEC_SIZE * 2)(%rax)
+	VMOVU	%VMM(0), (VEC_SIZE * 3)(%rax)
 
 
 #if defined USE_WITH_EVEX || defined USE_WITH_AVX512
@@ -304,20 +296,20 @@ L(more_2x_vec):
 	andq	$(VEC_SIZE * -2), %LOOP_REG
 	.p2align 4
 L(loop):
-	VMOVA	%VEC(0), LOOP_4X_OFFSET(%LOOP_REG)
-	VMOVA	%VEC(0), (VEC_SIZE + LOOP_4X_OFFSET)(%LOOP_REG)
-	VMOVA	%VEC(0), (VEC_SIZE * 2 + LOOP_4X_OFFSET)(%LOOP_REG)
-	VMOVA	%VEC(0), (VEC_SIZE * 3 + LOOP_4X_OFFSET)(%LOOP_REG)
+	VMOVA	%VMM(0), LOOP_4X_OFFSET(%LOOP_REG)
+	VMOVA	%VMM(0), (VEC_SIZE + LOOP_4X_OFFSET)(%LOOP_REG)
+	VMOVA	%VMM(0), (VEC_SIZE * 2 + LOOP_4X_OFFSET)(%LOOP_REG)
+	VMOVA	%VMM(0), (VEC_SIZE * 3 + LOOP_4X_OFFSET)(%LOOP_REG)
 	subq	$-(VEC_SIZE * 4), %LOOP_REG
 	cmpq	%END_REG, %LOOP_REG
 	jb	L(loop)
 	.p2align 4,, MOV_SIZE
 L(last_4x_vec):
-	VMOVU	%VEC(0), LOOP_4X_OFFSET(%END_REG)
-	VMOVU	%VEC(0), (VEC_SIZE + LOOP_4X_OFFSET)(%END_REG)
-	VMOVU	%VEC(0), (VEC_SIZE * 2 + LOOP_4X_OFFSET)(%END_REG)
-	VMOVU	%VEC(0), (VEC_SIZE * 3 + LOOP_4X_OFFSET)(%END_REG)
-L(return):
+	VMOVU	%VMM(0), LOOP_4X_OFFSET(%END_REG)
+	VMOVU	%VMM(0), (VEC_SIZE + LOOP_4X_OFFSET)(%END_REG)
+	VMOVU	%VMM(0), (VEC_SIZE * 2 + LOOP_4X_OFFSET)(%END_REG)
+	VMOVU	%VMM(0), (VEC_SIZE * 3 + LOOP_4X_OFFSET)(%END_REG)
+L(return_vzeroupper):
 #if VEC_SIZE > 16
 	ZERO_UPPER_VEC_REGISTERS_RETURN
 #else
@@ -355,7 +347,7 @@ L(cross_page):
 	jge	L(between_16_31)
 #endif
 #ifndef USE_XMM_LESS_VEC
-	MOVQ	%XMM0, %SET_REG64
+	MOVQ	%VMM_128(0), %SET_REG64
 #endif
 	cmpl	$8, %edx
 	jge	L(between_8_15)
@@ -374,8 +366,8 @@ L(between_0_0):
 	.p2align 4,, SMALL_MEMSET_ALIGN(MOV_SIZE, RET_SIZE)
 	/* From 32 to 63.  No branch when size == 32.  */
 L(between_32_63):
-	VMOVU	%YMM0, (%LESS_VEC_REG)
-	VMOVU	%YMM0, -32(%LESS_VEC_REG, %rdx)
+	VMOVU	%VMM_256(0), (%LESS_VEC_REG)
+	VMOVU	%VMM_256(0), -32(%LESS_VEC_REG, %rdx)
 	VZEROUPPER_RETURN
 #endif
 
@@ -383,8 +375,8 @@ L(between_32_63):
 	.p2align 4,, SMALL_MEMSET_ALIGN(MOV_SIZE, 1)
 L(between_16_31):
 	/* From 16 to 31.  No branch when size == 16.  */
-	VMOVU	%XMM0, (%LESS_VEC_REG)
-	VMOVU	%XMM0, -16(%LESS_VEC_REG, %rdx)
+	VMOVU	%VMM_128(0), (%LESS_VEC_REG)
+	VMOVU	%VMM_128(0), -16(%LESS_VEC_REG, %rdx)
 	ret
 #endif
 
@@ -394,8 +386,8 @@ L(between_16_31):
 L(between_8_15):
 	/* From 8 to 15.  No branch when size == 8.  */
 #ifdef USE_XMM_LESS_VEC
-	MOVQ	%XMM0, (%rdi)
-	MOVQ	%XMM0, -8(%rdi, %rdx)
+	MOVQ	%VMM_128(0), (%rdi)
+	MOVQ	%VMM_128(0), -8(%rdi, %rdx)
 #else
 	movq	%SET_REG64, (%LESS_VEC_REG)
 	movq	%SET_REG64, -8(%LESS_VEC_REG, %rdx)
@@ -408,8 +400,8 @@ L(between_8_15):
 L(between_4_7):
 	/* From 4 to 7.  No branch when size == 4.  */
 #ifdef USE_XMM_LESS_VEC
-	MOVD	%XMM0, (%rdi)
-	MOVD	%XMM0, -4(%rdi, %rdx)
+	MOVD	%VMM_128(0), (%rdi)
+	MOVD	%VMM_128(0), -4(%rdi, %rdx)
 #else
 	movl	%SET_REG32, (%LESS_VEC_REG)
 	movl	%SET_REG32, -4(%LESS_VEC_REG, %rdx)
-- 
2.34.1


Thread overview: 72+ messages
2022-10-14 16:40 [PATCH v1 1/3] x86: Update evex256/512 vec macros Noah Goldstein
2022-10-14 16:40 ` [PATCH v1 2/3] x86: Add macros for GPRs / mask insn based on VEC_SIZE Noah Goldstein
2022-10-14 18:02   ` H.J. Lu
2022-10-14 18:26     ` Noah Goldstein
2022-10-14 18:35       ` H.J. Lu
2022-10-14 18:38         ` Noah Goldstein
2022-10-14 18:53           ` H.J. Lu
2022-10-14 19:00             ` Noah Goldstein
2022-10-14 19:13               ` H.J. Lu
2022-10-14 19:15                 ` Noah Goldstein
2022-10-14 16:40 ` [PATCH v1 3/3] x86: Update strlen-evex-base to use new reg/vec macros Noah Goldstein
2022-10-14 17:31 ` [PATCH v1 1/3] x86: Update evex256/512 vec macros H.J. Lu
2022-10-14 18:01 ` [PATCH v2 " Noah Goldstein
2022-10-14 18:01   ` [PATCH v2 2/3] x86: Add macros for GPRs / mask insn based on VEC_SIZE Noah Goldstein
2022-10-14 18:01   ` [PATCH v2 3/3] x86: Update strlen-evex-base to use new reg/vec macros Noah Goldstein
2022-10-14 18:22 ` [PATCH v3 1/3] x86: Update evex256/512 vec macros Noah Goldstein
2022-10-14 18:22   ` [PATCH v3 2/3] x86: Add macros for GPRs / mask insn based on VEC_SIZE Noah Goldstein
2022-10-14 18:22   ` [PATCH v3 3/3] x86: Update strlen-evex-base to use new reg/vec macros Noah Goldstein
2022-10-14 18:41 ` [PATCH v4 1/3] x86: Update evex256/512 vec macros Noah Goldstein
2022-10-14 18:41   ` [PATCH v4 2/3] x86: Add macros for GPRs / mask insn based on VEC_SIZE Noah Goldstein
2022-10-14 18:41   ` [PATCH v4 3/3] x86: Update strlen-evex-base to use new reg/vec macros Noah Goldstein
2022-10-14 21:14 ` [PATCH v5 1/3] x86: Update evex256/512 vec macros Noah Goldstein
2022-10-14 21:15   ` [PATCH v5 2/3] x86: Add macros for GPRs / mask insn based on VEC_SIZE Noah Goldstein
2022-10-14 21:28     ` H.J. Lu
2022-10-14 22:01       ` Noah Goldstein
2022-10-14 22:05         ` H.J. Lu
2022-10-14 22:27           ` Noah Goldstein
2022-10-14 22:41             ` H.J. Lu
2022-10-14 23:15               ` Noah Goldstein
2022-10-14 23:22                 ` H.J. Lu
2022-10-14 23:25                   ` Noah Goldstein
2022-10-14 21:15   ` [PATCH v5 3/3] x86: Update strlen-evex-base to use new reg/vec macros Noah Goldstein
2022-10-14 22:39 ` [PATCH v6 1/7] x86: Update and move evex256/512 vec macros Noah Goldstein
2022-10-14 22:39   ` [PATCH v6 2/7] x86: Add macros for GPRs / mask insn based on VEC_SIZE Noah Goldstein
2022-10-14 22:39   ` [PATCH v6 3/7] x86: Update memrchr to use new VEC macros Noah Goldstein
2022-10-14 22:39   ` [PATCH v6 4/7] x86: Remove now unused vec header macros Noah Goldstein
2022-10-14 22:39   ` [PATCH v6 5/7] x86: Update memmove to use new VEC macros Noah Goldstein
2022-10-14 22:39   ` [PATCH v6 6/7] x86: Update memset " Noah Goldstein
2022-10-14 22:39   ` [PATCH v6 7/7] x86: Update strlen-evex-base to use new reg/vec macros Noah Goldstein
2022-10-15  0:06 ` [PATCH v8 1/6] x86: Update VEC macros to complete API for evex/evex512 impls Noah Goldstein
2022-10-15  0:06   ` [PATCH v8 2/6] x86: Update memrchr to use new VEC macros Noah Goldstein
2022-10-15  0:06   ` [PATCH v8 3/6] x86: Update memmove " Noah Goldstein
2022-10-15  0:06   ` [PATCH v8 4/6] x86: Update memset " Noah Goldstein
2022-10-15  0:06   ` [PATCH v8 5/6] x86: Remove now unused vec header macros Noah Goldstein
2022-10-15  0:06   ` [PATCH v8 6/6] x86: Update strlen-evex-base to use new reg/vec macros Noah Goldstein
2022-10-15  0:12   ` [PATCH v8 1/6] x86: Update VEC macros to complete API for evex/evex512 impls H.J. Lu
2022-10-15  0:20     ` Noah Goldstein
2022-10-15  0:20 ` [PATCH v9 " Noah Goldstein
2022-10-15  0:20   ` [PATCH v9 2/6] x86: Update memrchr to use new VEC macros Noah Goldstein
2022-10-15  2:48     ` H.J. Lu
2022-10-15  0:20   ` [PATCH v9 3/6] x86: Update memmove " Noah Goldstein
2022-10-15  2:52     ` H.J. Lu
2022-10-15  2:57       ` Noah Goldstein
2022-10-15  0:20   ` [PATCH v9 4/6] x86: Update memset " Noah Goldstein
2022-10-15  2:53     ` H.J. Lu
2022-10-15  0:20   ` [PATCH v9 5/6] x86: Remove now unused vec header macros Noah Goldstein
2022-10-15  2:56     ` H.J. Lu
2022-10-15  0:21   ` [PATCH v9 6/6] x86: Update strlen-evex-base to use new reg/vec macros Noah Goldstein
2022-10-15  2:58     ` H.J. Lu
2022-10-15  2:45   ` [PATCH v9 1/6] x86: Update VEC macros to complete API for evex/evex512 impls H.J. Lu
2022-10-15  3:00 ` [PATCH v10 " Noah Goldstein
2022-10-15  3:00   ` [PATCH v10 2/6] x86: Update memrchr to use new VEC macros Noah Goldstein
2022-10-15  3:44     ` Sunil Pandey
2022-10-15  3:00   ` [PATCH v10 3/6] x86: Update memmove " Noah Goldstein
2022-10-15  3:43     ` Sunil Pandey
2022-10-15  3:00   ` Noah Goldstein [this message]
2022-10-15  3:42     ` [PATCH v10 4/6] x86: Update memset " Sunil Pandey
2022-10-15  3:00   ` [PATCH v10 5/6] x86: Remove now unused vec header macros Noah Goldstein
2022-10-15  3:39     ` Sunil Pandey
2022-10-15  3:00   ` [PATCH v10 6/6] x86: Update strlen-evex-base to use new reg/vec macros Noah Goldstein
2022-10-15  3:48     ` Sunil Pandey
2022-10-15  3:37   ` [PATCH v10 1/6] x86: Update VEC macros to complete API for evex/evex512 impls Sunil Pandey
