From: Sunil Pandey <skpgkp2@gmail.com>
To: Noah Goldstein <goldstein.w.n@gmail.com>
Cc: libc-alpha@sourceware.org
Subject: Re: [PATCH v10 3/6] x86: Update memmove to use new VEC macros
Date: Fri, 14 Oct 2022 20:43:24 -0700
Message-ID: <CAMAf5_eDMY0UfFWBvSX0N2SaVFOQhz1ZdwgFgNtvx=5ZRtyjmw@mail.gmail.com>
In-Reply-To: <20221015030030.204172-3-goldstein.w.n@gmail.com>
On Fri, Oct 14, 2022 at 8:02 PM Noah Goldstein via Libc-alpha
<libc-alpha@sourceware.org> wrote:
>
> Replace %VEC(n) -> %VMM(n)
>
> This commit does not change libc.so
>
> Tested build on x86-64
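>
> For context: the per-file defines removed below expanded %VEC(n)
> directly to a register class (xmm/ymm/zmm), and the shared vec headers
> included instead are expected to provide the same mapping behind
> %VMM(n), plus explicitly sized views such as %VMM_256(n). A minimal
> sketch of the assumed shape for the AVX case (illustrative only, not
> the literal contents of x86-avx-vecs.h), reconstructed from the old
> defines this patch deletes:
>
>   /* Sketch: generic vector register naming for VEC_SIZE == 32,
>      assuming the header keeps the old AVX mapping.  */
>   #define VEC_SIZE 32
>   #define VMM(i) ymm##i      /* full-width vector, previously VEC(i) */
>   #define VMM_256(i) ymm##i  /* explicit 256-bit view, previously YMM##i */
>   #define VMOVU vmovdqu      /* unaligned load/store */
>   #define VMOVA vmovdqa      /* aligned load/store */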
> ---
> .../memmove-avx-unaligned-erms-rtm.S | 11 +-
> .../multiarch/memmove-avx-unaligned-erms.S | 9 +-
> .../multiarch/memmove-avx512-unaligned-erms.S | 30 +-
> .../multiarch/memmove-evex-unaligned-erms.S | 30 +-
> .../multiarch/memmove-sse2-unaligned-erms.S | 11 +-
> .../multiarch/memmove-vec-unaligned-erms.S | 262 +++++++++---------
> 6 files changed, 132 insertions(+), 221 deletions(-)
>
> diff --git a/sysdeps/x86_64/multiarch/memmove-avx-unaligned-erms-rtm.S b/sysdeps/x86_64/multiarch/memmove-avx-unaligned-erms-rtm.S
> index 67a55f0c85..20746e6713 100644
> --- a/sysdeps/x86_64/multiarch/memmove-avx-unaligned-erms-rtm.S
> +++ b/sysdeps/x86_64/multiarch/memmove-avx-unaligned-erms-rtm.S
> @@ -1,16 +1,7 @@
> #if IS_IN (libc)
> -# define VEC_SIZE 32
> -# define VEC(i) ymm##i
> -# define VMOVNT vmovntdq
> -# define VMOVU vmovdqu
> -# define VMOVA vmovdqa
> -# define MOV_SIZE 4
> -# define ZERO_UPPER_VEC_REGISTERS_RETURN \
> - ZERO_UPPER_VEC_REGISTERS_RETURN_XTEST
>
> -# define VZEROUPPER_RETURN jmp L(return)
> +# include "x86-avx-rtm-vecs.h"
>
> -# define SECTION(p) p##.avx.rtm
> # define MEMMOVE_SYMBOL(p,s) p##_avx_##s##_rtm
>
> # include "memmove-vec-unaligned-erms.S"
> diff --git a/sysdeps/x86_64/multiarch/memmove-avx-unaligned-erms.S b/sysdeps/x86_64/multiarch/memmove-avx-unaligned-erms.S
> index a14b155667..4e4b4635f9 100644
> --- a/sysdeps/x86_64/multiarch/memmove-avx-unaligned-erms.S
> +++ b/sysdeps/x86_64/multiarch/memmove-avx-unaligned-erms.S
> @@ -2,14 +2,7 @@
>
> #if ISA_SHOULD_BUILD (3)
>
> -# define VEC_SIZE 32
> -# define VEC(i) ymm##i
> -# define VMOVNT vmovntdq
> -# define VMOVU vmovdqu
> -# define VMOVA vmovdqa
> -# define MOV_SIZE 4
> -
> -# define SECTION(p) p##.avx
> +# include "x86-avx-vecs.h"
>
> # ifndef MEMMOVE_SYMBOL
> # define MEMMOVE_SYMBOL(p,s) p##_avx_##s
> diff --git a/sysdeps/x86_64/multiarch/memmove-avx512-unaligned-erms.S b/sysdeps/x86_64/multiarch/memmove-avx512-unaligned-erms.S
> index 8d1568a7ba..cca97e38f8 100644
> --- a/sysdeps/x86_64/multiarch/memmove-avx512-unaligned-erms.S
> +++ b/sysdeps/x86_64/multiarch/memmove-avx512-unaligned-erms.S
> @@ -2,35 +2,7 @@
>
> #if ISA_SHOULD_BUILD (4)
>
> -# define VEC_SIZE 64
> -# define XMM0 xmm16
> -# define XMM1 xmm17
> -# define YMM0 ymm16
> -# define YMM1 ymm17
> -# define VEC0 zmm16
> -# define VEC1 zmm17
> -# define VEC2 zmm18
> -# define VEC3 zmm19
> -# define VEC4 zmm20
> -# define VEC5 zmm21
> -# define VEC6 zmm22
> -# define VEC7 zmm23
> -# define VEC8 zmm24
> -# define VEC9 zmm25
> -# define VEC10 zmm26
> -# define VEC11 zmm27
> -# define VEC12 zmm28
> -# define VEC13 zmm29
> -# define VEC14 zmm30
> -# define VEC15 zmm31
> -# define VEC(i) VEC##i
> -# define VMOVNT vmovntdq
> -# define VMOVU vmovdqu64
> -# define VMOVA vmovdqa64
> -# define VZEROUPPER
> -# define MOV_SIZE 6
> -
> -# define SECTION(p) p##.evex512
> +# include "x86-evex512-vecs.h"
>
> # ifndef MEMMOVE_SYMBOL
> # define MEMMOVE_SYMBOL(p,s) p##_avx512_##s
> diff --git a/sysdeps/x86_64/multiarch/memmove-evex-unaligned-erms.S b/sysdeps/x86_64/multiarch/memmove-evex-unaligned-erms.S
> index 2373017358..1f7b5715f7 100644
> --- a/sysdeps/x86_64/multiarch/memmove-evex-unaligned-erms.S
> +++ b/sysdeps/x86_64/multiarch/memmove-evex-unaligned-erms.S
> @@ -2,35 +2,7 @@
>
> #if ISA_SHOULD_BUILD (4)
>
> -# define VEC_SIZE 32
> -# define XMM0 xmm16
> -# define XMM1 xmm17
> -# define YMM0 ymm16
> -# define YMM1 ymm17
> -# define VEC0 ymm16
> -# define VEC1 ymm17
> -# define VEC2 ymm18
> -# define VEC3 ymm19
> -# define VEC4 ymm20
> -# define VEC5 ymm21
> -# define VEC6 ymm22
> -# define VEC7 ymm23
> -# define VEC8 ymm24
> -# define VEC9 ymm25
> -# define VEC10 ymm26
> -# define VEC11 ymm27
> -# define VEC12 ymm28
> -# define VEC13 ymm29
> -# define VEC14 ymm30
> -# define VEC15 ymm31
> -# define VEC(i) VEC##i
> -# define VMOVNT vmovntdq
> -# define VMOVU vmovdqu64
> -# define VMOVA vmovdqa64
> -# define VZEROUPPER
> -# define MOV_SIZE 6
> -
> -# define SECTION(p) p##.evex
> +# include "x86-evex256-vecs.h"
>
> # ifndef MEMMOVE_SYMBOL
> # define MEMMOVE_SYMBOL(p,s) p##_evex_##s
> diff --git a/sysdeps/x86_64/multiarch/memmove-sse2-unaligned-erms.S b/sysdeps/x86_64/multiarch/memmove-sse2-unaligned-erms.S
> index 422a079902..8431bcd000 100644
> --- a/sysdeps/x86_64/multiarch/memmove-sse2-unaligned-erms.S
> +++ b/sysdeps/x86_64/multiarch/memmove-sse2-unaligned-erms.S
> @@ -22,18 +22,9 @@
> so we need this to build for ISA V2 builds. */
> #if ISA_SHOULD_BUILD (2)
>
> -# include <sysdep.h>
> +# include "x86-sse2-vecs.h"
>
> -# define VEC_SIZE 16
> -# define VEC(i) xmm##i
> # define PREFETCHNT prefetchnta
> -# define VMOVNT movntdq
> -/* Use movups and movaps for smaller code sizes. */
> -# define VMOVU movups
> -# define VMOVA movaps
> -# define MOV_SIZE 3
> -
> -# define SECTION(p) p
>
> # ifndef MEMMOVE_SYMBOL
> # define MEMMOVE_SYMBOL(p,s) p##_sse2_##s
> diff --git a/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S b/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
> index 04747133b7..5b758cae5e 100644
> --- a/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
> +++ b/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
> @@ -60,14 +60,6 @@
> # define MEMMOVE_CHK_SYMBOL(p,s) MEMMOVE_SYMBOL(p, s)
> #endif
>
> -#ifndef XMM0
> -# define XMM0 xmm0
> -#endif
> -
> -#ifndef YMM0
> -# define YMM0 ymm0
> -#endif
> -
> #ifndef VZEROUPPER
> # if VEC_SIZE > 16
> # define VZEROUPPER vzeroupper
> @@ -225,13 +217,13 @@ L(start):
> cmp $VEC_SIZE, %RDX_LP
> jb L(less_vec)
> /* Load regardless. */
> - VMOVU (%rsi), %VEC(0)
> + VMOVU (%rsi), %VMM(0)
> cmp $(VEC_SIZE * 2), %RDX_LP
> ja L(more_2x_vec)
> /* From VEC and to 2 * VEC. No branch when size == VEC_SIZE. */
> - VMOVU -VEC_SIZE(%rsi,%rdx), %VEC(1)
> - VMOVU %VEC(0), (%rdi)
> - VMOVU %VEC(1), -VEC_SIZE(%rdi,%rdx)
> + VMOVU -VEC_SIZE(%rsi,%rdx), %VMM(1)
> + VMOVU %VMM(0), (%rdi)
> + VMOVU %VMM(1), -VEC_SIZE(%rdi,%rdx)
> #if !(defined USE_MULTIARCH && IS_IN (libc))
> ZERO_UPPER_VEC_REGISTERS_RETURN
> #else
> @@ -270,15 +262,15 @@ L(start_erms):
> cmp $VEC_SIZE, %RDX_LP
> jb L(less_vec)
> /* Load regardless. */
> - VMOVU (%rsi), %VEC(0)
> + VMOVU (%rsi), %VMM(0)
> cmp $(VEC_SIZE * 2), %RDX_LP
> ja L(movsb_more_2x_vec)
> /* From VEC and to 2 * VEC. No branch when size == VEC_SIZE.
> */
> - VMOVU -VEC_SIZE(%rsi, %rdx), %VEC(1)
> - VMOVU %VEC(0), (%rdi)
> - VMOVU %VEC(1), -VEC_SIZE(%rdi, %rdx)
> -L(return):
> + VMOVU -VEC_SIZE(%rsi, %rdx), %VMM(1)
> + VMOVU %VMM(0), (%rdi)
> + VMOVU %VMM(1), -VEC_SIZE(%rdi, %rdx)
> +L(return_vzeroupper):
> # if VEC_SIZE > 16
> ZERO_UPPER_VEC_REGISTERS_RETURN
> # else
> @@ -359,10 +351,10 @@ L(between_16_31):
> .p2align 4,, 10
> L(between_32_63):
> /* From 32 to 63. No branch when size == 32. */
> - VMOVU (%rsi), %YMM0
> - VMOVU -32(%rsi, %rdx), %YMM1
> - VMOVU %YMM0, (%rdi)
> - VMOVU %YMM1, -32(%rdi, %rdx)
> + VMOVU (%rsi), %VMM_256(0)
> + VMOVU -32(%rsi, %rdx), %VMM_256(1)
> + VMOVU %VMM_256(0), (%rdi)
> + VMOVU %VMM_256(1), -32(%rdi, %rdx)
> VZEROUPPER_RETURN
> #endif
>
> @@ -380,12 +372,12 @@ L(last_4x_vec):
> /* Copy from 2 * VEC + 1 to 4 * VEC, inclusively. */
>
> /* VEC(0) and VEC(1) have already been loaded. */
> - VMOVU -VEC_SIZE(%rsi, %rdx), %VEC(2)
> - VMOVU -(VEC_SIZE * 2)(%rsi, %rdx), %VEC(3)
> - VMOVU %VEC(0), (%rdi)
> - VMOVU %VEC(1), VEC_SIZE(%rdi)
> - VMOVU %VEC(2), -VEC_SIZE(%rdi, %rdx)
> - VMOVU %VEC(3), -(VEC_SIZE * 2)(%rdi, %rdx)
> + VMOVU -VEC_SIZE(%rsi, %rdx), %VMM(2)
> + VMOVU -(VEC_SIZE * 2)(%rsi, %rdx), %VMM(3)
> + VMOVU %VMM(0), (%rdi)
> + VMOVU %VMM(1), VEC_SIZE(%rdi)
> + VMOVU %VMM(2), -VEC_SIZE(%rdi, %rdx)
> + VMOVU %VMM(3), -(VEC_SIZE * 2)(%rdi, %rdx)
> VZEROUPPER_RETURN
>
> .p2align 4
> @@ -400,24 +392,24 @@ L(more_2x_vec):
> cmpq $(VEC_SIZE * 8), %rdx
> ja L(more_8x_vec)
> /* Load VEC(1) regardless. VEC(0) has already been loaded. */
> - VMOVU VEC_SIZE(%rsi), %VEC(1)
> + VMOVU VEC_SIZE(%rsi), %VMM(1)
> cmpq $(VEC_SIZE * 4), %rdx
> jbe L(last_4x_vec)
> /* Copy from 4 * VEC + 1 to 8 * VEC, inclusively. */
> - VMOVU (VEC_SIZE * 2)(%rsi), %VEC(2)
> - VMOVU (VEC_SIZE * 3)(%rsi), %VEC(3)
> - VMOVU -VEC_SIZE(%rsi, %rdx), %VEC(4)
> - VMOVU -(VEC_SIZE * 2)(%rsi, %rdx), %VEC(5)
> - VMOVU -(VEC_SIZE * 3)(%rsi, %rdx), %VEC(6)
> - VMOVU -(VEC_SIZE * 4)(%rsi, %rdx), %VEC(7)
> - VMOVU %VEC(0), (%rdi)
> - VMOVU %VEC(1), VEC_SIZE(%rdi)
> - VMOVU %VEC(2), (VEC_SIZE * 2)(%rdi)
> - VMOVU %VEC(3), (VEC_SIZE * 3)(%rdi)
> - VMOVU %VEC(4), -VEC_SIZE(%rdi, %rdx)
> - VMOVU %VEC(5), -(VEC_SIZE * 2)(%rdi, %rdx)
> - VMOVU %VEC(6), -(VEC_SIZE * 3)(%rdi, %rdx)
> - VMOVU %VEC(7), -(VEC_SIZE * 4)(%rdi, %rdx)
> + VMOVU (VEC_SIZE * 2)(%rsi), %VMM(2)
> + VMOVU (VEC_SIZE * 3)(%rsi), %VMM(3)
> + VMOVU -VEC_SIZE(%rsi, %rdx), %VMM(4)
> + VMOVU -(VEC_SIZE * 2)(%rsi, %rdx), %VMM(5)
> + VMOVU -(VEC_SIZE * 3)(%rsi, %rdx), %VMM(6)
> + VMOVU -(VEC_SIZE * 4)(%rsi, %rdx), %VMM(7)
> + VMOVU %VMM(0), (%rdi)
> + VMOVU %VMM(1), VEC_SIZE(%rdi)
> + VMOVU %VMM(2), (VEC_SIZE * 2)(%rdi)
> + VMOVU %VMM(3), (VEC_SIZE * 3)(%rdi)
> + VMOVU %VMM(4), -VEC_SIZE(%rdi, %rdx)
> + VMOVU %VMM(5), -(VEC_SIZE * 2)(%rdi, %rdx)
> + VMOVU %VMM(6), -(VEC_SIZE * 3)(%rdi, %rdx)
> + VMOVU %VMM(7), -(VEC_SIZE * 4)(%rdi, %rdx)
> VZEROUPPER_RETURN
>
> .p2align 4,, 4
> @@ -466,14 +458,14 @@ L(more_8x_vec_forward):
> */
>
> /* First vec was already loaded into VEC(0). */
> - VMOVU -VEC_SIZE(%rsi, %rdx), %VEC(5)
> - VMOVU -(VEC_SIZE * 2)(%rsi, %rdx), %VEC(6)
> + VMOVU -VEC_SIZE(%rsi, %rdx), %VMM(5)
> + VMOVU -(VEC_SIZE * 2)(%rsi, %rdx), %VMM(6)
> /* Save beginning of dst. */
> movq %rdi, %rcx
> /* Align dst to VEC_SIZE - 1. */
> orq $(VEC_SIZE - 1), %rdi
> - VMOVU -(VEC_SIZE * 3)(%rsi, %rdx), %VEC(7)
> - VMOVU -(VEC_SIZE * 4)(%rsi, %rdx), %VEC(8)
> + VMOVU -(VEC_SIZE * 3)(%rsi, %rdx), %VMM(7)
> + VMOVU -(VEC_SIZE * 4)(%rsi, %rdx), %VMM(8)
>
> /* Subtract dst from src. Add back after dst aligned. */
> subq %rcx, %rsi
> @@ -488,25 +480,25 @@ L(more_8x_vec_forward):
> .p2align 4,, 11
> L(loop_4x_vec_forward):
> /* Copy 4 * VEC at a time forward. */
> - VMOVU (%rsi), %VEC(1)
> - VMOVU VEC_SIZE(%rsi), %VEC(2)
> - VMOVU (VEC_SIZE * 2)(%rsi), %VEC(3)
> - VMOVU (VEC_SIZE * 3)(%rsi), %VEC(4)
> + VMOVU (%rsi), %VMM(1)
> + VMOVU VEC_SIZE(%rsi), %VMM(2)
> + VMOVU (VEC_SIZE * 2)(%rsi), %VMM(3)
> + VMOVU (VEC_SIZE * 3)(%rsi), %VMM(4)
> subq $-(VEC_SIZE * 4), %rsi
> - VMOVA %VEC(1), (%rdi)
> - VMOVA %VEC(2), VEC_SIZE(%rdi)
> - VMOVA %VEC(3), (VEC_SIZE * 2)(%rdi)
> - VMOVA %VEC(4), (VEC_SIZE * 3)(%rdi)
> + VMOVA %VMM(1), (%rdi)
> + VMOVA %VMM(2), VEC_SIZE(%rdi)
> + VMOVA %VMM(3), (VEC_SIZE * 2)(%rdi)
> + VMOVA %VMM(4), (VEC_SIZE * 3)(%rdi)
> subq $-(VEC_SIZE * 4), %rdi
> cmpq %rdi, %rdx
> ja L(loop_4x_vec_forward)
> /* Store the last 4 * VEC. */
> - VMOVU %VEC(5), (VEC_SIZE * 3)(%rdx)
> - VMOVU %VEC(6), (VEC_SIZE * 2)(%rdx)
> - VMOVU %VEC(7), VEC_SIZE(%rdx)
> - VMOVU %VEC(8), (%rdx)
> + VMOVU %VMM(5), (VEC_SIZE * 3)(%rdx)
> + VMOVU %VMM(6), (VEC_SIZE * 2)(%rdx)
> + VMOVU %VMM(7), VEC_SIZE(%rdx)
> + VMOVU %VMM(8), (%rdx)
> /* Store the first VEC. */
> - VMOVU %VEC(0), (%rcx)
> + VMOVU %VMM(0), (%rcx)
> /* Keep L(nop_backward) target close to jmp for 2-byte encoding.
> */
> L(nop_backward):
> @@ -523,12 +515,12 @@ L(more_8x_vec_backward):
> addresses. */
>
> /* First vec was also loaded into VEC(0). */
> - VMOVU VEC_SIZE(%rsi), %VEC(5)
> - VMOVU (VEC_SIZE * 2)(%rsi), %VEC(6)
> + VMOVU VEC_SIZE(%rsi), %VMM(5)
> + VMOVU (VEC_SIZE * 2)(%rsi), %VMM(6)
> /* Beginning of region for 4x backward copy stored in rcx. */
> leaq (VEC_SIZE * -4 + -1)(%rdi, %rdx), %rcx
> - VMOVU (VEC_SIZE * 3)(%rsi), %VEC(7)
> - VMOVU -VEC_SIZE(%rsi, %rdx), %VEC(8)
> + VMOVU (VEC_SIZE * 3)(%rsi), %VMM(7)
> + VMOVU -VEC_SIZE(%rsi, %rdx), %VMM(8)
> /* Subtract dst from src. Add back after dst aligned. */
> subq %rdi, %rsi
> /* Align dst. */
> @@ -540,25 +532,25 @@ L(more_8x_vec_backward):
> .p2align 4,, 11
> L(loop_4x_vec_backward):
> /* Copy 4 * VEC at a time backward. */
> - VMOVU (VEC_SIZE * 3)(%rsi), %VEC(1)
> - VMOVU (VEC_SIZE * 2)(%rsi), %VEC(2)
> - VMOVU (VEC_SIZE * 1)(%rsi), %VEC(3)
> - VMOVU (VEC_SIZE * 0)(%rsi), %VEC(4)
> + VMOVU (VEC_SIZE * 3)(%rsi), %VMM(1)
> + VMOVU (VEC_SIZE * 2)(%rsi), %VMM(2)
> + VMOVU (VEC_SIZE * 1)(%rsi), %VMM(3)
> + VMOVU (VEC_SIZE * 0)(%rsi), %VMM(4)
> addq $(VEC_SIZE * -4), %rsi
> - VMOVA %VEC(1), (VEC_SIZE * 3)(%rcx)
> - VMOVA %VEC(2), (VEC_SIZE * 2)(%rcx)
> - VMOVA %VEC(3), (VEC_SIZE * 1)(%rcx)
> - VMOVA %VEC(4), (VEC_SIZE * 0)(%rcx)
> + VMOVA %VMM(1), (VEC_SIZE * 3)(%rcx)
> + VMOVA %VMM(2), (VEC_SIZE * 2)(%rcx)
> + VMOVA %VMM(3), (VEC_SIZE * 1)(%rcx)
> + VMOVA %VMM(4), (VEC_SIZE * 0)(%rcx)
> addq $(VEC_SIZE * -4), %rcx
> cmpq %rcx, %rdi
> jb L(loop_4x_vec_backward)
> /* Store the first 4 * VEC. */
> - VMOVU %VEC(0), (%rdi)
> - VMOVU %VEC(5), VEC_SIZE(%rdi)
> - VMOVU %VEC(6), (VEC_SIZE * 2)(%rdi)
> - VMOVU %VEC(7), (VEC_SIZE * 3)(%rdi)
> + VMOVU %VMM(0), (%rdi)
> + VMOVU %VMM(5), VEC_SIZE(%rdi)
> + VMOVU %VMM(6), (VEC_SIZE * 2)(%rdi)
> + VMOVU %VMM(7), (VEC_SIZE * 3)(%rdi)
> /* Store the last VEC. */
> - VMOVU %VEC(8), -VEC_SIZE(%rdx, %rdi)
> + VMOVU %VMM(8), -VEC_SIZE(%rdx, %rdi)
> VZEROUPPER_RETURN
>
> #if defined USE_MULTIARCH && IS_IN (libc)
> @@ -568,7 +560,7 @@ L(loop_4x_vec_backward):
> # if ALIGN_MOVSB
> L(skip_short_movsb_check):
> # if MOVSB_ALIGN_TO > VEC_SIZE
> - VMOVU VEC_SIZE(%rsi), %VEC(1)
> + VMOVU VEC_SIZE(%rsi), %VMM(1)
> # endif
> # if MOVSB_ALIGN_TO > (VEC_SIZE * 2)
> # error Unsupported MOVSB_ALIGN_TO
> @@ -597,9 +589,9 @@ L(skip_short_movsb_check):
>
> rep movsb
>
> - VMOVU %VEC(0), (%r8)
> + VMOVU %VMM(0), (%r8)
> # if MOVSB_ALIGN_TO > VEC_SIZE
> - VMOVU %VEC(1), VEC_SIZE(%r8)
> + VMOVU %VMM(1), VEC_SIZE(%r8)
> # endif
> VZEROUPPER_RETURN
> # endif
> @@ -640,7 +632,7 @@ L(movsb):
> # endif
> # if ALIGN_MOVSB
> # if MOVSB_ALIGN_TO > VEC_SIZE
> - VMOVU VEC_SIZE(%rsi), %VEC(1)
> + VMOVU VEC_SIZE(%rsi), %VMM(1)
> # endif
> # if MOVSB_ALIGN_TO > (VEC_SIZE * 2)
> # error Unsupported MOVSB_ALIGN_TO
> @@ -664,9 +656,9 @@ L(movsb_align_dst):
> rep movsb
>
> /* Store VECs loaded for aligning. */
> - VMOVU %VEC(0), (%r8)
> + VMOVU %VMM(0), (%r8)
> # if MOVSB_ALIGN_TO > VEC_SIZE
> - VMOVU %VEC(1), VEC_SIZE(%r8)
> + VMOVU %VMM(1), VEC_SIZE(%r8)
> # endif
> VZEROUPPER_RETURN
> # else /* !ALIGN_MOVSB. */
> @@ -701,18 +693,18 @@ L(large_memcpy_2x):
>
> /* First vec was also loaded into VEC(0). */
> # if VEC_SIZE < 64
> - VMOVU VEC_SIZE(%rsi), %VEC(1)
> + VMOVU VEC_SIZE(%rsi), %VMM(1)
> # if VEC_SIZE < 32
> - VMOVU (VEC_SIZE * 2)(%rsi), %VEC(2)
> - VMOVU (VEC_SIZE * 3)(%rsi), %VEC(3)
> + VMOVU (VEC_SIZE * 2)(%rsi), %VMM(2)
> + VMOVU (VEC_SIZE * 3)(%rsi), %VMM(3)
> # endif
> # endif
> - VMOVU %VEC(0), (%rdi)
> + VMOVU %VMM(0), (%rdi)
> # if VEC_SIZE < 64
> - VMOVU %VEC(1), VEC_SIZE(%rdi)
> + VMOVU %VMM(1), VEC_SIZE(%rdi)
> # if VEC_SIZE < 32
> - VMOVU %VEC(2), (VEC_SIZE * 2)(%rdi)
> - VMOVU %VEC(3), (VEC_SIZE * 3)(%rdi)
> + VMOVU %VMM(2), (VEC_SIZE * 2)(%rdi)
> + VMOVU %VMM(3), (VEC_SIZE * 3)(%rdi)
> # endif
> # endif
>
> @@ -761,12 +753,12 @@ L(loop_large_memcpy_2x_inner):
> PREFETCH_ONE_SET(1, (%rsi), PAGE_SIZE + PREFETCHED_LOAD_SIZE)
> PREFETCH_ONE_SET(1, (%rsi), PAGE_SIZE + PREFETCHED_LOAD_SIZE * 2)
> /* Load vectors from rsi. */
> - LOAD_ONE_SET((%rsi), 0, %VEC(0), %VEC(1), %VEC(2), %VEC(3))
> - LOAD_ONE_SET((%rsi), PAGE_SIZE, %VEC(4), %VEC(5), %VEC(6), %VEC(7))
> + LOAD_ONE_SET((%rsi), 0, %VMM(0), %VMM(1), %VMM(2), %VMM(3))
> + LOAD_ONE_SET((%rsi), PAGE_SIZE, %VMM(4), %VMM(5), %VMM(6), %VMM(7))
> subq $-LARGE_LOAD_SIZE, %rsi
> /* Non-temporal store vectors to rdi. */
> - STORE_ONE_SET((%rdi), 0, %VEC(0), %VEC(1), %VEC(2), %VEC(3))
> - STORE_ONE_SET((%rdi), PAGE_SIZE, %VEC(4), %VEC(5), %VEC(6), %VEC(7))
> + STORE_ONE_SET((%rdi), 0, %VMM(0), %VMM(1), %VMM(2), %VMM(3))
> + STORE_ONE_SET((%rdi), PAGE_SIZE, %VMM(4), %VMM(5), %VMM(6), %VMM(7))
> subq $-LARGE_LOAD_SIZE, %rdi
> decl %ecx
> jnz L(loop_large_memcpy_2x_inner)
> @@ -785,31 +777,31 @@ L(loop_large_memcpy_2x_tail):
> /* Copy 4 * VEC at a time forward with non-temporal stores. */
> PREFETCH_ONE_SET (1, (%rsi), PREFETCHED_LOAD_SIZE)
> PREFETCH_ONE_SET (1, (%rdi), PREFETCHED_LOAD_SIZE)
> - VMOVU (%rsi), %VEC(0)
> - VMOVU VEC_SIZE(%rsi), %VEC(1)
> - VMOVU (VEC_SIZE * 2)(%rsi), %VEC(2)
> - VMOVU (VEC_SIZE * 3)(%rsi), %VEC(3)
> + VMOVU (%rsi), %VMM(0)
> + VMOVU VEC_SIZE(%rsi), %VMM(1)
> + VMOVU (VEC_SIZE * 2)(%rsi), %VMM(2)
> + VMOVU (VEC_SIZE * 3)(%rsi), %VMM(3)
> subq $-(VEC_SIZE * 4), %rsi
> addl $-(VEC_SIZE * 4), %edx
> - VMOVA %VEC(0), (%rdi)
> - VMOVA %VEC(1), VEC_SIZE(%rdi)
> - VMOVA %VEC(2), (VEC_SIZE * 2)(%rdi)
> - VMOVA %VEC(3), (VEC_SIZE * 3)(%rdi)
> + VMOVA %VMM(0), (%rdi)
> + VMOVA %VMM(1), VEC_SIZE(%rdi)
> + VMOVA %VMM(2), (VEC_SIZE * 2)(%rdi)
> + VMOVA %VMM(3), (VEC_SIZE * 3)(%rdi)
> subq $-(VEC_SIZE * 4), %rdi
> cmpl $(VEC_SIZE * 4), %edx
> ja L(loop_large_memcpy_2x_tail)
>
> L(large_memcpy_2x_end):
> /* Store the last 4 * VEC. */
> - VMOVU -(VEC_SIZE * 4)(%rsi, %rdx), %VEC(0)
> - VMOVU -(VEC_SIZE * 3)(%rsi, %rdx), %VEC(1)
> - VMOVU -(VEC_SIZE * 2)(%rsi, %rdx), %VEC(2)
> - VMOVU -VEC_SIZE(%rsi, %rdx), %VEC(3)
> -
> - VMOVU %VEC(0), -(VEC_SIZE * 4)(%rdi, %rdx)
> - VMOVU %VEC(1), -(VEC_SIZE * 3)(%rdi, %rdx)
> - VMOVU %VEC(2), -(VEC_SIZE * 2)(%rdi, %rdx)
> - VMOVU %VEC(3), -VEC_SIZE(%rdi, %rdx)
> + VMOVU -(VEC_SIZE * 4)(%rsi, %rdx), %VMM(0)
> + VMOVU -(VEC_SIZE * 3)(%rsi, %rdx), %VMM(1)
> + VMOVU -(VEC_SIZE * 2)(%rsi, %rdx), %VMM(2)
> + VMOVU -VEC_SIZE(%rsi, %rdx), %VMM(3)
> +
> + VMOVU %VMM(0), -(VEC_SIZE * 4)(%rdi, %rdx)
> + VMOVU %VMM(1), -(VEC_SIZE * 3)(%rdi, %rdx)
> + VMOVU %VMM(2), -(VEC_SIZE * 2)(%rdi, %rdx)
> + VMOVU %VMM(3), -VEC_SIZE(%rdi, %rdx)
> VZEROUPPER_RETURN
>
> .p2align 4
> @@ -831,16 +823,16 @@ L(loop_large_memcpy_4x_inner):
> PREFETCH_ONE_SET(1, (%rsi), PAGE_SIZE * 2 + PREFETCHED_LOAD_SIZE)
> PREFETCH_ONE_SET(1, (%rsi), PAGE_SIZE * 3 + PREFETCHED_LOAD_SIZE)
> /* Load vectors from rsi. */
> - LOAD_ONE_SET((%rsi), 0, %VEC(0), %VEC(1), %VEC(2), %VEC(3))
> - LOAD_ONE_SET((%rsi), PAGE_SIZE, %VEC(4), %VEC(5), %VEC(6), %VEC(7))
> - LOAD_ONE_SET((%rsi), PAGE_SIZE * 2, %VEC(8), %VEC(9), %VEC(10), %VEC(11))
> - LOAD_ONE_SET((%rsi), PAGE_SIZE * 3, %VEC(12), %VEC(13), %VEC(14), %VEC(15))
> + LOAD_ONE_SET((%rsi), 0, %VMM(0), %VMM(1), %VMM(2), %VMM(3))
> + LOAD_ONE_SET((%rsi), PAGE_SIZE, %VMM(4), %VMM(5), %VMM(6), %VMM(7))
> + LOAD_ONE_SET((%rsi), PAGE_SIZE * 2, %VMM(8), %VMM(9), %VMM(10), %VMM(11))
> + LOAD_ONE_SET((%rsi), PAGE_SIZE * 3, %VMM(12), %VMM(13), %VMM(14), %VMM(15))
> subq $-LARGE_LOAD_SIZE, %rsi
> /* Non-temporal store vectors to rdi. */
> - STORE_ONE_SET((%rdi), 0, %VEC(0), %VEC(1), %VEC(2), %VEC(3))
> - STORE_ONE_SET((%rdi), PAGE_SIZE, %VEC(4), %VEC(5), %VEC(6), %VEC(7))
> - STORE_ONE_SET((%rdi), PAGE_SIZE * 2, %VEC(8), %VEC(9), %VEC(10), %VEC(11))
> - STORE_ONE_SET((%rdi), PAGE_SIZE * 3, %VEC(12), %VEC(13), %VEC(14), %VEC(15))
> + STORE_ONE_SET((%rdi), 0, %VMM(0), %VMM(1), %VMM(2), %VMM(3))
> + STORE_ONE_SET((%rdi), PAGE_SIZE, %VMM(4), %VMM(5), %VMM(6), %VMM(7))
> + STORE_ONE_SET((%rdi), PAGE_SIZE * 2, %VMM(8), %VMM(9), %VMM(10), %VMM(11))
> + STORE_ONE_SET((%rdi), PAGE_SIZE * 3, %VMM(12), %VMM(13), %VMM(14), %VMM(15))
> subq $-LARGE_LOAD_SIZE, %rdi
> decl %ecx
> jnz L(loop_large_memcpy_4x_inner)
> @@ -858,31 +850,31 @@ L(loop_large_memcpy_4x_tail):
> /* Copy 4 * VEC at a time forward with non-temporal stores. */
> PREFETCH_ONE_SET (1, (%rsi), PREFETCHED_LOAD_SIZE)
> PREFETCH_ONE_SET (1, (%rdi), PREFETCHED_LOAD_SIZE)
> - VMOVU (%rsi), %VEC(0)
> - VMOVU VEC_SIZE(%rsi), %VEC(1)
> - VMOVU (VEC_SIZE * 2)(%rsi), %VEC(2)
> - VMOVU (VEC_SIZE * 3)(%rsi), %VEC(3)
> + VMOVU (%rsi), %VMM(0)
> + VMOVU VEC_SIZE(%rsi), %VMM(1)
> + VMOVU (VEC_SIZE * 2)(%rsi), %VMM(2)
> + VMOVU (VEC_SIZE * 3)(%rsi), %VMM(3)
> subq $-(VEC_SIZE * 4), %rsi
> addl $-(VEC_SIZE * 4), %edx
> - VMOVA %VEC(0), (%rdi)
> - VMOVA %VEC(1), VEC_SIZE(%rdi)
> - VMOVA %VEC(2), (VEC_SIZE * 2)(%rdi)
> - VMOVA %VEC(3), (VEC_SIZE * 3)(%rdi)
> + VMOVA %VMM(0), (%rdi)
> + VMOVA %VMM(1), VEC_SIZE(%rdi)
> + VMOVA %VMM(2), (VEC_SIZE * 2)(%rdi)
> + VMOVA %VMM(3), (VEC_SIZE * 3)(%rdi)
> subq $-(VEC_SIZE * 4), %rdi
> cmpl $(VEC_SIZE * 4), %edx
> ja L(loop_large_memcpy_4x_tail)
>
> L(large_memcpy_4x_end):
> /* Store the last 4 * VEC. */
> - VMOVU -(VEC_SIZE * 4)(%rsi, %rdx), %VEC(0)
> - VMOVU -(VEC_SIZE * 3)(%rsi, %rdx), %VEC(1)
> - VMOVU -(VEC_SIZE * 2)(%rsi, %rdx), %VEC(2)
> - VMOVU -VEC_SIZE(%rsi, %rdx), %VEC(3)
> -
> - VMOVU %VEC(0), -(VEC_SIZE * 4)(%rdi, %rdx)
> - VMOVU %VEC(1), -(VEC_SIZE * 3)(%rdi, %rdx)
> - VMOVU %VEC(2), -(VEC_SIZE * 2)(%rdi, %rdx)
> - VMOVU %VEC(3), -VEC_SIZE(%rdi, %rdx)
> + VMOVU -(VEC_SIZE * 4)(%rsi, %rdx), %VMM(0)
> + VMOVU -(VEC_SIZE * 3)(%rsi, %rdx), %VMM(1)
> + VMOVU -(VEC_SIZE * 2)(%rsi, %rdx), %VMM(2)
> + VMOVU -VEC_SIZE(%rsi, %rdx), %VMM(3)
> +
> + VMOVU %VMM(0), -(VEC_SIZE * 4)(%rdi, %rdx)
> + VMOVU %VMM(1), -(VEC_SIZE * 3)(%rdi, %rdx)
> + VMOVU %VMM(2), -(VEC_SIZE * 2)(%rdi, %rdx)
> + VMOVU %VMM(3), -VEC_SIZE(%rdi, %rdx)
> VZEROUPPER_RETURN
> #endif
> END (MEMMOVE_SYMBOL (__memmove, unaligned_erms))
> --
> 2.34.1
>
LGTM
--Sunil