From: Noah Goldstein <goldstein.w.n@gmail.com>
To: libc-alpha@sourceware.org
Cc: goldstein.w.n@gmail.com, hjl.tools@gmail.com,
andrey.kolesov@intel.com, carlos@systemhalted.org
Subject: [PATCH v5 1/3] x86/fpu: Reformat svml_{s|d}_wrapper_impl.h
Date: Fri, 18 Nov 2022 16:13:30 -0800
Message-ID: <20221119001332.1428330-1-goldstein.w.n@gmail.com>
In-Reply-To: <20221118190835.1033248-1-goldstein.w.n@gmail.com>

Just reformat to the style convention used in other x86 assembler
files. This does not change the generated libm.so or libmvec.so.
---
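For reviewers new to these files: each macro implements a wider vector
entry point by spilling the input to the stack, invoking the scalar
(or narrower-vector) callee once per lane or half, and repacking the
results. A minimal annotated sketch of that pattern, taken from the
single-argument double-precision SSE2 wrapper below (the comments are
illustrative only and not part of the patch):

	subq	$40, %rsp		/* Carve out scratch space.  */
	cfi_adjust_cfa_offset (40)
	movaps	%xmm0, (%rsp)		/* Spill both double lanes.  */
	call	JUMPTARGET(\callee)	/* Scalar call on lane 0 (low half).  */
	movsd	%xmm0, 16(%rsp)		/* Save the lane-0 result.  */
	movsd	8(%rsp), %xmm0		/* Feed lane 1 to the scalar routine.  */
	call	JUMPTARGET(\callee)
	movsd	16(%rsp), %xmm1		/* Reload the lane-0 result...  */
	unpcklpd %xmm0, %xmm1		/* ...and repack: xmm1 = {lane0, lane1}.  */
	movaps	%xmm1, %xmm0		/* Return the packed vector in xmm0.  */
	addq	$40, %rsp
	cfi_adjust_cfa_offset (-40)
	ret

The AVX and AVX512 variants follow the same shape, splitting the
ymm/zmm input into halves and calling the narrower-ISA version on
each half.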
sysdeps/x86_64/fpu/svml_d_wrapper_impl.h | 474 ++++++++++----------
sysdeps/x86_64/fpu/svml_s_wrapper_impl.h | 546 +++++++++++------------
2 files changed, 510 insertions(+), 510 deletions(-)
diff --git a/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h b/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h
index 2334713015..b03a2122b9 100644
--- a/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h
+++ b/sysdeps/x86_64/fpu/svml_d_wrapper_impl.h
@@ -18,273 +18,273 @@
/* SSE2 ISA version as wrapper to scalar. */
.macro WRAPPER_IMPL_SSE2 callee
- subq $40, %rsp
- cfi_adjust_cfa_offset(40)
- movaps %xmm0, (%rsp)
- call JUMPTARGET(\callee)
- movsd %xmm0, 16(%rsp)
- movsd 8(%rsp), %xmm0
- call JUMPTARGET(\callee)
- movsd 16(%rsp), %xmm1
- movsd %xmm0, 24(%rsp)
- unpcklpd %xmm0, %xmm1
- movaps %xmm1, %xmm0
- addq $40, %rsp
- cfi_adjust_cfa_offset(-40)
- ret
+ subq $40, %rsp
+ cfi_adjust_cfa_offset (40)
+ movaps %xmm0, (%rsp)
+ call JUMPTARGET(\callee)
+ movsd %xmm0, 16(%rsp)
+ movsd 8(%rsp), %xmm0
+ call JUMPTARGET(\callee)
+ movsd 16(%rsp), %xmm1
+ movsd %xmm0, 24(%rsp)
+ unpcklpd %xmm0, %xmm1
+ movaps %xmm1, %xmm0
+ addq $40, %rsp
+ cfi_adjust_cfa_offset (-40)
+ ret
.endm
/* 2 argument SSE2 ISA version as wrapper to scalar. */
.macro WRAPPER_IMPL_SSE2_ff callee
- subq $56, %rsp
- cfi_adjust_cfa_offset(56)
- movaps %xmm0, (%rsp)
- movaps %xmm1, 16(%rsp)
- call JUMPTARGET(\callee)
- movsd %xmm0, 32(%rsp)
- movsd 8(%rsp), %xmm0
- movsd 24(%rsp), %xmm1
- call JUMPTARGET(\callee)
- movsd 32(%rsp), %xmm1
- movsd %xmm0, 40(%rsp)
- unpcklpd %xmm0, %xmm1
- movaps %xmm1, %xmm0
- addq $56, %rsp
- cfi_adjust_cfa_offset(-56)
- ret
+ subq $56, %rsp
+ cfi_adjust_cfa_offset (56)
+ movaps %xmm0, (%rsp)
+ movaps %xmm1, 16(%rsp)
+ call JUMPTARGET(\callee)
+ movsd %xmm0, 32(%rsp)
+ movsd 8(%rsp), %xmm0
+ movsd 24(%rsp), %xmm1
+ call JUMPTARGET(\callee)
+ movsd 32(%rsp), %xmm1
+ movsd %xmm0, 40(%rsp)
+ unpcklpd %xmm0, %xmm1
+ movaps %xmm1, %xmm0
+ addq $56, %rsp
+ cfi_adjust_cfa_offset (-56)
+ ret
.endm
/* 3 argument SSE2 ISA version as wrapper to scalar. */
.macro WRAPPER_IMPL_SSE2_fFF callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- pushq %rbx
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbx, 0)
- movq %rdi, %rbp
- movq %rsi, %rbx
- subq $40, %rsp
- cfi_adjust_cfa_offset(40)
- leaq 16(%rsp), %rsi
- leaq 24(%rsp), %rdi
- movaps %xmm0, (%rsp)
- call JUMPTARGET(\callee)
- leaq 16(%rsp), %rsi
- leaq 24(%rsp), %rdi
- movsd 24(%rsp), %xmm0
- movapd (%rsp), %xmm1
- movsd %xmm0, 0(%rbp)
- unpckhpd %xmm1, %xmm1
- movsd 16(%rsp), %xmm0
- movsd %xmm0, (%rbx)
- movapd %xmm1, %xmm0
- call JUMPTARGET(\callee)
- movsd 24(%rsp), %xmm0
- movsd %xmm0, 8(%rbp)
- movsd 16(%rsp), %xmm0
- movsd %xmm0, 8(%rbx)
- addq $40, %rsp
- cfi_adjust_cfa_offset(-40)
- popq %rbx
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbx)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ pushq %rbx
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbx, 0)
+ movq %rdi, %rbp
+ movq %rsi, %rbx
+ subq $40, %rsp
+ cfi_adjust_cfa_offset (40)
+ leaq 16(%rsp), %rsi
+ leaq 24(%rsp), %rdi
+ movaps %xmm0, (%rsp)
+ call JUMPTARGET(\callee)
+ leaq 16(%rsp), %rsi
+ leaq 24(%rsp), %rdi
+ movsd 24(%rsp), %xmm0
+ movapd (%rsp), %xmm1
+ movsd %xmm0, 0(%rbp)
+ unpckhpd %xmm1, %xmm1
+ movsd 16(%rsp), %xmm0
+ movsd %xmm0, (%rbx)
+ movapd %xmm1, %xmm0
+ call JUMPTARGET(\callee)
+ movsd 24(%rsp), %xmm0
+ movsd %xmm0, 8(%rbp)
+ movsd 16(%rsp), %xmm0
+ movsd %xmm0, 8(%rbx)
+ addq $40, %rsp
+ cfi_adjust_cfa_offset (-40)
+ popq %rbx
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbx)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
.endm
/* AVX/AVX2 ISA version as wrapper to SSE ISA version. */
.macro WRAPPER_IMPL_AVX callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-32, %rsp
- subq $32, %rsp
- vextractf128 $1, %ymm0, (%rsp)
- vzeroupper
- call HIDDEN_JUMPTARGET(\callee)
- vmovapd %xmm0, 16(%rsp)
- vmovaps (%rsp), %xmm0
- call HIDDEN_JUMPTARGET(\callee)
- vmovapd %xmm0, %xmm1
- vmovapd 16(%rsp), %xmm0
- vinsertf128 $1, %xmm1, %ymm0, %ymm0
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-32, %rsp
+ subq $32, %rsp
+ vextractf128 $1, %ymm0, (%rsp)
+ vzeroupper
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovapd %xmm0, 16(%rsp)
+ vmovaps (%rsp), %xmm0
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovapd %xmm0, %xmm1
+ vmovapd 16(%rsp), %xmm0
+ vinsertf128 $1, %xmm1, %ymm0, %ymm0
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
.endm
/* 2 argument AVX/AVX2 ISA version as wrapper to SSE ISA version. */
.macro WRAPPER_IMPL_AVX_ff callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-32, %rsp
- subq $64, %rsp
- vextractf128 $1, %ymm0, 16(%rsp)
- vextractf128 $1, %ymm1, (%rsp)
- vzeroupper
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps %xmm0, 32(%rsp)
- vmovaps 16(%rsp), %xmm0
- vmovaps (%rsp), %xmm1
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps %xmm0, %xmm1
- vmovaps 32(%rsp), %xmm0
- vinsertf128 $1, %xmm1, %ymm0, %ymm0
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-32, %rsp
+ subq $64, %rsp
+ vextractf128 $1, %ymm0, 16(%rsp)
+ vextractf128 $1, %ymm1, (%rsp)
+ vzeroupper
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovaps %xmm0, 32(%rsp)
+ vmovaps 16(%rsp), %xmm0
+ vmovaps (%rsp), %xmm1
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovaps %xmm0, %xmm1
+ vmovaps 32(%rsp), %xmm0
+ vinsertf128 $1, %xmm1, %ymm0, %ymm0
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
.endm
/* 3 argument AVX/AVX2 ISA version as wrapper to SSE ISA version. */
.macro WRAPPER_IMPL_AVX_fFF callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-32, %rsp
- pushq %r13
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%r13, 0)
- pushq %r14
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%r14, 0)
- subq $48, %rsp
- movq %rsi, %r14
- movq %rdi, %r13
- vextractf128 $1, %ymm0, 32(%rsp)
- vzeroupper
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps 32(%rsp), %xmm0
- lea (%rsp), %rdi
- lea 16(%rsp), %rsi
- call HIDDEN_JUMPTARGET(\callee)
- vmovapd (%rsp), %xmm0
- vmovapd 16(%rsp), %xmm1
- vmovapd %xmm0, 16(%r13)
- vmovapd %xmm1, 16(%r14)
- addq $48, %rsp
- popq %r14
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%r14)
- popq %r13
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%r13)
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-32, %rsp
+ pushq %r13
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%r13, 0)
+ pushq %r14
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%r14, 0)
+ subq $48, %rsp
+ movq %rsi, %r14
+ movq %rdi, %r13
+ vextractf128 $1, %ymm0, 32(%rsp)
+ vzeroupper
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovaps 32(%rsp), %xmm0
+ lea (%rsp), %rdi
+ lea 16(%rsp), %rsi
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovapd (%rsp), %xmm0
+ vmovapd 16(%rsp), %xmm1
+ vmovapd %xmm0, 16(%r13)
+ vmovapd %xmm1, 16(%r14)
+ addq $48, %rsp
+ popq %r14
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%r14)
+ popq %r13
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%r13)
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
.endm
/* AVX512 ISA version as wrapper to AVX2 ISA version. */
.macro WRAPPER_IMPL_AVX512 callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-64, %rsp
- subq $128, %rsp
- vmovups %zmm0, (%rsp)
- vmovupd (%rsp), %ymm0
- call HIDDEN_JUMPTARGET(\callee)
- vmovupd %ymm0, 64(%rsp)
- vmovupd 32(%rsp), %ymm0
- call HIDDEN_JUMPTARGET(\callee)
- vmovupd %ymm0, 96(%rsp)
- vmovups 64(%rsp), %zmm0
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-64, %rsp
+ subq $128, %rsp
+ vmovups %zmm0, (%rsp)
+ vmovupd (%rsp), %ymm0
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovupd %ymm0, 64(%rsp)
+ vmovupd 32(%rsp), %ymm0
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovupd %ymm0, 96(%rsp)
+ vmovups 64(%rsp), %zmm0
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
.endm
/* 2 argument AVX512 ISA version as wrapper to AVX2 ISA version. */
.macro WRAPPER_IMPL_AVX512_ff callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-64, %rsp
- subq $192, %rsp
- vmovups %zmm0, (%rsp)
- vmovups %zmm1, 64(%rsp)
- vmovupd (%rsp), %ymm0
- vmovupd 64(%rsp), %ymm1
- call HIDDEN_JUMPTARGET(\callee)
- vmovupd %ymm0, 128(%rsp)
- vmovupd 32(%rsp), %ymm0
- vmovupd 96(%rsp), %ymm1
- call HIDDEN_JUMPTARGET(\callee)
- vmovupd %ymm0, 160(%rsp)
- vmovups 128(%rsp), %zmm0
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-64, %rsp
+ subq $192, %rsp
+ vmovups %zmm0, (%rsp)
+ vmovups %zmm1, 64(%rsp)
+ vmovupd (%rsp), %ymm0
+ vmovupd 64(%rsp), %ymm1
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovupd %ymm0, 128(%rsp)
+ vmovupd 32(%rsp), %ymm0
+ vmovupd 96(%rsp), %ymm1
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovupd %ymm0, 160(%rsp)
+ vmovups 128(%rsp), %zmm0
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
.endm
/* 3 argument AVX512 ISA version as wrapper to AVX2 ISA version. */
.macro WRAPPER_IMPL_AVX512_fFF callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-64, %rsp
- pushq %r12
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%r12, 0)
- pushq %r13
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%r13, 0)
- subq $176, %rsp
- movq %rsi, %r13
- vmovups %zmm0, (%rsp)
- movq %rdi, %r12
- vmovupd (%rsp), %ymm0
- call HIDDEN_JUMPTARGET(\callee)
- vmovupd 32(%rsp), %ymm0
- lea 64(%rsp), %rdi
- lea 96(%rsp), %rsi
- call HIDDEN_JUMPTARGET(\callee)
- vmovupd 64(%rsp), %ymm0
- vmovupd 96(%rsp), %ymm1
- vmovupd %ymm0, 32(%r12)
- vmovupd %ymm1, 32(%r13)
- vzeroupper
- addq $176, %rsp
- popq %r13
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%r13)
- popq %r12
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%r12)
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-64, %rsp
+ pushq %r12
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%r12, 0)
+ pushq %r13
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%r13, 0)
+ subq $176, %rsp
+ movq %rsi, %r13
+ vmovups %zmm0, (%rsp)
+ movq %rdi, %r12
+ vmovupd (%rsp), %ymm0
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovupd 32(%rsp), %ymm0
+ lea 64(%rsp), %rdi
+ lea 96(%rsp), %rsi
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovupd 64(%rsp), %ymm0
+ vmovupd 96(%rsp), %ymm1
+ vmovupd %ymm0, 32(%r12)
+ vmovupd %ymm1, 32(%r13)
+ vzeroupper
+ addq $176, %rsp
+ popq %r13
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%r13)
+ popq %r12
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%r12)
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
.endm
diff --git a/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h b/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h
index c23da7ec83..cecf6c8384 100644
--- a/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h
+++ b/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h
@@ -18,309 +18,309 @@
/* SSE2 ISA version as wrapper to scalar. */
.macro WRAPPER_IMPL_SSE2 callee
- subq $40, %rsp
- cfi_adjust_cfa_offset(40)
- movaps %xmm0, (%rsp)
- call JUMPTARGET(\callee)
- movss %xmm0, 16(%rsp)
- movss 4(%rsp), %xmm0
- call JUMPTARGET(\callee)
- movss %xmm0, 20(%rsp)
- movss 8(%rsp), %xmm0
- call JUMPTARGET(\callee)
- movss %xmm0, 24(%rsp)
- movss 12(%rsp), %xmm0
- call JUMPTARGET(\callee)
- movss 16(%rsp), %xmm3
- movss 20(%rsp), %xmm2
- movss 24(%rsp), %xmm1
- movss %xmm0, 28(%rsp)
- unpcklps %xmm1, %xmm3
- unpcklps %xmm0, %xmm2
- unpcklps %xmm2, %xmm3
- movaps %xmm3, %xmm0
- addq $40, %rsp
- cfi_adjust_cfa_offset(-40)
- ret
+ subq $40, %rsp
+ cfi_adjust_cfa_offset (40)
+ movaps %xmm0, (%rsp)
+ call JUMPTARGET(\callee)
+ movss %xmm0, 16(%rsp)
+ movss 4(%rsp), %xmm0
+ call JUMPTARGET(\callee)
+ movss %xmm0, 20(%rsp)
+ movss 8(%rsp), %xmm0
+ call JUMPTARGET(\callee)
+ movss %xmm0, 24(%rsp)
+ movss 12(%rsp), %xmm0
+ call JUMPTARGET(\callee)
+ movss 16(%rsp), %xmm3
+ movss 20(%rsp), %xmm2
+ movss 24(%rsp), %xmm1
+ movss %xmm0, 28(%rsp)
+ unpcklps %xmm1, %xmm3
+ unpcklps %xmm0, %xmm2
+ unpcklps %xmm2, %xmm3
+ movaps %xmm3, %xmm0
+ addq $40, %rsp
+ cfi_adjust_cfa_offset (-40)
+ ret
.endm
/* 2 argument SSE2 ISA version as wrapper to scalar. */
.macro WRAPPER_IMPL_SSE2_ff callee
- subq $56, %rsp
- cfi_adjust_cfa_offset(56)
- movaps %xmm0, (%rsp)
- movaps %xmm1, 16(%rsp)
- call JUMPTARGET(\callee)
- movss %xmm0, 32(%rsp)
- movss 4(%rsp), %xmm0
- movss 20(%rsp), %xmm1
- call JUMPTARGET(\callee)
- movss %xmm0, 36(%rsp)
- movss 8(%rsp), %xmm0
- movss 24(%rsp), %xmm1
- call JUMPTARGET(\callee)
- movss %xmm0, 40(%rsp)
- movss 12(%rsp), %xmm0
- movss 28(%rsp), %xmm1
- call JUMPTARGET(\callee)
- movss 32(%rsp), %xmm3
- movss 36(%rsp), %xmm2
- movss 40(%rsp), %xmm1
- movss %xmm0, 44(%rsp)
- unpcklps %xmm1, %xmm3
- unpcklps %xmm0, %xmm2
- unpcklps %xmm2, %xmm3
- movaps %xmm3, %xmm0
- addq $56, %rsp
- cfi_adjust_cfa_offset(-56)
- ret
+ subq $56, %rsp
+ cfi_adjust_cfa_offset (56)
+ movaps %xmm0, (%rsp)
+ movaps %xmm1, 16(%rsp)
+ call JUMPTARGET(\callee)
+ movss %xmm0, 32(%rsp)
+ movss 4(%rsp), %xmm0
+ movss 20(%rsp), %xmm1
+ call JUMPTARGET(\callee)
+ movss %xmm0, 36(%rsp)
+ movss 8(%rsp), %xmm0
+ movss 24(%rsp), %xmm1
+ call JUMPTARGET(\callee)
+ movss %xmm0, 40(%rsp)
+ movss 12(%rsp), %xmm0
+ movss 28(%rsp), %xmm1
+ call JUMPTARGET(\callee)
+ movss 32(%rsp), %xmm3
+ movss 36(%rsp), %xmm2
+ movss 40(%rsp), %xmm1
+ movss %xmm0, 44(%rsp)
+ unpcklps %xmm1, %xmm3
+ unpcklps %xmm0, %xmm2
+ unpcklps %xmm2, %xmm3
+ movaps %xmm3, %xmm0
+ addq $56, %rsp
+ cfi_adjust_cfa_offset (-56)
+ ret
.endm
/* 3 argument SSE2 ISA version as wrapper to scalar. */
.macro WRAPPER_IMPL_SSE2_fFF callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- pushq %rbx
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbx, 0)
- movq %rdi, %rbp
- movq %rsi, %rbx
- subq $40, %rsp
- cfi_adjust_cfa_offset(40)
- leaq 24(%rsp), %rsi
- leaq 28(%rsp), %rdi
- movaps %xmm0, (%rsp)
- call JUMPTARGET(\callee)
- leaq 24(%rsp), %rsi
- leaq 28(%rsp), %rdi
- movss 28(%rsp), %xmm0
- movss %xmm0, 0(%rbp)
- movaps (%rsp), %xmm1
- movss 24(%rsp), %xmm0
- movss %xmm0, (%rbx)
- movaps %xmm1, %xmm0
- shufps $85, %xmm1, %xmm0
- call JUMPTARGET(\callee)
- movss 28(%rsp), %xmm0
- leaq 24(%rsp), %rsi
- movss %xmm0, 4(%rbp)
- leaq 28(%rsp), %rdi
- movaps (%rsp), %xmm1
- movss 24(%rsp), %xmm0
- movss %xmm0, 4(%rbx)
- movaps %xmm1, %xmm0
- unpckhps %xmm1, %xmm0
- call JUMPTARGET(\callee)
- movaps (%rsp), %xmm1
- leaq 24(%rsp), %rsi
- leaq 28(%rsp), %rdi
- movss 28(%rsp), %xmm0
- shufps $255, %xmm1, %xmm1
- movss %xmm0, 8(%rbp)
- movss 24(%rsp), %xmm0
- movss %xmm0, 8(%rbx)
- movaps %xmm1, %xmm0
- call JUMPTARGET(\callee)
- movss 28(%rsp), %xmm0
- movss %xmm0, 12(%rbp)
- movss 24(%rsp), %xmm0
- movss %xmm0, 12(%rbx)
- addq $40, %rsp
- cfi_adjust_cfa_offset(-40)
- popq %rbx
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbx)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ pushq %rbx
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbx, 0)
+ movq %rdi, %rbp
+ movq %rsi, %rbx
+ subq $40, %rsp
+ cfi_adjust_cfa_offset (40)
+ leaq 24(%rsp), %rsi
+ leaq 28(%rsp), %rdi
+ movaps %xmm0, (%rsp)
+ call JUMPTARGET(\callee)
+ leaq 24(%rsp), %rsi
+ leaq 28(%rsp), %rdi
+ movss 28(%rsp), %xmm0
+ movss %xmm0, 0(%rbp)
+ movaps (%rsp), %xmm1
+ movss 24(%rsp), %xmm0
+ movss %xmm0, (%rbx)
+ movaps %xmm1, %xmm0
+ shufps $85, %xmm1, %xmm0
+ call JUMPTARGET(\callee)
+ movss 28(%rsp), %xmm0
+ leaq 24(%rsp), %rsi
+ movss %xmm0, 4(%rbp)
+ leaq 28(%rsp), %rdi
+ movaps (%rsp), %xmm1
+ movss 24(%rsp), %xmm0
+ movss %xmm0, 4(%rbx)
+ movaps %xmm1, %xmm0
+ unpckhps %xmm1, %xmm0
+ call JUMPTARGET(\callee)
+ movaps (%rsp), %xmm1
+ leaq 24(%rsp), %rsi
+ leaq 28(%rsp), %rdi
+ movss 28(%rsp), %xmm0
+ shufps $255, %xmm1, %xmm1
+ movss %xmm0, 8(%rbp)
+ movss 24(%rsp), %xmm0
+ movss %xmm0, 8(%rbx)
+ movaps %xmm1, %xmm0
+ call JUMPTARGET(\callee)
+ movss 28(%rsp), %xmm0
+ movss %xmm0, 12(%rbp)
+ movss 24(%rsp), %xmm0
+ movss %xmm0, 12(%rbx)
+ addq $40, %rsp
+ cfi_adjust_cfa_offset (-40)
+ popq %rbx
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbx)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
.endm
/* AVX/AVX2 ISA version as wrapper to SSE ISA version. */
.macro WRAPPER_IMPL_AVX callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-32, %rsp
- subq $32, %rsp
- vextractf128 $1, %ymm0, (%rsp)
- vzeroupper
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps %xmm0, 16(%rsp)
- vmovaps (%rsp), %xmm0
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps %xmm0, %xmm1
- vmovaps 16(%rsp), %xmm0
- vinsertf128 $1, %xmm1, %ymm0, %ymm0
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-32, %rsp
+ subq $32, %rsp
+ vextractf128 $1, %ymm0, (%rsp)
+ vzeroupper
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovaps %xmm0, 16(%rsp)
+ vmovaps (%rsp), %xmm0
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovaps %xmm0, %xmm1
+ vmovaps 16(%rsp), %xmm0
+ vinsertf128 $1, %xmm1, %ymm0, %ymm0
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
.endm
/* 2 argument AVX/AVX2 ISA version as wrapper to SSE ISA version. */
.macro WRAPPER_IMPL_AVX_ff callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-32, %rsp
- subq $64, %rsp
- vextractf128 $1, %ymm0, 16(%rsp)
- vextractf128 $1, %ymm1, (%rsp)
- vzeroupper
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps %xmm0, 32(%rsp)
- vmovaps 16(%rsp), %xmm0
- vmovaps (%rsp), %xmm1
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps %xmm0, %xmm1
- vmovaps 32(%rsp), %xmm0
- vinsertf128 $1, %xmm1, %ymm0, %ymm0
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-32, %rsp
+ subq $64, %rsp
+ vextractf128 $1, %ymm0, 16(%rsp)
+ vextractf128 $1, %ymm1, (%rsp)
+ vzeroupper
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovaps %xmm0, 32(%rsp)
+ vmovaps 16(%rsp), %xmm0
+ vmovaps (%rsp), %xmm1
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovaps %xmm0, %xmm1
+ vmovaps 32(%rsp), %xmm0
+ vinsertf128 $1, %xmm1, %ymm0, %ymm0
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
.endm
/* 3 argument AVX/AVX2 ISA version as wrapper to SSE ISA version. */
.macro WRAPPER_IMPL_AVX_fFF callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-32, %rsp
- pushq %r13
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%r13, 0)
- pushq %r14
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%r14, 0)
- subq $48, %rsp
- movq %rsi, %r14
- vmovaps %ymm0, (%rsp)
- movq %rdi, %r13
- vmovaps 16(%rsp), %xmm1
- vmovaps %xmm1, 32(%rsp)
- vzeroupper
- vmovaps (%rsp), %xmm0
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps 32(%rsp), %xmm0
- lea (%rsp), %rdi
- lea 16(%rsp), %rsi
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps (%rsp), %xmm0
- vmovaps 16(%rsp), %xmm1
- vmovaps %xmm0, 16(%r13)
- vmovaps %xmm1, 16(%r14)
- addq $48, %rsp
- popq %r14
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%r14)
- popq %r13
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%r13)
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-32, %rsp
+ pushq %r13
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%r13, 0)
+ pushq %r14
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%r14, 0)
+ subq $48, %rsp
+ movq %rsi, %r14
+ vmovaps %ymm0, (%rsp)
+ movq %rdi, %r13
+ vmovaps 16(%rsp), %xmm1
+ vmovaps %xmm1, 32(%rsp)
+ vzeroupper
+ vmovaps (%rsp), %xmm0
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovaps 32(%rsp), %xmm0
+ lea (%rsp), %rdi
+ lea 16(%rsp), %rsi
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovaps (%rsp), %xmm0
+ vmovaps 16(%rsp), %xmm1
+ vmovaps %xmm0, 16(%r13)
+ vmovaps %xmm1, 16(%r14)
+ addq $48, %rsp
+ popq %r14
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%r14)
+ popq %r13
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%r13)
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
.endm
/* AVX512 ISA version as wrapper to AVX2 ISA version. */
.macro WRAPPER_IMPL_AVX512 callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-64, %rsp
- subq $128, %rsp
- vmovups %zmm0, (%rsp)
- vmovupd (%rsp), %ymm0
- call HIDDEN_JUMPTARGET(\callee)
- vmovupd %ymm0, 64(%rsp)
- vmovupd 32(%rsp), %ymm0
- call HIDDEN_JUMPTARGET(\callee)
- vmovupd %ymm0, 96(%rsp)
- vmovups 64(%rsp), %zmm0
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-64, %rsp
+ subq $128, %rsp
+ vmovups %zmm0, (%rsp)
+ vmovupd (%rsp), %ymm0
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovupd %ymm0, 64(%rsp)
+ vmovupd 32(%rsp), %ymm0
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovupd %ymm0, 96(%rsp)
+ vmovups 64(%rsp), %zmm0
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
.endm
/* 2 argument AVX512 ISA version as wrapper to AVX2 ISA version. */
.macro WRAPPER_IMPL_AVX512_ff callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-64, %rsp
- subq $192, %rsp
- vmovups %zmm0, (%rsp)
- vmovups %zmm1, 64(%rsp)
- vmovups (%rsp), %ymm0
- vmovups 64(%rsp), %ymm1
- call HIDDEN_JUMPTARGET(\callee)
- vmovups %ymm0, 128(%rsp)
- vmovups 32(%rsp), %ymm0
- vmovups 96(%rsp), %ymm1
- call HIDDEN_JUMPTARGET(\callee)
- vmovups %ymm0, 160(%rsp)
- vmovups 128(%rsp), %zmm0
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-64, %rsp
+ subq $192, %rsp
+ vmovups %zmm0, (%rsp)
+ vmovups %zmm1, 64(%rsp)
+ vmovups (%rsp), %ymm0
+ vmovups 64(%rsp), %ymm1
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovups %ymm0, 128(%rsp)
+ vmovups 32(%rsp), %ymm0
+ vmovups 96(%rsp), %ymm1
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovups %ymm0, 160(%rsp)
+ vmovups 128(%rsp), %zmm0
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
.endm
/* 3 argument AVX512 ISA version as wrapper to AVX2 ISA version. */
.macro WRAPPER_IMPL_AVX512_fFF callee
- pushq %rbp
- cfi_adjust_cfa_offset (8)
- cfi_rel_offset (%rbp, 0)
- movq %rsp, %rbp
- cfi_def_cfa_register (%rbp)
- andq $-64, %rsp
- pushq %r12
- pushq %r13
- subq $176, %rsp
- movq %rsi, %r13
- vmovaps %zmm0, (%rsp)
- movq %rdi, %r12
- vmovaps (%rsp), %ymm0
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps 32(%rsp), %ymm0
- lea 64(%rsp), %rdi
- lea 96(%rsp), %rsi
- call HIDDEN_JUMPTARGET(\callee)
- vmovaps 64(%rsp), %ymm0
- vmovaps 96(%rsp), %ymm1
- vmovaps %ymm0, 32(%r12)
- vmovaps %ymm1, 32(%r13)
- addq $176, %rsp
- popq %r13
- popq %r12
- movq %rbp, %rsp
- cfi_def_cfa_register (%rsp)
- popq %rbp
- cfi_adjust_cfa_offset (-8)
- cfi_restore (%rbp)
- ret
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-64, %rsp
+ pushq %r12
+ pushq %r13
+ subq $176, %rsp
+ movq %rsi, %r13
+ vmovaps %zmm0, (%rsp)
+ movq %rdi, %r12
+ vmovaps (%rsp), %ymm0
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovaps 32(%rsp), %ymm0
+ lea 64(%rsp), %rdi
+ lea 96(%rsp), %rsi
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovaps 64(%rsp), %ymm0
+ vmovaps 96(%rsp), %ymm1
+ vmovaps %ymm0, 32(%r12)
+ vmovaps %ymm1, 32(%r13)
+ addq $176, %rsp
+ popq %r13
+ popq %r12
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
.endm
--
2.34.1