From mboxrd@z Thu Jan 1 00:00:00 1970
Received: by sourceware.org (Postfix, from userid 7852) id DFC6E3858438; Tue, 8 Mar 2022 05:49:51 +0000 (GMT)
DKIM-Filter: OpenDKIM Filter v2.11.0 sourceware.org DFC6E3858438
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
From: Sunil Pandey
To: glibc-cvs@sourceware.org
Subject: [glibc] x86_64: Fix svml_d_exp28_core_avx512.S code formatting
X-Act-Checkin: glibc
X-Git-Author: Sunil K Pandey
X-Git-Refname: refs/heads/master
X-Git-Oldrev: 2b7494c4f8939a4edce75b3767650c0fe53078df
X-Git-Newrev: 91a317cc97eddbbce58c32e8eef68199ff251c91
Message-Id: <20220308054951.DFC6E3858438@sourceware.org>
Date: Tue, 8 Mar 2022 05:49:51 +0000 (GMT)
X-BeenThere: glibc-cvs@sourceware.org
X-Mailman-Version: 2.1.29
Precedence: list
List-Id: Glibc-cvs mailing list
X-List-Received-Date: Tue, 08 Mar 2022 05:49:52 -0000

https://sourceware.org/git/gitweb.cgi?p=glibc.git;h=91a317cc97eddbbce58c32e8eef68199ff251c91

commit 91a317cc97eddbbce58c32e8eef68199ff251c91
Author: Sunil K Pandey
Date:   Mon Mar 7 10:47:12 2022 -0800

    x86_64: Fix svml_d_exp28_core_avx512.S code formatting

    This commit contains the following formatting changes:

    1. Instructions preceded by a tab.
    2. Instructions less than 8 characters in length have a tab
       between the mnemonic and the first operand.
    3. Instructions greater than 7 characters in length have a space
       between the mnemonic and the first operand.
    4. Tabs between `#define`d names and their values.
    5. 8 spaces at the beginning of a line replaced by a tab.
    6. Indent comments with code.
    7. Remove redundant .text section.
    8. 1 space between line content and line comment.
    9. Space after all commas.

    Reviewed-by: Noah Goldstein

Diff:
---
 .../fpu/multiarch/svml_d_exp28_core_avx512.S | 439 ++++++++++-----------
 1 file changed, 219 insertions(+), 220 deletions(-)

diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_exp28_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_exp28_core_avx512.S
index d560a901c4..7a85fd8b18 100644
--- a/sysdeps/x86_64/fpu/multiarch/svml_d_exp28_core_avx512.S
+++ b/sysdeps/x86_64/fpu/multiarch/svml_d_exp28_core_avx512.S
@@ -52,250 +52,249 @@
 /* Offsets for data table __svml_dexp2_data_internal_avx512 */
-#define Frac_PowerD0 0
-#define poly_coeff1 128
-#define poly_coeff2 192
-#define poly_coeff3 256
-#define poly_coeff4 320
-#define poly_coeff5 384
-#define poly_coeff6 448
-#define add_const 512
-#define AbsMask 576
-#define Threshold 640
-#define _lIndexMask 704
+#define Frac_PowerD0	0
+#define poly_coeff1	128
+#define poly_coeff2	192
+#define poly_coeff3	256
+#define poly_coeff4	320
+#define poly_coeff5	384
+#define poly_coeff6	448
+#define add_const	512
+#define AbsMask	576
+#define Threshold	640
+#define _lIndexMask	704
 #include
-	.text
-	.section .text.evex512,"ax",@progbits
+	.section .text.evex512, "ax", @progbits
 ENTRY(_ZGVeN8v_exp2_skx)
-        pushq %rbp
-        cfi_def_cfa_offset(16)
-        movq %rsp, %rbp
-        cfi_def_cfa(6, 16)
-        cfi_offset(6, -16)
-        andq $-64, %rsp
-        subq $192, %rsp
-        vmovups poly_coeff5+__svml_dexp2_data_internal_avx512(%rip), %zmm14
-        vmovups poly_coeff6+__svml_dexp2_data_internal_avx512(%rip), %zmm6
-
-/*
- * Reduced argument
- * where VREDUCE is available
- */
-        vreducepd $65, {sae}, %zmm0, %zmm10
-        vmovups poly_coeff4+__svml_dexp2_data_internal_avx512(%rip), %zmm7
-        vmovups add_const+__svml_dexp2_data_internal_avx512(%rip), %zmm3
-        vmovups poly_coeff3+__svml_dexp2_data_internal_avx512(%rip), %zmm8
-        vmovups
__svml_dexp2_data_internal_avx512(%rip), %zmm13 - -/* c6*r + c5 */ - vfmadd231pd {rn-sae}, %zmm10, %zmm6, %zmm14 - vmovups poly_coeff2+__svml_dexp2_data_internal_avx512(%rip), %zmm9 - vmovups Threshold+__svml_dexp2_data_internal_avx512(%rip), %zmm2 - -/* - * - * HA - * Variables and constants - * Load constants and vector(s) - */ - vmovups poly_coeff1+__svml_dexp2_data_internal_avx512(%rip), %zmm11 - -/* c6*r^2 + c5*r + c4 */ - vfmadd213pd {rn-sae}, %zmm7, %zmm10, %zmm14 - -/* - * Integer form of K+0.b1b2b3b4 in lower bits - call K_plus_f0 - * Mantisssa of normalized double precision FP: 1.b1b2...b52 - */ - vaddpd {rd-sae}, %zmm3, %zmm0, %zmm4 - vandpd AbsMask+__svml_dexp2_data_internal_avx512(%rip), %zmm0, %zmm1 - -/* c6*r^3 + c5*r^2 + c4*r + c3 */ - vfmadd213pd {rn-sae}, %zmm8, %zmm10, %zmm14 - vcmppd $29, {sae}, %zmm2, %zmm1, %k0 - -/* c6*r^4 + c5*r^3 + c4*r^2 + c3*r + c2 */ - vfmadd213pd {rn-sae}, %zmm9, %zmm10, %zmm14 - kmovw %k0, %edx - -/* c6*r^5 + c5*r^4 + c4*r^3 + c3*r^2 + c2*r + c1 */ - vfmadd213pd {rn-sae}, %zmm11, %zmm10, %zmm14 - -/* Table value: 2^(0.b1b2b3b4) */ - vpandq _lIndexMask+__svml_dexp2_data_internal_avx512(%rip), %zmm4, %zmm5 - vpermt2pd Frac_PowerD0+64+__svml_dexp2_data_internal_avx512(%rip), %zmm5, %zmm13 - -/* T*r */ - vmulpd {rn-sae}, %zmm10, %zmm13, %zmm12 - -/* T + (T*r*(c6*r^5 + c5*r^4 + c4*r^3 + c3*r^2 + c2*r + c1)) */ - vfmadd213pd {rn-sae}, %zmm13, %zmm12, %zmm14 - -/* Scaling placed at the end to avoid accuracy loss when T*r*scale underflows */ - vscalefpd {rn-sae}, %zmm0, %zmm14, %zmm1 - testl %edx, %edx - -/* Go to special inputs processing branch */ - jne L(SPECIAL_VALUES_BRANCH) - # LOE rbx r12 r13 r14 r15 edx zmm0 zmm1 - -/* Restore registers - * and exit the function - */ + pushq %rbp + cfi_def_cfa_offset(16) + movq %rsp, %rbp + cfi_def_cfa(6, 16) + cfi_offset(6, -16) + andq $-64, %rsp + subq $192, %rsp + vmovups poly_coeff5+__svml_dexp2_data_internal_avx512(%rip), %zmm14 + vmovups poly_coeff6+__svml_dexp2_data_internal_avx512(%rip), %zmm6 + + /* + * Reduced argument + * where VREDUCE is available + */ + vreducepd $65, {sae}, %zmm0, %zmm10 + vmovups poly_coeff4+__svml_dexp2_data_internal_avx512(%rip), %zmm7 + vmovups add_const+__svml_dexp2_data_internal_avx512(%rip), %zmm3 + vmovups poly_coeff3+__svml_dexp2_data_internal_avx512(%rip), %zmm8 + vmovups __svml_dexp2_data_internal_avx512(%rip), %zmm13 + + /* c6*r + c5 */ + vfmadd231pd {rn-sae}, %zmm10, %zmm6, %zmm14 + vmovups poly_coeff2+__svml_dexp2_data_internal_avx512(%rip), %zmm9 + vmovups Threshold+__svml_dexp2_data_internal_avx512(%rip), %zmm2 + + /* + * + * HA + * Variables and constants + * Load constants and vector(s) + */ + vmovups poly_coeff1+__svml_dexp2_data_internal_avx512(%rip), %zmm11 + + /* c6*r^2 + c5*r + c4 */ + vfmadd213pd {rn-sae}, %zmm7, %zmm10, %zmm14 + + /* + * Integer form of K+0.b1b2b3b4 in lower bits - call K_plus_f0 + * Mantisssa of normalized double precision FP: 1.b1b2...b52 + */ + vaddpd {rd-sae}, %zmm3, %zmm0, %zmm4 + vandpd AbsMask+__svml_dexp2_data_internal_avx512(%rip), %zmm0, %zmm1 + + /* c6*r^3 + c5*r^2 + c4*r + c3 */ + vfmadd213pd {rn-sae}, %zmm8, %zmm10, %zmm14 + vcmppd $29, {sae}, %zmm2, %zmm1, %k0 + + /* c6*r^4 + c5*r^3 + c4*r^2 + c3*r + c2 */ + vfmadd213pd {rn-sae}, %zmm9, %zmm10, %zmm14 + kmovw %k0, %edx + + /* c6*r^5 + c5*r^4 + c4*r^3 + c3*r^2 + c2*r + c1 */ + vfmadd213pd {rn-sae}, %zmm11, %zmm10, %zmm14 + + /* Table value: 2^(0.b1b2b3b4) */ + vpandq _lIndexMask+__svml_dexp2_data_internal_avx512(%rip), %zmm4, %zmm5 + vpermt2pd 
Frac_PowerD0+64+__svml_dexp2_data_internal_avx512(%rip), %zmm5, %zmm13 + + /* T*r */ + vmulpd {rn-sae}, %zmm10, %zmm13, %zmm12 + + /* T + (T*r*(c6*r^5 + c5*r^4 + c4*r^3 + c3*r^2 + c2*r + c1)) */ + vfmadd213pd {rn-sae}, %zmm13, %zmm12, %zmm14 + + /* Scaling placed at the end to avoid accuracy loss when T*r*scale underflows */ + vscalefpd {rn-sae}, %zmm0, %zmm14, %zmm1 + testl %edx, %edx + + /* Go to special inputs processing branch */ + jne L(SPECIAL_VALUES_BRANCH) + # LOE rbx r12 r13 r14 r15 edx zmm0 zmm1 + + /* Restore registers + * and exit the function + */ L(EXIT): - vmovaps %zmm1, %zmm0 - movq %rbp, %rsp - popq %rbp - cfi_def_cfa(7, 8) - cfi_restore(6) - ret - cfi_def_cfa(6, 16) - cfi_offset(6, -16) - -/* Branch to process - * special inputs - */ + vmovaps %zmm1, %zmm0 + movq %rbp, %rsp + popq %rbp + cfi_def_cfa(7, 8) + cfi_restore(6) + ret + cfi_def_cfa(6, 16) + cfi_offset(6, -16) + + /* Branch to process + * special inputs + */ L(SPECIAL_VALUES_BRANCH): - vmovups %zmm0, 64(%rsp) - vmovups %zmm1, 128(%rsp) - # LOE rbx r12 r13 r14 r15 edx zmm1 - - xorl %eax, %eax - # LOE rbx r12 r13 r14 r15 eax edx - - vzeroupper - movq %r12, 16(%rsp) - /* DW_CFA_expression: r12 (r12) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -64; DW_OP_and; DW_OP_const4s: -176; DW_OP_plus) */ - .cfi_escape 0x10, 0x0c, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x50, 0xff, 0xff, 0xff, 0x22 - movl %eax, %r12d - movq %r13, 8(%rsp) - /* DW_CFA_expression: r13 (r13) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -64; DW_OP_and; DW_OP_const4s: -184; DW_OP_plus) */ - .cfi_escape 0x10, 0x0d, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x48, 0xff, 0xff, 0xff, 0x22 - movl %edx, %r13d - movq %r14, (%rsp) - /* DW_CFA_expression: r14 (r14) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -64; DW_OP_and; DW_OP_const4s: -192; DW_OP_plus) */ - .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22 - # LOE rbx r15 r12d r13d - -/* Range mask - * bits check - */ + vmovups %zmm0, 64(%rsp) + vmovups %zmm1, 128(%rsp) + # LOE rbx r12 r13 r14 r15 edx zmm1 + + xorl %eax, %eax + # LOE rbx r12 r13 r14 r15 eax edx + + vzeroupper + movq %r12, 16(%rsp) + /* DW_CFA_expression: r12 (r12) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -64; DW_OP_and; DW_OP_const4s: -176; DW_OP_plus) */ + .cfi_escape 0x10, 0x0c, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x50, 0xff, 0xff, 0xff, 0x22 + movl %eax, %r12d + movq %r13, 8(%rsp) + /* DW_CFA_expression: r13 (r13) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -64; DW_OP_and; DW_OP_const4s: -184; DW_OP_plus) */ + .cfi_escape 0x10, 0x0d, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x48, 0xff, 0xff, 0xff, 0x22 + movl %edx, %r13d + movq %r14, (%rsp) + /* DW_CFA_expression: r14 (r14) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -64; DW_OP_and; DW_OP_const4s: -192; DW_OP_plus) */ + .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22 + # LOE rbx r15 r12d r13d + + /* Range mask + * bits check + */ L(RANGEMASK_CHECK): - btl %r12d, %r13d + btl %r12d, %r13d -/* Call scalar math function */ - jc L(SCALAR_MATH_CALL) - # LOE rbx r15 r12d r13d + /* Call scalar math function */ + jc L(SCALAR_MATH_CALL) + # LOE rbx r15 r12d r13d -/* Special inputs - * processing loop - */ + /* Special inputs + * processing loop + */ L(SPECIAL_VALUES_LOOP): - incl %r12d - cmpl $8, %r12d - -/* Check bits in range mask */ - jl L(RANGEMASK_CHECK) - # LOE rbx r15 r12d r13d - - movq 16(%rsp), 
%r12 - cfi_restore(12) - movq 8(%rsp), %r13 - cfi_restore(13) - movq (%rsp), %r14 - cfi_restore(14) - vmovups 128(%rsp), %zmm1 - -/* Go to exit */ - jmp L(EXIT) - /* DW_CFA_expression: r12 (r12) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -64; DW_OP_and; DW_OP_const4s: -176; DW_OP_plus) */ - .cfi_escape 0x10, 0x0c, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x50, 0xff, 0xff, 0xff, 0x22 - /* DW_CFA_expression: r13 (r13) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -64; DW_OP_and; DW_OP_const4s: -184; DW_OP_plus) */ - .cfi_escape 0x10, 0x0d, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x48, 0xff, 0xff, 0xff, 0x22 - /* DW_CFA_expression: r14 (r14) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -64; DW_OP_and; DW_OP_const4s: -192; DW_OP_plus) */ - .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22 - # LOE rbx r12 r13 r14 r15 zmm1 - -/* Scalar math fucntion call - * to process special input - */ + incl %r12d + cmpl $8, %r12d + + /* Check bits in range mask */ + jl L(RANGEMASK_CHECK) + # LOE rbx r15 r12d r13d + + movq 16(%rsp), %r12 + cfi_restore(12) + movq 8(%rsp), %r13 + cfi_restore(13) + movq (%rsp), %r14 + cfi_restore(14) + vmovups 128(%rsp), %zmm1 + + /* Go to exit */ + jmp L(EXIT) + /* DW_CFA_expression: r12 (r12) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -64; DW_OP_and; DW_OP_const4s: -176; DW_OP_plus) */ + .cfi_escape 0x10, 0x0c, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x50, 0xff, 0xff, 0xff, 0x22 + /* DW_CFA_expression: r13 (r13) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -64; DW_OP_and; DW_OP_const4s: -184; DW_OP_plus) */ + .cfi_escape 0x10, 0x0d, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x48, 0xff, 0xff, 0xff, 0x22 + /* DW_CFA_expression: r14 (r14) (DW_OP_lit8; DW_OP_minus; DW_OP_const4s: -64; DW_OP_and; DW_OP_const4s: -192; DW_OP_plus) */ + .cfi_escape 0x10, 0x0e, 0x0e, 0x38, 0x1c, 0x0d, 0xc0, 0xff, 0xff, 0xff, 0x1a, 0x0d, 0x40, 0xff, 0xff, 0xff, 0x22 + # LOE rbx r12 r13 r14 r15 zmm1 + + /* Scalar math fucntion call + * to process special input + */ L(SCALAR_MATH_CALL): - movl %r12d, %r14d - movsd 64(%rsp,%r14,8), %xmm0 - call exp2@PLT - # LOE rbx r14 r15 r12d r13d xmm0 + movl %r12d, %r14d + movsd 64(%rsp, %r14, 8), %xmm0 + call exp2@PLT + # LOE rbx r14 r15 r12d r13d xmm0 - movsd %xmm0, 128(%rsp,%r14,8) + movsd %xmm0, 128(%rsp, %r14, 8) -/* Process special inputs in loop */ - jmp L(SPECIAL_VALUES_LOOP) - # LOE rbx r15 r12d r13d + /* Process special inputs in loop */ + jmp L(SPECIAL_VALUES_LOOP) + # LOE rbx r15 r12d r13d END(_ZGVeN8v_exp2_skx) - .section .rodata, "a" - .align 64 + .section .rodata, "a" + .align 64 #ifdef __svml_dexp2_data_internal_avx512_typedef typedef unsigned int VUINT32; typedef struct { - __declspec(align(64)) VUINT32 Frac_PowerD0[16][2]; - __declspec(align(64)) VUINT32 poly_coeff1[8][2]; - __declspec(align(64)) VUINT32 poly_coeff2[8][2]; - __declspec(align(64)) VUINT32 poly_coeff3[8][2]; - __declspec(align(64)) VUINT32 poly_coeff4[8][2]; - __declspec(align(64)) VUINT32 poly_coeff5[8][2]; - __declspec(align(64)) VUINT32 poly_coeff6[8][2]; - __declspec(align(64)) VUINT32 add_const[8][2]; - __declspec(align(64)) VUINT32 AbsMask[8][2]; - __declspec(align(64)) VUINT32 Threshold[8][2]; - __declspec(align(64)) VUINT32 _lIndexMask[8][2]; + __declspec(align(64)) VUINT32 Frac_PowerD0[16][2]; + __declspec(align(64)) VUINT32 poly_coeff1[8][2]; + __declspec(align(64)) VUINT32 poly_coeff2[8][2]; + __declspec(align(64)) VUINT32 poly_coeff3[8][2]; + 
__declspec(align(64)) VUINT32 poly_coeff4[8][2]; + __declspec(align(64)) VUINT32 poly_coeff5[8][2]; + __declspec(align(64)) VUINT32 poly_coeff6[8][2]; + __declspec(align(64)) VUINT32 add_const[8][2]; + __declspec(align(64)) VUINT32 AbsMask[8][2]; + __declspec(align(64)) VUINT32 Threshold[8][2]; + __declspec(align(64)) VUINT32 _lIndexMask[8][2]; } __svml_dexp2_data_internal_avx512; #endif __svml_dexp2_data_internal_avx512: - /*== Frac_PowerD0 ==*/ - .quad 0x3FF0000000000000 - .quad 0x3FF0B5586CF9890F - .quad 0x3FF172B83C7D517B - .quad 0x3FF2387A6E756238 - .quad 0x3FF306FE0A31B715 - .quad 0x3FF3DEA64C123422 - .quad 0x3FF4BFDAD5362A27 - .quad 0x3FF5AB07DD485429 - .quad 0x3FF6A09E667F3BCD - .quad 0x3FF7A11473EB0187 - .quad 0x3FF8ACE5422AA0DB - .quad 0x3FF9C49182A3F090 - .quad 0x3FFAE89F995AD3AD - .quad 0x3FFC199BDD85529C - .quad 0x3FFD5818DCFBA487 - .quad 0x3FFEA4AFA2A490DA - .align 64 - .quad 0x3FE62E42FEFA398B, 0x3FE62E42FEFA398B, 0x3FE62E42FEFA398B, 0x3FE62E42FEFA398B, 0x3FE62E42FEFA398B, 0x3FE62E42FEFA398B, 0x3FE62E42FEFA398B, 0x3FE62E42FEFA398B /*== poly_coeff1 ==*/ - .align 64 - .quad 0x3FCEBFBDFF84555A, 0x3FCEBFBDFF84555A, 0x3FCEBFBDFF84555A, 0x3FCEBFBDFF84555A, 0x3FCEBFBDFF84555A, 0x3FCEBFBDFF84555A, 0x3FCEBFBDFF84555A, 0x3FCEBFBDFF84555A /*== poly_coeff2 ==*/ - .align 64 - .quad 0x3FAC6B08D4AD86B9, 0x3FAC6B08D4AD86B9, 0x3FAC6B08D4AD86B9, 0x3FAC6B08D4AD86B9, 0x3FAC6B08D4AD86B9, 0x3FAC6B08D4AD86B9, 0x3FAC6B08D4AD86B9, 0x3FAC6B08D4AD86B9 /*== poly_coeff3 ==*/ - .align 64 - .quad 0x3F83B2AD1B172252, 0x3F83B2AD1B172252, 0x3F83B2AD1B172252, 0x3F83B2AD1B172252, 0x3F83B2AD1B172252, 0x3F83B2AD1B172252, 0x3F83B2AD1B172252, 0x3F83B2AD1B172252 /*== poly_coeff4 ==*/ - .align 64 - .quad 0x3F55D7472713CD19, 0x3F55D7472713CD19, 0x3F55D7472713CD19, 0x3F55D7472713CD19, 0x3F55D7472713CD19, 0x3F55D7472713CD19, 0x3F55D7472713CD19, 0x3F55D7472713CD19 /*== poly_coeff5 ==*/ - .align 64 - .quad 0x3F24A1D7F526371B, 0x3F24A1D7F526371B, 0x3F24A1D7F526371B, 0x3F24A1D7F526371B, 0x3F24A1D7F526371B, 0x3F24A1D7F526371B, 0x3F24A1D7F526371B, 0x3F24A1D7F526371B /*== poly_coeff6 ==*/ - .align 64 - .quad 0x42F8000000000000, 0x42F8000000000000, 0x42F8000000000000, 0x42F8000000000000, 0x42F8000000000000, 0x42F8000000000000, 0x42F8000000000000, 0x42F8000000000000 /* add_const */ - .align 64 - .quad 0x7fffffffffffffff, 0x7fffffffffffffff, 0x7fffffffffffffff, 0x7fffffffffffffff, 0x7fffffffffffffff, 0x7fffffffffffffff, 0x7fffffffffffffff, 0x7fffffffffffffff /* AbsMask */ - .align 64 - .quad 0x408fefff00000000, 0x408fefff00000000, 0x408fefff00000000, 0x408fefff00000000, 0x408fefff00000000, 0x408fefff00000000, 0x408fefff00000000, 0x408fefff00000000 /* Threshold */ - .align 64 - .quad 0x000000000000000F, 0x000000000000000F, 0x000000000000000F, 0x000000000000000F, 0x000000000000000F, 0x000000000000000F, 0x000000000000000F, 0x000000000000000F /* _lIndexMask */ - .align 64 - .type __svml_dexp2_data_internal_avx512,@object - .size __svml_dexp2_data_internal_avx512,.-__svml_dexp2_data_internal_avx512 + /* Frac_PowerD0 */ + .quad 0x3FF0000000000000 + .quad 0x3FF0B5586CF9890F + .quad 0x3FF172B83C7D517B + .quad 0x3FF2387A6E756238 + .quad 0x3FF306FE0A31B715 + .quad 0x3FF3DEA64C123422 + .quad 0x3FF4BFDAD5362A27 + .quad 0x3FF5AB07DD485429 + .quad 0x3FF6A09E667F3BCD + .quad 0x3FF7A11473EB0187 + .quad 0x3FF8ACE5422AA0DB + .quad 0x3FF9C49182A3F090 + .quad 0x3FFAE89F995AD3AD + .quad 0x3FFC199BDD85529C + .quad 0x3FFD5818DCFBA487 + .quad 0x3FFEA4AFA2A490DA + .align 64 + .quad 0x3FE62E42FEFA398B, 0x3FE62E42FEFA398B, 0x3FE62E42FEFA398B, 
0x3FE62E42FEFA398B, 0x3FE62E42FEFA398B, 0x3FE62E42FEFA398B, 0x3FE62E42FEFA398B, 0x3FE62E42FEFA398B /* == poly_coeff1 == */ + .align 64 + .quad 0x3FCEBFBDFF84555A, 0x3FCEBFBDFF84555A, 0x3FCEBFBDFF84555A, 0x3FCEBFBDFF84555A, 0x3FCEBFBDFF84555A, 0x3FCEBFBDFF84555A, 0x3FCEBFBDFF84555A, 0x3FCEBFBDFF84555A /* == poly_coeff2 == */ + .align 64 + .quad 0x3FAC6B08D4AD86B9, 0x3FAC6B08D4AD86B9, 0x3FAC6B08D4AD86B9, 0x3FAC6B08D4AD86B9, 0x3FAC6B08D4AD86B9, 0x3FAC6B08D4AD86B9, 0x3FAC6B08D4AD86B9, 0x3FAC6B08D4AD86B9 /* == poly_coeff3 == */ + .align 64 + .quad 0x3F83B2AD1B172252, 0x3F83B2AD1B172252, 0x3F83B2AD1B172252, 0x3F83B2AD1B172252, 0x3F83B2AD1B172252, 0x3F83B2AD1B172252, 0x3F83B2AD1B172252, 0x3F83B2AD1B172252 /* == poly_coeff4 == */ + .align 64 + .quad 0x3F55D7472713CD19, 0x3F55D7472713CD19, 0x3F55D7472713CD19, 0x3F55D7472713CD19, 0x3F55D7472713CD19, 0x3F55D7472713CD19, 0x3F55D7472713CD19, 0x3F55D7472713CD19 /* == poly_coeff5 == */ + .align 64 + .quad 0x3F24A1D7F526371B, 0x3F24A1D7F526371B, 0x3F24A1D7F526371B, 0x3F24A1D7F526371B, 0x3F24A1D7F526371B, 0x3F24A1D7F526371B, 0x3F24A1D7F526371B, 0x3F24A1D7F526371B /* == poly_coeff6 == */ + .align 64 + .quad 0x42F8000000000000, 0x42F8000000000000, 0x42F8000000000000, 0x42F8000000000000, 0x42F8000000000000, 0x42F8000000000000, 0x42F8000000000000, 0x42F8000000000000 /* add_const */ + .align 64 + .quad 0x7fffffffffffffff, 0x7fffffffffffffff, 0x7fffffffffffffff, 0x7fffffffffffffff, 0x7fffffffffffffff, 0x7fffffffffffffff, 0x7fffffffffffffff, 0x7fffffffffffffff /* AbsMask */ + .align 64 + .quad 0x408fefff00000000, 0x408fefff00000000, 0x408fefff00000000, 0x408fefff00000000, 0x408fefff00000000, 0x408fefff00000000, 0x408fefff00000000, 0x408fefff00000000 /* Threshold */ + .align 64 + .quad 0x000000000000000F, 0x000000000000000F, 0x000000000000000F, 0x000000000000000F, 0x000000000000000F, 0x000000000000000F, 0x000000000000000F, 0x000000000000000F /* _lIndexMask */ + .align 64 + .type __svml_dexp2_data_internal_avx512, @object + .size __svml_dexp2_data_internal_avx512, .-__svml_dexp2_data_internal_avx512
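
As a quick illustration of the conventions listed in the commit message above, here is a small hand-written assembly fragment. It is not taken from the patched file; the #define name, instructions, and operands are made up purely to show the layout rules.

#define poly_offset	128 /* hypothetical name; a tab separates it from its value */

	/* Comments are indented with the code they describe.  */
	movq	%rsp, %rbp /* mnemonic shorter than 8 characters: tab before the first operand */
	vmovups	(%rdi), %zmm0 /* 7-character mnemonic: still a tab */
	vfmadd213pd {rn-sae}, %zmm2, %zmm1, %zmm0 /* mnemonic longer than 7 characters: one space */
	addq	$8, %rdi /* tab indentation instead of 8 spaces; a space after every comma */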