From mboxrd@z Thu Jan  1 00:00:00 1970
Received: by sourceware.org (Postfix, from userid 7852)
	id C29453852776; Mon, 18 Jul 2022 20:58:37 +0000 (GMT)
DKIM-Filter: OpenDKIM Filter v2.11.0 sourceware.org C29453852776
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
From: Sunil Pandey
To: glibc-cvs@sourceware.org
Subject: [glibc/release/2.35/master] x86: Cleanup bounds checking in large memcpy case
X-Act-Checkin: glibc
X-Git-Author: Noah Goldstein
X-Git-Refname: refs/heads/release/2.35/master
X-Git-Oldrev: 863987a6ef50f9fe6fd677ac042f1840d66f7313
X-Git-Newrev: ce32ad91eb7b50442d033e3ef37d9800f8fcfdde
Message-Id: <20220718205837.C29453852776@sourceware.org>
Date: Mon, 18 Jul 2022 20:58:37 +0000 (GMT)
X-BeenThere: glibc-cvs@sourceware.org
X-Mailman-Version: 2.1.29
Precedence: list
List-Id: Glibc-cvs mailing list
X-List-Received-Date: Mon, 18 Jul 2022 20:58:37 -0000

https://sourceware.org/git/gitweb.cgi?p=glibc.git;h=ce32ad91eb7b50442d033e3ef37d9800f8fcfdde

commit ce32ad91eb7b50442d033e3ef37d9800f8fcfdde
Author: Noah Goldstein
Date:   Wed Jun 15 10:41:28 2022 -0700

    x86: Cleanup bounds checking in large memcpy case
    
    1. Fix incorrect lower-bound threshold in L(large_memcpy_2x).
       Previously it was using `__x86_rep_movsb_threshold` when it
       should have been using `__x86_shared_non_temporal_threshold`.
    
    2. Avoid reloading __x86_shared_non_temporal_threshold before the
       L(large_memcpy_4x) bounds check.
    
    3. Document the second bounds check for L(large_memcpy_4x) more
       clearly.
    
    (cherry picked from commit 89a25c6f64746732b87eaf433af0964b564d4a92)

Diff:
---
 .../x86_64/multiarch/memmove-vec-unaligned-erms.S | 29 ++++++++++++++++------
 1 file changed, 21 insertions(+), 8 deletions(-)

diff --git a/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S b/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
index af51177d5d..d1518b8bab 100644
--- a/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
+++ b/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
@@ -118,7 +118,13 @@
 # define LARGE_LOAD_SIZE (VEC_SIZE * 4)
 #endif
 
-/* Amount to shift rdx by to compare for memcpy_large_4x.  */
+/* Amount to shift __x86_shared_non_temporal_threshold by to get
+   the bound for memcpy_large_4x.  This is essentially used to
+   indicate that the copy is far beyond the scope of L3
+   (assuming no user config x86_non_temporal_threshold) and to
+   use a more aggressively unrolled loop.  NB: before
+   increasing the value also update the initialization of
+   x86_non_temporal_threshold.  */
 #ifndef LOG_4X_MEMCPY_THRESH
 # define LOG_4X_MEMCPY_THRESH 4
 #endif
@@ -724,9 +730,14 @@ L(skip_short_movsb_check):
 	.p2align 4,, 10
 #if (defined USE_MULTIARCH || VEC_SIZE == 16) && IS_IN (libc)
 L(large_memcpy_2x_check):
-	cmp	__x86_rep_movsb_threshold(%rip), %RDX_LP
-	jb	L(more_8x_vec_check)
+	/* Entry from L(large_memcpy_2x) has a redundant load of
+	   __x86_shared_non_temporal_threshold(%rip).  L(large_memcpy_2x)
+	   is only used for the non-erms memmove, which is generally
+	   less common.  */
 L(large_memcpy_2x):
+	mov	__x86_shared_non_temporal_threshold(%rip), %R11_LP
+	cmp	%R11_LP, %RDX_LP
+	jb	L(more_8x_vec_check)
 	/* To reach this point it is impossible for dst > src and
 	   overlap. Remaining to check is src > dst and overlap. rcx
 	   already contains dst - src. Negate rcx to get src - dst. If
@@ -774,18 +785,21 @@ L(large_memcpy_2x):
 	/* ecx contains -(dst - src). not ecx will return dst - src - 1
 	   which works for testing aliasing.  */
 	notl	%ecx
+	movq	%rdx, %r10
 	testl	$(PAGE_SIZE - VEC_SIZE * 8), %ecx
 	jz	L(large_memcpy_4x)
 
-	movq	%rdx, %r10
-	shrq	$LOG_4X_MEMCPY_THRESH, %r10
-	cmp	__x86_shared_non_temporal_threshold(%rip), %r10
+	/* r11 has __x86_shared_non_temporal_threshold.  Shift it left
+	   by LOG_4X_MEMCPY_THRESH to get the L(large_memcpy_4x)
+	   threshold.  */
+	shlq	$LOG_4X_MEMCPY_THRESH, %r11
+	cmp	%r11, %rdx
 	jae	L(large_memcpy_4x)
 
 	/* edx will store remainder size for copying tail.  */
 	andl	$(PAGE_SIZE * 2 - 1), %edx
 	/* r10 stores outer loop counter.  */
-	shrq	$((LOG_PAGE_SIZE + 1) - LOG_4X_MEMCPY_THRESH), %r10
+	shrq	$(LOG_PAGE_SIZE + 1), %r10
 	/* Copy 4x VEC at a time from 2 pages.  */
 	.p2align 4
 L(loop_large_memcpy_2x_outer):
@@ -850,7 +864,6 @@ L(large_memcpy_2x_end):
 
 	.p2align 4
 L(large_memcpy_4x):
-	movq	%rdx, %r10
 	/* edx will store remainder size for copying tail.  */
 	andl	$(PAGE_SIZE * 4 - 1), %edx
 	/* r10 stores outer loop counter.  */
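
In C terms, the corrected bounds checks amount to the sketch below. This
is illustrative only, not glibc code: `select_large_memcpy_path` and
`nt_threshold` are made-up names standing in for the branch targets and
the `__x86_shared_non_temporal_threshold` tunable, and the page-aliasing
test (`testl`/`jz` above) that can also route to L(large_memcpy_4x) is
omitted.

#include <stddef.h>

/* Mirrors the LOG_4X_MEMCPY_THRESH define from the patch.  */
#define LOG_4X_MEMCPY_THRESH 4

enum large_copy_path
{
  PATH_MORE_8X_VEC, /* Below the non-temporal threshold.  */
  PATH_LARGE_2X,    /* Non-temporal copy, 2 pages at a time.  */
  PATH_LARGE_4X     /* Far beyond L3; more aggressively unrolled.  */
};

/* Hypothetical helper.  The lower bound now uses the non-temporal
   threshold (the bug was comparing against the rep-movsb threshold),
   and the upper bound shifts the already-loaded threshold left by
   LOG_4X_MEMCPY_THRESH instead of reloading it and shifting the
   length right.  */
static enum large_copy_path
select_large_memcpy_path (size_t len, size_t nt_threshold)
{
  /* mov __x86_shared_non_temporal_threshold(%rip), %R11_LP
     cmp %R11_LP, %RDX_LP
     jb  L(more_8x_vec_check)  */
  if (len < nt_threshold)
    return PATH_MORE_8X_VEC;

  /* shlq $LOG_4X_MEMCPY_THRESH, %r11
     cmp  %r11, %rdx
     jae  L(large_memcpy_4x)  */
  if (len >= (nt_threshold << LOG_4X_MEMCPY_THRESH))
    return PATH_LARGE_4X;

  return PATH_LARGE_2X;
}

The hoisted `movq %rdx, %r10` paired with the simplified
`shrq $(LOG_PAGE_SIZE + 1), %r10` computes the same outer loop counter
as before, len >> (LOG_PAGE_SIZE + 1), just from the unshifted length,
which is why the old `- LOG_4X_MEMCPY_THRESH` correction can go away.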