From mboxrd@z Thu Jan 1 00:00:00 1970
Return-Path:
Received: by sourceware.org (Postfix, from userid 1895)
	id 3B46C386101D; Wed, 10 Apr 2024 15:07:32 +0000 (GMT)
DKIM-Filter: OpenDKIM Filter v2.11.0 sourceware.org 3B46C386101D
DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=sourceware.org;
	s=default; t=1712761652;
	bh=gW/RrIaarjwL/FtnU18nIfCcLiLokFSCHPTwELZfPmw=;
	h=From:To:Subject:Date:From;
	b=wJOUhZetzfUkjxdjYIWWH9qSzamJbaVKmZTFQQIqMmsT2aB4YdXzt1xOGBikVojhD
	 1jOzxPJZChjR8V87FUaLYBkxIFodpedy0+FsfE/mwCSC3Ww3952a0PsLci5JNj7S/e
	 xJ8CyI8x+OAWYlKHWAXqoPHVb/l7Q9SBiV50b3u4=
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
From: Wilco Dijkstra
To: glibc-cvs@sourceware.org
Subject: [glibc/release/2.35/master] AArch64: Improve SVE memcpy and memmove
X-Act-Checkin: glibc
X-Git-Author: Wilco Dijkstra
X-Git-Refname: refs/heads/release/2.35/master
X-Git-Oldrev: 115c2c771764a539fce5bbf99c32d6df1eecf419
X-Git-Newrev: b9e93c5ff77cf7d486f03b25274aba4aeac95425
Message-Id: <20240410150732.3B46C386101D@sourceware.org>
Date: Wed, 10 Apr 2024 15:07:32 +0000 (GMT)
List-Id:

https://sourceware.org/git/gitweb.cgi?p=glibc.git;h=b9e93c5ff77cf7d486f03b25274aba4aeac95425

commit b9e93c5ff77cf7d486f03b25274aba4aeac95425
Author: Wilco Dijkstra
Date:   Wed Feb 1 18:45:19 2023 +0000

    AArch64: Improve SVE memcpy and memmove
    
    Improve SVE memcpy by copying 2 vectors if the size is small enough.
    This improves performance of random memcpy by ~9% on Neoverse V1, and
    33-64 byte copies are ~16% faster.
    
    Reviewed-by: Szabolcs Nagy
    (cherry picked from commit d2d3f3720ce627a4fe154d8dd14db716a32bcc6e)

Diff:
---
 sysdeps/aarch64/multiarch/memcpy_sve.S | 34 ++++++++++++++--------------------
 1 file changed, 14 insertions(+), 20 deletions(-)

diff --git a/sysdeps/aarch64/multiarch/memcpy_sve.S b/sysdeps/aarch64/multiarch/memcpy_sve.S
index a70907ec55..6bc8390fe8 100644
--- a/sysdeps/aarch64/multiarch/memcpy_sve.S
+++ b/sysdeps/aarch64/multiarch/memcpy_sve.S
@@ -67,14 +67,15 @@ ENTRY (__memcpy_sve)
 
 	cmp	count, 128
 	b.hi	L(copy_long)
-	cmp	count, 32
+	cntb	vlen
+	cmp	count, vlen, lsl 1
 	b.hi	L(copy32_128)
-
 	whilelo	p0.b, xzr, count
-	cntb	vlen
-	tbnz	vlen, 4, L(vlen128)
-	ld1b	z0.b, p0/z, [src]
-	st1b	z0.b, p0, [dstin]
+	whilelo	p1.b, vlen, count
+	ld1b	z0.b, p0/z, [src, 0, mul vl]
+	ld1b	z1.b, p1/z, [src, 1, mul vl]
+	st1b	z0.b, p0, [dstin, 0, mul vl]
+	st1b	z1.b, p1, [dstin, 1, mul vl]
 	ret
 
 	/* Medium copies: 33..128 bytes.  */
@@ -102,14 +103,6 @@ L(copy96):
 	stp	C_q, D_q, [dstend, -32]
 	ret
 
-L(vlen128):
-	whilelo	p1.b, vlen, count
-	ld1b	z0.b, p0/z, [src, 0, mul vl]
-	ld1b	z1.b, p1/z, [src, 1, mul vl]
-	st1b	z0.b, p0, [dstin, 0, mul vl]
-	st1b	z1.b, p1, [dstin, 1, mul vl]
-	ret
-
 	.p2align 4
 	/* Copy more than 128 bytes.  */
 L(copy_long):
@@ -158,14 +151,15 @@ ENTRY (__memmove_sve)
 
 	cmp	count, 128
 	b.hi	L(move_long)
-	cmp	count, 32
+	cntb	vlen
+	cmp	count, vlen, lsl 1
 	b.hi	L(copy32_128)
-
 	whilelo	p0.b, xzr, count
-	cntb	vlen
-	tbnz	vlen, 4, L(vlen128)
-	ld1b	z0.b, p0/z, [src]
-	st1b	z0.b, p0, [dstin]
+	whilelo	p1.b, vlen, count
+	ld1b	z0.b, p0/z, [src, 0, mul vl]
+	ld1b	z1.b, p1/z, [src, 1, mul vl]
+	st1b	z0.b, p0, [dstin, 0, mul vl]
+	st1b	z1.b, p1, [dstin, 1, mul vl]
 	ret
 
 	.p2align 4
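
For reference, the two-vector small-copy fast path in the hunks above can be sketched
in C using the Arm SVE ACLE intrinsics from arm_sve.h. This is only an illustrative
sketch of the technique, not glibc code: the function name copy_small_sve and its
arguments are invented for the example, and it assumes n <= 2 * svcntb(), i.e. exactly
the case the patched entry sequence handles before branching to L(copy32_128) or
L(copy_long). Build with an SVE-enabled compiler (e.g. -march=armv8-a+sve).

#include <arm_sve.h>
#include <stddef.h>
#include <stdint.h>

/* Illustrative sketch only: copy n bytes when n <= 2 * vector length.
   Mirrors the patched fast path: two WHILELO predicates, two predicated
   loads and two predicated stores, with no branch on the vector length.  */
static void
copy_small_sve (uint8_t *dst, const uint8_t *src, size_t n)
{
  uint64_t vl = svcntb ();                  /* cntb vlen                          */
  svbool_t p0 = svwhilelt_b8_u64 (0, n);    /* whilelo p0.b, xzr, count           */
  svbool_t p1 = svwhilelt_b8_u64 (vl, n);   /* whilelo p1.b, vlen, count          */
  svuint8_t z0 = svld1_u8 (p0, src);        /* ld1b z0.b, p0/z, [src, 0, mul vl]  */
  svuint8_t z1 = svld1_u8 (p1, src + vl);   /* ld1b z1.b, p1/z, [src, 1, mul vl]  */
  svst1_u8 (p0, dst, z0);                   /* st1b z0.b, p0, [dstin, 0, mul vl]  */
  svst1_u8 (p1, dst + vl, z1);              /* st1b z1.b, p1, [dstin, 1, mul vl]  */
}

Because the second predicate is all-false whenever n is at most one vector length, the
second load/store pair is a no-op in that case, which is why the old tbnz branch on the
vector length and the separate L(vlen128) path are no longer needed.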