* [glibc] AArch64: Improve SVE memcpy and memmove
@ 2023-02-06 16:28 Wilco Dijkstra
From: Wilco Dijkstra @ 2023-02-06 16:28 UTC
  To: glibc-cvs

https://sourceware.org/git/gitweb.cgi?p=glibc.git;h=d2d3f3720ce627a4fe154d8dd14db716a32bcc6e

commit d2d3f3720ce627a4fe154d8dd14db716a32bcc6e
Author: Wilco Dijkstra <wilco.dijkstra@arm.com>
Date:   Wed Feb 1 18:45:19 2023 +0000

    AArch64: Improve SVE memcpy and memmove
    
    Improve SVE memcpy by copying 2 vectors if the size is small enough.
    This improves performance of random memcpy by ~9% on Neoverse V1, and
    33-64 byte copies are ~16% faster.
    
    Reviewed-by: Szabolcs Nagy <szabolcs.nagy@arm.com>
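
For illustration, a rough C equivalent of the new small-copy fast path, written
with the ACLE SVE intrinsics from <arm_sve.h>, is sketched below.  The function
name and wrapper are hypothetical and not part of the commit; the sketch assumes
the caller has already established count <= 2 * VL (as the cmp count, vlen, lsl 1
/ b.hi check does in the assembly) and that the file is built with an
SVE-enabled compiler (e.g. -march=armv8-a+sve).

#include <arm_sve.h>
#include <stddef.h>
#include <stdint.h>

/* Hypothetical sketch: copy count bytes, where count <= 2 * vector length.
   Two governing predicates cover up to two vectors' worth of bytes; when
   count <= VL the second predicate is all-false, so the second load/store
   pair does nothing and no separate single-vector branch is needed.  */
static void
copy_up_to_two_vectors (uint8_t *dst, const uint8_t *src, size_t count)
{
  uint64_t vl = svcntb ();                     /* vector length in bytes (cntb)   */
  svbool_t p0 = svwhilelt_b8_u64 (0, count);   /* whilelo p0.b, xzr, count        */
  svbool_t p1 = svwhilelt_b8_u64 (vl, count);  /* whilelo p1.b, vlen, count       */
  svuint8_t z0 = svld1_vnum_u8 (p0, src, 0);   /* ld1b z0.b, p0/z, [src, 0, mul vl] */
  svuint8_t z1 = svld1_vnum_u8 (p1, src, 1);   /* ld1b z1.b, p1/z, [src, 1, mul vl] */
  svst1_vnum_u8 (p0, dst, 0, z0);              /* st1b z0.b, p0, [dst, 0, mul vl]   */
  svst1_vnum_u8 (p1, dst, 1, z1);              /* st1b z1.b, p1, [dst, 1, mul vl]   */
}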

Diff:
---
 sysdeps/aarch64/multiarch/memcpy_sve.S | 34 ++++++++++++++--------------------
 1 file changed, 14 insertions(+), 20 deletions(-)

diff --git a/sysdeps/aarch64/multiarch/memcpy_sve.S b/sysdeps/aarch64/multiarch/memcpy_sve.S
index f4dc214f60..d11be6a443 100644
--- a/sysdeps/aarch64/multiarch/memcpy_sve.S
+++ b/sysdeps/aarch64/multiarch/memcpy_sve.S
@@ -67,14 +67,15 @@ ENTRY (__memcpy_sve)
 
 	cmp	count, 128
 	b.hi	L(copy_long)
-	cmp	count, 32
+	cntb	vlen
+	cmp	count, vlen, lsl 1
 	b.hi	L(copy32_128)
-
 	whilelo p0.b, xzr, count
-	cntb	vlen
-	tbnz	vlen, 4, L(vlen128)
-	ld1b	z0.b, p0/z, [src]
-	st1b	z0.b, p0, [dstin]
+	whilelo p1.b, vlen, count
+	ld1b	z0.b, p0/z, [src, 0, mul vl]
+	ld1b	z1.b, p1/z, [src, 1, mul vl]
+	st1b	z0.b, p0, [dstin, 0, mul vl]
+	st1b	z1.b, p1, [dstin, 1, mul vl]
 	ret
 
 	/* Medium copies: 33..128 bytes.  */
@@ -102,14 +103,6 @@ L(copy96):
 	stp	C_q, D_q, [dstend, -32]
 	ret
 
-L(vlen128):
-	whilelo p1.b, vlen, count
-	ld1b	z0.b, p0/z, [src, 0, mul vl]
-	ld1b	z1.b, p1/z, [src, 1, mul vl]
-	st1b	z0.b, p0, [dstin, 0, mul vl]
-	st1b	z1.b, p1, [dstin, 1, mul vl]
-	ret
-
 	.p2align 4
 	/* Copy more than 128 bytes.  */
 L(copy_long):
@@ -158,14 +151,15 @@ ENTRY (__memmove_sve)
 
 	cmp	count, 128
 	b.hi	L(move_long)
-	cmp	count, 32
+	cntb	vlen
+	cmp	count, vlen, lsl 1
 	b.hi	L(copy32_128)
-
 	whilelo p0.b, xzr, count
-	cntb	vlen
-	tbnz	vlen, 4, L(vlen128)
-	ld1b	z0.b, p0/z, [src]
-	st1b	z0.b, p0, [dstin]
+	whilelo p1.b, vlen, count
+	ld1b	z0.b, p0/z, [src, 0, mul vl]
+	ld1b	z1.b, p1/z, [src, 1, mul vl]
+	st1b	z0.b, p0, [dstin, 0, mul vl]
+	st1b	z1.b, p1, [dstin, 1, mul vl]
 	ret
 
 	.p2align 4

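A concrete example of why the separate L(vlen128) path is no longer needed
(vector length chosen for illustration): whilelo activates lane i of p0 when
i < count and lane i of p1 when vlen + i < count.  On a 256-bit SVE machine
(vlen = 32), a 48-byte copy activates all 32 lanes of p0 and the first 16 lanes
of p1, so the two predicated load/store pairs together move exactly 48 bytes;
a 20-byte copy leaves p1 entirely false and the second pair does nothing.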