public inbox for libc-alpha@sourceware.org
 help / color / mirror / Atom feed
* [PATCH] AArch64: Improve SVE memcpy and memmove
@ 2023-02-03 13:05 Wilco Dijkstra
  2023-02-06 14:29 ` Szabolcs Nagy
  0 siblings, 1 reply; 2+ messages in thread
From: Wilco Dijkstra @ 2023-02-03 13:05 UTC (permalink / raw)
  To: 'GNU C Library'; +Cc: Szabolcs Nagy

Improve SVE memcpy/memmove by copying 2 vectors if the size is small enough.
This improves performance of random memcpy by ~9% on Neoverse V1, and
memcpy/memmove of 33-64 bytes become ~16% faster.

Passes regression testing, OK for commit?

---

diff --git a/sysdeps/aarch64/multiarch/memcpy_sve.S b/sysdeps/aarch64/multiarch/memcpy_sve.S
index f4dc214f60bf25e818eb6b8de2d4093ad0c886e1..d11be6a44301af4bfd7fa4900555b769dc58d34d 100644
--- a/sysdeps/aarch64/multiarch/memcpy_sve.S
+++ b/sysdeps/aarch64/multiarch/memcpy_sve.S
@@ -67,14 +67,15 @@ ENTRY (__memcpy_sve)
 
 	cmp	count, 128
 	b.hi	L(copy_long)
-	cmp	count, 32
+	cntb	vlen
+	cmp	count, vlen, lsl 1
 	b.hi	L(copy32_128)
-
 	whilelo p0.b, xzr, count
-	cntb	vlen
-	tbnz	vlen, 4, L(vlen128)
-	ld1b	z0.b, p0/z, [src]
-	st1b	z0.b, p0, [dstin]
+	whilelo p1.b, vlen, count
+	ld1b	z0.b, p0/z, [src, 0, mul vl]
+	ld1b	z1.b, p1/z, [src, 1, mul vl]
+	st1b	z0.b, p0, [dstin, 0, mul vl]
+	st1b	z1.b, p1, [dstin, 1, mul vl]
 	ret
 
 	/* Medium copies: 33..128 bytes.  */
@@ -102,14 +103,6 @@ L(copy96):
 	stp	C_q, D_q, [dstend, -32]
 	ret
 
-L(vlen128):
-	whilelo p1.b, vlen, count
-	ld1b	z0.b, p0/z, [src, 0, mul vl]
-	ld1b	z1.b, p1/z, [src, 1, mul vl]
-	st1b	z0.b, p0, [dstin, 0, mul vl]
-	st1b	z1.b, p1, [dstin, 1, mul vl]
-	ret
-
 	.p2align 4
 	/* Copy more than 128 bytes.  */
 L(copy_long):
@@ -158,14 +151,15 @@ ENTRY (__memmove_sve)
 
 	cmp	count, 128
 	b.hi	L(move_long)
-	cmp	count, 32
+	cntb	vlen
+	cmp	count, vlen, lsl 1
 	b.hi	L(copy32_128)
-
 	whilelo p0.b, xzr, count
-	cntb	vlen
-	tbnz	vlen, 4, L(vlen128)
-	ld1b	z0.b, p0/z, [src]
-	st1b	z0.b, p0, [dstin]
+	whilelo p1.b, vlen, count
+	ld1b	z0.b, p0/z, [src, 0, mul vl]
+	ld1b	z1.b, p1/z, [src, 1, mul vl]
+	st1b	z0.b, p0, [dstin, 0, mul vl]
+	st1b	z1.b, p1, [dstin, 1, mul vl]
 	ret
 
 	.p2align 4


^ permalink raw reply	[flat|nested] 2+ messages in thread

* Re: [PATCH] AArch64: Improve SVE memcpy and memmove
  2023-02-03 13:05 [PATCH] AArch64: Improve SVE memcpy and memmove Wilco Dijkstra
@ 2023-02-06 14:29 ` Szabolcs Nagy
  0 siblings, 0 replies; 2+ messages in thread
From: Szabolcs Nagy @ 2023-02-06 14:29 UTC (permalink / raw)
  To: Wilco Dijkstra; +Cc: 'GNU C Library'

The 02/03/2023 13:05, Wilco Dijkstra wrote:
> Improve SVE memcpy/memmove by copying 2 vectors if the size is small enough.
> This improves performance of random memcpy by ~9% on Neoverse V1, and
> memcpy/memmove of 33-64 bytes become ~16% faster.
> 
> Passes regression testing, OK for commit?

This is ok to commit, thanks.

Reviewed-by: Szabolcs Nagy <szabolcs.nagy@arm.com>


> 
> ---
> 
> diff --git a/sysdeps/aarch64/multiarch/memcpy_sve.S b/sysdeps/aarch64/multiarch/memcpy_sve.S
> index f4dc214f60bf25e818eb6b8de2d4093ad0c886e1..d11be6a44301af4bfd7fa4900555b769dc58d34d 100644
> --- a/sysdeps/aarch64/multiarch/memcpy_sve.S
> +++ b/sysdeps/aarch64/multiarch/memcpy_sve.S
> @@ -67,14 +67,15 @@ ENTRY (__memcpy_sve)
> 
>         cmp     count, 128
>         b.hi    L(copy_long)
> -       cmp     count, 32
> +       cntb    vlen
> +       cmp     count, vlen, lsl 1
>         b.hi    L(copy32_128)
> -
>         whilelo p0.b, xzr, count
> -       cntb    vlen
> -       tbnz    vlen, 4, L(vlen128)
> -       ld1b    z0.b, p0/z, [src]
> -       st1b    z0.b, p0, [dstin]
> +       whilelo p1.b, vlen, count
> +       ld1b    z0.b, p0/z, [src, 0, mul vl]
> +       ld1b    z1.b, p1/z, [src, 1, mul vl]
> +       st1b    z0.b, p0, [dstin, 0, mul vl]
> +       st1b    z1.b, p1, [dstin, 1, mul vl]
>         ret
> 
>         /* Medium copies: 33..128 bytes.  */
> @@ -102,14 +103,6 @@ L(copy96):
>         stp     C_q, D_q, [dstend, -32]
>         ret
> 
> -L(vlen128):
> -       whilelo p1.b, vlen, count
> -       ld1b    z0.b, p0/z, [src, 0, mul vl]
> -       ld1b    z1.b, p1/z, [src, 1, mul vl]
> -       st1b    z0.b, p0, [dstin, 0, mul vl]
> -       st1b    z1.b, p1, [dstin, 1, mul vl]
> -       ret
> -
>         .p2align 4
>         /* Copy more than 128 bytes.  */
>  L(copy_long):
> @@ -158,14 +151,15 @@ ENTRY (__memmove_sve)
> 
>         cmp     count, 128
>         b.hi    L(move_long)
> -       cmp     count, 32
> +       cntb    vlen
> +       cmp     count, vlen, lsl 1
>         b.hi    L(copy32_128)
> -
>         whilelo p0.b, xzr, count
> -       cntb    vlen
> -       tbnz    vlen, 4, L(vlen128)
> -       ld1b    z0.b, p0/z, [src]
> -       st1b    z0.b, p0, [dstin]
> +       whilelo p1.b, vlen, count
> +       ld1b    z0.b, p0/z, [src, 0, mul vl]
> +       ld1b    z1.b, p1/z, [src, 1, mul vl]
> +       st1b    z0.b, p0, [dstin, 0, mul vl]
> +       st1b    z1.b, p1, [dstin, 1, mul vl]
>         ret
> 
>         .p2align 4
> 

^ permalink raw reply	[flat|nested] 2+ messages in thread

end of thread, other threads:[~2023-02-06 14:29 UTC | newest]

Thread overview: 2+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2023-02-03 13:05 [PATCH] AArch64: Improve SVE memcpy and memmove Wilco Dijkstra
2023-02-06 14:29 ` Szabolcs Nagy

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for read-only IMAP folder(s) and NNTP newsgroup(s).