Subject: [PATCH] AArch64: Optimize strnlen
From: Wilco Dijkstra @ 2023-01-12 15:54 UTC
  To: 'GNU C Library'; +Cc: Szabolcs Nagy

Optimize strnlen using the shrn instruction and improve the main loop.
Small strings are around 10% faster, large strings are 40% faster on
modern CPUs.  Passes regress.
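
As an aside, the shrn nibble-mask trick can be sketched in C with NEON
intrinsics.  This is an illustrative sketch, not the patch's code; the
helper name zero_byte_mask is invented here:

  #include <arm_neon.h>
  #include <stdint.h>

  /* Build a 64-bit mask with 4 bits per input byte.  cmeq sets a byte
     of the comparison to 0xff where data is zero; shrn by 4 on the
     eight 16-bit lanes keeps 4 bits of each comparison byte, narrowing
     the 128-bit result to 64 bits while preserving byte order.  */
  static inline uint64_t
  zero_byte_mask (uint8x16_t data)
  {
    uint8x16_t cmp = vceqq_u8 (data, vdupq_n_u8 (0));
    uint8x8_t nibbles = vshrn_n_u16 (vreinterpretq_u16_u8 (cmp), 4);
    return vget_lane_u64 (vreinterpret_u64_u8 (nibbles), 0);
  }

On little-endian the index of the first zero byte is then
__builtin_ctzll (mask) / 4; big-endian needs a bit reversal first,
which is what the rbit in the assembly below is for.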

---

diff --git a/sysdeps/aarch64/strnlen.S b/sysdeps/aarch64/strnlen.S
index 35fd14804d42ab90573f995b20cf65ba75042978..21112fbf760b7a99a6d153c00f9cd6b6bc144f3a 100644
--- a/sysdeps/aarch64/strnlen.S
+++ b/sysdeps/aarch64/strnlen.S
@@ -44,19 +44,16 @@
 
 /*
    Core algorithm:
-
-   For each 16-byte chunk we calculate a 64-bit nibble mask value with four bits
-   per byte. We take 4 bits of every comparison byte with shift right and narrow
-   by 4 instruction. Since the bits in the nibble mask reflect the order in
-   which things occur in the original string, counting trailing zeros identifies
-   exactly which byte matched.  */
+   Process the string in 16-byte aligned chunks. Compute a 64-bit mask with
+   four bits per byte using the shrn instruction. A count trailing zeros then
+   identifies the first zero byte.  */
 
 ENTRY (__strnlen)
 	PTR_ARG (0)
 	SIZE_ARG (1)
 	bic	src, srcin, 15
 	cbz	cntin, L(nomatch)
-	ld1	{vdata.16b}, [src], 16
+	ld1	{vdata.16b}, [src]
 	cmeq	vhas_chr.16b, vdata.16b, 0
 	lsl	shift, srcin, 2
 	shrn	vend.8b, vhas_chr.8h, 4		/* 128->64 */
@@ -71,36 +68,40 @@ L(finish):
 	csel	result, cntin, result, ls
 	ret
 
+L(nomatch):
+	mov	result, cntin
+	ret
+
 L(start_loop):
 	sub	tmp, src, srcin
+	add	tmp, tmp, 17
 	subs	cntrem, cntin, tmp
-	b.ls	L(nomatch)
+	b.lo	L(nomatch)
 
 	/* Make sure that it won't overread by a 16-byte chunk */
-	add	tmp, cntrem, 15
-	tbnz	tmp, 4, L(loop32_2)
-
+	tbz	cntrem, 4, L(loop32_2)
+	sub	src, src, 16
 	.p2align 5
 L(loop32):
-	ldr	qdata, [src], 16
+	ldr	qdata, [src, 32]!
 	cmeq	vhas_chr.16b, vdata.16b, 0
 	umaxp	vend.16b, vhas_chr.16b, vhas_chr.16b		/* 128->64 */
 	fmov	synd, dend
 	cbnz	synd, L(end)
 L(loop32_2):
-	ldr	qdata, [src], 16
+	ldr	qdata, [src, 16]
 	subs	cntrem, cntrem, 32
 	cmeq	vhas_chr.16b, vdata.16b, 0
-	b.ls	L(end)
+	b.lo	L(end_2)
 	umaxp	vend.16b, vhas_chr.16b, vhas_chr.16b		/* 128->64 */
 	fmov	synd, dend
 	cbz	synd, L(loop32)
-
+L(end_2):
+	add	src, src, 16
 L(end):
 	shrn	vend.8b, vhas_chr.8h, 4		/* 128->64 */
-	sub	src, src, 16
-	mov	synd, vend.d[0]
 	sub	result, src, srcin
+	fmov	synd, dend
 #ifndef __AARCH64EB__
 	rbit	synd, synd
 #endif
@@ -110,10 +111,6 @@ L(end):
 	csel	result, cntin, result, ls
 	ret
 
-L(nomatch):
-	mov	result, cntin
-	ret
-
 END (__strnlen)
 libc_hidden_def (__strnlen)
 weak_alias (__strnlen, strnlen)
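
For orientation, here is a rough C equivalent of the overall strategy,
reusing the zero_byte_mask helper sketched above.  This is a simplified
sketch assuming a little-endian target: strnlen_sketch is an invented
name, and the 32-byte unrolled loop of the patch is collapsed into a
plain 16-byte loop:

  #include <arm_neon.h>
  #include <stddef.h>
  #include <stdint.h>

  size_t
  strnlen_sketch (const char *s, size_t maxlen)
  {
    if (maxlen == 0)
      return 0;

    /* Round the first load down to a 16-byte boundary: an aligned
       16-byte load cannot cross a page, so reading the bytes before s
       is harmless.  */
    const uint8_t *chunk
      = (const uint8_t *) ((uintptr_t) s & ~(uintptr_t) 15);
    uint64_t mask = zero_byte_mask (vld1q_u8 (chunk));

    /* Discard the mask bits of the bytes before s: 4 mask bits per
       byte, hence a shift by (s % 16) * 4.  This is the lsl by 2 in
       the assembly, since AArch64 register shifts use the amount
       modulo 64.  */
    mask >>= ((uintptr_t) s & 15) * 4;
    if (mask != 0)
      {
        size_t len = (size_t) __builtin_ctzll (mask) / 4;
        return len < maxlen ? len : maxlen;
      }

    /* Number of bytes of s examined so far.  */
    size_t done = 16 - ((uintptr_t) s & 15);
    while (done < maxlen)
      {
        chunk += 16;
        mask = zero_byte_mask (vld1q_u8 (chunk));
        if (mask != 0)
          {
            size_t len = done + (size_t) __builtin_ctzll (mask) / 4;
            return len < maxlen ? len : maxlen;
          }
        done += 16;
      }
    return maxlen;
  }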


Subject: Re: [PATCH] AArch64: Optimize strnlen
From: Szabolcs Nagy @ 2023-01-13 12:26 UTC
  To: Wilco Dijkstra; +Cc: 'GNU C Library'

The 01/12/2023 15:54, Wilco Dijkstra wrote:
> Optimize strnlen using the shrn instruction and improve the main loop.
> Small strings are around 10% faster, large strings are 40% faster on
> modern CPUs.  Passes regress.

please commit it, thanks.

Reviewed-by: Szabolcs Nagy <szabolcs.nagy@arm.com>

