* [PATCH] AArch64: Optimize memrchr
@ 2023-01-12 15:57 Wilco Dijkstra
2023-01-13 12:28 ` Szabolcs Nagy
0 siblings, 1 reply; 2+ messages in thread
From: Wilco Dijkstra @ 2023-01-12 15:57 UTC (permalink / raw)
To: 'GNU C Library'; +Cc: Szabolcs Nagy
Optimize the main loop - large strings are 43% faster on modern CPUs.
Passes the glibc regression test suite.
---
diff --git a/sysdeps/aarch64/memrchr.S b/sysdeps/aarch64/memrchr.S
index 9d2d29a396d46d6c2e74e3ca637091e2f3d68d5e..621fc65109736646b74900db8d15c6f8a7c68895 100644
--- a/sysdeps/aarch64/memrchr.S
+++ b/sysdeps/aarch64/memrchr.S
@@ -26,7 +26,6 @@
* MTE compatible.
*/
-/* Arguments and results. */
#define srcin x0
#define chrin w1
#define cntin x2
@@ -77,31 +76,34 @@ ENTRY (__memrchr)
csel result, result, xzr, hi
ret
+ nop
L(start_loop):
- sub tmp, end, src
- subs cntrem, cntin, tmp
+ subs cntrem, src, srcin
b.ls L(nomatch)
/* Make sure that it won't overread by a 16-byte chunk */
- add tmp, cntrem, 15
- tbnz tmp, 4, L(loop32_2)
+ sub cntrem, cntrem, 1
+ tbz cntrem, 4, L(loop32_2)
+ add src, src, 16
- .p2align 4
+ .p2align 5
L(loop32):
- ldr qdata, [src, -16]!
+ ldr qdata, [src, -32]!
cmeq vhas_chr.16b, vdata.16b, vrepchr.16b
umaxp vend.16b, vhas_chr.16b, vhas_chr.16b /* 128->64 */
fmov synd, dend
cbnz synd, L(end)
L(loop32_2):
- ldr qdata, [src, -16]!
+ ldr qdata, [src, -16]
subs cntrem, cntrem, 32
cmeq vhas_chr.16b, vdata.16b, vrepchr.16b
- b.ls L(end)
+ b.lo L(end_2)
umaxp vend.16b, vhas_chr.16b, vhas_chr.16b /* 128->64 */
fmov synd, dend
cbz synd, L(loop32)
+L(end_2):
+ sub src, src, 16
L(end):
shrn vend.8b, vhas_chr.8h, 4 /* 128->64 */
fmov synd, dend
^ permalink raw reply [flat|nested] 2+ messages in thread
* Re: [PATCH] AArch64: Optimize memrchr
2023-01-12 15:57 [PATCH] AArch64: Optimize memrchr Wilco Dijkstra
@ 2023-01-13 12:28 ` Szabolcs Nagy
0 siblings, 0 replies; 2+ messages in thread
From: Szabolcs Nagy @ 2023-01-13 12:28 UTC (permalink / raw)
To: Wilco Dijkstra; +Cc: 'GNU C Library'
The 01/12/2023 15:57, Wilco Dijkstra wrote:
> Optimize the main loop - large strings are 43% faster on modern CPUs.
> Passes the glibc regression test suite.
please commit it, thanks.
Reviewed-by: Szabolcs Nagy <szabolcs.nagy@arm.com>
>
> ---
>
> diff --git a/sysdeps/aarch64/memrchr.S b/sysdeps/aarch64/memrchr.S
> index 9d2d29a396d46d6c2e74e3ca637091e2f3d68d5e..621fc65109736646b74900db8d15c6f8a7c68895 100644
> --- a/sysdeps/aarch64/memrchr.S
> +++ b/sysdeps/aarch64/memrchr.S
> @@ -26,7 +26,6 @@
> * MTE compatible.
> */
>
> -/* Arguments and results. */
> #define srcin x0
> #define chrin w1
> #define cntin x2
> @@ -77,31 +76,34 @@ ENTRY (__memrchr)
> csel result, result, xzr, hi
> ret
>
> + nop
> L(start_loop):
> - sub tmp, end, src
> - subs cntrem, cntin, tmp
> + subs cntrem, src, srcin
> b.ls L(nomatch)
>
> /* Make sure that it won't overread by a 16-byte chunk */
> - add tmp, cntrem, 15
> - tbnz tmp, 4, L(loop32_2)
> + sub cntrem, cntrem, 1
> + tbz cntrem, 4, L(loop32_2)
> + add src, src, 16
>
> - .p2align 4
> + .p2align 5
> L(loop32):
> - ldr qdata, [src, -16]!
> + ldr qdata, [src, -32]!
> cmeq vhas_chr.16b, vdata.16b, vrepchr.16b
> umaxp vend.16b, vhas_chr.16b, vhas_chr.16b /* 128->64 */
> fmov synd, dend
> cbnz synd, L(end)
>
> L(loop32_2):
> - ldr qdata, [src, -16]!
> + ldr qdata, [src, -16]
> subs cntrem, cntrem, 32
> cmeq vhas_chr.16b, vdata.16b, vrepchr.16b
> - b.ls L(end)
> + b.lo L(end_2)
> umaxp vend.16b, vhas_chr.16b, vhas_chr.16b /* 128->64 */
> fmov synd, dend
> cbz synd, L(loop32)
> +L(end_2):
> + sub src, src, 16
> L(end):
> shrn vend.8b, vhas_chr.8h, 4 /* 128->64 */
> fmov synd, dend
^ permalink raw reply [flat|nested] 2+ messages in thread
end of thread, other threads:[~2023-01-13 12:28 UTC | newest]
Thread overview: 2+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2023-01-12 15:57 [PATCH] AArch64: Optimize memrchr Wilco Dijkstra
2023-01-13 12:28 ` Szabolcs Nagy
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for read-only IMAP folder(s) and NNTP newsgroup(s).