* [PATCHV2] powerpc: Optimized strcmp for power10
From: Amrita H S @ 2023-10-05 16:33 UTC
To: libc-alpha; +Cc: rajis, Amrita H S
This patch is based on __strcmp_power9 and the recent
__strlen_power10.
Improvements from __strcmp_power9:
1. Uses new POWER10 instructions
The code uses lxvp to reduce load contention by loading 32 bytes
per instruction (the per-block semantics are sketched below).
vextractbm is used to shrink the tail code that computes the return
value.
2. Performance improvement
This version is around 25% faster on average. An inconsistent
performance regression is seen for sizes below 88B, but it is
also observed without these changes.
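For reference, the per-block comparison the vector loops implement is
equivalent to the following C sketch (an illustration of the semantics
only, not code from the patch; the block size is 16 or 32 bytes
depending on the loop):

  #include <stddef.h>

  /* Reference semantics of one vcmpnezb.-based iteration: stop at the
     first position where the bytes differ or s1 terminates, and return
     the difference of the bytes as unsigned chars.  */
  static int
  compare_block (const unsigned char *s1, const unsigned char *s2,
                 size_t n, int *done)
  {
    for (size_t i = 0; i < n; i++)
      if (s1[i] != s2[i] || s1[i] == '\0')
        {
          *done = 1;
          return s1[i] - s2[i];
        }
    *done = 0;
    return 0;
  }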
Signed-off-by: Amrita H S <amritahs@linux.vnet.ibm.com>
---
sysdeps/powerpc/powerpc64/le/power10/strcmp.S | 337 ++++++++++++++++++
sysdeps/powerpc/powerpc64/multiarch/Makefile | 3 +-
.../powerpc64/multiarch/ifunc-impl-list.c | 4 +
.../powerpc64/multiarch/strcmp-power10.S | 26 ++
sysdeps/powerpc/powerpc64/multiarch/strcmp.c | 4 +
5 files changed, 373 insertions(+), 1 deletion(-)
create mode 100644 sysdeps/powerpc/powerpc64/le/power10/strcmp.S
create mode 100644 sysdeps/powerpc/powerpc64/multiarch/strcmp-power10.S
diff --git a/sysdeps/powerpc/powerpc64/le/power10/strcmp.S b/sysdeps/powerpc/powerpc64/le/power10/strcmp.S
new file mode 100644
index 0000000000..7d0390b4a6
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/le/power10/strcmp.S
@@ -0,0 +1,337 @@
+/* Optimized strcmp implementation for PowerPC64/POWER10.
+ Copyright (C) 2021-2023 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <https://www.gnu.org/licenses/>. */
+#include <sysdep.h>
+
+#ifndef STRCMP
+# define STRCMP strcmp
+#endif
+
+/* Implements the function
+
+ int [r3] strcmp (const char *s1 [r3], const char *s2 [r4])
+
+ The implementation uses unaligned doubleword accesses for the first
+ 32 bytes as in the POWER8 version, and vectorized loops after that. */
+
+/* TODO: Change this to actual instructions when minimum binutils is upgraded
+ to 2.27. Macros are defined below for these newer instructions in order
+ to maintain compatibility. */
+
+#define LXVP(xtp,dq,ra) \
+ .long(((6)<<(32-6)) \
+ | ((((xtp)-32)>>1)<<(32-10)) \
+ | ((1)<<(32-11)) \
+ | ((ra)<<(32-16)) \
+ | dq)
+
+/* Get 16 bytes for unaligned case.
+ reg1: Vector to hold next 16 bytes.
+ reg2: Address to read from.
+ reg3: Permute control vector. */
+#define GET16BYTES(vreg1, reg2, vreg3) \
+ lvx vreg1, 0, reg2; \
+ vperm v8, v2, vreg1, vreg3; \
+ vcmpequb. v8, v0, v8; \
+ beq cr6, 1f; \
+ vspltisb v9, 0; \
+ b 2f; \
+ .align 4; \
+1: \
+ addi r6, reg2, 16; \
+ lvx v9, 0, r6; \
+2: \
+ vperm vreg1, v9, vreg1, vreg3;
+
+#define COMPARE_16_S1_ALIGNED(vreg1, vreg2, offset) \
+ lxv vreg1+32, offset(r7); \
+ GET16BYTES(vreg2, r4, v6); \
+ vcmpnezb. v7, vreg2, vreg1; \
+ addi r4, r4, 16; \
+ bne cr6, L(different)
+
+#define COMPARE_16_S1S2_ALIGNED(vreg1, vreg2, offset) \
+ lxv vreg1+32, offset(r7); \
+ lxv vreg2+32, offset(r4); \
+ vcmpnezb. v7, vreg1, vreg2; \
+ bne cr6, L(different);
+
+#define COMPARE_32(vreg1, vreg2, offset, label1, label2) \
+ LXVP(vreg1+32, offset, r7); \
+ LXVP(vreg2+32, offset, r4); \
+ vcmpnezb. v7, vreg1+1, vreg2+1; \
+ bne cr6, L(label1); \
+ vcmpnezb. v7, vreg1, vreg2; \
+ bne cr6, L(label2); \
+
+#define TAIL(vreg1, vreg2) \
+ vctzlsbb r6, v7; \
+ vextubrx r5, r6, vreg1; \
+ vextubrx r4, r6, vreg2; \
+ subf r3, r4, r5; \
+ extsw r3, r3; \
+ blr; \
+
+ /* TODO: change this to .machine power10 when the minimum required binutils
+ allows it. */
+
+ .machine power9
+ENTRY_TOCLESS (STRCMP, 4)
+ li r0, 0
+
+ /* Check if [s1]+16 or [s2]+16 will cross a 4K page boundary using
+ the code:
+
+ (((size_t) s1) % PAGE_SIZE > (PAGE_SIZE - ITER_SIZE))
+
+ with PAGE_SIZE being 4096 and ITER_SIZE being 16. */
+
+ rldicl r7, r3, 0, 52
+ rldicl r9, r4, 0, 52
+ cmpldi cr7, r7, 4096-16
+ bgt cr7, L(pagecross_check)
+ cmpldi cr5, r9, 4096-16
+ bgt cr5, L(pagecross_check)
+
+ /* For short strings up to 16 bytes, load both s1 and s2 using
+ unaligned dwords and compare. */
+ ld r8, 0(r3)
+ ld r10, 0(r4)
+ cmpb r12, r8, r0
+ cmpb r11, r8, r10
+ orc. r9, r12, r11
+ bne cr0, L(different_nocmpb)
+
+ ld r8, 8(r3)
+ ld r10, 8(r4)
+ cmpb r12, r8, r0
+ cmpb r11, r8, r10
+ orc. r9, r12, r11
+ bne cr0, L(different_nocmpb)
+
+ addi r7, r3, 16
+ addi r4, r4, 16
+
+L(align):
+ /* The first 16 bytes have now been checked. */
+ vspltisb v0, 0
+ vspltisb v2, -1
+ lvsr v6, 0, r4 /* Compute mask. */
+ or r5, r4, r7
+ andi. r5, r5, 0xF
+ beq cr0, L(aligned)
+ andi. r5, r7, 0xF
+ beq cr0, L(s1_align)
+ lvsr v10, 0, r7 /* Compute mask. */
+
+ /* Both s1 and s2 are unaligned. */
+ GET16BYTES(v4, r7, v10)
+ GET16BYTES(v5, r4, v6)
+ vcmpnezb. v7, v5, v4
+ beq cr6, L(match)
+ b L(different)
+
+ /* Align s1 to qw and adjust s2 address. */
+ .align 4
+L(match):
+ clrldi r6, r7, 60
+ subfic r5, r6, 16
+ add r7, r7, r5
+ add r4, r4, r5
+ andi. r5, r4, 0xF
+ beq cr0, L(aligned)
+ lvsr v6, 0, r4
+ /* There are 2 loops depending on the input alignment.
+ Each loop gets 16 bytes from s1 and s2 and compares.
+ Loop until a mismatch or null occurs. */
+L(s1_align):
+ COMPARE_16_S1_ALIGNED(v4, v5, 0)
+ COMPARE_16_S1_ALIGNED(v4, v5, 16)
+ COMPARE_16_S1_ALIGNED(v4, v5, 32)
+ COMPARE_16_S1_ALIGNED(v4, v5, 48)
+ addi r7, r7, 64
+ b L(s1_align)
+ .align 4
+
+ /* Align s1 to 32B and adjust s2 address.
+ Use lxvp only if both s1 and s2 are 32B aligned. */
+L(aligned):
+ COMPARE_16_S1S2_ALIGNED(v4, v5, 0)
+ COMPARE_16_S1S2_ALIGNED(v4, v5, 16)
+ COMPARE_16_S1S2_ALIGNED(v4, v5, 32)
+ COMPARE_16_S1S2_ALIGNED(v4, v5, 48)
+ addi r7, r7, 64
+ addi r4, r4, 64
+ COMPARE_16_S1S2_ALIGNED(v4, v5, 0)
+ COMPARE_16_S1S2_ALIGNED(v4, v5, 16)
+
+ clrldi r6, r7, 59
+ subfic r5, r6, 32
+ add r7, r7, r5
+ add r4, r4, r5
+ andi. r5, r4, 0x1F
+ beq cr0, L(aligned_loop)
+
+L(aligned1):
+ COMPARE_16_S1S2_ALIGNED(v4, v5, 0)
+ COMPARE_16_S1S2_ALIGNED(v4, v5, 16)
+ COMPARE_16_S1S2_ALIGNED(v4, v5, 32)
+ COMPARE_16_S1S2_ALIGNED(v4, v5, 48)
+ addi r7, r7, 64
+ addi r4, r4, 64
+ b L(aligned1)
+
+ /* Calculate and return the difference. */
+L(different):
+ vctzlsbb r6, v7
+ vextubrx r5, r6, v4
+ vextubrx r4, r6, v5
+ subf r3, r4, r5
+ extsw r3, r3
+ blr
+
+L(aligned_loop):
+ COMPARE_32(v14, v16, 0, tail1, tail2)
+ COMPARE_32(v18, v20, 32, tail3, tail4)
+ COMPARE_32(v22, v24, 64, tail5, tail6)
+ COMPARE_32(v26, v28, 96, tail7, tail8)
+ addi r7, r7, 128
+ addi r4, r4, 128
+ b L(aligned_loop)
+
+L(tail1): TAIL(v15, v17)
+L(tail2): TAIL(v14, v16)
+L(tail3): TAIL(v19, v21)
+L(tail4): TAIL(v18, v20)
+L(tail5): TAIL(v23, v25)
+L(tail6): TAIL(v22, v24)
+L(tail7): TAIL(v27, v29)
+L(tail8): TAIL(v26, v28)
+
+ .align 4
+L(different_nocmpb):
+ neg r3, r9
+ and r9, r9, r3
+ cntlzd r9, r9
+ subfic r9, r9, 63
+ srd r3, r8, r9
+ srd r10, r10, r9
+ rldicl r10, r10, 0, 56
+ rldicl r3, r3, 0, 56
+ subf r3, r10, r3
+ extsw r3, r3
+ blr
+
+ .align 4
+L(pagecross_check):
+ /* r7 and r9 have the number of bytes to be read to reach the page
+ boundary of inputs s1 and s2, respectively. */
+ subfic r9, r9, 4096
+ subfic r7, r7, 4096
+ cmpld cr7, r7, r9
+ bge cr7, L(pagecross)
+ mr r9, r7 /* r9 = min (r7, r9). */
+ /* If an unaligned 16-byte read crosses a 4K page boundary, use a
+ simple byte-by-byte comparison until page alignment for s1 is
+ reached. */
+L(pagecross):
+ mr r11, r9 /* Store min of r7 and r9 in r11. */
+ mr r8, r7 /* Save a copy of the s1 count. */
+ add r7, r3, r7 /* Update r7 as it is required in L(align). */
+ /* Update r9 with the number of bytes to be compared in
+ blocks (of 16 or less). */
+ mr r9, r11
+ cmpldi r9, 16
+ blt L(compare_less_than_16)
+
+ /* For more than 16 bytes, compare in a loop of 16-byte blocks. */
+ srdi r10, r9, 4 /* Compute loop count (r9 / 16). */
+ andi. r9, r9, 15 /* Set r9 with the remainder (r9 % 16). */
+ mtctr r10 /* Move loop count to counter register. */
+ li r12, 16 /* Move the loop width to a temp register. */
+ sldi r12, r12, 56
+
+ .align 4
+L(pagecross_16B_loop):
+ /* Load 16B from s1 and s2; check whether *s1 equals *s2 and
+ whether either *s1 or *s2 is '\0'. */
+ lxvl 32+v0, r3, r12
+ lxvl 32+v2, r4, r12
+ addi r3, r3, 16
+ addi r4, r4, 16
+ vcmpnezb. v16, v0, v2
+ vctzlsbb r6, v16
+ bne cr6, L(pagecross_diff_bytes)
+ bdnz L(pagecross_16B_loop)
+
+ .align 4
+L(compare_less_than_16):
+ /* Load the remaining bytes together; check whether *s1 equals *s2
+ and whether either *s1 or *s2 is '\0'. */
+ mr r11, r9
+ sldi r9, r9, 56
+ lxvl 32+v0, r3, r9
+ lxvl 32+v2, r4, r9
+ add r3, r3, r11
+ add r4, r4, r11
+ vcmpnezb. v16, v0, v2
+ /* If the index of the differing or null byte is >= r11 (the compared
+ length), do the byte-by-byte compare until s1 reaches the page boundary. */
+ vctzlsbb r6, v16
+ cmpld cr7, r6, r11
+ bge cr7, L(compare_byte_by_byte)
+ bne cr6, L(pagecross_diff_bytes)
+
+L(compare_byte_by_byte):
+ sub r10, r8, r11
+ mtctr r10
+ cmpldi r10, 0
+ ble L(align)
+
+L(byte_compare_loop):
+ lbz r9, 0(r3)
+ lbz r10, 0(r4)
+ addi r3, r3, 1
+ addi r4, r4, 1
+ cmplw cr7, r9, r10
+ cmpdi cr5, r9, r0
+ bne cr7, L(pagecross_ne)
+ beq cr5, L(pagecross_nullfound)
+ bdnz L(byte_compare_loop)
+ b L(align)
+
+L(pagecross_diff_bytes):
+ vextubrx r5, r6, v0
+ vextubrx r4, r6, v2
+ subf r3, r4, r5
+ extsw r3, r3
+ blr
+ .align 4
+L(pagecross_ne):
+ extsw r3, r9
+ mr r9, r10
+L(pagecross_retdiff):
+ subf r9, r9, r3
+ extsw r3, r9
+ blr
+
+ .align 4
+L(pagecross_nullfound):
+ li r3, 0
+ b L(pagecross_retdiff)
+END (STRCMP)
+libc_hidden_builtin_def (strcmp)
diff --git a/sysdeps/powerpc/powerpc64/multiarch/Makefile b/sysdeps/powerpc/powerpc64/multiarch/Makefile
index 27d8495503..d7824a922b 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/Makefile
+++ b/sysdeps/powerpc/powerpc64/multiarch/Makefile
@@ -33,7 +33,8 @@ sysdep_routines += memcpy-power8-cached memcpy-power7 memcpy-a2 memcpy-power6 \
ifneq (,$(filter %le,$(config-machine)))
sysdep_routines += memcmp-power10 memcpy-power10 memmove-power10 memset-power10 \
rawmemchr-power9 rawmemchr-power10 \
- strcmp-power9 strncmp-power9 strcpy-power9 stpcpy-power9 \
+ strcmp-power9 strcmp-power10 strncmp-power9 \
+ strcpy-power9 stpcpy-power9 \
strlen-power9 strncpy-power9 stpncpy-power9 strlen-power10
endif
CFLAGS-strncase-power7.c += -mcpu=power7 -funroll-loops
diff --git a/sysdeps/powerpc/powerpc64/multiarch/ifunc-impl-list.c b/sysdeps/powerpc/powerpc64/multiarch/ifunc-impl-list.c
index ebe9434052..ca1f57e1e2 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/ifunc-impl-list.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/ifunc-impl-list.c
@@ -376,6 +376,10 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
/* Support sysdeps/powerpc/powerpc64/multiarch/strcmp.c. */
IFUNC_IMPL (i, name, strcmp,
#ifdef __LITTLE_ENDIAN__
+ IFUNC_IMPL_ADD (array, i, strcmp,
+ (hwcap2 & PPC_FEATURE2_ARCH_3_1)
+ && (hwcap & PPC_FEATURE_HAS_VSX),
+ __strcmp_power10)
IFUNC_IMPL_ADD (array, i, strcmp,
hwcap2 & PPC_FEATURE2_ARCH_3_00
&& hwcap & PPC_FEATURE_HAS_ALTIVEC,
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strcmp-power10.S b/sysdeps/powerpc/powerpc64/multiarch/strcmp-power10.S
new file mode 100644
index 0000000000..c80067ce33
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/multiarch/strcmp-power10.S
@@ -0,0 +1,26 @@
+/* Optimized strcmp implementation for POWER10/PPC64.
+ Copyright (C) 2021-2023 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <https://www.gnu.org/licenses/>. */
+
+#if defined __LITTLE_ENDIAN__ && IS_IN (libc)
+#define STRCMP __strcmp_power10
+
+#undef libc_hidden_builtin_def
+#define libc_hidden_builtin_def(name)
+
+#include <sysdeps/powerpc/powerpc64/le/power10/strcmp.S>
+#endif /* __LITTLE_ENDIAN__ && IS_IN (libc) */
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strcmp.c b/sysdeps/powerpc/powerpc64/multiarch/strcmp.c
index 31fcdee916..f1dac99b66 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/strcmp.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/strcmp.c
@@ -29,12 +29,16 @@ extern __typeof (strcmp) __strcmp_power7 attribute_hidden;
extern __typeof (strcmp) __strcmp_power8 attribute_hidden;
# ifdef __LITTLE_ENDIAN__
extern __typeof (strcmp) __strcmp_power9 attribute_hidden;
+extern __typeof (strcmp) __strcmp_power10 attribute_hidden;
# endif
# undef strcmp
libc_ifunc_redirected (__redirect_strcmp, strcmp,
# ifdef __LITTLE_ENDIAN__
+ (hwcap2 & PPC_FEATURE2_ARCH_3_1
+ && hwcap & PPC_FEATURE_HAS_VSX)
+ ? __strcmp_power10 :
(hwcap2 & PPC_FEATURE2_ARCH_3_00
&& hwcap & PPC_FEATURE_HAS_ALTIVEC)
? __strcmp_power9 :
--
2.39.3
* Re: [PATCHV2] powerpc: Optimized strcmp for power10
From: Paul E Murphy @ 2023-10-09 18:09 UTC
To: Amrita H S, libc-alpha; +Cc: rajis
On 10/5/23 11:33 AM, Amrita H S wrote:
> This patch is based on __strcmp_power9 and the recent
> __strlen_power10.
>
> Improvements from __strcmp_power9:
>
> 1. Uses new POWER10 instructions
>
> The code uses lxvp to reduce load contention by loading 32 bytes
> per instruction.
> vextractbm is used to shrink the tail code that computes the return
> value.
>
> 2. Performance improvement
>
> This version is around 25% faster on average. An inconsistent
> performance regression is seen for sizes below 88B, but it is
> also observed without these changes.
Please include the benchmarks directly in text form. The attachments are
not immediately recognizable as text files.
>
> Signed-off-by: Amrita H S <amritahs@linux.vnet.ibm.com>
> ---
> sysdeps/powerpc/powerpc64/le/power10/strcmp.S | 337 ++++++++++++++++++
> sysdeps/powerpc/powerpc64/multiarch/Makefile | 3 +-
> .../powerpc64/multiarch/ifunc-impl-list.c | 4 +
> .../powerpc64/multiarch/strcmp-power10.S | 26 ++
> sysdeps/powerpc/powerpc64/multiarch/strcmp.c | 4 +
> 5 files changed, 373 insertions(+), 1 deletion(-)
> create mode 100644 sysdeps/powerpc/powerpc64/le/power10/strcmp.S
> create mode 100644 sysdeps/powerpc/powerpc64/multiarch/strcmp-power10.S
>
> diff --git a/sysdeps/powerpc/powerpc64/le/power10/strcmp.S b/sysdeps/powerpc/powerpc64/le/power10/strcmp.S
> new file mode 100644
> index 0000000000..7d0390b4a6
> --- /dev/null
> +++ b/sysdeps/powerpc/powerpc64/le/power10/strcmp.S
> @@ -0,0 +1,337 @@
> +/* Optimized strcmp implementation for PowerPC64/POWER10.
> + Copyright (C) 2021-2023 Free Software Foundation, Inc.
> + This file is part of the GNU C Library.
> +
> + The GNU C Library is free software; you can redistribute it and/or
> + modify it under the terms of the GNU Lesser General Public
> + License as published by the Free Software Foundation; either
> + version 2.1 of the License, or (at your option) any later version.
> +
> + The GNU C Library is distributed in the hope that it will be useful,
> + but WITHOUT ANY WARRANTY; without even the implied warranty of
> + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
> + Lesser General Public License for more details.
> +
> + You should have received a copy of the GNU Lesser General Public
> + License along with the GNU C Library; if not, see
> + <https://www.gnu.org/licenses/>. */
> +#include <sysdep.h>
> +
> +#ifndef STRCMP
> +# define STRCMP strcmp
> +#endif
> +
> +/* Implements the function
> +
> + int [r3] strcmp (const char *s1 [r3], const char *s2 [r4])
> +
> + The implementation uses unaligned doubleword accesses for the first
> + 32 bytes as in the POWER8 version, and vectorized loops after that. */
> +
> +/* TODO: Change this to actual instructions when minimum binutils is upgraded
> + to 2.27. Macros are defined below for these newer instructions in order
> + to maintain compatibility. */
> +
> +#define LXVP(xtp,dq,ra) \
> + .long(((6)<<(32-6)) \
> + | ((((xtp)-32)>>1)<<(32-10)) \
> + | ((1)<<(32-11)) \
> + | ((ra)<<(32-16)) \
> + | dq)
> +
> +/* Get 16 bytes for unaligned case.
> + reg1: Vector to hold next 16 bytes.
> + reg2: Address to read from.
> + reg3: Permute control vector. */
> +#define GET16BYTES(vreg1, reg2, vreg3) \
> + lvx vreg1, 0, reg2; \
> + vperm v8, v2, vreg1, vreg3; \
> + vcmpequb. v8, v0, v8; \
> + beq cr6, 1f; \
> + vspltisb v9, 0; \
> + b 2f; \
> + .align 4; \
> +1: \
> + addi r6, reg2, 16; \
> + lvx v9, 0, r6; \
> +2: \
> + vperm vreg1, v9, vreg1, vreg3;
This seems overly complicated for the ISA available. Neither 16B load
should cross a page when looping; that is what the check is for. If one
does cross, a partial load via lxvl (or lxvll) can be used up to the
boundary in the unaligned case.
I imagine once str1 is aligned, the asm should be relatively simple. In
my head, it looks something like:
  // str1 is aligned to 16B
  rem = page_size - (str2 & (page_size-1))
  rem_step = page_size - (str2 & 15)
  for (;;) {
    while (rem >= 16) {
      check_16B(str1, str2)
      str1 += 16
      str2 += 16
      rem -= 16
    }
    // cross the page boundary of str2, carefully.
    nafter = (str2 & 15)
    nbefore = 16 - (str2 & 15)
    check_n_byte(str1, str2, nbefore)
    str1 += nbefore;
    str2 += nbefore;
    check_n_byte(str1, str2, nafter)
    str1 += nafter;
    str2 += nafter;
    rem = rem_step
  }
Something like that could also be extended to 32B/inner-loop with some
extra work.
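For concreteness, a compilable C rendering of the sketch above might look
like the following; checkn is a hypothetical helper standing in for the
lxvl + vcmpnezb. + vctzlsbb sequence, and the before/after boundary pieces
are folded into one bounded check:

  #include <stddef.h>
  #include <stdint.h>

  #define PAGE_SIZE 4096

  /* Hypothetical bounded compare: return the index of the first
     position in [0, n) where the bytes differ or s1 has '\0',
     or -1 if none.  */
  static long
  checkn (const unsigned char *s1, const unsigned char *s2, size_t n)
  {
    for (size_t i = 0; i < n; i++)
      if (s1[i] != s2[i] || s1[i] == '\0')
        return (long) i;
    return -1;
  }

  /* Page-aware inner loop, assuming s1 is already 16B aligned so only
     s2's page boundary needs care.  */
  static int
  strcmp_pages (const unsigned char *s1, const unsigned char *s2)
  {
    size_t rem = PAGE_SIZE - ((uintptr_t) s2 & (PAGE_SIZE - 1));
    for (;;)
      {
        while (rem >= 16)
          {
            long i = checkn (s1, s2, 16);
            if (i >= 0)
              return s1[i] - s2[i];
            s1 += 16; s2 += 16; rem -= 16;
          }
        /* Cross the page boundary of s2, carefully.  */
        long i = checkn (s1, s2, rem);
        if (i >= 0)
          return s1[i] - s2[i];
        s1 += rem; s2 += rem;
        rem = PAGE_SIZE;
      }
  }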
For initial alignment, could the initial test be more like:
  nalign1 = 16 - (str1 & 15)
  nalign2 = 16 - (str2 & 15)
  if nalign1 == nalign2 {
    check_n_bytes(str1, str2, nalign1);
    goto fast_aligned_case;
  }
  // align s1, potentially in two checks
  ncheck = min(nalign1, nalign2)       // cmp + isel
  check_n_bytes(str1, str2, ncheck)
  str1 += ncheck
  str2 += ncheck
  ncheck = max(nalign1 - nalign2, 0)   // cmp + isel
  check_n_bytes(str1, str2, ncheck)
  str1 += ncheck
  str2 += ncheck
  goto unalign_str2_path
Note, for check_n_bytes (for 0 <= n <= 16), something like:
  lxvl v1, str1, len   // Partial load
  lxvl v2, str2, len
  vcmpneb. v3, v1, v2
  bne found_check_short
Ideally, I think both aligned and unaligned loops should be able to
process 32B/iteration when not crossing a boundary. However, doing the
cross check for 32B might be more complicated.
In all cases, I don't think P10 should ever need to fall back to checking
individual bytes.
> +
> +#define COMPARE_16_S1_ALIGNED(vreg1, vreg2, offset) \
> + lxv vreg1+32, offset(r7); \
> + GET16BYTES(vreg2, r4, v6); \
> + vcmpnezb. v7, vreg2, vreg1; \
> + addi r4, r4, 16; \
> + bne cr6, L(different)
> +
> +#define COMPARE_16_S1S2_ALIGNED(vreg1, vreg2, offset) \
> + lxv vreg1+32, offset(r7); \
> + lxv vreg2+32, offset(r4); \
> + vcmpnezb. v7, vreg1, vreg2; \
> + bne cr6, L(different);
> +
> +#define COMPARE_32(vreg1, vreg2, offset, label1, label2) \
> + LXVP(vreg1+32, offset, r7); \
> + LXVP(vreg2+32, offset, r4); \
> + vcmpnezb. v7, vreg1+1, vreg2+1; \
> + bne cr6, L(label1); \
> + vcmpnezb. v7, vreg1, vreg2; \
> + bne cr6, L(label2); \
> +
> +#define TAIL(vreg1, vreg2) \
> + vctzlsbb r6, v7; \
> + vextubrx r5, r6, vreg1; \
> + vextubrx r4, r6, vreg2; \
> + subf r3, r4, r5; \
> + extsw r3, r3; \
Is the sign extend needed here?
> + blr; \
> +
> + /* TODO: change this to .machine power10 when the minimum required binutils
> + allows it. */
> +
> + .machine power9
> +ENTRY_TOCLESS (STRCMP, 4)
> + li r0, 0
> +
> + /* Check if [s1]+16 or [s2]+16 will cross a 4K page boundary using
> + the code:
> +
> + (((size_t) s1) % PAGE_SIZE > (PAGE_SIZE - ITER_SIZE))
> +
> + with PAGE_SIZE being 4096 and ITER_SIZE being 16. */
> +
> + rldicl r7, r3, 0, 52
> + rldicl r9, r4, 0, 52
> + cmpldi cr7, r7, 4096-16
> + bgt cr7, L(pagecross_check)
> + cmpldi cr5, r9, 4096-16
> + bgt cr5, L(pagecross_check)
> +
> + /* For short strings up to 16 bytes, load both s1 and s2 using
> + unaligned dwords and compare. */
> + ld r8, 0(r3)
> + ld r10, 0(r4)
> + cmpb r12, r8, r0
> + cmpb r11, r8, r10
> + orc. r9, r12, r11
> + bne cr0, L(different_nocmpb)
> +
> + ld r8, 8(r3)
> + ld r10, 8(r4)
> + cmpb r12, r8, r0
> + cmpb r11, r8, r10
> + orc. r9, r12, r11
> + bne cr0, L(different_nocmpb)
Can these two 8 byte checks be more efficiently replaced by one usage of
the COMPARE_16_S1S2_ALIGNED macro?
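For reference, the cmpb/orc. sequence quoted above is the usual
word-at-a-time test; a C model of its logic (illustrative only):

  #include <stdint.h>

  /* Model of the cmpb/orc. test on one 8-byte block.  cmpb sets each
     result byte to 0xff where the operand bytes are equal, so eq0
     marks '\0' bytes of w1 (cmpb r12, r8, r0) and eq marks bytes
     where w1 == w2 (cmpb r11, r8, r10).  orc. computes eq0 | ~eq,
     which is nonzero iff the block holds a mismatch or a terminator.  */
  static int
  block_ends_or_differs (uint64_t w1, uint64_t w2)
  {
    uint64_t eq0 = 0, eq = 0;
    for (int i = 0; i < 8; i++)
      {
        uint64_t m = (uint64_t) 0xff << (8 * i);
        if ((w1 & m) == 0)
          eq0 |= m;
        if (((w1 ^ w2) & m) == 0)
          eq |= m;
      }
    return (eq0 | ~eq) != 0;
  }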
> +
> + addi r7, r3, 16
> + addi r4, r4, 16
> +
> +L(align):
> + /* The first 16 bytes have now been checked. */
> + vspltisb v0, 0
> + vspltisb v2, -1
> + lvsr v6, 0, r4 /* Compute mask. */
> + or r5, r4, r7
> + andi. r5, r5, 0xF
> + beq cr0, L(aligned)
> + andi. r5, r7, 0xF
> + beq cr0, L(s1_align)
> + lvsr v10, 0, r7 /* Compute mask. */
> +
> + /* Both s1 and s2 are unaligned. */
> + GET16BYTES(v4, r7, v10)
> + GET16BYTES(v5, r4, v6)
> + vcmpnezb. v7, v5, v4
> + beq cr6, L(match)
> + b L(different)
> +
> + /* Align s1 to qw and adjust s2 address. */
> + .align 4
> +L(match):
> + clrldi r6, r7, 60
> + subfic r5, r6, 16
> + add r7, r7, r5
> + add r4, r4, r5
> + andi. r5, r4, 0xF
> + beq cr0, L(aligned)
> + lvsr v6, 0, r4
> + /* There are 2 loops depending on the input alignment.
> + Each loop gets 16 bytes from s1 and s2 and compares.
> + Loop until a mismatch or null occurs. */
> +L(s1_align):
> + COMPARE_16_S1_ALIGNED(v4, v5, 0)
> + COMPARE_16_S1_ALIGNED(v4, v5, 16)
> + COMPARE_16_S1_ALIGNED(v4, v5, 32)
> + COMPARE_16_S1_ALIGNED(v4, v5, 48)
> + addi r7, r7, 64
> + b L(s1_align)
> + .align 4
> +
> + /* Align s1 to 32B and adjust s2 address.
> + Use lxvp only if both s1 and s2 are 32B aligned. */
> +L(aligned):
> + COMPARE_16_S1S2_ALIGNED(v4, v5, 0)
> + COMPARE_16_S1S2_ALIGNED(v4, v5, 16)
> + COMPARE_16_S1S2_ALIGNED(v4, v5, 32)
> + COMPARE_16_S1S2_ALIGNED(v4, v5, 48)
> + addi r7, r7, 64
> + addi r4, r4, 64
> + COMPARE_16_S1S2_ALIGNED(v4, v5, 0)
> + COMPARE_16_S1S2_ALIGNED(v4, v5, 16)
> +
> + clrldi r6, r7, 59
> + subfic r5, r6, 32
> + add r7, r7, r5
> + add r4, r4, r5
> + andi. r5, r4, 0x1F
> + beq cr0, L(aligned_loop)
> +
> +L(aligned1):
> + COMPARE_16_S1S2_ALIGNED(v4, v5, 0)
> + COMPARE_16_S1S2_ALIGNED(v4, v5, 16)
> + COMPARE_16_S1S2_ALIGNED(v4, v5, 32)
> + COMPARE_16_S1S2_ALIGNED(v4, v5, 48)
> + addi r7, r7, 64
> + addi r4, r4, 64
> + b L(aligned1)
> +
> + /* Calculate and return the difference. */
> +L(different):
> + vctzlsbb r6, v7
> + vextubrx r5, r6, v4
> + vextubrx r4, r6, v5
> + subf r3, r4, r5
> + extsw r3, r3
> + blr
> +
> +L(aligned_loop):
> + COMPARE_32(v14, v16, 0, tail1, tail2)
> + COMPARE_32(v18, v20, 32, tail3, tail4)
> + COMPARE_32(v22, v24, 64, tail5, tail6)
> + COMPARE_32(v26, v28, 96, tail7, tail8)
> + addi r7, r7, 128
> + addi r4, r4, 128
> + b L(aligned_loop)
> +
> +L(tail1): TAIL(v15, v17)
> +L(tail2): TAIL(v14, v16)
> +L(tail3): TAIL(v19, v21)
> +L(tail4): TAIL(v18, v20)
> +L(tail5): TAIL(v23, v25)
> +L(tail6): TAIL(v22, v24)
> +L(tail7): TAIL(v27, v29)
> +L(tail8): TAIL(v26, v28)
> +
> + .align 4
> +L(different_nocmpb):
> + neg r3, r9
> + and r9, r9, r3
> + cntlzd r9, r9
> + subfic r9, r9, 63
> + srd r3, r8, r9
> + srd r10, r10, r9
> + rldicl r10, r10, 0, 56
> + rldicl r3, r3, 0, 56
> + subf r3, r10, r3
> + extsw r3, r3
> + blr
> +
> + .align 4
> +L(pagecross_check):
> + /* r7 and r9 have the number of bytes to be read to reach the page
> + boundary of inputs s1 and s2, respectively. */
> + subfic r9, r9, 4096
> + subfic r7, r7, 4096
> + cmpld cr7, r7, r9
> + bge cr7, L(pagecross)
> + mr r9, r7 /* r9 = min (r7, r9). */
> + /* If an unaligned 16-byte read crosses a 4K page boundary, use a
> + simple byte-by-byte comparison until page alignment for s1 is
> + reached. */
> +L(pagecross):
> + mr r11, r9 /* Store min of r7 and r9 in r11. */
> + mr r8, r7 /* Save a copy of the s1 count. */
> + add r7, r3, r7 /* Update r7 as it is required in L(align). */
> + /* Update r9 with the number of bytes to be compared in
> + blocks (of 16 or less). */
> + mr r9, r11
> + cmpldi r9, 16
> + blt L(compare_less_than_16)
> +
> + /* For more than 16 bytes, compare in a loop of 16-byte blocks. */
> + srdi r10, r9, 4 /* Compute loop count (r9 / 16). */
> + andi. r9, r9, 15 /* Set r9 with the remainder (r9 % 16). */
> + mtctr r10 /* Move loop count to counter register. */
> + li r12, 16 /* Move the loop width to a temp register. */
> + sldi r12, r12, 56
> +
> + .align 4
> +L(pagecross_16B_loop):
> + /* Load 16B from s1 and s2; check whether *s1 equals *s2 and
> + whether either *s1 or *s2 is '\0'. */
> + lxvl 32+v0, r3, r12
> + lxvl 32+v2, r4, r12
> + addi r3, r3, 16
> + addi r4, r4, 16
> + vcmpnezb. v16, v0, v2
> + vctzlsbb r6, v16
> + bne cr6, L(pagecross_diff_bytes)
> + bdnz L(pagecross_16B_loop)
> +
> + .align 4
> +L(compare_less_than_16):
> + /* Load the remaining bytes together; check whether *s1 equals *s2
> + and whether either *s1 or *s2 is '\0'. */
> + mr r11, r9
> + sldi r9, r9, 56
> + lxvl 32+v0, r3, r9
> + lxvl 32+v2, r4, r9
> + add r3, r3, r11
> + add r4, r4, r11
> + vcmpnezb. v16, v0, v2
> + /* If the index of the differing or null byte is >= r11 (the compared
> + length), do the byte-by-byte compare until s1 reaches the page boundary. */
> + vctzlsbb r6, v16
> + cmpld cr7, r6, r11
> + bge cr7, L(compare_byte_by_byte)
> + bne cr6, L(pagecross_diff_bytes)
> +
> +L(compare_byte_by_byte):
> + sub r10, r8, r11
> + mtctr r10
> + cmpldi r10, 0
> + ble L(align)
> +
> +L(byte_compare_loop):
> + lbz r9, 0(r3)
> + lbz r10, 0(r4)
> + addi r3, r3, 1
> + addi r4, r4, 1
> + cmplw cr7, r9, r10
> + cmpdi cr5, r9, r0
> + bne cr7, L(pagecross_ne)
> + beq cr5, L(pagecross_nullfound)
> + bdnz L(byte_compare_loop)
> + b L(align)
I think the byte compare loop should be rewritten if it is used on each
page cross.
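One possible shape for that rewrite, as a hedged sketch reusing the
hypothetical checkn helper from the loop example above: step in bounded
blocks of at most 16 bytes instead of single bytes until s1 reaches its
page boundary.

  /* Sketch only.  'left' is the byte count until s1's page boundary
     (r8 - r11 in the patch).  Returns the offset of a mismatch or
     terminator from the starting pointers, or -1 when the boundary
     is reached cleanly.  */
  static long
  compare_to_s1_boundary (const unsigned char **ps1,
                          const unsigned char **ps2, size_t left)
  {
    long off = 0;
    while (left > 0)
      {
        size_t n = left < 16 ? left : 16;
        long i = checkn (*ps1, *ps2, n);
        if (i >= 0)
          return off + i;
        *ps1 += n; *ps2 += n;
        off += n; left -= n;
      }
    return -1;
  }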
> +
> +L(pagecross_diff_bytes):
> + vextubrx r5, r6, v0
> + vextubrx r4, r6, v2
> + subf r3, r4, r5
> + extsw r3, r3
> + blr
> + .align 4
> +L(pagecross_ne):
> + extsw r3, r9
> + mr r9, r10
> +L(pagecross_retdiff):
> + subf r9, r9, r3
> + extsw r3, r9
> + blr
> +
> + .align 4
> +L(pagecross_nullfound):
> + li r3, 0
> + b L(pagecross_retdiff)
> +END (STRCMP)
> +libc_hidden_builtin_def (strcmp)