public inbox for libc-alpha@sourceware.org
* [PATCH] powerpc: Optimized strcmp for power10
@ 2023-09-21 15:37 Amrita H S
  2023-09-22 21:22 ` Paul E Murphy
  0 siblings, 1 reply; 2+ messages in thread
From: Amrita H S @ 2023-09-21 15:37 UTC (permalink / raw)
  To: libc-alpha; +Cc: rajis, Amrita H S

This patch was based on the __strcmp_power9 and the recent
__strlen_power10.

Improvements from __strcmp_power9:

    1. Uses new POWER10 instructions

       This code uses lxvp to decrease contention on load by loading 32 bytes
    per instruction.
       The vextractbm is used to have a smaller tail code for calculating the
    return value.

    2. Performance improvement

       This version has around 20% better performance on average.  Inconsistent
    performance regression is seen for sizes less than 32B, but it is
    also observed without this change.  A rough scalar sketch of the
    comparison strategy is included below for reference.
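
    The sketch is illustrative only and not part of the patch; in the
    actual code the per-block work is done with lxv/lxvp loads and the
    vcmpnezb. instruction:

        static int
        strcmp_block_model (const unsigned char *s1, const unsigned char *s2)
        {
          /* Compare 16-byte blocks; on the first block containing a
             mismatch or a terminating null, return the difference of
             the first differing bytes as unsigned char.  */
          for (;;)
            {
              for (int i = 0; i < 16; i++)
                if (s1[i] != s2[i] || s1[i] == '\0')
                  return (int) s1[i] - (int) s2[i];
              s1 += 16;
              s2 += 16;
            }
        }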

Signed-off-by: Amrita H S <amritahs@linux.vnet.ibm.com>
---
 sysdeps/powerpc/powerpc64/le/power10/strcmp.S | 297 ++++++++++++++++++
 sysdeps/powerpc/powerpc64/multiarch/Makefile  |   3 +-
 .../powerpc64/multiarch/ifunc-impl-list.c     |   4 +
 .../powerpc64/multiarch/strcmp-power10.S      |  26 ++
 sysdeps/powerpc/powerpc64/multiarch/strcmp.c  |   4 +
 5 files changed, 333 insertions(+), 1 deletion(-)
 create mode 100644 sysdeps/powerpc/powerpc64/le/power10/strcmp.S
 create mode 100644 sysdeps/powerpc/powerpc64/multiarch/strcmp-power10.S

diff --git a/sysdeps/powerpc/powerpc64/le/power10/strcmp.S b/sysdeps/powerpc/powerpc64/le/power10/strcmp.S
new file mode 100644
index 0000000000..a09279c5c8
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/le/power10/strcmp.S
@@ -0,0 +1,297 @@
+/* Optimized strcmp implementation for PowerPC64/POWER10.
+   Copyright (C) 2021-2023 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+#include <sysdep.h>
+
+#ifndef STRCMP
+# define STRCMP strcmp
+#endif
+
+/* Implements the function
+
+   int [r3] strcmp (const char *s1 [r3], const char *s2 [r4])
+
+   The implementation uses unaligned doubleword accesses for the first
+   32 bytes, as in the POWER8 version, and vectorised loops after that.  */
+
+/* TODO: Change this to actual instructions when minimum binutils is upgraded
+   to 2.27.  Macros are defined below for these newer instructions in order
+   to maintain compatibility.  */
+
+#define LXVP(xtp,dq,ra)             \
+	.long(((6)<<(32-6))          \
+	| ((((xtp)-32)>>1)<<(32-10)) \
+	| ((1)<<(32-11))             \
+	| ((ra)<<(32-16))            \
+	| dq)
+
+/* Get 16 bytes for unaligned case.
+   reg1: Vector to hold next 16 bytes.
+   reg2: Address to read from.
+   reg3: Permute control vector.  */
+#define GET16BYTES(reg1, reg2, reg3)    \
+	lvx	   reg1, 0, reg2;        \
+	vperm	   v8, v2, reg1, reg3;   \
+	vcmpequb.  v8, v0, v8;           \
+	beq	   cr6, 1f;              \
+	vspltisb   v9, 0;                \
+	b	   2f;                   \
+	.align 4;                        \
+1:                                      \
+	addi       r6, reg2, 16;         \
+	lvx        v9, 0, r6;            \
+2:                                      \
+	vperm      reg1, v9, reg1, reg3;
+
+#define CHECK16(vreg1, vreg2, offset)  \
+	lxv       vreg1+32, offset(r7); \
+	lxv       vreg2+32, offset(r4); \
+	vcmpnezb. v7, vreg1, vreg2;     \
+	bne       cr6, L(different);
+
+#define COMPARE_32(vreg1, vreg2, offset, label1, label2)  \
+	LXVP(vreg1+32, offset, r7);                        \
+	LXVP(vreg2+32, offset, r4);                        \
+	vcmpnezb. v7, vreg1+1, vreg2+1;                    \
+	bne	cr6, L(label1);                            \
+	vcmpnezb. v7, vreg1, vreg2;                        \
+	bne	cr6, L(label2);                            \
+
+#define TAIL(vreg1, vreg2)      \
+	vctzlsbb r6, v7;         \
+	vextubrx r5, r6, vreg1;  \
+	vextubrx r4, r6, vreg2;  \
+	subf	r3, r4, r5;      \
+	extsw	r3, r3;          \
+	blr;                     \
+
+	.machine  power9
+ENTRY_TOCLESS (STRCMP, 4)
+	li	r0, 0
+
+	/* Check if [s1]+16 or [s2]+16 will cross a 4K page boundary using
+	   the code:
+
+	    (((size_t) s1) % PAGE_SIZE > (PAGE_SIZE - ITER_SIZE))
+
+	   with PAGE_SIZE being 4096 and ITER_SIZE being 16.  */
+
+	rldicl	r7, r3, 0, 52
+	rldicl	r9, r4, 0, 52
+	cmpldi	cr7, r7, 4096-16
+	bgt	cr7, L(pagecross_check)
+	cmpldi	cr5, r9, 4096-16
+	bgt	cr5, L(pagecross_check)
+
+	/* For short strings up to 16 bytes,  load both s1 and s2 using
+	   unaligned dwords and compare.  */
+	ld	r8, 0(r3)
+	ld	r10, 0(r4)
+	cmpb	r12, r8, r0
+	cmpb	r11, r8, r10
+	orc.	r9, r12, r11
+	bne	cr0, L(different_nocmpb)
+
+	ld	r8, 8(r3)
+	ld	r10, 8(r4)
+	cmpb	r12, r8, r0
+	cmpb	r11, r8, r10
+	orc.	r9, r12, r11
+	bne	cr0, L(different_nocmpb)
+
+	addi	r7, r3, 16
+	addi	r4, r4, 16
+
+L(align):
+	/* The first 16 bytes have already been checked at this point.  */
+	vspltisb	v0, 0
+	vspltisb	v2, -1
+	lvsr	v6, 0, r4   /* Compute mask.  */
+	or	r5, r4, r7
+	andi.	r5, r5, 0xF
+	beq	cr0, L(aligned)
+	andi.	r5, r7, 0xF
+	beq	cr0, L(s1_align)
+	lvsr	v10, 0, r7   /* Compute mask.  */
+
+	/* Both s1 and s2 are unaligned.  */
+	GET16BYTES(v4, r7, v10)
+	GET16BYTES(v5, r4, v6)
+	vcmpnezb. v7, v5, v4
+	beq	cr6, L(match)
+	b	L(different)
+
+	/* Align s1 to qw and adjust s2 address.  */
+	.align  4
+L(match):
+	clrldi	r6, r7, 60
+	subfic	r5, r6, 16
+	add	r7, r7, r5
+	add	r4, r4, r5
+	andi.	r5, r4, 0xF
+	beq	cr0, L(aligned)
+	lvsr	v6, 0, r4
+	/* There are 2 loops depending on the input alignment.
+	   Each loop gets 16 bytes from s1 and s2 and compares.
+	   Loop until a mismatch or null occurs.  */
+L(s1_align):
+	lvx	v4, r7, r0
+	GET16BYTES(v5, r4, v6)
+	vcmpnezb. v7, v5, v4
+	addi	r7, r7, 16
+	addi	r4, r4, 16
+	bne	cr6, L(different)
+
+	lvx	v4, r7, r0
+	GET16BYTES(v5, r4, v6)
+	vcmpnezb. v7, v5, v4
+	addi	r7, r7, 16
+	addi	r4, r4, 16
+	bne	cr6, L(different)
+
+	lvx	v4, r7, r0
+	GET16BYTES(v5, r4, v6)
+	vcmpnezb. v7, v5, v4
+	addi	r7, r7, 16
+	addi	r4, r4, 16
+	bne	cr6, L(different)
+
+	lvx	v4, r7, r0
+	GET16BYTES(v5, r4, v6)
+	vcmpnezb. v7, v5, v4
+	addi	r7, r7, 16
+	addi	r4, r4, 16
+	beq	cr6, L(s1_align)
+	b	L(different)
+
+	.align  4
+
+        /* Align s1 to 32B and adjust s2 address.
+           Use lxvp only if both s1 and s2 are 32B aligned. */
+L(aligned):
+	CHECK16(v4, v5, 0)
+	CHECK16(v4, v5, 16)
+	CHECK16(v4, v5, 32)
+	CHECK16(v4, v5, 48)
+	addi	r7, r7, 64
+	addi	r4, r4, 64
+	CHECK16(v4, v5, 0)
+	CHECK16(v4, v5, 16)
+
+	clrldi	r6, r7, 59
+	subfic	r5, r6, 32
+	add	r7, r7, r5
+	add	r4, r4, r5
+	andi.	r5, r4, 0x1F
+	beq	cr0, L(aligned_loop)
+
+L(aligned1):
+	CHECK16(v4, v5, 0)
+	CHECK16(v4, v5, 16)
+	CHECK16(v4, v5, 32)
+	CHECK16(v4, v5, 48)
+	addi	r7, r7, 64
+	addi	r4, r4, 64
+	b	L(aligned1)
+
+	/* Calculate and return the difference.  */
+L(different):
+	vctzlsbb r6, v7
+	vextubrx r5, r6, v4
+	vextubrx r4, r6, v5
+	subf	r3, r4, r5
+	extsw	r3, r3
+	blr
+
+L(aligned_loop):
+	COMPARE_32(v14, v16, 0, tail1, tail2)
+	COMPARE_32(v18, v20, 32, tail3, tail4)
+	COMPARE_32(v22, v24, 64, tail5, tail6)
+	COMPARE_32(v26, v28, 96, tail7, tail8)
+	addi	r7, r7, 128
+	addi	r4, r4, 128
+	b	L(aligned_loop)
+
+L(tail1): TAIL(v15, v17)
+L(tail2): TAIL(v14, v16)
+L(tail3): TAIL(v19, v21)
+L(tail4): TAIL(v18, v20)
+L(tail5): TAIL(v23, v25)
+L(tail6): TAIL(v22, v24)
+L(tail7): TAIL(v27, v29)
+L(tail8): TAIL(v26, v28)
+
+	.align  4
+L(different_nocmpb):
+	neg	r3, r9
+	and	r9, r9, r3
+	cntlzd	r9, r9
+	subfic	r9, r9, 63
+	srd	r3, r8, r9
+	srd	r10, r10, r9
+	rldicl	r10, r10, 0, 56
+	rldicl	r3, r3, 0, 56
+	subf	r3, r10, r3
+	extsw	r3, r3
+	blr
+
+	.align	4
+L(pagecross_check):
+	subfic	r9, r9, 4096
+	subfic	r7, r7, 4096
+	cmpld	cr7, r7, r9
+	bge	cr7, L(pagecross)
+	mr	r7, r9
+
+	/* If an unaligned 16-byte read would cross a 4K page boundary, use
+	   a simple byte-by-byte comparison until the page boundary for s1
+	   is reached.  */
+L(pagecross):
+	add	r7, r3, r7
+	subf	r9, r3, r7
+	mtctr	r9
+
+	.align	4
+L(pagecross_loop):
+	/* Loads a byte from s1 and s2, compare if *s1 is equal to *s2
+	   and if *s1 is '\0'.  */
+	lbz	r9, 0(r3)
+	lbz	r10, 0(r4)
+	addi	r3, r3, 1
+	addi	r4, r4, 1
+	cmplw	cr7, r9, r10
+	cmpdi	cr5, r9, r0
+	bne	cr7, L(pagecross_ne)
+	beq	cr5, L(pagecross_nullfound)
+	bdnz	L(pagecross_loop)
+	b	L(align)
+
+	.align	4
+L(pagecross_ne):
+	extsw	r3, r9
+	mr	r9, r10
+L(pagecross_retdiff):
+	subf	r9, r9, r3
+	extsw	r3, r9
+	blr
+
+	.align	4
+L(pagecross_nullfound):
+	li	r3, 0
+	b	L(pagecross_retdiff)
+END (STRCMP)
+libc_hidden_builtin_def (strcmp)
diff --git a/sysdeps/powerpc/powerpc64/multiarch/Makefile b/sysdeps/powerpc/powerpc64/multiarch/Makefile
index 27d8495503..d7824a922b 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/Makefile
+++ b/sysdeps/powerpc/powerpc64/multiarch/Makefile
@@ -33,7 +33,8 @@ sysdep_routines += memcpy-power8-cached memcpy-power7 memcpy-a2 memcpy-power6 \
 ifneq (,$(filter %le,$(config-machine)))
 sysdep_routines += memcmp-power10 memcpy-power10 memmove-power10 memset-power10 \
 		   rawmemchr-power9 rawmemchr-power10 \
-		   strcmp-power9 strncmp-power9 strcpy-power9 stpcpy-power9 \
+		   strcmp-power9 strcmp-power10 strncmp-power9 \
+		   strcpy-power9 stpcpy-power9 \
 		   strlen-power9 strncpy-power9 stpncpy-power9 strlen-power10
 endif
 CFLAGS-strncase-power7.c += -mcpu=power7 -funroll-loops
diff --git a/sysdeps/powerpc/powerpc64/multiarch/ifunc-impl-list.c b/sysdeps/powerpc/powerpc64/multiarch/ifunc-impl-list.c
index ebe9434052..ca1f57e1e2 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/ifunc-impl-list.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/ifunc-impl-list.c
@@ -376,6 +376,10 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
   /* Support sysdeps/powerpc/powerpc64/multiarch/strcmp.c.  */
   IFUNC_IMPL (i, name, strcmp,
 #ifdef __LITTLE_ENDIAN__
+	      IFUNC_IMPL_ADD (array, i, strcmp,
+			      (hwcap2 & PPC_FEATURE2_ARCH_3_1)
+			      && (hwcap & PPC_FEATURE_HAS_VSX),
+			      __strcmp_power10)
 	      IFUNC_IMPL_ADD (array, i, strcmp,
 			      hwcap2 & PPC_FEATURE2_ARCH_3_00
 			      && hwcap & PPC_FEATURE_HAS_ALTIVEC,
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strcmp-power10.S b/sysdeps/powerpc/powerpc64/multiarch/strcmp-power10.S
new file mode 100644
index 0000000000..c80067ce33
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/multiarch/strcmp-power10.S
@@ -0,0 +1,26 @@
+/* Optimized strcmp implementation for POWER10/PPC64.
+   Copyright (C) 2021-2023 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#if defined __LITTLE_ENDIAN__ && IS_IN (libc)
+#define STRCMP __strcmp_power10
+
+#undef libc_hidden_builtin_def
+#define libc_hidden_builtin_def(name)
+
+#include <sysdeps/powerpc/powerpc64/le/power10/strcmp.S>
+#endif /* __LITTLE_ENDIAN__ && IS_IN (libc) */
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strcmp.c b/sysdeps/powerpc/powerpc64/multiarch/strcmp.c
index 31fcdee916..f1dac99b66 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/strcmp.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/strcmp.c
@@ -29,12 +29,16 @@ extern __typeof (strcmp) __strcmp_power7 attribute_hidden;
 extern __typeof (strcmp) __strcmp_power8 attribute_hidden;
 # ifdef __LITTLE_ENDIAN__
 extern __typeof (strcmp) __strcmp_power9 attribute_hidden;
+extern __typeof (strcmp) __strcmp_power10 attribute_hidden;
 # endif
 
 # undef strcmp
 
 libc_ifunc_redirected (__redirect_strcmp, strcmp,
 # ifdef __LITTLE_ENDIAN__
+		        (hwcap2 & PPC_FEATURE2_ARCH_3_1
+			 && hwcap & PPC_FEATURE_HAS_VSX)
+			? __strcmp_power10 :
 			(hwcap2 & PPC_FEATURE2_ARCH_3_00
 			 && hwcap & PPC_FEATURE_HAS_ALTIVEC)
 			? __strcmp_power9 :
-- 
2.39.3



* Re: [PATCH] powerpc: Optimized strcmp for power10
  2023-09-21 15:37 [PATCH] powerpc: Optimized strcmp for power10 Amrita H S
@ 2023-09-22 21:22 ` Paul E Murphy
  0 siblings, 0 replies; 2+ messages in thread
From: Paul E Murphy @ 2023-09-22 21:22 UTC (permalink / raw)
  To: Amrita H S, libc-alpha; +Cc: rajis


On 9/21/23 10:37 AM, Amrita H S wrote:
> This patch was based on the __strcmp_power9 and the recent
> __strlen_power10.
> 
> Improvements from __strcmp_power9:
> 
>      1. Uses new POWER10 instructions
> 
>         This code uses lxvp to decrease contention on load by loading 32 bytes
>      per instruction.
>         The vextractbm is used to have a smaller tail code for calculating the
>      return value.
> 
>      2. Performance improvement
> 
>         This version has around 20% better performance on average.  Inconsistent
>      performance regression is seen for sizes less than 32B, but it is
>      also observed without this change.

Can you publish the full benchtest results?  It is helpful for the
reviewers to understand the performance of various inputs.

> 
> Signed-off-by: Amrita H S <amritahs@linux.vnet.ibm.com>
> ---
>   sysdeps/powerpc/powerpc64/le/power10/strcmp.S | 297 ++++++++++++++++++
>   sysdeps/powerpc/powerpc64/multiarch/Makefile  |   3 +-
>   .../powerpc64/multiarch/ifunc-impl-list.c     |   4 +
>   .../powerpc64/multiarch/strcmp-power10.S      |  26 ++
>   sysdeps/powerpc/powerpc64/multiarch/strcmp.c  |   4 +
>   5 files changed, 333 insertions(+), 1 deletion(-)
>   create mode 100644 sysdeps/powerpc/powerpc64/le/power10/strcmp.S
>   create mode 100644 sysdeps/powerpc/powerpc64/multiarch/strcmp-power10.S
> 
> diff --git a/sysdeps/powerpc/powerpc64/le/power10/strcmp.S b/sysdeps/powerpc/powerpc64/le/power10/strcmp.S
> new file mode 100644
> index 0000000000..a09279c5c8
> --- /dev/null
> +++ b/sysdeps/powerpc/powerpc64/le/power10/strcmp.S
> @@ -0,0 +1,297 @@
> +/* Optimized strcmp implementation for PowerPC64/POWER10.
> +   Copyright (C) 2021-2023 Free Software Foundation, Inc.
> +   This file is part of the GNU C Library.
> +
> +   The GNU C Library is free software; you can redistribute it and/or
> +   modify it under the terms of the GNU Lesser General Public
> +   License as published by the Free Software Foundation; either
> +   version 2.1 of the License, or (at your option) any later version.
> +
> +   The GNU C Library is distributed in the hope that it will be useful,
> +   but WITHOUT ANY WARRANTY; without even the implied warranty of
> +   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> +   Lesser General Public License for more details.
> +
> +   You should have received a copy of the GNU Lesser General Public
> +   License along with the GNU C Library; if not, see
> +   <https://www.gnu.org/licenses/>.  */
> +#include <sysdep.h>
> +
> +#ifndef STRCMP
> +# define STRCMP strcmp
> +#endif
> +
> +/* Implements the function
> +
> +   int [r3] strcmp (const char *s1 [r3], const char *s2 [r4])
> +
> +   The implementation uses unaligned doubleword accesses for the first
> +   32 bytes, as in the POWER8 version, and vectorised loops after that.  */
> +
> +/* TODO: Change this to actual instructions when minimum binutils is upgraded
> +   to 2.27.  Macros are defined below for these newer instructions in order
> +   to maintain compatibility.  */
> +
> +#define LXVP(xtp,dq,ra)             \
> +	.long(((6)<<(32-6))          \
> +	| ((((xtp)-32)>>1)<<(32-10)) \
> +	| ((1)<<(32-11))             \
> +	| ((ra)<<(32-16))            \
> +	| dq)
> +
> +/* Get 16 bytes for unaligned case.
> +   reg1: Vector to hold next 16 bytes.
> +   reg2: Address to read from.
> +   reg3: Permute control vector.  */
> +#define GET16BYTES(reg1, reg2, reg3)    \
> +	lvx	   reg1, 0, reg2;        \
> +	vperm	   v8, v2, reg1, reg3;   \
> +	vcmpequb.  v8, v0, v8;           \
> +	beq	   cr6, 1f;              \
> +	vspltisb   v9, 0;                \
> +	b	   2f;                   \
> +	.align 4;                        \
> +1:                                      \
> +	addi       r6, reg2, 16;         \
> +	lvx        v9, 0, r6;            \
> +2:                                      \
> +	vperm      reg1, v9, reg1, reg3;
Could this sequence be replaced by lxv?  Is there a requirement to avoid
unaligned loads?
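
For illustration only (a sketch I have not tested), an unaligned 16-byte
fetch can also be written with the vec_xl intrinsic, which becomes an
unaligned VSX load (lxv/lxvx) on POWER9 and later.  Whether such a load
may read past the terminating null into an unmapped page is presumably
the concern that motivates the lvx/vperm sequence:

        #include <altivec.h>

        /* Sketch only: one unaligned 16-byte load, no permute needed.
           The helper name is illustrative, not part of the patch.  */
        static inline vector unsigned char
        load16 (const unsigned char *p)
        {
          return vec_xl (0, p);
        }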


> +
> +#define CHECK16(vreg1, vreg2, offset)  \
> +	lxv       vreg1+32, offset(r7); \
> +	lxv       vreg2+32, offset(r4); \
> +	vcmpnezb. v7, vreg1, vreg2;     \
> +	bne       cr6, L(different);
For consistency, should this be named COMPARE_16?  Nit: should the branch
target also be encoded as a macro parameter, similar to COMPARE_32?
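
Something like the following (sketch only, reusing the CHECK16 body with
the parameter style of COMPARE_32):

        #define COMPARE_16(vreg1, vreg2, offset, label)  \
                lxv       vreg1+32, offset(r7);          \
                lxv       vreg2+32, offset(r4);          \
                vcmpnezb. v7, vreg1, vreg2;              \
                bne       cr6, L(label);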


> +
> +#define COMPARE_32(vreg1, vreg2, offset, label1, label2)  \
> +	LXVP(vreg1+32, offset, r7);                        \
> +	LXVP(vreg2+32, offset, r4);                        \
> +	vcmpnezb. v7, vreg1+1, vreg2+1;                    \
> +	bne	cr6, L(label1);                            \
> +	vcmpnezb. v7, vreg1, vreg2;                        \
> +	bne	cr6, L(label2);                            \
> +
> +#define TAIL(vreg1, vreg2)      \
> +	vctzlsbb r6, v7;         \
> +	vextubrx r5, r6, vreg1;  \
> +	vextubrx r4, r6, vreg2;  \
> +	subf	r3, r4, r5;      \
> +	extsw	r3, r3;          \
> +	blr;                     \
> +
> +	.machine  power9
This is a power10 target, so this should be .machine power10, or there
should be a note explaining why not.


> +ENTRY_TOCLESS (STRCMP, 4)
> +	li	r0, 0
> +
> +	/* Check if [s1]+16 or [s2]+16 will cross a 4K page boundary using
> +	   the code:
> +
> +	    (((size_t) s1) % PAGE_SIZE > (PAGE_SIZE - ITER_SIZE))
> +
> +	   with PAGE_SIZE being 4096 and ITER_SIZE being 16.  */
> +
> +	rldicl	r7, r3, 0, 52
> +	rldicl	r9, r4, 0, 52
> +	cmpldi	cr7, r7, 4096-16
> +	bgt	cr7, L(pagecross_check)
> +	cmpldi	cr5, r9, 4096-16
> +	bgt	cr5, L(pagecross_check)
> +
> +	/* For short strings up to 16 bytes,  load both s1 and s2 using
> +	   unaligned dwords and compare.  */
> +	ld	r8, 0(r3)
> +	ld	r10, 0(r4)
> +	cmpb	r12, r8, r0
> +	cmpb	r11, r8, r10
> +	orc.	r9, r12, r11
> +	bne	cr0, L(different_nocmpb)
> +
> +	ld	r8, 8(r3)
> +	ld	r10, 8(r4)
> +	cmpb	r12, r8, r0
> +	cmpb	r11, r8, r10
> +	orc.	r9, r12, r11
> +	bne	cr0, L(different_nocmpb)
> +
> +	addi	r7, r3, 16
> +	addi	r4, r4, 16
> +
> +L(align):
> +	/* The first 16 bytes have already been checked at this point.  */
> +	vspltisb	v0, 0
> +	vspltisb	v2, -1
> +	lvsr	v6, 0, r4   /* Compute mask.  */
> +	or	r5, r4, r7
> +	andi.	r5, r5, 0xF
> +	beq	cr0, L(aligned)
> +	andi.	r5, r7, 0xF
> +	beq	cr0, L(s1_align)
> +	lvsr	v10, 0, r7   /* Compute mask.  */
> +
> +	/* Both s1 and s2 are unaligned.  */
> +	GET16BYTES(v4, r7, v10)
> +	GET16BYTES(v5, r4, v6)
> +	vcmpnezb. v7, v5, v4
> +	beq	cr6, L(match)
> +	b	L(different)
> +
> +	/* Align s1 to qw and adjust s2 address.  */
> +	.align  4
> +L(match):
> +	clrldi	r6, r7, 60
> +	subfic	r5, r6, 16
> +	add	r7, r7, r5
> +	add	r4, r4, r5
> +	andi.	r5, r4, 0xF
> +	beq	cr0, L(aligned)
> +	lvsr	v6, 0, r4
> +	/* There are 2 loops depending on the input alignment.
> +	   Each loop gets 16 bytes from s1 and s2 and compares.
> +	   Loop until a mismatch or null occurs.  */
> +L(s1_align):
> +	lvx	v4, r7, r0
> +	GET16BYTES(v5, r4, v6)
> +	vcmpnezb. v7, v5, v4
> +	addi	r7, r7, 16
> +	addi	r4, r4, 16
> +	bne	cr6, L(different)
> +
> +	lvx	v4, r7, r0
> +	GET16BYTES(v5, r4, v6)
> +	vcmpnezb. v7, v5, v4
> +	addi	r7, r7, 16
> +	addi	r4, r4, 16
> +	bne	cr6, L(different)
> +
> +	lvx	v4, r7, r0
> +	GET16BYTES(v5, r4, v6)
> +	vcmpnezb. v7, v5, v4
> +	addi	r7, r7, 16
> +	addi	r4, r4, 16
> +	bne	cr6, L(different)
> +
> +	lvx	v4, r7, r0
> +	GET16BYTES(v5, r4, v6)
> +	vcmpnezb. v7, v5, v4
> +	addi	r7, r7, 16
> +	addi	r4, r4, 16
> +	beq	cr6, L(s1_align)
> +	b	L(different)
I think lxv is preferable to lvx here too, though it is less important
since v4 is aligned.

> +
> +	.align  4
> +
> +        /* Align s1 to 32B and adjust s2 address.
> +           Use lxvp only if both s1 and s2 are 32B aligned. */
> +L(aligned):
> +	CHECK16(v4, v5, 0)
> +	CHECK16(v4, v5, 16)
> +	CHECK16(v4, v5, 32)
> +	CHECK16(v4, v5, 48)
> +	addi	r7, r7, 64
> +	addi	r4, r4, 64
> +	CHECK16(v4, v5, 0)
> +	CHECK16(v4, v5, 16)
> +
> +	clrldi	r6, r7, 59
> +	subfic	r5, r6, 32
> +	add	r7, r7, r5
> +	add	r4, r4, r5
> +	andi.	r5, r4, 0x1F
> +	beq	cr0, L(aligned_loop)
> +
> +L(aligned1):
> +	CHECK16(v4, v5, 0)
> +	CHECK16(v4, v5, 16)
> +	CHECK16(v4, v5, 32)
> +	CHECK16(v4, v5, 48)
> +	addi	r7, r7, 64
> +	addi	r4, r4, 64
> +	b	L(aligned1)
> +
> +	/* Calculate and return the difference.  */
> +L(different):
> +	vctzlsbb r6, v7
> +	vextubrx r5, r6, v4
> +	vextubrx r4, r6, v5
> +	subf	r3, r4, r5
> +	extsw	r3, r3
> +	blr
> +
> +L(aligned_loop):
> +	COMPARE_32(v14, v16, 0, tail1, tail2)
> +	COMPARE_32(v18, v20, 32, tail3, tail4)
> +	COMPARE_32(v22, v24, 64, tail5, tail6)
> +	COMPARE_32(v26, v28, 96, tail7, tail8)
> +	addi	r7, r7, 128
> +	addi	r4, r4, 128
> +	b	L(aligned_loop)
> +
> +L(tail1): TAIL(v15, v17)
> +L(tail2): TAIL(v14, v16)
> +L(tail3): TAIL(v19, v21)
> +L(tail4): TAIL(v18, v20)
> +L(tail5): TAIL(v23, v25)
> +L(tail6): TAIL(v22, v24)
> +L(tail7): TAIL(v27, v29)
> +L(tail8): TAIL(v26, v28)
> +
> +	.align  4
> +L(different_nocmpb):
> +	neg	r3, r9
> +	and	r9, r9, r3
> +	cntlzd	r9, r9
> +	subfic	r9, r9, 63
> +	srd	r3, r8, r9
> +	srd	r10, r10, r9
> +	rldicl	r10, r10, 0, 56
> +	rldicl	r3, r3, 0, 56
> +	subf	r3, r10, r3
> +	extsw	r3, r3
> +	blr
> +
> +	.align	4
> +L(pagecross_check):
> +	subfic	r9, r9, 4096
> +	subfic	r7, r7, 4096
> +	cmpld	cr7, r7, r9
> +	bge	cr7, L(pagecross)
> +	mr	r7, r9
> +
> +	/* If an unaligned 16-byte read would cross a 4K page boundary, use
> +	   a simple byte-by-byte comparison until the page boundary for s1
> +	   is reached.  */
I haven't put a lot of thought into this portion, but can lxvl be used
to load a < 16B chunk of both strings instead?  The bytewise loop with a
counter is slow for small values.
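
As a rough sketch (untested; vec_xl_len is the intrinsic form of lxvl,
available for -mcpu=power9 and later, and the helper name here is
illustrative):

        #include <altivec.h>
        #include <stddef.h>

        /* Load only the bytes that remain before the page boundary;
           lxvl zeroes the bytes beyond the requested length.  */
        static inline vector unsigned char
        load_upto_16 (const unsigned char *p, size_t len)
        {
          return vec_xl_len ((unsigned char *) p, len);
        }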


> +L(pagecross):
> +	add	r7, r3, r7
> +	subf	r9, r3, r7
> +	mtctr	r9
> +
> +	.align	4
> +L(pagecross_loop):
> +	/* Loads a byte from s1 and s2, compare if *s1 is equal to *s2
> +	   and if *s1 is '\0'.  */
> +	lbz	r9, 0(r3)
> +	lbz	r10, 0(r4)
> +	addi	r3, r3, 1
> +	addi	r4, r4, 1
> +	cmplw	cr7, r9, r10
> +	cmpdi	cr5, r9, r0
> +	bne	cr7, L(pagecross_ne)
> +	beq	cr5, L(pagecross_nullfound)
> +	bdnz	L(pagecross_loop)
> +	b	L(align)
> +
> +	.align	4
> +L(pagecross_ne):
> +	extsw	r3, r9
> +	mr	r9, r10
> +L(pagecross_retdiff):
> +	subf	r9, r9, r3
> +	extsw	r3, r9
> +	blr
> +
> +	.align	4
> +L(pagecross_nullfound):
> +	li	r3, 0
> +	b	L(pagecross_retdiff)
> +END (STRCMP)
> +libc_hidden_builtin_def (strcmp)

