From: Christoph Muellner <christoph.muellner@vrull.eu>
To: libc-alpha@sourceware.org, Palmer Dabbelt <palmer@dabbelt.com>,
Darius Rad <darius@bluespec.com>,
Andrew Waterman <andrew@sifive.com>, DJ Delorie <dj@redhat.com>,
Vineet Gupta <vineetg@rivosinc.com>,
Kito Cheng <kito.cheng@sifive.com>,
Jeff Law <jeffreyalaw@gmail.com>,
Philipp Tomsich <philipp.tomsich@vrull.eu>,
Heiko Stuebner <heiko.stuebner@vrull.eu>
Cc: "Christoph Müllner" <christoph.muellner@vrull.eu>
Subject: [RFC PATCH 10/19] riscv: Add accelerated memset routines for RV64
Date: Tue, 7 Feb 2023 01:16:09 +0100 [thread overview]
Message-ID: <20230207001618.458947-11-christoph.muellner@vrull.eu> (raw)
In-Reply-To: <20230207001618.458947-1-christoph.muellner@vrull.eu>
From: Christoph Müllner <christoph.muellner@vrull.eu>
The implementation of memset() can be accelerated by
loop unrolling, fast unaligned accesses and cbo.zero.
Let's provide an implementation that supports that,
with a cbo.zero being optional and only available for
a block size of 64 bytes.
Signed-off-by: Christoph Müllner <christoph.muellner@vrull.eu>
---
sysdeps/riscv/multiarch/Makefile | 4 +-
sysdeps/riscv/multiarch/ifunc-impl-list.c | 4 +
sysdeps/riscv/multiarch/memset.c | 12 +
.../riscv/multiarch/memset_rv64_unaligned.S | 31 +++
.../multiarch/memset_rv64_unaligned_cboz64.S | 217 ++++++++++++++++++
5 files changed, 267 insertions(+), 1 deletion(-)
create mode 100644 sysdeps/riscv/multiarch/memset_rv64_unaligned.S
create mode 100644 sysdeps/riscv/multiarch/memset_rv64_unaligned_cboz64.S
diff --git a/sysdeps/riscv/multiarch/Makefile b/sysdeps/riscv/multiarch/Makefile
index 453f0f4e4c..6e8ebb42d8 100644
--- a/sysdeps/riscv/multiarch/Makefile
+++ b/sysdeps/riscv/multiarch/Makefile
@@ -1,4 +1,6 @@
ifeq ($(subdir),string)
sysdep_routines += \
- memset_generic
+ memset_generic \
+ memset_rv64_unaligned \
+ memset_rv64_unaligned_cboz64
endif
diff --git a/sysdeps/riscv/multiarch/ifunc-impl-list.c b/sysdeps/riscv/multiarch/ifunc-impl-list.c
index fd1752bc46..e878977b73 100644
--- a/sysdeps/riscv/multiarch/ifunc-impl-list.c
+++ b/sysdeps/riscv/multiarch/ifunc-impl-list.c
@@ -36,6 +36,10 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
size_t i = 0;
IFUNC_IMPL (i, name, memset,
+#if __riscv_xlen == 64
+ IFUNC_IMPL_ADD (array, i, memset, 1, __memset_rv64_unaligned_cboz64)
+ IFUNC_IMPL_ADD (array, i, memset, 1, __memset_rv64_unaligned)
+#endif
IFUNC_IMPL_ADD (array, i, memset, 1, __memset_generic))
return i;
diff --git a/sysdeps/riscv/multiarch/memset.c b/sysdeps/riscv/multiarch/memset.c
index ae4289ab03..7ba10dd3da 100644
--- a/sysdeps/riscv/multiarch/memset.c
+++ b/sysdeps/riscv/multiarch/memset.c
@@ -31,7 +31,19 @@
extern __typeof (__redirect_memset) __libc_memset;
extern __typeof (__redirect_memset) __memset_generic attribute_hidden;
+#if __riscv_xlen == 64
+extern __typeof (__redirect_memset) __memset_rv64_unaligned_cboz64 attribute_hidden;
+extern __typeof (__redirect_memset) __memset_rv64_unaligned attribute_hidden;
+
+libc_ifunc (__libc_memset,
+ (IS_RV64() && HAVE_FAST_UNALIGNED() && HAVE_RV(zicboz) && HAVE_CBOZ_BLOCKSIZE(64)
+ ? __memset_rv64_unaligned_cboz64
+ : (IS_RV64() && HAVE_FAST_UNALIGNED()
+ ? __memset_rv64_unaligned
+ : __memset_generic)));
+#else
libc_ifunc (__libc_memset, __memset_generic);
+#endif
# undef memset
strong_alias (__libc_memset, memset);
diff --git a/sysdeps/riscv/multiarch/memset_rv64_unaligned.S b/sysdeps/riscv/multiarch/memset_rv64_unaligned.S
new file mode 100644
index 0000000000..561e564b42
--- /dev/null
+++ b/sysdeps/riscv/multiarch/memset_rv64_unaligned.S
@@ -0,0 +1,31 @@
+/* Copyright (C) 2022 Free Software Foundation, Inc.
+
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library. If not, see
+ <https://www.gnu.org/licenses/>. */
+
+#include <sysdep.h>
+#include <sys/asm.h>
+
+#ifndef MEMSET
+# define MEMSET __memset_rv64_unaligned
+#endif
+
+#undef CBO_ZERO_THRESHOLD
+#define CBO_ZERO_THRESHOLD 0
+
+/* Assumptions: rv64i unaligned accesses. */
+
+#include "./memset_rv64_unaligned_cboz64.S"
diff --git a/sysdeps/riscv/multiarch/memset_rv64_unaligned_cboz64.S b/sysdeps/riscv/multiarch/memset_rv64_unaligned_cboz64.S
new file mode 100644
index 0000000000..710bb41e44
--- /dev/null
+++ b/sysdeps/riscv/multiarch/memset_rv64_unaligned_cboz64.S
@@ -0,0 +1,217 @@
+/* Copyright (C) 2022 Free Software Foundation, Inc.
+
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library. If not, see
+ <https://www.gnu.org/licenses/>. */
+
+#if __riscv_xlen == 64
+
+#include <sysdep.h>
+#include <sys/asm.h>
+
+#define dstin a0
+#define val a1
+#define count a2
+#define dst a3
+#define dstend a4
+#define tmp1 a5
+
+#ifndef MEMSET
+# define MEMSET __memset_rv64_unaligned_cboz64
+#endif
+
+/* cbo.zero can be used to improve the performance of memset-zero.
+ * However, the performance gain depends on the amount of data
+ * to be cleared. This threshold allows to set the minimum amount
+ * of bytes to enable the cbo.zero loop.
+ * To disable cbo.zero, set this threshold to 0. */
+#ifndef CBO_ZERO_THRESHOLD
+# define CBO_ZERO_THRESHOLD 128
+#endif
+
+/* Assumptions:
+ * rv64i_zicboz, 64 byte cbo.zero block size, unaligned accesses. */
+
+ENTRY_ALIGN (MEMSET, 6)
+
+ /* Repeat the byte. */
+ slli tmp1, val, 8
+ or val, tmp1, a1
+ slli tmp1, val, 16
+ or val, tmp1, a1
+ slli tmp1, val, 32
+ or val, tmp1, val
+
+ /* Calculate the end position. */
+ add dstend, dstin, count
+
+ /* Decide how to process. */
+ li tmp1, 96
+ bgtu count, tmp1, L(set_long)
+ li tmp1, 16
+ bgtu count, tmp1, L(set_medium)
+
+ /* Set 0..16 bytes. */
+ li tmp1, 8
+ bltu count, tmp1, 1f
+ /* Set 8..16 bytes. */
+ sd val, 0(dstin)
+ sd val, -8(dstend)
+ ret
+
+ .p2align 3
+ /* Set 0..7 bytes. */
+1: li tmp1, 4
+ bltu count, tmp1, 2f
+ /* Set 4..7 bytes. */
+ sw val, 0(dstin)
+ sw val, -4(dstend)
+ ret
+
+ /* Set 0..3 bytes. */
+2: beqz count, 3f
+ sb val, 0(dstin)
+ li tmp1, 2
+ bltu count, tmp1, 3f
+ sh val, -2(dstend)
+3: ret
+
+ .p2align 3
+ /* Set 17..96 bytes. */
+L(set_medium):
+ sd val, 0(dstin)
+ sd val, 8(dstin)
+ li tmp1, 64
+ bgtu count, tmp1, L(set96)
+ sd val, -16(dstend)
+ sd val, -8(dstend)
+ li tmp1, 32
+ bleu count, tmp1, 1f
+ sd val, 16(dstin)
+ sd val, 24(dstin)
+ sd val, -32(dstend)
+ sd val, -24(dstend)
+1: ret
+
+ .p2align 4
+ /* Set 65..96 bytes. Write 64 bytes from the start and
+ 32 bytes from the end. */
+L(set96):
+ sd val, 16(dstin)
+ sd val, 24(dstin)
+ sd val, 32(dstin)
+ sd val, 40(dstin)
+ sd val, 48(dstin)
+ sd val, 56(dstin)
+ sd val, -32(dstend)
+ sd val, -24(dstend)
+ sd val, -16(dstend)
+ sd val, -8(dstend)
+ ret
+
+ .p2align 4
+ /* Set 97+ bytes. */
+L(set_long):
+ /* Store 16 bytes unaligned. */
+ sd val, 0(dstin)
+ sd val, 8(dstin)
+
+#if CBO_ZERO_THRESHOLD
+ li tmp1, CBO_ZERO_THRESHOLD
+ blt count, tmp1, 1f
+ beqz val, L(cbo_zero_64)
+1:
+#endif
+
+ /* Round down to the previous 16 byte boundary (keep offset of 16). */
+ andi dst, dstin, -16
+
+ /* Calculate loop termination position. */
+ addi tmp1, dstend, -(16+64)
+
+ /* Store 64 bytes in a loop. */
+ .p2align 4
+1: sd val, 16(dst)
+ sd val, 24(dst)
+ sd val, 32(dst)
+ sd val, 40(dst)
+ sd val, 48(dst)
+ sd val, 56(dst)
+ sd val, 64(dst)
+ sd val, 72(dst)
+ addi dst, dst, 64
+ bltu dst, tmp1, 1b
+
+ /* Calculate remainder (dst2 is 16 too less). */
+ sub count, dstend, dst
+
+ /* Check if we have more than 32 bytes to copy. */
+ li tmp1, (32+16)
+ ble count, tmp1, 1f
+ sd val, 16(dst)
+ sd val, 24(dst)
+ sd val, 32(dst)
+ sd val, 40(dst)
+1: sd val, -32(dstend)
+ sd val, -24(dstend)
+ sd val, -16(dstend)
+ sd val, -8(dstend)
+ ret
+
+#if CBO_ZERO_THRESHOLD
+ .option push
+ .option arch,+zicboz
+ .p2align 3
+L(cbo_zero_64):
+ /* Align dst (down). */
+ sd val, 16(dstin)
+ sd val, 24(dstin)
+ sd val, 32(dstin)
+ sd val, 40(dstin)
+ sd val, 48(dstin)
+ sd val, 56(dstin)
+
+ /* Round up to the next 64 byte boundary. */
+ andi dst, dstin, -64
+ addi dst, dst, 64
+
+ /* Calculate loop termination position. */
+ addi tmp1, dstend, -64
+
+ /* cbo.zero sets 64 bytes each time. */
+ .p2align 4
+1: cbo.zero (dst)
+ addi dst, dst, 64
+ bltu dst, tmp1, 1b
+
+ sub count, dstend, dst
+ li tmp1, 32
+ ble count, tmp1, 1f
+ sd val, 0(dst)
+ sd val, 8(dst)
+ sd val, 16(dst)
+ sd val, 24(dst)
+1: sd val, -32(dstend)
+ sd val, -24(dstend)
+ sd val, -16(dstend)
+ sd val, -8(dstend)
+ ret
+ .option pop
+#endif /* CBO_ZERO_THRESHOLD */
+
+END (MEMSET)
+libc_hidden_builtin_def (MEMSET)
+
+#endif /* __riscv_xlen == 64 */
--
2.39.1
next prev parent reply other threads:[~2023-02-07 0:16 UTC|newest]
Thread overview: 42+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-02-07 0:15 [RFC PATCH 00/19] riscv: ifunc support with optimized mem*/str*/cpu_relax routines Christoph Muellner
2023-02-07 0:16 ` [RFC PATCH 01/19] Inhibit early libcalls before ifunc support is ready Christoph Muellner
2023-02-07 0:16 ` [RFC PATCH 02/19] riscv: LEAF: Use C_LABEL() to construct the asm name for a C symbol Christoph Muellner
2023-02-07 0:16 ` [RFC PATCH 03/19] riscv: Add ENTRY_ALIGN() macro Christoph Muellner
2023-02-07 0:16 ` [RFC PATCH 04/19] riscv: Add hart feature run-time detection framework Christoph Muellner
2023-02-07 0:16 ` [RFC PATCH 05/19] riscv: Introduction of ISA extensions Christoph Muellner
2023-02-07 0:16 ` [RFC PATCH 06/19] riscv: Adding ISA string parser for environment variables Christoph Muellner
2023-02-07 6:20 ` David Abdurachmanov
2023-02-07 0:16 ` [RFC PATCH 07/19] riscv: hart-features: Add fast_unaligned property Christoph Muellner
2023-02-07 0:16 ` [RFC PATCH 08/19] riscv: Add (empty) ifunc framework Christoph Muellner
2023-02-07 0:16 ` [RFC PATCH 09/19] riscv: Add ifunc support for memset Christoph Muellner
2023-02-07 0:16 ` Christoph Muellner [this message]
2023-02-07 0:16 ` [RFC PATCH 11/19] riscv: Add ifunc support for memcpy/memmove Christoph Muellner
2023-02-07 0:16 ` [RFC PATCH 12/19] riscv: Add accelerated memcpy/memmove routines for RV64 Christoph Muellner
2023-02-07 0:16 ` [RFC PATCH 13/19] riscv: Add ifunc support for strlen Christoph Muellner
2023-02-07 0:16 ` [RFC PATCH 14/19] riscv: Add accelerated strlen routine Christoph Muellner
2023-02-07 0:16 ` [RFC PATCH 15/19] riscv: Add ifunc support for strcmp Christoph Muellner
2023-02-07 0:16 ` [RFC PATCH 16/19] riscv: Add accelerated strcmp routines Christoph Muellner
2023-02-07 11:57 ` Xi Ruoyao
2023-02-07 14:15 ` Christoph Müllner
2023-03-31 5:06 ` Jeff Law
2023-03-31 12:31 ` Adhemerval Zanella Netto
2023-03-31 14:30 ` Jeff Law
2023-03-31 14:48 ` Adhemerval Zanella Netto
2023-03-31 17:19 ` Palmer Dabbelt
2023-03-31 14:32 ` Jeff Law
2023-02-07 0:16 ` [RFC PATCH 17/19] riscv: Add ifunc support for strncmp Christoph Muellner
2023-02-07 0:16 ` [RFC PATCH 18/19] riscv: Add an optimized strncmp routine Christoph Muellner
2023-02-07 1:19 ` Noah Goldstein
2023-02-08 15:13 ` Philipp Tomsich
2023-02-08 17:55 ` Palmer Dabbelt
2023-02-08 19:48 ` Adhemerval Zanella Netto
2023-02-08 18:04 ` Noah Goldstein
2023-02-07 0:16 ` [RFC PATCH 19/19] riscv: Add __riscv_cpu_relax() to allow yielding in busy loops Christoph Muellner
2023-02-07 0:23 ` Andrew Waterman
2023-02-07 0:29 ` Christoph Müllner
2023-02-07 2:59 ` [RFC PATCH 00/19] riscv: ifunc support with optimized mem*/str*/cpu_relax routines Kito Cheng
2023-02-07 16:40 ` Adhemerval Zanella Netto
2023-02-07 17:16 ` DJ Delorie
2023-02-07 19:32 ` Philipp Tomsich
2023-02-07 21:14 ` DJ Delorie
2023-02-08 11:26 ` Christoph Müllner
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20230207001618.458947-11-christoph.muellner@vrull.eu \
--to=christoph.muellner@vrull.eu \
--cc=andrew@sifive.com \
--cc=darius@bluespec.com \
--cc=dj@redhat.com \
--cc=heiko.stuebner@vrull.eu \
--cc=jeffreyalaw@gmail.com \
--cc=kito.cheng@sifive.com \
--cc=libc-alpha@sourceware.org \
--cc=palmer@dabbelt.com \
--cc=philipp.tomsich@vrull.eu \
--cc=vineetg@rivosinc.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for read-only IMAP folder(s) and NNTP newsgroup(s).