From: caiyinyu <caiyinyu@loongson.cn>
To: libc-alpha@sourceware.org, adhemerval.zanella@linaro.org
Cc: joseph_myers@mentor.com, xuchenghua@loongson.cn, caiyinyu@loongson.cn
Subject: [PATCH v3 07/13] LoongArch: Atomic and Locking Routines
Date: Fri, 15 Apr 2022 09:31:08 +0800 [thread overview]
Message-ID: <20220415013114.29658-8-caiyinyu@loongson.cn> (raw)
In-Reply-To: <20220415013114.29658-1-caiyinyu@loongson.cn>
---
.../sysv/linux/loongarch/atomic-machine.h | 181 ++++++++++++++++++
1 file changed, 181 insertions(+)
create mode 100644 sysdeps/unix/sysv/linux/loongarch/atomic-machine.h
diff --git a/sysdeps/unix/sysv/linux/loongarch/atomic-machine.h b/sysdeps/unix/sysv/linux/loongarch/atomic-machine.h
new file mode 100644
index 0000000000..60db25587e
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/loongarch/atomic-machine.h
@@ -0,0 +1,181 @@
+/* Atomic operations.
+ Copyright (C) 2022 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library. If not, see
+ <https://www.gnu.org/licenses/>. */
+
+#ifndef _LINUX_LOONGARCH_BITS_ATOMIC_H
+#define _LINUX_LOONGARCH_BITS_ATOMIC_H 1
+
+#define atomic_full_barrier() __sync_synchronize ()
+
+#define __HAVE_64B_ATOMICS (__loongarch_grlen >=64)
+#define USE_ATOMIC_COMPILER_BUILTINS 1
+#define ATOMIC_EXCHANGE_USES_CAS 0
+
+/* Compare and exchange.
+ For all "bool" routines, we return FALSE if exchange successful. */
+
+#define __arch_compare_and_exchange_bool_8_int(mem, newval, oldval, model) \
+ ({ \
+ typeof (*mem) __oldval = (oldval); \
+ !__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, model, \
+ __ATOMIC_RELAXED); \
+ })
+
+#define __arch_compare_and_exchange_bool_16_int(mem, newval, oldval, model) \
+ ({ \
+ typeof (*mem) __oldval = (oldval); \
+ !__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, model, \
+ __ATOMIC_RELAXED); \
+ })
+
+#define __arch_compare_and_exchange_bool_32_int(mem, newval, oldval, model) \
+ ({ \
+ typeof (*mem) __oldval = (oldval); \
+ !__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, model, \
+ __ATOMIC_RELAXED); \
+ })
+
+#define __arch_compare_and_exchange_bool_64_int(mem, newval, oldval, model) \
+ ({ \
+ typeof (*mem) __oldval = (oldval); \
+ !__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, model, \
+ __ATOMIC_RELAXED); \
+ })
+
+#define __arch_compare_and_exchange_val_8_int(mem, newval, oldval, model) \
+ ({ \
+ typeof (*mem) __oldval = (oldval); \
+ __atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, model, \
+ __ATOMIC_RELAXED); \
+ __oldval; \
+ })
+
+#define __arch_compare_and_exchange_val_16_int(mem, newval, oldval, model) \
+ ({ \
+ typeof (*mem) __oldval = (oldval); \
+ __atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, model, \
+ __ATOMIC_RELAXED); \
+ __oldval; \
+ })
+
+#define __arch_compare_and_exchange_val_32_int(mem, newval, oldval, model) \
+ ({ \
+ typeof (*mem) __oldval = (oldval); \
+ __atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, model, \
+ __ATOMIC_RELAXED); \
+ __oldval; \
+ })
+
+#define __arch_compare_and_exchange_val_64_int(mem, newval, oldval, model) \
+ ({ \
+ typeof (*mem) __oldval = (oldval); \
+ __atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, model, \
+ __ATOMIC_RELAXED); \
+ __oldval; \
+ })
+
+/* Atomic compare and exchange. */
+
+#define atomic_compare_and_exchange_bool_acq(mem, new, old) \
+ __atomic_bool_bysize (__arch_compare_and_exchange_bool, int, mem, new, old, \
+ __ATOMIC_ACQUIRE)
+
+#define atomic_compare_and_exchange_val_acq(mem, new, old) \
+ __atomic_val_bysize (__arch_compare_and_exchange_val, int, mem, new, old, \
+ __ATOMIC_ACQUIRE)
+
+#define atomic_compare_and_exchange_val_rel(mem, new, old) \
+ __atomic_val_bysize (__arch_compare_and_exchange_val, int, mem, new, old, \
+ __ATOMIC_RELEASE)
+
+/* Atomic exchange (without compare). */
+
+#define __arch_exchange_8_int(mem, newval, model) \
+ __atomic_exchange_n (mem, newval, model)
+
+#define __arch_exchange_16_int(mem, newval, model) \
+ __atomic_exchange_n (mem, newval, model)
+
+#define __arch_exchange_32_int(mem, newval, model) \
+ __atomic_exchange_n (mem, newval, model)
+
+#define __arch_exchange_64_int(mem, newval, model) \
+ __atomic_exchange_n (mem, newval, model)
+
+#define atomic_exchange_acq(mem, value) \
+ __atomic_val_bysize (__arch_exchange, int, mem, value, __ATOMIC_ACQUIRE)
+
+#define atomic_exchange_rel(mem, value) \
+ __atomic_val_bysize (__arch_exchange, int, mem, value, __ATOMIC_RELEASE)
+
+/* Atomically add value and return the previous (unincremented) value. */
+
+#define __arch_exchange_and_add_8_int(mem, value, model) \
+ __atomic_fetch_add (mem, value, model)
+
+#define __arch_exchange_and_add_16_int(mem, value, model) \
+ __atomic_fetch_add (mem, value, model)
+
+#define __arch_exchange_and_add_32_int(mem, value, model) \
+ __atomic_fetch_add (mem, value, model)
+
+#define __arch_exchange_and_add_64_int(mem, value, model) \
+ __atomic_fetch_add (mem, value, model)
+
+#define atomic_exchange_and_add_acq(mem, value) \
+ __atomic_val_bysize (__arch_exchange_and_add, int, mem, value, \
+ __ATOMIC_ACQUIRE)
+
+#define atomic_exchange_and_add_rel(mem, value) \
+ __atomic_val_bysize (__arch_exchange_and_add, int, mem, value, \
+ __ATOMIC_RELEASE)
+
+/* Miscellaneous. */
+
+#define asm_amo(which, mem, value) \
+ ({ \
+ __atomic_check_size (mem); \
+ typeof (*mem) __tmp; \
+ if (sizeof (__tmp) == 4) \
+ asm volatile(which ".w" \
+ "\t%0, %z2, %1" \
+ : "=&r"(__tmp), "+ZB"(*(mem)) \
+ : "rJ"(value)); \
+ else if (sizeof (__tmp) == 8) \
+ asm volatile(which ".d" \
+ "\t%0, %z2, %1" \
+ : "=&r"(__tmp), "+ZB"(*(mem)) \
+ : "rJ"(value)); \
+ else \
+ abort (); \
+ __tmp; \
+ })
+
+#define atomic_max(mem, value) asm_amo ("ammax_db", mem, value)
+#define atomic_min(mem, value) asm_amo ("ammin_db", mem, value)
+
+#define atomic_bit_test_set(mem, bit) \
+ ({ \
+ typeof (*mem) __mask = (typeof (*mem)) 1 << (bit); \
+ asm_amo ("amor_db", mem, __mask) & __mask; \
+ })
+
+#define catomic_exchange_and_add(mem, value) \
+ atomic_exchange_and_add (mem, value)
+#define catomic_max(mem, value) atomic_max (mem, value)
+
+#endif /* atomic-machine.h */
--
2.20.1
next prev parent reply other threads:[~2022-04-15 1:31 UTC|newest]
Thread overview: 16+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-04-15 1:31 [PATCH v3 00/13] GLIBC LoongArch PATCHES caiyinyu
2022-04-15 1:31 ` [PATCH v3 01/13] LoongArch: Update NEWS and README for the LoongArch port caiyinyu
2022-04-19 20:06 ` Joseph Myers
2022-05-09 2:26 ` caiyinyu
2022-04-15 1:31 ` [PATCH v3 02/13] LoongArch: Add LoongArch entries to config.h.in caiyinyu
2022-04-15 1:31 ` [PATCH v3 03/13] LoongArch: Add relocations and ELF flags to elf.h caiyinyu
2022-04-15 1:31 ` [PATCH v3 04/13] LoongArch: ABI Implementation caiyinyu
2022-04-15 1:31 ` [PATCH v3 05/13] LoongArch: Thread-Local Storage Support caiyinyu
2022-04-15 1:31 ` [PATCH v3 06/13] LoongArch: Generic <math.h> and soft-fp Routines caiyinyu
2022-04-15 1:31 ` caiyinyu [this message]
2022-04-15 1:31 ` [PATCH v3 08/13] LoongArch: Linux Syscall Interface caiyinyu
2022-04-15 1:31 ` [PATCH v3 09/13] LoongArch: Linux ABI caiyinyu
2022-04-15 1:31 ` [PATCH v3 10/13] LoongArch: Add ABI Lists caiyinyu
2022-04-15 1:31 ` [PATCH v3 11/13] LoongArch: Build Infastructure caiyinyu
2022-04-15 1:31 ` [PATCH v3 12/13] LoongArch: Hard Float Support caiyinyu
2022-04-15 1:31 ` [PATCH v3 13/13] LoongArch: Update build-many-glibcs.py for the LoongArch Port caiyinyu
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20220415013114.29658-8-caiyinyu@loongson.cn \
--to=caiyinyu@loongson.cn \
--cc=adhemerval.zanella@linaro.org \
--cc=joseph_myers@mentor.com \
--cc=libc-alpha@sourceware.org \
--cc=xuchenghua@loongson.cn \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for read-only IMAP folder(s) and NNTP newsgroup(s).