public inbox for libc-alpha@sourceware.org
From: caiyinyu <caiyinyu@loongson.cn>
To: Adhemerval Zanella Netto <adhemerval.zanella@linaro.org>,
	libc-alpha@sourceware.org
Cc: joseph_myers@mentor.com
Subject: Re: [PATCH v6 07/13] LoongArch: Atomic and Locking Routines
Date: Fri, 15 Jul 2022 09:46:56 +0800	[thread overview]
Message-ID: <89885b7a-521d-2688-cd01-70b59dab9546@loongson.cn> (raw)
In-Reply-To: <9cc5ba1e-97ee-3ce6-7634-8fcfaec34821@linaro.org>


+
+/* Miscellaneous.  */
+
+#define asm_amo(which, mem, value) \
+  ({ \
+    __atomic_check_size (mem); \
+    typeof (*mem) __tmp; \
+    if (sizeof (__tmp) == 4) \
+      asm volatile(which ".w" \
+			 "\t%0, %z2, %1" \
+		   : "=&r"(__tmp), "+ZB"(*(mem)) \
+		   : "rJ"(value)); \
+    else if (sizeof (__tmp) == 8) \

It does not have the case of !__HAVE_64B_ATOMICS, but since only 64-bit
is supported I don't think this will be troublesome.  Also, the idea is to
remove such atomic macros in the next release.
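A guard along these lines would have covered the !__HAVE_64B_ATOMICS case
(a rough hypothetical sketch, not part of any posted patch; the macros
were removed instead, see the diff below):

    else if (sizeof (__tmp) == 8 && __HAVE_64B_ATOMICS) \
      asm volatile (which ".d" \
                         "\t%0, %z2, %1" \
                   : "=&r"(__tmp), "+ZB"(*(mem)) \
                   : "rJ"(value)); \
    else \
      abort (); \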

Removed.

>>>>>>>>>>>>>>>>>>>>>>>

diff --git a/sysdeps/unix/sysv/linux/loongarch/atomic-machine.h b/sysdeps/unix/sysv/linux/loongarch/atomic-machine.h
index b51463915e..d1b8f1c11b 100644
--- a/sysdeps/unix/sysv/linux/loongarch/atomic-machine.h
+++ b/sysdeps/unix/sysv/linux/loongarch/atomic-machine.h
@@ -144,38 +144,4 @@
   __atomic_val_bysize (__arch_exchange_and_add, int, mem, value, \
                       __ATOMIC_RELEASE)
 
-/* Miscellaneous.  */
-
-#define asm_amo(which, mem, value) \
-  ({ \
-    __atomic_check_size (mem); \
-    typeof (*mem) __tmp; \
-    if (sizeof (__tmp) == 4) \
-      asm volatile (which ".w" \
-                        "\t%0, %z2, %1" \
-                  : "=&r"(__tmp), "+ZB"(*(mem)) \
-                  : "rJ"(value)); \
-    else if (sizeof (__tmp) == 8) \
-      asm volatile (which ".d" \
-                        "\t%0, %z2, %1" \
-                  : "=&r"(__tmp), "+ZB"(*(mem)) \
-                  : "rJ"(value)); \
-    else \
-      abort (); \
-    __tmp; \
-  })
-
-#define atomic_max(mem, value) asm_amo ("ammax_db", mem, value)
-#define atomic_min(mem, value) asm_amo ("ammin_db", mem, value)
-
-#define atomic_bit_test_set(mem, bit) \
-  ({ \
-    typeof (*mem) __mask = (typeof (*mem)) 1 << (bit); \
-    asm_amo ("amor_db", mem, __mask) & __mask; \
-  })
-
-#define catomic_exchange_and_add(mem, value) \
-  atomic_exchange_and_add (mem, value)
-#define catomic_max(mem, value) atomic_max (mem, value)
-
 #endif /* bits/atomic.h */

<<<<<<<<<<<<<<<<<<<<<<<
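
For reference, the same functionality remains reachable through the GCC
__atomic builtins this port already relies on (USE_ATOMIC_COMPILER_BUILTINS
is 1); a minimal hypothetical sketch of a bit-test-and-set in that style,
not part of the patch:

    /* Atomically set bit BIT in *MEM and return its previous state,
       using a builtin fetch-or instead of the removed asm_amo helper.  */
    #define bit_test_set_sketch(mem, bit) \
      ({ \
        typeof (*(mem)) __mask = (typeof (*(mem))) 1 << (bit); \
        __atomic_fetch_or (mem, __mask, __ATOMIC_ACQ_REL) & __mask; \
      })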



On 2022/7/13 at 9:53 PM, Adhemerval Zanella Netto wrote:
> Although it would be better to have more streamlined macros, it follows current practice.
> Only a small nit below.
>
> Reviewed-by: Adhemerval Zanella <adhemerval.zanella@linaro.org>
>
> On 08/07/22 03:52, caiyinyu wrote:
>> ---
>>   .../sysv/linux/loongarch/atomic-machine.h     | 181 ++++++++++++++++++
>>   1 file changed, 181 insertions(+)
>>   create mode 100644 sysdeps/unix/sysv/linux/loongarch/atomic-machine.h
>>
>> diff --git a/sysdeps/unix/sysv/linux/loongarch/atomic-machine.h b/sysdeps/unix/sysv/linux/loongarch/atomic-machine.h
>> new file mode 100644
>> index 0000000000..60db25587e
>> --- /dev/null
>> +++ b/sysdeps/unix/sysv/linux/loongarch/atomic-machine.h
>> @@ -0,0 +1,181 @@
>> +/* Atomic operations.
>> +   Copyright (C) 2022 Free Software Foundation, Inc.
>> +   This file is part of the GNU C Library.
>> +
>> +   The GNU C Library is free software; you can redistribute it and/or
>> +   modify it under the terms of the GNU Lesser General Public
>> +   License as published by the Free Software Foundation; either
>> +   version 2.1 of the License, or (at your option) any later version.
>> +
>> +   The GNU C Library is distributed in the hope that it will be useful,
>> +   but WITHOUT ANY WARRANTY; without even the implied warranty of
>> +   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
>> +   Lesser General Public License for more details.
>> +
>> +   You should have received a copy of the GNU Lesser General Public
>> +   License along with the GNU C Library.  If not, see
>> +   <https://www.gnu.org/licenses/>.  */
>> +
>> +#ifndef _LINUX_LOONGARCH_BITS_ATOMIC_H
>> +#define _LINUX_LOONGARCH_BITS_ATOMIC_H 1
>> +
>> +#define atomic_full_barrier() __sync_synchronize ()
>> +
>> +#define __HAVE_64B_ATOMICS (__loongarch_grlen >=64)
> Missing space after >=
>
>> +#define USE_ATOMIC_COMPILER_BUILTINS 1
>> +#define ATOMIC_EXCHANGE_USES_CAS 0
>> +
>> +/* Compare and exchange.
>> +   For all "bool" routines, we return FALSE if the exchange is successful.  */
>> +
>> +#define __arch_compare_and_exchange_bool_8_int(mem, newval, oldval, model) \
>> +  ({ \
>> +    typeof (*mem) __oldval = (oldval); \
>> +    !__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, model, \
>> +				  __ATOMIC_RELAXED); \
>> +  })
>> +
>> +#define __arch_compare_and_exchange_bool_16_int(mem, newval, oldval, model) \
>> +  ({ \
>> +    typeof (*mem) __oldval = (oldval); \
>> +    !__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, model, \
>> +				  __ATOMIC_RELAXED); \
>> +  })
>> +
>> +#define __arch_compare_and_exchange_bool_32_int(mem, newval, oldval, model) \
>> +  ({ \
>> +    typeof (*mem) __oldval = (oldval); \
>> +    !__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, model, \
>> +				  __ATOMIC_RELAXED); \
>> +  })
>> +
>> +#define __arch_compare_and_exchange_bool_64_int(mem, newval, oldval, model) \
>> +  ({ \
>> +    typeof (*mem) __oldval = (oldval); \
>> +    !__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, model, \
>> +				  __ATOMIC_RELAXED); \
>> +  })
>> +
>> +#define __arch_compare_and_exchange_val_8_int(mem, newval, oldval, model) \
>> +  ({ \
>> +    typeof (*mem) __oldval = (oldval); \
>> +    __atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, model, \
>> +				 __ATOMIC_RELAXED); \
>> +    __oldval; \
>> +  })
>> +
>> +#define __arch_compare_and_exchange_val_16_int(mem, newval, oldval, model) \
>> +  ({ \
>> +    typeof (*mem) __oldval = (oldval); \
>> +    __atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, model, \
>> +				 __ATOMIC_RELAXED); \
>> +    __oldval; \
>> +  })
>> +
>> +#define __arch_compare_and_exchange_val_32_int(mem, newval, oldval, model) \
>> +  ({ \
>> +    typeof (*mem) __oldval = (oldval); \
>> +    __atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, model, \
>> +				 __ATOMIC_RELAXED); \
>> +    __oldval; \
>> +  })
>> +
>> +#define __arch_compare_and_exchange_val_64_int(mem, newval, oldval, model) \
>> +  ({ \
>> +    typeof (*mem) __oldval = (oldval); \
>> +    __atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, model, \
>> +				 __ATOMIC_RELAXED); \
>> +    __oldval; \
>> +  })
>> +
>> +/* Atomic compare and exchange.  */
>> +
>> +#define atomic_compare_and_exchange_bool_acq(mem, new, old) \
>> +  __atomic_bool_bysize (__arch_compare_and_exchange_bool, int, mem, new, old, \
>> +			__ATOMIC_ACQUIRE)
>> +
>> +#define atomic_compare_and_exchange_val_acq(mem, new, old) \
>> +  __atomic_val_bysize (__arch_compare_and_exchange_val, int, mem, new, old, \
>> +		       __ATOMIC_ACQUIRE)
>> +
>> +#define atomic_compare_and_exchange_val_rel(mem, new, old) \
>> +  __atomic_val_bysize (__arch_compare_and_exchange_val, int, mem, new, old, \
>> +		       __ATOMIC_RELEASE)
>> +
>> +/* Atomic exchange (without compare).  */
>> +
>> +#define __arch_exchange_8_int(mem, newval, model) \
>> +  __atomic_exchange_n (mem, newval, model)
>> +
>> +#define __arch_exchange_16_int(mem, newval, model) \
>> +  __atomic_exchange_n (mem, newval, model)
>> +
>> +#define __arch_exchange_32_int(mem, newval, model) \
>> +  __atomic_exchange_n (mem, newval, model)
>> +
>> +#define __arch_exchange_64_int(mem, newval, model) \
>> +  __atomic_exchange_n (mem, newval, model)
>> +
>> +#define atomic_exchange_acq(mem, value) \
>> +  __atomic_val_bysize (__arch_exchange, int, mem, value, __ATOMIC_ACQUIRE)
>> +
>> +#define atomic_exchange_rel(mem, value) \
>> +  __atomic_val_bysize (__arch_exchange, int, mem, value, __ATOMIC_RELEASE)
>> +
>> +/* Atomically add value and return the previous (unincremented) value.  */
>> +
>> +#define __arch_exchange_and_add_8_int(mem, value, model) \
>> +  __atomic_fetch_add (mem, value, model)
>> +
>> +#define __arch_exchange_and_add_16_int(mem, value, model) \
>> +  __atomic_fetch_add (mem, value, model)
>> +
>> +#define __arch_exchange_and_add_32_int(mem, value, model) \
>> +  __atomic_fetch_add (mem, value, model)
>> +
>> +#define __arch_exchange_and_add_64_int(mem, value, model) \
>> +  __atomic_fetch_add (mem, value, model)
>> +
>> +#define atomic_exchange_and_add_acq(mem, value) \
>> +  __atomic_val_bysize (__arch_exchange_and_add, int, mem, value, \
>> +		       __ATOMIC_ACQUIRE)
>> +
>> +#define atomic_exchange_and_add_rel(mem, value) \
>> +  __atomic_val_bysize (__arch_exchange_and_add, int, mem, value, \
>> +		       __ATOMIC_RELEASE)
>> +
>> +/* Miscellaneous.  */
>> +
>> +#define asm_amo(which, mem, value) \
>> +  ({ \
>> +    __atomic_check_size (mem); \
>> +    typeof (*mem) __tmp; \
>> +    if (sizeof (__tmp) == 4) \
>> +      asm volatile(which ".w" \
>> +			 "\t%0, %z2, %1" \
>> +		   : "=&r"(__tmp), "+ZB"(*(mem)) \
>> +		   : "rJ"(value)); \
>> +    else if (sizeof (__tmp) == 8) \
> It does not have the case of !__HAVE_64B_ATOMICS, but since only 64-bit
> is supported I don't think this will be troublesome.  Also, the idea is to
> remove such atomic macros in the next release.
>
>> +      asm volatile(which ".d" \
>> +			 "\t%0, %z2, %1" \
>> +		   : "=&r"(__tmp), "+ZB"(*(mem)) \
>> +		   : "rJ"(value)); \
>> +    else \
>> +      abort (); \
>> +    __tmp; \
>> +  })
>> +
>> +#define atomic_max(mem, value) asm_amo ("ammax_db", mem, value)
>> +#define atomic_min(mem, value) asm_amo ("ammin_db", mem, value)
>> +
>> +#define atomic_bit_test_set(mem, bit) \
>> +  ({ \
>> +    typeof (*mem) __mask = (typeof (*mem)) 1 << (bit); \
>> +    asm_amo ("amor_db", mem, __mask) & __mask; \
>> +  })
>> +
>> +#define catomic_exchange_and_add(mem, value) \
>> +  atomic_exchange_and_add (mem, value)
>> +#define catomic_max(mem, value) atomic_max (mem, value)
>> +
>> +#endif /* bits/atomic.h */


Thread overview: 40+ messages
2022-07-08  6:52 [PATCH v6 00/13] GLIBC LoongArch PATCHES caiyinyu
2022-07-08  6:52 ` [PATCH v6 01/13] LoongArch: Update NEWS and README for the LoongArch port caiyinyu
2022-07-12 11:06   ` Adhemerval Zanella Netto
2022-07-08  6:52 ` [PATCH v6 02/13] LoongArch: Add LoongArch entries to config.h.in caiyinyu
2022-07-12 18:51   ` Adhemerval Zanella Netto
2022-07-08  6:52 ` [PATCH v6 03/13] LoongArch: Add relocations and ELF flags to elf.h and scripts/glibcelf.py caiyinyu
2022-07-12 19:27   ` Adhemerval Zanella Netto
2022-07-08  6:52 ` [PATCH v6 04/13] LoongArch: ABI Implementation caiyinyu
2022-07-12 20:39   ` Adhemerval Zanella Netto
2022-07-15  1:46     ` caiyinyu
2022-07-08  6:52 ` [PATCH v6 05/13] LoongArch: Thread-Local Storage Support caiyinyu
2022-07-13 13:10   ` Adhemerval Zanella Netto
2022-07-08  6:52 ` [PATCH v6 06/13] LoongArch: Generic <math.h> and soft-fp Routines caiyinyu
2022-07-13 13:25   ` Adhemerval Zanella Netto
2022-07-08  6:52 ` [PATCH v6 07/13] LoongArch: Atomic and Locking Routines caiyinyu
2022-07-13 13:53   ` Adhemerval Zanella Netto
2022-07-15  1:46     ` caiyinyu [this message]
2022-07-08  6:52 ` [PATCH v6 08/13] LoongArch: Linux Syscall Interface caiyinyu
2022-07-13 16:19   ` Adhemerval Zanella Netto
2023-09-13 11:05     ` caiyinyu
2022-07-08  6:52 ` [PATCH v6 09/13] LoongArch: Linux ABI caiyinyu
2022-07-13 16:35   ` Adhemerval Zanella Netto
2022-07-15  1:48     ` caiyinyu
2022-07-08  6:52 ` [PATCH v6 10/13] LoongArch: Add ABI Lists caiyinyu
2022-07-13 17:12   ` Adhemerval Zanella Netto
2022-07-08  6:52 ` [PATCH v6 11/13] LoongArch: Build Infastructure caiyinyu
2022-07-13  3:07   ` Xi Ruoyao
2022-07-13  3:43     ` WANG Xuerui
2022-07-13  7:51       ` caiyinyu
2022-07-13 17:48   ` Adhemerval Zanella Netto
2022-07-15  1:49     ` caiyinyu
2022-07-08  6:52 ` [PATCH v6 12/13] LoongArch: Hard Float Support caiyinyu
2022-07-13 19:22   ` Adhemerval Zanella Netto
2022-07-14 13:00     ` caiyinyu
2022-07-08  7:13 ` [PATCH v6 00/13] GLIBC LoongArch PATCHES Xi Ruoyao
2022-07-08  7:15   ` Xi Ruoyao
2022-07-13 19:55   ` Adhemerval Zanella Netto
2022-07-14 11:33     ` Xi Ruoyao
2022-07-14 12:11       ` Adhemerval Zanella Netto
2022-07-18 13:54 ` Carlos O'Donell

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=89885b7a-521d-2688-cd01-70b59dab9546@loongson.cn \
    --to=caiyinyu@loongson.cn \
    --cc=adhemerval.zanella@linaro.org \
    --cc=joseph_myers@mentor.com \
    --cc=libc-alpha@sourceware.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link

Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for read-only IMAP folder(s) and NNTP newsgroup(s).