From: caiyinyu
Subject: Re: [PATCH v6 07/13] LoongArch: Atomic and Locking Routines
To: Adhemerval Zanella Netto, libc-alpha@sourceware.org
Cc: joseph_myers@mentor.com
Date: Fri, 15 Jul 2022 09:46:56 +0800
Message-ID: <89885b7a-521d-2688-cd01-70b59dab9546@loongson.cn>
In-Reply-To: <9cc5ba1e-97ee-3ce6-7634-8fcfaec34821@linaro.org>
References: <20220708065255.2316410-1-caiyinyu@loongson.cn>
 <20220708065255.2316410-8-caiyinyu@loongson.cn>
 <9cc5ba1e-97ee-3ce6-7634-8fcfaec34821@linaro.org>

+
+/* Miscellaneous.  */
+
+#define asm_amo(which, mem, value) \
+  ({ \
+    __atomic_check_size (mem); \
+    typeof (*mem) __tmp; \
+    if (sizeof (__tmp) == 4) \
+      asm volatile (which ".w" \
+                        "\t%0, %z2, %1" \
+                  : "=&r"(__tmp), "+ZB"(*(mem)) \
+                  : "rJ"(value)); \
+    else if (sizeof (__tmp) == 8) \

It does not have the case of !__HAVE_64B_ATOMICS, but since only 64-bit
is supported I don't think this will be troublesome.
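(For illustration only, a minimal sketch of such a guard, in case a
32-bit port ever appears; the __asm_amo_d helper name is invented for
this sketch and is not part of the patch:

#if __HAVE_64B_ATOMICS
/* 64-bit AMO instructions exist, so emit the ".d" form.  */
# define __asm_amo_d(which, mem, tmp, value) \
  asm volatile (which ".d" "\t%0, %z2, %1" \
                : "=&r"(tmp), "+ZB"(*(mem)) \
                : "rJ"(value))
#else
/* No 64-bit atomics: an 8-byte operand can only be a bug, so trap.  */
# define __asm_amo_d(which, mem, tmp, value) abort ()
#endif

The 8-byte branch of asm_amo would then call __asm_amo_d, so no amo*.d
instruction ever reaches the assembler on a 32-bit target; a dead `if'
branch inside the statement expression would not guarantee that at -O0.)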
Also, the idea is to remove such atomic macros in the next release.

Removed.

>>>>>>>>>>>>>>>>>>>>>>>

diff --git a/sysdeps/unix/sysv/linux/loongarch/atomic-machine.h b/sysdeps/unix/sysv/linux/loongarch/atomic-machine.h
index b51463915e..d1b8f1c11b 100644
--- a/sysdeps/unix/sysv/linux/loongarch/atomic-machine.h
+++ b/sysdeps/unix/sysv/linux/loongarch/atomic-machine.h
@@ -144,38 +144,4 @@
   __atomic_val_bysize (__arch_exchange_and_add, int, mem, value, \
                        __ATOMIC_RELEASE)
 
-/* Miscellaneous.  */
-
-#define asm_amo(which, mem, value) \
-  ({ \
-    __atomic_check_size (mem); \
-    typeof (*mem) __tmp; \
-    if (sizeof (__tmp) == 4) \
-      asm volatile (which ".w" \
-                        "\t%0, %z2, %1" \
-                  : "=&r"(__tmp), "+ZB"(*(mem)) \
-                  : "rJ"(value)); \
-    else if (sizeof (__tmp) == 8) \
-      asm volatile (which ".d" \
-                        "\t%0, %z2, %1" \
-                  : "=&r"(__tmp), "+ZB"(*(mem)) \
-                  : "rJ"(value)); \
-    else \
-      abort (); \
-    __tmp; \
-  })
-
-#define atomic_max(mem, value) asm_amo ("ammax_db", mem, value)
-#define atomic_min(mem, value) asm_amo ("ammin_db", mem, value)
-
-#define atomic_bit_test_set(mem, bit) \
-  ({ \
-    typeof (*mem) __mask = (typeof (*mem)) 1 << (bit); \
-    asm_amo ("amor_db", mem, __mask) & __mask; \
-  })
-
-#define catomic_exchange_and_add(mem, value) \
-  atomic_exchange_and_add (mem, value)
-#define catomic_max(mem, value) atomic_max (mem, value)
-
 #endif /* bits/atomic.h */

<<<<<<<<<<<<<<<<<<<<<<<

On 2022/7/13 9:53 PM, Adhemerval Zanella Netto wrote:
> Although it could use more streamlined macros, it follows current
> practice.  Only a small nit below.
>
> Reviewed-by: Adhemerval Zanella
>
> On 08/07/22 03:52, caiyinyu wrote:
>> ---
>>  .../sysv/linux/loongarch/atomic-machine.h | 181 ++++++++++++++++++
>>  1 file changed, 181 insertions(+)
>>  create mode 100644 sysdeps/unix/sysv/linux/loongarch/atomic-machine.h
>>
>> diff --git a/sysdeps/unix/sysv/linux/loongarch/atomic-machine.h b/sysdeps/unix/sysv/linux/loongarch/atomic-machine.h
>> new file mode 100644
>> index 0000000000..60db25587e
>> --- /dev/null
>> +++ b/sysdeps/unix/sysv/linux/loongarch/atomic-machine.h
>> @@ -0,0 +1,181 @@
>> +/* Atomic operations.
>> +   Copyright (C) 2022 Free Software Foundation, Inc.
>> +   This file is part of the GNU C Library.
>> +
>> +   The GNU C Library is free software; you can redistribute it and/or
>> +   modify it under the terms of the GNU Lesser General Public
>> +   License as published by the Free Software Foundation; either
>> +   version 2.1 of the License, or (at your option) any later version.
>> +
>> +   The GNU C Library is distributed in the hope that it will be useful,
>> +   but WITHOUT ANY WARRANTY; without even the implied warranty of
>> +   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
>> +   Lesser General Public License for more details.
>> +
>> +   You should have received a copy of the GNU Lesser General Public
>> +   License along with the GNU C Library.  If not, see
>> +   <https://www.gnu.org/licenses/>.  */
>> +
>> +#ifndef _LINUX_LOONGARCH_BITS_ATOMIC_H
>> +#define _LINUX_LOONGARCH_BITS_ATOMIC_H 1
>> +
>> +#define atomic_full_barrier() __sync_synchronize ()
>> +
>> +#define __HAVE_64B_ATOMICS (__loongarch_grlen >=64)
>
> Missing space after >=
>
>> +#define USE_ATOMIC_COMPILER_BUILTINS 1
>> +#define ATOMIC_EXCHANGE_USES_CAS 0
>> +
>> +/* Compare and exchange.
>> +   For all "bool" routines, we return FALSE if exchange successful.  */
>> +
>> +#define __arch_compare_and_exchange_bool_8_int(mem, newval, oldval, model) \
>> +  ({ \
>> +    typeof (*mem) __oldval = (oldval); \
>> +    !__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, model, \
>> +                                  __ATOMIC_RELAXED); \
>> +  })
>> +
>> +#define __arch_compare_and_exchange_bool_16_int(mem, newval, oldval, model) \
>> +  ({ \
>> +    typeof (*mem) __oldval = (oldval); \
>> +    !__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, model, \
>> +                                  __ATOMIC_RELAXED); \
>> +  })
>> +
>> +#define __arch_compare_and_exchange_bool_32_int(mem, newval, oldval, model) \
>> +  ({ \
>> +    typeof (*mem) __oldval = (oldval); \
>> +    !__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, model, \
>> +                                  __ATOMIC_RELAXED); \
>> +  })
>> +
>> +#define __arch_compare_and_exchange_bool_64_int(mem, newval, oldval, model) \
>> +  ({ \
>> +    typeof (*mem) __oldval = (oldval); \
>> +    !__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, model, \
>> +                                  __ATOMIC_RELAXED); \
>> +  })
>> +
>> +#define __arch_compare_and_exchange_val_8_int(mem, newval, oldval, model) \
>> +  ({ \
>> +    typeof (*mem) __oldval = (oldval); \
>> +    __atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, model, \
>> +                                 __ATOMIC_RELAXED); \
>> +    __oldval; \
>> +  })
>> +
>> +#define __arch_compare_and_exchange_val_16_int(mem, newval, oldval, model) \
>> +  ({ \
>> +    typeof (*mem) __oldval = (oldval); \
>> +    __atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, model, \
>> +                                 __ATOMIC_RELAXED); \
>> +    __oldval; \
>> +  })
>> +
>> +#define __arch_compare_and_exchange_val_32_int(mem, newval, oldval, model) \
>> +  ({ \
>> +    typeof (*mem) __oldval = (oldval); \
>> +    __atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, model, \
>> +                                 __ATOMIC_RELAXED); \
>> +    __oldval; \
>> +  })
>> +
>> +#define __arch_compare_and_exchange_val_64_int(mem, newval, oldval, model) \
>> +  ({ \
>> +    typeof (*mem) __oldval = (oldval); \
>> +    __atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, model, \
>> +                                 __ATOMIC_RELAXED); \
>> +    __oldval; \
>> +  })
>> +
>> +/* Atomic compare and exchange.  */
>> +
>> +#define atomic_compare_and_exchange_bool_acq(mem, new, old) \
>> +  __atomic_bool_bysize (__arch_compare_and_exchange_bool, int, mem, new, old, \
>> +                        __ATOMIC_ACQUIRE)
>> +
>> +#define atomic_compare_and_exchange_val_acq(mem, new, old) \
>> +  __atomic_val_bysize (__arch_compare_and_exchange_val, int, mem, new, old, \
>> +                       __ATOMIC_ACQUIRE)
>> +
>> +#define atomic_compare_and_exchange_val_rel(mem, new, old) \
>> +  __atomic_val_bysize (__arch_compare_and_exchange_val, int, mem, new, old, \
>> +                       __ATOMIC_RELEASE)
>> +
>> +/* Atomic exchange (without compare).  */
>> +
>> +#define __arch_exchange_8_int(mem, newval, model) \
>> +  __atomic_exchange_n (mem, newval, model)
>> +
>> +#define __arch_exchange_16_int(mem, newval, model) \
>> +  __atomic_exchange_n (mem, newval, model)
>> +
>> +#define __arch_exchange_32_int(mem, newval, model) \
>> +  __atomic_exchange_n (mem, newval, model)
>> +
>> +#define __arch_exchange_64_int(mem, newval, model) \
>> +  __atomic_exchange_n (mem, newval, model)
>> +
>> +#define atomic_exchange_acq(mem, value) \
>> +  __atomic_val_bysize (__arch_exchange, int, mem, value, __ATOMIC_ACQUIRE)
>> +
>> +#define atomic_exchange_rel(mem, value) \
>> +  __atomic_val_bysize (__arch_exchange, int, mem, value, __ATOMIC_RELEASE)
>> +
>> +/* Atomically add value and return the previous (unincremented) value.  */
>> +
>> +#define __arch_exchange_and_add_8_int(mem, value, model) \
>> +  __atomic_fetch_add (mem, value, model)
>> +
>> +#define __arch_exchange_and_add_16_int(mem, value, model) \
>> +  __atomic_fetch_add (mem, value, model)
>> +
>> +#define __arch_exchange_and_add_32_int(mem, value, model) \
>> +  __atomic_fetch_add (mem, value, model)
>> +
>> +#define __arch_exchange_and_add_64_int(mem, value, model) \
>> +  __atomic_fetch_add (mem, value, model)
>> +
>> +#define atomic_exchange_and_add_acq(mem, value) \
>> +  __atomic_val_bysize (__arch_exchange_and_add, int, mem, value, \
>> +                       __ATOMIC_ACQUIRE)
>> +
>> +#define atomic_exchange_and_add_rel(mem, value) \
>> +  __atomic_val_bysize (__arch_exchange_and_add, int, mem, value, \
>> +                       __ATOMIC_RELEASE)
>> +
>> +/* Miscellaneous.  */
>> +
>> +#define asm_amo(which, mem, value) \
>> +  ({ \
>> +    __atomic_check_size (mem); \
>> +    typeof (*mem) __tmp; \
>> +    if (sizeof (__tmp) == 4) \
>> +      asm volatile (which ".w" \
>> +                        "\t%0, %z2, %1" \
>> +                  : "=&r"(__tmp), "+ZB"(*(mem)) \
>> +                  : "rJ"(value)); \
>> +    else if (sizeof (__tmp) == 8) \
>
> It does not have the case of !__HAVE_64B_ATOMICS, but since only 64-bit
> is supported I don't think this will be troublesome.  Also, the idea is
> to remove such atomic macros in the next release.
>
>> +      asm volatile (which ".d" \
>> +                        "\t%0, %z2, %1" \
>> +                  : "=&r"(__tmp), "+ZB"(*(mem)) \
>> +                  : "rJ"(value)); \
>> +    else \
>> +      abort (); \
>> +    __tmp; \
>> +  })
>> +
>> +#define atomic_max(mem, value) asm_amo ("ammax_db", mem, value)
>> +#define atomic_min(mem, value) asm_amo ("ammin_db", mem, value)
>> +
>> +#define atomic_bit_test_set(mem, bit) \
>> +  ({ \
>> +    typeof (*mem) __mask = (typeof (*mem)) 1 << (bit); \
>> +    asm_amo ("amor_db", mem, __mask) & __mask; \
>> +  })
>> +
>> +#define catomic_exchange_and_add(mem, value) \
>> +  atomic_exchange_and_add (mem, value)
>> +#define catomic_max(mem, value) atomic_max (mem, value)
>> +
>> +#endif /* bits/atomic.h */
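A usage note, outside the patch itself: the "return FALSE if exchange
successful" convention above means callers test the macro result
directly.  A minimal sketch with a hypothetical caller (the lock
variable and acquire_lock function are invented for illustration):

/* The "bool" CAS macros yield 0 exactly when *mem was changed, so an
   acquire loop spins while the macro returns nonzero.  */
static int lock;

static void
acquire_lock (void)
{
  while (atomic_compare_and_exchange_bool_acq (&lock, 1, 0))
    ; /* CAS failed: another thread holds the lock, so retry.  */
  /* CAS succeeded: we swapped 0 -> 1 with acquire ordering.  */
}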
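And for concreteness, a rough sketch (not from the patch) of what
atomic_max (&x, v) expands to for an 8-byte x, once asm_amo pastes
"ammax_db" together with ".d":

/* Hypothetical expansion for long x: the ammax_db.d AMO atomically
   stores max (x, v) to memory and returns the old value in __tmp,
   the _db suffix supplying the full-barrier semantics.  */
long __tmp;
asm volatile ("ammax_db.d" "\t%0, %z2, %1"
              : "=&r" (__tmp), "+ZB" (x)
              : "rJ" (v));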