From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from mail-oi1-x230.google.com (mail-oi1-x230.google.com [IPv6:2607:f8b0:4864:20::230]) by sourceware.org (Postfix) with ESMTPS id 3963B383A365 for ; Wed, 13 Jul 2022 13:53:08 +0000 (GMT) DMARC-Filter: OpenDMARC Filter v1.4.1 sourceware.org 3963B383A365 Received: by mail-oi1-x230.google.com with SMTP id p132so8346704oif.9 for ; Wed, 13 Jul 2022 06:53:08 -0700 (PDT) X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20210112; h=x-gm-message-state:message-id:date:mime-version:user-agent:subject :content-language:to:references:from:organization:cc:in-reply-to :content-transfer-encoding; bh=HoRZmFVA29/zatUhhzYffvNSiYjsepbWwBKrBQdV2KA=; b=SwxEUvHTnH98+b/o61GtL1BmgirDcXF37XZHedtboiJN9YlOdEcL+0QOLtR5hIbP5p shTlSZ4aRj9RsFbHOrS6tCnDNffDCrm48Qrv9nKLXXkOiKeY9JSrA0OutkZJa0B1NSYQ oz+we+y68T+pe+Pv9fkDWAG/lMnQENaWBnnXMbpYhcbhT0xO1aVURQaubl4PKPz/RxJl n4DSVAqveo8g2/x6qi4WHEvb97ZlLvIyMiNnU1JGnAdGaV5JZEMwrJKkUBUKzwRrYHYJ raXg1B4t/5MvCfmoshyfViZIHYsyekHJsxfBqWDBFo5wMlG7PbSrP2Fd2v50sxlTtlBa I0vw== X-Gm-Message-State: AJIora+z7BrgA0hCPMFV26kWdZWCSkUXlg/xguaC4CoLZ+vS5K1jLGfI LhQRvYth+lWrQz5aeWATRxbH6OyhXfUA1w== X-Google-Smtp-Source: AGRyM1tkf4z5Yuko4nnxxYGMdBi3Jo5Z2VBYFfiTORETkbgXGxR6Y+LACtN3ZD/Pjqc8MKNn1XA96Q== X-Received: by 2002:a05:6808:1447:b0:339:c893:674c with SMTP id x7-20020a056808144700b00339c893674cmr1939551oiv.171.1657720387393; Wed, 13 Jul 2022 06:53:07 -0700 (PDT) Received: from ?IPV6:2804:431:c7ca:19c3:4427:c171:4fa9:c3d9? 
([2804:431:c7ca:19c3:4427:c171:4fa9:c3d9]) by smtp.gmail.com with ESMTPSA id n9-20020acabd09000000b00333f889c9c2sm5279734oif.33.2022.07.13.06.53.05 (version=TLS1_3 cipher=TLS_AES_128_GCM_SHA256 bits=128/128); Wed, 13 Jul 2022 06:53:07 -0700 (PDT) Message-ID: <9cc5ba1e-97ee-3ce6-7634-8fcfaec34821@linaro.org> Date: Wed, 13 Jul 2022 10:53:03 -0300 MIME-Version: 1.0 User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:102.0) Gecko/20100101 Thunderbird/102.0.2 Subject: Re: [PATCH v6 07/13] LoongArch: Atomic and Locking Routines Content-Language: en-US To: libc-alpha@sourceware.org, caiyinyu References: <20220708065255.2316410-1-caiyinyu@loongson.cn> <20220708065255.2316410-8-caiyinyu@loongson.cn> From: Adhemerval Zanella Netto Organization: Linaro Cc: joseph_myers@mentor.com In-Reply-To: <20220708065255.2316410-8-caiyinyu@loongson.cn> Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 7bit X-Spam-Status: No, score=-12.9 required=5.0 tests=BAYES_00, DKIM_SIGNED, DKIM_VALID, DKIM_VALID_AU, DKIM_VALID_EF, GIT_PATCH_0, KAM_SHORT, NICE_REPLY_A, RCVD_IN_DNSWL_NONE, SPF_HELO_NONE, SPF_PASS, TXREP, T_SCC_BODY_TEXT_LINE autolearn=ham autolearn_force=no version=3.4.6 X-Spam-Checker-Version: SpamAssassin 3.4.6 (2021-04-09) on server2.sourceware.org X-BeenThere: libc-alpha@sourceware.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: Libc-alpha mailing list List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , X-List-Received-Date: Wed, 13 Jul 2022 13:53:12 -0000 Although it would be better to use more streamlined macros, it follows current practice. Only a small nit below. 
Reviewed-by: Adhemerval Zanella On 08/07/22 03:52, caiyinyu wrote: > --- > .../sysv/linux/loongarch/atomic-machine.h | 181 ++++++++++++++++++ > 1 file changed, 181 insertions(+) > create mode 100644 sysdeps/unix/sysv/linux/loongarch/atomic-machine.h > > diff --git a/sysdeps/unix/sysv/linux/loongarch/atomic-machine.h b/sysdeps/unix/sysv/linux/loongarch/atomic-machine.h > new file mode 100644 > index 0000000000..60db25587e > --- /dev/null > +++ b/sysdeps/unix/sysv/linux/loongarch/atomic-machine.h > @@ -0,0 +1,181 @@ > +/* Atomic operations. > + Copyright (C) 2022 Free Software Foundation, Inc. > + This file is part of the GNU C Library. > + > + The GNU C Library is free software; you can redistribute it and/or > + modify it under the terms of the GNU Lesser General Public > + License as published by the Free Software Foundation; either > + version 2.1 of the License, or (at your option) any later version. > + > + The GNU C Library is distributed in the hope that it will be useful, > + but WITHOUT ANY WARRANTY; without even the implied warranty of > + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU > + Lesser General Public License for more details. > + > + You should have received a copy of the GNU Lesser General Public > + License along with the GNU C Library. If not, see > + . */ > + > +#ifndef _LINUX_LOONGARCH_BITS_ATOMIC_H > +#define _LINUX_LOONGARCH_BITS_ATOMIC_H 1 > + > +#define atomic_full_barrier() __sync_synchronize () > + > +#define __HAVE_64B_ATOMICS (__loongarch_grlen >=64) Missing space after >= > +#define USE_ATOMIC_COMPILER_BUILTINS 1 > +#define ATOMIC_EXCHANGE_USES_CAS 0 > + > +/* Compare and exchange. > + For all "bool" routines, we return FALSE if exchange succesful. 
*/ > + > +#define __arch_compare_and_exchange_bool_8_int(mem, newval, oldval, model) \ > + ({ \ > + typeof (*mem) __oldval = (oldval); \ > + !__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, model, \ > + __ATOMIC_RELAXED); \ > + }) > + > +#define __arch_compare_and_exchange_bool_16_int(mem, newval, oldval, model) \ > + ({ \ > + typeof (*mem) __oldval = (oldval); \ > + !__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, model, \ > + __ATOMIC_RELAXED); \ > + }) > + > +#define __arch_compare_and_exchange_bool_32_int(mem, newval, oldval, model) \ > + ({ \ > + typeof (*mem) __oldval = (oldval); \ > + !__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, model, \ > + __ATOMIC_RELAXED); \ > + }) > + > +#define __arch_compare_and_exchange_bool_64_int(mem, newval, oldval, model) \ > + ({ \ > + typeof (*mem) __oldval = (oldval); \ > + !__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, model, \ > + __ATOMIC_RELAXED); \ > + }) > + > +#define __arch_compare_and_exchange_val_8_int(mem, newval, oldval, model) \ > + ({ \ > + typeof (*mem) __oldval = (oldval); \ > + __atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, model, \ > + __ATOMIC_RELAXED); \ > + __oldval; \ > + }) > + > +#define __arch_compare_and_exchange_val_16_int(mem, newval, oldval, model) \ > + ({ \ > + typeof (*mem) __oldval = (oldval); \ > + __atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, model, \ > + __ATOMIC_RELAXED); \ > + __oldval; \ > + }) > + > +#define __arch_compare_and_exchange_val_32_int(mem, newval, oldval, model) \ > + ({ \ > + typeof (*mem) __oldval = (oldval); \ > + __atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, model, \ > + __ATOMIC_RELAXED); \ > + __oldval; \ > + }) > + > +#define __arch_compare_and_exchange_val_64_int(mem, newval, oldval, model) \ > + ({ \ > + typeof (*mem) __oldval = (oldval); \ > + __atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, model, \ > + __ATOMIC_RELAXED); 
\ > + __oldval; \ > + }) > + > +/* Atomic compare and exchange. */ > + > +#define atomic_compare_and_exchange_bool_acq(mem, new, old) \ > + __atomic_bool_bysize (__arch_compare_and_exchange_bool, int, mem, new, old, \ > + __ATOMIC_ACQUIRE) > + > +#define atomic_compare_and_exchange_val_acq(mem, new, old) \ > + __atomic_val_bysize (__arch_compare_and_exchange_val, int, mem, new, old, \ > + __ATOMIC_ACQUIRE) > + > +#define atomic_compare_and_exchange_val_rel(mem, new, old) \ > + __atomic_val_bysize (__arch_compare_and_exchange_val, int, mem, new, old, \ > + __ATOMIC_RELEASE) > + > +/* Atomic exchange (without compare). */ > + > +#define __arch_exchange_8_int(mem, newval, model) \ > + __atomic_exchange_n (mem, newval, model) > + > +#define __arch_exchange_16_int(mem, newval, model) \ > + __atomic_exchange_n (mem, newval, model) > + > +#define __arch_exchange_32_int(mem, newval, model) \ > + __atomic_exchange_n (mem, newval, model) > + > +#define __arch_exchange_64_int(mem, newval, model) \ > + __atomic_exchange_n (mem, newval, model) > + > +#define atomic_exchange_acq(mem, value) \ > + __atomic_val_bysize (__arch_exchange, int, mem, value, __ATOMIC_ACQUIRE) > + > +#define atomic_exchange_rel(mem, value) \ > + __atomic_val_bysize (__arch_exchange, int, mem, value, __ATOMIC_RELEASE) > + > +/* Atomically add value and return the previous (unincremented) value. 
*/ > + > +#define __arch_exchange_and_add_8_int(mem, value, model) \ > + __atomic_fetch_add (mem, value, model) > + > +#define __arch_exchange_and_add_16_int(mem, value, model) \ > + __atomic_fetch_add (mem, value, model) > + > +#define __arch_exchange_and_add_32_int(mem, value, model) \ > + __atomic_fetch_add (mem, value, model) > + > +#define __arch_exchange_and_add_64_int(mem, value, model) \ > + __atomic_fetch_add (mem, value, model) > + > +#define atomic_exchange_and_add_acq(mem, value) \ > + __atomic_val_bysize (__arch_exchange_and_add, int, mem, value, \ > + __ATOMIC_ACQUIRE) > + > +#define atomic_exchange_and_add_rel(mem, value) \ > + __atomic_val_bysize (__arch_exchange_and_add, int, mem, value, \ > + __ATOMIC_RELEASE) > + > +/* Miscellaneous. */ > + > +#define asm_amo(which, mem, value) \ > + ({ \ > + __atomic_check_size (mem); \ > + typeof (*mem) __tmp; \ > + if (sizeof (__tmp) == 4) \ > + asm volatile(which ".w" \ > + "\t%0, %z2, %1" \ > + : "=&r"(__tmp), "+ZB"(*(mem)) \ > + : "rJ"(value)); \ > + else if (sizeof (__tmp) == 8) \ It does not have the case of !__HAVE_64B_ATOMICS, but since only 64-bit is supported I don't think this will be troublesome. Also, the idea is to remove such atomic macros in the next release. > + asm volatile(which ".d" \ > + "\t%0, %z2, %1" \ > + : "=&r"(__tmp), "+ZB"(*(mem)) \ > + : "rJ"(value)); \ > + else \ > + abort (); \ > + __tmp; \ > + }) > + > +#define atomic_max(mem, value) asm_amo ("ammax_db", mem, value) > +#define atomic_min(mem, value) asm_amo ("ammin_db", mem, value) > + > +#define atomic_bit_test_set(mem, bit) \ > + ({ \ > + typeof (*mem) __mask = (typeof (*mem)) 1 << (bit); \ > + asm_amo ("amor_db", mem, __mask) & __mask; \ > + }) > + > +#define catomic_exchange_and_add(mem, value) \ > + atomic_exchange_and_add (mem, value) > +#define catomic_max(mem, value) atomic_max (mem, value) > + > +#endif /* bits/atomic.h */