From: DJ Delorie <dj@redhat.com>
To: Adhemerval Zanella <adhemerval.zanella@linaro.org>
Cc: libc-alpha@sourceware.org, Wilco.Dijkstra@arm.com
Subject: Re: [PATCH v3] malloc: Use C11 atomics on memusage
Date: Wed, 29 Mar 2023 15:16:38 -0400 [thread overview]
Message-ID: <xno7obxt9l.fsf@greed.delorie.com> (raw)
In-Reply-To: <20230323161737.2592579-1-adhemerval.zanella@linaro.org>
LGTM
Reviewed-by: DJ Delorie <dj@redhat.com>
Adhemerval Zanella <adhemerval.zanella@linaro.org> writes:
> -#include <atomic.h>
> -#include <errno.h>
> -#include <inttypes.h>
> -#include <signal.h>
> -#include <stdarg.h>
> -#include <stdint.h>
> -#include <string.h>
> -#include <unistd.h>
> +#include <stdarg.h>
> +#include <stdatomic.h>
> +#include <unistd.h>
Ok. I admit I'm also the kind of programmer that block-pastes a
#include list from existing source ;-)
> @@ -73,20 +68,20 @@ struct header
> #define MAGIC 0xfeedbeaf
>
>
> -static unsigned long int calls[idx_last];
> -static unsigned long int failed[idx_last];
> -static size_t total[idx_last];
> -static size_t grand_total;
> -static unsigned long int histogram[65536 / 16];
> -static unsigned long int large;
> -static unsigned long int calls_total;
> -static unsigned long int inplace;
> -static unsigned long int decreasing;
> -static unsigned long int realloc_free;
> -static unsigned long int inplace_mremap;
> -static unsigned long int decreasing_mremap;
> -static size_t current_heap;
> -static size_t peak_use[3];
> +static _Atomic unsigned long int calls[idx_last];
> +static _Atomic unsigned long int failed[idx_last];
> +static _Atomic size_t total[idx_last];
> +static _Atomic size_t grand_total;
> +static _Atomic unsigned long int histogram[65536 / 16];
> +static _Atomic unsigned long int large;
> +static _Atomic unsigned long int calls_total;
> +static _Atomic unsigned long int inplace;
> +static _Atomic unsigned long int decreasing;
> +static _Atomic unsigned long int realloc_free;
> +static _Atomic unsigned long int inplace_mremap;
> +static _Atomic unsigned long int decreasing_mremap;
> +static _Atomic size_t current_heap;
> +static _Atomic size_t peak_use[3];
> static __thread uintptr_t start_sp;
Ok.
> -static uint32_t buffer_cnt;
> +static _Atomic uint32_t buffer_cnt;
Ok.
> +static inline void
> +peak_atomic_max (_Atomic size_t *peak, size_t val)
> +{
> + size_t v;
> + do
> + {
> + v = atomic_load_explicit (peak, memory_order_relaxed);
> + if (v >= val)
> + break;
> + }
> + while (! atomic_compare_exchange_weak (peak, &v, val));
> +}
Ok.
> /* Compute current heap usage and compare it with the maximum value. */
> size_t heap
> -   = catomic_exchange_and_add (&current_heap, len - old_len) + len - old_len;
> - catomic_max (&peak_heap, heap);
> +   = atomic_fetch_add_explicit (&current_heap, len - old_len,
> + memory_order_relaxed) + len - old_len;
> + peak_atomic_max (&peak_heap, heap);
>
Ok.
> - catomic_max (&peak_stack, current_stack);
> + peak_atomic_max (&peak_stack, current_stack);
Ok.
> /* Add up heap and stack usage and compare it with the maximum value. */
> - catomic_max (&peak_total, heap + current_stack);
> + peak_atomic_max (&peak_total, heap + current_stack);
Ok.
> /* Store the value only if we are writing to a file. */
> if (fd != -1)
> {
> - uint32_t idx = catomic_exchange_and_add (&buffer_cnt, 1);
> + uint32_t idx = atomic_fetch_add_explicit (&buffer_cnt, 1,
> + memory_order_relaxed);
Ok.
> - catomic_compare_and_exchange_val_acq (&buffer_cnt, reset, idx + 1);
> + uint32_t expected = idx + 1;
> + atomic_compare_exchange_weak (&buffer_cnt, &expected, reset);
Ok.
> /* Keep track of number of calls. */
> - catomic_increment (&calls[idx_malloc]);
> + atomic_fetch_add_explicit (&calls[idx_malloc], 1, memory_order_relaxed);
Ok.
> /* Keep track of total memory consumption for `malloc'. */
> - catomic_add (&total[idx_malloc], len);
> + atomic_fetch_add_explicit (&total[idx_malloc], len, memory_order_relaxed);
Ok.
> /* Keep track of total memory requirement. */
> - catomic_add (&grand_total, len);
> + atomic_fetch_add_explicit (&grand_total, len, memory_order_relaxed);
Ok.
> /* Remember the size of the request. */
> if (len < 65536)
> - catomic_increment (&histogram[len / 16]);
> + atomic_fetch_add_explicit (&histogram[len / 16], 1, memory_order_relaxed);
> else
> - catomic_increment (&large);
> + atomic_fetch_add_explicit (&large, 1, memory_order_relaxed);
Ok.
> /* Total number of calls of any of the functions. */
> - catomic_increment (&calls_total);
> + atomic_fetch_add_explicit (&calls_total, 1, memory_order_relaxed);
Ok.
> /* Do the real work. */
> result = (struct header *) (*mallocp)(len + sizeof (struct header));
> if (result == NULL)
> {
> - catomic_increment (&failed[idx_malloc]);
> + atomic_fetch_add_explicit (&failed[idx_malloc], 1,
> + memory_order_relaxed);
> return NULL;
> }
Ok.
> /* Keep track of number of calls. */
> - catomic_increment (&calls[idx_realloc]);
> + atomic_fetch_add_explicit (&calls[idx_realloc], 1, memory_order_relaxed);
Ok.
> /* Keep track of total memory consumption for `realloc'. */
> - catomic_add (&total[idx_realloc], len - old_len);
> + atomic_fetch_add_explicit (&total[idx_realloc], len - old_len,
> + memory_order_relaxed);
Ok.
> /* Keep track of total memory requirement. */
> - catomic_add (&grand_total, len - old_len);
> + atomic_fetch_add_explicit (&grand_total, len - old_len,
> + memory_order_relaxed);
Ok.
> /* Special case. */
> - catomic_increment (&realloc_free);
> + atomic_fetch_add_explicit (&realloc_free, 1, memory_order_relaxed);
> /* Keep track of total memory freed using `free'. */
> - catomic_add (&total[idx_free], real->length);
> + atomic_fetch_add_explicit (&total[idx_free], real->length,
> + memory_order_relaxed);
Ok.
> /* Remember the size of the request. */
> if (len < 65536)
> - catomic_increment (&histogram[len / 16]);
> + atomic_fetch_add_explicit (&histogram[len / 16], 1, memory_order_relaxed);
> else
> - catomic_increment (&large);
> + atomic_fetch_add_explicit (&large, 1, memory_order_relaxed);
Ok.
> /* Total number of calls of any of the functions. */
> - catomic_increment (&calls_total);
> + atomic_fetch_add_explicit (&calls_total, 1, memory_order_relaxed);
Ok.
> - catomic_increment (&failed[idx_realloc]);
> + atomic_fetch_add_explicit (&failed[idx_realloc], 1,
> + memory_order_relaxed);
Ok.
> if (real == result)
> - catomic_increment (&inplace);
> + atomic_fetch_add_explicit (&inplace, 1, memory_order_relaxed);
Ok.
> /* Was the buffer increased? */
> if (old_len > len)
> - catomic_increment (&decreasing);
> + atomic_fetch_add_explicit (&decreasing, 1, memory_order_relaxed);
Ok.
> /* Keep track of number of calls. */
> - catomic_increment (&calls[idx_calloc]);
> + atomic_fetch_add_explicit (&calls[idx_calloc], 1, memory_order_relaxed);
Ok.
> /* Keep track of total memory consumption for `calloc'. */
> - catomic_add (&total[idx_calloc], size);
> + atomic_fetch_add_explicit (&total[idx_calloc], size, memory_order_relaxed);
Ok.
> /* Keep track of total memory requirement. */
> - catomic_add (&grand_total, size);
> + atomic_fetch_add_explicit (&grand_total, size, memory_order_relaxed);
Ok.
> /* Remember the size of the request. */
> if (size < 65536)
> - catomic_increment (&histogram[size / 16]);
> + atomic_fetch_add_explicit (&histogram[size / 16], 1,
> + memory_order_relaxed);
Ok.
> else
> - catomic_increment (&large);
> + atomic_fetch_add_explicit (&large, 1, memory_order_relaxed);
Ok.
> - catomic_increment (&failed[idx_calloc]);
> + atomic_fetch_add_explicit (&failed[idx_calloc], 1,
> + memory_order_relaxed);
Ok.
> - catomic_increment (&calls[idx_free]);
> + atomic_fetch_add_explicit (&calls[idx_free], 1, memory_order_relaxed);
Ok.
> /* Keep track of number of calls. */
> - catomic_increment (&calls[idx_free]);
> + atomic_fetch_add_explicit (&calls[idx_free], 1, memory_order_relaxed);
Ok.
> /* Keep track of total memory freed using `free'. */
> - catomic_add (&total[idx_free], real->length);
> + atomic_fetch_add_explicit (&total[idx_free], real->length,
> + memory_order_relaxed);
Ok.
> /* Keep track of number of calls. */
> - catomic_increment (&calls[idx]);
> + atomic_fetch_add_explicit (&calls[idx], 1, memory_order_relaxed);
Ok.
> /* Keep track of total memory consumption for `malloc'. */
> - catomic_add (&total[idx], len);
> + atomic_fetch_add_explicit (&total[idx], len, memory_order_relaxed);
Ok.
> /* Keep track of total memory requirement. */
> - catomic_add (&grand_total, len);
> + atomic_fetch_add_explicit (&grand_total, len, memory_order_relaxed);
Ok.
> /* Remember the size of the request. */
> if (len < 65536)
> - catomic_increment (&histogram[len / 16]);
> + atomic_fetch_add_explicit (&histogram[len / 16], 1,
> + memory_order_relaxed);
Ok.
> else
> - catomic_increment (&large);
> + atomic_fetch_add_explicit (&large, 1, memory_order_relaxed);
Ok.
> /* Total number of calls of any of the functions. */
> - catomic_increment (&calls_total);
> + atomic_fetch_add_explicit (&calls_total, 1, memory_order_relaxed);
Ok.
> /* Check for failures. */
> if (result == NULL)
> - catomic_increment (&failed[idx]);
> + atomic_fetch_add_explicit (&failed[idx], 1, memory_order_relaxed);
Ok.
> /* Keep track of number of calls. */
> - catomic_increment (&calls[idx]);
> + atomic_fetch_add_explicit (&calls[idx], 1, memory_order_relaxed);
Ok.
> /* Keep track of total memory consumption for `malloc'. */
> - catomic_add (&total[idx], len);
> + atomic_fetch_add_explicit (&total[idx], len, memory_order_relaxed);
Ok.
> /* Keep track of total memory requirement. */
> - catomic_add (&grand_total, len);
> + atomic_fetch_add_explicit (&grand_total, len, memory_order_relaxed);
Ok.
> /* Remember the size of the request. */
> if (len < 65536)
> - catomic_increment (&histogram[len / 16]);
> + atomic_fetch_add_explicit (&histogram[len / 16], 1,
> + memory_order_relaxed);
Ok.
> else
> - catomic_increment (&large);
> + atomic_fetch_add_explicit (&large, 1, memory_order_relaxed);
Ok.
> /* Total number of calls of any of the functions. */
> - catomic_increment (&calls_total);
> + atomic_fetch_add_explicit (&calls_total, 1, memory_order_relaxed);
Ok.
> /* Check for failures. */
> if (result == NULL)
> - catomic_increment (&failed[idx]);
> + atomic_fetch_add_explicit (&failed[idx], 1, memory_order_relaxed);
Ok.
> /* Keep track of number of calls. */
> - catomic_increment (&calls[idx_mremap]);
> + atomic_fetch_add_explicit (&calls[idx_mremap], 1, memory_order_relaxed);
Ok.
> /* Keep track of total memory consumption for `malloc'. */
> - catomic_add (&total[idx_mremap], len - old_len);
> + atomic_fetch_add_explicit (&total[idx_mremap], len - old_len,
> + memory_order_relaxed);
Ok.
> /* Keep track of total memory requirement. */
> - catomic_add (&grand_total, len - old_len);
> + atomic_fetch_add_explicit (&grand_total, len - old_len,
> + memory_order_relaxed);
Ok.
> if (len < 65536)
> - catomic_increment (&histogram[len / 16]);
> + atomic_fetch_add_explicit (&histogram[len / 16], 1,
> + memory_order_relaxed);
Ok.
> else
> - catomic_increment (&large);
> + atomic_fetch_add_explicit (&large, 1, memory_order_relaxed);
Ok.
> /* Total number of calls of any of the functions. */
> - catomic_increment (&calls_total);
> + atomic_fetch_add_explicit (&calls_total, 1, memory_order_relaxed);
Ok.
> /* Check for failures. */
> if (result == NULL)
> - catomic_increment (&failed[idx_mremap]);
> + atomic_fetch_add_explicit (&failed[idx_mremap], 1,
> + memory_order_relaxed);
Ok.
> /* Record whether the reduction/increase happened in place. */
> if (start == result)
> - catomic_increment (&inplace_mremap);
> + atomic_fetch_add_explicit (&inplace_mremap, 1,
> + memory_order_relaxed);
Ok.
> /* Was the buffer increased? */
> if (old_len > len)
> - catomic_increment (&decreasing_mremap);
> + atomic_fetch_add_explicit (&decreasing_mremap, 1,
> + memory_order_relaxed);
Ok.
> /* Keep track of number of calls. */
> - catomic_increment (&calls[idx_munmap]);
> + atomic_fetch_add_explicit (&calls[idx_munmap], 1, memory_order_relaxed);
Ok.
> /* Keep track of total memory freed using `free'. */
> - catomic_add (&total[idx_munmap], len);
> + atomic_fetch_add_explicit (&total[idx_munmap], len,
> + memory_order_relaxed);
Ok.
> - catomic_increment (&failed[idx_munmap]);
> + atomic_fetch_add_explicit (&failed[idx_munmap], 1,
> + memory_order_relaxed);
Ok.
next prev parent reply other threads:[~2023-03-29 19:16 UTC|newest]
Thread overview: 4+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-03-23 16:17 Adhemerval Zanella
2023-03-29 19:16 ` DJ Delorie [this message]
2023-04-03 13:10 ` Wilco Dijkstra
2023-04-03 13:35 ` Adhemerval Zanella Netto
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=xno7obxt9l.fsf@greed.delorie.com \
--to=dj@redhat.com \
--cc=Wilco.Dijkstra@arm.com \
--cc=adhemerval.zanella@linaro.org \
--cc=libc-alpha@sourceware.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for read-only IMAP folder(s) and NNTP newsgroup(s).