public inbox for glibc-cvs@sourceware.org
* [glibc/nsz/mtag] malloc: use memsize instead of CHUNK_AVAILABLE_SIZE
@ 2021-03-11 17:39 Szabolcs Nagy
From: Szabolcs Nagy @ 2021-03-11 17:39 UTC
  To: glibc-cvs

https://sourceware.org/git/gitweb.cgi?p=glibc.git;h=0c487d5920710ab8132a41dab6794f246e4e7181

commit 0c487d5920710ab8132a41dab6794f246e4e7181
Author: Szabolcs Nagy <szabolcs.nagy@arm.com>
Date:   Mon Mar 8 12:59:05 2021 +0000

    malloc: use memsize instead of CHUNK_AVAILABLE_SIZE
    
    Code refactor, no behaviour change.  CHUNK_AVAILABLE_SIZE (p) is replaced
    by memsize (p), which excludes the chunk header, and the definition is
    moved later in malloc.c.

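The change is mechanical: the new memsize (p) is exactly the old
CHUNK_AVAILABLE_SIZE (p) minus CHUNK_HDR_SZ, so call sites that subtracted the
header now call memsize directly, while the malloc-check byte scans spell the
header out as CHUNK_HDR_SZ + memsize (p).  A minimal standalone sketch of that
identity follows; it is not glibc code, and the chunk struct, granule constant
and mtag flag below are simplified stand-ins for the real internals.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define SIZE_SZ       (sizeof (size_t))  /* stand-in for INTERNAL_SIZE_T size */
#define CHUNK_HDR_SZ  (2 * SIZE_SZ)      /* prev_size + size header fields */
#define GRANULE_SZ    16                 /* stand-in for __MTAG_GRANULE_SIZE */

static int mtag_enabled;                 /* stand-in for the runtime flag */

/* Toy chunk carrying only what the size macros look at.  */
struct chunk { size_t sz; int mmapped; };

static size_t chunksize (const struct chunk *p) { return p->sz; }
static int chunk_is_mmapped (const struct chunk *p) { return p->mmapped; }

/* Old definition, as removed by the patch (usable data plus header).  */
static size_t
chunk_available_size (const struct chunk *p)
{
  return (GRANULE_SZ > SIZE_SZ && mtag_enabled)
	 ? chunksize (p)
	 : chunksize (p) + (chunk_is_mmapped (p) ? 0 : SIZE_SZ);
}

/* New definition, as added by the patch (usable data only).  */
static size_t
memsize (const struct chunk *p)
{
  return (GRANULE_SZ > SIZE_SZ && mtag_enabled)
	 ? chunksize (p) - CHUNK_HDR_SZ
	 : chunksize (p) - CHUNK_HDR_SZ + (chunk_is_mmapped (p) ? 0 : SIZE_SZ);
}

int
main (void)
{
  struct chunk c = { .sz = 64, .mmapped = 0 };
  for (mtag_enabled = 0; mtag_enabled <= 1; mtag_enabled++)
    for (c.mmapped = 0; c.mmapped <= 1; c.mmapped++)
      assert (memsize (&c) == chunk_available_size (&c) - CHUNK_HDR_SZ);
  puts ("memsize (p) == CHUNK_AVAILABLE_SIZE (p) - CHUNK_HDR_SZ in all cases");
  return 0;
}
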
Diff:
---
 malloc/hooks.c  | 11 +++++------
 malloc/malloc.c | 39 +++++++++++++++++++--------------------
 2 files changed, 24 insertions(+), 26 deletions(-)

diff --git a/malloc/hooks.c b/malloc/hooks.c
index 9474e199c3..b82ff5781b 100644
--- a/malloc/hooks.c
+++ b/malloc/hooks.c
@@ -102,7 +102,7 @@ malloc_check_get_size (mchunkptr p)
 
   assert (using_malloc_checking == 1);
 
-  for (size = CHUNK_AVAILABLE_SIZE (p) - 1;
+  for (size = CHUNK_HDR_SZ + memsize (p) - 1;
        (c = *SAFE_CHAR_OFFSET (p, size)) != magic;
        size -= c)
     {
@@ -130,7 +130,7 @@ mem2mem_check (void *ptr, size_t req_sz)
 
   p = mem2chunk (ptr);
   magic = magicbyte (p);
-  max_sz = CHUNK_AVAILABLE_SIZE (p) - CHUNK_HDR_SZ;
+  max_sz = memsize (p);
 
   for (i = max_sz - 1; i > req_sz; i -= block_sz)
     {
@@ -175,7 +175,7 @@ mem2chunk_check (void *mem, unsigned char **magic_p)
                                next_chunk (prev_chunk (p)) != p)))
         return NULL;
 
-      for (sz = CHUNK_AVAILABLE_SIZE (p) - 1;
+      for (sz = CHUNK_HDR_SZ + memsize (p) - 1;
 	   (c = *SAFE_CHAR_OFFSET (p, sz)) != magic;
 	   sz -= c)
         {
@@ -200,7 +200,7 @@ mem2chunk_check (void *mem, unsigned char **magic_p)
           ((prev_size (p) + sz) & page_mask) != 0)
         return NULL;
 
-      for (sz = CHUNK_AVAILABLE_SIZE (p) - 1;
+      for (sz = CHUNK_HDR_SZ + memsize (p) - 1;
 	   (c = *SAFE_CHAR_OFFSET (p, sz)) != magic;
 	   sz -= c)
         {
@@ -279,8 +279,7 @@ free_check (void *mem, const void *caller)
   else
     {
       /* Mark the chunk as belonging to the library again.  */
-      (void)tag_region (chunk2rawmem (p), CHUNK_AVAILABLE_SIZE (p)
-                                         - CHUNK_HDR_SZ);
+      (void)tag_region (chunk2rawmem (p), memsize (p));
       _int_free (&main_arena, p, 1);
       __libc_lock_unlock (main_arena.mutex);
     }
diff --git a/malloc/malloc.c b/malloc/malloc.c
index 36583120ce..03eb0f40fa 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -1331,18 +1331,6 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    MINSIZE :                                                      \
    ((req) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)
 
-/* Available size of chunk.  This is the size of the real usable data
-   in the chunk, plus the chunk header.  Note: If memory tagging is
-   enabled the layout changes to accomodate the granule size, this is
-   wasteful for small allocations so not done by default.  The logic
-   does not work if chunk headers are not granule aligned.  */
-_Static_assert (__MTAG_GRANULE_SIZE <= CHUNK_HDR_SZ,
-		"memory tagging is not supported with large granule.");
-#define CHUNK_AVAILABLE_SIZE(p)                                       \
-  (__MTAG_GRANULE_SIZE > SIZE_SZ && __glibc_unlikely (mtag_enabled) ? \
-    chunksize (p) :                                                   \
-    chunksize (p) + (chunk_is_mmapped (p) ? 0 : SIZE_SZ))
-
 /* Check if REQ overflows when padded and aligned and if the resulting value
    is less than PTRDIFF_T.  Returns TRUE and the requested size or MINSIZE in
    case the value is less than MINSIZE on SZ or false if any of the previous
@@ -1465,14 +1453,26 @@ checked_request2size (size_t req, size_t *sz) __nonnull (1)
 #pragma GCC poison mchunk_size
 #pragma GCC poison mchunk_prev_size
 
+/* This is the size of the real usable data in the chunk.  Not valid for
+   dumped heap chunks.  */
+#define memsize(p)                                                    \
+  (__MTAG_GRANULE_SIZE > SIZE_SZ && __glibc_unlikely (mtag_enabled) ? \
+    chunksize (p) - CHUNK_HDR_SZ :                                    \
+    chunksize (p) - CHUNK_HDR_SZ + (chunk_is_mmapped (p) ? 0 : SIZE_SZ))
+
+/* If memory tagging is enabled the layout changes to accommodate the granule
+   size; this is wasteful for small allocations so not done by default.
+   Both the chunk header and user data have to be granule aligned.  */
+_Static_assert (__MTAG_GRANULE_SIZE <= CHUNK_HDR_SZ,
+		"memory tagging is not supported with large granule.");
+
 static __always_inline void *
 tag_new_usable (void *ptr)
 {
   if (__glibc_unlikely (mtag_enabled) && ptr)
     {
       mchunkptr cp = mem2chunk(ptr);
-      ptr = __libc_mtag_tag_region (__libc_mtag_new_tag (ptr),
-				    CHUNK_AVAILABLE_SIZE (cp) - CHUNK_HDR_SZ);
+      ptr = __libc_mtag_tag_region (__libc_mtag_new_tag (ptr), memsize (cp));
     }
   return ptr;
 }
@@ -3316,8 +3316,7 @@ __libc_free (void *mem)
       MAYBE_INIT_TCACHE ();
 
       /* Mark the chunk as belonging to the library again.  */
-      (void)tag_region (chunk2rawmem (p),
-			CHUNK_AVAILABLE_SIZE (p) - CHUNK_HDR_SZ);
+      (void)tag_region (chunk2rawmem (p), memsize (p));
 
       ar_ptr = arena_for_chunk (p);
       _int_free (ar_ptr, p, 0);
@@ -3459,7 +3458,7 @@ __libc_realloc (void *oldmem, size_t bytes)
       newp = __libc_malloc (bytes);
       if (newp != NULL)
         {
-	  size_t sz = CHUNK_AVAILABLE_SIZE (oldp) - CHUNK_HDR_SZ;
+	  size_t sz = memsize (oldp);
 	  memcpy (newp, oldmem, sz);
 	  (void) tag_region (chunk2rawmem (oldp), sz);
           _int_free (ar_ptr, oldp, 0);
@@ -3675,7 +3674,7 @@ __libc_calloc (size_t n, size_t elem_size)
      regardless of MORECORE_CLEARS, so we zero the whole block while
      doing so.  */
   if (__glibc_unlikely (mtag_enabled))
-    return tag_new_zero_region (mem, CHUNK_AVAILABLE_SIZE (p) - CHUNK_HDR_SZ);
+    return tag_new_zero_region (mem, memsize (p));
 
   INTERNAL_SIZE_T csz = chunksize (p);
 
@@ -4863,7 +4862,7 @@ _int_realloc(mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
           else
             {
 	      void *oldmem = chunk2rawmem (oldp);
-	      size_t sz = CHUNK_AVAILABLE_SIZE (oldp) - CHUNK_HDR_SZ;
+	      size_t sz = memsize (oldp);
 	      (void) tag_region (oldmem, sz);
 	      newmem = tag_new_usable (newmem);
 	      memcpy (newmem, oldmem, sz);
@@ -5110,7 +5109,7 @@ musable (void *mem)
 	    result = chunksize (p) - CHUNK_HDR_SZ;
 	}
       else if (inuse (p))
-	result = CHUNK_AVAILABLE_SIZE (p) - CHUNK_HDR_SZ;
+	result = memsize (p);
 
       return result;
     }

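For orientation only (not part of the patch): the value memsize reports is
what malloc_usable_size () ultimately returns through musable ().  The
expected number below is an assumption about a typical 64-bit build with
SIZE_SZ == 8, CHUNK_HDR_SZ == 16, memory tagging disabled and a non-mmapped
chunk: a 24-byte request is padded to a 32-byte chunk, and 32 - 16 + 8 = 24
bytes come back as usable, since the next chunk's prev_size field doubles as
user data while this chunk is in use.

/* User-level probe of the quantity musable () now computes with memsize (p).
   On the 64-bit configuration described above this prints
   "malloc (24) -> 24 usable".  */
#include <malloc.h>
#include <stdio.h>
#include <stdlib.h>

int
main (void)
{
  void *p = malloc (24);
  if (p == NULL)
    return 1;
  printf ("malloc (24) -> %zu usable\n", malloc_usable_size (p));
  free (p);
  return 0;
}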
