From: Adhemerval Zanella <adhemerval.zanella@linaro.org>
To: libc-alpha@sourceware.org
Cc: Norbert Manthey <nmanthey@conp-solutions.com>,
	Guillaume Morin <guillaume@morinfr.org>,
	Siddhesh Poyarekar <siddhesh@sourceware.org>
Subject: [PATCH v2 3/4] malloc: Move mmap logic to its own function
Date: Wed, 18 Aug 2021 11:19:59 -0300
Message-ID: <20210818142000.128752-4-adhemerval.zanella@linaro.org>
In-Reply-To: <20210818142000.128752-1-adhemerval.zanella@linaro.org>

So it can be used with different page sizes and mmap flags.
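
As a usage sketch only: with the helper extracted, a caller inside
sysmalloc could request a different page size and extra mmap flags
along the lines below.  The hp_pagesize value is a placeholder
assumption for the huge-page support added later in this series, not
something this patch defines; MAP_HUGETLB is the standard Linux mmap
flag, and nb/av are the existing sysmalloc locals.

  /* Illustrative sketch, not part of this patch: try a huge-page
     mapping first, then fall back to the default page size with no
     extra flags if it fails.  */
  char *mm = sysmalloc_mmap (nb, hp_pagesize, MAP_HUGETLB, av);
  if (mm == MAP_FAILED)
    mm = sysmalloc_mmap (nb, GLRO (dl_pagesize), 0, av);
  if (mm != MAP_FAILED)
    return mm;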
---
 malloc/malloc.c | 155 +++++++++++++++++++++++++-----------------------
 1 file changed, 82 insertions(+), 73 deletions(-)

diff --git a/malloc/malloc.c b/malloc/malloc.c
index 1a2c798a35..4bfcea286f 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -2414,6 +2414,85 @@ do_check_malloc_state (mstate av)
    be extended or replaced.
  */
 
+static void *
+sysmalloc_mmap (INTERNAL_SIZE_T nb, size_t pagesize, int extra_flags, mstate av)
+{
+  long int size;
+
+  /*
+    Round up size to nearest page.  For mmapped chunks, the overhead is one
+    SIZE_SZ unit larger than for normal chunks, because there is no
+    following chunk whose prev_size field could be used.
+
+    See the front_misalign handling below; for glibc there is no need for
+    further alignment unless we have high alignment.
+   */
+  if (MALLOC_ALIGNMENT == CHUNK_HDR_SZ)
+    size = ALIGN_UP (nb + SIZE_SZ, pagesize);
+  else
+    size = ALIGN_UP (nb + SIZE_SZ + MALLOC_ALIGN_MASK, pagesize);
+
+  /* Don't try if size wraps around 0.  */
+  if ((unsigned long) (size) <= (unsigned long) (nb))
+    return MAP_FAILED;
+
+  char *mm = (char *) MMAP (0, size,
+			    mtag_mmap_flags | PROT_READ | PROT_WRITE,
+			    extra_flags);
+  if (mm == MAP_FAILED)
+    return mm;
+
+  sysmadvise_thp (mm, size);
+
+  /*
+    The offset to the start of the mmapped region is stored in the prev_size
+    field of the chunk.  This allows us to adjust returned start address to
+    meet alignment requirements here and in memalign(), and still be able to
+    compute proper address argument for later munmap in free() and realloc().
+   */
+
+  INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
+
+  if (MALLOC_ALIGNMENT == CHUNK_HDR_SZ)
+    {
+      /* For glibc, chunk2mem increases the address by CHUNK_HDR_SZ and
+	 MALLOC_ALIGN_MASK is CHUNK_HDR_SZ-1.  Each mmap'ed area is page
+	 aligned and therefore definitely MALLOC_ALIGN_MASK-aligned.  */
+      assert (((INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK) == 0);
+      front_misalign = 0;
+    }
+  else
+    front_misalign = (INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK;
+
+  mchunkptr p;                    /* the allocated/returned chunk */
+
+  if (front_misalign > 0)
+    {
+      ptrdiff_t correction = MALLOC_ALIGNMENT - front_misalign;
+      p = (mchunkptr) (mm + correction);
+      set_prev_size (p, correction);
+      set_head (p, (size - correction) | IS_MMAPPED);
+    }
+  else
+    {
+      p = (mchunkptr) mm;
+      set_prev_size (p, 0);
+      set_head (p, size | IS_MMAPPED);
+    }
+
+  /* Update statistics.  */
+  int new = atomic_exchange_and_add (&mp_.n_mmaps, 1) + 1;
+  atomic_max (&mp_.max_n_mmaps, new);
+
+  unsigned long sum;
+  sum = atomic_exchange_and_add (&mp_.mmapped_mem, size) + size;
+  atomic_max (&mp_.max_mmapped_mem, sum);
+
+  check_chunk (av, p);
+
+  return chunk2mem (p);
+}
+
 static void *
 sysmalloc (INTERNAL_SIZE_T nb, mstate av)
 {
@@ -2451,81 +2530,11 @@ sysmalloc (INTERNAL_SIZE_T nb, mstate av)
       || ((unsigned long) (nb) >= (unsigned long) (mp_.mmap_threshold)
 	  && (mp_.n_mmaps < mp_.n_mmaps_max)))
     {
-      char *mm;           /* return value from mmap call*/
-
     try_mmap:
-      /*
-         Round up size to nearest page.  For mmapped chunks, the overhead
-         is one SIZE_SZ unit larger than for normal chunks, because there
-         is no following chunk whose prev_size field could be used.
-
-         See the front_misalign handling below, for glibc there is no
-         need for further alignments unless we have have high alignment.
-       */
-      if (MALLOC_ALIGNMENT == CHUNK_HDR_SZ)
-        size = ALIGN_UP (nb + SIZE_SZ, pagesize);
-      else
-        size = ALIGN_UP (nb + SIZE_SZ + MALLOC_ALIGN_MASK, pagesize);
+      char *mm = sysmalloc_mmap (nb, pagesize, 0, av);
+      if (mm != MAP_FAILED)
+	return mm;
       tried_mmap = true;
-
-      /* Don't try if size wraps around 0 */
-      if ((unsigned long) (size) > (unsigned long) (nb))
-        {
-          mm = (char *) (MMAP (0, size,
-			       mtag_mmap_flags | PROT_READ | PROT_WRITE, 0));
-
-          if (mm != MAP_FAILED)
-            {
-	      sysmadvise_thp (mm, size);
-
-              /*
-                 The offset to the start of the mmapped region is stored
-                 in the prev_size field of the chunk. This allows us to adjust
-                 returned start address to meet alignment requirements here
-                 and in memalign(), and still be able to compute proper
-                 address argument for later munmap in free() and realloc().
-               */
-
-              if (MALLOC_ALIGNMENT == CHUNK_HDR_SZ)
-                {
-                  /* For glibc, chunk2mem increases the address by
-                     CHUNK_HDR_SZ and MALLOC_ALIGN_MASK is
-                     CHUNK_HDR_SZ-1.  Each mmap'ed area is page
-                     aligned and therefore definitely
-                     MALLOC_ALIGN_MASK-aligned.  */
-                  assert (((INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK) == 0);
-                  front_misalign = 0;
-                }
-              else
-                front_misalign = (INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK;
-              if (front_misalign > 0)
-                {
-                  correction = MALLOC_ALIGNMENT - front_misalign;
-                  p = (mchunkptr) (mm + correction);
-		  set_prev_size (p, correction);
-                  set_head (p, (size - correction) | IS_MMAPPED);
-                }
-              else
-                {
-                  p = (mchunkptr) mm;
-		  set_prev_size (p, 0);
-                  set_head (p, size | IS_MMAPPED);
-                }
-
-              /* update statistics */
-
-              int new = atomic_exchange_and_add (&mp_.n_mmaps, 1) + 1;
-              atomic_max (&mp_.max_n_mmaps, new);
-
-              unsigned long sum;
-              sum = atomic_exchange_and_add (&mp_.mmapped_mem, size) + size;
-              atomic_max (&mp_.max_mmapped_mem, sum);
-
-              check_chunk (av, p);
-
-              return chunk2mem (p);
-            }
-        }
     }
 
   /* There are no usable arenas and mmap also failed.  */
-- 
2.30.2


Thread overview: 24+ messages
2021-08-18 14:19 [PATCH v2 0/4] malloc: Improve Huge Page support Adhemerval Zanella
2021-08-18 14:19 ` [PATCH v2 1/4] malloc: Add madvise support for Transparent Huge Pages Adhemerval Zanella
2021-08-18 18:42   ` Siddhesh Poyarekar
2021-08-19 12:00     ` Adhemerval Zanella
2021-08-19 12:22       ` Siddhesh Poyarekar
2021-08-18 14:19 ` [PATCH v2 2/4] malloc: Add THP/madvise support for sbrk Adhemerval Zanella
2021-08-18 14:19 ` Adhemerval Zanella [this message]
2021-08-19  0:47   ` [PATCH v2 3/4] malloc: Move mmap logic to its own function Siddhesh Poyarekar
2021-08-18 14:20 ` [PATCH v2 4/4] malloc: Add Huge Page support for sysmalloc Adhemerval Zanella
2021-08-19  1:03   ` Siddhesh Poyarekar
2021-08-19 12:08     ` Adhemerval Zanella
2021-08-19 17:58   ` Matheus Castanho
2021-08-19 18:50     ` Adhemerval Zanella
2021-08-20 12:34       ` Matheus Castanho
2021-08-18 18:11 ` [PATCH v2 0/4] malloc: Improve Huge Page support Siddhesh Poyarekar
2021-08-19 11:26   ` Adhemerval Zanella
2021-08-19 11:48     ` Siddhesh Poyarekar
2021-08-19 12:04       ` Adhemerval Zanella
2021-08-19 12:26         ` Siddhesh Poyarekar
2021-08-19 12:42           ` Adhemerval Zanella
2021-08-19 16:42 ` Guillaume Morin
2021-08-19 16:55   ` Adhemerval Zanella
2021-08-19 17:17     ` Guillaume Morin
2021-08-19 17:27       ` Adhemerval Zanella
