public inbox for libc-stable@sourceware.org
* [PATCH 06/10] Fix deadlock in _int_free consistency check
  2017-01-01  0:00 [PATCH 00/10][2.26] Malloc fixes and improvements Siddhesh Poyarekar
                   ` (5 preceding siblings ...)
  2017-01-01  0:00 ` [PATCH 07/10] Add single-threaded path to _int_free Siddhesh Poyarekar
@ 2017-01-01  0:00 ` Siddhesh Poyarekar
  2017-01-01  0:00 ` [PATCH 09/10] Add single-threaded path to malloc/realloc/calloc/memalloc Siddhesh Poyarekar
                   ` (3 subsequent siblings)
  10 siblings, 0 replies; 12+ messages in thread
From: Siddhesh Poyarekar @ 2017-01-01  0:00 UTC (permalink / raw)
  To: libc-stable; +Cc: Wilco Dijkstra

From: Wilco Dijkstra <wdijkstr@arm.com>

This patch fixes a deadlock in the fastbin consistency check.
If we fail the fast check due to concurrent modifications to
the next chunk or system_mem, we should not lock if we already
have the arena lock.  Simplify the check to make it obviously
correct.

	* malloc/malloc.c (_int_free): Fix deadlock bug in consistency check.

(cherry-pick d74e6f6c0de55fc588b1ac09c88eb0fb8b8600af)
---
 ChangeLog       |  4 ++++
 malloc/malloc.c | 21 ++++++++++++---------
 2 files changed, 16 insertions(+), 9 deletions(-)

diff --git a/ChangeLog b/ChangeLog
index d536c9a..49b720f 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,7 @@
+2017-10-19  Wilco Dijkstra  <wdijkstr@arm.com>
+
+	* malloc/malloc.c (_int_free): Fix deadlock bug in consistency check.
+
 2017-08-31  Florian Weimer  <fweimer@redhat.com>
 
 	* malloc/malloc.c (_int_free): Remove locked variable and related
diff --git a/malloc/malloc.c b/malloc/malloc.c
index 3608b34..44996e0 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -4148,17 +4148,20 @@ _int_free (mstate av, mchunkptr p, int have_lock)
 	|| __builtin_expect (chunksize (chunk_at_offset (p, size))
 			     >= av->system_mem, 0))
       {
+	bool fail = true;
 	/* We might not have a lock at this point and concurrent modifications
-	   of system_mem might have let to a false positive.  Redo the test
-	   after getting the lock.  */
-	if (!have_lock
-	    || ({ __libc_lock_lock (av->mutex);
-		  chunksize_nomask (chunk_at_offset (p, size)) <= 2 * SIZE_SZ
-		  || chunksize (chunk_at_offset (p, size)) >= av->system_mem;
-	        }))
+	   of system_mem might result in a false positive.  Redo the test after
+	   getting the lock.  */
+	if (!have_lock)
+	  {
+	    __libc_lock_lock (av->mutex);
+	    fail = (chunksize_nomask (chunk_at_offset (p, size)) <= 2 * SIZE_SZ
+		    || chunksize (chunk_at_offset (p, size)) >= av->system_mem);
+	    __libc_lock_unlock (av->mutex);
+	  }
+
+	if (fail)
 	  malloc_printerr ("free(): invalid next size (fast)");
-	if (! have_lock)
-	  __libc_lock_unlock (av->mutex);
       }
 
     free_perturb (chunk2mem(p), size - 2 * SIZE_SZ);
-- 
2.7.5
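
For illustration only (this sketch is not glibc code and not part of the
patch): the deadlock fixed here is the classic one of re-acquiring a
non-recursive lock that the caller already holds.  The old check took
av->mutex inside the re-check expression even when have_lock was set.  A
minimal standalone sketch, using an error-checking pthread mutex as a
stand-in for av->mutex so the relock reports EDEADLK instead of hanging:

/* sketch.c: re-locking a non-recursive mutex from a path that already
   holds it.  PTHREAD_MUTEX_ERRORCHECK turns the hang into EDEADLK so the
   hazard can be shown without freezing the process.
   Build: gcc -pthread sketch.c  */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t arena_mutex;   /* stand-in for av->mutex */

/* Stand-in for the consistency re-check; the bug was reaching this even
   when the caller (have_lock != 0) already held the lock.  */
static void
recheck_with_lock (void)
{
  int err = pthread_mutex_lock (&arena_mutex);
  if (err == EDEADLK)
    puts ("relock detected: a normal mutex would deadlock here");
  else if (err == 0)
    pthread_mutex_unlock (&arena_mutex);
}

int
main (void)
{
  pthread_mutexattr_t attr;
  pthread_mutexattr_init (&attr);
  pthread_mutexattr_settype (&attr, PTHREAD_MUTEX_ERRORCHECK);
  pthread_mutex_init (&arena_mutex, &attr);

  pthread_mutex_lock (&arena_mutex);   /* caller already holds the lock */
  recheck_with_lock ();                /* old code path: lock it again */
  pthread_mutex_unlock (&arena_mutex);
  return 0;
}

With a normal mutex the second lock call never returns; the simplified
check above only takes the lock when have_lock is zero, so that hang can
no longer happen.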

^ permalink raw reply	[flat|nested] 12+ messages in thread

* [PATCH 04/10] malloc: Change top_check return type to void
  2017-01-01  0:00 [PATCH 00/10][2.26] Malloc fixes and improvements Siddhesh Poyarekar
                   ` (9 preceding siblings ...)
  2017-01-01  0:00 ` [PATCH 00/10][2.26] Malloc fixes and improvements Siddhesh Poyarekar
@ 2017-01-01  0:00 ` Siddhesh Poyarekar
  10 siblings, 0 replies; 12+ messages in thread
From: Siddhesh Poyarekar @ 2017-01-01  0:00 UTC (permalink / raw)
  To: libc-stable; +Cc: Florian Weimer

From: Florian Weimer <fweimer@redhat.com>

After commit ec2c1fcefb200c6cb7e09553f3c6af8815013d83
(malloc: Abort on heap corruption, without a backtrace), the function
always returns 0.

(cherry-picked from 5129873a8e913e207e5f7b4b521c72f41a1bbf6d)
---
 ChangeLog       |  7 +++++++
 malloc/hooks.c  | 26 ++++++++++++--------------
 malloc/malloc.c |  2 +-
 3 files changed, 20 insertions(+), 15 deletions(-)

diff --git a/ChangeLog b/ChangeLog
index 577643c..519db42 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,10 @@
+2017-08-31  Florian Weimer  <fweimer@redhat.com>
+
+	* malloc/malloc.c (top_check): Change return type to void.  Remove
+	internal_function.
+	* malloc/hooks.c (top_check): Likewise.
+	(malloc_check, realloc_check, memalign_check): Adjust.
+
 2017-08-30  Florian Weimer  <fweimer@redhat.com>
 
 	* malloc/malloc.c (ARENA_CORRUPTION_BIT, arena_is_corrupt)
diff --git a/malloc/hooks.c b/malloc/hooks.c
index dcd311e..4398c0a 100644
--- a/malloc/hooks.c
+++ b/malloc/hooks.c
@@ -228,8 +228,7 @@ mem2chunk_check (void *mem, unsigned char **magic_p)
 }
 
 /* Check for corruption of the top chunk.  */
-static int
-internal_function
+static void
 top_check (void)
 {
   mchunkptr t = top (&main_arena);
@@ -240,7 +239,7 @@ top_check (void)
        prev_inuse (t) &&
        (!contiguous (&main_arena) ||
         (char *) t + chunksize (t) == mp_.sbrk_base + main_arena.system_mem)))
-    return 0;
+    return;
 
   malloc_printerr ("malloc: top chunk is corrupt");
 }
@@ -257,7 +256,8 @@ malloc_check (size_t sz, const void *caller)
     }
 
   __libc_lock_lock (main_arena.mutex);
-  victim = (top_check () >= 0) ? _int_malloc (&main_arena, sz + 1) : NULL;
+  top_check ();
+  victim = _int_malloc (&main_arena, sz + 1);
   __libc_lock_unlock (main_arena.mutex);
   return mem2mem_check (victim, sz);
 }
@@ -329,8 +329,8 @@ realloc_check (void *oldmem, size_t bytes, const void *caller)
         else
           {
             /* Must alloc, copy, free. */
-            if (top_check () >= 0)
-              newmem = _int_malloc (&main_arena, bytes + 1);
+	    top_check ();
+	    newmem = _int_malloc (&main_arena, bytes + 1);
             if (newmem)
               {
                 memcpy (newmem, oldmem, oldsize - 2 * SIZE_SZ);
@@ -341,12 +341,10 @@ realloc_check (void *oldmem, size_t bytes, const void *caller)
     }
   else
     {
-      if (top_check () >= 0)
-        {
-          INTERNAL_SIZE_T nb;
-          checked_request2size (bytes + 1, nb);
-          newmem = _int_realloc (&main_arena, oldp, oldsize, nb);
-        }
+      top_check ();
+      INTERNAL_SIZE_T nb;
+      checked_request2size (bytes + 1, nb);
+      newmem = _int_realloc (&main_arena, oldp, oldsize, nb);
     }
 
   /* mem2chunk_check changed the magic byte in the old chunk.
@@ -396,8 +394,8 @@ memalign_check (size_t alignment, size_t bytes, const void *caller)
     }
 
   __libc_lock_lock (main_arena.mutex);
-  mem = (top_check () >= 0) ? _int_memalign (&main_arena, alignment, bytes + 1) :
-        NULL;
+  top_check ();
+  mem = _int_memalign (&main_arena, alignment, bytes + 1);
   __libc_lock_unlock (main_arena.mutex);
   return mem2mem_check (mem, bytes);
 }
diff --git a/malloc/malloc.c b/malloc/malloc.c
index 65deb2f..417ffbb 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -1022,7 +1022,7 @@ static void*  _mid_memalign(size_t, size_t, void *);
 static void malloc_printerr(const char *str) __attribute__ ((noreturn));
 
 static void* internal_function mem2mem_check(void *p, size_t sz);
-static int internal_function top_check(void);
+static void top_check (void);
 static void internal_function munmap_chunk(mchunkptr p);
 #if HAVE_MREMAP
 static mchunkptr internal_function mremap_chunk(mchunkptr p, size_t new_size);
-- 
2.7.5

^ permalink raw reply	[flat|nested] 12+ messages in thread

* [PATCH 01/10] malloc: Abort on heap corruption, without a backtrace [BZ #21754]
  2017-01-01  0:00 [PATCH 00/10][2.26] Malloc fixes and improvements Siddhesh Poyarekar
  2017-01-01  0:00 ` [PATCH 08/10] Fix build issue with SINGLE_THREAD_P Siddhesh Poyarekar
  2017-01-01  0:00 ` [PATCH 02/10] malloc: Remove check_action variable [BZ #21754] Siddhesh Poyarekar
@ 2017-01-01  0:00 ` Siddhesh Poyarekar
  2017-01-01  0:00 ` [PATCH 05/10] malloc: Resolve compilation failure in NDEBUG mode Siddhesh Poyarekar
                   ` (7 subsequent siblings)
  10 siblings, 0 replies; 12+ messages in thread
From: Siddhesh Poyarekar @ 2017-01-01  0:00 UTC (permalink / raw)
  To: libc-stable; +Cc: Florian Weimer

From: Florian Weimer <fweimer@redhat.com>

The stack trace printing caused deadlocks and has itself been targeted
by code execution exploits.

(cherry-picked from ec2c1fcefb200c6cb7e09553f3c6af8815013d83)
---
 ChangeLog            |  9 +++++++++
 NEWS                 | 10 ++++++++++
 malloc/malloc.c      | 23 ++++-------------------
 manual/memory.texi   | 20 +++++++++-----------
 manual/tunables.texi | 28 +++++++---------------------
 5 files changed, 39 insertions(+), 51 deletions(-)

diff --git a/ChangeLog b/ChangeLog
index 812c538..7a35bff 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,12 @@
+2017-08-30  Florian Weimer  <fweimer@redhat.com>
+
+	[BZ #21754]
+	* malloc/malloc.c (malloc_printerr): Always terminate the process,
+	without printing a backtrace.  Do not leak any information in the
+	error message.
+	* manual/memory.texi (Heap Consistency Checking): Update.
+	* manual/tunables.texi (Memory Allocation Tunables): Likewise.
+
 2017-11-17  Tulio Magno Quites Machado Filho  <tuliom@linux.vnet.ibm.com>
 
 	* sysdeps/powerpc/bits/hwcap.h (PPC_FEATURE2_HTM_NO_SUSPEND): New
diff --git a/NEWS b/NEWS
index e7b62a8..359465f 100644
--- a/NEWS
+++ b/NEWS
@@ -7,6 +7,16 @@ using `glibc' in the "product" field.
 \f
 Version 2.26.1
 
+Major new features:
+
+* In order to support faster and safer process termination the malloc API
+  family of functions will no longer print a failure address and stack
+  backtrace after detecting heap corruption.  The goal is to minimize the
+  amount of work done after corruption is detected and to avoid potential
+  security issues in continued process execution.  Reducing shutdown time
+  leads to lower overall process restart latency, so there is benefit both
+  from a security and performance perspective.
+
 Security related changes:
 
   CVE-2009-5064: The ldd script would sometimes run the program under
diff --git a/malloc/malloc.c b/malloc/malloc.c
index dd9f699..c91fc09 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -1019,7 +1019,8 @@ static void*  _int_realloc(mstate, mchunkptr, INTERNAL_SIZE_T,
 static void*  _int_memalign(mstate, size_t, size_t);
 static void*  _mid_memalign(size_t, size_t, void *);
 
-static void malloc_printerr(int action, const char *str, void *ptr, mstate av);
+static void malloc_printerr(int action, const char *str, void *ptr, mstate av)
+  __attribute__ ((noreturn));
 
 static void* internal_function mem2mem_check(void *p, size_t sz);
 static int internal_function top_check(void);
@@ -5399,24 +5400,8 @@ malloc_printerr (int action, const char *str, void *ptr, mstate ar_ptr)
   if (ar_ptr)
     set_arena_corrupt (ar_ptr);
 
-  if ((action & 5) == 5)
-    __libc_message ((action & 2) ? (do_abort | do_backtrace) : do_message,
-		    "%s\n", str);
-  else if (action & 1)
-    {
-      char buf[2 * sizeof (uintptr_t) + 1];
-
-      buf[sizeof (buf) - 1] = '\0';
-      char *cp = _itoa_word ((uintptr_t) ptr, &buf[sizeof (buf) - 1], 16, 0);
-      while (cp > buf)
-        *--cp = '0';
-
-      __libc_message ((action & 2) ? (do_abort | do_backtrace) : do_message,
-		      "*** Error in `%s': %s: 0x%s ***\n",
-                      __libc_argv[0] ? : "<unknown>", str, cp);
-    }
-  else if (action & 2)
-    abort ();
+  __libc_message (do_abort, "%s\n", str);
+  __builtin_unreachable ();
 }
 
 /* We need a wrapper function for one of the additions of POSIX.  */
diff --git a/manual/memory.texi b/manual/memory.texi
index 82f4738..13cce7a 100644
--- a/manual/memory.texi
+++ b/manual/memory.texi
@@ -1309,17 +1309,15 @@ The block was already freed.
 
 Another possibility to check for and guard against bugs in the use of
 @code{malloc}, @code{realloc} and @code{free} is to set the environment
-variable @code{MALLOC_CHECK_}.  When @code{MALLOC_CHECK_} is set, a
-special (less efficient) implementation is used which is designed to be
-tolerant against simple errors, such as double calls of @code{free} with
-the same argument, or overruns of a single byte (off-by-one bugs).  Not
-all such errors can be protected against, however, and memory leaks can
-result.  If @code{MALLOC_CHECK_} is set to @code{0}, any detected heap
-corruption is silently ignored; if set to @code{1}, a diagnostic is
-printed on @code{stderr}; if set to @code{2}, @code{abort} is called
-immediately.  This can be useful because otherwise a crash may happen
-much later, and the true cause for the problem is then very hard to
-track down.
+variable @code{MALLOC_CHECK_}.  When @code{MALLOC_CHECK_} is set to a
+non-zero value, a special (less efficient) implementation is used which
+is designed to be tolerant against simple errors, such as double calls
+of @code{free} with the same argument, or overruns of a single byte
+(off-by-one bugs).  Not all such errors can be protected against,
+however, and memory leaks can result.
+
+Any detected heap corruption results in immediate termination of the
+process.
 
 There is one problem with @code{MALLOC_CHECK_}: in SUID or SGID binaries
 it could possibly be exploited since diverging from the normal programs
diff --git a/manual/tunables.texi b/manual/tunables.texi
index 3c19567..b09e3fe 100644
--- a/manual/tunables.texi
+++ b/manual/tunables.texi
@@ -71,27 +71,13 @@ following tunables in the @code{malloc} namespace:
 This tunable supersedes the @env{MALLOC_CHECK_} environment variable and is
 identical in features.
 
-Setting this tunable enables a special (less efficient) memory allocator for
-the malloc family of functions that is designed to be tolerant against simple
-errors such as double calls of free with the same argument, or overruns of a
-single byte (off-by-one bugs). Not all such errors can be protected against,
-however, and memory leaks can result.  The following list describes the values
-that this tunable can take and the effect they have on malloc functionality:
-
-@itemize @bullet
-@item @code{0} Ignore all errors.  The default allocator continues to be in
-use, but all errors are silently ignored.
-@item @code{1} Report errors.  The alternate allocator is selected and heap
-corruption, if detected, is reported as diagnostic messages to @code{stderr}
-and the program continues execution.
-@item @code{2} Abort on errors.  The alternate allocator is selected and if
-heap corruption is detected, the program is ended immediately by calling
-@code{abort}.
-@item @code{3} Fully enabled.  The alternate allocator is selected and is fully
-functional.  That is, if heap corruption is detected, a verbose diagnostic
-message is printed to @code{stderr} and the program is ended by calling
-@code{abort}.
-@end itemize
+Setting this tunable to a non-zero value enables a special (less
+efficient) memory allocator for the malloc family of functions that is
+designed to be tolerant against simple errors such as double calls of
+free with the same argument, or overruns of a single byte (off-by-one
+bugs). Not all such errors can be protected against, however, and memory
+leaks can result.  Any detected heap corruption results in immediate
+termination of the process.
 
 Like @env{MALLOC_CHECK_}, @code{glibc.malloc.check} has a problem in that it
 diverges from normal program behavior by writing to @code{stderr}, which could
-- 
2.7.5
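
For illustration only (not part of the patch): after this change, heap
corruption detected by the checking allocator terminates the process with
a single short message and no failure address or backtrace.  A minimal
program to observe the behaviour; the file name and exact diagnostic text
are assumptions, not something glibc guarantees:

/* double-free.c: deliberately corrupt the heap by freeing a block twice.
   Build: gcc double-free.c -o double-free
   Run:   MALLOC_CHECK_=3 ./double-free
          (or GLIBC_TUNABLES=glibc.malloc.check=3 ./double-free)
   The checking allocator should detect the second free, and the process
   is then terminated immediately via abort with a one-line error.  */
#include <stdlib.h>

int
main (void)
{
  char *p = malloc (32);
  free (p);
  free (p);   /* heap corruption: second free of the same pointer */
  return 0;   /* not reached if the corruption is detected */
}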

^ permalink raw reply	[flat|nested] 12+ messages in thread

* [PATCH 07/10] Add single-threaded path to _int_free
  2017-01-01  0:00 [PATCH 00/10][2.26] Malloc fixes and improvements Siddhesh Poyarekar
                   ` (4 preceding siblings ...)
  2017-01-01  0:00 ` [PATCH 03/10] malloc: Remove corrupt arena flag Siddhesh Poyarekar
@ 2017-01-01  0:00 ` Siddhesh Poyarekar
  2017-01-01  0:00 ` [PATCH 06/10] Fix deadlock in _int_free consistency check Siddhesh Poyarekar
                   ` (4 subsequent siblings)
  10 siblings, 0 replies; 12+ messages in thread
From: Siddhesh Poyarekar @ 2017-01-01  0:00 UTC (permalink / raw)
  To: libc-stable; +Cc: Wilco Dijkstra

From: Wilco Dijkstra <wdijkstr@arm.com>

This patch adds single-threaded fast paths to _int_free.
Bypass the explicit locking for larger allocations.

	* malloc/malloc.c (_int_free): Add SINGLE_THREAD_P fast paths.

(cherry-picked from a15d53e2de4c7d83bda251469d92a3c7b49a90db)
---
 ChangeLog       |  4 ++++
 malloc/malloc.c | 41 ++++++++++++++++++++++++++++-------------
 2 files changed, 32 insertions(+), 13 deletions(-)

diff --git a/ChangeLog b/ChangeLog
index 49b720f..30e6f50 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,7 @@
+2017-10-20  Wilco Dijkstra  <wdijkstr@arm.com>
+
+	* malloc/malloc.c (_int_free): Add SINGLE_THREAD_P fast paths.
+
 2017-10-19  Wilco Dijkstra  <wdijkstr@arm.com>
 
 	* malloc/malloc.c (_int_free): Fix deadlock bug in consistency check.
diff --git a/malloc/malloc.c b/malloc/malloc.c
index 44996e0..78676a6 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -4172,24 +4172,34 @@ _int_free (mstate av, mchunkptr p, int have_lock)
 
     /* Atomically link P to its fastbin: P->FD = *FB; *FB = P;  */
     mchunkptr old = *fb, old2;
-    unsigned int old_idx = ~0u;
-    do
+
+    if (SINGLE_THREAD_P)
       {
-	/* Check that the top of the bin is not the record we are going to add
-	   (i.e., double free).  */
+	/* Check that the top of the bin is not the record we are going to
+	   add (i.e., double free).  */
 	if (__builtin_expect (old == p, 0))
 	  malloc_printerr ("double free or corruption (fasttop)");
-	/* Check that size of fastbin chunk at the top is the same as
-	   size of the chunk that we are adding.  We can dereference OLD
-	   only if we have the lock, otherwise it might have already been
-	   deallocated.  See use of OLD_IDX below for the actual check.  */
-	if (have_lock && old != NULL)
-	  old_idx = fastbin_index(chunksize(old));
-	p->fd = old2 = old;
+	p->fd = old;
+	*fb = p;
       }
-    while ((old = catomic_compare_and_exchange_val_rel (fb, p, old2)) != old2);
+    else
+      do
+	{
+	  /* Check that the top of the bin is not the record we are going to
+	     add (i.e., double free).  */
+	  if (__builtin_expect (old == p, 0))
+	    malloc_printerr ("double free or corruption (fasttop)");
+	  p->fd = old2 = old;
+	}
+      while ((old = catomic_compare_and_exchange_val_rel (fb, p, old2))
+	     != old2);
 
-    if (have_lock && old != NULL && __builtin_expect (old_idx != idx, 0))
+    /* Check that size of fastbin chunk at the top is the same as
+       size of the chunk that we are adding.  We can dereference OLD
+       only if we have the lock, otherwise it might have already been
+       allocated again.  */
+    if (have_lock && old != NULL
+	&& __builtin_expect (fastbin_index (chunksize (old)) != idx, 0))
       malloc_printerr ("invalid fastbin entry (free)");
   }
 
@@ -4198,6 +4208,11 @@ _int_free (mstate av, mchunkptr p, int have_lock)
   */
 
   else if (!chunk_is_mmapped(p)) {
+
+    /* If we're single-threaded, don't lock the arena.  */
+    if (SINGLE_THREAD_P)
+      have_lock = true;
+
     if (!have_lock)
       __libc_lock_lock (av->mutex);
 
-- 
2.7.5
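
For illustration only (not glibc code): the fastbin fast path above is an
instance of a general pattern, skipping atomic operations when the process
is known to be single-threaded.  A minimal standalone sketch of a LIFO
push, with a plain flag standing in for SINGLE_THREAD_P:

/* st-push.c: push onto a LIFO free list either with plain stores (when no
   other thread can exist) or with a CAS retry loop (when one can).
   Build: gcc -std=c11 st-push.c  */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct chunk { struct chunk *fd; int id; };

static _Atomic (struct chunk *) bin_top;
static bool single_threaded = true;    /* stand-in for SINGLE_THREAD_P */

static void
push (struct chunk *p)
{
  if (single_threaded)
    {
      /* No concurrent access is possible: plain read-modify-write.  */
      p->fd = atomic_load_explicit (&bin_top, memory_order_relaxed);
      atomic_store_explicit (&bin_top, p, memory_order_relaxed);
    }
  else
    {
      /* Lock-free push: retry until the top has not changed under us.  */
      struct chunk *old = atomic_load (&bin_top);
      do
        p->fd = old;
      while (!atomic_compare_exchange_weak (&bin_top, &old, p));
    }
}

int
main (void)
{
  struct chunk a = { .id = 1 }, b = { .id = 2 };
  push (&a);
  push (&b);
  for (struct chunk *c = atomic_load (&bin_top); c != NULL; c = c->fd)
    printf ("chunk %d\n", c->id);
  return 0;
}

Note that the _int_free change keeps the double-free check on both the
single-threaded and the CAS path; only the synchronisation differs.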

^ permalink raw reply	[flat|nested] 12+ messages in thread

* [PATCH 09/10] Add single-threaded path to malloc/realloc/calloc/memalloc
  2017-01-01  0:00 [PATCH 00/10][2.26] Malloc fixes and improvements Siddhesh Poyarekar
                   ` (6 preceding siblings ...)
  2017-01-01  0:00 ` [PATCH 06/10] Fix deadlock in _int_free consistency check Siddhesh Poyarekar
@ 2017-01-01  0:00 ` Siddhesh Poyarekar
  2017-01-01  0:00 ` [PATCH 10/10] Add single-threaded path to _int_malloc Siddhesh Poyarekar
                   ` (2 subsequent siblings)
  10 siblings, 0 replies; 12+ messages in thread
From: Siddhesh Poyarekar @ 2017-01-01  0:00 UTC (permalink / raw)
  To: libc-stable; +Cc: Wilco Dijkstra

From: Wilco Dijkstra <wdijkstr@arm.com>

This patch adds a single-threaded fast path to malloc, realloc,
calloc and memalloc.  When we're single-threaded, we can bypass
arena_get (which always locks the arena it returns) and just use
the main arena.  Also avoid retrying a different arena since
there is just the main arena.

	* malloc/malloc.c (__libc_malloc): Add SINGLE_THREAD_P path.
	(__libc_realloc): Likewise.
	(_mid_memalign): Likewise.
	(__libc_calloc): Likewise.

(cherry-picked 3f6bb8a32e5f5efd78ac08c41e623651cc242a89)
---
 ChangeLog       |  7 +++++++
 malloc/malloc.c | 50 +++++++++++++++++++++++++++++++++++++++++---------
 2 files changed, 48 insertions(+), 9 deletions(-)

diff --git a/ChangeLog b/ChangeLog
index 06da839..75aa92c 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,10 @@
+2017-10-23  Wilco Dijkstra  <wdijkstr@arm.com>
+
+	* malloc/malloc.c (__libc_malloc): Add SINGLE_THREAD_P path.
+	(__libc_realloc): Likewise.
+	(_mid_memalign): Likewise.
+	(__libc_calloc): Likewise.
+
 2017-10-20  Wilco Dijkstra  <wdijkstr@arm.com>
 
 	* malloc/malloc.c (sysdep-cancel.h): Add include.
diff --git a/malloc/malloc.c b/malloc/malloc.c
index 236ded8..f8495f3 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -3045,6 +3045,14 @@ __libc_malloc (size_t bytes)
   DIAG_POP_NEEDS_COMMENT;
 #endif
 
+  if (SINGLE_THREAD_P)
+    {
+      victim = _int_malloc (&main_arena, bytes);
+      assert (!victim || chunk_is_mmapped (mem2chunk (victim)) ||
+	      &main_arena == arena_for_chunk (mem2chunk (victim)));
+      return victim;
+    }
+
   arena_get (ar_ptr, bytes);
 
   victim = _int_malloc (ar_ptr, bytes);
@@ -3201,6 +3209,15 @@ __libc_realloc (void *oldmem, size_t bytes)
       return newmem;
     }
 
+  if (SINGLE_THREAD_P)
+    {
+      newp = _int_realloc (ar_ptr, oldp, oldsize, nb);
+      assert (!newp || chunk_is_mmapped (mem2chunk (newp)) ||
+	      ar_ptr == arena_for_chunk (mem2chunk (newp)));
+
+      return newp;
+    }
+
   __libc_lock_lock (ar_ptr->mutex);
 
   newp = _int_realloc (ar_ptr, oldp, oldsize, nb);
@@ -3276,6 +3293,15 @@ _mid_memalign (size_t alignment, size_t bytes, void *address)
       alignment = a;
     }
 
+  if (SINGLE_THREAD_P)
+    {
+      p = _int_memalign (&main_arena, alignment, bytes);
+      assert (!p || chunk_is_mmapped (mem2chunk (p)) ||
+	      &main_arena == arena_for_chunk (mem2chunk (p)));
+
+      return p;
+    }
+
   arena_get (ar_ptr, bytes + alignment + MINSIZE);
 
   p = _int_memalign (ar_ptr, alignment, bytes);
@@ -3368,7 +3394,11 @@ __libc_calloc (size_t n, size_t elem_size)
 
   MAYBE_INIT_TCACHE ();
 
-  arena_get (av, sz);
+  if (SINGLE_THREAD_P)
+    av = &main_arena;
+  else
+    arena_get (av, sz);
+
   if (av)
     {
       /* Check if we hand out the top chunk, in which case there may be no
@@ -3398,19 +3428,21 @@ __libc_calloc (size_t n, size_t elem_size)
     }
   mem = _int_malloc (av, sz);
 
-
   assert (!mem || chunk_is_mmapped (mem2chunk (mem)) ||
           av == arena_for_chunk (mem2chunk (mem)));
 
-  if (mem == 0 && av != NULL)
+  if (!SINGLE_THREAD_P)
     {
-      LIBC_PROBE (memory_calloc_retry, 1, sz);
-      av = arena_get_retry (av, sz);
-      mem = _int_malloc (av, sz);
-    }
+      if (mem == 0 && av != NULL)
+	{
+	  LIBC_PROBE (memory_calloc_retry, 1, sz);
+	  av = arena_get_retry (av, sz);
+	  mem = _int_malloc (av, sz);
+	}
 
-  if (av != NULL)
-    __libc_lock_unlock (av->mutex);
+      if (av != NULL)
+	__libc_lock_unlock (av->mutex);
+    }
 
   /* Allocation failed even after a retry.  */
   if (mem == 0)
-- 
2.7.5
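
For illustration only (not glibc code; the names here are made up): where
patch 07 avoided the atomic operations, this patch avoids taking the arena
mutex at all on the public entry points.  A minimal sketch of the wrapper
shape, again with a plain flag standing in for SINGLE_THREAD_P:

/* st-wrapper.c: lock the arena around the internal allocator only when
   another thread may exist.  Build: gcc -pthread st-wrapper.c  */
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdlib.h>

struct arena { pthread_mutex_t mutex; };
static struct arena main_arena = { PTHREAD_MUTEX_INITIALIZER };
static bool single_threaded = true;    /* stand-in for SINGLE_THREAD_P */

/* Stand-in for _int_malloc; assumed to do no locking of its own.  */
static void *
internal_malloc (struct arena *av, size_t bytes)
{
  (void) av;
  return malloc (bytes);
}

static void *
api_malloc (size_t bytes)
{
  if (single_threaded)
    /* No other thread exists, so nothing can race on main_arena.  */
    return internal_malloc (&main_arena, bytes);

  pthread_mutex_lock (&main_arena.mutex);
  void *p = internal_malloc (&main_arena, bytes);
  pthread_mutex_unlock (&main_arena.mutex);
  return p;
}

int
main (void)
{
  free (api_malloc (64));
  return 0;
}

The retry with a different arena is skipped on this path for the same
reason: in a single-threaded process the main arena is the only one in
use.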

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 00/10][2.26] Malloc fixes and improvements
  2017-01-01  0:00 [PATCH 00/10][2.26] Malloc fixes and improvements Siddhesh Poyarekar
                   ` (8 preceding siblings ...)
  2017-01-01  0:00 ` [PATCH 10/10] Add single-threaded path to _int_malloc Siddhesh Poyarekar
@ 2017-01-01  0:00 ` Siddhesh Poyarekar
  2017-01-01  0:00 ` [PATCH 04/10] malloc: Change top_check return type to void Siddhesh Poyarekar
  10 siblings, 0 replies; 12+ messages in thread
From: Siddhesh Poyarekar @ 2017-01-01  0:00 UTC (permalink / raw)
  To: libc-stable

This should have read as [committed].

Siddhesh

On Tuesday 28 November 2017 07:39 PM, Siddhesh Poyarekar wrote:
> Hi,
> 
> This series backports the fixes Florian made to malloc to avoid tripping
> over one's own heap corruption, and the performance improvements Wilco
> made to make single-threaded programs faster.
> 
> Siddhesh
> 
> Florian Weimer (5):
>   malloc: Abort on heap corruption, without a backtrace [BZ #21754]
>   malloc: Remove check_action variable [BZ #21754]
>   malloc: Remove corrupt arena flag
>   malloc: Change top_check return type to void
>   malloc: Resolve compilation failure in NDEBUG mode
> 
> Wilco Dijkstra (5):
>   Fix deadlock in _int_free consistency check
>   Add single-threaded path to _int_free
>   Fix build issue with SINGLE_THREAD_P
>   Add single-threaded path to malloc/realloc/calloc/memalloc
>   Add single-threaded path to _int_malloc
> 
>  ChangeLog            |  79 +++++++++++
>  NEWS                 |  10 ++
>  malloc/arena.c       |  31 +----
>  malloc/hooks.c       |  81 +++--------
>  malloc/malloc.c      | 371 +++++++++++++++++++++------------------------------
>  manual/memory.texi   |  21 ++-
>  manual/probes.texi   |   7 -
>  manual/tunables.texi |  28 +---
>  8 files changed, 280 insertions(+), 348 deletions(-)
> 

^ permalink raw reply	[flat|nested] 12+ messages in thread

* [PATCH 00/10][2.26] Malloc fixes and improvements
@ 2017-01-01  0:00 Siddhesh Poyarekar
  2017-01-01  0:00 ` [PATCH 08/10] Fix build issue with SINGLE_THREAD_P Siddhesh Poyarekar
                   ` (10 more replies)
  0 siblings, 11 replies; 12+ messages in thread
From: Siddhesh Poyarekar @ 2017-01-01  0:00 UTC (permalink / raw)
  To: libc-stable

Hi,

This series backports the fixes Florian made to malloc to avoid tripping
over one's own heap corruption, and the performance improvements Wilco
made to make single-threaded programs faster.

Siddhesh

Florian Weimer (5):
  malloc: Abort on heap corruption, without a backtrace [BZ #21754]
  malloc: Remove check_action variable [BZ #21754]
  malloc: Remove corrupt arena flag
  malloc: Change top_check return type to void
  malloc: Resolve compilation failure in NDEBUG mode

Wilco Dijkstra (5):
  Fix deadlock in _int_free consistency check
  Add single-threaded path to _int_free
  Fix build issue with SINGLE_THREAD_P
  Add single-threaded path to malloc/realloc/calloc/memalloc
  Add single-threaded path to _int_malloc

 ChangeLog            |  79 +++++++++++
 NEWS                 |  10 ++
 malloc/arena.c       |  31 +----
 malloc/hooks.c       |  81 +++--------
 malloc/malloc.c      | 371 +++++++++++++++++++++------------------------------
 manual/memory.texi   |  21 ++-
 manual/probes.texi   |   7 -
 manual/tunables.texi |  28 +---
 8 files changed, 280 insertions(+), 348 deletions(-)

-- 
2.7.5

^ permalink raw reply	[flat|nested] 12+ messages in thread

* [PATCH 03/10] malloc: Remove corrupt arena flag
  2017-01-01  0:00 [PATCH 00/10][2.26] Malloc fixes and improvements Siddhesh Poyarekar
                   ` (3 preceding siblings ...)
  2017-01-01  0:00 ` [PATCH 05/10] malloc: Resolve compilation failure in NDEBUG mode Siddhesh Poyarekar
@ 2017-01-01  0:00 ` Siddhesh Poyarekar
  2017-01-01  0:00 ` [PATCH 07/10] Add single-threaded path to _int_free Siddhesh Poyarekar
                   ` (5 subsequent siblings)
  10 siblings, 0 replies; 12+ messages in thread
From: Siddhesh Poyarekar @ 2017-01-01  0:00 UTC (permalink / raw)
  To: libc-stable; +Cc: Florian Weimer

From: Florian Weimer <fweimer@redhat.com>

This is no longer needed because we now abort immediately
once heap corruption is detected.

(cherry-picked from a9da0bb2667ab20f1dbcd0a9ae6846db02fbc96a)
---
 ChangeLog       |  8 ++++++++
 malloc/arena.c  | 20 ++------------------
 malloc/malloc.c | 13 -------------
 3 files changed, 10 insertions(+), 31 deletions(-)

diff --git a/ChangeLog b/ChangeLog
index 7ab9222..577643c 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,5 +1,13 @@
 2017-08-30  Florian Weimer  <fweimer@redhat.com>
 
+	* malloc/malloc.c (ARENA_CORRUPTION_BIT, arena_is_corrupt)
+	(set_arena_corrupt): Remove definitions.
+	(mtrim): Do not check for corrupt arena.
+	* malloc/arena.c (arena_lock, reused_arena, arena_get_retry):
+	Likewise.
+
+2017-08-30  Florian Weimer  <fweimer@redhat.com>
+
 	[BZ #21754]
 	* malloc/arena.c (TUNABLE_CALLBACK set_mallopt_check): Do not set
 	check_action.
diff --git a/malloc/arena.c b/malloc/arena.c
index 39cbfbc..afd4232 100644
--- a/malloc/arena.c
+++ b/malloc/arena.c
@@ -116,7 +116,7 @@ int __malloc_initialized = -1;
   } while (0)
 
 #define arena_lock(ptr, size) do {					      \
-      if (ptr && !arena_is_corrupt (ptr))				      \
+      if (ptr)								      \
         __libc_lock_lock (ptr->mutex);					      \
       else								      \
         ptr = arena_get2 ((size), NULL);				      \
@@ -832,7 +832,7 @@ reused_arena (mstate avoid_arena)
   result = next_to_use;
   do
     {
-      if (!arena_is_corrupt (result) && !__libc_lock_trylock (result->mutex))
+      if (!__libc_lock_trylock (result->mutex))
         goto out;
 
       /* FIXME: This is a data race, see _int_new_arena.  */
@@ -845,18 +845,6 @@ reused_arena (mstate avoid_arena)
   if (result == avoid_arena)
     result = result->next;
 
-  /* Make sure that the arena we get is not corrupted.  */
-  mstate begin = result;
-  while (arena_is_corrupt (result) || result == avoid_arena)
-    {
-      result = result->next;
-      if (result == begin)
-	/* We looped around the arena list.  We could not find any
-	   arena that was either not corrupted or not the one we
-	   wanted to avoid.  */
-	return NULL;
-    }
-
   /* No arena available without contention.  Wait for the next in line.  */
   LIBC_PROBE (memory_arena_reuse_wait, 3, &result->mutex, result, avoid_arena);
   __libc_lock_lock (result->mutex);
@@ -953,10 +941,6 @@ arena_get_retry (mstate ar_ptr, size_t bytes)
   if (ar_ptr != &main_arena)
     {
       __libc_lock_unlock (ar_ptr->mutex);
-      /* Don't touch the main arena if it is corrupt.  */
-      if (arena_is_corrupt (&main_arena))
-	return NULL;
-
       ar_ptr = &main_arena;
       __libc_lock_lock (ar_ptr->mutex);
     }
diff --git a/malloc/malloc.c b/malloc/malloc.c
index 7a90fda..65deb2f 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -1626,15 +1626,6 @@ typedef struct malloc_chunk *mfastbinptr;
 #define set_noncontiguous(M)   ((M)->flags |= NONCONTIGUOUS_BIT)
 #define set_contiguous(M)      ((M)->flags &= ~NONCONTIGUOUS_BIT)
 
-/* ARENA_CORRUPTION_BIT is set if a memory corruption was detected on the
-   arena.  Such an arena is no longer used to allocate chunks.  Chunks
-   allocated in that arena before detecting corruption are not freed.  */
-
-#define ARENA_CORRUPTION_BIT (4U)
-
-#define arena_is_corrupt(A)	(((A)->flags & ARENA_CORRUPTION_BIT))
-#define set_arena_corrupt(A)	((A)->flags |= ARENA_CORRUPTION_BIT)
-
 /* Maximum size of memory handled in fastbins.  */
 static INTERNAL_SIZE_T global_max_fast;
 
@@ -4718,10 +4709,6 @@ _int_memalign (mstate av, size_t alignment, size_t bytes)
 static int
 mtrim (mstate av, size_t pad)
 {
-  /* Don't touch corrupt arenas.  */
-  if (arena_is_corrupt (av))
-    return 0;
-
   /* Ensure initialization/consolidation */
   malloc_consolidate (av);
 
-- 
2.7.5

^ permalink raw reply	[flat|nested] 12+ messages in thread

* [PATCH 08/10] Fix build issue with SINGLE_THREAD_P
  2017-01-01  0:00 [PATCH 00/10][2.26] Malloc fixes and improvements Siddhesh Poyarekar
@ 2017-01-01  0:00 ` Siddhesh Poyarekar
  2017-01-01  0:00 ` [PATCH 02/10] malloc: Remove check_action variable [BZ #21754] Siddhesh Poyarekar
                   ` (9 subsequent siblings)
  10 siblings, 0 replies; 12+ messages in thread
From: Siddhesh Poyarekar @ 2017-01-01  0:00 UTC (permalink / raw)
  To: libc-stable; +Cc: Wilco Dijkstra

From: Wilco Dijkstra <wdijkstr@arm.com>

Add sysdep-cancel.h include.

	* malloc/malloc.c (sysdep-cancel.h): Add include.

(cherry-picked 6d43de4b85b11d26a19bebe4f55f31be16e3d419)
---
 ChangeLog       | 4 ++++
 malloc/malloc.c | 3 +++
 2 files changed, 7 insertions(+)

diff --git a/ChangeLog b/ChangeLog
index 30e6f50..06da839 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,5 +1,9 @@
 2017-10-20  Wilco Dijkstra  <wdijkstr@arm.com>
 
+	* malloc/malloc.c (sysdep-cancel.h): Add include.
+
+2017-10-20  Wilco Dijkstra  <wdijkstr@arm.com>
+
 	* malloc/malloc.c (_int_free): Add SINGLE_THREAD_P fast paths.
 
 2017-10-19  Wilco Dijkstra  <wdijkstr@arm.com>
diff --git a/malloc/malloc.c b/malloc/malloc.c
index 78676a6..236ded8 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -243,6 +243,9 @@
 
 #include <malloc/malloc-internal.h>
 
+/* For SINGLE_THREAD_P.  */
+#include <sysdep-cancel.h>
+
 /*
   Debugging:
 
-- 
2.7.5

^ permalink raw reply	[flat|nested] 12+ messages in thread

* [PATCH 02/10] malloc: Remove check_action variable [BZ #21754]
  2017-01-01  0:00 [PATCH 00/10][2.26] Malloc fixes and improvements Siddhesh Poyarekar
  2017-01-01  0:00 ` [PATCH 08/10] Fix build issue with SINGLE_THREAD_P Siddhesh Poyarekar
@ 2017-01-01  0:00 ` Siddhesh Poyarekar
  2017-01-01  0:00 ` [PATCH 01/10] malloc: Abort on heap corruption, without a backtrace " Siddhesh Poyarekar
                   ` (8 subsequent siblings)
  10 siblings, 0 replies; 12+ messages in thread
From: Siddhesh Poyarekar @ 2017-01-01  0:00 UTC (permalink / raw)
  To: libc-stable; +Cc: Florian Weimer

From: Florian Weimer <fweimer@redhat.com>

Clean up calls to malloc_printerr and trim its argument list.

This also removes a few bits of work done before calling
malloc_printerr (such as unlocking operations).

The tunable/environment variable still enables the lightweight
additional malloc checking, but mallopt (M_CHECK_ACTION)
no longer has any effect.

(cherry-picked from ac3ed168d0c0b2b702319ac0db72c9b475a8c72e)
---
 ChangeLog          |  27 ++++++++++
 malloc/arena.c     |  11 ++--
 malloc/hooks.c     |  55 ++-----------------
 malloc/malloc.c    | 152 +++++++++++------------------------------------------
 manual/memory.texi |   1 -
 manual/probes.texi |   7 ---
 6 files changed, 65 insertions(+), 188 deletions(-)

diff --git a/ChangeLog b/ChangeLog
index 7a35bff..7ab9222 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,6 +1,33 @@
 2017-08-30  Florian Weimer  <fweimer@redhat.com>
 
 	[BZ #21754]
+	* malloc/arena.c (TUNABLE_CALLBACK set_mallopt_check): Do not set
+	check_action.
+	(ptmalloc_init): Do not set or use check_action.
+	* malloc/hooks.c (malloc_check_get_size, realloc_check): Adjust
+	call to malloc_printerr.  Remove return statement.
+	(free_check): Likewise.  Remove arena unlock.
+	(top_check): Update comment.  Adjust call to malloc_printerr.
+	Remove heap repair code.
+	* malloc/malloc.c (unlink): Adjust calls to malloc_printerr.
+	(DEFAULT_CHECK_ACTION, check_action): Remove definitions.
+	(sysmalloc): Adjust call to malloc_printerr.
+	(munmap_chunk, __libc_realloc): Likewise.  Remove return
+	statement.
+	(_int_malloc, int_realloc): Likewise.  Remove errstr variable.
+	Remove errout label and corresponding gotos.
+	(_int_free): Likewise.  Remove arena unlock.
+	(do_set_mallopt_check): Do not set check_action.
+	(malloc_printerr): Adjust parameter list.  Do not mark arena as
+	corrupt.
+	* manual/memory.texi (Malloc Tunable Parameters): Remove TODO
+	comment.
+	* manual/probes.texi (Memory Allocation Probes): Remove
+	memory_mallopt_check_action.
+
+2017-08-30  Florian Weimer  <fweimer@redhat.com>
+
+	[BZ #21754]
 	* malloc/malloc.c (malloc_printerr): Always terminate the process,
 	without printing a backtrace.  Do not leak any information in the
 	error message.
diff --git a/malloc/arena.c b/malloc/arena.c
index dc14fae..39cbfbc 100644
--- a/malloc/arena.c
+++ b/malloc/arena.c
@@ -215,8 +215,7 @@ void
 TUNABLE_CALLBACK (set_mallopt_check) (tunable_val_t *valp)
 {
   int32_t value = (int32_t) valp->numval;
-  do_set_mallopt_check (value);
-  if (check_action != 0)
+  if (value != 0)
     __malloc_check_init ();
 }
 
@@ -397,12 +396,8 @@ ptmalloc_init (void)
             }
         }
     }
-  if (s && s[0])
-    {
-      __libc_mallopt (M_CHECK_ACTION, (int) (s[0] - '0'));
-      if (check_action != 0)
-        __malloc_check_init ();
-    }
+  if (s && s[0] != '\0' && s[0] != '0')
+    __malloc_check_init ();
 #endif
 
 #if HAVE_MALLOC_INIT_HOOK
diff --git a/malloc/hooks.c b/malloc/hooks.c
index 1d80be2..dcd311e 100644
--- a/malloc/hooks.c
+++ b/malloc/hooks.c
@@ -121,12 +121,7 @@ malloc_check_get_size (mchunkptr p)
        size -= c)
     {
       if (c <= 0 || size < (c + 2 * SIZE_SZ))
-        {
-          malloc_printerr (check_action, "malloc_check_get_size: memory corruption",
-                           chunk2mem (p),
-			   chunk_is_mmapped (p) ? NULL : arena_for_chunk (p));
-          return 0;
-        }
+	malloc_printerr ("malloc_check_get_size: memory corruption");
     }
 
   /* chunk2mem size.  */
@@ -232,17 +227,12 @@ mem2chunk_check (void *mem, unsigned char **magic_p)
   return p;
 }
 
-/* Check for corruption of the top chunk, and try to recover if
-   necessary. */
-
+/* Check for corruption of the top chunk.  */
 static int
 internal_function
 top_check (void)
 {
   mchunkptr t = top (&main_arena);
-  char *brk, *new_brk;
-  INTERNAL_SIZE_T front_misalign, sbrk_size;
-  unsigned long pagesz = GLRO (dl_pagesize);
 
   if (t == initial_top (&main_arena) ||
       (!chunk_is_mmapped (t) &&
@@ -252,32 +242,7 @@ top_check (void)
         (char *) t + chunksize (t) == mp_.sbrk_base + main_arena.system_mem)))
     return 0;
 
-  malloc_printerr (check_action, "malloc: top chunk is corrupt", t,
-		   &main_arena);
-
-  /* Try to set up a new top chunk. */
-  brk = MORECORE (0);
-  front_misalign = (unsigned long) chunk2mem (brk) & MALLOC_ALIGN_MASK;
-  if (front_misalign > 0)
-    front_misalign = MALLOC_ALIGNMENT - front_misalign;
-  sbrk_size = front_misalign + mp_.top_pad + MINSIZE;
-  sbrk_size += pagesz - ((unsigned long) (brk + sbrk_size) & (pagesz - 1));
-  new_brk = (char *) (MORECORE (sbrk_size));
-  if (new_brk == (char *) (MORECORE_FAILURE))
-    {
-      __set_errno (ENOMEM);
-      return -1;
-    }
-  /* Call the `morecore' hook if necessary.  */
-  void (*hook) (void) = atomic_forced_read (__after_morecore_hook);
-  if (hook)
-    (*hook)();
-  main_arena.system_mem = (new_brk - mp_.sbrk_base) + sbrk_size;
-
-  top (&main_arena) = (mchunkptr) (brk + front_misalign);
-  set_head (top (&main_arena), (sbrk_size - front_misalign) | PREV_INUSE);
-
-  return 0;
+  malloc_printerr ("malloc: top chunk is corrupt");
 }
 
 static void *
@@ -308,13 +273,7 @@ free_check (void *mem, const void *caller)
   __libc_lock_lock (main_arena.mutex);
   p = mem2chunk_check (mem, NULL);
   if (!p)
-    {
-      __libc_lock_unlock (main_arena.mutex);
-
-      malloc_printerr (check_action, "free(): invalid pointer", mem,
-		       &main_arena);
-      return;
-    }
+    malloc_printerr ("free(): invalid pointer");
   if (chunk_is_mmapped (p))
     {
       __libc_lock_unlock (main_arena.mutex);
@@ -349,11 +308,7 @@ realloc_check (void *oldmem, size_t bytes, const void *caller)
   const mchunkptr oldp = mem2chunk_check (oldmem, &magic_p);
   __libc_lock_unlock (main_arena.mutex);
   if (!oldp)
-    {
-      malloc_printerr (check_action, "realloc(): invalid pointer", oldmem,
-		       &main_arena);
-      return malloc_check (bytes, NULL);
-    }
+    malloc_printerr ("realloc(): invalid pointer");
   const INTERNAL_SIZE_T oldsize = chunksize (oldp);
 
   checked_request2size (bytes + 1, nb);
diff --git a/malloc/malloc.c b/malloc/malloc.c
index c91fc09..7a90fda 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -1019,8 +1019,7 @@ static void*  _int_realloc(mstate, mchunkptr, INTERNAL_SIZE_T,
 static void*  _int_memalign(mstate, size_t, size_t);
 static void*  _mid_memalign(size_t, size_t, void *);
 
-static void malloc_printerr(int action, const char *str, void *ptr, mstate av)
-  __attribute__ ((noreturn));
+static void malloc_printerr(const char *str) __attribute__ ((noreturn));
 
 static void* internal_function mem2mem_check(void *p, size_t sz);
 static int internal_function top_check(void);
@@ -1404,11 +1403,11 @@ typedef struct malloc_chunk *mbinptr;
 /* Take a chunk off a bin list */
 #define unlink(AV, P, BK, FD) {                                            \
     if (__builtin_expect (chunksize(P) != prev_size (next_chunk(P)), 0))      \
-      malloc_printerr (check_action, "corrupted size vs. prev_size", P, AV);  \
+      malloc_printerr ("corrupted size vs. prev_size");			      \
     FD = P->fd;								      \
     BK = P->bk;								      \
     if (__builtin_expect (FD->bk != P || BK->fd != P, 0))		      \
-      malloc_printerr (check_action, "corrupted double-linked list", P, AV);  \
+      malloc_printerr ("corrupted double-linked list");			      \
     else {								      \
         FD->bk = BK;							      \
         BK->fd = FD;							      \
@@ -1416,9 +1415,7 @@ typedef struct malloc_chunk *mbinptr;
             && __builtin_expect (P->fd_nextsize != NULL, 0)) {		      \
 	    if (__builtin_expect (P->fd_nextsize->bk_nextsize != P, 0)	      \
 		|| __builtin_expect (P->bk_nextsize->fd_nextsize != P, 0))    \
-	      malloc_printerr (check_action,				      \
-			       "corrupted double-linked list (not small)",    \
-			       P, AV);					      \
+	      malloc_printerr ("corrupted double-linked list (not small)");   \
             if (FD->fd_nextsize == NULL) {				      \
                 if (P->fd_nextsize == P)				      \
                   FD->fd_nextsize = FD->bk_nextsize = FD;		      \
@@ -1887,15 +1884,6 @@ void *weak_variable (*__memalign_hook)
 void weak_variable (*__after_morecore_hook) (void) = NULL;
 
 
-/* ---------------- Error behavior ------------------------------------ */
-
-#ifndef DEFAULT_CHECK_ACTION
-# define DEFAULT_CHECK_ACTION 3
-#endif
-
-static int check_action = DEFAULT_CHECK_ACTION;
-
-
 /* ------------------ Testing support ----------------------------------*/
 
 static int perturb_byte;
@@ -2568,11 +2556,8 @@ sysmalloc (INTERNAL_SIZE_T nb, mstate av)
             set_head (old_top, (size + old_size) | PREV_INUSE);
 
           else if (contiguous (av) && old_size && brk < old_end)
-            {
-              /* Oops!  Someone else killed our space..  Can't touch anything.  */
-              malloc_printerr (3, "break adjusted to free malloc space", brk,
-			       av);
-            }
+	    /* Oops!  Someone else killed our space..  Can't touch anything.  */
+	    malloc_printerr ("break adjusted to free malloc space");
 
           /*
              Otherwise, make adjustments:
@@ -2863,11 +2848,7 @@ munmap_chunk (mchunkptr p)
      (in the moment at least) so we combine the two values into one before
      the bit test.  */
   if (__builtin_expect (((block | total_size) & (GLRO (dl_pagesize) - 1)) != 0, 0))
-    {
-      malloc_printerr (check_action, "munmap_chunk(): invalid pointer",
-                       chunk2mem (p), NULL);
-      return;
-    }
+    malloc_printerr ("munmap_chunk(): invalid pointer");
 
   atomic_decrement (&mp_.n_mmaps);
   atomic_add (&mp_.mmapped_mem, -total_size);
@@ -3181,11 +3162,7 @@ __libc_realloc (void *oldmem, size_t bytes)
   if ((__builtin_expect ((uintptr_t) oldp > (uintptr_t) -oldsize, 0)
        || __builtin_expect (misaligned_chunk (oldp), 0))
       && !DUMPED_MAIN_ARENA_CHUNK (oldp))
-    {
-      malloc_printerr (check_action, "realloc(): invalid pointer", oldmem,
-		       ar_ptr);
-      return NULL;
-    }
+      malloc_printerr ("realloc(): invalid pointer");
 
   checked_request2size (bytes, nb);
 
@@ -3531,8 +3508,6 @@ _int_malloc (mstate av, size_t bytes)
   size_t tcache_unsorted_count;	    /* count of unsorted chunks processed */
 #endif
 
-  const char *errstr = NULL;
-
   /*
      Convert request size to internal form by adding SIZE_SZ bytes
      overhead plus possibly more to obtain necessary alignment and/or
@@ -3579,12 +3554,7 @@ _int_malloc (mstate av, size_t bytes)
       if (victim != 0)
         {
           if (__builtin_expect (fastbin_index (chunksize (victim)) != idx, 0))
-            {
-              errstr = "malloc(): memory corruption (fast)";
-            errout:
-              malloc_printerr (check_action, errstr, chunk2mem (victim), av);
-              return NULL;
-            }
+	    malloc_printerr ("malloc(): memory corruption (fast)");
           check_remalloced_chunk (av, victim, nb);
 #if USE_TCACHE
 	  /* While we're here, if we see other chunks of the same size,
@@ -3632,11 +3602,9 @@ _int_malloc (mstate av, size_t bytes)
           else
             {
               bck = victim->bk;
-	if (__glibc_unlikely (bck->fd != victim))
-                {
-                  errstr = "malloc(): smallbin double linked list corrupted";
-                  goto errout;
-                }
+	      if (__glibc_unlikely (bck->fd != victim))
+		malloc_printerr
+		  ("malloc(): smallbin double linked list corrupted");
               set_inuse_bit_at_offset (victim, nb);
               bin->bk = bck;
               bck->fd = bin;
@@ -3727,8 +3695,7 @@ _int_malloc (mstate av, size_t bytes)
           if (__builtin_expect (chunksize_nomask (victim) <= 2 * SIZE_SZ, 0)
               || __builtin_expect (chunksize_nomask (victim)
 				   > av->system_mem, 0))
-            malloc_printerr (check_action, "malloc(): memory corruption",
-                             chunk2mem (victim), av);
+            malloc_printerr ("malloc(): memory corruption");
           size = chunksize (victim);
 
           /*
@@ -3933,11 +3900,8 @@ _int_malloc (mstate av, size_t bytes)
                      have to perform a complete insert here.  */
                   bck = unsorted_chunks (av);
                   fwd = bck->fd;
-	  if (__glibc_unlikely (fwd->bk != bck))
-                    {
-                      errstr = "malloc(): corrupted unsorted chunks";
-                      goto errout;
-                    }
+		  if (__glibc_unlikely (fwd->bk != bck))
+		    malloc_printerr ("malloc(): corrupted unsorted chunks");
                   remainder->bk = bck;
                   remainder->fd = fwd;
                   bck->fd = remainder;
@@ -4040,11 +4004,8 @@ _int_malloc (mstate av, size_t bytes)
                      have to perform a complete insert here.  */
                   bck = unsorted_chunks (av);
                   fwd = bck->fd;
-	  if (__glibc_unlikely (fwd->bk != bck))
-                    {
-                      errstr = "malloc(): corrupted unsorted chunks 2";
-                      goto errout;
-                    }
+		  if (__glibc_unlikely (fwd->bk != bck))
+		    malloc_printerr ("malloc(): corrupted unsorted chunks 2");
                   remainder->bk = bck;
                   remainder->fd = fwd;
                   bck->fd = remainder;
@@ -4145,7 +4106,6 @@ _int_free (mstate av, mchunkptr p, int have_lock)
   mchunkptr bck;               /* misc temp for linking */
   mchunkptr fwd;               /* misc temp for linking */
 
-  const char *errstr = NULL;
   int locked = 0;
 
   size = chunksize (p);
@@ -4156,21 +4116,11 @@ _int_free (mstate av, mchunkptr p, int have_lock)
      here by accident or by "design" from some intruder.  */
   if (__builtin_expect ((uintptr_t) p > (uintptr_t) -size, 0)
       || __builtin_expect (misaligned_chunk (p), 0))
-    {
-      errstr = "free(): invalid pointer";
-    errout:
-      if (!have_lock && locked)
-        __libc_lock_unlock (av->mutex);
-      malloc_printerr (check_action, errstr, chunk2mem (p), av);
-      return;
-    }
+    malloc_printerr ("free(): invalid pointer");
   /* We know that each chunk is at least MINSIZE bytes in size or a
      multiple of MALLOC_ALIGNMENT.  */
   if (__glibc_unlikely (size < MINSIZE || !aligned_OK (size)))
-    {
-      errstr = "free(): invalid size";
-      goto errout;
-    }
+    malloc_printerr ("free(): invalid size");
 
   check_inuse_chunk(av, p);
 
@@ -4219,10 +4169,7 @@ _int_free (mstate av, mchunkptr p, int have_lock)
 		  chunksize_nomask (chunk_at_offset (p, size)) <= 2 * SIZE_SZ
 		    || chunksize (chunk_at_offset (p, size)) >= av->system_mem;
 	      }))
-	  {
-	    errstr = "free(): invalid next size (fast)";
-	    goto errout;
-	  }
+	  malloc_printerr ("free(): invalid next size (fast)");
 	if (! have_lock)
 	  {
 	    __libc_lock_unlock (av->mutex);
@@ -4244,10 +4191,7 @@ _int_free (mstate av, mchunkptr p, int have_lock)
 	/* Check that the top of the bin is not the record we are going to add
 	   (i.e., double free).  */
 	if (__builtin_expect (old == p, 0))
-	  {
-	    errstr = "double free or corruption (fasttop)";
-	    goto errout;
-	  }
+	  malloc_printerr ("double free or corruption (fasttop)");
 	/* Check that size of fastbin chunk at the top is the same as
 	   size of the chunk that we are adding.  We can dereference OLD
 	   only if we have the lock, otherwise it might have already been
@@ -4259,10 +4203,7 @@ _int_free (mstate av, mchunkptr p, int have_lock)
     while ((old = catomic_compare_and_exchange_val_rel (fb, p, old2)) != old2);
 
     if (have_lock && old != NULL && __builtin_expect (old_idx != idx, 0))
-      {
-	errstr = "invalid fastbin entry (free)";
-	goto errout;
-      }
+      malloc_printerr ("invalid fastbin entry (free)");
   }
 
   /*
@@ -4280,32 +4221,20 @@ _int_free (mstate av, mchunkptr p, int have_lock)
     /* Lightweight tests: check whether the block is already the
        top block.  */
     if (__glibc_unlikely (p == av->top))
-      {
-	errstr = "double free or corruption (top)";
-	goto errout;
-      }
+      malloc_printerr ("double free or corruption (top)");
     /* Or whether the next chunk is beyond the boundaries of the arena.  */
     if (__builtin_expect (contiguous (av)
 			  && (char *) nextchunk
 			  >= ((char *) av->top + chunksize(av->top)), 0))
-      {
-	errstr = "double free or corruption (out)";
-	goto errout;
-      }
+	malloc_printerr ("double free or corruption (out)");
     /* Or whether the block is actually not marked used.  */
     if (__glibc_unlikely (!prev_inuse(nextchunk)))
-      {
-	errstr = "double free or corruption (!prev)";
-	goto errout;
-      }
+      malloc_printerr ("double free or corruption (!prev)");
 
     nextsize = chunksize(nextchunk);
     if (__builtin_expect (chunksize_nomask (nextchunk) <= 2 * SIZE_SZ, 0)
 	|| __builtin_expect (nextsize >= av->system_mem, 0))
-      {
-	errstr = "free(): invalid next size (normal)";
-	goto errout;
-      }
+      malloc_printerr ("free(): invalid next size (normal)");
 
     free_perturb (chunk2mem(p), size - 2 * SIZE_SZ);
 
@@ -4337,10 +4266,7 @@ _int_free (mstate av, mchunkptr p, int have_lock)
       bck = unsorted_chunks(av);
       fwd = bck->fd;
       if (__glibc_unlikely (fwd->bk != bck))
-	{
-	  errstr = "free(): corrupted unsorted chunks";
-	  goto errout;
-	}
+	malloc_printerr ("free(): corrupted unsorted chunks");
       p->fd = fwd;
       p->bk = bck;
       if (!in_smallbin_range(size))
@@ -4553,17 +4479,10 @@ _int_realloc(mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
   INTERNAL_SIZE_T* s;               /* copy source */
   INTERNAL_SIZE_T* d;               /* copy destination */
 
-  const char *errstr = NULL;
-
   /* oldmem size */
   if (__builtin_expect (chunksize_nomask (oldp) <= 2 * SIZE_SZ, 0)
       || __builtin_expect (oldsize >= av->system_mem, 0))
-    {
-      errstr = "realloc(): invalid old size";
-    errout:
-      malloc_printerr (check_action, errstr, chunk2mem (oldp), av);
-      return NULL;
-    }
+    malloc_printerr ("realloc(): invalid old size");
 
   check_inuse_chunk (av, oldp);
 
@@ -4574,10 +4493,7 @@ _int_realloc(mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
   INTERNAL_SIZE_T nextsize = chunksize (next);
   if (__builtin_expect (chunksize_nomask (next) <= 2 * SIZE_SZ, 0)
       || __builtin_expect (nextsize >= av->system_mem, 0))
-    {
-      errstr = "realloc(): invalid next size";
-      goto errout;
-    }
+    malloc_printerr ("realloc(): invalid next size");
 
   if ((unsigned long) (oldsize) >= (unsigned long) (nb))
     {
@@ -5117,8 +5033,6 @@ static inline int
 __always_inline
 do_set_mallopt_check (int32_t value)
 {
-  LIBC_PROBE (memory_mallopt_check_action, 2, value, check_action);
-  check_action = value;
   return 1;
 }
 
@@ -5392,14 +5306,8 @@ libc_hidden_def (__libc_mallopt)
 extern char **__libc_argv attribute_hidden;
 
 static void
-malloc_printerr (int action, const char *str, void *ptr, mstate ar_ptr)
+malloc_printerr (const char *str)
 {
-  /* Avoid using this arena in future.  We do not attempt to synchronize this
-     with anything else because we minimally want to ensure that __libc_message
-     gets its resources safely without stumbling on the current corruption.  */
-  if (ar_ptr)
-    set_arena_corrupt (ar_ptr);
-
   __libc_message (do_abort, "%s\n", str);
   __builtin_unreachable ();
 }
diff --git a/manual/memory.texi b/manual/memory.texi
index 13cce7a..51a5f4e 100644
--- a/manual/memory.texi
+++ b/manual/memory.texi
@@ -1104,7 +1104,6 @@ When calling @code{mallopt}, the @var{param} argument specifies the
 parameter to be set, and @var{value} the new value to be set.  Possible
 choices for @var{param}, as defined in @file{malloc.h}, are:
 
-@comment TODO: @item M_CHECK_ACTION
 @vtable @code
 @item M_MMAP_MAX
 The maximum number of chunks to allocate with @code{mmap}.  Setting this
diff --git a/manual/probes.texi b/manual/probes.texi
index 96acaed..8ab6756 100644
--- a/manual/probes.texi
+++ b/manual/probes.texi
@@ -195,13 +195,6 @@ this @code{malloc} parameter, and @var{$arg3} is nonzero if dynamic
 threshold adjustment was already disabled.
 @end deftp
 
-@deftp Probe memory_mallopt_check_action (int @var{$arg1}, int @var{$arg2})
-This probe is triggered shortly after the @code{memory_mallopt} probe,
-when the parameter to be changed is @code{M_CHECK_ACTION}.  Argument
-@var{$arg1} is the requested value, and @var{$arg2} is the previous
-value of this @code{malloc} parameter.
-@end deftp
-
 @deftp Probe memory_mallopt_perturb (int @var{$arg1}, int @var{$arg2})
 This probe is triggered shortly after the @code{memory_mallopt} probe,
 when the parameter to be changed is @code{M_PERTURB}.  Argument
-- 
2.7.5
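
For illustration only (not part of the patch): after this change,
mallopt (M_CHECK_ACTION) is still accepted for compatibility but has no
effect; the checking allocator is selected only at startup via
MALLOC_CHECK_ or the glibc.malloc.check tunable.  A small sketch of what
that means for callers:

/* check-action.c: M_CHECK_ACTION is now a no-op; malloc checking is
   enabled only from the environment at startup.
   Build: gcc check-action.c -o check-action
   Run:   MALLOC_CHECK_=1 ./check-action   (checking allocator selected)
          ./check-action                   (default allocator)  */
#include <malloc.h>
#include <stdio.h>

int
main (void)
{
  /* Still reports success, but no longer changes any behaviour.  */
  int r = mallopt (M_CHECK_ACTION, 2);
  printf ("mallopt (M_CHECK_ACTION, 2) = %d (accepted, now a no-op)\n", r);
  return 0;
}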

^ permalink raw reply	[flat|nested] 12+ messages in thread

* [PATCH 10/10] Add single-threaded path to _int_malloc
  2017-01-01  0:00 [PATCH 00/10][2.26] Malloc fixes and improvements Siddhesh Poyarekar
                   ` (7 preceding siblings ...)
  2017-01-01  0:00 ` [PATCH 09/10] Add single-threaded path to malloc/realloc/calloc/memalloc Siddhesh Poyarekar
@ 2017-01-01  0:00 ` Siddhesh Poyarekar
  2017-01-01  0:00 ` [PATCH 00/10][2.26] Malloc fixes and improvements Siddhesh Poyarekar
  2017-01-01  0:00 ` [PATCH 04/10] malloc: Change top_check return type to void Siddhesh Poyarekar
  10 siblings, 0 replies; 12+ messages in thread
From: Siddhesh Poyarekar @ 2017-01-01  0:00 UTC (permalink / raw)
  To: libc-stable; +Cc: Wilco Dijkstra

From: Wilco Dijkstra <wdijkstr@arm.com>

This patch adds single-threaded fast paths to _int_malloc.

	* malloc/malloc.c (_int_malloc): Add SINGLE_THREAD_P path.

(cherry-picked 905a7725e9157ea522d8ab97b4c8b96aeb23df54)
---
 ChangeLog       |  4 ++++
 malloc/malloc.c | 63 ++++++++++++++++++++++++++++++++++-----------------------
 2 files changed, 42 insertions(+), 25 deletions(-)

diff --git a/ChangeLog b/ChangeLog
index 75aa92c..1793816 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,5 +1,9 @@
 2017-10-23  Wilco Dijkstra  <wdijkstr@arm.com>
 
+	* malloc/malloc.c (_int_malloc): Add SINGLE_THREAD_P path.
+
+2017-10-23  Wilco Dijkstra  <wdijkstr@arm.com>
+
 	* malloc/malloc.c (__libc_malloc): Add SINGLE_THREAD_P path.
 	(__libc_realloc): Likewise.
 	(_mid_memalign): Likewise.
diff --git a/malloc/malloc.c b/malloc/malloc.c
index f8495f3..7783d05 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -3575,37 +3575,50 @@ _int_malloc (mstate av, size_t bytes)
     {
       idx = fastbin_index (nb);
       mfastbinptr *fb = &fastbin (av, idx);
-      mchunkptr pp = *fb;
-      REMOVE_FB (fb, victim, pp);
-      if (victim != 0)
-        {
-          if (__builtin_expect (fastbin_index (chunksize (victim)) != idx, 0))
-	    malloc_printerr ("malloc(): memory corruption (fast)");
-          check_remalloced_chunk (av, victim, nb);
-#if USE_TCACHE
-	  /* While we're here, if we see other chunks of the same size,
-	     stash them in the tcache.  */
-	  size_t tc_idx = csize2tidx (nb);
-	  if (tcache && tc_idx < mp_.tcache_bins)
+      mchunkptr pp;
+      victim = *fb;
+
+      if (victim != NULL)
+	{
+	  if (SINGLE_THREAD_P)
+	    *fb = victim->fd;
+	  else
+	    REMOVE_FB (fb, pp, victim);
+	  if (__glibc_likely (victim != NULL))
 	    {
-	      mchunkptr tc_victim;
-
-	      /* While bin not empty and tcache not full, copy chunks over.  */
-	      while (tcache->counts[tc_idx] < mp_.tcache_count
-		     && (pp = *fb) != NULL)
+	      size_t victim_idx = fastbin_index (chunksize (victim));
+	      if (__builtin_expect (victim_idx != idx, 0))
+		malloc_printerr ("malloc(): memory corruption (fast)");
+	      check_remalloced_chunk (av, victim, nb);
+#if USE_TCACHE
+	      /* While we're here, if we see other chunks of the same size,
+		 stash them in the tcache.  */
+	      size_t tc_idx = csize2tidx (nb);
+	      if (tcache && tc_idx < mp_.tcache_bins)
 		{
-		  REMOVE_FB (fb, tc_victim, pp);
-		  if (tc_victim != 0)
+		  mchunkptr tc_victim;
+
+		  /* While bin not empty and tcache not full, copy chunks.  */
+		  while (tcache->counts[tc_idx] < mp_.tcache_count
+			 && (tc_victim = *fb) != NULL)
 		    {
+		      if (SINGLE_THREAD_P)
+			*fb = tc_victim->fd;
+		      else
+			{
+			  REMOVE_FB (fb, pp, tc_victim);
+			  if (__glibc_unlikely (tc_victim == NULL))
+			    break;
+			}
 		      tcache_put (tc_victim, tc_idx);
-	            }
+		    }
 		}
-	    }
 #endif
-          void *p = chunk2mem (victim);
-          alloc_perturb (p, bytes);
-          return p;
-        }
+	      void *p = chunk2mem (victim);
+	      alloc_perturb (p, bytes);
+	      return p;
+	    }
+	}
     }
 
   /*
-- 
2.7.5

^ permalink raw reply	[flat|nested] 12+ messages in thread

* [PATCH 05/10] malloc: Resolve compilation failure in NDEBUG mode
  2017-01-01  0:00 [PATCH 00/10][2.26] Malloc fixes and improvements Siddhesh Poyarekar
                   ` (2 preceding siblings ...)
  2017-01-01  0:00 ` [PATCH 01/10] malloc: Abort on heap corruption, without a backtrace " Siddhesh Poyarekar
@ 2017-01-01  0:00 ` Siddhesh Poyarekar
  2017-01-01  0:00 ` [PATCH 03/10] malloc: Remove corrupt arena flag Siddhesh Poyarekar
                   ` (6 subsequent siblings)
  10 siblings, 0 replies; 12+ messages in thread
From: Siddhesh Poyarekar @ 2017-01-01  0:00 UTC (permalink / raw)
  To: libc-stable; +Cc: Florian Weimer

From: Florian Weimer <fweimer@redhat.com>

In _int_free, the locked variable is not used if NDEBUG is defined.

(cherry-picked from 24cffce7366c4070d8f823702a4fcec2cb732595)
---
 ChangeLog       |  5 +++++
 malloc/malloc.c | 25 +++++++------------------
 2 files changed, 12 insertions(+), 18 deletions(-)

diff --git a/ChangeLog b/ChangeLog
index 519db42..d536c9a 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,5 +1,10 @@
 2017-08-31  Florian Weimer  <fweimer@redhat.com>
 
+	* malloc/malloc.c (_int_free): Remove locked variable and related
+	asserts.
+
+2017-08-31  Florian Weimer  <fweimer@redhat.com>
+
 	* malloc/malloc.c (top_check): Change return type to void.  Remove
 	internal_function.
 	* malloc/hooks.c (top_check): Likewise.
diff --git a/malloc/malloc.c b/malloc/malloc.c
index 417ffbb..3608b34 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -4097,8 +4097,6 @@ _int_free (mstate av, mchunkptr p, int have_lock)
   mchunkptr bck;               /* misc temp for linking */
   mchunkptr fwd;               /* misc temp for linking */
 
-  int locked = 0;
-
   size = chunksize (p);
 
   /* Little security check which won't hurt performance: the
@@ -4153,19 +4151,14 @@ _int_free (mstate av, mchunkptr p, int have_lock)
 	/* We might not have a lock at this point and concurrent modifications
 	   of system_mem might have let to a false positive.  Redo the test
 	   after getting the lock.  */
-	if (have_lock
-	    || ({ assert (locked == 0);
-		  __libc_lock_lock (av->mutex);
-		  locked = 1;
+	if (!have_lock
+	    || ({ __libc_lock_lock (av->mutex);
 		  chunksize_nomask (chunk_at_offset (p, size)) <= 2 * SIZE_SZ
-		    || chunksize (chunk_at_offset (p, size)) >= av->system_mem;
-	      }))
+		  || chunksize (chunk_at_offset (p, size)) >= av->system_mem;
+	        }))
 	  malloc_printerr ("free(): invalid next size (fast)");
 	if (! have_lock)
-	  {
-	    __libc_lock_unlock (av->mutex);
-	    locked = 0;
-	  }
+	  __libc_lock_unlock (av->mutex);
       }
 
     free_perturb (chunk2mem(p), size - 2 * SIZE_SZ);
@@ -4202,10 +4195,8 @@ _int_free (mstate av, mchunkptr p, int have_lock)
   */
 
   else if (!chunk_is_mmapped(p)) {
-    if (! have_lock) {
+    if (!have_lock)
       __libc_lock_lock (av->mutex);
-      locked = 1;
-    }
 
     nextchunk = chunk_at_offset(p, size);
 
@@ -4319,10 +4310,8 @@ _int_free (mstate av, mchunkptr p, int have_lock)
       }
     }
 
-    if (! have_lock) {
-      assert (locked);
+    if (!have_lock)
       __libc_lock_unlock (av->mutex);
-    }
   }
   /*
     If the chunk was allocated via mmap, release via munmap().
-- 
2.7.5
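
For illustration only (not glibc code): the compilation failure comes from
a variable whose only remaining use is inside assert, which disappears
when NDEBUG is defined.  A minimal sketch of the pattern, assuming a
-Wall -Werror build such as glibc's:

/* ndebug-unused.c: a variable read only inside assert() becomes
   set-but-unused once NDEBUG compiles the asserts away.
   gcc -Wall -c ndebug-unused.c            -> clean
   gcc -Wall -DNDEBUG -c ndebug-unused.c   -> -Wunused-but-set-variable,
                                              fatal with -Werror  */
#include <assert.h>

void
take_and_check (void)
{
  int locked = 0;
  locked = 1;               /* stands in for taking av->mutex */
  assert (locked == 1);     /* expands to nothing with -DNDEBUG */
}

Removing the variable and the related asserts, as this patch does, keeps
the NDEBUG build warning-free.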

^ permalink raw reply	[flat|nested] 12+ messages in thread

end of thread, other threads:[~2017-11-28 14:39 UTC | newest]

Thread overview: 12+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2017-01-01  0:00 [PATCH 00/10][2.26] Malloc fixes and improvements Siddhesh Poyarekar
2017-01-01  0:00 ` [PATCH 08/10] Fix build issue with SINGLE_THREAD_P Siddhesh Poyarekar
2017-01-01  0:00 ` [PATCH 02/10] malloc: Remove check_action variable [BZ #21754] Siddhesh Poyarekar
2017-01-01  0:00 ` [PATCH 01/10] malloc: Abort on heap corruption, without a backtrace " Siddhesh Poyarekar
2017-01-01  0:00 ` [PATCH 05/10] malloc: Resolve compilation failure in NDEBUG mode Siddhesh Poyarekar
2017-01-01  0:00 ` [PATCH 03/10] malloc: Remove corrupt arena flag Siddhesh Poyarekar
2017-01-01  0:00 ` [PATCH 07/10] Add single-threaded path to _int_free Siddhesh Poyarekar
2017-01-01  0:00 ` [PATCH 06/10] Fix deadlock in _int_free consistency check Siddhesh Poyarekar
2017-01-01  0:00 ` [PATCH 09/10] Add single-threaded path to malloc/realloc/calloc/memalloc Siddhesh Poyarekar
2017-01-01  0:00 ` [PATCH 10/10] Add single-threaded path to _int_malloc Siddhesh Poyarekar
2017-01-01  0:00 ` [PATCH 00/10][2.26] Malloc fixes and improvements Siddhesh Poyarekar
2017-01-01  0:00 ` [PATCH 04/10] malloc: Change top_check return type to void Siddhesh Poyarekar
