This is the mail archive of the glibc-cvs@sourceware.org mailing list for the glibc project.



GNU C Library master sources branch release/2.22/master updated. glibc-2.22-65-gf95984b


This is an automated email from the git hooks/post-receive script. It was
generated because a ref change was pushed to the repository containing
the project "GNU C Library master sources".

The branch, release/2.22/master has been updated
       via  f95984beb2d3d61c71c14c10cdc5ab8fda321dec (commit)
       via  13a601a0dfb468552c1eebf5c225392260f0bda2 (commit)
       via  eb8c932bac2e6c1273107b8a2721f382575b002a (commit)
       via  00cd4dad175f648783f808aef681d16c10fc34fa (commit)
       via  c252c193e2583e4506b141d052df29a0987ac290 (commit)
      from  f664b663118642490b8776dcf4f30524a646dcbc (commit)

Those revisions listed above that are new to this repository have
not appeared on any other notification email; so we list those
revisions in full, below.

- Log -----------------------------------------------------------------
http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=f95984beb2d3d61c71c14c10cdc5ab8fda321dec

commit f95984beb2d3d61c71c14c10cdc5ab8fda321dec
Author: Florian Weimer <fweimer@redhat.com>
Date:   Wed Apr 13 14:11:42 2016 -0500

    malloc: Update comment for list_lock
    
    (cherry picked from commit 7962541a32eff5597bc4207e781cfac8d1bb0d87)

diff --git a/ChangeLog b/ChangeLog
index d97ce5a..fa02ac9 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,5 +1,9 @@
 2016-04-13  Florian Weimer  <fweimer@redhat.com>
 
+	* malloc/arena.c (list_lock): Update comment.
+
+2016-04-13  Florian Weimer  <fweimer@redhat.com>
+
 	* malloc/tst-malloc-thread-exit.c: Include test-skeleton.c early.
 	(do_test): Limit the number of arenas, so that we can use fewer
 	outer threads.  Limit timeout to 3 seconds, in preparation for a
diff --git a/malloc/arena.c b/malloc/arena.c
index 463d31d..f03dcb2 100644
--- a/malloc/arena.c
+++ b/malloc/arena.c
@@ -85,9 +85,10 @@ static mstate free_list;
    _int_new_arena.  This suffers from data races; see the FIXME
    comments in _int_new_arena and reused_arena.
 
-   list_lock also prevents concurrent forks.  When list_lock is
-   acquired, no arena lock must be acquired, but it is permitted to
-   acquire arena locks after list_lock.  */
+   list_lock also prevents concurrent forks.  At the time list_lock is
+   acquired, no arena lock must have been acquired, but it is
+   permitted to acquire arena locks subsequently, while list_lock is
+   acquired.  */
 static mutex_t list_lock = MUTEX_INITIALIZER;
 
 /* Mapped memory in non-main arenas (reliable only for NO_THREADS). */

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=13a601a0dfb468552c1eebf5c225392260f0bda2

commit 13a601a0dfb468552c1eebf5c225392260f0bda2
Author: Florian Weimer <fweimer@redhat.com>
Date:   Wed Apr 13 14:11:27 2016 -0500

    tst-malloc-thread-exit: Use fewer system resources
    
    (cherry picked from commit 2a38688932243b5b16fb12d84c7ac1138ce50363)
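
The resource reduction in the patch below relies on capping the number of malloc arenas via mallopt.  A minimal standalone illustration of that call (independent of the test harness; this is a sketch, not part of the commit) looks like this:

    #include <malloc.h>
    #include <stdio.h>

    int
    main (void)
    {
      /* Cap the number of arenas; mallopt returns 0 on failure.  */
      if (mallopt (M_ARENA_MAX, 8) == 0)
        {
          printf ("error: mallopt (M_ARENA_MAX) failed\n");
          return 1;
        }
      return 0;
    }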

diff --git a/ChangeLog b/ChangeLog
index e0cb376..d97ce5a 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,5 +1,12 @@
 2016-04-13  Florian Weimer  <fweimer@redhat.com>
 
+	* malloc/tst-malloc-thread-exit.c: Include test-skeleton.c early.
+	(do_test): Limit the number of arenas, so that we can use fewer
+	outer threads.  Limit timeout to 3 seconds, in preparation for a
+	larger TIMEOUT value.
+
+2016-04-13  Florian Weimer  <fweimer@redhat.com>
+
 	[BZ #19182]
 	* malloc/arena.c (list_lock): Document lock ordering requirements.
 	(free_list_lock): New lock.
diff --git a/malloc/tst-malloc-thread-exit.c b/malloc/tst-malloc-thread-exit.c
index da7297e..7bce012 100644
--- a/malloc/tst-malloc-thread-exit.c
+++ b/malloc/tst-malloc-thread-exit.c
@@ -26,13 +26,17 @@
    particularly related to the arena free list.  */
 
 #include <errno.h>
+#include <malloc.h>
 #include <pthread.h>
 #include <stdbool.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <unistd.h>
 
-#define TIMEOUT 7
+static int do_test (void);
+
+#define TEST_FUNCTION do_test ()
+#include "../test-skeleton.c"
 
 static bool termination_requested;
 static int inner_thread_count = 4;
@@ -156,20 +160,20 @@ outer_thread (void *closure)
 static int
 do_test (void)
 {
-  /* The number of top-level threads should be equal to the number of
-     arenas.  See arena_get2.  */
-  long outer_thread_count = sysconf (_SC_NPROCESSORS_ONLN);
-  if (outer_thread_count >= 1)
+  /* The number of threads should be smaller than the number of
+     arenas, so that there will be some free arenas to add to the
+     arena free list.  */
+  enum { outer_thread_count = 2 };
+  if (mallopt (M_ARENA_MAX, 8) == 0)
     {
-      /* See NARENAS_FROM_NCORES in malloc.c.  */
-      if (sizeof (long) == 4)
-        outer_thread_count *= 2;
-      else
-        outer_thread_count *= 8;
+      printf ("error: mallopt (M_ARENA_MAX) failed\n");
+      return 1;
     }
 
   /* Leave some room for shutting down all threads gracefully.  */
-  int timeout = TIMEOUT - 2;
+  int timeout = 3;
+  if (timeout > TIMEOUT)
+    timeout = TIMEOUT - 1;
 
   pthread_t *threads = calloc (sizeof (*threads), outer_thread_count);
   if (threads == NULL)
@@ -212,6 +216,3 @@ do_test (void)
 
   return 0;
 }
-
-#define TEST_FUNCTION do_test ()
-#include "../test-skeleton.c"

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=eb8c932bac2e6c1273107b8a2721f382575b002a

commit eb8c932bac2e6c1273107b8a2721f382575b002a
Author: Florian Weimer <fweimer@redhat.com>
Date:   Wed Apr 13 14:10:49 2016 -0500

    malloc: Fix list_lock/arena lock deadlock [BZ #19182]
    
    	* malloc/arena.c (list_lock): Document lock ordering requirements.
    	(free_list_lock): New lock.
    	(ptmalloc_lock_all): Comment on free_list_lock.
    	(ptmalloc_unlock_all2): Reinitialize free_list_lock.
    	(detach_arena): Update comment.  free_list_lock is now needed.
    	(_int_new_arena): Use free_list_lock around detach_arena call.
    	Acquire arena lock after list_lock.  Add comment, including FIXME
    	about incorrect synchronization.
    	(get_free_list): Switch to free_list_lock.
    	(reused_arena): Acquire free_list_lock around detach_arena call
    	and attached threads counter update.  Add two FIXMEs about
    	incorrect synchronization.
    	(arena_thread_freeres): Switch to free_list_lock.
    	* malloc/malloc.c (struct malloc_state): Update comments to
    	mention free_list_lock.
    
    (cherry picked from commit 90c400bd4904b0240a148f0b357a5cbc36179239)
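
The lock-ordering rule this commit documents can be illustrated in isolation as follows.  This is a minimal sketch using plain POSIX mutexes and a hypothetical arena type (publish_and_lock_arena and update_free_list_state are illustrative names, not glibc functions); the real code, using the glibc-internal mutex_t wrappers, is in the patch below.

    #include <pthread.h>

    /* Hypothetical stand-ins for the glibc-internal locks and arenas.  */
    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t free_list_lock = PTHREAD_MUTEX_INITIALIZER;

    struct arena
    {
      pthread_mutex_t mutex;
      struct arena *next;
    };

    /* list_lock is acquired while no arena lock is held; the arena
       lock is taken only afterwards.  Taking an arena lock first and
       list_lock second risks a deadlock against the fork handler,
       which holds list_lock while acquiring every arena lock.  */
    static void
    publish_and_lock_arena (struct arena *head, struct arena *a)
    {
      pthread_mutex_lock (&list_lock);
      a->next = head->next;
      head->next = a;
      pthread_mutex_unlock (&list_lock);

      pthread_mutex_lock (&a->mutex);   /* Caller unlocks when done.  */
    }

    /* free_list_lock is the innermost lock: nothing else may be
       acquired while it is held, so only free-list and per-arena
       reference-count updates happen under it.  */
    static void
    update_free_list_state (void)
    {
      pthread_mutex_lock (&free_list_lock);
      /* ... update free_list and reference counts only ...  */
      pthread_mutex_unlock (&free_list_lock);
    }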

diff --git a/ChangeLog b/ChangeLog
index fde59f1..e0cb376 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,5 +1,24 @@
 2016-04-13  Florian Weimer  <fweimer@redhat.com>
 
+	[BZ #19182]
+	* malloc/arena.c (list_lock): Document lock ordering requirements.
+	(free_list_lock): New lock.
+	(ptmalloc_lock_all): Comment on free_list_lock.
+	(ptmalloc_unlock_all2): Reinitialize free_list_lock.
+	(detach_arena): Update comment.  free_list_lock is now needed.
+	(_int_new_arena): Use free_list_lock around detach_arena call.
+	Acquire arena lock after list_lock.  Add comment, including FIXME
+	about incorrect synchronization.
+	(get_free_list): Switch to free_list_lock.
+	(reused_arena): Acquire free_list_lock around detach_arena call
+	and attached threads counter update.  Add two FIXMEs about
+	incorrect synchronization.
+	(arena_thread_freeres): Switch to free_list_lock.
+	* malloc/malloc.c (struct malloc_state): Update comments to
+	mention free_list_lock.
+
+2016-04-13  Florian Weimer  <fweimer@redhat.com>
+
 	[BZ #19243]
 	* malloc/arena.c (get_free_list): Remove assert and adjust
 	reference count handling.  Add comment about reused_arena
diff --git a/NEWS b/NEWS
index 21c85ca..7b13178 100644
--- a/NEWS
+++ b/NEWS
@@ -25,7 +25,7 @@ Version 2.22.1
 
   17905, 18420, 18421, 18480, 18589, 18743, 18778, 18781, 18787, 18796,
   18870, 18887, 18921, 18928, 18969, 18985, 19003, 19018, 19048, 19058,
-  19174, 19178, 19243, 19590, 19682, 19791, 19822, 19853, 19879.
+  19174, 19178, 19182, 19243, 19590, 19682, 19791, 19822, 19853, 19879.
 
 * The getnetbyname implementation in nss_dns had a potentially unbounded
   alloca call (in the form of a call to strdupa), leading to a stack
diff --git a/malloc/arena.c b/malloc/arena.c
index f05c502..463d31d 100644
--- a/malloc/arena.c
+++ b/malloc/arena.c
@@ -67,10 +67,29 @@ extern int sanity_check_heap_info_alignment[(sizeof (heap_info)
 /* Thread specific data */
 
 static tsd_key_t arena_key;
-static mutex_t list_lock = MUTEX_INITIALIZER;
+
+/* Arena free list.  free_list_lock synchronizes access to the
+   free_list variable below, and the next_free and attached_threads
+   members of struct malloc_state objects.  No other locks must be
+   acquired after free_list_lock has been acquired.  */
+
+static mutex_t free_list_lock = MUTEX_INITIALIZER;
 static size_t narenas = 1;
 static mstate free_list;
 
+/* list_lock prevents concurrent writes to the next member of struct
+   malloc_state objects.
+
+   Read access to the next member is supposed to synchronize with the
+   atomic_write_barrier and the write to the next member in
+   _int_new_arena.  This suffers from data races; see the FIXME
+   comments in _int_new_arena and reused_arena.
+
+   list_lock also prevents concurrent forks.  When list_lock is
+   acquired, no arena lock must be acquired, but it is permitted to
+   acquire arena locks after list_lock.  */
+static mutex_t list_lock = MUTEX_INITIALIZER;
+
 /* Mapped memory in non-main arenas (reliable only for NO_THREADS). */
 static unsigned long arena_mem;
 
@@ -210,6 +229,9 @@ ptmalloc_lock_all (void)
   if (__malloc_initialized < 1)
     return;
 
+  /* We do not acquire free_list_lock here because we completely
+     reconstruct free_list in ptmalloc_unlock_all2.  */
+
   if (mutex_trylock (&list_lock))
     {
       void *my_arena;
@@ -291,6 +313,7 @@ ptmalloc_unlock_all2 (void)
 
   /* Push all arenas to the free list, except save_arena, which is
      attached to the current thread.  */
+  mutex_init (&free_list_lock);
   if (save_arena != NULL)
     ((mstate) save_arena)->attached_threads = 1;
   free_list = NULL;
@@ -308,6 +331,7 @@ ptmalloc_unlock_all2 (void)
       if (ar_ptr == &main_arena)
         break;
     }
+
   mutex_init (&list_lock);
   atfork_recursive_cntr = 0;
 }
@@ -735,7 +759,7 @@ heap_trim (heap_info *heap, size_t pad)
 /* Create a new arena with initial size "size".  */
 
 /* If REPLACED_ARENA is not NULL, detach it from this thread.  Must be
-   called while list_lock is held.  */
+   called while free_list_lock is held.  */
 static void
 detach_arena (mstate replaced_arena)
 {
@@ -789,19 +813,34 @@ _int_new_arena (size_t size)
   tsd_getspecific (arena_key, replaced_arena);
   tsd_setspecific (arena_key, (void *) a);
   mutex_init (&a->mutex);
-  (void) mutex_lock (&a->mutex);
 
   (void) mutex_lock (&list_lock);
 
-  detach_arena (replaced_arena);
-
   /* Add the new arena to the global list.  */
   a->next = main_arena.next;
+  /* FIXME: The barrier is an attempt to synchronize with read access
+     in reused_arena, which does not acquire list_lock while
+     traversing the list.  */
   atomic_write_barrier ();
   main_arena.next = a;
 
   (void) mutex_unlock (&list_lock);
 
+  (void) mutex_lock (&free_list_lock);
+  detach_arena (replaced_arena);
+  (void) mutex_unlock (&free_list_lock);
+
+  /* Lock this arena.  NB: Another thread may have been attached to
+     this arena because the arena is now accessible from the
+     main_arena.next list and could have been picked by reused_arena.
+     This can only happen for the last arena created (before the arena
+     limit is reached).  At this point, some arena has to be attached
+     to two threads.  We could acquire the arena lock before list_lock
+     to make it less likely that reused_arena picks this new arena,
+     but this could result in a deadlock with ptmalloc_lock_all.  */
+
+  (void) mutex_lock (&a->mutex);
+
   return a;
 }
 
@@ -818,7 +857,7 @@ get_free_list (void)
 
   if (result != NULL)
     {
-      (void) mutex_lock (&list_lock);
+      (void) mutex_lock (&free_list_lock);
       result = free_list;
       if (result != NULL)
 	{
@@ -829,7 +868,7 @@ get_free_list (void)
 
 	  detach_arena (replaced_arena);
 	}
-      (void) mutex_unlock (&list_lock);
+      (void) mutex_unlock (&free_list_lock);
 
       if (result != NULL)
         {
@@ -849,6 +888,7 @@ static mstate
 reused_arena (mstate avoid_arena)
 {
   mstate result;
+  /* FIXME: Access to next_to_use suffers from data races.  */
   static mstate next_to_use;
   if (next_to_use == NULL)
     next_to_use = &main_arena;
@@ -861,6 +901,7 @@ reused_arena (mstate avoid_arena)
       if (!arena_is_corrupt (result) && !mutex_trylock (&result->mutex))
         goto out;
 
+      /* FIXME: This is a data race, see _int_new_arena.  */
       result = result->next;
     }
   while (result != next_to_use);
@@ -895,10 +936,10 @@ out:
     mstate replaced_arena;
 
     tsd_getspecific (arena, replaced_arena);
-    (void) mutex_lock (&list_lock);
+    (void) mutex_lock (&free_list_lock);
     detach_arena (replaced_arena);
     ++result->attached_threads;
-    (void) mutex_unlock (&list_lock);
+    (void) mutex_unlock (&free_list_lock);
   }
 
   LIBC_PROBE (memory_arena_reuse, 2, result, avoid_arena);
@@ -993,7 +1034,7 @@ arena_thread_freeres (void)
 
   if (a != NULL)
     {
-      (void) mutex_lock (&list_lock);
+      (void) mutex_lock (&free_list_lock);
       /* If this was the last attached thread for this arena, put the
 	 arena on the free list.  */
       assert (a->attached_threads > 0);
@@ -1002,7 +1043,7 @@ arena_thread_freeres (void)
 	  a->next_free = free_list;
 	  free_list = a;
 	}
-      (void) mutex_unlock (&list_lock);
+      (void) mutex_unlock (&free_list_lock);
     }
 }
 text_set_element (__libc_thread_subfreeres, arena_thread_freeres);
diff --git a/malloc/malloc.c b/malloc/malloc.c
index 037d4ff..5c84e62 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -1710,12 +1710,12 @@ struct malloc_state
   struct malloc_state *next;
 
   /* Linked list for free arenas.  Access to this field is serialized
-     by list_lock in arena.c.  */
+     by free_list_lock in arena.c.  */
   struct malloc_state *next_free;
 
   /* Number of threads attached to this arena.  0 if the arena is on
-     the free list.  Access to this field is serialized by list_lock
-     in arena.c.  */
+     the free list.  Access to this field is serialized by
+     free_list_lock in arena.c.  */
   INTERNAL_SIZE_T attached_threads;
 
   /* Memory allocated from the system in this arena.  */

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=00cd4dad175f648783f808aef681d16c10fc34fa

commit 00cd4dad175f648783f808aef681d16c10fc34fa
Author: Florian Weimer <fweimer@redhat.com>
Date:   Wed Apr 13 14:08:24 2016 -0500

    malloc: Fix attached thread reference count handling [BZ #19243]
    
    reused_arena can increase the attached thread count of arenas on the
    free list.  This means that the assertion that the reference count is
    zero is incorrect.  In this case, the reference count initialization
    is incorrect as well and could cause arenas to be put on the free
    list too early (while they still have attached threads).
    
    	* malloc/arena.c (get_free_list): Remove assert and adjust
    	reference count handling.  Add comment about reused_arena
    	interaction.
    	(reused_arena): Add comments about get_free_list interaction.
    	* malloc/tst-malloc-thread-exit.c: New file.
    	* malloc/Makefile (tests): Add tst-malloc-thread-exit.
    	(tst-malloc-thread-exit): Link against libpthread.
    
    (cherry picked from commit 3da825ce483903e3a881a016113b3e59fd4041de)
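
The reference-count fix reduces to the following sketch (hypothetical arena type and function name, locking omitted; the real change, made to get_free_list in malloc/arena.c under list_lock, is in the patch below).

    /* Hypothetical, simplified arena type for illustration only.  */
    struct arena
    {
      struct arena *next_free;
      unsigned long attached_threads;
    };

    static struct arena *free_list;

    /* reused_arena can attach a thread to an arena that is still
       linked on free_list, so an arena popped here may already have a
       nonzero reference count.  Incrementing keeps the count correct;
       the old code asserted the count was zero and set it to one,
       which could later allow a still-attached arena to be pushed
       back onto the free list too early.  */
    static struct arena *
    take_from_free_list (void)
    {
      struct arena *result = free_list;
      if (result != NULL)
        {
          free_list = result->next_free;
          ++result->attached_threads;
        }
      return result;
    }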

diff --git a/ChangeLog b/ChangeLog
index 7032239..fde59f1 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,5 +1,16 @@
 2016-04-13  Florian Weimer  <fweimer@redhat.com>
 
+	[BZ #19243]
+	* malloc/arena.c (get_free_list): Remove assert and adjust
+	reference count handling.  Add comment about reused_arena
+	interaction.
+	(reused_arena): Add comments about get_free_list interaction.
+	* malloc/tst-malloc-thread-exit.c: New file.
+	* malloc/Makefile (tests): Add tst-malloc-thread-exit.
+	(tst-malloc-thread-exit): Link against libpthread.
+
+2016-04-13  Florian Weimer  <fweimer@redhat.com>
+
 	[BZ# 19048]
 	* malloc/malloc.c (struct malloc_state): Update comment.  Add
 	attached_threads member.
diff --git a/NEWS b/NEWS
index 7d6a7d6..21c85ca 100644
--- a/NEWS
+++ b/NEWS
@@ -25,7 +25,7 @@ Version 2.22.1
 
   17905, 18420, 18421, 18480, 18589, 18743, 18778, 18781, 18787, 18796,
   18870, 18887, 18921, 18928, 18969, 18985, 19003, 19018, 19048, 19058,
-  19174, 19178, 19590, 19682, 19791, 19822, 19853, 19879.
+  19174, 19178, 19243, 19590, 19682, 19791, 19822, 19853, 19879.
 
 * The getnetbyname implementation in nss_dns had a potentially unbounded
   alloca call (in the form of a call to strdupa), leading to a stack
diff --git a/malloc/Makefile b/malloc/Makefile
index 67ed293..aa0579c 100644
--- a/malloc/Makefile
+++ b/malloc/Makefile
@@ -28,7 +28,7 @@ tests := mallocbug tst-malloc tst-valloc tst-calloc tst-obstack \
 	 tst-mallocstate tst-mcheck tst-mallocfork tst-trim1 \
 	 tst-malloc-usable tst-realloc tst-posix_memalign \
 	 tst-pvalloc tst-memalign tst-mallopt tst-scratch_buffer \
-	 tst-malloc-backtrace
+	 tst-malloc-backtrace tst-malloc-thread-exit
 test-srcs = tst-mtrace
 
 routines = malloc morecore mcheck mtrace obstack \
@@ -47,6 +47,8 @@ libmemusage-inhibit-o = $(filter-out .os,$(object-suffixes))
 
 $(objpfx)tst-malloc-backtrace: $(common-objpfx)nptl/libpthread.so \
 			       $(common-objpfx)nptl/libpthread_nonshared.a
+$(objpfx)tst-malloc-thread-exit: $(common-objpfx)nptl/libpthread.so \
+			       $(common-objpfx)nptl/libpthread_nonshared.a
 
 # These should be removed by `make clean'.
 extra-objs = mcheck-init.o libmcheck.a
diff --git a/malloc/arena.c b/malloc/arena.c
index ca5b8a2..f05c502 100644
--- a/malloc/arena.c
+++ b/malloc/arena.c
@@ -806,6 +806,8 @@ _int_new_arena (size_t size)
 }
 
 
+/* Remove an arena from free_list.  The arena may be in use because it
+   was attached concurrently to a thread by reused_arena below.  */
 static mstate
 get_free_list (void)
 {
@@ -822,10 +824,8 @@ get_free_list (void)
 	{
 	  free_list = result->next_free;
 
-	  /* Arenas on the free list are not attached to any thread.  */
-	  assert (result->attached_threads == 0);
-	  /* But the arena will now be attached to this thread.  */
-	  result->attached_threads = 1;
+	  /* The arena will be attached to this thread.  */
+	  ++result->attached_threads;
 
 	  detach_arena (replaced_arena);
 	}
@@ -853,6 +853,8 @@ reused_arena (mstate avoid_arena)
   if (next_to_use == NULL)
     next_to_use = &main_arena;
 
+  /* Iterate over all arenas (including those linked from
+     free_list).  */
   result = next_to_use;
   do
     {
@@ -887,6 +889,8 @@ reused_arena (mstate avoid_arena)
   (void) mutex_lock (&result->mutex);
 
 out:
+  /* Attach the arena to the current thread.  Note that we may have
+     selected an arena which was on free_list.  */
   {
     mstate replaced_arena;
 
diff --git a/malloc/tst-malloc-thread-exit.c b/malloc/tst-malloc-thread-exit.c
new file mode 100644
index 0000000..da7297e
--- /dev/null
+++ b/malloc/tst-malloc-thread-exit.c
@@ -0,0 +1,217 @@
+/* Test malloc with concurrent thread termination.
+   Copyright (C) 2015 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+/* This thread spawns a number of outer threads, equal to the arena
+   limit.  The outer threads run a loop which start and join two
+   different kinds of threads: the first kind allocates (attaching an
+   arena to the thread; malloc_first_thread) and waits, the second
+   kind waits and allocates (wait_first_threads).  Both kinds of
+   threads exit immediately after waiting.  The hope is that this will
+   exhibit races in thread termination and arena management,
+   particularly related to the arena free list.  */
+
+#include <errno.h>
+#include <pthread.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#define TIMEOUT 7
+
+static bool termination_requested;
+static int inner_thread_count = 4;
+static size_t malloc_size = 32;
+
+static void
+__attribute__ ((noinline, noclone))
+unoptimized_free (void *ptr)
+{
+  free (ptr);
+}
+
+static void *
+malloc_first_thread (void * closure)
+{
+  pthread_barrier_t *barrier = closure;
+  void *ptr = malloc (malloc_size);
+  if (ptr == NULL)
+    {
+      printf ("error: malloc: %m\n");
+      abort ();
+    }
+  int ret = pthread_barrier_wait (barrier);
+  if (ret != 0 && ret != PTHREAD_BARRIER_SERIAL_THREAD)
+    {
+      errno = ret;
+      printf ("error: pthread_barrier_wait: %m\n");
+      abort ();
+    }
+  unoptimized_free (ptr);
+  return NULL;
+}
+
+static void *
+wait_first_thread (void * closure)
+{
+  pthread_barrier_t *barrier = closure;
+  int ret = pthread_barrier_wait (barrier);
+  if (ret != 0 && ret != PTHREAD_BARRIER_SERIAL_THREAD)
+    {
+      errno = ret;
+      printf ("error: pthread_barrier_wait: %m\n");
+      abort ();
+    }
+  void *ptr = malloc (malloc_size);
+  if (ptr == NULL)
+    {
+      printf ("error: malloc: %m\n");
+      abort ();
+    }
+  unoptimized_free (ptr);
+  return NULL;
+}
+
+static void *
+outer_thread (void *closure)
+{
+  pthread_t *threads = calloc (sizeof (*threads), inner_thread_count);
+  if (threads == NULL)
+    {
+      printf ("error: calloc: %m\n");
+      abort ();
+    }
+
+  while (!__atomic_load_n (&termination_requested, __ATOMIC_RELAXED))
+    {
+      pthread_barrier_t barrier;
+      int ret = pthread_barrier_init (&barrier, NULL, inner_thread_count + 1);
+      if (ret != 0)
+        {
+          errno = ret;
+          printf ("pthread_barrier_init: %m\n");
+          abort ();
+        }
+      for (int i = 0; i < inner_thread_count; ++i)
+        {
+          void *(*func) (void *);
+          if ((i  % 2) == 0)
+            func = malloc_first_thread;
+          else
+            func = wait_first_thread;
+          ret = pthread_create (threads + i, NULL, func, &barrier);
+          if (ret != 0)
+            {
+              errno = ret;
+              printf ("error: pthread_create: %m\n");
+              abort ();
+            }
+        }
+      ret = pthread_barrier_wait (&barrier);
+      if (ret != 0 && ret != PTHREAD_BARRIER_SERIAL_THREAD)
+        {
+          errno = ret;
+          printf ("pthread_wait: %m\n");
+          abort ();
+        }
+      for (int i = 0; i < inner_thread_count; ++i)
+        {
+          ret = pthread_join (threads[i], NULL);
+          if (ret != 0)
+            {
+              ret = errno;
+              printf ("error: pthread_join: %m\n");
+              abort ();
+            }
+        }
+      ret = pthread_barrier_destroy (&barrier);
+      if (ret != 0)
+        {
+          ret = errno;
+          printf ("pthread_barrier_destroy: %m\n");
+          abort ();
+        }
+    }
+
+  free (threads);
+
+  return NULL;
+}
+
+static int
+do_test (void)
+{
+  /* The number of top-level threads should be equal to the number of
+     arenas.  See arena_get2.  */
+  long outer_thread_count = sysconf (_SC_NPROCESSORS_ONLN);
+  if (outer_thread_count >= 1)
+    {
+      /* See NARENAS_FROM_NCORES in malloc.c.  */
+      if (sizeof (long) == 4)
+        outer_thread_count *= 2;
+      else
+        outer_thread_count *= 8;
+    }
+
+  /* Leave some room for shutting down all threads gracefully.  */
+  int timeout = TIMEOUT - 2;
+
+  pthread_t *threads = calloc (sizeof (*threads), outer_thread_count);
+  if (threads == NULL)
+    {
+      printf ("error: calloc: %m\n");
+      abort ();
+    }
+
+  for (long i = 0; i < outer_thread_count; ++i)
+    {
+      int ret = pthread_create (threads + i, NULL, outer_thread, NULL);
+      if (ret != 0)
+        {
+          errno = ret;
+          printf ("error: pthread_create: %m\n");
+          abort ();
+        }
+    }
+
+  struct timespec ts = {timeout, 0};
+  if (nanosleep (&ts, NULL))
+    {
+      printf ("error: error: nanosleep: %m\n");
+      abort ();
+    }
+
+  __atomic_store_n (&termination_requested, true, __ATOMIC_RELAXED);
+
+  for (long i = 0; i < outer_thread_count; ++i)
+    {
+      int ret = pthread_join (threads[i], NULL);
+      if (ret != 0)
+        {
+          errno = ret;
+          printf ("error: pthread_join: %m\n");
+          abort ();
+        }
+    }
+  free (threads);
+
+  return 0;
+}
+
+#define TEST_FUNCTION do_test ()
+#include "../test-skeleton.c"

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=c252c193e2583e4506b141d052df29a0987ac290

commit c252c193e2583e4506b141d052df29a0987ac290
Author: Florian Weimer <fweimer@redhat.com>
Date:   Wed Apr 13 14:07:58 2016 -0500

    malloc: Prevent arena free_list from turning cyclic [BZ #19048]
    
    	[BZ# 19048]
    	* malloc/malloc.c (struct malloc_state): Update comment.  Add
    	attached_threads member.
    	(main_arena): Initialize attached_threads.
    	* malloc/arena.c (list_lock): Update comment.
    	(ptmalloc_lock_all, ptmalloc_unlock_all): Likewise.
    	(ptmalloc_unlock_all2): Reinitialize arena reference counts.
    	(detach_arena): New function.
    	(_int_new_arena): Initialize arena reference count and detach
    	replaced arena.
    	(get_free_list, reused_arena): Update reference count and detach
    	replaced arena.
    	(arena_thread_freeres): Update arena reference count and only put
    	unreferenced arenas on the free list.
    
    (cherry picked from commit a62719ba90e2fa1728890ae7dc8df9e32a622e7b)
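
The effect of the new attached_threads counter on the thread-exit path is roughly the following (a sketch with hypothetical types and locking omitted; the actual change to arena_thread_freeres is in the patch below).

    #include <assert.h>

    /* Hypothetical, simplified arena type for illustration only.  */
    struct arena
    {
      struct arena *next_free;
      unsigned long attached_threads;
    };

    static struct arena *free_list;

    /* Each exiting thread drops its reference; the arena goes onto
       free_list only once, when the last attached thread detaches.
       Pushing unconditionally (the old behaviour) could link an arena
       shared by several threads into free_list more than once,
       turning the list cyclic.  */
    static void
    thread_detach_arena (struct arena *a)
    {
      if (a == NULL)
        return;
      assert (a->attached_threads > 0);
      if (--a->attached_threads == 0)
        {
          a->next_free = free_list;
          free_list = a;
        }
    }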

diff --git a/ChangeLog b/ChangeLog
index f7664af..7032239 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,20 @@
+2016-04-13  Florian Weimer  <fweimer@redhat.com>
+
+	[BZ# 19048]
+	* malloc/malloc.c (struct malloc_state): Update comment.  Add
+	attached_threads member.
+	(main_arena): Initialize attached_threads.
+	* malloc/arena.c (list_lock): Update comment.
+	(ptmalloc_lock_all, ptmalloc_unlock_all): Likewise.
+	(ptmalloc_unlock_all2): Reinitialize arena reference counts.
+	(detach_arena): New function.
+	(_int_new_arena): Initialize arena reference count and detach
+	replaced arena.
+	(get_free_list, reused_arena): Update reference count and detach
+	replaced arena.
+	(arena_thread_freeres): Update arena reference count and only put
+	unreferenced arenas on the free list.
+
 2016-04-12  Paul E. Murphy  <murphyp@linux.vnet.ibm.com>
 
 	[BZ #19853]
diff --git a/NEWS b/NEWS
index 2a43aec..7d6a7d6 100644
--- a/NEWS
+++ b/NEWS
@@ -24,8 +24,8 @@ Version 2.22.1
 * The following bugs are resolved with this release:
 
   17905, 18420, 18421, 18480, 18589, 18743, 18778, 18781, 18787, 18796,
-  18870, 18887, 18921, 18928, 18969, 18985, 19003, 19018, 19058, 19174,
-  19178, 19590, 19682, 19791, 19822, 19853, 19879.
+  18870, 18887, 18921, 18928, 18969, 18985, 19003, 19018, 19048, 19058,
+  19174, 19178, 19590, 19682, 19791, 19822, 19853, 19879.
 
 * The getnetbyname implementation in nss_dns had a potentially unbounded
   alloca call (in the form of a call to strdupa), leading to a stack
@@ -34,6 +34,15 @@ Version 2.22.1
 
 * The LD_POINTER_GUARD environment variable can no longer be used to
   disable the pointer guard feature.  It is always enabled.
+
+* A defect in the malloc implementation, present since glibc 2.15 (2012) or
+  glibc 2.10 via --enable-experimental-malloc (2009), could result in the
+  unnecessary serialization of memory allocation requests across threads.
+  The defect is now corrected.  Users should see a substantial increase in
+  the concurrent throughput of allocation requests for applications which
+  trigger this bug.  Affected applications typically create and
+  destroy threads frequently.  (Bug 19048 was reported and analyzed by
+  Ericsson.)
 
 Version 2.22
 
diff --git a/malloc/arena.c b/malloc/arena.c
index 21ecc5a..ca5b8a2 100644
--- a/malloc/arena.c
+++ b/malloc/arena.c
@@ -233,7 +233,10 @@ ptmalloc_lock_all (void)
   save_free_hook = __free_hook;
   __malloc_hook = malloc_atfork;
   __free_hook = free_atfork;
-  /* Only the current thread may perform malloc/free calls now. */
+  /* Only the current thread may perform malloc/free calls now.
+     save_arena will be reattached to the current thread, in
+     ptmalloc_lock_all, so save_arena->attached_threads is not
+     updated.  */
   tsd_getspecific (arena_key, save_arena);
   tsd_setspecific (arena_key, ATFORK_ARENA_PTR);
 out:
@@ -251,6 +254,9 @@ ptmalloc_unlock_all (void)
   if (--atfork_recursive_cntr != 0)
     return;
 
+  /* Replace ATFORK_ARENA_PTR with save_arena.
+     save_arena->attached_threads was not changed in ptmalloc_lock_all
+     and is still correct.  */
   tsd_setspecific (arena_key, save_arena);
   __malloc_hook = save_malloc_hook;
   __free_hook = save_free_hook;
@@ -282,12 +288,19 @@ ptmalloc_unlock_all2 (void)
   tsd_setspecific (arena_key, save_arena);
   __malloc_hook = save_malloc_hook;
   __free_hook = save_free_hook;
+
+  /* Push all arenas to the free list, except save_arena, which is
+     attached to the current thread.  */
+  if (save_arena != NULL)
+    ((mstate) save_arena)->attached_threads = 1;
   free_list = NULL;
   for (ar_ptr = &main_arena;; )
     {
       mutex_init (&ar_ptr->mutex);
       if (ar_ptr != save_arena)
         {
+	  /* This arena is no longer attached to any thread.  */
+	  ar_ptr->attached_threads = 0;
           ar_ptr->next_free = free_list;
           free_list = ar_ptr;
         }
@@ -721,6 +734,22 @@ heap_trim (heap_info *heap, size_t pad)
 
 /* Create a new arena with initial size "size".  */
 
+/* If REPLACED_ARENA is not NULL, detach it from this thread.  Must be
+   called while list_lock is held.  */
+static void
+detach_arena (mstate replaced_arena)
+{
+  if (replaced_arena != NULL)
+    {
+      assert (replaced_arena->attached_threads > 0);
+      /* The current implementation only detaches from main_arena in
+	 case of allocation failure.  This means that it is likely not
+	 beneficial to put the arena on free_list even if the
+	 reference count reaches zero.  */
+      --replaced_arena->attached_threads;
+    }
+}
+
 static mstate
 _int_new_arena (size_t size)
 {
@@ -742,6 +771,7 @@ _int_new_arena (size_t size)
     }
   a = h->ar_ptr = (mstate) (h + 1);
   malloc_init_state (a);
+  a->attached_threads = 1;
   /*a->next = NULL;*/
   a->system_mem = a->max_system_mem = h->size;
   arena_mem += h->size;
@@ -755,12 +785,16 @@ _int_new_arena (size_t size)
   set_head (top (a), (((char *) h + h->size) - ptr) | PREV_INUSE);
 
   LIBC_PROBE (memory_arena_new, 2, a, size);
+  mstate replaced_arena;
+  tsd_getspecific (arena_key, replaced_arena);
   tsd_setspecific (arena_key, (void *) a);
   mutex_init (&a->mutex);
   (void) mutex_lock (&a->mutex);
 
   (void) mutex_lock (&list_lock);
 
+  detach_arena (replaced_arena);
+
   /* Add the new arena to the global list.  */
   a->next = main_arena.next;
   atomic_write_barrier ();
@@ -775,13 +809,26 @@ _int_new_arena (size_t size)
 static mstate
 get_free_list (void)
 {
+  mstate replaced_arena;
   mstate result = free_list;
+
+  tsd_getspecific (arena, replaced_arena);
+
   if (result != NULL)
     {
       (void) mutex_lock (&list_lock);
       result = free_list;
       if (result != NULL)
-        free_list = result->next_free;
+	{
+	  free_list = result->next_free;
+
+	  /* Arenas on the free list are not attached to any thread.  */
+	  assert (result->attached_threads == 0);
+	  /* But the arena will now be attached to this thread.  */
+	  result->attached_threads = 1;
+
+	  detach_arena (replaced_arena);
+	}
       (void) mutex_unlock (&list_lock);
 
       if (result != NULL)
@@ -840,6 +887,16 @@ reused_arena (mstate avoid_arena)
   (void) mutex_lock (&result->mutex);
 
 out:
+  {
+    mstate replaced_arena;
+
+    tsd_getspecific (arena, replaced_arena);
+    (void) mutex_lock (&list_lock);
+    detach_arena (replaced_arena);
+    ++result->attached_threads;
+    (void) mutex_unlock (&list_lock);
+  }
+
   LIBC_PROBE (memory_arena_reuse, 2, result, avoid_arena);
   tsd_setspecific (arena_key, (void *) result);
   next_to_use = result->next;
@@ -933,8 +990,14 @@ arena_thread_freeres (void)
   if (a != NULL)
     {
       (void) mutex_lock (&list_lock);
-      a->next_free = free_list;
-      free_list = a;
+      /* If this was the last attached thread for this arena, put the
+	 arena on the free list.  */
+      assert (a->attached_threads > 0);
+      if (--a->attached_threads == 0)
+	{
+	  a->next_free = free_list;
+	  free_list = a;
+	}
       (void) mutex_unlock (&list_lock);
     }
 }
diff --git a/malloc/malloc.c b/malloc/malloc.c
index 452f036..037d4ff 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -1709,9 +1709,15 @@ struct malloc_state
   /* Linked list */
   struct malloc_state *next;
 
-  /* Linked list for free arenas.  */
+  /* Linked list for free arenas.  Access to this field is serialized
+     by list_lock in arena.c.  */
   struct malloc_state *next_free;
 
+  /* Number of threads attached to this arena.  0 if the arena is on
+     the free list.  Access to this field is serialized by list_lock
+     in arena.c.  */
+  INTERNAL_SIZE_T attached_threads;
+
   /* Memory allocated from the system in this arena.  */
   INTERNAL_SIZE_T system_mem;
   INTERNAL_SIZE_T max_system_mem;
@@ -1755,7 +1761,8 @@ struct malloc_par
 static struct malloc_state main_arena =
 {
   .mutex = MUTEX_INITIALIZER,
-  .next = &main_arena
+  .next = &main_arena,
+  .attached_threads = 1
 };
 
 /* There is only one instance of the malloc parameters.  */

-----------------------------------------------------------------------

Summary of changes:
 ChangeLog                       |   58 ++++++++++
 NEWS                            |   13 ++-
 malloc/Makefile                 |    4 +-
 malloc/arena.c                  |  129 +++++++++++++++++++++--
 malloc/malloc.c                 |   11 ++-
 malloc/tst-malloc-thread-exit.c |  218 +++++++++++++++++++++++++++++++++++++++
 6 files changed, 418 insertions(+), 15 deletions(-)
 create mode 100644 malloc/tst-malloc-thread-exit.c


hooks/post-receive
-- 
GNU C Library master sources

