This is the mail archive of the libc-alpha@sourceware.org mailing list for the glibc project.


Index Nav: [Date Index] [Subject Index] [Author Index] [Thread Index]
Message Nav: [Date Prev] [Date Next] [Thread Prev] [Thread Next]
Other format: [Raw text]

[PATCH 3/5] Add single-threaded path to _int_free


This patch adds single-threaded fast paths to _int_free.
Move and simplify the consistency checks so that they don't
make the fastbin list update overly complex.  Bypass the
explicit locking for larger allocations.

Passes the glibc test suite. OK for commit?

ChangeLog:
2017-10-11  Wilco Dijkstra  <wdijkstr@arm.com>

	* malloc/malloc.c (_int_free): Add SINGLE_THREAD_P paths.

--

diff --git a/malloc/malloc.c b/malloc/malloc.c
index 13247c64e2be0779050dfbd0fd25f205ba7184f7..c00df205c6004ee5b5d0aee9ffd5130b3c8f9e9f 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -4188,24 +4188,29 @@ _int_free (mstate av, mchunkptr p, int have_lock)
 
     /* Atomically link P to its fastbin: P->FD = *FB; *FB = P;  */
     mchunkptr old = *fb, old2;
-    unsigned int old_idx = ~0u;
-    do
+
+    /* Check that the top of the bin is not the record we are going to
+       add (i.e., double free).  */
+    if (__builtin_expect (old == p, 0))
+      malloc_printerr ("double free or corruption (fasttop)");
+
+    if (SINGLE_THREAD_P)
       {
-	/* Check that the top of the bin is not the record we are going to add
-	   (i.e., double free).  */
-	if (__builtin_expect (old == p, 0))
-	  malloc_printerr ("double free or corruption (fasttop)");
-	/* Check that size of fastbin chunk at the top is the same as
-	   size of the chunk that we are adding.  We can dereference OLD
-	   only if we have the lock, otherwise it might have already been
-	   deallocated.  See use of OLD_IDX below for the actual check.  */
-	if (have_lock && old != NULL)
-	  old_idx = fastbin_index(chunksize(old));
-	p->fd = old2 = old;
+	p->fd = old;
+	*fb = p;
       }
-    while ((old = catomic_compare_and_exchange_val_rel (fb, p, old2)) != old2);
-
-    if (have_lock && old != NULL && __builtin_expect (old_idx != idx, 0))
+    else
+      do
+	p->fd = old2 = old;
+      while ((old = catomic_compare_and_exchange_val_rel (fb, p, old2))
+	     != old2);
+
+    /* Check that size of fastbin chunk at the top is the same as
+       size of the chunk that we are adding.  We can dereference OLD
+       only if we have the lock, otherwise it might have already been
+       allocated again.  */
+    if (have_lock && old != NULL
+	&& __builtin_expect (fastbin_index (chunksize (old)) != idx, 0))
       malloc_printerr ("invalid fastbin entry (free)");
   }
 
@@ -4214,7 +4219,7 @@ _int_free (mstate av, mchunkptr p, int have_lock)
   */
 
   else if (!chunk_is_mmapped(p)) {
-    if (!have_lock)
+    if (!SINGLE_THREAD_P && !have_lock)
       __libc_lock_lock (av->mutex);
 
     nextchunk = chunk_at_offset(p, size);
@@ -4329,7 +4334,7 @@ _int_free (mstate av, mchunkptr p, int have_lock)
       }
     }
 
-    if (!have_lock)
+    if (!SINGLE_THREAD_P && !have_lock)
       __libc_lock_unlock (av->mutex);
   }
   /*

Index Nav: [Date Index] [Subject Index] [Author Index] [Thread Index]
Message Nav: [Date Prev] [Date Next] [Thread Prev] [Thread Next]