This is the mail archive of the
libc-alpha@sourceware.org
mailing list for the glibc project.
Re: [RFC 2/*] Remove atomic condition from fastbin.
- From: Ondřej Bílka <neleai at seznam dot cz>
- To: libc-alpha at sourceware dot org
- Date: Wed, 26 Feb 2014 22:28:44 +0100
- Subject: Re: [RFC 2/*] Remove atomic condition from fastbin.
- Authentication-results: sourceware.org; auth=none
- References: <20140226204908 dot GA25189 at domone dot podge>
On Wed, Feb 26, 2014 at 09:49:08PM +0100, Ondřej Bílka wrote:
> This as consequences makes lot of int_free code dead which could be
> removed. Then fastbin could use ordinary operations instead atomic ones.
>
After the previous change, fastbins will be accessed only while holding the
lock, so atomic operations are no longer necessary; we also do not need to
handle the !have_lock case in free.
We could then convert these to a per-thread cache that is periodically
refilled from the arena, which will come as a subsequent patch.
* malloc/malloc.c (_int_malloc): Replace atomic fastbin
operation with normal.
(_int_free): Likewise.
(_int_free): Remove dead code.
diff --git a/malloc/malloc.c b/malloc/malloc.c
index eea4ca6..516120f 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -3250,15 +3250,10 @@ _int_malloc (mstate av, size_t bytes)
{
idx = fastbin_index (nb);
mfastbinptr *fb = &fastbin (av, idx);
- mchunkptr pp = *fb;
- do
- {
- victim = pp;
- if (victim == NULL)
- break;
- }
- while ((pp = catomic_compare_and_exchange_val_acq (fb, victim->fd, victim))
- != victim);
+ victim = *fb;
+ if (victim)
+ *fb = victim->fd;
+
if (victim != 0)
{
if (__builtin_expect (fastbin_index (chunksize (victim)) != idx, 0))
@@ -3760,8 +3755,6 @@ _int_free (mstate av, mchunkptr p, int have_lock)
return;
}
-
-
/* Little security check which won't hurt performance: the
allocator never wrapps around at the end of the address space.
Therefore we can exclude some size values which might appear
@@ -3771,8 +3764,6 @@ _int_free (mstate av, mchunkptr p, int have_lock)
{
errstr = "free(): invalid pointer";
errout:
- if (!have_lock && locked)
- (void) mutex_unlock (&av->mutex);
malloc_printerr (check_action, errstr, chunk2mem (p));
return;
}
@@ -3806,25 +3797,8 @@ _int_free (mstate av, mchunkptr p, int have_lock)
|| __builtin_expect (chunksize (chunk_at_offset (p, size))
>= av->system_mem, 0))
{
- /* We might not have a lock at this point and concurrent modifications
- of system_mem might have let to a false positive. Redo the test
- after getting the lock. */
- if (have_lock
- || ({ assert (locked == 0);
- mutex_lock(&av->mutex);
- locked = 1;
- chunk_at_offset (p, size)->size <= 2 * SIZE_SZ
- || chunksize (chunk_at_offset (p, size)) >= av->system_mem;
- }))
- {
- errstr = "free(): invalid next size (fast)";
- goto errout;
- }
- if (! have_lock)
- {
- (void)mutex_unlock(&av->mutex);
- locked = 0;
- }
+ errstr = "free(): invalid next size (fast)";
+ goto errout;
}
free_perturb (chunk2mem(p), size - 2 * SIZE_SZ);
@@ -3833,33 +3807,8 @@ _int_free (mstate av, mchunkptr p, int have_lock)
unsigned int idx = fastbin_index(size);
fb = &fastbin (av, idx);
- /* Atomically link P to its fastbin: P->FD = *FB; *FB = P; */
- mchunkptr old = *fb, old2;
- unsigned int old_idx = ~0u;
- do
- {
- /* Check that the top of the bin is not the record we are going to add
- (i.e., double free). */
- if (__builtin_expect (old == p, 0))
- {
- errstr = "double free or corruption (fasttop)";
- goto errout;
- }
- /* Check that size of fastbin chunk at the top is the same as
- size of the chunk that we are adding. We can dereference OLD
- only if we have the lock, otherwise it might have already been
- deallocated. See use of OLD_IDX below for the actual check. */
- if (have_lock && old != NULL)
- old_idx = fastbin_index(chunksize(old));
- p->fd = old2 = old;
- }
- while ((old = catomic_compare_and_exchange_val_rel (fb, p, old2)) != old2);
-
- if (have_lock && old != NULL && __builtin_expect (old_idx != idx, 0))
- {
- errstr = "invalid fastbin entry (free)";
- goto errout;
- }
+ /* Link P to its fastbin. */
+ p->fd = *fb; *fb = p;
}
/*
@@ -3867,10 +3816,6 @@ _int_free (mstate av, mchunkptr p, int have_lock)
*/
else if (!chunk_is_mmapped(p)) {
- if (! have_lock) {
- (void)mutex_lock(&av->mutex);
- locked = 1;
- }
nextchunk = chunk_at_offset(p, size);
@@ -3998,11 +3943,6 @@ _int_free (mstate av, mchunkptr p, int have_lock)
heap_trim(heap, mp_.top_pad);
}
}
-
- if (! have_lock) {
- assert (locked);
- (void)mutex_unlock(&av->mutex);
- }
}
/*
If the chunk was allocated via mmap, release via munmap().