This is the mail archive of the libc-alpha@sourceware.org mailing list for the glibc project.
[PATCH] malloc: always free objects locklessly
- From: Joern Engel <joern at purestorage dot com>
- To: "GNU C. Library" <libc-alpha at sourceware dot org>
- Cc: Siddhesh Poyarekar <siddhesh dot poyarekar at gmail dot com>, Joern Engel <joern at purestorage dot com>
- Date: Mon, 25 Jan 2016 16:25:15 -0800
- Subject: [PATCH] malloc: always free objects locklessly
- Authentication-results: sourceware.org; auth=none
- References: <1453767942-19369-1-git-send-email-joern at purestorage dot com>
tcache_free has become lockless, but so far it only covers objects
small enough for caching. Have it handle larger objects as well,
spreading the lockless goodness a bit further.
JIRA: PURE-27597
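
For reviewers who want to see the scheme outside of the malloc internals,
here is a minimal, self-contained sketch of the deferred-free pattern the
patch implements. Nothing in it is taken from glibc: struct arena, struct
node, arena_free(), drain_free_list() and lockless_free() are illustrative
stand-ins for the arena, the chunk, _int_free(), free_atomic_list() and the
new uncached_free path. Only the overall shape mirrors the patch: free
directly when the arena lock is uncontended (draining the deferred list
while we hold it), otherwise push the object onto a lock-free list with
compare-and-swap for the next lock holder to drain.

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct node {
		struct node *next;
	};

	struct arena {
		pthread_mutex_t mutex;
		struct node *atomic_free_list;	/* lock-free LIFO of deferred frees */
	};

	/* Actually release an object; stands in for _int_free(). */
	static void arena_free(struct arena *arena, struct node *p)
	{
		(void)arena;
		free(p);
	}

	/* Drain the deferred list; caller must hold arena->mutex. */
	static void drain_free_list(struct arena *arena)
	{
		struct node *victim, *next;

		/* Cheap non-atomic check first; a lost race is freed next time. */
		if (!arena->atomic_free_list)
			return;

		victim = __sync_lock_test_and_set(&arena->atomic_free_list, NULL);
		while (victim) {
			next = victim->next;
			arena_free(arena, victim);
			victim = next;
		}
	}

	/* Free without ever blocking on the arena lock. */
	static void lockless_free(struct arena *arena, struct node *p)
	{
		if (!pthread_mutex_trylock(&arena->mutex)) {
			/* Fast path: got the lock, free directly and drain. */
			arena_free(arena, p);
			drain_free_list(arena);
			pthread_mutex_unlock(&arena->mutex);
		} else {
			/* Slow path: push onto the lock-free list with CAS. */
			do {
				p->next = arena->atomic_free_list;
			} while (!__sync_bool_compare_and_swap(&arena->atomic_free_list,
							       p->next, p));
		}
	}

	int main(void)
	{
		struct arena arena = { PTHREAD_MUTEX_INITIALIZER, NULL };
		struct node *p = malloc(sizeof(*p));

		lockless_free(&arena, p);
		printf("deferred list after free: %p\n",
		       (void *)arena.atomic_free_list);
		return 0;
	}

Build with "gcc -pthread". The non-atomic check in drain_free_list() mirrors
the early return added to free_atomic_list() below: missing a concurrently
pushed chunk is harmless, since it will be picked up the next time the lock
is taken.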
---
tpc/malloc2.13/malloc.c | 11 +----------
tpc/malloc2.13/tcache.h | 32 ++++++++++++++++++++++++++------
2 files changed, 27 insertions(+), 16 deletions(-)
diff --git a/tpc/malloc2.13/malloc.c b/tpc/malloc2.13/malloc.c
index 90d0e7e552b9..022b9a4ce712 100644
--- a/tpc/malloc2.13/malloc.c
+++ b/tpc/malloc2.13/malloc.c
@@ -3293,7 +3293,6 @@ out:
void public_fREe(Void_t * mem)
{
- struct malloc_state *ar_ptr;
mchunkptr p; /* chunk corresponding to mem */
void (*hook) (__malloc_ptr_t, __const __malloc_ptr_t) = force_reg(dlfree_hook);
@@ -3312,15 +3311,7 @@ void public_fREe(Void_t * mem)
return;
}
- if (tcache_free(p)) {
- /* Object could be freed on fast path */
- return;
- }
-
- ar_ptr = arena_for_chunk(p);
- arena_lock(ar_ptr);
- _int_free(ar_ptr, p);
- arena_unlock(ar_ptr);
+ tcache_free(p);
}
Void_t*
diff --git a/tpc/malloc2.13/tcache.h b/tpc/malloc2.13/tcache.h
index 55bf3862af91..7ebbc139a6ca 100644
--- a/tpc/malloc2.13/tcache.h
+++ b/tpc/malloc2.13/tcache.h
@@ -108,6 +108,13 @@ static void free_atomic_list(struct malloc_state *arena)
{
struct malloc_chunk *victim, *next;
+ /*
+ * Check without using atomic first - if we lose the race we will
+ * free things next time around.
+ */
+ if (!arena->atomic_free_list)
+ return;
+
victim = __sync_lock_test_and_set(&arena->atomic_free_list, NULL);
while (victim) {
next = victim->fd;
@@ -290,19 +297,20 @@ static void *tcache_malloc(size_t size)
/*
* returns 1 if object was freed
*/
-static int tcache_free(mchunkptr p)
+static void tcache_free(mchunkptr p)
{
struct thread_cache *cache;
+ struct malloc_state *arena;
struct malloc_chunk **bin;
size_t size;
int bin_no;
tsd_getspecific(cache_key, cache);
if (!cache)
- return 0;
+ goto uncached_free;
size = chunksize(p);
if (size > MAX_CACHED_SIZE)
- return 0;
+ goto uncached_free;
bin_no = cache_bin(size);
assert(bin_no < CACHE_NO_BINS);
@@ -311,15 +319,27 @@ static int tcache_free(mchunkptr p)
bin = &cache->tc_bin[bin_no];
if (*bin == p) {
malloc_printerr(check_action, "double free or corruption (tcache)", chunk2mem(p));
- return 0;
+ return;
}
if (*bin && cache_bin(chunksize(*bin)) != bin_no) {
malloc_printerr(check_action, "invalid tcache entry", chunk2mem(p));
- return 0;
+ return;
}
p->fd = *bin;
*bin = p;
if (cache->tc_size > CACHE_SIZE)
tcache_gc(cache);
- return 1;
+ return;
+
+ uncached_free:
+ arena = arena_for_chunk(p);
+ if(!mutex_trylock(&arena->mutex)) {
+ _int_free(arena, p);
+ free_atomic_list(arena);
+ arena_unlock(arena);
+ } else {
+ do {
+ p->fd = arena->atomic_free_list;
+ } while (!__sync_bool_compare_and_swap(&arena->atomic_free_list, p->fd, p));
+ }
}
--
2.7.0.rc3