This is the mail archive of the libc-alpha@sourceware.org mailing list for the glibc project.
[PATCH] malloc: rename *.ch to *.h
- From: Joern Engel <joern at purestorage dot com>
- To: "GNU C. Library" <libc-alpha at sourceware dot org>
- Cc: Siddhesh Poyarekar <siddhesh dot poyarekar at gmail dot com>, Joern Engel <joern at purestorage dot org>
- Date: Mon, 25 Jan 2016 16:24:46 -0800
- Subject: [PATCH] malloc: rename *.ch to *.h
- Authentication-results: sourceware.org; auth=none
- References: <1453767942-19369-1-git-send-email-joern at purestorage dot com>
From: Joern Engel <joern@purestorage.org>
Editors will switch to C mode when editing *.c or *.h files, but not
when editing *.ch files. Renaming avoids constant annoyances.
JIRA: PURE-27597
---
tpc/malloc2.13/arena.ch | 1092 -----------------------------------------------
tpc/malloc2.13/arena.h | 1092 +++++++++++++++++++++++++++++++++++++++++++++++
tpc/malloc2.13/hooks.ch | 643 ----------------------------
tpc/malloc2.13/hooks.h | 643 ++++++++++++++++++++++++++++
tpc/malloc2.13/malloc.c | 4 +-
5 files changed, 1737 insertions(+), 1737 deletions(-)
delete mode 100644 tpc/malloc2.13/arena.ch
create mode 100644 tpc/malloc2.13/arena.h
delete mode 100644 tpc/malloc2.13/hooks.ch
create mode 100644 tpc/malloc2.13/hooks.h
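The malloc.c hunk falls outside this excerpt; judging from the diffstat, the four
changed lines presumably just repoint the two include directives at the renamed
headers, along these lines (the exact include names are assumed, not taken from
the hunk itself):

    /* Presumed includes in tpc/malloc2.13/malloc.c after the rename;
       the actual malloc.c hunk is not shown in this excerpt. */
    #include "arena.h"   /* previously "arena.ch" */
    #include "hooks.h"   /* previously "hooks.ch" */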
diff --git a/tpc/malloc2.13/arena.ch b/tpc/malloc2.13/arena.ch
deleted file mode 100644
index 0aaccb914d92..000000000000
--- a/tpc/malloc2.13/arena.ch
+++ /dev/null
@@ -1,1092 +0,0 @@
-/* Malloc implementation for multiple threads without lock contention.
- Copyright (C) 2001,2002,2003,2004,2005,2006,2007,2009,2010
- Free Software Foundation, Inc.
- This file is part of the GNU C Library.
- Contributed by Wolfram Gloger <wg@malloc.de>, 2001.
-
- The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public License as
- published by the Free Software Foundation; either version 2.1 of the
- License, or (at your option) any later version.
-
- The GNU C Library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; see the file COPYING.LIB. If not,
- write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
- Boston, MA 02111-1307, USA. */
-
-#include <stdbool.h>
-
-/* Compile-time constants. */
-
-#define HEAP_MIN_SIZE (32*1024)
-#ifndef HEAP_MAX_SIZE
-# ifdef DEFAULT_MMAP_THRESHOLD_MAX
-# define HEAP_MAX_SIZE (2 * DEFAULT_MMAP_THRESHOLD_MAX)
-# else
-# define HEAP_MAX_SIZE (1024*1024) /* must be a power of two */
-# endif
-#endif
-
-/* HEAP_MIN_SIZE and HEAP_MAX_SIZE limit the size of mmap()ed heaps
- that are dynamically created for multi-threaded programs. The
- maximum size must be a power of two, for fast determination of
- which heap belongs to a chunk. It should be much larger than the
- mmap threshold, so that requests with a size just below that
- threshold can be fulfilled without creating too many heaps. */
-
-
-#ifndef THREAD_STATS
-#define THREAD_STATS 0
-#endif
-
-/* If THREAD_STATS is non-zero, some statistics on mutex locking are
- computed. */
-
-/***************************************************************************/
-
-#define top(ar_ptr) ((ar_ptr)->top)
-
-/* A heap is a single contiguous memory region holding (coalesceable)
- malloc_chunks. It is allocated with mmap() and always starts at an
- address aligned to HEAP_MAX_SIZE. Not used unless compiling with
- USE_ARENAS. */
-
-typedef struct _heap_info {
- struct malloc_state * ar_ptr; /* Arena for this heap. */
- struct _heap_info *prev; /* Previous heap. */
- size_t size; /* Current size in bytes. */
- size_t mprotect_size; /* Size in bytes that has been mprotected
- PROT_READ|PROT_WRITE. */
- /* Make sure the following data is properly aligned, particularly
- that sizeof (heap_info) + 2 * SIZE_SZ is a multiple of
- MALLOC_ALIGNMENT. */
- char pad[-6 * SIZE_SZ & MALLOC_ALIGN_MASK];
-} heap_info;
-
-/* Get a compile-time error if the heap_info padding is not correct
- to make alignment work as expected in sYSMALLOc. */
-extern int sanity_check_heap_info_alignment[(sizeof (heap_info)
- + 2 * SIZE_SZ) % MALLOC_ALIGNMENT
- ? -1 : 1];
-
-/* Thread specific data */
-
-static tsd_key_t arena_key;
-static mutex_t list_lock;
-#ifdef PER_THREAD
-static size_t narenas;
-static struct malloc_state * free_list;
-#endif
-
-#if THREAD_STATS
-static int stat_n_heaps;
-#define THREAD_STAT(x) x
-#else
-#define THREAD_STAT(x) do ; while(0)
-#endif
-
-/* Mapped memory in non-main arenas (reliable only for NO_THREADS). */
-static unsigned long arena_mem;
-
-/* Already initialized? */
-static int __malloc_initialized = -1;
-
-/**************************************************************************/
-
-#if USE_ARENAS
-
-/* arena_get() acquires an arena and locks the corresponding mutex.
- First, try the one last locked successfully by this thread. (This
- is the common case and handled with a macro for speed.) Then, loop
- once over the circularly linked list of arenas. If no arena is
- readily available, create a new one. In this latter case, `size'
- is just a hint as to how much memory will be required immediately
- in the new arena. */
-
-#define arena_get(ptr, size) do { \
- arena_lookup(ptr); \
- arena_lock(ptr, size); \
-} while(0)
-
-#define arena_lookup(ptr) do { \
- Void_t *vptr = NULL; \
- ptr = (struct malloc_state *)tsd_getspecific(arena_key, vptr); \
-} while(0)
-
-#ifdef PER_THREAD
-#define arena_lock(ptr, size) do { \
- if(ptr) \
- (void)mutex_lock(&ptr->mutex); \
- else \
- ptr = arena_get2(ptr, (size)); \
-} while(0)
-#else
-#define arena_lock(ptr, size) do { \
- if(ptr && !mutex_trylock(&ptr->mutex)) { \
- THREAD_STAT(++(ptr->stat_lock_direct)); \
- } else \
- ptr = arena_get2(ptr, (size)); \
-} while(0)
-#endif
-
-/* find the heap and corresponding arena for a given ptr */
-
-#define heap_for_ptr(ptr) \
- ((heap_info *)((unsigned long)(ptr) & ~(HEAP_MAX_SIZE-1)))
-#define arena_for_chunk(ptr) \
- (chunk_non_main_arena(ptr) ? heap_for_ptr(ptr)->ar_ptr : &main_arena)
-
-#else /* !USE_ARENAS */
-
-/* There is only one arena, main_arena. */
-
-#if THREAD_STATS
-#define arena_get(ar_ptr, sz) do { \
- ar_ptr = &main_arena; \
- if(!mutex_trylock(&ar_ptr->mutex)) \
- ++(ar_ptr->stat_lock_direct); \
- else { \
- (void)mutex_lock(&ar_ptr->mutex); \
- ++(ar_ptr->stat_lock_wait); \
- } \
-} while(0)
-#else
-#define arena_get(ar_ptr, sz) do { \
- ar_ptr = &main_arena; \
- (void)mutex_lock(&ar_ptr->mutex); \
-} while(0)
-#endif
-#define arena_for_chunk(ptr) (&main_arena)
-
-#endif /* USE_ARENAS */
-
-/**************************************************************************/
-
-#ifndef NO_THREADS
-
-/* atfork support. */
-
-static __malloc_ptr_t (*save_malloc_hook) (size_t __size,
- __const __malloc_ptr_t);
-# if !defined _LIBC || (defined SHARED && !USE___THREAD)
-static __malloc_ptr_t (*save_memalign_hook) (size_t __align, size_t __size,
- __const __malloc_ptr_t);
-# endif
-static void (*save_free_hook) (__malloc_ptr_t __ptr,
- __const __malloc_ptr_t);
-static Void_t* save_arena;
-
-#ifdef ATFORK_MEM
-ATFORK_MEM;
-#endif
-
-/* Magic value for the thread-specific arena pointer when
- malloc_atfork() is in use. */
-
-#define ATFORK_ARENA_PTR ((Void_t*)-1)
-
-/* The following hooks are used while the `atfork' handling mechanism
- is active. */
-
-static Void_t*
-malloc_atfork(size_t sz, const Void_t *caller)
-{
- Void_t *vptr = NULL;
- Void_t *victim;
-
- tsd_getspecific(arena_key, vptr);
- if(vptr == ATFORK_ARENA_PTR) {
- /* We are the only thread that may allocate at all. */
- if(save_malloc_hook != malloc_check) {
- return _int_malloc(&main_arena, sz);
- } else {
- if(top_check()<0)
- return 0;
- victim = _int_malloc(&main_arena, sz+1);
- return mem2mem_check(victim, sz);
- }
- } else {
- /* Suspend the thread until the `atfork' handlers have completed.
- By that time, the hooks will have been reset as well, so that
- mALLOc() can be used again. */
- (void)mutex_lock(&list_lock);
- (void)mutex_unlock(&list_lock);
- return public_mALLOc(sz);
- }
-}
-
-static void
-free_atfork(Void_t* mem, const Void_t *caller)
-{
- Void_t *vptr = NULL;
- struct malloc_state * ar_ptr;
- mchunkptr p; /* chunk corresponding to mem */
-
- if (mem == 0) /* free(0) has no effect */
- return;
-
- p = mem2chunk(mem); /* do not bother to replicate free_check here */
-
-#if HAVE_MMAP
- if (chunk_is_mmapped(p)) /* release mmapped memory. */
- {
- munmap_chunk(p);
- return;
- }
-#endif
-
-#ifdef ATOMIC_FASTBINS
- ar_ptr = arena_for_chunk(p);
- tsd_getspecific(arena_key, vptr);
- _int_free(ar_ptr, p, vptr == ATFORK_ARENA_PTR);
-#else
- ar_ptr = arena_for_chunk(p);
- tsd_getspecific(arena_key, vptr);
- if(vptr != ATFORK_ARENA_PTR)
- (void)mutex_lock(&ar_ptr->mutex);
- _int_free(ar_ptr, p);
- if(vptr != ATFORK_ARENA_PTR)
- (void)mutex_unlock(&ar_ptr->mutex);
-#endif
-}
-
-
-/* Counter for number of times the list is locked by the same thread. */
-static unsigned int atfork_recursive_cntr;
-
-/* The following two functions are registered via thread_atfork() to
- make sure that the mutexes remain in a consistent state in the
- fork()ed version of a thread. Also adapt the malloc and free hooks
- temporarily, because the `atfork' handler mechanism may use
- malloc/free internally (e.g. in LinuxThreads). */
-
-static void
-ptmalloc_lock_all (void)
-{
- struct malloc_state * ar_ptr;
-
- if(__malloc_initialized < 1)
- return;
- if (mutex_trylock(&list_lock))
- {
- Void_t *my_arena;
- tsd_getspecific(arena_key, my_arena);
- if (my_arena == ATFORK_ARENA_PTR)
- /* This is the same thread which already locks the global list.
- Just bump the counter. */
- goto out;
-
- /* This thread has to wait its turn. */
- (void)mutex_lock(&list_lock);
- }
- for(ar_ptr = &main_arena;;) {
- (void)mutex_lock(&ar_ptr->mutex);
- ar_ptr = ar_ptr->next;
- if(ar_ptr == &main_arena) break;
- }
- save_malloc_hook = dlmalloc_hook;
- save_free_hook = dlfree_hook;
- dlmalloc_hook = malloc_atfork;
- dlfree_hook = free_atfork;
- /* Only the current thread may perform malloc/free calls now. */
- tsd_getspecific(arena_key, save_arena);
- tsd_setspecific(arena_key, ATFORK_ARENA_PTR);
- out:
- ++atfork_recursive_cntr;
-}
-
-static void
-ptmalloc_unlock_all (void)
-{
- struct malloc_state * ar_ptr;
-
- if(__malloc_initialized < 1)
- return;
- if (--atfork_recursive_cntr != 0)
- return;
- tsd_setspecific(arena_key, save_arena);
- dlmalloc_hook = save_malloc_hook;
- dlfree_hook = save_free_hook;
- for(ar_ptr = &main_arena;;) {
- (void)mutex_unlock(&ar_ptr->mutex);
- ar_ptr = ar_ptr->next;
- if(ar_ptr == &main_arena) break;
- }
- (void)mutex_unlock(&list_lock);
-}
-
-#ifdef __linux__
-
-/* In NPTL, unlocking a mutex in the child process after a
- fork() is currently unsafe, whereas re-initializing it is safe and
- does not leak resources. Therefore, a special atfork handler is
- installed for the child. */
-
-static void
-ptmalloc_unlock_all2 (void)
-{
- struct malloc_state * ar_ptr;
-
- if(__malloc_initialized < 1)
- return;
-#if defined _LIBC || defined MALLOC_HOOKS
- tsd_setspecific(arena_key, save_arena);
- dlmalloc_hook = save_malloc_hook;
- dlfree_hook = save_free_hook;
-#endif
-#ifdef PER_THREAD
- free_list = NULL;
-#endif
- for(ar_ptr = &main_arena;;) {
- mutex_init(&ar_ptr->mutex);
-#ifdef PER_THREAD
- if (ar_ptr != save_arena) {
- ar_ptr->next_free = free_list;
- free_list = ar_ptr;
- }
-#endif
- ar_ptr = ar_ptr->next;
- if(ar_ptr == &main_arena) break;
- }
- mutex_init(&list_lock);
- atfork_recursive_cntr = 0;
-}
-
-#else
-
-#define ptmalloc_unlock_all2 ptmalloc_unlock_all
-
-#endif
-
-#endif /* !defined NO_THREADS */
-
-/* Initialization routine. */
-#ifdef _LIBC
-#include <string.h>
-extern char **_environ;
-
-static char *
-internal_function
-next_env_entry (char ***position)
-{
- char **current = *position;
- char *result = NULL;
-
- while (*current != NULL)
- {
- if (__builtin_expect ((*current)[0] == 'M', 0)
- && (*current)[1] == 'A'
- && (*current)[2] == 'L'
- && (*current)[3] == 'L'
- && (*current)[4] == 'O'
- && (*current)[5] == 'C'
- && (*current)[6] == '_')
- {
- result = &(*current)[7];
-
- /* Save current position for next visit. */
- *position = ++current;
-
- break;
- }
-
- ++current;
- }
-
- return result;
-}
-#endif /* _LIBC */
-
-/* Set up basic state so that _int_malloc et al can work. */
-static void
-ptmalloc_init_minimal (void)
-{
-#if DEFAULT_TOP_PAD != 0
- mp_.top_pad = DEFAULT_TOP_PAD;
-#endif
- mp_.n_mmaps_max = DEFAULT_MMAP_MAX;
- mp_.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
- mp_.trim_threshold = DEFAULT_TRIM_THRESHOLD;
- mp_.pagesize = malloc_getpagesize;
-#ifdef PER_THREAD
-# define NARENAS_FROM_NCORES(n) ((n) * (sizeof(long) == 4 ? 2 : 8))
- mp_.arena_test = NARENAS_FROM_NCORES (1);
- narenas = 1;
-#endif
-}
-
-
-#ifdef _LIBC
-# ifdef SHARED
-static void *
-__failing_morecore (ptrdiff_t d)
-{
- return (void *) MORECORE_FAILURE;
-}
-
-extern struct dl_open_hook *_dl_open_hook;
-libc_hidden_proto (_dl_open_hook);
-# endif
-
-# if defined SHARED && !USE___THREAD
-/* This is called by __pthread_initialize_minimal when it needs to use
- malloc to set up the TLS state. We cannot do the full work of
- ptmalloc_init (below) until __pthread_initialize_minimal has finished,
- so it has to switch to using the special startup-time hooks while doing
- those allocations. */
-void
-__libc_malloc_pthread_startup (bool first_time)
-{
- if (first_time)
- {
- ptmalloc_init_minimal ();
- save_malloc_hook = dlmalloc_hook;
- save_memalign_hook = dlmemalign_hook;
- save_free_hook = dlfree_hook;
- dlmalloc_hook = malloc_starter;
- dlmemalign_hook = memalign_starter;
- dlfree_hook = free_starter;
- }
- else
- {
- dlmalloc_hook = save_malloc_hook;
- dlmemalign_hook = save_memalign_hook;
- dlfree_hook = save_free_hook;
- }
-}
-# endif
-#endif
-
-static void
-ptmalloc_init (void)
-{
- const char* s;
- int secure = 0;
-
- if(__malloc_initialized >= 0) return;
- __malloc_initialized = 0;
-
-#ifdef _LIBC
-# if defined SHARED && !USE___THREAD
- /* ptmalloc_init_minimal may already have been called via
- __libc_malloc_pthread_startup, above. */
- if (mp_.pagesize == 0)
-# endif
-#endif
- ptmalloc_init_minimal();
-
-#ifndef NO_THREADS
-# if defined _LIBC
- /* We know __pthread_initialize_minimal has already been called,
- and that is enough. */
-# define NO_STARTER
-# endif
-# ifndef NO_STARTER
- /* With some threads implementations, creating thread-specific data
- or initializing a mutex may call malloc() itself. Provide a
- simple starter version (realloc() won't work). */
- save_malloc_hook = dlmalloc_hook;
- save_memalign_hook = dlmemalign_hook;
- save_free_hook = dlfree_hook;
- dlmalloc_hook = malloc_starter;
- dlmemalign_hook = memalign_starter;
- dlfree_hook = free_starter;
-# ifdef _LIBC
- /* Initialize the pthreads interface. */
- if (__pthread_initialize != NULL)
- __pthread_initialize();
-# endif /* !defined _LIBC */
-# endif /* !defined NO_STARTER */
-#endif /* !defined NO_THREADS */
- mutex_init(&main_arena.mutex);
- main_arena.next = &main_arena;
-
-#if defined _LIBC && defined SHARED
- /* In case this libc copy is in a non-default namespace, never use brk.
- Likewise if dlopened from statically linked program. */
- Dl_info di;
- struct link_map *l;
-
- if (_dl_open_hook != NULL
- || (_dl_addr (ptmalloc_init, &di, &l, NULL) != 0
- && l->l_ns != LM_ID_BASE))
- __morecore = __failing_morecore;
-#endif
-
- mutex_init(&list_lock);
- tsd_key_create(&arena_key, NULL);
- tsd_setspecific(arena_key, (Void_t *)&main_arena);
- thread_atfork(ptmalloc_lock_all, ptmalloc_unlock_all, ptmalloc_unlock_all2);
-#ifndef NO_THREADS
-# ifndef NO_STARTER
- dlmalloc_hook = save_malloc_hook;
- dlmemalign_hook = save_memalign_hook;
- dlfree_hook = save_free_hook;
-# else
-# undef NO_STARTER
-# endif
-#endif
-#ifdef _LIBC
- secure = __libc_enable_secure;
- s = NULL;
- if (__builtin_expect (_environ != NULL, 1))
- {
- char **runp = _environ;
- char *envline;
-
- while (__builtin_expect ((envline = next_env_entry (&runp)) != NULL,
- 0))
- {
- size_t len = strcspn (envline, "=");
-
- if (envline[len] != '=')
- /* This is a "MALLOC_" variable at the end of the string
- without a '=' character. Ignore it since otherwise we
- will access invalid memory below. */
- continue;
-
- switch (len)
- {
- case 6:
- if (memcmp (envline, "CHECK_", 6) == 0)
- s = &envline[7];
- break;
- case 8:
- if (! secure)
- {
- if (memcmp (envline, "TOP_PAD_", 8) == 0)
- mALLOPt(M_TOP_PAD, atoi(&envline[9]));
- else if (memcmp (envline, "PERTURB_", 8) == 0)
- mALLOPt(M_PERTURB, atoi(&envline[9]));
- }
- break;
- case 9:
- if (! secure)
- {
- if (memcmp (envline, "MMAP_MAX_", 9) == 0)
- mALLOPt(M_MMAP_MAX, atoi(&envline[10]));
-#ifdef PER_THREAD
- else if (memcmp (envline, "ARENA_MAX", 9) == 0)
- mALLOPt(M_ARENA_MAX, atoi(&envline[10]));
-#endif
- }
- break;
-#ifdef PER_THREAD
- case 10:
- if (! secure)
- {
- if (memcmp (envline, "ARENA_TEST", 10) == 0)
- mALLOPt(M_ARENA_TEST, atoi(&envline[11]));
- }
- break;
-#endif
- case 15:
- if (! secure)
- {
- if (memcmp (envline, "TRIM_THRESHOLD_", 15) == 0)
- mALLOPt(M_TRIM_THRESHOLD, atoi(&envline[16]));
- else if (memcmp (envline, "MMAP_THRESHOLD_", 15) == 0)
- mALLOPt(M_MMAP_THRESHOLD, atoi(&envline[16]));
- }
- break;
- default:
- break;
- }
- }
- }
-#else
- if (! secure)
- {
- if((s = getenv("MALLOC_TRIM_THRESHOLD_")))
- mALLOPt(M_TRIM_THRESHOLD, atoi(s));
- if((s = getenv("MALLOC_TOP_PAD_")))
- mALLOPt(M_TOP_PAD, atoi(s));
- if((s = getenv("MALLOC_PERTURB_")))
- mALLOPt(M_PERTURB, atoi(s));
- if((s = getenv("MALLOC_MMAP_THRESHOLD_")))
- mALLOPt(M_MMAP_THRESHOLD, atoi(s));
- if((s = getenv("MALLOC_MMAP_MAX_")))
- mALLOPt(M_MMAP_MAX, atoi(s));
- }
- s = getenv("MALLOC_CHECK_");
-#endif
- if(s && s[0]) {
- mALLOPt(M_CHECK_ACTION, (int)(s[0] - '0'));
- if (check_action != 0)
- dlmalloc_check_init();
- }
- void (*hook) (void) = force_reg (dlmalloc_initialize_hook);
- if (hook != NULL)
- (*hook)();
- __malloc_initialized = 1;
-}
-
-/* There are platforms (e.g. Hurd) with a link-time hook mechanism. */
-#ifdef thread_atfork_static
-thread_atfork_static(ptmalloc_lock_all, ptmalloc_unlock_all, \
- ptmalloc_unlock_all2)
-#endif
-
-
-
-/* Managing heaps and arenas (for concurrent threads) */
-
-#if USE_ARENAS
-
-#if MALLOC_DEBUG > 1
-
-/* Print the complete contents of a single heap to stderr. */
-
-static void
-dump_heap(heap_info *heap)
-{
- char *ptr;
- mchunkptr p;
-
- fprintf(stderr, "Heap %p, size %10lx:\n", heap, (long)heap->size);
- ptr = (heap->ar_ptr != (struct malloc_state *)(heap+1)) ?
- (char*)(heap + 1) : (char*)(heap + 1) + sizeof(struct malloc_state);
- p = (mchunkptr)(((unsigned long)ptr + MALLOC_ALIGN_MASK) &
- ~MALLOC_ALIGN_MASK);
- for(;;) {
- fprintf(stderr, "chunk %p size %10lx", p, (long)p->size);
- if(p == top(heap->ar_ptr)) {
- fprintf(stderr, " (top)\n");
- break;
- } else if(p->size == (0|PREV_INUSE)) {
- fprintf(stderr, " (fence)\n");
- break;
- }
- fprintf(stderr, "\n");
- p = next_chunk(p);
- }
-}
-
-#endif /* MALLOC_DEBUG > 1 */
-
-/* If consecutive mmap (0, HEAP_MAX_SIZE << 1, ...) calls return decreasing
- addresses as opposed to increasing, new_heap would badly fragment the
- address space. In that case remember the second HEAP_MAX_SIZE part
- aligned to HEAP_MAX_SIZE from last mmap (0, HEAP_MAX_SIZE << 1, ...)
- call (if it is already aligned) and try to reuse it next time. We need
- no locking for it, as kernel ensures the atomicity for us - worst case
- we'll call mmap (addr, HEAP_MAX_SIZE, ...) for some value of addr in
- multiple threads, but only one will succeed. */
-static char *aligned_heap_area;
-
-static void *mmap_for_heap(void *addr, size_t length, int *must_clear)
-{
- int prot = PROT_READ | PROT_WRITE;
- int flags = MAP_PRIVATE;
- void *ret;
-
- ret = MMAP(addr, length, prot, flags | MAP_HUGETLB);
- if (ret != MAP_FAILED) {
- *must_clear = 1;
- return ret;
- }
- *must_clear = 0;
- return MMAP(addr, length, prot, flags | MAP_NORESERVE);
-}
-
-/* Create a new heap. size is automatically rounded up to a multiple
- of the page size. */
-static heap_info *new_heap(size_t size, size_t top_pad)
-{
- size_t page_mask = malloc_getpagesize - 1;
- char *p1, *p2;
- unsigned long ul;
- heap_info *h;
- int must_clear;
-
- if (size + top_pad < HEAP_MIN_SIZE)
- size = HEAP_MIN_SIZE;
- else if (size + top_pad <= HEAP_MAX_SIZE)
- size += top_pad;
- else if (size > HEAP_MAX_SIZE)
- return 0;
- else
- size = HEAP_MAX_SIZE;
- size = (size + page_mask) & ~page_mask;
-
- /* A memory region aligned to a multiple of HEAP_MAX_SIZE is needed.
- No swap space needs to be reserved for the following large
- mapping (on Linux, this is the case for all non-writable mappings
- anyway). */
- p2 = MAP_FAILED;
- if (aligned_heap_area) {
- p2 = mmap_for_heap(aligned_heap_area, HEAP_MAX_SIZE, &must_clear);
- aligned_heap_area = NULL;
- if (p2 != MAP_FAILED && ((unsigned long)p2 & (HEAP_MAX_SIZE - 1))) {
- munmap(p2, HEAP_MAX_SIZE);
- p2 = MAP_FAILED;
- }
- }
- if (p2 == MAP_FAILED) {
- p1 = mmap_for_heap(0, HEAP_MAX_SIZE << 1, &must_clear);
- if (p1 != MAP_FAILED) {
- p2 = (char *)(((unsigned long)p1 + (HEAP_MAX_SIZE - 1))
- & ~(HEAP_MAX_SIZE - 1));
- ul = p2 - p1;
- if (ul)
- munmap(p1, ul);
- else
- aligned_heap_area = p2 + HEAP_MAX_SIZE;
- munmap(p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);
- } else {
- /* Try to take the chance that an allocation of only HEAP_MAX_SIZE
- is already aligned. */
- p2 = mmap_for_heap(0, HEAP_MAX_SIZE, &must_clear);
- if (p2 == MAP_FAILED)
- return 0;
- if ((unsigned long)p2 & (HEAP_MAX_SIZE - 1)) {
- munmap(p2, HEAP_MAX_SIZE);
- return 0;
- }
- }
- }
- if (must_clear)
- memset(p2, 0, HEAP_MAX_SIZE);
- h = (heap_info *) p2;
- h->size = size;
- h->mprotect_size = size;
- THREAD_STAT(stat_n_heaps++);
- return h;
-}
-
-/* Grow a heap. size is automatically rounded up to a
- multiple of the page size. */
-
-static int
-grow_heap(heap_info *h, long diff)
-{
- size_t page_mask = malloc_getpagesize - 1;
- long new_size;
-
- diff = (diff + page_mask) & ~page_mask;
- new_size = (long)h->size + diff;
- if((unsigned long) new_size > (unsigned long) HEAP_MAX_SIZE)
- return -1;
- if((unsigned long) new_size > h->mprotect_size) {
- h->mprotect_size = new_size;
- }
-
- h->size = new_size;
- return 0;
-}
-
-/* Shrink a heap. */
-
-static int
-shrink_heap(heap_info *h, long diff)
-{
- long new_size;
-
- new_size = (long)h->size - diff;
- if(new_size < (long)sizeof(*h))
- return -1;
- /* Try to re-map the extra heap space freshly to save memory, and
- make it inaccessible. */
- madvise ((char *)h + new_size, diff, MADV_DONTNEED);
- /*fprintf(stderr, "shrink %p %08lx\n", h, new_size);*/
-
- h->size = new_size;
- return 0;
-}
-
-/* Delete a heap. */
-
-#define delete_heap(heap) \
- do { \
- if ((char *)(heap) + HEAP_MAX_SIZE == aligned_heap_area) \
- aligned_heap_area = NULL; \
- munmap((char*)(heap), HEAP_MAX_SIZE); \
- } while (0)
-
-static int
-internal_function
-heap_trim(heap_info *heap, size_t pad)
-{
- struct malloc_state * ar_ptr = heap->ar_ptr;
- unsigned long pagesz = mp_.pagesize;
- mchunkptr top_chunk = top(ar_ptr), p, bck, fwd;
- heap_info *prev_heap;
- long new_size, top_size, extra;
-
- /* Can this heap go away completely? */
- while(top_chunk == chunk_at_offset(heap, sizeof(*heap))) {
- prev_heap = heap->prev;
- p = chunk_at_offset(prev_heap, prev_heap->size - (MINSIZE-2*SIZE_SZ));
- assert(p->size == (0|PREV_INUSE)); /* must be fencepost */
- p = prev_chunk(p);
- new_size = chunksize(p) + (MINSIZE-2*SIZE_SZ);
- assert(new_size>0 && new_size<(long)(2*MINSIZE));
- if(!prev_inuse(p))
- new_size += p->prev_size;
- assert(new_size>0 && new_size<HEAP_MAX_SIZE);
- if(new_size + (HEAP_MAX_SIZE - prev_heap->size) < pad + MINSIZE + pagesz)
- break;
- ar_ptr->system_mem -= heap->size;
- arena_mem -= heap->size;
- delete_heap(heap);
- heap = prev_heap;
- if(!prev_inuse(p)) { /* consolidate backward */
- p = prev_chunk(p);
- unlink(p, bck, fwd);
- }
- assert(((unsigned long)((char*)p + new_size) & (pagesz-1)) == 0);
- assert( ((char*)p + new_size) == ((char*)heap + heap->size) );
- top(ar_ptr) = top_chunk = p;
- set_head(top_chunk, new_size | PREV_INUSE);
- /*check_chunk(ar_ptr, top_chunk);*/
- }
- top_size = chunksize(top_chunk);
- extra = (top_size - pad - MINSIZE - 1) & ~(pagesz - 1);
- if(extra < (long)pagesz)
- return 0;
- /* Try to shrink. */
- if(shrink_heap(heap, extra) != 0)
- return 0;
- ar_ptr->system_mem -= extra;
- arena_mem -= extra;
-
- /* Success. Adjust top accordingly. */
- set_head(top_chunk, (top_size - extra) | PREV_INUSE);
- /*check_chunk(ar_ptr, top_chunk);*/
- return 1;
-}
-
-/* Create a new arena with initial size "size". */
-
-static struct malloc_state *
-_int_new_arena(size_t size)
-{
- struct malloc_state * a;
- heap_info *h;
- char *ptr;
- unsigned long misalign;
-
- h = new_heap(size + (sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT),
- mp_.top_pad);
- if(!h) {
- /* Maybe size is too large to fit in a single heap. So, just try
- to create a minimally-sized arena and let _int_malloc() attempt
- to deal with the large request via mmap_chunk(). */
- h = new_heap(sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT, mp_.top_pad);
- if(!h)
- return 0;
- }
- a = h->ar_ptr = (struct malloc_state *)(h+1);
- malloc_init_state(a);
- /*a->next = NULL;*/
- a->system_mem = a->max_system_mem = h->size;
- arena_mem += h->size;
-#ifdef NO_THREADS
- if((unsigned long)(mp_.mmapped_mem + arena_mem + main_arena.system_mem) >
- mp_.max_total_mem)
- mp_.max_total_mem = mp_.mmapped_mem + arena_mem + main_arena.system_mem;
-#endif
-
- /* Set up the top chunk, with proper alignment. */
- ptr = (char *)(a + 1);
- misalign = (unsigned long)chunk2mem(ptr) & MALLOC_ALIGN_MASK;
- if (misalign > 0)
- ptr += MALLOC_ALIGNMENT - misalign;
- top(a) = (mchunkptr)ptr;
- set_head(top(a), (((char*)h + h->size) - ptr) | PREV_INUSE);
-
- tsd_setspecific(arena_key, (Void_t *)a);
- mutex_init(&a->mutex);
- (void)mutex_lock(&a->mutex);
-
-#ifdef PER_THREAD
- (void)mutex_lock(&list_lock);
-#endif
-
- /* Add the new arena to the global list. */
- a->next = main_arena.next;
- atomic_write_barrier ();
- main_arena.next = a;
-
-#ifdef PER_THREAD
- ++narenas;
-
- (void)mutex_unlock(&list_lock);
-#endif
-
- THREAD_STAT(++(a->stat_lock_loop));
-
- return a;
-}
-
-
-#ifdef PER_THREAD
-static struct malloc_state *
-get_free_list (void)
-{
- struct malloc_state * result = free_list;
- if (result != NULL)
- {
- (void)mutex_lock(&list_lock);
- result = free_list;
- if (result != NULL)
- free_list = result->next_free;
- (void)mutex_unlock(&list_lock);
-
- if (result != NULL)
- {
- (void)mutex_lock(&result->mutex);
- tsd_setspecific(arena_key, (Void_t *)result);
- THREAD_STAT(++(result->stat_lock_loop));
- }
- }
-
- return result;
-}
-
-
-static struct malloc_state *
-reused_arena (void)
-{
- if (narenas <= mp_.arena_test)
- return NULL;
-
- static int narenas_limit;
- if (narenas_limit == 0)
- {
- if (mp_.arena_max != 0)
- narenas_limit = mp_.arena_max;
- else
- {
- int n = __get_nprocs ();
-
- if (n >= 1)
- narenas_limit = NARENAS_FROM_NCORES (n);
- else
- /* We have no information about the system. Assume two
- cores. */
- narenas_limit = NARENAS_FROM_NCORES (2);
- }
- }
-
- if (narenas < narenas_limit)
- return NULL;
-
- struct malloc_state * result;
- static struct malloc_state * next_to_use;
- if (next_to_use == NULL)
- next_to_use = &main_arena;
-
- result = next_to_use;
- do
- {
- if (!mutex_trylock(&result->mutex))
- goto out;
-
- result = result->next;
- }
- while (result != next_to_use);
-
- /* No arena available. Wait for the next in line. */
- (void)mutex_lock(&result->mutex);
-
- out:
- tsd_setspecific(arena_key, (Void_t *)result);
- THREAD_STAT(++(result->stat_lock_loop));
- next_to_use = result->next;
-
- return result;
-}
-#endif
-
-static struct malloc_state *
-internal_function
-arena_get2(struct malloc_state * a_tsd, size_t size)
-{
- struct malloc_state * a;
-
-#ifdef PER_THREAD
- if ((a = get_free_list ()) == NULL
- && (a = reused_arena ()) == NULL)
- /* Nothing immediately available, so generate a new arena. */
- a = _int_new_arena(size);
-#else
- if(!a_tsd)
- a = a_tsd = &main_arena;
- else {
- a = a_tsd->next;
- if(!a) {
- /* This can only happen while initializing the new arena. */
- (void)mutex_lock(&main_arena.mutex);
- THREAD_STAT(++(main_arena.stat_lock_wait));
- return &main_arena;
- }
- }
-
- /* Check the global, circularly linked list for available arenas. */
- bool retried = false;
- repeat:
- do {
- if(!mutex_trylock(&a->mutex)) {
- if (retried)
- (void)mutex_unlock(&list_lock);
- THREAD_STAT(++(a->stat_lock_loop));
- tsd_setspecific(arena_key, (Void_t *)a);
- return a;
- }
- a = a->next;
- } while(a != a_tsd);
-
- /* If not even the list_lock can be obtained, try again. This can
- happen during `atfork', or for example on systems where thread
- creation makes it temporarily impossible to obtain _any_
- locks. */
- if(!retried && mutex_trylock(&list_lock)) {
- /* We will block to not run in a busy loop. */
- (void)mutex_lock(&list_lock);
-
- /* Since we blocked there might be an arena available now. */
- retried = true;
- a = a_tsd;
- goto repeat;
- }
-
- /* Nothing immediately available, so generate a new arena. */
- a = _int_new_arena(size);
- (void)mutex_unlock(&list_lock);
-#endif
-
- return a;
-}
-
-#ifdef PER_THREAD
-static void __attribute__ ((section ("__libc_thread_freeres_fn")))
-arena_thread_freeres (void)
-{
- Void_t *vptr = NULL;
- struct malloc_state * a = tsd_getspecific(arena_key, vptr);
- tsd_setspecific(arena_key, NULL);
-
- if (a != NULL)
- {
- (void)mutex_lock(&list_lock);
- a->next_free = free_list;
- free_list = a;
- (void)mutex_unlock(&list_lock);
- }
-}
-text_set_element (__libc_thread_subfreeres, arena_thread_freeres);
-#endif
-
-#endif /* USE_ARENAS */
-
-/*
- * Local variables:
- * c-basic-offset: 2
- * End:
- */
diff --git a/tpc/malloc2.13/arena.h b/tpc/malloc2.13/arena.h
new file mode 100644
index 000000000000..0aaccb914d92
--- /dev/null
+++ b/tpc/malloc2.13/arena.h
@@ -0,0 +1,1092 @@
+/* Malloc implementation for multiple threads without lock contention.
+ Copyright (C) 2001,2002,2003,2004,2005,2006,2007,2009,2010
+ Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Wolfram Gloger <wg@malloc.de>, 2001.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public License as
+ published by the Free Software Foundation; either version 2.1 of the
+ License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; see the file COPYING.LIB. If not,
+ write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ Boston, MA 02111-1307, USA. */
+
+#include <stdbool.h>
+
+/* Compile-time constants. */
+
+#define HEAP_MIN_SIZE (32*1024)
+#ifndef HEAP_MAX_SIZE
+# ifdef DEFAULT_MMAP_THRESHOLD_MAX
+# define HEAP_MAX_SIZE (2 * DEFAULT_MMAP_THRESHOLD_MAX)
+# else
+# define HEAP_MAX_SIZE (1024*1024) /* must be a power of two */
+# endif
+#endif
+
+/* HEAP_MIN_SIZE and HEAP_MAX_SIZE limit the size of mmap()ed heaps
+ that are dynamically created for multi-threaded programs. The
+ maximum size must be a power of two, for fast determination of
+ which heap belongs to a chunk. It should be much larger than the
+ mmap threshold, so that requests with a size just below that
+ threshold can be fulfilled without creating too many heaps. */
+
+
+#ifndef THREAD_STATS
+#define THREAD_STATS 0
+#endif
+
+/* If THREAD_STATS is non-zero, some statistics on mutex locking are
+ computed. */
+
+/***************************************************************************/
+
+#define top(ar_ptr) ((ar_ptr)->top)
+
+/* A heap is a single contiguous memory region holding (coalesceable)
+ malloc_chunks. It is allocated with mmap() and always starts at an
+ address aligned to HEAP_MAX_SIZE. Not used unless compiling with
+ USE_ARENAS. */
+
+typedef struct _heap_info {
+ struct malloc_state * ar_ptr; /* Arena for this heap. */
+ struct _heap_info *prev; /* Previous heap. */
+ size_t size; /* Current size in bytes. */
+ size_t mprotect_size; /* Size in bytes that has been mprotected
+ PROT_READ|PROT_WRITE. */
+ /* Make sure the following data is properly aligned, particularly
+ that sizeof (heap_info) + 2 * SIZE_SZ is a multiple of
+ MALLOC_ALIGNMENT. */
+ char pad[-6 * SIZE_SZ & MALLOC_ALIGN_MASK];
+} heap_info;
+
+/* Get a compile-time error if the heap_info padding is not correct
+ to make alignment work as expected in sYSMALLOc. */
+extern int sanity_check_heap_info_alignment[(sizeof (heap_info)
+ + 2 * SIZE_SZ) % MALLOC_ALIGNMENT
+ ? -1 : 1];
+
+/* Thread specific data */
+
+static tsd_key_t arena_key;
+static mutex_t list_lock;
+#ifdef PER_THREAD
+static size_t narenas;
+static struct malloc_state * free_list;
+#endif
+
+#if THREAD_STATS
+static int stat_n_heaps;
+#define THREAD_STAT(x) x
+#else
+#define THREAD_STAT(x) do ; while(0)
+#endif
+
+/* Mapped memory in non-main arenas (reliable only for NO_THREADS). */
+static unsigned long arena_mem;
+
+/* Already initialized? */
+static int __malloc_initialized = -1;
+
+/**************************************************************************/
+
+#if USE_ARENAS
+
+/* arena_get() acquires an arena and locks the corresponding mutex.
+ First, try the one last locked successfully by this thread. (This
+ is the common case and handled with a macro for speed.) Then, loop
+ once over the circularly linked list of arenas. If no arena is
+ readily available, create a new one. In this latter case, `size'
+ is just a hint as to how much memory will be required immediately
+ in the new arena. */
+
+#define arena_get(ptr, size) do { \
+ arena_lookup(ptr); \
+ arena_lock(ptr, size); \
+} while(0)
+
+#define arena_lookup(ptr) do { \
+ Void_t *vptr = NULL; \
+ ptr = (struct malloc_state *)tsd_getspecific(arena_key, vptr); \
+} while(0)
+
+#ifdef PER_THREAD
+#define arena_lock(ptr, size) do { \
+ if(ptr) \
+ (void)mutex_lock(&ptr->mutex); \
+ else \
+ ptr = arena_get2(ptr, (size)); \
+} while(0)
+#else
+#define arena_lock(ptr, size) do { \
+ if(ptr && !mutex_trylock(&ptr->mutex)) { \
+ THREAD_STAT(++(ptr->stat_lock_direct)); \
+ } else \
+ ptr = arena_get2(ptr, (size)); \
+} while(0)
+#endif
+
+/* find the heap and corresponding arena for a given ptr */
+
+#define heap_for_ptr(ptr) \
+ ((heap_info *)((unsigned long)(ptr) & ~(HEAP_MAX_SIZE-1)))
+#define arena_for_chunk(ptr) \
+ (chunk_non_main_arena(ptr) ? heap_for_ptr(ptr)->ar_ptr : &main_arena)
+
+#else /* !USE_ARENAS */
+
+/* There is only one arena, main_arena. */
+
+#if THREAD_STATS
+#define arena_get(ar_ptr, sz) do { \
+ ar_ptr = &main_arena; \
+ if(!mutex_trylock(&ar_ptr->mutex)) \
+ ++(ar_ptr->stat_lock_direct); \
+ else { \
+ (void)mutex_lock(&ar_ptr->mutex); \
+ ++(ar_ptr->stat_lock_wait); \
+ } \
+} while(0)
+#else
+#define arena_get(ar_ptr, sz) do { \
+ ar_ptr = &main_arena; \
+ (void)mutex_lock(&ar_ptr->mutex); \
+} while(0)
+#endif
+#define arena_for_chunk(ptr) (&main_arena)
+
+#endif /* USE_ARENAS */
+
+/**************************************************************************/
+
+#ifndef NO_THREADS
+
+/* atfork support. */
+
+static __malloc_ptr_t (*save_malloc_hook) (size_t __size,
+ __const __malloc_ptr_t);
+# if !defined _LIBC || (defined SHARED && !USE___THREAD)
+static __malloc_ptr_t (*save_memalign_hook) (size_t __align, size_t __size,
+ __const __malloc_ptr_t);
+# endif
+static void (*save_free_hook) (__malloc_ptr_t __ptr,
+ __const __malloc_ptr_t);
+static Void_t* save_arena;
+
+#ifdef ATFORK_MEM
+ATFORK_MEM;
+#endif
+
+/* Magic value for the thread-specific arena pointer when
+ malloc_atfork() is in use. */
+
+#define ATFORK_ARENA_PTR ((Void_t*)-1)
+
+/* The following hooks are used while the `atfork' handling mechanism
+ is active. */
+
+static Void_t*
+malloc_atfork(size_t sz, const Void_t *caller)
+{
+ Void_t *vptr = NULL;
+ Void_t *victim;
+
+ tsd_getspecific(arena_key, vptr);
+ if(vptr == ATFORK_ARENA_PTR) {
+ /* We are the only thread that may allocate at all. */
+ if(save_malloc_hook != malloc_check) {
+ return _int_malloc(&main_arena, sz);
+ } else {
+ if(top_check()<0)
+ return 0;
+ victim = _int_malloc(&main_arena, sz+1);
+ return mem2mem_check(victim, sz);
+ }
+ } else {
+ /* Suspend the thread until the `atfork' handlers have completed.
+ By that time, the hooks will have been reset as well, so that
+ mALLOc() can be used again. */
+ (void)mutex_lock(&list_lock);
+ (void)mutex_unlock(&list_lock);
+ return public_mALLOc(sz);
+ }
+}
+
+static void
+free_atfork(Void_t* mem, const Void_t *caller)
+{
+ Void_t *vptr = NULL;
+ struct malloc_state * ar_ptr;
+ mchunkptr p; /* chunk corresponding to mem */
+
+ if (mem == 0) /* free(0) has no effect */
+ return;
+
+ p = mem2chunk(mem); /* do not bother to replicate free_check here */
+
+#if HAVE_MMAP
+ if (chunk_is_mmapped(p)) /* release mmapped memory. */
+ {
+ munmap_chunk(p);
+ return;
+ }
+#endif
+
+#ifdef ATOMIC_FASTBINS
+ ar_ptr = arena_for_chunk(p);
+ tsd_getspecific(arena_key, vptr);
+ _int_free(ar_ptr, p, vptr == ATFORK_ARENA_PTR);
+#else
+ ar_ptr = arena_for_chunk(p);
+ tsd_getspecific(arena_key, vptr);
+ if(vptr != ATFORK_ARENA_PTR)
+ (void)mutex_lock(&ar_ptr->mutex);
+ _int_free(ar_ptr, p);
+ if(vptr != ATFORK_ARENA_PTR)
+ (void)mutex_unlock(&ar_ptr->mutex);
+#endif
+}
+
+
+/* Counter for number of times the list is locked by the same thread. */
+static unsigned int atfork_recursive_cntr;
+
+/* The following two functions are registered via thread_atfork() to
+ make sure that the mutexes remain in a consistent state in the
+ fork()ed version of a thread. Also adapt the malloc and free hooks
+ temporarily, because the `atfork' handler mechanism may use
+ malloc/free internally (e.g. in LinuxThreads). */
+
+static void
+ptmalloc_lock_all (void)
+{
+ struct malloc_state * ar_ptr;
+
+ if(__malloc_initialized < 1)
+ return;
+ if (mutex_trylock(&list_lock))
+ {
+ Void_t *my_arena;
+ tsd_getspecific(arena_key, my_arena);
+ if (my_arena == ATFORK_ARENA_PTR)
+ /* This is the same thread which already locks the global list.
+ Just bump the counter. */
+ goto out;
+
+ /* This thread has to wait its turn. */
+ (void)mutex_lock(&list_lock);
+ }
+ for(ar_ptr = &main_arena;;) {
+ (void)mutex_lock(&ar_ptr->mutex);
+ ar_ptr = ar_ptr->next;
+ if(ar_ptr == &main_arena) break;
+ }
+ save_malloc_hook = dlmalloc_hook;
+ save_free_hook = dlfree_hook;
+ dlmalloc_hook = malloc_atfork;
+ dlfree_hook = free_atfork;
+ /* Only the current thread may perform malloc/free calls now. */
+ tsd_getspecific(arena_key, save_arena);
+ tsd_setspecific(arena_key, ATFORK_ARENA_PTR);
+ out:
+ ++atfork_recursive_cntr;
+}
+
+static void
+ptmalloc_unlock_all (void)
+{
+ struct malloc_state * ar_ptr;
+
+ if(__malloc_initialized < 1)
+ return;
+ if (--atfork_recursive_cntr != 0)
+ return;
+ tsd_setspecific(arena_key, save_arena);
+ dlmalloc_hook = save_malloc_hook;
+ dlfree_hook = save_free_hook;
+ for(ar_ptr = &main_arena;;) {
+ (void)mutex_unlock(&ar_ptr->mutex);
+ ar_ptr = ar_ptr->next;
+ if(ar_ptr == &main_arena) break;
+ }
+ (void)mutex_unlock(&list_lock);
+}
+
+#ifdef __linux__
+
+/* In NPTL, unlocking a mutex in the child process after a
+ fork() is currently unsafe, whereas re-initializing it is safe and
+ does not leak resources. Therefore, a special atfork handler is
+ installed for the child. */
+
+static void
+ptmalloc_unlock_all2 (void)
+{
+ struct malloc_state * ar_ptr;
+
+ if(__malloc_initialized < 1)
+ return;
+#if defined _LIBC || defined MALLOC_HOOKS
+ tsd_setspecific(arena_key, save_arena);
+ dlmalloc_hook = save_malloc_hook;
+ dlfree_hook = save_free_hook;
+#endif
+#ifdef PER_THREAD
+ free_list = NULL;
+#endif
+ for(ar_ptr = &main_arena;;) {
+ mutex_init(&ar_ptr->mutex);
+#ifdef PER_THREAD
+ if (ar_ptr != save_arena) {
+ ar_ptr->next_free = free_list;
+ free_list = ar_ptr;
+ }
+#endif
+ ar_ptr = ar_ptr->next;
+ if(ar_ptr == &main_arena) break;
+ }
+ mutex_init(&list_lock);
+ atfork_recursive_cntr = 0;
+}
+
+#else
+
+#define ptmalloc_unlock_all2 ptmalloc_unlock_all
+
+#endif
+
+#endif /* !defined NO_THREADS */
+
+/* Initialization routine. */
+#ifdef _LIBC
+#include <string.h>
+extern char **_environ;
+
+static char *
+internal_function
+next_env_entry (char ***position)
+{
+ char **current = *position;
+ char *result = NULL;
+
+ while (*current != NULL)
+ {
+ if (__builtin_expect ((*current)[0] == 'M', 0)
+ && (*current)[1] == 'A'
+ && (*current)[2] == 'L'
+ && (*current)[3] == 'L'
+ && (*current)[4] == 'O'
+ && (*current)[5] == 'C'
+ && (*current)[6] == '_')
+ {
+ result = &(*current)[7];
+
+ /* Save current position for next visit. */
+ *position = ++current;
+
+ break;
+ }
+
+ ++current;
+ }
+
+ return result;
+}
+#endif /* _LIBC */
+
+/* Set up basic state so that _int_malloc et al can work. */
+static void
+ptmalloc_init_minimal (void)
+{
+#if DEFAULT_TOP_PAD != 0
+ mp_.top_pad = DEFAULT_TOP_PAD;
+#endif
+ mp_.n_mmaps_max = DEFAULT_MMAP_MAX;
+ mp_.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
+ mp_.trim_threshold = DEFAULT_TRIM_THRESHOLD;
+ mp_.pagesize = malloc_getpagesize;
+#ifdef PER_THREAD
+# define NARENAS_FROM_NCORES(n) ((n) * (sizeof(long) == 4 ? 2 : 8))
+ mp_.arena_test = NARENAS_FROM_NCORES (1);
+ narenas = 1;
+#endif
+}
+
+
+#ifdef _LIBC
+# ifdef SHARED
+static void *
+__failing_morecore (ptrdiff_t d)
+{
+ return (void *) MORECORE_FAILURE;
+}
+
+extern struct dl_open_hook *_dl_open_hook;
+libc_hidden_proto (_dl_open_hook);
+# endif
+
+# if defined SHARED && !USE___THREAD
+/* This is called by __pthread_initialize_minimal when it needs to use
+ malloc to set up the TLS state. We cannot do the full work of
+ ptmalloc_init (below) until __pthread_initialize_minimal has finished,
+ so it has to switch to using the special startup-time hooks while doing
+ those allocations. */
+void
+__libc_malloc_pthread_startup (bool first_time)
+{
+ if (first_time)
+ {
+ ptmalloc_init_minimal ();
+ save_malloc_hook = dlmalloc_hook;
+ save_memalign_hook = dlmemalign_hook;
+ save_free_hook = dlfree_hook;
+ dlmalloc_hook = malloc_starter;
+ dlmemalign_hook = memalign_starter;
+ dlfree_hook = free_starter;
+ }
+ else
+ {
+ dlmalloc_hook = save_malloc_hook;
+ dlmemalign_hook = save_memalign_hook;
+ dlfree_hook = save_free_hook;
+ }
+}
+# endif
+#endif
+
+static void
+ptmalloc_init (void)
+{
+ const char* s;
+ int secure = 0;
+
+ if(__malloc_initialized >= 0) return;
+ __malloc_initialized = 0;
+
+#ifdef _LIBC
+# if defined SHARED && !USE___THREAD
+ /* ptmalloc_init_minimal may already have been called via
+ __libc_malloc_pthread_startup, above. */
+ if (mp_.pagesize == 0)
+# endif
+#endif
+ ptmalloc_init_minimal();
+
+#ifndef NO_THREADS
+# if defined _LIBC
+ /* We know __pthread_initialize_minimal has already been called,
+ and that is enough. */
+# define NO_STARTER
+# endif
+# ifndef NO_STARTER
+ /* With some threads implementations, creating thread-specific data
+ or initializing a mutex may call malloc() itself. Provide a
+ simple starter version (realloc() won't work). */
+ save_malloc_hook = dlmalloc_hook;
+ save_memalign_hook = dlmemalign_hook;
+ save_free_hook = dlfree_hook;
+ dlmalloc_hook = malloc_starter;
+ dlmemalign_hook = memalign_starter;
+ dlfree_hook = free_starter;
+# ifdef _LIBC
+ /* Initialize the pthreads interface. */
+ if (__pthread_initialize != NULL)
+ __pthread_initialize();
+# endif /* !defined _LIBC */
+# endif /* !defined NO_STARTER */
+#endif /* !defined NO_THREADS */
+ mutex_init(&main_arena.mutex);
+ main_arena.next = &main_arena;
+
+#if defined _LIBC && defined SHARED
+ /* In case this libc copy is in a non-default namespace, never use brk.
+ Likewise if dlopened from statically linked program. */
+ Dl_info di;
+ struct link_map *l;
+
+ if (_dl_open_hook != NULL
+ || (_dl_addr (ptmalloc_init, &di, &l, NULL) != 0
+ && l->l_ns != LM_ID_BASE))
+ __morecore = __failing_morecore;
+#endif
+
+ mutex_init(&list_lock);
+ tsd_key_create(&arena_key, NULL);
+ tsd_setspecific(arena_key, (Void_t *)&main_arena);
+ thread_atfork(ptmalloc_lock_all, ptmalloc_unlock_all, ptmalloc_unlock_all2);
+#ifndef NO_THREADS
+# ifndef NO_STARTER
+ dlmalloc_hook = save_malloc_hook;
+ dlmemalign_hook = save_memalign_hook;
+ dlfree_hook = save_free_hook;
+# else
+# undef NO_STARTER
+# endif
+#endif
+#ifdef _LIBC
+ secure = __libc_enable_secure;
+ s = NULL;
+ if (__builtin_expect (_environ != NULL, 1))
+ {
+ char **runp = _environ;
+ char *envline;
+
+ while (__builtin_expect ((envline = next_env_entry (&runp)) != NULL,
+ 0))
+ {
+ size_t len = strcspn (envline, "=");
+
+ if (envline[len] != '=')
+ /* This is a "MALLOC_" variable at the end of the string
+ without a '=' character. Ignore it since otherwise we
+ will access invalid memory below. */
+ continue;
+
+ switch (len)
+ {
+ case 6:
+ if (memcmp (envline, "CHECK_", 6) == 0)
+ s = &envline[7];
+ break;
+ case 8:
+ if (! secure)
+ {
+ if (memcmp (envline, "TOP_PAD_", 8) == 0)
+ mALLOPt(M_TOP_PAD, atoi(&envline[9]));
+ else if (memcmp (envline, "PERTURB_", 8) == 0)
+ mALLOPt(M_PERTURB, atoi(&envline[9]));
+ }
+ break;
+ case 9:
+ if (! secure)
+ {
+ if (memcmp (envline, "MMAP_MAX_", 9) == 0)
+ mALLOPt(M_MMAP_MAX, atoi(&envline[10]));
+#ifdef PER_THREAD
+ else if (memcmp (envline, "ARENA_MAX", 9) == 0)
+ mALLOPt(M_ARENA_MAX, atoi(&envline[10]));
+#endif
+ }
+ break;
+#ifdef PER_THREAD
+ case 10:
+ if (! secure)
+ {
+ if (memcmp (envline, "ARENA_TEST", 10) == 0)
+ mALLOPt(M_ARENA_TEST, atoi(&envline[11]));
+ }
+ break;
+#endif
+ case 15:
+ if (! secure)
+ {
+ if (memcmp (envline, "TRIM_THRESHOLD_", 15) == 0)
+ mALLOPt(M_TRIM_THRESHOLD, atoi(&envline[16]));
+ else if (memcmp (envline, "MMAP_THRESHOLD_", 15) == 0)
+ mALLOPt(M_MMAP_THRESHOLD, atoi(&envline[16]));
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ }
+#else
+ if (! secure)
+ {
+ if((s = getenv("MALLOC_TRIM_THRESHOLD_")))
+ mALLOPt(M_TRIM_THRESHOLD, atoi(s));
+ if((s = getenv("MALLOC_TOP_PAD_")))
+ mALLOPt(M_TOP_PAD, atoi(s));
+ if((s = getenv("MALLOC_PERTURB_")))
+ mALLOPt(M_PERTURB, atoi(s));
+ if((s = getenv("MALLOC_MMAP_THRESHOLD_")))
+ mALLOPt(M_MMAP_THRESHOLD, atoi(s));
+ if((s = getenv("MALLOC_MMAP_MAX_")))
+ mALLOPt(M_MMAP_MAX, atoi(s));
+ }
+ s = getenv("MALLOC_CHECK_");
+#endif
+ if(s && s[0]) {
+ mALLOPt(M_CHECK_ACTION, (int)(s[0] - '0'));
+ if (check_action != 0)
+ dlmalloc_check_init();
+ }
+ void (*hook) (void) = force_reg (dlmalloc_initialize_hook);
+ if (hook != NULL)
+ (*hook)();
+ __malloc_initialized = 1;
+}
+
+/* There are platforms (e.g. Hurd) with a link-time hook mechanism. */
+#ifdef thread_atfork_static
+thread_atfork_static(ptmalloc_lock_all, ptmalloc_unlock_all, \
+ ptmalloc_unlock_all2)
+#endif
+
+
+
+/* Managing heaps and arenas (for concurrent threads) */
+
+#if USE_ARENAS
+
+#if MALLOC_DEBUG > 1
+
+/* Print the complete contents of a single heap to stderr. */
+
+static void
+dump_heap(heap_info *heap)
+{
+ char *ptr;
+ mchunkptr p;
+
+ fprintf(stderr, "Heap %p, size %10lx:\n", heap, (long)heap->size);
+ ptr = (heap->ar_ptr != (struct malloc_state *)(heap+1)) ?
+ (char*)(heap + 1) : (char*)(heap + 1) + sizeof(struct malloc_state);
+ p = (mchunkptr)(((unsigned long)ptr + MALLOC_ALIGN_MASK) &
+ ~MALLOC_ALIGN_MASK);
+ for(;;) {
+ fprintf(stderr, "chunk %p size %10lx", p, (long)p->size);
+ if(p == top(heap->ar_ptr)) {
+ fprintf(stderr, " (top)\n");
+ break;
+ } else if(p->size == (0|PREV_INUSE)) {
+ fprintf(stderr, " (fence)\n");
+ break;
+ }
+ fprintf(stderr, "\n");
+ p = next_chunk(p);
+ }
+}
+
+#endif /* MALLOC_DEBUG > 1 */
+
+/* If consecutive mmap (0, HEAP_MAX_SIZE << 1, ...) calls return decreasing
+ addresses as opposed to increasing, new_heap would badly fragment the
+ address space. In that case remember the second HEAP_MAX_SIZE part
+ aligned to HEAP_MAX_SIZE from last mmap (0, HEAP_MAX_SIZE << 1, ...)
+ call (if it is already aligned) and try to reuse it next time. We need
+ no locking for it, as kernel ensures the atomicity for us - worst case
+ we'll call mmap (addr, HEAP_MAX_SIZE, ...) for some value of addr in
+ multiple threads, but only one will succeed. */
+static char *aligned_heap_area;
+
+static void *mmap_for_heap(void *addr, size_t length, int *must_clear)
+{
+ int prot = PROT_READ | PROT_WRITE;
+ int flags = MAP_PRIVATE;
+ void *ret;
+
+ ret = MMAP(addr, length, prot, flags | MAP_HUGETLB);
+ if (ret != MAP_FAILED) {
+ *must_clear = 1;
+ return ret;
+ }
+ *must_clear = 0;
+ return MMAP(addr, length, prot, flags | MAP_NORESERVE);
+}
+
+/* Create a new heap. size is automatically rounded up to a multiple
+ of the page size. */
+static heap_info *new_heap(size_t size, size_t top_pad)
+{
+ size_t page_mask = malloc_getpagesize - 1;
+ char *p1, *p2;
+ unsigned long ul;
+ heap_info *h;
+ int must_clear;
+
+ if (size + top_pad < HEAP_MIN_SIZE)
+ size = HEAP_MIN_SIZE;
+ else if (size + top_pad <= HEAP_MAX_SIZE)
+ size += top_pad;
+ else if (size > HEAP_MAX_SIZE)
+ return 0;
+ else
+ size = HEAP_MAX_SIZE;
+ size = (size + page_mask) & ~page_mask;
+
+ /* A memory region aligned to a multiple of HEAP_MAX_SIZE is needed.
+ No swap space needs to be reserved for the following large
+ mapping (on Linux, this is the case for all non-writable mappings
+ anyway). */
+ p2 = MAP_FAILED;
+ if (aligned_heap_area) {
+ p2 = mmap_for_heap(aligned_heap_area, HEAP_MAX_SIZE, &must_clear);
+ aligned_heap_area = NULL;
+ if (p2 != MAP_FAILED && ((unsigned long)p2 & (HEAP_MAX_SIZE - 1))) {
+ munmap(p2, HEAP_MAX_SIZE);
+ p2 = MAP_FAILED;
+ }
+ }
+ if (p2 == MAP_FAILED) {
+ p1 = mmap_for_heap(0, HEAP_MAX_SIZE << 1, &must_clear);
+ if (p1 != MAP_FAILED) {
+ p2 = (char *)(((unsigned long)p1 + (HEAP_MAX_SIZE - 1))
+ & ~(HEAP_MAX_SIZE - 1));
+ ul = p2 - p1;
+ if (ul)
+ munmap(p1, ul);
+ else
+ aligned_heap_area = p2 + HEAP_MAX_SIZE;
+ munmap(p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);
+ } else {
+ /* Try to take the chance that an allocation of only HEAP_MAX_SIZE
+ is already aligned. */
+ p2 = mmap_for_heap(0, HEAP_MAX_SIZE, &must_clear);
+ if (p2 == MAP_FAILED)
+ return 0;
+ if ((unsigned long)p2 & (HEAP_MAX_SIZE - 1)) {
+ munmap(p2, HEAP_MAX_SIZE);
+ return 0;
+ }
+ }
+ }
+ if (must_clear)
+ memset(p2, 0, HEAP_MAX_SIZE);
+ h = (heap_info *) p2;
+ h->size = size;
+ h->mprotect_size = size;
+ THREAD_STAT(stat_n_heaps++);
+ return h;
+}
+
+/* Grow a heap. size is automatically rounded up to a
+ multiple of the page size. */
+
+static int
+grow_heap(heap_info *h, long diff)
+{
+ size_t page_mask = malloc_getpagesize - 1;
+ long new_size;
+
+ diff = (diff + page_mask) & ~page_mask;
+ new_size = (long)h->size + diff;
+ if((unsigned long) new_size > (unsigned long) HEAP_MAX_SIZE)
+ return -1;
+ if((unsigned long) new_size > h->mprotect_size) {
+ h->mprotect_size = new_size;
+ }
+
+ h->size = new_size;
+ return 0;
+}
+
+/* Shrink a heap. */
+
+static int
+shrink_heap(heap_info *h, long diff)
+{
+ long new_size;
+
+ new_size = (long)h->size - diff;
+ if(new_size < (long)sizeof(*h))
+ return -1;
+ /* Try to re-map the extra heap space freshly to save memory, and
+ make it inaccessible. */
+ madvise ((char *)h + new_size, diff, MADV_DONTNEED);
+ /*fprintf(stderr, "shrink %p %08lx\n", h, new_size);*/
+
+ h->size = new_size;
+ return 0;
+}
+
+/* Delete a heap. */
+
+#define delete_heap(heap) \
+ do { \
+ if ((char *)(heap) + HEAP_MAX_SIZE == aligned_heap_area) \
+ aligned_heap_area = NULL; \
+ munmap((char*)(heap), HEAP_MAX_SIZE); \
+ } while (0)
+
+static int
+internal_function
+heap_trim(heap_info *heap, size_t pad)
+{
+ struct malloc_state * ar_ptr = heap->ar_ptr;
+ unsigned long pagesz = mp_.pagesize;
+ mchunkptr top_chunk = top(ar_ptr), p, bck, fwd;
+ heap_info *prev_heap;
+ long new_size, top_size, extra;
+
+ /* Can this heap go away completely? */
+ while(top_chunk == chunk_at_offset(heap, sizeof(*heap))) {
+ prev_heap = heap->prev;
+ p = chunk_at_offset(prev_heap, prev_heap->size - (MINSIZE-2*SIZE_SZ));
+ assert(p->size == (0|PREV_INUSE)); /* must be fencepost */
+ p = prev_chunk(p);
+ new_size = chunksize(p) + (MINSIZE-2*SIZE_SZ);
+ assert(new_size>0 && new_size<(long)(2*MINSIZE));
+ if(!prev_inuse(p))
+ new_size += p->prev_size;
+ assert(new_size>0 && new_size<HEAP_MAX_SIZE);
+ if(new_size + (HEAP_MAX_SIZE - prev_heap->size) < pad + MINSIZE + pagesz)
+ break;
+ ar_ptr->system_mem -= heap->size;
+ arena_mem -= heap->size;
+ delete_heap(heap);
+ heap = prev_heap;
+ if(!prev_inuse(p)) { /* consolidate backward */
+ p = prev_chunk(p);
+ unlink(p, bck, fwd);
+ }
+ assert(((unsigned long)((char*)p + new_size) & (pagesz-1)) == 0);
+ assert( ((char*)p + new_size) == ((char*)heap + heap->size) );
+ top(ar_ptr) = top_chunk = p;
+ set_head(top_chunk, new_size | PREV_INUSE);
+ /*check_chunk(ar_ptr, top_chunk);*/
+ }
+ top_size = chunksize(top_chunk);
+ extra = (top_size - pad - MINSIZE - 1) & ~(pagesz - 1);
+ if(extra < (long)pagesz)
+ return 0;
+ /* Try to shrink. */
+ if(shrink_heap(heap, extra) != 0)
+ return 0;
+ ar_ptr->system_mem -= extra;
+ arena_mem -= extra;
+
+ /* Success. Adjust top accordingly. */
+ set_head(top_chunk, (top_size - extra) | PREV_INUSE);
+ /*check_chunk(ar_ptr, top_chunk);*/
+ return 1;
+}
+
+/* Create a new arena with initial size "size". */
+
+static struct malloc_state *
+_int_new_arena(size_t size)
+{
+ struct malloc_state * a;
+ heap_info *h;
+ char *ptr;
+ unsigned long misalign;
+
+ h = new_heap(size + (sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT),
+ mp_.top_pad);
+ if(!h) {
+ /* Maybe size is too large to fit in a single heap. So, just try
+ to create a minimally-sized arena and let _int_malloc() attempt
+ to deal with the large request via mmap_chunk(). */
+ h = new_heap(sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT, mp_.top_pad);
+ if(!h)
+ return 0;
+ }
+ a = h->ar_ptr = (struct malloc_state *)(h+1);
+ malloc_init_state(a);
+ /*a->next = NULL;*/
+ a->system_mem = a->max_system_mem = h->size;
+ arena_mem += h->size;
+#ifdef NO_THREADS
+ if((unsigned long)(mp_.mmapped_mem + arena_mem + main_arena.system_mem) >
+ mp_.max_total_mem)
+ mp_.max_total_mem = mp_.mmapped_mem + arena_mem + main_arena.system_mem;
+#endif
+
+ /* Set up the top chunk, with proper alignment. */
+ ptr = (char *)(a + 1);
+ misalign = (unsigned long)chunk2mem(ptr) & MALLOC_ALIGN_MASK;
+ if (misalign > 0)
+ ptr += MALLOC_ALIGNMENT - misalign;
+ top(a) = (mchunkptr)ptr;
+ set_head(top(a), (((char*)h + h->size) - ptr) | PREV_INUSE);
+
+ tsd_setspecific(arena_key, (Void_t *)a);
+ mutex_init(&a->mutex);
+ (void)mutex_lock(&a->mutex);
+
+#ifdef PER_THREAD
+ (void)mutex_lock(&list_lock);
+#endif
+
+ /* Add the new arena to the global list. */
+ a->next = main_arena.next;
+ atomic_write_barrier ();
+ main_arena.next = a;
+
+#ifdef PER_THREAD
+ ++narenas;
+
+ (void)mutex_unlock(&list_lock);
+#endif
+
+ THREAD_STAT(++(a->stat_lock_loop));
+
+ return a;
+}
+
+
+#ifdef PER_THREAD
+static struct malloc_state *
+get_free_list (void)
+{
+ struct malloc_state * result = free_list;
+ if (result != NULL)
+ {
+ (void)mutex_lock(&list_lock);
+ result = free_list;
+ if (result != NULL)
+ free_list = result->next_free;
+ (void)mutex_unlock(&list_lock);
+
+ if (result != NULL)
+ {
+ (void)mutex_lock(&result->mutex);
+ tsd_setspecific(arena_key, (Void_t *)result);
+ THREAD_STAT(++(result->stat_lock_loop));
+ }
+ }
+
+ return result;
+}
+
+
+static struct malloc_state *
+reused_arena (void)
+{
+ if (narenas <= mp_.arena_test)
+ return NULL;
+
+ static int narenas_limit;
+ if (narenas_limit == 0)
+ {
+ if (mp_.arena_max != 0)
+ narenas_limit = mp_.arena_max;
+ else
+ {
+ int n = __get_nprocs ();
+
+ if (n >= 1)
+ narenas_limit = NARENAS_FROM_NCORES (n);
+ else
+ /* We have no information about the system. Assume two
+ cores. */
+ narenas_limit = NARENAS_FROM_NCORES (2);
+ }
+ }
+
+ if (narenas < narenas_limit)
+ return NULL;
+
+ struct malloc_state * result;
+ static struct malloc_state * next_to_use;
+ if (next_to_use == NULL)
+ next_to_use = &main_arena;
+
+ result = next_to_use;
+ do
+ {
+ if (!mutex_trylock(&result->mutex))
+ goto out;
+
+ result = result->next;
+ }
+ while (result != next_to_use);
+
+ /* No arena available. Wait for the next in line. */
+ (void)mutex_lock(&result->mutex);
+
+ out:
+ tsd_setspecific(arena_key, (Void_t *)result);
+ THREAD_STAT(++(result->stat_lock_loop));
+ next_to_use = result->next;
+
+ return result;
+}
+#endif
+
+static struct malloc_state *
+internal_function
+arena_get2(struct malloc_state * a_tsd, size_t size)
+{
+ struct malloc_state * a;
+
+#ifdef PER_THREAD
+ if ((a = get_free_list ()) == NULL
+ && (a = reused_arena ()) == NULL)
+ /* Nothing immediately available, so generate a new arena. */
+ a = _int_new_arena(size);
+#else
+ if(!a_tsd)
+ a = a_tsd = &main_arena;
+ else {
+ a = a_tsd->next;
+ if(!a) {
+ /* This can only happen while initializing the new arena. */
+ (void)mutex_lock(&main_arena.mutex);
+ THREAD_STAT(++(main_arena.stat_lock_wait));
+ return &main_arena;
+ }
+ }
+
+ /* Check the global, circularly linked list for available arenas. */
+ bool retried = false;
+ repeat:
+ do {
+ if(!mutex_trylock(&a->mutex)) {
+ if (retried)
+ (void)mutex_unlock(&list_lock);
+ THREAD_STAT(++(a->stat_lock_loop));
+ tsd_setspecific(arena_key, (Void_t *)a);
+ return a;
+ }
+ a = a->next;
+ } while(a != a_tsd);
+
+ /* If not even the list_lock can be obtained, try again. This can
+ happen during `atfork', or for example on systems where thread
+ creation makes it temporarily impossible to obtain _any_
+ locks. */
+ if(!retried && mutex_trylock(&list_lock)) {
+ /* We will block to not run in a busy loop. */
+ (void)mutex_lock(&list_lock);
+
+ /* Since we blocked there might be an arena available now. */
+ retried = true;
+ a = a_tsd;
+ goto repeat;
+ }
+
+ /* Nothing immediately available, so generate a new arena. */
+ a = _int_new_arena(size);
+ (void)mutex_unlock(&list_lock);
+#endif
+
+ return a;
+}
+
+#ifdef PER_THREAD
+static void __attribute__ ((section ("__libc_thread_freeres_fn")))
+arena_thread_freeres (void)
+{
+ Void_t *vptr = NULL;
+ struct malloc_state * a = tsd_getspecific(arena_key, vptr);
+ tsd_setspecific(arena_key, NULL);
+
+ if (a != NULL)
+ {
+ (void)mutex_lock(&list_lock);
+ a->next_free = free_list;
+ free_list = a;
+ (void)mutex_unlock(&list_lock);
+ }
+}
+text_set_element (__libc_thread_subfreeres, arena_thread_freeres);
+#endif
+
+#endif /* USE_ARENAS */
+
+/*
+ * Local variables:
+ * c-basic-offset: 2
+ * End:
+ */
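
The trickiest part of the arena code above is how reused_arena() picks an arena without serializing every thread on one lock: it walks the circular arena list with mutex_trylock() and only blocks when every arena is already busy. A minimal standalone sketch of that pattern, with hypothetical names and plain pthreads instead of glibc's internal mutex wrappers, might look like this:

#include <pthread.h>
#include <stddef.h>

/* Hypothetical, much-simplified arena: just a lock and a link in a
   circular list of arenas. */
struct arena {
  pthread_mutex_t mutex;
  struct arena *next;
};

/* Round-robin starting point; updated without a lock, as in the
   original code, since it is only a hint. */
static struct arena *next_to_use;

static struct arena *pick_arena(struct arena *main_arena)
{
  struct arena *a;

  if (next_to_use == NULL)
    next_to_use = main_arena;

  a = next_to_use;
  do {
    /* pthread_mutex_trylock() returns 0 on success. */
    if (pthread_mutex_trylock(&a->mutex) == 0)
      goto out;
    a = a->next;
  } while (a != next_to_use);

  /* Every arena is contended; block on the next one in line rather
     than spinning. */
  pthread_mutex_lock(&a->mutex);
out:
  next_to_use = a->next;
  return a;  /* returned locked, as arena_get2() does */
}

In the real code the chosen arena is also stored in thread-specific data via tsd_setspecific(), so the next allocation from the same thread tries that arena first.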
diff --git a/tpc/malloc2.13/hooks.ch b/tpc/malloc2.13/hooks.ch
deleted file mode 100644
index 05cfafbb78ba..000000000000
--- a/tpc/malloc2.13/hooks.ch
+++ /dev/null
@@ -1,643 +0,0 @@
-/* Malloc implementation for multiple threads without lock contention.
- Copyright (C) 2001-2006, 2007, 2008, 2009 Free Software Foundation, Inc.
- This file is part of the GNU C Library.
- Contributed by Wolfram Gloger <wg@malloc.de>, 2001.
-
- The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public License as
- published by the Free Software Foundation; either version 2.1 of the
- License, or (at your option) any later version.
-
- The GNU C Library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; see the file COPYING.LIB. If not,
- write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
- Boston, MA 02111-1307, USA. */
-
-/* What to do if the standard debugging hooks are in place and a
- corrupt pointer is detected: do nothing (0), print an error message
- (1), or call abort() (2). */
-
-/* Hooks for debugging versions. The initial hooks just call the
- initialization routine, then do the normal work. */
-
-static Void_t*
-malloc_hook_ini(size_t sz, const __malloc_ptr_t caller)
-{
- dlmalloc_hook = NULL;
- ptmalloc_init();
- return public_mALLOc(sz);
-}
-
-static Void_t*
-realloc_hook_ini(Void_t* ptr, size_t sz, const __malloc_ptr_t caller)
-{
- dlmalloc_hook = NULL;
- dlrealloc_hook = NULL;
- ptmalloc_init();
- return public_rEALLOc(ptr, sz);
-}
-
-static Void_t*
-memalign_hook_ini(size_t alignment, size_t sz, const __malloc_ptr_t caller)
-{
- dlmemalign_hook = NULL;
- ptmalloc_init();
- return public_mEMALIGn(alignment, sz);
-}
-
-/* Whether we are using malloc checking. */
-static int using_malloc_checking;
-
-/* A flag that is set by malloc_set_state, to signal that malloc checking
- must not be enabled on the request from the user (via the MALLOC_CHECK_
- environment variable). It is reset by __malloc_check_init to tell
- malloc_set_state that the user has requested malloc checking.
-
- The purpose of this flag is to make sure that malloc checking is not
- enabled when the heap to be restored was constructed without malloc
- checking, and thus does not contain the required magic bytes.
- Otherwise the heap would be corrupted by calls to free and realloc. If
- it turns out that the heap was created with malloc checking and the
- user has requested it, malloc_set_state just calls __malloc_check_init
- again to enable it. On the other hand, reusing such a heap without
- further malloc checking is safe. */
-static int disallow_malloc_check;
-
-/* Activate a standard set of debugging hooks. */
-void
-dlmalloc_check_init()
-{
- if (disallow_malloc_check) {
- disallow_malloc_check = 0;
- return;
- }
- using_malloc_checking = 1;
- dlmalloc_hook = malloc_check;
- dlfree_hook = free_check;
- dlrealloc_hook = realloc_check;
- dlmemalign_hook = memalign_check;
-}
-
-/* A simple, standard set of debugging hooks. Overhead is `only' one
- byte per chunk; still this will catch most cases of double frees or
- overruns. The goal here is to avoid obscure crashes due to invalid
- usage, unlike in the MALLOC_DEBUG code. */
-
-#define MAGICBYTE(p) ( ( ((size_t)p >> 3) ^ ((size_t)p >> 11)) & 0xFF )
-
-/* Instrument a chunk with overrun detector byte(s) and convert it
- into a user pointer with requested size sz. */
-
-static Void_t*
-internal_function
-mem2mem_check(Void_t *ptr, size_t sz)
-{
- mchunkptr p;
- unsigned char* m_ptr = (unsigned char*)BOUNDED_N(ptr, sz);
- size_t i;
-
- if (!ptr)
- return ptr;
- p = mem2chunk(ptr);
- for(i = chunksize(p) - (chunk_is_mmapped(p) ? 2*SIZE_SZ+1 : SIZE_SZ+1);
- i > sz;
- i -= 0xFF) {
- if(i-sz < 0x100) {
- m_ptr[i] = (unsigned char)(i-sz);
- break;
- }
- m_ptr[i] = 0xFF;
- }
- m_ptr[sz] = MAGICBYTE(p);
- return (Void_t*)m_ptr;
-}
-
-/* Convert a pointer to be free()d or realloc()ed to a valid chunk
- pointer. If the provided pointer is not valid, return NULL. */
-
-static mchunkptr
-internal_function
-mem2chunk_check(Void_t* mem, unsigned char **magic_p)
-{
- mchunkptr p;
- INTERNAL_SIZE_T sz, c;
- unsigned char magic;
-
- if(!aligned_OK(mem)) return NULL;
- p = mem2chunk(mem);
- if (!chunk_is_mmapped(p)) {
- /* Must be a chunk in conventional heap memory. */
- int contig = contiguous(&main_arena);
- sz = chunksize(p);
- if((contig &&
- ((char*)p<mp_.sbrk_base ||
- ((char*)p + sz)>=(mp_.sbrk_base+main_arena.system_mem) )) ||
- sz<MINSIZE || sz&MALLOC_ALIGN_MASK || !inuse(p) ||
- ( !prev_inuse(p) && (p->prev_size&MALLOC_ALIGN_MASK ||
- (contig && (char*)prev_chunk(p)<mp_.sbrk_base) ||
- next_chunk(prev_chunk(p))!=p) ))
- return NULL;
- magic = MAGICBYTE(p);
- for(sz += SIZE_SZ-1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
- if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
- }
- } else {
- unsigned long offset, page_mask = malloc_getpagesize-1;
-
- /* mmap()ed chunks have MALLOC_ALIGNMENT or higher power-of-two
- alignment relative to the beginning of a page. Check this
- first. */
- offset = (unsigned long)mem & page_mask;
- if((offset!=MALLOC_ALIGNMENT && offset!=0 && offset!=0x10 &&
- offset!=0x20 && offset!=0x40 && offset!=0x80 && offset!=0x100 &&
- offset!=0x200 && offset!=0x400 && offset!=0x800 && offset!=0x1000 &&
- offset<0x2000) ||
- !chunk_is_mmapped(p) || (p->size & PREV_INUSE) ||
- ( (((unsigned long)p - p->prev_size) & page_mask) != 0 ) ||
- ( (sz = chunksize(p)), ((p->prev_size + sz) & page_mask) != 0 ) )
- return NULL;
- magic = MAGICBYTE(p);
- for(sz -= 1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
- if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
- }
- }
- ((unsigned char*)p)[sz] ^= 0xFF;
- if (magic_p)
- *magic_p = (unsigned char *)p + sz;
- return p;
-}
-
-/* Check for corruption of the top chunk, and try to recover if
- necessary. */
-
-static int
-internal_function
-top_check(void)
-{
- mchunkptr t = top(&main_arena);
- char* brk, * new_brk;
- INTERNAL_SIZE_T front_misalign, sbrk_size;
- unsigned long pagesz = malloc_getpagesize;
-
- if (t == initial_top(&main_arena) ||
- (!chunk_is_mmapped(t) &&
- chunksize(t)>=MINSIZE &&
- prev_inuse(t) &&
- (!contiguous(&main_arena) ||
- (char*)t + chunksize(t) == mp_.sbrk_base + main_arena.system_mem)))
- return 0;
-
- malloc_printerr (check_action, "malloc: top chunk is corrupt", t);
-
- /* Try to set up a new top chunk. */
- brk = MORECORE(0);
- front_misalign = (unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK;
- if (front_misalign > 0)
- front_misalign = MALLOC_ALIGNMENT - front_misalign;
- sbrk_size = front_misalign + mp_.top_pad + MINSIZE;
- sbrk_size += pagesz - ((unsigned long)(brk + sbrk_size) & (pagesz - 1));
- new_brk = (char*)(MORECORE (sbrk_size));
- if (new_brk == (char*)(MORECORE_FAILURE))
- {
- MALLOC_FAILURE_ACTION;
- return -1;
- }
- /* Call the `morecore' hook if necessary. */
- void (*hook) (void) = force_reg (dlafter_morecore_hook);
- if (hook)
- (*hook) ();
- main_arena.system_mem = (new_brk - mp_.sbrk_base) + sbrk_size;
-
- top(&main_arena) = (mchunkptr)(brk + front_misalign);
- set_head(top(&main_arena), (sbrk_size - front_misalign) | PREV_INUSE);
-
- return 0;
-}
-
-static Void_t*
-malloc_check(size_t sz, const Void_t *caller)
-{
- Void_t *victim;
-
- if (sz+1 == 0) {
- MALLOC_FAILURE_ACTION;
- return NULL;
- }
-
- (void)mutex_lock(&main_arena.mutex);
- victim = (top_check() >= 0) ? _int_malloc(&main_arena, sz+1) : NULL;
- (void)mutex_unlock(&main_arena.mutex);
- return mem2mem_check(victim, sz);
-}
-
-static void
-free_check(Void_t* mem, const Void_t *caller)
-{
- mchunkptr p;
-
- if(!mem) return;
- (void)mutex_lock(&main_arena.mutex);
- p = mem2chunk_check(mem, NULL);
- if(!p) {
- (void)mutex_unlock(&main_arena.mutex);
-
- malloc_printerr(check_action, "free(): invalid pointer", mem);
- return;
- }
-#if HAVE_MMAP
- if (chunk_is_mmapped(p)) {
- (void)mutex_unlock(&main_arena.mutex);
- munmap_chunk(p);
- return;
- }
-#endif
-#if 0 /* Erase freed memory. */
- memset(mem, 0, chunksize(p) - (SIZE_SZ+1));
-#endif
-#ifdef ATOMIC_FASTBINS
- _int_free(&main_arena, p, 1);
-#else
- _int_free(&main_arena, p);
-#endif
- (void)mutex_unlock(&main_arena.mutex);
-}
-
-static Void_t*
-realloc_check(Void_t* oldmem, size_t bytes, const Void_t *caller)
-{
- INTERNAL_SIZE_T nb;
- Void_t* newmem = 0;
- unsigned char *magic_p;
-
- if (bytes+1 == 0) {
- MALLOC_FAILURE_ACTION;
- return NULL;
- }
- if (oldmem == 0) return malloc_check(bytes, NULL);
- if (bytes == 0) {
- free_check (oldmem, NULL);
- return NULL;
- }
- (void)mutex_lock(&main_arena.mutex);
- const mchunkptr oldp = mem2chunk_check(oldmem, &magic_p);
- (void)mutex_unlock(&main_arena.mutex);
- if(!oldp) {
- malloc_printerr(check_action, "realloc(): invalid pointer", oldmem);
- return malloc_check(bytes, NULL);
- }
- const INTERNAL_SIZE_T oldsize = chunksize(oldp);
-
- checked_request2size(bytes+1, nb);
- (void)mutex_lock(&main_arena.mutex);
-
-#if HAVE_MMAP
- if (chunk_is_mmapped(oldp)) {
-#if HAVE_MREMAP
- mchunkptr newp = mremap_chunk(oldp, nb);
- if(newp)
- newmem = chunk2mem(newp);
- else
-#endif
- {
- /* Note the extra SIZE_SZ overhead. */
- if(oldsize - SIZE_SZ >= nb)
- newmem = oldmem; /* do nothing */
- else {
- /* Must alloc, copy, free. */
- if (top_check() >= 0)
- newmem = _int_malloc(&main_arena, bytes+1);
- if (newmem) {
- MALLOC_COPY(BOUNDED_N(newmem, bytes+1), oldmem, oldsize - 2*SIZE_SZ);
- munmap_chunk(oldp);
- }
- }
- }
- } else {
-#endif /* HAVE_MMAP */
- if (top_check() >= 0) {
- INTERNAL_SIZE_T nb;
- checked_request2size(bytes + 1, nb);
- newmem = _int_realloc(&main_arena, oldp, oldsize, nb);
- }
-#if 0 /* Erase freed memory. */
- if(newmem)
- newp = mem2chunk(newmem);
- nb = chunksize(newp);
- if(oldp<newp || oldp>=chunk_at_offset(newp, nb)) {
- memset((char*)oldmem + 2*sizeof(mbinptr), 0,
- oldsize - (2*sizeof(mbinptr)+2*SIZE_SZ+1));
- } else if(nb > oldsize+SIZE_SZ) {
- memset((char*)BOUNDED_N(chunk2mem(newp), bytes) + oldsize,
- 0, nb - (oldsize+SIZE_SZ));
- }
-#endif
-#if HAVE_MMAP
- }
-#endif
-
- /* mem2chunk_check changed the magic byte in the old chunk.
- If newmem is NULL, then the old chunk will still be used though,
- so we need to invert that change here. */
- if (newmem == NULL) *magic_p ^= 0xFF;
-
- (void)mutex_unlock(&main_arena.mutex);
-
- return mem2mem_check(newmem, bytes);
-}
-
-static Void_t*
-memalign_check(size_t alignment, size_t bytes, const Void_t *caller)
-{
- INTERNAL_SIZE_T nb;
- Void_t* mem;
-
- if (alignment <= MALLOC_ALIGNMENT) return malloc_check(bytes, NULL);
- if (alignment < MINSIZE) alignment = MINSIZE;
-
- if (bytes+1 == 0) {
- MALLOC_FAILURE_ACTION;
- return NULL;
- }
- checked_request2size(bytes+1, nb);
- (void)mutex_lock(&main_arena.mutex);
- mem = (top_check() >= 0) ? _int_memalign(&main_arena, alignment, bytes+1) :
- NULL;
- (void)mutex_unlock(&main_arena.mutex);
- return mem2mem_check(mem, bytes);
-}
-
-#ifndef NO_THREADS
-
-# ifdef _LIBC
-# if USE___THREAD || !defined SHARED
- /* These routines are never needed in this configuration. */
-# define NO_STARTER
-# endif
-# endif
-
-# ifdef NO_STARTER
-# undef NO_STARTER
-# else
-
-/* The following hooks are used when the global initialization in
- ptmalloc_init() hasn't completed yet. */
-
-static Void_t*
-malloc_starter(size_t sz, const Void_t *caller)
-{
- Void_t* victim;
-
- victim = _int_malloc(&main_arena, sz);
-
- return victim ? BOUNDED_N(victim, sz) : 0;
-}
-
-static Void_t*
-memalign_starter(size_t align, size_t sz, const Void_t *caller)
-{
- Void_t* victim;
-
- victim = _int_memalign(&main_arena, align, sz);
-
- return victim ? BOUNDED_N(victim, sz) : 0;
-}
-
-static void
-free_starter(Void_t* mem, const Void_t *caller)
-{
- mchunkptr p;
-
- if(!mem) return;
- p = mem2chunk(mem);
-#if HAVE_MMAP
- if (chunk_is_mmapped(p)) {
- munmap_chunk(p);
- return;
- }
-#endif
-#ifdef ATOMIC_FASTBINS
- _int_free(&main_arena, p, 1);
-#else
- _int_free(&main_arena, p);
-#endif
-}
-
-# endif /* !defined NO_STARTER */
-#endif /* NO_THREADS */
-
-
-/* Get/set state: malloc_get_state() records the current state of all
- malloc variables (_except_ for the actual heap contents and `hook'
- function pointers) in a system dependent, opaque data structure.
- This data structure is dynamically allocated and can be free()d
- after use. malloc_set_state() restores the state of all malloc
- variables to the previously obtained state. This is especially
- useful when using this malloc as part of a shared library, and when
- the heap contents are saved/restored via some other method. The
- primary example for this is GNU Emacs with its `dumping' procedure.
- `Hook' function pointers are never saved or restored by these
- functions, with two exceptions: If malloc checking was in use when
- malloc_get_state() was called, then malloc_set_state() calls
- __malloc_check_init() if possible; if malloc checking was not in
- use in the recorded state but the user requested malloc checking,
- then the hooks are reset to 0. */
-
-#define MALLOC_STATE_MAGIC 0x444c4541l
-#define MALLOC_STATE_VERSION (0*0x100l + 4l) /* major*0x100 + minor */
-
-struct malloc_save_state {
- long magic;
- long version;
- mbinptr av[NBINS * 2 + 2];
- char* sbrk_base;
- int sbrked_mem_bytes;
- unsigned long trim_threshold;
- unsigned long top_pad;
- unsigned int n_mmaps_max;
- unsigned long mmap_threshold;
- int check_action;
- unsigned long max_sbrked_mem;
- unsigned long max_total_mem;
- unsigned int n_mmaps;
- unsigned int max_n_mmaps;
- unsigned long mmapped_mem;
- unsigned long max_mmapped_mem;
- int using_malloc_checking;
- unsigned long max_fast;
- unsigned long arena_test;
- unsigned long arena_max;
- unsigned long narenas;
-};
-
-Void_t*
-public_gET_STATe(void)
-{
- struct malloc_save_state* ms;
- int i;
- mbinptr b;
-
- ms = (struct malloc_save_state*)public_mALLOc(sizeof(*ms));
- if (!ms)
- return 0;
- (void)mutex_lock(&main_arena.mutex);
- malloc_consolidate(&main_arena);
- ms->magic = MALLOC_STATE_MAGIC;
- ms->version = MALLOC_STATE_VERSION;
- ms->av[0] = 0;
- ms->av[1] = 0; /* used to be binblocks, now no longer used */
- ms->av[2] = top(&main_arena);
- ms->av[3] = 0; /* used to be undefined */
- for(i=1; i<NBINS; i++) {
- b = bin_at(&main_arena, i);
- if(first(b) == b)
- ms->av[2*i+2] = ms->av[2*i+3] = 0; /* empty bin */
- else {
- ms->av[2*i+2] = first(b);
- ms->av[2*i+3] = last(b);
- }
- }
- ms->sbrk_base = mp_.sbrk_base;
- ms->sbrked_mem_bytes = main_arena.system_mem;
- ms->trim_threshold = mp_.trim_threshold;
- ms->top_pad = mp_.top_pad;
- ms->n_mmaps_max = mp_.n_mmaps_max;
- ms->mmap_threshold = mp_.mmap_threshold;
- ms->check_action = check_action;
- ms->max_sbrked_mem = main_arena.max_system_mem;
-#ifdef NO_THREADS
- ms->max_total_mem = mp_.max_total_mem;
-#else
- ms->max_total_mem = 0;
-#endif
- ms->n_mmaps = mp_.n_mmaps;
- ms->max_n_mmaps = mp_.max_n_mmaps;
- ms->mmapped_mem = mp_.mmapped_mem;
- ms->max_mmapped_mem = mp_.max_mmapped_mem;
- ms->using_malloc_checking = using_malloc_checking;
- ms->max_fast = get_max_fast();
-#ifdef PER_THREAD
- ms->arena_test = mp_.arena_test;
- ms->arena_max = mp_.arena_max;
- ms->narenas = narenas;
-#endif
- (void)mutex_unlock(&main_arena.mutex);
- return (Void_t*)ms;
-}
-
-int
-public_sET_STATe(Void_t* msptr)
-{
- struct malloc_save_state* ms = (struct malloc_save_state*)msptr;
- size_t i;
- mbinptr b;
-
- disallow_malloc_check = 1;
- ptmalloc_init();
- if(ms->magic != MALLOC_STATE_MAGIC) return -1;
- /* Must fail if the major version is too high. */
- if((ms->version & ~0xffl) > (MALLOC_STATE_VERSION & ~0xffl)) return -2;
- (void)mutex_lock(&main_arena.mutex);
- /* There are no fastchunks. */
- clear_fastchunks(&main_arena);
- if (ms->version >= 4)
- set_max_fast(ms->max_fast);
- else
- set_max_fast(64); /* 64 used to be the value we always used. */
- for (i=0; i<NFASTBINS; ++i)
- fastbin (&main_arena, i) = 0;
- for (i=0; i<BINMAPSIZE; ++i)
- main_arena.binmap[i] = 0;
- top(&main_arena) = ms->av[2];
- main_arena.last_remainder = 0;
- for(i=1; i<NBINS; i++) {
- b = bin_at(&main_arena, i);
- if(ms->av[2*i+2] == 0) {
- assert(ms->av[2*i+3] == 0);
- first(b) = last(b) = b;
- } else {
- if(ms->version >= 3 &&
- (i<NSMALLBINS || (largebin_index(chunksize(ms->av[2*i+2]))==i &&
- largebin_index(chunksize(ms->av[2*i+3]))==i))) {
- first(b) = ms->av[2*i+2];
- last(b) = ms->av[2*i+3];
- /* Make sure the links to the bins within the heap are correct. */
- first(b)->bk = b;
- last(b)->fd = b;
- /* Set bit in binblocks. */
- mark_bin(&main_arena, i);
- } else {
- /* Oops, index computation from chunksize must have changed.
- Link the whole list into unsorted_chunks. */
- first(b) = last(b) = b;
- b = unsorted_chunks(&main_arena);
- ms->av[2*i+2]->bk = b;
- ms->av[2*i+3]->fd = b->fd;
- b->fd->bk = ms->av[2*i+3];
- b->fd = ms->av[2*i+2];
- }
- }
- }
- if (ms->version < 3) {
- /* Clear fd_nextsize and bk_nextsize fields. */
- b = unsorted_chunks(&main_arena)->fd;
- while (b != unsorted_chunks(&main_arena)) {
- if (!in_smallbin_range(chunksize(b))) {
- b->fd_nextsize = NULL;
- b->bk_nextsize = NULL;
- }
- b = b->fd;
- }
- }
- mp_.sbrk_base = ms->sbrk_base;
- main_arena.system_mem = ms->sbrked_mem_bytes;
- mp_.trim_threshold = ms->trim_threshold;
- mp_.top_pad = ms->top_pad;
- mp_.n_mmaps_max = ms->n_mmaps_max;
- mp_.mmap_threshold = ms->mmap_threshold;
- check_action = ms->check_action;
- main_arena.max_system_mem = ms->max_sbrked_mem;
-#ifdef NO_THREADS
- mp_.max_total_mem = ms->max_total_mem;
-#endif
- mp_.n_mmaps = ms->n_mmaps;
- mp_.max_n_mmaps = ms->max_n_mmaps;
- mp_.mmapped_mem = ms->mmapped_mem;
- mp_.max_mmapped_mem = ms->max_mmapped_mem;
- /* add version-dependent code here */
- if (ms->version >= 1) {
- /* Check whether it is safe to enable malloc checking, or whether
- it is necessary to disable it. */
- if (ms->using_malloc_checking && !using_malloc_checking &&
- !disallow_malloc_check)
- dlmalloc_check_init ();
- else if (!ms->using_malloc_checking && using_malloc_checking) {
- dlmalloc_hook = NULL;
- dlfree_hook = NULL;
- dlrealloc_hook = NULL;
- dlmemalign_hook = NULL;
- using_malloc_checking = 0;
- }
- }
- if (ms->version >= 4) {
-#ifdef PER_THREAD
- mp_.arena_test = ms->arena_test;
- mp_.arena_max = ms->arena_max;
- narenas = ms->narenas;
-#endif
- }
- check_malloc_state(&main_arena);
-
- (void)mutex_unlock(&main_arena.mutex);
- return 0;
-}
-
-/*
- * Local variables:
- * c-basic-offset: 2
- * End:
- */
diff --git a/tpc/malloc2.13/hooks.h b/tpc/malloc2.13/hooks.h
new file mode 100644
index 000000000000..05cfafbb78ba
--- /dev/null
+++ b/tpc/malloc2.13/hooks.h
@@ -0,0 +1,643 @@
+/* Malloc implementation for multiple threads without lock contention.
+ Copyright (C) 2001-2006, 2007, 2008, 2009 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Wolfram Gloger <wg@malloc.de>, 2001.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public License as
+ published by the Free Software Foundation; either version 2.1 of the
+ License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; see the file COPYING.LIB. If not,
+ write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ Boston, MA 02111-1307, USA. */
+
+/* What to do if the standard debugging hooks are in place and a
+ corrupt pointer is detected: do nothing (0), print an error message
+ (1), or call abort() (2). */
+
+/* Hooks for debugging versions. The initial hooks just call the
+ initialization routine, then do the normal work. */
+
+static Void_t*
+malloc_hook_ini(size_t sz, const __malloc_ptr_t caller)
+{
+ dlmalloc_hook = NULL;
+ ptmalloc_init();
+ return public_mALLOc(sz);
+}
+
+static Void_t*
+realloc_hook_ini(Void_t* ptr, size_t sz, const __malloc_ptr_t caller)
+{
+ dlmalloc_hook = NULL;
+ dlrealloc_hook = NULL;
+ ptmalloc_init();
+ return public_rEALLOc(ptr, sz);
+}
+
+static Void_t*
+memalign_hook_ini(size_t alignment, size_t sz, const __malloc_ptr_t caller)
+{
+ dlmemalign_hook = NULL;
+ ptmalloc_init();
+ return public_mEMALIGn(alignment, sz);
+}
+
+/* Whether we are using malloc checking. */
+static int using_malloc_checking;
+
+/* A flag that is set by malloc_set_state, to signal that malloc checking
+ must not be enabled on the request from the user (via the MALLOC_CHECK_
+ environment variable). It is reset by __malloc_check_init to tell
+ malloc_set_state that the user has requested malloc checking.
+
+ The purpose of this flag is to make sure that malloc checking is not
+ enabled when the heap to be restored was constructed without malloc
+ checking, and thus does not contain the required magic bytes.
+ Otherwise the heap would be corrupted by calls to free and realloc. If
+ it turns out that the heap was created with malloc checking and the
+ user has requested it, malloc_set_state just calls __malloc_check_init
+ again to enable it. On the other hand, reusing such a heap without
+ further malloc checking is safe. */
+static int disallow_malloc_check;
+
+/* Activate a standard set of debugging hooks. */
+void
+dlmalloc_check_init()
+{
+ if (disallow_malloc_check) {
+ disallow_malloc_check = 0;
+ return;
+ }
+ using_malloc_checking = 1;
+ dlmalloc_hook = malloc_check;
+ dlfree_hook = free_check;
+ dlrealloc_hook = realloc_check;
+ dlmemalign_hook = memalign_check;
+}
+
+/* A simple, standard set of debugging hooks. Overhead is `only' one
+ byte per chunk; still this will catch most cases of double frees or
+ overruns. The goal here is to avoid obscure crashes due to invalid
+ usage, unlike in the MALLOC_DEBUG code. */
+
+#define MAGICBYTE(p) ( ( ((size_t)p >> 3) ^ ((size_t)p >> 11)) & 0xFF )
+
+/* Instrument a chunk with overrun detector byte(s) and convert it
+ into a user pointer with requested size sz. */
+
+static Void_t*
+internal_function
+mem2mem_check(Void_t *ptr, size_t sz)
+{
+ mchunkptr p;
+ unsigned char* m_ptr = (unsigned char*)BOUNDED_N(ptr, sz);
+ size_t i;
+
+ if (!ptr)
+ return ptr;
+ p = mem2chunk(ptr);
+ for(i = chunksize(p) - (chunk_is_mmapped(p) ? 2*SIZE_SZ+1 : SIZE_SZ+1);
+ i > sz;
+ i -= 0xFF) {
+ if(i-sz < 0x100) {
+ m_ptr[i] = (unsigned char)(i-sz);
+ break;
+ }
+ m_ptr[i] = 0xFF;
+ }
+ m_ptr[sz] = MAGICBYTE(p);
+ return (Void_t*)m_ptr;
+}
+
+/* Convert a pointer to be free()d or realloc()ed to a valid chunk
+ pointer. If the provided pointer is not valid, return NULL. */
+
+static mchunkptr
+internal_function
+mem2chunk_check(Void_t* mem, unsigned char **magic_p)
+{
+ mchunkptr p;
+ INTERNAL_SIZE_T sz, c;
+ unsigned char magic;
+
+ if(!aligned_OK(mem)) return NULL;
+ p = mem2chunk(mem);
+ if (!chunk_is_mmapped(p)) {
+ /* Must be a chunk in conventional heap memory. */
+ int contig = contiguous(&main_arena);
+ sz = chunksize(p);
+ if((contig &&
+ ((char*)p<mp_.sbrk_base ||
+ ((char*)p + sz)>=(mp_.sbrk_base+main_arena.system_mem) )) ||
+ sz<MINSIZE || sz&MALLOC_ALIGN_MASK || !inuse(p) ||
+ ( !prev_inuse(p) && (p->prev_size&MALLOC_ALIGN_MASK ||
+ (contig && (char*)prev_chunk(p)<mp_.sbrk_base) ||
+ next_chunk(prev_chunk(p))!=p) ))
+ return NULL;
+ magic = MAGICBYTE(p);
+ for(sz += SIZE_SZ-1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
+ if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
+ }
+ } else {
+ unsigned long offset, page_mask = malloc_getpagesize-1;
+
+ /* mmap()ed chunks have MALLOC_ALIGNMENT or higher power-of-two
+ alignment relative to the beginning of a page. Check this
+ first. */
+ offset = (unsigned long)mem & page_mask;
+ if((offset!=MALLOC_ALIGNMENT && offset!=0 && offset!=0x10 &&
+ offset!=0x20 && offset!=0x40 && offset!=0x80 && offset!=0x100 &&
+ offset!=0x200 && offset!=0x400 && offset!=0x800 && offset!=0x1000 &&
+ offset<0x2000) ||
+ !chunk_is_mmapped(p) || (p->size & PREV_INUSE) ||
+ ( (((unsigned long)p - p->prev_size) & page_mask) != 0 ) ||
+ ( (sz = chunksize(p)), ((p->prev_size + sz) & page_mask) != 0 ) )
+ return NULL;
+ magic = MAGICBYTE(p);
+ for(sz -= 1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
+ if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
+ }
+ }
+ ((unsigned char*)p)[sz] ^= 0xFF;
+ if (magic_p)
+ *magic_p = (unsigned char *)p + sz;
+ return p;
+}
+
+/* Check for corruption of the top chunk, and try to recover if
+ necessary. */
+
+static int
+internal_function
+top_check(void)
+{
+ mchunkptr t = top(&main_arena);
+ char* brk, * new_brk;
+ INTERNAL_SIZE_T front_misalign, sbrk_size;
+ unsigned long pagesz = malloc_getpagesize;
+
+ if (t == initial_top(&main_arena) ||
+ (!chunk_is_mmapped(t) &&
+ chunksize(t)>=MINSIZE &&
+ prev_inuse(t) &&
+ (!contiguous(&main_arena) ||
+ (char*)t + chunksize(t) == mp_.sbrk_base + main_arena.system_mem)))
+ return 0;
+
+ malloc_printerr (check_action, "malloc: top chunk is corrupt", t);
+
+ /* Try to set up a new top chunk. */
+ brk = MORECORE(0);
+ front_misalign = (unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK;
+ if (front_misalign > 0)
+ front_misalign = MALLOC_ALIGNMENT - front_misalign;
+ sbrk_size = front_misalign + mp_.top_pad + MINSIZE;
+ sbrk_size += pagesz - ((unsigned long)(brk + sbrk_size) & (pagesz - 1));
+ new_brk = (char*)(MORECORE (sbrk_size));
+ if (new_brk == (char*)(MORECORE_FAILURE))
+ {
+ MALLOC_FAILURE_ACTION;
+ return -1;
+ }
+ /* Call the `morecore' hook if necessary. */
+ void (*hook) (void) = force_reg (dlafter_morecore_hook);
+ if (hook)
+ (*hook) ();
+ main_arena.system_mem = (new_brk - mp_.sbrk_base) + sbrk_size;
+
+ top(&main_arena) = (mchunkptr)(brk + front_misalign);
+ set_head(top(&main_arena), (sbrk_size - front_misalign) | PREV_INUSE);
+
+ return 0;
+}
+
+static Void_t*
+malloc_check(size_t sz, const Void_t *caller)
+{
+ Void_t *victim;
+
+ if (sz+1 == 0) {
+ MALLOC_FAILURE_ACTION;
+ return NULL;
+ }
+
+ (void)mutex_lock(&main_arena.mutex);
+ victim = (top_check() >= 0) ? _int_malloc(&main_arena, sz+1) : NULL;
+ (void)mutex_unlock(&main_arena.mutex);
+ return mem2mem_check(victim, sz);
+}
+
+static void
+free_check(Void_t* mem, const Void_t *caller)
+{
+ mchunkptr p;
+
+ if(!mem) return;
+ (void)mutex_lock(&main_arena.mutex);
+ p = mem2chunk_check(mem, NULL);
+ if(!p) {
+ (void)mutex_unlock(&main_arena.mutex);
+
+ malloc_printerr(check_action, "free(): invalid pointer", mem);
+ return;
+ }
+#if HAVE_MMAP
+ if (chunk_is_mmapped(p)) {
+ (void)mutex_unlock(&main_arena.mutex);
+ munmap_chunk(p);
+ return;
+ }
+#endif
+#if 0 /* Erase freed memory. */
+ memset(mem, 0, chunksize(p) - (SIZE_SZ+1));
+#endif
+#ifdef ATOMIC_FASTBINS
+ _int_free(&main_arena, p, 1);
+#else
+ _int_free(&main_arena, p);
+#endif
+ (void)mutex_unlock(&main_arena.mutex);
+}
+
+static Void_t*
+realloc_check(Void_t* oldmem, size_t bytes, const Void_t *caller)
+{
+ INTERNAL_SIZE_T nb;
+ Void_t* newmem = 0;
+ unsigned char *magic_p;
+
+ if (bytes+1 == 0) {
+ MALLOC_FAILURE_ACTION;
+ return NULL;
+ }
+ if (oldmem == 0) return malloc_check(bytes, NULL);
+ if (bytes == 0) {
+ free_check (oldmem, NULL);
+ return NULL;
+ }
+ (void)mutex_lock(&main_arena.mutex);
+ const mchunkptr oldp = mem2chunk_check(oldmem, &magic_p);
+ (void)mutex_unlock(&main_arena.mutex);
+ if(!oldp) {
+ malloc_printerr(check_action, "realloc(): invalid pointer", oldmem);
+ return malloc_check(bytes, NULL);
+ }
+ const INTERNAL_SIZE_T oldsize = chunksize(oldp);
+
+ checked_request2size(bytes+1, nb);
+ (void)mutex_lock(&main_arena.mutex);
+
+#if HAVE_MMAP
+ if (chunk_is_mmapped(oldp)) {
+#if HAVE_MREMAP
+ mchunkptr newp = mremap_chunk(oldp, nb);
+ if(newp)
+ newmem = chunk2mem(newp);
+ else
+#endif
+ {
+ /* Note the extra SIZE_SZ overhead. */
+ if(oldsize - SIZE_SZ >= nb)
+ newmem = oldmem; /* do nothing */
+ else {
+ /* Must alloc, copy, free. */
+ if (top_check() >= 0)
+ newmem = _int_malloc(&main_arena, bytes+1);
+ if (newmem) {
+ MALLOC_COPY(BOUNDED_N(newmem, bytes+1), oldmem, oldsize - 2*SIZE_SZ);
+ munmap_chunk(oldp);
+ }
+ }
+ }
+ } else {
+#endif /* HAVE_MMAP */
+ if (top_check() >= 0) {
+ INTERNAL_SIZE_T nb;
+ checked_request2size(bytes + 1, nb);
+ newmem = _int_realloc(&main_arena, oldp, oldsize, nb);
+ }
+#if 0 /* Erase freed memory. */
+ if(newmem)
+ newp = mem2chunk(newmem);
+ nb = chunksize(newp);
+ if(oldp<newp || oldp>=chunk_at_offset(newp, nb)) {
+ memset((char*)oldmem + 2*sizeof(mbinptr), 0,
+ oldsize - (2*sizeof(mbinptr)+2*SIZE_SZ+1));
+ } else if(nb > oldsize+SIZE_SZ) {
+ memset((char*)BOUNDED_N(chunk2mem(newp), bytes) + oldsize,
+ 0, nb - (oldsize+SIZE_SZ));
+ }
+#endif
+#if HAVE_MMAP
+ }
+#endif
+
+ /* mem2chunk_check changed the magic byte in the old chunk.
+ If newmem is NULL, then the old chunk will still be used though,
+ so we need to invert that change here. */
+ if (newmem == NULL) *magic_p ^= 0xFF;
+
+ (void)mutex_unlock(&main_arena.mutex);
+
+ return mem2mem_check(newmem, bytes);
+}
+
+static Void_t*
+memalign_check(size_t alignment, size_t bytes, const Void_t *caller)
+{
+ INTERNAL_SIZE_T nb;
+ Void_t* mem;
+
+ if (alignment <= MALLOC_ALIGNMENT) return malloc_check(bytes, NULL);
+ if (alignment < MINSIZE) alignment = MINSIZE;
+
+ if (bytes+1 == 0) {
+ MALLOC_FAILURE_ACTION;
+ return NULL;
+ }
+ checked_request2size(bytes+1, nb);
+ (void)mutex_lock(&main_arena.mutex);
+ mem = (top_check() >= 0) ? _int_memalign(&main_arena, alignment, bytes+1) :
+ NULL;
+ (void)mutex_unlock(&main_arena.mutex);
+ return mem2mem_check(mem, bytes);
+}
+
+#ifndef NO_THREADS
+
+# ifdef _LIBC
+# if USE___THREAD || !defined SHARED
+ /* These routines are never needed in this configuration. */
+# define NO_STARTER
+# endif
+# endif
+
+# ifdef NO_STARTER
+# undef NO_STARTER
+# else
+
+/* The following hooks are used when the global initialization in
+ ptmalloc_init() hasn't completed yet. */
+
+static Void_t*
+malloc_starter(size_t sz, const Void_t *caller)
+{
+ Void_t* victim;
+
+ victim = _int_malloc(&main_arena, sz);
+
+ return victim ? BOUNDED_N(victim, sz) : 0;
+}
+
+static Void_t*
+memalign_starter(size_t align, size_t sz, const Void_t *caller)
+{
+ Void_t* victim;
+
+ victim = _int_memalign(&main_arena, align, sz);
+
+ return victim ? BOUNDED_N(victim, sz) : 0;
+}
+
+static void
+free_starter(Void_t* mem, const Void_t *caller)
+{
+ mchunkptr p;
+
+ if(!mem) return;
+ p = mem2chunk(mem);
+#if HAVE_MMAP
+ if (chunk_is_mmapped(p)) {
+ munmap_chunk(p);
+ return;
+ }
+#endif
+#ifdef ATOMIC_FASTBINS
+ _int_free(&main_arena, p, 1);
+#else
+ _int_free(&main_arena, p);
+#endif
+}
+
+# endif /* !defined NO_STARTER */
+#endif /* NO_THREADS */
+
+
+/* Get/set state: malloc_get_state() records the current state of all
+ malloc variables (_except_ for the actual heap contents and `hook'
+ function pointers) in a system dependent, opaque data structure.
+ This data structure is dynamically allocated and can be free()d
+ after use. malloc_set_state() restores the state of all malloc
+ variables to the previously obtained state. This is especially
+ useful when using this malloc as part of a shared library, and when
+ the heap contents are saved/restored via some other method. The
+ primary example for this is GNU Emacs with its `dumping' procedure.
+ `Hook' function pointers are never saved or restored by these
+ functions, with two exceptions: If malloc checking was in use when
+ malloc_get_state() was called, then malloc_set_state() calls
+ __malloc_check_init() if possible; if malloc checking was not in
+ use in the recorded state but the user requested malloc checking,
+ then the hooks are reset to 0. */
+
+#define MALLOC_STATE_MAGIC 0x444c4541l
+#define MALLOC_STATE_VERSION (0*0x100l + 4l) /* major*0x100 + minor */
+
+struct malloc_save_state {
+ long magic;
+ long version;
+ mbinptr av[NBINS * 2 + 2];
+ char* sbrk_base;
+ int sbrked_mem_bytes;
+ unsigned long trim_threshold;
+ unsigned long top_pad;
+ unsigned int n_mmaps_max;
+ unsigned long mmap_threshold;
+ int check_action;
+ unsigned long max_sbrked_mem;
+ unsigned long max_total_mem;
+ unsigned int n_mmaps;
+ unsigned int max_n_mmaps;
+ unsigned long mmapped_mem;
+ unsigned long max_mmapped_mem;
+ int using_malloc_checking;
+ unsigned long max_fast;
+ unsigned long arena_test;
+ unsigned long arena_max;
+ unsigned long narenas;
+};
+
+Void_t*
+public_gET_STATe(void)
+{
+ struct malloc_save_state* ms;
+ int i;
+ mbinptr b;
+
+ ms = (struct malloc_save_state*)public_mALLOc(sizeof(*ms));
+ if (!ms)
+ return 0;
+ (void)mutex_lock(&main_arena.mutex);
+ malloc_consolidate(&main_arena);
+ ms->magic = MALLOC_STATE_MAGIC;
+ ms->version = MALLOC_STATE_VERSION;
+ ms->av[0] = 0;
+ ms->av[1] = 0; /* used to be binblocks, now no longer used */
+ ms->av[2] = top(&main_arena);
+ ms->av[3] = 0; /* used to be undefined */
+ for(i=1; i<NBINS; i++) {
+ b = bin_at(&main_arena, i);
+ if(first(b) == b)
+ ms->av[2*i+2] = ms->av[2*i+3] = 0; /* empty bin */
+ else {
+ ms->av[2*i+2] = first(b);
+ ms->av[2*i+3] = last(b);
+ }
+ }
+ ms->sbrk_base = mp_.sbrk_base;
+ ms->sbrked_mem_bytes = main_arena.system_mem;
+ ms->trim_threshold = mp_.trim_threshold;
+ ms->top_pad = mp_.top_pad;
+ ms->n_mmaps_max = mp_.n_mmaps_max;
+ ms->mmap_threshold = mp_.mmap_threshold;
+ ms->check_action = check_action;
+ ms->max_sbrked_mem = main_arena.max_system_mem;
+#ifdef NO_THREADS
+ ms->max_total_mem = mp_.max_total_mem;
+#else
+ ms->max_total_mem = 0;
+#endif
+ ms->n_mmaps = mp_.n_mmaps;
+ ms->max_n_mmaps = mp_.max_n_mmaps;
+ ms->mmapped_mem = mp_.mmapped_mem;
+ ms->max_mmapped_mem = mp_.max_mmapped_mem;
+ ms->using_malloc_checking = using_malloc_checking;
+ ms->max_fast = get_max_fast();
+#ifdef PER_THREAD
+ ms->arena_test = mp_.arena_test;
+ ms->arena_max = mp_.arena_max;
+ ms->narenas = narenas;
+#endif
+ (void)mutex_unlock(&main_arena.mutex);
+ return (Void_t*)ms;
+}
+
+int
+public_sET_STATe(Void_t* msptr)
+{
+ struct malloc_save_state* ms = (struct malloc_save_state*)msptr;
+ size_t i;
+ mbinptr b;
+
+ disallow_malloc_check = 1;
+ ptmalloc_init();
+ if(ms->magic != MALLOC_STATE_MAGIC) return -1;
+ /* Must fail if the major version is too high. */
+ if((ms->version & ~0xffl) > (MALLOC_STATE_VERSION & ~0xffl)) return -2;
+ (void)mutex_lock(&main_arena.mutex);
+ /* There are no fastchunks. */
+ clear_fastchunks(&main_arena);
+ if (ms->version >= 4)
+ set_max_fast(ms->max_fast);
+ else
+ set_max_fast(64); /* 64 used to be the value we always used. */
+ for (i=0; i<NFASTBINS; ++i)
+ fastbin (&main_arena, i) = 0;
+ for (i=0; i<BINMAPSIZE; ++i)
+ main_arena.binmap[i] = 0;
+ top(&main_arena) = ms->av[2];
+ main_arena.last_remainder = 0;
+ for(i=1; i<NBINS; i++) {
+ b = bin_at(&main_arena, i);
+ if(ms->av[2*i+2] == 0) {
+ assert(ms->av[2*i+3] == 0);
+ first(b) = last(b) = b;
+ } else {
+ if(ms->version >= 3 &&
+ (i<NSMALLBINS || (largebin_index(chunksize(ms->av[2*i+2]))==i &&
+ largebin_index(chunksize(ms->av[2*i+3]))==i))) {
+ first(b) = ms->av[2*i+2];
+ last(b) = ms->av[2*i+3];
+ /* Make sure the links to the bins within the heap are correct. */
+ first(b)->bk = b;
+ last(b)->fd = b;
+ /* Set bit in binblocks. */
+ mark_bin(&main_arena, i);
+ } else {
+ /* Oops, index computation from chunksize must have changed.
+ Link the whole list into unsorted_chunks. */
+ first(b) = last(b) = b;
+ b = unsorted_chunks(&main_arena);
+ ms->av[2*i+2]->bk = b;
+ ms->av[2*i+3]->fd = b->fd;
+ b->fd->bk = ms->av[2*i+3];
+ b->fd = ms->av[2*i+2];
+ }
+ }
+ }
+ if (ms->version < 3) {
+ /* Clear fd_nextsize and bk_nextsize fields. */
+ b = unsorted_chunks(&main_arena)->fd;
+ while (b != unsorted_chunks(&main_arena)) {
+ if (!in_smallbin_range(chunksize(b))) {
+ b->fd_nextsize = NULL;
+ b->bk_nextsize = NULL;
+ }
+ b = b->fd;
+ }
+ }
+ mp_.sbrk_base = ms->sbrk_base;
+ main_arena.system_mem = ms->sbrked_mem_bytes;
+ mp_.trim_threshold = ms->trim_threshold;
+ mp_.top_pad = ms->top_pad;
+ mp_.n_mmaps_max = ms->n_mmaps_max;
+ mp_.mmap_threshold = ms->mmap_threshold;
+ check_action = ms->check_action;
+ main_arena.max_system_mem = ms->max_sbrked_mem;
+#ifdef NO_THREADS
+ mp_.max_total_mem = ms->max_total_mem;
+#endif
+ mp_.n_mmaps = ms->n_mmaps;
+ mp_.max_n_mmaps = ms->max_n_mmaps;
+ mp_.mmapped_mem = ms->mmapped_mem;
+ mp_.max_mmapped_mem = ms->max_mmapped_mem;
+ /* add version-dependent code here */
+ if (ms->version >= 1) {
+ /* Check whether it is safe to enable malloc checking, or whether
+ it is necessary to disable it. */
+ if (ms->using_malloc_checking && !using_malloc_checking &&
+ !disallow_malloc_check)
+ dlmalloc_check_init ();
+ else if (!ms->using_malloc_checking && using_malloc_checking) {
+ dlmalloc_hook = NULL;
+ dlfree_hook = NULL;
+ dlrealloc_hook = NULL;
+ dlmemalign_hook = NULL;
+ using_malloc_checking = 0;
+ }
+ }
+ if (ms->version >= 4) {
+#ifdef PER_THREAD
+ mp_.arena_test = ms->arena_test;
+ mp_.arena_max = ms->arena_max;
+ narenas = ms->narenas;
+#endif
+ }
+ check_malloc_state(&main_arena);
+
+ (void)mutex_unlock(&main_arena.mutex);
+ return 0;
+}
+
+/*
+ * Local variables:
+ * c-basic-offset: 2
+ * End:
+ */
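
The hooks above implement the MALLOC_CHECK_ canary: mem2mem_check() plants one byte derived from the chunk address right after the requested size, and mem2chunk_check() verifies (and then flips) it before the chunk may be freed or reallocated. A toy illustration of the same idea, using hypothetical names and a plain malloc() underneath instead of the real chunk layout:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

/* One-byte canary derived from the block address, in the spirit of
   MAGICBYTE(). */
#define CANARY(p) \
  ((unsigned char)(((((uintptr_t)(p)) >> 3) ^ (((uintptr_t)(p)) >> 11)) & 0xFF))

/* Allocate sz usable bytes plus one canary byte directly after them,
   cf. mem2mem_check(). */
static void *checked_alloc(size_t sz)
{
  unsigned char *p = malloc(sz + 1);
  if (p == NULL)
    return NULL;
  p[sz] = CANARY(p);
  return p;
}

/* Verify the canary before freeing, cf. mem2chunk_check(); on a
   detected overrun, report and leave the block alone, as free_check()
   does for an invalid pointer. */
static int checked_free(void *mem, size_t sz)
{
  unsigned char *p = mem;
  if (p == NULL)
    return 0;
  if (p[sz] != CANARY(p)) {
    fprintf(stderr, "checked_free(): overrun detected at %p\n", mem);
    return -1;
  }
  free(p);
  return 0;
}

int main(void)
{
  char *s = checked_alloc(8);
  if (s == NULL)
    return 1;
  s[8] = 'X';  /* off-by-one write clobbers the canary byte */
  return checked_free(s, 8) == -1 ? 0 : 1;  /* exit 0 if the overrun was caught */
}

In the real hooks the canary sits at the end of the requested size inside the chunk, and the slack between it and the chunk boundary is filled with distance bytes so mem2chunk_check() can walk back from the chunk size and find the canary again.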
diff --git a/tpc/malloc2.13/malloc.c b/tpc/malloc2.13/malloc.c
index 6b75c9a6beb0..f1c7b219a0bd 100644
--- a/tpc/malloc2.13/malloc.c
+++ b/tpc/malloc2.13/malloc.c
@@ -2450,7 +2450,7 @@ static int perturb_byte;
/* ------------------- Support for multiple arenas -------------------- */
-#include "arena.ch"
+#include "arena.h"
/*
Debugging support
@@ -2813,7 +2813,7 @@ static void do_check_malloc_state(struct malloc_state * av)
/* ----------------- Support for debugging hooks -------------------- */
-#include "hooks.ch"
+#include "hooks.h"
/* ----------- Routines dealing with system allocation -------------- */
--
2.7.0.rc3