Re: [PATCH 2/2] Consolidate valloc/pvalloc code.


On 7 November 2013 21:14, Ondřej Bílka <neleai@seznam.cz> wrote:
> Hi,
>
> As valloc/pvalloc are legacy functionality, they should occupy a
> minimal amount of code. Here we redirect valloc/pvalloc calls to
> memalign calls.
>
> This allows us to simplify _int_memalign, as several checks are
> already done in __libc_memalign.
>
> We could go further with the refactoring, as the hook call is almost
> duplicated; the catch is that we would need to define a new function
> and pass the caller address as an argument.
>
> Comments?

I am in favour of this refactor in principle. However, you also need to
look at hooks.c, as _int_memalign is called there too. You can test
that code path by setting MALLOC_CHECK_=3 and running the tests.
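For example, a minimal standalone check along these lines (a
hypothetical test program, not part of the patch or of the glibc test
suite) goes through the memalign_check hook in hooks.c when run with
MALLOC_CHECK_=3:

    /* Hypothetical smoke test; build normally, then run as:
           MALLOC_CHECK_=3 ./a.out  */
    #include <malloc.h>
    #include <stdint.h>
    #include <assert.h>

    int
    main (void)
    {
      void *p = memalign (64, 1000);
      /* With MALLOC_CHECK_ set, the call above is routed through
         the debugging hooks in hooks.c.  */
      assert (p != NULL && ((uintptr_t) p & 63) == 0);
      free (p);
      return 0;
    }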

>         * malloc/malloc.c (__libc_valloc, __libc_pvalloc): Call __libc_memalign.
>         (__libc_memalign): Add rounding of alignment.
>         (_int_memalign): Remove redundant checks.
>         (_int_valloc, _int_pvalloc): Delete.
>
> diff --git a/malloc/malloc.c b/malloc/malloc.c
> index 897c43a..e7fbc2b 100644
> --- a/malloc/malloc.c
> +++ b/malloc/malloc.c
> @@ -1054,8 +1054,6 @@ static void     _int_free(mstate, mchunkptr, int);
>  static void*  _int_realloc(mstate, mchunkptr, INTERNAL_SIZE_T,
>                            INTERNAL_SIZE_T);
>  static void*  _int_memalign(mstate, size_t, size_t);
> -static void*  _int_valloc(mstate, size_t);
> -static void*  _int_pvalloc(mstate, size_t);
>  static void malloc_printerr(int action, const char *str, void *ptr);
>
>  static void* internal_function mem2mem_check(void *p, size_t sz);
> @@ -3005,6 +3003,9 @@ __libc_memalign(size_t alignment, size_t bytes)
>    mstate ar_ptr;
>    void *p;
>
> +  if(__malloc_initialized < 0)
> +    ptmalloc_init ();
> +
>    void *(*hook) (size_t, size_t, const void *) =
>      force_reg (__memalign_hook);
>    if (__builtin_expect (hook != NULL, 0))
> @@ -3024,6 +3025,13 @@ __libc_memalign(size_t alignment, size_t bytes)
>        return 0;
>      }
>
> +  /* Make sure alignment is power of 2 (in case MINSIZE is not).  */
> +  if ((alignment & (alignment - 1)) != 0) {
> +    size_t a = MALLOC_ALIGNMENT * 2;
> +    while ((unsigned long)a < (unsigned long)alignment) a <<= 1;

These casts seem superfluous.
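Since both operands are size_t, the plain unsigned comparison is
enough; i.e. something like:

    /* Round alignment up to the next power of two; a and alignment
       are both size_t, so no casts are needed.  */
    if ((alignment & (alignment - 1)) != 0)
      {
        size_t a = MALLOC_ALIGNMENT * 2;
        while (a < alignment)
          a <<= 1;
        alignment = a;
      }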

> +    alignment = a;
> +  }
> +
>    /* Check for overflow.  */
>    if (bytes > SIZE_MAX - alignment - MINSIZE)
>      {
> @@ -3034,6 +3042,9 @@ __libc_memalign(size_t alignment, size_t bytes)
>    arena_get(ar_ptr, bytes + alignment + MINSIZE);
>    if(!ar_ptr)
>      return 0;
> +
> +  if (have_fastchunks(ar_ptr) && alignment >= GLRO(dl_pagesize))
> +    malloc_consolidate(ar_ptr);

As I mentioned in the review of the other patch, I think I would
favour removing the consolidation here.

>    p = _int_memalign(ar_ptr, alignment, bytes);
>    if(!p) {
>      LIBC_PROBE (memory_memalign_retry, 2, bytes, alignment);
> @@ -3055,51 +3066,22 @@ libc_hidden_def (__libc_memalign)
>  void*
>  __libc_valloc(size_t bytes)
>  {
> -  mstate ar_ptr;
> -  void *p;
> -
>    if(__malloc_initialized < 0)
>      ptmalloc_init ();
>
>    size_t pagesz = GLRO(dl_pagesize);
>
> -  /* Check for overflow.  */
> -  if (bytes > SIZE_MAX - pagesz - MINSIZE)
> -    {
> -      __set_errno (ENOMEM);
> -      return 0;
> -    }
> -
>    void *(*hook) (size_t, size_t, const void *) =
>      force_reg (__memalign_hook);
>    if (__builtin_expect (hook != NULL, 0))
>      return (*hook)(pagesz, bytes, RETURN_ADDRESS (0));
>
> -  arena_get(ar_ptr, bytes + pagesz + MINSIZE);
> -  if(!ar_ptr)
> -    return 0;
> -  p = _int_valloc(ar_ptr, bytes);
> -  if(!p) {
> -    LIBC_PROBE (memory_valloc_retry, 1, bytes);

This would require a documentation update, since the
memory_valloc_retry probe is described in the manual.

> -    ar_ptr = arena_get_retry (ar_ptr, bytes);
> -    if (__builtin_expect(ar_ptr != NULL, 1)) {
> -      p = _int_memalign(ar_ptr, pagesz, bytes);
> -      (void)mutex_unlock(&ar_ptr->mutex);
> -    }
> -  } else
> -    (void)mutex_unlock (&ar_ptr->mutex);
> -  assert(!p || chunk_is_mmapped(mem2chunk(p)) ||
> -        ar_ptr == arena_for_chunk(mem2chunk(p)));
> -
> -  return p;
> +  return __libc_memalign(pagesz, bytes);
>  }
>
>  void*
>  __libc_pvalloc(size_t bytes)
>  {
> -  mstate ar_ptr;
> -  void *p;
> -
>    if(__malloc_initialized < 0)
>      ptmalloc_init ();
>
> @@ -3108,7 +3090,7 @@ __libc_pvalloc(size_t bytes)
>    size_t rounded_bytes = (bytes + page_mask) & ~(page_mask);
>
>    /* Check for overflow.  */
> -  if (bytes > SIZE_MAX - 2*pagesz - MINSIZE)
> +  if (bytes > rounded_bytes)
>      {
>        __set_errno (ENOMEM);
>        return 0;
> @@ -3119,21 +3101,7 @@ __libc_pvalloc(size_t bytes)
>    if (__builtin_expect (hook != NULL, 0))
>      return (*hook)(pagesz, rounded_bytes, RETURN_ADDRESS (0));
>
> -  arena_get(ar_ptr, bytes + 2*pagesz + MINSIZE);
> -  p = _int_pvalloc(ar_ptr, bytes);
> -  if(!p) {
> -    LIBC_PROBE (memory_pvalloc_retry, 1, bytes);

Also here.
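
Incidentally, the reworked overflow check in __libc_pvalloc looks
correct to me: the page rounding wraps modulo the size_t range, so a
wrapped result is strictly smaller than the original request and
`bytes > rounded_bytes' catches it. A standalone illustration
(assuming a 4 KiB page size; not part of the patch):

    #include <stdint.h>
    #include <stdio.h>

    int
    main (void)
    {
      size_t page_mask = 4096 - 1;    /* assume a 4 KiB page size */
      size_t bytes = SIZE_MAX - 100;  /* pathological request */
      /* The addition wraps, so rounded ends up smaller than bytes.  */
      size_t rounded = (bytes + page_mask) & ~page_mask;
      printf ("overflow detected: %d\n", bytes > rounded);  /* prints 1 */
      return 0;
    }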

-- 
Will Newton
Toolchain Working Group, Linaro

