
GNU C Library master sources branch fw/heap-protector created. glibc-2.24-308-g1488185


This is an automated email from the git hooks/post-receive script. It was
generated because a ref change was pushed to the repository containing
the project "GNU C Library master sources".

The branch, fw/heap-protector has been created
        at  148818503502c6d0c406964332c683353d27e228 (commit)

- Log -----------------------------------------------------------------
http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=148818503502c6d0c406964332c683353d27e228

commit 148818503502c6d0c406964332c683353d27e228
Author: Florian Weimer <fweimer@redhat.com>
Date:   Thu Oct 27 19:25:15 2016 +0200

    malloc: Implement heap protector
    
    TBD: Propagate cookie from rtld to libc.
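
For orientation, the patch below XORs both malloc_chunk header words
with per-process secret guard values, so plaintext size fields never
sit in memory and a linear overflow can no longer plant a predictable
chunk size.  A minimal standalone C sketch of the scheme follows; the
layout, names, and guard constant are illustrative stand-ins, not the
glibc code (the real keys come from AT_RANDOM, see the second commit
below):

/* Sketch of the XOR heap protector idea.  Illustrative only.  */
#include <assert.h>
#include <stdint.h>

/* Placeholder guard; glibc derives it from AT_RANDOM at startup.
   The low three bits are kept clear so flag bits survive the XOR.  */
static uintptr_t header_guard = 0xdeadbee0u;

struct chunk
{
  uintptr_t prev_size;  /* a second guard covers this field */
  uintptr_t size;       /* stored XOR header_guard; low 3 bits are flags */
};

static void
set_head (struct chunk *p, uintptr_t s)
{
  p->size = s ^ header_guard;           /* encrypt on write */
}

static uintptr_t
chunksize_nomask (const struct chunk *p)
{
  return p->size ^ header_guard;        /* decrypt on read */
}

int
main (void)
{
  struct chunk c;
  set_head (&c, 64 | 1);                 /* size 64, PREV_INUSE set */
  assert (chunksize_nomask (&c) == 65);  /* accessors round-trip */
  assert (c.size != 65);                 /* raw memory is masked */
  return 0;
}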

diff --git a/ChangeLog b/ChangeLog
index aae7d6e..2440b6c 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,5 +1,26 @@
 2016-10-27  Florian Weimer  <fweimer@redhat.com>
 
+	* malloc/malloc-internal.h (__malloc_header_guard)
+	(__malloc_footer_guard): Declare.
+	* malloc/malloc-guard.c: New file.
+	* malloc/Makefile (routines): Add it.
+	* malloc/malloc.c (HEAP_CRYPT_SIZE, HEAP_CRYPT_PREVSIZE): Define.
+	(chunksize_nomask, prev_size, set_prev_size, set_head_size)
+	(set_head, set_foot): Add encryption.
+	* malloc/arena.c (ptmalloc_init): Initialize the top chunk.
+	* malloc/hooks.c (malloc_set_state): Apply the heap guard to the
+	dumped heap.
+	* csu/libc-start.c (LIBC_START_MAIN): Initialize heap guard
+	variables.
+	* elf/rtld.c (security_init): Likewise.
+	* malloc/tst-mallocstate.c (malloc_usable_size_valid): New
+	variable.
+	(check_allocation): Check malloc_usable_size result if
+	malloc_usable_size_valid.
+	(init_heap): Set malloc_usable_size_valid.
+
+2016-10-27  Florian Weimer  <fweimer@redhat.com>
+
 	* elf/dl-keysetup.h: New file.
 	* elf/dl-keysetup.c: Likewise.
 	* elf/Makefile (dl-routines): Add dl-keysetup.
diff --git a/csu/libc-start.c b/csu/libc-start.c
index 333a4cc..d42aaa4 100644
--- a/csu/libc-start.c
+++ b/csu/libc-start.c
@@ -22,6 +22,7 @@
 #include <ldsodefs.h>
 #include <exit-thread.h>
 #include <elf/dl-keysetup.h>
+#include <malloc/malloc-internal.h>
 
 extern void __libc_init_first (int argc, char **argv, char **envp);
 
@@ -210,6 +211,9 @@ LIBC_START_MAIN (int (*main) (int, char **, char ** MAIN_AUXVEC_DECL),
   __pointer_chk_guard_local = keys.pointer;
 # endif
 
+  __malloc_header_guard = keys.heap_header;
+  __malloc_footer_guard = keys.heap_footer;
+
 #endif
 
   /* Register the destructor of the dynamic linker if there is any.  */
diff --git a/elf/rtld.c b/elf/rtld.c
index de965da..40d1e2e 100644
--- a/elf/rtld.c
+++ b/elf/rtld.c
@@ -42,6 +42,7 @@
 #include <stap-probe.h>
 #include <stackinfo.h>
 #include <dl-keysetup.h>
+#include <malloc/malloc-internal.h>
 
 #include <assert.h>
 
@@ -716,6 +717,9 @@ security_init (void)
 #endif
   __pointer_chk_guard_local = keys.pointer;
 
+  __malloc_header_guard = keys.heap_header;
+  __malloc_footer_guard = keys.heap_footer;
+
   /* We do not need the _dl_random value anymore.  The less
      information we leave behind, the better, so clear the
      variable.  */
diff --git a/malloc/Makefile b/malloc/Makefile
index b8efcd6..cd289f8 100644
--- a/malloc/Makefile
+++ b/malloc/Makefile
@@ -41,7 +41,7 @@ tests-static := \
 tests += $(tests-static)
 test-srcs = tst-mtrace
 
-routines = malloc morecore mcheck mtrace obstack \
+routines = malloc morecore mcheck mtrace obstack malloc-guard \
   scratch_buffer_grow scratch_buffer_grow_preserve \
   scratch_buffer_set_array_size
 
diff --git a/malloc/arena.c b/malloc/arena.c
index 1904de3..4fc547d 100644
--- a/malloc/arena.c
+++ b/malloc/arena.c
@@ -340,6 +340,11 @@ ptmalloc_init (void)
       if (check_action != 0)
         __malloc_check_init ();
     }
+
+  /* Initialize the top chunk.  */
+  malloc_init_state (&main_arena);
+  set_head (main_arena.top, 0);
+
 #if HAVE_MALLOC_INIT_HOOK
   void (*hook) (void) = atomic_forced_read (__malloc_initialize_hook);
   if (hook != NULL)
diff --git a/malloc/hooks.c b/malloc/hooks.c
index 12995d3..7b7068a 100644
--- a/malloc/hooks.c
+++ b/malloc/hooks.c
@@ -559,12 +559,15 @@ malloc_set_state (void *msptr)
   mchunkptr top = ms->av[2];
   while (chunk < top)
     {
+      /* Do not use the chunksize accessor here because the dumped
+	 heap does not use the heap protector.  */
+      size_t size = ((size_t *) chunk)[1] & ~SIZE_BITS;
       if (inuse (chunk))
-	{
-	  /* Mark chunk as mmapped, to trigger the fallback path.  */
-	  size_t size = chunksize (chunk);
-	  set_head (chunk, size | IS_MMAPPED);
-	}
+	/* Mark chunk as mmapped, to trigger the fallback path.  */
+	set_head (chunk, size | IS_MMAPPED);
+      else
+	/* Unused dumped chunk.  */
+	set_head (chunk, size);
       chunk = next_chunk (chunk);
     }
 
diff --git a/malloc/malloc-guard.c b/malloc/malloc-guard.c
new file mode 100644
index 0000000..c8b3581
--- /dev/null
+++ b/malloc/malloc-guard.c
@@ -0,0 +1,29 @@
+/* Heap protector variables.
+   Copyright (C) 2016 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public License as
+   published by the Free Software Foundation; either version 2.1 of the
+   License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; see the file COPYING.LIB.  If
+   not, see <http://www.gnu.org/licenses/>.  */
+
+/* These variables are defined in a separate file because the static
+   startup code initializes them, but this should not pull the rest of
+   the libc malloc implementation into the link.  */
+
+#include <malloc-internal.h>
+
+/* The heap cookie.  The lowest three bits (corresponding to
+   SIZE_BITS) in __malloc_header_guard must be clear.  Initialized
+   during libc startup, and computed by elf/dl-keysetup.c.  */
+INTERNAL_SIZE_T __malloc_header_guard; /* For size.  */
+INTERNAL_SIZE_T __malloc_footer_guard; /* For prev_size.  */
diff --git a/malloc/malloc-internal.h b/malloc/malloc-internal.h
index a3df8c3..e723539 100644
--- a/malloc/malloc-internal.h
+++ b/malloc/malloc-internal.h
@@ -81,5 +81,8 @@ void __malloc_fork_unlock_parent (void) internal_function attribute_hidden;
 /* Called in the child process after a fork.  */
 void __malloc_fork_unlock_child (void) internal_function attribute_hidden;
 
+/* Random values for the heap protector.  */
+extern INTERNAL_SIZE_T __malloc_header_guard attribute_hidden;
+extern INTERNAL_SIZE_T __malloc_footer_guard attribute_hidden;
 
 #endif /* _MALLOC_INTERNAL_H */
diff --git a/malloc/malloc.c b/malloc/malloc.c
index 72d22bd..e1e732d 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -1221,6 +1221,10 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 /* Mark a chunk as not being on the main arena.  */
 #define set_non_main_arena(p) ((p)->mchunk_size |= NON_MAIN_ARENA)
 
+/* Encrypt or decrypt a heap chunk header field; XOR is its own inverse.  */
+#define HEAP_CRYPT_SIZE(val) (__malloc_header_guard ^ ((INTERNAL_SIZE_T) (val)))
+#define HEAP_CRYPT_PREVSIZE(val) \
+  (__malloc_footer_guard ^ ((INTERNAL_SIZE_T) (val)))
 
 /*
    Bits to mask off when extracting size
@@ -1236,16 +1240,16 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 #define chunksize(p) (chunksize_nomask (p) & ~(SIZE_BITS))
 
 /* Like chunksize, but do not mask SIZE_BITS.  */
-#define chunksize_nomask(p)         ((p)->mchunk_size)
+#define chunksize_nomask(p) HEAP_CRYPT_SIZE ((p)->mchunk_size)
 
 /* Ptr to next physical malloc_chunk. */
 #define next_chunk(p) ((mchunkptr) (((char *) (p)) + chunksize (p)))
 
 /* Size of the chunk below P.  Only valid if prev_inuse (P).  */
-#define prev_size(p) ((p)->mchunk_prev_size)
+#define prev_size(p) HEAP_CRYPT_PREVSIZE ((p)->mchunk_prev_size)
 
 /* Set the size of the chunk below P.  Only valid if prev_inuse (P).  */
-#define set_prev_size(p, sz) ((p)->mchunk_prev_size = (sz))
+#define set_prev_size(p, sz) ((p)->mchunk_prev_size = HEAP_CRYPT_PREVSIZE (sz))
 
 /* Ptr to previous physical malloc_chunk.  Only valid if prev_inuse (P).  */
 #define prev_chunk(p) ((mchunkptr) (((char *) (p)) - prev_size (p)))
@@ -1277,13 +1281,16 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 
 
 /* Set size at head, without disturbing its use bit */
-#define set_head_size(p, s)  ((p)->mchunk_size = (((p)->mchunk_size & SIZE_BITS) | (s)))
+#define set_head_size(p, s) \
+  ((p)->mchunk_size = ((p)->mchunk_size & SIZE_BITS) | HEAP_CRYPT_SIZE (s))
 
 /* Set size/use field */
-#define set_head(p, s)       ((p)->mchunk_size = (s))
+#define set_head(p, s) ((p)->mchunk_size = HEAP_CRYPT_SIZE (s))
 
 /* Set size at footer (only when chunk is not in use) */
-#define set_foot(p, s)       (((mchunkptr) ((char *) (p) + (s)))->mchunk_prev_size = (s))
+#define set_foot(p, s) \
+  (((mchunkptr) ((char *) (p) + (s)))->mchunk_prev_size \
+    = HEAP_CRYPT_PREVSIZE (s))
 
 
 #pragma GCC poison mchunk_size
diff --git a/malloc/tst-mallocstate.c b/malloc/tst-mallocstate.c
index 7e081c5..bd6678e 100644
--- a/malloc/tst-mallocstate.c
+++ b/malloc/tst-mallocstate.c
@@ -186,6 +186,10 @@ struct allocation
   unsigned int seed;
 };
 
+/* After heap initialization, we can call malloc_usable_size to check
+   if it gives valid results.  */
+static bool malloc_usable_size_valid;
+
 /* Check that the allocation task allocation has the expected
    contents.  */
 static void
@@ -221,6 +225,23 @@ check_allocation (const struct allocation *alloc, int index)
       putc ('\n', stdout);
       errors = true;
     }
+
+  if (malloc_usable_size_valid)
+    {
+      size_t usable = malloc_usable_size (alloc->data);
+      if (usable < size)
+        {
+          printf ("error: allocation %d has reported size %zu (expected %zu)\n",
+                  index, usable, size);
+          errors = true;
+        }
+      else if (usable > size + 4096)
+        {
+          printf ("error: allocation %d reported as %zu bytes (requested %zu)\n",
+                  index, usable, size);
+          errors = true;
+        }
+    }
 }
 
 /* A heap allocation combined with pending actions on it.  */
@@ -317,6 +338,10 @@ init_heap (void)
       write_message ("error: malloc_set_state failed\n");
       _exit (1);
     }
+
+  /* The heap has been initialized.  We may now call
+     malloc_usable_size.  */
+  malloc_usable_size_valid = true;
 }
 
 /* Interpose the initialization callback.  */

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=37a0a9067f0c088bcf0c42ee5597d9b35e23f6b7

commit 37a0a9067f0c088bcf0c42ee5597d9b35e23f6b7
Author: Florian Weimer <fweimer@redhat.com>
Date:   Thu Oct 27 13:48:24 2016 +0200

    Generate additional secret keys for the heap protector
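
This commit folds the stack and pointer guard setup into a single key
derivation: the 16 AT_RANDOM bytes are expanded (via SHA-256 on 64-bit
targets) into the four guards of struct key_setup.  A rough standalone
sketch of the 64-bit path follows; it substitutes OpenSSL's SHA256 ()
for glibc's private crypt/sha256.c purely to stay self-contained, so
treat it as an approximation of the diff, not the diff itself:

/* Sketch: expand 16 random bytes into four word-sized guards.
   Build with -lcrypto; OpenSSL stands in for glibc's sha256.c.  */
#include <stdint.h>
#include <string.h>
#include <openssl/sha.h>

struct key_setup
{
  uintptr_t stack;        /* stack-smashing canary */
  uintptr_t pointer;      /* pointer guard */
  uintptr_t heap_header;  /* guards the chunk size field */
  uintptr_t heap_footer;  /* guards the prev_size field */
};

static void
compute_keys (const unsigned char random[16], struct key_setup *result)
{
  /* 32 digest bytes == sizeof (struct key_setup) on 64-bit.  */
  unsigned char digest[SHA256_DIGEST_LENGTH];
  SHA256 (random, 16, digest);
  memcpy (result, digest, sizeof (*result));

  /* Zero the canary's first byte so string overreads stop before
     leaking it.  */
  *(unsigned char *) &result->stack = 0;

  /* The shift keeps the low three (flag) bits of the header guard
     clear, matching the patch.  */
  result->heap_header <<= 3;
}

int
main (void)
{
  unsigned char seed[16] = "0123456789abcdef";  /* stand-in AT_RANDOM */
  struct key_setup keys;
  compute_keys (seed, &keys);
  return (keys.heap_header & 7) != 0;  /* flag bits must be clear */
}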

diff --git a/ChangeLog b/ChangeLog
index eda5b9c..aae7d6e 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,13 @@
+2016-10-27  Florian Weimer  <fweimer@redhat.com>
+
+	* elf/dl-keysetup.h: New file.
+	* elf/dl-keysetup.c: Likewise.
+	* elf/Makefile (dl-routines): Add dl-keysetup.
+	* csu/libc-start.c (LIBC_START_MAIN): Call __compute_keys to
+	obtain key material.
+	* elf/rtld.c (security_init): Likewise.
+	* crypt/sha256.c: Use relative #include for sha256-block.c.
+
 2016-10-27  Siddhesh Poyarekar  <siddhesh@sourceware.org>
 
 	* malloc/malloc.c (do_set_mallopt_check): New function.
diff --git a/crypt/sha256.c b/crypt/sha256.c
index e858f4b..56cae14 100644
--- a/crypt/sha256.c
+++ b/crypt/sha256.c
@@ -212,4 +212,4 @@ __sha256_process_bytes (const void *buffer, size_t len, struct sha256_ctx *ctx)
     }
 }
 
-#include <sha256-block.c>
+#include "sha256-block.c"
diff --git a/csu/libc-start.c b/csu/libc-start.c
index 99c040a..333a4cc 100644
--- a/csu/libc-start.c
+++ b/csu/libc-start.c
@@ -21,6 +21,7 @@
 #include <unistd.h>
 #include <ldsodefs.h>
 #include <exit-thread.h>
+#include <elf/dl-keysetup.h>
 
 extern void __libc_init_first (int argc, char **argv, char **envp);
 
@@ -192,21 +193,21 @@ LIBC_START_MAIN (int (*main) (int, char **, char ** MAIN_AUXVEC_DECL),
      we need to setup errno.  */
   __pthread_initialize_minimal ();
 
+  struct key_setup keys;
+  __compute_keys (_dl_random, &keys);
+
   /* Set up the stack checker's canary.  */
-  uintptr_t stack_chk_guard = _dl_setup_stack_chk_guard (_dl_random);
 # ifdef THREAD_SET_STACK_GUARD
-  THREAD_SET_STACK_GUARD (stack_chk_guard);
+  THREAD_SET_STACK_GUARD (keys.stack);
 # else
-  __stack_chk_guard = stack_chk_guard;
+  __stack_chk_guard = keys.stack;
 # endif
 
   /* Set up the pointer guard value.  */
-  uintptr_t pointer_chk_guard = _dl_setup_pointer_guard (_dl_random,
-							 stack_chk_guard);
 # ifdef THREAD_SET_POINTER_GUARD
-  THREAD_SET_POINTER_GUARD (pointer_chk_guard);
+  THREAD_SET_POINTER_GUARD (keys.pointer);
 # else
-  __pointer_chk_guard_local = pointer_chk_guard;
+  __pointer_chk_guard_local = keys.pointer;
 # endif
 
 #endif
diff --git a/elf/Makefile b/elf/Makefile
index caffd92..e84141e 100644
--- a/elf/Makefile
+++ b/elf/Makefile
@@ -31,7 +31,8 @@ routines	= $(all-dl-routines) dl-support dl-iteratephdr \
 dl-routines	= $(addprefix dl-,load lookup object reloc deps hwcaps \
 				  runtime error init fini debug misc \
 				  version profile conflict tls origin scope \
-				  execstack caller open close trampoline)
+				  execstack caller open close trampoline \
+				  keysetup)
 ifeq (yes,$(use-ldconfig))
 dl-routines += dl-cache
 endif
diff --git a/elf/dl-keysetup.c b/elf/dl-keysetup.c
new file mode 100644
index 0000000..9f0b4fd
--- /dev/null
+++ b/elf/dl-keysetup.c
@@ -0,0 +1,71 @@
+/* Compute secret keys used for protection heuristics.
+   Copyright (C) 2016 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public License as
+   published by the Free Software Foundation; either version 2.1 of the
+   License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; see the file COPYING.LIB.  If
+   not, see <http://www.gnu.org/licenses/>.  */
+
+#include <dl-keysetup.h>
+#include <string.h>
+
+enum { at_random_size = 16 };
+
+#if __WORDSIZE == 64
+enum { sha256_output_size = 32 };
+static void compute_sha256_of_random (const void *random, void *result);
+#endif
+
+void
+__compute_keys (const void *random, struct key_setup *result)
+{
+#if __WORDSIZE == 32
+  _Static_assert (sizeof (*result) == at_random_size,
+                  "no key expansion required");
+  memcpy (result, random, sizeof (*result));
+#else
+  /* We use SHA-256 to expand the 16 bytes of randomness into 32
+     bytes, so that it is hard to guess the remaining keys once a
+     subset of them is known.  */
+  _Static_assert (sizeof (*result) == sha256_output_size,
+                  "SHA-256 provides required size");
+  compute_sha256_of_random (random, result);
+#endif
+
+  /* Prevent leakage of the stack canary through a read buffer
+     overflow of a NUL-terminated string.  */
+  *(char *) &result->stack = '\0';
+
+  /* Clear the lowest three bits in the heap header guard value, so
+     that the flag bits remain unchanged.  */
+  result->heap_header <<= 3;
+}
+
+#if __WORDSIZE == 64
+
+#pragma GCC visibility push (hidden)
+
+#include "../crypt/sha256.h"
+#include "../crypt/sha256.c"
+
+#pragma GCC visibility pop
+
+static void
+compute_sha256_of_random (const void *random, void *result)
+{
+  struct sha256_ctx ctx;
+  __sha256_init_ctx (&ctx);
+  __sha256_process_bytes (random, at_random_size, &ctx);
+  __sha256_finish_ctx (&ctx, result);
+}
+#endif
diff --git a/elf/dl-keysetup.h b/elf/dl-keysetup.h
new file mode 100644
index 0000000..3c7e9bb
--- /dev/null
+++ b/elf/dl-keysetup.h
@@ -0,0 +1,45 @@
+/* Compute secret keys used for protection heuristics.
+   Copyright (C) 2016 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public License as
+   published by the Free Software Foundation; either version 2.1 of the
+   License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; see the file COPYING.LIB.  If
+   not, see <http://www.gnu.org/licenses/>.  */
+
+#ifndef KEY_SETUP_H
+#define KEY_SETUP_H
+
+#include <stdint.h>
+
+/* The set of protection keys used by glibc.  */
+struct key_setup
+{
+  /* Canary for the stack-smashing protector.  */
+  uintptr_t stack;
+
+  /* Pointer guard, protecting selected function pointers.  */
+  uintptr_t pointer;
+
+  /* Heap guard, protecting the malloc chunk header.  */
+  uintptr_t heap_header;
+
+  /* Heap guard part two, protecting the previous chunk size field.  */
+  uintptr_t heap_footer;
+};
+
+/* Derive the keys in *RESULT from RANDOM, which comes from the
+   auxiliary vector and points to 16 bytes of randomness.  */
+void __compute_keys (const void *random, struct key_setup *result)
+  attribute_hidden;
+
+#endif /* KEY_SETUP_H */
diff --git a/elf/rtld.c b/elf/rtld.c
index 647661c..de965da 100644
--- a/elf/rtld.c
+++ b/elf/rtld.c
@@ -41,6 +41,7 @@
 #include <tls.h>
 #include <stap-probe.h>
 #include <stackinfo.h>
+#include <dl-keysetup.h>
 
 #include <assert.h>
 
@@ -699,21 +700,21 @@ rtld_lock_default_unlock_recursive (void *lock)
 static void
 security_init (void)
 {
+  struct key_setup keys;
+  __compute_keys (_dl_random, &keys);
+
   /* Set up the stack checker's canary.  */
-  uintptr_t stack_chk_guard = _dl_setup_stack_chk_guard (_dl_random);
 #ifdef THREAD_SET_STACK_GUARD
-  THREAD_SET_STACK_GUARD (stack_chk_guard);
+  THREAD_SET_STACK_GUARD (keys.stack);
 #else
-  __stack_chk_guard = stack_chk_guard;
+  __stack_chk_guard = keys.stack;
 #endif
 
   /* Set up the pointer guard as well, if necessary.  */
-  uintptr_t pointer_chk_guard
-    = _dl_setup_pointer_guard (_dl_random, stack_chk_guard);
 #ifdef THREAD_SET_POINTER_GUARD
-  THREAD_SET_POINTER_GUARD (pointer_chk_guard);
+  THREAD_SET_POINTER_GUARD (keys.pointer);
 #endif
-  __pointer_chk_guard_local = pointer_chk_guard;
+  __pointer_chk_guard_local = keys.pointer;
 
   /* We do not need the _dl_random value anymore.  The less
      information we leave behind, the better, so clear the

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=e1afae2fd2032e26c511a366e5d54132ddf35c8f

commit e1afae2fd2032e26c511a366e5d54132ddf35c8f
Author: Florian Weimer <fweimer@redhat.com>
Date:   Tue May 24 16:21:24 2016 +0200

    sysmalloc: Initialize previous size field of mmaped chunks
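
Background for the one-line change below: munmap_chunk () reconstructs
the mapping's start address and length from the chunk's prev_size, and
with the heap protector in place even a zero offset must be stored
through set_prev_size () rather than left as raw zero-filled memory.
A simplified illustration of that bookkeeping (hypothetical layout,
not glibc's malloc_chunk):

/* Sketch: an mmap'ed chunk records its alignment offset in prev_size;
   freeing recovers the original mapping solely from the header.  */
#define _DEFAULT_SOURCE
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

struct chunk { size_t prev_size; size_t size; };

int
main (void)
{
  size_t len = 2 * 4096;
  void *mm = mmap (NULL, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert (mm != MAP_FAILED);

  struct chunk *p = mm;
  p->prev_size = 0;  /* the new set_prev_size (p, 0), aligned case */
  p->size = len;     /* stands in for set_head (p, size | IS_MMAPPED) */

  /* The equivalent of munmap_chunk: an uninitialized prev_size here
     would feed garbage to munmap.  */
  uintptr_t block = (uintptr_t) p - p->prev_size;
  size_t total_size = p->prev_size + p->size;
  return munmap ((void *) block, total_size);
}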

diff --git a/malloc/malloc.c b/malloc/malloc.c
index 186e174..72d22bd 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -2306,6 +2306,7 @@ sysmalloc (INTERNAL_SIZE_T nb, mstate av)
               else
                 {
                   p = (mchunkptr) mm;
+		  set_prev_size (p, 0);
                   set_head (p, size | IS_MMAPPED);
                 }
 

http://sourceware.org/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=41bc6bb855a982ec6172e48686773b59af249391

commit 41bc6bb855a982ec6172e48686773b59af249391
Author: Florian Weimer <fweimer@redhat.com>
Date:   Fri May 27 17:30:45 2016 +0200

    malloc: Use accessors for chunk metadata access
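
This refactoring is what makes the protector possible: every direct
p->size and p->prev_size access is funneled through an accessor macro,
the raw fields are renamed, and #pragma GCC poison turns any stray
direct access into a compile-time error.  A toy demonstration of the
same pattern, with made-up names:

/* Accessor-plus-poison pattern in miniature.  */
#include <stdio.h>

struct widget { int w_value; };         /* renamed raw field */

/* Macros defined before the poison may still expand to the name.  */
#define widget_value(p) ((p)->w_value)
#define set_widget_value(p, v) ((p)->w_value = (v))

#pragma GCC poison w_value  /* later direct uses fail to compile */

int
main (void)
{
  struct widget w;
  set_widget_value (&w, 42);
  /* Writing w.w_value here would error: "attempt to use poisoned".  */
  printf ("%d\n", widget_value (&w));
  return 0;
}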

diff --git a/malloc/arena.c b/malloc/arena.c
index 9760483..1904de3 100644
--- a/malloc/arena.c
+++ b/malloc/arena.c
@@ -560,12 +560,13 @@ heap_trim (heap_info *heap, size_t pad)
       /* fencepost must be properly aligned.  */
       misalign = ((long) p) & MALLOC_ALIGN_MASK;
       p = chunk_at_offset (prev_heap, prev_size - misalign);
-      assert (p->size == (0 | PREV_INUSE)); /* must be fencepost */
+      assert (chunksize_nomask (p) == (0 | PREV_INUSE)
+	      && prev_inuse (p)); /* must be fencepost */
       p = prev_chunk (p);
       new_size = chunksize (p) + (MINSIZE - 2 * SIZE_SZ) + misalign;
       assert (new_size > 0 && new_size < (long) (2 * MINSIZE));
       if (!prev_inuse (p))
-        new_size += p->prev_size;
+        new_size += prev_size (p);
       assert (new_size > 0 && new_size < HEAP_MAX_SIZE);
       if (new_size + (HEAP_MAX_SIZE - prev_heap->size) < pad + MINSIZE + pagesz)
         break;
diff --git a/malloc/hooks.c b/malloc/hooks.c
index ecfe9c1..12995d3 100644
--- a/malloc/hooks.c
+++ b/malloc/hooks.c
@@ -192,7 +192,7 @@ mem2chunk_check (void *mem, unsigned char **magic_p)
            ((char *) p < mp_.sbrk_base ||
             ((char *) p + sz) >= (mp_.sbrk_base + main_arena.system_mem))) ||
           sz < MINSIZE || sz & MALLOC_ALIGN_MASK || !inuse (p) ||
-          (!prev_inuse (p) && (p->prev_size & MALLOC_ALIGN_MASK ||
+          (!prev_inuse (p) && ((prev_size (p) & MALLOC_ALIGN_MASK) != 0 ||
                                (contig && (char *) prev_chunk (p) < mp_.sbrk_base) ||
                                next_chunk (prev_chunk (p)) != p)))
         return NULL;
@@ -215,9 +215,9 @@ mem2chunk_check (void *mem, unsigned char **magic_p)
            offset != 0x20 && offset != 0x40 && offset != 0x80 && offset != 0x100 &&
            offset != 0x200 && offset != 0x400 && offset != 0x800 && offset != 0x1000 &&
            offset < 0x2000) ||
-          !chunk_is_mmapped (p) || (p->size & PREV_INUSE) ||
-          ((((unsigned long) p - p->prev_size) & page_mask) != 0) ||
-          ((p->prev_size + sz) & page_mask) != 0)
+          !chunk_is_mmapped (p) || prev_inuse (p) ||
+          ((((unsigned long) p - prev_size (p)) & page_mask) != 0) ||
+          ((prev_size (p) + sz) & page_mask) != 0)
         return NULL;
 
       for (sz -= 1; (c = ((unsigned char *) p)[sz]) != magic; sz -= c)
diff --git a/malloc/malloc.c b/malloc/malloc.c
index e99fca0..186e174 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -1040,8 +1040,8 @@ static void*   memalign_check(size_t alignment, size_t bytes,
 
 struct malloc_chunk {
 
-  INTERNAL_SIZE_T      prev_size;  /* Size of previous chunk (if free).  */
-  INTERNAL_SIZE_T      size;       /* Size in bytes, including overhead. */
+  INTERNAL_SIZE_T      mchunk_prev_size;  /* Size of previous chunk (if free).  */
+  INTERNAL_SIZE_T      mchunk_size;       /* Size in bytes, including overhead. */
 
   struct malloc_chunk* fd;         /* double links -- used only if free. */
   struct malloc_chunk* bk;
@@ -1200,14 +1200,14 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 #define PREV_INUSE 0x1
 
 /* extract inuse bit of previous chunk */
-#define prev_inuse(p)       ((p)->size & PREV_INUSE)
+#define prev_inuse(p)       ((p)->mchunk_size & PREV_INUSE)
 
 
 /* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */
 #define IS_MMAPPED 0x2
 
 /* check for mmap()'ed chunk */
-#define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED)
+#define chunk_is_mmapped(p) ((p)->mchunk_size & IS_MMAPPED)
 
 
 /* size field is or'ed with NON_MAIN_ARENA if the chunk was obtained
@@ -1216,7 +1216,10 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 #define NON_MAIN_ARENA 0x4
 
 /* check for chunk from non-main arena */
-#define chunk_non_main_arena(p) ((p)->size & NON_MAIN_ARENA)
+#define chunk_non_main_arena(p) ((p)->mchunk_size & NON_MAIN_ARENA)
+
+/* Mark a chunk as not being on the main arena.  */
+#define set_non_main_arena(p) ((p)->mchunk_size |= NON_MAIN_ARENA)
 
 
 /*
@@ -1230,51 +1233,62 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 #define SIZE_BITS (PREV_INUSE | IS_MMAPPED | NON_MAIN_ARENA)
 
 /* Get size, ignoring use bits */
-#define chunksize(p)         ((p)->size & ~(SIZE_BITS))
+#define chunksize(p) (chunksize_nomask (p) & ~(SIZE_BITS))
 
+/* Like chunksize, but do not mask SIZE_BITS.  */
+#define chunksize_nomask(p)         ((p)->mchunk_size)
 
 /* Ptr to next physical malloc_chunk. */
-#define next_chunk(p) ((mchunkptr) (((char *) (p)) + ((p)->size & ~SIZE_BITS)))
+#define next_chunk(p) ((mchunkptr) (((char *) (p)) + chunksize (p)))
+
+/* Size of the chunk below P.  Only valid if prev_inuse (P).  */
+#define prev_size(p) ((p)->mchunk_prev_size)
+
+/* Set the size of the chunk below P.  Only valid if prev_inuse (P).  */
+#define set_prev_size(p, sz) ((p)->mchunk_prev_size = (sz))
 
-/* Ptr to previous physical malloc_chunk */
-#define prev_chunk(p) ((mchunkptr) (((char *) (p)) - ((p)->prev_size)))
+/* Ptr to previous physical malloc_chunk.  Only valid if prev_inuse (P).  */
+#define prev_chunk(p) ((mchunkptr) (((char *) (p)) - prev_size (p)))
 
 /* Treat space at ptr + offset as a chunk */
 #define chunk_at_offset(p, s)  ((mchunkptr) (((char *) (p)) + (s)))
 
 /* extract p's inuse bit */
 #define inuse(p)							      \
-  ((((mchunkptr) (((char *) (p)) + ((p)->size & ~SIZE_BITS)))->size) & PREV_INUSE)
+  ((((mchunkptr) (((char *) (p)) + chunksize (p)))->mchunk_size) & PREV_INUSE)
 
 /* set/clear chunk as being inuse without otherwise disturbing */
 #define set_inuse(p)							      \
-  ((mchunkptr) (((char *) (p)) + ((p)->size & ~SIZE_BITS)))->size |= PREV_INUSE
+  ((mchunkptr) (((char *) (p)) + chunksize (p)))->mchunk_size |= PREV_INUSE
 
 #define clear_inuse(p)							      \
-  ((mchunkptr) (((char *) (p)) + ((p)->size & ~SIZE_BITS)))->size &= ~(PREV_INUSE)
+  ((mchunkptr) (((char *) (p)) + chunksize (p)))->mchunk_size &= ~(PREV_INUSE)
 
 
 /* check/set/clear inuse bits in known places */
 #define inuse_bit_at_offset(p, s)					      \
-  (((mchunkptr) (((char *) (p)) + (s)))->size & PREV_INUSE)
+  (((mchunkptr) (((char *) (p)) + (s)))->mchunk_size & PREV_INUSE)
 
 #define set_inuse_bit_at_offset(p, s)					      \
-  (((mchunkptr) (((char *) (p)) + (s)))->size |= PREV_INUSE)
+  (((mchunkptr) (((char *) (p)) + (s)))->mchunk_size |= PREV_INUSE)
 
 #define clear_inuse_bit_at_offset(p, s)					      \
-  (((mchunkptr) (((char *) (p)) + (s)))->size &= ~(PREV_INUSE))
+  (((mchunkptr) (((char *) (p)) + (s)))->mchunk_size &= ~(PREV_INUSE))
 
 
 /* Set size at head, without disturbing its use bit */
-#define set_head_size(p, s)  ((p)->size = (((p)->size & SIZE_BITS) | (s)))
+#define set_head_size(p, s)  ((p)->mchunk_size = (((p)->mchunk_size & SIZE_BITS) | (s)))
 
 /* Set size/use field */
-#define set_head(p, s)       ((p)->size = (s))
+#define set_head(p, s)       ((p)->mchunk_size = (s))
 
 /* Set size at footer (only when chunk is not in use) */
-#define set_foot(p, s)       (((mchunkptr) ((char *) (p) + (s)))->prev_size = (s))
+#define set_foot(p, s)       (((mchunkptr) ((char *) (p) + (s)))->mchunk_prev_size = (s))
 
 
+#pragma GCC poison mchunk_size
+#pragma GCC poison mchunk_prev_size
+
 /*
    -------------------- Internal data structures --------------------
 
@@ -1349,7 +1363,7 @@ typedef struct malloc_chunk *mbinptr;
     else {								      \
         FD->bk = BK;							      \
         BK->fd = FD;							      \
-        if (!in_smallbin_range (P->size)				      \
+        if (!in_smallbin_range (chunksize_nomask (P))			      \
             && __builtin_expect (P->fd_nextsize != NULL, 0)) {		      \
 	    if (__builtin_expect (P->fd_nextsize->bk_nextsize != P, 0)	      \
 		|| __builtin_expect (P->bk_nextsize->fd_nextsize != P, 0))    \
@@ -1901,7 +1915,7 @@ do_check_chunk (mstate av, mchunkptr p)
           assert (((char *) p) < min_address || ((char *) p) >= max_address);
         }
       /* chunk is page-aligned */
-      assert (((p->prev_size + sz) & (GLRO (dl_pagesize) - 1)) == 0);
+      assert (((prev_size (p) + sz) & (GLRO (dl_pagesize) - 1)) == 0);
       /* mem is aligned */
       assert (aligned_OK (chunk2mem (p)));
     }
@@ -1929,7 +1943,7 @@ do_check_free_chunk (mstate av, mchunkptr p)
       assert ((sz & MALLOC_ALIGN_MASK) == 0);
       assert (aligned_OK (chunk2mem (p)));
       /* ... matching footer field */
-      assert (next->prev_size == sz);
+      assert (prev_size (next) == sz);
       /* ... and is fully consolidated */
       assert (prev_inuse (p));
       assert (next == av->top || inuse (next));
@@ -2286,7 +2300,7 @@ sysmalloc (INTERNAL_SIZE_T nb, mstate av)
                 {
                   correction = MALLOC_ALIGNMENT - front_misalign;
                   p = (mchunkptr) (mm + correction);
-                  p->prev_size = correction;
+		  set_prev_size (p, correction);
                   set_head (p, (size - correction) | IS_MMAPPED);
                 }
               else
@@ -2641,11 +2655,10 @@ sysmalloc (INTERNAL_SIZE_T nb, mstate av)
                          intentional. We need the fencepost, even if old_top otherwise gets
                          lost.
                        */
-                      chunk_at_offset (old_top, old_size)->size =
-                        (2 * SIZE_SZ) | PREV_INUSE;
-
-                      chunk_at_offset (old_top, old_size + 2 * SIZE_SZ)->size =
-                        (2 * SIZE_SZ) | PREV_INUSE;
+		      set_head (chunk_at_offset (old_top, old_size),
+				(2 * SIZE_SZ) | PREV_INUSE);
+		      set_head (chunk_at_offset (old_top, old_size + 2 * SIZE_SZ),
+				(2 * SIZE_SZ) | PREV_INUSE);
 
                       /* If possible, release the rest. */
                       if (old_size >= MINSIZE)
@@ -2773,8 +2786,8 @@ munmap_chunk (mchunkptr p)
   if (DUMPED_MAIN_ARENA_CHUNK (p))
     return;
 
-  uintptr_t block = (uintptr_t) p - p->prev_size;
-  size_t total_size = p->prev_size + size;
+  uintptr_t block = (uintptr_t) p - prev_size (p);
+  size_t total_size = prev_size (p) + size;
  /* Unfortunately we have to do the compiler's job by hand here.  Normally
      we would test BLOCK and TOTAL-SIZE separately for compliance with the
      page size.  But gcc does not recognize the optimization possibility
@@ -2803,7 +2816,7 @@ internal_function
 mremap_chunk (mchunkptr p, size_t new_size)
 {
   size_t pagesize = GLRO (dl_pagesize);
-  INTERNAL_SIZE_T offset = p->prev_size;
+  INTERNAL_SIZE_T offset = prev_size (p);
   INTERNAL_SIZE_T size = chunksize (p);
   char *cp;
 
@@ -2827,7 +2840,7 @@ mremap_chunk (mchunkptr p, size_t new_size)
 
   assert (aligned_OK (chunk2mem (p)));
 
-  assert ((p->prev_size == offset));
+  assert (prev_size (p) == offset);
   set_head (p, (new_size - offset) | IS_MMAPPED);
 
   INTERNAL_SIZE_T new;
@@ -2896,8 +2909,8 @@ __libc_free (void *mem)
       /* See if the dynamic brk/mmap threshold needs adjusting.
 	 Dumped fake mmapped chunks do not affect the threshold.  */
       if (!mp_.no_dyn_threshold
-          && p->size > mp_.mmap_threshold
-          && p->size <= DEFAULT_MMAP_THRESHOLD_MAX
+          && chunksize_nomask (p) > mp_.mmap_threshold
+          && chunksize_nomask (p) <= DEFAULT_MMAP_THRESHOLD_MAX
 	  && !DUMPED_MAIN_ARENA_CHUNK (p))
         {
           mp_.mmap_threshold = chunksize (p);
@@ -3389,7 +3402,7 @@ _int_malloc (mstate av, size_t bytes)
               bck->fd = bin;
 
               if (av != &main_arena)
-                victim->size |= NON_MAIN_ARENA;
+		set_non_main_arena (victim);
               check_malloced_chunk (av, victim, nb);
               void *p = chunk2mem (victim);
               alloc_perturb (p, bytes);
@@ -3435,8 +3448,9 @@ _int_malloc (mstate av, size_t bytes)
       while ((victim = unsorted_chunks (av)->bk) != unsorted_chunks (av))
         {
           bck = victim->bk;
-          if (__builtin_expect (victim->size <= 2 * SIZE_SZ, 0)
-              || __builtin_expect (victim->size > av->system_mem, 0))
+          if (__builtin_expect (chunksize_nomask (victim) <= 2 * SIZE_SZ, 0)
+              || __builtin_expect (chunksize_nomask (victim)
+				   > av->system_mem, 0))
             malloc_printerr (check_action, "malloc(): memory corruption",
                              chunk2mem (victim), av);
           size = chunksize (victim);
@@ -3487,7 +3501,7 @@ _int_malloc (mstate av, size_t bytes)
             {
               set_inuse_bit_at_offset (victim, size);
               if (av != &main_arena)
-                victim->size |= NON_MAIN_ARENA;
+		set_non_main_arena (victim);
               check_malloced_chunk (av, victim, nb);
               void *p = chunk2mem (victim);
               alloc_perturb (p, bytes);
@@ -3514,8 +3528,9 @@ _int_malloc (mstate av, size_t bytes)
                   /* Or with inuse bit to speed comparisons */
                   size |= PREV_INUSE;
                   /* if smaller than smallest, bypass loop below */
-                  assert ((bck->bk->size & NON_MAIN_ARENA) == 0);
-                  if ((unsigned long) (size) < (unsigned long) (bck->bk->size))
+                  assert (! chunk_non_main_arena (bck->bk));
+                  if ((unsigned long) (size)
+		      < (unsigned long) chunksize_nomask (bck->bk))
                     {
                       fwd = bck;
                       bck = bck->bk;
@@ -3526,14 +3541,15 @@ _int_malloc (mstate av, size_t bytes)
                     }
                   else
                     {
-                      assert ((fwd->size & NON_MAIN_ARENA) == 0);
-                      while ((unsigned long) size < fwd->size)
+                      assert (! chunk_non_main_arena (fwd));
+                      while ((unsigned long) size < chunksize_nomask (fwd))
                         {
                           fwd = fwd->fd_nextsize;
-                          assert ((fwd->size & NON_MAIN_ARENA) == 0);
+			  assert (! chunk_non_main_arena (fwd));
                         }
 
-                      if ((unsigned long) size == (unsigned long) fwd->size)
+                      if ((unsigned long) size
+			  == (unsigned long) chunksize_nomask (fwd))
                         /* Always insert in the second position.  */
                         fwd = fwd->fd;
                       else
@@ -3571,8 +3587,9 @@ _int_malloc (mstate av, size_t bytes)
           bin = bin_at (av, idx);
 
           /* skip scan if empty or largest chunk is too small */
-          if ((victim = first (bin)) != bin &&
-              (unsigned long) (victim->size) >= (unsigned long) (nb))
+          if ((victim = first (bin)) != bin
+	      && (unsigned long) chunksize_nomask (victim)
+	        >= (unsigned long) (nb))
             {
               victim = victim->bk_nextsize;
               while (((unsigned long) (size = chunksize (victim)) <
@@ -3581,7 +3598,9 @@ _int_malloc (mstate av, size_t bytes)
 
               /* Avoid removing the first entry for a size so that the skip
                  list does not have to be rerouted.  */
-              if (victim != last (bin) && victim->size == victim->fd->size)
+              if (victim != last (bin)
+		  && chunksize_nomask (victim)
+		    == chunksize_nomask (victim->fd))
                 victim = victim->fd;
 
               remainder_size = size - nb;
@@ -3592,7 +3611,7 @@ _int_malloc (mstate av, size_t bytes)
                 {
                   set_inuse_bit_at_offset (victim, size);
                   if (av != &main_arena)
-                    victim->size |= NON_MAIN_ARENA;
+		    set_non_main_arena (victim);
                 }
               /* Split */
               else
@@ -3697,7 +3716,7 @@ _int_malloc (mstate av, size_t bytes)
                 {
                   set_inuse_bit_at_offset (victim, size);
                   if (av != &main_arena)
-                    victim->size |= NON_MAIN_ARENA;
+		    set_non_main_arena (victim);
                 }
 
               /* Split */
@@ -3859,7 +3878,8 @@ _int_free (mstate av, mchunkptr p, int have_lock)
 #endif
       ) {
 
-    if (__builtin_expect (chunk_at_offset (p, size)->size <= 2 * SIZE_SZ, 0)
+    if (__builtin_expect (chunksize_nomask (chunk_at_offset (p, size))
+			  <= 2 * SIZE_SZ, 0)
 	|| __builtin_expect (chunksize (chunk_at_offset (p, size))
 			     >= av->system_mem, 0))
       {
@@ -3870,7 +3890,7 @@ _int_free (mstate av, mchunkptr p, int have_lock)
 	    || ({ assert (locked == 0);
 		  __libc_lock_lock (av->mutex);
 		  locked = 1;
-		  chunk_at_offset (p, size)->size <= 2 * SIZE_SZ
+		  chunksize_nomask (chunk_at_offset (p, size)) <= 2 * SIZE_SZ
 		    || chunksize (chunk_at_offset (p, size)) >= av->system_mem;
 	      }))
 	  {
@@ -3954,7 +3974,7 @@ _int_free (mstate av, mchunkptr p, int have_lock)
       }
 
     nextsize = chunksize(nextchunk);
-    if (__builtin_expect (nextchunk->size <= 2 * SIZE_SZ, 0)
+    if (__builtin_expect (chunksize_nomask (nextchunk) <= 2 * SIZE_SZ, 0)
 	|| __builtin_expect (nextsize >= av->system_mem, 0))
       {
 	errstr = "free(): invalid next size (normal)";
@@ -3965,7 +3985,7 @@ _int_free (mstate av, mchunkptr p, int have_lock)
 
     /* consolidate backward */
     if (!prev_inuse(p)) {
-      prevsize = p->prev_size;
+      prevsize = prev_size (p);
       size += prevsize;
       p = chunk_at_offset(p, -((long) prevsize));
       unlink(av, p, bck, fwd);
@@ -4130,12 +4150,12 @@ static void malloc_consolidate(mstate av)
 	  nextp = p->fd;
 
 	  /* Slightly streamlined version of consolidation code in free() */
-	  size = p->size & ~(PREV_INUSE|NON_MAIN_ARENA);
+	  size = chunksize (p);
 	  nextchunk = chunk_at_offset(p, size);
 	  nextsize = chunksize(nextchunk);
 
 	  if (!prev_inuse(p)) {
-	    prevsize = p->prev_size;
+	    prevsize = prev_size (p);
 	    size += prevsize;
 	    p = chunk_at_offset(p, -((long) prevsize));
 	    unlink(av, p, bck, fwd);
@@ -4210,7 +4230,7 @@ _int_realloc(mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
   const char *errstr = NULL;
 
   /* oldmem size */
-  if (__builtin_expect (oldp->size <= 2 * SIZE_SZ, 0)
+  if (__builtin_expect (chunksize_nomask (oldp) <= 2 * SIZE_SZ, 0)
       || __builtin_expect (oldsize >= av->system_mem, 0))
     {
       errstr = "realloc(): invalid old size";
@@ -4226,7 +4246,7 @@ _int_realloc(mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
 
   next = chunk_at_offset (oldp, oldsize);
   INTERNAL_SIZE_T nextsize = chunksize (next);
-  if (__builtin_expect (next->size <= 2 * SIZE_SZ, 0)
+  if (__builtin_expect (chunksize_nomask (next) <= 2 * SIZE_SZ, 0)
       || __builtin_expect (nextsize >= av->system_mem, 0))
     {
       errstr = "realloc(): invalid next size";
@@ -4412,7 +4432,7 @@ _int_memalign (mstate av, size_t alignment, size_t bytes)
       /* For mmapped chunks, just adjust offset */
       if (chunk_is_mmapped (p))
         {
-          newp->prev_size = p->prev_size + leadsize;
+          set_prev_size (newp, prev_size (p) + leadsize);
           set_head (newp, newsize | IS_MMAPPED);
           return chunk2mem (newp);
         }
@@ -5154,12 +5174,13 @@ __malloc_info (int options, FILE *fp)
 	  if (r != NULL)
 	    while (r != bin)
 	      {
+		size_t r_size = chunksize_nomask (r);
 		++sizes[NFASTBINS - 1 + i].count;
-		sizes[NFASTBINS - 1 + i].total += r->size;
+		sizes[NFASTBINS - 1 + i].total += r_size;
 		sizes[NFASTBINS - 1 + i].from
-		  = MIN (sizes[NFASTBINS - 1 + i].from, r->size);
+		  = MIN (sizes[NFASTBINS - 1 + i].from, r_size);
 		sizes[NFASTBINS - 1 + i].to = MAX (sizes[NFASTBINS - 1 + i].to,
-						   r->size);
+						   r_size);
 
 		r = r->fd;
 	      }

-----------------------------------------------------------------------


hooks/post-receive
-- 
GNU C Library master sources

