


[PATCH] malloc: use MAP_HUGETLB when possible


From: Joern Engel <joern@purestorage.org>

Replicate the logic from Purity: try huge pages first, then fall back
to small pages.  Care must be taken not to combine MAP_HUGETLB with
MAP_NORESERVE, as the result is a successful mmap() followed by SIGBUS
when the memory is first touched.
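
To illustrate the failure mode, a minimal standalone test might look
like the sketch below (a hypothetical program, not part of the patch;
it uses plain mmap(2) rather than glibc's internal MMAP macro):

	#include <sys/mman.h>

	int main(void)
	{
		size_t len = 2 * 1024 * 1024;	/* one 2MB huge page */
		char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | MAP_NORESERVE,
			       -1, 0);
		if (p == MAP_FAILED)
			return 1;	/* the mapping itself tends to succeed */
		p[0] = 1;		/* may raise SIGBUS here, if no huge page
					   is actually available */
		return 0;
	}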

More care must be taken to memset() the returned memory, as our kernel
does not clear huge pages and malloc assumes newly mapped memory is
zeroed.  There is optimization potential here, as memset(128GB) takes
around 20ms.
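
One way to measure the clearing cost is a micro-benchmark along these
lines (a sketch; the 1GB size and the timing method are assumptions,
not taken from the patch, and the machine needs huge pages configured):

	#include <stdio.h>
	#include <string.h>
	#include <time.h>
	#include <sys/mman.h>

	int main(void)
	{
		size_t len = 1UL << 30;	/* 1GB; scale up as needed */
		void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
		struct timespec t0, t1;

		if (p == MAP_FAILED)
			return 1;	/* no huge pages available */
		clock_gettime(CLOCK_MONOTONIC, &t0);
		memset(p, 0, len);
		clock_gettime(CLOCK_MONOTONIC, &t1);
		printf("memset: %.1f ms\n",
		       (t1.tv_sec - t0.tv_sec) * 1e3 +
		       (t1.tv_nsec - t0.tv_nsec) / 1e6);
		return 0;
	}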

JIRA: PURE-27597
---
 tpc/malloc2.13/arena.ch | 22 +++++++++++++++++-----
 1 file changed, 17 insertions(+), 5 deletions(-)

diff --git a/tpc/malloc2.13/arena.ch b/tpc/malloc2.13/arena.ch
index 3e778f3f96f7..fae6c2f7ee4c 100644
--- a/tpc/malloc2.13/arena.ch
+++ b/tpc/malloc2.13/arena.ch
@@ -687,6 +687,20 @@ dump_heap(heap) heap_info *heap;
    multiple threads, but only one will succeed.  */
 static char *aligned_heap_area;
 
+static void *mmap_for_heap(void *addr, size_t length)
+{
+	int prot = PROT_READ | PROT_WRITE;
+	int flags = MAP_PRIVATE;
+	void *ret;
+
+	ret = MMAP(addr, length, prot, flags | MAP_HUGETLB);
+	if (ret != MAP_FAILED) {
+		memset(ret, 0, length);
+		return ret;
+	}
+	return MMAP(addr, length, prot, flags | MAP_NORESERVE);
+}
+
 /* Create a new heap.  size is automatically rounded up to a multiple
    of the page size. */
 
@@ -719,8 +733,7 @@ new_heap(size, top_pad) size_t size, top_pad;
      anyway). */
   p2 = MAP_FAILED;
   if(aligned_heap_area) {
-    p2 = (char *)MMAP(aligned_heap_area, HEAP_MAX_SIZE, PROT_READ|PROT_WRITE,
-		      MAP_PRIVATE|MAP_NORESERVE);
+    p2 = mmap_for_heap(aligned_heap_area, HEAP_MAX_SIZE);
     aligned_heap_area = NULL;
     if (p2 != MAP_FAILED && ((unsigned long)p2 & (HEAP_MAX_SIZE-1))) {
       munmap(p2, HEAP_MAX_SIZE);
@@ -728,8 +741,7 @@ new_heap(size, top_pad) size_t size, top_pad;
     }
   }
   if(p2 == MAP_FAILED) {
-    p1 = (char *)MMAP(0, HEAP_MAX_SIZE<<1, PROT_READ|PROT_WRITE,
-		      MAP_PRIVATE|MAP_NORESERVE);
+    p1 = mmap_for_heap(0, HEAP_MAX_SIZE<<1);
     if(p1 != MAP_FAILED) {
       p2 = (char *)(((unsigned long)p1 + (HEAP_MAX_SIZE-1))
 		    & ~(HEAP_MAX_SIZE-1));
@@ -742,7 +754,7 @@ new_heap(size, top_pad) size_t size, top_pad;
     } else {
       /* Try to take the chance that an allocation of only HEAP_MAX_SIZE
 	 is already aligned. */
-      p2 = (char *)MMAP(0, HEAP_MAX_SIZE, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_NORESERVE);
+      p2 = mmap_for_heap(0, HEAP_MAX_SIZE);
       if(p2 == MAP_FAILED)
 	return 0;
       if((unsigned long)p2 & (HEAP_MAX_SIZE-1)) {
-- 
2.7.0.rc3

