This is the mail archive of the libc-alpha@sourceware.org mailing list for the glibc project.


Re: PATCH: Malloc fails if MALLOC_ALIGNMENT > 2 * SIZE_SZ


On Fri, Aug 19, 2011 at 4:00 PM, Roland McGrath <roland@hack.frob.com> wrote:
>> I can't comment on powerpc32.  But a target shouldn't define
>>
>> MALLOC_ALIGNMENT > 2 * SIZE_SZ
>>
>> if it will break binary compatibility.
>
> Well, AFAICT this patch as is does that for powerpc32 if -mlong-double-128
> is the norm.  Hence my concern.
>
>

In that case, we don't change the default MALLOC_ALIGNMENT for powerpc32.

Thanks.

-- 
H.J.
---
2007-11-30  Daniel Jacobowitz  <dan@codesourcery.com>

	* malloc/malloc.c (SMALLBIN_CORRECTION): New.
	(MIN_LARGE_SIZE, smallbin_index): Use it to handle 16-byte alignment.
	(largebin_index_32_big): New.
	(largebin_index): Use it for 16-byte alignment.
	(sYSMALLOc): Handle MALLOC_ALIGNMENT > 2 * SIZE_SZ.
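
For illustration, here is a small stand-alone program that mirrors the
bin-index math from the patch below on a 32-bit target with 16-byte
alignment.  SIZE_SZ == 4 and MALLOC_ALIGNMENT == 16 are assumed; the
macros are copied from the patch, but the driver around them is only a
sketch, not code from malloc.c.

#include <stdio.h>

/* Assumed 32-bit layout with 16-byte malloc alignment.  */
#define SIZE_SZ              4UL
#define MALLOC_ALIGNMENT     16UL
#define NSMALLBINS           64
#define SMALLBIN_WIDTH       MALLOC_ALIGNMENT
#define SMALLBIN_CORRECTION  (MALLOC_ALIGNMENT > 2 * SIZE_SZ)
#define MIN_LARGE_SIZE       ((NSMALLBINS - SMALLBIN_CORRECTION) * SMALLBIN_WIDTH)

#define in_smallbin_range(sz) \
  ((unsigned long)(sz) < (unsigned long)MIN_LARGE_SIZE)

#define smallbin_index(sz) \
  ((SMALLBIN_WIDTH == 16 ? (((unsigned)(sz)) >> 4) : (((unsigned)(sz)) >> 3)) \
   + SMALLBIN_CORRECTION)

#define largebin_index_32_big(sz)                                            \
(((((unsigned long)(sz)) >>  6) <= 45)?  49 + (((unsigned long)(sz)) >>  6): \
 ((((unsigned long)(sz)) >>  9) <= 20)?  91 + (((unsigned long)(sz)) >>  9): \
 ((((unsigned long)(sz)) >> 12) <= 10)? 110 + (((unsigned long)(sz)) >> 12): \
 ((((unsigned long)(sz)) >> 15) <=  4)? 119 + (((unsigned long)(sz)) >> 15): \
 ((((unsigned long)(sz)) >> 18) <=  2)? 124 + (((unsigned long)(sz)) >> 18): \
                                        126)

int
main (void)
{
  /* A 16-byte chunk would index bin 1 (the unsorted list) without the
     correction; with it, small bins run from bin 2 up to bin 63 and the
     first large size, 1008, lands in bin 64, so there is no gap or
     overlap at the boundary.  */
  unsigned long sizes[] = { 16, 32, 992, 1008 };
  for (unsigned i = 0; i < sizeof sizes / sizeof sizes[0]; i++)
    {
      unsigned long sz = sizes[i];
      if (in_smallbin_range (sz))
	printf ("size %4lu -> small bin %u\n", sz, smallbin_index (sz));
      else
	printf ("size %4lu -> large bin %lu\n", sz, largebin_index_32_big (sz));
    }
  return 0;
}

It prints bins 2, 3, 63 and 64 for those sizes, which is the point of
SMALLBIN_CORRECTION: shift the small bins up by one so bin 1 stays free
for the unsorted list, and let MIN_LARGE_SIZE become 63 * 16 = 1008
instead of 1024.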

diff --git a/malloc/malloc.c b/malloc/malloc.c
index 3e06b87..c4bc1fb 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -2157,18 +2149,23 @@ typedef struct malloc_chunk* mbinptr;
 
     The bins top out around 1MB because we expect to service large
     requests via mmap.
+
+    Bin 0 does not exist.  Bin 1 is the unordered list; if that would be
+    a valid chunk size the small bins are bumped up one.
 */
 
 #define NBINS             128
 #define NSMALLBINS         64
 #define SMALLBIN_WIDTH    MALLOC_ALIGNMENT
-#define MIN_LARGE_SIZE    (NSMALLBINS * SMALLBIN_WIDTH)
+#define SMALLBIN_CORRECTION (MALLOC_ALIGNMENT > 2 * SIZE_SZ)
+#define MIN_LARGE_SIZE    ((NSMALLBINS - SMALLBIN_CORRECTION) * SMALLBIN_WIDTH)
 
 #define in_smallbin_range(sz)  \
   ((unsigned long)(sz) < (unsigned long)MIN_LARGE_SIZE)
 
 #define smallbin_index(sz) \
-  (SMALLBIN_WIDTH == 16 ? (((unsigned)(sz)) >> 4) : (((unsigned)(sz)) >> 3))
+  ((SMALLBIN_WIDTH == 16 ? (((unsigned)(sz)) >> 4) : (((unsigned)(sz)) >> 3)) \
+   + SMALLBIN_CORRECTION)
 
 #define largebin_index_32(sz)                                                \
 (((((unsigned long)(sz)) >>  6) <= 38)?  56 + (((unsigned long)(sz)) >>  6): \
@@ -2178,6 +2175,14 @@ typedef struct malloc_chunk* mbinptr;
  ((((unsigned long)(sz)) >> 18) <=  2)? 124 + (((unsigned long)(sz)) >> 18): \
 					126)
 
+#define largebin_index_32_big(sz)                                            \
+(((((unsigned long)(sz)) >>  6) <= 45)?  49 + (((unsigned long)(sz)) >>  6): \
+ ((((unsigned long)(sz)) >>  9) <= 20)?  91 + (((unsigned long)(sz)) >>  9): \
+ ((((unsigned long)(sz)) >> 12) <= 10)? 110 + (((unsigned long)(sz)) >> 12): \
+ ((((unsigned long)(sz)) >> 15) <=  4)? 119 + (((unsigned long)(sz)) >> 15): \
+ ((((unsigned long)(sz)) >> 18) <=  2)? 124 + (((unsigned long)(sz)) >> 18): \
+                                        126)
+
 // XXX It remains to be seen whether it is good to keep the widths of
 // XXX the buckets the same or whether it should be scaled by a factor
 // XXX of two as well.
@@ -2190,7 +2195,9 @@ typedef struct malloc_chunk* mbinptr;
 					126)
 
 #define largebin_index(sz) \
-  (SIZE_SZ == 8 ? largebin_index_64 (sz) : largebin_index_32 (sz))
+  (SIZE_SZ == 8 ? largebin_index_64 (sz)                                     \
+   : MALLOC_ALIGNMENT == 16 ? largebin_index_32_big (sz)                     \
+   : largebin_index_32 (sz))
 
 #define bin_index(sz) \
  ((in_smallbin_range(sz)) ? smallbin_index(sz) : largebin_index(sz))
@@ -3010,14 +3017,14 @@ static Void_t* sYSMALLOc(nb, av) INTERNAL_SIZE_T nb; mstate av;
       Round up size to nearest page.  For mmapped chunks, the overhead
       is one SIZE_SZ unit larger than for normal chunks, because there
       is no following chunk whose prev_size field could be used.
+
+      See the front_misalign handling below, for glibc there is no
+      need for further alignments unless we have high alignment.
     */
-#if 1
-    /* See the front_misalign handling below, for glibc there is no
-       need for further alignments.  */
-    size = (nb + SIZE_SZ + pagemask) & ~pagemask;
-#else
-    size = (nb + SIZE_SZ + MALLOC_ALIGN_MASK + pagemask) & ~pagemask;
-#endif
+    if (MALLOC_ALIGNMENT == 2 * SIZE_SZ)
+      size = (nb + SIZE_SZ + pagemask) & ~pagemask;
+    else
+      size = (nb + SIZE_SZ + MALLOC_ALIGN_MASK + pagemask) & ~pagemask;
     tried_mmap = true;
 
     /* Don't try if size wraps around 0 */
@@ -3035,13 +3042,16 @@ static Void_t* sYSMALLOc(nb, av) INTERNAL_SIZE_T nb; mstate av;
 	  address argument for later munmap in free() and realloc().
 	*/
 
-#if 1
-	/* For glibc, chunk2mem increases the address by 2*SIZE_SZ and
-	   MALLOC_ALIGN_MASK is 2*SIZE_SZ-1.  Each mmap'ed area is page
-	   aligned and therefore definitely MALLOC_ALIGN_MASK-aligned.  */
-	assert (((INTERNAL_SIZE_T)chunk2mem(mm) & MALLOC_ALIGN_MASK) == 0);
-#else
-	front_misalign = (INTERNAL_SIZE_T)chunk2mem(mm) & MALLOC_ALIGN_MASK;
+	if (MALLOC_ALIGNMENT == 2 * SIZE_SZ)
+	  {
+	    /* For glibc, chunk2mem increases the address by 2*SIZE_SZ and
+	       MALLOC_ALIGN_MASK is 2*SIZE_SZ-1.  Each mmap'ed area is page
+	       aligned and therefore definitely MALLOC_ALIGN_MASK-aligned.  */
+	    assert (((INTERNAL_SIZE_T)chunk2mem(mm) & MALLOC_ALIGN_MASK) == 0);
+	    front_misalign = 0;
+	  }
+	else
+	  front_misalign = (INTERNAL_SIZE_T)chunk2mem(mm) & MALLOC_ALIGN_MASK;
 	if (front_misalign > 0) {
 	  correction = MALLOC_ALIGNMENT - front_misalign;
 	  p = (mchunkptr)(mm + correction);
@@ -3049,7 +3059,6 @@ static Void_t* sYSMALLOc(nb, av) INTERNAL_SIZE_T nb; mstate av;
 	  set_head(p, (size - correction) |IS_MMAPPED);
 	}
 	else
-#endif
 	  {
 	    p = (mchunkptr)mm;
 	    set_head(p, size|IS_MMAPPED);
@@ -3346,8 +3355,25 @@ static Void_t* sYSMALLOc(nb, av) INTERNAL_SIZE_T nb; mstate av;
 
       /* handle non-contiguous cases */
       else {
-	/* MORECORE/mmap must correctly align */
-	assert(((unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK) == 0);
+	if (MALLOC_ALIGNMENT == 2 * SIZE_SZ)
+	  /* MORECORE/mmap must correctly align */
+	  assert(((unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK) == 0);
+	else {
+	  front_misalign = (INTERNAL_SIZE_T)chunk2mem(brk) & MALLOC_ALIGN_MASK;
+	  if (front_misalign > 0) {
+
+	    /*
+	      Skip over some bytes to arrive at an aligned position.
+	      We don't need to specially mark these wasted front bytes.
+	      They will never be accessed anyway because
+	      prev_inuse of av->top (and any chunk created from its start)
+	      is always true after initialization.
+	    */
+
+	    correction = MALLOC_ALIGNMENT - front_misalign;
+	    aligned_brk += correction;
+	  }
+	}
 
 	/* Find out current end of memory */
 	if (snd_brk == (char*)(MORECORE_FAILURE)) {
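
To make the mmap path above concrete, here is a short sketch of the
front_misalign fix-up with made-up numbers.  It assumes SIZE_SZ == 4,
MALLOC_ALIGNMENT == 16 and 4 KiB pages; chunk2mem() adds 2*SIZE_SZ as in
malloc.c, but the address and request size are hypothetical and nothing
here is the allocator's own code.

#include <assert.h>
#include <stdio.h>

#define SIZE_SZ            4UL
#define MALLOC_ALIGNMENT   16UL
#define MALLOC_ALIGN_MASK  (MALLOC_ALIGNMENT - 1)
#define PAGE_MASK          (4096UL - 1)
#define chunk2mem(p)       ((p) + 2 * SIZE_SZ)

int
main (void)
{
  unsigned long nb = 2000;          /* request already padded to a chunk size */
  unsigned long mm = 0x10000000UL;  /* page-aligned address mmap might return */

  /* With high alignment the mapping is padded by MALLOC_ALIGN_MASK so the
     chunk still fits after the misaligned front bytes are skipped.  */
  unsigned long size = (nb + SIZE_SZ + MALLOC_ALIGN_MASK + PAGE_MASK) & ~PAGE_MASK;

  /* chunk2mem(mm) is mm + 8, i.e. 8 bytes short of 16-byte alignment;
     moving the chunk up by the correction puts the user pointer on a
     16-byte boundary.  */
  unsigned long front_misalign = chunk2mem (mm) & MALLOC_ALIGN_MASK;
  unsigned long correction = front_misalign ? MALLOC_ALIGNMENT - front_misalign : 0;
  unsigned long p = mm + correction;

  printf ("size %lu, front_misalign %lu, correction %lu\n",
	  size, front_misalign, correction);
  assert ((chunk2mem (p) & MALLOC_ALIGN_MASK) == 0);
  assert (size - correction >= nb + SIZE_SZ);
  return 0;
}

In malloc.c the skipped bytes are recorded in the chunk's prev_size field
and subtracted from the head size, and the same MALLOC_ALIGNMENT -
front_misalign correction is applied to aligned_brk in the non-contiguous
MORECORE case above, where the wasted front bytes are simply left
unmarked.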
