This is the mail archive of the libc-alpha@sourceware.org mailing list for the glibc project.



[PATCH] malloc: Lindent users of arena_get2


From: Joern Engel <joern@purestorage.org>

Pure reformatting (Lindent) of the public wrappers that call arena_get2; no functional change intended. This is preparation for a later functional change.

JIRA: PURE-27597
---
 tpc/malloc2.13/malloc.c | 495 +++++++++++++++++++++++-------------------------
 1 file changed, 239 insertions(+), 256 deletions(-)
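
Note for readers skimming the hunks below: every wrapper touched here repeats the same fallback dance around arena_get2: allocate from the current arena, retry from main_arena if that fails, or fall back to a fresh (mmapped) arena when sbrk() has failed. The stand-alone C sketch below models only that control flow, with locking omitted and with stand-in names (struct arena, try_alloc, get_fresh_arena are illustrative stubs, not the glibc internals):

/* Minimal sketch of the retry pattern shared by the wrappers below.
 * All names here (struct arena, try_alloc, get_fresh_arena) are
 * simplified stand-ins, not the real glibc internals.  */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct arena {
	const char *name;
	int exhausted;		/* pretend this arena cannot satisfy requests */
};

static struct arena main_arena = { "main", 0 };
static struct arena thread_arena = { "thread", 1 };

/* Stand-in for _int_malloc(): fails when the arena is marked exhausted.  */
static void *try_alloc(struct arena *ar, size_t bytes)
{
	return ar->exhausted ? NULL : malloc(bytes);
}

/* Stand-in for arena_get2(): hand back a freshly "created" arena.  */
static struct arena *get_fresh_arena(size_t bytes)
{
	static struct arena fresh = { "fresh", 0 };
	(void)bytes;
	return &fresh;
}

/* The shape of public_mALLOc() and friends: try, then fall back.  */
static void *alloc_with_fallback(struct arena *ar, size_t bytes)
{
	void *mem = try_alloc(ar, bytes);
	if (mem)
		return mem;
	if (ar != &main_arena) {
		/* Maybe the failure is due to running out of mmapped areas,
		   so retry from the main arena.  */
		return try_alloc(&main_arena, bytes);
	}
	/* ... or sbrk() has failed and a brand new arena may still work.  */
	ar = get_fresh_arena(bytes);
	return ar ? try_alloc(ar, bytes) : NULL;
}

int main(void)
{
	void *p = alloc_with_fallback(&thread_arena, 64);
	printf("allocation %s\n", p ? "succeeded" : "failed");
	free(p);
	return 0;
}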

diff --git a/tpc/malloc2.13/malloc.c b/tpc/malloc2.13/malloc.c
index 94b55241d3bf..28d9d902b7ec 100644
--- a/tpc/malloc2.13/malloc.c
+++ b/tpc/malloc2.13/malloc.c
@@ -3399,44 +3399,42 @@ mremap_chunk(mchunkptr p, size_t new_size)
 
 /*------------------------ Public wrappers. --------------------------------*/
 
-Void_t*
-public_mALLOc(size_t bytes)
+Void_t *public_mALLOc(size_t bytes)
 {
-  struct malloc_state * ar_ptr;
-  Void_t *victim;
-
-  __malloc_ptr_t (*hook) (size_t, __const __malloc_ptr_t)
-    = force_reg (dlmalloc_hook);
-  if (__builtin_expect (hook != NULL, 0))
-    return (*hook)(bytes, RETURN_ADDRESS (0));
-
-  arena_lookup(ar_ptr);
-  arena_lock(ar_ptr, bytes);
-  if(!ar_ptr)
-    return 0;
-  victim = _int_malloc(ar_ptr, bytes);
-  if(!victim) {
-    /* Maybe the failure is due to running out of mmapped areas. */
-    if(ar_ptr != &main_arena) {
-      (void)mutex_unlock(&ar_ptr->mutex);
-      ar_ptr = &main_arena;
-      (void)mutex_lock(&ar_ptr->mutex);
-      victim = _int_malloc(ar_ptr, bytes);
-      (void)mutex_unlock(&ar_ptr->mutex);
-    } else {
-      /* ... or sbrk() has failed and there is still a chance to mmap() */
-      ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0, bytes);
-      (void)mutex_unlock(&main_arena.mutex);
-      if(ar_ptr) {
+	struct malloc_state *ar_ptr;
+	Void_t *victim;
+
+	__malloc_ptr_t(*hook) (size_t, __const __malloc_ptr_t)
+	    = force_reg(dlmalloc_hook);
+	if (__builtin_expect(hook != NULL, 0))
+		return (*hook) (bytes, RETURN_ADDRESS(0));
+
+	arena_lookup(ar_ptr);
+	arena_lock(ar_ptr, bytes);
+	if (!ar_ptr)
+		return 0;
 	victim = _int_malloc(ar_ptr, bytes);
-	(void)mutex_unlock(&ar_ptr->mutex);
-      }
-    }
-  } else
-    (void)mutex_unlock(&ar_ptr->mutex);
-  assert(!victim || chunk_is_mmapped(mem2chunk(victim)) ||
-	 ar_ptr == arena_for_chunk(mem2chunk(victim)));
-  return victim;
+	if (!victim) {
+		/* Maybe the failure is due to running out of mmapped areas. */
+		if (ar_ptr != &main_arena) {
+			(void)mutex_unlock(&ar_ptr->mutex);
+			ar_ptr = &main_arena;
+			(void)mutex_lock(&ar_ptr->mutex);
+			victim = _int_malloc(ar_ptr, bytes);
+			(void)mutex_unlock(&ar_ptr->mutex);
+		} else {
+			/* ... or sbrk() has failed and there is still a chance to mmap() */
+			ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0, bytes);
+			(void)mutex_unlock(&main_arena.mutex);
+			if (ar_ptr) {
+				victim = _int_malloc(ar_ptr, bytes);
+				(void)mutex_unlock(&ar_ptr->mutex);
+			}
+		}
+	} else
+		(void)mutex_unlock(&ar_ptr->mutex);
+	assert(!victim || chunk_is_mmapped(mem2chunk(victim)) || ar_ptr == arena_for_chunk(mem2chunk(victim)));
+	return victim;
 }
 
 void
@@ -3598,278 +3596,263 @@ public_rEALLOc(Void_t* oldmem, size_t bytes)
   return newp;
 }
 
-Void_t*
-public_mEMALIGn(size_t alignment, size_t bytes)
+Void_t *public_mEMALIGn(size_t alignment, size_t bytes)
 {
-  struct malloc_state * ar_ptr;
-  Void_t *p;
+	struct malloc_state *ar_ptr;
+	Void_t *p;
 
-  __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, size_t,
-					__const __malloc_ptr_t)) =
-    force_reg (dlmemalign_hook);
-  if (__builtin_expect (hook != NULL, 0))
-    return (*hook)(alignment, bytes, RETURN_ADDRESS (0));
+	__malloc_ptr_t(*hook) __MALLOC_PMT((size_t, size_t, __const __malloc_ptr_t)) = force_reg(dlmemalign_hook);
+	if (__builtin_expect(hook != NULL, 0))
+		return (*hook) (alignment, bytes, RETURN_ADDRESS(0));
 
-  /* If need less alignment than we give anyway, just relay to malloc */
-  if (alignment <= MALLOC_ALIGNMENT) return public_mALLOc(bytes);
+	/* If need less alignment than we give anyway, just relay to malloc */
+	if (alignment <= MALLOC_ALIGNMENT)
+		return public_mALLOc(bytes);
 
-  /* Otherwise, ensure that it is at least a minimum chunk size */
-  if (alignment <  MINSIZE) alignment = MINSIZE;
+	/* Otherwise, ensure that it is at least a minimum chunk size */
+	if (alignment < MINSIZE)
+		alignment = MINSIZE;
 
-  arena_get(ar_ptr, bytes + alignment + MINSIZE);
-  if(!ar_ptr)
-    return 0;
-  p = _int_memalign(ar_ptr, alignment, bytes);
-  if(!p) {
-    /* Maybe the failure is due to running out of mmapped areas. */
-    if(ar_ptr != &main_arena) {
-      (void)mutex_unlock(&ar_ptr->mutex);
-      ar_ptr = &main_arena;
-      (void)mutex_lock(&ar_ptr->mutex);
-      p = _int_memalign(ar_ptr, alignment, bytes);
-      (void)mutex_unlock(&ar_ptr->mutex);
-    } else {
-      /* ... or sbrk() has failed and there is still a chance to mmap() */
-      struct malloc_state * prev = ar_ptr->next ? ar_ptr : 0;
-      (void)mutex_unlock(&ar_ptr->mutex);
-      ar_ptr = arena_get2(prev, bytes);
-      if(ar_ptr) {
+	arena_get(ar_ptr, bytes + alignment + MINSIZE);
+	if (!ar_ptr)
+		return 0;
 	p = _int_memalign(ar_ptr, alignment, bytes);
-	(void)mutex_unlock(&ar_ptr->mutex);
-      }
-    }
-  } else
-    (void)mutex_unlock(&ar_ptr->mutex);
-  assert(!p || chunk_is_mmapped(mem2chunk(p)) ||
-	 ar_ptr == arena_for_chunk(mem2chunk(p)));
-  return p;
+	if (!p) {
+		/* Maybe the failure is due to running out of mmapped areas. */
+		if (ar_ptr != &main_arena) {
+			(void)mutex_unlock(&ar_ptr->mutex);
+			ar_ptr = &main_arena;
+			(void)mutex_lock(&ar_ptr->mutex);
+			p = _int_memalign(ar_ptr, alignment, bytes);
+			(void)mutex_unlock(&ar_ptr->mutex);
+		} else {
+			/* ... or sbrk() has failed and there is still a chance to mmap() */
+			struct malloc_state *prev = ar_ptr->next ? ar_ptr : 0;
+			(void)mutex_unlock(&ar_ptr->mutex);
+			ar_ptr = arena_get2(prev, bytes);
+			if (ar_ptr) {
+				p = _int_memalign(ar_ptr, alignment, bytes);
+				(void)mutex_unlock(&ar_ptr->mutex);
+			}
+		}
+	} else
+		(void)mutex_unlock(&ar_ptr->mutex);
+	assert(!p || chunk_is_mmapped(mem2chunk(p)) || ar_ptr == arena_for_chunk(mem2chunk(p)));
+	return p;
 }
 
-Void_t*
-public_vALLOc(size_t bytes)
+Void_t *public_vALLOc(size_t bytes)
 {
-  struct malloc_state * ar_ptr;
-  Void_t *p;
+	struct malloc_state *ar_ptr;
+	Void_t *p;
 
-  if(__malloc_initialized < 0)
-    ptmalloc_init ();
+	if (__malloc_initialized < 0)
+		ptmalloc_init();
 
-  size_t pagesz = mp_.pagesize;
+	size_t pagesz = mp_.pagesize;
 
-  __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, size_t,
-					__const __malloc_ptr_t)) =
-    force_reg (dlmemalign_hook);
-  if (__builtin_expect (hook != NULL, 0))
-    return (*hook)(pagesz, bytes, RETURN_ADDRESS (0));
+	__malloc_ptr_t(*hook) __MALLOC_PMT((size_t, size_t, __const __malloc_ptr_t)) = force_reg(dlmemalign_hook);
+	if (__builtin_expect(hook != NULL, 0))
+		return (*hook) (pagesz, bytes, RETURN_ADDRESS(0));
 
-  arena_get(ar_ptr, bytes + pagesz + MINSIZE);
-  if(!ar_ptr)
-    return 0;
-  p = _int_valloc(ar_ptr, bytes);
-  (void)mutex_unlock(&ar_ptr->mutex);
-  if(!p) {
-    /* Maybe the failure is due to running out of mmapped areas. */
-    if(ar_ptr != &main_arena) {
-      ar_ptr = &main_arena;
-      (void)mutex_lock(&ar_ptr->mutex);
-      p = _int_memalign(ar_ptr, pagesz, bytes);
-      (void)mutex_unlock(&ar_ptr->mutex);
-    } else {
-      /* ... or sbrk() has failed and there is still a chance to mmap() */
-      ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0, bytes);
-      if(ar_ptr) {
-	p = _int_memalign(ar_ptr, pagesz, bytes);
+	arena_get(ar_ptr, bytes + pagesz + MINSIZE);
+	if (!ar_ptr)
+		return 0;
+	p = _int_valloc(ar_ptr, bytes);
 	(void)mutex_unlock(&ar_ptr->mutex);
-      }
-    }
-  }
-  assert(!p || chunk_is_mmapped(mem2chunk(p)) ||
-	 ar_ptr == arena_for_chunk(mem2chunk(p)));
+	if (!p) {
+		/* Maybe the failure is due to running out of mmapped areas. */
+		if (ar_ptr != &main_arena) {
+			ar_ptr = &main_arena;
+			(void)mutex_lock(&ar_ptr->mutex);
+			p = _int_memalign(ar_ptr, pagesz, bytes);
+			(void)mutex_unlock(&ar_ptr->mutex);
+		} else {
+			/* ... or sbrk() has failed and there is still a chance to mmap() */
+			ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0, bytes);
+			if (ar_ptr) {
+				p = _int_memalign(ar_ptr, pagesz, bytes);
+				(void)mutex_unlock(&ar_ptr->mutex);
+			}
+		}
+	}
+	assert(!p || chunk_is_mmapped(mem2chunk(p)) || ar_ptr == arena_for_chunk(mem2chunk(p)));
 
-  return p;
+	return p;
 }
 
-Void_t*
-public_pVALLOc(size_t bytes)
+Void_t *public_pVALLOc(size_t bytes)
 {
-  struct malloc_state * ar_ptr;
-  Void_t *p;
+	struct malloc_state *ar_ptr;
+	Void_t *p;
 
-  if(__malloc_initialized < 0)
-    ptmalloc_init ();
+	if (__malloc_initialized < 0)
+		ptmalloc_init();
 
-  size_t pagesz = mp_.pagesize;
-  size_t page_mask = mp_.pagesize - 1;
-  size_t rounded_bytes = (bytes + page_mask) & ~(page_mask);
+	size_t pagesz = mp_.pagesize;
+	size_t page_mask = mp_.pagesize - 1;
+	size_t rounded_bytes = (bytes + page_mask) & ~(page_mask);
 
-  __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, size_t,
-					__const __malloc_ptr_t)) =
-    force_reg (dlmemalign_hook);
-  if (__builtin_expect (hook != NULL, 0))
-    return (*hook)(pagesz, rounded_bytes, RETURN_ADDRESS (0));
+	__malloc_ptr_t(*hook) __MALLOC_PMT((size_t, size_t, __const __malloc_ptr_t)) = force_reg(dlmemalign_hook);
+	if (__builtin_expect(hook != NULL, 0))
+		return (*hook) (pagesz, rounded_bytes, RETURN_ADDRESS(0));
 
-  arena_get(ar_ptr, bytes + 2*pagesz + MINSIZE);
-  p = _int_pvalloc(ar_ptr, bytes);
-  (void)mutex_unlock(&ar_ptr->mutex);
-  if(!p) {
-    /* Maybe the failure is due to running out of mmapped areas. */
-    if(ar_ptr != &main_arena) {
-      ar_ptr = &main_arena;
-      (void)mutex_lock(&ar_ptr->mutex);
-      p = _int_memalign(ar_ptr, pagesz, rounded_bytes);
-      (void)mutex_unlock(&ar_ptr->mutex);
-    } else {
-      /* ... or sbrk() has failed and there is still a chance to mmap() */
-      ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0,
-			  bytes + 2*pagesz + MINSIZE);
-      if(ar_ptr) {
-	p = _int_memalign(ar_ptr, pagesz, rounded_bytes);
+	arena_get(ar_ptr, bytes + 2 * pagesz + MINSIZE);
+	p = _int_pvalloc(ar_ptr, bytes);
 	(void)mutex_unlock(&ar_ptr->mutex);
-      }
-    }
-  }
-  assert(!p || chunk_is_mmapped(mem2chunk(p)) ||
-	 ar_ptr == arena_for_chunk(mem2chunk(p)));
+	if (!p) {
+		/* Maybe the failure is due to running out of mmapped areas. */
+		if (ar_ptr != &main_arena) {
+			ar_ptr = &main_arena;
+			(void)mutex_lock(&ar_ptr->mutex);
+			p = _int_memalign(ar_ptr, pagesz, rounded_bytes);
+			(void)mutex_unlock(&ar_ptr->mutex);
+		} else {
+			/* ... or sbrk() has failed and there is still a chance to mmap() */
+			ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0, bytes + 2 * pagesz + MINSIZE);
+			if (ar_ptr) {
+				p = _int_memalign(ar_ptr, pagesz, rounded_bytes);
+				(void)mutex_unlock(&ar_ptr->mutex);
+			}
+		}
+	}
+	assert(!p || chunk_is_mmapped(mem2chunk(p)) || ar_ptr == arena_for_chunk(mem2chunk(p)));
 
-  return p;
+	return p;
 }
 
-Void_t*
-public_cALLOc(size_t n, size_t elem_size)
+Void_t *public_cALLOc(size_t n, size_t elem_size)
 {
-  struct malloc_state * av;
-  mchunkptr oldtop, p;
-  INTERNAL_SIZE_T bytes, sz, csz, oldtopsize;
-  Void_t* mem;
-  unsigned long clearsize;
-  unsigned long nclears;
-  INTERNAL_SIZE_T* d;
-
-  /* size_t is unsigned so the behavior on overflow is defined.  */
-  bytes = n * elem_size;
+	struct malloc_state *av;
+	mchunkptr oldtop, p;
+	INTERNAL_SIZE_T bytes, sz, csz, oldtopsize;
+	Void_t *mem;
+	unsigned long clearsize;
+	unsigned long nclears;
+	INTERNAL_SIZE_T *d;
+
+	/* size_t is unsigned so the behavior on overflow is defined.  */
+	bytes = n * elem_size;
 #define HALF_INTERNAL_SIZE_T \
   (((INTERNAL_SIZE_T) 1) << (8 * sizeof (INTERNAL_SIZE_T) / 2))
-  if (__builtin_expect ((n | elem_size) >= HALF_INTERNAL_SIZE_T, 0)) {
-    if (elem_size != 0 && bytes / elem_size != n) {
-      MALLOC_FAILURE_ACTION;
-      return 0;
-    }
-  }
+	if (__builtin_expect((n | elem_size) >= HALF_INTERNAL_SIZE_T, 0)) {
+		if (elem_size != 0 && bytes / elem_size != n) {
+			MALLOC_FAILURE_ACTION;
+			return 0;
+		}
+	}
 
-  __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, __const __malloc_ptr_t)) =
-    force_reg (dlmalloc_hook);
-  if (__builtin_expect (hook != NULL, 0)) {
-    sz = bytes;
-    mem = (*hook)(sz, RETURN_ADDRESS (0));
-    if(mem == 0)
-      return 0;
+	__malloc_ptr_t(*hook) __MALLOC_PMT((size_t, __const __malloc_ptr_t)) = force_reg(dlmalloc_hook);
+	if (__builtin_expect(hook != NULL, 0)) {
+		sz = bytes;
+		mem = (*hook) (sz, RETURN_ADDRESS(0));
+		if (mem == 0)
+			return 0;
 #ifdef HAVE_MEMCPY
-    return memset(mem, 0, sz);
+		return memset(mem, 0, sz);
 #else
-    while(sz > 0) ((char*)mem)[--sz] = 0; /* rather inefficient */
-    return mem;
+		while (sz > 0)
+			((char *)mem)[--sz] = 0;	/* rather inefficient */
+		return mem;
 #endif
-  }
+	}
 
-  sz = bytes;
+	sz = bytes;
 
-  arena_get(av, sz);
-  if(!av)
-    return 0;
+	arena_get(av, sz);
+	if (!av)
+		return 0;
 
-  /* Check if we hand out the top chunk, in which case there may be no
-     need to clear. */
+	/* Check if we hand out the top chunk, in which case there may be no
+	   need to clear. */
 #if MORECORE_CLEARS
-  oldtop = top(av);
-  oldtopsize = chunksize(top(av));
+	oldtop = top(av);
+	oldtopsize = chunksize(top(av));
 #if MORECORE_CLEARS < 2
-  /* Only newly allocated memory is guaranteed to be cleared.  */
-  if (av == &main_arena &&
-      oldtopsize < mp_.sbrk_base + av->max_system_mem - (char *)oldtop)
-    oldtopsize = (mp_.sbrk_base + av->max_system_mem - (char *)oldtop);
+	/* Only newly allocated memory is guaranteed to be cleared.  */
+	if (av == &main_arena && oldtopsize < mp_.sbrk_base + av->max_system_mem - (char *)oldtop)
+		oldtopsize = (mp_.sbrk_base + av->max_system_mem - (char *)oldtop);
 #endif
-  if (av != &main_arena)
-    {
-      heap_info *heap = heap_for_ptr (oldtop);
-      if (oldtopsize < (char *) heap + heap->mprotect_size - (char *) oldtop)
-	oldtopsize = (char *) heap + heap->mprotect_size - (char *) oldtop;
-    }
+	if (av != &main_arena) {
+		heap_info *heap = heap_for_ptr(oldtop);
+		if (oldtopsize < (char *)heap + heap->mprotect_size - (char *)oldtop)
+			oldtopsize = (char *)heap + heap->mprotect_size - (char *)oldtop;
+	}
 #endif
-  mem = _int_malloc(av, sz);
-
-  /* Only clearing follows, so we can unlock early. */
-  (void)mutex_unlock(&av->mutex);
-
-  assert(!mem || chunk_is_mmapped(mem2chunk(mem)) ||
-	 av == arena_for_chunk(mem2chunk(mem)));
-
-  if (mem == 0) {
-    /* Maybe the failure is due to running out of mmapped areas. */
-    if(av != &main_arena) {
-      (void)mutex_lock(&main_arena.mutex);
-      mem = _int_malloc(&main_arena, sz);
-      (void)mutex_unlock(&main_arena.mutex);
-    } else {
-      /* ... or sbrk() has failed and there is still a chance to mmap() */
-      (void)mutex_lock(&main_arena.mutex);
-      av = arena_get2(av->next ? av : 0, sz);
-      (void)mutex_unlock(&main_arena.mutex);
-      if(av) {
 	mem = _int_malloc(av, sz);
+
+	/* Only clearing follows, so we can unlock early. */
 	(void)mutex_unlock(&av->mutex);
-      }
-    }
-    if (mem == 0) return 0;
-  }
-  p = mem2chunk(mem);
 
-  /* Two optional cases in which clearing not necessary */
-  if (chunk_is_mmapped (p))
-    {
-      if (__builtin_expect (perturb_byte, 0))
-	MALLOC_ZERO (mem, sz);
-      return mem;
-    }
+	assert(!mem || chunk_is_mmapped(mem2chunk(mem)) || av == arena_for_chunk(mem2chunk(mem)));
+
+	if (mem == 0) {
+		/* Maybe the failure is due to running out of mmapped areas. */
+		if (av != &main_arena) {
+			(void)mutex_lock(&main_arena.mutex);
+			mem = _int_malloc(&main_arena, sz);
+			(void)mutex_unlock(&main_arena.mutex);
+		} else {
+			/* ... or sbrk() has failed and there is still a chance to mmap() */
+			(void)mutex_lock(&main_arena.mutex);
+			av = arena_get2(av->next ? av : 0, sz);
+			(void)mutex_unlock(&main_arena.mutex);
+			if (av) {
+				mem = _int_malloc(av, sz);
+				(void)mutex_unlock(&av->mutex);
+			}
+		}
+		if (mem == 0)
+			return 0;
+	}
+	p = mem2chunk(mem);
+
+	/* Two optional cases in which clearing not necessary */
+	if (chunk_is_mmapped(p)) {
+		if (__builtin_expect(perturb_byte, 0))
+			MALLOC_ZERO(mem, sz);
+		return mem;
+	}
 
-  csz = chunksize(p);
+	csz = chunksize(p);
 
 #if MORECORE_CLEARS
-  if (perturb_byte == 0 && (p == oldtop && csz > oldtopsize)) {
-    /* clear only the bytes from non-freshly-sbrked memory */
-    csz = oldtopsize;
-  }
+	if (perturb_byte == 0 && (p == oldtop && csz > oldtopsize)) {
+		/* clear only the bytes from non-freshly-sbrked memory */
+		csz = oldtopsize;
+	}
 #endif
 
-  /* Unroll clear of <= 36 bytes (72 if 8byte sizes).  We know that
-     contents have an odd number of INTERNAL_SIZE_T-sized words;
-     minimally 3.  */
-  d = (INTERNAL_SIZE_T*)mem;
-  clearsize = csz - SIZE_SZ;
-  nclears = clearsize / sizeof(INTERNAL_SIZE_T);
-  assert(nclears >= 3);
+	/* Unroll clear of <= 36 bytes (72 if 8byte sizes).  We know that
+	   contents have an odd number of INTERNAL_SIZE_T-sized words;
+	   minimally 3.  */
+	d = (INTERNAL_SIZE_T *) mem;
+	clearsize = csz - SIZE_SZ;
+	nclears = clearsize / sizeof(INTERNAL_SIZE_T);
+	assert(nclears >= 3);
 
-  if (nclears > 9)
-    MALLOC_ZERO(d, clearsize);
+	if (nclears > 9)
+		MALLOC_ZERO(d, clearsize);
 
-  else {
-    *(d+0) = 0;
-    *(d+1) = 0;
-    *(d+2) = 0;
-    if (nclears > 4) {
-      *(d+3) = 0;
-      *(d+4) = 0;
-      if (nclears > 6) {
-	*(d+5) = 0;
-	*(d+6) = 0;
-	if (nclears > 8) {
-	  *(d+7) = 0;
-	  *(d+8) = 0;
+	else {
+		*(d + 0) = 0;
+		*(d + 1) = 0;
+		*(d + 2) = 0;
+		if (nclears > 4) {
+			*(d + 3) = 0;
+			*(d + 4) = 0;
+			if (nclears > 6) {
+				*(d + 5) = 0;
+				*(d + 6) = 0;
+				if (nclears > 8) {
+					*(d + 7) = 0;
+					*(d + 8) = 0;
+				}
+			}
+		}
 	}
-      }
-    }
-  }
 
-  return mem;
+	return mem;
 }
 
 #ifndef _LIBC
-- 
2.7.0.rc3

