author     Jason Evans <jasone@canonware.com>  2012-04-21 20:33:48 (GMT)
committer  Jason Evans <jasone@canonware.com>  2012-04-21 20:33:48 (GMT)
commit     8f0e0eb1c01d5d934586ea62e519ca8b8637aebc (patch)
tree       c0281d50a3731242730249b7285a67fec4536428 /src/chunk_mmap.c
parent     606f1fdc3cdbc700717133ca56685313caea24bb (diff)
Fix a memory corruption bug in chunk_alloc_dss().
Fix a memory corruption bug in chunk_alloc_dss() that was caused by incorrectly claiming that newly allocated memory is zeroed. Reverse the order of preference between mmap() and sbrk() so that mmap() is preferred. Clean up management of the 'zero' parameter in chunk_alloc_*().
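
As background for the 'zero' cleanup, a minimal sketch (not taken from jemalloc; alloc_pages() is a hypothetical helper) of the out-parameter convention this change moves toward: the allocator sets *zero to true only when the pages are known to be zero-filled, such as fresh anonymous mmap() pages, so callers never treat possibly-recycled memory as clean.

#include <stdbool.h>
#include <stddef.h>
#include <sys/mman.h>

/* Hypothetical helper, for illustration only. */
static void *
alloc_pages(size_t size, bool *zero)
{
	void *ret = mmap(NULL, size, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANON, -1, 0);

	if (ret == MAP_FAILED)
		return (NULL);
	/* Fresh anonymous mappings are zero-filled by the kernel. */
	*zero = true;
	return (ret);
}
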
Diffstat (limited to 'src/chunk_mmap.c')
-rw-r--r--  src/chunk_mmap.c | 16 ++++++++++------
1 file changed, 10 insertions(+), 6 deletions(-)
diff --git a/src/chunk_mmap.c b/src/chunk_mmap.c
index 9dea831..126406a 100644
--- a/src/chunk_mmap.c
+++ b/src/chunk_mmap.c
@@ -18,7 +18,7 @@ malloc_tsd_funcs(JEMALLOC_INLINE, mmap_unaligned, bool, false,
static void *pages_map(void *addr, size_t size);
static void pages_unmap(void *addr, size_t size);
static void *chunk_alloc_mmap_slow(size_t size, size_t alignment,
- bool unaligned);
+ bool unaligned, bool *zero);
/******************************************************************************/
@@ -87,7 +87,7 @@ pages_purge(void *addr, size_t length)
}
static void *
-chunk_alloc_mmap_slow(size_t size, size_t alignment, bool unaligned)
+chunk_alloc_mmap_slow(size_t size, size_t alignment, bool unaligned, bool *zero)
{
void *ret, *pages;
size_t alloc_size, leadsize, trailsize;
@@ -122,11 +122,13 @@ chunk_alloc_mmap_slow(size_t size, size_t alignment, bool unaligned)
mmap_unaligned_tsd_set(&mu);
}
+ assert(ret != NULL);
+ *zero = true;
return (ret);
}
void *
-chunk_alloc_mmap(size_t size, size_t alignment)
+chunk_alloc_mmap(size_t size, size_t alignment, bool *zero)
{
void *ret;
@@ -177,8 +179,8 @@ chunk_alloc_mmap(size_t size, size_t alignment)
* the reliable-but-expensive method.
*/
pages_unmap(ret, size);
- ret = chunk_alloc_mmap_slow(size, alignment,
- true);
+ return (chunk_alloc_mmap_slow(size, alignment,
+ true, zero));
} else {
/* Clean up unneeded leading space. */
pages_unmap(ret, chunksize - offset);
@@ -187,8 +189,10 @@ chunk_alloc_mmap(size_t size, size_t alignment)
}
}
} else
- ret = chunk_alloc_mmap_slow(size, alignment, false);
+ return (chunk_alloc_mmap_slow(size, alignment, false, zero));
+ assert(ret != NULL);
+ *zero = true;
return (ret);
}
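
A hedged, caller-side sketch (get_zeroed_chunk() is a hypothetical wrapper, not part of this diff): with the new signature the caller reads the zero guarantee back through the out-parameter and only scrubs the chunk itself when the allocator could not promise zero-filled pages.

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

void	*chunk_alloc_mmap(size_t size, size_t alignment, bool *zero);

static void *
get_zeroed_chunk(size_t size, size_t alignment)
{
	bool zero = false;
	void *chunk = chunk_alloc_mmap(size, alignment, &zero);

	if (chunk != NULL && zero == false)
		memset(chunk, 0, size);	/* No guarantee; zero it ourselves. */
	return (chunk);
}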