diff options
author | Jason Evans <jasone@canonware.com> | 2012-04-21 20:33:48 (GMT) |
---|---|---|
committer | Jason Evans <jasone@canonware.com> | 2012-04-21 20:33:48 (GMT) |
commit | 8f0e0eb1c01d5d934586ea62e519ca8b8637aebc (patch) | |
tree | c0281d50a3731242730249b7285a67fec4536428 /src | |
parent | 606f1fdc3cdbc700717133ca56685313caea24bb (diff) | |
download | jemalloc-8f0e0eb1c01d5d934586ea62e519ca8b8637aebc.zip jemalloc-8f0e0eb1c01d5d934586ea62e519ca8b8637aebc.tar.gz jemalloc-8f0e0eb1c01d5d934586ea62e519ca8b8637aebc.tar.bz2 |
Fix a memory corruption bug in chunk_alloc_dss().
Fix a memory corruption bug in chunk_alloc_dss() that was due to
claiming newly allocated memory is zeroed.
Reverse order of preference between mmap() and sbrk() to prefer mmap().
Clean up management of 'zero' parameter in chunk_alloc*().
Diffstat (limited to 'src')
-rw-r--r-- | src/chunk.c      | 10
-rw-r--r-- | src/chunk_dss.c  |  1
-rw-r--r-- | src/chunk_mmap.c | 16
3 files changed, 15 insertions, 12 deletions
diff --git a/src/chunk.c b/src/chunk.c
index bcaedea..3148505 100644
--- a/src/chunk.c
+++ b/src/chunk.c
@@ -125,16 +125,16 @@ chunk_alloc(size_t size, size_t alignment, bool base, bool *zero)
 	ret = chunk_recycle(size, alignment, zero);
 	if (ret != NULL)
 		goto label_return;
+
+	ret = chunk_alloc_mmap(size, alignment, zero);
+	if (ret != NULL)
+		goto label_return;
+
 	if (config_dss) {
 		ret = chunk_alloc_dss(size, alignment, zero);
 		if (ret != NULL)
 			goto label_return;
 	}
-	ret = chunk_alloc_mmap(size, alignment);
-	if (ret != NULL) {
-		*zero = true;
-		goto label_return;
-	}
 
 	/* All strategies for allocation failed. */
 	ret = NULL;
diff --git a/src/chunk_dss.c b/src/chunk_dss.c
index b05509a..bd4a724 100644
--- a/src/chunk_dss.c
+++ b/src/chunk_dss.c
@@ -89,7 +89,6 @@ chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
 			malloc_mutex_unlock(&dss_mtx);
 			if (cpad_size != 0)
 				chunk_dealloc(cpad, cpad_size, true);
-			*zero = true;
 			return (ret);
 		}
 	} while (dss_prev != (void *)-1);
diff --git a/src/chunk_mmap.c b/src/chunk_mmap.c
index 9dea831..126406a 100644
--- a/src/chunk_mmap.c
+++ b/src/chunk_mmap.c
@@ -18,7 +18,7 @@ malloc_tsd_funcs(JEMALLOC_INLINE, mmap_unaligned, bool, false,
 static void	*pages_map(void *addr, size_t size);
 static void	pages_unmap(void *addr, size_t size);
 static void	*chunk_alloc_mmap_slow(size_t size, size_t alignment,
-    bool unaligned);
+    bool unaligned, bool *zero);
 
 /******************************************************************************/
 
@@ -87,7 +87,7 @@ pages_purge(void *addr, size_t length)
 }
 
 static void *
-chunk_alloc_mmap_slow(size_t size, size_t alignment, bool unaligned)
+chunk_alloc_mmap_slow(size_t size, size_t alignment, bool unaligned, bool *zero)
 {
 	void *ret, *pages;
 	size_t alloc_size, leadsize, trailsize;
@@ -122,11 +122,13 @@ chunk_alloc_mmap_slow(size_t size, size_t alignment, bool unaligned)
 		mmap_unaligned_tsd_set(&mu);
 	}
 
+	assert(ret != NULL);
+	*zero = true;
 	return (ret);
 }
 
 void *
-chunk_alloc_mmap(size_t size, size_t alignment)
+chunk_alloc_mmap(size_t size, size_t alignment, bool *zero)
 {
 	void *ret;
 
@@ -177,8 +179,8 @@ chunk_alloc_mmap(size_t size, size_t alignment)
 				 * the reliable-but-expensive method.
 				 */
 				pages_unmap(ret, size);
-				ret = chunk_alloc_mmap_slow(size, alignment,
-				    true);
+				return (chunk_alloc_mmap_slow(size, alignment,
+				    true, zero));
 			} else {
 				/* Clean up unneeded leading space. */
 				pages_unmap(ret, chunksize - offset);
@@ -187,8 +189,10 @@ chunk_alloc_mmap(size_t size, size_t alignment)
 			}
 		}
 	} else
-		ret = chunk_alloc_mmap_slow(size, alignment, false);
+		return (chunk_alloc_mmap_slow(size, alignment, false, zero));
 
+	assert(ret != NULL);
+	*zero = true;
 	return (ret);
 }