diff options
author | Guilherme Goncalves <guilherme.p.gonc@gmail.com> | 2014-12-08 21:12:41 (GMT) |
---|---|---|
committer | Jason Evans <jasone@canonware.com> | 2014-12-15 01:07:26 (GMT) |
commit | 2c5cb613dfbdf58f88152321b63e60c58cd23972 (patch) | |
tree | 9c32a6519cb435203141d19a0c0f60fa111803e0 /src/huge.c | |
parent | b74041fb6e279bd8bbc133250241249f90cd619f (diff) | |
download | jemalloc-2c5cb613dfbdf58f88152321b63e60c58cd23972.zip jemalloc-2c5cb613dfbdf58f88152321b63e60c58cd23972.tar.gz jemalloc-2c5cb613dfbdf58f88152321b63e60c58cd23972.tar.bz2 |
Introduce two new modes of junk filling: "alloc" and "free".
In addition to true/false, opt.junk can now be either "alloc" or "free",
giving applications the option of junking memory only on allocation
or only on deallocation.
This resolves #172.
Diffstat (limited to 'src/huge.c')
-rw-r--r-- | src/huge.c | 12 |
1 file changed, 6 insertions, 6 deletions
@@ -67,7 +67,7 @@ huge_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment, if (zero || (config_fill && unlikely(opt_zero))) { if (!is_zeroed) memset(ret, 0, usize); - } else if (config_fill && unlikely(opt_junk)) + } else if (config_fill && unlikely(opt_junk_alloc)) memset(ret, 0xa5, usize); return (ret); @@ -81,7 +81,7 @@ static void huge_dalloc_junk(void *ptr, size_t usize) { - if (config_fill && have_dss && unlikely(opt_junk)) { + if (config_fill && have_dss && unlikely(opt_junk_free)) { /* * Only bother junk filling if the chunk isn't about to be * unmapped. @@ -117,7 +117,7 @@ huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize, size_t sdiff = CHUNK_CEILING(usize) - usize; zeroed = (sdiff != 0) ? !pages_purge((void *)((uintptr_t)ptr + usize), sdiff) : true; - if (config_fill && unlikely(opt_junk)) { + if (config_fill && unlikely(opt_junk_free)) { memset((void *)((uintptr_t)ptr + usize), 0x5a, oldsize - usize); zeroed = false; @@ -147,7 +147,7 @@ huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize, memset((void *)((uintptr_t)ptr + oldsize), 0, usize - oldsize); } - } else if (config_fill && unlikely(opt_junk)) { + } else if (config_fill && unlikely(opt_junk_alloc)) { memset((void *)((uintptr_t)ptr + oldsize), 0xa5, usize - oldsize); } @@ -165,7 +165,7 @@ huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize) sdiff = CHUNK_CEILING(usize) - usize; zeroed = (sdiff != 0) ? 
!pages_purge((void *)((uintptr_t)ptr + usize), sdiff) : true; - if (config_fill && unlikely(opt_junk)) { + if (config_fill && unlikely(opt_junk_free)) { huge_dalloc_junk((void *)((uintptr_t)ptr + usize), oldsize - usize); zeroed = false; @@ -234,7 +234,7 @@ huge_ralloc_no_move_expand(void *ptr, size_t oldsize, size_t size, bool zero) { CHUNK_CEILING(oldsize)), 0, usize - CHUNK_CEILING(oldsize)); } - } else if (config_fill && unlikely(opt_junk)) { + } else if (config_fill && unlikely(opt_junk_alloc)) { memset((void *)((uintptr_t)ptr + oldsize), 0xa5, usize - oldsize); } |