| author | Daniel Micay <danielmicay@gmail.com> | 2014-10-06 07:42:10 (GMT) |
| --- | --- | --- |
| committer | Jason Evans <jasone@canonware.com> | 2014-10-08 06:57:09 (GMT) |
| commit | f22214a29ddd3bed005cbcc8f2aff7c61ef4940b (patch) | |
| tree | 13bcb71ddaeafbb5889fa4b1bd7f612fc4b04c9d /test/unit/junk.c | |
| parent | 8bb3198f72fc7587dc93527f9f19fb5be52fa553 (diff) | |
Use regular arena allocation for huge tree nodes.
This avoids grabbing the base mutex, as a step towards fine-grained
locking for huge allocations. The thread cache also provides a tiny
(~3%) improvement for serial huge allocations.
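The locking change is easiest to see side by side. Below is a minimal, self-contained C sketch of the idea; the names (`extent_node_t`, `base_mtx`, `node_alloc_*`) echo jemalloc's style but are illustrative stand-ins, not code from this commit.

```c
#include <pthread.h>
#include <stdlib.h>

/*
 * Toy model of the change (hypothetical names, not jemalloc's exact
 * internals). Huge allocations are tracked by small tree-node records.
 * Previously those records came from the base allocator, guarded by one
 * global mutex; now they come from the regular arena allocation path,
 * which jemalloc can serve lock-free from the per-thread cache.
 */
typedef struct extent_node_s {
	void	*addr;
	size_t	size;
} extent_node_t;

static pthread_mutex_t	base_mtx = PTHREAD_MUTEX_INITIALIZER;

/* Old path: every node allocation serializes on base_mtx. */
static extent_node_t *
node_alloc_base(void)
{
	extent_node_t *node;

	pthread_mutex_lock(&base_mtx);
	node = malloc(sizeof(extent_node_t));	/* stand-in for base_alloc() */
	pthread_mutex_unlock(&base_mtx);
	return (node);
}

/* New path: ordinary small allocation, tcache-eligible, no global lock. */
static extent_node_t *
node_alloc_arena(void)
{

	return (malloc(sizeof(extent_node_t)));
}
```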
Diffstat (limited to 'test/unit/junk.c')
-rw-r--r-- | test/unit/junk.c | 27 |
1 file changed, 20 insertions, 7 deletions
diff --git a/test/unit/junk.c b/test/unit/junk.c
index 5b35a87..1522a61 100644
--- a/test/unit/junk.c
+++ b/test/unit/junk.c
@@ -8,7 +8,16 @@ const char *malloc_conf =
 static arena_dalloc_junk_small_t *arena_dalloc_junk_small_orig;
 static arena_dalloc_junk_large_t *arena_dalloc_junk_large_orig;
 static huge_dalloc_junk_t *huge_dalloc_junk_orig;
-static void *most_recently_junked;
+static void *watch_for_junking;
+static bool saw_junking;
+
+static void
+watch_junking(void *p)
+{
+
+	watch_for_junking = p;
+	saw_junking = false;
+}
 
 static void
 arena_dalloc_junk_small_intercept(void *ptr, arena_bin_info_t *bin_info)
@@ -21,7 +30,8 @@ arena_dalloc_junk_small_intercept(void *ptr, arena_bin_info_t *bin_info)
 		    "Missing junk fill for byte %zu/%zu of deallocated region",
 		    i, bin_info->reg_size);
 	}
-	most_recently_junked = ptr;
+	if (ptr == watch_for_junking)
+		saw_junking = true;
 }
 
 static void
@@ -35,7 +45,8 @@ arena_dalloc_junk_large_intercept(void *ptr, size_t usize)
 		    "Missing junk fill for byte %zu/%zu of deallocated region",
 		    i, usize);
 	}
-	most_recently_junked = ptr;
+	if (ptr == watch_for_junking)
+		saw_junking = true;
 }
 
 static void
@@ -48,7 +59,8 @@ huge_dalloc_junk_intercept(void *ptr, size_t usize)
 	 * enough that it doesn't make sense to duplicate the decision logic in
 	 * test code, so don't actually check that the region is junk-filled.
 	 */
-	most_recently_junked = ptr;
+	if (ptr == watch_for_junking)
+		saw_junking = true;
 }
 
 static void
@@ -87,18 +99,19 @@ test_junk(size_t sz_min, size_t sz_max)
 		}
 
 		if (xallocx(s, sz+1, 0, 0) == sz) {
-			void *junked = (void *)s;
+			watch_junking(s);
 			s = (char *)rallocx(s, sz+1, 0);
 			assert_ptr_not_null((void *)s,
 			    "Unexpected rallocx() failure");
-			assert_ptr_eq(most_recently_junked, junked,
+			assert_true(saw_junking,
 			    "Expected region of size %zu to be junk-filled",
 			    sz);
 		}
 	}
 
+	watch_junking(s);
 	dallocx(s, 0);
-	assert_ptr_eq(most_recently_junked, (void *)s,
+	assert_true(saw_junking,
 	    "Expected region of size %zu to be junk-filled", sz);
 
 	arena_dalloc_junk_small = arena_dalloc_junk_small_orig;
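Why the test had to change: once the huge-allocation tree nodes go through the regular arena path, the dalloc-junk intercepts above also fire for those internal node frees, so "the most recently junked pointer" is no longer guaranteed to be the caller's region. The diff therefore arms a watch on one specific pointer and records whether that pointer was junked at any point. A minimal standalone model of the pattern follows; the bookkeeping names come from the diff, while `junk_intercept` and the `main` driver are simplified stand-ins for the test's hook machinery.

```c
#include <assert.h>
#include <stdbool.h>

/* Watched-pointer bookkeeping, as in the diff. */
static void *watch_for_junking;
static bool saw_junking;

static void
watch_junking(void *p)
{

	watch_for_junking = p;
	saw_junking = false;
}

/*
 * Stand-in for the dalloc-junk intercepts: it may fire for unrelated
 * internal frees (e.g. tree nodes) without disturbing the check.
 */
static void
junk_intercept(void *ptr)
{

	if (ptr == watch_for_junking)
		saw_junking = true;
}

int
main(void)
{
	int region, node;

	watch_junking(&region);
	junk_intercept(&node);		/* unrelated internal free: ignored */
	junk_intercept(&region);	/* the watched region: recorded */
	assert(saw_junking);
	return (0);
}
```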