author		Jason Evans <jasone@canonware.com>	2015-01-31 06:54:08 (GMT)
committer	Jason Evans <jasone@canonware.com>	2015-02-05 00:51:53 (GMT)
commit		8d0e04d42f4750970ac3052a6c76379b60aba5dc
tree		25d71a94a914eb4f69c524f14b5f8d28eaf01881 /test/unit
parent		c810fcea1fa7983ef5bcabe6556cdc19dde6dd8d
Refactor rtree to be lock-free.
Recent huge allocation refactoring associates huge allocations with
arenas, but it remains necessary to quickly look up huge allocation
metadata during reallocation/deallocation. A global radix tree remains
a good solution to this problem, but locking would have become the
primary bottleneck after the (upcoming) migration of chunk management
from global to per-arena data structures.
This lock-free implementation uses double-checked reads to traverse the
tree, so that in the steady state, each read or write requires only a
single atomic operation.
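To make the double-checked read concrete, here is a minimal sketch assuming C11 atomics; node_t, child_get(), and child_set() are illustrative names, not jemalloc's actual internals. The fast path costs a single relaxed load; only a reader that races with initialization falls back to an acquire load (and, in the real tree, to a locked slow path that creates the missing subtree):

#include <stdatomic.h>
#include <stddef.h>

typedef struct node_s node_t;
struct node_s {
	_Atomic(node_t *) child;	/* NULL until a subtree is published. */
};

static node_t *
child_get(node_t *parent)
{
	/* First (cheap) read; may observe a stale NULL. */
	node_t *child = atomic_load_explicit(&parent->child,
	    memory_order_relaxed);
	if (child == NULL) {
		/* Second read synchronizes with the release store below. */
		child = atomic_load_explicit(&parent->child,
		    memory_order_acquire);
	}
	return (child);
}

static void
child_set(node_t *parent, node_t *child)
{
	/* Publish a fully initialized subtree with one release store. */
	atomic_store_explicit(&parent->child, child, memory_order_release);
}

Dereferencing a node obtained through the relaxed load relies on data-dependency ordering (memory_order_consume territory in the C11 model), which is what keeps the steady-state cost at one atomic operation per lookup.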
This implementation also guarantees that no more than two tree levels
actually exist, through a combination of careful virtual memory
allocation which makes large sparse nodes cheap, and skipping the root
node on x64 (possible because the top 16 bits of pointers are all 0 in
practice).
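For intuition about the level arithmetic, consider a hypothetical two-level split of a 48-bit significant key space (the x64 case); the constants below are illustrative, since the real split also depends on how many low key bits the leaf granularity consumes:

#include <stdint.h>

/*
 * Hypothetical split: bits 63..48 are 0 on x64 user-space pointers, so
 * no root level is needed to cover them.
 */
#define SIG_BITS	48
#define L0_BITS		24	/* level 0 indexes bits 47..24 */
#define L1_BITS		(SIG_BITS - L0_BITS)	/* level 1 indexes bits 23..0 */

static inline uintptr_t
subkey(uintptr_t key, unsigned shift, unsigned bits)
{
	/* Extract 'bits' key bits starting at bit 'shift'. */
	return ((key >> shift) & ((((uintptr_t)1) << bits) - 1));
}

#define L0_INDEX(key)	subkey((key), L1_BITS, L0_BITS)
#define L1_INDEX(key)	subkey((key), 0, L1_BITS)

A node with 2^24 pointer-sized slots reserves 128 MiB of virtual address space, but because node memory is demand-paged, only the pages actually touched consume physical memory; that is what makes large sparse nodes cheap enough to keep the tree at two levels.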
Diffstat (limited to 'test/unit')
-rw-r--r--	test/unit/rtree.c	77
1 file changed, 52 insertions, 25 deletions
diff --git a/test/unit/rtree.c b/test/unit/rtree.c
index 77a947d..556c4a8 100644
--- a/test/unit/rtree.c
+++ b/test/unit/rtree.c
@@ -1,14 +1,30 @@
 #include "test/jemalloc_test.h"
 
+static rtree_node_elm_t *
+node_alloc(size_t nelms)
+{
+
+	return (calloc(nelms, sizeof(rtree_node_elm_t)));
+}
+
+static void
+node_dalloc(rtree_node_elm_t *node)
+{
+
+	free(node);
+}
+
 TEST_BEGIN(test_rtree_get_empty)
 {
 	unsigned i;
 
 	for (i = 1; i <= (sizeof(uintptr_t) << 3); i++) {
-		rtree_t *rtree = rtree_new(i, malloc, free);
-		assert_u_eq(rtree_get(rtree, 0), 0,
+		rtree_t rtree;
+		assert_false(rtree_new(&rtree, i, node_alloc, node_dalloc),
+		    "Unexpected rtree_new() failure");
+		assert_ptr_eq(rtree_get(&rtree, 0), NULL,
 		    "rtree_get() should return NULL for empty tree");
-		rtree_delete(rtree);
+		rtree_delete(&rtree);
 	}
 }
 TEST_END
@@ -16,19 +32,22 @@ TEST_END
 TEST_BEGIN(test_rtree_extrema)
 {
 	unsigned i;
+	extent_node_t node_a, node_b;
 
 	for (i = 1; i <= (sizeof(uintptr_t) << 3); i++) {
-		rtree_t *rtree = rtree_new(i, malloc, free);
+		rtree_t rtree;
+		assert_false(rtree_new(&rtree, i, node_alloc, node_dalloc),
+		    "Unexpected rtree_new() failure");
 
-		rtree_set(rtree, 0, 1);
-		assert_u_eq(rtree_get(rtree, 0), 1,
+		rtree_set(&rtree, 0, &node_a);
+		assert_ptr_eq(rtree_get(&rtree, 0), &node_a,
 		    "rtree_get() should return previously set value");
 
-		rtree_set(rtree, ~((uintptr_t)0), 1);
-		assert_u_eq(rtree_get(rtree, ~((uintptr_t)0)), 1,
+		rtree_set(&rtree, ~((uintptr_t)0), &node_b);
+		assert_ptr_eq(rtree_get(&rtree, ~((uintptr_t)0)), &node_b,
 		    "rtree_get() should return previously set value");
 
-		rtree_delete(rtree);
+		rtree_delete(&rtree);
 	}
 }
 TEST_END
@@ -40,26 +59,30 @@ TEST_BEGIN(test_rtree_bits)
 	for (i = 1; i < (sizeof(uintptr_t) << 3); i++) {
 		uintptr_t keys[] = {0, 1,
 		    (((uintptr_t)1) << (sizeof(uintptr_t)*8-i)) - 1};
-		rtree_t *rtree = rtree_new(i, malloc, free);
+		extent_node_t node;
+		rtree_t rtree;
+
+		assert_false(rtree_new(&rtree, i, node_alloc, node_dalloc),
+		    "Unexpected rtree_new() failure");
 
 		for (j = 0; j < sizeof(keys)/sizeof(uintptr_t); j++) {
-			rtree_set(rtree, keys[j], 1);
+			rtree_set(&rtree, keys[j], &node);
 			for (k = 0; k < sizeof(keys)/sizeof(uintptr_t); k++) {
-				assert_u_eq(rtree_get(rtree, keys[k]), 1,
+				assert_ptr_eq(rtree_get(&rtree, keys[k]), &node,
 				    "rtree_get() should return previously set "
 				    "value and ignore insignificant key bits; "
 				    "i=%u, j=%u, k=%u, set key=%#"PRIxPTR", "
 				    "get key=%#"PRIxPTR, i, j, k, keys[j],
 				    keys[k]);
 			}
-			assert_u_eq(rtree_get(rtree,
-			    (((uintptr_t)1) << (sizeof(uintptr_t)*8-i))), 0,
+			assert_ptr_eq(rtree_get(&rtree,
+			    (((uintptr_t)1) << (sizeof(uintptr_t)*8-i))), NULL,
 			    "Only leftmost rtree leaf should be set; "
 			    "i=%u, j=%u", i, j);
-			rtree_set(rtree, keys[j], 0);
+			rtree_set(&rtree, keys[j], NULL);
 		}
 
-		rtree_delete(rtree);
+		rtree_delete(&rtree);
 	}
 }
 TEST_END
@@ -68,37 +91,41 @@ TEST_BEGIN(test_rtree_random)
 {
 	unsigned i;
 	sfmt_t *sfmt;
-#define	NSET 100
+#define	NSET 16
 #define	SEED 42
 
 	sfmt = init_gen_rand(SEED);
 	for (i = 1; i <= (sizeof(uintptr_t) << 3); i++) {
-		rtree_t *rtree = rtree_new(i, malloc, free);
 		uintptr_t keys[NSET];
+		extent_node_t node;
 		unsigned j;
+		rtree_t rtree;
+
+		assert_false(rtree_new(&rtree, i, node_alloc, node_dalloc),
+		    "Unexpected rtree_new() failure");
 
 		for (j = 0; j < NSET; j++) {
 			keys[j] = (uintptr_t)gen_rand64(sfmt);
-			rtree_set(rtree, keys[j], 1);
-			assert_u_eq(rtree_get(rtree, keys[j]), 1,
+			rtree_set(&rtree, keys[j], &node);
+			assert_ptr_eq(rtree_get(&rtree, keys[j]), &node,
			    "rtree_get() should return previously set value");
 		}
 
 		for (j = 0; j < NSET; j++) {
-			assert_u_eq(rtree_get(rtree, keys[j]), 1,
+			assert_ptr_eq(rtree_get(&rtree, keys[j]), &node,
 			    "rtree_get() should return previously set value");
 		}
 
 		for (j = 0; j < NSET; j++) {
-			rtree_set(rtree, keys[j], 0);
-			assert_u_eq(rtree_get(rtree, keys[j]), 0,
+			rtree_set(&rtree, keys[j], NULL);
+			assert_ptr_eq(rtree_get(&rtree, keys[j]), NULL,
 			    "rtree_get() should return previously set value");
 		}
 
 		for (j = 0; j < NSET; j++) {
-			assert_u_eq(rtree_get(rtree, keys[j]), 0,
+			assert_ptr_eq(rtree_get(&rtree, keys[j]), NULL,
 			    "rtree_get() should return previously set value");
 		}
 
-		rtree_delete(rtree);
+		rtree_delete(&rtree);
 	}
 	fini_gen_rand(sfmt);
 #undef NSET
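Taken together, the updated tests show the new embedding-style API: the caller supplies storage for the rtree_t, rtree_new() returns false on success, leaves store extent_node_t pointers rather than unsigned values, and node memory management is delegated through typed callbacks. A condensed usage sketch, reusing node_alloc()/node_dalloc() as defined in the diff above (the 16-bit key width is an arbitrary choice for illustration):

	rtree_t rtree;
	extent_node_t node;

	/* Initialize caller-owned tree; rtree_new() returns false on success. */
	assert_false(rtree_new(&rtree, 16, node_alloc, node_dalloc),
	    "Unexpected rtree_new() failure");
	rtree_set(&rtree, (uintptr_t)0x2a, &node);
	assert_ptr_eq(rtree_get(&rtree, (uintptr_t)0x2a), &node,
	    "rtree_get() should return previously set value");
	rtree_delete(&rtree);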