| field | value | date |
|---|---|---|
| author | Qi Wang <interwq@gwu.edu> | 2019-04-03 00:50:42 (GMT) |
| committer | Qi Wang <interwq@gwu.edu> | 2019-04-03 00:50:42 (GMT) |
| commit | b0b3e49a54ec29e32636f4577d9d5a896d67fd20 | |
| tree | e80fd5feaedd401e7e2c884e73f8c884f51b5a65 /test/unit/tsd.c | |
| parent | 61efbda7098de6fe64c362d309824864308c36d4 | |
| parent | f7489dc8f1fac233b0cd4e40331de8b738b1f2e2 | |
Merge branch 'dev' (tag: 5.2.0)
Diffstat (limited to 'test/unit/tsd.c')
| mode | file | lines |
|---|---|---|
| -rw-r--r-- | test/unit/tsd.c | 134 |

1 file changed, 131 insertions, 3 deletions
```diff
diff --git a/test/unit/tsd.c b/test/unit/tsd.c
index 6c47913..917884d 100644
--- a/test/unit/tsd.c
+++ b/test/unit/tsd.c
@@ -1,5 +1,10 @@
 #include "test/jemalloc_test.h"
 
+/*
+ * If we're e.g. in debug mode, we *never* enter the fast path, and so shouldn't
+ * be asserting that we're on one.
+ */
+static bool originally_fast;
 static int data_cleanup_count;
 
 void
@@ -98,11 +103,11 @@ thd_start_reincarnated(void *arg) {
 	tsd_cleanup((void *)tsd);
 	assert_ptr_null(*tsd_arenap_get_unsafe(tsd),
 	    "TSD arena should have been cleared.");
-	assert_u_eq(tsd->state, tsd_state_purgatory,
+	assert_u_eq(tsd_state_get(tsd), tsd_state_purgatory,
 	    "TSD state should be purgatory\n");
 
 	free(p);
-	assert_u_eq(tsd->state, tsd_state_reincarnated,
+	assert_u_eq(tsd_state_get(tsd), tsd_state_reincarnated,
 	    "TSD state should be reincarnated\n");
 	p = mallocx(1, MALLOCX_TCACHE_NONE);
 	assert_ptr_not_null(p, "Unexpected malloc() failure");
@@ -124,6 +129,128 @@ TEST_BEGIN(test_tsd_reincarnation) {
 }
 TEST_END
 
+typedef struct {
+	atomic_u32_t phase;
+	atomic_b_t error;
+} global_slow_data_t;
+
+static void *
+thd_start_global_slow(void *arg) {
+	/* PHASE 0 */
+	global_slow_data_t *data = (global_slow_data_t *)arg;
+	free(mallocx(1, 0));
+
+	tsd_t *tsd = tsd_fetch();
+	/*
+	 * No global slowness has happened yet; there was an error if we were
+	 * originally fast but aren't now.
+	 */
+	atomic_store_b(&data->error, originally_fast && !tsd_fast(tsd),
+	    ATOMIC_SEQ_CST);
+	atomic_store_u32(&data->phase, 1, ATOMIC_SEQ_CST);
+
+	/* PHASE 2 */
+	while (atomic_load_u32(&data->phase, ATOMIC_SEQ_CST) != 2) {
+	}
+	free(mallocx(1, 0));
+	atomic_store_b(&data->error, tsd_fast(tsd), ATOMIC_SEQ_CST);
+	atomic_store_u32(&data->phase, 3, ATOMIC_SEQ_CST);
+
+	/* PHASE 4 */
+	while (atomic_load_u32(&data->phase, ATOMIC_SEQ_CST) != 4) {
+	}
+	free(mallocx(1, 0));
+	atomic_store_b(&data->error, tsd_fast(tsd), ATOMIC_SEQ_CST);
+	atomic_store_u32(&data->phase, 5, ATOMIC_SEQ_CST);
+
+	/* PHASE 6 */
+	while (atomic_load_u32(&data->phase, ATOMIC_SEQ_CST) != 6) {
+	}
+	free(mallocx(1, 0));
+	/* Only one decrement so far. */
+	atomic_store_b(&data->error, tsd_fast(tsd), ATOMIC_SEQ_CST);
+	atomic_store_u32(&data->phase, 7, ATOMIC_SEQ_CST);
+
+	/* PHASE 8 */
+	while (atomic_load_u32(&data->phase, ATOMIC_SEQ_CST) != 8) {
+	}
+	free(mallocx(1, 0));
+	/*
+	 * Both decrements happened; we should be fast again (if we ever
+	 * were).
+	 */
+	atomic_store_b(&data->error, originally_fast && !tsd_fast(tsd),
+	    ATOMIC_SEQ_CST);
+	atomic_store_u32(&data->phase, 9, ATOMIC_SEQ_CST);
+
+	return NULL;
+}
+
+TEST_BEGIN(test_tsd_global_slow) {
+	global_slow_data_t data = {ATOMIC_INIT(0), ATOMIC_INIT(false)};
+	/*
+	 * Note that the "mallocx" here (vs. malloc) is important, since the
+	 * compiler is allowed to optimize away free(malloc(1)) but not
+	 * free(mallocx(1)).
+	 */
+	free(mallocx(1, 0));
+	tsd_t *tsd = tsd_fetch();
+	originally_fast = tsd_fast(tsd);
+
+	thd_t thd;
+	thd_create(&thd, thd_start_global_slow, (void *)&data.phase);
+	/* PHASE 1 */
+	while (atomic_load_u32(&data.phase, ATOMIC_SEQ_CST) != 1) {
+		/*
+		 * We don't have a portable condvar/semaphore mechanism.
+		 * Spin-wait.
+		 */
+	}
+	assert_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
+	tsd_global_slow_inc(tsd_tsdn(tsd));
+	free(mallocx(1, 0));
+	assert_false(tsd_fast(tsd), "");
+	atomic_store_u32(&data.phase, 2, ATOMIC_SEQ_CST);
+
+	/* PHASE 3 */
+	while (atomic_load_u32(&data.phase, ATOMIC_SEQ_CST) != 3) {
+	}
+	assert_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
+	/* Increase again, so that we can test multiple fast/slow changes. */
+	tsd_global_slow_inc(tsd_tsdn(tsd));
+	atomic_store_u32(&data.phase, 4, ATOMIC_SEQ_CST);
+	free(mallocx(1, 0));
+	assert_false(tsd_fast(tsd), "");
+
+	/* PHASE 5 */
+	while (atomic_load_u32(&data.phase, ATOMIC_SEQ_CST) != 5) {
+	}
+	assert_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
+	tsd_global_slow_dec(tsd_tsdn(tsd));
+	atomic_store_u32(&data.phase, 6, ATOMIC_SEQ_CST);
+	/* We only decreased once; things should still be slow. */
+	free(mallocx(1, 0));
+	assert_false(tsd_fast(tsd), "");
+
+	/* PHASE 7 */
+	while (atomic_load_u32(&data.phase, ATOMIC_SEQ_CST) != 7) {
+	}
+	assert_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
+	tsd_global_slow_dec(tsd_tsdn(tsd));
+	atomic_store_u32(&data.phase, 8, ATOMIC_SEQ_CST);
+	/* We incremented and then decremented twice; we should be fast now. */
+	free(mallocx(1, 0));
+	assert_true(!originally_fast || tsd_fast(tsd), "");
+
+	/* PHASE 9 */
+	while (atomic_load_u32(&data.phase, ATOMIC_SEQ_CST) != 9) {
+	}
+	assert_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
+
+	thd_join(thd, NULL);
+}
+TEST_END
+
 int
 main(void) {
 	/* Ensure tsd bootstrapped. */
@@ -135,5 +262,6 @@ main(void) {
 	return test_no_reentrancy(
 	    test_tsd_main_thread,
 	    test_tsd_sub_thread,
-	    test_tsd_reincarnation);
+	    test_tsd_reincarnation,
+	    test_tsd_global_slow);
 }
```
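The new test drives its two threads in lock step with a phase counter: each side spin-waits on `data.phase` until the other has published its checkpoint, so every assertion observes a known interleaving. Below is a minimal sketch of that handshake pattern, using portable C11 `<stdatomic.h>` and pthreads in place of jemalloc's internal `atomic_u32_t`/`thd_t` wrappers; the `worker`/`driver` split and phase numbering are illustrative.

```c
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint phase;	/* starts at 0 */

static void *
worker(void *arg) {
	(void)arg;
	/* PHASE 0: do some work, then publish checkpoint 1. */
	atomic_store(&phase, 1);

	/* PHASE 2: wait until the driver has flipped global state. */
	while (atomic_load(&phase) != 2) {
		/* Spin-wait, as in the test: no portable condvar here. */
	}
	/* ... check the new state here, then hand control back. */
	atomic_store(&phase, 3);
	return NULL;
}

int
main(void) {
	pthread_t thd;
	pthread_create(&thd, NULL, worker, NULL);

	/* PHASE 1: wait for the worker's first checkpoint. */
	while (atomic_load(&phase) != 1) {
	}
	/* ... flip global state here (the test calls tsd_global_slow_inc). */
	atomic_store(&phase, 2);

	/* PHASE 3: wait for the worker to observe the change. */
	while (atomic_load(&phase) != 3) {
	}
	pthread_join(thd, NULL);
	puts("handshake complete");
	return 0;
}
```

`atomic_load`/`atomic_store` default to sequential consistency, matching the test's explicit `ATOMIC_SEQ_CST`, so whatever a thread wrote before bumping the phase is visible to the thread that observes the bump.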
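Each checkpoint allocates with `free(mallocx(1, 0))` rather than `free(malloc(1))`. As the test's own comment explains, compilers that treat `malloc`/`free` as builtins may elide the pair entirely, never touching the allocator; `mallocx` is jemalloc-specific, so the call survives and genuinely exercises the fast or slow path. A compilable illustration, assuming jemalloc's public header is installed (link with `-ljemalloc`):

```c
#include <stdlib.h>
#include <jemalloc/jemalloc.h>	/* declares mallocx() */

int
main(void) {
	/*
	 * GCC and Clang recognize malloc/free as builtins; this pair may
	 * be optimized away and never reach the allocator at all.
	 */
	free(malloc(1));
	/*
	 * mallocx() is not a recognized builtin, so this pair survives,
	 * touching the calling thread's TSD as a side effect.
	 */
	free(mallocx(1, 0));
	return 0;
}
```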
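The driver also calls `tsd_global_slow_inc()` twice and verifies that a single `tsd_global_slow_dec()` is not enough to restore the fast path: global slowness behaves as a counter, not a flag. A toy model of the contract the test checks; the counter and helper names here are hypothetical, not jemalloc's implementation:

```c
#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

/* Hypothetical model: any nonzero count forces the slow path. */
static atomic_uint global_slow_count;

static void global_slow_inc(void) { atomic_fetch_add(&global_slow_count, 1); }
static void global_slow_dec(void) { atomic_fetch_sub(&global_slow_count, 1); }
static bool can_be_fast(void) { return atomic_load(&global_slow_count) == 0; }

int
main(void) {
	global_slow_inc();
	global_slow_inc();
	global_slow_dec();
	assert(!can_be_fast());	/* one of two decrements: still slow */
	global_slow_dec();
	assert(can_be_fast());	/* back to zero: fast is allowed again */
	return 0;
}
```

This mirrors the test's `originally_fast` guard as well: even at count zero, a thread that never ran fast (e.g. in debug builds, where TSD stays on the slow path) remains slow, which is why the final assertions are conditioned on `originally_fast`.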
