diff options
author | Qi Wang <interwq@gwu.edu> | 2017-04-12 23:16:27 (GMT) |
---|---|---|
committer | Qi Wang <interwq@gmail.com> | 2017-04-15 02:48:06 (GMT) |
commit | c2fcf9c2cfcbaba58db1941c91c7a8a4b6623401 (patch) | |
tree | 3be371fb3b3b9b003de89943a7e088959550e169 /src/tsd.c | |
parent | b348ba29bb94b6e9da8dcea1105d4614556aceb9 (diff) | |
download | jemalloc-c2fcf9c2cfcbaba58db1941c91c7a8a4b6623401.zip jemalloc-c2fcf9c2cfcbaba58db1941c91c7a8a4b6623401.tar.gz jemalloc-c2fcf9c2cfcbaba58db1941c91c7a8a4b6623401.tar.bz2 |
Switch to fine-grained reentrancy support.
Previously we had a general detection and support of reentrancy, at the cost of
having branches and inc / dec operations on fast paths. To avoid taxing fast
paths, we move the reentrancy operations onto tsd slow state, and only modify
reentrancy level around external calls (that might trigger reentrancy).
Diffstat (limited to 'src/tsd.c')
-rw-r--r-- | src/tsd.c | 4 |
1 file changed, 3 insertions(+), 1 deletion(-)
@@ -15,7 +15,8 @@ malloc_tsd_data(, , tsd_t, TSD_INITIALIZER)
 void
 tsd_slow_update(tsd_t *tsd) {
 	if (tsd_nominal(tsd)) {
-		if (malloc_slow || !tsd->tcache_enabled) {
+		if (malloc_slow || !tsd->tcache_enabled ||
+		    tsd_reentrancy_level_get(tsd) > 0) {
 			tsd->state = tsd_state_nominal_slow;
 		} else {
 			tsd->state = tsd_state_nominal;
@@ -28,6 +29,7 @@ tsd_fetch_slow(tsd_t *tsd) {
 	if (tsd->state == tsd_state_nominal_slow) {
 		/* On slow path but no work needed. */
 		assert(malloc_slow || !tsd_tcache_enabled_get(tsd) ||
+		    tsd_reentrancy_level_get(tsd) > 0 ||
 		    *tsd_arenas_tdata_bypassp_get(tsd));
 	} else if (tsd->state == tsd_state_uninitialized) {
 		tsd->state = tsd_state_nominal;