summary refs log tree commit diff stats
path: root/include/jemalloc
diff options
context:
space:
mode:
author    Jason Evans <jasone@canonware.com>  2017-05-03 04:45:46 (GMT)
committer Jason Evans <jasone@canonware.com>  2017-05-03 15:52:33 (GMT)
commit 0798fe6e7056a2eb571dde06927e87635dd2e74c (patch)
tree   f3db638ec6be05581380d81cb4776d350db85190 /include/jemalloc
parent 344dd342dddf341f2db47c0a37f8b2aadccfdce7 (diff)
downloadjemalloc-0798fe6e7056a2eb571dde06927e87635dd2e74c.zip
jemalloc-0798fe6e7056a2eb571dde06927e87635dd2e74c.tar.gz
jemalloc-0798fe6e7056a2eb571dde06927e87635dd2e74c.tar.bz2
Fix rtree_leaf_elm_szind_slab_update().
Re-read the leaf element when atomic CAS fails due to a race with another thread that has locked the leaf element, since atomic_compare_exchange_strong_p() overwrites the expected value with the actual value on failure. This regression was introduced by 0ee0e0c155a05d0d028a9972ad86b9eaac4ccabd (Implement compact rtree leaf element representation.). This resolves #798.
Diffstat (limited to 'include/jemalloc')
-rw-r--r--  include/jemalloc/internal/rtree_inlines.h | 15
1 file changed, 7 insertions(+), 8 deletions(-)
diff --git a/include/jemalloc/internal/rtree_inlines.h b/include/jemalloc/internal/rtree_inlines.h
index b66e8ae..bcc2041 100644
--- a/include/jemalloc/internal/rtree_inlines.h
+++ b/include/jemalloc/internal/rtree_inlines.h
@@ -251,17 +251,16 @@ rtree_leaf_elm_szind_slab_update(tsdn_t *tsdn, rtree_t *rtree,
* modified by another thread, the fact that the lock is embedded in the
* same word requires that a CAS operation be used here.
*/
- uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, false,
- true) & ~((uintptr_t)0x1); /* Mask lock bit. */
- uintptr_t bits = ((uintptr_t)szind << LG_VADDR) |
- ((uintptr_t)rtree_leaf_elm_bits_extent_get(old_bits) &
- (((uintptr_t)0x1 << LG_VADDR) - 1)) |
- ((uintptr_t)slab << 1);
spin_t spinner = SPIN_INITIALIZER;
while (true) {
+ void *old_bits = (void *)(rtree_leaf_elm_bits_read(tsdn, rtree,
+ elm, false, true) & ~((uintptr_t)0x1)); /* Mask lock bit. */
+ void *bits = (void *)(((uintptr_t)szind << LG_VADDR) |
+ ((uintptr_t)rtree_leaf_elm_bits_extent_get(
+ (uintptr_t)old_bits) & (((uintptr_t)0x1 << LG_VADDR) - 1)) |
+ ((uintptr_t)slab << 1));
if (likely(atomic_compare_exchange_strong_p(&elm->le_bits,
- (void **)&old_bits, (void *)bits, ATOMIC_ACQUIRE,
- ATOMIC_RELAXED))) {
+ &old_bits, bits, ATOMIC_ACQUIRE, ATOMIC_RELAXED))) {
break;
}
spin_adaptive(&spinner);