author     Jason Evans <jasone@canonware.com>    2017-05-16 20:25:17 (GMT)
committer  Jason Evans <jasone@canonware.com>    2017-05-23 22:31:29 (GMT)
commit     196a53c2aec9fd9fbd17af6803d75c7f631c1ae3 (patch)
tree       9a1fa9424562269a4d361b58f09de057a814eae6 /src/extent_dss.c
parent     067b97013018211b39e9911ae528ff54edd8fe5e (diff)
Do not assume dss never decreases.
An sbrk() caller outside jemalloc can decrease the dss, so add a separate atomic boolean to explicitly track whether jemalloc is concurrently calling sbrk(), rather than depending on state outside jemalloc's full control. This resolves #802.
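The flag works as a tiny spinlock: a thread announces that it is about to move the break by CAS-ing dss_extending from false to true, spinning until it wins, and clears the flag with a release store once the new dss_max has been published. Below is a minimal self-contained sketch of that pattern using C11 <stdatomic.h> in place of jemalloc's atomic_b_t wrappers; all names in the sketch are illustrative, not jemalloc's.

#include <sched.h>	/* sched_yield(), a crude stand-in for spin_adaptive() */
#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool dss_extending_sketch = false;

/* Spin until this thread owns the right to extend the DSS. */
static void
extending_start(void) {
	bool expected = false;
	while (!atomic_compare_exchange_weak_explicit(&dss_extending_sketch,
	    &expected, true, memory_order_acq_rel, memory_order_relaxed)) {
		/* A failed CAS writes the current value back into expected. */
		expected = false;
		sched_yield();
	}
}

/* Release ownership; pairs with the acquire half of the CAS above. */
static void
extending_finish(void) {
	atomic_store_explicit(&dss_extending_sketch, false,
	    memory_order_release);
}

The weak CAS may fail spuriously, which is harmless here because the loop retries anyway; jemalloc's version backs off with spin_adaptive() rather than yielding to the scheduler.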
Diffstat (limited to 'src/extent_dss.c')
-rw-r--r--  src/extent_dss.c  72
1 file changed, 34 insertions(+), 38 deletions(-)
diff --git a/src/extent_dss.c b/src/extent_dss.c
index 6b5d066..8e0ca65 100644
--- a/src/extent_dss.c
+++ b/src/extent_dss.c
@@ -28,6 +28,8 @@ static atomic_u_t dss_prec_default = ATOMIC_INIT(
/* Base address of the DSS. */
static void *dss_base;
+/* Atomic boolean indicating whether a thread is currently extending DSS. */
+static atomic_b_t dss_extending;
/* Atomic boolean indicating whether the DSS is exhausted. */
static atomic_b_t dss_exhausted;
/* Atomic current upper limit on DSS addresses. */
@@ -65,37 +67,41 @@ extent_dss_prec_set(dss_prec_t dss_prec) {
return false;
}
+static void
+extent_dss_extending_start(void) {
+ spin_t spinner = SPIN_INITIALIZER;
+ while (true) {
+ bool expected = false;
+ if (atomic_compare_exchange_weak_b(&dss_extending, &expected,
+ true, ATOMIC_ACQ_REL, ATOMIC_RELAXED)) {
+ break;
+ }
+ spin_adaptive(&spinner);
+ }
+}
+
+static void
+extent_dss_extending_finish(void) {
+ assert(atomic_load_b(&dss_extending, ATOMIC_RELAXED));
+
+ atomic_store_b(&dss_extending, false, ATOMIC_RELEASE);
+}
+
static void *
extent_dss_max_update(void *new_addr) {
- void *max_cur;
-
/*
* Get the current end of the DSS as max_cur and assure that dss_max is
* up to date.
*/
- spin_t spinner = SPIN_INITIALIZER;
- while (true) {
- void *max_prev = atomic_load_p(&dss_max, ATOMIC_RELAXED);
-
- max_cur = extent_dss_sbrk(0);
- if ((uintptr_t)max_prev > (uintptr_t)max_cur) {
- /*
- * Another thread optimistically updated dss_max. Wait
- * for it to finish.
- */
- spin_adaptive(&spinner);
- continue;
- }
- if (atomic_compare_exchange_weak_p(&dss_max, &max_prev,
- max_cur, ATOMIC_ACQ_REL, ATOMIC_RELAXED)) {
- break;
- }
+ void *max_cur = extent_dss_sbrk(0);
+ if (max_cur == (void *)-1) {
+ return NULL;
}
+ atomic_store_p(&dss_max, max_cur, ATOMIC_RELEASE);
/* Fixed new_addr can only be supported if it is at the edge of DSS. */
if (new_addr != NULL && max_cur != new_addr) {
return NULL;
}
-
return max_cur;
}
@@ -121,6 +127,7 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
return NULL;
}
+ extent_dss_extending_start();
if (!atomic_load_b(&dss_exhausted, ATOMIC_ACQUIRE)) {
/*
* The loop is necessary to recover from races with other
@@ -168,21 +175,14 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
assert((uintptr_t)max_cur + incr == (uintptr_t)ret +
size);
- /*
- * Optimistically update dss_max, and roll back below if
- * sbrk() fails. No other thread will try to extend the
- * DSS while dss_max is greater than the current DSS
- * max reported by sbrk(0).
- */
- if (!atomic_compare_exchange_weak_p(&dss_max, &max_cur,
- dss_next, ATOMIC_ACQ_REL, ATOMIC_RELAXED)) {
- continue;
- }
-
/* Try to allocate. */
void *dss_prev = extent_dss_sbrk(incr);
if (dss_prev == max_cur) {
/* Success. */
+ atomic_store_p(&dss_max, dss_next,
+ ATOMIC_RELEASE);
+ extent_dss_extending_finish();
+
if (gap_size_page != 0) {
extent_dalloc_gap(tsdn, arena, gap);
} else {
@@ -209,14 +209,8 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
}
/*
* Failure, whether due to OOM or a race with a raw
- * sbrk() call from outside the allocator. Try to roll
- * back optimistic dss_max update; if rollback fails,
- * it's due to another caller of this function having
- * succeeded since this invocation started, in which
- * case rollback is not necessary.
+ * sbrk() call from outside the allocator.
*/
- atomic_compare_exchange_strong_p(&dss_max, &dss_next,
- max_cur, ATOMIC_ACQ_REL, ATOMIC_RELAXED);
if (dss_prev == (void *)-1) {
/* OOM. */
atomic_store_b(&dss_exhausted, true,
@@ -226,6 +220,7 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
}
}
label_oom:
+ extent_dss_extending_finish();
extent_dalloc(tsdn, arena, gap);
return NULL;
}
@@ -265,6 +260,7 @@ extent_dss_boot(void) {
cassert(have_dss);
dss_base = extent_dss_sbrk(0);
+ atomic_store_b(&dss_extending, false, ATOMIC_RELAXED);
atomic_store_b(&dss_exhausted, dss_base == (void *)-1, ATOMIC_RELAXED);
atomic_store_p(&dss_max, dss_base, ATOMIC_RELAXED);
}
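The net effect on the allocation path: the sbrk(incr) call and the dss_max update now both happen while dss_extending is held, so dss_max only advances after the kernel has actually granted the memory, and the old optimistic-update/rollback protocol, which broke whenever a raw sbrk() caller shrank the segment, is gone entirely. A condensed sketch of the resulting critical section, reusing the helpers sketched above and omitting jemalloc's gap and page-alignment handling; dss_alloc_sketch and dss_max_sketch are hypothetical names.

#include <stddef.h>
#include <stdint.h>
#include <unistd.h>	/* sbrk() */

static _Atomic(void *) dss_max_sketch;

static void *
dss_alloc_sketch(size_t size) {
	extending_start();
	void *max_cur = sbrk(0);
	if (max_cur == (void *)-1) {
		extending_finish();
		return NULL;	/* DSS unavailable. */
	}
	if (sbrk((intptr_t)size) == max_cur) {
		/* Publish the new break only after sbrk() has succeeded. */
		atomic_store_explicit(&dss_max_sketch,
		    (void *)((uintptr_t)max_cur + size), memory_order_release);
		extending_finish();
		return max_cur;
	}
	extending_finish();	/* OOM, or a race with a raw sbrk() caller. */
	return NULL;
}

Because the flag serializes all extenders, readers of dss_max can never observe an optimistic value that sbrk() later fails to deliver, which is exactly the invariant the old rollback code could not maintain once the break moved backward.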