summary refs log tree commit diff stats
path: root/jemalloc/src
diff options
context:
space:
mode:
author    Jason Evans <je@fb.com>  2010-12-01 00:50:58 (GMT)
committer Jason Evans <je@fb.com>  2010-12-01 00:50:58 (GMT)
commit cfdc8cfbd626e83d38417bd8c73ac018b611e390 (patch)
tree   7adad90dfe6ed086e698c94fda910d974f1b8500 /jemalloc/src
parent aee7fd2b70050fb434f2c9f52153194de73dc051 (diff)
download jemalloc-cfdc8cfbd626e83d38417bd8c73ac018b611e390.zip
         jemalloc-cfdc8cfbd626e83d38417bd8c73ac018b611e390.tar.gz
         jemalloc-cfdc8cfbd626e83d38417bd8c73ac018b611e390.tar.bz2
Use mremap(2) for huge realloc().
If mremap(2) is available and supports MREMAP_FIXED, use it for huge realloc(). Initialize rtree later during bootstrapping, so that --enable-debug --enable-dss works. Fix a minor swap_avail stats bug.
Diffstat (limited to 'jemalloc/src')
-rw-r--r--  jemalloc/src/chunk.c      | 10
-rw-r--r--  jemalloc/src/chunk_dss.c  | 16
-rw-r--r--  jemalloc/src/chunk_swap.c | 24
-rw-r--r--  jemalloc/src/huge.c       | 57
4 files changed, 93 insertions, 14 deletions
diff --git a/jemalloc/src/chunk.c b/jemalloc/src/chunk.c
index 00bf50a..301519e 100644
--- a/jemalloc/src/chunk.c
+++ b/jemalloc/src/chunk.c
@@ -146,11 +146,6 @@ chunk_boot(void)
chunksize_mask = chunksize - 1;
chunk_npages = (chunksize >> PAGE_SHIFT);
-#ifdef JEMALLOC_IVSALLOC
- chunks_rtree = rtree_new((ZU(1) << (LG_SIZEOF_PTR+3)) - opt_lg_chunk);
- if (chunks_rtree == NULL)
- return (true);
-#endif
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
if (malloc_mutex_init(&chunks_mtx))
return (true);
@@ -166,6 +161,11 @@ chunk_boot(void)
if (chunk_dss_boot())
return (true);
#endif
+#ifdef JEMALLOC_IVSALLOC
+ chunks_rtree = rtree_new((ZU(1) << (LG_SIZEOF_PTR+3)) - opt_lg_chunk);
+ if (chunks_rtree == NULL)
+ return (true);
+#endif
return (false);
}
diff --git a/jemalloc/src/chunk_dss.c b/jemalloc/src/chunk_dss.c
index d9bd63c..5c0e290 100644
--- a/jemalloc/src/chunk_dss.c
+++ b/jemalloc/src/chunk_dss.c
@@ -200,6 +200,22 @@ chunk_dealloc_dss_record(void *chunk, size_t size)
}
bool
+chunk_in_dss(void *chunk)
+{
+ bool ret;
+
+ malloc_mutex_lock(&dss_mtx);
+ if ((uintptr_t)chunk >= (uintptr_t)dss_base
+ && (uintptr_t)chunk < (uintptr_t)dss_max)
+ ret = true;
+ else
+ ret = false;
+ malloc_mutex_unlock(&dss_mtx);
+
+ return (ret);
+}
+
+bool
chunk_dealloc_dss(void *chunk, size_t size)
{
bool ret;
diff --git a/jemalloc/src/chunk_swap.c b/jemalloc/src/chunk_swap.c
index ee038ba..cb25ae0 100644
--- a/jemalloc/src/chunk_swap.c
+++ b/jemalloc/src/chunk_swap.c
@@ -185,6 +185,24 @@ chunk_dealloc_swap_record(void *chunk, size_t size)
}
bool
+chunk_in_swap(void *chunk)
+{
+ bool ret;
+
+ assert(swap_enabled);
+
+ malloc_mutex_lock(&swap_mtx);
+ if ((uintptr_t)chunk >= (uintptr_t)swap_base
+ && (uintptr_t)chunk < (uintptr_t)swap_max)
+ ret = true;
+ else
+ ret = false;
+ malloc_mutex_unlock(&swap_mtx);
+
+ return (ret);
+}
+
+bool
chunk_dealloc_swap(void *chunk, size_t size)
{
bool ret;
@@ -219,15 +237,15 @@ chunk_dealloc_swap(void *chunk, size_t size)
} else
madvise(chunk, size, MADV_DONTNEED);
+#ifdef JEMALLOC_STATS
+ swap_avail += size;
+#endif
ret = false;
goto RETURN;
}
ret = true;
RETURN:
-#ifdef JEMALLOC_STATS
- swap_avail += size;
-#endif
malloc_mutex_unlock(&swap_mtx);
return (ret);
}
diff --git a/jemalloc/src/huge.c b/jemalloc/src/huge.c
index a035197..0aadc43 100644
--- a/jemalloc/src/huge.c
+++ b/jemalloc/src/huge.c
@@ -215,13 +215,56 @@ huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
* expectation that the extra bytes will be reliably preserved.
*/
copysize = (size < oldsize) ? size : oldsize;
- memcpy(ret, ptr, copysize);
- idalloc(ptr);
+
+ /*
+ * Use mremap(2) if this is a huge-->huge reallocation, and neither the
+ * source nor the destination are in swap or dss.
+ */
+#ifdef JEMALLOC_MREMAP_FIXED
+ if (oldsize >= chunksize
+# ifdef JEMALLOC_SWAP
+ && (swap_enabled == false || (chunk_in_swap(ptr) == false &&
+ chunk_in_swap(ret) == false))
+# endif
+# ifdef JEMALLOC_DSS
+ && chunk_in_dss(ptr) == false && chunk_in_dss(ret) == false
+# endif
+ ) {
+ size_t newsize = huge_salloc(ret);
+
+ if (mremap(ptr, oldsize, newsize, MREMAP_MAYMOVE|MREMAP_FIXED,
+ ret) == MAP_FAILED) {
+ /*
+ * Assuming no chunk management bugs in the allocator,
+ * the only documented way an error can occur here is
+ * if the application changed the map type for a
+ * portion of the old allocation. This is firmly in
+ * undefined behavior territory, so write a diagnostic
+ * message, and optionally abort.
+ */
+ char buf[BUFERROR_BUF];
+
+ buferror(errno, buf, sizeof(buf));
+ malloc_write("<jemalloc>: Error in mremap(): ");
+ malloc_write(buf);
+ malloc_write("\n");
+ if (opt_abort)
+ abort();
+ memcpy(ret, ptr, copysize);
+ idalloc(ptr);
+ } else
+ huge_dalloc(ptr, false);
+ } else
+#endif
+ {
+ memcpy(ret, ptr, copysize);
+ idalloc(ptr);
+ }
return (ret);
}
void
-huge_dalloc(void *ptr)
+huge_dalloc(void *ptr, bool unmap)
{
extent_node_t *node, key;
@@ -241,14 +284,16 @@ huge_dalloc(void *ptr)
malloc_mutex_unlock(&huge_mtx);
+ if (unmap) {
/* Unmap chunk. */
#ifdef JEMALLOC_FILL
#if (defined(JEMALLOC_SWAP) || defined(JEMALLOC_DSS))
- if (opt_junk)
- memset(node->addr, 0x5a, node->size);
+ if (opt_junk)
+ memset(node->addr, 0x5a, node->size);
#endif
#endif
- chunk_dealloc(node->addr, node->size);
+ chunk_dealloc(node->addr, node->size);
+ }
base_node_dealloc(node);
}