summaryrefslogtreecommitdiffstats
path: root/src/chunk_dss.c
diff options
context:
space:
mode:
authorJason Evans <je@fb.com>2012-10-11 20:53:15 (GMT)
committerJason Evans <je@fb.com>2012-10-13 01:26:16 (GMT)
commit609ae595f0358157b19311b0f9f9591db7cee705 (patch)
tree2a51758c6218f26167eb8ad2155ccc925c898c3b /src/chunk_dss.c
parentd0ffd8ed4f6aa4cf7248028eddfcb35f93247fe4 (diff)
downloadjemalloc-609ae595f0358157b19311b0f9f9591db7cee705.zip
jemalloc-609ae595f0358157b19311b0f9f9591db7cee705.tar.gz
jemalloc-609ae595f0358157b19311b0f9f9591db7cee705.tar.bz2
Add arena-specific and selective dss allocation.
Add the "arenas.extend" mallctl, so that it is possible to create new arenas that are outside the set that jemalloc automatically multiplexes threads onto. Add the ALLOCM_ARENA() flag for {,r,d}allocm(), so that it is possible to explicitly allocate from a particular arena. Add the "opt.dss" mallctl, which controls the default precedence of dss allocation relative to mmap allocation. Add the "arena.<i>.dss" mallctl, which makes it possible to set the default dss precedence on a per arena or global basis. Add the "arena.<i>.purge" mallctl, which obsoletes "arenas.purge". Add the "stats.arenas.<i>.dss" mallctl.
Diffstat (limited to 'src/chunk_dss.c')
-rw-r--r--src/chunk_dss.c37
1 file changed, 36 insertions, 1 deletion
diff --git a/src/chunk_dss.c b/src/chunk_dss.c
index 2d68e48..24781cc 100644
--- a/src/chunk_dss.c
+++ b/src/chunk_dss.c
@@ -3,6 +3,16 @@
/******************************************************************************/
/* Data. */
/*
 * Human-readable names for the dss_prec_t values, indexed by enum value;
 * reported via the "opt.dss"/"arena.<i>.dss" mallctls.
 */
const char *dss_prec_names[] = {
	"disabled",
	"primary",
	"secondary",
	"N/A"
};

/* Current dss precedence default, used when creating new arenas. */
static dss_prec_t dss_prec_default = DSS_PREC_DEFAULT;
+
/*
* Protects sbrk() calls. This avoids malloc races among threads, though it
* does not protect against races with threads that call sbrk() directly.
@@ -29,6 +39,31 @@ sbrk(intptr_t increment)
}
#endif
+dss_prec_t
+chunk_dss_prec_get(void)
+{
+ dss_prec_t ret;
+
+ if (config_dss == false)
+ return (dss_prec_disabled);
+ malloc_mutex_lock(&dss_mtx);
+ ret = dss_prec_default;
+ malloc_mutex_unlock(&dss_mtx);
+ return (ret);
+}
+
+bool
+chunk_dss_prec_set(dss_prec_t dss_prec)
+{
+
+ if (config_dss == false)
+ return (true);
+ malloc_mutex_lock(&dss_mtx);
+ dss_prec_default = dss_prec;
+ malloc_mutex_unlock(&dss_mtx);
+ return (false);
+}
+
void *
chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
{
@@ -88,7 +123,7 @@ chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
dss_max = dss_next;
malloc_mutex_unlock(&dss_mtx);
if (cpad_size != 0)
- chunk_dealloc(cpad, cpad_size, true);
+ chunk_unmap(cpad, cpad_size);
if (*zero) {
VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
memset(ret, 0, size);