Diffstat:
-rw-r--r--  INSTALL                                                |  4
-rw-r--r--  configure.ac                                           | 23
-rw-r--r--  doc/jemalloc.xml.in                                    | 29
-rw-r--r--  include/jemalloc/internal/arena.h                      |  2
-rw-r--r--  include/jemalloc/internal/jemalloc_internal.h.in       |  2
-rw-r--r--  include/jemalloc/internal/jemalloc_internal_defs.h.in  |  3
-rw-r--r--  src/arena.c                                            |  5
-rw-r--r--  src/chunk.c                                            |  8
-rw-r--r--  src/chunk_dss.c                                        | 20
-rw-r--r--  src/ctl.c                                              |  6
-rw-r--r--  src/huge.c                                             |  6
-rw-r--r--  test/integration/MALLOCX_ARENA.c                       | 17
-rw-r--r--  test/unit/mallctl.c                                    | 20
13 files changed, 72 insertions(+), 73 deletions(-)
diff --git a/INSTALL b/INSTALL
index 55c919a..07f51d1 100644
--- a/INSTALL
+++ b/INSTALL
@@ -145,10 +145,6 @@ any of the following arguments (not a definitive list) to 'configure':
memory allocation algorithm that causes semi-permanent VM map holes under
normal jemalloc operation.
---enable-dss
- Enable support for page allocation/deallocation via sbrk(2), in addition to
- mmap(2).
-
--disable-fill
Disable support for junk/zero filling of memory, quarantine, and redzones.
See the "opt.junk", "opt.zero", "opt.quarantine", and "opt.redzone" option
diff --git a/configure.ac b/configure.ac
index b47d572..dc817e1 100644
--- a/configure.ac
+++ b/configure.ac
@@ -836,34 +836,22 @@ if test "x$enable_munmap" = "x1" ; then
fi
AC_SUBST([enable_munmap])
-dnl Do not enable allocation from DSS by default.
-AC_ARG_ENABLE([dss],
- [AS_HELP_STRING([--enable-dss], [Enable allocation from DSS])],
-[if test "x$enable_dss" = "xno" ; then
- enable_dss="0"
-else
- enable_dss="1"
-fi
-],
-[enable_dss="0"]
-)
+dnl Enable allocation from DSS if supported by the OS.
+have_dss="1"
dnl Check whether the BSD/SUSv1 sbrk() exists. If not, disable DSS support.
AC_CHECK_FUNC([sbrk], [have_sbrk="1"], [have_sbrk="0"])
if test "x$have_sbrk" = "x1" ; then
if test "x$sbrk_deprecated" == "x1" ; then
AC_MSG_RESULT([Disabling dss allocation because sbrk is deprecated])
- enable_dss="0"
- else
- AC_DEFINE([JEMALLOC_HAVE_SBRK], [ ])
+ have_dss="0"
fi
else
- enable_dss="0"
+ have_dss="0"
fi
-if test "x$enable_dss" = "x1" ; then
+if test "x$have_dss" = "x1" ; then
AC_DEFINE([JEMALLOC_DSS], [ ])
fi
-AC_SUBST([enable_dss])
dnl Support the junk/zero filling option by default.
AC_ARG_ENABLE([fill],
@@ -1461,7 +1449,6 @@ AC_MSG_RESULT([valgrind : ${enable_valgrind}])
AC_MSG_RESULT([xmalloc : ${enable_xmalloc}])
AC_MSG_RESULT([mremap : ${enable_mremap}])
AC_MSG_RESULT([munmap : ${enable_munmap}])
-AC_MSG_RESULT([dss : ${enable_dss}])
AC_MSG_RESULT([lazy_lock : ${enable_lazy_lock}])
AC_MSG_RESULT([tls : ${enable_tls}])
AC_MSG_RESULT([===============================================================================])
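With the flag gone, DSS support is decided purely by feature detection: JEMALLOC_DSS is defined whenever a usable, non-deprecated sbrk(2) is found. AC_CHECK_FUNC([sbrk]) settles that by compiling and linking a tiny probe; roughly (a sketch of the kind of conftest program autoconf generates, not jemalloc source):

/* If this program links, an sbrk symbol exists and have_sbrk="1". */
char sbrk ();

int
main (void)
{
	return sbrk ();
}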
diff --git a/doc/jemalloc.xml.in b/doc/jemalloc.xml.in
index 4acb07f..16dd0bb 100644
--- a/doc/jemalloc.xml.in
+++ b/doc/jemalloc.xml.in
@@ -448,8 +448,10 @@ for (i = 0; i < nbins; i++) {
<manvolnum>2</manvolnum></citerefentry> to obtain memory, which is
suboptimal for several reasons, including race conditions, increased
fragmentation, and artificial limitations on maximum usable memory. If
- <option>--enable-dss</option> is specified during configuration, this
- allocator uses both <citerefentry><refentrytitle>mmap</refentrytitle>
+ <citerefentry><refentrytitle>sbrk</refentrytitle>
+ <manvolnum>2</manvolnum></citerefentry> is supported by the operating
+ system, this allocator uses both
+ <citerefentry><refentrytitle>mmap</refentrytitle>
<manvolnum>2</manvolnum></citerefentry> and
<citerefentry><refentrytitle>sbrk</refentrytitle>
<manvolnum>2</manvolnum></citerefentry>, in that order of preference;
@@ -625,16 +627,6 @@ for (i = 0; i < nbins; i++) {
build configuration.</para></listitem>
</varlistentry>
- <varlistentry id="config.dss">
- <term>
- <mallctl>config.dss</mallctl>
- (<type>bool</type>)
- <literal>r-</literal>
- </term>
- <listitem><para><option>--enable-dss</option> was specified during
- build configuration.</para></listitem>
- </varlistentry>
-
<varlistentry id="config.fill">
<term>
<mallctl>config.fill</mallctl>
@@ -790,10 +782,15 @@ for (i = 0; i < nbins; i++) {
<manvolnum>2</manvolnum></citerefentry>) allocation precedence as
related to <citerefentry><refentrytitle>mmap</refentrytitle>
<manvolnum>2</manvolnum></citerefentry> allocation. The following
- settings are supported: &ldquo;disabled&rdquo;, &ldquo;primary&rdquo;,
- and &ldquo;secondary&rdquo;. The default is &ldquo;secondary&rdquo; if
- <link linkend="config.dss"><mallctl>config.dss</mallctl></link> is
- true, &ldquo;disabled&rdquo; otherwise.
+ settings are supported if
+ <citerefentry><refentrytitle>sbrk</refentrytitle>
+ <manvolnum>2</manvolnum></citerefentry> is supported by the operating
+ system: &ldquo;disabled&rdquo;, &ldquo;primary&rdquo;, and
+ &ldquo;secondary&rdquo;; otherwise only &ldquo;disabled&rdquo; is
+ supported. The default is &ldquo;secondary&rdquo; if
+ <citerefentry><refentrytitle>sbrk</refentrytitle>
+ <manvolnum>2</manvolnum></citerefentry> is supported by the operating
+ system; &ldquo;disabled&rdquo; otherwise.
</para></listitem>
</varlistentry>
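The precedence setting described above is also exposed per arena through the "arena.<i>.dss" mallctl exercised by the tests further down; a minimal sketch of querying it from application code (error handling elided):

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	const char *dss;
	size_t sz = sizeof(dss);

	if (mallctl("arena.0.dss", &dss, &sz, NULL, 0) == 0)
		printf("arena 0 dss precedence: %s\n", dss);
	return (0);
}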
diff --git a/include/jemalloc/internal/arena.h b/include/jemalloc/internal/arena.h
index 0e14c2c..6de312e 100644
--- a/include/jemalloc/internal/arena.h
+++ b/include/jemalloc/internal/arena.h
@@ -434,7 +434,7 @@ void *arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
size_t extra, size_t alignment, bool zero, bool try_tcache_alloc,
bool try_tcache_dalloc);
dss_prec_t arena_dss_prec_get(arena_t *arena);
-void arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec);
+bool arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec);
void arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive,
size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats,
malloc_large_stats_t *lstats);
diff --git a/include/jemalloc/internal/jemalloc_internal.h.in b/include/jemalloc/internal/jemalloc_internal.h.in
index a374e2a..4821b9b 100644
--- a/include/jemalloc/internal/jemalloc_internal.h.in
+++ b/include/jemalloc/internal/jemalloc_internal.h.in
@@ -85,7 +85,7 @@ static const bool config_debug =
false
#endif
;
-static const bool config_dss =
+static const bool have_dss =
#ifdef JEMALLOC_DSS
true
#else
diff --git a/include/jemalloc/internal/jemalloc_internal_defs.h.in b/include/jemalloc/internal/jemalloc_internal_defs.h.in
index c166fbd..fc95967 100644
--- a/include/jemalloc/internal/jemalloc_internal_defs.h.in
+++ b/include/jemalloc/internal/jemalloc_internal_defs.h.in
@@ -76,9 +76,6 @@
*/
#undef JEMALLOC_MUTEX_INIT_CB
-/* Defined if sbrk() is supported. */
-#undef JEMALLOC_HAVE_SBRK
-
/* Non-empty if the tls_model attribute is supported. */
#undef JEMALLOC_TLS_MODEL
diff --git a/src/arena.c b/src/arena.c
index d574100..8aa36fd 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -2243,13 +2243,16 @@ arena_dss_prec_get(arena_t *arena)
return (ret);
}
-void
+bool
arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec)
{
+ if (have_dss == false)
+ return (dss_prec != dss_prec_disabled);
malloc_mutex_lock(&arena->lock);
arena->dss_prec = dss_prec;
malloc_mutex_unlock(&arena->lock);
+ return (false);
}
void
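arena_dss_prec_set() now follows the jemalloc convention of returning false on success and true on error: on builds without DSS support, any precedence other than dss_prec_disabled is rejected rather than silently recorded. A caller-side sketch (handle_no_dss is hypothetical):

if (arena_dss_prec_set(arena, dss_prec_primary)) {
	/* DSS unavailable; only dss_prec_disabled would succeed. */
	handle_no_dss();
}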
diff --git a/src/chunk.c b/src/chunk.c
index 90ab116..fdd693e 100644
--- a/src/chunk.c
+++ b/src/chunk.c
@@ -153,7 +153,7 @@ chunk_alloc(size_t size, size_t alignment, bool base, bool *zero,
assert((alignment & chunksize_mask) == 0);
/* "primary" dss. */
- if (config_dss && dss_prec == dss_prec_primary) {
+ if (have_dss && dss_prec == dss_prec_primary) {
if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss, size,
alignment, base, zero)) != NULL)
goto label_return;
@@ -167,7 +167,7 @@ chunk_alloc(size_t size, size_t alignment, bool base, bool *zero,
if ((ret = chunk_alloc_mmap(size, alignment, zero)) != NULL)
goto label_return;
/* "secondary" dss. */
- if (config_dss && dss_prec == dss_prec_secondary) {
+ if (have_dss && dss_prec == dss_prec_secondary) {
if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss, size,
alignment, base, zero)) != NULL)
goto label_return;
@@ -305,7 +305,7 @@ chunk_unmap(void *chunk, size_t size)
assert(size != 0);
assert((size & chunksize_mask) == 0);
- if (config_dss && chunk_in_dss(chunk))
+ if (have_dss && chunk_in_dss(chunk))
chunk_record(&chunks_szad_dss, &chunks_ad_dss, chunk, size);
else if (chunk_dealloc_mmap(chunk, size))
chunk_record(&chunks_szad_mmap, &chunks_ad_mmap, chunk, size);
@@ -348,7 +348,7 @@ chunk_boot(void)
return (true);
memset(&stats_chunks, 0, sizeof(chunk_stats_t));
}
- if (config_dss && chunk_dss_boot())
+ if (have_dss && chunk_dss_boot())
return (true);
extent_tree_szad_new(&chunks_szad_mmap);
extent_tree_ad_new(&chunks_ad_mmap);
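Taken together, the chunk.c hunks encode the precedence order the manual describes: "primary" tries the DSS before mmap(2), "secondary" tries it after. A condensed sketch of chunk_alloc()'s flow under this commit (recycle steps, the base flag, and error handling omitted; helper names as in the diff):

void *
chunk_alloc_sketch(size_t size, size_t alignment, bool *zero,
    dss_prec_t dss_prec)
{
	void *ret;

	/* "primary" dss: try sbrk-backed memory first. */
	if (have_dss && dss_prec == dss_prec_primary &&
	    (ret = chunk_alloc_dss(size, alignment, zero)) != NULL)
		return (ret);
	/* mmap. */
	if ((ret = chunk_alloc_mmap(size, alignment, zero)) != NULL)
		return (ret);
	/* "secondary" dss: fall back to sbrk only after mmap fails. */
	if (have_dss && dss_prec == dss_prec_secondary &&
	    (ret = chunk_alloc_dss(size, alignment, zero)) != NULL)
		return (ret);
	return (NULL);
}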
diff --git a/src/chunk_dss.c b/src/chunk_dss.c
index 510bb8b..36133f1 100644
--- a/src/chunk_dss.c
+++ b/src/chunk_dss.c
@@ -32,7 +32,7 @@ static void *
chunk_dss_sbrk(intptr_t increment)
{
-#ifdef JEMALLOC_HAVE_SBRK
+#ifdef JEMALLOC_DSS
return (sbrk(increment));
#else
not_implemented();
@@ -45,7 +45,7 @@ chunk_dss_prec_get(void)
{
dss_prec_t ret;
- if (config_dss == false)
+ if (have_dss == false)
return (dss_prec_disabled);
malloc_mutex_lock(&dss_mtx);
ret = dss_prec_default;
@@ -57,8 +57,8 @@ bool
chunk_dss_prec_set(dss_prec_t dss_prec)
{
- if (config_dss == false)
- return (true);
+ if (have_dss == false)
+ return (dss_prec != dss_prec_disabled);
malloc_mutex_lock(&dss_mtx);
dss_prec_default = dss_prec;
malloc_mutex_unlock(&dss_mtx);
@@ -70,7 +70,7 @@ chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
{
void *ret;
- cassert(config_dss);
+ cassert(have_dss);
assert(size > 0 && (size & chunksize_mask) == 0);
assert(alignment > 0 && (alignment & chunksize_mask) == 0);
@@ -143,7 +143,7 @@ chunk_in_dss(void *chunk)
{
bool ret;
- cassert(config_dss);
+ cassert(have_dss);
malloc_mutex_lock(&dss_mtx);
if ((uintptr_t)chunk >= (uintptr_t)dss_base
@@ -160,7 +160,7 @@ bool
chunk_dss_boot(void)
{
- cassert(config_dss);
+ cassert(have_dss);
if (malloc_mutex_init(&dss_mtx))
return (true);
@@ -175,7 +175,7 @@ void
chunk_dss_prefork(void)
{
- if (config_dss)
+ if (have_dss)
malloc_mutex_prefork(&dss_mtx);
}
@@ -183,7 +183,7 @@ void
chunk_dss_postfork_parent(void)
{
- if (config_dss)
+ if (have_dss)
malloc_mutex_postfork_parent(&dss_mtx);
}
@@ -191,7 +191,7 @@ void
chunk_dss_postfork_child(void)
{
- if (config_dss)
+ if (have_dss)
malloc_mutex_postfork_child(&dss_mtx);
}
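The prefork/postfork hooks above exist so the DSS mutex is never inherited in a locked state by a fork(2) child: the allocator acquires dss_mtx before the fork and releases or reinitializes it afterward in parent and child. A hypothetical sketch of how such hooks get wired up, assuming registration via pthread_atfork(3):

#include <pthread.h>

static void
install_dss_fork_hooks(void)	/* hypothetical helper */
{
	pthread_atfork(chunk_dss_prefork, chunk_dss_postfork_parent,
	    chunk_dss_postfork_child);
}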
diff --git a/src/ctl.c b/src/ctl.c
index cc2c5ae..0340a27 100644
--- a/src/ctl.c
+++ b/src/ctl.c
@@ -74,7 +74,6 @@ CTL_PROTO(thread_allocatedp)
CTL_PROTO(thread_deallocated)
CTL_PROTO(thread_deallocatedp)
CTL_PROTO(config_debug)
-CTL_PROTO(config_dss)
CTL_PROTO(config_fill)
CTL_PROTO(config_lazy_lock)
CTL_PROTO(config_mremap)
@@ -213,7 +212,6 @@ static const ctl_named_node_t thread_node[] = {
static const ctl_named_node_t config_node[] = {
{NAME("debug"), CTL(config_debug)},
- {NAME("dss"), CTL(config_dss)},
{NAME("fill"), CTL(config_fill)},
{NAME("lazy_lock"), CTL(config_lazy_lock)},
{NAME("mremap"), CTL(config_mremap)},
@@ -1136,7 +1134,6 @@ label_return:
/******************************************************************************/
CTL_RO_BOOL_CONFIG_GEN(config_debug)
-CTL_RO_BOOL_CONFIG_GEN(config_dss)
CTL_RO_BOOL_CONFIG_GEN(config_fill)
CTL_RO_BOOL_CONFIG_GEN(config_lazy_lock)
CTL_RO_BOOL_CONFIG_GEN(config_mremap)
@@ -1356,8 +1353,7 @@ arena_i_dss_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
arena_t *arena = arenas[arena_ind];
if (arena != NULL) {
dss_prec_old = arena_dss_prec_get(arena);
- arena_dss_prec_set(arena, dss_prec);
- err = false;
+ err = arena_dss_prec_set(arena, dss_prec);
} else
err = true;
} else {
diff --git a/src/huge.c b/src/huge.c
index d72f213..e725fd9 100644
--- a/src/huge.c
+++ b/src/huge.c
@@ -140,7 +140,7 @@ huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
* Use mremap(2) if this is a huge-->huge reallocation, and neither the
* source nor the destination are in dss.
*/
- if (oldsize >= chunksize && (config_dss == false || (chunk_in_dss(ptr)
+ if (oldsize >= chunksize && (have_dss == false || (chunk_in_dss(ptr)
== false && chunk_in_dss(ret) == false))) {
size_t newsize = huge_salloc(ret);
@@ -198,12 +198,12 @@ static void
huge_dalloc_junk(void *ptr, size_t usize)
{
- if (config_fill && config_dss && opt_junk) {
+ if (config_fill && have_dss && opt_junk) {
/*
* Only bother junk filling if the chunk isn't about to be
* unmapped.
*/
- if (config_munmap == false || (config_dss && chunk_in_dss(ptr)))
+ if (config_munmap == false || (have_dss && chunk_in_dss(ptr)))
memset(ptr, 0x5a, usize);
}
}
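Both huge.c hunks turn on the same fact: DSS memory is obtained with sbrk(2) rather than mmap(2), so it can be neither remapped nor unmapped. A hedged restatement of the guard in huge_ralloc() (names as in the diff):

/* mremap(2) only applies to mmap(2)ed regions, so a huge-->huge
 * reallocation may use it only when neither endpoint is in the DSS. */
bool can_mremap = oldsize >= chunksize && (have_dss == false ||
    (chunk_in_dss(ptr) == false && chunk_in_dss(ret) == false));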
diff --git a/test/integration/MALLOCX_ARENA.c b/test/integration/MALLOCX_ARENA.c
index 695a5b6..30c203a 100644
--- a/test/integration/MALLOCX_ARENA.c
+++ b/test/integration/MALLOCX_ARENA.c
@@ -2,6 +2,14 @@
#define NTHREADS 10
+static bool have_dss =
+#ifdef JEMALLOC_DSS
+ true
+#else
+ false
+#endif
+ ;
+
void *
thd_start(void *arg)
{
@@ -18,13 +26,16 @@ thd_start(void *arg)
size_t mib[3];
size_t miblen = sizeof(mib) / sizeof(size_t);
const char *dss_precs[] = {"disabled", "primary", "secondary"};
- const char *dss = dss_precs[thread_ind %
- (sizeof(dss_precs)/sizeof(char*))];
+ unsigned prec_ind = thread_ind %
+ (sizeof(dss_precs)/sizeof(char*));
+ const char *dss = dss_precs[prec_ind];
+ int expected_err = (have_dss || prec_ind == 0) ? 0 : EFAULT;
assert_d_eq(mallctlnametomib("arena.0.dss", mib, &miblen), 0,
"Error in mallctlnametomib()");
mib[1] = arena_ind;
assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, (void *)&dss,
- sizeof(const char *)), 0, "Error in mallctlbymib()");
+ sizeof(const char *)), expected_err,
+ "Error in mallctlbymib()");
}
p = mallocx(1, MALLOCX_ARENA(arena_ind));
diff --git a/test/unit/mallctl.c b/test/unit/mallctl.c
index 31fb810..caf20f8 100644
--- a/test/unit/mallctl.c
+++ b/test/unit/mallctl.c
@@ -127,7 +127,6 @@ TEST_BEGIN(test_mallctl_config)
} while (0)
TEST_MALLCTL_CONFIG(debug);
- TEST_MALLCTL_CONFIG(dss);
TEST_MALLCTL_CONFIG(fill);
TEST_MALLCTL_CONFIG(lazy_lock);
TEST_MALLCTL_CONFIG(mremap);
@@ -255,15 +254,28 @@ TEST_BEGIN(test_arena_i_dss)
{
const char *dss_prec_old, *dss_prec_new;
size_t sz = sizeof(dss_prec_old);
+ size_t mib[3];
+ size_t miblen;
+
+ miblen = sizeof(mib)/sizeof(size_t);
+ assert_d_eq(mallctlnametomib("arena.0.dss", mib, &miblen), 0,
+ "Unexpected mallctlnametomib() error");
- dss_prec_new = "primary";
- assert_d_eq(mallctl("arena.0.dss", &dss_prec_old, &sz, &dss_prec_new,
+ dss_prec_new = "disabled";
+ assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_old, &sz, &dss_prec_new,
sizeof(dss_prec_new)), 0, "Unexpected mallctl() failure");
assert_str_ne(dss_prec_old, "primary",
"Unexpected default for dss precedence");
- assert_d_eq(mallctl("arena.0.dss", &dss_prec_new, &sz, &dss_prec_old,
+ assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_new, &sz, &dss_prec_old,
sizeof(dss_prec_old)), 0, "Unexpected mallctl() failure");
+
+ mib[1] = narenas_total_get();
+ dss_prec_new = "disabled";
+ assert_d_eq(mallctlbymib(mib, miblen, &dss_prec_old, &sz, &dss_prec_new,
+ sizeof(dss_prec_new)), 0, "Unexpected mallctl() failure");
+ assert_str_ne(dss_prec_old, "primary",
+ "Unexpected default for dss precedence");
}
TEST_END
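For completeness, a minimal application-level sketch of what the updated tests exercise: requesting "primary" precedence for arena 0 and coping with builds where sbrk(2) is unsupported, in which case the set now fails (the integration test above expects EFAULT) instead of silently succeeding:

#include <errno.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	const char *dss = "primary";

	if (mallctl("arena.0.dss", NULL, NULL, (void *)&dss,
	    sizeof(const char *)) != 0)
		fprintf(stderr, "dss precedence rejected (no sbrk?)\n");
	return (0);
}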