summaryrefslogtreecommitdiffstats
path: root/jemalloc/src
diff options
context:
space:
mode:
authorJason Evans <jasone@canonware.com>2010-10-24 01:37:06 (GMT)
committerJason Evans <jasone@canonware.com>2010-10-24 01:37:06 (GMT)
commite73397062ac3ab28a9d377591b63ed19fd154cca (patch)
treed78dbc832be4e8c8b504017bc15f0256f91e033d /jemalloc/src
parente4f7846f1fd279a039ffa2a41707348187219de4 (diff)
downloadjemalloc-e73397062ac3ab28a9d377591b63ed19fd154cca.zip
jemalloc-e73397062ac3ab28a9d377591b63ed19fd154cca.tar.gz
jemalloc-e73397062ac3ab28a9d377591b63ed19fd154cca.tar.bz2
Replace JEMALLOC_OPTIONS with MALLOC_CONF.
Replace the single-character run-time flags with key/value pairs, which can be set via the malloc_conf global, /etc/malloc.conf, and the MALLOC_CONF environment variable. Replace the JEMALLOC_PROF_PREFIX environment variable with the "opt.prof_prefix" option. Replace umax2s() with u2s().
Diffstat (limited to 'jemalloc/src')
-rw-r--r--jemalloc/src/arena.c6
-rw-r--r--jemalloc/src/chunk.c10
-rw-r--r--jemalloc/src/ctl.c67
-rw-r--r--jemalloc/src/jemalloc.c654
-rw-r--r--jemalloc/src/prof.c123
-rw-r--r--jemalloc/src/stats.c200
-rw-r--r--jemalloc/src/tcache.c12
7 files changed, 575 insertions, 497 deletions
diff --git a/jemalloc/src/arena.c b/jemalloc/src/arena.c
index d811f65..00f425f 100644
--- a/jemalloc/src/arena.c
+++ b/jemalloc/src/arena.c
@@ -290,7 +290,7 @@ arena_run_reg_dalloc(arena_run_t *run, void *ptr)
assert((uintptr_t)ptr >= (uintptr_t)run +
(uintptr_t)run->bin->reg0_offset);
/*
- * Freeing a pointer in the run's wilderness can cause assertion
+ * Freeing a pointer past the run's frontier can cause assertion
* failure.
*/
assert((uintptr_t)ptr < (uintptr_t)run->next);
@@ -2532,7 +2532,7 @@ arena_boot(void)
if (nbins > 255) {
char line_buf[UMAX2S_BUFSIZE];
malloc_write("<jemalloc>: Too many small size classes (");
- malloc_write(umax2s(nbins, 10, line_buf));
+ malloc_write(u2s(nbins, 10, line_buf));
malloc_write(" > max 255)\n");
abort();
}
@@ -2541,7 +2541,7 @@ arena_boot(void)
if (nbins > 256) {
char line_buf[UMAX2S_BUFSIZE];
malloc_write("<jemalloc>: Too many small size classes (");
- malloc_write(umax2s(nbins, 10, line_buf));
+ malloc_write(u2s(nbins, 10, line_buf));
malloc_write(" > max 256)\n");
abort();
}
diff --git a/jemalloc/src/chunk.c b/jemalloc/src/chunk.c
index 0be24fb..00bf50a 100644
--- a/jemalloc/src/chunk.c
+++ b/jemalloc/src/chunk.c
@@ -78,7 +78,7 @@ RETURN:
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
if (ret != NULL) {
# ifdef JEMALLOC_PROF
- bool udump;
+ bool gdump;
# endif
malloc_mutex_lock(&chunks_mtx);
# ifdef JEMALLOC_STATS
@@ -88,17 +88,17 @@ RETURN:
if (stats_chunks.curchunks > stats_chunks.highchunks) {
stats_chunks.highchunks = stats_chunks.curchunks;
# ifdef JEMALLOC_PROF
- udump = true;
+ gdump = true;
# endif
}
# ifdef JEMALLOC_PROF
else
- udump = false;
+ gdump = false;
# endif
malloc_mutex_unlock(&chunks_mtx);
# ifdef JEMALLOC_PROF
- if (opt_prof && opt_prof_udump && udump)
- prof_udump();
+ if (opt_prof && opt_prof_gdump && gdump)
+ prof_gdump();
# endif
}
#endif
diff --git a/jemalloc/src/ctl.c b/jemalloc/src/ctl.c
index dbc5cd4..c83ee4f 100644
--- a/jemalloc/src/ctl.c
+++ b/jemalloc/src/ctl.c
@@ -62,8 +62,15 @@ CTL_PROTO(config_tiny)
CTL_PROTO(config_tls)
CTL_PROTO(config_xmalloc)
CTL_PROTO(opt_abort)
+CTL_PROTO(opt_lg_qspace_max)
+CTL_PROTO(opt_lg_cspace_max)
+CTL_PROTO(opt_lg_chunk)
+CTL_PROTO(opt_narenas)
+CTL_PROTO(opt_lg_dirty_mult)
+CTL_PROTO(opt_stats_print)
#ifdef JEMALLOC_FILL
CTL_PROTO(opt_junk)
+CTL_PROTO(opt_zero)
#endif
#ifdef JEMALLOC_SYSV
CTL_PROTO(opt_sysv)
@@ -71,29 +78,22 @@ CTL_PROTO(opt_sysv)
#ifdef JEMALLOC_XMALLOC
CTL_PROTO(opt_xmalloc)
#endif
-#ifdef JEMALLOC_ZERO
-CTL_PROTO(opt_zero)
-#endif
#ifdef JEMALLOC_TCACHE
CTL_PROTO(opt_tcache)
CTL_PROTO(opt_lg_tcache_gc_sweep)
#endif
#ifdef JEMALLOC_PROF
CTL_PROTO(opt_prof)
+CTL_PROTO(opt_prof_prefix)
CTL_PROTO(opt_prof_active)
CTL_PROTO(opt_lg_prof_bt_max)
CTL_PROTO(opt_lg_prof_sample)
CTL_PROTO(opt_lg_prof_interval)
-CTL_PROTO(opt_prof_udump)
+CTL_PROTO(opt_prof_gdump)
CTL_PROTO(opt_prof_leak)
CTL_PROTO(opt_prof_accum)
CTL_PROTO(opt_lg_prof_tcmax)
#endif
-CTL_PROTO(opt_stats_print)
-CTL_PROTO(opt_lg_qspace_max)
-CTL_PROTO(opt_lg_cspace_max)
-CTL_PROTO(opt_lg_dirty_mult)
-CTL_PROTO(opt_lg_chunk)
#ifdef JEMALLOC_SWAP
CTL_PROTO(opt_overcommit)
#endif
@@ -247,38 +247,43 @@ static const ctl_node_t config_node[] = {
static const ctl_node_t opt_node[] = {
{NAME("abort"), CTL(opt_abort)},
+ {NAME("lg_qspace_max"), CTL(opt_lg_qspace_max)},
+ {NAME("lg_cspace_max"), CTL(opt_lg_cspace_max)},
+ {NAME("lg_chunk"), CTL(opt_lg_chunk)},
+ {NAME("narenas"), CTL(opt_narenas)},
+ {NAME("lg_dirty_mult"), CTL(opt_lg_dirty_mult)},
+ {NAME("stats_print"), CTL(opt_stats_print)}
#ifdef JEMALLOC_FILL
+ ,
{NAME("junk"), CTL(opt_junk)},
+ {NAME("zero"), CTL(opt_zero)}
#endif
#ifdef JEMALLOC_SYSV
- {NAME("sysv"), CTL(opt_sysv)},
+ ,
+ {NAME("sysv"), CTL(opt_sysv)}
#endif
#ifdef JEMALLOC_XMALLOC
- {NAME("xmalloc"), CTL(opt_xmalloc)},
-#endif
-#ifdef JEMALLOC_ZERO
- {NAME("zero"), CTL(opt_zero)},
+ ,
+ {NAME("xmalloc"), CTL(opt_xmalloc)}
#endif
#ifdef JEMALLOC_TCACHE
+ ,
{NAME("tcache"), CTL(opt_tcache)},
- {NAME("lg_tcache_gc_sweep"), CTL(opt_lg_tcache_gc_sweep)},
+ {NAME("lg_tcache_gc_sweep"), CTL(opt_lg_tcache_gc_sweep)}
#endif
#ifdef JEMALLOC_PROF
+ ,
{NAME("prof"), CTL(opt_prof)},
+ {NAME("prof_prefix"), CTL(opt_prof_prefix)},
{NAME("prof_active"), CTL(opt_prof_active)},
{NAME("lg_prof_bt_max"), CTL(opt_lg_prof_bt_max)},
{NAME("lg_prof_sample"), CTL(opt_lg_prof_sample)},
{NAME("lg_prof_interval"), CTL(opt_lg_prof_interval)},
- {NAME("prof_udump"), CTL(opt_prof_udump)},
+ {NAME("prof_gdump"), CTL(opt_prof_gdump)},
{NAME("prof_leak"), CTL(opt_prof_leak)},
{NAME("prof_accum"), CTL(opt_prof_accum)},
- {NAME("lg_prof_tcmax"), CTL(opt_lg_prof_tcmax)},
+ {NAME("lg_prof_tcmax"), CTL(opt_lg_prof_tcmax)}
#endif
- {NAME("stats_print"), CTL(opt_stats_print)},
- {NAME("lg_qspace_max"), CTL(opt_lg_qspace_max)},
- {NAME("lg_cspace_max"), CTL(opt_lg_cspace_max)},
- {NAME("lg_dirty_mult"), CTL(opt_lg_dirty_mult)},
- {NAME("lg_chunk"), CTL(opt_lg_chunk)}
#ifdef JEMALLOC_SWAP
,
{NAME("overcommit"), CTL(opt_overcommit)}
@@ -1201,8 +1206,15 @@ CTL_RO_FALSE_GEN(config_xmalloc)
/******************************************************************************/
CTL_RO_GEN(opt_abort, opt_abort, bool)
+CTL_RO_GEN(opt_lg_qspace_max, opt_lg_qspace_max, size_t)
+CTL_RO_GEN(opt_lg_cspace_max, opt_lg_cspace_max, size_t)
+CTL_RO_GEN(opt_lg_chunk, opt_lg_chunk, size_t)
+CTL_RO_GEN(opt_narenas, opt_narenas, size_t)
+CTL_RO_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t)
+CTL_RO_GEN(opt_stats_print, opt_stats_print, bool)
#ifdef JEMALLOC_FILL
CTL_RO_GEN(opt_junk, opt_junk, bool)
+CTL_RO_GEN(opt_zero, opt_zero, bool)
#endif
#ifdef JEMALLOC_SYSV
CTL_RO_GEN(opt_sysv, opt_sysv, bool)
@@ -1210,29 +1222,22 @@ CTL_RO_GEN(opt_sysv, opt_sysv, bool)
#ifdef JEMALLOC_XMALLOC
CTL_RO_GEN(opt_xmalloc, opt_xmalloc, bool)
#endif
-#ifdef JEMALLOC_ZERO
-CTL_RO_GEN(opt_zero, opt_zero, bool)
-#endif
#ifdef JEMALLOC_TCACHE
CTL_RO_GEN(opt_tcache, opt_tcache, bool)
CTL_RO_GEN(opt_lg_tcache_gc_sweep, opt_lg_tcache_gc_sweep, ssize_t)
#endif
#ifdef JEMALLOC_PROF
CTL_RO_GEN(opt_prof, opt_prof, bool)
+CTL_RO_GEN(opt_prof_prefix, opt_prof_prefix, const char *)
CTL_RO_GEN(opt_prof_active, opt_prof_active, bool)
CTL_RO_GEN(opt_lg_prof_bt_max, opt_lg_prof_bt_max, size_t)
CTL_RO_GEN(opt_lg_prof_sample, opt_lg_prof_sample, size_t)
CTL_RO_GEN(opt_lg_prof_interval, opt_lg_prof_interval, ssize_t)
-CTL_RO_GEN(opt_prof_udump, opt_prof_udump, bool)
+CTL_RO_GEN(opt_prof_gdump, opt_prof_gdump, bool)
CTL_RO_GEN(opt_prof_leak, opt_prof_leak, bool)
CTL_RO_GEN(opt_prof_accum, opt_prof_accum, bool)
CTL_RO_GEN(opt_lg_prof_tcmax, opt_lg_prof_tcmax, ssize_t)
#endif
-CTL_RO_GEN(opt_stats_print, opt_stats_print, bool)
-CTL_RO_GEN(opt_lg_qspace_max, opt_lg_qspace_max, size_t)
-CTL_RO_GEN(opt_lg_cspace_max, opt_lg_cspace_max, size_t)
-CTL_RO_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t)
-CTL_RO_GEN(opt_lg_chunk, opt_lg_chunk, size_t)
#ifdef JEMALLOC_SWAP
CTL_RO_GEN(opt_overcommit, opt_overcommit, bool)
#endif
diff --git a/jemalloc/src/jemalloc.c b/jemalloc/src/jemalloc.c
index dedf011..012c4a8 100644
--- a/jemalloc/src/jemalloc.c
+++ b/jemalloc/src/jemalloc.c
@@ -41,8 +41,7 @@ size_t lg_pagesize;
unsigned ncpus;
/* Runtime configuration options. */
-const char *JEMALLOC_P(malloc_options)
- JEMALLOC_ATTR(visibility("default"));
+const char *JEMALLOC_P(malloc_conf) JEMALLOC_ATTR(visibility("default"));
#ifdef JEMALLOC_DEBUG
bool opt_abort = true;
# ifdef JEMALLOC_FILL
@@ -63,7 +62,7 @@ bool opt_xmalloc = false;
#ifdef JEMALLOC_FILL
bool opt_zero = false;
#endif
-static int opt_narenas_lshift = 0;
+size_t opt_narenas = 0;
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
@@ -74,6 +73,11 @@ static unsigned malloc_ncpus(void);
#if (defined(JEMALLOC_STATS) && defined(NO_TLS))
static void thread_allocated_cleanup(void *arg);
#endif
+static bool malloc_conf_next(char const **opts_p, char const **k_p,
+ size_t *klen_p, char const **v_p, size_t *vlen_p);
+static void malloc_conf_error(const char *msg, const char *k, size_t klen,
+ const char *v, size_t vlen);
+static void malloc_conf_init(void);
static bool malloc_init_hard(void);
/******************************************************************************/
@@ -260,100 +264,173 @@ malloc_init(void)
}
static bool
-malloc_init_hard(void)
+malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
+ char const **v_p, size_t *vlen_p)
{
- unsigned i;
- int linklen;
- char buf[PATH_MAX + 1];
- const char *opts;
- arena_t *init_arenas[1];
-
- malloc_mutex_lock(&init_lock);
- if (malloc_initialized || malloc_initializer == pthread_self()) {
- /*
- * Another thread initialized the allocator before this one
- * acquired init_lock, or this thread is the initializing
- * thread, and it is recursively allocating.
- */
- malloc_mutex_unlock(&init_lock);
- return (false);
+ bool accept;
+ const char *opts = *opts_p;
+
+ *k_p = opts;
+
+ for (accept = false; accept == false;) {
+ switch (*opts) {
+ case 'A': case 'B': case 'C': case 'D': case 'E':
+ case 'F': case 'G': case 'H': case 'I': case 'J':
+ case 'K': case 'L': case 'M': case 'N': case 'O':
+ case 'P': case 'Q': case 'R': case 'S': case 'T':
+ case 'U': case 'V': case 'W': case 'X': case 'Y':
+ case 'Z':
+ case 'a': case 'b': case 'c': case 'd': case 'e':
+ case 'f': case 'g': case 'h': case 'i': case 'j':
+ case 'k': case 'l': case 'm': case 'n': case 'o':
+ case 'p': case 'q': case 'r': case 's': case 't':
+ case 'u': case 'v': case 'w': case 'x': case 'y':
+ case 'z':
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ case '_':
+ opts++;
+ break;
+ case ':':
+ opts++;
+ *klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
+ *v_p = opts;
+ accept = true;
+ break;
+ case '\0':
+ if (opts != *opts_p) {
+ malloc_write("<jemalloc>: Conf string "
+ "ends with key\n");
+ }
+ return (true);
+ default:
+ malloc_write("<jemalloc>: Malformed conf "
+ "string\n");
+ return (true);
+ }
}
- if (malloc_initializer != (unsigned long)0) {
- /* Busy-wait until the initializing thread completes. */
- do {
- malloc_mutex_unlock(&init_lock);
- CPU_SPINWAIT;
- malloc_mutex_lock(&init_lock);
- } while (malloc_initialized == false);
- malloc_mutex_unlock(&init_lock);
- return (false);
+
+ for (accept = false; accept == false;) {
+ switch (*opts) {
+ case ',':
+ opts++;
+ /*
+ * Look ahead one character here, because the
+ * next time this function is called, it will
+ * assume that end of input has been cleanly
+ * reached if no input remains, but we have
+ * optimistically already consumed the comma if
+ * one exists.
+ */
+ if (*opts == '\0') {
+ malloc_write("<jemalloc>: Conf string "
+ "ends with comma\n");
+ }
+ *vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
+ accept = true;
+ break;
+ case '\0':
+ *vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
+ accept = true;
+ break;
+ default:
+ opts++;
+ break;
+ }
}
-#ifdef DYNAMIC_PAGE_SHIFT
- /* Get page size. */
- {
- long result;
+ *opts_p = opts;
+ return (false);
+}
- result = sysconf(_SC_PAGESIZE);
- assert(result != -1);
- pagesize = (unsigned)result;
+static void
+malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
+ size_t vlen)
+{
+ char buf[PATH_MAX + 1];
- /*
- * We assume that pagesize is a power of 2 when calculating
- * pagesize_mask and lg_pagesize.
- */
- assert(((result - 1) & result) == 0);
- pagesize_mask = result - 1;
- lg_pagesize = ffs((int)result) - 1;
- }
-#endif
+ malloc_write("<jemalloc>: ");
+ malloc_write(msg);
+ malloc_write(": ");
+ memcpy(buf, k, klen);
+ memcpy(&buf[klen], ":", 1);
+ memcpy(&buf[klen+1], v, vlen);
+ buf[klen+1+vlen] = '\0';
+ malloc_write(buf);
+ malloc_write("\n");
+}
- for (i = 0; i < 3; i++) {
- unsigned j;
+static void
+malloc_conf_init(void)
+{
+ unsigned i;
+ char buf[PATH_MAX + 1];
+ const char *opts, *k, *v;
+ size_t klen, vlen;
+ for (i = 0; i < 3; i++) {
/* Get runtime configuration. */
switch (i) {
case 0:
- if ((linklen = readlink("/etc/jemalloc.conf", buf,
- sizeof(buf) - 1)) != -1) {
+ if (JEMALLOC_P(malloc_conf) != NULL) {
/*
- * Use the contents of the "/etc/jemalloc.conf"
- * symbolic link's name.
+ * Use options that were compiled into the
+ * program.
*/
- buf[linklen] = '\0';
- opts = buf;
+ opts = JEMALLOC_P(malloc_conf);
} else {
/* No configuration specified. */
buf[0] = '\0';
opts = buf;
}
break;
- case 1:
- if ((opts = getenv("JEMALLOC_OPTIONS")) != NULL) {
+ case 1: {
+ int linklen;
+ const char *linkname =
+#ifdef JEMALLOC_PREFIX
+ "/etc/"JEMALLOC_PREFIX"malloc.conf"
+#else
+ "/etc/malloc.conf"
+#endif
+ ;
+
+ if ((linklen = readlink(linkname, buf,
+ sizeof(buf) - 1)) != -1) {
/*
- * Do nothing; opts is already initialized to
- * the value of the JEMALLOC_OPTIONS
- * environment variable.
+ * Use the contents of the "/etc/malloc.conf"
+ * symbolic link's name.
*/
+ buf[linklen] = '\0';
+ opts = buf;
} else {
/* No configuration specified. */
buf[0] = '\0';
opts = buf;
}
break;
- case 2:
- if (JEMALLOC_P(malloc_options) != NULL) {
+ }
+ case 2: {
+ const char *envname =
+#ifdef JEMALLOC_PREFIX
+ JEMALLOC_CPREFIX"MALLOC_CONF"
+#else
+ "MALLOC_CONF"
+#endif
+ ;
+
+ if ((opts = getenv(envname)) != NULL) {
/*
- * Use options that were compiled into the
- * program.
+ * Do nothing; opts is already initialized to
+ * the value of the MALLOC_CONF
+ * environment variable.
*/
- opts = JEMALLOC_P(malloc_options);
} else {
/* No configuration specified. */
buf[0] = '\0';
opts = buf;
}
break;
+ }
default:
/* NOTREACHED */
assert(false);
@@ -361,249 +438,194 @@ malloc_init_hard(void)
opts = buf;
}
- for (j = 0; opts[j] != '\0'; j++) {
- unsigned k, nreps;
- bool nseen;
-
- /* Parse repetition count, if any. */
- for (nreps = 0, nseen = false;; j++, nseen = true) {
- switch (opts[j]) {
- case '0': case '1': case '2': case '3':
- case '4': case '5': case '6': case '7':
- case '8': case '9':
- nreps *= 10;
- nreps += opts[j] - '0';
- break;
- default:
- goto MALLOC_OUT;
- }
+ while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
+ &vlen) == false) {
+#define CONF_HANDLE_BOOL(n) \
+ if (sizeof(#n)-1 == klen && strncmp(#n, k, \
+ klen) == 0) { \
+ if (strncmp("true", v, vlen) == 0 && \
+ vlen == sizeof("true")-1) \
+ opt_##n = true; \
+ else if (strncmp("false", v, vlen) == \
+ 0 && vlen == sizeof("false")-1) \
+ opt_##n = false; \
+ else { \
+ malloc_conf_error( \
+ "Invalid conf value", \
+ k, klen, v, vlen); \
+ } \
+ continue; \
}
-MALLOC_OUT:
- if (nseen == false)
- nreps = 1;
-
- for (k = 0; k < nreps; k++) {
- switch (opts[j]) {
- case 'a':
- opt_abort = false;
- break;
- case 'A':
- opt_abort = true;
- break;
-#ifdef JEMALLOC_PROF
- case 'b':
- if (opt_lg_prof_bt_max > 0)
- opt_lg_prof_bt_max--;
- break;
- case 'B':
- if (opt_lg_prof_bt_max < LG_PROF_BT_MAX)
- opt_lg_prof_bt_max++;
- break;
-#endif
- case 'c':
- if (opt_lg_cspace_max - 1 >
- opt_lg_qspace_max &&
- opt_lg_cspace_max >
- LG_CACHELINE)
- opt_lg_cspace_max--;
- break;
- case 'C':
- if (opt_lg_cspace_max < PAGE_SHIFT
- - 1)
- opt_lg_cspace_max++;
- break;
- case 'd':
- if (opt_lg_dirty_mult + 1 <
- (sizeof(size_t) << 3))
- opt_lg_dirty_mult++;
- break;
- case 'D':
- if (opt_lg_dirty_mult >= 0)
- opt_lg_dirty_mult--;
- break;
-#ifdef JEMALLOC_PROF
- case 'e':
- opt_prof_active = false;
- break;
- case 'E':
- opt_prof_active = true;
- break;
- case 'f':
- opt_prof = false;
- break;
- case 'F':
- opt_prof = true;
- break;
-#endif
-#ifdef JEMALLOC_TCACHE
- case 'g':
- if (opt_lg_tcache_gc_sweep >= 0)
- opt_lg_tcache_gc_sweep--;
- break;
- case 'G':
- if (opt_lg_tcache_gc_sweep + 1 <
- (sizeof(size_t) << 3))
- opt_lg_tcache_gc_sweep++;
- break;
- case 'h':
- opt_tcache = false;
- break;
- case 'H':
- opt_tcache = true;
- break;
-#endif
-#ifdef JEMALLOC_PROF
- case 'i':
- if (opt_lg_prof_interval >= 0)
- opt_lg_prof_interval--;
- break;
- case 'I':
- if (opt_lg_prof_interval + 1 <
- (sizeof(uint64_t) << 3))
- opt_lg_prof_interval++;
- break;
-#endif
+#define CONF_HANDLE_SIZE_T(n, min, max) \
+ if (sizeof(#n)-1 == klen && strncmp(#n, k, \
+ klen) == 0) { \
+ unsigned long ul; \
+ char *end; \
+ \
+ errno = 0; \
+ ul = strtoul(v, &end, 0); \
+ if (errno != 0 || (uintptr_t)end - \
+ (uintptr_t)v != vlen) { \
+ malloc_conf_error( \
+ "Invalid conf value", \
+ k, klen, v, vlen); \
+ } else if (ul < min || ul > max) { \
+ malloc_conf_error( \
+ "Out-of-range conf value", \
+ k, klen, v, vlen); \
+ } else \
+ opt_##n = ul; \
+ continue; \
+ }
+#define CONF_HANDLE_SSIZE_T(n, min, max) \
+ if (sizeof(#n)-1 == klen && strncmp(#n, k, \
+ klen) == 0) { \
+ long l; \
+ char *end; \
+ \
+ errno = 0; \
+ l = strtol(v, &end, 0); \
+ if (errno != 0 || (uintptr_t)end - \
+ (uintptr_t)v != vlen) { \
+ malloc_conf_error( \
+ "Invalid conf value", \
+ k, klen, v, vlen); \
+ } else if (l < (ssize_t)min || l > \
+ (ssize_t)max) { \
+ malloc_conf_error( \
+ "Out-of-range conf value", \
+ k, klen, v, vlen); \
+ } else \
+ opt_##n = l; \
+ continue; \
+ }
+#define CONF_HANDLE_CHAR_P(n, d) \
+ if (sizeof(#n)-1 == klen && strncmp(#n, k, \
+ klen) == 0) { \
+ size_t cpylen = (vlen <= \
+ sizeof(opt_##n)-1) ? vlen : \
+ sizeof(opt_##n)-1; \
+ strncpy(opt_##n, v, cpylen); \
+ opt_##n[cpylen] = '\0'; \
+ continue; \
+ }
+
+ CONF_HANDLE_BOOL(abort)
+ CONF_HANDLE_SIZE_T(lg_qspace_max, LG_QUANTUM,
+ PAGE_SHIFT-1)
+ CONF_HANDLE_SIZE_T(lg_cspace_max, LG_QUANTUM,
+ PAGE_SHIFT-1)
+ /*
+ * Chunks always require at least one header page,
+ * plus one data page.
+ */
+ CONF_HANDLE_SIZE_T(lg_chunk, PAGE_SHIFT+1,
+ (sizeof(size_t) << 3) - 1)
+ CONF_HANDLE_SIZE_T(narenas, 1, SIZE_T_MAX)
+ CONF_HANDLE_SSIZE_T(lg_dirty_mult, -1,
+ (sizeof(size_t) << 3) - 1)
+ CONF_HANDLE_BOOL(stats_print)
#ifdef JEMALLOC_FILL
- case 'j':
- opt_junk = false;
- break;
- case 'J':
- opt_junk = true;
- break;
-#endif
- case 'k':
- /*
- * Chunks always require at least one
- * header page, plus one data page.
- */
- if ((1U << (opt_lg_chunk - 1)) >=
- (2U << PAGE_SHIFT))
- opt_lg_chunk--;
- break;
- case 'K':
- if (opt_lg_chunk + 1 <
- (sizeof(size_t) << 3))
- opt_lg_chunk++;
- break;
-#ifdef JEMALLOC_PROF
- case 'l':
- opt_prof_leak = false;
- break;
- case 'L':
- opt_prof_leak = true;
- break;
-#endif
-#ifdef JEMALLOC_TCACHE
- case 'm':
- if (opt_lg_tcache_maxclass >= 0)
- opt_lg_tcache_maxclass--;
- break;
- case 'M':
- if (opt_lg_tcache_maxclass + 1 <
- (sizeof(size_t) << 3))
- opt_lg_tcache_maxclass++;
- break;
-#endif
- case 'n':
- opt_narenas_lshift--;
- break;
- case 'N':
- opt_narenas_lshift++;
- break;
-#ifdef JEMALLOC_SWAP
- case 'o':
- opt_overcommit = false;
- break;
- case 'O':
- opt_overcommit = true;
- break;
-#endif
- case 'p':
- opt_stats_print = false;
- break;
- case 'P':
- opt_stats_print = true;
- break;
- case 'q':
- if (opt_lg_qspace_max > LG_QUANTUM)
- opt_lg_qspace_max--;
- break;
- case 'Q':
- if (opt_lg_qspace_max + 1 <
- opt_lg_cspace_max)
- opt_lg_qspace_max++;
- break;
-#ifdef JEMALLOC_PROF
- case 'r':
- opt_prof_accum = false;
- break;
- case 'R':
- opt_prof_accum = true;
- break;
- case 's':
- if (opt_lg_prof_sample > 0)
- opt_lg_prof_sample--;
- break;
- case 'S':
- if (opt_lg_prof_sample + 1 <
- (sizeof(uint64_t) << 3))
- opt_lg_prof_sample++;
- break;
- case 't':
- if (opt_lg_prof_tcmax >= 0)
- opt_lg_prof_tcmax--;
- break;
- case 'T':
- if (opt_lg_prof_tcmax + 1 <
- (sizeof(size_t) << 3))
- opt_lg_prof_tcmax++;
- break;
- case 'u':
- opt_prof_udump = false;
- break;
- case 'U':
- opt_prof_udump = true;
- break;
+ CONF_HANDLE_BOOL(junk)
+ CONF_HANDLE_BOOL(zero)
#endif
#ifdef JEMALLOC_SYSV
- case 'v':
- opt_sysv = false;
- break;
- case 'V':
- opt_sysv = true;
- break;
+ CONF_HANDLE_BOOL(sysv)
#endif
#ifdef JEMALLOC_XMALLOC
- case 'x':
- opt_xmalloc = false;
- break;
- case 'X':
- opt_xmalloc = true;
- break;
+ CONF_HANDLE_BOOL(xmalloc)
#endif
-#ifdef JEMALLOC_FILL
- case 'z':
- opt_zero = false;
- break;
- case 'Z':
- opt_zero = true;
- break;
-#endif
- default: {
- char cbuf[2];
-
- cbuf[0] = opts[j];
- cbuf[1] = '\0';
- malloc_write(
- "<jemalloc>: Unsupported character "
- "in malloc options: '");
- malloc_write(cbuf);
- malloc_write("'\n");
- }
- }
- }
+#ifdef JEMALLOC_TCACHE
+ CONF_HANDLE_BOOL(tcache)
+ CONF_HANDLE_SSIZE_T(lg_tcache_gc_sweep, -1,
+ (sizeof(size_t) << 3) - 1)
+ CONF_HANDLE_SSIZE_T(lg_tcache_max, -1,
+ (sizeof(size_t) << 3) - 1)
+#endif
+#ifdef JEMALLOC_PROF
+ CONF_HANDLE_BOOL(prof)
+ CONF_HANDLE_CHAR_P(prof_prefix, "jeprof")
+ CONF_HANDLE_SIZE_T(lg_prof_bt_max, 0, LG_PROF_BT_MAX)
+ CONF_HANDLE_BOOL(prof_active)
+ CONF_HANDLE_SSIZE_T(lg_prof_sample, 0,
+ (sizeof(uint64_t) << 3) - 1)
+ CONF_HANDLE_BOOL(prof_accum)
+ CONF_HANDLE_SSIZE_T(lg_prof_tcmax, -1,
+ (sizeof(size_t) << 3) - 1)
+ CONF_HANDLE_SSIZE_T(lg_prof_interval, -1,
+ (sizeof(uint64_t) << 3) - 1)
+ CONF_HANDLE_BOOL(prof_gdump)
+ CONF_HANDLE_BOOL(prof_leak)
+#endif
+#ifdef JEMALLOC_SWAP
+ CONF_HANDLE_BOOL(overcommit)
+#endif
+ malloc_conf_error("Invalid conf pair", k, klen, v,
+ vlen);
+#undef CONF_HANDLE_BOOL
+#undef CONF_HANDLE_SIZE_T
+#undef CONF_HANDLE_SSIZE_T
+#undef CONF_HANDLE_CHAR_P
+ }
+
+ /* Validate configuration of options that are inter-related. */
+ if (opt_lg_qspace_max+1 >= opt_lg_cspace_max) {
+ malloc_write("<jemalloc>: Invalid lg_[qc]space_max "
+ "relationship; restoring defaults\n");
+ opt_lg_qspace_max = LG_QSPACE_MAX_DEFAULT;
+ opt_lg_cspace_max = LG_CSPACE_MAX_DEFAULT;
}
}
+}
+
+static bool
+malloc_init_hard(void)
+{
+ arena_t *init_arenas[1];
+
+ malloc_mutex_lock(&init_lock);
+ if (malloc_initialized || malloc_initializer == pthread_self()) {
+ /*
+ * Another thread initialized the allocator before this one
+ * acquired init_lock, or this thread is the initializing
+ * thread, and it is recursively allocating.
+ */
+ malloc_mutex_unlock(&init_lock);
+ return (false);
+ }
+ if (malloc_initializer != (unsigned long)0) {
+ /* Busy-wait until the initializing thread completes. */
+ do {
+ malloc_mutex_unlock(&init_lock);
+ CPU_SPINWAIT;
+ malloc_mutex_lock(&init_lock);
+ } while (malloc_initialized == false);
+ malloc_mutex_unlock(&init_lock);
+ return (false);
+ }
+
+#ifdef DYNAMIC_PAGE_SHIFT
+ /* Get page size. */
+ {
+ long result;
+
+ result = sysconf(_SC_PAGESIZE);
+ assert(result != -1);
+ pagesize = (unsigned)result;
+
+ /*
+ * We assume that pagesize is a power of 2 when calculating
+ * pagesize_mask and lg_pagesize.
+ */
+ assert(((result - 1) & result) == 0);
+ pagesize_mask = result - 1;
+ lg_pagesize = ffs((int)result) - 1;
+ }
+#endif
+
+ prof_boot0();
+
+ malloc_conf_init();
/* Register fork handlers. */
if (pthread_atfork(jemalloc_prefork, jemalloc_postfork,
@@ -638,7 +660,7 @@ MALLOC_OUT:
}
#ifdef JEMALLOC_PROF
- prof_boot0();
+ prof_boot1();
#endif
if (arena_boot()) {
@@ -692,7 +714,7 @@ MALLOC_OUT:
malloc_mutex_init(&arenas_lock);
#ifdef JEMALLOC_PROF
- if (prof_boot1()) {
+ if (prof_boot2()) {
malloc_mutex_unlock(&init_lock);
return (true);
}
@@ -704,31 +726,29 @@ MALLOC_OUT:
ncpus = malloc_ncpus();
malloc_mutex_lock(&init_lock);
- if (ncpus > 1) {
+ if (opt_narenas == 0) {
/*
* For SMP systems, create more than one arena per CPU by
* default.
*/
- opt_narenas_lshift += 2;
+ if (ncpus > 1)
+ opt_narenas = ncpus << 2;
+ else
+ opt_narenas = 1;
}
+ narenas = opt_narenas;
+ /*
+ * Make sure that the arenas array can be allocated. In practice, this
+ * limit is enough to allow the allocator to function, but the ctl
+ * machinery will fail to allocate memory at far lower limits.
+ */
+ if (narenas > chunksize / sizeof(arena_t *)) {
+ char buf[UMAX2S_BUFSIZE];
- /* Determine how many arenas to use. */
- narenas = ncpus;
- if (opt_narenas_lshift > 0) {
- if ((narenas << opt_narenas_lshift) > narenas)
- narenas <<= opt_narenas_lshift;
- /*
- * Make sure not to exceed the limits of what base_alloc() can
- * handle.
- */
- if (narenas * sizeof(arena_t *) > chunksize)
- narenas = chunksize / sizeof(arena_t *);
- } else if (opt_narenas_lshift < 0) {
- if ((narenas >> -opt_narenas_lshift) < narenas)
- narenas >>= -opt_narenas_lshift;
- /* Make sure there is at least one arena. */
- if (narenas == 0)
- narenas = 1;
+ narenas = chunksize / sizeof(arena_t *);
+ malloc_write("<jemalloc>: Reducing narenas to limit (");
+ malloc_write(u2s(narenas, 10, buf));
+ malloc_write(")\n");
}
next_arena = (narenas > 0) ? 1 : 0;
diff --git a/jemalloc/src/prof.c b/jemalloc/src/prof.c
index fb0e765..84ce1ba 100644
--- a/jemalloc/src/prof.c
+++ b/jemalloc/src/prof.c
@@ -20,10 +20,11 @@ bool opt_prof_active = true;
size_t opt_lg_prof_bt_max = LG_PROF_BT_MAX_DEFAULT;
size_t opt_lg_prof_sample = LG_PROF_SAMPLE_DEFAULT;
ssize_t opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT;
-bool opt_prof_udump = false;
+bool opt_prof_gdump = false;
bool opt_prof_leak = false;
bool opt_prof_accum = true;
ssize_t opt_lg_prof_tcmax = LG_PROF_TCMAX_DEFAULT;
+char opt_prof_prefix[PATH_MAX + 1];
uint64_t prof_interval;
bool prof_promote;
@@ -64,7 +65,7 @@ static bool prof_booted = false;
static malloc_mutex_t enq_mtx;
static bool enq;
static bool enq_idump;
-static bool enq_udump;
+static bool enq_gdump;
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
@@ -150,7 +151,7 @@ prof_enter(void)
static inline void
prof_leave(void)
{
- bool idump, udump;
+ bool idump, gdump;
malloc_mutex_unlock(&bt2ctx_mtx);
@@ -158,14 +159,14 @@ prof_leave(void)
enq = false;
idump = enq_idump;
enq_idump = false;
- udump = enq_udump;
- enq_udump = false;
+ gdump = enq_gdump;
+ enq_gdump = false;
malloc_mutex_unlock(&enq_mtx);
if (idump)
prof_idump();
- if (udump)
- prof_udump();
+ if (gdump)
+ prof_gdump();
}
#ifdef JEMALLOC_PROF_LIBGCC
@@ -681,22 +682,22 @@ prof_dump_ctx(prof_ctx_t *ctx, prof_bt_t *bt, bool propagate_err)
return (false);
}
- if (prof_write(umax2s(ctx->cnt_summed.curobjs, 10, buf), propagate_err)
+ if (prof_write(u2s(ctx->cnt_summed.curobjs, 10, buf), propagate_err)
|| prof_write(": ", propagate_err)
- || prof_write(umax2s(ctx->cnt_summed.curbytes, 10, buf),
+ || prof_write(u2s(ctx->cnt_summed.curbytes, 10, buf),
propagate_err)
|| prof_write(" [", propagate_err)
- || prof_write(umax2s(ctx->cnt_summed.accumobjs, 10, buf),
+ || prof_write(u2s(ctx->cnt_summed.accumobjs, 10, buf),
propagate_err)
|| prof_write(": ", propagate_err)
- || prof_write(umax2s(ctx->cnt_summed.accumbytes, 10, buf),
+ || prof_write(u2s(ctx->cnt_summed.accumbytes, 10, buf),
propagate_err)
|| prof_write("] @", propagate_err))
return (true);
for (i = 0; i < bt->len; i++) {
if (prof_write(" 0x", propagate_err)
- || prof_write(umax2s((uintptr_t)bt->vec[i], 16, buf),
+ || prof_write(u2s((uintptr_t)bt->vec[i], 16, buf),
propagate_err))
return (true);
}
@@ -725,7 +726,7 @@ prof_dump_maps(bool propagate_err)
memcpy(&mpath[i], s, slen);
i += slen;
- s = umax2s(getpid(), 10, buf);
+ s = u2s(getpid(), 10, buf);
slen = strlen(s);
memcpy(&mpath[i], s, slen);
i += slen;
@@ -799,13 +800,13 @@ prof_dump(const char *filename, bool leakcheck, bool propagate_err)
/* Dump profile header. */
if (prof_write("heap profile: ", propagate_err)
- || prof_write(umax2s(cnt_all.curobjs, 10, buf), propagate_err)
+ || prof_write(u2s(cnt_all.curobjs, 10, buf), propagate_err)
|| prof_write(": ", propagate_err)
- || prof_write(umax2s(cnt_all.curbytes, 10, buf), propagate_err)
+ || prof_write(u2s(cnt_all.curbytes, 10, buf), propagate_err)
|| prof_write(" [", propagate_err)
- || prof_write(umax2s(cnt_all.accumobjs, 10, buf), propagate_err)
+ || prof_write(u2s(cnt_all.accumobjs, 10, buf), propagate_err)
|| prof_write(": ", propagate_err)
- || prof_write(umax2s(cnt_all.accumbytes, 10, buf), propagate_err))
+ || prof_write(u2s(cnt_all.accumbytes, 10, buf), propagate_err))
goto ERROR;
if (opt_lg_prof_sample == 0) {
@@ -813,7 +814,7 @@ prof_dump(const char *filename, bool leakcheck, bool propagate_err)
goto ERROR;
} else {
if (prof_write("] @ heap_v2/", propagate_err)
- || prof_write(umax2s((uint64_t)1U << opt_lg_prof_sample, 10,
+ || prof_write(u2s((uint64_t)1U << opt_lg_prof_sample, 10,
buf), propagate_err)
|| prof_write("\n", propagate_err))
goto ERROR;
@@ -837,12 +838,12 @@ prof_dump(const char *filename, bool leakcheck, bool propagate_err)
if (leakcheck && cnt_all.curbytes != 0) {
malloc_write("<jemalloc>: Leak summary: ");
- malloc_write(umax2s(cnt_all.curbytes, 10, buf));
+ malloc_write(u2s(cnt_all.curbytes, 10, buf));
malloc_write((cnt_all.curbytes != 1) ? " bytes, " : " byte, ");
- malloc_write(umax2s(cnt_all.curobjs, 10, buf));
+ malloc_write(u2s(cnt_all.curobjs, 10, buf));
malloc_write((cnt_all.curobjs != 1) ? " objects, " :
" object, ");
- malloc_write(umax2s(leak_nctx, 10, buf));
+ malloc_write(u2s(leak_nctx, 10, buf));
malloc_write((leak_nctx != 1) ? " contexts\n" : " context\n");
malloc_write("<jemalloc>: Run pprof on \"");
malloc_write(filename);
@@ -872,31 +873,21 @@ prof_dump_filename(char *filename, char v, int64_t vseq)
* Construct a filename of the form:
*
* <prefix>.<pid>.<seq>.v<vseq>.heap\0
- * or
- * jeprof.<pid>.<seq>.v<vseq>.heap\0
*/
i = 0;
- /*
- * Use JEMALLOC_PROF_PREFIX if it's set, and if it is short enough to
- * avoid overflowing DUMP_FILENAME_BUFSIZE. The result may exceed
- * PATH_MAX, but creat(2) will catch that problem.
- */
- if ((s = getenv("JEMALLOC_PROF_PREFIX")) != NULL
- && strlen(s) + (DUMP_FILENAME_BUFSIZE - PATH_MAX) <= PATH_MAX) {
- slen = strlen(s);
- memcpy(&filename[i], s, slen);
- i += slen;
+ s = opt_prof_prefix;
+ slen = strlen(s);
+ memcpy(&filename[i], s, slen);
+ i += slen;
- s = ".";
- } else
- s = "jeprof.";
+ s = ".";
slen = strlen(s);
memcpy(&filename[i], s, slen);
i += slen;
- s = umax2s(getpid(), 10, buf);
+ s = u2s(getpid(), 10, buf);
slen = strlen(s);
memcpy(&filename[i], s, slen);
i += slen;
@@ -906,7 +897,7 @@ prof_dump_filename(char *filename, char v, int64_t vseq)
memcpy(&filename[i], s, slen);
i += slen;
- s = umax2s(prof_dump_seq, 10, buf);
+ s = u2s(prof_dump_seq, 10, buf);
prof_dump_seq++;
slen = strlen(s);
memcpy(&filename[i], s, slen);
@@ -921,7 +912,7 @@ prof_dump_filename(char *filename, char v, int64_t vseq)
i++;
if (vseq != 0xffffffffffffffffLLU) {
- s = umax2s(vseq, 10, buf);
+ s = u2s(vseq, 10, buf);
slen = strlen(s);
memcpy(&filename[i], s, slen);
i += slen;
@@ -943,10 +934,12 @@ prof_fdump(void)
if (prof_booted == false)
return;
- malloc_mutex_lock(&prof_dump_seq_mtx);
- prof_dump_filename(filename, 'f', 0xffffffffffffffffLLU);
- malloc_mutex_unlock(&prof_dump_seq_mtx);
- prof_dump(filename, opt_prof_leak, false);
+ if (opt_prof_prefix[0] != '\0') {
+ malloc_mutex_lock(&prof_dump_seq_mtx);
+ prof_dump_filename(filename, 'f', 0xffffffffffffffffLLU);
+ malloc_mutex_unlock(&prof_dump_seq_mtx);
+ prof_dump(filename, opt_prof_leak, false);
+ }
}
void
@@ -964,11 +957,13 @@ prof_idump(void)
}
malloc_mutex_unlock(&enq_mtx);
- malloc_mutex_lock(&prof_dump_seq_mtx);
- prof_dump_filename(filename, 'i', prof_dump_iseq);
- prof_dump_iseq++;
- malloc_mutex_unlock(&prof_dump_seq_mtx);
- prof_dump(filename, false, false);
+ if (opt_prof_prefix[0] != '\0') {
+ malloc_mutex_lock(&prof_dump_seq_mtx);
+ prof_dump_filename(filename, 'i', prof_dump_iseq);
+ prof_dump_iseq++;
+ malloc_mutex_unlock(&prof_dump_seq_mtx);
+ prof_dump(filename, false, false);
+ }
}
bool
@@ -981,6 +976,8 @@ prof_mdump(const char *filename)
if (filename == NULL) {
/* No filename specified, so automatically generate one. */
+ if (opt_prof_prefix[0] == '\0')
+ return (true);
malloc_mutex_lock(&prof_dump_seq_mtx);
prof_dump_filename(filename_buf, 'm', prof_dump_mseq);
prof_dump_mseq++;
@@ -991,7 +988,7 @@ prof_mdump(const char *filename)
}
void
-prof_udump(void)
+prof_gdump(void)
{
char filename[DUMP_FILENAME_BUFSIZE];
@@ -999,17 +996,19 @@ prof_udump(void)
return;
malloc_mutex_lock(&enq_mtx);
if (enq) {
- enq_udump = true;
+ enq_gdump = true;
malloc_mutex_unlock(&enq_mtx);
return;
}
malloc_mutex_unlock(&enq_mtx);
- malloc_mutex_lock(&prof_dump_seq_mtx);
- prof_dump_filename(filename, 'u', prof_dump_useq);
- prof_dump_useq++;
- malloc_mutex_unlock(&prof_dump_seq_mtx);
- prof_dump(filename, false, false);
+ if (opt_prof_prefix[0] != '\0') {
+ malloc_mutex_lock(&prof_dump_seq_mtx);
+ prof_dump_filename(filename, 'u', prof_dump_useq);
+ prof_dump_useq++;
+ malloc_mutex_unlock(&prof_dump_seq_mtx);
+ prof_dump(filename, false, false);
+ }
}
static void
@@ -1122,6 +1121,14 @@ void
prof_boot0(void)
{
+ memcpy(opt_prof_prefix, PROF_PREFIX_DEFAULT,
+ sizeof(PROF_PREFIX_DEFAULT));
+}
+
+void
+prof_boot1(void)
+{
+
/*
* opt_prof and prof_promote must be in their final state before any
* arenas are initialized, so this function must be executed early.
@@ -1133,7 +1140,7 @@ prof_boot0(void)
* automatically dumped.
*/
opt_prof = true;
- opt_prof_udump = false;
+ opt_prof_gdump = false;
prof_interval = 0;
} else if (opt_prof) {
if (opt_lg_prof_interval >= 0) {
@@ -1147,7 +1154,7 @@ prof_boot0(void)
}
bool
-prof_boot1(void)
+prof_boot2(void)
{
if (opt_prof) {
@@ -1171,7 +1178,7 @@ prof_boot1(void)
return (true);
enq = false;
enq_idump = false;
- enq_udump = false;
+ enq_gdump = false;
if (atexit(prof_fdump) != 0) {
malloc_write("<jemalloc>: Error in atexit()\n");
diff --git a/jemalloc/src/stats.c b/jemalloc/src/stats.c
index 9b3271b..3dfe0d2 100644
--- a/jemalloc/src/stats.c
+++ b/jemalloc/src/stats.c
@@ -57,12 +57,12 @@ static void stats_arena_print(void (*write_cb)(void *, const char *),
/*
* We don't want to depend on vsnprintf() for production builds, since that can
- * cause unnecessary bloat for static binaries. umax2s() provides minimal
- * integer printing functionality, so that malloc_printf() use can be limited to
+ * cause unnecessary bloat for static binaries. u2s() provides minimal integer
+ * printing functionality, so that malloc_printf() use can be limited to
* JEMALLOC_STATS code.
*/
char *
-umax2s(uintmax_t x, unsigned base, char *s)
+u2s(uint64_t x, unsigned base, char *s)
{
unsigned i;
@@ -72,8 +72,8 @@ umax2s(uintmax_t x, unsigned base, char *s)
case 10:
do {
i--;
- s[i] = "0123456789"[x % 10];
- x /= 10;
+ s[i] = "0123456789"[x % (uint64_t)10];
+ x /= (uint64_t)10;
} while (x > 0);
break;
case 16:
@@ -86,8 +86,9 @@ umax2s(uintmax_t x, unsigned base, char *s)
default:
do {
i--;
- s[i] = "0123456789abcdefghijklmnopqrstuvwxyz"[x % base];
- x /= base;
+ s[i] = "0123456789abcdefghijklmnopqrstuvwxyz"[x %
+ (uint64_t)base];
+ x /= (uint64_t)base;
} while (x > 0);
}
@@ -374,6 +375,7 @@ void
stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
const char *opts)
{
+ int err;
uint64_t epoch;
size_t u64sz;
char s[UMAX2S_BUFSIZE];
@@ -383,10 +385,27 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
bool bins = true;
bool large = true;
- /* Refresh stats, in case mallctl() was called by the application. */
+ /*
+ * Refresh stats, in case mallctl() was called by the application.
+ *
+ * Check for OOM here, since refreshing the ctl cache can trigger
+ * allocation. In practice, none of the subsequent mallctl()-related
+ * calls in this function will cause OOM if this one succeeds.
+	 */
epoch = 1;
u64sz = sizeof(uint64_t);
- xmallctl("epoch", &epoch, &u64sz, &epoch, sizeof(uint64_t));
+ err = JEMALLOC_P(mallctl)("epoch", &epoch, &u64sz, &epoch,
+ sizeof(uint64_t));
+ if (err != 0) {
+ if (err == EAGAIN) {
+ malloc_write("<jemalloc>: Memory allocation failure in "
+ "mallctl(\"epoch\", ...)\n");
+ return;
+ }
+ malloc_write("<jemalloc>: Failure in mallctl(\"epoch\", "
+ "...)\n");
+ abort();
+ }
if (write_cb == NULL) {
/*
@@ -430,10 +449,12 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
bool bv;
unsigned uv;
ssize_t ssv;
- size_t sv, bsz, ssz;
+ size_t sv, bsz, ssz, sssz, cpsz;
bsz = sizeof(bool);
ssz = sizeof(size_t);
+ sssz = sizeof(ssize_t);
+ cpsz = sizeof(const char *);
CTL_GET("version", &cpv, const char *);
write_cb(cbopaque, "Version: ");
@@ -444,116 +465,140 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
write_cb(cbopaque, bv ? "enabled" : "disabled");
write_cb(cbopaque, "\n");
- write_cb(cbopaque, "Boolean JEMALLOC_OPTIONS: ");
- if ((err = JEMALLOC_P(mallctl)("opt.abort", &bv, &bsz, NULL, 0))
- == 0)
- write_cb(cbopaque, bv ? "A" : "a");
- if ((err = JEMALLOC_P(mallctl)("prof.active", &bv, &bsz,
- NULL, 0)) == 0)
- write_cb(cbopaque, bv ? "E" : "e");
- if ((err = JEMALLOC_P(mallctl)("opt.prof", &bv, &bsz, NULL, 0))
- == 0)
- write_cb(cbopaque, bv ? "F" : "f");
- if ((err = JEMALLOC_P(mallctl)("opt.tcache", &bv, &bsz, NULL,
- 0)) == 0)
- write_cb(cbopaque, bv ? "H" : "h");
- if ((err = JEMALLOC_P(mallctl)("opt.junk", &bv, &bsz, NULL, 0))
- == 0)
- write_cb(cbopaque, bv ? "J" : "j");
- if ((err = JEMALLOC_P(mallctl)("opt.prof_leak", &bv, &bsz, NULL,
- 0)) == 0)
- write_cb(cbopaque, bv ? "L" : "l");
- if ((err = JEMALLOC_P(mallctl)("opt.overcommit", &bv, &bsz,
- NULL, 0)) == 0)
- write_cb(cbopaque, bv ? "O" : "o");
- if ((err = JEMALLOC_P(mallctl)("opt.stats_print", &bv, &bsz,
- NULL, 0)) == 0)
- write_cb(cbopaque, bv ? "P" : "p");
- if ((err = JEMALLOC_P(mallctl)("opt.prof_accum", &bv, &bsz,
- NULL, 0)) == 0)
- write_cb(cbopaque, bv ? "R" : "r");
- if ((err = JEMALLOC_P(mallctl)("opt.prof_udump", &bv, &bsz,
- NULL, 0)) == 0)
- write_cb(cbopaque, bv ? "U" : "u");
- if ((err = JEMALLOC_P(mallctl)("opt.sysv", &bv, &bsz, NULL, 0))
- == 0)
- write_cb(cbopaque, bv ? "V" : "v");
- if ((err = JEMALLOC_P(mallctl)("opt.xmalloc", &bv, &bsz, NULL,
- 0)) == 0)
- write_cb(cbopaque, bv ? "X" : "x");
- if ((err = JEMALLOC_P(mallctl)("opt.zero", &bv, &bsz, NULL, 0))
- == 0)
- write_cb(cbopaque, bv ? "Z" : "z");
- write_cb(cbopaque, "\n");
+#define OPT_WRITE_BOOL(n) \
+ if ((err = JEMALLOC_P(mallctl)("opt."#n, &bv, &bsz, \
+ NULL, 0)) == 0) { \
+ write_cb(cbopaque, " opt."#n": "); \
+ write_cb(cbopaque, bv ? "true" : "false"); \
+ write_cb(cbopaque, "\n"); \
+ }
+#define OPT_WRITE_SIZE_T(n) \
+ if ((err = JEMALLOC_P(mallctl)("opt."#n, &sv, &ssz, \
+ NULL, 0)) == 0) { \
+ write_cb(cbopaque, " opt."#n": "); \
+ write_cb(cbopaque, u2s(sv, 10, s)); \
+ write_cb(cbopaque, "\n"); \
+ }
+#define OPT_WRITE_SSIZE_T(n) \
+ if ((err = JEMALLOC_P(mallctl)("opt."#n, &ssv, &sssz, \
+ NULL, 0)) == 0) { \
+ if (ssv >= 0) { \
+ write_cb(cbopaque, " opt."#n": "); \
+ write_cb(cbopaque, u2s(ssv, 10, s)); \
+ } else { \
+ write_cb(cbopaque, " opt."#n": -"); \
+ write_cb(cbopaque, u2s(-ssv, 10, s)); \
+ } \
+ write_cb(cbopaque, "\n"); \
+ }
+#define OPT_WRITE_CHAR_P(n) \
+ if ((err = JEMALLOC_P(mallctl)("opt."#n, &cpv, &cpsz, \
+ NULL, 0)) == 0) { \
+ write_cb(cbopaque, " opt."#n": \""); \
+ write_cb(cbopaque, cpv); \
+ write_cb(cbopaque, "\"\n"); \
+ }
+
+ write_cb(cbopaque, "Run-time option settings:\n");
+ OPT_WRITE_BOOL(abort)
+ OPT_WRITE_SIZE_T(lg_qspace_max)
+ OPT_WRITE_SIZE_T(lg_cspace_max)
+ OPT_WRITE_SIZE_T(lg_chunk)
+ OPT_WRITE_SIZE_T(narenas)
+ OPT_WRITE_SSIZE_T(lg_dirty_mult)
+ OPT_WRITE_BOOL(stats_print)
+ OPT_WRITE_BOOL(junk)
+ OPT_WRITE_BOOL(zero)
+ OPT_WRITE_BOOL(sysv)
+ OPT_WRITE_BOOL(xmalloc)
+ OPT_WRITE_BOOL(tcache)
+ OPT_WRITE_SSIZE_T(lg_tcache_gc_sweep)
+ OPT_WRITE_SSIZE_T(lg_tcache_max)
+ OPT_WRITE_BOOL(prof)
+ OPT_WRITE_CHAR_P(prof_prefix)
+ OPT_WRITE_SIZE_T(lg_prof_bt_max)
+ OPT_WRITE_BOOL(prof_active)
+ OPT_WRITE_SSIZE_T(lg_prof_sample)
+ OPT_WRITE_BOOL(prof_accum)
+ OPT_WRITE_SSIZE_T(lg_prof_tcmax)
+ OPT_WRITE_SSIZE_T(lg_prof_interval)
+ OPT_WRITE_BOOL(prof_gdump)
+ OPT_WRITE_BOOL(prof_leak)
+ OPT_WRITE_BOOL(overcommit)
+
+#undef OPT_WRITE_BOOL
+#undef OPT_WRITE_SIZE_T
+#undef OPT_WRITE_SSIZE_T
+#undef OPT_WRITE_CHAR_P
write_cb(cbopaque, "CPUs: ");
- write_cb(cbopaque, umax2s(ncpus, 10, s));
+ write_cb(cbopaque, u2s(ncpus, 10, s));
write_cb(cbopaque, "\n");
CTL_GET("arenas.narenas", &uv, unsigned);
write_cb(cbopaque, "Max arenas: ");
- write_cb(cbopaque, umax2s(uv, 10, s));
+ write_cb(cbopaque, u2s(uv, 10, s));
write_cb(cbopaque, "\n");
write_cb(cbopaque, "Pointer size: ");
- write_cb(cbopaque, umax2s(sizeof(void *), 10, s));
+ write_cb(cbopaque, u2s(sizeof(void *), 10, s));
write_cb(cbopaque, "\n");
CTL_GET("arenas.quantum", &sv, size_t);
write_cb(cbopaque, "Quantum size: ");
- write_cb(cbopaque, umax2s(sv, 10, s));
+ write_cb(cbopaque, u2s(sv, 10, s));
write_cb(cbopaque, "\n");
CTL_GET("arenas.cacheline", &sv, size_t);
write_cb(cbopaque, "Cacheline size (assumed): ");
- write_cb(cbopaque, umax2s(sv, 10, s));
+ write_cb(cbopaque, u2s(sv, 10, s));
write_cb(cbopaque, "\n");
CTL_GET("arenas.subpage", &sv, size_t);
write_cb(cbopaque, "Subpage spacing: ");
- write_cb(cbopaque, umax2s(sv, 10, s));
+ write_cb(cbopaque, u2s(sv, 10, s));
write_cb(cbopaque, "\n");
if ((err = JEMALLOC_P(mallctl)("arenas.tspace_min", &sv, &ssz,
NULL, 0)) == 0) {
write_cb(cbopaque, "Tiny 2^n-spaced sizes: [");
- write_cb(cbopaque, umax2s(sv, 10, s));
+ write_cb(cbopaque, u2s(sv, 10, s));
write_cb(cbopaque, "..");
CTL_GET("arenas.tspace_max", &sv, size_t);
- write_cb(cbopaque, umax2s(sv, 10, s));
+ write_cb(cbopaque, u2s(sv, 10, s));
write_cb(cbopaque, "]\n");
}
CTL_GET("arenas.qspace_min", &sv, size_t);
write_cb(cbopaque, "Quantum-spaced sizes: [");
- write_cb(cbopaque, umax2s(sv, 10, s));
+ write_cb(cbopaque, u2s(sv, 10, s));
write_cb(cbopaque, "..");
CTL_GET("arenas.qspace_max", &sv, size_t);
- write_cb(cbopaque, umax2s(sv, 10, s));
+ write_cb(cbopaque, u2s(sv, 10, s));
write_cb(cbopaque, "]\n");
CTL_GET("arenas.cspace_min", &sv, size_t);
write_cb(cbopaque, "Cacheline-spaced sizes: [");
- write_cb(cbopaque, umax2s(sv, 10, s));
+ write_cb(cbopaque, u2s(sv, 10, s));
write_cb(cbopaque, "..");
CTL_GET("arenas.cspace_max", &sv, size_t);
- write_cb(cbopaque, umax2s(sv, 10, s));
+ write_cb(cbopaque, u2s(sv, 10, s));
write_cb(cbopaque, "]\n");
CTL_GET("arenas.sspace_min", &sv, size_t);
write_cb(cbopaque, "Subpage-spaced sizes: [");
- write_cb(cbopaque, umax2s(sv, 10, s));
+ write_cb(cbopaque, u2s(sv, 10, s));
write_cb(cbopaque, "..");
CTL_GET("arenas.sspace_max", &sv, size_t);
- write_cb(cbopaque, umax2s(sv, 10, s));
+ write_cb(cbopaque, u2s(sv, 10, s));
write_cb(cbopaque, "]\n");
CTL_GET("opt.lg_dirty_mult", &ssv, ssize_t);
if (ssv >= 0) {
write_cb(cbopaque,
"Min active:dirty page ratio per arena: ");
- write_cb(cbopaque, umax2s((1U << ssv), 10, s));
+ write_cb(cbopaque, u2s((1U << ssv), 10, s));
write_cb(cbopaque, ":1\n");
} else {
write_cb(cbopaque,
@@ -563,7 +608,7 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
&ssz, NULL, 0)) == 0) {
write_cb(cbopaque,
"Maximum thread-cached size class: ");
- write_cb(cbopaque, umax2s(sv, 10, s));
+ write_cb(cbopaque, u2s(sv, 10, s));
write_cb(cbopaque, "\n");
}
if ((err = JEMALLOC_P(mallctl)("opt.lg_tcache_gc_sweep", &ssv,
@@ -573,50 +618,51 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
CTL_GET("opt.tcache", &tcache_enabled, bool);
write_cb(cbopaque, "Thread cache GC sweep interval: ");
write_cb(cbopaque, tcache_enabled && ssv >= 0 ?
- umax2s(tcache_gc_sweep, 10, s) : "N/A");
+ u2s(tcache_gc_sweep, 10, s) : "N/A");
write_cb(cbopaque, "\n");
}
if ((err = JEMALLOC_P(mallctl)("opt.prof", &bv, &bsz, NULL, 0))
== 0 && bv) {
CTL_GET("opt.lg_prof_bt_max", &sv, size_t);
write_cb(cbopaque, "Maximum profile backtrace depth: ");
- write_cb(cbopaque, umax2s((1U << sv), 10, s));
+ write_cb(cbopaque, u2s((1U << sv), 10, s));
write_cb(cbopaque, "\n");
CTL_GET("opt.lg_prof_tcmax", &ssv, ssize_t);
write_cb(cbopaque,
"Maximum per thread backtrace cache: ");
if (ssv >= 0) {
- write_cb(cbopaque, umax2s((1U << ssv), 10, s));
+ write_cb(cbopaque, u2s((1U << ssv), 10, s));
write_cb(cbopaque, " (2^");
- write_cb(cbopaque, umax2s(ssv, 10, s));
+ write_cb(cbopaque, u2s(ssv, 10, s));
write_cb(cbopaque, ")\n");
} else
write_cb(cbopaque, "N/A\n");
CTL_GET("opt.lg_prof_sample", &sv, size_t);
write_cb(cbopaque, "Average profile sample interval: ");
- write_cb(cbopaque, umax2s((1U << sv), 10, s));
+ write_cb(cbopaque, u2s((((uint64_t)1U) << sv), 10, s));
write_cb(cbopaque, " (2^");
- write_cb(cbopaque, umax2s(sv, 10, s));
+ write_cb(cbopaque, u2s(sv, 10, s));
write_cb(cbopaque, ")\n");
CTL_GET("opt.lg_prof_interval", &ssv, ssize_t);
write_cb(cbopaque, "Average profile dump interval: ");
if (ssv >= 0) {
- write_cb(cbopaque, umax2s((1U << ssv), 10, s));
+ write_cb(cbopaque, u2s((((uint64_t)1U) << ssv),
+ 10, s));
write_cb(cbopaque, " (2^");
- write_cb(cbopaque, umax2s(ssv, 10, s));
+ write_cb(cbopaque, u2s(ssv, 10, s));
write_cb(cbopaque, ")\n");
} else
write_cb(cbopaque, "N/A\n");
}
CTL_GET("arenas.chunksize", &sv, size_t);
write_cb(cbopaque, "Chunk size: ");
- write_cb(cbopaque, umax2s(sv, 10, s));
+ write_cb(cbopaque, u2s(sv, 10, s));
CTL_GET("opt.lg_chunk", &sv, size_t);
write_cb(cbopaque, " (2^");
- write_cb(cbopaque, umax2s(sv, 10, s));
+ write_cb(cbopaque, u2s(sv, 10, s));
write_cb(cbopaque, ")\n");
}
diff --git a/jemalloc/src/tcache.c b/jemalloc/src/tcache.c
index 3fb8f2b..cbbe7a1 100644
--- a/jemalloc/src/tcache.c
+++ b/jemalloc/src/tcache.c
@@ -5,7 +5,7 @@
/* Data. */
bool opt_tcache = true;
-ssize_t opt_lg_tcache_maxclass = LG_TCACHE_MAXCLASS_DEFAULT;
+ssize_t opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT;
ssize_t opt_lg_tcache_gc_sweep = LG_TCACHE_GC_SWEEP_DEFAULT;
/* Map of thread-specific caches. */
@@ -384,16 +384,16 @@ tcache_boot(void)
if (opt_tcache) {
/*
- * If necessary, clamp opt_lg_tcache_maxclass, now that
+ * If necessary, clamp opt_lg_tcache_max, now that
* small_maxclass and arena_maxclass are known.
*/
- if (opt_lg_tcache_maxclass < 0 || (1U <<
- opt_lg_tcache_maxclass) < small_maxclass)
+ if (opt_lg_tcache_max < 0 || (1U <<
+ opt_lg_tcache_max) < small_maxclass)
tcache_maxclass = small_maxclass;
- else if ((1U << opt_lg_tcache_maxclass) > arena_maxclass)
+ else if ((1U << opt_lg_tcache_max) > arena_maxclass)
tcache_maxclass = arena_maxclass;
else
- tcache_maxclass = (1U << opt_lg_tcache_maxclass);
+ tcache_maxclass = (1U << opt_lg_tcache_max);
nhbins = nbins + (tcache_maxclass >> PAGE_SHIFT);