summary refs log tree commit diff stats
path: root/src
diff options
context:
space:
mode:
authorJason Evans <je@fb.com>2012-02-14 02:04:26 (GMT)
committerJason Evans <je@fb.com>2012-02-14 02:04:26 (GMT)
commit0b526ff94da7e59aa947a4d3529b2376794f8b01 (patch)
tree6a8f678b97a57a4279a7774812402660acf15c38 /src
parente7a1058aaa6b2cbdd19da297bf2250f86dcdac89 (diff)
downloadjemalloc-0b526ff94da7e59aa947a4d3529b2376794f8b01.zip
jemalloc-0b526ff94da7e59aa947a4d3529b2376794f8b01.tar.gz
jemalloc-0b526ff94da7e59aa947a4d3529b2376794f8b01.tar.bz2
Remove the opt.lg_prof_tcmax option.
Remove the opt.lg_prof_tcmax option and hard-code a cache size of 1024. This setting is something that users just shouldn't have to worry about. If lock contention actually ends up being a problem, the simple solution available to the user is to reduce sampling frequency.
Diffstat (limited to 'src')
-rw-r--r--src/ctl.c5
-rw-r--r--src/jemalloc.c2
-rw-r--r--src/prof.c8
-rw-r--r--src/stats.c12
4 files changed, 3 insertions, 24 deletions
diff --git a/src/ctl.c b/src/ctl.c
index e33ce67..12b4185 100644
--- a/src/ctl.c
+++ b/src/ctl.c
@@ -80,7 +80,6 @@ CTL_PROTO(opt_lg_prof_interval)
CTL_PROTO(opt_prof_gdump)
CTL_PROTO(opt_prof_leak)
CTL_PROTO(opt_prof_accum)
-CTL_PROTO(opt_lg_prof_tcmax)
CTL_PROTO(arenas_bin_i_size)
CTL_PROTO(arenas_bin_i_nregs)
CTL_PROTO(arenas_bin_i_run_size)
@@ -222,8 +221,7 @@ static const ctl_node_t opt_node[] = {
{NAME("lg_prof_interval"), CTL(opt_lg_prof_interval)},
{NAME("prof_gdump"), CTL(opt_prof_gdump)},
{NAME("prof_leak"), CTL(opt_prof_leak)},
- {NAME("prof_accum"), CTL(opt_prof_accum)},
- {NAME("lg_prof_tcmax"), CTL(opt_lg_prof_tcmax)}
+ {NAME("prof_accum"), CTL(opt_prof_accum)}
};
static const ctl_node_t arenas_bin_i_node[] = {
@@ -1133,7 +1131,6 @@ CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t)
CTL_RO_NL_CGEN(config_prof, opt_prof_gdump, opt_prof_gdump, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool)
-CTL_RO_NL_CGEN(config_prof, opt_lg_prof_tcmax, opt_lg_prof_tcmax, ssize_t)
/******************************************************************************/
diff --git a/src/jemalloc.c b/src/jemalloc.c
index 796c815..d2a6009 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -603,8 +603,6 @@ malloc_conf_init(void)
CONF_HANDLE_SSIZE_T(lg_prof_sample, 0,
(sizeof(uint64_t) << 3) - 1)
CONF_HANDLE_BOOL(prof_accum)
- CONF_HANDLE_SSIZE_T(lg_prof_tcmax, -1,
- (sizeof(size_t) << 3) - 1)
CONF_HANDLE_SSIZE_T(lg_prof_interval, -1,
(sizeof(uint64_t) << 3) - 1)
CONF_HANDLE_BOOL(prof_gdump)
diff --git a/src/prof.c b/src/prof.c
index 113cf15..a4012f0 100644
--- a/src/prof.c
+++ b/src/prof.c
@@ -22,7 +22,6 @@ ssize_t opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT;
bool opt_prof_gdump = false;
bool opt_prof_leak = false;
bool opt_prof_accum = true;
-ssize_t opt_lg_prof_tcmax = LG_PROF_TCMAX_DEFAULT;
char opt_prof_prefix[PATH_MAX + 1];
uint64_t prof_interval;
@@ -519,8 +518,7 @@ prof_lookup(prof_bt_t *bt)
prof_leave();
/* Link a prof_thd_cnt_t into ctx for this thread. */
- if (opt_lg_prof_tcmax >= 0 && ckh_count(&prof_tdata->bt2cnt)
- == (ZU(1) << opt_lg_prof_tcmax)) {
+ if (ckh_count(&prof_tdata->bt2cnt) == PROF_TCMAX) {
assert(ckh_count(&prof_tdata->bt2cnt) > 0);
/*
* Flush the least recently used cnt in order to keep
@@ -535,9 +533,7 @@ prof_lookup(prof_bt_t *bt)
prof_ctx_merge(ret.p->ctx, ret.p);
/* ret can now be re-used. */
} else {
- assert(opt_lg_prof_tcmax < 0 ||
- ckh_count(&prof_tdata->bt2cnt) < (ZU(1) <<
- opt_lg_prof_tcmax));
+ assert(ckh_count(&prof_tdata->bt2cnt) < PROF_TCMAX);
/* Allocate and partially initialize a new cnt. */
ret.v = imalloc(sizeof(prof_thr_cnt_t));
if (ret.p == NULL) {
diff --git a/src/stats.c b/src/stats.c
index 1e90782..86a48c6 100644
--- a/src/stats.c
+++ b/src/stats.c
@@ -515,7 +515,6 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
OPT_WRITE_BOOL(prof_active)
OPT_WRITE_SSIZE_T(lg_prof_sample)
OPT_WRITE_BOOL(prof_accum)
- OPT_WRITE_SSIZE_T(lg_prof_tcmax)
OPT_WRITE_SSIZE_T(lg_prof_interval)
OPT_WRITE_BOOL(prof_gdump)
OPT_WRITE_BOOL(prof_leak)
@@ -622,17 +621,6 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
write_cb(cbopaque, u2s((1U << sv), 10, s));
write_cb(cbopaque, "\n");
- CTL_GET("opt.lg_prof_tcmax", &ssv, ssize_t);
- write_cb(cbopaque,
- "Maximum per thread backtrace cache: ");
- if (ssv >= 0) {
- write_cb(cbopaque, u2s((1U << ssv), 10, s));
- write_cb(cbopaque, " (2^");
- write_cb(cbopaque, u2s(ssv, 10, s));
- write_cb(cbopaque, ")\n");
- } else
- write_cb(cbopaque, "N/A\n");
-
CTL_GET("opt.lg_prof_sample", &sv, size_t);
write_cb(cbopaque, "Average profile sample interval: ");
write_cb(cbopaque, u2s((((uint64_t)1U) << sv), 10, s));